diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000000..b873e47b74
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,25 @@
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v2.3.0
+    hooks:
+      - id: check-yaml
+      - id: check-json
+      - id: end-of-file-fixer
+      - id: trailing-whitespace
+
+  - repo: https://github.com/psf/black
+    rev: stable
+    hooks:
+      - id: black
+
+  - repo: https://github.com/pocc/pre-commit-hooks
+    rev: master
+    hooks:
+      - id: cppcheck
+        args: ["--error-exitcode=0"]
+
+  - repo: https://github.com/crate-ci/typos
+    rev: v1.15.7
+    hooks:
+      - id: typos
+
diff --git a/docs/en/07-develop/03-insert-data/01-sql-writing.mdx b/docs/en/07-develop/03-insert-data/01-sql-writing.mdx
index 3731882fb2..4d1b67e451 100644
--- a/docs/en/07-develop/03-insert-data/01-sql-writing.mdx
+++ b/docs/en/07-develop/03-insert-data/01-sql-writing.mdx
@@ -33,7 +33,7 @@ The below SQL statement is used to insert one row into table "d1001".
 INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31);
 ```
 
-`ts1` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
+`ts1` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
 
 ### Insert Multiple Rows
 
@@ -43,7 +43,7 @@ Multiple rows can be inserted in a single SQL statement. The example below inser
 INSERT INTO d1001 VALUES (ts2, 10.2, 220, 0.23) (ts2, 10.3, 218, 0.25);
 ```
 
-`ts1` and `ts2` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert). 
+`ts1` and `ts2` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert). ### Insert into Multiple Tables @@ -53,7 +53,7 @@ Data can be inserted into multiple tables in the same SQL statement. The example INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31) (ts2, 12.6, 218, 0.33) d1002 VALUES (ts3, 12.3, 221, 0.31); ``` -`ts1`, `ts2` and `ts3` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert). +`ts1`, `ts2` and `ts3` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert). For more details about `INSERT` please refer to [INSERT](/taos-sql/insert). diff --git a/docs/en/07-develop/07-tmq.mdx b/docs/en/07-develop/07-tmq.mdx index 578f38e73d..f5e0378a00 100644 --- a/docs/en/07-develop/07-tmq.mdx +++ b/docs/en/07-develop/07-tmq.mdx @@ -244,6 +244,8 @@ The following SQL statement creates a topic in TDengine: CREATE TOPIC topic_name AS SELECT ts, c1, c2, c3 FROM tmqdb.stb WHERE c1 > 1; ``` +- There is an upper limit to the number of topics created, controlled by the parameter tmqMaxTopicNum, with a default of 20 + Multiple subscription types are supported. #### Subscribe to a Column @@ -265,14 +267,15 @@ You can subscribe to a topic through a SELECT statement. Statements that specify Syntax: ```sql -CREATE TOPIC topic_name AS STABLE stb_name +CREATE TOPIC topic_name [with meta] AS STABLE stb_name [where_condition] ``` Creating a topic in this manner differs from a `SELECT * from stbName` statement as follows: - The table schema can be modified. - Unstructured data is returned. 
The format of the data returned changes based on the supertable schema. -- A different table schema may exist for every data block to be processed. +- The 'with meta' parameter is optional. When selected, statements such as creating super tables and sub tables will be returned, mainly used for Taosx to perform super table migration +- The 'where_condition' parameter is optional and will be used to filter and subscribe to sub tables that meet the criteria. Where conditions cannot have ordinary columns, only tags or tbnames. Functions can be used in where conditions to filter tags, but cannot be aggregate functions because sub table tag values cannot be aggregated. It can also be a constant expression, such as 2>1 (subscribing to all child tables), Or false (subscribe to 0 sub tables) - The data returned does not include tags. ### Subscribe to a Database @@ -280,10 +283,12 @@ Creating a topic in this manner differs from a `SELECT * from stbName` statement Syntax: ```sql -CREATE TOPIC topic_name [WITH META] AS DATABASE db_name; +CREATE TOPIC topic_name [with meta] AS DATABASE db_name; ``` -This SQL statement creates a subscription to all tables in the database. You can add the `WITH META` parameter to include schema changes in the subscription, including creating and deleting supertables; adding, deleting, and modifying columns; and creating, deleting, and modifying the tags of subtables. Consumers can determine the message type from the API. Note that this differs from Kafka. +This SQL statement creates a subscription to all tables in the database. + +- The 'with meta' parameter is optional. 
When selected, it will return statements for creating all super tables and sub tables in the database, mainly used for Taosx database migration ## Create a Consumer @@ -295,7 +300,7 @@ You configure the following parameters when creating a consumer: | `td.connect.user` | string | User Name | | | `td.connect.pass` | string | Password | | | `td.connect.port` | string | Port of the server side | | -| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. | +| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. Each topic can create up to 100 consumer groups. | | `client.id` | string | Client ID | Maximum length: 192. | | `auto.offset.reset` | enum | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) | | `enable.auto.commit` | boolean | Commit automatically; true: user application doesn't need to explicitly commit; false: user application need to handle commit by itself | Default value is true | diff --git a/docs/en/07-develop/09-udf.md b/docs/en/07-develop/09-udf.md index 825d3c6f8b..5137e35c0a 100644 --- a/docs/en/07-develop/09-udf.md +++ b/docs/en/07-develop/09-udf.md @@ -17,7 +17,7 @@ When you create a user-defined function, you must implement standard interface f - For aggregate functions, implement the `aggfn_start`, `aggfn`, and `aggfn_finish` interface functions. - To initialize your function, implement the `udf_init` function. To terminate your function, implement the `udf_destroy` function. -There are strict naming conventions for these interface functions. The names of the start, finish, init, and destroy interfaces must be _start, _finish, _init, and _destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function. +There are strict naming conventions for these interface functions. 
The names of the start, finish, init, and destroy interfaces must be `_start`, `_finish`, `_init`, and `_destroy`, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function. ### Implementing a Scalar Function in C The implementation of a scalar function is described as follows: @@ -318,7 +318,7 @@ The implementation of a scalar UDF is described as follows: def process(input: datablock) -> tuple[output_type]: ``` -Description: this function prcesses datablock, which is the input; you can use datablock.data(row, col) to access the python object at location(row,col); the output is a tuple object consisted of objects of type outputtype +Description: this function processes datablock, which is the input; you can use datablock.data(row, col) to access the python object at location(row,col); the output is a tuple object consisted of objects of type outputtype #### Aggregate UDF Interface @@ -356,7 +356,7 @@ def process(input: datablock) -> tuple[output_type]: # return tuple object consisted of object of type outputtype ``` -Note:process() must be implemeted, init() and destroy() must be defined too but they can do nothing. +Note:process() must be implemented, init() and destroy() must be defined too but they can do nothing. #### Aggregate Template @@ -377,7 +377,7 @@ def finish(buf: bytes) -> output_type: #return obj of type outputtype ``` -Note: aggregate UDF requires init(), destroy(), start(), reduce() and finish() to be impemented. start() generates the initial result in buffer, then the input data is divided into multiple row data blocks, reduce() is invoked for each data block `inputs` and intermediate `buf`, finally finish() is invoked to generate final result from the intermediate result `buf`. +Note: aggregate UDF requires init(), destroy(), start(), reduce() and finish() to be implemented. 
start() generates the initial result in buffer, then the input data is divided into multiple row data blocks, reduce() is invoked for each data block `inputs` and intermediate `buf`, finally finish() is invoked to generate final result from the intermediate result `buf`. ### Data Mapping between TDengine SQL and Python UDF @@ -559,7 +559,7 @@ Note: Prior to TDengine 3.0.5.0 (excluding), updating a UDF requires to restart #### Sample 3: UDF with n arguments -A UDF which accepts n intergers, likee (x1, x2, ..., xn) and output the sum of the product of each value and its sequence number: 1 * x1 + 2 * x2 + ... + n * xn. If there is `null` in the input, then the result is `null`. The difference from sample 1 is that it can accept any number of columns as input and process each column. Assume the program is written in /root/udf/nsum.py: +A UDF which accepts n integers, likee (x1, x2, ..., xn) and output the sum of the product of each value and its sequence number: 1 * x1 + 2 * x2 + ... + n * xn. If there is `null` in the input, then the result is `null`. The difference from sample 1 is that it can accept any number of columns as input and process each column. Assume the program is written in /root/udf/nsum.py: ```python def init(): @@ -607,7 +607,7 @@ Query OK, 4 row(s) in set (0.010653s) #### Sample 4: Utilize 3rd party package -A UDF which accepts a timestamp and output the next closed Sunday. This sample requires to use third party package `moment`, you need to install it firslty. +A UDF which accepts a timestamp and output the next closed Sunday. This sample requires to use third party package `moment`, you need to install it firstly. ```shell pip3 install moment @@ -701,7 +701,7 @@ Query OK, 4 row(s) in set (1.011474s) #### Sample 5: Aggregate Function -An aggregate function which calculates the difference of the maximum and the minimum in a column. An aggregate funnction takes multiple rows as input and output only one data. 
The execution process of an aggregate UDF is like map-reduce, the framework divides the input into multiple parts, each mapper processes one block and the reducer aggregates the result of the mappers. The reduce() of Python UDF has the functionality of both map() and reduce(). The reduce() takes two arguments: the data to be processed; and the result of other tasks executing reduce(). For exmaple, assume the code is in `/root/udf/myspread.py`.
+An aggregate function which calculates the difference of the maximum and the minimum in a column. An aggregate function takes multiple rows as input and output only one data. The execution process of an aggregate UDF is like map-reduce, the framework divides the input into multiple parts, each mapper processes one block and the reducer aggregates the result of the mappers. The reduce() of Python UDF has the functionality of both map() and reduce(). The reduce() takes two arguments: the data to be processed; and the result of other tasks executing reduce(). For example, assume the code is in `/root/udf/myspread.py`.
 
 ```python
 import io
@@ -755,7 +755,7 @@ In this example, we implemented an aggregate function, and added some logging.
 2. log() is the function for logging, it converts the input object to string and output with an end of line
 3. destroy() closes the log file \
 4. start() returns the initial buffer for storing the intermediate result
-5. reduce() processes each daa block and aggregates the result
+5. reduce() processes each data block and aggregates the result
 6. finish() converts the final buffer() to final result\
 
 Create the UDF.
diff --git a/docs/en/12-taos-sql/10-function.md b/docs/en/12-taos-sql/10-function.md index b517bcb3cc..afc1581c22 100644 --- a/docs/en/12-taos-sql/10-function.md +++ b/docs/en/12-taos-sql/10-function.md @@ -672,7 +672,7 @@ If you input a specific column, the number of non-null values in the column is r ELAPSED(ts_primary_key [, time_unit]) ``` -**Description**: `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without `INTERVAL` caluse, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length. +**Description**: `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without `INTERVAL` clause, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length. **Return value type**: Double if the input value is not NULL; @@ -999,18 +999,14 @@ SAMPLE(expr, k) **Description**: _k_ sampling values of a specific column. The applicable range of _k_ is [1,1000]. -**Return value type**: Same as the column being operated plus the associated timestamp +**Return value type**: Same as the column being operated -**Applicable data types**: Any data type except for tags of STable +**Applicable data types**: Any data type **Applicable nested query**: Inner query and Outer query **Applicable table types**: standard tables and supertables -**More explanations**: - -- This function cannot be used in expression calculation. 
- ### TAIL @@ -1055,11 +1051,11 @@ TOP(expr, k) UNIQUE(expr) ``` -**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword, but it can also be used to match tags or timestamp. The first occurrence of a timestamp or tag is used. +**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword. **Return value type**:Same as the data type of the column being operated upon -**Applicable column types**: Any data types except for timestamp +**Applicable column types**: Any data types **Applicable table types**: table, STable diff --git a/docs/en/12-taos-sql/12-distinguished.md b/docs/en/12-taos-sql/12-distinguished.md index b082f7b888..7f0b8c7769 100644 --- a/docs/en/12-taos-sql/12-distinguished.md +++ b/docs/en/12-taos-sql/12-distinguished.md @@ -21,7 +21,7 @@ part_list can be any scalar expression, such as a column, constant, scalar funct A PARTITION BY clause is processed as follows: - The PARTITION BY clause must occur after the WHERE clause -- The PARTITION BY caluse partitions the data according to the specified dimensions, then perform computation on each partition. The performed computation is determined by the rest of the statement - a window clause, GROUP BY clause, or SELECT clause. +- The PARTITION BY clause partitions the data according to the specified dimensions, then perform computation on each partition. The performed computation is determined by the rest of the statement - a window clause, GROUP BY clause, or SELECT clause. - The PARTITION BY clause can be used together with a window clause or GROUP BY clause. In this case, the window or GROUP BY clause takes effect on every partition. 
For example, the following statement partitions the table by the location tag, performs downsampling over a 10 minute window, and returns the maximum value: ```sql diff --git a/docs/en/12-taos-sql/24-show.md b/docs/en/12-taos-sql/24-show.md index eb70a7664b..bd4a60b20e 100644 --- a/docs/en/12-taos-sql/24-show.md +++ b/docs/en/12-taos-sql/24-show.md @@ -36,7 +36,7 @@ Shows information about connections to the system. SHOW CONSUMERS; ``` -Shows information about all active consumers in the system. +Shows information about all consumers in the system. ## SHOW CREATE DATABASE diff --git a/docs/en/14-reference/03-connector/04-java.mdx b/docs/en/14-reference/03-connector/04-java.mdx index ebd2891a9e..e8c407b125 100644 --- a/docs/en/14-reference/03-connector/04-java.mdx +++ b/docs/en/14-reference/03-connector/04-java.mdx @@ -36,7 +36,8 @@ REST connection supports all platforms that can run Java. | taos-jdbcdriver version | major changes | TDengine version | | :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: | -| 3.2.1 | subscription add seek function | 3.0.5.0 or later | +| 3.2.3 | Fixed resultSet data parsing failure in some cases | 3.0.5.0 or later | +| 3.2.2 | subscription add seek function | 3.0.5.0 or later | | 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket | 3.0.3.0 or later | | 3.2.0 | This version has been deprecated | - | | 3.1.0 | JDBC REST connection supports subscription over WebSocket | - | @@ -284,9 +285,9 @@ The configuration parameters in the URL are as follows: - batchfetch: true: pulls result sets in batches when executing queries; false: pulls result sets row by row. The default value is: false. batchfetch uses HTTP for data transfer. JDBC REST supports batch pulls. taos-jdbcdriver and TDengine transfer data via WebSocket connection. 
Compared with HTTP, WebSocket enables JDBC REST connection to support large data volume querying and improve query performance. - charset: specify the charset to parse the string, this parameter is valid only when set batchfetch to true. - batchErrorIgnore: true: when executing executeBatch of Statement, if one SQL execution fails in the middle, continue to execute the following SQL. false: no longer execute any statement after the failed SQL. The default value is: false. -- httpConnectTimeout: REST connection timeout in milliseconds, the default value is 5000 ms. -- httpSocketTimeout: socket timeout in milliseconds, the default value is 5000 ms. It only takes effect when batchfetch is false. -- messageWaitTimeout: message transmission timeout in milliseconds, the default value is 3000 ms. It only takes effect when batchfetch is true. +- httpConnectTimeout: REST connection timeout in milliseconds, the default value is 60000 ms. +- httpSocketTimeout: socket timeout in milliseconds, the default value is 60000 ms. It only takes effect when batchfetch is false. +- messageWaitTimeout: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when batchfetch is true. - useSSL: connecting Securely Using SSL. true: using SSL connection, false: not using SSL connection. - httpPoolSize: size of REST concurrent requests. The default value is 20. @@ -352,9 +353,9 @@ The configuration parameters in properties are as follows. - TSDBDriver.PROPERTY_KEY_CHARSET: In the character set used by the client, the default value is the system character set. - TSDBDriver.PROPERTY_KEY_LOCALE: this only takes effect when using JDBC native connection. Client language environment, the default value is system current locale. - TSDBDriver.PROPERTY_KEY_TIME_ZONE: only takes effect when using JDBC native connection. In the time zone used by the client, the default value is the system's current time zone. 
-- TSDBDriver.HTTP_CONNECT_TIMEOUT: REST connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using JDBC REST connection. -- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in milliseconds, the default value is 5000 ms. It only takes effect when using JDBC REST connection and batchfetch is false. -- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 3000 ms. It only takes effect when using JDBC REST connection and batchfetch is true. +- TSDBDriver.HTTP_CONNECT_TIMEOUT: REST connection timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection. +- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is false. +- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is true. - TSDBDriver.PROPERTY_KEY_USE_SSL: connecting Securely Using SSL. true: using SSL connection, false: not using SSL connection. It only takes effect when using JDBC REST connection. - TSDBDriver.HTTP_POOL_SIZE: size of REST concurrent requests. The default value is 20. For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](/reference/config/#Client-Only). diff --git a/docs/en/14-reference/03-connector/05-go.mdx b/docs/en/14-reference/03-connector/05-go.mdx index 06d643c6c8..b3d4857d75 100644 --- a/docs/en/14-reference/03-connector/05-go.mdx +++ b/docs/en/14-reference/03-connector/05-go.mdx @@ -31,63 +31,78 @@ REST connections are supported on all platforms that can run Go. 
Please refer to [version support list](https://github.com/taosdata/driver-go#remind) -## Supported features +## Handling exceptions -### Native connections +If it is a TDengine error, you can get the error code and error information in the following ways. +```go +// import "github.com/taosdata/driver-go/v3/errors" + if err != nil { + tError, is := err.(*errors.TaosError) + if is { + fmt.Println("errorCode:", int(tError.Code)) + fmt.Println("errorMessage:", tError.ErrStr) + } else { + fmt.Println(err.Error()) + } + } +``` -A "native connection" is established by the connector directly to the TDengine instance via the TDengine client driver (taosc). The supported functional features are: +## TDengine DataType vs. Go DataType -* Normal queries -* Continuous queries -* Subscriptions -* Schemaless interface -* Parameter binding interface +| TDengine DataType | Go Type | +|-------------------|-----------| +| TIMESTAMP | time.Time | +| TINYINT | int8 | +| SMALLINT | int16 | +| INT | int32 | +| BIGINT | int64 | +| TINYINT UNSIGNED | uint8 | +| SMALLINT UNSIGNED | uint16 | +| INT UNSIGNED | uint32 | +| BIGINT UNSIGNED | uint64 | +| FLOAT | float32 | +| DOUBLE | float64 | +| BOOL | bool | +| BINARY | string | +| NCHAR | string | +| JSON | []byte | -### REST connection - -A "REST connection" is a connection between the application and the TDengine instance via the REST API provided by the taosAdapter component. The following features are supported: - -* Normal queries -* Continuous queries +**Note**: Only TAG supports JSON types ## Installation Steps ### Pre-installation preparation * Install Go development environment (Go 1.14 and above, GCC 4.8.5 and above) -- If you use the native connector, please install the TDengine client driver. Please refer to [Install Client Driver](/reference/connector/#install-client-driver) for specific steps +* If you use the native connector, please install the TDengine client driver. 
Please refer to [Install Client Driver](/reference/connector/#install-client-driver) for specific steps Configure the environment variables and check the command. * ```go env``` * ```gcc -v``` -### Use go get to install - -`go get -u github.com/taosdata/driver-go/v3@latest` - -### Manage with go mod +### Install the connectors 1. Initialize the project with the `go mod` command. - ```text - go mod init taos-demo - ``` + ```text + go mod init taos-demo + ``` 2. Introduce taosSql - ```go - import ( - "database/sql" - _ "github.com/taosdata/driver-go/v3/taosSql" - ) - ``` + ```go + import ( + "database/sql" + _ "github.com/taosdata/driver-go/v3/taosSql" + ) + ``` 3. Update the dependency packages with `go mod tidy`. - ```text - go mod tidy - ``` + ```text + go mod tidy + ``` 4. Run the program with `go run taos-demo` or compile the binary with the `go build` command. @@ -98,8 +113,6 @@ Configure the environment variables and check the command. ## Establishing a connection -### Data source name (DSN) - Data source names have a standard format, e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php), but no type prefix (square brackets indicate optionally): ``` text @@ -111,9 +124,7 @@ DSN in full form. ```text username:password@protocol(address)/dbname?param=value ``` -### Connecting via connector - - + _taosSql_ implements Go's `database/sql/driver` interface via cgo. You can use the [`database/sql`](https://golang.org/pkg/database/sql/) interface by simply introducing the driver. 
@@ -209,42 +220,165 @@ func main() { +### Specify the URL and Properties to get the connection + +The Go connector does not support this feature + +### Priority of configuration parameters + +The Go connector does not support this feature + ## Usage examples -### Write data +### Create database and tables -#### SQL Write +```go +var taosDSN = "root:taosdata@tcp(localhost:6030)/" +taos, err := sql.Open("taosSql", taosDSN) +if err != nil { + log.Fatalln("failed to connect TDengine, err:", err) +} +defer taos.Close() +_, err := taos.Exec("CREATE DATABASE power") +if err != nil { + log.Fatalln("failed to create database, err:", err) +} +_, err = taos.Exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)") +if err != nil { + log.Fatalln("failed to create stable, err:", err) +} +``` + +### Insert data -#### InfluxDB line protocol write - - - -#### OpenTSDB Telnet line protocol write - - - -#### OpenTSDB JSON line protocol write - - - -### Query data +### Querying data -### More sample programs +### execute SQL with reqId -* [sample program](https://github.com/taosdata/driver-go/tree/3.0/examples) +This reqId can be used to request link tracing. +```go +db, err := sql.Open("taosSql", "root:taosdata@tcp(localhost:6030)/") +if err != nil { + panic(err) +} +defer db.Close() +ctx := context.WithValue(context.Background(), common.ReqIDKey, common.GetReqID()) +_, err = db.ExecContext(ctx, "create database if not exists example_taos_sql") +if err != nil { + panic(err) +} +``` -## Usage limitations +### Writing data via parameter binding -Since the REST interface is stateless, the `use db` syntax will not work. You need to put the db name into the SQL command, e.g. `create table if not exists tb1 (ts timestamp, a int)` to `create table if not exists test.tb1 (ts timestamp, a int)` otherwise it will report the error `[0x217] Database not specified or available`. 
+ + -You can also put the db name in the DSN by changing `root:taosdata@http(localhost:6041)/` to `root:taosdata@http(localhost:6041)/test`. Executing the `create database` statement when the specified db does not exist will not report an error while executing other queries or writing against that db will report an error. +```go +package main -The complete example is as follows. +import ( + "time" + + "github.com/taosdata/driver-go/v3/af" + "github.com/taosdata/driver-go/v3/common" + "github.com/taosdata/driver-go/v3/common/param" +) + +func main() { + db, err := af.Open("", "root", "taosdata", "", 0) + if err != nil { + panic(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists example_stmt") + if err != nil { + panic(err) + } + _, err = db.Exec("create table if not exists example_stmt.tb1(ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")") + if err != nil { + panic(err) + } + stmt := db.InsertStmt() + err = stmt.Prepare("insert into example_stmt.tb1 values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + if err != nil { + panic(err) + } + now := time.Now() + params := make([]*param.Param, 14) + params[0] = param.NewParam(2). + AddTimestamp(now, common.PrecisionMilliSecond). 
+ AddTimestamp(now.Add(time.Second), common.PrecisionMilliSecond) + params[1] = param.NewParam(2).AddBool(true).AddNull() + params[2] = param.NewParam(2).AddTinyint(2).AddNull() + params[3] = param.NewParam(2).AddSmallint(3).AddNull() + params[4] = param.NewParam(2).AddInt(4).AddNull() + params[5] = param.NewParam(2).AddBigint(5).AddNull() + params[6] = param.NewParam(2).AddUTinyint(6).AddNull() + params[7] = param.NewParam(2).AddUSmallint(7).AddNull() + params[8] = param.NewParam(2).AddUInt(8).AddNull() + params[9] = param.NewParam(2).AddUBigint(9).AddNull() + params[10] = param.NewParam(2).AddFloat(10).AddNull() + params[11] = param.NewParam(2).AddDouble(11).AddNull() + params[12] = param.NewParam(2).AddBinary([]byte("binary")).AddNull() + params[13] = param.NewParam(2).AddNchar("nchar").AddNull() + + paramTypes := param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(6). 
+ AddNchar(5) + err = stmt.BindParam(params, paramTypes) + if err != nil { + panic(err) + } + err = stmt.AddBatch() + if err != nil { + panic(err) + } + err = stmt.Execute() + if err != nil { + panic(err) + } + err = stmt.Close() + if err != nil { + panic(err) + } + // select * from example_stmt.tb1 +} +``` + + + ```go package main @@ -254,295 +388,734 @@ import ( "fmt" "time" + "github.com/taosdata/driver-go/v3/common" + "github.com/taosdata/driver-go/v3/common/param" _ "github.com/taosdata/driver-go/v3/taosRestful" + "github.com/taosdata/driver-go/v3/ws/stmt" ) func main() { - var taosDSN = "root:taosdata@http(localhost:6041)/test" - taos, err := sql.Open("taosRestful", taosDSN) + db, err := sql.Open("taosRestful", "root:taosdata@http(localhost:6041)/") if err != nil { - fmt.Println("failed to connect TDengine, err:", err) - return - } - defer taos.Close() - taos.Exec("create database if not exists test") - taos.Exec("create table if not exists tb1 (ts timestamp, a int)") - _, err = taos.Exec("insert into tb1 values(now, 0)(now+1s,1)(now+2s,2)(now+3s,3)") - if err != nil { - fmt.Println("failed to insert, err:", err) - return - } - rows, err := taos.Query("select * from tb1") - if err != nil { - fmt.Println("failed to select from table, err:", err) - return + panic(err) } + defer db.Close() + prepareEnv(db) - defer rows.Close() - for rows.Next() { - var r struct { - ts time.Time - a int - } - err := rows.Scan(&r.ts, &r.a) + config := stmt.NewConfig("ws://127.0.0.1:6041/rest/stmt", 0) + config.SetConnectUser("root") + config.SetConnectPass("taosdata") + config.SetConnectDB("example_ws_stmt") + config.SetMessageTimeout(common.DefaultMessageTimeout) + config.SetWriteWait(common.DefaultWriteWait) + config.SetErrorHandler(func(connector *stmt.Connector, err error) { + panic(err) + }) + config.SetCloseHandler(func() { + fmt.Println("stmt connector closed") + }) + + connector, err := stmt.NewConnector(config) + if err != nil { + panic(err) + } + now := time.Now() + { + 
stmt, err := connector.Init() if err != nil { - fmt.Println("scan error:\n", err) - return + panic(err) } - fmt.Println(r.ts, r.a) + err = stmt.Prepare("insert into ? using all_json tags(?) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + if err != nil { + panic(err) + } + err = stmt.SetTableName("tb1") + if err != nil { + panic(err) + } + err = stmt.SetTags(param.NewParam(1).AddJson([]byte(`{"tb":1}`)), param.NewColumnType(1).AddJson(0)) + if err != nil { + panic(err) + } + params := []*param.Param{ + param.NewParam(3).AddTimestamp(now, 0).AddTimestamp(now.Add(time.Second), 0).AddTimestamp(now.Add(time.Second*2), 0), + param.NewParam(3).AddBool(true).AddNull().AddBool(true), + param.NewParam(3).AddTinyint(1).AddNull().AddTinyint(1), + param.NewParam(3).AddSmallint(1).AddNull().AddSmallint(1), + param.NewParam(3).AddInt(1).AddNull().AddInt(1), + param.NewParam(3).AddBigint(1).AddNull().AddBigint(1), + param.NewParam(3).AddUTinyint(1).AddNull().AddUTinyint(1), + param.NewParam(3).AddUSmallint(1).AddNull().AddUSmallint(1), + param.NewParam(3).AddUInt(1).AddNull().AddUInt(1), + param.NewParam(3).AddUBigint(1).AddNull().AddUBigint(1), + param.NewParam(3).AddFloat(1).AddNull().AddFloat(1), + param.NewParam(3).AddDouble(1).AddNull().AddDouble(1), + param.NewParam(3).AddBinary([]byte("test_binary")).AddNull().AddBinary([]byte("test_binary")), + param.NewParam(3).AddNchar("test_nchar").AddNull().AddNchar("test_nchar"), + } + paramTypes := param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(0). 
+ AddNchar(0) + err = stmt.BindParam(params, paramTypes) + if err != nil { + panic(err) + } + err = stmt.AddBatch() + if err != nil { + panic(err) + } + err = stmt.Exec() + if err != nil { + panic(err) + } + affected := stmt.GetAffectedRows() + fmt.Println("all_json affected rows:", affected) + err = stmt.Close() + if err != nil { + panic(err) + } + } + { + stmt, err := connector.Init() + if err != nil { + panic(err) + } + err = stmt.Prepare("insert into ? using all_all tags(?,?,?,?,?,?,?,?,?,?,?,?,?,?) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + err = stmt.SetTableName("tb1") + if err != nil { + panic(err) + } + + err = stmt.SetTableName("tb2") + if err != nil { + panic(err) + } + err = stmt.SetTags( + param.NewParam(14). + AddTimestamp(now, 0). + AddBool(true). + AddTinyint(2). + AddSmallint(2). + AddInt(2). + AddBigint(2). + AddUTinyint(2). + AddUSmallint(2). + AddUInt(2). + AddUBigint(2). + AddFloat(2). + AddDouble(2). + AddBinary([]byte("tb2")). + AddNchar("tb2"), + param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(0). 
+ AddNchar(0), + ) + if err != nil { + panic(err) + } + params := []*param.Param{ + param.NewParam(3).AddTimestamp(now, 0).AddTimestamp(now.Add(time.Second), 0).AddTimestamp(now.Add(time.Second*2), 0), + param.NewParam(3).AddBool(true).AddNull().AddBool(true), + param.NewParam(3).AddTinyint(1).AddNull().AddTinyint(1), + param.NewParam(3).AddSmallint(1).AddNull().AddSmallint(1), + param.NewParam(3).AddInt(1).AddNull().AddInt(1), + param.NewParam(3).AddBigint(1).AddNull().AddBigint(1), + param.NewParam(3).AddUTinyint(1).AddNull().AddUTinyint(1), + param.NewParam(3).AddUSmallint(1).AddNull().AddUSmallint(1), + param.NewParam(3).AddUInt(1).AddNull().AddUInt(1), + param.NewParam(3).AddUBigint(1).AddNull().AddUBigint(1), + param.NewParam(3).AddFloat(1).AddNull().AddFloat(1), + param.NewParam(3).AddDouble(1).AddNull().AddDouble(1), + param.NewParam(3).AddBinary([]byte("test_binary")).AddNull().AddBinary([]byte("test_binary")), + param.NewParam(3).AddNchar("test_nchar").AddNull().AddNchar("test_nchar"), + } + paramTypes := param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(0). 
+ AddNchar(0) + err = stmt.BindParam(params, paramTypes) + if err != nil { + panic(err) + } + err = stmt.AddBatch() + if err != nil { + panic(err) + } + err = stmt.Exec() + if err != nil { + panic(err) + } + affected := stmt.GetAffectedRows() + fmt.Println("all_all affected rows:", affected) + err = stmt.Close() + if err != nil { + panic(err) + } + + } +} + +func prepareEnv(db *sql.DB) { + steps := []string{ + "create database example_ws_stmt", + "create table example_ws_stmt.all_json(ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")" + + "tags(t json)", + "create table example_ws_stmt.all_all(" + + "ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")" + + "tags(" + + "tts timestamp," + + "tc1 bool," + + "tc2 tinyint," + + "tc3 smallint," + + "tc4 int," + + "tc5 bigint," + + "tc6 tinyint unsigned," + + "tc7 smallint unsigned," + + "tc8 int unsigned," + + "tc9 bigint unsigned," + + "tc10 float," + + "tc11 double," + + "tc12 binary(20)," + + "tc13 nchar(20))", + } + for _, step := range steps { + _, err := db.Exec(step) + if err != nil { + panic(err) + } + } +} + +``` + + + + + +### Schemaless Writing + + + + +```go +import ( + "fmt" + + "github.com/taosdata/driver-go/v3/af" +) + +func main() { + conn, err := af.Open("localhost", "root", "taosdata", "", 6030) + if err != nil { + fmt.Println("fail to connect, err:", err) + } + defer conn.Close() + _, err = conn.Exec("create database if not exists example") + if err != nil { + panic(err) + } + _, err = conn.Exec("use example") + if err != nil { + panic(err) 
+ } + influxdbData := "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000" + err = conn.InfluxDBInsertLines([]string{influxdbData}, "ns") + if err != nil { + panic(err) + } + telnetData := "stb0_0 1626006833 4 host=host0 interface=eth0" + err = conn.OpenTSDBInsertTelnetLines([]string{telnetData}) + if err != nil { + panic(err) + } + jsonData := "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}" + err = conn.OpenTSDBInsertJsonPayload(jsonData) + if err != nil { + panic(err) + } +} +``` + + + + +```go +import ( + "database/sql" + "log" + "time" + + "github.com/taosdata/driver-go/v3/common" + _ "github.com/taosdata/driver-go/v3/taosWS" + "github.com/taosdata/driver-go/v3/ws/schemaless" +) + +func main() { + db, err := sql.Open("taosWS", "root:taosdata@ws(localhost:6041)/") + if err != nil { + log.Fatal(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists schemaless_ws") + if err != nil { + log.Fatal(err) + } + s, err := schemaless.NewSchemaless(schemaless.NewConfig("ws://localhost:6041/rest/schemaless", 1, + schemaless.SetDb("schemaless_ws"), + schemaless.SetReadTimeout(10*time.Second), + schemaless.SetWriteTimeout(10*time.Second), + schemaless.SetUser("root"), + schemaless.SetPassword("taosdata"), + schemaless.SetErrorHandler(func(err error) { + log.Fatal(err) + }), + )) + if err != nil { + panic(err) + } + influxdbData := "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000" + telnetData := "stb0_0 1626006833 4 host=host0 interface=eth0" + jsonData := "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}" + + err = s.Insert(influxdbData, schemaless.InfluxDBLineProtocol, "ns", 0, common.GetReqID()) + if err != nil { + panic(err) + } + err = 
s.Insert(telnetData, schemaless.OpenTSDBTelnetLineProtocol, "ms", 0, common.GetReqID()) + if err != nil { + panic(err) + } + err = s.Insert(jsonData, schemaless.OpenTSDBJsonFormatProtocol, "ms", 0, common.GetReqID()) + if err != nil { + panic(err) } } ``` + + + + +### Schemaless with reqId + +```go +func (s *Schemaless) Insert(lines string, protocol int, precision string, ttl int, reqID int64) error +``` + +You can get the unique id by `common.GetReqID()`. + +### Data Subscription + +The TDengine Go Connector supports subscription functionality with the following application API. + +#### Create a Topic + +```go + db, err := af.Open("", "root", "taosdata", "", 0) + if err != nil { + panic(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists example_tmq WAL_RETENTION_PERIOD 86400") + if err != nil { + panic(err) + } + _, err = db.Exec("create topic if not exists example_tmq_topic as DATABASE example_tmq") + if err != nil { + panic(err) + } +``` + +#### Create a Consumer + +```go + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "group.id": "test", + "auto.offset.reset": "earliest", + "td.connect.ip": "127.0.0.1", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "td.connect.port": "6030", + "client.id": "test_tmq_client", + "enable.auto.commit": "false", + "msg.with.table.name": "true", + }) + if err != nil { + panic(err) + } +``` + +#### Subscribe to consume data + +```go + err = consumer.Subscribe("example_tmq_topic", nil) + if err != nil { + panic(err) + } + for i := 0; i < 5; i++ { + ev := consumer.Poll(500) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Printf("get message:%v\n", e) + case tmqcommon.Error: + fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() + } + } +``` + +#### Assignment subscription Offset + +```go + partitions, err := consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + 
fmt.Println(partitions[i]) + err = consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + panic(err) + } + } +``` + +#### Close subscriptions + +```go + err = consumer.Close() + if err != nil { + panic(err) + } +``` + +#### Full Sample Code + + + + +```go +package main + +import ( + "fmt" + "os" + + "github.com/taosdata/driver-go/v3/af" + "github.com/taosdata/driver-go/v3/af/tmq" + tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" +) + +func main() { + db, err := af.Open("", "root", "taosdata", "", 0) + if err != nil { + panic(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists example_tmq WAL_RETENTION_PERIOD 86400") + if err != nil { + panic(err) + } + _, err = db.Exec("create topic if not exists example_tmq_topic as DATABASE example_tmq") + if err != nil { + panic(err) + } + if err != nil { + panic(err) + } + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "group.id": "test", + "auto.offset.reset": "earliest", + "td.connect.ip": "127.0.0.1", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "td.connect.port": "6030", + "client.id": "test_tmq_client", + "enable.auto.commit": "false", + "msg.with.table.name": "true", + }) + if err != nil { + panic(err) + } + err = consumer.Subscribe("example_tmq_topic", nil) + if err != nil { + panic(err) + } + _, err = db.Exec("create table example_tmq.t1 (ts timestamp,v int)") + if err != nil { + panic(err) + } + _, err = db.Exec("insert into example_tmq.t1 values(now,1)") + if err != nil { + panic(err) + } + for i := 0; i < 5; i++ { + ev := consumer.Poll(500) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Printf("get message:%v\n", e) + case tmqcommon.Error: + fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() + } + } + partitions, err := consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; 
i < len(partitions); i++ { + fmt.Println(partitions[i]) + err = consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + panic(err) + } + } + + partitions, err = consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + } + + err = consumer.Close() + if err != nil { + panic(err) + } +} +``` + + + + +```go +package main + +import ( + "database/sql" + "fmt" + + "github.com/taosdata/driver-go/v3/common" + tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" + _ "github.com/taosdata/driver-go/v3/taosRestful" + "github.com/taosdata/driver-go/v3/ws/tmq" +) + +func main() { + db, err := sql.Open("taosRestful", "root:taosdata@http(localhost:6041)/") + if err != nil { + panic(err) + } + defer db.Close() + prepareEnv(db) + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "ws.url": "ws://127.0.0.1:6041/rest/tmq", + "ws.message.channelLen": uint(0), + "ws.message.timeout": common.DefaultMessageTimeout, + "ws.message.writeWait": common.DefaultWriteWait, + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "group.id": "example", + "client.id": "example_consumer", + "auto.offset.reset": "earliest", + }) + if err != nil { + panic(err) + } + err = consumer.Subscribe("example_ws_tmq_topic", nil) + if err != nil { + panic(err) + } + go func() { + _, err := db.Exec("create table example_ws_tmq.t_all(ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")") + if err != nil { + panic(err) + } + _, err = db.Exec("insert into example_ws_tmq.t_all values(now,true,2,3,4,5,6,7,8,9,10.123,11.123,'binary','nchar')") + if err != nil { + panic(err) + } + }() + for i := 0; i < 5; i++ { + 
ev := consumer.Poll(500) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Printf("get message:%v\n", e) + case tmqcommon.Error: + fmt.Printf("%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() + } + } + partitions, err := consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + err = consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + panic(err) + } + } + + partitions, err = consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + } + + err = consumer.Close() + if err != nil { + panic(err) + } +} + +func prepareEnv(db *sql.DB) { + _, err := db.Exec("create database example_ws_tmq WAL_RETENTION_PERIOD 86400") + if err != nil { + panic(err) + } + _, err = db.Exec("create topic example_ws_tmq_topic as database example_ws_tmq") + if err != nil { + panic(err) + } +} +``` + + + + +### More sample programs + +* [sample program](https://github.com/taosdata/driver-go/tree/3.0/examples) + + ## Frequently Asked Questions 1. bind interface in database/sql crashes - REST does not support parameter binding related interface. It is recommended to use `db.Exec` and `db.Query`. + REST does not support parameter binding related interface. It is recommended to use `db.Exec` and `db.Query`. 2. error `[0x217] Database not specified or available` after executing other statements with `use db` statement - The execution of SQL command in the REST interface is not contextual, so using `use db` statement will not work, see the usage restrictions section above. + The execution of SQL command in the REST interface is not contextual, so using `use db` statement will not work, see the usage restrictions section above. 3. 
use `taosSql` without error but use `taosRestful` with error `[0x217] Database not specified or available` - Because the REST interface is stateless, using the `use db` statement will not take effect. See the usage restrictions section above. + Because the REST interface is stateless, using the `use db` statement will not take effect. See the usage restrictions section above. 4. `readBufferSize` parameter has no significant effect after being increased - Increasing `readBufferSize` will reduce the number of `syscall` calls when fetching results. If the query result is smaller, modifying this parameter will not improve performance significantly. If you increase the parameter value too much, the bottleneck will be parsing JSON data. If you need to optimize the query speed, you must adjust the value based on the actual situation to achieve the best query performance. + Increasing `readBufferSize` will reduce the number of `syscall` calls when fetching results. If the query result is smaller, modifying this parameter will not improve performance significantly. If you increase the parameter value too much, the bottleneck will be parsing JSON data. If you need to optimize the query speed, you must adjust the value based on the actual situation to achieve the best query performance. 5. `disableCompression` parameter is set to `false` when the query efficiency is reduced - When set `disableCompression` parameter to `false`, the query result will be compressed by `gzip` and then transmitted, so you have to decompress the data by `gzip` after getting it. + When set `disableCompression` parameter to `false`, the query result will be compressed by `gzip` and then transmitted, so you have to decompress the data by `gzip` after getting it. 6. `go get` command can't get the package, or timeout to get the package - Set Go proxy `go env -w GOPROXY=https://goproxy.cn,direct`. 
- -## Common APIs - -### database/sql API - -* `sql.Open(DRIVER_NAME string, dataSourceName string) *DB` - - Use This API to open a DB, returning an object of type \*DB. - -:::info -This API is created successfully without checking permissions, but only when you execute a Query or Exec, and check if user/password/host/port is legal. -::: - -* `func (db *DB) Exec(query string, args ...interface{}) (Result, error)` - - `sql.Open` built-in method to execute non-query related SQL. - -* `func (db *DB) Query(query string, args ...interface{}) (*Rows, error)` - - `sql.Open` Built-in method to execute query statements. - -### Advanced functions (af) API - -The `af` package encapsulates TDengine advanced functions such as connection management, subscriptions, schemaless, parameter binding, etc. - -#### Connection management - -* `af.Open(host, user, pass, db string, port int) (*Connector, error)` - - This API creates a connection to taosd via cgo. - -* `func (conn *Connector) Close() error` - - Closes the connection. - -#### Subscribe - -* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)` - -Creates consumer group. - -* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error` -Note: `rebalanceCb` is reserved for compatibility purpose - -Subscribes a topic. - -* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error` -Note: `rebalanceCb` is reserved for compatibility purpose - -Subscribes to topics. - -* `func (c *Consumer) Poll(timeoutMs int) tmq.Event` - -Polling information. - -* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)` -Note: `tmq.TopicPartition` is reserved for compatibility purpose - -Commit information. - -* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)` - -Get Assignment(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required). 
- -* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error` -Note: `ignoredTimeoutMs` is reserved for compatibility purpose - -Seek offset(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required). - -* `func (c *Consumer) Unsubscribe() error` - -Unsubscribe. - -* `func (c *Consumer) Close() error` - -Close consumer. - -#### schemaless - -* `func (conn *Connector) InfluxDBInsertLines(lines []string, precision string) error` - - Write to InfluxDB line protocol. - -* `func (conn *Connector) OpenTSDBInsertTelnetLines(lines []string) error` - - Write OpenTDSB telnet protocol data. - -* `func (conn *Connector) OpenTSDBInsertJsonPayload(payload string) error` - - Writes OpenTSDB JSON protocol data. - -#### parameter binding - -* `func (conn *Connector) StmtExecute(sql string, params *param.Param) (res driver.Result, err error)` - - Parameter bound single row insert. - -* `func (conn *Connector) InsertStmt() *insertstmt.InsertStmt` - - Initialize the parameters. - -* `func (stmt *InsertStmt) Prepare(sql string) error` - - Parameter binding preprocessing SQL statement. - -* `func (stmt *InsertStmt) SetTableName(name string) error` - - Bind the table name parameter. - -* `func (stmt *InsertStmt) SetSubTableName(name string) error` - - Parameter binding to set the sub table name. - -* `func (stmt *InsertStmt) BindParam(params []*param.Param, bindType *param.ColumnType) error` - - Parameter bind multiple rows of data. - -* `func (stmt *InsertStmt) AddBatch() error` - - Add to a parameter-bound batch. - -* `func (stmt *InsertStmt) Execute() error` - - Execute a parameter binding. - -* `func (stmt *InsertStmt) GetAffectedRows() int` - - Gets the number of affected rows inserted by the parameter binding. - -* `func (stmt *InsertStmt) Close() error` - - Closes the parameter binding. - -### Subscribe via WebSocket - -* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)` - -Creates consumer group. 
- -* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error` -Note: `rebalanceCb` is reserved for compatibility purpose - -Subscribes a topic. - -* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error` -Note: `rebalanceCb` is reserved for compatibility purpose - -Subscribes to topics. - -* `func (c *Consumer) Poll(timeoutMs int) tmq.Event` - -Polling information. - -* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)` -Note: `tmq.TopicPartition` is reserved for compatibility purpose - -Commit information. - -* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)` - -Get Assignment(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required). - -* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error` -Note: `ignoredTimeoutMs` is reserved for compatibility purpose - -Seek offset(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required). - -* `func (c *Consumer) Unsubscribe() error` - -Unsubscribe. - -* `func (c *Consumer) Close() error` - -Close consumer. - -For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/main/examples/tmqoverws/main.go) - -### parameter binding via WebSocket - -* `func NewConnector(config *Config) (*Connector, error)` - - Create a connection. - -* `func (c *Connector) Init() (*Stmt, error)` - - Initialize the parameters. - -* `func (c *Connector) Close() error` - - Close the connection. - -* `func (s *Stmt) Prepare(sql string) error` - - Parameter binding preprocessing SQL statement. - -* `func (s *Stmt) SetTableName(name string) error` - - Bind the table name parameter. - -* `func (s *Stmt) SetTags(tags *param.Param, bindType *param.ColumnType) error` - - Set tags. - -* `func (s *Stmt) BindParam(params []*param.Param, bindType *param.ColumnType) error` - - Parameter bind multiple rows of data. - -* `func (s *Stmt) AddBatch() error` - - Add to a parameter-bound batch. 
- -* `func (s *Stmt) Exec() error` - - Execute a parameter binding. - -* `func (s *Stmt) GetAffectedRows() int` - - Gets the number of affected rows inserted by the parameter binding. - -* `func (s *Stmt) Close() error` - - Closes the parameter binding. - -For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/main/examples/stmtoverws/main.go) + Set Go proxy `go env -w GOPROXY=https://goproxy.cn,direct`. ## API Reference diff --git a/docs/en/14-reference/03-connector/06-rust.mdx b/docs/en/14-reference/03-connector/06-rust.mdx index 344bd3590e..986b5cd104 100644 --- a/docs/en/14-reference/03-connector/06-rust.mdx +++ b/docs/en/14-reference/03-connector/06-rust.mdx @@ -31,21 +31,57 @@ Websocket connections are supported on all platforms that can run Go. | connector-rust version | TDengine version | major features | | :----------------: | :--------------: | :--------------------------------------------------: | -| v0.8.10 | 3.0.5.0 or later | TMQ: Get consuming progress and seek offset to consume. | +| v0.8.12 | 3.0.5.0 or later | TMQ: Get consuming progress and seek offset to consume. | | v0.8.0 | 3.0.4.0 | Support schemaless insert. | | v0.7.6 | 3.0.3.0 | Support req_id in query. | | v0.6.0 | 3.0.0.0 | Base features. | The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 3.0 or higher to avoid known issues. -## Installation +## Handling exceptions + +After the error is reported, the specific information of the error can be obtained: + +```rust +match conn.exec(sql) { + Ok(_) => { + Ok(()) + } + Err(e) => { + eprintln!("ERROR: {:?}", e); + Err(e) + } +} +``` + +## TDengine DataType vs. 
Rust DataType + +TDengine currently supports timestamp, number, character, Boolean type, and the corresponding type conversion with Rust is as follows: + +| TDengine DataType | Rust DataType | +| ----------------- | ----------------- | +| TIMESTAMP | Timestamp | +| INT | i32 | +| BIGINT | i64 | +| FLOAT | f32 | +| DOUBLE | f64 | +| SMALLINT | i16 | +| TINYINT | i8 | +| BOOL | bool | +| BINARY | Vec | +| NCHAR | String | +| JSON | serde_json::Value | + +Note: Only TAG supports JSON types + +## Installation Steps ### Pre-installation preparation * Install the Rust development toolchain * If using the native connection, please install the TDengine client driver. Please refer to [install client driver](/reference/connector#install-client-driver) -### Add taos dependency +### Install the connectors Depending on the connection method, add the [taos][taos] dependency in your Rust project as follows: @@ -146,7 +182,8 @@ let builder = TaosBuilder::from_dsn("taos://localhost:6030")?; let conn1 = builder.build(); // use websocket protocol. -let conn2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?; +let builder2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?; +let conn2 = builder2.build(); ``` After the connection is established, you can perform operations on your database. 
@@ -228,41 +265,191 @@ There are two ways to query data: Using built-in types or the [serde](https://se ## Usage examples -### Write data +### Create database and tables -#### SQL Write +```rust +use taos::*; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let dsn = "taos://localhost:6030"; + let builder = TaosBuilder::from_dsn(dsn)?; + + let taos = builder.build()?; + + let db = "query"; + + // create database + taos.exec_many([ + format!("DROP DATABASE IF EXISTS `{db}`"), + format!("CREATE DATABASE `{db}`"), + format!("USE `{db}`"), + ]) + .await?; + + // create table + taos.exec_many([ + // create super table + "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \ + TAGS (`groupid` INT, `location` BINARY(16))", + // create child table + "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')", + ]).await?; +} +``` + +> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set. + +### Insert data -#### STMT Write - - - -#### Schemaless Write - - - ### Query data -## API Reference +### execute SQL with req_id -### Connector Constructor - -You create a connector constructor by using a DSN. +This req_id can be used to request link tracing. ```rust -let cfg = TaosBuilder::default().build()?; +let rs = taos.query_with_req_id("select * from stable where tag1 is null", 1)?; ``` -You use the builder object to create multiple connections. +### Writing data via parameter binding + +TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. Writing data in this way avoids the resource consumption of SQL syntax parsing, resulting in significant write performance improvements in many cases. 
+ +Parameter binding details see [API Reference](#stmt-api) + + + +### Schemaless Writing + +TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../schemaless). + + + +### Schemaless with req_id + +This req_id can be used to request link tracing. ```rust -let conn: Taos = cfg.build(); +let sml_data = SmlDataBuilder::default() + .protocol(SchemalessProtocol::Line) + .data(data) + .req_id(100u64) + .build()?; + +client.put(&sml_data)? ``` -### Connection pooling +### Data Subscription + +TDengine starts subscriptions through [TMQ](../../../taos-sql/tmq/). + +#### Create a Topic + +```rust +taos.exec_many([ + // create topic for subscription + format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}") +]) +.await?; +``` + +#### Create a Consumer + +You create a TMQ connector by using a DSN. + +```rust +let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?; +``` + +Create a consumer: + +```rust +let mut consumer = tmq.build()?; +``` + +#### Subscribe to consume data + +A single consumer can subscribe to one or more topics. + +```rust +consumer.subscribe(["tmq_meters"]).await?; +``` + +The TMQ is of [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) type. You can use the corresponding API to consume each message in the queue and then use `.commit` to mark them as consumed. + +```rust +{ + let mut stream = consumer.stream(); + + while let Some((offset, message)) = stream.try_next().await? { + // get information from offset + + // the topic + let topic = offset.topic(); + // the vgroup id, like partition id in kafka. + let vgroup_id = offset.vgroup_id(); + println!("* in vgroup id {vgroup_id} of topic {topic}\n"); + + if let Some(data) = message.into_data() { + while let Some(block) = data.fetch_raw_block().await? 
{ + // one block for one table, get table name if needed + let name = block.table_name(); + let records: Vec = block.deserialize().try_collect()?; + println!( + "** table: {}, got {} records: {:#?}\n", + name.unwrap(), + records.len(), + records + ); + } + } + consumer.commit(offset).await?; + } +} +``` + +Get assignments: + +Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0 + +```rust +let assignments = consumer.assignments().await.unwrap(); +``` + +#### Assignment subscription Offset + +Seek offset: + +Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0 + +```rust +consumer.offset_seek(topic, vgroup_id, offset).await; +``` + +#### Close subscriptions + +```rust +consumer.unsubscribe().await; +``` + +The following parameters can be configured for the TMQ DSN. Only `group.id` is mandatory. + +- `group.id`: Within a consumer group, load balancing is implemented by consuming messages on an at-least-once basis. +- `client.id`: Subscriber client ID. +- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default is earliest. Note: This parameter is set per consumer group. +- `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential. +- `auto.commit.interval.ms`: Interval for automatic commits. + +#### Full Sample Code + +For more information, see [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs). + +### Use with connection pool In complex applications, we recommend enabling connection pools. [taos] implements connection pools based on [r2d2]. @@ -292,7 +479,17 @@ In the application code, use `pool.get()? ` to get a connection object [Taos]. 
let taos = pool.get()?; ``` -### Connectors +### More sample programs + +The source code of the sample application is under `TDengine/examples/rust` : + +[rust example](https://github.com/taosdata/TDengine/tree/3.0/examples/rust) + +## Frequently Asked Questions + +For additional troubleshooting, see [FAQ](../../../train-faq/faq). + +## API Reference The [Taos][struct.Taos] object provides an API to perform operations on multiple databases. @@ -378,9 +575,13 @@ Note that Rust asynchronous functions and an asynchronous runtime are required. - `.create_database(database: &str)`: Executes the `CREATE DATABASE` statement. - `.use_database(database: &str)`: Executes the `USE` statement. -In addition, this structure is also the entry point for [Parameter Binding](#Parameter Binding Interface) and [Line Protocol Interface](#Line Protocol Interface). Please refer to the specific API descriptions for usage. +In addition, this structure is also the entry point for Parameter Binding and Line Protocol Interface. Please refer to the specific API descriptions for usage. -### Bind Interface +

+ +Bind Interface + +

Similar to the C interface, Rust provides the bind interface's wrapping. First, the [Taos][struct.taos] object creates a parameter binding object [Stmt] for an SQL statement. @@ -391,7 +592,7 @@ stmt.prepare("INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)")?; The bind object provides a set of interfaces for implementing parameter binding. -#### `.set_tbname(name)` +`.set_tbname(name)` To bind table names. @@ -400,7 +601,7 @@ let mut stmt = taos.stmt("insert into ? values(? ,?)")?; stmt.set_tbname("d0")?; ``` -#### `.set_tags(&[tag])` +`.set_tags(&[tag])` Bind sub-table table names and tag values when the SQL statement uses a super table. @@ -410,7 +611,7 @@ stmt.set_tbname("d0")?; stmt.set_tags(&[Value::VarChar("taos".to_string())])?; ``` -#### `.bind(&[column])` +`.bind(&[column])` Bind value types. Use the [ColumnView] structure to create and bind the required types. @@ -434,7 +635,7 @@ let params = vec![ let rows = stmt.bind(¶ms)?.add_batch()?.execute()?; ``` -#### `.execute()` +`.execute()` Execute SQL. [Stmt] objects can be reused, re-binded, and executed after execution. Before execution, ensure that all data has been added to the queue with `.add_batch`. @@ -449,92 +650,6 @@ stmt.execute()?; For a working example, see [GitHub](https://github.com/taosdata/taos-connector-rust/blob/main/examples/bind.rs). -### Subscriptions - -TDengine starts subscriptions through [TMQ](../../../taos-sql/tmq/). - -You create a TMQ connector by using a DSN. - -```rust -let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?; -``` - -Create a consumer: - -```rust -let mut consumer = tmq.build()?; -``` - -A single consumer can subscribe to one or more topics. - -```rust -consumer.subscribe(["tmq_meters"]).await?; -``` - -The TMQ is of [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) type. You can use the corresponding API to consume each message in the queue and then use `.commit` to mark them as consumed. 
- -```rust -{ - let mut stream = consumer.stream(); - - while let Some((offset, message)) = stream.try_next().await? { - // get information from offset - - // the topic - let topic = offset.topic(); - // the vgroup id, like partition id in kafka. - let vgroup_id = offset.vgroup_id(); - println!("* in vgroup id {vgroup_id} of topic {topic}\n"); - - if let Some(data) = message.into_data() { - while let Some(block) = data.fetch_raw_block().await? { - // one block for one table, get table name if needed - let name = block.table_name(); - let records: Vec = block.deserialize().try_collect()?; - println!( - "** table: {}, got {} records: {:#?}\n", - name.unwrap(), - records.len(), - records - ); - } - } - consumer.commit(offset).await?; - } -} -``` - -Get assignments: - -Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0 - -```rust -let assignments = consumer.assignments().await.unwrap(); -``` - -Seek offset: - -Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0 - -```rust -consumer.offset_seek(topic, vgroup_id, offset).await; -``` - -Unsubscribe: - -```rust -consumer.unsubscribe().await; -``` - -The following parameters can be configured for the TMQ DSN. Only `group.id` is mandatory. - -- `group.id`: Within a consumer group, load balancing is implemented by consuming messages on an at-least-once basis. -- `client.id`: Subscriber client ID. -- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default is earliest. Note: This parameter is set per consumer group. -- `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential. -- `auto.commit.interval.ms`: Interval for automatic commits. - -For more information, see [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs). 
For information about other structure APIs, see the [Rust documentation](https://docs.rs/taos). diff --git a/docs/en/14-reference/03-connector/07-python.mdx b/docs/en/14-reference/03-connector/07-python.mdx index 461bdfbf16..43713117f9 100644 --- a/docs/en/14-reference/03-connector/07-python.mdx +++ b/docs/en/14-reference/03-connector/07-python.mdx @@ -24,6 +24,16 @@ The source code for the Python connector is hosted on [GitHub](https://github.co We recommend using the latest version of `taospy`, regardless of the version of TDengine. +|Python Connector Version|major changes| +|:-------------------:|:----:| +|2.7.9|support for getting assignment and seek function on subscription| +|2.7.8|add `execute_many` method| + +|Python Websocket Connector Version|major changes| +|:----------------------------:|:-----:| +|0.2.5|1. support for getting assignment and seek function on subscription
2. support schemaless
3. support STMT| +|0.2.4|support `unsubscribe` on subscription| + ## Handling Exceptions There are 4 types of exception in python connector. diff --git a/docs/zh/07-develop/07-tmq.mdx b/docs/zh/07-develop/07-tmq.mdx index a87a1f64f8..54a8af2287 100644 --- a/docs/zh/07-develop/07-tmq.mdx +++ b/docs/zh/07-develop/07-tmq.mdx @@ -243,6 +243,7 @@ TDengine 使用 SQL åˆ›å»ŗäø€äøŖ topic: ```sql CREATE TOPIC topic_name AS SELECT ts, c1, c2, c3 FROM tmqdb.stb WHERE c1 > 1; ``` +- topicåˆ›å»ŗäøŖę•°ęœ‰äøŠé™ļ¼Œé€ščæ‡å‚ę•° tmqMaxTopicNum ęŽ§åˆ¶ļ¼Œé»˜č®¤ 20 äøŖ TMQ ę”ÆęŒå¤šē§č®¢é˜…ē±»åž‹ļ¼š @@ -265,14 +266,15 @@ CREATE TOPIC topic_name as subquery čÆ­ę³•ļ¼š ```sql -CREATE TOPIC topic_name AS STABLE stb_name +CREATE TOPIC topic_name [with meta] AS STABLE stb_name [where_condition] ``` äøŽ `SELECT * from stbName` č®¢é˜…ēš„åŒŗåˆ«ę˜Æļ¼š - äøä¼šé™åˆ¶ē”Øęˆ·ēš„č”Øē»“ęž„å˜ę›“ć€‚ - čæ”å›žēš„ę˜Æéžē»“ęž„åŒ–ēš„ę•°ę®ļ¼ščæ”å›žę•°ę®ēš„ē»“ęž„ä¼šéšä¹‹č¶…ēŗ§č”Øēš„č”Øē»“ęž„å˜åŒ–č€Œå˜åŒ–ć€‚ -- ē”Øęˆ·åÆ¹äŗŽč¦å¤„ē†ēš„ęÆäø€äøŖę•°ę®å—éƒ½åÆčƒ½ęœ‰äøåŒēš„č”Øē»“ęž„ć€‚ +- with meta å‚ę•°åÆé€‰ļ¼Œé€‰ę‹©ę—¶å°†čæ”å›žåˆ›å»ŗč¶…ēŗ§č”Øļ¼Œå­č”Øē­‰čÆ­å„ļ¼Œäø»č¦ē”ØäŗŽtaosxåšč¶…ēŗ§č”Øčæē§» +- where_condition å‚ę•°åÆé€‰ļ¼Œé€‰ę‹©ę—¶å°†ē”Øę„čæ‡ę»¤ē¬¦åˆę”ä»¶ēš„å­č”Øļ¼Œč®¢é˜…čæ™äŗ›å­č”Øć€‚where ę”ä»¶é‡Œäøčƒ½ęœ‰ę™®é€šåˆ—ļ¼ŒåŖčƒ½ę˜Ætagꈖtbname,whereę”ä»¶é‡ŒåÆä»„ē”Øå‡½ę•°ļ¼Œē”Øę„čæ‡ę»¤tagļ¼Œä½†ę˜Æäøčƒ½ę˜Æčšåˆå‡½ę•°ļ¼Œå› äøŗå­č”Øtagå€¼ę— ę³•åščšåˆć€‚ä¹ŸåÆä»„ę˜Æåøøé‡č”Øč¾¾å¼ļ¼ŒęÆ”å¦‚ 2 > 1ļ¼ˆč®¢é˜…å…ØéƒØå­č”Øļ¼‰ļ¼Œęˆ–č€… falseļ¼ˆč®¢é˜…0个子蔨) - čæ”å›žę•°ę®äøåŒ…å«ę ‡ē­¾ć€‚ ### ę•°ę®åŗ“č®¢é˜… @@ -280,11 +282,13 @@ CREATE TOPIC topic_name AS STABLE stb_name čÆ­ę³•ļ¼š ```sql -CREATE TOPIC topic_name AS DATABASE db_name; +CREATE TOPIC topic_name [with meta] AS DATABASE db_name; ``` é€ščæ‡čÆ„čÆ­å„åÆåˆ›å»ŗäø€äøŖåŒ…å«ę•°ę®åŗ“ę‰€ęœ‰č”Øę•°ę®ēš„č®¢é˜… +- with meta å‚ę•°åÆé€‰ļ¼Œé€‰ę‹©ę—¶å°†čæ”å›žåˆ›å»ŗę•°ę®åŗ“é‡Œę‰€ęœ‰č¶…ēŗ§č”Øļ¼Œå­č”Øēš„čÆ­å„ļ¼Œäø»č¦ē”ØäŗŽtaosxåšę•°ę®åŗ“čæē§» + 
## åˆ›å»ŗę¶ˆč“¹č€… *consumer* ę¶ˆč“¹č€…éœ€č¦é€ščæ‡äø€ē³»åˆ—é…ē½®é€‰é”¹åˆ›å»ŗļ¼ŒåŸŗē”€é…ē½®é”¹å¦‚äø‹č”Øę‰€ē¤ŗļ¼š @@ -295,7 +299,7 @@ CREATE TOPIC topic_name AS DATABASE db_name; | `td.connect.user` | string | ē”Øęˆ·å | | | `td.connect.pass` | string | 密码 | | | `td.connect.port` | integer | ęœåŠ”ē«Æēš„ē«Æå£å· | | -| `group.id` | string | ę¶ˆč“¹ē»„ IDļ¼ŒåŒäø€ę¶ˆč“¹ē»„å…±äŗ«ę¶ˆč“¹čæ›åŗ¦ | **必唫锹**ć€‚ęœ€å¤§é•æåŗ¦ļ¼š192怂 | +| `group.id` | string | ę¶ˆč“¹ē»„ IDļ¼ŒåŒäø€ę¶ˆč“¹ē»„å…±äŗ«ę¶ˆč“¹čæ›åŗ¦ |
**必唫锹**ć€‚ęœ€å¤§é•æåŗ¦ļ¼š192怂
ęÆäøŖtopicęœ€å¤šåÆå»ŗē«‹100äøŖ consumer group | | `client.id` | string | 客户端 ID | ęœ€å¤§é•æåŗ¦ļ¼š192怂 | | `auto.offset.reset` | enum | ę¶ˆč“¹ē»„č®¢é˜…ēš„åˆå§‹ä½ē½® |
`earliest`: 默认值;ä»Žå¤“å¼€å§‹č®¢é˜…;
`latest`: ä»…ä»Žęœ€ę–°ę•°ę®å¼€å§‹č®¢é˜…;
`none`: ę²”ęœ‰ęäŗ¤ēš„ offset ę— ę³•č®¢é˜… | | `enable.auto.commit` | boolean | ę˜Æå¦åÆē”Øę¶ˆč“¹ä½ē‚¹č‡ŖåŠØęäŗ¤ļ¼Œtrue: č‡ŖåŠØęäŗ¤ļ¼Œå®¢ęˆ·ē«Æåŗ”ē”Øę— éœ€commitļ¼›falseļ¼šå®¢ęˆ·ē«Æåŗ”ē”Øéœ€č¦č‡Ŗč”Œcommit | é»˜č®¤å€¼äøŗ true | diff --git a/docs/zh/07-develop/09-udf.md b/docs/zh/07-develop/09-udf.md index ae11273a39..ff46437687 100644 --- a/docs/zh/07-develop/09-udf.md +++ b/docs/zh/07-develop/09-udf.md @@ -17,7 +17,7 @@ TDengine ę”ÆęŒé€ščæ‡ C/Python čÆ­čØ€čæ›č”Œ UDF å®šä¹‰ć€‚ęŽ„äø‹ę„ē»“åˆē¤ŗä¾‹ - čšåˆå‡½ę•°éœ€č¦å®žēŽ°čšåˆęŽ„å£å‡½ę•° aggfn_start , aggfn , aggfn_finish怂 - å¦‚ęžœéœ€č¦åˆå§‹åŒ–ļ¼Œå®žēŽ° udf_initļ¼›å¦‚ęžœéœ€č¦ęø…ē†å·„ä½œļ¼Œå®žēŽ°udf_destroy怂 -ęŽ„å£å‡½ę•°ēš„åē§°ę˜Æ UDF åē§°ļ¼Œęˆ–č€…ę˜Æ UDF åē§°å’Œē‰¹å®šåŽē¼€ļ¼ˆ_start, _finish, _init, _destroy)ēš„čæžęŽ„ć€‚åˆ—č”Øäø­ēš„scalarfn,aggfn, udféœ€č¦ę›æę¢ęˆudfå‡½ę•°åć€‚ +ęŽ„å£å‡½ę•°ēš„åē§°ę˜Æ UDF åē§°ļ¼Œęˆ–č€…ę˜Æ UDF åē§°å’Œē‰¹å®šåŽē¼€ļ¼ˆ`_start`, `_finish`, `_init`, `_destroy`)ēš„čæžęŽ„ć€‚åˆ—č”Øäø­ēš„scalarfn,aggfn, udféœ€č¦ę›æę¢ęˆudfå‡½ę•°åć€‚ ### 用 C čÆ­čØ€å®žēŽ°ę ‡é‡å‡½ę•° ę ‡é‡å‡½ę•°å®žēŽ°ęØ”ęæå¦‚äø‹ diff --git a/docs/zh/08-connector/14-java.mdx b/docs/zh/08-connector/14-java.mdx index 27b732b883..c7da2bd4f5 100644 --- a/docs/zh/08-connector/14-java.mdx +++ b/docs/zh/08-connector/14-java.mdx @@ -36,14 +36,15 @@ REST čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Java ēš„å¹³å°ć€‚ | taos-jdbcdriver ē‰ˆęœ¬ | äø»č¦å˜åŒ– | TDengine ē‰ˆęœ¬ | | :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: | +| 3.2.3 | äæ®å¤ ResultSet åœØäø€äŗ›ęƒ…å†µę•°ę®č§£ęžå¤±č“„ | - | | 3.2.2 | ę–°å¢žåŠŸčƒ½ļ¼šę•°ę®č®¢é˜…ę”ÆęŒ seek åŠŸčƒ½ć€‚ | 3.0.5.0 åŠę›“é«˜ē‰ˆęœ¬ | | 3.2.1 | ę–°å¢žåŠŸčƒ½ļ¼šWebSocket čæžęŽ„ę”ÆęŒ schemaless äøŽ prepareStatement å†™å…„ć€‚å˜ę›“ļ¼šconsumer poll čæ”å›žē»“ęžœé›†äøŗ ConsumerRecordļ¼ŒåÆé€ščæ‡ value() čŽ·å–ęŒ‡å®šē»“ęžœé›†ę•°ę®ć€‚ 
| 3.0.3.0 åŠę›“é«˜ē‰ˆęœ¬ | | 3.2.0 | å­˜åœØčæžęŽ„é—®é¢˜ļ¼ŒäøęŽØčä½æē”Ø | - | | 3.1.0 | WebSocket čæžęŽ„ę”ÆęŒč®¢é˜…åŠŸčƒ½ | - | | 3.0.1 - 3.0.4 | äæ®å¤äø€äŗ›ęƒ…å†µäø‹ē»“ęžœé›†ę•°ę®č§£ęžé”™čÆÆēš„é—®é¢˜ć€‚3.0.1 在 JDK 11 ēŽÆå¢ƒē¼–čÆ‘ļ¼ŒJDK 8 ēŽÆå¢ƒäø‹å»ŗč®®ä½æē”Øå…¶ä»–ē‰ˆęœ¬ | - | | 3.0.0 | ę”ÆęŒ TDengine 3.0 | 3.0.0.0 åŠę›“é«˜ē‰ˆęœ¬ | -| 2.0.42 | 修在 WebSocket čæžęŽ„äø­ wasNull ęŽ„å£čæ”å›žå€¼ | - | -| 2.0.41 | 修正 REST čæžęŽ„äø­ē”Øęˆ·åå’ŒåÆ†ē č½¬ē ę–¹å¼ | - | +| 2.0.42 | äæ®å¤ WebSocket čæžęŽ„äø­ wasNull ęŽ„å£čæ”å›žå€¼ | - | +| 2.0.41 | äæ®å¤ REST čæžęŽ„äø­ē”Øęˆ·åå’ŒåÆ†ē č½¬ē ę–¹å¼ | - | | 2.0.39 - 2.0.40 | 增加 REST čæžęŽ„/请求 超时设置 | - | | 2.0.38 | JDBC REST čæžęŽ„å¢žåŠ ę‰¹é‡ę‹‰å–åŠŸčƒ½ | - | | 2.0.37 | å¢žåŠ åÆ¹ json tag ę”ÆęŒ | - | @@ -287,9 +288,9 @@ url äø­ēš„é…ē½®å‚ę•°å¦‚äø‹ļ¼š - batchfetch: trueļ¼šåœØę‰§č”ŒęŸ„čÆ¢ę—¶ę‰¹é‡ę‹‰å–ē»“ęžœé›†ļ¼›falseļ¼šé€č”Œę‹‰å–ē»“ęžœé›†ć€‚é»˜č®¤å€¼äøŗļ¼šfalseć€‚é€č”Œę‹‰å–ē»“ęžœé›†ä½æē”Ø HTTP ę–¹å¼čæ›č”Œę•°ę®ä¼ č¾“ć€‚JDBC REST čæžęŽ„ę”ÆęŒę‰¹é‡ę‹‰å–ę•°ę®åŠŸčƒ½ć€‚taos-jdbcdriver äøŽ TDengine ä¹‹é—“é€ščæ‡ WebSocket čæžęŽ„čæ›č”Œę•°ę®ä¼ č¾“ć€‚ē›øč¾ƒäŗŽ HTTP,WebSocket åÆä»„ä½æ JDBC REST čæžęŽ„ę”ÆęŒå¤§ę•°ę®é‡ęŸ„čÆ¢ļ¼Œå¹¶ęå‡ęŸ„čÆ¢ę€§čƒ½ć€‚ - charset: å½“å¼€åÆę‰¹é‡ę‹‰å–ę•°ę®ę—¶ļ¼ŒęŒ‡å®šč§£ęžå­—ē¬¦äø²ę•°ę®ēš„å­—ē¬¦é›†ć€‚ - batchErrorIgnore:trueļ¼šåœØę‰§č”Œ Statement ēš„ executeBatch ę—¶ļ¼Œå¦‚ęžœäø­é—“ęœ‰äø€ę” SQL ę‰§č”Œå¤±č“„ļ¼Œē»§ē»­ę‰§č”Œäø‹é¢ēš„ SQL 了。falseļ¼šäøå†ę‰§č”Œå¤±č“„ SQL åŽēš„ä»»ä½•čÆ­å„ć€‚é»˜č®¤å€¼äøŗļ¼šfalse怂 -- httpConnectTimeout: čæžęŽ„č¶…ę—¶ę—¶é—“ļ¼Œå•ä½ ms, é»˜č®¤å€¼äøŗ 5000怂 -- httpSocketTimeout: socket č¶…ę—¶ę—¶é—“ļ¼Œå•ä½ msļ¼Œé»˜č®¤å€¼äøŗ 5000ć€‚ä»…åœØ batchfetch 设置为 false ę—¶ē”Ÿę•ˆć€‚ -- messageWaitTimeout: ę¶ˆęÆč¶…ę—¶ę—¶é—“, 单位 ms, é»˜č®¤å€¼äøŗ 3000怂 ä»…åœØ batchfetch 设置为 true ę—¶ē”Ÿę•ˆć€‚ +- httpConnectTimeout: čæžęŽ„č¶…ę—¶ę—¶é—“ļ¼Œå•ä½ ms, é»˜č®¤å€¼äøŗ 60000怂 +- httpSocketTimeout: socket č¶…ę—¶ę—¶é—“ļ¼Œå•ä½ msļ¼Œé»˜č®¤å€¼äøŗ 60000ć€‚ä»…åœØ batchfetch 设置为 
false ę—¶ē”Ÿę•ˆć€‚ +- messageWaitTimeout: ę¶ˆęÆč¶…ę—¶ę—¶é—“, 单位 ms, é»˜č®¤å€¼äøŗ 60000怂 ä»…åœØ batchfetch 设置为 true ę—¶ē”Ÿę•ˆć€‚ - useSSL: čæžęŽ„äø­ę˜Æå¦ä½æē”Ø SSL怂 - httpPoolSize: REST å¹¶å‘čÆ·ę±‚å¤§å°ļ¼Œé»˜č®¤ 20怂 @@ -355,9 +356,9 @@ properties äø­ēš„é…ē½®å‚ę•°å¦‚äø‹ļ¼š - TSDBDriver.PROPERTY_KEY_CHARSETļ¼šå®¢ęˆ·ē«Æä½æē”Øēš„å­—ē¬¦é›†ļ¼Œé»˜č®¤å€¼äøŗē³»ē»Ÿå­—ē¬¦é›†ć€‚ - TSDBDriver.PROPERTY_KEY_LOCALEļ¼šä»…åœØä½æē”Ø JDBC åŽŸē”ŸčæžęŽ„ę—¶ē”Ÿę•ˆć€‚ å®¢ęˆ·ē«ÆčÆ­čØ€ēŽÆå¢ƒļ¼Œé»˜č®¤å€¼ē³»ē»Ÿå½“å‰ locale怂 - TSDBDriver.PROPERTY_KEY_TIME_ZONEļ¼šä»…åœØä½æē”Ø JDBC åŽŸē”ŸčæžęŽ„ę—¶ē”Ÿę•ˆć€‚ å®¢ęˆ·ē«Æä½æē”Øēš„ę—¶åŒŗļ¼Œé»˜č®¤å€¼äøŗē³»ē»Ÿå½“å‰ę—¶åŒŗć€‚ -- TSDBDriver.HTTP_CONNECT_TIMEOUT: čæžęŽ„č¶…ę—¶ę—¶é—“ļ¼Œå•ä½ ms, é»˜č®¤å€¼äøŗ 5000ć€‚ä»…åœØ REST čæžęŽ„ę—¶ē”Ÿę•ˆć€‚ -- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket č¶…ę—¶ę—¶é—“ļ¼Œå•ä½ msļ¼Œé»˜č®¤å€¼äøŗ 5000ć€‚ä»…åœØ REST čæžęŽ„äø” batchfetch 设置为 false ę—¶ē”Ÿę•ˆć€‚ -- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: ę¶ˆęÆč¶…ę—¶ę—¶é—“, 单位 ms, é»˜č®¤å€¼äøŗ 3000怂 ä»…åœØ REST čæžęŽ„äø” batchfetch 设置为 true ę—¶ē”Ÿę•ˆć€‚ +- TSDBDriver.HTTP_CONNECT_TIMEOUT: čæžęŽ„č¶…ę—¶ę—¶é—“ļ¼Œå•ä½ ms, é»˜č®¤å€¼äøŗ 60000ć€‚ä»…åœØ REST čæžęŽ„ę—¶ē”Ÿę•ˆć€‚ +- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket č¶…ę—¶ę—¶é—“ļ¼Œå•ä½ msļ¼Œé»˜č®¤å€¼äøŗ 60000ć€‚ä»…åœØ REST čæžęŽ„äø” batchfetch 设置为 false ę—¶ē”Ÿę•ˆć€‚ +- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: ę¶ˆęÆč¶…ę—¶ę—¶é—“, 单位 ms, é»˜č®¤å€¼äøŗ 60000怂 ä»…åœØ REST čæžęŽ„äø” batchfetch 设置为 true ę—¶ē”Ÿę•ˆć€‚ - TSDBDriver.PROPERTY_KEY_USE_SSL: čæžęŽ„äø­ę˜Æå¦ä½æē”Ø SSLć€‚ä»…åœØ REST čæžęŽ„ę—¶ē”Ÿę•ˆć€‚ - TSDBDriver.HTTP_POOL_SIZE: REST å¹¶å‘čÆ·ę±‚å¤§å°ļ¼Œé»˜č®¤ 20怂 此外对 JDBC åŽŸē”ŸčæžęŽ„ļ¼Œé€ščæ‡ęŒ‡å®š URL 和 Properties čæ˜åÆä»„ęŒ‡å®šå…¶ä»–å‚ę•°ļ¼ŒęÆ”å¦‚ę—„åæ—ēŗ§åˆ«ć€SQL é•æåŗ¦ē­‰ć€‚ę›“å¤ščÆ¦ē»†é…ē½®čÆ·å‚č€ƒ[å®¢ęˆ·ē«Æé…ē½®](/reference/config/#ä»…å®¢ęˆ·ē«Æé€‚ē”Ø)怂 diff --git a/docs/zh/08-connector/20-go.mdx b/docs/zh/08-connector/20-go.mdx index d431be35cb..90ef4d83ca 100644 --- a/docs/zh/08-connector/20-go.mdx +++ 
b/docs/zh/08-connector/20-go.mdx @@ -32,24 +32,44 @@ REST čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Go ēš„å¹³å°ć€‚ čÆ·å‚č€ƒ[ē‰ˆęœ¬ę”ÆęŒåˆ—č”Ø](https://github.com/taosdata/driver-go#remind) -## ę”ÆęŒēš„åŠŸčƒ½ē‰¹ę€§ +## 处理异常 -### åŽŸē”ŸčæžęŽ„ +å¦‚ęžœę˜Æ TDengine é”™čÆÆåÆä»„é€ščæ‡ä»„äø‹ę–¹å¼čŽ·å–é”™čÆÆē å’Œé”™čÆÆäæ”ęÆć€‚ -ā€œåŽŸē”ŸčæžęŽ„ā€ęŒ‡čæžęŽ„å™Øé€ščæ‡ TDengine 客户端驱动(taoscļ¼‰ē›“ęŽ„äøŽ TDengine čæč”Œå®žä¾‹å»ŗē«‹ēš„čæžęŽ„ć€‚ę”ÆęŒēš„åŠŸčƒ½ē‰¹ę€§ęœ‰ļ¼š +```go +// import "github.com/taosdata/driver-go/v3/errors" + if err != nil { + tError, is := err.(*errors.TaosError) + if is { + fmt.Println("errorCode:", int(tError.Code)) + fmt.Println("errorMessage:", tError.ErrStr) + } else { + fmt.Println(err.Error()) + } + } +``` -* ę™®é€šęŸ„čÆ¢ -* čæžē»­ęŸ„čÆ¢ -* č®¢é˜… -* schemaless ęŽ„å£ -* å‚ę•°ē»‘å®šęŽ„å£ +## TDengine DataType 和 Go DataType -### REST čæžęŽ„ +| TDengine DataType | Go Type | +|-------------------|-----------| +| TIMESTAMP | time.Time | +| TINYINT | int8 | +| SMALLINT | int16 | +| INT | int32 | +| BIGINT | int64 | +| TINYINT UNSIGNED | uint8 | +| SMALLINT UNSIGNED | uint16 | +| INT UNSIGNED | uint32 | +| BIGINT UNSIGNED | uint64 | +| FLOAT | float32 | +| DOUBLE | float64 | +| BOOL | bool | +| BINARY | string | +| NCHAR | string | +| JSON | []byte | -"REST čæžęŽ„"ęŒ‡čæžęŽ„å™Øé€ščæ‡ taosAdapter ē»„ä»¶ęä¾›ēš„ REST API äøŽ TDengine čæč”Œå®žä¾‹å»ŗē«‹ēš„čæžęŽ„ć€‚ę”ÆęŒēš„åŠŸčƒ½ē‰¹ę€§ęœ‰ļ¼š - -* ę™®é€šęŸ„čÆ¢ -* čæžē»­ęŸ„čÆ¢ +**ę³Øę„**:JSON ē±»åž‹ä»…åœØ tag äø­ę”ÆęŒć€‚ ## 安装歄骤 @@ -63,32 +83,28 @@ REST čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Go ēš„å¹³å°ć€‚ * ```go env``` * ```gcc -v``` -### 使用 go get 安装 - -`go get -u github.com/taosdata/driver-go/v3@latest` - -### 使用 go mod 箔理 +### å®‰č£…čæžęŽ„å™Ø 1. 使用 `go mod` å‘½ä»¤åˆå§‹åŒ–é”¹ē›®ļ¼š - ```text - go mod init taos-demo - ``` + ```text + go mod init taos-demo + ``` 2. 
引兄 taosSql : - ```go - import ( - "database/sql" - _ "github.com/taosdata/driver-go/v3/taosSql" - ) - ``` + ```go + import ( + "database/sql" + _ "github.com/taosdata/driver-go/v3/taosSql" + ) + ``` 3. 使用 `go mod tidy` ę›“ę–°ä¾čµ–åŒ…ļ¼š - ```text - go mod tidy - ``` + ```text + go mod tidy + ``` 4. 使用 `go run taos-demo` čæč”ŒēØ‹åŗęˆ–ä½æē”Ø `go build` å‘½ä»¤ē¼–čÆ‘å‡ŗäŗŒčæ›åˆ¶ę–‡ä»¶ć€‚ @@ -99,8 +115,6 @@ REST čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Go ēš„å¹³å°ć€‚ ## å»ŗē«‹čæžęŽ„ -### ę•°ę®ęŗåē§°ļ¼ˆDSN) - ę•°ę®ęŗåē§°å…·ęœ‰é€šē”Øę ¼å¼ļ¼Œä¾‹å¦‚ [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php)ļ¼Œä½†ę²”ęœ‰ē±»åž‹å‰ē¼€ļ¼ˆę–¹ę‹¬å·č”Øē¤ŗåÆé€‰ļ¼‰ļ¼š ``` text @@ -113,9 +127,7 @@ REST čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Go ēš„å¹³å°ć€‚ username:password@protocol(address)/dbname?param=value ``` -### ä½æē”ØčæžęŽ„å™Øčæ›č”ŒčæžęŽ„ - - + _taosSql_ é€ščæ‡ cgo å®žēŽ°äŗ† Go ēš„ `database/sql/driver` ęŽ„å£ć€‚åŖéœ€č¦å¼•å…„é©±åŠØå°±åÆä»„ä½æē”Ø [`database/sql`](https://golang.org/pkg/database/sql/) ēš„ęŽ„å£ć€‚ @@ -213,42 +225,165 @@ func main() { +### ęŒ‡å®š URL 和 Properties čŽ·å–čæžęŽ„ + +Go čæžęŽ„å™Øäøę”ÆęŒę­¤åŠŸčƒ½ + +### é…ē½®å‚ę•°ēš„ä¼˜å…ˆēŗ§ + +Go čæžęŽ„å™Øäøę”ÆęŒę­¤åŠŸčƒ½ + ## 使用示例 -### å†™å…„ę•°ę® +### åˆ›å»ŗę•°ę®åŗ“å’Œč”Ø -#### SQL 写兄 +```go +var taosDSN = "root:taosdata@tcp(localhost:6030)/" +taos, err := sql.Open("taosSql", taosDSN) +if err != nil { + log.Fatalln("failed to connect TDengine, err:", err) +} +defer taos.Close() +_, err := taos.Exec("CREATE DATABASE power") +if err != nil { + log.Fatalln("failed to create database, err:", err) +} +_, err = taos.Exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)") +if err != nil { + log.Fatalln("failed to create stable, err:", err) +} +``` + +### ę’å…„ę•°ę® -#### InfluxDB č”Œåč®®å†™å…„ - - - -#### OpenTSDB Telnet č”Œåč®®å†™å…„ - - - -#### OpenTSDB JSON č”Œåč®®å†™å…„ - - - ### ęŸ„čÆ¢ę•°ę® -### ę›“å¤šē¤ŗä¾‹ēØ‹åŗ 
+### ę‰§č”Œåø¦ęœ‰ reqId ēš„ SQL -* [ē¤ŗä¾‹ēØ‹åŗ](https://github.com/taosdata/driver-go/tree/3.0/examples) -* [视频教程](https://www.taosdata.com/blog/2020/11/11/1951.html)怂 +ę­¤ reqId åÆē”ØäŗŽčÆ·ę±‚é“¾č·Æčæ½čøŖć€‚ -## ä½æē”Øé™åˆ¶ +```go +db, err := sql.Open("taosSql", "root:taosdata@tcp(localhost:6030)/") +if err != nil { + panic(err) +} +defer db.Close() +ctx := context.WithValue(context.Background(), common.ReqIDKey, common.GetReqID()) +_, err = db.ExecContext(ctx, "create database if not exists example_taos_sql") +if err != nil { + panic(err) +} +``` -ē”±äŗŽ REST ęŽ„å£ę— ēŠ¶ę€ę‰€ä»„ `use db` čÆ­ę³•äøä¼šē”Ÿę•ˆļ¼Œéœ€č¦å°† db åē§°ę”¾åˆ° SQL čÆ­å„äø­ļ¼Œå¦‚ļ¼š`create table if not exists tb1 (ts timestamp, a int)`改为`create table if not exists test.tb1 (ts timestamp, a int)`å¦åˆ™å°†ęŠ„é”™`[0x217] Database not specified or available`怂 +### é€ščæ‡å‚ę•°ē»‘å®šå†™å…„ę•°ę® -ä¹ŸåÆä»„å°† db åē§°ę”¾åˆ° DSN äø­ļ¼Œå°† `root:taosdata@http(localhost:6041)/` 改为 `root:taosdata@http(localhost:6041)/test`ć€‚å½“ęŒ‡å®šēš„ db äøå­˜åœØę—¶ę‰§č”Œ `create database` čÆ­å„äøä¼šęŠ„é”™ļ¼Œč€Œę‰§č”Œé’ˆåÆ¹čÆ„ db ēš„å…¶ä»–ęŸ„čÆ¢ęˆ–å†™å…„ę“ä½œä¼šęŠ„é”™ć€‚ + + -å®Œę•“ē¤ŗä¾‹å¦‚äø‹ļ¼š +```go +package main + +import ( + "time" + + "github.com/taosdata/driver-go/v3/af" + "github.com/taosdata/driver-go/v3/common" + "github.com/taosdata/driver-go/v3/common/param" +) + +func main() { + db, err := af.Open("", "root", "taosdata", "", 0) + if err != nil { + panic(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists example_stmt") + if err != nil { + panic(err) + } + _, err = db.Exec("create table if not exists example_stmt.tb1(ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")") + if err != nil { + panic(err) + } + stmt := db.InsertStmt() + 
err = stmt.Prepare("insert into example_stmt.tb1 values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + if err != nil { + panic(err) + } + now := time.Now() + params := make([]*param.Param, 14) + params[0] = param.NewParam(2). + AddTimestamp(now, common.PrecisionMilliSecond). + AddTimestamp(now.Add(time.Second), common.PrecisionMilliSecond) + params[1] = param.NewParam(2).AddBool(true).AddNull() + params[2] = param.NewParam(2).AddTinyint(2).AddNull() + params[3] = param.NewParam(2).AddSmallint(3).AddNull() + params[4] = param.NewParam(2).AddInt(4).AddNull() + params[5] = param.NewParam(2).AddBigint(5).AddNull() + params[6] = param.NewParam(2).AddUTinyint(6).AddNull() + params[7] = param.NewParam(2).AddUSmallint(7).AddNull() + params[8] = param.NewParam(2).AddUInt(8).AddNull() + params[9] = param.NewParam(2).AddUBigint(9).AddNull() + params[10] = param.NewParam(2).AddFloat(10).AddNull() + params[11] = param.NewParam(2).AddDouble(11).AddNull() + params[12] = param.NewParam(2).AddBinary([]byte("binary")).AddNull() + params[13] = param.NewParam(2).AddNchar("nchar").AddNull() + + paramTypes := param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(6). 
+ AddNchar(5) + err = stmt.BindParam(params, paramTypes) + if err != nil { + panic(err) + } + err = stmt.AddBatch() + if err != nil { + panic(err) + } + err = stmt.Execute() + if err != nil { + panic(err) + } + err = stmt.Close() + if err != nil { + panic(err) + } + // select * from example_stmt.tb1 +} +``` + + + ```go package main @@ -258,288 +393,733 @@ import ( "fmt" "time" + "github.com/taosdata/driver-go/v3/common" + "github.com/taosdata/driver-go/v3/common/param" _ "github.com/taosdata/driver-go/v3/taosRestful" + "github.com/taosdata/driver-go/v3/ws/stmt" ) func main() { - var taosDSN = "root:taosdata@http(localhost:6041)/test" - taos, err := sql.Open("taosRestful", taosDSN) + db, err := sql.Open("taosRestful", "root:taosdata@http(localhost:6041)/") if err != nil { - fmt.Println("failed to connect TDengine, err:", err) - return - } - defer taos.Close() - taos.Exec("create database if not exists test") - taos.Exec("create table if not exists tb1 (ts timestamp, a int)") - _, err = taos.Exec("insert into tb1 values(now, 0)(now+1s,1)(now+2s,2)(now+3s,3)") - if err != nil { - fmt.Println("failed to insert, err:", err) - return - } - rows, err := taos.Query("select * from tb1") - if err != nil { - fmt.Println("failed to select from table, err:", err) - return + panic(err) } + defer db.Close() + prepareEnv(db) - defer rows.Close() - for rows.Next() { - var r struct { - ts time.Time - a int - } - err := rows.Scan(&r.ts, &r.a) + config := stmt.NewConfig("ws://127.0.0.1:6041/rest/stmt", 0) + config.SetConnectUser("root") + config.SetConnectPass("taosdata") + config.SetConnectDB("example_ws_stmt") + config.SetMessageTimeout(common.DefaultMessageTimeout) + config.SetWriteWait(common.DefaultWriteWait) + config.SetErrorHandler(func(connector *stmt.Connector, err error) { + panic(err) + }) + config.SetCloseHandler(func() { + fmt.Println("stmt connector closed") + }) + + connector, err := stmt.NewConnector(config) + if err != nil { + panic(err) + } + now := time.Now() + { + 
stmt, err := connector.Init() if err != nil { - fmt.Println("scan error:\n", err) - return + panic(err) } - fmt.Println(r.ts, r.a) + err = stmt.Prepare("insert into ? using all_json tags(?) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + if err != nil { + panic(err) + } + err = stmt.SetTableName("tb1") + if err != nil { + panic(err) + } + err = stmt.SetTags(param.NewParam(1).AddJson([]byte(`{"tb":1}`)), param.NewColumnType(1).AddJson(0)) + if err != nil { + panic(err) + } + params := []*param.Param{ + param.NewParam(3).AddTimestamp(now, 0).AddTimestamp(now.Add(time.Second), 0).AddTimestamp(now.Add(time.Second*2), 0), + param.NewParam(3).AddBool(true).AddNull().AddBool(true), + param.NewParam(3).AddTinyint(1).AddNull().AddTinyint(1), + param.NewParam(3).AddSmallint(1).AddNull().AddSmallint(1), + param.NewParam(3).AddInt(1).AddNull().AddInt(1), + param.NewParam(3).AddBigint(1).AddNull().AddBigint(1), + param.NewParam(3).AddUTinyint(1).AddNull().AddUTinyint(1), + param.NewParam(3).AddUSmallint(1).AddNull().AddUSmallint(1), + param.NewParam(3).AddUInt(1).AddNull().AddUInt(1), + param.NewParam(3).AddUBigint(1).AddNull().AddUBigint(1), + param.NewParam(3).AddFloat(1).AddNull().AddFloat(1), + param.NewParam(3).AddDouble(1).AddNull().AddDouble(1), + param.NewParam(3).AddBinary([]byte("test_binary")).AddNull().AddBinary([]byte("test_binary")), + param.NewParam(3).AddNchar("test_nchar").AddNull().AddNchar("test_nchar"), + } + paramTypes := param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(0). 
+ AddNchar(0) + err = stmt.BindParam(params, paramTypes) + if err != nil { + panic(err) + } + err = stmt.AddBatch() + if err != nil { + panic(err) + } + err = stmt.Exec() + if err != nil { + panic(err) + } + affected := stmt.GetAffectedRows() + fmt.Println("all_json affected rows:", affected) + err = stmt.Close() + if err != nil { + panic(err) + } + } + { + stmt, err := connector.Init() + if err != nil { + panic(err) + } + err = stmt.Prepare("insert into ? using all_all tags(?,?,?,?,?,?,?,?,?,?,?,?,?,?) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + err = stmt.SetTableName("tb1") + if err != nil { + panic(err) + } + + err = stmt.SetTableName("tb2") + if err != nil { + panic(err) + } + err = stmt.SetTags( + param.NewParam(14). + AddTimestamp(now, 0). + AddBool(true). + AddTinyint(2). + AddSmallint(2). + AddInt(2). + AddBigint(2). + AddUTinyint(2). + AddUSmallint(2). + AddUInt(2). + AddUBigint(2). + AddFloat(2). + AddDouble(2). + AddBinary([]byte("tb2")). + AddNchar("tb2"), + param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(0). 
+ AddNchar(0), + ) + if err != nil { + panic(err) + } + params := []*param.Param{ + param.NewParam(3).AddTimestamp(now, 0).AddTimestamp(now.Add(time.Second), 0).AddTimestamp(now.Add(time.Second*2), 0), + param.NewParam(3).AddBool(true).AddNull().AddBool(true), + param.NewParam(3).AddTinyint(1).AddNull().AddTinyint(1), + param.NewParam(3).AddSmallint(1).AddNull().AddSmallint(1), + param.NewParam(3).AddInt(1).AddNull().AddInt(1), + param.NewParam(3).AddBigint(1).AddNull().AddBigint(1), + param.NewParam(3).AddUTinyint(1).AddNull().AddUTinyint(1), + param.NewParam(3).AddUSmallint(1).AddNull().AddUSmallint(1), + param.NewParam(3).AddUInt(1).AddNull().AddUInt(1), + param.NewParam(3).AddUBigint(1).AddNull().AddUBigint(1), + param.NewParam(3).AddFloat(1).AddNull().AddFloat(1), + param.NewParam(3).AddDouble(1).AddNull().AddDouble(1), + param.NewParam(3).AddBinary([]byte("test_binary")).AddNull().AddBinary([]byte("test_binary")), + param.NewParam(3).AddNchar("test_nchar").AddNull().AddNchar("test_nchar"), + } + paramTypes := param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(0). 
+ AddNchar(0) + err = stmt.BindParam(params, paramTypes) + if err != nil { + panic(err) + } + err = stmt.AddBatch() + if err != nil { + panic(err) + } + err = stmt.Exec() + if err != nil { + panic(err) + } + affected := stmt.GetAffectedRows() + fmt.Println("all_all affected rows:", affected) + err = stmt.Close() + if err != nil { + panic(err) + } + + } +} + +func prepareEnv(db *sql.DB) { + steps := []string{ + "create database example_ws_stmt", + "create table example_ws_stmt.all_json(ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")" + + "tags(t json)", + "create table example_ws_stmt.all_all(" + + "ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")" + + "tags(" + + "tts timestamp," + + "tc1 bool," + + "tc2 tinyint," + + "tc3 smallint," + + "tc4 int," + + "tc5 bigint," + + "tc6 tinyint unsigned," + + "tc7 smallint unsigned," + + "tc8 int unsigned," + + "tc9 bigint unsigned," + + "tc10 float," + + "tc11 double," + + "tc12 binary(20)," + + "tc13 nchar(20))", + } + for _, step := range steps { + _, err := db.Exec(step) + if err != nil { + panic(err) + } + } +} + +``` + + + + +### ę— ęØ”å¼å†™å…„ + + + + +```go +import ( + "fmt" + + "github.com/taosdata/driver-go/v3/af" +) + +func main() { + conn, err := af.Open("localhost", "root", "taosdata", "", 6030) + if err != nil { + fmt.Println("fail to connect, err:", err) + } + defer conn.Close() + _, err = conn.Exec("create database if not exists example") + if err != nil { + panic(err) + } + _, err = conn.Exec("use example") + if err != nil { + panic(err) + } + 
influxdbData := "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000" + err = conn.InfluxDBInsertLines([]string{influxdbData}, "ns") + if err != nil { + panic(err) + } + telnetData := "stb0_0 1626006833 4 host=host0 interface=eth0" + err = conn.OpenTSDBInsertTelnetLines([]string{telnetData}) + if err != nil { + panic(err) + } + jsonData := "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}" + err = conn.OpenTSDBInsertJsonPayload(jsonData) + if err != nil { + panic(err) + } +} +``` + + + + +```go +import ( + "database/sql" + "log" + "time" + + "github.com/taosdata/driver-go/v3/common" + _ "github.com/taosdata/driver-go/v3/taosWS" + "github.com/taosdata/driver-go/v3/ws/schemaless" +) + +func main() { + db, err := sql.Open("taosWS", "root:taosdata@ws(localhost:6041)/") + if err != nil { + log.Fatal(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists schemaless_ws") + if err != nil { + log.Fatal(err) + } + s, err := schemaless.NewSchemaless(schemaless.NewConfig("ws://localhost:6041/rest/schemaless", 1, + schemaless.SetDb("schemaless_ws"), + schemaless.SetReadTimeout(10*time.Second), + schemaless.SetWriteTimeout(10*time.Second), + schemaless.SetUser("root"), + schemaless.SetPassword("taosdata"), + schemaless.SetErrorHandler(func(err error) { + log.Fatal(err) + }), + )) + if err != nil { + panic(err) + } + influxdbData := "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000" + telnetData := "stb0_0 1626006833 4 host=host0 interface=eth0" + jsonData := "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}" + + err = s.Insert(influxdbData, schemaless.InfluxDBLineProtocol, "ns", 0, common.GetReqID()) + if err != nil { + panic(err) + } + err = 
s.Insert(telnetData, schemaless.OpenTSDBTelnetLineProtocol, "ms", 0, common.GetReqID()) + if err != nil { + panic(err) + } + err = s.Insert(jsonData, schemaless.OpenTSDBJsonFormatProtocol, "ms", 0, common.GetReqID()) + if err != nil { + panic(err) } } ``` + + + +### ę‰§č”Œåø¦ęœ‰ reqId ēš„ę— ęØ”å¼å†™å…„ + +```go +func (s *Schemaless) Insert(lines string, protocol int, precision string, ttl int, reqID int64) error +``` + +åÆä»„é€ščæ‡ `common.GetReqID()` čŽ·å–å”Æäø€ id怂 + +### ę•°ę®č®¢é˜… + +TDengine Go čæžęŽ„å™Øę”ÆęŒč®¢é˜…åŠŸčƒ½ļ¼Œåŗ”ē”Ø API å¦‚äø‹ļ¼š + +#### åˆ›å»ŗ Topic + +```go + db, err := af.Open("", "root", "taosdata", "", 0) + if err != nil { + panic(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists example_tmq WAL_RETENTION_PERIOD 86400") + if err != nil { + panic(err) + } + _, err = db.Exec("create topic if not exists example_tmq_topic as DATABASE example_tmq") + if err != nil { + panic(err) + } +``` + +#### åˆ›å»ŗ Consumer + +```go + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "group.id": "test", + "auto.offset.reset": "earliest", + "td.connect.ip": "127.0.0.1", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "td.connect.port": "6030", + "client.id": "test_tmq_client", + "enable.auto.commit": "false", + "msg.with.table.name": "true", + }) + if err != nil { + panic(err) + } +``` + +#### č®¢é˜…ę¶ˆč“¹ę•°ę® + +```go + err = consumer.Subscribe("example_tmq_topic", nil) + if err != nil { + panic(err) + } + for i := 0; i < 5; i++ { + ev := consumer.Poll(500) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Printf("get message:%v\n", e) + case tmqcommon.Error: + fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() + } + } +``` + +#### ęŒ‡å®šč®¢é˜… Offset + +```go + partitions, err := consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + err = 
consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + panic(err) + } + } +``` + +#### å…³é—­č®¢é˜… + +```go + err = consumer.Close() + if err != nil { + panic(err) + } +``` + +#### å®Œę•“ē¤ŗä¾‹ + + + + +```go +package main + +import ( + "fmt" + "os" + + "github.com/taosdata/driver-go/v3/af" + "github.com/taosdata/driver-go/v3/af/tmq" + tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" +) + +func main() { + db, err := af.Open("", "root", "taosdata", "", 0) + if err != nil { + panic(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists example_tmq WAL_RETENTION_PERIOD 86400") + if err != nil { + panic(err) + } + _, err = db.Exec("create topic if not exists example_tmq_topic as DATABASE example_tmq") + if err != nil { + panic(err) + } + if err != nil { + panic(err) + } + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "group.id": "test", + "auto.offset.reset": "earliest", + "td.connect.ip": "127.0.0.1", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "td.connect.port": "6030", + "client.id": "test_tmq_client", + "enable.auto.commit": "false", + "msg.with.table.name": "true", + }) + if err != nil { + panic(err) + } + err = consumer.Subscribe("example_tmq_topic", nil) + if err != nil { + panic(err) + } + _, err = db.Exec("create table example_tmq.t1 (ts timestamp,v int)") + if err != nil { + panic(err) + } + _, err = db.Exec("insert into example_tmq.t1 values(now,1)") + if err != nil { + panic(err) + } + for i := 0; i < 5; i++ { + ev := consumer.Poll(500) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Printf("get message:%v\n", e) + case tmqcommon.Error: + fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() + } + } + partitions, err := consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + 
fmt.Println(partitions[i]) + err = consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + panic(err) + } + } + + partitions, err = consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + } + + err = consumer.Close() + if err != nil { + panic(err) + } +} +``` + + + + +```go +package main + +import ( + "database/sql" + "fmt" + + "github.com/taosdata/driver-go/v3/common" + tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" + _ "github.com/taosdata/driver-go/v3/taosRestful" + "github.com/taosdata/driver-go/v3/ws/tmq" +) + +func main() { + db, err := sql.Open("taosRestful", "root:taosdata@http(localhost:6041)/") + if err != nil { + panic(err) + } + defer db.Close() + prepareEnv(db) + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "ws.url": "ws://127.0.0.1:6041/rest/tmq", + "ws.message.channelLen": uint(0), + "ws.message.timeout": common.DefaultMessageTimeout, + "ws.message.writeWait": common.DefaultWriteWait, + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "group.id": "example", + "client.id": "example_consumer", + "auto.offset.reset": "earliest", + }) + if err != nil { + panic(err) + } + err = consumer.Subscribe("example_ws_tmq_topic", nil) + if err != nil { + panic(err) + } + go func() { + _, err := db.Exec("create table example_ws_tmq.t_all(ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")") + if err != nil { + panic(err) + } + _, err = db.Exec("insert into example_ws_tmq.t_all values(now,true,2,3,4,5,6,7,8,9,10.123,11.123,'binary','nchar')") + if err != nil { + panic(err) + } + }() + for i := 0; i < 5; i++ { + ev := consumer.Poll(500) + if 
ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Printf("get message:%v\n", e) + case tmqcommon.Error: + fmt.Printf("%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() + } + } + partitions, err := consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + err = consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + panic(err) + } + } + + partitions, err = consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + } + + err = consumer.Close() + if err != nil { + panic(err) + } +} + +func prepareEnv(db *sql.DB) { + _, err := db.Exec("create database example_ws_tmq WAL_RETENTION_PERIOD 86400") + if err != nil { + panic(err) + } + _, err = db.Exec("create topic example_ws_tmq_topic as database example_ws_tmq") + if err != nil { + panic(err) + } +} +``` + + + + +### ę›“å¤šē¤ŗä¾‹ēØ‹åŗ + +* [ē¤ŗä¾‹ēØ‹åŗ](https://github.com/taosdata/driver-go/tree/3.0/examples) +* [视频教程](https://www.taosdata.com/blog/2020/11/11/1951.html)怂 + ## åøøč§é—®é¢˜ 1. database/sql äø­ stmtļ¼ˆå‚ę•°ē»‘å®šļ¼‰ē›øå…³ęŽ„å£å“©ęŗƒ - REST äøę”ÆęŒå‚ę•°ē»‘å®šē›øå…³ęŽ„å£ļ¼Œå»ŗč®®ä½æē”Ø`db.Exec`和`db.Query`怂 + REST äøę”ÆęŒå‚ę•°ē»‘å®šē›øå…³ęŽ„å£ļ¼Œå»ŗč®®ä½æē”Ø`db.Exec`和`db.Query`怂 2. 使用 `use db` čÆ­å„åŽę‰§č”Œå…¶ä»–čÆ­å„ęŠ„é”™ `[0x217] Database not specified or available` - 在 REST ęŽ„å£äø­ SQL čÆ­å„ēš„ę‰§č”Œę— äøŠäø‹ę–‡å…³č”ļ¼Œä½æē”Ø `use db` čÆ­å„äøä¼šē”Ÿę•ˆļ¼Œč§£å†³åŠžę³•č§äøŠę–¹ä½æē”Øé™åˆ¶ē« čŠ‚ć€‚ + 在 REST ęŽ„å£äø­ SQL čÆ­å„ēš„ę‰§č”Œę— äøŠäø‹ę–‡å…³č”ļ¼Œä½æē”Ø `use db` čÆ­å„äøä¼šē”Ÿę•ˆļ¼Œč§£å†³åŠžę³•č§äøŠę–¹ä½æē”Øé™åˆ¶ē« čŠ‚ć€‚ 3. 
使用 taosSql äøęŠ„é”™ä½æē”Ø taosRestful ꊄ错 `[0x217] Database not specified or available` - å› äøŗ REST ęŽ„å£ę— ēŠ¶ę€ļ¼Œä½æē”Ø `use db` čÆ­å„äøä¼šē”Ÿę•ˆļ¼Œč§£å†³åŠžę³•č§äøŠę–¹ä½æē”Øé™åˆ¶ē« čŠ‚ć€‚ + å› äøŗ REST ęŽ„å£ę— ēŠ¶ę€ļ¼Œä½æē”Ø `use db` čÆ­å„äøä¼šē”Ÿę•ˆļ¼Œč§£å†³åŠžę³•č§äøŠę–¹ä½æē”Øé™åˆ¶ē« čŠ‚ć€‚ 4. `readBufferSize` å‚ę•°č°ƒå¤§åŽę— ę˜Žę˜¾ę•ˆęžœ - `readBufferSize` č°ƒå¤§åŽä¼šå‡å°‘čŽ·å–ē»“ęžœę—¶ `syscall` ēš„č°ƒē”Øć€‚å¦‚ęžœęŸ„čÆ¢ē»“ęžœēš„ę•°ę®é‡äøå¤§ļ¼Œäæ®ę”¹čÆ„å‚ę•°äøä¼šåø¦ę„ę˜Žę˜¾ęå‡ļ¼Œå¦‚ęžœčÆ„å‚ę•°äæ®ę”¹čæ‡å¤§ļ¼Œē“¶é¢ˆä¼šåœØč§£ęž JSON ę•°ę®ć€‚å¦‚ęžœéœ€č¦ä¼˜åŒ–ęŸ„čÆ¢é€Ÿåŗ¦ļ¼Œéœ€č¦ę ¹ę®å®žé™…ęƒ…å†µč°ƒę•“čÆ„å€¼ę„č¾¾åˆ°ęŸ„čÆ¢ę•ˆęžœęœ€ä¼˜ć€‚ + `readBufferSize` č°ƒå¤§åŽä¼šå‡å°‘čŽ·å–ē»“ęžœę—¶ `syscall` ēš„č°ƒē”Øć€‚å¦‚ęžœęŸ„čÆ¢ē»“ęžœēš„ę•°ę®é‡äøå¤§ļ¼Œäæ®ę”¹čÆ„å‚ę•°äøä¼šåø¦ę„ę˜Žę˜¾ęå‡ļ¼Œå¦‚ęžœčÆ„å‚ę•°äæ®ę”¹čæ‡å¤§ļ¼Œē“¶é¢ˆä¼šåœØč§£ęž JSON ę•°ę®ć€‚å¦‚ęžœéœ€č¦ä¼˜åŒ–ęŸ„čÆ¢é€Ÿåŗ¦ļ¼Œéœ€č¦ę ¹ę®å®žé™…ęƒ…å†µč°ƒę•“čÆ„å€¼ę„č¾¾åˆ°ęŸ„čÆ¢ę•ˆęžœęœ€ä¼˜ć€‚ 5. `disableCompression` å‚ę•°č®¾ē½®äøŗ `false` ę—¶ęŸ„čÆ¢ę•ˆēŽ‡é™ä½Ž - 当 `disableCompression` å‚ę•°č®¾ē½®äøŗ `false` ę—¶ęŸ„čÆ¢ē»“ęžœä¼šä½æē”Ø `gzip` åŽ‹ē¼©åŽä¼ č¾“ļ¼Œę‹æåˆ°ę•°ę®åŽč¦å…ˆčæ›č”Œ `gzip` č§£åŽ‹ć€‚ + 当 `disableCompression` å‚ę•°č®¾ē½®äøŗ `false` ę—¶ęŸ„čÆ¢ē»“ęžœä¼šä½æē”Ø `gzip` åŽ‹ē¼©åŽä¼ č¾“ļ¼Œę‹æåˆ°ę•°ę®åŽč¦å…ˆčæ›č”Œ `gzip` č§£åŽ‹ć€‚ 6. 
`go get` å‘½ä»¤ę— ę³•čŽ·å–åŒ…ļ¼Œęˆ–č€…čŽ·å–åŒ…č¶…ę—¶ 设置 Go 代理 `go env -w GOPROXY=https://goproxy.cn,direct`怂 -## 常用 API - -### database/sql API - -* `sql.Open(DRIVER_NAME string, dataSourceName string) *DB` - - 评 API ē”Øę„ę‰“å¼€ DBļ¼Œčæ”å›žäø€äøŖē±»åž‹äøŗ \*DB ēš„åÆ¹č±”ć€‚ - -:::info -评 API ęˆåŠŸåˆ›å»ŗēš„ę—¶å€™ļ¼Œå¹¶ę²”ęœ‰åšęƒé™ē­‰ę£€ęŸ„ļ¼ŒåŖęœ‰åœØēœŸę­£ę‰§č”Œ Query ꈖ者 Exec ēš„ę—¶å€™ę‰čƒ½ēœŸę­£ēš„åŽ»åˆ›å»ŗčæžęŽ„ļ¼Œå¹¶åŒę—¶ę£€ęŸ„ user/password/host/port ę˜Æäøę˜Æåˆę³•ć€‚ -::: - -* `func (db *DB) Exec(query string, args ...interface{}) (Result, error)` - - `sql.Open` å†…ē½®ēš„ę–¹ę³•ļ¼Œē”Øę„ę‰§č”ŒéžęŸ„čÆ¢ē›øå…³ SQL怂 - -* `func (db *DB) Query(query string, args ...interface{}) (*Rows, error)` - - `sql.Open` å†…ē½®ēš„ę–¹ę³•ļ¼Œē”Øę„ę‰§č”ŒęŸ„čÆ¢čÆ­å„ć€‚ - -### 高级功能(af)API - -`af` åŒ…å°č£…äŗ†čæžęŽ„ē®”ē†ć€č®¢é˜…ć€schemalessć€å‚ę•°ē»‘å®šē­‰ TDengine é«˜ēŗ§åŠŸčƒ½ć€‚ - -#### čæžęŽ„ē®”ē† - -* `af.Open(host, user, pass, db string, port int) (*Connector, error)` - - 评 API é€ščæ‡ cgo åˆ›å»ŗäøŽ taosd ēš„čæžęŽ„ć€‚ - -* `func (conn *Connector) Close() error` - - å…³é—­äøŽ taosd ēš„čæžęŽ„ć€‚ - -#### č®¢é˜… - -* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)` - - åˆ›å»ŗę¶ˆč“¹č€…ć€‚ - -* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error` -ę³Øę„ļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `rebalanceCb` å‚ę•°ļ¼Œå½“å‰ęœŖä½æē”Ø - - č®¢é˜…å•äøŖäø»é¢˜ć€‚ - -* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error` -ę³Øę„ļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `rebalanceCb` å‚ę•°ļ¼Œå½“å‰ęœŖä½æē”Ø - - č®¢é˜…äø»é¢˜ć€‚ - -* `func (c *Consumer) Poll(timeoutMs int) tmq.Event` - - č½®čÆ¢ę¶ˆęÆć€‚ - -* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)` -ę³Øę„ļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `tmq.TopicPartition` å‚ę•°ļ¼Œå½“å‰ęœŖä½æē”Ø - - ęäŗ¤ę¶ˆęÆć€‚ - -* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)` - - čŽ·å–ę¶ˆč“¹čæ›åŗ¦ć€‚(éœ€č¦ TDengine >= 3.0.5.0, driver-go >= v3.5.0) - -* `func (c 
*Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error` -ę³Øę„ļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `ignoredTimeoutMs` å‚ę•°ļ¼Œå½“å‰ęœŖä½æē”Ø - - ęŒ‰ē…§ęŒ‡å®šēš„čæ›åŗ¦ę¶ˆč“¹ć€‚(éœ€č¦ TDengine >= 3.0.5.0, driver-go >= v3.5.0) - -* `func (c *Consumer) Close() error` - - å…³é—­čæžęŽ„ć€‚ - -#### schemaless - -* `func (conn *Connector) InfluxDBInsertLines(lines []string, precision string) error` - - 写兄 InfluxDB č”Œåč®®ć€‚ - -* `func (conn *Connector) OpenTSDBInsertTelnetLines(lines []string) error` - - 写兄 OpenTDSB telnet åč®®ę•°ę®ć€‚ - -* `func (conn *Connector) OpenTSDBInsertJsonPayload(payload string) error` - - 写兄 OpenTSDB JSON åč®®ę•°ę®ć€‚ - -#### å‚ę•°ē»‘å®š - -* `func (conn *Connector) StmtExecute(sql string, params *param.Param) (res driver.Result, err error)` - - å‚ę•°ē»‘å®šå•č”Œę’å…„ć€‚ - -* `func (conn *Connector) InsertStmt() *insertstmt.InsertStmt` - - åˆå§‹åŒ–å‚ę•°ć€‚ - -* `func (stmt *InsertStmt) Prepare(sql string) error` - - å‚ę•°ē»‘å®šé¢„å¤„ē† SQL čÆ­å„ć€‚ - -* `func (stmt *InsertStmt) SetTableName(name string) error` - - å‚ę•°ē»‘å®šč®¾ē½®č”Øåć€‚ - -* `func (stmt *InsertStmt) SetSubTableName(name string) error` - - å‚ę•°ē»‘å®šč®¾ē½®å­č”Øåć€‚ - -* `func (stmt *InsertStmt) BindParam(params []*param.Param, bindType *param.ColumnType) error` - - å‚ę•°ē»‘å®šå¤šč”Œę•°ę®ć€‚ - -* `func (stmt *InsertStmt) AddBatch() error` - - ę·»åŠ åˆ°å‚ę•°ē»‘å®šę‰¹å¤„ē†ć€‚ - -* `func (stmt *InsertStmt) Execute() error` - - ę‰§č”Œå‚ę•°ē»‘å®šć€‚ - -* `func (stmt *InsertStmt) GetAffectedRows() int` - - čŽ·å–å‚ę•°ē»‘å®šę’å…„å—å½±å“č”Œę•°ć€‚ - -* `func (stmt *InsertStmt) Close() error` - - ē»“ęŸå‚ę•°ē»‘å®šć€‚ - -### é€ščæ‡ WebSocket č®¢é˜… - -* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)` - - åˆ›å»ŗę¶ˆč“¹č€…ć€‚ - -* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error` -ę³Øę„ļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `rebalanceCb` å‚ę•°ļ¼Œå½“å‰ęœŖä½æē”Ø - - č®¢é˜…å•äøŖäø»é¢˜ć€‚ - -* `func (c *Consumer) 
SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error` -ę³Øę„ļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `rebalanceCb` å‚ę•°ļ¼Œå½“å‰ęœŖä½æē”Ø - - č®¢é˜…äø»é¢˜ć€‚ - -* `func (c *Consumer) Poll(timeoutMs int) tmq.Event` - - č½®čÆ¢ę¶ˆęÆć€‚ - -* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)` -ę³Øę„ļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `tmq.TopicPartition` å‚ę•°ļ¼Œå½“å‰ęœŖä½æē”Ø - - ęäŗ¤ę¶ˆęÆć€‚ - -* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)` - - čŽ·å–ę¶ˆč“¹čæ›åŗ¦ć€‚(éœ€č¦ TDengine >= 3.0.5.0, driver-go >= v3.5.0) - -* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error` -ę³Øę„ļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `ignoredTimeoutMs` å‚ę•°ļ¼Œå½“å‰ęœŖä½æē”Ø - - ęŒ‰ē…§ęŒ‡å®šēš„čæ›åŗ¦ę¶ˆč“¹ć€‚(éœ€č¦ TDengine >= 3.0.5.0, driver-go >= v3.5.0) - -* `func (c *Consumer) Close() error` - - å…³é—­čæžęŽ„ć€‚ - -å®Œę•“č®¢é˜…ē¤ŗä¾‹å‚č§ [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/main/examples/tmqoverws/main.go) - -### é€ščæ‡ WebSocket čæ›č”Œå‚ę•°ē»‘å®š - -* `func NewConnector(config *Config) (*Connector, error)` - - åˆ›å»ŗčæžęŽ„ć€‚ - -* `func (c *Connector) Init() (*Stmt, error)` - - åˆå§‹åŒ–å‚ę•°ć€‚ - -* `func (c *Connector) Close() error` - - å…³é—­čæžęŽ„ć€‚ - -* `func (s *Stmt) Prepare(sql string) error` - - å‚ę•°ē»‘å®šé¢„å¤„ē† SQL čÆ­å„ć€‚ - -* `func (s *Stmt) SetTableName(name string) error` - - å‚ę•°ē»‘å®šč®¾ē½®č”Øåć€‚ - -* `func (s *Stmt) SetTags(tags *param.Param, bindType *param.ColumnType) error` - - å‚ę•°ē»‘å®šč®¾ē½®ę ‡ē­¾ć€‚ - -* `func (s *Stmt) BindParam(params []*param.Param, bindType *param.ColumnType) error` - - å‚ę•°ē»‘å®šå¤šč”Œę•°ę®ć€‚ - -* `func (s *Stmt) AddBatch() error` - - ę·»åŠ åˆ°å‚ę•°ē»‘å®šę‰¹å¤„ē†ć€‚ - -* `func (s *Stmt) Exec() error` - - ę‰§č”Œå‚ę•°ē»‘å®šć€‚ - -* `func (s *Stmt) GetAffectedRows() int` - - čŽ·å–å‚ę•°ē»‘å®šę’å…„å—å½±å“č”Œę•°ć€‚ - -* `func (s *Stmt) Close() error` - - ē»“ęŸå‚ę•°ē»‘å®šć€‚ - -å®Œę•“å‚ę•°ē»‘å®šē¤ŗä¾‹å‚č§ [GitHub 
示例文件](https://github.com/taosdata/driver-go/blob/main/examples/stmtoverws/main.go) - ## API å‚č€ƒ å…ØéƒØ API 见 [driver-go 文攣](https://pkg.go.dev/github.com/taosdata/driver-go/v3) diff --git a/docs/zh/08-connector/26-rust.mdx b/docs/zh/08-connector/26-rust.mdx index c23228c8cf..79a6badfea 100644 --- a/docs/zh/08-connector/26-rust.mdx +++ b/docs/zh/08-connector/26-rust.mdx @@ -30,21 +30,57 @@ Websocket čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Rust ēš„å¹³å°ć€‚ | Rust čæžęŽ„å™Øē‰ˆęœ¬ | TDengine ē‰ˆęœ¬ | 主要功能 | | :----------------: | :--------------: | :--------------------------------------------------: | -| v0.8.10 | 3.0.5.0 or later | ę¶ˆęÆč®¢é˜…ļ¼ščŽ·å–ę¶ˆč“¹čæ›åŗ¦åŠęŒ‰ē…§ęŒ‡å®ščæ›åŗ¦å¼€å§‹ę¶ˆč“¹ć€‚ | +| v0.8.12 | 3.0.5.0 or later | ę¶ˆęÆč®¢é˜…ļ¼ščŽ·å–ę¶ˆč“¹čæ›åŗ¦åŠęŒ‰ē…§ęŒ‡å®ščæ›åŗ¦å¼€å§‹ę¶ˆč“¹ć€‚ | | v0.8.0 | 3.0.4.0 | ę”ÆęŒę— ęØ”å¼å†™å…„ć€‚ | | v0.7.6 | 3.0.3.0 | ę”ÆęŒåœØčÆ·ę±‚äø­ä½æē”Ø req_id怂 | | v0.6.0 | 3.0.0.0 | åŸŗē”€åŠŸčƒ½ć€‚ | Rust čæžęŽ„å™Øä»ē„¶åœØåæ«é€Ÿå¼€å‘äø­ļ¼Œ1.0 ä¹‹å‰ę— ę³•äæčÆå…¶å‘åŽå…¼å®¹ć€‚å»ŗč®®ä½æē”Ø 3.0 ē‰ˆęœ¬ä»„äøŠēš„ TDengineļ¼Œä»„éæå…å·²ēŸ„é—®é¢˜ć€‚ -## 安装 +## 处理错误 + +åœØęŠ„é”™åŽļ¼ŒåÆä»„čŽ·å–åˆ°é”™čÆÆēš„å…·ä½“äæ”ęÆļ¼š + +```rust +match conn.exec(sql) { + Ok(_) => { + Ok(()) + } + Err(e) => { + eprintln!("ERROR: {:?}", e); + Err(e) + } +} +``` + +## TDengine DataType 和 Rust DataType + +TDengine ē›®å‰ę”ÆęŒę—¶é—“ęˆ³ć€ę•°å­—ć€å­—ē¬¦ć€åøƒå°”ē±»åž‹ļ¼ŒäøŽ Rust åÆ¹åŗ”ē±»åž‹č½¬ę¢å¦‚äø‹ļ¼š + +| TDengine DataType | Rust DataType | +| ----------------- | ----------------- | +| TIMESTAMP | Timestamp | +| INT | i32 | +| BIGINT | i64 | +| FLOAT | f32 | +| DOUBLE | f64 | +| SMALLINT | i16 | +| TINYINT | i8 | +| BOOL | bool | +| BINARY | Vec | +| NCHAR | String | +| JSON | serde_json::Value | + +**ę³Øę„**:JSON ē±»åž‹ä»…åœØ tag äø­ę”ÆęŒć€‚ + +## 安装歄骤 ### å®‰č£…å‰å‡†å¤‡ * 安装 Rust 开发巄具链 * å¦‚ęžœä½æē”ØåŽŸē”ŸčæžęŽ„ļ¼ŒčÆ·å®‰č£… TDengine å®¢ęˆ·ē«Æé©±åŠØļ¼Œå…·ä½“ę­„éŖ¤čÆ·å‚č€ƒ[å®‰č£…å®¢ęˆ·ē«Æé©±åŠØ](../#å®‰č£…å®¢ęˆ·ē«Æé©±åŠØ) -### 添加 taos 
ä¾čµ– +### å®‰č£…čæžęŽ„å™Ø ę ¹ę®é€‰ę‹©ēš„čæžęŽ„ę–¹å¼ļ¼ŒęŒ‰ē…§å¦‚äø‹čÆ“ę˜ŽåœØ [Rust](https://rust-lang.org) é”¹ē›®äø­ę·»åŠ  [taos][taos] ä¾čµ–ļ¼š @@ -151,7 +187,8 @@ let builder = TaosBuilder::from_dsn("taos://localhost:6030")?; let conn1 = builder.build(); // use websocket protocol. -let conn2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?; +let builder2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?; +let conn2 = builder2.build(); ``` å»ŗē«‹čæžęŽ„åŽļ¼Œę‚ØåÆä»„čæ›č”Œē›øå…³ę•°ę®åŗ“ę“ä½œļ¼š @@ -233,41 +270,191 @@ async fn demo(taos: &Taos, db: &str) -> Result<(), Error> { ## 使用示例 -### å†™å…„ę•°ę® +### åˆ›å»ŗę•°ę®åŗ“å’Œč”Ø -#### SQL 写兄 +```rust +use taos::*; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let dsn = "taos://localhost:6030"; + let builder = TaosBuilder::from_dsn(dsn)?; + + let taos = builder.build()?; + + let db = "query"; + + // create database + taos.exec_many([ + format!("DROP DATABASE IF EXISTS `{db}`"), + format!("CREATE DATABASE `{db}`"), + format!("USE `{db}`"), + ]) + .await?; + + // create table + taos.exec_many([ + // create super table + "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \ + TAGS (`groupid` INT, `location` BINARY(16))", + // create child table + "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')", + ]).await?; +} +``` + +> **ę³Øę„**ļ¼šå¦‚ęžœäøä½æē”Ø `use db` ęŒ‡å®šę•°ę®åŗ“ļ¼Œåˆ™åŽē»­åÆ¹č”Øēš„ę“ä½œéƒ½éœ€č¦å¢žåŠ ę•°ę®åŗ“åē§°ä½œäøŗå‰ē¼€ļ¼Œå¦‚ db.tb怂 + +### ę’å…„ę•°ę® -#### STMT 写兄 - - - -#### Schemaless 写兄 - - - ### ęŸ„čÆ¢ę•°ę® -## API å‚č€ƒ +### ę‰§č”Œåø¦ęœ‰ req_id ēš„ SQL -### čæžęŽ„ęž„é€ å™Ø - -é€ščæ‡ DSN ę„ęž„å»ŗäø€äøŖčæžęŽ„å™Øęž„é€ å™Øć€‚ +ę­¤ req_id åÆē”ØäŗŽčÆ·ę±‚é“¾č·Æčæ½čøŖć€‚ ```rust -let cfg = TaosBuilder::default().build()?; +let rs = taos.query_with_req_id("select * from stable where tag1 is null", 1)?; ``` -使用 `builder` åÆ¹č±”åˆ›å»ŗå¤šäøŖčæžęŽ„ļ¼š +### é€ščæ‡å‚ę•°ē»‘å®šå†™å…„ę•°ę® + +TDengine ēš„ Rust 
čæžęŽ„å™Øå®žēŽ°äŗ†å‚ę•°ē»‘å®šę–¹å¼åÆ¹ę•°ę®å†™å…„ļ¼ˆINSERTļ¼‰åœŗę™Æēš„ę”ÆęŒć€‚é‡‡ē”Øčæ™ē§ę–¹å¼å†™å…„ę•°ę®ę—¶ļ¼Œčƒ½éæå… SQL čÆ­ę³•č§£ęžēš„čµ„ęŗę¶ˆč€—ļ¼Œä»Žč€ŒåœØå¾ˆå¤šęƒ…å†µäø‹ę˜¾č‘—ęå‡å†™å…„ę€§čƒ½ć€‚ + +å‚ę•°ē»‘å®šęŽ„å£čÆ¦č§[APIå‚č€ƒ](#stmt-api) + + + +### ę— ęØ”å¼å†™å…„ + +TDengine ę”ÆęŒę— ęØ”å¼å†™å…„åŠŸčƒ½ć€‚ę— ęØ”å¼å†™å…„å…¼å®¹ InfluxDB ēš„ č”Œåč®®ļ¼ˆLine Protocol)、OpenTSDB ēš„ telnet č”Œåč®®å’Œ OpenTSDB ēš„ JSON ę ¼å¼åč®®ć€‚čÆ¦ęƒ…čÆ·å‚č§[ę— ęØ”å¼å†™å…„](../../reference/schemaless/)怂 + + + +### ę‰§č”Œåø¦ęœ‰ req_id ēš„ę— ęØ”å¼å†™å…„ + +ę­¤ req_id åÆē”ØäŗŽčÆ·ę±‚é“¾č·Æčæ½čøŖć€‚ ```rust -let conn: Taos = cfg.build(); +let sml_data = SmlDataBuilder::default() + .protocol(SchemalessProtocol::Line) + .data(data) + .req_id(100u64) + .build()?; + +client.put(&sml_data)? ``` -### čæžęŽ„ę±  +### ę•°ę®č®¢é˜… + +TDengine é€ščæ‡ę¶ˆęÆé˜Ÿåˆ— [TMQ](../../../taos-sql/tmq/) åÆåŠØäø€äøŖč®¢é˜…ć€‚ + +#### åˆ›å»ŗ Topic + +```rust +taos.exec_many([ + // create topic for subscription + format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}") +]) +.await?; +``` + +#### åˆ›å»ŗ Consumer + +从 DSN å¼€å§‹ļ¼Œęž„å»ŗäø€äøŖ TMQ čæžęŽ„å™Øć€‚ + +```rust +let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?; +``` + +åˆ›å»ŗę¶ˆč“¹č€…ļ¼š + +```rust +let mut consumer = tmq.build()?; +``` + +#### č®¢é˜…ę¶ˆč“¹ę•°ę® + +ę¶ˆč“¹č€…åÆč®¢é˜…äø€äøŖęˆ–å¤šäøŖ `TOPIC`怂 + +```rust +consumer.subscribe(["tmq_meters"]).await?; +``` + +TMQ ę¶ˆęÆé˜Ÿåˆ—ę˜Æäø€äøŖ [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) ē±»åž‹ļ¼ŒåÆä»„ä½æē”Øē›øåŗ” API åÆ¹ęÆäøŖę¶ˆęÆčæ›č”Œę¶ˆč“¹ļ¼Œå¹¶é€ščæ‡ `.commit` čæ›č”Œå·²ę¶ˆč“¹ę ‡č®°ć€‚ + +```rust +{ + let mut stream = consumer.stream(); + + while let Some((offset, message)) = stream.try_next().await? { + // get information from offset + + // the topic + let topic = offset.topic(); + // the vgroup id, like partition id in kafka. 
+ let vgroup_id = offset.vgroup_id(); + println!("* in vgroup id {vgroup_id} of topic {topic}\n"); + + if let Some(data) = message.into_data() { + while let Some(block) = data.fetch_raw_block().await? { + // one block for one table, get table name if needed + let name = block.table_name(); + let records: Vec = block.deserialize().try_collect()?; + println!( + "** table: {}, got {} records: {:#?}\n", + name.unwrap(), + records.len(), + records + ); + } + } + consumer.commit(offset).await?; + } +} +``` + +čŽ·å–ę¶ˆč“¹čæ›åŗ¦ļ¼š + +ē‰ˆęœ¬č¦ę±‚ connector-rust >= v0.8.8, TDengine >= 3.0.5.0 + +```rust +let assignments = consumer.assignments().await.unwrap(); +``` + +#### ęŒ‡å®šč®¢é˜… Offset + +ęŒ‰ē…§ęŒ‡å®šēš„čæ›åŗ¦ę¶ˆč“¹ļ¼š + +ē‰ˆęœ¬č¦ę±‚ connector-rust >= v0.8.8, TDengine >= 3.0.5.0 + +```rust +consumer.offset_seek(topic, vgroup_id, offset).await; +``` + +#### å…³é—­č®¢é˜… + +```rust +consumer.unsubscribe().await; +``` + +åÆ¹äŗŽ TMQ DSN, ęœ‰ä»„äø‹é…ē½®é”¹åÆä»„čæ›č”Œč®¾ē½®ļ¼Œéœ€č¦ę³Øę„ēš„ę˜Æļ¼Œ`group.id` ę˜Æåæ…é”»ēš„ć€‚ + +- `group.id`: åŒäø€äøŖę¶ˆč“¹č€…ē»„ļ¼Œå°†ä»„č‡³å°‘ę¶ˆč“¹äø€ę¬”ēš„ę–¹å¼čæ›č”Œę¶ˆęÆč“Ÿč½½å‡č””ć€‚ +- `client.id`: åÆé€‰ēš„č®¢é˜…å®¢ęˆ·ē«ÆčÆ†åˆ«é”¹ć€‚ +- `auto.offset.reset`: åÆé€‰åˆå§‹åŒ–č®¢é˜…čµ·ē‚¹ļ¼Œ *earliest* äøŗä»Žå¤“å¼€å§‹č®¢é˜…ļ¼Œ *latest* äøŗä»…ä»Žęœ€ę–°ę•°ę®å¼€å§‹č®¢é˜…ļ¼Œé»˜č®¤äøŗä»Žå¤“č®¢é˜…ć€‚ę³Øę„ļ¼Œę­¤é€‰é”¹åœØåŒäø€äøŖ `group.id` äø­ä»…ē”Ÿę•ˆäø€ę¬”ć€‚ +- `enable.auto.commit`: 当设置为 `true` ę—¶ļ¼Œå°†åÆē”Øč‡ŖåŠØę ‡č®°ęØ”å¼ļ¼Œå½“åÆ¹ę•°ę®äø€č‡“ę€§äøę•ę„Ÿę—¶ļ¼ŒåÆä»„åÆē”Øę­¤ę–¹å¼ć€‚ +- `auto.commit.interval.ms`: č‡ŖåŠØę ‡č®°ēš„ę—¶é—“é—“éš”ć€‚ + +#### å®Œę•“ē¤ŗä¾‹ + +å®Œę•“č®¢é˜…ē¤ŗä¾‹å‚č§ [GitHub 示例文件](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs). 
+ +### äøŽčæžęŽ„ę± ä½æē”Ø åœØå¤ę‚åŗ”ē”Øäø­ļ¼Œå»ŗč®®åÆē”ØčæžęŽ„ę± ć€‚[taos] ēš„čæžęŽ„ę± é»˜č®¤ļ¼ˆå¼‚ę­„ęØ”å¼ļ¼‰ä½æē”Ø [deadpool] å®žēŽ°ć€‚ @@ -295,7 +482,17 @@ let pool: Pool = Pool::builder(Manager::from_dsn(self.dsn.clone()). let taos = pool.get()?; ``` -### čæžęŽ„ +### ę›“å¤šē¤ŗä¾‹ēØ‹åŗ + +ē¤ŗä¾‹ēØ‹åŗęŗē ä½äŗŽ `TDengine/examples/rust` äø‹: + +čÆ·å‚č€ƒļ¼š[rust example](https://github.com/taosdata/TDengine/tree/3.0/examples/rust) + +## åøøč§é—®é¢˜ + +čÆ·å‚č€ƒ [FAQ](../../../train-faq/faq) + +## API å‚č€ƒ [Taos][struct.Taos] åÆ¹č±”ęä¾›äŗ†å¤šäøŖę•°ę®åŗ“ę“ä½œēš„ API: @@ -381,9 +578,13 @@ let taos = pool.get()?; - `.create_database(database: &str)`: ę‰§č”Œ `CREATE DATABASE` čÆ­å„ć€‚ - `.use_database(database: &str)`: ę‰§č”Œ `USE` čÆ­å„ć€‚ -é™¤ę­¤ä¹‹å¤–ļ¼ŒčÆ„ē»“ęž„ä¹Ÿę˜Æ [å‚ę•°ē»‘å®š](#å‚ę•°ē»‘å®šęŽ„å£) 和 [č”Œåč®®ęŽ„å£](#č”Œåč®®ęŽ„å£) ēš„å…„å£ļ¼Œä½æē”Øę–¹ę³•čÆ·å‚č€ƒå…·ä½“ēš„ API čÆ“ę˜Žć€‚ +é™¤ę­¤ä¹‹å¤–ļ¼ŒčÆ„ē»“ęž„ä¹Ÿę˜Æå‚ę•°ē»‘å®šå’Œč”Œåč®®ęŽ„å£ēš„å…„å£ļ¼Œä½æē”Øę–¹ę³•čÆ·å‚č€ƒå…·ä½“ēš„ API čÆ“ę˜Žć€‚ -### å‚ę•°ē»‘å®šęŽ„å£ +

+ +å‚ę•°ē»‘å®šęŽ„å£ + +

äøŽ C ęŽ„å£ē±»ä¼¼ļ¼ŒRust ęä¾›å‚ę•°ē»‘å®šęŽ„å£ć€‚é¦–å…ˆļ¼Œé€ščæ‡ [Taos][struct.Taos] åÆ¹č±”åˆ›å»ŗäø€äøŖ SQL čÆ­å„ēš„å‚ę•°ē»‘å®šåÆ¹č±” [Stmt]: @@ -394,7 +595,7 @@ stmt.prepare("INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)")?; å‚ę•°ē»‘å®šåÆ¹č±”ęä¾›äŗ†äø€ē»„ęŽ„å£ē”ØäŗŽå®žēŽ°å‚ę•°ē»‘å®šļ¼š -#### `.set_tbname(name)` +`.set_tbname(name)` ē”ØäŗŽē»‘å®šč”Øåć€‚ @@ -403,7 +604,7 @@ let mut stmt = taos.stmt("insert into ? values(? ,?)")?; stmt.set_tbname("d0")?; ``` -#### `.set_tags(&[tag])` +`.set_tags(&[tag])` 当 SQL čÆ­å„ä½æē”Øč¶…ēŗ§č”Øę—¶ļ¼Œē”ØäŗŽē»‘å®šå­č”Øč”Øåå’Œę ‡ē­¾å€¼ļ¼š @@ -413,7 +614,7 @@ stmt.set_tbname("d0")?; stmt.set_tags(&[Value::VarChar("ę¶›ę€".to_string())])?; ``` -#### `.bind(&[column])` +`.bind(&[column])` ē”ØäŗŽē»‘å®šå€¼ē±»åž‹ć€‚ä½æē”Ø [ColumnView] ē»“ęž„ä½“ęž„å»ŗéœ€č¦ēš„ē±»åž‹å¹¶ē»‘å®šļ¼š @@ -437,7 +638,7 @@ let params = vec![ let rows = stmt.bind(¶ms)?.add_batch()?.execute()?; ``` -#### `.execute()` +`.execute()` ę‰§č”Œ SQL怂[Stmt] åÆ¹č±”åÆä»„å¤ē”Øļ¼ŒåœØę‰§č”ŒåŽåÆä»„é‡ę–°ē»‘å®šå¹¶ę‰§č”Œć€‚ę‰§č”Œå‰čÆ·ē”®äæę‰€ęœ‰ę•°ę®å·²é€ščæ‡ `.add_batch` åŠ å…„åˆ°ę‰§č”Œé˜Ÿåˆ—äø­ć€‚ @@ -452,92 +653,6 @@ stmt.execute()?; äø€äøŖåÆčæč”Œēš„ē¤ŗä¾‹čÆ·č§ [GitHub äøŠēš„ē¤ŗä¾‹](https://github.com/taosdata/taos-connector-rust/blob/main/examples/bind.rs)怂 -### č®¢é˜… - -TDengine é€ščæ‡ę¶ˆęÆé˜Ÿåˆ— [TMQ](../../../taos-sql/tmq/) åÆåŠØäø€äøŖč®¢é˜…ć€‚ - -从 DSN å¼€å§‹ļ¼Œęž„å»ŗäø€äøŖ TMQ čæžęŽ„å™Øć€‚ - -```rust -let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?; -``` - -åˆ›å»ŗę¶ˆč“¹č€…ļ¼š - -```rust -let mut consumer = tmq.build()?; -``` - -ę¶ˆč“¹č€…åÆč®¢é˜…äø€äøŖęˆ–å¤šäøŖ `TOPIC`怂 - -```rust -consumer.subscribe(["tmq_meters"]).await?; -``` - -TMQ ę¶ˆęÆé˜Ÿåˆ—ę˜Æäø€äøŖ [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) ē±»åž‹ļ¼ŒåÆä»„ä½æē”Øē›øåŗ” API åÆ¹ęÆäøŖę¶ˆęÆčæ›č”Œę¶ˆč“¹ļ¼Œå¹¶é€ščæ‡ `.commit` čæ›č”Œå·²ę¶ˆč“¹ę ‡č®°ć€‚ - -```rust -{ - let mut stream = consumer.stream(); - - while let 
Some((offset, message)) = stream.try_next().await? { - // get information from offset - - // the topic - let topic = offset.topic(); - // the vgroup id, like partition id in kafka. - let vgroup_id = offset.vgroup_id(); - println!("* in vgroup id {vgroup_id} of topic {topic}\n"); - - if let Some(data) = message.into_data() { - while let Some(block) = data.fetch_raw_block().await? { - // one block for one table, get table name if needed - let name = block.table_name(); - let records: Vec = block.deserialize().try_collect()?; - println!( - "** table: {}, got {} records: {:#?}\n", - name.unwrap(), - records.len(), - records - ); - } - } - consumer.commit(offset).await?; - } -} -``` - -čŽ·å–ę¶ˆč“¹čæ›åŗ¦ļ¼š - -ē‰ˆęœ¬č¦ę±‚ connector-rust >= v0.8.8, TDengine >= 3.0.5.0 - -```rust -let assignments = consumer.assignments().await.unwrap(); -``` - -ęŒ‰ē…§ęŒ‡å®šēš„čæ›åŗ¦ę¶ˆč“¹ļ¼š - -ē‰ˆęœ¬č¦ę±‚ connector-rust >= v0.8.8, TDengine >= 3.0.5.0 - -```rust -consumer.offset_seek(topic, vgroup_id, offset).await; -``` - -åœę­¢č®¢é˜…ļ¼š - -```rust -consumer.unsubscribe().await; -``` - -åÆ¹äŗŽ TMQ DSN, ęœ‰ä»„äø‹é…ē½®é”¹åÆä»„čæ›č”Œč®¾ē½®ļ¼Œéœ€č¦ę³Øę„ēš„ę˜Æļ¼Œ`group.id` ę˜Æåæ…é”»ēš„ć€‚ - -- `group.id`: åŒäø€äøŖę¶ˆč“¹č€…ē»„ļ¼Œå°†ä»„č‡³å°‘ę¶ˆč“¹äø€ę¬”ēš„ę–¹å¼čæ›č”Œę¶ˆęÆč“Ÿč½½å‡č””ć€‚ -- `client.id`: åÆé€‰ēš„č®¢é˜…å®¢ęˆ·ē«ÆčÆ†åˆ«é”¹ć€‚ -- `auto.offset.reset`: åÆé€‰åˆå§‹åŒ–č®¢é˜…čµ·ē‚¹ļ¼Œ *earliest* äøŗä»Žå¤“å¼€å§‹č®¢é˜…ļ¼Œ *latest* äøŗä»…ä»Žęœ€ę–°ę•°ę®å¼€å§‹č®¢é˜…ļ¼Œé»˜č®¤äøŗä»Žå¤“č®¢é˜…ć€‚ę³Øę„ļ¼Œę­¤é€‰é”¹åœØåŒäø€äøŖ `group.id` äø­ä»…ē”Ÿę•ˆäø€ę¬”ć€‚ -- `enable.auto.commit`: 当设置为 `true` ę—¶ļ¼Œå°†åÆē”Øč‡ŖåŠØę ‡č®°ęØ”å¼ļ¼Œå½“åÆ¹ę•°ę®äø€č‡“ę€§äøę•ę„Ÿę—¶ļ¼ŒåÆä»„åÆē”Øę­¤ę–¹å¼ć€‚ -- `auto.commit.interval.ms`: č‡ŖåŠØę ‡č®°ēš„ę—¶é—“é—“éš”ć€‚ - -å®Œę•“č®¢é˜…ē¤ŗä¾‹å‚č§ [GitHub 示例文件](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs). 
å…¶ä»–ē›øå…³ē»“ęž„ä½“ API ä½æē”ØčÆ“ę˜ŽčÆ·ē§»ę­„ Rust ę–‡ę”£ę‰˜ē®”ē½‘é”µļ¼šć€‚ diff --git a/docs/zh/08-connector/30-python.mdx b/docs/zh/08-connector/30-python.mdx index 8752dc2145..c3ec224354 100644 --- a/docs/zh/08-connector/30-python.mdx +++ b/docs/zh/08-connector/30-python.mdx @@ -25,6 +25,16 @@ Python čæžęŽ„å™Øēš„ęŗē ę‰˜ē®”åœØ [GitHub](https://github.com/taosdata/taos-con ę— č®ŗä½æē”Øä»€ä¹ˆē‰ˆęœ¬ēš„ TDengine éƒ½å»ŗč®®ä½æē”Øęœ€ę–°ē‰ˆęœ¬ēš„ `taospy`怂 +|Python Connector ē‰ˆęœ¬|äø»č¦å˜åŒ–| +|:-------------------:|:----:| +|2.7.9|ę•°ę®č®¢é˜…ę”ÆęŒčŽ·å–ę¶ˆč“¹čæ›åŗ¦å’Œé‡ē½®ę¶ˆč“¹čæ›åŗ¦| +|2.7.8|ę–°å¢ž `execute_many`| + +|Python Websocket Connector ē‰ˆęœ¬|äø»č¦å˜åŒ–| +|:----------------------------:|:-----:| +|0.2.5|1. ę•°ę®č®¢é˜…ę”ÆęŒčŽ·å–ę¶ˆč“¹čæ›åŗ¦å’Œé‡ē½®ę¶ˆč“¹čæ›åŗ¦
2. ę”ÆęŒ schemaless
3. ę”ÆęŒ STMT| +|0.2.4|ę•°ę®č®¢é˜…ę–°å¢žå–ę¶ˆč®¢é˜…ę–¹ę³•| + ## 处理异常 Python čæžęŽ„å™ØåÆčƒ½ä¼šäŗ§ē”Ÿ 4 ē§å¼‚åøøļ¼š @@ -549,7 +559,7 @@ consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"}) #### č®¢é˜… topics -Comsumer API ēš„ `subscribe` ę–¹ę³•ē”ØäŗŽč®¢é˜… topics,consumer ę”ÆęŒåŒę—¶č®¢é˜…å¤šäøŖ topic怂 +Consumer API ēš„ `subscribe` ę–¹ę³•ē”ØäŗŽč®¢é˜… topics,consumer ę”ÆęŒåŒę—¶č®¢é˜…å¤šäøŖ topic怂 ```python consumer.subscribe(['topic1', 'topic2']) @@ -631,7 +641,7 @@ consumer = taosws.(conf={"group.id": "local", "td.connect.websocket.scheme": "ws #### č®¢é˜… topics -Comsumer API ēš„ `subscribe` ę–¹ę³•ē”ØäŗŽč®¢é˜… topics,consumer ę”ÆęŒåŒę—¶č®¢é˜…å¤šäøŖ topic怂 +Consumer API ēš„ `subscribe` ę–¹ę³•ē”ØäŗŽč®¢é˜… topics,consumer ę”ÆęŒåŒę—¶č®¢é˜…å¤šäøŖ topic怂 ```python consumer.subscribe(['topic1', 'topic2']) diff --git a/docs/zh/12-taos-sql/10-function.md b/docs/zh/12-taos-sql/10-function.md index 416d41614d..fc0cfbe330 100644 --- a/docs/zh/12-taos-sql/10-function.md +++ b/docs/zh/12-taos-sql/10-function.md @@ -991,18 +991,14 @@ SAMPLE(expr, k) **åŠŸčƒ½čÆ“ę˜Ž**: čŽ·å–ę•°ę®ēš„ k äøŖé‡‡ę ·å€¼ć€‚å‚ę•° k ēš„åˆę³•č¾“å…„čŒƒå›“ę˜Æ 1≤ k ≤ 1000怂 -**čæ”å›žē»“ęžœē±»åž‹**: åŒåŽŸå§‹ę•°ę®ē±»åž‹ļ¼Œ čæ”å›žē»“ęžœäø­åø¦ęœ‰čÆ„č”Œč®°å½•ēš„ę—¶é—“ęˆ³ć€‚ +**čæ”å›žē»“ęžœē±»åž‹**: åŒåŽŸå§‹ę•°ę®ē±»åž‹ć€‚ -**é€‚ē”Øę•°ę®ē±»åž‹**: åœØč¶…ēŗ§č”ØęŸ„čÆ¢äø­ä½æē”Øę—¶ļ¼Œäøčƒ½åŗ”ē”ØåœØę ‡ē­¾ä¹‹äøŠć€‚ +**é€‚ē”Øę•°ę®ē±»åž‹**: å…ØéƒØē±»åž‹å­—ę®µć€‚ **åµŒå„—å­ęŸ„čÆ¢ę”ÆęŒ**: é€‚ē”ØäŗŽå†…å±‚ęŸ„čÆ¢å’Œå¤–å±‚ęŸ„čÆ¢ć€‚ **é€‚ē”ØäŗŽ**ļ¼šč”Øå’Œč¶…ēŗ§č”Øć€‚ -**ä½æē”ØčÆ“ę˜Ž**: - -- äøčƒ½å‚äøŽč”Øč¾¾å¼č®”ē®—ļ¼›čÆ„å‡½ę•°åÆä»„åŗ”ē”ØåœØę™®é€šč”Øå’Œč¶…ēŗ§č”ØäøŠļ¼› - ### TAIL @@ -1047,11 +1043,11 @@ TOP(expr, k) UNIQUE(expr) ``` -**åŠŸčƒ½čÆ“ę˜Ž**ļ¼ščæ”å›žčÆ„åˆ—ēš„ę•°å€¼é¦–ę¬”å‡ŗēŽ°ēš„å€¼ć€‚čÆ„å‡½ę•°åŠŸčƒ½äøŽ distinct ē›øä¼¼ļ¼Œä½†ę˜ÆåÆä»„åŒ¹é…ę ‡ē­¾å’Œę—¶é—“ęˆ³äæ”ęÆć€‚åÆä»„é’ˆåÆ¹é™¤ę—¶é—“åˆ—ä»„å¤–ēš„å­—ę®µčæ›č”ŒęŸ„čÆ¢ļ¼ŒåÆä»„åŒ¹é…ę ‡ē­¾å’Œę—¶é—“ęˆ³ļ¼Œå…¶äø­ēš„ę 
‡ē­¾å’Œę—¶é—“ęˆ³ę˜Æē¬¬äø€ę¬”å‡ŗēŽ°ę—¶åˆ»ēš„ę ‡ē­¾å’Œę—¶é—“ęˆ³ć€‚ +**åŠŸčƒ½čÆ“ę˜Ž**ļ¼ščæ”å›žčÆ„åˆ—ę•°ę®é¦–ę¬”å‡ŗēŽ°ēš„å€¼ć€‚čÆ„å‡½ę•°åŠŸčƒ½äøŽ distinct 相似。 **čæ”å›žę•°ę®ē±»åž‹**ļ¼šåŒåŗ”ē”Øēš„å­—ę®µć€‚ -**é€‚ē”Øę•°ę®ē±»åž‹**ļ¼šé€‚åˆäŗŽé™¤ę—¶é—“ē±»åž‹ä»„å¤–ēš„å­—ę®µć€‚ +**é€‚ē”Øę•°ę®ē±»åž‹**ļ¼šå…ØéƒØē±»åž‹å­—ę®µć€‚ **é€‚ē”ØäŗŽ**: č”Øå’Œč¶…ēŗ§č”Øć€‚ diff --git a/docs/zh/12-taos-sql/24-show.md b/docs/zh/12-taos-sql/24-show.md index 12ad665e42..f3397ae82d 100644 --- a/docs/zh/12-taos-sql/24-show.md +++ b/docs/zh/12-taos-sql/24-show.md @@ -36,7 +36,7 @@ SHOW CONNECTIONS; SHOW CONSUMERS; ``` -ę˜¾ē¤ŗå½“å‰ę•°ę®åŗ“äø‹ę‰€ęœ‰ę“»č·ƒēš„ę¶ˆč“¹č€…ēš„äæ”ęÆć€‚ +ę˜¾ē¤ŗå½“å‰ę•°ę®åŗ“äø‹ę‰€ęœ‰ę¶ˆč“¹č€…ēš„äæ”ęÆć€‚ ## SHOW CREATE DATABASE diff --git a/include/common/tglobal.h b/include/common/tglobal.h index d53f78b41e..bc4037c642 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -164,6 +164,8 @@ extern char tsSmlTagName[]; // extern bool tsSmlDataFormat; // extern int32_t tsSmlBatchSize; +extern int32_t tmqMaxTopicNum; + // wal extern int64_t tsWalFsyncDataSizeLimit; diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index 2cf8eacdac..3fc94f4408 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -145,7 +145,7 @@ enum { TD_DEF_MSG_TYPE(TDMT_MND_TMQ_DROP_TOPIC, "drop-topic", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_TMQ_SUBSCRIBE, "subscribe", SCMSubscribeReq, SCMSubscribeRsp) TD_DEF_MSG_TYPE(TDMT_MND_TMQ_ASK_EP, "ask-ep", SMqAskEpReq, SMqAskEpRsp) - TD_DEF_MSG_TYPE(TDMT_MND_TMQ_CONSUMER_LOST, "consumer-lost", SMqConsumerLostMsg, NULL) +// TD_DEF_MSG_TYPE(TDMT_MND_TMQ_CONSUMER_LOST, "consumer-lost", SMqConsumerLostMsg, NULL) TD_DEF_MSG_TYPE(TDMT_MND_TMQ_CONSUMER_RECOVER, "consumer-recover", SMqConsumerRecoverMsg, NULL) TD_DEF_MSG_TYPE(TDMT_MND_TMQ_HB, "consumer-hb", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_TMQ_DO_REBALANCE, "do-rebalance", SMqDoRebalanceMsg, NULL) diff --git a/include/libs/function/tudf.h 
b/include/libs/function/tudf.h index b71d50d43c..6b15833917 100644 --- a/include/libs/function/tudf.h +++ b/include/libs/function/tudf.h @@ -111,6 +111,12 @@ int32_t udfStartUdfd(int32_t startDnodeId); */ int32_t udfStopUdfd(); +/** + * get udfd pid + * + */ + int32_t udfGetUdfdPid(int32_t* pUdfdPid); + #ifdef __cplusplus } #endif diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 4eddefa820..772a668f0f 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -769,6 +769,8 @@ int32_t* taosGetErrno(); #define TSDB_CODE_TMQ_CONSUMER_MISMATCH TAOS_DEF_ERROR_CODE(0, 0x4001) #define TSDB_CODE_TMQ_CONSUMER_CLOSED TAOS_DEF_ERROR_CODE(0, 0x4002) #define TSDB_CODE_TMQ_CONSUMER_ERROR TAOS_DEF_ERROR_CODE(0, 0x4003) +#define TSDB_CODE_TMQ_TOPIC_OUT_OF_RANGE TAOS_DEF_ERROR_CODE(0, 0x4004) +#define TSDB_CODE_TMQ_GROUP_OUT_OF_RANGE TAOS_DEF_ERROR_CODE(0, 0x4005) // stream #define TSDB_CODE_STREAM_TASK_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x4100) diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 503120fe85..13dc019feb 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -749,6 +749,9 @@ static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SArray *pColumns, pReq.suid = pTableMeta->uid; pReq.source = TD_REQ_FROM_TAOX; pSql = (action == SCHEMA_ACTION_ADD_COLUMN) ? 
"sml_add_column" : "sml_modify_column_size"; + } else{ + uError("SML:0x%" PRIx64 " invalid action:%d", info->id, action); + goto end; } code = buildRequest(info->taos->id, pSql, strlen(pSql), NULL, false, &pRequest, 0); diff --git a/source/client/src/clientStmt.c b/source/client/src/clientStmt.c index 975b304bf4..8ac9550aca 100644 --- a/source/client/src/clientStmt.c +++ b/source/client/src/clientStmt.c @@ -939,8 +939,6 @@ int stmtClose(TAOS_STMT* stmt) { stmtCleanSQLInfo(pStmt); taosMemoryFree(stmt); - STMT_DLOG_E("stmt freed"); - return TSDB_CODE_SUCCESS; } diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 8758cec2ec..83550aa15d 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -652,7 +652,7 @@ static void asyncCommitOffset(tmq_t* tmq, const TAOS_RES* pRes, int32_t type, tm int32_t j = 0; int32_t numOfVgroups = taosArrayGetSize(pTopic->vgs); for (j = 0; j < numOfVgroups; j++) { - SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j); + SMqClientVg* pVg = (SMqClientVg*)taosArrayGet(pTopic->vgs, j); if (pVg->vgId == vgId) { break; } @@ -666,7 +666,7 @@ static void asyncCommitOffset(tmq_t* tmq, const TAOS_RES* pRes, int32_t type, tm return; } - SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j); + SMqClientVg* pVg = (SMqClientVg*)taosArrayGet(pTopic->vgs, j); if (pVg->offsetInfo.currentOffset.type > 0 && !tOffsetEqual(&pVg->offsetInfo.currentOffset, &pVg->offsetInfo.committedOffset)) { code = doSendCommitMsg(tmq, pVg, pTopic->topicName, pParamSet, j, numOfVgroups, type); @@ -742,13 +742,15 @@ static void asyncCommitAllOffsets(tmq_t* tmq, tmq_commit_cb* pCommitFp, void* us static void generateTimedTask(int64_t refId, int32_t type) { tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId); - if (tmq != NULL) { - int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM, 0); - *pTaskType = type; - taosWriteQitem(tmq->delayedTask, pTaskType); - tsem_post(&tmq->rspSem); - taosReleaseRef(tmqMgmt.rsetId, refId); - } + 
if(tmq == NULL) return; + + int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM, 0); + if(pTaskType == NULL) return; + + *pTaskType = type; + taosWriteQitem(tmq->delayedTask, pTaskType); + tsem_post(&tmq->rspSem); + taosReleaseRef(tmqMgmt.rsetId, refId); } void tmqAssignAskEpTask(void* param, void* tmrId) { @@ -763,19 +765,19 @@ void tmqAssignDelayedCommitTask(void* param, void* tmrId) { taosMemoryFree(param); } -void tmqAssignDelayedReportTask(void* param, void* tmrId) { - int64_t refId = *(int64_t*)param; - tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId); - if (tmq != NULL) { - int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM, 0); - *pTaskType = TMQ_DELAYED_TASK__REPORT; - taosWriteQitem(tmq->delayedTask, pTaskType); - tsem_post(&tmq->rspSem); - } - - taosReleaseRef(tmqMgmt.rsetId, refId); - taosMemoryFree(param); -} +//void tmqAssignDelayedReportTask(void* param, void* tmrId) { +// int64_t refId = *(int64_t*)param; +// tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId); +// if (tmq != NULL) { +// int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM, 0); +// *pTaskType = TMQ_DELAYED_TASK__REPORT; +// taosWriteQitem(tmq->delayedTask, pTaskType); +// tsem_post(&tmq->rspSem); +// } +// +// taosReleaseRef(tmqMgmt.rsetId, refId); +// taosMemoryFree(param); +//} int32_t tmqHbCb(void* param, SDataBuf* pMsg, int32_t code) { if (pMsg) { @@ -813,7 +815,7 @@ void tmqSendHbReq(void* param, void* tmrId) { offRows->offset = pVg->offsetInfo.currentOffset; char buf[TSDB_OFFSET_LEN] = {0}; tFormatOffset(buf, TSDB_OFFSET_LEN, &offRows->offset); - tscInfo("report offset: vgId:%d, offset:%s, rows:%"PRId64, offRows->vgId, buf, offRows->rows); + tscInfo("consumer:0x%" PRIx64 ",report offset: vgId:%d, offset:%s, rows:%"PRId64, tmq->consumerId, offRows->vgId, buf, offRows->rows); } } // tmq->needReportOffsetRows = false; @@ -1489,7 +1491,8 @@ static void initClientTopicFromRsp(SMqClientTopic* pTopic, SMqSubTopicEp* pTopic 
makeTopicVgroupKey(vgKey, pTopic->topicName, pVgEp->vgId); SVgroupSaveInfo* pInfo = taosHashGet(pVgOffsetHashMap, vgKey, strlen(vgKey)); - STqOffsetVal offsetNew = {.type = tmq->resetOffsetCfg}; + STqOffsetVal offsetNew = {0}; + offsetNew.type = tmq->resetOffsetCfg; SMqClientVg clientVg = { .pollCnt = 0, diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 7cd33955c1..c2024a9a77 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -160,9 +160,9 @@ static const SSysDbTableSchema streamSchema[] = { static const SSysDbTableSchema streamTaskSchema[] = { {.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, - {.name = "task_id", .bytes = 8, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "task_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, {.name = "node_type", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, - {.name = "node_id", .bytes = 8, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "node_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, {.name = "level", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, }; @@ -290,7 +290,7 @@ static const SSysDbTableSchema subscriptionSchema[] = { {.name = "topic_name", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, - {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, + {.name = "consumer_id", .bytes = 32, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "offset", .bytes = TSDB_OFFSET_LEN + 
VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "rows", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, }; @@ -352,7 +352,7 @@ static const SSysDbTableSchema connectionsSchema[] = { static const SSysDbTableSchema consumerSchema[] = { - {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, + {.name = "consumer_id", .bytes = 32, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "consumer_group", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "client_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 5f6ec92d50..bacafee349 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -105,6 +105,8 @@ char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; // user defined child table // bool tsSmlDataFormat = false; // int32_t tsSmlBatchSize = 10000; +// tmq +int32_t tmqMaxTopicNum = 20; // query int32_t tsQueryPolicy = 1; int32_t tsQueryRspPolicy = 0; @@ -511,6 +513,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddString(pCfg, "telemetryServer", tsTelemServer, 0) != 0) return -1; if (cfgAddInt32(pCfg, "telemetryPort", tsTelemPort, 1, 65056, 0) != 0) return -1; + if (cfgAddInt32(pCfg, "tmqMaxTopicNum", tmqMaxTopicNum, 1, 10000, 1) != 0) return -1; + if (cfgAddInt32(pCfg, "transPullupInterval", tsTransPullupInterval, 1, 10000, 1) != 0) return -1; if (cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, 1) != 0) return -1; if (cfgAddInt32(pCfg, "ttlUnit", tsTtlUnit, 1, 86400 * 365, 1) != 0) return -1; @@ -882,6 +886,8 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tstrncpy(tsTelemServer, cfgGetItem(pCfg, "telemetryServer")->str, TSDB_FQDN_LEN); tsTelemPort = 
(uint16_t)cfgGetItem(pCfg, "telemetryPort")->i32; + tmqMaxTopicNum= cfgGetItem(pCfg, "tmqMaxTopicNum")->i32; + tsTransPullupInterval = cfgGetItem(pCfg, "transPullupInterval")->i32; tsMqRebalanceInterval = cfgGetItem(pCfg, "mqRebalanceInterval")->i32; tsTtlUnit = cfgGetItem(pCfg, "ttlUnit")->i32; diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 4e8797b1ec..debb93e8ba 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -6982,8 +6982,11 @@ int32_t tDecodeSVAlterTbReqSetCtime(SDecoder* pDecoder, SVAlterTbReq* pReq, int6 if (tStartDecode(pDecoder) < 0) return -1; if (tDecodeSVAlterTbReqCommon(pDecoder, pReq) < 0) return -1; - *(int64_t *)(pDecoder->data + pDecoder->pos) = ctimeMs; - if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1; + pReq->ctimeMs = 0; + if (!tDecodeIsEnd(pDecoder)) { + *(int64_t *)(pDecoder->data + pDecoder->pos) = ctimeMs; + if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1; + } tEndDecode(pDecoder); return 0; @@ -7541,8 +7544,11 @@ int32_t tDecodeSBatchDeleteReq(SDecoder *pDecoder, SBatchDeleteReq *pReq) { int32_t tDecodeSBatchDeleteReqSetCtime(SDecoder *pDecoder, SBatchDeleteReq *pReq, int64_t ctimeMs) { if (tDecodeSBatchDeleteReqCommon(pDecoder, pReq)) return -1; - *(int64_t *)(pDecoder->data + pDecoder->pos) = ctimeMs; - if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1; + pReq->ctimeMs = 0; + if (!tDecodeIsEnd(pDecoder)) { + *(int64_t *)(pDecoder->data + pDecoder->pos) = ctimeMs; + if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1; + } return 0; } diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c index d8c43747f7..7a5581efbe 100644 --- a/source/common/src/ttime.c +++ b/source/common/src/ttime.c @@ -969,7 +969,7 @@ void taosFormatUtcTime(char* buf, int32_t bufLen, int64_t t, int32_t precision) default: fractionLen = 0; - ASSERT(false); + return; } if (taosLocalTime(", &ptm, buf) == NULL) { diff --git a/source/dnode/mnode/impl/inc/mndConsumer.h 
b/source/dnode/mnode/impl/inc/mndConsumer.h index 96401511d2..a3a31cfc5a 100644 --- a/source/dnode/mnode/impl/inc/mndConsumer.h +++ b/source/dnode/mnode/impl/inc/mndConsumer.h @@ -25,14 +25,15 @@ extern "C" { enum { MQ_CONSUMER_STATUS_REBALANCE = 1, // MQ_CONSUMER_STATUS__MODIFY_IN_REB, // this value is not used anymore - MQ_CONSUMER_STATUS__READY, - MQ_CONSUMER_STATUS__LOST, + MQ_CONSUMER_STATUS_READY, + MQ_CONSUMER_STATUS_LOST, // MQ_CONSUMER_STATUS__LOST_IN_REB, // this value is not used anymore - MQ_CONSUMER_STATUS__LOST_REBD, -}; +// MQ_CONSUMER_STATUS__LOST_REBD, +};\ int32_t mndInitConsumer(SMnode *pMnode); void mndCleanupConsumer(SMnode *pMnode); +void mndDropConsumerFromSdb(SMnode *pMnode, int64_t consumerId); SMqConsumerObj *mndAcquireConsumer(SMnode *pMnode, int64_t consumerId); void mndReleaseConsumer(SMnode *pMnode, SMqConsumerObj *pConsumer); diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h index 8b75795d41..696549fa05 100644 --- a/source/dnode/mnode/impl/inc/mndDef.h +++ b/source/dnode/mnode/impl/inc/mndDef.h @@ -137,12 +137,12 @@ typedef enum { } EDndReason; typedef enum { - CONSUMER_UPDATE__TOUCH = 1, // rebalance req do not need change consume topic - CONSUMER_UPDATE__ADD, - CONSUMER_UPDATE__REMOVE, - CONSUMER_UPDATE__LOST, - CONSUMER_UPDATE__RECOVER, - CONSUMER_UPDATE__REBALANCE, // subscribe req need change consume topic + CONSUMER_UPDATE_REB_MODIFY_NOTOPIC = 1, // topic do not need modified after rebalance + CONSUMER_UPDATE_REB_MODIFY_TOPIC, // topic need modified after rebalance + CONSUMER_UPDATE_REB_MODIFY_REMOVE, // topic need removed after rebalance +// CONSUMER_UPDATE_TIMER_LOST, + CONSUMER_UPDATE_RECOVER, + CONSUMER_UPDATE_SUB_MODIFY, // modify after subscribe req } ECsmUpdateType; typedef struct { @@ -549,7 +549,7 @@ typedef struct { // data for display int32_t pid; SEpSet ep; - int64_t upTime; + int64_t createTime; int64_t subscribeTime; int64_t rebalanceTime; @@ -560,7 +560,7 @@ typedef struct { 
} SMqConsumerObj; SMqConsumerObj* tNewSMqConsumerObj(int64_t consumerId, char cgroup[TSDB_CGROUP_LEN]); -void tDeleteSMqConsumerObj(SMqConsumerObj* pConsumer); +void tDeleteSMqConsumerObj(SMqConsumerObj* pConsumer, bool delete); int32_t tEncodeSMqConsumerObj(void** buf, const SMqConsumerObj* pConsumer); void* tDecodeSMqConsumerObj(const void* buf, SMqConsumerObj* pConsumer, int8_t sver); diff --git a/source/dnode/mnode/impl/inc/mndSubscribe.h b/source/dnode/mnode/impl/inc/mndSubscribe.h index fad316ea12..ba4328b8fe 100644 --- a/source/dnode/mnode/impl/inc/mndSubscribe.h +++ b/source/dnode/mnode/impl/inc/mndSubscribe.h @@ -25,6 +25,7 @@ extern "C" { int32_t mndInitSubscribe(SMnode *pMnode); void mndCleanupSubscribe(SMnode *pMnode); +int32_t mndGetGroupNumByTopic(SMnode *pMnode, const char *topicName); SMqSubscribeObj *mndAcquireSubscribe(SMnode *pMnode, const char *CGroup, const char *topicName); SMqSubscribeObj *mndAcquireSubscribeByKey(SMnode *pMnode, const char *key); void mndReleaseSubscribe(SMnode *pMnode, SMqSubscribeObj *pSub); diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index 4dded61ce3..47cc4a1ce7 100644 --- a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -26,6 +26,7 @@ #define MND_CONSUMER_VER_NUMBER 2 #define MND_CONSUMER_RESERVE_SIZE 64 +#define MND_MAX_GROUP_PER_TOPIC 100 #define MND_CONSUMER_LOST_HB_CNT 6 #define MND_CONSUMER_LOST_CLEAR_THRESHOLD 43200 @@ -63,7 +64,7 @@ int32_t mndInitConsumer(SMnode *pMnode) { mndSetMsgHandle(pMnode, TDMT_MND_TMQ_HB, mndProcessMqHbReq); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_ASK_EP, mndProcessAskEpReq); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_TIMER, mndProcessMqTimerMsg); - mndSetMsgHandle(pMnode, TDMT_MND_TMQ_CONSUMER_LOST, mndProcessConsumerLostMsg); +// mndSetMsgHandle(pMnode, TDMT_MND_TMQ_CONSUMER_LOST, mndProcessConsumerLostMsg); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_CONSUMER_RECOVER, mndProcessConsumerRecoverMsg); 
mndSetMsgHandle(pMnode, TDMT_MND_TMQ_LOST_CONSUMER_CLEAR, mndProcessConsumerClearMsg); @@ -75,6 +76,22 @@ int32_t mndInitConsumer(SMnode *pMnode) { void mndCleanupConsumer(SMnode *pMnode) {} +void mndDropConsumerFromSdb(SMnode *pMnode, int64_t consumerId){ + SMqConsumerClearMsg *pClearMsg = rpcMallocCont(sizeof(SMqConsumerClearMsg)); + if (pClearMsg == NULL) { + mError("consumer:0x%"PRIx64" failed to clear consumer due to out of memory. alloc size:%d", consumerId, (int32_t)sizeof(SMqConsumerClearMsg)); + return; + } + + pClearMsg->consumerId = consumerId; + SRpcMsg rpcMsg = { + .msgType = TDMT_MND_TMQ_LOST_CONSUMER_CLEAR, .pCont = pClearMsg, .contLen = sizeof(SMqConsumerClearMsg)}; + + mInfo("consumer:0x%" PRIx64 " drop from sdb", consumerId); + tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); + return; +} + bool mndRebTryStart() { int32_t old = atomic_val_compare_exchange_32(&mqRebInExecCnt, 0, 1); mDebug("tq timer, rebalance counter old val:%d", old); @@ -105,50 +122,48 @@ void mndRebCntDec() { } } -static int32_t mndProcessConsumerLostMsg(SRpcMsg *pMsg) { - SMnode *pMnode = pMsg->info.node; - SMqConsumerLostMsg *pLostMsg = pMsg->pCont; - SMqConsumerObj *pConsumer = mndAcquireConsumer(pMnode, pLostMsg->consumerId); - if (pConsumer == NULL) { - return 0; - } - - mInfo("process consumer lost msg, consumer:0x%" PRIx64 " status:%d(%s)", pLostMsg->consumerId, pConsumer->status, - mndConsumerStatusName(pConsumer->status)); - - if (pConsumer->status != MQ_CONSUMER_STATUS__READY) { - mndReleaseConsumer(pMnode, pConsumer); - return -1; - } - - SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumer->consumerId, pConsumer->cgroup); - pConsumerNew->updateType = CONSUMER_UPDATE__LOST; - - mndReleaseConsumer(pMnode, pConsumer); - - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pMsg, "lost-csm"); - if (pTrans == NULL) { - goto FAIL; - } - - if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) { - goto FAIL; - } - - if 
(mndTransPrepare(pMnode, pTrans) != 0) { - goto FAIL; - } - - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); - mndTransDrop(pTrans); - return 0; -FAIL: - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); - mndTransDrop(pTrans); - return -1; -} +//static int32_t mndProcessConsumerLostMsg(SRpcMsg *pMsg) { +// SMnode *pMnode = pMsg->info.node; +// SMqConsumerLostMsg *pLostMsg = pMsg->pCont; +// SMqConsumerObj *pConsumer = mndAcquireConsumer(pMnode, pLostMsg->consumerId); +// if (pConsumer == NULL) { +// return 0; +// } +// +// mInfo("process consumer lost msg, consumer:0x%" PRIx64 " status:%d(%s)", pLostMsg->consumerId, pConsumer->status, +// mndConsumerStatusName(pConsumer->status)); +// +// if (pConsumer->status != MQ_CONSUMER_STATUS_READY) { +// mndReleaseConsumer(pMnode, pConsumer); +// return -1; +// } +// +// SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumer->consumerId, pConsumer->cgroup); +// pConsumerNew->updateType = CONSUMER_UPDATE_TIMER_LOST; +// +// mndReleaseConsumer(pMnode, pConsumer); +// +// STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pMsg, "lost-csm"); +// if (pTrans == NULL) { +// goto FAIL; +// } +// +// if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) { +// goto FAIL; +// } +// +// if (mndTransPrepare(pMnode, pTrans) != 0) { +// goto FAIL; +// } +// +// tDeleteSMqConsumerObj(pConsumerNew, true); +// mndTransDrop(pTrans); +// return 0; +//FAIL: +// tDeleteSMqConsumerObj(pConsumerNew, true); +// mndTransDrop(pTrans); +// return -1; +//} static int32_t mndProcessConsumerRecoverMsg(SRpcMsg *pMsg) { SMnode *pMnode = pMsg->info.node; @@ -162,14 +177,14 @@ static int32_t mndProcessConsumerRecoverMsg(SRpcMsg *pMsg) { mInfo("receive consumer recover msg, consumer:0x%" PRIx64 " status:%d(%s)", pRecoverMsg->consumerId, pConsumer->status, mndConsumerStatusName(pConsumer->status)); - if (pConsumer->status != MQ_CONSUMER_STATUS__LOST_REBD) { + if 
(pConsumer->status != MQ_CONSUMER_STATUS_LOST) { mndReleaseConsumer(pMnode, pConsumer); terrno = TSDB_CODE_MND_CONSUMER_NOT_READY; return -1; } SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumer->consumerId, pConsumer->cgroup); - pConsumerNew->updateType = CONSUMER_UPDATE__RECOVER; + pConsumerNew->updateType = CONSUMER_UPDATE_RECOVER; mndReleaseConsumer(pMnode, pConsumer); @@ -181,13 +196,13 @@ static int32_t mndProcessConsumerRecoverMsg(SRpcMsg *pMsg) { if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) goto FAIL; if (mndTransPrepare(pMnode, pTrans) != 0) goto FAIL; - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); + mndTransDrop(pTrans); return 0; FAIL: - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); + mndTransDrop(pTrans); return -1; } @@ -206,13 +221,13 @@ static int32_t mndProcessConsumerClearMsg(SRpcMsg *pMsg) { mInfo("consumer:0x%" PRIx64 " needs to be cleared, status %s", pClearMsg->consumerId, mndConsumerStatusName(pConsumer->status)); - if (pConsumer->status != MQ_CONSUMER_STATUS__LOST_REBD) { - mndReleaseConsumer(pMnode, pConsumer); - return -1; - } +// if (pConsumer->status != MQ_CONSUMER_STATUS_LOST) { +// mndReleaseConsumer(pMnode, pConsumer); +// return -1; +// } SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumer->consumerId, pConsumer->cgroup); - pConsumerNew->updateType = CONSUMER_UPDATE__LOST; +// pConsumerNew->updateType = CONSUMER_UPDATE_TIMER_LOST; mndReleaseConsumer(pMnode, pConsumer); @@ -223,14 +238,14 @@ static int32_t mndProcessConsumerClearMsg(SRpcMsg *pMsg) { if (mndSetConsumerDropLogs(pMnode, pTrans, pConsumerNew) != 0) goto FAIL; if (mndTransPrepare(pMnode, pTrans) != 0) goto FAIL; - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); + mndTransDrop(pTrans); return 0; FAIL: - tDeleteSMqConsumerObj(pConsumerNew); 
- taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); + mndTransDrop(pTrans); return -1; } @@ -297,56 +312,29 @@ static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg) { int32_t hbStatus = atomic_add_fetch_32(&pConsumer->hbStatus, 1); int32_t status = atomic_load_32(&pConsumer->status); - mDebug("check for consumer:0x%" PRIx64 " status:%d(%s), sub-time:%" PRId64 ", uptime:%" PRId64 ", hbstatus:%d", - pConsumer->consumerId, status, mndConsumerStatusName(status), pConsumer->subscribeTime, pConsumer->upTime, + mDebug("check for consumer:0x%" PRIx64 " status:%d(%s), sub-time:%" PRId64 ", createTime:%" PRId64 ", hbstatus:%d", + pConsumer->consumerId, status, mndConsumerStatusName(status), pConsumer->subscribeTime, pConsumer->createTime, hbStatus); - if (status == MQ_CONSUMER_STATUS__READY) { - if (hbStatus > MND_CONSUMER_LOST_HB_CNT) { - SMqConsumerLostMsg *pLostMsg = rpcMallocCont(sizeof(SMqConsumerLostMsg)); - if (pLostMsg == NULL) { - mError("consumer:0x%"PRIx64" failed to transfer consumer status to lost due to out of memory. 
alloc size:%d", - pConsumer->consumerId, (int32_t)sizeof(SMqConsumerLostMsg)); - continue; + if (status == MQ_CONSUMER_STATUS_READY) { + if (taosArrayGetSize(pConsumer->assignedTopics) == 0) { // unsubscribe or close + mndDropConsumerFromSdb(pMnode, pConsumer->consumerId); + } else if (hbStatus > MND_CONSUMER_LOST_HB_CNT) { + taosRLockLatch(&pConsumer->lock); + int32_t topicNum = taosArrayGetSize(pConsumer->currentTopics); + for (int32_t i = 0; i < topicNum; i++) { + char key[TSDB_SUBSCRIBE_KEY_LEN]; + char *removedTopic = taosArrayGetP(pConsumer->currentTopics, i); + mndMakeSubscribeKey(key, pConsumer->cgroup, removedTopic); + SMqRebInfo *pRebSub = mndGetOrCreateRebSub(pRebMsg->rebSubHash, key); + taosArrayPush(pRebSub->removedConsumers, &pConsumer->consumerId); } - - pLostMsg->consumerId = pConsumer->consumerId; - SRpcMsg rpcMsg = { - .msgType = TDMT_MND_TMQ_CONSUMER_LOST, .pCont = pLostMsg, .contLen = sizeof(SMqConsumerLostMsg)}; - - mDebug("consumer:0x%"PRIx64" hb not received beyond threshold %d, set to lost", pConsumer->consumerId, - MND_CONSUMER_LOST_HB_CNT); - tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); + taosRUnLockLatch(&pConsumer->lock); } - } else if (status == MQ_CONSUMER_STATUS__LOST_REBD) { - // if the client is lost longer than one day, clear it. Otherwise, do nothing about the lost consumers. - if (hbStatus > MND_CONSUMER_LOST_CLEAR_THRESHOLD) { - SMqConsumerClearMsg *pClearMsg = rpcMallocCont(sizeof(SMqConsumerClearMsg)); - if (pClearMsg == NULL) { - mError("consumer:0x%"PRIx64" failed to clear consumer due to out of memory. 
alloc size:%d", - pConsumer->consumerId, (int32_t)sizeof(SMqConsumerClearMsg)); - continue; - } - - pClearMsg->consumerId = pConsumer->consumerId; - SRpcMsg rpcMsg = { - .msgType = TDMT_MND_TMQ_LOST_CONSUMER_CLEAR, .pCont = pClearMsg, .contLen = sizeof(SMqConsumerClearMsg)}; - - mDebug("consumer:0x%" PRIx64 " lost beyond threshold %d, clear it", pConsumer->consumerId, - MND_CONSUMER_LOST_CLEAR_THRESHOLD); - tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); + } else if (status == MQ_CONSUMER_STATUS_LOST) { + if (hbStatus > MND_CONSUMER_LOST_CLEAR_THRESHOLD) { // clear consumer if lost a day + mndDropConsumerFromSdb(pMnode, pConsumer->consumerId); } - } else if (status == MQ_CONSUMER_STATUS__LOST) { - taosRLockLatch(&pConsumer->lock); - int32_t topicNum = taosArrayGetSize(pConsumer->currentTopics); - for (int32_t i = 0; i < topicNum; i++) { - char key[TSDB_SUBSCRIBE_KEY_LEN]; - char *removedTopic = taosArrayGetP(pConsumer->currentTopics, i); - mndMakeSubscribeKey(key, pConsumer->cgroup, removedTopic); - SMqRebInfo *pRebSub = mndGetOrCreateRebSub(pRebMsg->rebSubHash, key); - taosArrayPush(pRebSub->removedConsumers, &pConsumer->consumerId); - } - taosRUnLockLatch(&pConsumer->lock); } else { // MQ_CONSUMER_STATUS_REBALANCE taosRLockLatch(&pConsumer->lock); @@ -413,7 +401,7 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) { int32_t status = atomic_load_32(&pConsumer->status); - if (status == MQ_CONSUMER_STATUS__LOST_REBD) { + if (status == MQ_CONSUMER_STATUS_LOST) { mInfo("try to recover consumer:0x%" PRIx64 "", consumerId); SMqConsumerRecoverMsg *pRecoverMsg = rpcMallocCont(sizeof(SMqConsumerRecoverMsg)); @@ -475,7 +463,7 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) { mError("consumer:0x%" PRIx64 " group:%s not consistent with data in sdb, saved cgroup:%s", consumerId, req.cgroup, pConsumer->cgroup); terrno = TSDB_CODE_MND_CONSUMER_NOT_EXIST; - return -1; + goto FAIL; } atomic_store_32(&pConsumer->hbStatus, 0); @@ -483,7 +471,7 @@ static int32_t 
mndProcessAskEpReq(SRpcMsg *pMsg) { // 1. check consumer status int32_t status = atomic_load_32(&pConsumer->status); - if (status == MQ_CONSUMER_STATUS__LOST_REBD) { + if (status == MQ_CONSUMER_STATUS_LOST) { mInfo("try to recover consumer:0x%" PRIx64, consumerId); SMqConsumerRecoverMsg *pRecoverMsg = rpcMallocCont(sizeof(SMqConsumerRecoverMsg)); @@ -497,10 +485,10 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) { tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &pRpcMsg); } - if (status != MQ_CONSUMER_STATUS__READY) { + if (status != MQ_CONSUMER_STATUS_READY) { mInfo("consumer:0x%" PRIx64 " not ready, status: %s", consumerId, mndConsumerStatusName(status)); terrno = TSDB_CODE_MND_CONSUMER_NOT_READY; - return -1; + goto FAIL; } int32_t serverEpoch = atomic_load_32(&pConsumer->epoch); @@ -582,7 +570,7 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) { void *buf = rpcMallocCont(tlen); if (buf == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; + goto FAIL; } SMqRspHead* pHead = buf; @@ -669,6 +657,7 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { char *cgroup = subscribe.cgroup; SMqConsumerObj *pExistedConsumer = NULL; SMqConsumerObj *pConsumerNew = NULL; + STrans *pTrans = NULL; int32_t code = -1; SArray *pTopicList = subscribe.topicNames; @@ -676,9 +665,17 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { taosArrayRemoveDuplicate(pTopicList, taosArrayCompareString, freeItem); int32_t newTopicNum = taosArrayGetSize(pTopicList); + for(int i = 0; i < newTopicNum; i++){ + int32_t gNum = mndGetGroupNumByTopic(pMnode, (const char*)taosArrayGetP(pTopicList, i)); + if(gNum >= MND_MAX_GROUP_PER_TOPIC){ + terrno = TSDB_CODE_TMQ_GROUP_OUT_OF_RANGE; + code = terrno; + goto _over; + } + } // check topic existence - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pMsg, "subscribe"); + pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pMsg, "subscribe"); if (pTrans == NULL) { goto _over; } @@ -701,8 +698,7 @@ 
int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { pConsumerNew->autoCommitInterval = subscribe.autoCommitInterval; pConsumerNew->resetOffsetCfg = subscribe.resetOffsetCfg; - // set the update type - pConsumerNew->updateType = CONSUMER_UPDATE__REBALANCE; +// pConsumerNew->updateType = CONSUMER_UPDATE_SUB_MODIFY; // use insert logic taosArrayDestroy(pConsumerNew->assignedTopics); pConsumerNew->assignedTopics = taosArrayDup(pTopicList, topicNameDup); @@ -721,7 +717,7 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { " cgroup:%s, current status:%d(%s), subscribe topic num: %d", consumerId, subscribe.cgroup, status, mndConsumerStatusName(status), newTopicNum); - if (status != MQ_CONSUMER_STATUS__READY) { + if (status != MQ_CONSUMER_STATUS_READY) { terrno = TSDB_CODE_MND_CONSUMER_NOT_READY; goto _over; } @@ -732,11 +728,11 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { } // set the update type - pConsumerNew->updateType = CONSUMER_UPDATE__REBALANCE; + pConsumerNew->updateType = CONSUMER_UPDATE_SUB_MODIFY; taosArrayDestroy(pConsumerNew->assignedTopics); pConsumerNew->assignedTopics = taosArrayDup(pTopicList, topicNameDup); - int32_t oldTopicNum = (pExistedConsumer->currentTopics) ? 
taosArrayGetSize(pExistedConsumer->currentTopics) : 0; + int32_t oldTopicNum = taosArrayGetSize(pExistedConsumer->currentTopics); int32_t i = 0, j = 0; while (i < oldTopicNum || j < newTopicNum) { @@ -791,10 +787,7 @@ _over: mndReleaseConsumer(pMnode, pExistedConsumer); } - if (pConsumerNew) { - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); - } + tDeleteSMqConsumerObj(pConsumerNew, true); // TODO: replace with destroy subscribe msg taosArrayDestroyP(subscribe.topicNames, (FDelete)taosMemoryFree); @@ -894,17 +887,17 @@ CM_DECODE_OVER: } static int32_t mndConsumerActionInsert(SSdb *pSdb, SMqConsumerObj *pConsumer) { - mDebug("consumer:0x%" PRIx64 " cgroup:%s status:%d(%s) epoch:%d load from sdb, perform insert action", + mInfo("consumer:0x%" PRIx64 " sub insert, cgroup:%s status:%d(%s) epoch:%d", pConsumer->consumerId, pConsumer->cgroup, pConsumer->status, mndConsumerStatusName(pConsumer->status), pConsumer->epoch); - pConsumer->subscribeTime = pConsumer->upTime; + pConsumer->subscribeTime = taosGetTimestampMs(); return 0; } static int32_t mndConsumerActionDelete(SSdb *pSdb, SMqConsumerObj *pConsumer) { - mDebug("consumer:0x%" PRIx64 " perform delete action, status:(%d)%s", pConsumer->consumerId, pConsumer->status, + mInfo("consumer:0x%" PRIx64 " perform delete action, status:(%d)%s", pConsumer->consumerId, pConsumer->status, mndConsumerStatusName(pConsumer->status)); - tDeleteSMqConsumerObj(pConsumer); + tDeleteSMqConsumerObj(pConsumer, false); return 0; } @@ -913,10 +906,9 @@ static void updateConsumerStatus(SMqConsumerObj *pConsumer) { if (taosArrayGetSize(pConsumer->rebNewTopics) == 0 && taosArrayGetSize(pConsumer->rebRemovedTopics) == 0) { if (status == MQ_CONSUMER_STATUS_REBALANCE) { - pConsumer->status = MQ_CONSUMER_STATUS__READY; - } else if (status == MQ_CONSUMER_STATUS__LOST) { - ASSERT(taosArrayGetSize(pConsumer->currentTopics) == 0); - pConsumer->status = MQ_CONSUMER_STATUS__LOST_REBD; + pConsumer->status = MQ_CONSUMER_STATUS_READY; 
+ } else if (status == MQ_CONSUMER_STATUS_READY) { + pConsumer->status = MQ_CONSUMER_STATUS_LOST; } } } @@ -930,7 +922,7 @@ static void removeFromNewTopicList(SMqConsumerObj *pConsumer, const char *pTopic taosArrayRemove(pConsumer->rebNewTopics, i); taosMemoryFree(p); - mDebug("consumer:0x%" PRIx64 " remove new topic:%s in the topic list, remain newTopics:%d", pConsumer->consumerId, + mInfo("consumer:0x%" PRIx64 " remove new topic:%s in the topic list, remain newTopics:%d", pConsumer->consumerId, pTopic, (int)taosArrayGetSize(pConsumer->rebNewTopics)); break; } @@ -946,7 +938,7 @@ static void removeFromRemoveTopicList(SMqConsumerObj *pConsumer, const char *pTo taosArrayRemove(pConsumer->rebRemovedTopics, i); taosMemoryFree(p); - mDebug("consumer:0x%" PRIx64 " remove topic:%s in the removed topic list, remain removedTopics:%d", + mInfo("consumer:0x%" PRIx64 " remove topic:%s in the removed topic list, remain removedTopics:%d", pConsumer->consumerId, pTopic, (int)taosArrayGetSize(pConsumer->rebRemovedTopics)); break; } @@ -961,7 +953,7 @@ static void removeFromCurrentTopicList(SMqConsumerObj *pConsumer, const char *pT taosArrayRemove(pConsumer->currentTopics, i); taosMemoryFree(topic); - mDebug("consumer:0x%" PRIx64 " remove topic:%s in the current topic list, remain currentTopics:%d", + mInfo("consumer:0x%" PRIx64 " remove topic:%s in the current topic list, remain currentTopics:%d", pConsumer->consumerId, pTopic, (int)taosArrayGetSize(pConsumer->currentTopics)); break; } @@ -984,47 +976,46 @@ static bool existInCurrentTopicList(const SMqConsumerObj* pConsumer, const char* } static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer, SMqConsumerObj *pNewConsumer) { - mDebug("consumer:0x%" PRIx64 " perform update action, update type:%d, subscribe-time:%" PRId64 ", uptime:%" PRId64, - pOldConsumer->consumerId, pNewConsumer->updateType, pOldConsumer->subscribeTime, pOldConsumer->upTime); + mInfo("consumer:0x%" PRIx64 " perform update action, update 
type:%d, subscribe-time:%" PRId64 ", createTime:%" PRId64, + pOldConsumer->consumerId, pNewConsumer->updateType, pOldConsumer->subscribeTime, pOldConsumer->createTime); taosWLockLatch(&pOldConsumer->lock); - if (pNewConsumer->updateType == CONSUMER_UPDATE__REBALANCE) { + if (pNewConsumer->updateType == CONSUMER_UPDATE_SUB_MODIFY) { TSWAP(pOldConsumer->rebNewTopics, pNewConsumer->rebNewTopics); TSWAP(pOldConsumer->rebRemovedTopics, pNewConsumer->rebRemovedTopics); TSWAP(pOldConsumer->assignedTopics, pNewConsumer->assignedTopics); - pOldConsumer->subscribeTime = pNewConsumer->upTime; + pOldConsumer->subscribeTime = taosGetTimestampMs(); pOldConsumer->status = MQ_CONSUMER_STATUS_REBALANCE; - } else if (pNewConsumer->updateType == CONSUMER_UPDATE__LOST) { - int32_t sz = taosArrayGetSize(pOldConsumer->currentTopics); - for (int32_t i = 0; i < sz; i++) { - char *topic = taosStrdup(taosArrayGetP(pOldConsumer->currentTopics, i)); - taosArrayPush(pOldConsumer->rebRemovedTopics, &topic); - } - - pOldConsumer->rebalanceTime = pNewConsumer->upTime; - - int32_t prevStatus = pOldConsumer->status; - pOldConsumer->status = MQ_CONSUMER_STATUS__LOST; - mDebug("consumer:0x%" PRIx64 " state %s -> %s, reb-time:%" PRId64 ", reb-removed-topics:%d", - pOldConsumer->consumerId, mndConsumerStatusName(prevStatus), mndConsumerStatusName(pOldConsumer->status), - pOldConsumer->rebalanceTime, (int)taosArrayGetSize(pOldConsumer->rebRemovedTopics)); - } else if (pNewConsumer->updateType == CONSUMER_UPDATE__RECOVER) { + mInfo("consumer:0x%" PRIx64 " sub update, modify existed consumer",pOldConsumer->consumerId); +// } else if (pNewConsumer->updateType == CONSUMER_UPDATE_TIMER_LOST) { +// int32_t sz = taosArrayGetSize(pOldConsumer->currentTopics); +// for (int32_t i = 0; i < sz; i++) { +// char *topic = taosStrdup(taosArrayGetP(pOldConsumer->currentTopics, i)); +// taosArrayPush(pOldConsumer->rebRemovedTopics, &topic); +// } +// +// int32_t prevStatus = pOldConsumer->status; +// pOldConsumer->status 
= MQ_CONSUMER_STATUS_LOST; +// mInfo("consumer:0x%" PRIx64 " timer update, timer lost. state %s -> %s, reb-time:%" PRId64 ", reb-removed-topics:%d", +// pOldConsumer->consumerId, mndConsumerStatusName(prevStatus), mndConsumerStatusName(pOldConsumer->status), +// pOldConsumer->rebalanceTime, (int)taosArrayGetSize(pOldConsumer->rebRemovedTopics)); + } else if (pNewConsumer->updateType == CONSUMER_UPDATE_RECOVER) { int32_t sz = taosArrayGetSize(pOldConsumer->assignedTopics); for (int32_t i = 0; i < sz; i++) { char *topic = taosStrdup(taosArrayGetP(pOldConsumer->assignedTopics, i)); taosArrayPush(pOldConsumer->rebNewTopics, &topic); } - pOldConsumer->rebalanceTime = pNewConsumer->upTime; pOldConsumer->status = MQ_CONSUMER_STATUS_REBALANCE; - } else if (pNewConsumer->updateType == CONSUMER_UPDATE__TOUCH) { + mInfo("consumer:0x%" PRIx64 " timer update, timer recover",pOldConsumer->consumerId); + } else if (pNewConsumer->updateType == CONSUMER_UPDATE_REB_MODIFY_NOTOPIC) { atomic_add_fetch_32(&pOldConsumer->epoch, 1); - pOldConsumer->rebalanceTime = pNewConsumer->upTime; - - } else if (pNewConsumer->updateType == CONSUMER_UPDATE__ADD) { + pOldConsumer->rebalanceTime = taosGetTimestampMs(); + mInfo("consumer:0x%" PRIx64 " reb update, only rebalance time", pOldConsumer->consumerId); + } else if (pNewConsumer->updateType == CONSUMER_UPDATE_REB_MODIFY_TOPIC) { char *pNewTopic = taosStrdup(taosArrayGetP(pNewConsumer->rebNewTopics, 0)); // check if exist in current topic @@ -1033,6 +1024,7 @@ static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer, // add to current topic bool existing = existInCurrentTopicList(pOldConsumer, pNewTopic); if (existing) { + mError("consumer:0x%" PRIx64 "new topic:%s should not in currentTopics", pOldConsumer->consumerId, pNewTopic); taosMemoryFree(pNewTopic); } else { // added into current topic list taosArrayPush(pOldConsumer->currentTopics, &pNewTopic); @@ -1044,17 +1036,17 @@ static int32_t mndConsumerActionUpdate(SSdb 
*pSdb, SMqConsumerObj *pOldConsumer, updateConsumerStatus(pOldConsumer); // the re-balance is triggered when the new consumer is launched. - pOldConsumer->rebalanceTime = pNewConsumer->upTime; + pOldConsumer->rebalanceTime = taosGetTimestampMs(); atomic_add_fetch_32(&pOldConsumer->epoch, 1); - mDebug("consumer:0x%" PRIx64 " state (%d)%s -> (%d)%s, new epoch:%d, reb-time:%" PRId64 + mInfo("consumer:0x%" PRIx64 " reb update add, state (%d)%s -> (%d)%s, new epoch:%d, reb-time:%" PRId64 ", current topics:%d, newTopics:%d, removeTopics:%d", pOldConsumer->consumerId, status, mndConsumerStatusName(status), pOldConsumer->status, mndConsumerStatusName(pOldConsumer->status), pOldConsumer->epoch, pOldConsumer->rebalanceTime, (int)taosArrayGetSize(pOldConsumer->currentTopics), (int)taosArrayGetSize(pOldConsumer->rebNewTopics), (int)taosArrayGetSize(pOldConsumer->rebRemovedTopics)); - } else if (pNewConsumer->updateType == CONSUMER_UPDATE__REMOVE) { + } else if (pNewConsumer->updateType == CONSUMER_UPDATE_REB_MODIFY_REMOVE) { char *removedTopic = taosArrayGetP(pNewConsumer->rebRemovedTopics, 0); // remove from removed topic @@ -1067,10 +1059,10 @@ static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer, int32_t status = pOldConsumer->status; updateConsumerStatus(pOldConsumer); - pOldConsumer->rebalanceTime = pNewConsumer->upTime; + pOldConsumer->rebalanceTime = taosGetTimestampMs(); atomic_add_fetch_32(&pOldConsumer->epoch, 1); - mDebug("consumer:0x%" PRIx64 " state (%d)%s -> (%d)%s, new epoch:%d, reb-time:%" PRId64 + mInfo("consumer:0x%" PRIx64 " reb update remove, state (%d)%s -> (%d)%s, new epoch:%d, reb-time:%" PRId64 ", current topics:%d, newTopics:%d, removeTopics:%d", pOldConsumer->consumerId, status, mndConsumerStatusName(status), pOldConsumer->status, mndConsumerStatusName(pOldConsumer->status), pOldConsumer->epoch, pOldConsumer->rebalanceTime, @@ -1133,8 +1125,12 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock 
* int32_t cols = 0; // consumer id + char consumerIdHex[32] = {0}; + sprintf(varDataVal(consumerIdHex), "0x%"PRIx64, pConsumer->consumerId); + varDataSetLen(consumerIdHex, strlen(varDataVal(consumerIdHex))); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, numOfRows, (const char *)&pConsumer->consumerId, false); + colDataSetVal(pColInfo, numOfRows, (const char *)consumerIdHex, false); // consumer group char cgroup[TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE] = {0}; @@ -1175,7 +1171,7 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock * // up time pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, numOfRows, (const char *)&pConsumer->upTime, false); + colDataSetVal(pColInfo, numOfRows, (const char *)&pConsumer->createTime, false); // subscribe time pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); @@ -1190,7 +1186,7 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock * tFormatOffset(buf, TSDB_OFFSET_LEN, &pVal); char parasStr[64 + TSDB_OFFSET_LEN + VARSTR_HEADER_SIZE] = {0}; - sprintf(varDataVal(parasStr), "tbname:%d,commit:%d,interval:%d,reset:%s", pConsumer->withTbName, pConsumer->autoCommit, pConsumer->autoCommitInterval, buf); + sprintf(varDataVal(parasStr), "tbname:%d,commit:%d,interval:%dms,reset:%s", pConsumer->withTbName, pConsumer->autoCommit, pConsumer->autoCommitInterval, buf); varDataSetLen(parasStr, strlen(varDataVal(parasStr))); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); @@ -1216,10 +1212,9 @@ static void mndCancelGetNextConsumer(SMnode *pMnode, void *pIter) { static const char *mndConsumerStatusName(int status) { switch (status) { - case MQ_CONSUMER_STATUS__READY: + case MQ_CONSUMER_STATUS_READY: return "ready"; - case MQ_CONSUMER_STATUS__LOST: - case MQ_CONSUMER_STATUS__LOST_REBD: + case MQ_CONSUMER_STATUS_LOST: return "lost"; case MQ_CONSUMER_STATUS_REBALANCE: return "rebalancing"; diff --git 
a/source/dnode/mnode/impl/src/mndDef.c b/source/dnode/mnode/impl/src/mndDef.c index 6b281fca6b..287b39d8c7 100644 --- a/source/dnode/mnode/impl/src/mndDef.c +++ b/source/dnode/mnode/impl/src/mndDef.c @@ -218,7 +218,7 @@ void *tDecodeSMqVgEp(const void *buf, SMqVgEp *pVgEp, int8_t sver) { return (void *)buf; } -SMqConsumerObj *tNewSMqConsumerObj(int64_t consumerId, char cgroup[TSDB_CGROUP_LEN]) { +SMqConsumerObj *tNewSMqConsumerObj(int64_t consumerId, char* cgroup) { SMqConsumerObj *pConsumer = taosMemoryCalloc(1, sizeof(SMqConsumerObj)); if (pConsumer == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -249,16 +249,20 @@ SMqConsumerObj *tNewSMqConsumerObj(int64_t consumerId, char cgroup[TSDB_CGROUP_L return NULL; } - pConsumer->upTime = taosGetTimestampMs(); + pConsumer->createTime = taosGetTimestampMs(); return pConsumer; } -void tDeleteSMqConsumerObj(SMqConsumerObj *pConsumer) { +void tDeleteSMqConsumerObj(SMqConsumerObj *pConsumer, bool delete) { + if(pConsumer == NULL) return; taosArrayDestroyP(pConsumer->currentTopics, (FDelete)taosMemoryFree); taosArrayDestroyP(pConsumer->rebNewTopics, (FDelete)taosMemoryFree); taosArrayDestroyP(pConsumer->rebRemovedTopics, (FDelete)taosMemoryFree); taosArrayDestroyP(pConsumer->assignedTopics, (FDelete)taosMemoryFree); + if(delete){ + taosMemoryFree(pConsumer); + } } int32_t tEncodeSMqConsumerObj(void **buf, const SMqConsumerObj *pConsumer) { @@ -273,7 +277,7 @@ int32_t tEncodeSMqConsumerObj(void **buf, const SMqConsumerObj *pConsumer) { tlen += taosEncodeFixedI32(buf, pConsumer->pid); tlen += taosEncodeSEpSet(buf, &pConsumer->ep); - tlen += taosEncodeFixedI64(buf, pConsumer->upTime); + tlen += taosEncodeFixedI64(buf, pConsumer->createTime); tlen += taosEncodeFixedI64(buf, pConsumer->subscribeTime); tlen += taosEncodeFixedI64(buf, pConsumer->rebalanceTime); @@ -343,7 +347,7 @@ void *tDecodeSMqConsumerObj(const void *buf, SMqConsumerObj *pConsumer, int8_t s buf = taosDecodeFixedI32(buf, &pConsumer->pid); buf = 
taosDecodeSEpSet(buf, &pConsumer->ep); - buf = taosDecodeFixedI64(buf, &pConsumer->upTime); + buf = taosDecodeFixedI64(buf, &pConsumer->createTime); buf = taosDecodeFixedI64(buf, &pConsumer->subscribeTime); buf = taosDecodeFixedI64(buf, &pConsumer->rebalanceTime); diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c index 42bd47ea13..3c2335a6ee 100644 --- a/source/dnode/mnode/impl/src/mndProfile.c +++ b/source/dnode/mnode/impl/src/mndProfile.c @@ -233,7 +233,6 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) { } code = -1; - taosIp2String(pReq->info.conn.clientIp, ip); if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CONNECT) != 0) { mGError("user:%s, failed to login from %s since %s", pReq->info.conn.user, ip, terrstr()); @@ -271,6 +270,7 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) { } } +_CONNECT: pConn = mndCreateConn(pMnode, pReq->info.conn.user, connReq.connType, pReq->info.conn.clientIp, pReq->info.conn.clientPort, connReq.pid, connReq.app, connReq.startTime); if (pConn == NULL) { diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index 61691a30d5..7ecd994b5a 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -160,10 +160,10 @@ static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, SMqSubscribeObj static int32_t mndPersistSubChangeVgReq(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj *pSub, const SMqRebOutputVg *pRebVg, SSubplan* pPlan) { -// if (pRebVg->oldConsumerId == pRebVg->newConsumerId) { -// terrno = TSDB_CODE_MND_INVALID_SUB_OPTION; -// return -1; -// } + if (pRebVg->oldConsumerId == pRebVg->newConsumerId) { + terrno = TSDB_CODE_MND_INVALID_SUB_OPTION; + return -1; + } void *buf; int32_t tlen; @@ -175,7 +175,7 @@ static int32_t mndPersistSubChangeVgReq(SMnode *pMnode, STrans *pTrans, SMqSubsc SVgObj *pVgObj = mndAcquireVgroup(pMnode, vgId); if (pVgObj == NULL) { 
taosMemoryFree(buf); - terrno = TSDB_CODE_OUT_OF_MEMORY; + terrno = TSDB_CODE_MND_VGROUP_NOT_EXIST; return -1; } @@ -296,17 +296,17 @@ static void addUnassignedVgroups(SMqRebOutputObj *pOutput, SHashObj *pHash) { } } -static void putNoTransferToOutput(SMqRebOutputObj *pOutput, SMqConsumerEp *pConsumerEp){ - for(int i = 0; i < taosArrayGetSize(pConsumerEp->vgs); i++){ - SMqVgEp *pVgEp = (SMqVgEp *)taosArrayGetP(pConsumerEp->vgs, i); - SMqRebOutputVg outputVg = { - .oldConsumerId = pConsumerEp->consumerId, - .newConsumerId = pConsumerEp->consumerId, - .pVgEp = pVgEp, - }; - taosArrayPush(pOutput->rebVgs, &outputVg); - } -} +//static void putNoTransferToOutput(SMqRebOutputObj *pOutput, SMqConsumerEp *pConsumerEp){ +// for(int i = 0; i < taosArrayGetSize(pConsumerEp->vgs); i++){ +// SMqVgEp *pVgEp = (SMqVgEp *)taosArrayGetP(pConsumerEp->vgs, i); +// SMqRebOutputVg outputVg = { +// .oldConsumerId = pConsumerEp->consumerId, +// .newConsumerId = pConsumerEp->consumerId, +// .pVgEp = pVgEp, +// }; +// taosArrayPush(pOutput->rebVgs, &outputVg); +// } +//} static void transferVgroupsForConsumers(SMqRebOutputObj *pOutput, SHashObj *pHash, int32_t minVgCnt, int32_t imbConsumerNum) { @@ -357,7 +357,7 @@ static void transferVgroupsForConsumers(SMqRebOutputObj *pOutput, SHashObj *pHas } } } - putNoTransferToOutput(pOutput, pConsumerEp); +// putNoTransferToOutput(pOutput, pConsumerEp); } } @@ -468,40 +468,51 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR } } - if(taosHashGetSize(pOutput->pSub->consumerHash) == 0) { // if all consumer is removed +// if(taosHashGetSize(pOutput->pSub->consumerHash) == 0) { // if all consumer is removed SMqSubscribeObj *pSub = mndAcquireSubscribeByKey(pMnode, pInput->pRebInfo->key); // put all offset rows if (pSub) { taosRLockLatch(&pSub->lock); - bool init = false; if (pOutput->pSub->offsetRows == NULL) { pOutput->pSub->offsetRows = taosArrayInit(4, sizeof(OffsetRows)); - init = true; } pIter = NULL; while (1) { 
pIter = taosHashIterate(pSub->consumerHash, pIter); if (pIter == NULL) break; SMqConsumerEp *pConsumerEp = (SMqConsumerEp *)pIter; - if (init) { - taosArrayAddAll(pOutput->pSub->offsetRows, pConsumerEp->offsetRows); -// mDebug("pSub->offsetRows is init"); - } else { - for (int j = 0; j < taosArrayGetSize(pConsumerEp->offsetRows); j++) { - OffsetRows *d1 = taosArrayGet(pConsumerEp->offsetRows, j); - for (int i = 0; i < taosArrayGetSize(pOutput->pSub->offsetRows); i++) { - OffsetRows *d2 = taosArrayGet(pOutput->pSub->offsetRows, i); - if (d1->vgId == d2->vgId) { - d2->rows += d1->rows; - d2->offset = d1->offset; -// mDebug("pSub->offsetRows add vgId:%d, after:%"PRId64", before:%"PRId64, d2->vgId, d2->rows, d1->rows); - } + SMqConsumerEp *pConsumerEpNew = taosHashGet(pOutput->pSub->consumerHash, &pConsumerEp->consumerId, sizeof(int64_t)); + + for (int j = 0; j < taosArrayGetSize(pConsumerEp->offsetRows); j++) { + OffsetRows *d1 = taosArrayGet(pConsumerEp->offsetRows, j); + bool jump = false; + for (int i = 0; pConsumerEpNew && i < taosArrayGetSize(pConsumerEpNew->vgs); i++){ + SMqVgEp *pVgEp = taosArrayGetP(pConsumerEpNew->vgs, i); + if(pVgEp->vgId == d1->vgId){ + jump = true; + mInfo("pSub->offsetRows jump, because consumer id:%"PRIx64 " and vgId:%d not change", pConsumerEp->consumerId, pVgEp->vgId); + break; } } + if(jump) continue; + bool find = false; + for (int i = 0; i < taosArrayGetSize(pOutput->pSub->offsetRows); i++) { + OffsetRows *d2 = taosArrayGet(pOutput->pSub->offsetRows, i); + if (d1->vgId == d2->vgId) { + d2->rows += d1->rows; + d2->offset = d1->offset; + find = true; + mInfo("pSub->offsetRows add vgId:%d, after:%"PRId64", before:%"PRId64, d2->vgId, d2->rows, d1->rows); + break; + } + } + if(!find){ + taosArrayPush(pOutput->pSub->offsetRows, d1); + } } } taosRUnLockLatch(&pSub->lock); mndReleaseSubscribe(pMnode, pSub); - } +// } } // 8. 
generate logs @@ -576,50 +587,44 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu return -1; } + char topic[TSDB_TOPIC_FNAME_LEN] = {0}; + char cgroup[TSDB_CGROUP_LEN] = {0}; + mndSplitSubscribeKey(pOutput->pSub->key, topic, cgroup, true); + // 3. commit log: consumer to update status and epoch // 3.1 set touched consumer int32_t consumerNum = taosArrayGetSize(pOutput->modifyConsumers); for (int32_t i = 0; i < consumerNum; i++) { int64_t consumerId = *(int64_t *)taosArrayGet(pOutput->modifyConsumers, i); - SMqConsumerObj *pConsumerOld = mndAcquireConsumer(pMnode, consumerId); - SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumerOld->consumerId, pConsumerOld->cgroup); - pConsumerNew->updateType = CONSUMER_UPDATE__TOUCH; - mndReleaseConsumer(pMnode, pConsumerOld); + SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(consumerId, cgroup); + pConsumerNew->updateType = CONSUMER_UPDATE_REB_MODIFY_NOTOPIC; if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) { - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); mndTransDrop(pTrans); return -1; } - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); } // 3.2 set new consumer consumerNum = taosArrayGetSize(pOutput->newConsumers); for (int32_t i = 0; i < consumerNum; i++) { int64_t consumerId = *(int64_t *)taosArrayGet(pOutput->newConsumers, i); + SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(consumerId, cgroup); + pConsumerNew->updateType = CONSUMER_UPDATE_REB_MODIFY_TOPIC; - SMqConsumerObj *pConsumerOld = mndAcquireConsumer(pMnode, consumerId); - SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumerOld->consumerId, pConsumerOld->cgroup); - pConsumerNew->updateType = CONSUMER_UPDATE__ADD; - char *topic = taosMemoryCalloc(1, TSDB_TOPIC_FNAME_LEN); - char cgroup[TSDB_CGROUP_LEN]; - mndSplitSubscribeKey(pOutput->pSub->key, topic, cgroup, 
true); - taosArrayPush(pConsumerNew->rebNewTopics, &topic); - mndReleaseConsumer(pMnode, pConsumerOld); + char* topicTmp = taosStrdup(topic); + taosArrayPush(pConsumerNew->rebNewTopics, &topicTmp); if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) { - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); mndTransDrop(pTrans); return -1; } - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); } // 3.3 set removed consumer @@ -627,24 +632,19 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu for (int32_t i = 0; i < consumerNum; i++) { int64_t consumerId = *(int64_t *)taosArrayGet(pOutput->removedConsumers, i); - SMqConsumerObj *pConsumerOld = mndAcquireConsumer(pMnode, consumerId); - SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumerOld->consumerId, pConsumerOld->cgroup); - pConsumerNew->updateType = CONSUMER_UPDATE__REMOVE; - char *topic = taosMemoryCalloc(1, TSDB_TOPIC_FNAME_LEN); - char cgroup[TSDB_CGROUP_LEN]; - mndSplitSubscribeKey(pOutput->pSub->key, topic, cgroup, true); - taosArrayPush(pConsumerNew->rebRemovedTopics, &topic); - mndReleaseConsumer(pMnode, pConsumerOld); + SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(consumerId, cgroup); + pConsumerNew->updateType = CONSUMER_UPDATE_REB_MODIFY_REMOVE; + + char* topicTmp = taosStrdup(topic); + taosArrayPush(pConsumerNew->rebRemovedTopics, &topicTmp); if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) { - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); mndTransDrop(pTrans); return -1; } - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); } // 4. 
TODO commit log: modification log @@ -771,8 +771,10 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) { } static int32_t mndProcessDropCgroupReq(SRpcMsg *pMsg) { - SMnode *pMnode = pMsg->info.node; - SMDropCgroupReq dropReq = {0}; + SMnode *pMnode = pMsg->info.node; + SMDropCgroupReq dropReq = {0}; + STrans *pTrans = NULL; + int32_t code = TSDB_CODE_ACTION_IN_PROGRESS; if (tDeserializeSMDropCgroupReq(pMsg->pCont, pMsg->contLen, &dropReq) != 0) { terrno = TSDB_CODE_INVALID_MSG; @@ -791,38 +793,54 @@ static int32_t mndProcessDropCgroupReq(SRpcMsg *pMsg) { } } + taosWLockLatch(&pSub->lock); if (taosHashGetSize(pSub->consumerHash) != 0) { terrno = TSDB_CODE_MND_CGROUP_USED; mError("cgroup:%s on topic:%s, failed to drop since %s", dropReq.cgroup, dropReq.topic, terrstr()); - mndReleaseSubscribe(pMnode, pSub); - return -1; + code = -1; + goto end; } - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pMsg, "drop-cgroup"); + void *pIter = NULL; + SMqConsumerObj *pConsumer; + while (1) { + pIter = sdbFetch(pMnode->pSdb, SDB_CONSUMER, pIter, (void **)&pConsumer); + if (pIter == NULL) { + break; + } + + if (strcmp(dropReq.cgroup, pConsumer->cgroup) == 0) { + mndDropConsumerFromSdb(pMnode, pConsumer->consumerId); + } + sdbRelease(pMnode->pSdb, pConsumer); + } + + pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pMsg, "drop-cgroup"); if (pTrans == NULL) { mError("cgroup: %s on topic:%s, failed to drop since %s", dropReq.cgroup, dropReq.topic, terrstr()); - mndReleaseSubscribe(pMnode, pSub); - mndTransDrop(pTrans); - return -1; + code = -1; + goto end; } mInfo("trans:%d, used to drop cgroup:%s on topic %s", pTrans->id, dropReq.cgroup, dropReq.topic); if (mndSetDropSubCommitLogs(pMnode, pTrans, pSub) < 0) { mError("cgroup %s on topic:%s, failed to drop since %s", dropReq.cgroup, dropReq.topic, terrstr()); - mndReleaseSubscribe(pMnode, pSub); - mndTransDrop(pTrans); - return -1; + code = -1; + goto end; } if 
(mndTransPrepare(pMnode, pTrans) < 0) { - mndReleaseSubscribe(pMnode, pSub); - mndTransDrop(pTrans); - return -1; + code = -1; + goto end; } - mndReleaseSubscribe(pMnode, pSub); - return TSDB_CODE_ACTION_IN_PROGRESS; +end: + taosWUnLockLatch(&pSub->lock); + mndReleaseSubscribe(pMnode, pSub); + mndTransDrop(pTrans); + + return code; } void mndCleanupSubscribe(SMnode *pMnode) {} @@ -989,6 +1007,32 @@ SMqSubscribeObj *mndAcquireSubscribeByKey(SMnode *pMnode, const char *key) { return pSub; } +int32_t mndGetGroupNumByTopic(SMnode *pMnode, const char *topicName) { + int32_t num = 0; + SSdb *pSdb = pMnode->pSdb; + + void *pIter = NULL; + SMqSubscribeObj *pSub = NULL; + while (1) { + pIter = sdbFetch(pSdb, SDB_SUBSCRIBE, pIter, (void **)&pSub); + if (pIter == NULL) break; + + + char topic[TSDB_TOPIC_FNAME_LEN]; + char cgroup[TSDB_CGROUP_LEN]; + mndSplitSubscribeKey(pSub->key, topic, cgroup, true); + if (strcmp(topic, topicName) != 0) { + sdbRelease(pSdb, pSub); + continue; + } + + num++; + sdbRelease(pSdb, pSub); + } + + return num; +} + void mndReleaseSubscribe(SMnode *pMnode, SMqSubscribeObj *pSub) { SSdb *pSdb = pMnode->pSdb; sdbRelease(pSdb, pSub); @@ -1114,9 +1158,13 @@ static int32_t buildResult(SSDataBlock *pBlock, int32_t* numOfRows, int64_t cons colDataSetVal(pColInfo, *numOfRows, (const char *)&pVgEp->vgId, false); // consumer id + char consumerIdHex[32] = {0}; + sprintf(varDataVal(consumerIdHex), "0x%"PRIx64, consumerId); + varDataSetLen(consumerIdHex, strlen(varDataVal(consumerIdHex))); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, *numOfRows, (const char *)&consumerId, consumerId == -1); - + colDataSetVal(pColInfo, *numOfRows, (const char *)consumerIdHex, consumerId == -1); + mDebug("mnd show subscriptions: topic %s, consumer:0x%" PRIx64 " cgroup %s vgid %d", varDataVal(topic), consumerId, varDataVal(cgroup), pVgEp->vgId); diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index 
4bbe531bf8..485823edf3 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ b/source/dnode/mnode/impl/src/mndTopic.c @@ -569,6 +569,11 @@ static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) { SMqTopicObj *pTopic = NULL; SDbObj *pDb = NULL; SCMCreateTopicReq createTopicReq = {0}; + if (sdbGetSize(pMnode->pSdb, SDB_TOPIC) >= tmqMaxTopicNum){ + terrno = TSDB_CODE_TMQ_TOPIC_OUT_OF_RANGE; + mError("topic num out of range"); + return code; + } if (tDeserializeSCMCreateTopicReq(pReq->pCont, pReq->contLen, &createTopicReq) != 0) { terrno = TSDB_CODE_INVALID_MSG; @@ -681,7 +686,11 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) { break; } - if (pConsumer->status == MQ_CONSUMER_STATUS__LOST_REBD) continue; + if (pConsumer->status == MQ_CONSUMER_STATUS_LOST){ + mndDropConsumerFromSdb(pMnode, pConsumer->consumerId); + mndReleaseConsumer(pMnode, pConsumer); + continue; + } int32_t sz = taosArrayGetSize(pConsumer->assignedTopics); for (int32_t i = 0; i < sz; i++) { diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 424eac13b0..eb2d2e267b 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -1980,6 +1980,11 @@ static int metaUpdateTtl(SMeta *pMeta, const SMetaEntry *pME) { int metaUpdateChangeTime(SMeta *pMeta, tb_uid_t uid, int64_t changeTimeMs) { if (!tsTtlChangeOnWrite) return 0; + if (changeTimeMs <= 0) { + metaWarn("Skip to change ttl deletetion time on write, uid: %" PRId64, uid); + return TSDB_CODE_VERSION_NOT_COMPATIBLE; + } + STtlUpdCtimeCtx ctx = {.uid = uid, .changeTimeMs = changeTimeMs}; return ttlMgrUpdateChangeTime(pMeta->pTtlMgr, &ctx); diff --git a/source/dnode/vnode/src/meta/metaTtl.c b/source/dnode/vnode/src/meta/metaTtl.c index c283472c24..af4827a9c7 100644 --- a/source/dnode/vnode/src/meta/metaTtl.c +++ b/source/dnode/vnode/src/meta/metaTtl.c @@ -358,7 +358,8 @@ int ttlMgrFlush(STtlManger *pTtlMgr, TXN *pTxn) { STtlCacheEntry *cacheEntry = 
taosHashGet(pTtlMgr->pTtlCache, pUid, sizeof(*pUid)); if (cacheEntry == NULL) { - metaError("ttlMgr flush failed to get ttl cache since %s", tstrerror(terrno)); + metaError("ttlMgr flush failed to get ttl cache since %s, uid: %" PRId64 ", type: %d", tstrerror(terrno), *pUid, + pEntry->type); goto _out; } diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 77a966715e..5f53f1c50c 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -388,7 +388,7 @@ bool tqNextBlockInWal(STqReader* pReader, const char* id) { int32_t numOfBlocks = taosArrayGetSize(pReader->submit.aSubmitTbData); while (pReader->nextBlk < numOfBlocks) { - tqDebug("tq reader next data block %d/%d, len:%d %" PRId64 " %d", pReader->nextBlk, + tqTrace("tq reader next data block %d/%d, len:%d %" PRId64 " %d", pReader->nextBlk, numOfBlocks, pReader->msg.msgLen, pReader->msg.ver, pReader->nextBlk); SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk); @@ -403,7 +403,7 @@ bool tqNextBlockInWal(STqReader* pReader, const char* id) { void* ret = taosHashGet(pReader->tbIdHash, &pSubmitTbData->uid, sizeof(int64_t)); if (ret != NULL) { - tqDebug("tq reader return submit block, uid:%" PRId64 ", ver:%" PRId64, pSubmitTbData->uid, pReader->msg.ver); + tqTrace("tq reader return submit block, uid:%" PRId64 ", ver:%" PRId64, pSubmitTbData->uid, pReader->msg.ver); SSDataBlock* pRes = NULL; int32_t code = tqRetrieveDataBlock(pReader, &pRes, NULL); @@ -412,11 +412,11 @@ bool tqNextBlockInWal(STqReader* pReader, const char* id) { } } else { pReader->nextBlk += 1; - tqDebug("tq reader discard submit block, uid:%" PRId64 ", continue", pSubmitTbData->uid); + tqTrace("tq reader discard submit block, uid:%" PRId64 ", continue", pSubmitTbData->uid); } } - qDebug("stream scan return empty, all %d submit blocks consumed, %s", numOfBlocks, id); + qTrace("stream scan return empty, all %d submit blocks consumed, %s", 
numOfBlocks, id); tDestroySubmitReq(&pReader->submit, TSDB_MSG_FLG_DECODE); pReader->msg.msgStr = NULL; @@ -604,7 +604,7 @@ static int32_t doSetVal(SColumnInfoData* pColumnInfoData, int32_t rowIndex, SCol } int32_t tqRetrieveDataBlock(STqReader* pReader, SSDataBlock** pRes, const char* id) { - tqDebug("tq reader retrieve data block %p, index:%d", pReader->msg.msgStr, pReader->nextBlk); + tqTrace("tq reader retrieve data block %p, index:%d", pReader->msg.msgStr, pReader->nextBlk); SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk++); SSDataBlock* pBlock = pReader->pResBlock; diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c index 9349c6eb0d..650f62828f 100644 --- a/source/dnode/vnode/src/tq/tqSink.c +++ b/source/dnode/vnode/src/tq/tqSink.c @@ -335,6 +335,7 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d tagArray = taosArrayInit(1, sizeof(STagVal)); if (!tagArray) { tdDestroySVCreateTbReq(pCreateTbReq); + taosMemoryFreeClear(pCreateTbReq); goto _end; } STagVal tagVal = { @@ -350,6 +351,7 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d tagArray = taosArrayDestroy(tagArray); if (pTag == NULL) { tdDestroySVCreateTbReq(pCreateTbReq); + taosMemoryFreeClear(pCreateTbReq); terrno = TSDB_CODE_OUT_OF_MEMORY; goto _end; } diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 57317643a8..85fcde2a52 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -234,8 +234,10 @@ static int32_t vnodePreProcessSubmitTbData(SVnode *pVnode, SDecoder *pCoder, int } } - *(int64_t *)(pCoder->data + pCoder->pos) = ctimeMs; - pCoder->pos += sizeof(int64_t); + if (!tDecodeIsEnd(pCoder)) { + *(int64_t *)(pCoder->data + pCoder->pos) = ctimeMs; + pCoder->pos += sizeof(int64_t); + } tEndDecode(pCoder); diff --git a/source/libs/executor/src/executor.c 
b/source/libs/executor/src/executor.c index c8b66836d5..88e2165a12 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -1078,6 +1078,16 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT SOperatorInfo* pOperator = pTaskInfo->pRoot; const char* id = GET_TASKID(pTaskInfo); + if(subType == TOPIC_SUB_TYPE__COLUMN && pOffset->type == TMQ_OFFSET__LOG){ + pOperator = extractOperatorInTree(pOperator, QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN, id); + if (pOperator == NULL) { + return -1; + } + SStreamScanInfo* pInfo = pOperator->info; + SStoreTqReader* pReaderAPI = &pTaskInfo->storageAPI.tqReaderFn; + SWalReader* pWalReader = pReaderAPI->tqReaderGetWalReader(pInfo->tqReader); + walReaderVerifyOffset(pWalReader, pOffset); + } // if pOffset equal to current offset, means continue consume if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.currentOffset)) { return 0; diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 4365cd8b95..1c5bd6d59c 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -2415,6 +2415,10 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) { } static int32_t firstLastTransferInfoImpl(SFirstLastRes* pInput, SFirstLastRes* pOutput, bool isFirst) { + if (!pInput->hasResult) { + return TSDB_CODE_FAILED; + } + if (pOutput->hasResult) { if (isFirst) { if (pInput->ts > pOutput->ts) { diff --git a/source/libs/function/src/thistogram.c b/source/libs/function/src/thistogram.c index e7d631f638..b56691f35d 100644 --- a/source/libs/function/src/thistogram.c +++ b/source/libs/function/src/thistogram.c @@ -474,8 +474,8 @@ double* tHistogramUniform(SHistogramInfo* pHisto, double* ratio, int32_t num) { } ASSERTS(total <= numOfElem && total + pHisto->elems[j + 1].num > numOfElem, - "tHistogramUniform Error, total:%d, numOfElem:%d, elems[%d].num:%d", - total, numOfElem, j + 1, pHisto->elems[j + 1].num); + 
"tHistogramUniform Error, total:%ld, numOfElem:%ld, elems[%d].num:%ld", + total, (int64_t)numOfElem, j + 1, pHisto->elems[j + 1].num); double delta = numOfElem - total; if (fabs(delta) < FLT_EPSILON) { diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c index 3ec802a7ce..8101b342a4 100644 --- a/source/libs/function/src/tpercentile.c +++ b/source/libs/function/src/tpercentile.c @@ -39,6 +39,7 @@ static SFilePage *loadDataFromFilePage(tMemBucket *pMemBucket, int32_t slotIdx) if (p != NULL) { pIdList = *(SArray **)p; } else { + taosMemoryFree(buffer); return NULL; } @@ -48,6 +49,7 @@ static SFilePage *loadDataFromFilePage(tMemBucket *pMemBucket, int32_t slotIdx) SFilePage *pg = getBufPage(pMemBucket->pBuffer, *pageId); if (pg == NULL) { + taosMemoryFree(buffer); return NULL; } diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c index 31a7dfdbc5..5b9f44c812 100644 --- a/source/libs/function/src/tudf.c +++ b/source/libs/function/src/tudf.c @@ -255,6 +255,18 @@ int32_t udfStopUdfd() { return 0; } +int32_t udfGetUdfdPid(int32_t* pUdfdPid) { + SUdfdData *pData = &udfdGlobal; + if (pData->spawnErr) { + return pData->spawnErr; + } + uv_pid_t pid = uv_process_get_pid(&pData->process); + if (pUdfdPid) { + *pUdfdPid = (int32_t)pid; + } + return TSDB_CODE_SUCCESS; +} + //============================================================================================== /* Copyright (c) 2013, Ben Noordhuis * The QUEUE is copied from queue.h under libuv diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c index 3b827a2f99..93259924d5 100644 --- a/source/libs/function/src/udfd.c +++ b/source/libs/function/src/udfd.c @@ -965,40 +965,6 @@ int32_t udfdFillUdfInfoFromMNode(void *clientRpc, char *udfName, SUdf *udf) { return code; } -int32_t udfdConnectToMnode() { - SConnectReq connReq = {0}; - connReq.connType = CONN_TYPE__UDFD; - tstrncpy(connReq.app, "udfd", sizeof(connReq.app)); - 
tstrncpy(connReq.user, TSDB_DEFAULT_USER, sizeof(connReq.user)); - char pass[TSDB_PASSWORD_LEN + 1] = {0}; - taosEncryptPass_c((uint8_t *)(TSDB_DEFAULT_PASS), strlen(TSDB_DEFAULT_PASS), pass); - tstrncpy(connReq.passwd, pass, sizeof(connReq.passwd)); - connReq.pid = taosGetPId(); - connReq.startTime = taosGetTimestampMs(); - strcpy(connReq.sVer, version); - - int32_t contLen = tSerializeSConnectReq(NULL, 0, &connReq); - void *pReq = rpcMallocCont(contLen); - tSerializeSConnectReq(pReq, contLen, &connReq); - - SUdfdRpcSendRecvInfo *msgInfo = taosMemoryCalloc(1, sizeof(SUdfdRpcSendRecvInfo)); - msgInfo->rpcType = UDFD_RPC_MNODE_CONNECT; - uv_sem_init(&msgInfo->resultSem, 0); - - SRpcMsg rpcMsg = {0}; - rpcMsg.msgType = TDMT_MND_CONNECT; - rpcMsg.pCont = pReq; - rpcMsg.contLen = contLen; - rpcMsg.info.ahandle = msgInfo; - rpcSendRequest(global.clientRpc, &global.mgmtEp.epSet, &rpcMsg, NULL); - - uv_sem_wait(&msgInfo->resultSem); - int32_t code = msgInfo->code; - uv_sem_destroy(&msgInfo->resultSem); - taosMemoryFree(msgInfo); - return code; -} - static bool udfdRpcRfp(int32_t code, tmsg_t msgType) { if (code == TSDB_CODE_RPC_NETWORK_UNAVAIL || code == TSDB_CODE_RPC_BROKEN_LINK || code == TSDB_CODE_SYN_NOT_LEADER || code == TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED || code == TSDB_CODE_SYN_RESTORING || @@ -1378,23 +1344,6 @@ static int32_t udfdRun() { return 0; } -void udfdConnectMnodeThreadFunc(void *args) { - int32_t retryMnodeTimes = 0; - int32_t code = 0; - while (retryMnodeTimes++ <= TSDB_MAX_REPLICA) { - uv_sleep(100 * (1 << retryMnodeTimes)); - code = udfdConnectToMnode(); - if (code == 0) { - break; - } - fnError("udfd can not connect to mnode, code: %s. 
retry", tstrerror(code)); - } - - if (code != 0) { - fnError("udfd can not connect to mnode"); - } -} - int32_t udfdInitResidentFuncs() { if (strlen(tsUdfdResFuncs) == 0) { return TSDB_CODE_SUCCESS; @@ -1497,9 +1446,6 @@ int main(int argc, char *argv[]) { udfdInitResidentFuncs(); - uv_thread_t mnodeConnectThread; - uv_thread_create(&mnodeConnectThread, udfdConnectMnodeThreadFunc, NULL); - udfdRun(); removeListeningPipe(); diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index 5ceebc8c83..b3afbb53c1 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -3746,10 +3746,10 @@ int32_t fltSclBuildRangeFromBlockSma(SFltSclColumnRange *colRange, SColumnDataAg taosArrayPush(points, &startPt); taosArrayPush(points, &endPt); } - SFltSclDatum min; + SFltSclDatum min = {0}; fltSclBuildDatumFromBlockSmaValue(&min, colRange->colNode->node.resType.type, pAgg->min); SFltSclPoint minPt = {.excl = false, .start = true, .val = min}; - SFltSclDatum max; + SFltSclDatum max = {0}; fltSclBuildDatumFromBlockSmaValue(&max, colRange->colNode->node.resType.type, pAgg->max); SFltSclPoint maxPt = {.excl = false, .start = false, .val = max}; taosArrayPush(points, &minPt); diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index 7457b2197e..93bcd6a4d9 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -74,7 +74,6 @@ void streamSchedByTimer(void* param, void* tmrId) { atomic_store_8(&pTask->triggerStatus, TASK_TRIGGER_STATUS__INACTIVE); if (tAppendDataToInputQueue(pTask, (SStreamQueueItem*)trigger) < 0) { - taosFreeQitem(trigger); taosTmrReset(streamSchedByTimer, (int32_t)pTask->triggerParam, pTask, streamEnv.timer, &pTask->timer); return; } diff --git a/source/libs/stream/src/tstreamFileState.c b/source/libs/stream/src/tstreamFileState.c index dc9a1f80bb..c10ef1e557 100644 --- a/source/libs/stream/src/tstreamFileState.c +++ b/source/libs/stream/src/tstreamFileState.c @@ 
-360,7 +360,7 @@ int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot, SRowBuffPos* pPos = *(SRowBuffPos**)pNode->data; ASSERT(pPos->pRowBuff && pFileState->rowSize > 0); if (streamStateGetBatchSize(batch) >= BATCH_LIMIT) { - code = streamStatePutBatch_rocksdb(pFileState->pFileStore, batch); + streamStatePutBatch_rocksdb(pFileState->pFileStore, batch); streamStateClearBatch(batch); } @@ -373,7 +373,7 @@ int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot, taosMemoryFree(buf); if (streamStateGetBatchSize(batch) > 0) { - code = streamStatePutBatch_rocksdb(pFileState->pFileStore, batch); + streamStatePutBatch_rocksdb(pFileState->pFileStore, batch); } streamStateClearBatch(batch); @@ -385,7 +385,7 @@ int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot, int32_t len = 0; sprintf(keyBuf, "%s:%" PRId64 "", taskKey, ((SStreamState*)pFileState->pFileStore)->checkPointId); streamFileStateEncode(&pFileState->flushMark, &valBuf, &len); - code = streamStatePutBatch(pFileState->pFileStore, "default", batch, keyBuf, valBuf, len, 0); + streamStatePutBatch(pFileState->pFileStore, "default", batch, keyBuf, valBuf, len, 0); taosMemoryFree(valBuf); } { @@ -489,7 +489,7 @@ int32_t recoverSnapshot(SStreamFileState* pFileState) { break; } memcpy(pNewPos->pRowBuff, pVal, pVLen); - code = tSimpleHashPut(pFileState->rowBuffMap, pNewPos->pKey, pFileState->rowSize, &pNewPos, POINTER_BYTES); + code = tSimpleHashPut(pFileState->rowBuffMap, pNewPos->pKey, pFileState->keyLen, &pNewPos, POINTER_BYTES); if (code != TSDB_CODE_SUCCESS) { destroyRowBuffPos(pNewPos); break; diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 90dbc2180e..d2b9edf753 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -630,7 +630,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_INVALID_FILE, "Index file is inval TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_INVALID_MSG, "Invalid message") 
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_MISMATCH, "Consumer mismatch") TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_CLOSED, "Consumer closed") -TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_ERROR, "Consumer error, to see log") +TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_ERROR, "Consumer error, to see log") +TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_TOPIC_OUT_OF_RANGE, "Topic num out of range") +TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_GROUP_OUT_OF_RANGE, "Group num out of range 100") // stream TAOS_DEFINE_ERROR(TSDB_CODE_STREAM_TASK_NOT_EXIST, "Stream task not exist") diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 1dff2a90d0..21fed2e1f5 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -129,6 +129,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TD-19201.py ,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TD-21561.py ,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TS-3404.py +,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TS-3581.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/balance_vgroups_r1.py -N 6 ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/taosShell.py @@ -780,7 +781,7 @@ ,,y,script,./test.sh -f tsim/user/basic.sim ,,y,script,./test.sh -f tsim/user/password.sim ,,y,script,./test.sh -f tsim/user/privilege_db.sim -,,y,script,./test.sh -f tsim/user/privilege_sysinfo.sim +#,,y,script,./test.sh -f tsim/user/privilege_sysinfo.sim ,,y,script,./test.sh -f tsim/user/privilege_topic.sim ,,y,script,./test.sh -f tsim/user/privilege_table.sim ,,y,script,./test.sh -f tsim/db/alter_option.sim diff --git a/tests/script/tsim/query/udf.sim b/tests/script/tsim/query/udf.sim index e539f11531..fbf9d50c25 100644 --- a/tests/script/tsim/query/udf.sim +++ b/tests/script/tsim/query/udf.sim @@ -8,6 +8,9 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c udf -v 1 system sh/exec.sh -n dnode1 -s start sql connect +sql alter user root pass 'taosdata2' 
+system sh/exec.sh -n dnode1 -s stop +system sh/exec.sh -n dnode1 -s start print ======== step1 udf system sh/compile_udf.sh diff --git a/tests/system-test/7-tmq/checkOffsetRowParams.py b/tests/system-test/7-tmq/checkOffsetRowParams.py index 8a24148064..f7e4c61c9c 100644 --- a/tests/system-test/7-tmq/checkOffsetRowParams.py +++ b/tests/system-test/7-tmq/checkOffsetRowParams.py @@ -245,7 +245,7 @@ class TDTestCase: tdSql.query("show consumers") tdSql.checkRows(1) - tdSql.checkData(0, 8, "tbname:1,commit:1,interval:2000,reset:earliest") + tdSql.checkData(0, 8, "tbname:1,commit:1,interval:2000ms,reset:earliest") time.sleep(2) tdLog.info("start insert data") diff --git a/tests/system-test/7-tmq/tmqParamsTest.py b/tests/system-test/7-tmq/tmqParamsTest.py index 82000ba02a..f48eaa84d4 100644 --- a/tests/system-test/7-tmq/tmqParamsTest.py +++ b/tests/system-test/7-tmq/tmqParamsTest.py @@ -94,7 +94,7 @@ class TDTestCase: consumer_commit = 1 if consumer_dict["enable.auto.commit"] == "true" else 0 consumer_tbname = 1 if consumer_dict["msg.with.table.name"] == "true" else 0 consumer_ret = "earliest" if offset_value == "" else offset_value - expected_parameters=f'tbname:{consumer_tbname},commit:{consumer_commit},interval:{paraDict["auto_commit_interval"]},reset:{consumer_ret}' + expected_parameters=f'tbname:{consumer_tbname},commit:{consumer_commit},interval:{paraDict["auto_commit_interval"]}ms,reset:{consumer_ret}' if len(offset_value) == 0: del consumer_dict["auto.offset.reset"] consumer = Consumer(consumer_dict) diff --git a/tests/system-test/99-TDcase/TS-3581.py b/tests/system-test/99-TDcase/TS-3581.py new file mode 100644 index 0000000000..18488af0a6 --- /dev/null +++ b/tests/system-test/99-TDcase/TS-3581.py @@ -0,0 +1,79 @@ +import taos +import sys +import time +import socket +import os +import threading + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class TDTestCase: + hostname = socket.gethostname() + + def 
init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + #tdSql.init(conn.cursor()) + tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files or "taosd.exe" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def create_tables(self): + tdSql.execute(f'''CREATE STABLE `dwd_log_master` (`ts` TIMESTAMP, `dim_ip` NCHAR(64)) TAGS (`group_id` BIGINT, `st_hour` NCHAR(2), `org_id` NCHAR(32), + `dev_manufacturer_name` NCHAR(64), `dev_manufacturer_id` INT, `dev_category_name` NCHAR(64), `dev_category_id` INT, `dev_feature_name` NCHAR(64), + `dev_feature_id` INT, `dev_ip` NCHAR(64), `black_list` TINYINT, `white_list` TINYINT)''') + tdSql.execute(f'''CREATE TABLE `dwd_log_master_475021043` USING `dwd_log_master` (`group_id`, `st_hour`, `org_id`, `dev_manufacturer_name`, `dev_manufacturer_id`, + `dev_category_name`, `dev_category_id`, `dev_feature_name`, `dev_feature_id`, `dev_ip`, `black_list`, `white_list`) TAGS + (475021043, "14", NULL, NULL, NULL, NULL, NULL, NULL, NULL, "172.18.22.230", NULL, NULL)''') + + def insert_data(self): + tdLog.debug("start to insert data ............") + + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:30.000','192.168.192.102')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:31.000','172.18.23.249')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:32.000','192.168.200.231')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES 
('2023-06-26 14:38:33.000','172.18.22.231')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:34.000','192.168.210.231')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:35.000','192.168.192.100')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:36.000','192.168.192.231')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:37.000','172.18.23.231')") + + tdLog.debug("insert data ............ [OK]") + + def run(self): + tdSql.prepare() + self.create_tables() + self.insert_data() + tdLog.printNoPrefix("======== test TS-3581") + + for i in range(100): + tdSql.query(f"select first(ts), last(ts), count(*) from dwd_log_master;") + tdSql.checkRows(1) + print(tdSql.queryResult) + tdSql.checkData(0, 0, '2023-06-26 14:38:30.000') + return + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tools/shell/src/shellWebsocket.c b/tools/shell/src/shellWebsocket.c index d8920cb4c3..af7f13c69c 100644 --- a/tools/shell/src/shellWebsocket.c +++ b/tools/shell/src/shellWebsocket.c @@ -17,6 +17,9 @@ #include #include +// save current database name +char curDBName[128] = ""; // TDB_MAX_DBNAME_LEN is 24, put large + int shell_conn_ws_server(bool first) { char cuttedDsn[SHELL_WS_DSN_BUFF] = {0}; int dsnLen = strlen(shell.args.dsn); @@ -59,6 +62,14 @@ int shell_conn_ws_server(bool first) { fprintf(stdout, "successfully connected to cloud service\n"); } fflush(stdout); + + // switch to current database if have + if(curDBName[0] !=0) { + char command[256]; + sprintf(command, "use %s;", curDBName); + shellRunSingleCommandWebsocketImp(command); + } + return 0; } @@ -290,7 +301,46 @@ void shellRunSingleCommandWebsocketImp(char *command) { if (shellRegexMatch(command, "^\\s*use\\s+[a-zA-Z0-9_]+\\s*;\\s*$", REG_EXTENDED | 
REG_ICASE)) { - fprintf(stdout, "Database changed.\r\n\r\n"); + + // copy dbname to curDBName + char *p = command; + bool firstStart = false; + bool firstEnd = false; + int i = 0; + while (*p != 0) { + if (*p != ' ') { + // not blank + if (!firstStart) { + firstStart = true; + } else if (firstEnd) { + if(*p == ';' && *p != '\\') { + break; + } + // database name + curDBName[i++] = *p; + if(i + 4 > sizeof(curDBName)) { + // DBName is too long, reset zero and break + i = 0; + break; + } + } + } else { + // blank + if(firstStart == true && firstEnd == false){ + firstEnd = true; + } + if(firstStart && firstEnd && i > 0){ + // blank after database name + break; + } + } + // move next + p++; + } + // append end + curDBName[i] = 0; + + fprintf(stdout, "Database changed to %s.\r\n\r\n", curDBName); fflush(stdout); ws_free_result(res); return;