diff --git a/CMakeLists.txt b/CMakeLists.txt index 66a6fd328d..ac368c29fe 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -41,6 +41,7 @@ add_subdirectory(source) add_subdirectory(tools) add_subdirectory(utils) add_subdirectory(examples/c) +add_subdirectory(tests) include(${TD_SUPPORT_DIR}/cmake.install) # docs diff --git a/deps/arm/dm_static/libdmodule.a b/deps/arm/dm_static/libdmodule.a index fff5943606..9e69059651 100644 Binary files a/deps/arm/dm_static/libdmodule.a and b/deps/arm/dm_static/libdmodule.a differ diff --git a/deps/darwin/arm/dm_static/libdmodule.a b/deps/darwin/arm/dm_static/libdmodule.a index 09c501f2ef..5fbfc4dd58 100644 Binary files a/deps/darwin/arm/dm_static/libdmodule.a and b/deps/darwin/arm/dm_static/libdmodule.a differ diff --git a/deps/darwin/x64/dm_static/libdmodule.a b/deps/darwin/x64/dm_static/libdmodule.a index 2230393565..9f81fe8381 100644 Binary files a/deps/darwin/x64/dm_static/libdmodule.a and b/deps/darwin/x64/dm_static/libdmodule.a differ diff --git a/deps/x86/dm_static/libdmodule.a b/deps/x86/dm_static/libdmodule.a index 995713ce7b..cce0217501 100644 Binary files a/deps/x86/dm_static/libdmodule.a and b/deps/x86/dm_static/libdmodule.a differ diff --git a/docs/en/07-develop/01-connect/index.md b/docs/en/07-develop/01-connect/index.md index 3f09f9fb6a..11375bd060 100644 --- a/docs/en/07-develop/01-connect/index.md +++ b/docs/en/07-develop/01-connect/index.md @@ -177,7 +177,7 @@ Just need to add the reference to [TDengine.Connector](https://www.nuget.org/pac - + diff --git a/docs/en/07-develop/04-query-data/_cs_async.mdx b/docs/en/07-develop/04-query-data/_cs_async.mdx deleted file mode 100644 index 19c8e58f32..0000000000 --- a/docs/en/07-develop/04-query-data/_cs_async.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```csharp -{{#include docs/examples/csharp/asyncQuery/Program.cs}} -``` diff --git a/docs/en/07-develop/04-query-data/index.mdx b/docs/en/07-develop/04-query-data/index.mdx index 413baf6b2c..70b1140748 100644 --- a/docs/en/07-develop/04-query-data/index.mdx +++ b/docs/en/07-develop/04-query-data/index.mdx @@ -15,7 +15,6 @@ import CQuery from "./_c.mdx"; import PhpQuery from "./_php.mdx"; import PyAsync from "./_py_async.mdx"; import NodeAsync from "./_js_async.mdx"; -import CsAsync from "./_cs_async.mdx"; import CAsync from "./_c_async.mdx"; ## Introduction @@ -174,9 +173,6 @@ Please note that async query can only be used with a native connection. 
- - - diff --git a/docs/en/07-develop/07-tmq.mdx index 8e7bbf22e9..b920d61a59 100644 --- a/docs/en/07-develop/07-tmq.mdx +++ b/docs/en/07-develop/07-tmq.mdx @@ -248,23 +248,23 @@ function close() ```csharp +class ConsumerBuilder<TValue> + ConsumerBuilder(IEnumerable<KeyValuePair<string, string>> config) -virtual IConsumer Build() - -Consumer(ConsumerBuilder builder) +public IConsumer<TValue> Build() void Subscribe(IEnumerable<string> topics) void Subscribe(string topic) -ConsumeResult Consume(int millisecondsTimeout) +ConsumeResult<TValue> Consume(int millisecondsTimeout) List<TopicPartition> Subscription() void Unsubscribe() -void Commit(ConsumeResult consumerResult) +List<TopicPartitionOffset> Commit() void Close() ``` @@ -500,25 +500,19 @@ let consumer = taos.consumer({ ```csharp -using TDengineTMQ; - -// Create consumer groups on demand (GourpID) and enable automatic commits (EnableAutoCommit), -// an automatic commit interval (AutoCommitIntervalMs), and a username (TDConnectUser) and password (TDConnectPasswd) -var cfg = new ConsumerConfig - { - EnableAutoCommit = "true" - AutoCommitIntervalMs = "1000" - GourpId = "TDengine-TMQ-C#", - TDConnectUser = "root", - TDConnectPasswd = "taosdata", - AutoOffsetReset = "latest" - MsgWithTableName = "true", - TDConnectIp = "127.0.0.1", - TDConnectPort = "6030" - }; - -var consumer = new ConsumerBuilder(cfg).Build(); - +var cfg = new Dictionary<string, string>() +{ + { "group.id", "group1" }, + { "auto.offset.reset", "latest" }, + { "td.connect.ip", "127.0.0.1" }, + { "td.connect.user", "root" }, + { "td.connect.pass", "taosdata" }, + { "td.connect.port", "6030" }, + { "client.id", "tmq_example" }, + { "enable.auto.commit", "true" }, + { "msg.with.table.name", "false" }, +}; +var consumer = new ConsumerBuilder<Dictionary<string, object>>(cfg).Build(); ``` @@ -747,10 +741,12 @@ while(true){ ## Consume data while (true) { - var consumerRes = consumer.Consume(100); - // process ConsumeResult - ProcessMsg(consumerRes); - consumer.Commit(consumerRes); + using (var result = consumer.Consume(500)) + { + if (result == null) continue; + ProcessMsg(result); + consumer.Commit(); + } } ``` diff --git a/docs/en/14-reference/02-rest-api/02-rest-api.mdx index 8f8e966f8d..5cb680e34b 100644 --- a/docs/en/14-reference/02-rest-api/02-rest-api.mdx +++ b/docs/en/14-reference/02-rest-api/02-rest-api.mdx @@ -262,6 +262,63 @@ The following types may be returned: - "INT UNSIGNED" - "BIGINT UNSIGNED" - "JSON" +- "VARBINARY" +- "GEOMETRY" + +The `VARBINARY` and `GEOMETRY` types are returned as hex strings. For example: + +Prepare data + +```bash +create database demo +use demo +create table t(ts timestamp,c1 varbinary(20),c2 geometry(100)) +insert into t values(now,'\x7f8290','point(100 100)') +``` + +Execute query + +```bash +curl --location 'http://<fqdn>:<port>/rest/sql' \ +--header 'Content-Type: text/plain' \ +--header 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' \ +--data 'select * from demo.t' +``` + +Returned result + +```json +{ + "code": 0, + "column_meta": [ + [ + "ts", + "TIMESTAMP", + 8 + ], + [ + "c1", + "VARBINARY", + 20 + ], + [ + "c2", + "GEOMETRY", + 100 + ] + ], + "data": [ + [ + "2023-11-01T06:28:15.210Z", + "7f8290", + "010100000000000000000059400000000000005940" + ] + ], + "rows": 1 +} +``` + +- `010100000000000000000059400000000000005940` is the [Well-Known Binary (WKB)](https://libgeos.org/specifications/wkb/) representation of `point(100 100)` #### Errors diff --git a/docs/en/14-reference/03-connector/09-csharp.mdx index 282be3af6b..25bef1cf51 100644 ---
a/docs/en/14-reference/03-connector/09-csharp.mdx +++ b/docs/en/14-reference/03-connector/09-csharp.mdx @@ -1,5 +1,5 @@ --- -title: C# Connector +title: TDengine C# Connector sidebar_label: C# description: This document describes the TDengine C# connector. toc_max_heading_level: 4 @@ -8,21 +8,16 @@ toc_max_heading_level: 4 import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -import Preparition from "./_preparation.mdx" -import CSInsert from "../../07-develop/03-insert-data/_cs_sql.mdx" -import CSInfluxLine from "../../07-develop/03-insert-data/_cs_line.mdx" -import CSOpenTSDBTelnet from "../../07-develop/03-insert-data/_cs_opts_telnet.mdx" -import CSOpenTSDBJson from "../../07-develop/03-insert-data/_cs_opts_json.mdx" -import CSQuery from "../../07-develop/04-query-data/_cs.mdx" -import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx" +`TDengine.Connector` is the C# connector provided by TDengine. C# developers can use it to build applications that access data in a TDengine cluster. -`TDengine.Connector` is a C# language connector provided by TDengine that allows C# developers to develop C# applications that access TDengine cluster data. +`TDengine.Connector` connects to a running TDengine instance through the TDengine client driver (taosc) and provides data writing, querying, data subscription, schemaless writing, and parameter binding. Since v3.0.1, `TDengine.Connector` can also establish WebSocket connections, which support data writing, querying, and parameter binding. -The `TDengine.Connector` connector supports connect to TDengine instances via the TDengine client driver (taosc), providing data writing, querying, subscription, schemaless writing, bind interface, etc.The `TDengine.Connector` also supports WebSocket from v3.0.1 and developers can build connection through DSN, which supports data writing, querying, and parameter binding, etc. +This article describes how to install `TDengine.Connector` in a Linux or Windows environment and connect to a TDengine cluster through `TDengine.Connector` to perform basic operations such as writing and querying data. -This article describes how to install `TDengine.Connector` in a Linux or Windows environment and connect to TDengine clusters via `TDengine.Connector` to perform basic operations such as data writing and querying. - -Note: TDengine Connector 3.x is not compatible with TDengine 2.x. In an environment with TDengine 2.x, you must use TDengine.Connector 1.x for the C# connector. +:::warning +* `TDengine.Connector` version 3.1.0 has been completely refactored and is no longer compatible with 3.0.2 and earlier versions. For the 3.0.2 documentation, please refer to [NuGet](https://www.nuget.org/packages/TDengine.Connector/3.0.2). +* `TDengine.Connector` 3.x is not compatible with TDengine 2.x. If you need to use the C# connector in an environment running TDengine 2.x, please use TDengine.Connector 1.x. +::: The source code of `TDengine.Connector` is hosted on [GitHub](https://github.com/taosdata/taos-connector-dotnet/tree/3.0). @@ -30,286 +25,1164 @@ The source code of `TDengine.Connector` is hosted on [GitHub](https://github.com The supported platforms are the same as those supported by the TDengine client driver. -:::note -Please note TDengine does not support 32bit Windows any more.
+:::warning +TDengine no longer supports 32-bit Windows platforms. ::: ## Version support -Please refer to [version support list](../#version-support) +| **Connector version** | **TDengine version** | +|-----------------------|----------------------| +| 3.1.0 | 3.2.1.0/3.1.1.18 | -## Supported features +## Handling exceptions - +`TDengine.Connector` throws exceptions that the application needs to handle. The taosc exception type `TDengineError` contains an error code and an error message; the application can handle the exception based on both. - +## TDengine DataType vs. C# DataType -1. Connection Management -2. General Query -3. Continuous Query -4. Parameter Binding -5. Subscription -6. Schemaless +| TDengine DataType | C# Type | +|-------------------|-------------------------| +| TIMESTAMP | DateTime | +| TINYINT | sbyte | +| SMALLINT | short | +| INT | int | +| BIGINT | long | +| TINYINT UNSIGNED | byte | +| SMALLINT UNSIGNED | ushort | +| INT UNSIGNED | uint | +| BIGINT UNSIGNED | ulong | +| FLOAT | float | +| DOUBLE | double | +| BOOL | bool | +| BINARY | byte[] | +| NCHAR | string (utf-8 encoding) | +| JSON | byte[] | - - - - -1. Connection Management -2. General Query -3. Continuous Query -4. Parameter Binding - - - +**Note**: The JSON type is only supported in tags. ## Installation Steps ### Pre-installation preparation -* Install the [.NET SDK](https://dotnet.microsoft.com/download) +* Install [.NET SDK](https://dotnet.microsoft.com/download) * [Nuget Client](https://docs.microsoft.com/en-us/nuget/install-nuget-client-tools) (optional installation) -* Install TDengine client driver, please refer to [Install client driver](../#install-client-driver) for details +* Install the TDengine client driver. For specific steps, please refer to [Installing the client driver](../#install-client-driver) -### Install `TDengine.Connector` - - +### Install the connectors -You can reference the `TDengine.Connector` published in Nuget to the current project via the `dotnet` CLI under the path of the existing .NET project. - -``` bash +The NuGet package `TDengine.Connector` can be added to the current project with the dotnet CLI, run from the .NET project directory. + +```bash dotnet add package TDengine.Connector ``` -You may also modify the current.NET project file. You can include the following 'ItemGroup' in your project file (.csproj). +You can also modify the `.csproj` file of the current project and add the following `ItemGroup`. ``` XML -  <ItemGroup> -    <PackageReference Include="TDengine.Connector" Version="3.0.2" /> -  </ItemGroup> +  <ItemGroup> +    <PackageReference Include="TDengine.Connector" Version="3.1.0" /> +  </ItemGroup> ``` - - +## Establishing a connection -In this scenario, modifying your project file is required in order to copy the WebSocket dependency dynamic library from the nuget package into your project. -```XML - - - - - - - - - -``` - -Notice: `TDengine.Connector` only version>= 3.0.2 includes the dynamic library for WebSocket.
- - - - -## Establish a Connection - - - - - + + ``` csharp -using TDengineDriver; - -namespace TDengineExample +var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); +using (var client = DbDriver.Open(builder)) { - - internal class EstablishConnection - { - static void Main(String[] args) - { - string host = "localhost"; - short port = 6030; - string username = "root"; - string password = "taosdata"; - string dbname = ""; - - var conn = TDengine.Connect(host, username, password, dbname, port); - if (conn == IntPtr.Zero) - { - Console.WriteLine("Connect to TDengine failed"); - } - else - { - Console.WriteLine("Connect to TDengine success"); - } - TDengine.Close(conn); - TDengine.Cleanup(); - } - } + Console.WriteLine("connected"); } - ``` + - - -The structure of the DSN description string is as follows: - -```text -[<protocol>]://[[<username>:<password>@]<host>:<port>][/<database>][?<p1>=<v1>[&<p2>=<v2>]] -|------------|---|-----------|-----------|------|------|------------|-----------------------| -| protocol | | username | password | host | port | database | params | -``` - -The parameters are described as follows: - -* **protocol**: Specify which connection method to use (support http/ws). For example, `ws://localhost:6041` uses Websocket to establish connections. -* **username/password**: Username and password used to create connections. -* **host/port**: Specifies the server and port to establish a connection. Websocket connections default to `localhost:6041`. -* **database**: Specify the default database to connect to. It's optional. -* **params**: Optional parameters. - -A sample DSN description string is as follows: - -```text -ws://localhost:6041/test -``` - -``` csharp -{{#include docs/examples/csharp/wsConnect/Program.cs}} +```csharp +var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata"); +using (var client = DbDriver.Open(builder)) +{ + Console.WriteLine("connected"); +} ``` +The parameters supported by `ConnectionStringBuilder` are as follows: +* protocol: the connection protocol; the optional values are Native and WebSocket, and the default is Native +* host: the address of the running TDengine or taosAdapter instance +* port: the port of the running TDengine or taosAdapter instance. + * When using WebSocket without SSL, the default is 6041. + * When using WebSocket with SSL, the default is 443. +* useSSL: whether to use SSL; the default is false. Only valid when the protocol is WebSocket. +* token: the token used to connect to TDengine Cloud. Only valid when the protocol is WebSocket. +* username: the username used to connect to TDengine +* password: the password used to connect to TDengine +* db: the database to connect to +* timezone: the time zone used to parse time results; the default is `TimeZoneInfo.Local`. The string is parsed into a `TimeZoneInfo` object with the `TimeZoneInfo.FindSystemTimeZoneById` method. +* connTimeout: the WebSocket connection timeout; only valid when the protocol is WebSocket. The default is 1 minute; the string is parsed into a `TimeSpan` object with the `TimeSpan.Parse` method. +* readTimeout: the WebSocket read timeout; only valid when the protocol is WebSocket. The default is 5 minutes; the string is parsed into a `TimeSpan` object with the `TimeSpan.Parse` method. +* writeTimeout: the WebSocket write timeout; only valid when the protocol is WebSocket. The default is 10 seconds; the string is parsed into a `TimeSpan` object with the `TimeSpan.Parse` method.
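Putting the pieces above together, here is a minimal sketch (not part of the original examples) that opens a WebSocket connection with an explicit `connTimeout` and handles a connection failure; the timeout value is illustrative only, and since the members of `TDengineError` beyond the standard `Exception` surface are not listed here, the sketch relies solely on `Exception.Message`:

```csharp
using System;
using TDengine.Driver;
using TDengine.Driver.Client;

namespace ConnectSketch
{
    internal class Program
    {
        public static void Main(string[] args)
        {
            // Illustrative connection string: WebSocket protocol plus an explicit
            // 30-second connection timeout (parsed with TimeSpan.Parse, per the list above).
            var builder = new ConnectionStringBuilder(
                "protocol=WebSocket;host=localhost;port=6041;useSSL=false;" +
                "username=root;password=taosdata;connTimeout=00:00:30");
            try
            {
                using (var client = DbDriver.Open(builder))
                {
                    Console.WriteLine("connected");
                }
            }
            catch (Exception e) // connector failures surface as TDengineError
            {
                Console.WriteLine($"failed to connect: {e.Message}");
            }
        }
    }
}
```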
+ +### Specify the URL and Properties to get the connection + +The C# connector does not support this feature + +### Priority of configuration parameters + +The C# connector does not support this feature + ## Usage examples -### Write data +### Create database and tables -#### SQL Write - - - - - - - - - - + + ```csharp -{{#include docs/examples/csharp/wsInsert/Program.cs}} +using System; +using System.Text; +using TDengine.Driver; +using TDengine.Driver.Client; + +namespace NativeQuery +{ + internal class Query + { + public static void Main(string[] args) + { + var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + try + { + client.Exec("create database power"); + client.Exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); + } + catch (Exception e) + { + Console.WriteLine(e.ToString()); + throw; + } + } + } + } +} +``` + + + + +```csharp +using System; +using System.Text; +using TDengine.Driver; +using TDengine.Driver.Client; + +namespace WSQuery +{ + internal class Query + { + public static void Main(string[] args) + { + var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + try + { + client.Exec("create database power"); + client.Exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); + } + catch (Exception e) + { + Console.WriteLine(e.ToString()); + throw; + } + } + } + } +} ``` -#### InfluxDB line protocol write +### Insert data - + + -#### OpenTSDB Telnet line protocol write +```csharp +using System; +using System.Text; +using TDengine.Driver; +using TDengine.Driver.Client; - - -#### OpenTSDB JSON line protocol write - - - -#### Parameter Binding - - - - - -``` csharp -{{#include docs/examples/csharp/stmtInsert/Program.cs}} +namespace NativeQuery +{ + internal class Query + { + public static void Main(string[] args) + { + var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + try + { + string insertQuery = + "INSERT INTO " + + "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + + "VALUES " + + "('2023-10-03 14:38:05.000', 10.30000, 219, 0.31000) " + + "('2023-10-03 14:38:15.000', 12.60000, 218, 0.33000) " + + "('2023-10-03 14:38:16.800', 12.30000, 221, 0.31000) " + + "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " + + "VALUES " + + "('2023-10-03 14:38:16.650', 10.30000, 218, 0.25000) " + + "power.d1003 USING power.meters TAGS(2,'California.LosAngeles') " + + "VALUES " + + "('2023-10-03 14:38:05.500', 11.80000, 221, 0.28000) " + + "('2023-10-03 14:38:16.600', 13.40000, 223, 0.29000) " + + "power.d1004 USING power.meters TAGS(3,'California.LosAngeles') " + + "VALUES " + + "('2023-10-03 14:38:05.000', 10.80000, 223, 0.29000) " + + "('2023-10-03 14:38:06.500', 11.50000, 221, 0.35000)"; + client.Exec(insertQuery); + } + catch (Exception e) + { + Console.WriteLine(e.ToString()); + throw; + } + } + } + } +} ``` - - + ```csharp -{{#include docs/examples/csharp/wsStmt/Program.cs}} +using System; +using System.Text; +using TDengine.Driver; +using TDengine.Driver.Client; + +namespace WSQuery +{ + internal class Query + { + public static void Main(string[] args) + { + var builder 
= new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + try + { + string insertQuery = + "INSERT INTO " + + "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + + "VALUES " + + "('2023-10-03 14:38:05.000', 10.30000, 219, 0.31000) " + + "('2023-10-03 14:38:15.000', 12.60000, 218, 0.33000) " + + "('2023-10-03 14:38:16.800', 12.30000, 221, 0.31000) " + + "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " + + "VALUES " + + "('2023-10-03 14:38:16.650', 10.30000, 218, 0.25000) " + + "power.d1003 USING power.meters TAGS(2,'California.LosAngeles') " + + "VALUES " + + "('2023-10-03 14:38:05.500', 11.80000, 221, 0.28000) " + + "('2023-10-03 14:38:16.600', 13.40000, 223, 0.29000) " + + "power.d1004 USING power.meters TAGS(3,'California.LosAngeles') " + + "VALUES " + + "('2023-10-03 14:38:05.000', 10.80000, 223, 0.29000) " + + "('2023-10-03 14:38:06.500', 11.50000, 221, 0.35000)"; + client.Exec(insertQuery); + } + catch (Exception e) + { + Console.WriteLine(e.ToString()); + throw; + } + } + } + } +} ``` -### Query data +### Querying data -#### Synchronous Query - - - - - - - - - - + + ```csharp -{{#include docs/examples/csharp/wsQuery/Program.cs}} +using System; +using System.Text; +using TDengine.Driver; +using TDengine.Driver.Client; + +namespace NativeQuery +{ + internal class Query + { + public static void Main(string[] args) + { + var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + try + { + client.Exec("use power"); + string query = "SELECT * FROM meters"; + using (var rows = client.Query(query)) + { + while (rows.Read()) + { + Console.WriteLine($"{((DateTime)rows.GetValue(0)):yyyy-MM-dd HH:mm:ss.fff}, {rows.GetValue(1)}, {rows.GetValue(2)}, {rows.GetValue(3)}, {rows.GetValue(4)}, {Encoding.UTF8.GetString((byte[])rows.GetValue(5))}"); + } + } + } + catch (Exception e) + { + Console.WriteLine(e.ToString()); + throw; + } + } + } + } +} +``` + + + + +```csharp +using System; +using System.Text; +using TDengine.Driver; +using TDengine.Driver.Client; + +namespace WSQuery +{ + internal class Query + { + public static void Main(string[] args) + { + var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + try + { + client.Exec("use power"); + string query = "SELECT * FROM meters"; + using (var rows = client.Query(query)) + { + while (rows.Read()) + { + Console.WriteLine($"{((DateTime)rows.GetValue(0)):yyyy-MM-dd HH:mm:ss.fff}, {rows.GetValue(1)}, {rows.GetValue(2)}, {rows.GetValue(3)}, {rows.GetValue(4)}, {Encoding.UTF8.GetString((byte[])rows.GetValue(5))}"); + } + } + } + catch (Exception e) + { + Console.WriteLine(e.ToString()); + throw; + } + } + } + } +} ``` -#### Asynchronous query +### execute SQL with reqId - + + + +```csharp +using System; +using System.Text; +using TDengine.Driver; +using TDengine.Driver.Client; + +namespace NativeQueryWithReqID +{ + internal abstract class QueryWithReqID + { + public static void Main(string[] args) + { + var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + try + { + client.Exec($"create database if not exists test_db",ReqId.GetReqId()); + } + catch (Exception e) + { + 
Console.WriteLine(e.ToString()); + throw; + } + } + } + } +} +``` + + + + +```csharp +using System; +using System.Text; +using TDengine.Driver; +using TDengine.Driver.Client; + +namespace WSQueryWithReqID +{ + internal abstract class QueryWithReqID + { + public static void Main(string[] args) + { + var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + try + { + client.Exec($"create database if not exists test_db",ReqId.GetReqId()); + } + catch (Exception e) + { + Console.WriteLine(e.ToString()); + throw; + } + } + } + } +} +``` + + + + +### Writing data via parameter binding + + + + +```csharp +using System; +using TDengine.Driver; +using TDengine.Driver.Client; + +namespace NativeStmt +{ + internal abstract class NativeStmt + { + public static void Main(string[] args) + { + var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + try + { + client.Exec("create database power"); + client.Exec( + "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); + using (var stmt = client.StmtInit()) + { + stmt.Prepare( + "Insert into power.d1001 using power.meters tags(2,'California.SanFrancisco') values(?,?,?,?)"); + var ts = new DateTime(2023, 10, 03, 14, 38, 05, 000); + stmt.BindRow(new object[] { ts, (float)10.30000, (int)219, (float)0.31000 }); + stmt.AddBatch(); + stmt.Exec(); + var affected = stmt.Affected(); + Console.WriteLine($"affected rows: {affected}"); + } + } + catch (Exception e) + { + Console.WriteLine(e); + throw; + } + } + } + } +} +``` + + + + +```csharp +using System; +using TDengine.Driver; +using TDengine.Driver.Client; + +namespace WSStmt +{ + internal abstract class WSStmt + { + public static void Main(string[] args) + { + var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + try + { + client.Exec("create database power"); + client.Exec( + "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); + using (var stmt = client.StmtInit()) + { + stmt.Prepare( + "Insert into power.d1001 using power.meters tags(2,'California.SanFrancisco') values(?,?,?,?)"); + var ts = new DateTime(2023, 10, 03, 14, 38, 05, 000); + stmt.BindRow(new object[] { ts, (float)10.30000, (int)219, (float)0.31000 }); + stmt.AddBatch(); + stmt.Exec(); + var affected = stmt.Affected(); + Console.WriteLine($"affected rows: {affected}"); + } + } + catch (Exception e) + { + Console.WriteLine(e); + throw; + } + } + } + } +} +``` + + + + +Note: When using BindRow, you need to pay attention to the one-to-one correspondence between the original C# column type and the TDengine column type. For the specific correspondence, please refer to [TDengine DataType and C# DataType](#tdengine-datatype-vs-c-datatype). 
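To make that correspondence concrete, here is a hedged sketch that binds one row against a table mixing several types from the mapping table above; the `power.sensor_data` table and its schema are invented for this illustration, and `client` is assumed to have been opened with `DbDriver.Open` as in the earlier examples:

```csharp
// Hypothetical table, for illustration only:
//   CREATE TABLE power.sensor_data (ts TIMESTAMP, raw BINARY(16), note NCHAR(32), ok BOOL)
using (var stmt = client.StmtInit())
{
    stmt.Prepare("insert into power.sensor_data values(?,?,?,?)");
    stmt.BindRow(new object[]
    {
        DateTime.Now,                                  // TIMESTAMP -> DateTime
        System.Text.Encoding.UTF8.GetBytes("payload"), // BINARY    -> byte[]
        "routine reading",                             // NCHAR     -> string
        true,                                          // BOOL      -> bool
    });
    stmt.AddBatch();
    stmt.Exec();
}
```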
+ +### Schemaless Writing + + + + +```csharp +using TDengine.Driver; +using TDengine.Driver.Client; + +namespace NativeSchemaless +{ + internal class Program + { + public static void Main(string[] args) + { + var builder = + new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + client.Exec("create database sml"); + client.Exec("use sml"); + var influxDBData = + "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000"; + client.SchemalessInsert(new string[] { influxDBData }, + TDengineSchemalessProtocol.TSDB_SML_LINE_PROTOCOL, + TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NANO_SECONDS, 0, ReqId.GetReqId()); + var telnetData = "stb0_0 1626006833 4 host=host0 interface=eth0"; + client.SchemalessInsert(new string[] { telnetData }, + TDengineSchemalessProtocol.TSDB_SML_TELNET_PROTOCOL, + TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS, 0, ReqId.GetReqId()); + var jsonData = + "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}"; + client.SchemalessInsert(new string[] { jsonData }, TDengineSchemalessProtocol.TSDB_SML_JSON_PROTOCOL, + TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS, 0, ReqId.GetReqId()); + } + } + } +} +``` + + + + +```csharp +using TDengine.Driver; +using TDengine.Driver.Client; + +namespace WSSchemaless +{ + internal class Program + { + public static void Main(string[] args) + { + var builder = + new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + client.Exec("create database sml"); + client.Exec("use sml"); + var influxDBData = + "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000"; + client.SchemalessInsert(new string[] { influxDBData }, + TDengineSchemalessProtocol.TSDB_SML_LINE_PROTOCOL, + TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NANO_SECONDS, 0, ReqId.GetReqId()); + var telnetData = "stb0_0 1626006833 4 host=host0 interface=eth0"; + client.SchemalessInsert(new string[] { telnetData }, + TDengineSchemalessProtocol.TSDB_SML_TELNET_PROTOCOL, + TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS, 0, ReqId.GetReqId()); + var jsonData = + "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}"; + client.SchemalessInsert(new string[] { jsonData }, TDengineSchemalessProtocol.TSDB_SML_JSON_PROTOCOL, + TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS, 0, ReqId.GetReqId()); + } + } + } +} +``` + + + + +### Schemaless with reqId + +```csharp +public void SchemalessInsert(string[] lines, TDengineSchemalessProtocol protocol, + TDengineSchemalessPrecision precision, + int ttl, long reqId) +``` + +### Data Subscription + +#### Create a Topic + + + + +```csharp +using System; +using System.Text; +using TDengine.Driver; +using TDengine.Driver.Client; + +namespace NativeSubscription +{ + internal class Program + { + public static void Main(string[] args) + { + var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + try + { + client.Exec("create database power"); + client.Exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, 
phase FLOAT) TAGS (groupId INT, location BINARY(24))"); + client.Exec("CREATE TOPIC topic_meters as SELECT * from power.meters"); + } + catch (Exception e) + { + Console.WriteLine(e.ToString()); + throw; + } + } + } + } +} ``` + + + +```csharp +using System; +using System.Text; +using TDengine.Driver; +using TDengine.Driver.Client; + +namespace WSSubscription +{ + internal class Program + { + public static void Main(string[] args) + { + var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + try + { + client.Exec("create database power"); + client.Exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); + client.Exec("CREATE TOPIC topic_meters as SELECT * from power.meters"); + } + catch (Exception e) + { + Console.WriteLine(e.ToString()); + throw; + } + } + } + } +} ``` + + + +#### Create a Consumer + + + + +```csharp +var cfg = new Dictionary<string, string>() +{ + { "group.id", "group1" }, + { "auto.offset.reset", "latest" }, + { "td.connect.ip", "127.0.0.1" }, + { "td.connect.user", "root" }, + { "td.connect.pass", "taosdata" }, + { "td.connect.port", "6030" }, + { "client.id", "tmq_example" }, + { "enable.auto.commit", "true" }, + { "msg.with.table.name", "false" }, +}; +var consumer = new ConsumerBuilder<Dictionary<string, object>>(cfg).Build(); +``` + + + + +```csharp +var cfg = new Dictionary<string, string>() +{ + { "td.connect.type", "WebSocket" }, + { "group.id", "group1" }, + { "auto.offset.reset", "latest" }, + { "td.connect.ip", "localhost" }, + { "td.connect.port","6041"}, + { "useSSL", "false" }, + { "td.connect.user", "root" }, + { "td.connect.pass", "taosdata" }, + { "client.id", "tmq_example" }, + { "enable.auto.commit", "true" }, + { "msg.with.table.name", "false" }, +}; +var consumer = new ConsumerBuilder<Dictionary<string, object>>(cfg).Build(); +``` + + + + +The configuration parameters supported by the consumer are as follows: +* td.connect.type: the connection type; the optional values are Native and WebSocket, and the default is Native +* td.connect.ip: the address of the running TDengine or taosAdapter instance +* td.connect.port: the port of the running TDengine or taosAdapter instance. + * When using WebSocket without SSL, the default is 6041. + * When using WebSocket with SSL, the default is 443. +* useSSL: whether to use SSL; the default is false. Only valid when the connection type is WebSocket. +* token: the token used to connect to TDengine Cloud. Only valid when the connection type is WebSocket. +* td.connect.user: the username used to connect to TDengine +* td.connect.pass: the password used to connect to TDengine +* group.id: the consumer group ID +* client.id: the consumer ID +* enable.auto.commit: whether to automatically commit offsets; the default is true +* auto.commit.interval.ms: the interval at which offsets are automatically committed; the default is 5000 milliseconds +* auto.offset.reset: where to start consuming when no offset exists; the optional values are earliest and latest, and the default is latest +* msg.with.table.name: whether the message contains the table name + +Subscription results can be received as `Dictionary<string, object>`, where the key is the column name and the value is the column value. + +If you use a custom object to receive column values, note the following: +* The original C# column type must correspond one-to-one with the TDengine column type.
For the specific correspondence, please refer to [TDengine DataType and C# DataType](#tdengine-datatype-vs-c-datatype). +* Column names must match the class property names, and the properties must have getters and setters. +* Explicitly set the value deserializer: `ConsumerBuilder.SetValueDeserializer(new ReferenceDeserializer<T>());` + +An example is as follows: + +The `Result` class: + +```csharp + class Result + { + public DateTime ts { get; set; } + public float current { get; set; } + public int voltage { get; set; } + public float phase { get; set; } + } +``` + +Set up the deserializer: + +```csharp +var tmqBuilder = new ConsumerBuilder<Result>(cfg); +tmqBuilder.SetValueDeserializer(new ReferenceDeserializer<Result>()); +var consumer = tmqBuilder.Build(); +``` + +You can also implement a custom deserializer: implement the `IDeserializer<T>` interface and pass an instance in through the `ConsumerBuilder.SetValueDeserializer` method. + +```csharp + public interface IDeserializer<T> + { + T Deserialize(ITMQRows data, bool isNull, SerializationContext context); + } +``` + +#### Subscribe to consume data + +```csharp +consumer.Subscribe(new List<string>() { "topic_meters" }); +while (true) +{ + using (var cr = consumer.Consume(500)) + { + if (cr == null) continue; + foreach (var message in cr.Message) + { + Console.WriteLine( + $"message {{{((DateTime)message.Value["ts"]).ToString("yyyy-MM-dd HH:mm:ss.fff")}, " + + $"{message.Value["current"]}, {message.Value["voltage"]}, {message.Value["phase"]}}}"); + } + } +} +``` + +#### Seek to a specified offset + +```csharp +consumer.Assignment.ForEach(a => +{ + Console.WriteLine($"{a}, seek to 0"); + consumer.Seek(new TopicPartitionOffset(a.Topic, a.Partition, 0)); + Thread.Sleep(TimeSpan.FromSeconds(1)); +}); +``` + +#### Commit offset + +```csharp +public void Commit(ConsumeResult<TValue> consumerResult) +public List<TopicPartitionOffset> Commit() +public void Commit(IEnumerable<TopicPartitionOffset> offsets) +``` + +#### Close subscriptions + +```csharp +consumer.Unsubscribe(); +consumer.Close(); +``` + +#### Full Sample Code + + + + +```csharp +using System; +using System.Collections.Generic; +using System.Threading.Tasks; +using TDengine.Driver; +using TDengine.Driver.Client; +using TDengine.TMQ; + +namespace NativeSubscription +{ + internal class Program + { + public static void Main(string[] args) + { + var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + try + { + client.Exec("CREATE DATABASE power"); + client.Exec("USE power"); + client.Exec( + "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); + client.Exec("CREATE TOPIC topic_meters as SELECT * from power.meters"); + var cfg = new Dictionary<string, string>() + { + { "group.id", "group1" }, + { "auto.offset.reset", "latest" }, + { "td.connect.ip", "127.0.0.1" }, + { "td.connect.user", "root" }, + { "td.connect.pass", "taosdata" }, + { "td.connect.port", "6030" }, + { "client.id", "tmq_example" }, + { "enable.auto.commit", "true" }, + { "msg.with.table.name", "false" }, + }; + var consumer = new ConsumerBuilder<Dictionary<string, object>>(cfg).Build(); + consumer.Subscribe(new List<string>() { "topic_meters" }); + Task.Run(InsertData); + while (true) + { + using (var cr = consumer.Consume(500)) + { + if (cr == null) continue; + foreach (var message in cr.Message) + { + Console.WriteLine( + $"message {{{((DateTime)message.Value["ts"]).ToString("yyyy-MM-dd HH:mm:ss.fff")}, " + + $"{message.Value["current"]}, {message.Value["voltage"]}, {message.Value["phase"]}}}"); + } + consumer.Commit(); + } +
} + } + catch (Exception e) + { + Console.WriteLine(e.ToString()); + throw; + } + } + } + + static void InsertData() + { + var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + while (true) + { + client.Exec("INSERT into power.d1001 using power.meters tags(2,'California.SanFrancisco') values(now,11.5,219,0.30)"); + Task.Delay(1000).Wait(); + } + } + } + } +} ``` + + + +```csharp +using System; +using System.Collections.Generic; +using System.Threading.Tasks; +using TDengine.Driver; +using TDengine.Driver.Client; +using TDengine.TMQ; + +namespace WSSubscription +{ + internal class Program + { + public static void Main(string[] args) + { + var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + try + { + client.Exec("CREATE DATABASE power"); + client.Exec("USE power"); + client.Exec( + "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); + client.Exec("CREATE TOPIC topic_meters as SELECT * from power.meters"); + var cfg = new Dictionary<string, string>() + { + { "td.connect.type", "WebSocket" }, + { "group.id", "group1" }, + { "auto.offset.reset", "latest" }, + { "td.connect.ip", "localhost" }, + { "td.connect.port","6041"}, + { "useSSL", "false" }, + { "td.connect.user", "root" }, + { "td.connect.pass", "taosdata" }, + { "client.id", "tmq_example" }, + { "enable.auto.commit", "true" }, + { "msg.with.table.name", "false" }, + }; + var consumer = new ConsumerBuilder<Dictionary<string, object>>(cfg).Build(); + consumer.Subscribe(new List<string>() { "topic_meters" }); + Task.Run(InsertData); + while (true) + { + using (var cr = consumer.Consume(500)) + { + if (cr == null) continue; + foreach (var message in cr.Message) + { + Console.WriteLine( + $"message {{{((DateTime)message.Value["ts"]).ToString("yyyy-MM-dd HH:mm:ss.fff")}, " + + $"{message.Value["current"]}, {message.Value["voltage"]}, {message.Value["phase"]}}}"); + } + consumer.Commit(); + } + } + } + catch (Exception e) + { + Console.WriteLine(e.ToString()); + throw; + } + } + } + + static void InsertData() + { + var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + while (true) + { + client.Exec("INSERT into power.d1001 using power.meters tags(2,'California.SanFrancisco') values(now,11.5,219,0.30)"); + Task.Delay(1000).Wait(); + } + } + } + } +} ``` + + + +### ADO.NET + +The C# connector supports the ADO.NET interface; through it, you can connect to a running TDengine instance and perform operations such as writing and querying data.
+ + + + +```csharp +using System; +using TDengine.Data.Client; + +namespace NativeADO +{ + internal class Program + { + public static void Main(string[] args) + { + const string connectionString = "host=localhost;port=6030;username=root;password=taosdata"; + using (var connection = new TDengineConnection(connectionString)) + { + try + { + connection.Open(); + using (var command = new TDengineCommand(connection)) + { + command.CommandText = "create database power"; + command.ExecuteNonQuery(); + connection.ChangeDatabase("power"); + command.CommandText = + "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"; + command.ExecuteNonQuery(); + command.CommandText = "INSERT INTO " + + "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + + "VALUES " + + "(?,?,?,?)"; + var parameters = command.Parameters; + parameters.Add(new TDengineParameter("@0", new DateTime(2023,10,03,14,38,05,000))); + parameters.Add(new TDengineParameter("@1", (float)10.30000)); + parameters.Add(new TDengineParameter("@2", (int)219)); + parameters.Add(new TDengineParameter("@3", (float)0.31000)); + command.ExecuteNonQuery(); + command.Parameters.Clear(); + command.CommandText = "SELECT * FROM meters"; + using (var reader = command.ExecuteReader()) + { + while (reader.Read()) + { + Console.WriteLine( + $"{((DateTime) reader.GetValue(0)):yyyy-MM-dd HH:mm:ss.fff}, {reader.GetValue(1)}, {reader.GetValue(2)}, {reader.GetValue(3)}, {reader.GetValue(4)}, {System.Text.Encoding.UTF8.GetString((byte[]) reader.GetValue(5))}"); + } + } + } + } + catch (Exception e) + { + Console.WriteLine(e); + throw; + } + } + } + } +} +``` + + + + +```csharp +using System; +using TDengine.Data.Client; + +namespace WSADO +{ + internal class Program + { + public static void Main(string[] args) + { + const string connectionString = "protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata"; + using (var connection = new TDengineConnection(connectionString)) + { + try + { + connection.Open(); + using (var command = new TDengineCommand(connection)) + { + command.CommandText = "create database power"; + command.ExecuteNonQuery(); + connection.ChangeDatabase("power"); + command.CommandText = + "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"; + command.ExecuteNonQuery(); + command.CommandText = "INSERT INTO " + + "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + + "VALUES " + + "(?,?,?,?)"; + var parameters = command.Parameters; + parameters.Add(new TDengineParameter("@0", new DateTime(2023,10,03,14,38,05,000))); + parameters.Add(new TDengineParameter("@1", (float)10.30000)); + parameters.Add(new TDengineParameter("@2", (int)219)); + parameters.Add(new TDengineParameter("@3", (float)0.31000)); + command.ExecuteNonQuery(); + command.Parameters.Clear(); + command.CommandText = "SELECT * FROM meters"; + using (var reader = command.ExecuteReader()) + { + while (reader.Read()) + { + Console.WriteLine( + $"{((DateTime) reader.GetValue(0)):yyyy-MM-dd HH:mm:ss.fff}, {reader.GetValue(1)}, {reader.GetValue(2)}, {reader.GetValue(3)}, {reader.GetValue(4)}, {System.Text.Encoding.UTF8.GetString((byte[]) reader.GetValue(5))}"); + } + } + } + } + catch (Exception e) + { + Console.WriteLine(e); + throw; + } + } + } + } +} +``` + + + + +* The connection parameters are consistent with those in [Establishing a connection](#establishing-a-connection). 
+* The name of TDengineParameter needs to start with @, such as @0, @1, @2, etc. The value needs to have a one-to-one correspondence between the C# column type and the TDengine column type. For the specific correspondence, please refer to [TDengine DataType and C# DataType](#tdengine-datatype-vs-c-datatype). ### More sample programs -|Sample program |Sample program description | -|--------------------------------------------------------------------------------------------------------------------|--------------------------------------------| -| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/Query/Query.cs) | Table creation, data insertion, and query examples with TDengine.Connector | -| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/JSONTag) | Writing and querying JSON tag data with TDengine Connector | -| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/NET6Examples/Stmt) | Parameter binding with TDengine Connector | -| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/schemaless) | Schemaless writes with TDengine Connector | -| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/AsyncQuery/QueryAsync.cs) | Asynchronous queries with TDengine Connector | -| [Subscription](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/TMQ/TMQ.cs) | Subscription example with TDengine Connector | -| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/FrameWork45/WS/WebSocketSample.cs) | WebSocket basic data in and out with TDengine connector | -| [WebSocket Parameter Binding](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/FrameWork45/WS/WebSocketSTMT.cs) | WebSocket parameter binding example | - -## Important update records - -| TDengine.Connector | Description | -|--------------------|--------------------------------| -| 3.0.2 | Support .NET Framework 4.5 and above. Support .Net standard 2.0. Nuget package includes dynamic library for WebSocket.| -| 3.0.1 | Support WebSocket and Cloud, With function query, insert, and parameter binding| -| 3.0.0 | Supports TDengine 3.0.0.0. TDengine 2.x is not supported. Added `TDengine.Impl.GetData()` interface to deserialize query results. | -| 1.0.7 | Fixed TDengine.Query() memory leak. | -| 1.0.6 | Fix schemaless bug in 1.0.4 and 1.0.5. | -| 1.0.5 | Fix Windows sync query Chinese error bug. | 1.0.4 | Fix schemaless bug. | -| 1.0.4 | Add asynchronous query, subscription, and other functions. Fix the binding parameter bug. | -| 1.0.3 | Add parameter binding, schemaless, JSON tag, etc. | -| 1.0.2 | Add connection management, synchronous query, error messages, etc. | - -## Other descriptions - -### Third-party driver - -`Taos` is an ADO.NET connector for TDengine, supporting Linux and Windows platforms. Community contributor `Maikebing@@maikebing contributes the connector`. Please refer to: - -* Interface download: - -## Frequently Asked Questions - -1. "Unable to establish connection", "Unable to resolve FQDN" - - Usually, it's caused by an incorrect FQDN configuration. Please refer to this section in the [FAQ](https://docs.tdengine.com/2.4/train-faq/faq/#2-how-to-handle-unable-to-establish-connection) to troubleshoot. - -2. Unhandled exception. System.DllNotFoundException: Unable to load DLL 'taos' or one of its dependencies: The specified module cannot be found. 
- - This is usually because the program did not find the dependent client driver. The solution is to copy `C:\TDengine\driver\taos.dll` to the `C:\Windows\System32\` directory on Windows, and create the following soft link on Linux `ln -s /usr/local/taos/driver/libtaos.so.x.x .x.x /usr/lib/libtaos.so` will work. - -## API Reference - -[API Reference](https://docs.taosdata.com/api/connector-csharp/html/860d2ac1-dd52-39c9-e460-0829c4e5a40b.htm) +[sample program](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples) \ No newline at end of file diff --git a/docs/examples/csharp/.gitignore b/docs/examples/csharp/.gitignore index c228f1be2a..b0df020f44 100644 --- a/docs/examples/csharp/.gitignore +++ b/docs/examples/csharp/.gitignore @@ -1,5 +1,5 @@ .vs -asyncQuery/bin +.idea connect/bin influxdbLine/bin optsJSON/bin @@ -12,7 +12,6 @@ wsConnect/bin wsInsert/bin wsQuery/bin wsStmt/bin -asyncQuery/obj connect/obj influxdbLine/obj optsJSON/obj diff --git a/docs/examples/csharp/asyncQuery/Program.cs b/docs/examples/csharp/asyncQuery/Program.cs deleted file mode 100644 index 864f06a15e..0000000000 --- a/docs/examples/csharp/asyncQuery/Program.cs +++ /dev/null @@ -1,111 +0,0 @@ -using System; -using System.Collections.Generic; -using TDengineDriver; -using TDengineDriver.Impl; -using System.Runtime.InteropServices; - -namespace TDengineExample -{ - public class AsyncQueryExample - { - static void Main() - { - IntPtr conn = GetConnection(); - try - { - QueryAsyncCallback queryAsyncCallback = new QueryAsyncCallback(QueryCallback); - TDengine.QueryAsync(conn, "select * from meters", queryAsyncCallback, IntPtr.Zero); - Thread.Sleep(2000); - } - finally - { - TDengine.Close(conn); - } - - } - - static void QueryCallback(IntPtr param, IntPtr taosRes, int code) - { - if (code == 0 && taosRes != IntPtr.Zero) - { - FetchRawBlockAsyncCallback fetchRowAsyncCallback = new FetchRawBlockAsyncCallback(FetchRawBlockCallback); - TDengine.FetchRawBlockAsync(taosRes, fetchRowAsyncCallback, param); - } - else - { - throw new Exception($"async query data failed,code:{code},reason:{TDengine.Error(taosRes)}"); - } - } - - // Iteratively call this interface until "numOfRows" is no greater than 0. 
- static void FetchRawBlockCallback(IntPtr param, IntPtr taosRes, int numOfRows) - { - if (numOfRows > 0) - { - Console.WriteLine($"{numOfRows} rows async retrieved"); - IntPtr pdata = TDengine.GetRawBlock(taosRes); - List metaList = TDengine.FetchFields(taosRes); - List dataList = LibTaos.ReadRawBlock(pdata, metaList, numOfRows); - - for (int i = 0; i < dataList.Count; i++) - { - if (i != 0 && (i + 1) % metaList.Count == 0) - { - Console.WriteLine("{0}\t|", dataList[i]); - } - else - { - Console.Write("{0}\t|", dataList[i]); - } - } - Console.WriteLine(""); - TDengine.FetchRawBlockAsync(taosRes, FetchRawBlockCallback, param); - } - else - { - if (numOfRows == 0) - { - Console.WriteLine("async retrieve complete."); - } - else - { - throw new Exception($"FetchRawBlockCallback callback error, error code {numOfRows}"); - } - TDengine.FreeResult(taosRes); - } - } - - static IntPtr GetConnection() - { - string host = "localhost"; - short port = 6030; - string username = "root"; - string password = "taosdata"; - string dbname = "power"; - var conn = TDengine.Connect(host, username, password, dbname, port); - if (conn == IntPtr.Zero) - { - throw new Exception("Connect to TDengine failed"); - } - else - { - Console.WriteLine("Connect to TDengine success"); - } - return conn; - } - } -} - -// //output: -// // Connect to TDengine success -// // 8 rows async retrieved - -// // 1538548685500 | 11.8 | 221 | 0.28 | california.losangeles | 2 | -// // 1538548696600 | 13.4 | 223 | 0.29 | california.losangeles | 2 | -// // 1538548685000 | 10.8 | 223 | 0.29 | california.losangeles | 3 | -// // 1538548686500 | 11.5 | 221 | 0.35 | california.losangeles | 3 | -// // 1538548685000 | 10.3 | 219 | 0.31 | california.sanfrancisco | 2 | -// // 1538548695000 | 12.6 | 218 | 0.33 | california.sanfrancisco | 2 | -// // 1538548696800 | 12.3 | 221 | 0.31 | california.sanfrancisco | 2 | -// // 1538548696650 | 10.3 | 218 | 0.25 | california.sanfrancisco | 3 | -// // async retrieve complete. 
\ No newline at end of file diff --git a/docs/examples/csharp/asyncQuery/asyncquery.csproj b/docs/examples/csharp/asyncQuery/asyncquery.csproj deleted file mode 100644 index 7c5b693f28..0000000000 --- a/docs/examples/csharp/asyncQuery/asyncquery.csproj +++ /dev/null @@ -1,15 +0,0 @@ - - - - Exe - net6.0 - enable - enable - TDengineExample.AsyncQueryExample - - - - - - - diff --git a/docs/examples/csharp/connect/Program.cs b/docs/examples/csharp/connect/Program.cs index 955db40c7c..0152b61b03 100644 --- a/docs/examples/csharp/connect/Program.cs +++ b/docs/examples/csharp/connect/Program.cs @@ -1,4 +1,5 @@ -using TDengineDriver; +using TDengine.Driver; +using TDengine.Driver.Client; namespace TDengineExample { @@ -7,23 +8,11 @@ namespace TDengineExample { static void Main(String[] args) { - string host = "localhost"; - short port = 6030; - string username = "root"; - string password = "taosdata"; - string dbname = ""; - - var conn = TDengine.Connect(host, username, password, dbname, port); - if (conn == IntPtr.Zero) + var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) { - throw new Exception("Connect to TDengine failed"); + Console.WriteLine("connected"); } - else - { - Console.WriteLine("Connect to TDengine success"); - } - TDengine.Close(conn); - TDengine.Cleanup(); } } } diff --git a/docs/examples/csharp/connect/connect.csproj b/docs/examples/csharp/connect/connect.csproj index a08e86d4b4..e4412a68db 100644 --- a/docs/examples/csharp/connect/connect.csproj +++ b/docs/examples/csharp/connect/connect.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/csharp.sln b/docs/examples/csharp/csharp.sln index 560dde55cb..59c198b4c9 100644 --- a/docs/examples/csharp/csharp.sln +++ b/docs/examples/csharp/csharp.sln @@ -3,8 +3,6 @@ Microsoft Visual Studio Solution File, Format Version 12.00 # Visual Studio Version 16 VisualStudioVersion = 16.0.30114.105 MinimumVisualStudioVersion = 10.0.40219.1 -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "asyncquery", "asyncQuery\asyncquery.csproj", "{E2A5F00C-14E7-40E1-A2DE-6AB2975616D3}" -EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "connect", "connect\connect.csproj", "{CCC5042D-93FC-4AE0-B2F6-7E692FD476B7}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "influxdbline", "influxdbLine\influxdbline.csproj", "{6A24FB80-1E3C-4E2D-A5AB-914FA583874D}" @@ -38,10 +36,6 @@ Global HideSolutionNode = FALSE EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution - {E2A5F00C-14E7-40E1-A2DE-6AB2975616D3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {E2A5F00C-14E7-40E1-A2DE-6AB2975616D3}.Debug|Any CPU.Build.0 = Debug|Any CPU - {E2A5F00C-14E7-40E1-A2DE-6AB2975616D3}.Release|Any CPU.ActiveCfg = Release|Any CPU - {E2A5F00C-14E7-40E1-A2DE-6AB2975616D3}.Release|Any CPU.Build.0 = Release|Any CPU {CCC5042D-93FC-4AE0-B2F6-7E692FD476B7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {CCC5042D-93FC-4AE0-B2F6-7E692FD476B7}.Debug|Any CPU.Build.0 = Debug|Any CPU {CCC5042D-93FC-4AE0-B2F6-7E692FD476B7}.Release|Any CPU.ActiveCfg = Release|Any CPU diff --git a/docs/examples/csharp/influxdbLine/Program.cs b/docs/examples/csharp/influxdbLine/Program.cs index a620c01609..fcf2928f0c 100644 --- a/docs/examples/csharp/influxdbLine/Program.cs +++ b/docs/examples/csharp/influxdbLine/Program.cs @@ -1,4 +1,5 @@ -using TDengineDriver; +using TDengine.Driver; +using TDengine.Driver.Client; namespace TDengineExample { @@ -6,60 +7,23 @@ namespace 
TDengineExample { static void Main() { - IntPtr conn = GetConnection(); - PrepareDatabase(conn); - string[] lines = { - "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", - "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", - "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", - "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250" - }; - IntPtr res = TDengine.SchemalessInsert(conn, lines, lines.Length, (int)TDengineSchemalessProtocol.TSDB_SML_LINE_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS); - if (TDengine.ErrorNo(res) != 0) + var builder = + new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) { - throw new Exception("SchemalessInsert failed since " + TDengine.Error(res)); - } - else - { - int affectedRows = TDengine.AffectRows(res); - Console.WriteLine($"SchemalessInsert success, affected {affectedRows} rows"); - } - TDengine.FreeResult(res); - - } - static IntPtr GetConnection() - { - string host = "localhost"; - short port = 6030; - string username = "root"; - string password = "taosdata"; - string dbname = ""; - var conn = TDengine.Connect(host, username, password, dbname, port); - if (conn == IntPtr.Zero) - { - throw new Exception("Connect to TDengine failed"); - } - else - { - Console.WriteLine("Connect to TDengine success"); - } - return conn; - } - - static void PrepareDatabase(IntPtr conn) - { - IntPtr res = TDengine.Query(conn, "CREATE DATABASE test WAL_RETENTION_PERIOD 3600"); - if (TDengine.ErrorNo(res) != 0) - { - throw new Exception("failed to create database, reason: " + TDengine.Error(res)); - } - res = TDengine.Query(conn, "USE test"); - if (TDengine.ErrorNo(res) != 0) - { - throw new Exception("failed to change database, reason: " + TDengine.Error(res)); + client.Exec("CREATE DATABASE test WAL_RETENTION_PERIOD 3600"); + client.Exec("use test"); + string[] lines = { + "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", + "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", + "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", + "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250" + }; + client.SchemalessInsert(lines, + TDengineSchemalessProtocol.TSDB_SML_LINE_PROTOCOL, + TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS, 0, ReqId.GetReqId()); } } - } } diff --git a/docs/examples/csharp/influxdbLine/influxdbline.csproj b/docs/examples/csharp/influxdbLine/influxdbline.csproj index 4889f8fde9..4fe883b6cb 100644 --- a/docs/examples/csharp/influxdbLine/influxdbline.csproj +++ b/docs/examples/csharp/influxdbLine/influxdbline.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/optsJSON/Program.cs b/docs/examples/csharp/optsJSON/Program.cs index 8dcc1dce92..a93c4dad0f 100644 --- a/docs/examples/csharp/optsJSON/Program.cs +++ b/docs/examples/csharp/optsJSON/Program.cs @@ -1,69 +1,28 @@ -using TDengineDriver; +using TDengine.Driver; +using TDengine.Driver.Client; namespace TDengineExample { internal class OptsJsonExample { - static void Main() + public static void Main(string[] args) { - IntPtr conn = GetConnection(); - try + var builder = + new 
ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) { - PrepareDatabase(conn); - string[] lines = { "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," + - " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}, " + - "{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," + - " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}]" - }; - - IntPtr res = TDengine.SchemalessInsert(conn, lines, 1, (int)TDengineSchemalessProtocol.TSDB_SML_JSON_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED); - if (TDengine.ErrorNo(res) != 0) + client.Exec("CREATE DATABASE test WAL_RETENTION_PERIOD 3600"); + client.Exec("use test"); + string[] lines = { - throw new Exception("SchemalessInsert failed since " + TDengine.Error(res)); - } - else - { - int affectedRows = TDengine.AffectRows(res); - Console.WriteLine($"SchemalessInsert success, affected {affectedRows} rows"); - } - TDengine.FreeResult(res); - } - finally - { - TDengine.Close(conn); - } - } - static IntPtr GetConnection() - { - string host = "localhost"; - short port = 6030; - string username = "root"; - string password = "taosdata"; - string dbname = ""; - var conn = TDengine.Connect(host, username, password, dbname, port); - if (conn == IntPtr.Zero) - { - throw new Exception("Connect to TDengine failed"); - } - else - { - Console.WriteLine("Connect to TDengine success"); - } - return conn; - } - - static void PrepareDatabase(IntPtr conn) - { - IntPtr res = TDengine.Query(conn, "CREATE DATABASE test WAL_RETENTION_PERIOD 3600"); - if (TDengine.ErrorNo(res) != 0) - { - throw new Exception("failed to create database, reason: " + TDengine.Error(res)); - } - res = TDengine.Query(conn, "USE test"); - if (TDengine.ErrorNo(res) != 0) - { - throw new Exception("failed to change database, reason: " + TDengine.Error(res)); + "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," + + " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}, " + + "{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," + + " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}]" + }; + client.SchemalessInsert(lines, TDengineSchemalessProtocol.TSDB_SML_JSON_PROTOCOL, + TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS, 0, ReqId.GetReqId()); } } } -} +} \ No newline at end of file diff --git a/docs/examples/csharp/optsJSON/optsJSON.csproj b/docs/examples/csharp/optsJSON/optsJSON.csproj index 208f04c82d..9c3edf53e3 100644 --- a/docs/examples/csharp/optsJSON/optsJSON.csproj +++ b/docs/examples/csharp/optsJSON/optsJSON.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/optsTelnet/Program.cs b/docs/examples/csharp/optsTelnet/Program.cs index ccd29d0cfc..b64aa9fe2d 100644 --- 
a/docs/examples/csharp/optsTelnet/Program.cs +++ b/docs/examples/csharp/optsTelnet/Program.cs @@ -1,72 +1,31 @@ -using TDengineDriver; +using TDengine.Driver; +using TDengine.Driver.Client; namespace TDengineExample { internal class OptsTelnetExample { - static void Main() + public static void Main(string[] args) { - IntPtr conn = GetConnection(); - try + var builder = + new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) { - PrepareDatabase(conn); + client.Exec("CREATE DATABASE test WAL_RETENTION_PERIOD 3600"); + client.Exec("USE test"); string[] lines = { - "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2", - "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2", - "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3", - "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3", - "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2", - "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2", - "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3", - "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3", - }; - IntPtr res = TDengine.SchemalessInsert(conn, lines, lines.Length, (int)TDengineSchemalessProtocol.TSDB_SML_TELNET_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED); - if (TDengine.ErrorNo(res) != 0) - { - throw new Exception("SchemalessInsert failed since " + TDengine.Error(res)); - } - else - { - int affectedRows = TDengine.AffectRows(res); - Console.WriteLine($"SchemalessInsert success, affected {affectedRows} rows"); - } - TDengine.FreeResult(res); - } - catch - { - TDengine.Close(conn); - } - } - static IntPtr GetConnection() - { - string host = "localhost"; - short port = 6030; - string username = "root"; - string password = "taosdata"; - string dbname = ""; - var conn = TDengine.Connect(host, username, password, dbname, port); - if (conn == IntPtr.Zero) - { - throw new Exception("Connect to TDengine failed"); - } - else - { - Console.WriteLine("Connect to TDengine success"); - } - return conn; - } - - static void PrepareDatabase(IntPtr conn) - { - IntPtr res = TDengine.Query(conn, "CREATE DATABASE test WAL_RETENTION_PERIOD 3600"); - if (TDengine.ErrorNo(res) != 0) - { - throw new Exception("failed to create database, reason: " + TDengine.Error(res)); - } - res = TDengine.Query(conn, "USE test"); - if (TDengine.ErrorNo(res) != 0) - { - throw new Exception("failed to change database, reason: " + TDengine.Error(res)); + "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2", + "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2", + "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3", + "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3", + }; + client.SchemalessInsert(lines, + TDengineSchemalessProtocol.TSDB_SML_TELNET_PROTOCOL, + TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS, 0, ReqId.GetReqId()); } } } diff --git 
a/docs/examples/csharp/optsTelnet/optstelnet.csproj b/docs/examples/csharp/optsTelnet/optstelnet.csproj index 32c76ec418..4d6941e962 100644 --- a/docs/examples/csharp/optsTelnet/optstelnet.csproj +++ b/docs/examples/csharp/optsTelnet/optstelnet.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/query/Program.cs b/docs/examples/csharp/query/Program.cs index 84c7f9db1f..4c354d9a5e 100644 --- a/docs/examples/csharp/query/Program.cs +++ b/docs/examples/csharp/query/Program.cs @@ -1,80 +1,35 @@ -using TDengineDriver; -using TDengineDriver.Impl; -using System.Runtime.InteropServices; +using System.Text; +using TDengine.Driver; +using TDengine.Driver.Client; namespace TDengineExample { internal class QueryExample { - static void Main() + public static void Main(string[] args) { - IntPtr conn = GetConnection(); - try + var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) { - // run query - IntPtr res = TDengine.Query(conn, "SELECT * FROM meters LIMIT 2"); - if (TDengine.ErrorNo(res) != 0) + try { - throw new Exception("Failed to query since: " + TDengine.Error(res)); - } - - // get filed count - int fieldCount = TDengine.FieldCount(res); - Console.WriteLine("fieldCount=" + fieldCount); - - // print column names - List metas = LibTaos.GetMeta(res); - for (int i = 0; i < metas.Count; i++) - { - Console.Write(metas[i].name + "\t"); - } - Console.WriteLine(); - - // print values - List resData = LibTaos.GetData(res); - for (int i = 0; i < resData.Count; i++) - { - Console.Write($"|{resData[i].ToString()} \t"); - if (((i + 1) % metas.Count == 0)) + client.Exec("use power"); + string query = "SELECT * FROM meters"; + using (var rows = client.Query(query)) { - Console.WriteLine(""); + while (rows.Read()) + { + Console.WriteLine( + $"{((DateTime)rows.GetValue(0)):yyyy-MM-dd HH:mm:ss.fff}, {rows.GetValue(1)}, {rows.GetValue(2)}, {rows.GetValue(3)}, {rows.GetValue(4)}, {Encoding.UTF8.GetString((byte[])rows.GetValue(5))}"); + } } } - Console.WriteLine(); - - // Free result after use - TDengine.FreeResult(res); + catch (Exception e) + { + Console.WriteLine(e.ToString()); + throw; + } } - finally - { - TDengine.Close(conn); - } - - } - static IntPtr GetConnection() - { - string host = "localhost"; - short port = 6030; - string username = "root"; - string password = "taosdata"; - string dbname = "power"; - var conn = TDengine.Connect(host, username, password, dbname, port); - if (conn == IntPtr.Zero) - { - throw new Exception("Connect to TDengine failed"); - } - else - { - Console.WriteLine("Connect to TDengine success"); - } - return conn; } } -} - -// output: -// Connect to TDengine success -// fieldCount=6 -// ts current voltage phase location groupid -// 1648432611249 10.3 219 0.31 California.SanFrancisco 2 -// 1648432611749 12.6 218 0.33 California.SanFrancisco 2 \ No newline at end of file +} \ No newline at end of file diff --git a/docs/examples/csharp/query/query.csproj b/docs/examples/csharp/query/query.csproj index 360d73b2c0..9924d331af 100644 --- a/docs/examples/csharp/query/query.csproj +++ b/docs/examples/csharp/query/query.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/sqlInsert/Program.cs b/docs/examples/csharp/sqlInsert/Program.cs index 25a945a459..c3c700aba2 100644 --- a/docs/examples/csharp/sqlInsert/Program.cs +++ b/docs/examples/csharp/sqlInsert/Program.cs @@ -1,69 +1,47 @@ -using TDengineDriver; - +using System.Text; +using TDengine.Driver; +using TDengine.Driver.Client; 
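+// Scope of this example: open a native connection, create the power
+// database and the meters supertable, then insert rows into the child
+// tables d1001..d1004, auto-creating them through the
+// USING power.meters TAGS(...) clause of the INSERT statement.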
namespace TDengineExample { internal class SQLInsertExample { - static void Main() + public static void Main(string[] args) { - IntPtr conn = GetConnection(); - try + var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) { - IntPtr res = TDengine.Query(conn, "CREATE DATABASE power WAL_RETENTION_PERIOD 3600"); - CheckRes(conn, res, "failed to create database"); - res = TDengine.Query(conn, "USE power"); - CheckRes(conn, res, "failed to change database"); - res = TDengine.Query(conn, "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)"); - CheckRes(conn, res, "failed to create stable"); - var sql = "INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) " + - "d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) " + - "d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " + - "d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)"; - res = TDengine.Query(conn, sql); - CheckRes(conn, res, "failed to insert data"); - int affectedRows = TDengine.AffectRows(res); - Console.WriteLine("affectedRows " + affectedRows); - TDengine.FreeResult(res); - } - finally - { - TDengine.Close(conn); - } - - } - - static IntPtr GetConnection() - { - string host = "localhost"; - short port = 6030; - string username = "root"; - string password = "taosdata"; - string dbname = ""; - var conn = TDengine.Connect(host, username, password, dbname, port); - if (conn == IntPtr.Zero) - { - throw new Exception("Connect to TDengine failed"); - } - else - { - Console.WriteLine("Connect to TDengine success"); - } - return conn; - } - - static void CheckRes(IntPtr conn, IntPtr res, String errorMsg) - { - if (TDengine.ErrorNo(res) != 0) - { - throw new Exception($"{errorMsg} since: {TDengine.Error(res)}"); + try + { + client.Exec("create database power"); + client.Exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); + string insertQuery = + "INSERT INTO " + + "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + + "VALUES " + + "('2023-10-03 14:38:05.000', 10.30000, 219, 0.31000) " + + "('2023-10-03 14:38:15.000', 12.60000, 218, 0.33000) " + + "('2023-10-03 14:38:16.800', 12.30000, 221, 0.31000) " + + "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " + + "VALUES " + + "('2023-10-03 14:38:16.650', 10.30000, 218, 0.25000) " + + "power.d1003 USING power.meters TAGS(2,'California.LosAngeles') " + + "VALUES " + + "('2023-10-03 14:38:05.500', 11.80000, 221, 0.28000) " + + "('2023-10-03 14:38:16.600', 13.40000, 223, 0.29000) " + + "power.d1004 USING power.meters TAGS(3,'California.LosAngeles') " + + "VALUES " + + "('2023-10-03 14:38:05.000', 10.80000, 223, 0.29000) " + + "('2023-10-03 14:38:06.500', 11.50000, 221, 0.35000)"; + client.Exec(insertQuery); + } + catch (Exception e) + { + Console.WriteLine(e.ToString()); + throw; + } } } - } } - -// output: -// Connect to TDengine success -// affectedRows 8 
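The insert example above carries no default database on the connection, so every table reference is fully qualified as `power.d1001`/`power.meters`. The `db` key documented for `ConnectionStringBuilder` further down offers an alternative; a minimal sketch, assuming `db` behaves as documented (it preselects the database at connect time):

```csharp
// Sketch only: select the power database via the connection string so
// bare table names resolve there without a prior "USE power" statement.
using System;
using TDengine.Driver;
using TDengine.Driver.Client;

var builder = new ConnectionStringBuilder(
    "host=localhost;port=6030;username=root;password=taosdata;db=power");
using (var client = DbDriver.Open(builder))
{
    // COUNT(*) over meters resolves to power.meters thanks to the db key.
    using (var rows = client.Query("SELECT COUNT(*) FROM meters"))
    {
        while (rows.Read())
        {
            Console.WriteLine($"rows in meters: {rows.GetValue(0)}");
        }
    }
}
```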
diff --git a/docs/examples/csharp/sqlInsert/sqlinsert.csproj b/docs/examples/csharp/sqlInsert/sqlinsert.csproj index 1b6f745c82..aa8e1402ae 100644 --- a/docs/examples/csharp/sqlInsert/sqlinsert.csproj +++ b/docs/examples/csharp/sqlInsert/sqlinsert.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/stmtInsert/Program.cs b/docs/examples/csharp/stmtInsert/Program.cs index 2e856a49bb..e6b11a63e4 100644 --- a/docs/examples/csharp/stmtInsert/Program.cs +++ b/docs/examples/csharp/stmtInsert/Program.cs @@ -1,109 +1,38 @@ -using TDengineDriver; +using TDengine.Driver; +using TDengine.Driver.Client; namespace TDengineExample { internal class StmtInsertExample { - private static IntPtr conn; - private static IntPtr stmt; - static void Main() + public static void Main(string[] args) { - conn = GetConnection(); - try + var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) { - PrepareSTable(); - // 1. init and prepare - stmt = TDengine.StmtInit(conn); - if (stmt == IntPtr.Zero) + try { - throw new Exception("failed to init stmt."); + client.Exec($"create database power"); + client.Exec( + "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); + using (var stmt = client.StmtInit()) + { + stmt.Prepare( + "Insert into power.d1001 using power.meters tags(2,'California.SanFrancisco') values(?,?,?,?)"); + var ts = new DateTime(2023, 10, 03, 14, 38, 05, 000); + stmt.BindRow(new object[] { ts, (float)10.30000, (int)219, (float)0.31000 }); + stmt.AddBatch(); + stmt.Exec(); + var affected = stmt.Affected(); + Console.WriteLine($"affected rows: {affected}"); + } } - int res = TDengine.StmtPrepare(stmt, "INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)"); - CheckStmtRes(res, "failed to prepare stmt"); - - // 2. bind table name and tags - TAOS_MULTI_BIND[] tags = new TAOS_MULTI_BIND[2] { TaosMultiBind.MultiBindBinary(new string[] { "California.SanFrancisco" }), TaosMultiBind.MultiBindInt(new int?[] { 2 }) }; - res = TDengine.StmtSetTbnameTags(stmt, "d1001", tags); - CheckStmtRes(res, "failed to bind table name and tags"); - - // 3. bind values - TAOS_MULTI_BIND[] values = new TAOS_MULTI_BIND[4] { - TaosMultiBind.MultiBindTimestamp(new long[2] { 1648432611249, 1648432611749}), - TaosMultiBind.MultiBindFloat(new float?[2] { 10.3f, 12.6f}), - TaosMultiBind.MultiBindInt(new int?[2] { 219, 218}), - TaosMultiBind.MultiBindFloat(new float?[2]{ 0.31f, 0.33f}) - }; - res = TDengine.StmtBindParamBatch(stmt, values); - CheckStmtRes(res, "failed to bind params"); - - // 4. add batch - res = TDengine.StmtAddBatch(stmt); - CheckStmtRes(res, "failed to add batch"); - - // 5. execute - res = TDengine.StmtExecute(stmt); - CheckStmtRes(res, "failed to execute"); - - // 6. 
free - TaosMultiBind.FreeTaosBind(tags); - TaosMultiBind.FreeTaosBind(values); - } - finally - { - TDengine.Close(conn); - } - - } - - static IntPtr GetConnection() - { - string host = "localhost"; - short port = 6030; - string username = "root"; - string password = "taosdata"; - string dbname = ""; - var conn = TDengine.Connect(host, username, password, dbname, port); - if (conn == IntPtr.Zero) - { - throw new Exception("Connect to TDengine failed"); - } - else - { - Console.WriteLine("Connect to TDengine success"); - } - return conn; - } - - static void PrepareSTable() - { - IntPtr res = TDengine.Query(conn, "CREATE DATABASE power WAL_RETENTION_PERIOD 3600"); - CheckResPtr(res, "failed to create database"); - res = TDengine.Query(conn, "USE power"); - CheckResPtr(res, "failed to change database"); - res = TDengine.Query(conn, "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)"); - CheckResPtr(res, "failed to create stable"); - } - - static void CheckStmtRes(int res, string errorMsg) - { - if (res != 0) - { - Console.WriteLine(errorMsg + ", " + TDengine.StmtErrorStr(stmt)); - int code = TDengine.StmtClose(stmt); - if (code != 0) + catch (Exception e) { - throw new Exception($"failed to close stmt, {code} reason: {TDengine.StmtErrorStr(stmt)} "); + Console.WriteLine(e); + throw; } } } - - static void CheckResPtr(IntPtr res, string errorMsg) - { - if (TDengine.ErrorNo(res) != 0) - { - throw new Exception(errorMsg + " since:" + TDengine.Error(res)); - } - } - } -} +} \ No newline at end of file diff --git a/docs/examples/csharp/stmtInsert/stmtinsert.csproj b/docs/examples/csharp/stmtInsert/stmtinsert.csproj index f5b2b67397..9fa3109b80 100644 --- a/docs/examples/csharp/stmtInsert/stmtinsert.csproj +++ b/docs/examples/csharp/stmtInsert/stmtinsert.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/subscribe/Program.cs b/docs/examples/csharp/subscribe/Program.cs index 1fba209f22..d7c1a52117 100644 --- a/docs/examples/csharp/subscribe/Program.cs +++ b/docs/examples/csharp/subscribe/Program.cs @@ -1,95 +1,72 @@ -using System; -using TDengineTMQ; -using TDengineDriver; -using System.Runtime.InteropServices; + +using TDengine.Driver; +using TDengine.Driver.Client; +using TDengine.TMQ; namespace TMQExample { internal class SubscribeDemo { - static void Main(string[] args) + public static void Main(string[] args) { - IntPtr conn = GetConnection(); - string topic = "topic_example"; - //create topic - IntPtr res = TDengine.Query(conn, $"create topic if not exists {topic} as select * from meters"); - - if (TDengine.ErrorNo(res) != 0 ) + var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) { - throw new Exception($"create topic failed, reason:{TDengine.Error(res)}"); - } - - var cfg = new ConsumerConfig - { - GourpId = "group_1", - TDConnectUser = "root", - TDConnectPasswd = "taosdata", - MsgWithTableName = "true", - TDConnectIp = "127.0.0.1", - }; - - // create consumer - var consumer = new ConsumerBuilder(cfg) - .Build(); - - // subscribe - consumer.Subscribe(topic); - - // consume - for (int i = 0; i < 5; i++) - { - var consumeRes = consumer.Consume(300); - // print consumeResult - foreach (KeyValuePair kv in consumeRes.Message) + try { - Console.WriteLine("topic partitions:\n{0}", kv.Key.ToString()); - - kv.Value.Metas.ForEach(meta => + client.Exec("CREATE DATABASE power"); + client.Exec("USE power"); + client.Exec( + 
"CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); + client.Exec("CREATE TOPIC topic_meters as SELECT * from power.meters"); + var cfg = new Dictionary() { - Console.Write("{0} {1}({2}) \t|", meta.name, meta.TypeName(), meta.size); - }); - Console.WriteLine(""); - kv.Value.Datas.ForEach(data => + { "group.id", "group1" }, + { "auto.offset.reset", "latest" }, + { "td.connect.ip", "127.0.0.1" }, + { "td.connect.user", "root" }, + { "td.connect.pass", "taosdata" }, + { "td.connect.port", "6030" }, + { "client.id", "tmq_example" }, + { "enable.auto.commit", "true" }, + { "msg.with.table.name", "false" }, + }; + var consumer = new ConsumerBuilder>(cfg).Build(); + consumer.Subscribe(new List() { "topic_meters" }); + Task.Run(InsertData); + while (true) { - Console.WriteLine(data.ToString()); - }); + using (var cr = consumer.Consume(500)) + { + if (cr == null) continue; + foreach (var message in cr.Message) + { + Console.WriteLine( + $"message {{{((DateTime)message.Value["ts"]).ToString("yyyy-MM-dd HH:mm:ss.fff")}, " + + $"{message.Value["current"]}, {message.Value["voltage"]}, {message.Value["phase"]}}}"); + } + } + } + } + catch (Exception e) + { + Console.WriteLine(e.ToString()); + throw; } - - consumer.Commit(consumeRes); - Console.WriteLine("\n================ {0} done ", i); - } - - // retrieve topic list - List topics = consumer.Subscription(); - topics.ForEach(t => Console.WriteLine("topic name:{0}", t)); - - // unsubscribe - consumer.Unsubscribe(); - - // close consumer after use.Otherwise will lead memory leak. - consumer.Close(); - TDengine.Close(conn); - } - static IntPtr GetConnection() + static void InsertData() { - string host = "localhost"; - short port = 6030; - string username = "root"; - string password = "taosdata"; - string dbname = "power"; - var conn = TDengine.Connect(host, username, password, dbname, port); - if (conn == IntPtr.Zero) + var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) { - throw new Exception("Connect to TDengine failed"); + while (true) + { + client.Exec("INSERT into power.d1001 using power.meters tags(2,'California.SanFrancisco') values(now,11.5,219,0.30)"); + Task.Delay(1000).Wait(); + } } - else - { - Console.WriteLine("Connect to TDengine success"); - } - return conn; } } diff --git a/docs/examples/csharp/subscribe/subscribe.csproj b/docs/examples/csharp/subscribe/subscribe.csproj index 191b3f9e9b..df6618d214 100644 --- a/docs/examples/csharp/subscribe/subscribe.csproj +++ b/docs/examples/csharp/subscribe/subscribe.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/wsConnect/Program.cs b/docs/examples/csharp/wsConnect/Program.cs index a534bb8a65..edf0eb31e6 100644 --- a/docs/examples/csharp/wsConnect/Program.cs +++ b/docs/examples/csharp/wsConnect/Program.cs @@ -1,28 +1,18 @@ using System; -using TDengineWS.Impl; +using TDengine.Driver; +using TDengine.Driver.Client; namespace Examples { public class WSConnExample { - static int Main(string[] args) + static void Main(string[] args) { - string DSN = "ws://root:taosdata@127.0.0.1:6041/test"; - IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN); - - if (wsConn == IntPtr.Zero) + var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) { - Console.WriteLine("get WS connection failed"); - return -1; + 
Console.WriteLine("connected"); } - else - { - Console.WriteLine("Establish connect success."); - // close connection. - LibTaosWS.WSClose(wsConn); - } - - return 0; } } } diff --git a/docs/examples/csharp/wsConnect/wsConnect.csproj b/docs/examples/csharp/wsConnect/wsConnect.csproj index c7988b6e9c..01ab149af5 100644 --- a/docs/examples/csharp/wsConnect/wsConnect.csproj +++ b/docs/examples/csharp/wsConnect/wsConnect.csproj @@ -6,7 +6,7 @@ - + diff --git a/docs/examples/csharp/wsInsert/Program.cs b/docs/examples/csharp/wsInsert/Program.cs index 7fa6af805c..73136477e7 100644 --- a/docs/examples/csharp/wsInsert/Program.cs +++ b/docs/examples/csharp/wsInsert/Program.cs @@ -1,61 +1,46 @@ using System; -using TDengineWS.Impl; +using TDengine.Driver; +using TDengine.Driver.Client; namespace Examples { public class WSInsertExample { - static int Main(string[] args) + public static void Main(string[] args) { - string DSN = "ws://root:taosdata@127.0.0.1:6041/test"; - IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN); - - // Assert if connection is validate - if (wsConn == IntPtr.Zero) + var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) { - Console.WriteLine("get WS connection failed"); - return -1; - } - else - { - Console.WriteLine("Establish connect success."); - } - - string createTable = "CREATE STABLE test.meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);"; - string insert = "INSERT INTO test.d1001 USING test.meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)" + - "test.d1002 USING test.meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)" + - "test.d1003 USING test.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " + - "test.d1004 USING test.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)"; - - IntPtr wsRes = LibTaosWS.WSQuery(wsConn, createTable); - ValidInsert("create table", wsRes); - LibTaosWS.WSFreeResult(wsRes); - - wsRes = LibTaosWS.WSQuery(wsConn, insert); - ValidInsert("insert data", wsRes); - LibTaosWS.WSFreeResult(wsRes); - - // close connection. 
- LibTaosWS.WSClose(wsConn); - - return 0; - } - - static void ValidInsert(string desc, IntPtr wsRes) - { - int code = LibTaosWS.WSErrorNo(wsRes); - if (code != 0) - { - Console.WriteLine($"execute SQL failed: reason: {LibTaosWS.WSErrorStr(wsRes)}, code:{code}"); - } - else - { - Console.WriteLine("{0} success affect {2} rows, cost {1} nanoseconds", desc, LibTaosWS.WSTakeTiming(wsRes), LibTaosWS.WSAffectRows(wsRes)); + try + { + client.Exec("create database power"); + client.Exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); + string insertQuery = + "INSERT INTO " + + "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + + "VALUES " + + "('2023-10-03 14:38:05.000', 10.30000, 219, 0.31000) " + + "('2023-10-03 14:38:15.000', 12.60000, 218, 0.33000) " + + "('2023-10-03 14:38:16.800', 12.30000, 221, 0.31000) " + + "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " + + "VALUES " + + "('2023-10-03 14:38:16.650', 10.30000, 218, 0.25000) " + + "power.d1003 USING power.meters TAGS(2,'California.LosAngeles') " + + "VALUES " + + "('2023-10-03 14:38:05.500', 11.80000, 221, 0.28000) " + + "('2023-10-03 14:38:16.600', 13.40000, 223, 0.29000) " + + "power.d1004 USING power.meters TAGS(3,'California.LosAngeles') " + + "VALUES " + + "('2023-10-03 14:38:05.000', 10.80000, 223, 0.29000) " + + "('2023-10-03 14:38:06.500', 11.50000, 221, 0.35000)"; + client.Exec(insertQuery); + } + catch (Exception e) + { + Console.WriteLine(e.ToString()); + throw; + } } } } - } -// Establish connect success. -// create table success affect 0 rows, cost 3717542 nanoseconds -// insert data success affect 8 rows, cost 2613637 nanoseconds diff --git a/docs/examples/csharp/wsInsert/wsInsert.csproj b/docs/examples/csharp/wsInsert/wsInsert.csproj index 5aa419b2c8..0459e8e38b 100644 --- a/docs/examples/csharp/wsInsert/wsInsert.csproj +++ b/docs/examples/csharp/wsInsert/wsInsert.csproj @@ -6,7 +6,7 @@ enable - + diff --git a/docs/examples/csharp/wsQuery/Program.cs b/docs/examples/csharp/wsQuery/Program.cs index 8ee900a05a..40aac24597 100644 --- a/docs/examples/csharp/wsQuery/Program.cs +++ b/docs/examples/csharp/wsQuery/Program.cs @@ -1,79 +1,36 @@ using System; -using TDengineWS.Impl; -using System.Collections.Generic; -using TDengineDriver; +using System.Text; +using TDengine.Driver; +using TDengine.Driver.Client; namespace Examples { public class WSQueryExample { - static int Main(string[] args) + public static void Main(string[] args) { - string DSN = "ws://root:taosdata@127.0.0.1:6041/test"; - IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN); - if (wsConn == IntPtr.Zero) + var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) { - Console.WriteLine("get WS connection failed"); - return -1; - } - else - { - Console.WriteLine("Establish connect success."); - } - - string select = "select * from test.meters"; - - // optional:wsRes = LibTaosWS.WSQuery(wsConn, select); - IntPtr wsRes = LibTaosWS.WSQueryTimeout(wsConn, select, 1); - // Assert if query execute success. 
- int code = LibTaosWS.WSErrorNo(wsRes); - if (code != 0) - { - Console.WriteLine($"execute SQL failed: reason: {LibTaosWS.WSErrorStr(wsRes)}, code:{code}"); - LibTaosWS.WSFreeResult(wsRes); - return -1; - } - - // get meta data - List metas = LibTaosWS.WSGetFields(wsRes); - // get retrieved data - List dataSet = LibTaosWS.WSGetData(wsRes); - - // do something with result. - foreach (var meta in metas) - { - Console.Write("{0} {1}({2}) \t|\t", meta.name, meta.TypeName(), meta.size); - } - Console.WriteLine(""); - - for (int i = 0; i < dataSet.Count;) - { - for (int j = 0; j < metas.Count; j++) + try { - Console.Write("{0}\t|\t", dataSet[i]); - i++; + client.Exec("use power"); + string query = "SELECT * FROM meters"; + using (var rows = client.Query(query)) + { + while (rows.Read()) + { + Console.WriteLine( + $"{((DateTime)rows.GetValue(0)):yyyy-MM-dd HH:mm:ss.fff}, {rows.GetValue(1)}, {rows.GetValue(2)}, {rows.GetValue(3)}, {rows.GetValue(4)}, {Encoding.UTF8.GetString((byte[])rows.GetValue(5))}"); + } + } + } + catch (Exception e) + { + Console.WriteLine(e.ToString()); + throw; } - Console.WriteLine(""); } - - // Free result after use. - LibTaosWS.WSFreeResult(wsRes); - - // close connection. - LibTaosWS.WSClose(wsConn); - - return 0; } } -} - -// Establish connect success. -// ts TIMESTAMP(8) | current FLOAT(4) | voltage INT(4) | phase FLOAT(4) | location BINARY(64) | groupid INT(4) | -// 1538548685000 | 10.8 | 223 | 0.29 | California.LosAngeles | 3 | -// 1538548686500 | 11.5 | 221 | 0.35 | California.LosAngeles | 3 | -// 1538548685500 | 11.8 | 221 | 0.28 | California.LosAngeles | 2 | -// 1538548696600 | 13.4 | 223 | 0.29 | California.LosAngeles | 2 | -// 1538548685000 | 10.3 | 219 | 0.31 | California.SanFrancisco | 2 | -// 1538548695000 | 12.6 | 218 | 0.33 | California.SanFrancisco | 2 | -// 1538548696800 | 12.3 | 221 | 0.31 | California.SanFrancisco | 2 | -// 1538548696650 | 10.3 | 218 | 0.25 | California.SanFrancisco | 3 | +} \ No newline at end of file diff --git a/docs/examples/csharp/wsQuery/wsQuery.csproj b/docs/examples/csharp/wsQuery/wsQuery.csproj index bcc7c19a59..ff283947b4 100644 --- a/docs/examples/csharp/wsQuery/wsQuery.csproj +++ b/docs/examples/csharp/wsQuery/wsQuery.csproj @@ -7,7 +7,7 @@ - + diff --git a/docs/examples/csharp/wsStmt/Program.cs b/docs/examples/csharp/wsStmt/Program.cs index f8673357db..5166dfea92 100644 --- a/docs/examples/csharp/wsStmt/Program.cs +++ b/docs/examples/csharp/wsStmt/Program.cs @@ -1,98 +1,41 @@ using System; -using TDengineWS.Impl; -using TDengineDriver; -using System.Runtime.InteropServices; +using TDengine.Driver; +using TDengine.Driver.Client; namespace Examples { public class WSStmtExample { - static int Main(string[] args) + public static void Main(string[] args) { - const string DSN = "ws://root:taosdata@127.0.0.1:6041/test"; - const string table = "meters"; - const string database = "test"; - const string childTable = "d1005"; - string insert = $"insert into ? using {database}.{table} tags(?,?) 
values(?,?,?,?)"; - const int numOfTags = 2; - const int numOfColumns = 4; - - // Establish connection - IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN); - if (wsConn == IntPtr.Zero) + var builder = + new ConnectionStringBuilder( + "protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) { - Console.WriteLine($"get WS connection failed"); - return -1; - } - else - { - Console.WriteLine("Establish connect success..."); - } - - // init stmt - IntPtr wsStmt = LibTaosWS.WSStmtInit(wsConn); - if (wsStmt != IntPtr.Zero) - { - int code = LibTaosWS.WSStmtPrepare(wsStmt, insert); - ValidStmtStep(code, wsStmt, "WSStmtPrepare"); - - TAOS_MULTI_BIND[] wsTags = new TAOS_MULTI_BIND[] { WSMultiBind.WSBindNchar(new string[] { "California.SanDiego" }), WSMultiBind.WSBindInt(new int?[] { 4 }) }; - code = LibTaosWS.WSStmtSetTbnameTags(wsStmt, $"{database}.{childTable}", wsTags, numOfTags); - ValidStmtStep(code, wsStmt, "WSStmtSetTbnameTags"); - - TAOS_MULTI_BIND[] data = new TAOS_MULTI_BIND[4]; - data[0] = WSMultiBind.WSBindTimestamp(new long[] { 1538548687000, 1538548688000, 1538548689000, 1538548690000, 1538548691000 }); - data[1] = WSMultiBind.WSBindFloat(new float?[] { 10.30F, 10.40F, 10.50F, 10.60F, 10.70F }); - data[2] = WSMultiBind.WSBindInt(new int?[] { 223, 221, 222, 220, 219 }); - data[3] = WSMultiBind.WSBindFloat(new float?[] { 0.31F, 0.32F, 0.33F, 0.35F, 0.28F }); - code = LibTaosWS.WSStmtBindParamBatch(wsStmt, data, numOfColumns); - ValidStmtStep(code, wsStmt, "WSStmtBindParamBatch"); - - code = LibTaosWS.WSStmtAddBatch(wsStmt); - ValidStmtStep(code, wsStmt, "WSStmtAddBatch"); - - IntPtr stmtAffectRowPtr = Marshal.AllocHGlobal(Marshal.SizeOf(typeof(Int32))); - code = LibTaosWS.WSStmtExecute(wsStmt, stmtAffectRowPtr); - ValidStmtStep(code, wsStmt, "WSStmtExecute"); - Console.WriteLine("WS STMT insert {0} rows...", Marshal.ReadInt32(stmtAffectRowPtr)); - Marshal.FreeHGlobal(stmtAffectRowPtr); - - LibTaosWS.WSStmtClose(wsStmt); - - // Free unmanaged memory - WSMultiBind.WSFreeTaosBind(wsTags); - WSMultiBind.WSFreeTaosBind(data); - - //check result with SQL "SELECT * FROM test.d1005;" - } - else - { - Console.WriteLine("Init STMT failed..."); - } - - // close connection. - LibTaosWS.WSClose(wsConn); - - return 0; - } - - static void ValidStmtStep(int code, IntPtr wsStmt, string desc) - { - if (code != 0) - { - Console.WriteLine($"{desc} failed,reason: {LibTaosWS.WSErrorStr(wsStmt)}, code: {code}"); - } - else - { - Console.WriteLine("{0} success...", desc); + try + { + client.Exec($"create database power"); + client.Exec( + "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); + using (var stmt = client.StmtInit()) + { + stmt.Prepare( + "Insert into power.d1001 using power.meters tags(2,'California.SanFrancisco') values(?,?,?,?)"); + var ts = new DateTime(2023, 10, 03, 14, 38, 05, 000); + stmt.BindRow(new object[] { ts, (float)10.30000, (int)219, (float)0.31000 }); + stmt.AddBatch(); + stmt.Exec(); + var affected = stmt.Affected(); + Console.WriteLine($"affected rows: {affected}"); + } + } + catch (Exception e) + { + Console.WriteLine(e); + throw; + } } } } -} - -// WSStmtPrepare success... -// WSStmtSetTbnameTags success... -// WSStmtBindParamBatch success... -// WSStmtAddBatch success... -// WSStmtExecute success... -// WS STMT insert 5 rows... 
+} \ No newline at end of file diff --git a/docs/examples/csharp/wsStmt/wsStmt.csproj b/docs/examples/csharp/wsStmt/wsStmt.csproj index bcc7c19a59..ff283947b4 100644 --- a/docs/examples/csharp/wsStmt/wsStmt.csproj +++ b/docs/examples/csharp/wsStmt/wsStmt.csproj @@ -7,7 +7,7 @@ - + diff --git a/docs/zh/07-develop/01-connect/index.md b/docs/zh/07-develop/01-connect/index.md index 83032067d9..9d954f78ce 100644 --- a/docs/zh/07-develop/01-connect/index.md +++ b/docs/zh/07-develop/01-connect/index.md @@ -176,7 +176,7 @@ npm install @tdengine/rest - + diff --git a/docs/zh/07-develop/04-query-data/_cs_async.mdx b/docs/zh/07-develop/04-query-data/_cs_async.mdx deleted file mode 100644 index 19c8e58f32..0000000000 --- a/docs/zh/07-develop/04-query-data/_cs_async.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```csharp -{{#include docs/examples/csharp/asyncQuery/Program.cs}} -``` diff --git a/docs/zh/07-develop/04-query-data/index.mdx b/docs/zh/07-develop/04-query-data/index.mdx index d4d6d8192d..e892f9fc8d 100644 --- a/docs/zh/07-develop/04-query-data/index.mdx +++ b/docs/zh/07-develop/04-query-data/index.mdx @@ -16,7 +16,6 @@ import CQuery from "./_c.mdx"; import PhpQuery from "./_php.mdx"; import PyAsync from "./_py_async.mdx"; import NodeAsync from "./_js_async.mdx"; -import CsAsync from "./_cs_async.mdx"; import CAsync from "./_c_async.mdx"; ## 主要查询功能 @@ -175,9 +174,6 @@ Query OK, 6 rows in database (0.005515s) - - - diff --git a/docs/zh/07-develop/07-tmq.mdx b/docs/zh/07-develop/07-tmq.mdx index d3b27fedc3..f945f53b5c 100644 --- a/docs/zh/07-develop/07-tmq.mdx +++ b/docs/zh/07-develop/07-tmq.mdx @@ -248,23 +248,23 @@ function close() ```csharp +class ConsumerBuilder<TValue> + ConsumerBuilder(IEnumerable<KeyValuePair<string, string>> config) -virtual IConsumer Build() - -Consumer(ConsumerBuilder builder) +public IConsumer<TValue> Build() void Subscribe(IEnumerable<string> topics) void Subscribe(string topic) -ConsumeResult Consume(int millisecondsTimeout) +ConsumeResult<TValue> Consume(int millisecondsTimeout) List<string> Subscription() void Unsubscribe() -void Commit(ConsumeResult consumerResult) +List<TopicPartitionOffset> Commit() void Close() ``` @@ -501,25 +501,19 @@ let consumer = taos.consumer({ ```csharp -using TDengineTMQ; - -// 根据需要,设置消费组 (GourpId)、自动提交 (EnableAutoCommit)、 -// 自动提交时间间隔 (AutoCommitIntervalMs)、用户名 (TDConnectUser)、密码 (TDConnectPasswd) 等参数 -var cfg = new ConsumerConfig - { - EnableAutoCommit = "true" - AutoCommitIntervalMs = "1000" - GourpId = "TDengine-TMQ-C#", - TDConnectUser = "root", - TDConnectPasswd = "taosdata", - AutoOffsetReset = "latest" - MsgWithTableName = "true", - TDConnectIp = "127.0.0.1", - TDConnectPort = "6030" - }; - -var consumer = new ConsumerBuilder(cfg).Build(); - +var cfg = new Dictionary<string, string>() +{ + { "group.id", "group1" }, + { "auto.offset.reset", "latest" }, + { "td.connect.ip", "127.0.0.1" }, + { "td.connect.user", "root" }, + { "td.connect.pass", "taosdata" }, + { "td.connect.port", "6030" }, + { "client.id", "tmq_example" }, + { "enable.auto.commit", "true" }, + { "msg.with.table.name", "false" }, +}; +var consumer = new ConsumerBuilder<Dictionary<string, object>>(cfg).Build(); ``` @@ -748,10 +742,12 @@ while(true){ // 消费数据 while (true) { - var consumerRes = consumer.Consume(100); - // process ConsumeResult - ProcessMsg(consumerRes); - consumer.Commit(consumerRes); + using (var result = consumer.Consume(500)) + { + if (result == null) continue; + ProcessMsg(result); + consumer.Commit(); + } } ``` diff --git a/docs/zh/08-connector/02-rest-api.mdx b/docs/zh/08-connector/02-rest-api.mdx index f3f1e087d8..904b06cfe1 100644 --- a/docs/zh/08-connector/02-rest-api.mdx +++
b/docs/zh/08-connector/02-rest-api.mdx @@ -257,6 +257,63 @@ curl -L -u username:password -d "" :/rest/sql/[db_name][?tz=timez - "INT UNSIGNED" - "BIGINT UNSIGNED" - "JSON" +- "VARBINARY" +- "GEOMETRY" + +`VARBINARY` 和 `GEOMETRY` 类型返回数据为 Hex 字符串,样例: + +准备数据 + +```bash +create database demo +use demo +create table t(ts timestamp,c1 varbinary(20),c2 geometry(100)) +insert into t values(now,'\x7f8290','point(100 100)') +``` + +执行查询 + +```bash +curl --location 'http://:/rest/sql' \ +--header 'Content-Type: text/plain' \ +--header 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' \ +--data 'select * from demo.t' +``` + +返回结果 + +```json +{ + "code": 0, + "column_meta": [ + [ + "ts", + "TIMESTAMP", + 8 + ], + [ + "c1", + "VARBINARY", + 20 + ], + [ + "c2", + "GEOMETRY", + 100 + ] + ], + "data": [ + [ + "2023-11-01T06:28:15.210Z", + "7f8290", + "010100000000000000000059400000000000005940" + ] + ], + "rows": 1 +} +``` + +- `010100000000000000000059400000000000005940` 为 `point(100 100)` 的 [Well-Known Binary (WKB)](https://libgeos.org/specifications/wkb/) 格式 #### 错误 diff --git a/docs/zh/08-connector/40-csharp.mdx b/docs/zh/08-connector/40-csharp.mdx index 325c71da88..f88002d3dc 100644 --- a/docs/zh/08-connector/40-csharp.mdx +++ b/docs/zh/08-connector/40-csharp.mdx @@ -1,27 +1,22 @@ --- toc_max_heading_level: 4 sidebar_label: C# -title: C# Connector +title: TDengine C# Connector --- import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -import Preparation from "./_preparation.mdx" -import CSInsert from "../07-develop/03-insert-data/_cs_sql.mdx" -import CSInfluxLine from "../07-develop/03-insert-data/_cs_line.mdx" -import CSOpenTSDBTelnet from "../07-develop/03-insert-data/_cs_opts_telnet.mdx" -import CSOpenTSDBJson from "../07-develop/03-insert-data/_cs_opts_json.mdx" -import CSQuery from "../07-develop/04-query-data/_cs.mdx" -import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx" - `TDengine.Connector` 是 TDengine 提供的 C# 语言连接器。C# 开发人员可以通过它开发存取 TDengine 集群数据的 C# 应用软件。 -`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、数据订阅、schemaless 数据写入、参数绑定接口数据写入等功能。 `TDengine.Connector` 自 v3.0.1 起还支持 WebSocket,通过 DSN 建立 WebSocket 连接,提供数据写入、查询、参数绑定接口数据写入等功能。 +`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、数据订阅、schemaless 数据写入、参数绑定接口数据写入等功能。 `TDengine.Connector` 自 v3.0.1 起还支持 WebSocket 连接,提供数据写入、查询、参数绑定接口数据写入等功能。 本文介绍如何在 Linux 或 Windows 环境中安装 `TDengine.Connector`,并通过 `TDengine.Connector` 连接 TDengine 集群,进行数据写入、查询等基本操作。 -注意:`TDengine.Connector` 3.x 不兼容 TDengine 2.x,如果在运行 TDengine 2.x 版本的环境下需要使用 C# 连接器请使用 TDengine.Connector 的 1.x 版本 。 +:::warning +* `TDengine.Connector` 3.1.0 版本进行了完整的重构,不再兼容 3.0.2 及以前版本。3.0.2 文档请参考 [nuget](https://www.nuget.org/packages/TDengine.Connector/3.0.2) +* `TDengine.Connector` 3.x 不兼容 TDengine 2.x,如果在运行 TDengine 2.x 版本的环境下需要使用 C# 连接器请使用 TDengine.Connector 的 1.x 版本。 +::: `TDengine.Connector` 的源码托管在 [GitHub](https://github.com/taosdata/taos-connector-dotnet/tree/3.0)。 @@ -29,39 +24,43 @@ import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx" 支持的平台和 TDengine 客户端驱动支持的平台一致。 -:::note -注意 TDengine 不再支持 32 位 Windows 平台。 +:::warning +TDengine 不再支持 32 位 Windows 平台。 ::: ## 版本支持 -请参考[版本支持列表](../#版本支持) +| **Connector version** | **TDengine version** | +|-----------------------|----------------------| +| 3.1.0 | 3.2.1.0/3.1.1.18 | -## 支持的功能特性 +## 处理异常 - +`TDengine.Connector` 会抛出异常,应用程序需要处理异常。taosc 异常类型 `TDengineError`,包含错误码和错误信息,应用程序可以根据错误码和错误信息进行处理。 - +## TDengine DataType 和 C# DataType -1. 连接管理 -2. 
普通查询 -3. 连续查询 -4. 参数绑定 -5. 数据订阅(TMQ) -6. Schemaless - - +| TDengine DataType | C# Type | +|-------------------|------------------| +| TIMESTAMP | DateTime | +| TINYINT | sbyte | +| SMALLINT | short | +| INT | int | +| BIGINT | long | +| TINYINT UNSIGNED | byte | +| SMALLINT UNSIGNED | ushort | +| INT UNSIGNED | uint | +| BIGINT UNSIGNED | ulong | +| FLOAT | float | +| DOUBLE | double | +| BOOL | bool | +| BINARY | byte[] | +| NCHAR | string (utf-8编码) | +| JSON | byte[] | - - -1. 连接管理 -2. 普通查询 -3. 连续查询 -4. 参数绑定 - - - - +:::note +JSON 类型仅在 tag 中支持。 +::: ## 安装步骤 @@ -71,10 +70,7 @@ import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx" * [Nuget 客户端](https://docs.microsoft.com/en-us/nuget/install-nuget-client-tools) (可选安装) * 安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动) -### 安装 TDengine.Connector - - - +### 安装连接器 可以在当前 .NET 项目的路径下,通过 dotnet CLI 添加 Nuget package `TDengine.Connector` 到当前项目。 @@ -86,234 +82,1109 @@ dotnet add package TDengine.Connector ``` XML - + ``` - - - - -需要修改目标项目的 `.csproj` 项目文件,将 `.nupkg` 中的 `runtimes` 目录中的动态库复制到当前项目的 `$(OutDir)` 目录下。 - -```XML - - - - - - - - - -``` - -注意:`TDengine.Connector` 自 version>= 3.0.2 的 nuget package 中才会有动态库( taosws.dll,libtaows.so )。 - - - - ## 建立连接 - - + -使用 host、username、password、port 等信息建立连接。 - ``` csharp -using TDengineDriver; - -namespace TDengineExample +var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); +using (var client = DbDriver.Open(builder)) { + Console.WriteLine("connected"); +} +``` - internal class EstablishConnection + + + +```csharp +var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata"); +using (var client = DbDriver.Open(builder)) +{ + Console.WriteLine("connected"); +} +``` + + + +ConnectionStringBuilder 支持的参数如下: +* protocol: 连接协议,可选值为 Native 或 WebSocket,默认为 Native +* host: TDengine 或 taosadapter 运行实例的地址 +* port: TDengine 或 taosadapter 运行实例的端口 + * 当 protocol 为 WebSocket 时 useSSL 为 false 时,port 默认为 6041 + * 当 protocol 为 WebSocket 时 useSSL 为 true 时,port 默认为 443 +* useSSL: 是否使用 SSL 连接,仅当 protocol 为 WebSocket 时有效,默认为 false +* token: 连接 TDengine cloud 的 token,仅当 protocol 为 WebSocket 时有效 +* username: 连接 TDengine 的用户名 +* password: 连接 TDengine 的密码 +* db: 连接 TDengine 的数据库 +* timezone: 解析时间结果的时区,默认为 `TimeZoneInfo.Local`,使用 `TimeZoneInfo.FindSystemTimeZoneById` 方法解析字符串为 `TimeZoneInfo` 对象。 +* connTimeout: WebSocket 连接超时时间,仅当 protocol 为 WebSocket 时有效,默认为 1 分钟,使用 `TimeSpan.Parse` 方法解析字符串为 `TimeSpan` 对象。 +* readTimeout: WebSocket 读超时时间,仅当 protocol 为 WebSocket 时有效,默认为 5 分钟,使用 `TimeSpan.Parse` 方法解析字符串为 `TimeSpan` 对象。 +* writeTimeout: WebSocket 写超时时间,仅当 protocol 为 WebSocket 时有效,默认为 10 秒,使用 `TimeSpan.Parse` 方法解析字符串为 `TimeSpan` 对象。 + +### 指定 URL 和 Properties 获取连接 + +C# 连接器不支持此功能 + +### 配置参数的优先级 + +C# 连接器不支持此功能 + +## 使用示例 + +### 创建数据库和表 + + + + +```csharp +using System; +using System.Text; +using TDengine.Driver; +using TDengine.Driver.Client; + +namespace NativeQuery +{ + internal class Query { - static void Main(String[] args) + public static void Main(string[] args) { - string host = "localhost"; - short port = 6030; - string username = "root"; - string password = "taosdata"; - string dbname = ""; - - var conn = TDengine.Connect(host, username, password, dbname, port); - if (conn == IntPtr.Zero) + var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) { - Console.WriteLine("Connect to TDengine failed"); + 
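// The try/catch below matters: Exec throws (a TDengineError, per the
+        // 处理异常 section above) if the power database or the meters
+        // supertable already exists, so a rerun of this example fails fast
+        // with the server error instead of continuing silently.
+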
try + { + client.Exec("create database power"); + client.Exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); + } + catch (Exception e) + { + Console.WriteLine(e.ToString()); + throw; + } } - else - { - Console.WriteLine("Connect to TDengine success"); - } - TDengine.Close(conn); - TDengine.Cleanup(); } } } ``` + - +```csharp +using System; +using System.Text; +using TDengine.Driver; +using TDengine.Driver.Client; -使用 DSN 建立 WebSocket 连接 DSN 连接。 描述字符串基本结构如下: - -```text -[]://[[:@]:][/][?=[&=]] -|------------|---|-----------|-----------|------|------|------------|-----------------------| -| protocol | | username | password | host | port | database | params | -``` - -各部分意义见下表: - -* **protocol**: 显示指定以何种方式建立连接,例如:`ws://localhost:6041` 指定以 Websocket 方式建立连接(支持 http/ws )。 - -* **username/password**: 用于创建连接的用户名及密码(默认 `root/taosdata` )。 - -* **host/port**: 指定创建连接的服务器及端口,WebSocket 连接默认为 `localhost:6041` 。 - -* **database**: 指定默认连接的数据库名,可选参数。 - -* **params**:其他可选参数。 - -``` csharp -{{#include docs/examples/csharp/wsConnect/Program.cs}} +namespace WSQuery +{ + internal class Query + { + public static void Main(string[] args) + { + var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + try + { + client.Exec("create database power"); + client.Exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); + } + catch (Exception e) + { + Console.WriteLine(e.ToString()); + throw; + } + } + } + } +} ``` -## 使用示例 - -### 写入数据 - -#### SQL 写入 - - +### 插入数据 + - +```csharp +using System; +using System.Text; +using TDengine.Driver; +using TDengine.Driver.Client; + +namespace NativeQuery +{ + internal class Query + { + public static void Main(string[] args) + { + var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + try + { + string insertQuery = + "INSERT INTO " + + "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + + "VALUES " + + "('2023-10-03 14:38:05.000', 10.30000, 219, 0.31000) " + + "('2023-10-03 14:38:15.000', 12.60000, 218, 0.33000) " + + "('2023-10-03 14:38:16.800', 12.30000, 221, 0.31000) " + + "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " + + "VALUES " + + "('2023-10-03 14:38:16.650', 10.30000, 218, 0.25000) " + + "power.d1003 USING power.meters TAGS(2,'California.LosAngeles') " + + "VALUES " + + "('2023-10-03 14:38:05.500', 11.80000, 221, 0.28000) " + + "('2023-10-03 14:38:16.600', 13.40000, 223, 0.29000) " + + "power.d1004 USING power.meters TAGS(3,'California.LosAngeles') " + + "VALUES " + + "('2023-10-03 14:38:05.000', 10.80000, 223, 0.29000) " + + "('2023-10-03 14:38:06.500', 11.50000, 221, 0.35000)"; + client.Exec(insertQuery); + } + catch (Exception e) + { + Console.WriteLine(e.ToString()); + throw; + } + } + } + } +} +``` - - + ```csharp -{{#include docs/examples/csharp/wsInsert/Program.cs}} +using System; +using System.Text; +using TDengine.Driver; +using TDengine.Driver.Client; + +namespace WSQuery +{ + internal class Query + { + public static void Main(string[] args) + { + var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + try + { + string 
insertQuery = + "INSERT INTO " + + "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + + "VALUES " + + "('2023-10-03 14:38:05.000', 10.30000, 219, 0.31000) " + + "('2023-10-03 14:38:15.000', 12.60000, 218, 0.33000) " + + "('2023-10-03 14:38:16.800', 12.30000, 221, 0.31000) " + + "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " + + "VALUES " + + "('2023-10-03 14:38:16.650', 10.30000, 218, 0.25000) " + + "power.d1003 USING power.meters TAGS(2,'California.LosAngeles') " + + "VALUES " + + "('2023-10-03 14:38:05.500', 11.80000, 221, 0.28000) " + + "('2023-10-03 14:38:16.600', 13.40000, 223, 0.29000) " + + "power.d1004 USING power.meters TAGS(3,'California.LosAngeles') " + + "VALUES " + + "('2023-10-03 14:38:05.000', 10.80000, 223, 0.29000) " + + "('2023-10-03 14:38:06.500', 11.50000, 221, 0.35000)"; + client.Exec(insertQuery); + } + catch (Exception e) + { + Console.WriteLine(e.ToString()); + throw; + } + } + } + } +} ``` - - - -#### InfluxDB 行协议写入 - - - -#### OpenTSDB Telnet 行协议写入 - - - -#### OpenTSDB JSON 行协议写入 - - - -#### 参数绑定 - - - - - -``` csharp -{{#include docs/examples/csharp/stmtInsert/Program.cs}} -``` - - - - - -```csharp -{{#include docs/examples/csharp/wsStmt/Program.cs}} -``` - - - ### 查询数据 -#### 同步查询 - - - + - - - - - - ```csharp -{{#include docs/examples/csharp/wsQuery/Program.cs}} +using System; +using System.Text; +using TDengine.Driver; +using TDengine.Driver.Client; + +namespace NativeQuery +{ + internal class Query + { + public static void Main(string[] args) + { + var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + try + { + client.Exec("use power"); + string query = "SELECT * FROM meters"; + using (var rows = client.Query(query)) + { + while (rows.Read()) + { + Console.WriteLine($"{((DateTime)rows.GetValue(0)):yyyy-MM-dd HH:mm:ss.fff}, {rows.GetValue(1)}, {rows.GetValue(2)}, {rows.GetValue(3)}, {rows.GetValue(4)}, {Encoding.UTF8.GetString((byte[])rows.GetValue(5))}"); + } + } + } + catch (Exception e) + { + Console.WriteLine(e.ToString()); + throw; + } + } + } + } +} ``` + +```csharp +using System; +using System.Text; +using TDengine.Driver; +using TDengine.Driver.Client; + +namespace WSQuery +{ + internal class Query + { + public static void Main(string[] args) + { + var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + try + { + client.Exec("use power"); + string query = "SELECT * FROM meters"; + using (var rows = client.Query(query)) + { + while (rows.Read()) + { + Console.WriteLine($"{((DateTime)rows.GetValue(0)):yyyy-MM-dd HH:mm:ss.fff}, {rows.GetValue(1)}, {rows.GetValue(2)}, {rows.GetValue(3)}, {rows.GetValue(4)}, {Encoding.UTF8.GetString((byte[])rows.GetValue(5))}"); + } + } + } + catch (Exception e) + { + Console.WriteLine(e.ToString()); + throw; + } + } + } + } +} +``` + + -#### 异步查询 +### 执行带有 reqId 的 SQL - + + + +```csharp +using System; +using System.Text; +using TDengine.Driver; +using TDengine.Driver.Client; + +namespace NativeQueryWithReqID +{ + internal abstract class QueryWithReqID + { + public static void Main(string[] args) + { + var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + try + { + client.Exec($"create database if not exists test_db",ReqId.GetReqId()); + } + catch (Exception 
e) + { + Console.WriteLine(e.ToString()); + throw; + } + } + } + } +} +``` + + + + +```csharp +using System; +using System.Text; +using TDengine.Driver; +using TDengine.Driver.Client; + +namespace WSQueryWithReqID +{ + internal abstract class QueryWithReqID + { + public static void Main(string[] args) + { + var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + try + { + client.Exec($"create database if not exists test_db",ReqId.GetReqId()); + } + catch (Exception e) + { + Console.WriteLine(e.ToString()); + throw; + } + } + } + } +} +``` + + + + +### 通过参数绑定写入数据 + + + + +```csharp +using System; +using TDengine.Driver; +using TDengine.Driver.Client; + +namespace NativeStmt +{ + internal abstract class NativeStmt + { + public static void Main(string[] args) + { + var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + try + { + client.Exec("create database power"); + client.Exec( + "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); + using (var stmt = client.StmtInit()) + { + stmt.Prepare( + "Insert into power.d1001 using power.meters tags(2,'California.SanFrancisco') values(?,?,?,?)"); + var ts = new DateTime(2023, 10, 03, 14, 38, 05, 000); + stmt.BindRow(new object[] { ts, (float)10.30000, (int)219, (float)0.31000 }); + stmt.AddBatch(); + stmt.Exec(); + var affected = stmt.Affected(); + Console.WriteLine($"affected rows: {affected}"); + } + } + catch (Exception e) + { + Console.WriteLine(e); + throw; + } + } + } + } +} +``` + + + + +```csharp +using System; +using TDengine.Driver; +using TDengine.Driver.Client; + +namespace WSStmt +{ + internal abstract class WSStmt + { + public static void Main(string[] args) + { + var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + try + { + client.Exec("create database power"); + client.Exec( + "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); + using (var stmt = client.StmtInit()) + { + stmt.Prepare( + "Insert into power.d1001 using power.meters tags(2,'California.SanFrancisco') values(?,?,?,?)"); + var ts = new DateTime(2023, 10, 03, 14, 38, 05, 000); + stmt.BindRow(new object[] { ts, (float)10.30000, (int)219, (float)0.31000 }); + stmt.AddBatch(); + stmt.Exec(); + var affected = stmt.Affected(); + Console.WriteLine($"affected rows: {affected}"); + } + } + catch (Exception e) + { + Console.WriteLine(e); + throw; + } + } + } + } +} +``` + + + + +注意:使用 BindRow 需要注意原始 C# 列类型与 TDengine 列类型的需要一一对应,具体对应关系请参考 [TDengine DataType 和 C# DataType](#tdengine-datatype-和-c-datatype)。 + +### 无模式写入 + + + + +```csharp +using TDengine.Driver; +using TDengine.Driver.Client; + +namespace NativeSchemaless +{ + internal class Program + { + public static void Main(string[] args) + { + var builder = + new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + client.Exec("create database sml"); + client.Exec("use sml"); + var influxDBData = + "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000"; + client.SchemalessInsert(new string[] { influxDBData 
+### Schemaless Writing
+
+
+
+
+```csharp
+using TDengine.Driver;
+using TDengine.Driver.Client;
+
+namespace NativeSchemaless
+{
+    internal class Program
+    {
+        public static void Main(string[] args)
+        {
+            var builder =
+                new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata");
+            using (var client = DbDriver.Open(builder))
+            {
+                client.Exec("create database sml");
+                client.Exec("use sml");
+                var influxDBData =
+                    "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
+                client.SchemalessInsert(new string[] { influxDBData },
+                    TDengineSchemalessProtocol.TSDB_SML_LINE_PROTOCOL,
+                    TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NANO_SECONDS, 0, ReqId.GetReqId());
+                var telnetData = "stb0_0 1626006833 4 host=host0 interface=eth0";
+                client.SchemalessInsert(new string[] { telnetData },
+                    TDengineSchemalessProtocol.TSDB_SML_TELNET_PROTOCOL,
+                    TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS, 0, ReqId.GetReqId());
+                var jsonData =
+                    "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}";
+                client.SchemalessInsert(new string[] { jsonData }, TDengineSchemalessProtocol.TSDB_SML_JSON_PROTOCOL,
+                    TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS, 0, ReqId.GetReqId());
+            }
+        }
+    }
+}
+```
+
+
+
+
+```csharp
+using TDengine.Driver;
+using TDengine.Driver.Client;
+
+namespace WSSchemaless
+{
+    internal class Program
+    {
+        public static void Main(string[] args)
+        {
+            var builder =
+                new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata");
+            using (var client = DbDriver.Open(builder))
+            {
+                client.Exec("create database sml");
+                client.Exec("use sml");
+                var influxDBData =
+                    "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
+                client.SchemalessInsert(new string[] { influxDBData },
+                    TDengineSchemalessProtocol.TSDB_SML_LINE_PROTOCOL,
+                    TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NANO_SECONDS, 0, ReqId.GetReqId());
+                var telnetData = "stb0_0 1626006833 4 host=host0 interface=eth0";
+                client.SchemalessInsert(new string[] { telnetData },
+                    TDengineSchemalessProtocol.TSDB_SML_TELNET_PROTOCOL,
+                    TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS, 0, ReqId.GetReqId());
+                var jsonData =
+                    "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}";
+                client.SchemalessInsert(new string[] { jsonData }, TDengineSchemalessProtocol.TSDB_SML_JSON_PROTOCOL,
+                    TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS, 0, ReqId.GetReqId());
+            }
+        }
+    }
+}
+```
+
+
+
+
+### Schemaless Writing with a reqId
+
+The examples above already pass `ReqId.GetReqId()` as the last argument of the following signature:
+
+```csharp
+public void SchemalessInsert(string[] lines, TDengineSchemalessProtocol protocol,
+    TDengineSchemalessPrecision precision,
+    int ttl, long reqId)
+```
+
+### Data Subscription
+
+#### Create a Topic
+
+
+
+
+```csharp
+using System;
+using System.Text;
+using TDengine.Driver;
+using TDengine.Driver.Client;
+
+namespace NativeSubscription
+{
+    internal class Program
+    {
+        public static void Main(string[] args)
+        {
+            var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata");
+            using (var client = DbDriver.Open(builder))
+            {
+                try
+                {
+                    client.Exec("create database power");
+                    client.Exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
+                    client.Exec("CREATE TOPIC topic_meters as SELECT * from power.meters");
+                }
+                catch (Exception e)
+                {
+                    Console.WriteLine(e.ToString());
+                    throw;
+                }
+            }
+        }
+    }
+}
+```
+
+
+
+
+```csharp
+using System;
+using System.Text;
+using TDengine.Driver;
+using TDengine.Driver.Client;
+
+namespace WSSubscription
+{
+    internal class Program
+    {
+        public static void Main(string[] args)
+        {
+            var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata");
+            using (var client = DbDriver.Open(builder))
+            {
+                try
+                {
+                    client.Exec("create database power");
+                    client.Exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
+                    client.Exec("CREATE TOPIC topic_meters as SELECT * from power.meters");
+                }
+                catch (Exception e)
+                {
+                    Console.WriteLine(e.ToString());
+                    throw;
+                }
+            }
+        }
+    }
+}
+```
+
+
+
+
+#### Create a Consumer
+
+
+
+
+```csharp
+var cfg = new Dictionary<string, string>()
+{
+    { "group.id", "group1" },
+    { "auto.offset.reset", "latest" },
+    { "td.connect.ip", "127.0.0.1" },
+    { "td.connect.user", "root" },
+    { "td.connect.pass", "taosdata" },
+    { "td.connect.port", "6030" },
+    { "client.id", "tmq_example" },
+    { "enable.auto.commit", "true" },
+    { "msg.with.table.name", "false" },
+};
+var consumer = new ConsumerBuilder<Dictionary<string, object>>(cfg).Build();
+```
+
+
+
+
+```csharp
+var cfg = new Dictionary<string, string>()
+{
+    { "td.connect.type", "WebSocket" },
+    { "group.id", "group1" },
+    { "auto.offset.reset", "latest" },
+    { "td.connect.ip", "localhost" },
+    { "td.connect.port", "6041" },
+    { "useSSL", "false" },
+    { "td.connect.user", "root" },
+    { "td.connect.pass", "taosdata" },
+    { "client.id", "tmq_example" },
+    { "enable.auto.commit", "true" },
+    { "msg.with.table.name", "false" },
+};
+var consumer = new ConsumerBuilder<Dictionary<string, object>>(cfg).Build();
+```
+
+
+
+
+The consumer supports the following configuration parameters (the WebSocket-only options are illustrated in the sketch after this list):
+* td.connect.type: the connection type; valid values are Native and WebSocket, default Native
+* td.connect.ip: the address of the running TDengine or taosadapter instance
+* td.connect.port: the port of the running TDengine or taosadapter instance
+  * when td.connect.type is WebSocket and useSSL is false, td.connect.port defaults to 6041
+  * when td.connect.type is WebSocket and useSSL is true, td.connect.port defaults to 443
+* useSSL: whether to use an SSL connection; effective only when td.connect.type is WebSocket, default false
+* token: the token for connecting to TDengine Cloud; effective only when td.connect.type is WebSocket
+
+* td.connect.user: the username for connecting to TDengine
+* td.connect.pass: the password for connecting to TDengine
+* group.id: the consumer group ID
+* client.id: the consumer ID
+* enable.auto.commit: whether to commit offsets automatically, default true
+* auto.commit.interval.ms: the interval for automatic offset commits, default 5000 milliseconds
+* auto.offset.reset: where to start consuming when no committed offset exists; valid values are earliest and latest, default latest
+* msg.with.table.name: whether messages carry the table name
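+
+As a sketch of how the WebSocket-only options above combine (the host name and token below are placeholders, not working values), a consumer connecting over TLS, for example to TDengine Cloud, might be configured like this:
+
+```csharp
+var cfg = new Dictionary<string, string>()
+{
+    { "td.connect.type", "WebSocket" },
+    { "td.connect.ip", "your-instance.example.com" }, // placeholder endpoint
+    { "useSSL", "true" },                             // td.connect.port then defaults to 443
+    { "token", "your-cloud-token" },                  // placeholder TDengine Cloud token
+    { "group.id", "group1" },
+    { "client.id", "tmq_example" },
+    { "auto.offset.reset", "latest" },
+};
+var consumer = new ConsumerBuilder<Dictionary<string, object>>(cfg).Build();
+```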
0"); + consumer.Seek(new TopicPartitionOffset(a.Topic, a.Partition, 0)); + Thread.Sleep(TimeSpan.FromSeconds(1)); +}); +``` + +#### 提交 Offset + +```csharp +public void Commit(ConsumeResult consumerResult) +public List Commit() +public void Commit(IEnumerable offsets) +``` + +#### 关闭订阅 + +```csharp +consumer.Unsubscribe(); +consumer.Close(); +``` + +#### 完整示例 + + + + +```csharp +using System; +using System.Collections.Generic; +using System.Threading.Tasks; +using TDengine.Driver; +using TDengine.Driver.Client; +using TDengine.TMQ; + +namespace NativeSubscription +{ + internal class Program + { + public static void Main(string[] args) + { + var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + try + { + client.Exec("CREATE DATABASE power"); + client.Exec("USE power"); + client.Exec( + "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); + client.Exec("CREATE TOPIC topic_meters as SELECT * from power.meters"); + var cfg = new Dictionary() + { + { "group.id", "group1" }, + { "auto.offset.reset", "latest" }, + { "td.connect.ip", "127.0.0.1" }, + { "td.connect.user", "root" }, + { "td.connect.pass", "taosdata" }, + { "td.connect.port", "6030" }, + { "client.id", "tmq_example" }, + { "enable.auto.commit", "true" }, + { "msg.with.table.name", "false" }, + }; + var consumer = new ConsumerBuilder>(cfg).Build(); + consumer.Subscribe(new List() { "topic_meters" }); + Task.Run(InsertData); + while (true) + { + using (var cr = consumer.Consume(500)) + { + if (cr == null) continue; + foreach (var message in cr.Message) + { + Console.WriteLine( + $"message {{{((DateTime)message.Value["ts"]).ToString("yyyy-MM-dd HH:mm:ss.fff")}, " + + $"{message.Value["current"]}, {message.Value["voltage"]}, {message.Value["phase"]}}}"); + } + consumer.Commit(); + } + } + } + catch (Exception e) + { + Console.WriteLine(e.ToString()); + throw; + } + } + } + + static void InsertData() + { + var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + while (true) + { + client.Exec("INSERT into power.d1001 using power.meters tags(2,'California.SanFrancisco') values(now,11.5,219,0.30)"); + Task.Delay(1000).Wait(); + } + } + } + } +} +``` + + + + +```csharp +using System; +using System.Collections.Generic; +using System.Threading.Tasks; +using TDengine.Driver; +using TDengine.Driver.Client; +using TDengine.TMQ; + +namespace WSSubscription +{ + internal class Program + { + public static void Main(string[] args) + { + var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + try + { + client.Exec("CREATE DATABASE power"); + client.Exec("USE power"); + client.Exec( + "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); + client.Exec("CREATE TOPIC topic_meters as SELECT * from power.meters"); + var cfg = new Dictionary() + { + { "td.connect.type", "WebSocket" }, + { "group.id", "group1" }, + { "auto.offset.reset", "latest" }, + { "td.connect.ip", "localhost" }, + { "td.connect.port", "6041" }, + { "useSSL", "false" }, + { "td.connect.user", "root" }, + { "td.connect.pass", "taosdata" }, + { "client.id", "tmq_example" }, + { "enable.auto.commit", "true" }, + { 
"msg.with.table.name", "false" }, + }; + var consumer = new ConsumerBuilder>(cfg).Build(); + consumer.Subscribe(new List() { "topic_meters" }); + Task.Run(InsertData); + while (true) + { + using (var cr = consumer.Consume(500)) + { + if (cr == null) continue; + foreach (var message in cr.Message) + { + Console.WriteLine( + $"message {{{((DateTime)message.Value["ts"]).ToString("yyyy-MM-dd HH:mm:ss.fff")}, " + + $"{message.Value["current"]}, {message.Value["voltage"]}, {message.Value["phase"]}}}"); + } + consumer.Commit(); + } + } + } + catch (Exception e) + { + Console.WriteLine(e.ToString()); + throw; + } + } + } + + static void InsertData() + { + var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata"); + using (var client = DbDriver.Open(builder)) + { + while (true) + { + client.Exec("INSERT into power.d1001 using power.meters tags(2,'California.SanFrancisco') values(now,11.5,219,0.30)"); + Task.Delay(1000).Wait(); + } + } + } + } +} +``` + + + + +### ADO.NET + +C# 连接器支持 ADO.NET 接口,可以通过 ADO.NET 接口连接 TDengine 运行实例,进行数据写入、查询等操作。 + + + + +```csharp +using System; +using TDengine.Data.Client; + +namespace NativeADO +{ + internal class Program + { + public static void Main(string[] args) + { + const string connectionString = "host=localhost;port=6030;username=root;password=taosdata"; + using (var connection = new TDengineConnection(connectionString)) + { + try + { + connection.Open(); + using (var command = new TDengineCommand(connection)) + { + command.CommandText = "create database power"; + command.ExecuteNonQuery(); + connection.ChangeDatabase("power"); + command.CommandText = + "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"; + command.ExecuteNonQuery(); + command.CommandText = "INSERT INTO " + + "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + + "VALUES " + + "(?,?,?,?)"; + var parameters = command.Parameters; + parameters.Add(new TDengineParameter("@0", new DateTime(2023,10,03,14,38,05,000))); + parameters.Add(new TDengineParameter("@1", (float)10.30000)); + parameters.Add(new TDengineParameter("@2", (int)219)); + parameters.Add(new TDengineParameter("@3", (float)0.31000)); + command.ExecuteNonQuery(); + command.Parameters.Clear(); + command.CommandText = "SELECT * FROM meters"; + using (var reader = command.ExecuteReader()) + { + while (reader.Read()) + { + Console.WriteLine( + $"{((DateTime) reader.GetValue(0)):yyyy-MM-dd HH:mm:ss.fff}, {reader.GetValue(1)}, {reader.GetValue(2)}, {reader.GetValue(3)}, {reader.GetValue(4)}, {System.Text.Encoding.UTF8.GetString((byte[]) reader.GetValue(5))}"); + } + } + } + } + catch (Exception e) + { + Console.WriteLine(e); + throw; + } + } + } + } +} +``` + + + + +```csharp +using System; +using TDengine.Data.Client; + +namespace WSADO +{ + internal class Program + { + public static void Main(string[] args) + { + const string connectionString = "protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata"; + using (var connection = new TDengineConnection(connectionString)) + { + try + { + connection.Open(); + using (var command = new TDengineCommand(connection)) + { + command.CommandText = "create database power"; + command.ExecuteNonQuery(); + connection.ChangeDatabase("power"); + command.CommandText = + "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"; + 
+
+#### Close the Subscription
+
+```csharp
+consumer.Unsubscribe();
+consumer.Close();
+```
+
+#### Complete Example
+
+
+
+
+```csharp
+using System;
+using System.Collections.Generic;
+using System.Threading.Tasks;
+using TDengine.Driver;
+using TDengine.Driver.Client;
+using TDengine.TMQ;
+
+namespace NativeSubscription
+{
+    internal class Program
+    {
+        public static void Main(string[] args)
+        {
+            var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata");
+            using (var client = DbDriver.Open(builder))
+            {
+                try
+                {
+                    client.Exec("CREATE DATABASE power");
+                    client.Exec("USE power");
+                    client.Exec(
+                        "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
+                    client.Exec("CREATE TOPIC topic_meters as SELECT * from power.meters");
+                    var cfg = new Dictionary<string, string>()
+                    {
+                        { "group.id", "group1" },
+                        { "auto.offset.reset", "latest" },
+                        { "td.connect.ip", "127.0.0.1" },
+                        { "td.connect.user", "root" },
+                        { "td.connect.pass", "taosdata" },
+                        { "td.connect.port", "6030" },
+                        { "client.id", "tmq_example" },
+                        { "enable.auto.commit", "true" },
+                        { "msg.with.table.name", "false" },
+                    };
+                    var consumer = new ConsumerBuilder<Dictionary<string, object>>(cfg).Build();
+                    consumer.Subscribe(new List<string>() { "topic_meters" });
+                    Task.Run(InsertData);
+                    while (true)
+                    {
+                        using (var cr = consumer.Consume(500))
+                        {
+                            if (cr == null) continue;
+                            foreach (var message in cr.Message)
+                            {
+                                Console.WriteLine(
+                                    $"message {{{((DateTime)message.Value["ts"]).ToString("yyyy-MM-dd HH:mm:ss.fff")}, " +
+                                    $"{message.Value["current"]}, {message.Value["voltage"]}, {message.Value["phase"]}}}");
+                            }
+                            consumer.Commit();
+                        }
+                    }
+                }
+                catch (Exception e)
+                {
+                    Console.WriteLine(e.ToString());
+                    throw;
+                }
+            }
+        }
+
+        static void InsertData()
+        {
+            var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata");
+            using (var client = DbDriver.Open(builder))
+            {
+                while (true)
+                {
+                    client.Exec("INSERT into power.d1001 using power.meters tags(2,'California.SanFrancisco') values(now,11.5,219,0.30)");
+                    Task.Delay(1000).Wait();
+                }
+            }
+        }
+    }
+}
+```
+
+
+
+
+```csharp
+using System;
+using System.Collections.Generic;
+using System.Threading.Tasks;
+using TDengine.Driver;
+using TDengine.Driver.Client;
+using TDengine.TMQ;
+
+namespace WSSubscription
+{
+    internal class Program
+    {
+        public static void Main(string[] args)
+        {
+            var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata");
+            using (var client = DbDriver.Open(builder))
+            {
+                try
+                {
+                    client.Exec("CREATE DATABASE power");
+                    client.Exec("USE power");
+                    client.Exec(
+                        "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
+                    client.Exec("CREATE TOPIC topic_meters as SELECT * from power.meters");
+                    var cfg = new Dictionary<string, string>()
+                    {
+                        { "td.connect.type", "WebSocket" },
+                        { "group.id", "group1" },
+                        { "auto.offset.reset", "latest" },
+                        { "td.connect.ip", "localhost" },
+                        { "td.connect.port", "6041" },
+                        { "useSSL", "false" },
+                        { "td.connect.user", "root" },
+                        { "td.connect.pass", "taosdata" },
+                        { "client.id", "tmq_example" },
+                        { "enable.auto.commit", "true" },
+                        { "msg.with.table.name", "false" },
+                    };
+                    var consumer = new ConsumerBuilder<Dictionary<string, object>>(cfg).Build();
+                    consumer.Subscribe(new List<string>() { "topic_meters" });
+                    Task.Run(InsertData);
+                    while (true)
+                    {
+                        using (var cr = consumer.Consume(500))
+                        {
+                            if (cr == null) continue;
+                            foreach (var message in cr.Message)
+                            {
+                                Console.WriteLine(
+                                    $"message {{{((DateTime)message.Value["ts"]).ToString("yyyy-MM-dd HH:mm:ss.fff")}, " +
+                                    $"{message.Value["current"]}, {message.Value["voltage"]}, {message.Value["phase"]}}}");
+                            }
+                            consumer.Commit();
+                        }
+                    }
+                }
+                catch (Exception e)
+                {
+                    Console.WriteLine(e.ToString());
+                    throw;
+                }
+            }
+        }
+
+        static void InsertData()
+        {
+            var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata");
+            using (var client = DbDriver.Open(builder))
+            {
+                while (true)
+                {
+                    client.Exec("INSERT into power.d1001 using power.meters tags(2,'California.SanFrancisco') values(now,11.5,219,0.30)");
+                    Task.Delay(1000).Wait();
+                }
+            }
+        }
+    }
+}
+```
+
+
+
+
+### ADO.NET
+
+The C# connector supports the ADO.NET interface: through it you can connect to a running TDengine instance and perform operations such as writing and querying data.
+
+
+
+
+```csharp
+using System;
+using TDengine.Data.Client;
+
+namespace NativeADO
+{
+    internal class Program
+    {
+        public static void Main(string[] args)
+        {
+            const string connectionString = "host=localhost;port=6030;username=root;password=taosdata";
+            using (var connection = new TDengineConnection(connectionString))
+            {
+                try
+                {
+                    connection.Open();
+                    using (var command = new TDengineCommand(connection))
+                    {
+                        command.CommandText = "create database power";
+                        command.ExecuteNonQuery();
+                        connection.ChangeDatabase("power");
+                        command.CommandText =
+                            "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))";
+                        command.ExecuteNonQuery();
+                        command.CommandText = "INSERT INTO " +
+                                              "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " +
+                                              "VALUES " +
+                                              "(?,?,?,?)";
+                        var parameters = command.Parameters;
+                        parameters.Add(new TDengineParameter("@0", new DateTime(2023,10,03,14,38,05,000)));
+                        parameters.Add(new TDengineParameter("@1", (float)10.30000));
+                        parameters.Add(new TDengineParameter("@2", (int)219));
+                        parameters.Add(new TDengineParameter("@3", (float)0.31000));
+                        command.ExecuteNonQuery();
+                        command.Parameters.Clear();
+                        command.CommandText = "SELECT * FROM meters";
+                        using (var reader = command.ExecuteReader())
+                        {
+                            while (reader.Read())
+                            {
+                                Console.WriteLine(
+                                    $"{((DateTime) reader.GetValue(0)):yyyy-MM-dd HH:mm:ss.fff}, {reader.GetValue(1)}, {reader.GetValue(2)}, {reader.GetValue(3)}, {reader.GetValue(4)}, {System.Text.Encoding.UTF8.GetString((byte[]) reader.GetValue(5))}");
+                            }
+                        }
+                    }
+                }
+                catch (Exception e)
+                {
+                    Console.WriteLine(e);
+                    throw;
+                }
+            }
+        }
+    }
+}
+```
+
+
+
+
+```csharp
+using System;
+using TDengine.Data.Client;
+
+namespace WSADO
+{
+    internal class Program
+    {
+        public static void Main(string[] args)
+        {
+            const string connectionString = "protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata";
+            using (var connection = new TDengineConnection(connectionString))
+            {
+                try
+                {
+                    connection.Open();
+                    using (var command = new TDengineCommand(connection))
+                    {
+                        command.CommandText = "create database power";
+                        command.ExecuteNonQuery();
+                        connection.ChangeDatabase("power");
+                        command.CommandText =
+                            "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))";
+                        command.ExecuteNonQuery();
+                        command.CommandText = "INSERT INTO " +
+                                              "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " +
+                                              "VALUES " +
+                                              "(?,?,?,?)";
+                        var parameters = command.Parameters;
+                        parameters.Add(new TDengineParameter("@0", new DateTime(2023,10,03,14,38,05,000)));
+                        parameters.Add(new TDengineParameter("@1", (float)10.30000));
+                        parameters.Add(new TDengineParameter("@2", (int)219));
+                        parameters.Add(new TDengineParameter("@3", (float)0.31000));
+                        command.ExecuteNonQuery();
+                        command.Parameters.Clear();
+                        command.CommandText = "SELECT * FROM meters";
+                        using (var reader = command.ExecuteReader())
+                        {
+                            while (reader.Read())
+                            {
+                                Console.WriteLine(
+                                    $"{((DateTime) reader.GetValue(0)):yyyy-MM-dd HH:mm:ss.fff}, {reader.GetValue(1)}, {reader.GetValue(2)}, {reader.GetValue(3)}, {reader.GetValue(4)}, {System.Text.Encoding.UTF8.GetString((byte[]) reader.GetValue(5))}");
+                            }
+                        }
+                    }
+                }
+                catch (Exception e)
+                {
+                    Console.WriteLine(e);
+                    throw;
+                }
+            }
+        }
+    }
+}
+```
+
+
+
+
+* The connection parameters are the same as those described in [Establishing a Connection](#建立连接).
+* The name of a TDengineParameter must start with @, for example @0, @1, @2; its value must use the C# type that corresponds one-to-one with the TDengine column type, as listed in [TDengine DataType and C# DataType](#tdengine-datatype-和-c-datatype).
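+
+Because TDengineCommand follows the standard ADO.NET `DbCommand` shape, the usual conveniences should apply as well; the following sketch (an assumption based on the ADO.NET contract, not an example taken from the connector documentation) reads a single aggregate value with `ExecuteScalar`:
+
+```csharp
+using System;
+using TDengine.Data.Client;
+
+namespace WSADOScalar // hypothetical example namespace
+{
+    internal class Program
+    {
+        public static void Main(string[] args)
+        {
+            const string connectionString = "protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata";
+            using (var connection = new TDengineConnection(connectionString))
+            {
+                connection.Open();
+                using (var command = new TDengineCommand(connection))
+                {
+                    // ExecuteScalar returns the first column of the first row,
+                    // which fits single-aggregate queries such as COUNT(*).
+                    command.CommandText = "SELECT count(*) FROM power.meters";
+                    var rows = command.ExecuteScalar();
+                    Console.WriteLine($"total rows: {rows}");
+                }
+            }
+        }
+    }
+}
+```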
"Unable to establish connection","Unable to resolve FQDN" - - 一般是因为 FQDN 配置不正确。可以参考[如何彻底搞懂 TDengine 的 FQDN](https://www.taosdata.com/blog/2021/07/29/2741.html)解决。 - -2. Unhandled exception. System.DllNotFoundException: Unable to load DLL 'taos' or one of its dependencies: 找不到指定的模块。 - - 一般是因为程序没有找到依赖的客户端驱动。解决方法为:Windows 下可以将 `C:\TDengine\driver\taos.dll` 拷贝到 `C:\Windows\System32\ ` 目录下,Linux 下建立如下软链接 `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。 - -## API 参考 - -[API 参考](https://docs.taosdata.com/api/connector-csharp/html/860d2ac1-dd52-39c9-e460-0829c4e5a40b.htm) +[示例程序](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples) \ No newline at end of file diff --git a/include/common/cos.h b/include/common/cos.h index c6b159c1da..afeca3ca03 100644 --- a/include/common/cos.h +++ b/include/common/cos.h @@ -34,7 +34,7 @@ extern int32_t tsS3UploadDelaySec; int32_t s3Init(); void s3CleanUp(); int32_t s3PutObjectFromFile(const char *file, const char *object); -int32_t s3PutObjectFromFile2(const char *file, const char *object); +int32_t s3PutObjectFromFile2(const char *file, const char *object, int8_t withcp); void s3DeleteObjectsByPrefix(const char *prefix); void s3DeleteObjects(const char *object_name[], int nobject); bool s3Exists(const char *object_name); diff --git a/include/common/cos_cp.h b/include/common/cos_cp.h new file mode 100644 index 0000000000..fd778b1a1d --- /dev/null +++ b/include/common/cos_cp.h @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */
+
+#ifndef _TD_COMMON_COS_CP_H_
+#define _TD_COMMON_COS_CP_H_
+
+#include "os.h"
+#include "tdef.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum {
+  COS_CP_TYPE_UPLOAD,   // upload
+  COS_CP_TYPE_DOWNLOAD  // download
+} ECpType;
+
+typedef struct {
+  int32_t  index;      // the index of the part, starting from 0
+  int64_t  offset;     // the offset of the part
+  int64_t  size;       // the size of the part
+  int      completed;  // COS_TRUE completed, COS_FALSE uncompleted
+  char     etag[128];  // the etag of the part, for upload
+  uint64_t crc64;
+} SCheckpointPart;
+
+typedef struct {
+  ECpType   cp_type;  // 0 upload, 1 download
+  char      md5[64];  // the md5 of the checkpoint content
+  TdFilePtr thefile;  // the handle of the checkpoint file
+
+  char    file_path[TSDB_FILENAME_LEN];  // local file path
+  int64_t file_size;                     // local file size, for upload
+  int32_t file_last_modified;            // local file last modified time, for upload
+  char    file_md5[64];                  // md5 of the local file content, for upload, reserved
+
+  char    object_name[128];          // object name
+  int64_t object_size;               // object size, for download
+  char    object_last_modified[64];  // object last modified time, for download
+  char    object_etag[128];          // object etag, for download
+
+  char upload_id[128];  // upload id
+
+  int              part_num;   // the total number of parts
+  int64_t          part_size;  // the part size, in bytes
+  SCheckpointPart* parts;      // the parts of the local file or object, starting from 0
+} SCheckpoint;
+
+int32_t cos_cp_open(char const* cp_path, SCheckpoint* checkpoint);
+void    cos_cp_close(TdFilePtr fd);
+void    cos_cp_remove(char const* filepath);
+
+int32_t cos_cp_load(char const* filepath, SCheckpoint* checkpoint);
+int32_t cos_cp_dump(SCheckpoint* checkpoint);
+void    cos_cp_get_undo_parts(SCheckpoint* checkpoint, int* part_num, SCheckpointPart* parts, int64_t* consume_bytes);
+void    cos_cp_update(SCheckpoint* checkpoint, int32_t part_index, char const* etag, uint64_t crc64);
+void    cos_cp_build_upload(SCheckpoint* checkpoint, char const* filepath, int64_t size, int32_t mtime,
+                            char const* upload_id, int64_t part_size);
+bool    cos_cp_is_valid_upload(SCheckpoint* checkpoint, int64_t size, int32_t mtime);
+
+void cos_cp_build_download(SCheckpoint* checkpoint, char const* filepath, char const* object_name, int64_t object_size,
+                           char const* object_lmtime, char const* object_etag, int64_t part_size);
+bool cos_cp_is_valid_download(SCheckpoint* checkpoint, char const* object_name, int64_t object_size,
+                              char const* object_lmtime, char const* object_etag);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*_TD_COMMON_COS_CP_H_*/
diff --git a/include/common/tglobal.h b/include/common/tglobal.h
index 33cfada338..91f8bbc7f3 100644
--- a/include/common/tglobal.h
+++ b/include/common/tglobal.h
@@ -107,8 +107,9 @@ extern int32_t tsMonitorMaxLogs;
 extern bool tsMonitorComp;
 
 // audit
-extern bool tsEnableAudit;
-extern bool tsEnableAuditCreateTable;
+extern bool    tsEnableAudit;
+extern bool    tsEnableAuditCreateTable;
+extern int32_t tsAuditInterval;
 
 // telem
 extern bool tsEnableTelem;
diff --git a/include/libs/audit/audit.h b/include/libs/audit/audit.h
index 85d462b96b..dd3df27866 100644
--- a/include/libs/audit/audit.h
+++ b/include/libs/audit/audit.h
@@ -23,13 +23,13 @@
 #include "tjson.h"
 #include "tmsgcb.h"
 #include "trpc.h"
-#include "mnode.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 #define AUDIT_DETAIL_MAX 65472
+#define AUDIT_OPERATION_LEN 20
 
 typedef struct {
   const char *server;
@@ -37,13 +37,28 @@ typedef struct {
   bool comp;
 } SAuditCfg;
 
+typedef struct {
+  int64_t curTime;
+  char    strClusterId[TSDB_CLUSTER_ID_LEN];
+  char    clientAddress[50];
+  char    user[TSDB_USER_LEN];
+  char    operation[AUDIT_OPERATION_LEN];
+  char    target1[TSDB_DB_NAME_LEN];      // holds the db name
+  char    target2[TSDB_STREAM_NAME_LEN];  // holds an stb, table, topic, user, or stream name; sized to the largest of those lengths
+  char*   detail;
+} SAuditRecord;
+
 int32_t auditInit(const SAuditCfg *pCfg);
+void    auditCleanup();
 void    auditSend(SJson *pJson);
 void    auditRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
                     char *detail, int32_t len);
+void    auditAddRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
+                       char *detail, int32_t len);
+void    auditSendRecordsInBatch();
 
 #ifdef __cplusplus
 }
 #endif
 
-#endif /*_TD_MONITOR_H_*/
+#endif /*_TD_AUDIT_H_*/
diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h
index 8f3e100db6..f6737b4e27 100644
--- a/include/libs/stream/tstream.h
+++ b/include/libs/stream/tstream.h
@@ -509,11 +509,8 @@ typedef struct SStreamMeta {
   SArray*  chkpSaved;
   SArray*  chkpInUse;
   SRWLatch chkpDirLock;
-
-  void* qHandle;
-  int32_t pauseTaskNum;
-
-  void* bkdChkptMgt;
+  void*    qHandle;
+  void*    bkdChkptMgt;
 } SStreamMeta;
 
 int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo);
@@ -840,11 +837,10 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int3
 int32_t      streamMetaGetNumOfTasks(SStreamMeta* pMeta);
 SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
 void         streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
-int32_t      streamMetaReopen(SStreamMeta* pMeta);
+void         streamMetaClear(SStreamMeta* pMeta);
 void         streamMetaInitBackend(SStreamMeta* pMeta);
 int32_t      streamMetaCommit(SStreamMeta* pMeta);
 int32_t      streamMetaLoadAllTasks(SStreamMeta* pMeta);
-int32_t      streamMetaReloadAllTasks(SStreamMeta* pMeta);
 int64_t      streamMetaGetLatestCheckpointId(SStreamMeta* pMeta);
 void         streamMetaNotifyClose(SStreamMeta* pMeta);
 int32_t      streamTaskSetDb(SStreamMeta* pMeta, void* pTask, char* key);
diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h
index a428a9ae6a..e54237fe8b 100644
--- a/include/libs/sync/sync.h
+++ b/include/libs/sync/sync.h
@@ -33,11 +33,12 @@ extern "C" {
 #define SYNC_MAX_PROGRESS_WAIT_MS 4000
 #define SYNC_MAX_START_TIME_RANGE_MS (1000 * 20)
 #define SYNC_MAX_RECV_TIME_RANGE_MS 1200
-#define SYNC_DEL_WAL_MS (1000 * 60)
 #define SYNC_ADD_QUORUM_COUNT 3
 #define SYNC_VNODE_LOG_RETENTION (TSDB_SYNC_LOG_BUFFER_RETENTION + 1)
 #define SNAPSHOT_WAIT_MS 1000 * 5
 
+#define SYNC_WAL_LOG_RETENTION_SIZE (8LL * 1024 * 1024 * 1024)
+
 #define SYNC_MAX_RETRY_BACKOFF 5
 #define SYNC_LOG_REPL_RETRY_WAIT_MS 100
 #define SYNC_APPEND_ENTRIES_TIMEOUT_MS 10000
@@ -219,6 +220,7 @@ typedef struct SSyncLogStore {
 
   SyncIndex (*syncLogWriteIndex)(struct SSyncLogStore* pLogStore);
   SyncIndex (*syncLogLastIndex)(struct SSyncLogStore* pLogStore);
+  SyncIndex (*syncLogIndexRetention)(struct SSyncLogStore* pLogStore, int64_t bytes);
   SyncTerm (*syncLogLastTerm)(struct SSyncLogStore* pLogStore);
 
   int32_t (*syncLogAppendEntry)(struct SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry, bool forcSync);
diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h
index a56a5567eb..7c00ff5178 100644
--- a/include/libs/wal/wal.h
+++ b/include/libs/wal/wal.h
@@ -225,6 +225,7 @@ bool    walIsEmpty(SWal *);
 int64_t walGetFirstVer(SWal *);
 int64_t walGetSnapshotVer(SWal *);
 int64_t walGetLastVer(SWal *);
+int64_t walGetVerRetention(SWal *pWal, int64_t bytes);
 int64_t walGetCommittedVer(SWal *);
 int64_t walGetAppliedVer(SWal *);
 
diff --git a/include/os/osString.h b/include/os/osString.h index 4982ac8bfd..8e8f1546e7 100644 --- a/include/os/osString.h +++ b/include/os/osString.h @@ -47,18 +47,13 @@ typedef int32_t TdUcs4; #define strtof STR_TO_F_FUNC_TAOS_FORBID #endif -#ifdef WINDOWS -#define tstrdup(str) _strdup(str) -#else -#define tstrdup(str) strdup(str) -#endif - #define tstrncpy(dst, src, size) \ do { \ strncpy((dst), (src), (size)); \ (dst)[(size)-1] = 0; \ } while (0) +char *tstrdup(const char *src); int32_t taosUcs4len(TdUcs4 *ucs4); int64_t taosStr2int64(const char *str); diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h index 75003d76d8..b4ee619332 100644 --- a/source/client/inc/clientInt.h +++ b/source/client/inc/clientInt.h @@ -205,7 +205,7 @@ typedef struct SRequestSendRecvBody { __taos_async_fn_t queryFp; __taos_async_fn_t fetchFp; EQueryExecMode execMode; - void* param; + void* interParam; SDataBuf requestMsg; int64_t queryJob; // query job, created according to sql query DAG. int32_t subplanNum; @@ -287,6 +287,7 @@ typedef struct SRequestObj { typedef struct SSyncQueryParam { tsem_t sem; SRequestObj* pRequest; + void* userParam; } SSyncQueryParam; void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4); @@ -420,6 +421,7 @@ int32_t buildPreviousRequest(SRequestObj *pRequest, const char* sql, SRequestObj int32_t prepareAndParseSqlSyntax(SSqlCallbackWrapper **ppWrapper, SRequestObj *pRequest, bool updateMetaForce); void returnToUser(SRequestObj* pRequest); void stopAllQueries(SRequestObj *pRequest); +void doRequestCallback(SRequestObj* pRequest, int32_t code); void freeQueryParam(SSyncQueryParam* param); #ifdef TD_ENTERPRISE diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index 85624c31c5..77cda347a4 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -316,6 +316,15 @@ void *createRequest(uint64_t connId, int32_t type, int64_t reqid) { terrno = TSDB_CODE_TSC_DISCONNECTED; return NULL; } + SSyncQueryParam *interParam = taosMemoryCalloc(1, sizeof(SSyncQueryParam)); + if (interParam == NULL) { + doDestroyRequest(pRequest); + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } + tsem_init(&interParam->sem, 0, 0); + interParam->pRequest = pRequest; + pRequest->body.interParam = interParam; pRequest->resType = RES_TYPE__QUERY; pRequest->requestId = reqid == 0 ? 
generateRequestId() : reqid; @@ -437,12 +446,10 @@ void doDestroyRequest(void *p) { deregisterRequest(pRequest); } - if (pRequest->syncQuery) { - if (pRequest->body.param) { - tsem_destroy(&((SSyncQueryParam *)pRequest->body.param)->sem); - } - taosMemoryFree(pRequest->body.param); + if (pRequest->body.interParam) { + tsem_destroy(&((SSyncQueryParam *)pRequest->body.interParam)->sem); } + taosMemoryFree(pRequest->body.interParam); qDestroyQuery(pRequest->pQuery); nodesDestroyAllocator(pRequest->allocatorRefId); diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 4462bd9c9d..f615111b7f 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -196,21 +196,7 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param, (*pRequest)->sqlLen = sqlLen; (*pRequest)->validateOnly = validateSql; - SSyncQueryParam* newpParam = NULL; - if (param == NULL) { - newpParam = taosMemoryCalloc(1, sizeof(SSyncQueryParam)); - if (newpParam == NULL) { - destroyRequest(*pRequest); - *pRequest = NULL; - return TSDB_CODE_OUT_OF_MEMORY; - } - - tsem_init(&newpParam->sem, 0, 0); - newpParam->pRequest = (*pRequest); - param = newpParam; - } - - (*pRequest)->body.param = param; + ((SSyncQueryParam*)(*pRequest)->body.interParam)->userParam = param; STscObj* pTscObj = (*pRequest)->pTscObj; int32_t err = taosHashPut(pTscObj->pRequests, &(*pRequest)->self, sizeof((*pRequest)->self), &(*pRequest)->self, @@ -218,7 +204,6 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param, if (err) { tscError("%" PRId64 " failed to add to request container, reqId:0x%" PRIx64 ", conn:%" PRId64 ", %s", (*pRequest)->self, (*pRequest)->requestId, pTscObj->id, sql); - freeQueryParam(newpParam); destroyRequest(*pRequest); *pRequest = NULL; return TSDB_CODE_OUT_OF_MEMORY; @@ -230,7 +215,6 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param, nodesCreateAllocator((*pRequest)->requestId, tsQueryNodeChunkSize, &((*pRequest)->allocatorRefId))) { tscError("%" PRId64 " failed to create node allocator, reqId:0x%" PRIx64 ", conn:%" PRId64 ", %s", (*pRequest)->self, (*pRequest)->requestId, pTscObj->id, sql); - freeQueryParam(newpParam); destroyRequest(*pRequest); *pRequest = NULL; return TSDB_CODE_OUT_OF_MEMORY; @@ -336,7 +320,7 @@ static SAppInstInfo* getAppInfo(SRequestObj* pRequest) { return pRequest->pTscOb void asyncExecLocalCmd(SRequestObj* pRequest, SQuery* pQuery) { SRetrieveTableRsp* pRsp = NULL; if (pRequest->validateOnly) { - pRequest->body.queryFp(pRequest->body.param, pRequest, 0); + doRequestCallback(pRequest, 0); return; } @@ -358,18 +342,18 @@ void asyncExecLocalCmd(SRequestObj* pRequest, SQuery* pQuery) { pRequest->requestId); } - pRequest->body.queryFp(pRequest->body.param, pRequest, code); + doRequestCallback(pRequest, code); } int32_t asyncExecDdlQuery(SRequestObj* pRequest, SQuery* pQuery) { if (pRequest->validateOnly) { - pRequest->body.queryFp(pRequest->body.param, pRequest, 0); + doRequestCallback(pRequest, 0); return TSDB_CODE_SUCCESS; } // drop table if exists not_exists_table if (NULL == pQuery->pCmdMsg) { - pRequest->body.queryFp(pRequest->body.param, pRequest, 0); + doRequestCallback(pRequest, 0); return TSDB_CODE_SUCCESS; } @@ -384,7 +368,7 @@ int32_t asyncExecDdlQuery(SRequestObj* pRequest, SQuery* pQuery) { int64_t transporterId = 0; int32_t code = asyncSendMsgToServer(pAppInfo->pTransporter, &pMsgInfo->epSet, &transporterId, pSendMsg); if (code) { - 
pRequest->body.queryFp(pRequest->body.param, pRequest, code); + doRequestCallback(pRequest, code); } return code; } @@ -913,7 +897,7 @@ void continuePostSubQuery(SRequestObj* pRequest, TAOS_ROW row) { void returnToUser(SRequestObj* pRequest) { if (pRequest->relation.userRefId == pRequest->self || 0 == pRequest->relation.userRefId) { // return to client - pRequest->body.queryFp(pRequest->body.param, pRequest, pRequest->code); + doRequestCallback(pRequest, pRequest->code); return; } @@ -921,7 +905,7 @@ void returnToUser(SRequestObj* pRequest) { if (pUserReq) { pUserReq->code = pRequest->code; // return to client - pUserReq->body.queryFp(pUserReq->body.param, pUserReq, pUserReq->code); + doRequestCallback(pUserReq, pUserReq->code); releaseRequest(pRequest->relation.userRefId); return; } else { @@ -1031,7 +1015,7 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) { pRequest->pWrapper = NULL; // return to client - pRequest->body.queryFp(pRequest->body.param, pRequest, code); + doRequestCallback(pRequest, code); } } @@ -1186,7 +1170,7 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat pRequest->code = terrno; } - pRequest->body.queryFp(pRequest->body.param, pRequest, code); + doRequestCallback(pRequest, code); } // todo not to be released here @@ -1199,7 +1183,7 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM int32_t code = 0; if (pRequest->parseOnly) { - pRequest->body.queryFp(pRequest->body.param, pRequest, 0); + doRequestCallback(pRequest, 0); return; } @@ -1233,11 +1217,11 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM } case QUERY_EXEC_MODE_EMPTY_RESULT: pRequest->type = TSDB_SQL_RETRIEVE_EMPTY_RESULT; - pRequest->body.queryFp(pRequest->body.param, pRequest, 0); + doRequestCallback(pRequest, 0); break; default: tscError("0x%" PRIx64 " invalid execMode %d", pRequest->self, pQuery->execMode); - pRequest->body.queryFp(pRequest->body.param, pRequest, -1); + doRequestCallback(pRequest, -1); break; } } @@ -1703,8 +1687,8 @@ void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) } static void syncFetchFn(void* param, TAOS_RES* res, int32_t numOfRows) { - SSyncQueryParam* pParam = param; - tsem_post(&pParam->sem); + tsem_t* sem = param; + tsem_post(sem); } void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) { @@ -1722,10 +1706,11 @@ void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertU // convert ucs4 to native multi-bytes string pResultInfo->convertUcs4 = convertUcs4; - - SSyncQueryParam* pParam = pRequest->body.param; - taos_fetch_rows_a(pRequest, syncFetchFn, pParam); - tsem_wait(&pParam->sem); + tsem_t sem; + tsem_init(&sem, 0, 0); + taos_fetch_rows_a(pRequest, syncFetchFn, &sem); + tsem_wait(&sem); + tsem_destroy(&sem); } if (pResultInfo->numOfRows == 0 || pRequest->code != TSDB_CODE_SUCCESS) { @@ -2492,6 +2477,10 @@ TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly) { tscDebug("taos_query start with sql:%s", sql); SSyncQueryParam* param = taosMemoryCalloc(1, sizeof(SSyncQueryParam)); + if (NULL == param) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } tsem_init(¶m->sem, 0, 0); taosAsyncQueryImpl(*(int64_t*)taos, sql, syncQueryFn, param, validateOnly); @@ -2501,9 +2490,8 @@ TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly) { if (param->pRequest != NULL) { param->pRequest->syncQuery = true; pRequest = param->pRequest; - } else { - 
taosMemoryFree(param); } + taosMemoryFree(param); tscDebug("taos_query end with sql:%s", sql); @@ -2517,19 +2505,20 @@ TAOS_RES* taosQueryImplWithReqid(TAOS* taos, const char* sql, bool validateOnly, } SSyncQueryParam* param = taosMemoryCalloc(1, sizeof(SSyncQueryParam)); + if (param == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } tsem_init(¶m->sem, 0, 0); taosAsyncQueryImplWithReqid(*(int64_t*)taos, sql, syncQueryFn, param, validateOnly, reqid); tsem_wait(¶m->sem); - SRequestObj* pRequest = NULL; if (param->pRequest != NULL) { param->pRequest->syncQuery = true; pRequest = param->pRequest; - } else { - taosMemoryFree(param); } - + taosMemoryFree(param); return pRequest; } @@ -2547,13 +2536,13 @@ static void fetchCallback(void* pResult, void* param, int32_t code) { if (code != TSDB_CODE_SUCCESS) { pRequest->code = code; taosMemoryFreeClear(pResultInfo->pData); - pRequest->body.fetchFp(pRequest->body.param, pRequest, 0); + pRequest->body.fetchFp(((SSyncQueryParam *)pRequest->body.interParam)->userParam, pRequest, 0); return; } if (pRequest->code != TSDB_CODE_SUCCESS) { taosMemoryFreeClear(pResultInfo->pData); - pRequest->body.fetchFp(pRequest->body.param, pRequest, 0); + pRequest->body.fetchFp(((SSyncQueryParam *)pRequest->body.interParam)->userParam, pRequest, 0); return; } @@ -2574,20 +2563,12 @@ static void fetchCallback(void* pResult, void* param, int32_t code) { atomic_add_fetch_64((int64_t*)&pActivity->fetchBytes, pRequest->body.resInfo.payloadLen); } - pRequest->body.fetchFp(pRequest->body.param, pRequest, pResultInfo->numOfRows); + pRequest->body.fetchFp(((SSyncQueryParam *)pRequest->body.interParam)->userParam, pRequest, pResultInfo->numOfRows); } void taosAsyncFetchImpl(SRequestObj* pRequest, __taos_async_fn_t fp, void* param) { - if (pRequest->syncQuery && pRequest->body.param != param) { - if (pRequest->body.param) { - tsem_destroy(&((SSyncQueryParam *)pRequest->body.param)->sem); - } - taosMemoryFree(pRequest->body.param); - pRequest->syncQuery = false; - } - pRequest->body.fetchFp = fp; - pRequest->body.param = param; + ((SSyncQueryParam *)pRequest->body.interParam)->userParam = param; SReqResultInfo* pResultInfo = &pRequest->body.resInfo; @@ -2625,6 +2606,10 @@ void taosAsyncFetchImpl(SRequestObj* pRequest, __taos_async_fn_t fp, void* param schedulerFetchRows(pRequest->body.queryJob, &req); } +void doRequestCallback(SRequestObj* pRequest, int32_t code) { + pRequest->body.queryFp(((SSyncQueryParam *)pRequest->body.interParam)->userParam, pRequest, code); +} + int32_t clientParseSql(void* param, const char* dbName, const char* sql, bool parseOnly, const char* effectiveUser, SParseSqlRes* pRes) { #ifndef TD_ENTERPRISE return TSDB_CODE_SUCCESS; @@ -2633,4 +2618,3 @@ int32_t clientParseSql(void* param, const char* dbName, const char* sql, bool pa #endif } - diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 3a91ae82d5..1ce7c02dcf 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -1095,7 +1095,7 @@ static void doAsyncQueryFromParse(SMetaData *pResultMeta, void *param, int32_t c pRequest->pWrapper = NULL; terrno = code; pRequest->code = code; - pRequest->body.queryFp(pRequest->body.param, pRequest, code); + doRequestCallback(pRequest, code); } } @@ -1112,7 +1112,7 @@ void continueInsertFromCsv(SSqlCallbackWrapper *pWrapper, SRequestObj *pRequest) pRequest->pWrapper = NULL; terrno = code; pRequest->code = code; - pRequest->body.queryFp(pRequest->body.param, pRequest, code); + 
doRequestCallback(pRequest, code); } } @@ -1210,7 +1210,7 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) { terrno = code; pRequest->code = code; tscDebug("call sync query cb with code: %s", tstrerror(code)); - pRequest->body.queryFp(pRequest->body.param, pRequest, code); + doRequestCallback(pRequest, code); return; } @@ -1242,7 +1242,7 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) { terrno = code; pRequest->code = code; - pRequest->body.queryFp(pRequest->body.param, pRequest, code); + doRequestCallback(pRequest, code); } } @@ -1545,12 +1545,12 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) { conn.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp); - code = catalogAsyncGetAllMeta(pCtg, &conn, &catalogReq, syncCatalogFn, pRequest->body.param, NULL); + code = catalogAsyncGetAllMeta(pCtg, &conn, &catalogReq, syncCatalogFn, pRequest->body.interParam, NULL); if (code) { goto _return; } - SSyncQueryParam *pParam = pRequest->body.param; + SSyncQueryParam *pParam = pRequest->body.interParam; tsem_wait(&pParam->sem); _return: diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c index 693efbc364..5b0b59cc2c 100644 --- a/source/client/src/clientMsgHandler.c +++ b/source/client/src/clientMsgHandler.c @@ -41,7 +41,7 @@ int32_t genericRspCallback(void* param, SDataBuf* pMsg, int32_t code) { taosMemoryFree(pMsg->pEpSet); taosMemoryFree(pMsg->pData); if (pRequest->body.queryFp != NULL) { - pRequest->body.queryFp(pRequest->body.param, pRequest, code); + doRequestCallback(pRequest, code); } else { tsem_post(&pRequest->body.rspSem); } @@ -199,7 +199,7 @@ int32_t processCreateDbRsp(void* param, SDataBuf* pMsg, int32_t code) { } if (pRequest->body.queryFp) { - pRequest->body.queryFp(pRequest->body.param, pRequest, code); + doRequestCallback(pRequest, code); } else { tsem_post(&pRequest->body.rspSem); } @@ -235,7 +235,8 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) { setErrno(pRequest, code); if (pRequest->body.queryFp != NULL) { - pRequest->body.queryFp(pRequest->body.param, pRequest, pRequest->code); + doRequestCallback(pRequest, pRequest->code); + } else { tsem_post(&pRequest->body.rspSem); } @@ -299,7 +300,7 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) { taosMemoryFree(pMsg->pEpSet); if (pRequest->body.queryFp != NULL) { - pRequest->body.queryFp(pRequest->body.param, pRequest, pRequest->code); + doRequestCallback(pRequest, pRequest->code); } else { tsem_post(&pRequest->body.rspSem); } @@ -343,7 +344,7 @@ int32_t processCreateSTableRsp(void* param, SDataBuf* pMsg, int32_t code) { } } - pRequest->body.queryFp(pRequest->body.param, pRequest, code); + doRequestCallback(pRequest, code); } else { tsem_post(&pRequest->body.rspSem); } @@ -380,7 +381,7 @@ int32_t processDropDbRsp(void* param, SDataBuf* pMsg, int32_t code) { taosMemoryFree(pMsg->pEpSet); if (pRequest->body.queryFp != NULL) { - pRequest->body.queryFp(pRequest->body.param, pRequest, code); + doRequestCallback(pRequest, code); } else { tsem_post(&pRequest->body.rspSem); } @@ -420,7 +421,7 @@ int32_t processAlterStbRsp(void* param, SDataBuf* pMsg, int32_t code) { } } - pRequest->body.queryFp(pRequest->body.param, pRequest, code); + doRequestCallback(pRequest, code); } else { tsem_post(&pRequest->body.rspSem); } @@ -534,7 +535,7 @@ int32_t processShowVariablesRsp(void* param, SDataBuf* pMsg, int32_t code) { taosMemoryFree(pMsg->pEpSet); if (pRequest->body.queryFp != NULL) { - 
pRequest->body.queryFp(pRequest->body.param, pRequest, code); + doRequestCallback(pRequest, code); } else { tsem_post(&pRequest->body.rspSem); } diff --git a/source/common/src/cos.c b/source/common/src/cos.c index 7c8676e9f5..fcc777ac99 100644 --- a/source/common/src/cos.c +++ b/source/common/src/cos.c @@ -1,6 +1,8 @@ #define ALLOW_FORBID_FUNC #include "cos.h" +#include "cos_cp.h" +#include "tdef.h" extern char tsS3Endpoint[]; extern char tsS3AccessKeyId[]; @@ -86,7 +88,7 @@ typedef struct { char err_msg[128]; S3Status status; uint64_t content_length; - char * buf; + char *buf; int64_t buf_pos; } TS3SizeCBD; @@ -270,7 +272,7 @@ typedef struct list_parts_callback_data { typedef struct MultipartPartData { put_object_callback_data put_object_data; int seq; - UploadManager * manager; + UploadManager *manager; } MultipartPartData; static int putObjectDataCallback(int bufferSize, char *buffer, void *callbackData) { @@ -317,12 +319,23 @@ S3Status MultipartResponseProperiesCallback(const S3ResponseProperties *properti MultipartPartData *data = (MultipartPartData *)callbackData; int seq = data->seq; - const char * etag = properties->eTag; + const char *etag = properties->eTag; data->manager->etags[seq - 1] = strdup(etag); data->manager->next_etags_pos = seq; return S3StatusOK; } +S3Status MultipartResponseProperiesCallbackWithCp(const S3ResponseProperties *properties, void *callbackData) { + responsePropertiesCallbackNull(properties, callbackData); + + MultipartPartData *data = (MultipartPartData *)callbackData; + int seq = data->seq; + const char *etag = properties->eTag; + data->manager->etags[seq - 1] = strdup(etag); + // data->manager->next_etags_pos = seq; + return S3StatusOK; +} + static int multipartPutXmlCallback(int bufferSize, char *buffer, void *callbackData) { UploadManager *manager = (UploadManager *)callbackData; int ret = 0; @@ -446,37 +459,343 @@ static int try_get_parts_info(const char *bucketName, const char *key, UploadMan return 0; } */ -int32_t s3PutObjectFromFile2(const char *file, const char *object) { - int32_t code = 0; - const char *key = object; - // const char *uploadId = 0; - const char * filename = 0; + +static int32_t s3PutObjectFromFileSimple(S3BucketContext *bucket_context, char const *object_name, int64_t size, + S3PutProperties *put_prop, put_object_callback_data *data) { + int32_t code = 0; + S3PutObjectHandler putObjectHandler = {{&responsePropertiesCallbackNull, &responseCompleteCallback}, + &putObjectDataCallback}; + + do { + S3_put_object(bucket_context, object_name, size, put_prop, 0, 0, &putObjectHandler, data); + } while (S3_status_is_retryable(data->status) && should_retry()); + + if (data->status != S3StatusOK) { + s3PrintError(__FILE__, __LINE__, __func__, data->status, data->err_msg); + code = TAOS_SYSTEM_ERROR(EIO); + } else if (data->contentLength) { + uError("%s Failed to read remaining %llu bytes from input", __func__, (unsigned long long)data->contentLength); + code = TAOS_SYSTEM_ERROR(EIO); + } + + return code; +} + +static int32_t s3PutObjectFromFileWithoutCp(S3BucketContext *bucket_context, char const *object_name, + int64_t contentLength, S3PutProperties *put_prop, + put_object_callback_data *data) { + int32_t code = 0; + uint64_t totalContentLength = contentLength; + uint64_t todoContentLength = contentLength; + UploadManager manager = {0}; + + uint64_t chunk_size = MULTIPART_CHUNK_SIZE >> 3; + int totalSeq = (contentLength + chunk_size - 1) / chunk_size; + const int max_part_num = 10000; + if (totalSeq > max_part_num) { + chunk_size = 
(contentLength + max_part_num - contentLength % max_part_num) / max_part_num; + totalSeq = (contentLength + chunk_size - 1) / chunk_size; + } + + MultipartPartData partData; + memset(&partData, 0, sizeof(MultipartPartData)); + int partContentLength = 0; + + S3MultipartInitialHandler handler = {{&responsePropertiesCallbackNull, &responseCompleteCallback}, + &initial_multipart_callback}; + + S3PutObjectHandler putObjectHandler = {{&MultipartResponseProperiesCallback, &responseCompleteCallback}, + &putObjectDataCallback}; + + S3MultipartCommitHandler commit_handler = { + {&responsePropertiesCallbackNull, &responseCompleteCallback}, &multipartPutXmlCallback, 0}; + + manager.etags = (char **)taosMemoryCalloc(totalSeq, sizeof(char *)); + manager.next_etags_pos = 0; + do { + S3_initiate_multipart(bucket_context, object_name, 0, &handler, 0, timeoutMsG, &manager); + } while (S3_status_is_retryable(manager.status) && should_retry()); + + if (manager.upload_id == 0 || manager.status != S3StatusOK) { + s3PrintError(__FILE__, __LINE__, __func__, manager.status, manager.err_msg); + code = TAOS_SYSTEM_ERROR(EIO); + goto clean; + } + +upload: + todoContentLength -= chunk_size * manager.next_etags_pos; + for (int seq = manager.next_etags_pos + 1; seq <= totalSeq; seq++) { + partData.manager = &manager; + partData.seq = seq; + if (partData.put_object_data.gb == NULL) { + partData.put_object_data = *data; + } + partContentLength = ((contentLength > chunk_size) ? chunk_size : contentLength); + // printf("%s Part Seq %d, length=%d\n", srcSize ? "Copying" : "Sending", seq, partContentLength); + partData.put_object_data.contentLength = partContentLength; + partData.put_object_data.originalContentLength = partContentLength; + partData.put_object_data.totalContentLength = todoContentLength; + partData.put_object_data.totalOriginalContentLength = totalContentLength; + put_prop->md5 = 0; + do { + S3_upload_part(bucket_context, object_name, put_prop, &putObjectHandler, seq, manager.upload_id, + partContentLength, 0, timeoutMsG, &partData); + } while (S3_status_is_retryable(partData.put_object_data.status) && should_retry()); + if (partData.put_object_data.status != S3StatusOK) { + s3PrintError(__FILE__, __LINE__, __func__, partData.put_object_data.status, partData.put_object_data.err_msg); + code = TAOS_SYSTEM_ERROR(EIO); + goto clean; + } + contentLength -= chunk_size; + todoContentLength -= chunk_size; + } + + int i; + int size = 0; + size += growbuffer_append(&(manager.gb), "", strlen("")); + char buf[256]; + int n; + for (i = 0; i < totalSeq; i++) { + if (!manager.etags[i]) { + code = TAOS_SYSTEM_ERROR(EIO); + goto clean; + } + n = snprintf(buf, sizeof(buf), + "%d" + "%s", + i + 1, manager.etags[i]); + size += growbuffer_append(&(manager.gb), buf, n); + } + size += growbuffer_append(&(manager.gb), "", strlen("")); + manager.remaining = size; + + do { + S3_complete_multipart_upload(bucket_context, object_name, &commit_handler, manager.upload_id, manager.remaining, 0, + timeoutMsG, &manager); + } while (S3_status_is_retryable(manager.status) && should_retry()); + if (manager.status != S3StatusOK) { + s3PrintError(__FILE__, __LINE__, __func__, manager.status, manager.err_msg); + code = TAOS_SYSTEM_ERROR(EIO); + goto clean; + } + +clean: + if (manager.upload_id) { + taosMemoryFree(manager.upload_id); + } + for (i = 0; i < manager.next_etags_pos; i++) { + taosMemoryFree(manager.etags[i]); + } + growbuffer_destroy(manager.gb); + taosMemoryFree(manager.etags); + + return code; +} + +static int32_t 
s3PutObjectFromFileWithCp(S3BucketContext *bucket_context, const char *file, int32_t lmtime, + char const *object_name, int64_t contentLength, S3PutProperties *put_prop, + put_object_callback_data *data) { + int32_t code = 0; + + uint64_t totalContentLength = contentLength; + // uint64_t todoContentLength = contentLength; + UploadManager manager = {0}; + + uint64_t chunk_size = MULTIPART_CHUNK_SIZE >> 3; + int totalSeq = (contentLength + chunk_size - 1) / chunk_size; + const int max_part_num = 10000; + if (totalSeq > max_part_num) { + chunk_size = (contentLength + max_part_num - contentLength % max_part_num) / max_part_num; + totalSeq = (contentLength + chunk_size - 1) / chunk_size; + } + + bool need_init_upload = true; + char file_cp_path[TSDB_FILENAME_LEN]; + snprintf(file_cp_path, TSDB_FILENAME_LEN, "%s.cp", file); + + SCheckpoint cp = {0}; + cp.parts = taosMemoryCalloc(max_part_num, sizeof(SCheckpointPart)); + + if (taosCheckExistFile(file_cp_path)) { + if (!cos_cp_load(file_cp_path, &cp) && cos_cp_is_valid_upload(&cp, contentLength, lmtime)) { + manager.upload_id = strdup(cp.upload_id); + need_init_upload = false; + } else { + cos_cp_remove(file_cp_path); + } + } + + if (need_init_upload) { + S3MultipartInitialHandler handler = {{&responsePropertiesCallbackNull, &responseCompleteCallback}, + &initial_multipart_callback}; + do { + S3_initiate_multipart(bucket_context, object_name, 0, &handler, 0, timeoutMsG, &manager); + } while (S3_status_is_retryable(manager.status) && should_retry()); + + if (manager.upload_id == 0 || manager.status != S3StatusOK) { + s3PrintError(__FILE__, __LINE__, __func__, manager.status, manager.err_msg); + code = TAOS_SYSTEM_ERROR(EIO); + goto clean; + } + + cos_cp_build_upload(&cp, file, contentLength, lmtime, manager.upload_id, chunk_size); + } + + if (cos_cp_open(file_cp_path, &cp)) { + code = TAOS_SYSTEM_ERROR(EIO); + goto clean; + } + + int part_num = 0; + int64_t consume_bytes = 0; + // SCheckpointPart *parts = taosMemoryCalloc(cp.part_num, sizeof(SCheckpointPart)); + // cos_cp_get_undo_parts(&cp, &part_num, parts, &consume_bytes); + + MultipartPartData partData; + memset(&partData, 0, sizeof(MultipartPartData)); + int partContentLength = 0; + + S3PutObjectHandler putObjectHandler = {{&MultipartResponseProperiesCallbackWithCp, &responseCompleteCallback}, + &putObjectDataCallback}; + + S3MultipartCommitHandler commit_handler = { + {&responsePropertiesCallbackNull, &responseCompleteCallback}, &multipartPutXmlCallback, 0}; + + manager.etags = (char **)taosMemoryCalloc(totalSeq, sizeof(char *)); + manager.next_etags_pos = 0; + +upload: + // todoContentLength -= chunk_size * manager.next_etags_pos; + for (int i = 0; i < cp.part_num; ++i) { + if (cp.parts[i].completed) { + continue; + } + + if (i > 0 && cp.parts[i - 1].completed) { + if (taosLSeekFile(data->infileFD, cp.parts[i].offset, SEEK_SET) < 0) { + code = TAOS_SYSTEM_ERROR(errno); + goto clean; + } + } + + int seq = cp.parts[i].index + 1; + + partData.manager = &manager; + partData.seq = seq; + if (partData.put_object_data.gb == NULL) { + partData.put_object_data = *data; + } + + partContentLength = cp.parts[i].size; + partData.put_object_data.contentLength = partContentLength; + partData.put_object_data.originalContentLength = partContentLength; + // partData.put_object_data.totalContentLength = todoContentLength; + partData.put_object_data.totalOriginalContentLength = totalContentLength; + put_prop->md5 = 0; + do { + S3_upload_part(bucket_context, object_name, put_prop, &putObjectHandler, seq, 
manager.upload_id, + partContentLength, 0, timeoutMsG, &partData); + } while (S3_status_is_retryable(partData.put_object_data.status) && should_retry()); + if (partData.put_object_data.status != S3StatusOK) { + s3PrintError(__FILE__, __LINE__, __func__, partData.put_object_data.status, partData.put_object_data.err_msg); + code = TAOS_SYSTEM_ERROR(EIO); + + //(void)cos_cp_dump(&cp); + goto clean; + } + + if (!manager.etags[seq - 1]) { + code = TAOS_SYSTEM_ERROR(EIO); + goto clean; + } + + cos_cp_update(&cp, cp.parts[seq - 1].index, manager.etags[seq - 1], 0); + (void)cos_cp_dump(&cp); + + contentLength -= chunk_size; + // todoContentLength -= chunk_size; + } + + cos_cp_close(cp.thefile); + cp.thefile = 0; + + int size = 0; + size += growbuffer_append(&(manager.gb), "", strlen("")); + char buf[256]; + int n; + for (int i = 0; i < cp.part_num; ++i) { + n = snprintf(buf, sizeof(buf), + "%d" + "%s", + // i + 1, manager.etags[i]); + cp.parts[i].index + 1, cp.parts[i].etag); + size += growbuffer_append(&(manager.gb), buf, n); + } + size += growbuffer_append(&(manager.gb), "", strlen("")); + manager.remaining = size; + + do { + S3_complete_multipart_upload(bucket_context, object_name, &commit_handler, manager.upload_id, manager.remaining, 0, + timeoutMsG, &manager); + } while (S3_status_is_retryable(manager.status) && should_retry()); + if (manager.status != S3StatusOK) { + s3PrintError(__FILE__, __LINE__, __func__, manager.status, manager.err_msg); + code = TAOS_SYSTEM_ERROR(EIO); + goto clean; + } + + cos_cp_remove(file_cp_path); + +clean: + /* + if (parts) { + taosMemoryFree(parts); + } + */ + if (cp.thefile) { + cos_cp_close(cp.thefile); + } + if (cp.parts) { + taosMemoryFree(cp.parts); + } + + if (manager.upload_id) { + taosMemoryFree(manager.upload_id); + } + for (int i = 0; i < cp.part_num; ++i) { + if (manager.etags[i]) { + taosMemoryFree(manager.etags[i]); + } + } + taosMemoryFree(manager.etags); + growbuffer_destroy(manager.gb); + + return code; +} + +int32_t s3PutObjectFromFile2(const char *file, const char *object_name, int8_t withcp) { + int32_t code = 0; + int32_t lmtime = 0; + const char *filename = 0; uint64_t contentLength = 0; - const char * cacheControl = 0, *contentType = 0, *md5 = 0; - const char * contentDispositionFilename = 0, *contentEncoding = 0; + const char *cacheControl = 0, *contentType = 0, *md5 = 0; + const char *contentDispositionFilename = 0, *contentEncoding = 0; int64_t expires = -1; S3CannedAcl cannedAcl = S3CannedAclPrivate; int metaPropertiesCount = 0; S3NameValue metaProperties[S3_MAX_METADATA_COUNT]; char useServerSideEncryption = 0; put_object_callback_data data = {0}; - // int noStatus = 0; - // data.infile = 0; - // data.gb = 0; - // data.infileFD = NULL; - // data.noStatus = noStatus; - - // uError("ERROR: %s stat file %s: ", __func__, file); - if (taosStatFile(file, &contentLength, NULL, NULL) < 0) { - uError("ERROR: %s Failed to stat file %s: ", __func__, file); + if (taosStatFile(file, &contentLength, &lmtime, NULL) < 0) { code = TAOS_SYSTEM_ERROR(errno); + uError("ERROR: %s Failed to stat file %s: ", __func__, file); return code; } if (!(data.infileFD = taosOpenFile(file, TD_FILE_READ))) { - uError("ERROR: %s Failed to open file %s: ", __func__, file); code = TAOS_SYSTEM_ERROR(errno); + uError("ERROR: %s Failed to open file %s: ", __func__, file); return code; } @@ -493,143 +812,13 @@ int32_t s3PutObjectFromFile2(const char *file, const char *object) { metaProperties, useServerSideEncryption}; if (contentLength <= MULTIPART_CHUNK_SIZE) { - 
S3PutObjectHandler putObjectHandler = {{&responsePropertiesCallbackNull, &responseCompleteCallback}, - &putObjectDataCallback}; - - do { - S3_put_object(&bucketContext, key, contentLength, &putProperties, 0, 0, &putObjectHandler, &data); - } while (S3_status_is_retryable(data.status) && should_retry()); - - if (data.status != S3StatusOK) { - s3PrintError(__FILE__, __LINE__, __func__, data.status, data.err_msg); - code = TAOS_SYSTEM_ERROR(EIO); - } else if (data.contentLength) { - uError("ERROR: %s Failed to read remaining %llu bytes from input", __func__, - (unsigned long long)data.contentLength); - code = TAOS_SYSTEM_ERROR(EIO); - } + code = s3PutObjectFromFileSimple(&bucketContext, object_name, contentLength, &putProperties, &data); } else { - uint64_t totalContentLength = contentLength; - uint64_t todoContentLength = contentLength; - UploadManager manager = {0}; - // manager.upload_id = 0; - // manager.gb = 0; - - // div round up - int seq; - uint64_t chunk_size = MULTIPART_CHUNK_SIZE >> 3; - int totalSeq = (contentLength + chunk_size - 1) / chunk_size; - const int max_part_num = 10000; - if (totalSeq > max_part_num) { - chunk_size = (contentLength + max_part_num - contentLength % max_part_num) / max_part_num; - totalSeq = (contentLength + chunk_size - 1) / chunk_size; + if (withcp) { + code = s3PutObjectFromFileWithCp(&bucketContext, file, lmtime, object_name, contentLength, &putProperties, &data); + } else { + code = s3PutObjectFromFileWithoutCp(&bucketContext, object_name, contentLength, &putProperties, &data); } - - MultipartPartData partData; - memset(&partData, 0, sizeof(MultipartPartData)); - int partContentLength = 0; - - S3MultipartInitialHandler handler = {{&responsePropertiesCallbackNull, &responseCompleteCallback}, - &initial_multipart_callback}; - - S3PutObjectHandler putObjectHandler = {{&MultipartResponseProperiesCallback, &responseCompleteCallback}, - &putObjectDataCallback}; - - S3MultipartCommitHandler commit_handler = { - {&responsePropertiesCallbackNull, &responseCompleteCallback}, &multipartPutXmlCallback, 0}; - - manager.etags = (char **)taosMemoryCalloc(totalSeq, sizeof(char *)); - manager.next_etags_pos = 0; - /* - if (uploadId) { - manager.upload_id = strdup(uploadId); - manager.remaining = contentLength; - if (!try_get_parts_info(tsS3BucketName, key, &manager)) { - fseek(data.infile, -(manager.remaining), 2); - taosLSeekFile(data.infileFD, -(manager.remaining), SEEK_END); - contentLength = manager.remaining; - goto upload; - } else { - goto clean; - } - } - */ - do { - S3_initiate_multipart(&bucketContext, key, 0, &handler, 0, timeoutMsG, &manager); - } while (S3_status_is_retryable(manager.status) && should_retry()); - - if (manager.upload_id == 0 || manager.status != S3StatusOK) { - s3PrintError(__FILE__, __LINE__, __func__, manager.status, manager.err_msg); - code = TAOS_SYSTEM_ERROR(EIO); - goto clean; - } - - upload: - todoContentLength -= chunk_size * manager.next_etags_pos; - for (seq = manager.next_etags_pos + 1; seq <= totalSeq; seq++) { - partData.manager = &manager; - partData.seq = seq; - if (partData.put_object_data.gb == NULL) { - partData.put_object_data = data; - } - partContentLength = ((contentLength > chunk_size) ? chunk_size : contentLength); - // printf("%s Part Seq %d, length=%d\n", srcSize ? 
"Copying" : "Sending", seq, partContentLength); - partData.put_object_data.contentLength = partContentLength; - partData.put_object_data.originalContentLength = partContentLength; - partData.put_object_data.totalContentLength = todoContentLength; - partData.put_object_data.totalOriginalContentLength = totalContentLength; - putProperties.md5 = 0; - do { - S3_upload_part(&bucketContext, key, &putProperties, &putObjectHandler, seq, manager.upload_id, - partContentLength, 0, timeoutMsG, &partData); - } while (S3_status_is_retryable(partData.put_object_data.status) && should_retry()); - if (partData.put_object_data.status != S3StatusOK) { - s3PrintError(__FILE__, __LINE__, __func__, partData.put_object_data.status, partData.put_object_data.err_msg); - code = TAOS_SYSTEM_ERROR(EIO); - goto clean; - } - contentLength -= chunk_size; - todoContentLength -= chunk_size; - } - - int i; - int size = 0; - size += growbuffer_append(&(manager.gb), "", strlen("")); - char buf[256]; - int n; - for (i = 0; i < totalSeq; i++) { - if (!manager.etags[i]) { - code = TAOS_SYSTEM_ERROR(EIO); - goto clean; - } - n = snprintf(buf, sizeof(buf), - "%d" - "%s", - i + 1, manager.etags[i]); - size += growbuffer_append(&(manager.gb), buf, n); - } - size += growbuffer_append(&(manager.gb), "", strlen("")); - manager.remaining = size; - - do { - S3_complete_multipart_upload(&bucketContext, key, &commit_handler, manager.upload_id, manager.remaining, 0, - timeoutMsG, &manager); - } while (S3_status_is_retryable(manager.status) && should_retry()); - if (manager.status != S3StatusOK) { - s3PrintError(__FILE__, __LINE__, __func__, manager.status, manager.err_msg); - code = TAOS_SYSTEM_ERROR(EIO); - goto clean; - } - - clean: - if (manager.upload_id) { - taosMemoryFree(manager.upload_id); - } - for (i = 0; i < manager.next_etags_pos; i++) { - taosMemoryFree(manager.etags[i]); - } - growbuffer_destroy(manager.gb); - taosMemoryFree(manager.etags); } if (data.infileFD) { @@ -648,7 +837,7 @@ typedef struct list_bucket_callback_data { char nextMarker[1024]; int keyCount; int allDetails; - SArray * objectArray; + SArray *objectArray; } list_bucket_callback_data; static S3Status listBucketCallback(int isTruncated, const char *nextMarker, int contentsCount, @@ -693,11 +882,11 @@ static void s3FreeObjectKey(void *pItem) { static SArray *getListByPrefix(const char *prefix) { S3BucketContext bucketContext = {0, tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId, tsS3AccessKeySecret, - 0, awsRegionG}; + 0, awsRegionG}; S3ListBucketHandler listBucketHandler = {{&responsePropertiesCallbackNull, &responseCompleteCallback}, &listBucketCallback}; - const char * marker = 0, *delimiter = 0; + const char *marker = 0, *delimiter = 0; int maxkeys = 0, allDetails = 0; list_bucket_callback_data data; data.objectArray = taosArrayInit(32, sizeof(void *)); @@ -738,7 +927,7 @@ static SArray *getListByPrefix(const char *prefix) { void s3DeleteObjects(const char *object_name[], int nobject) { S3BucketContext bucketContext = {0, tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId, tsS3AccessKeySecret, - 0, awsRegionG}; + 0, awsRegionG}; S3ResponseHandler responseHandler = {0, &responseCompleteCallback}; for (int i = 0; i < nobject; ++i) { @@ -789,7 +978,7 @@ int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t size, const char *ifMatch = 0, *ifNotMatch = 0; S3BucketContext bucketContext = {0, tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId, tsS3AccessKeySecret, - 0, awsRegionG}; + 0, awsRegionG}; S3GetConditions getConditions = 
{ifModifiedSince, ifNotModifiedSince, ifMatch, ifNotMatch}; S3GetObjectHandler getObjectHandler = {{&responsePropertiesCallback, &responseCompleteCallback}, &getObjectDataCallback}; @@ -827,7 +1016,7 @@ int32_t s3GetObjectToFile(const char *object_name, char *fileName) { const char *ifMatch = 0, *ifNotMatch = 0; S3BucketContext bucketContext = {0, tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId, tsS3AccessKeySecret, - 0, awsRegionG}; + 0, awsRegionG}; S3GetConditions getConditions = {ifModifiedSince, ifNotModifiedSince, ifMatch, ifNotMatch}; S3GetObjectHandler getObjectHandler = {{&responsePropertiesCallbackNull, &responseCompleteCallback}, &getObjectCallback}; @@ -858,7 +1047,7 @@ int32_t s3GetObjectsByPrefix(const char *prefix, const char *path) { if (objectArray == NULL) return -1; for (size_t i = 0; i < taosArrayGetSize(objectArray); i++) { - char * object = taosArrayGetP(objectArray, i); + char *object = taosArrayGetP(objectArray, i); const char *tmp = strchr(object, '/'); tmp = (tmp == NULL) ? object : tmp + 1; char fileName[PATH_MAX] = {0}; @@ -949,12 +1138,12 @@ static void s3InitRequestOptions(cos_request_options_t *options, int is_cname) { int32_t s3PutObjectFromFile(const char *file_str, const char *object_str) { int32_t code = 0; - cos_pool_t * p = NULL; + cos_pool_t *p = NULL; int is_cname = 0; - cos_status_t * s = NULL; + cos_status_t *s = NULL; cos_request_options_t *options = NULL; cos_string_t bucket, object, file; - cos_table_t * resp_headers; + cos_table_t *resp_headers; // int traffic_limit = 0; cos_pool_create(&p, NULL); @@ -983,18 +1172,19 @@ int32_t s3PutObjectFromFile(const char *file_str, const char *object_str) { return code; } -int32_t s3PutObjectFromFile2(const char *file_str, const char *object_str) { +int32_t s3PutObjectFromFile2(const char *file_str, const char *object_str, int8_t withcp) { int32_t code = 0; - cos_pool_t * p = NULL; + cos_pool_t *p = NULL; int is_cname = 0; - cos_status_t * s = NULL; - cos_request_options_t * options = NULL; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; cos_string_t bucket, object, file; - cos_table_t * resp_headers; + cos_table_t *resp_headers; int traffic_limit = 0; - cos_table_t * headers = NULL; + cos_table_t *headers = NULL; cos_resumable_clt_params_t *clt_params = NULL; + (void)withcp; cos_pool_create(&p, NULL); options = cos_request_options_create(p); s3InitRequestOptions(options, is_cname); @@ -1025,11 +1215,11 @@ int32_t s3PutObjectFromFile2(const char *file_str, const char *object_str) { } void s3DeleteObjectsByPrefix(const char *prefix_str) { - cos_pool_t * p = NULL; + cos_pool_t *p = NULL; cos_request_options_t *options = NULL; int is_cname = 0; cos_string_t bucket; - cos_status_t * s = NULL; + cos_status_t *s = NULL; cos_string_t prefix; cos_pool_create(&p, NULL); @@ -1044,10 +1234,10 @@ void s3DeleteObjectsByPrefix(const char *prefix_str) { } void s3DeleteObjects(const char *object_name[], int nobject) { - cos_pool_t * p = NULL; + cos_pool_t *p = NULL; int is_cname = 0; cos_string_t bucket; - cos_table_t * resp_headers = NULL; + cos_table_t *resp_headers = NULL; cos_request_options_t *options = NULL; cos_list_t object_list; cos_list_t deleted_object_list; @@ -1081,14 +1271,14 @@ void s3DeleteObjects(const char *object_name[], int nobject) { bool s3Exists(const char *object_name) { bool ret = false; - cos_pool_t * p = NULL; + cos_pool_t *p = NULL; int is_cname = 0; - cos_status_t * s = NULL; - cos_request_options_t * options = NULL; + cos_status_t *s = NULL; + cos_request_options_t 
*options = NULL; cos_string_t bucket; cos_string_t object; - cos_table_t * resp_headers; - cos_table_t * headers = NULL; + cos_table_t *resp_headers; + cos_table_t *headers = NULL; cos_object_exist_status_e object_exist; cos_pool_create(&p, NULL); @@ -1115,15 +1305,15 @@ bool s3Exists(const char *object_name) { bool s3Get(const char *object_name, const char *path) { bool ret = false; - cos_pool_t * p = NULL; + cos_pool_t *p = NULL; int is_cname = 0; - cos_status_t * s = NULL; + cos_status_t *s = NULL; cos_request_options_t *options = NULL; cos_string_t bucket; cos_string_t object; cos_string_t file; - cos_table_t * resp_headers = NULL; - cos_table_t * headers = NULL; + cos_table_t *resp_headers = NULL; + cos_table_t *headers = NULL; int traffic_limit = 0; // create a memory pool @@ -1159,15 +1349,15 @@ bool s3Get(const char *object_name, const char *path) { int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t block_size, bool check, uint8_t **ppBlock) { (void)check; int32_t code = 0; - cos_pool_t * p = NULL; + cos_pool_t *p = NULL; int is_cname = 0; - cos_status_t * s = NULL; + cos_status_t *s = NULL; cos_request_options_t *options = NULL; cos_string_t bucket; cos_string_t object; - cos_table_t * resp_headers; - cos_table_t * headers = NULL; - cos_buf_t * content = NULL; + cos_table_t *resp_headers; + cos_table_t *headers = NULL; + cos_buf_t *content = NULL; // cos_string_t file; // int traffic_limit = 0; char range_buf[64]; @@ -1261,7 +1451,7 @@ void s3EvictCache(const char *path, long object_size) { terrno = TAOS_SYSTEM_ERROR(errno); vError("failed to open %s since %s", dir_name, terrstr()); } - SArray * evict_files = taosArrayInit(16, sizeof(SEvictFile)); + SArray *evict_files = taosArrayInit(16, sizeof(SEvictFile)); tdbDirEntryPtr pDirEntry; while ((pDirEntry = taosReadDir(pDir)) != NULL) { char *name = taosGetDirEntryName(pDirEntry); @@ -1303,13 +1493,13 @@ void s3EvictCache(const char *path, long object_size) { long s3Size(const char *object_name) { long size = 0; - cos_pool_t * p = NULL; + cos_pool_t *p = NULL; int is_cname = 0; - cos_status_t * s = NULL; + cos_status_t *s = NULL; cos_request_options_t *options = NULL; cos_string_t bucket; cos_string_t object; - cos_table_t * resp_headers = NULL; + cos_table_t *resp_headers = NULL; // create a memory pool cos_pool_create(&p, NULL); @@ -1344,7 +1534,7 @@ long s3Size(const char *object_name) { int32_t s3Init() { return 0; } void s3CleanUp() {} int32_t s3PutObjectFromFile(const char *file, const char *object) { return 0; } -int32_t s3PutObjectFromFile2(const char *file, const char *object) { return 0; } +int32_t s3PutObjectFromFile2(const char *file, const char *object, int8_t withcp) { return 0; } void s3DeleteObjectsByPrefix(const char *prefix) {} void s3DeleteObjects(const char *object_name[], int nobject) {} bool s3Exists(const char *object_name) { return false; } diff --git a/source/common/src/cos_cp.c b/source/common/src/cos_cp.c new file mode 100644 index 0000000000..6d37b4d4dc --- /dev/null +++ b/source/common/src/cos_cp.c @@ -0,0 +1,417 @@ +#define ALLOW_FORBID_FUNC + +#include "cos_cp.h" +#include "cJSON.h" +#include "tutil.h" + +int32_t cos_cp_open(char const* cp_path, SCheckpoint* checkpoint) { + int32_t code = 0; + + TdFilePtr fd = taosOpenFile(cp_path, TD_FILE_WRITE | TD_FILE_CREATE /* | TD_FILE_TRUNC*/ | TD_FILE_WRITE_THROUGH); + if (!fd) { + code = TAOS_SYSTEM_ERROR(errno); + uError("ERROR: %s Failed to open %s", __func__, cp_path); + return code; + } + + checkpoint->thefile = fd; + + return code; +} + +void cos_cp_close(TdFilePtr 
fd) { taosCloseFile(&fd); } +void cos_cp_remove(char const* filepath) { taosRemoveFile(filepath); } + +static int32_t cos_cp_parse_body(char* cp_body, SCheckpoint* cp) { + int32_t code = 0; + cJSON const* item2 = NULL; + + cJSON* json = cJSON_Parse(cp_body); + if (NULL == json) { + code = TSDB_CODE_FILE_CORRUPTED; + uError("ERROR: %s Failed to parse json", __func__); + goto _exit; + } + + cJSON const* item = cJSON_GetObjectItem(json, "ver"); + if (!cJSON_IsNumber(item) || item->valuedouble != 1) { + code = TSDB_CODE_FILE_CORRUPTED; + uError("ERROR: %s Failed to parse json ver: %f", __func__, item->valuedouble); + goto _exit; + } + + item = cJSON_GetObjectItem(json, "type"); + if (!cJSON_IsNumber(item)) { + code = TSDB_CODE_FILE_CORRUPTED; + uError("ERROR: %s Failed to parse json", __func__); + goto _exit; + } + cp->cp_type = item->valuedouble; + + item = cJSON_GetObjectItem(json, "md5"); + if (cJSON_IsString(item)) { + memcpy(cp->md5, item->valuestring, strlen(item->valuestring)); + } + + item = cJSON_GetObjectItem(json, "upload_id"); + if (cJSON_IsString(item)) { + strncpy(cp->upload_id, item->valuestring, 128); + } + + item2 = cJSON_GetObjectItem(json, "file"); + if (cJSON_IsObject(item2)) { + item = cJSON_GetObjectItem(item2, "size"); + if (cJSON_IsNumber(item)) { + cp->file_size = item->valuedouble; + } + + item = cJSON_GetObjectItem(item2, "lastmodified"); + if (cJSON_IsNumber(item)) { + cp->file_last_modified = item->valuedouble; + } + + item = cJSON_GetObjectItem(item2, "path"); + if (cJSON_IsString(item)) { + strncpy(cp->file_path, item->valuestring, TSDB_FILENAME_LEN); + } + + item = cJSON_GetObjectItem(item2, "file_md5"); + if (cJSON_IsString(item)) { + strncpy(cp->file_md5, item->valuestring, 64); + } + } + + item2 = cJSON_GetObjectItem(json, "object"); + if (cJSON_IsObject(item2)) { + item = cJSON_GetObjectItem(item2, "object_size"); + if (cJSON_IsNumber(item)) { + cp->object_size = item->valuedouble; + } + + item = cJSON_GetObjectItem(item2, "object_name"); + if (cJSON_IsString(item)) { + strncpy(cp->object_name, item->valuestring, 128); + } + + item = cJSON_GetObjectItem(item2, "object_last_modified"); + if (cJSON_IsString(item)) { + strncpy(cp->object_last_modified, item->valuestring, 64); + } + + item = cJSON_GetObjectItem(item2, "object_etag"); + if (cJSON_IsString(item)) { + strncpy(cp->object_etag, item->valuestring, 128); + } + } + + item2 = cJSON_GetObjectItem(json, "cpparts"); + if (cJSON_IsObject(item2)) { + item = cJSON_GetObjectItem(item2, "number"); + if (cJSON_IsNumber(item)) { + cp->part_num = item->valuedouble; + } + + item = cJSON_GetObjectItem(item2, "size"); + if (cJSON_IsNumber(item)) { + cp->part_size = item->valuedouble; + } + + item2 = cJSON_GetObjectItem(item2, "parts"); + if (cJSON_IsArray(item2) && cp->part_num > 0) { + cJSON_ArrayForEach(item, item2) { + cJSON const* item3 = cJSON_GetObjectItem(item, "index"); + int32_t index = 0; + if (cJSON_IsNumber(item3)) { + index = item3->valuedouble; + cp->parts[index].index = index; + } + + item3 = cJSON_GetObjectItem(item, "offset"); + if (cJSON_IsNumber(item3)) { + cp->parts[index].offset = item3->valuedouble; + } + + item3 = cJSON_GetObjectItem(item, "size"); + if (cJSON_IsNumber(item3)) { + cp->parts[index].size = item3->valuedouble; + } + + item3 = cJSON_GetObjectItem(item, "completed"); + if (cJSON_IsNumber(item3)) { + cp->parts[index].completed = item3->valuedouble; + } + + item3 = cJSON_GetObjectItem(item, "crc64"); + if (cJSON_IsNumber(item3)) { + cp->parts[index].crc64 = item3->valuedouble; + } + + 
item3 = cJSON_GetObjectItem(item, "etag"); + if (cJSON_IsString(item3)) { + strncpy(cp->parts[index].etag, item3->valuestring, 128); + } + } + } + } + +_exit: + if (json) cJSON_Delete(json); + if (cp_body) taosMemoryFree(cp_body); + + return code; +} + +int32_t cos_cp_load(char const* filepath, SCheckpoint* checkpoint) { + int32_t code = 0; + + TdFilePtr fd = taosOpenFile(filepath, TD_FILE_READ); + if (!fd) { + code = TAOS_SYSTEM_ERROR(errno); + uError("ERROR: %s Failed to open %s", __func__, filepath); + goto _exit; + } + + int64_t size = -1; + code = taosStatFile(filepath, &size, NULL, NULL); + if (code) { + uError("ERROR: %s Failed to stat %s", __func__, filepath); + goto _exit; + } + + char* cp_body = taosMemoryMalloc(size + 1); + + int64_t n = taosReadFile(fd, cp_body, size); + if (n < 0) { + code = TAOS_SYSTEM_ERROR(errno); + uError("ERROR: %s Failed to read %s", __func__, filepath); + goto _exit; + } else if (n != size) { + code = TSDB_CODE_FILE_CORRUPTED; + uError("ERROR: %s Failed to read %s %" PRId64 "/%" PRId64, __func__, filepath, n, size); + goto _exit; + } + taosCloseFile(&fd); + cp_body[size] = '\0'; + + return cos_cp_parse_body(cp_body, checkpoint); + +_exit: + if (fd) { + taosCloseFile(&fd); + } + if (cp_body) { + taosMemoryFree(cp_body); + } + + return code; +} + +static int32_t cos_cp_save_json(cJSON const* json, SCheckpoint* checkpoint) { + int32_t code = 0; + + char* data = cJSON_PrintUnformatted(json); + if (NULL == data) { + return TSDB_CODE_OUT_OF_MEMORY; + } + + TdFilePtr fp = checkpoint->thefile; + if (taosFtruncateFile(fp, 0) < 0) { + code = TAOS_SYSTEM_ERROR(errno); + goto _exit; + } + if (taosLSeekFile(fp, 0, SEEK_SET) < 0) { + code = TAOS_SYSTEM_ERROR(errno); + goto _exit; + } + if (taosWriteFile(fp, data, strlen(data)) < 0) { + code = TAOS_SYSTEM_ERROR(errno); + goto _exit; + } + + if (taosFsyncFile(fp) < 0) { + code = TAOS_SYSTEM_ERROR(errno); + goto _exit; + } + +_exit: + taosMemoryFree(data); + return code; +} + +int32_t cos_cp_dump(SCheckpoint* cp) { + int32_t code = 0; + int32_t lino = 0; + + cJSON* ojson = NULL; + cJSON* json = cJSON_CreateObject(); + if (!json) return TSDB_CODE_OUT_OF_MEMORY; + + if (NULL == cJSON_AddNumberToObject(json, "ver", 1)) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + + if (NULL == cJSON_AddNumberToObject(json, "type", cp->cp_type)) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + + if (NULL == cJSON_AddStringToObject(json, "md5", cp->md5)) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + + if (NULL == cJSON_AddStringToObject(json, "upload_id", cp->upload_id)) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + + if (COS_CP_TYPE_UPLOAD == cp->cp_type) { + ojson = cJSON_AddObjectToObject(json, "file"); + if (!ojson) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + if (NULL == cJSON_AddNumberToObject(ojson, "size", cp->file_size)) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + if (NULL == cJSON_AddNumberToObject(ojson, "lastmodified", cp->file_last_modified)) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + if (NULL == cJSON_AddStringToObject(ojson, "path", cp->file_path)) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + if (NULL == cJSON_AddStringToObject(ojson, "file_md5", cp->file_md5)) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + } else if 
(COS_CP_TYPE_DOWNLOAD == cp->cp_type) { + ojson = cJSON_AddObjectToObject(json, "object"); + if (!ojson) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + if (NULL == cJSON_AddNumberToObject(ojson, "object_size", cp->object_size)) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + if (NULL == cJSON_AddStringToObject(ojson, "object_name", cp->object_name)) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + if (NULL == cJSON_AddStringToObject(ojson, "object_last_modified", cp->object_last_modified)) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + if (NULL == cJSON_AddStringToObject(ojson, "object_etag", cp->object_etag)) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + } + + ojson = cJSON_AddObjectToObject(json, "cpparts"); + if (!ojson) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + if (NULL == cJSON_AddNumberToObject(ojson, "number", cp->part_num)) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + if (NULL == cJSON_AddNumberToObject(ojson, "size", cp->part_size)) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + + cJSON* ajson = cJSON_AddArrayToObject(ojson, "parts"); + if (!ajson) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + for (int i = 0; i < cp->part_num; ++i) { + cJSON* item = cJSON_CreateObject(); + if (!item) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + cJSON_AddItemToArray(ajson, item); + + if (NULL == cJSON_AddNumberToObject(item, "index", cp->parts[i].index)) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + if (NULL == cJSON_AddNumberToObject(item, "offset", cp->parts[i].offset)) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + if (NULL == cJSON_AddNumberToObject(item, "size", cp->parts[i].size)) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + if (NULL == cJSON_AddNumberToObject(item, "completed", cp->parts[i].completed)) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + if (NULL == cJSON_AddNumberToObject(item, "crc64", cp->parts[i].crc64)) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + if (NULL == cJSON_AddStringToObject(item, "etag", cp->parts[i].etag)) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + } + + code = cos_cp_save_json(json, cp); + TSDB_CHECK_CODE(code, lino, _exit); + +_exit: + if (code) { + uError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + cJSON_Delete(json); + return code; +} + +void cos_cp_get_undo_parts(SCheckpoint* checkpoint, int* part_num, SCheckpointPart* parts, int64_t* consume_bytes) {} + +void cos_cp_update(SCheckpoint* checkpoint, int32_t part_index, char const* etag, uint64_t crc64) { + checkpoint->parts[part_index].completed = 1; + strncpy(checkpoint->parts[part_index].etag, etag, 128); + checkpoint->parts[part_index].crc64 = crc64; +} + +void cos_cp_build_upload(SCheckpoint* checkpoint, char const* filepath, int64_t size, int32_t mtime, + char const* upload_id, int64_t part_size) { + int i = 0; + + checkpoint->cp_type = COS_CP_TYPE_UPLOAD; + strncpy(checkpoint->file_path, filepath, TSDB_FILENAME_LEN); + + checkpoint->file_size = size; + checkpoint->file_last_modified = mtime; + strncpy(checkpoint->upload_id, upload_id, 128); + 
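+  // Lay out every part up front: part i covers bytes [i * part_size, i * part_size + parts[i].size), + // with the last part taking the remainder, so part_num = ceil(size / part_size).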
+ checkpoint->part_size = part_size; + for (; i * part_size < size; i++) { + checkpoint->parts[i].index = i; + checkpoint->parts[i].offset = i * part_size; + checkpoint->parts[i].size = TMIN(part_size, (size - i * part_size)); + checkpoint->parts[i].completed = 0; + checkpoint->parts[i].etag[0] = '\0'; + } + checkpoint->part_num = i; +} + +static bool cos_cp_verify_md5(SCheckpoint* cp) { return true; } + +bool cos_cp_is_valid_upload(SCheckpoint* checkpoint, int64_t size, int32_t mtime) { + if (cos_cp_verify_md5(checkpoint) && checkpoint->file_size == size && checkpoint->file_last_modified == mtime) { + return true; + } + + return false; +} diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 39cd2d604b..798916d3b5 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -95,8 +95,9 @@ int32_t tsMonitorMaxLogs = 100; bool tsMonitorComp = false; // audit -bool tsEnableAudit = true; -bool tsEnableAuditCreateTable = true; +bool tsEnableAudit = true; +bool tsEnableAuditCreateTable = true; +int32_t tsAuditInterval = 5000; // telem #ifdef TD_ENTERPRISE @@ -686,6 +687,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddBool(pCfg, "audit", tsEnableAudit, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER) != 0) return -1; if (cfgAddBool(pCfg, "auditCreateTable", tsEnableAuditCreateTable, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0) return -1; + if (cfgAddInt32(pCfg, "auditInterval", tsAuditInterval, 500, 200000, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0) + return -1; if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, CFG_SCOPE_BOTH, CFG_DYN_NONE) != 0) return -1; if (cfgAddBool(pCfg, "telemetryReporting", tsEnableTelem, CFG_SCOPE_BOTH, CFG_DYN_ENT_SERVER) != 0) return -1; @@ -1137,6 +1140,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsEnableAudit = cfgGetItem(pCfg, "audit")->bval; tsEnableAuditCreateTable = cfgGetItem(pCfg, "auditCreateTable")->bval; + tsAuditInterval = cfgGetItem(pCfg, "auditInterval")->i32; tsEnableTelem = cfgGetItem(pCfg, "telemetryReporting")->bval; tsEnableCrashReport = cfgGetItem(pCfg, "crashReporting")->bval; diff --git a/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h b/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h index 9e43c2af47..80502e2662 100644 --- a/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h +++ b/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h @@ -30,12 +30,14 @@ typedef struct SDnodeMgmt { TdThread statusThread; TdThread notifyThread; TdThread monitorThread; + TdThread auditThread; TdThread crashReportThread; SSingleWorker mgmtWorker; ProcessCreateNodeFp processCreateNodeFp; ProcessAlterNodeTypeFp processAlterNodeTypeFp; ProcessDropNodeFp processDropNodeFp; SendMonitorReportFp sendMonitorReportFp; + SendAuditRecordsFp sendAuditRecordsFp; GetVnodeLoadsFp getVnodeLoadsFp; GetVnodeLoadsFp getVnodeLoadsLiteFp; GetMnodeLoadsFp getMnodeLoadsFp; @@ -62,7 +64,9 @@ void dmStopStatusThread(SDnodeMgmt *pMgmt); int32_t dmStartNotifyThread(SDnodeMgmt *pMgmt); void dmStopNotifyThread(SDnodeMgmt *pMgmt); int32_t dmStartMonitorThread(SDnodeMgmt *pMgmt); +int32_t dmStartAuditThread(SDnodeMgmt *pMgmt); void dmStopMonitorThread(SDnodeMgmt *pMgmt); +void dmStopAuditThread(SDnodeMgmt *pMgmt); int32_t dmStartCrashReportThread(SDnodeMgmt *pMgmt); void dmStopCrashReportThread(SDnodeMgmt *pMgmt); int32_t dmStartWorker(SDnodeMgmt *pMgmt); diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmInt.c b/source/dnode/mgmt/mgmt_dnode/src/dmInt.c index 4bd32cac20..b9dd45f1c0 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmInt.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmInt.c 
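The `auditInterval` option registered in `tglobal.c` above is a server-scope setting (default 5000 ms, accepted range 500 to 200000 ms, not reloadable at runtime) that paces the dedicated audit-flush thread added in `dmWorker.c` below. An illustrative `taos.cfg` fragment (values are hypothetical):

```text
# batch-send audit records every 10 seconds (default: 5000 ms)
audit          1
auditInterval  10000
```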
@@ -29,6 +29,9 @@ static int32_t dmStartMgmt(SDnodeMgmt *pMgmt) { if (dmStartMonitorThread(pMgmt) != 0) { return -1; } + if (dmStartAuditThread(pMgmt) != 0) { + return -1; + } if (dmStartCrashReportThread(pMgmt) != 0) { return -1; } @@ -38,6 +41,7 @@ static int32_t dmStartMgmt(SDnodeMgmt *pMgmt) { static void dmStopMgmt(SDnodeMgmt *pMgmt) { pMgmt->pData->stopped = true; dmStopMonitorThread(pMgmt); + dmStopAuditThread(pMgmt); dmStopStatusThread(pMgmt); #if defined(TD_ENTERPRISE) dmStopNotifyThread(pMgmt); @@ -60,6 +64,7 @@ static int32_t dmOpenMgmt(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { pMgmt->processAlterNodeTypeFp = pInput->processAlterNodeTypeFp; pMgmt->processDropNodeFp = pInput->processDropNodeFp; pMgmt->sendMonitorReportFp = pInput->sendMonitorReportFp; + pMgmt->sendAuditRecordsFp = pInput->sendAuditRecordFp; pMgmt->getVnodeLoadsFp = pInput->getVnodeLoadsFp; pMgmt->getVnodeLoadsLiteFp = pInput->getVnodeLoadsLiteFp; pMgmt->getMnodeLoadsFp = pInput->getMnodeLoadsFp; diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmWorker.c b/source/dnode/mgmt/mgmt_dnode/src/dmWorker.c index d6bdaf51bc..af43804db4 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmWorker.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmWorker.c @@ -99,6 +99,27 @@ static void *dmMonitorThreadFp(void *param) { return NULL; } +static void *dmAuditThreadFp(void *param) { + SDnodeMgmt *pMgmt = param; + int64_t lastTime = taosGetTimestampMs(); + setThreadName("dnode-audit"); + + while (1) { + taosMsleep(100); + if (pMgmt->pData->dropped || pMgmt->pData->stopped) break; + + int64_t curTime = taosGetTimestampMs(); + if (curTime < lastTime) lastTime = curTime; + float interval = curTime - lastTime; + if (interval >= tsAuditInterval) { + (*pMgmt->sendAuditRecordsFp)(); + lastTime = curTime; + } + } + + return NULL; +} + static void *dmCrashReportThreadFp(void *param) { SDnodeMgmt *pMgmt = param; int64_t lastTime = taosGetTimestampMs(); @@ -218,6 +239,20 @@ int32_t dmStartMonitorThread(SDnodeMgmt *pMgmt) { return 0; } +int32_t dmStartAuditThread(SDnodeMgmt *pMgmt) { + TdThreadAttr thAttr; + taosThreadAttrInit(&thAttr); + taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_JOINABLE); + if (taosThreadCreate(&pMgmt->auditThread, &thAttr, dmAuditThreadFp, pMgmt) != 0) { + dError("failed to create audit thread since %s", strerror(errno)); + return -1; + } + + taosThreadAttrDestroy(&thAttr); + tmsgReportStartup("dnode-audit", "initialized"); + return 0; +} + void dmStopMonitorThread(SDnodeMgmt *pMgmt) { if (taosCheckPthreadValid(pMgmt->monitorThread)) { taosThreadJoin(pMgmt->monitorThread, NULL); @@ -225,6 +260,13 @@ void dmStopMonitorThread(SDnodeMgmt *pMgmt) { } } +void dmStopAuditThread(SDnodeMgmt *pMgmt) { + if (taosCheckPthreadValid(pMgmt->auditThread)) { + taosThreadJoin(pMgmt->auditThread, NULL); + taosThreadClear(&pMgmt->auditThread); + } +} + int32_t dmStartCrashReportThread(SDnodeMgmt *pMgmt) { if (!tsEnableCrashReport) { return 0; diff --git a/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h b/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h index 36097438a2..a2326138b6 100644 --- a/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h +++ b/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h @@ -70,6 +70,21 @@ typedef struct SUdfdData { int32_t dnodeId; } SUdfdData; +#ifndef TD_MODULE_OPTIMIZE +typedef struct SDnode { + int8_t once; + bool stop; + EDndRunStatus status; + SStartupInfo startup; + SDnodeData data; + SUdfdData udfdData; + TdThreadMutex mutex; + TdFilePtr lockfile; + STfs *pTfs; + SMgmtWrapper wrappers[NODE_END]; + SDnodeTrans trans; +} SDnode; +#else 
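+// when TD_MODULE_OPTIMIZE is defined, the pre-existing SDnode layout below (without the embedded SDnodeTrans) is kept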
typedef struct SDnode { int8_t once; bool stop; @@ -83,6 +98,7 @@ typedef struct SDnode { STfs *pTfs; SMgmtWrapper wrappers[NODE_END]; } SDnode; +#endif // dmEnv.c SDnode *dmInstance(); @@ -124,6 +140,7 @@ int32_t dmProcessNodeMsg(SMgmtWrapper *pWrapper, SRpcMsg *pMsg); // dmMonitor.c void dmSendMonitorReport(); +void dmSendAuditRecords(); void dmGetVnodeLoads(SMonVloadInfo *pInfo); void dmGetVnodeLoadsLite(SMonVloadInfo *pInfo); void dmGetMnodeLoads(SMonMloadInfo *pInfo); diff --git a/source/dnode/mgmt/node_mgmt/src/dmEnv.c b/source/dnode/mgmt/node_mgmt/src/dmEnv.c index e0503c83c6..f9bba19fbb 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmEnv.c +++ b/source/dnode/mgmt/node_mgmt/src/dmEnv.c @@ -189,6 +189,7 @@ void dmCleanup() { if (dmCheckRepeatCleanup(pDnode) != 0) return; dmCleanupDnode(pDnode); monCleanup(); + auditCleanup(); syncCleanUp(); walCleanUp(); udfcClose(); @@ -396,6 +397,7 @@ SMgmtInputOpt dmBuildMgmtInputOpt(SMgmtWrapper *pWrapper) { .processAlterNodeTypeFp = dmProcessAlterNodeTypeReq, .processDropNodeFp = dmProcessDropNodeReq, .sendMonitorReportFp = dmSendMonitorReport, + .sendAuditRecordFp = auditSendRecordsInBatch, .getVnodeLoadsFp = dmGetVnodeLoads, .getVnodeLoadsLiteFp = dmGetVnodeLoadsLite, .getMnodeLoadsFp = dmGetMnodeLoads, diff --git a/source/dnode/mgmt/node_mgmt/src/dmMonitor.c b/source/dnode/mgmt/node_mgmt/src/dmMonitor.c index b3db7c3058..c42aa6a1ae 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmMonitor.c +++ b/source/dnode/mgmt/node_mgmt/src/dmMonitor.c @@ -16,6 +16,7 @@ #define _DEFAULT_SOURCE #include "dmMgmt.h" #include "dmNodes.h" +#include "audit.h" static void dmGetMonitorBasicInfo(SDnode *pDnode, SMonBasicInfo *pInfo) { pInfo->protocol = 1; @@ -108,6 +109,11 @@ void dmSendMonitorReport() { monSendReport(); } +// TODO: put this in a separate file in the future +void dmSendAuditRecords() { + auditSendRecordsInBatch(); +} + void dmGetVnodeLoads(SMonVloadInfo *pInfo) { SDnode *pDnode = dmInstance(); SMgmtWrapper *pWrapper = &pDnode->wrappers[VNODE]; diff --git a/source/dnode/mgmt/node_util/inc/dmUtil.h b/source/dnode/mgmt/node_util/inc/dmUtil.h index 0a52c578a5..4769ef8538 100644 --- a/source/dnode/mgmt/node_util/inc/dmUtil.h +++ b/source/dnode/mgmt/node_util/inc/dmUtil.h @@ -86,6 +86,7 @@ typedef enum { typedef int32_t (*ProcessCreateNodeFp)(EDndNodeType ntype, SRpcMsg *pMsg); typedef int32_t (*ProcessDropNodeFp)(EDndNodeType ntype, SRpcMsg *pMsg); typedef void (*SendMonitorReportFp)(); +typedef void (*SendAuditRecordsFp)(); typedef void (*GetVnodeLoadsFp)(SMonVloadInfo *pInfo); typedef void (*GetMnodeLoadsFp)(SMonMloadInfo *pInfo); typedef void (*GetQnodeLoadsFp)(SQnodeLoad *pInfo); @@ -120,6 +121,7 @@ typedef struct { ProcessAlterNodeTypeFp processAlterNodeTypeFp; ProcessDropNodeFp processDropNodeFp; SendMonitorReportFp sendMonitorReportFp; + SendAuditRecordsFp sendAuditRecordFp; GetVnodeLoadsFp getVnodeLoadsFp; GetVnodeLoadsFp getVnodeLoadsLiteFp; GetMnodeLoadsFp getMnodeLoadsFp; diff --git a/source/dnode/mgmt/test/qnode/dqnode.cpp b/source/dnode/mgmt/test/qnode/dqnode.cpp index 3beb57c516..a72d2b42b9 100644 --- a/source/dnode/mgmt/test/qnode/dqnode.cpp +++ b/source/dnode/mgmt/test/qnode/dqnode.cpp @@ -37,6 +37,7 @@ TEST_F(DndTestQnode, 01_Create_Qnode) { SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_QNODE, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, TSDB_CODE_INVALID_OPTION); + rpcFreeCont(pRsp->pCont); } { @@ -50,6 +51,7 @@ TEST_F(DndTestQnode, 01_Create_Qnode) { SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_QNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, 0); + rpcFreeCont(pRsp->pCont); } { @@ -63,6 +65,7 @@ TEST_F(DndTestQnode, 01_Create_Qnode) { SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_QNODE, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, TSDB_CODE_QNODE_ALREADY_DEPLOYED); + rpcFreeCont(pRsp->pCont); } test.Restart(); @@ -78,6 +81,7 @@ TEST_F(DndTestQnode, 01_Create_Qnode) { SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_QNODE, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, TSDB_CODE_QNODE_ALREADY_DEPLOYED); + rpcFreeCont(pRsp->pCont); } } @@ -94,6 +98,7 @@ TEST_F(DndTestQnode, 02_Drop_Qnode) { SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_QNODE, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, TSDB_CODE_INVALID_OPTION); + rpcFreeCont(pRsp->pCont); } #endif @@ -108,6 +113,7 @@ TEST_F(DndTestQnode, 02_Drop_Qnode) { SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_QNODE, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, 0); + rpcFreeCont(pRsp->pCont); } { @@ -121,6 +127,7 @@ TEST_F(DndTestQnode, 02_Drop_Qnode) { SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_QNODE, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, TSDB_CODE_QNODE_NOT_DEPLOYED); + rpcFreeCont(pRsp->pCont); } test.Restart(); @@ -136,6 +143,7 @@ TEST_F(DndTestQnode, 02_Drop_Qnode) { SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_QNODE, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, TSDB_CODE_QNODE_NOT_DEPLOYED); + rpcFreeCont(pRsp->pCont); } { @@ -149,5 +157,6 @@ TEST_F(DndTestQnode, 02_Drop_Qnode) { SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_QNODE, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, 0); + rpcFreeCont(pRsp->pCont); } } \ No newline at end of file diff --git a/source/dnode/mgmt/test/snode/dsnode.cpp b/source/dnode/mgmt/test/snode/dsnode.cpp index 30d6a34813..a60ad6f0e4 100644 --- a/source/dnode/mgmt/test/snode/dsnode.cpp +++ b/source/dnode/mgmt/test/snode/dsnode.cpp @@ -37,6 +37,7 @@ TEST_F(DndTestSnode, 01_Create_Snode) { SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_SNODE, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, TSDB_CODE_INVALID_OPTION); + rpcFreeCont(pRsp->pCont); } { @@ -50,6 +51,7 @@ TEST_F(DndTestSnode, 01_Create_Snode) { SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_SNODE, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, 0); + rpcFreeCont(pRsp->pCont); } { @@ -63,6 +65,7 @@ TEST_F(DndTestSnode, 01_Create_Snode) { SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_SNODE, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, TSDB_CODE_SNODE_ALREADY_DEPLOYED); + rpcFreeCont(pRsp->pCont); } test.Restart(); @@ -78,6 +81,7 @@ TEST_F(DndTestSnode, 01_Create_Snode) { SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_SNODE, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, TSDB_CODE_SNODE_ALREADY_DEPLOYED); + rpcFreeCont(pRsp->pCont); } } @@ -94,6 +98,7 @@ TEST_F(DndTestSnode, 01_Drop_Snode) { SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_SNODE, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, TSDB_CODE_INVALID_OPTION); + rpcFreeCont(pRsp->pCont); } #endif @@ -108,6 +113,7 @@ TEST_F(DndTestSnode, 01_Drop_Snode) { SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_SNODE, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, 0); + rpcFreeCont(pRsp->pCont); } { @@ -121,6 +127,7 @@ TEST_F(DndTestSnode, 01_Drop_Snode) { SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_SNODE, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, TSDB_CODE_SNODE_NOT_DEPLOYED); + 
rpcFreeCont(pRsp->pCont); } test.Restart(); @@ -136,6 +143,7 @@ TEST_F(DndTestSnode, 01_Drop_Snode) { SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_SNODE, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, TSDB_CODE_SNODE_NOT_DEPLOYED); + rpcFreeCont(pRsp->pCont); } { @@ -149,5 +157,6 @@ TEST_F(DndTestSnode, 01_Drop_Snode) { SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_SNODE, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, 0); + rpcFreeCont(pRsp->pCont); } } \ No newline at end of file diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index e8d5dfd1f5..bd67af712a 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -704,10 +704,10 @@ static int32_t mndPersistTaskDropReq(SMnode *pMnode, STrans *pTrans, SStreamTask pReq->streamId = pTask->id.streamId; STransAction action = {0}; - SEpSet epset = {0}; - if(pTask->info.nodeId == SNODE_HANDLE){ + SEpSet epset = {0}; + if (pTask->info.nodeId == SNODE_HANDLE) { SSnodeObj *pObj = NULL; - void *pIter = NULL; + void *pIter = NULL; while (1) { pIter = sdbFetch(pMnode->pSdb, SDB_SNODE, pIter, (void **)&pObj); if (pIter == NULL) { @@ -717,10 +717,16 @@ static int32_t mndPersistTaskDropReq(SMnode *pMnode, STrans *pTrans, SStreamTask addEpIntoEpSet(&epset, pObj->pDnode->fqdn, pObj->pDnode->port); sdbRelease(pMnode->pSdb, pObj); } - }else{ + } else { SVgObj *pVgObj = mndAcquireVgroup(pMnode, pTask->info.nodeId); - epset = mndGetVgroupEpset(pMnode, pVgObj); - mndReleaseVgroup(pMnode, pVgObj); + if (pVgObj != NULL) { + epset = mndGetVgroupEpset(pMnode, pVgObj); + mndReleaseVgroup(pMnode, pVgObj); + } else { + mDebug("orphaned task:0x%x need to be dropped, nodeId:%d, no redo action", pTask->id.taskId, pTask->info.nodeId); + taosMemoryFree(pReq); + return 0; + } } // The epset of nodeId of this task may have been expired now, let's use the newest epset from mnode. 
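The vgroup guard added to `mndPersistTaskDropReq` above turns a missing vgroup into a no-op rather than a failed transaction: with no endpoint left to send to, the drop simply registers no redo action for that task. A condensed sketch of that control flow (names copied from the hunk, surrounding logic elided):

```c
// Resolve the target epset for the drop request; an unresolvable vgroup
// means the task is already orphaned, so skip the redo action entirely.
SVgObj *pVgObj = mndAcquireVgroup(pMnode, pTask->info.nodeId);
if (pVgObj == NULL) {
  mDebug("orphaned task:0x%x need to be dropped, nodeId:%d, no redo action",
         pTask->id.taskId, pTask->info.nodeId);
  taosMemoryFree(pReq);  // release the prepared drop request, nothing to send
  return 0;              // success: the transaction proceeds without this task
}
SEpSet epset = mndGetVgroupEpset(pMnode, pVgObj);
mndReleaseVgroup(pMnode, pVgObj);
```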
@@ -1657,6 +1663,7 @@ static void setTaskAttrInResBlock(SStreamObj *pStream, SStreamTask *pTask, SSDat STaskStatusEntry *pe = taosHashGet(execInfo.pTaskMap, &id, sizeof(id)); if (pe == NULL) { + mError("task:0x%" PRIx64 " not exists in vnode, no valid status/stage info", id.taskId); return; } diff --git a/source/dnode/mnode/impl/src/mndStreamTrans.c b/source/dnode/mnode/impl/src/mndStreamTrans.c index fa36d69d6e..43e85a9405 100644 --- a/source/dnode/mnode/impl/src/mndStreamTrans.c +++ b/source/dnode/mnode/impl/src/mndStreamTrans.c @@ -48,8 +48,7 @@ int32_t clearFinishedTrans(SMnode* pMnode) { void* pKey = taosHashGetKey(pEntry, &keyLen); // key is the name of src/dst db name SKeyInfo info = {.pKey = pKey, .keyLen = keyLen}; - - mDebug("transId:%d %s startTs:%" PRId64 "cleared due to finished", pEntry->transId, pEntry->name, + mDebug("transId:%d %s startTs:%" PRId64 " cleared since finished", pEntry->transId, pEntry->name, pEntry->startTime); taosArrayPush(pList, &info); } else { diff --git a/source/dnode/mnode/impl/test/acct/acct.cpp b/source/dnode/mnode/impl/test/acct/acct.cpp index 1f59a7fcca..a3f9362a4a 100644 --- a/source/dnode/mnode/impl/test/acct/acct.cpp +++ b/source/dnode/mnode/impl/test/acct/acct.cpp @@ -33,6 +33,7 @@ TEST_F(MndTestAcct, 01_Create_Acct) { SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_ACCT, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, TSDB_CODE_OPS_NOT_SUPPORT); + rpcFreeCont(pRsp->pCont); } TEST_F(MndTestAcct, 02_Alter_Acct) { @@ -43,6 +44,7 @@ TEST_F(MndTestAcct, 02_Alter_Acct) { SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_ACCT, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, TSDB_CODE_OPS_NOT_SUPPORT); + rpcFreeCont(pRsp->pCont); } TEST_F(MndTestAcct, 03_Drop_Acct) { @@ -53,4 +55,5 @@ TEST_F(MndTestAcct, 03_Drop_Acct) { SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_ACCT, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, TSDB_CODE_OPS_NOT_SUPPORT); + rpcFreeCont(pRsp->pCont); } diff --git a/source/dnode/mnode/impl/test/sma/sma.cpp b/source/dnode/mnode/impl/test/sma/sma.cpp index 9a2372d904..ed059a2994 100644 --- a/source/dnode/mnode/impl/test/sma/sma.cpp +++ b/source/dnode/mnode/impl/test/sma/sma.cpp @@ -253,12 +253,14 @@ TEST_F(MndTestSma, 02_Create_Show_Meta_Drop_Restart_BSma) { pReq = BuildCreateDbReq(dbname, &contLen); pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen); ASSERT_EQ(pRsp->code, 0); + rpcFreeCont(pRsp->pCont); } { pReq = BuildCreateBSmaStbReq(stbname, &contLen); pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen); ASSERT_EQ(pRsp->code, 0); + rpcFreeCont(pRsp->pCont); test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname); EXPECT_EQ(test.GetShowRows(), 1); } @@ -269,12 +271,14 @@ TEST_F(MndTestSma, 02_Create_Show_Meta_Drop_Restart_BSma) { pReq = BuildCreateBSmaStbReq(stbname, &contLen); pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen); ASSERT_EQ(pRsp->code, TSDB_CODE_MND_STB_ALREADY_EXIST); + rpcFreeCont(pRsp->pCont); } { pReq = BuildDropStbReq(stbname, &contLen); pRsp = test.SendReq(TDMT_MND_DROP_STB, pReq, contLen); ASSERT_EQ(pRsp->code, 0); + rpcFreeCont(pRsp->pCont); test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname); EXPECT_EQ(test.GetShowRows(), 0); } @@ -283,5 +287,6 @@ TEST_F(MndTestSma, 02_Create_Show_Meta_Drop_Restart_BSma) { pReq = BuildDropStbReq(stbname, &contLen); pRsp = test.SendReq(TDMT_MND_DROP_STB, pReq, contLen); ASSERT_EQ(pRsp->code, TSDB_CODE_MND_STB_NOT_EXIST); + rpcFreeCont(pRsp->pCont); } } diff --git a/source/dnode/mnode/impl/test/stb/stb.cpp 
b/source/dnode/mnode/impl/test/stb/stb.cpp index 1adbb87e19..aa12c107a1 100644 --- a/source/dnode/mnode/impl/test/stb/stb.cpp +++ b/source/dnode/mnode/impl/test/stb/stb.cpp @@ -155,6 +155,7 @@ void* MndTestStb::BuildAlterStbAddTagReq(const char* stbname, const char* tagnam tSerializeSMAlterStbReq(pHead, contLen, &req); *pContLen = contLen; + taosArrayDestroy(req.pFields); return pHead; } @@ -176,6 +177,7 @@ void* MndTestStb::BuildAlterStbDropTagReq(const char* stbname, const char* tagna tSerializeSMAlterStbReq(pHead, contLen, &req); *pContLen = contLen; + taosArrayDestroy(req.pFields); return pHead; } @@ -204,6 +206,7 @@ void* MndTestStb::BuildAlterStbUpdateTagNameReq(const char* stbname, const char* tSerializeSMAlterStbReq(pHead, contLen, &req); *pContLen = contLen; + taosArrayDestroy(req.pFields); return pHead; } @@ -226,6 +229,7 @@ void* MndTestStb::BuildAlterStbUpdateTagBytesReq(const char* stbname, const char tSerializeSMAlterStbReq(pHead, contLen, &req); *pContLen = contLen; + taosArrayDestroy(req.pFields); return pHead; } @@ -247,6 +251,7 @@ void* MndTestStb::BuildAlterStbAddColumnReq(const char* stbname, const char* col tSerializeSMAlterStbReq(pHead, contLen, &req); *pContLen = contLen; + taosArrayDestroy(req.pFields); return pHead; } @@ -268,6 +273,7 @@ void* MndTestStb::BuildAlterStbDropColumnReq(const char* stbname, const char* co tSerializeSMAlterStbReq(pHead, contLen, &req); *pContLen = contLen; + taosArrayDestroy(req.pFields); return pHead; } @@ -290,6 +296,7 @@ void* MndTestStb::BuildAlterStbUpdateColumnBytesReq(const char* stbname, const c tSerializeSMAlterStbReq(pHead, contLen, &req); *pContLen = contLen; + taosArrayDestroy(req.pFields); return pHead; } @@ -303,6 +310,7 @@ TEST_F(MndTestStb, 01_Create_Show_Meta_Drop_Restart_Stb) { SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, 0); + rpcFreeCont(pRsp->pCont); } { @@ -311,6 +319,7 @@ TEST_F(MndTestStb, 01_Create_Show_Meta_Drop_Restart_Stb) { SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, 0); + rpcFreeCont(pRsp->pCont); } { @@ -334,6 +343,7 @@ TEST_F(MndTestStb, 01_Create_Show_Meta_Drop_Restart_Stb) { STableMetaRsp metaRsp = {0}; tDeserializeSTableMetaRsp(pMsg->pCont, pMsg->contLen, &metaRsp); + rpcFreeCont(pMsg->pCont); EXPECT_STREQ(metaRsp.dbFName, dbname); EXPECT_STREQ(metaRsp.tbName, "stb"); @@ -410,6 +420,7 @@ TEST_F(MndTestStb, 01_Create_Show_Meta_Drop_Restart_Stb) { SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_STB, pHead, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, 0); + rpcFreeCont(pRsp->pCont); } { @@ -423,6 +434,7 @@ TEST_F(MndTestStb, 01_Create_Show_Meta_Drop_Restart_Stb) { SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_DB, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, 0); + rpcFreeCont(pRsp->pCont); } } @@ -436,6 +448,7 @@ TEST_F(MndTestStb, 02_Alter_Stb_AddTag) { SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, 0); + rpcFreeCont(pRsp->pCont); } { @@ -443,30 +456,35 @@ TEST_F(MndTestStb, 02_Alter_Stb_AddTag) { SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, 0); + rpcFreeCont(pRsp->pCont); } { void* pReq = BuildAlterStbAddTagReq("1.d3.stb", "tag4", &contLen); SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen); ASSERT_EQ(pRsp->code, TSDB_CODE_MND_DB_NOT_EXIST); + rpcFreeCont(pRsp->pCont); } { void* pReq = 
BuildAlterStbAddTagReq("1.d2.stb3", "tag4", &contLen); SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen); ASSERT_EQ(pRsp->code, TSDB_CODE_MND_STB_NOT_EXIST); + rpcFreeCont(pRsp->pCont); } { void* pReq = BuildAlterStbAddTagReq(stbname, "tag3", &contLen); SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen); ASSERT_EQ(pRsp->code, TSDB_CODE_MND_TAG_ALREADY_EXIST); + rpcFreeCont(pRsp->pCont); } { void* pReq = BuildAlterStbAddTagReq(stbname, "col1", &contLen); SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen); ASSERT_EQ(pRsp->code, TSDB_CODE_MND_COLUMN_ALREADY_EXIST); + rpcFreeCont(pRsp->pCont); } { @@ -474,6 +492,7 @@ TEST_F(MndTestStb, 02_Alter_Stb_AddTag) { SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, 0); + rpcFreeCont(pRsp->pCont); test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname); } @@ -483,6 +502,7 @@ TEST_F(MndTestStb, 02_Alter_Stb_AddTag) { SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_DB, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, 0); + rpcFreeCont(pRsp->pCont); } } @@ -495,18 +515,21 @@ TEST_F(MndTestStb, 03_Alter_Stb_DropTag) { void* pReq = BuildCreateDbReq(dbname, &contLen); SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen); ASSERT_EQ(pRsp->code, 0); + rpcFreeCont(pRsp->pCont); } { void* pReq = BuildCreateStbReq(stbname, &contLen); SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen); ASSERT_EQ(pRsp->code, 0); + rpcFreeCont(pRsp->pCont); } { void* pReq = BuildAlterStbDropTagReq(stbname, "tag5", &contLen); SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen); ASSERT_EQ(pRsp->code, TSDB_CODE_MND_TAG_NOT_EXIST); + rpcFreeCont(pRsp->pCont); } { @@ -514,6 +537,7 @@ TEST_F(MndTestStb, 03_Alter_Stb_DropTag) { SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, 0); + rpcFreeCont(pRsp->pCont); test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname); EXPECT_EQ(test.GetShowRows(), 1); @@ -524,6 +548,7 @@ TEST_F(MndTestStb, 03_Alter_Stb_DropTag) { SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_DB, pReq, contLen); ASSERT_NE(pRsp, nullptr); ASSERT_EQ(pRsp->code, 0); + rpcFreeCont(pRsp->pCont); } } @@ -536,41 +561,48 @@ TEST_F(MndTestStb, 04_Alter_Stb_AlterTagName) { void* pReq = BuildCreateDbReq(dbname, &contLen); SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen); ASSERT_EQ(pRsp->code, 0); + rpcFreeCont(pRsp->pCont); } { void* pReq = BuildCreateStbReq(stbname, &contLen); SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen); ASSERT_EQ(pRsp->code, 0); + rpcFreeCont(pRsp->pCont); } { void* pReq = BuildAlterStbUpdateTagNameReq(stbname, "tag5", "tag6", &contLen); SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen); ASSERT_EQ(pRsp->code, TSDB_CODE_MND_TAG_NOT_EXIST); + rpcFreeCont(pRsp->pCont); } { void* pReq = BuildAlterStbUpdateTagNameReq(stbname, "col1", "tag6", &contLen); SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen); ASSERT_EQ(pRsp->code, TSDB_CODE_MND_TAG_NOT_EXIST); + rpcFreeCont(pRsp->pCont); } { void* pReq = BuildAlterStbUpdateTagNameReq(stbname, "tag3", "col1", &contLen); SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen); ASSERT_EQ(pRsp->code, TSDB_CODE_MND_COLUMN_ALREADY_EXIST); + rpcFreeCont(pRsp->pCont); } { void* pReq = BuildAlterStbUpdateTagNameReq(stbname, "tag3", "tag2", &contLen); SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen); ASSERT_EQ(pRsp->code, 
TSDB_CODE_MND_TAG_ALREADY_EXIST);
+    rpcFreeCont(pRsp->pCont);
   }
 
   {
     void* pReq = BuildAlterStbUpdateTagNameReq(stbname, "tag3", "tag2", &contLen);
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
     ASSERT_EQ(pRsp->code, TSDB_CODE_MND_TAG_ALREADY_EXIST);
+    rpcFreeCont(pRsp->pCont);
   }
 
   {
@@ -578,6 +610,7 @@ TEST_F(MndTestStb, 04_Alter_Stb_AlterTagName) {
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
     ASSERT_NE(pRsp, nullptr);
     ASSERT_EQ(pRsp->code, 0);
+    rpcFreeCont(pRsp->pCont);
 
     test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
     EXPECT_EQ(test.GetShowRows(), 1);
@@ -588,6 +621,7 @@ TEST_F(MndTestStb, 04_Alter_Stb_AlterTagName) {
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_DB, pReq, contLen);
     ASSERT_NE(pRsp, nullptr);
     ASSERT_EQ(pRsp->code, 0);
+    rpcFreeCont(pRsp->pCont);
   }
 }
@@ -600,36 +634,42 @@ TEST_F(MndTestStb, 05_Alter_Stb_AlterTagBytes) {
     void* pReq = BuildCreateDbReq(dbname, &contLen);
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen);
     ASSERT_EQ(pRsp->code, 0);
+    rpcFreeCont(pRsp->pCont);
   }
 
   {
     void* pReq = BuildCreateStbReq(stbname, &contLen);
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen);
     ASSERT_EQ(pRsp->code, 0);
+    rpcFreeCont(pRsp->pCont);
   }
 
   {
     void* pReq = BuildAlterStbUpdateTagBytesReq(stbname, "tag5", 12, &contLen);
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
     ASSERT_EQ(pRsp->code, TSDB_CODE_MND_TAG_NOT_EXIST);
+    rpcFreeCont(pRsp->pCont);
   }
 
   {
     void* pReq = BuildAlterStbUpdateTagBytesReq(stbname, "tag1", 13, &contLen);
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
     ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_STB_OPTION);
+    rpcFreeCont(pRsp->pCont);
   }
 
   {
     void* pReq = BuildAlterStbUpdateTagBytesReq(stbname, "tag3", 8, &contLen);
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
     ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_ROW_BYTES);
+    rpcFreeCont(pRsp->pCont);
   }
 
   {
     void* pReq = BuildAlterStbUpdateTagBytesReq(stbname, "tag3", 20, &contLen);
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
     ASSERT_EQ(pRsp->code, 0);
+    rpcFreeCont(pRsp->pCont);
 
     test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
     EXPECT_EQ(test.GetShowRows(), 1);
@@ -640,6 +680,7 @@ TEST_F(MndTestStb, 05_Alter_Stb_AlterTagBytes) {
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_DB, pReq, contLen);
     ASSERT_NE(pRsp, nullptr);
     ASSERT_EQ(pRsp->code, 0);
+    rpcFreeCont(pRsp->pCont);
   }
 }
@@ -653,6 +694,7 @@ TEST_F(MndTestStb, 06_Alter_Stb_AddColumn) {
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen);
     ASSERT_NE(pRsp, nullptr);
     ASSERT_EQ(pRsp->code, 0);
+    rpcFreeCont(pRsp->pCont);
   }
 
   {
@@ -660,30 +702,35 @@ TEST_F(MndTestStb, 06_Alter_Stb_AddColumn) {
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen);
     ASSERT_NE(pRsp, nullptr);
     ASSERT_EQ(pRsp->code, 0);
+    rpcFreeCont(pRsp->pCont);
   }
 
   {
     void* pReq = BuildAlterStbAddColumnReq("1.d7.stb", "tag4", &contLen);
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
     ASSERT_EQ(pRsp->code, TSDB_CODE_MND_DB_NOT_EXIST);
+    rpcFreeCont(pRsp->pCont);
   }
 
   {
     void* pReq = BuildAlterStbAddColumnReq("1.d6.stb3", "tag4", &contLen);
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
     ASSERT_EQ(pRsp->code, TSDB_CODE_MND_STB_NOT_EXIST);
+    rpcFreeCont(pRsp->pCont);
   }
 
   {
     void* pReq = BuildAlterStbAddColumnReq(stbname, "tag3", &contLen);
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
     ASSERT_EQ(pRsp->code, TSDB_CODE_MND_TAG_ALREADY_EXIST);
+    rpcFreeCont(pRsp->pCont);
   }
 
   {
     void* pReq = BuildAlterStbAddColumnReq(stbname, "col1", &contLen);
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
     ASSERT_EQ(pRsp->code, TSDB_CODE_MND_COLUMN_ALREADY_EXIST);
+    rpcFreeCont(pRsp->pCont);
   }
 
   {
@@ -691,6 +738,7 @@ TEST_F(MndTestStb, 06_Alter_Stb_AddColumn) {
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
     ASSERT_NE(pRsp, nullptr);
     ASSERT_EQ(pRsp->code, 0);
+    rpcFreeCont(pRsp->pCont);
 
     test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
     EXPECT_EQ(test.GetShowRows(), 1);
@@ -701,6 +749,7 @@ TEST_F(MndTestStb, 06_Alter_Stb_AddColumn) {
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_DB, pReq, contLen);
     ASSERT_NE(pRsp, nullptr);
     ASSERT_EQ(pRsp->code, 0);
+    rpcFreeCont(pRsp->pCont);
   }
 }
@@ -713,30 +762,35 @@ TEST_F(MndTestStb, 07_Alter_Stb_DropColumn) {
     void* pReq = BuildCreateDbReq(dbname, &contLen);
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen);
     ASSERT_EQ(pRsp->code, 0);
+    rpcFreeCont(pRsp->pCont);
   }
 
   {
     void* pReq = BuildCreateStbReq(stbname, &contLen);
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen);
     ASSERT_EQ(pRsp->code, 0);
+    rpcFreeCont(pRsp->pCont);
   }
 
   {
     void* pReq = BuildAlterStbDropColumnReq(stbname, "col4", &contLen);
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
     ASSERT_EQ(pRsp->code, TSDB_CODE_MND_COLUMN_NOT_EXIST);
+    rpcFreeCont(pRsp->pCont);
   }
 
   {
     void* pReq = BuildAlterStbDropColumnReq(stbname, "col1", &contLen);
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
     ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_STB_ALTER_OPTION);
+    rpcFreeCont(pRsp->pCont);
   }
 
   {
     void* pReq = BuildAlterStbDropColumnReq(stbname, "ts", &contLen);
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
     ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_STB_ALTER_OPTION);
+    rpcFreeCont(pRsp->pCont);
   }
 
   {
@@ -744,6 +798,7 @@ TEST_F(MndTestStb, 07_Alter_Stb_DropColumn) {
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
     ASSERT_NE(pRsp, nullptr);
     ASSERT_EQ(pRsp->code, 0);
+    rpcFreeCont(pRsp->pCont);
   }
 
   {
@@ -751,6 +806,7 @@ TEST_F(MndTestStb, 07_Alter_Stb_DropColumn) {
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
     ASSERT_NE(pRsp, nullptr);
     ASSERT_EQ(pRsp->code, 0);
+    rpcFreeCont(pRsp->pCont);
 
     test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
     EXPECT_EQ(test.GetShowRows(), 1);
@@ -761,6 +817,7 @@ TEST_F(MndTestStb, 07_Alter_Stb_DropColumn) {
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_DB, pReq, contLen);
     ASSERT_NE(pRsp, nullptr);
     ASSERT_EQ(pRsp->code, 0);
+    rpcFreeCont(pRsp->pCont);
   }
 }
@@ -773,42 +830,49 @@ TEST_F(MndTestStb, 08_Alter_Stb_AlterTagBytes) {
     void* pReq = BuildCreateDbReq(dbname, &contLen);
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen);
     ASSERT_EQ(pRsp->code, 0);
+    rpcFreeCont(pRsp->pCont);
   }
 
   {
     void* pReq = BuildCreateStbReq(stbname, &contLen);
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen);
     ASSERT_EQ(pRsp->code, 0);
+    rpcFreeCont(pRsp->pCont);
   }
 
   {
     void* pReq = BuildAlterStbUpdateColumnBytesReq(stbname, "col5", 12, &contLen, 0);
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
     ASSERT_EQ(pRsp->code, TSDB_CODE_MND_COLUMN_NOT_EXIST);
+    rpcFreeCont(pRsp->pCont);
   }
 
   {
     void* pReq = BuildAlterStbUpdateColumnBytesReq(stbname, "ts", 8, &contLen, 0);
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
     ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_STB_OPTION);
+    rpcFreeCont(pRsp->pCont);
   }
 
   {
     void* pReq = BuildAlterStbUpdateColumnBytesReq(stbname, "col1", 8, &contLen, 0);
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
     ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_ROW_BYTES);
+    rpcFreeCont(pRsp->pCont);
   }
 
   {
     void* pReq = BuildAlterStbUpdateColumnBytesReq(stbname, "col1", TSDB_MAX_BYTES_PER_ROW, &contLen, 0);
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
     ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_ROW_BYTES);
+    rpcFreeCont(pRsp->pCont);
   }
 
   {
     void* pReq = BuildAlterStbUpdateColumnBytesReq(stbname, "col1", 20, &contLen, 0);
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
     ASSERT_EQ(pRsp->code, 0);
+    rpcFreeCont(pRsp->pCont);
 
     test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
     EXPECT_EQ(test.GetShowRows(), 1);
@@ -818,6 +882,7 @@ TEST_F(MndTestStb, 08_Alter_Stb_AlterTagBytes) {
     void* pReq = BuildAlterStbUpdateColumnBytesReq(stbname, "col_not_exist", 20, &contLen, 1);
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
     ASSERT_EQ(pRsp->code, TSDB_CODE_MND_COLUMN_NOT_EXIST);
+    rpcFreeCont(pRsp->pCont);
 
     test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
     EXPECT_EQ(test.GetShowRows(), 1);
@@ -828,5 +893,6 @@ TEST_F(MndTestStb, 08_Alter_Stb_AlterTagBytes) {
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_DB, pReq, contLen);
     ASSERT_NE(pRsp, nullptr);
     ASSERT_EQ(pRsp->code, 0);
+    rpcFreeCont(pRsp->pCont);
   }
 }
diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c
index ee76a27414..138c58b45f 100644
--- a/source/dnode/vnode/src/tq/tq.c
+++ b/source/dnode/vnode/src/tq/tq.c
@@ -699,7 +699,23 @@ end:
   return ret;
 }
 
-void freePtr(void* ptr) { taosMemoryFree(*(void**)ptr); }
+static void freePtr(void* ptr) { taosMemoryFree(*(void**)ptr); }
+
+static STaskId replaceStreamTaskId(SStreamTask* pTask) {
+  ASSERT(pTask->info.fillHistory);
+  STaskId id = {.streamId = pTask->id.streamId, .taskId = pTask->id.taskId};
+
+  pTask->id.streamId = pTask->streamTaskId.streamId;
+  pTask->id.taskId = pTask->streamTaskId.taskId;
+
+  return id;
+}
+
+static void restoreStreamTaskId(SStreamTask* pTask, STaskId* pId) {
+  ASSERT(pTask->info.fillHistory);
+  pTask->id.taskId = pId->taskId;
+  pTask->id.streamId = pId->streamId;
+}
 
 int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
   int32_t vgId = TD_VID(pTq->pVnode);
@@ -713,15 +729,9 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
   streamTaskOpenAllUpstreamInput(pTask);
 
   if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
-    SStreamTask* pStateTask = pTask;
-
-    STaskId taskId = {.streamId = 0, .taskId = 0};
+    STaskId taskId = {0};
 
     if (pTask->info.fillHistory) {
-      taskId.streamId = pTask->id.streamId;
-      taskId.taskId = pTask->id.taskId;
-
-      pTask->id.streamId = pTask->streamTaskId.streamId;
-      pTask->id.taskId = pTask->streamTaskId.taskId;
+      taskId = replaceStreamTaskId(pTask);
     }
 
     pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask, false, -1, -1);
@@ -731,9 +741,9 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
     } else {
       tqDebug("s-task:%s state:%p", pTask->id.idStr, pTask->pState);
     }
+
     if (pTask->info.fillHistory) {
-      pTask->id.streamId = taskId.streamId;
-      pTask->id.taskId = taskId.taskId;
+      restoreStreamTaskId(pTask, &taskId);
     }
 
     SReadHandle handle = {
@@ -754,15 +764,9 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
     qSetTaskId(pTask->exec.pExecutor, pTask->id.taskId, pTask->id.streamId);
   } else if (pTask->info.taskLevel == TASK_LEVEL__AGG) {
-    SStreamTask* pSateTask = pTask;
-    // SStreamTask task = {0};
-
-    STaskId taskId = {.streamId = 0, .taskId = 0};
+    STaskId taskId = {0};
 
     if (pTask->info.fillHistory) {
-      taskId.streamId = pTask->id.streamId;
-      taskId.taskId = pTask->id.taskId;
-      pTask->id.streamId = pTask->streamTaskId.streamId;
-      pTask->id.taskId = pTask->streamTaskId.taskId;
+      taskId = replaceStreamTaskId(pTask);
     }
 
     pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask, false, -1, -1);
@@ -774,15 +778,13 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
     }
 
     if (pTask->info.fillHistory) {
-      pTask->id.streamId = taskId.streamId;
-      pTask->id.taskId = taskId.taskId;
+      restoreStreamTaskId(pTask, &taskId);
     }
 
-    int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTask->upstreamInfo.pList);
     SReadHandle handle = {
         .checkpointId = pTask->chkInfo.checkpointId,
         .vnode = NULL,
-        .numOfVgroups = numOfVgroups,
+        .numOfVgroups = (int32_t)taosArrayGetSize(pTask->upstreamInfo.pList),
         .pStateBackend = pTask->pState,
         .fillHistory = pTask->info.fillHistory,
         .winRange = pTask->dataRange.window,
@@ -828,12 +830,6 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
     pTask->exec.pWalReader = walOpenReader(pTq->pVnode->pWal, &cond, pTask->id.taskId);
   }
 
-  // // reset the task status from unfinished transaction
-  // if (pTask->status.taskStatus == TASK_STATUS__PAUSE) {
-  //   tqWarn("s-task:%s reset task status to be normal, status kept in taskMeta: Paused", pTask->id.idStr);
-  //   pTask->status.taskStatus = TASK_STATUS__READY;
-  // }
-
   streamTaskResetUpstreamStageInfo(pTask);
   streamSetupScheduleTrigger(pTask);
   SCheckpointInfo* pChkInfo = &pTask->chkInfo;
diff --git a/source/dnode/vnode/src/tq/tqStreamStateSnap.c b/source/dnode/vnode/src/tq/tqStreamStateSnap.c
index 2ab710176d..50f413bcc9 100644
--- a/source/dnode/vnode/src/tq/tqStreamStateSnap.c
+++ b/source/dnode/vnode/src/tq/tqStreamStateSnap.c
@@ -181,5 +181,5 @@ int32_t streamStateRebuildFromSnap(SStreamStateWriter* pWriter, int64_t chkpId)
 }
 
 int32_t streamStateLoadTasks(SStreamStateWriter* pWriter) {
-  return streamMetaReloadAllTasks(pWriter->pTq->pStreamMeta);
+  return streamMetaLoadAllTasks(pWriter->pTq->pStreamMeta);
 }
diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c
index b1d49bf31b..a106fe148b 100644
--- a/source/dnode/vnode/src/tqCommon/tqCommon.c
+++ b/source/dnode/vnode/src/tqCommon/tqCommon.c
@@ -712,9 +712,9 @@ int32_t resetStreamTaskStatus(SStreamMeta* pMeta) {
 }
 
 static int32_t restartStreamTasks(SStreamMeta* pMeta, bool isLeader) {
-  int32_t vgId = pMeta->vgId;
-  int32_t code = 0;
-  int64_t st = taosGetTimestampMs();
+  int32_t vgId = pMeta->vgId;
+  int32_t code = 0;
+  int64_t st   = taosGetTimestampMs();
 
   while(1) {
     int32_t startVal = atomic_val_compare_exchange_32(&pMeta->startInfo.taskStarting, 0, 1);
@@ -736,17 +736,9 @@
   }
 
   streamMetaWLock(pMeta);
-  code = streamMetaReopen(pMeta);
-  if (code != TSDB_CODE_SUCCESS) {
-    tqError("vgId:%d failed to reopen stream meta", vgId);
-    streamMetaWUnLock(pMeta);
-    code = terrno;
-    return code;
-  }
+  streamMetaClear(pMeta);
 
-  streamMetaInitBackend(pMeta);
   int64_t el = taosGetTimestampMs() - st;
-  tqInfo("vgId:%d close&reload state elapsed time:%.3fs", vgId, el/1000.);
 
   code = streamMetaLoadAllTasks(pMeta);
@@ -758,10 +750,10 @@
   }
 
   if (isLeader && !tsDisableStream) {
-    tqInfo("vgId:%d restart all stream tasks after all tasks being updated", vgId);
     resetStreamTaskStatus(pMeta);
-
     streamMetaWUnLock(pMeta);
+    tqInfo("vgId:%d restart all stream tasks after all tasks being updated", vgId);
+
     startStreamTasks(pMeta);
   } else {
     streamMetaResetStartInfo(&pMeta->startInfo);
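The two helpers factored out of `tqExpandTask` above make the fill-history identity swap symmetric and harder to get wrong. A minimal sketch of the calling pattern (names taken from the hunk; the surrounding task setup is elided):

```c
/* Sketch: open task state under the related stream task's identity,
 * then always restore the fill-history task's own id afterwards. */
STaskId saved = {0};
if (pTask->info.fillHistory) {
  saved = replaceStreamTaskId(pTask);  // remember our id, adopt streamTaskId
}

pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask, false, -1, -1);

if (pTask->info.fillHistory) {
  restoreStreamTaskId(pTask, &saved);  // undo the swap on every path
}
```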
diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c
index c7bf9399a0..1799823011 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCache.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCache.c
@@ -53,11 +53,10 @@ static void tsdbCloseBICache(STsdb *pTsdb) {
 }
 
 static int32_t tsdbOpenBCache(STsdb *pTsdb) {
-  int32_t code = 0;
-  // SLRUCache *pCache = taosLRUCacheInit(10 * 1024 * 1024, 0, .5);
-  int32_t szPage = pTsdb->pVnode->config.tsdbPageSize;
-
-  SLRUCache *pCache = taosLRUCacheInit((int64_t)tsS3BlockCacheSize * tsS3BlockSize * szPage, 0, .5);
+  int32_t    code = 0;
+  int32_t    szPage = pTsdb->pVnode->config.tsdbPageSize;
+  int64_t    szBlock = tsS3BlockSize <= 1024 ? 1024 : tsS3BlockSize;
+  SLRUCache *pCache = taosLRUCacheInit((int64_t)tsS3BlockCacheSize * szBlock * szPage, 0, .5);
   if (pCache == NULL) {
     code = TSDB_CODE_OUT_OF_MEMORY;
     goto _err;
@@ -67,8 +66,9 @@ static int32_t tsdbOpenBCache(STsdb *pTsdb) {
 
   taosThreadMutexInit(&pTsdb->bMutex, NULL);
 
-_err:
   pTsdb->bCache = pCache;
+
+_err:
   return code;
 }
 
@@ -452,9 +452,11 @@ static SLastCol *tsdbCacheLookup(STsdb *pTsdb, tb_uid_t uid, int16_t cid, int8_t
 static void reallocVarData(SColVal *pColVal) {
   if (IS_VAR_DATA_TYPE(pColVal->type)) {
     uint8_t *pVal = pColVal->value.pData;
-    pColVal->value.pData = taosMemoryMalloc(pColVal->value.nData);
-    if (pColVal->value.nData) {
+    if (pColVal->value.nData > 0) {
+      pColVal->value.pData = taosMemoryMalloc(pColVal->value.nData);
       memcpy(pColVal->value.pData, pVal, pColVal->value.nData);
+    } else {
+      pColVal->value.pData = NULL;
     }
   }
 }
@@ -2755,14 +2757,16 @@ static int32_t mergeLastCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SC
         *pCol = (SLastCol){.ts = rowTs, .colVal = *pColVal};
         if (IS_VAR_DATA_TYPE(pColVal->type) /*&& pColVal->value.nData > 0*/) {
-          pCol->colVal.value.pData = taosMemoryMalloc(pCol->colVal.value.nData);
-          if (pCol->colVal.value.pData == NULL) {
-            terrno = TSDB_CODE_OUT_OF_MEMORY;
-            code = TSDB_CODE_OUT_OF_MEMORY;
-            goto _err;
-          }
           if (pColVal->value.nData > 0) {
+            pCol->colVal.value.pData = taosMemoryMalloc(pCol->colVal.value.nData);
+            if (pCol->colVal.value.pData == NULL) {
+              terrno = TSDB_CODE_OUT_OF_MEMORY;
+              code = TSDB_CODE_OUT_OF_MEMORY;
+              goto _err;
+            }
             memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
+          } else {
+            pCol->colVal.value.pData = NULL;
           }
         }
 
@@ -2802,17 +2806,21 @@ static int32_t mergeLastCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SC
       tsdbRowGetColVal(pRow, pTSchema, slotIds[iCol], pColVal);
       if (!COL_VAL_IS_VALUE(tColVal) && COL_VAL_IS_VALUE(pColVal)) {
         SLastCol lastCol = {.ts = rowTs, .colVal = *pColVal};
-        if (IS_VAR_DATA_TYPE(pColVal->type) && pColVal->value.nData > 0) {
+        if (IS_VAR_DATA_TYPE(pColVal->type) /* && pColVal->value.nData > 0 */) {
           SLastCol *pLastCol = (SLastCol *)taosArrayGet(pColArray, iCol);
           taosMemoryFree(pLastCol->colVal.value.pData);
-          lastCol.colVal.value.pData = taosMemoryMalloc(lastCol.colVal.value.nData);
-          if (lastCol.colVal.value.pData == NULL) {
-            terrno = TSDB_CODE_OUT_OF_MEMORY;
-            code = TSDB_CODE_OUT_OF_MEMORY;
-            goto _err;
+          if (pColVal->value.nData > 0) {
+            lastCol.colVal.value.pData = taosMemoryMalloc(lastCol.colVal.value.nData);
+            if (lastCol.colVal.value.pData == NULL) {
+              terrno = TSDB_CODE_OUT_OF_MEMORY;
+              code = TSDB_CODE_OUT_OF_MEMORY;
+              goto _err;
+            }
+            memcpy(lastCol.colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
+          } else {
+            lastCol.colVal.value.pData = NULL;
           }
-          memcpy(lastCol.colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
         }
 
         taosArraySet(pColArray, iCol, &lastCol);
@@ -2923,14 +2931,18 @@ static int32_t mergeLastRowCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray,
         *pCol = (SLastCol){.ts = rowTs, .colVal = *pColVal};
         if (IS_VAR_DATA_TYPE(pColVal->type) /*&& pColVal->value.nData > 0*/) {
-          pCol->colVal.value.pData = taosMemoryMalloc(pCol->colVal.value.nData);
-          if (pCol->colVal.value.pData == NULL) {
-            terrno = TSDB_CODE_OUT_OF_MEMORY;
-            code = TSDB_CODE_OUT_OF_MEMORY;
-            goto _err;
-          }
           if (pColVal->value.nData > 0) {
-            memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
+            pCol->colVal.value.pData = taosMemoryMalloc(pCol->colVal.value.nData);
+            if (pCol->colVal.value.pData == NULL) {
+              terrno = TSDB_CODE_OUT_OF_MEMORY;
+              code = TSDB_CODE_OUT_OF_MEMORY;
+              goto _err;
+            }
+            if (pColVal->value.nData > 0) {
+              memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
+            }
+          } else {
+            pCol->colVal.value.pData = NULL;
           }
         }
diff --git a/source/dnode/vnode/src/tsdb/tsdbDataFileRAW.c b/source/dnode/vnode/src/tsdb/tsdbDataFileRAW.c
index 3f448379c9..d2e3cb08e5 100644
--- a/source/dnode/vnode/src/tsdb/tsdbDataFileRAW.c
+++ b/source/dnode/vnode/src/tsdb/tsdbDataFileRAW.c
@@ -115,7 +115,7 @@ static int32_t tsdbDataFileRAWWriterDoClose(SDataFileRAWWriter *writer) { return
 static int32_t tsdbDataFileRAWWriterCloseCommit(SDataFileRAWWriter *writer, TFileOpArray *opArr) {
   int32_t code = 0;
   int32_t lino = 0;
-  ASSERT(writer->ctx->offset == writer->file.size);
+  ASSERT(writer->ctx->offset <= writer->file.size);
   ASSERT(writer->config->fid == writer->file.fid);
 
   STFileOp op = (STFileOp){
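The `tsdbCache.c` hunks above all apply the same rule: allocate and copy var-data only when `nData > 0`, and otherwise set `pData` to `NULL` so a later `taosMemoryFree` is harmless. A hedged distillation of the pattern (`copyColValData` is a hypothetical name, not an API in this tree):

```c
// Sketch of the guarded copy: zero-length var-data must end up with
// pData == NULL rather than an uninitialized or stale pointer.
static int32_t copyColValData(SColVal *pDst, const SColVal *pSrc) {
  if (pSrc->value.nData > 0) {
    pDst->value.pData = taosMemoryMalloc(pSrc->value.nData);
    if (pDst->value.pData == NULL) return TSDB_CODE_OUT_OF_MEMORY;
    memcpy(pDst->value.pData, pSrc->value.pData, pSrc->value.nData);
  } else {
    pDst->value.pData = NULL;
  }
  pDst->value.nData = pSrc->value.nData;
  return 0;
}
```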
diff --git a/source/dnode/vnode/src/tsdb/tsdbFS2.c b/source/dnode/vnode/src/tsdb/tsdbFS2.c
index e933b3a7dc..2511fa443b 100644
--- a/source/dnode/vnode/src/tsdb/tsdbFS2.c
+++ b/source/dnode/vnode/src/tsdb/tsdbFS2.c
@@ -529,7 +529,8 @@ static int32_t tsdbFSDoSanAndFix(STFileSystem *fs) {
     for (const STfsFile *file = NULL; (file = tfsReaddir(dir)) != NULL;) {
       if (taosIsDir(file->aname)) continue;
 
-      if (tsdbFSGetFileObjHashEntry(&fobjHash, file->aname) == NULL) {
+      if (tsdbFSGetFileObjHashEntry(&fobjHash, file->aname) == NULL &&
+          strncmp(file->aname + strlen(file->aname) - 3, ".cp", 3)) {
         int32_t nlevel = tfsGetLevel(fs->tsdb->pVnode->pTfs);
         remove_file(file->aname, nlevel > 1 && file->did.level == nlevel - 1);
       }
@@ -1113,7 +1114,7 @@ int32_t tsdbFSCreateCopyRangedSnapshot(STFileSystem *fs, TFileSetRangeArray *pRa
   TARRAY2_FOREACH(fs->fSetArr, fset) {
     int64_t ever = VERSION_MAX;
     if (pHash) {
-      int32_t fid = fset->fid;
+      int32_t         fid = fset->fid;
       STFileSetRange *u = taosHashGet(pHash, &fid, sizeof(fid));
       if (u) {
         ever = u->sver - 1;
@@ -1145,10 +1146,10 @@ int32_t tsdbFSDestroyCopyRangedSnapshot(TFileSetArray **fsetArr) { return tsdbFS
 int32_t tsdbFSCreateRefRangedSnapshot(STFileSystem *fs, int64_t sver, int64_t ever, TFileSetRangeArray *pRanges,
                                       TFileSetRangeArray **fsrArr) {
-  int32_t code = 0;
-  STFileSet *fset;
+  int32_t         code = 0;
+  STFileSet      *fset;
   STFileSetRange *fsr1 = NULL;
-  SHashObj *pHash = NULL;
+  SHashObj       *pHash = NULL;
 
   fsrArr[0] = taosMemoryCalloc(1, sizeof(*fsrArr[0]));
   if (fsrArr[0] == NULL) {
@@ -1171,7 +1172,7 @@ int32_t tsdbFSCreateRefRangedSnapshot(STFileSystem *fs, int64_t sver, int64_t ev
     int64_t ever1 = ever;
     if (pHash) {
-      int32_t fid = fset->fid;
+      int32_t         fid = fset->fid;
       STFileSetRange *u = taosHashGet(pHash, &fid, sizeof(fid));
       if (u) {
         sver1 = u->sver;
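The new condition in `tsdbFSDoSanAndFix` keeps checkpoint files (`.cp`) out of the orphan-file sweep by comparing the last three characters of the name. Note the pointer arithmetic assumes `strlen(file->aname) >= 3`; a defensive variant of the same test might look like this (hypothetical helper, not part of the patch):

```c
#include <stdbool.h>
#include <string.h>

// True when name ends with suffix; safe for names shorter than the suffix.
static bool nameHasSuffix(const char *name, const char *suffix) {
  size_t nlen = strlen(name);
  size_t slen = strlen(suffix);
  return nlen >= slen && strncmp(name + nlen - slen, suffix, slen) == 0;
}
```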
diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c
index a2ef109800..52ee6d0b14 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRead2.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c
@@ -67,7 +67,7 @@ static SVersionRange getQueryVerRange(SVnode* pVnode, SQueryTableDataCond* pCond
 static int32_t doBuildDataBlock(STsdbReader* pReader);
 static TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader);
 static bool    hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo);
-static bool    hasDataInSttBlock(SSttBlockReader* pSttBlockReader);
+static bool    hasDataInSttBlock(STableBlockScanInfo *pInfo);
 static void    initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter);
 static int32_t getInitialDelIndex(const SArray* pDelSkyline, int32_t order);
 static void    resetTableListIndex(SReaderStatus* pStatus);
@@ -1466,7 +1466,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
   SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
 
   int64_t tsLast = INT64_MIN;
-  if (hasDataInSttBlock(pSttBlockReader)) {
+  if (hasDataInSttBlock(pBlockScanInfo)) {
     tsLast = getCurrentKeyInSttBlock(pSttBlockReader);
   }
 
@@ -1485,7 +1485,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
   int64_t minKey = 0;
   if (pReader->info.order == TSDB_ORDER_ASC) {
     minKey = INT64_MAX;  // chosen the minimum value
-    if (minKey > tsLast && hasDataInSttBlock(pSttBlockReader)) {
+    if (minKey > tsLast && hasDataInSttBlock(pBlockScanInfo)) {
       minKey = tsLast;
     }
 
@@ -1498,7 +1498,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
     }
   } else {
     minKey = INT64_MIN;
-    if (minKey < tsLast && hasDataInSttBlock(pSttBlockReader)) {
+    if (minKey < tsLast && hasDataInSttBlock(pBlockScanInfo)) {
       minKey = tsLast;
     }
 
@@ -1705,7 +1705,7 @@ static int32_t mergeFileBlockAndSttBlock(STsdbReader* pReader, SSttBlockReader*
   }
 
   bool dataInDataFile = hasDataInFileBlock(pBlockData, pDumpInfo);
-  bool dataInSttFile = hasDataInSttBlock(pSttBlockReader);
+  bool dataInSttFile = hasDataInSttBlock(pBlockScanInfo);
 
   if (dataInDataFile && (!dataInSttFile)) {
     // no stt file block available, only data block exists
     return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, key, pReader);
@@ -1791,7 +1791,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
   TSDBROW* piRow = getValidMemRow(&pBlockScanInfo->iiter, pDelList, pReader);
 
   int64_t tsLast = INT64_MIN;
-  if (hasDataInSttBlock(pSttBlockReader)) {
+  if (hasDataInSttBlock(pBlockScanInfo)) {
     tsLast = getCurrentKeyInSttBlock(pSttBlockReader);
   }
 
@@ -1840,7 +1840,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
       minKey = key;
     }
 
-    if (minKey > tsLast && hasDataInSttBlock(pSttBlockReader)) {
+    if (minKey > tsLast && hasDataInSttBlock(pBlockScanInfo)) {
       minKey = tsLast;
     }
   } else {
@@ -1857,7 +1857,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
       minKey = key;
     }
 
-    if (minKey < tsLast && hasDataInSttBlock(pSttBlockReader)) {
+    if (minKey < tsLast && hasDataInSttBlock(pBlockScanInfo)) {
       minKey = tsLast;
     }
   }
@@ -2065,7 +2065,7 @@ static bool initSttBlockReader(SSttBlockReader* pSttBlockReader, STableBlockScan
   // the stt block reader has been initialized for this table.
   if (pSttBlockReader->uid == pScanInfo->uid) {
-    return hasDataInSttBlock(pSttBlockReader);
+    return hasDataInSttBlock(pScanInfo);
   }
 
   if (pSttBlockReader->uid != 0) {
@@ -2158,7 +2158,9 @@ static bool initSttBlockReader(SSttBlockReader* pSttBlockReader, STableBlockScan
   return hasData;
 }
 
-static bool hasDataInSttBlock(SSttBlockReader* pSttBlockReader) { return pSttBlockReader->mergeTree.pIter != NULL; }
+static bool hasDataInSttBlock(STableBlockScanInfo *pInfo) {
+  return pInfo->sttKeyInfo.status == STT_FILE_HAS_DATA;
+}
 
 bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo) {
   if ((pBlockData->nRow > 0) && (pBlockData->nRow != pDumpInfo->totalRows)) {
@@ -2733,7 +2735,7 @@ static int32_t doLoadSttBlockSequentially(STsdbReader* pReader) {
     int64_t st = taosGetTimestampUs();
     while (1) {
       // no data in stt block and block, no need to proceed.
-      if (!hasDataInSttBlock(pSttBlockReader)) {
+      if (!hasDataInSttBlock(pScanInfo)) {
         break;
       }
 
@@ -2850,7 +2852,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
       initSttBlockReader(pSttBlockReader, pScanInfo, pReader);
 
       // no data in stt block, no need to proceed.
-      while (hasDataInSttBlock(pSttBlockReader)) {
+      while (hasDataInSttBlock(pScanInfo)) {
         ASSERT(pScanInfo->sttKeyInfo.status == STT_FILE_HAS_DATA);
 
         code = buildComposedDataBlockImpl(pReader, pScanInfo, &pReader->status.fileBlockData, pSttBlockReader);
diff --git a/source/dnode/vnode/src/tsdb/tsdbReadUtil.c b/source/dnode/vnode/src/tsdb/tsdbReadUtil.c
index 3c26badc0e..a223a2dc2d 100644
--- a/source/dnode/vnode/src/tsdb/tsdbReadUtil.c
+++ b/source/dnode/vnode/src/tsdb/tsdbReadUtil.c
@@ -252,6 +252,7 @@ static void doCleanupInfoForNextFileset(STableBlockScanInfo* pScanInfo) {
   taosArrayClear(pScanInfo->pFileDelData);  // del data from each file set
   pScanInfo->cleanSttBlocks = false;
   pScanInfo->numOfRowsInStt = 0;
+  pScanInfo->sttBlockReturned = false;
   INIT_TIMEWINDOW(&pScanInfo->sttWindow);
   INIT_TIMEWINDOW(&pScanInfo->filesetWindow);
   pScanInfo->sttKeyInfo.status = STT_FILE_READER_UNINIT;
diff --git a/source/dnode/vnode/src/tsdb/tsdbRetention.c b/source/dnode/vnode/src/tsdb/tsdbRetention.c
index dc98f46ac5..9ecb2fd5ba 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRetention.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRetention.c
@@ -112,7 +112,7 @@ static int32_t tsdbCopyFileS3(SRTNer *rtner, const STFileObj *from, const STFile
     TSDB_CHECK_CODE(code, lino, _exit);
 
     char *object_name = taosDirEntryBaseName(fname);
-    code = s3PutObjectFromFile2(from->fname, object_name);
+    code = s3PutObjectFromFile2(from->fname, object_name, 1);
     TSDB_CHECK_CODE(code, lino, _exit);
 
     taosCloseFile(&fdFrom);
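The `hasDataInSttBlock` rework above changes what the predicate inspects: availability used to be read off the shared reader's merge-tree iterator, which goes stale once the reader is reinitialized for another table, and is now derived from the per-table scan state, so every call site simply passes its `STableBlockScanInfo`. The contrast, sketched:

```c
// Before: tied to the shared reader, stale after the reader moves on.
//   return pSttBlockReader->mergeTree.pIter != NULL;
// After: a per-table property that survives reader reuse.
bool hasData = (pScanInfo->sttKeyInfo.status == STT_FILE_HAS_DATA);
```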
diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c
index 1f951097a4..f77dbc5eee 100644
--- a/source/dnode/vnode/src/vnd/vnodeSvr.c
+++ b/source/dnode/vnode/src/vnd/vnodeSvr.c
@@ -1012,7 +1012,7 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t ver, void *pReq,
     char* keyJoined = taosStringBuilderGetResult(&sb, &len);
 
     if(pOriginRpc->info.conn.user != NULL && strlen(pOriginRpc->info.conn.user) > 0){
-      auditRecord(pOriginRpc, clusterId, "createTable", name.dbname, "", keyJoined, len);
+      auditAddRecord(pOriginRpc, clusterId, "createTable", name.dbname, "", keyJoined, len);
     }
 
     taosStringBuilderDestroy(&sb);
@@ -1236,7 +1236,7 @@ static int32_t vnodeProcessDropTbReq(SVnode *pVnode, int64_t ver, void *pReq, in
     char *keyJoined = taosStringBuilderGetResult(&sb, &len);
 
     if(pOriginRpc->info.conn.user != NULL && strlen(pOriginRpc->info.conn.user) > 0){
-      auditRecord(pOriginRpc, clusterId, "dropTable", name.dbname, "", keyJoined, len);
+      auditAddRecord(pOriginRpc, clusterId, "dropTable", name.dbname, "", keyJoined, len);
     }
 
     taosStringBuilderDestroy(&sb);
diff --git a/source/libs/audit/inc/auditInt.h b/source/libs/audit/inc/auditInt.h
index b6c6ec87e8..e5fed2e473 100644
--- a/source/libs/audit/inc/auditInt.h
+++ b/source/libs/audit/inc/auditInt.h
@@ -17,9 +17,12 @@
 #define _TD_AUDIT_INT_H_
 
 #include "audit.h"
+#include "tarray.h"
 
 typedef struct {
   SAuditCfg cfg;
+  SArray *records;
+  TdThreadMutex lock;
 } SAudit;
 
 #endif /*_TD_AUDIT_INT_H_*/
diff --git a/source/libs/audit/src/auditMain.c b/source/libs/audit/src/auditMain.c
index c408f0d87b..7616617ff0 100644
--- a/source/libs/audit/src/auditMain.c
+++ b/source/libs/audit/src/auditMain.c
@@ -14,6 +14,8 @@
  */
 
 #define _DEFAULT_SOURCE
+
+#include "tarray.h"
 #include "auditInt.h"
 #include "taoserror.h"
 #include "thttp.h"
@@ -21,25 +23,56 @@
 #include "tjson.h"
 #include "tglobal.h"
 #include "mnode.h"
+#include "audit.h"
 
 SAudit tsAudit = {0};
 char* tsAuditUri = "/audit";
+char* tsAuditBatchUri = "/audit-batch";
 
 int32_t auditInit(const SAuditCfg *pCfg) {
   tsAudit.cfg = *pCfg;
+  tsAudit.records = taosArrayInit(0, sizeof(SAuditRecord *));
+  taosThreadMutexInit(&tsAudit.lock, NULL);
   return 0;
 }
 
+void auditCleanup() {
+  tsLogFp = NULL;
+  taosArrayDestroy(tsAudit.records);
+  tsAudit.records = NULL;
+  taosThreadMutexDestroy(&tsAudit.lock);
+}
+
 extern void auditRecordImp(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
                            char *detail, int32_t len);
+extern void auditAddRecordImp(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
+                              char *detail, int32_t len);
+extern void auditSendRecordsInBatchImp();
 
 void auditRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2, char *detail,
                  int32_t len) {
   auditRecordImp(pReq, clusterId, operation, target1, target2, detail, len);
 }
 
+void auditAddRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
+                    char *detail, int32_t len) {
+  auditAddRecordImp(pReq, clusterId, operation, target1, target2, detail, len);
+}
+
+void auditSendRecordsInBatch(){
+  auditSendRecordsInBatchImp();
+}
+
 #ifndef TD_ENTERPRISE
 void auditRecordImp(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2, char *detail,
                     int32_t len) {
 }
+
+void auditAddRecordImp(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
+                       char *detail, int32_t len) {
+}
+
+void auditSendRecordsInBatchImp(){
+
+}
 #endif
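With the record array and mutex in place, audit entries can be queued per operation and shipped in one batch to `tsAuditBatchUri`. The `*Imp` functions are stubs in the community build, so the behavior sketched below assumes the enterprise implementation:

```c
// Sketch: queue one record per DDL operation, then flush the batch once,
// e.g. from a timer or an end-of-transaction hook (enterprise build only).
auditAddRecord(pRpc, clusterId, "createTable", name.dbname, "", detail, len);
auditAddRecord(pRpc, clusterId, "dropTable", name.dbname, "", detail, len);
auditSendRecordsInBatch();  // posts the queued records to tsAuditBatchUri
```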
diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h
index cba26c46b5..f7e55b71be 100644
--- a/source/libs/executor/inc/executorInt.h
+++ b/source/libs/executor/inc/executorInt.h
@@ -636,6 +636,7 @@ typedef struct SStreamEventAggOperatorInfo {
   bool         isHistoryOp;
   SArray*      historyWins;
   bool         reCkBlock;
+  bool         recvGetAll;
   SSDataBlock* pCheckpointRes;
   SFilterInfo* pStartCondInfo;
   SFilterInfo* pEndCondInfo;
@@ -748,7 +749,7 @@ void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numO
 void clearResultRowInitFlag(SqlFunctionCtx* pCtx, int32_t numOfOutput);
 
 SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pResultRowInfo, char* pData,
-                                   int16_t bytes, bool masterscan, uint64_t groupId, SExecTaskInfo* pTaskInfo,
+                                   int32_t bytes, bool masterscan, uint64_t groupId, SExecTaskInfo* pTaskInfo,
                                    bool isIntervalQuery, SAggSupporter* pSup, bool keepGroup);
 
 int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBlock* pSrcBlock, SqlFunctionCtx* pCtx,
@@ -837,6 +838,8 @@ void compactTimeWindow(SExprSupp* pSup, SStreamAggSupporter* pAggSup, STimeW
 int32_t releaseOutputBuf(void* pState, SRowBuffPos* pPos, SStateStore* pAPI);
 void    resetWinRange(STimeWindow* winRange);
 bool    checkExpiredData(SStateStore* pAPI, SUpdateInfo* pUpdateInfo, STimeWindowAggSupp* pTwSup, uint64_t tableId, TSKEY ts);
+int64_t getDeleteMark(SWindowPhysiNode* pWinPhyNode, int64_t interval);
+void    resetUnCloseSessionWinInfo(SSHashObj* winMap);
 
 int32_t encodeSSessionKey(void** buf, SSessionKey* key);
 void*   decodeSSessionKey(void* buf, SSessionKey* key);
diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c
index 1fa911e646..6cee79bff2 100644
--- a/source/libs/executor/src/executor.c
+++ b/source/libs/executor/src/executor.c
@@ -984,6 +984,21 @@ int32_t qSetStreamOperatorOptionForScanHistory(qTaskInfo_t tinfo) {
 
       qInfo("save stream param for state: %d, %" PRId64, pSup->calTrigger, pSup->deleteMark);
 
+      pSup->calTriggerSaved = pSup->calTrigger;
+      pSup->deleteMarkSaved = pSup->deleteMark;
+      pSup->calTrigger = STREAM_TRIGGER_AT_ONCE;
+      pSup->deleteMark = INT64_MAX;
+      pInfo->ignoreExpiredDataSaved = pInfo->ignoreExpiredData;
+      pInfo->ignoreExpiredData = false;
+    } else if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT) {
+      SStreamEventAggOperatorInfo* pInfo = pOperator->info;
+      STimeWindowAggSupp*          pSup = &pInfo->twAggSup;
+
+      ASSERT(pSup->calTrigger == STREAM_TRIGGER_AT_ONCE || pSup->calTrigger == STREAM_TRIGGER_WINDOW_CLOSE);
+      ASSERT(pSup->calTriggerSaved == 0 && pSup->deleteMarkSaved == 0);
+
+      qInfo("save stream param for state: %d, %" PRId64, pSup->calTrigger, pSup->deleteMark);
+
       pSup->calTriggerSaved = pSup->calTrigger;
       pSup->deleteMarkSaved = pSup->deleteMark;
       pSup->calTrigger = STREAM_TRIGGER_AT_ONCE;
diff --git a/source/libs/executor/src/executorInt.c b/source/libs/executor/src/executorInt.c
index 138da16324..95d26fdd0e 100644
--- a/source/libs/executor/src/executorInt.c
+++ b/source/libs/executor/src/executorInt.c
@@ -137,7 +137,7 @@ SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, i
  *           +----------+---------------+
  */
 SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pResultRowInfo, char* pData,
-                                   int16_t bytes, bool masterscan, uint64_t groupId, SExecTaskInfo* pTaskInfo,
+                                   int32_t bytes, bool masterscan, uint64_t groupId, SExecTaskInfo* pTaskInfo,
                                    bool isIntervalQuery, SAggSupporter* pSup, bool keepGroup) {
   SET_RES_WINDOW_KEY(pSup->keyBuf, pData, bytes, groupId);
   if (!keepGroup) {
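Widening `bytes` from `int16_t` to `int32_t` matters because a group or window key can be longer than `INT16_MAX` bytes; `TSDB_MAX_BYTES_PER_ROW` is on the order of 64 KB in this tree, so a 16-bit parameter silently wraps such lengths negative. A standalone illustration of the failure mode:

```c
#include <stdint.h>
#include <stdio.h>

int main(void) {
  int32_t keyLen = 40000;                // e.g. a long VARCHAR group key
  int16_t narrow = (int16_t)keyLen;      // what the old signature received
  printf("%d -> %d\n", keyLen, narrow);  // prints: 40000 -> -25536
  return 0;
}
```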
diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c
index eb18278870..1e9771edd6 100644
--- a/source/libs/executor/src/groupoperator.c
+++ b/source/libs/executor/src/groupoperator.c
@@ -65,7 +65,7 @@ typedef struct SPartitionOperatorInfo {
 static void*    getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInfo** pGroupInfo, int32_t len);
 static int32_t* setupColumnOffset(const SSDataBlock* pBlock, int32_t rowCapacity);
 static int32_t  setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo, int32_t numOfCols, char* pData,
-                                        int16_t bytes, uint64_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup);
+                                        int32_t bytes, uint64_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup);
 static SArray*  extractColumnInfo(SNodeList* pNodeList);
 
 static void freeGroupKey(void* param) {
@@ -1016,7 +1016,7 @@ _error:
 }
 
 int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo, int32_t numOfCols, char* pData,
-                                int16_t bytes, uint64_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup) {
+                                int32_t bytes, uint64_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup) {
   SExecTaskInfo*  pTaskInfo = pOperator->pTaskInfo;
   SResultRowInfo* pResultRowInfo = &binfo->resultRowInfo;
   SqlFunctionCtx* pCtx = pOperator->exprSupp.pCtx;
diff --git a/source/libs/executor/src/streameventwindowoperator.c b/source/libs/executor/src/streameventwindowoperator.c
index 914b95ba83..6adab74344 100644
--- a/source/libs/executor/src/streameventwindowoperator.c
+++ b/source/libs/executor/src/streameventwindowoperator.c
@@ -176,7 +176,7 @@ int32_t updateEventWindowInfo(SStreamAggSupporter* pAggSup, SEventWindowInfo* pW
 
   for (int32_t i = start; i < rows; ++i) {
     if (pTsData[i] >= maxTs) {
-      return i - 1 - start;
+      return i - start;
     }
 
     if (pWin->skey > pTsData[i]) {
@@ -486,6 +486,11 @@ static SSDataBlock* doStreamEventAgg(SOperatorInfo* pOperator) {
       return pInfo->pCheckpointRes;
     }
 
+    if (pInfo->recvGetAll) {
+      pInfo->recvGetAll = false;
+      resetUnCloseSessionWinInfo(pInfo->streamAggSup.pResultRows);
+    }
+
     setOperatorCompleted(pOperator);
     return NULL;
   }
@@ -510,6 +515,7 @@ static SSDataBlock* doStreamEventAgg(SOperatorInfo* pOperator) {
       deleteSessionWinState(&pInfo->streamAggSup, pBlock, pInfo->pSeUpdated, pInfo->pSeDeleted);
       continue;
     } else if (pBlock->info.type == STREAM_GET_ALL) {
+      pInfo->recvGetAll = true;
       getAllSessionWindow(pInfo->streamAggSup.pResultRows, pInfo->pSeUpdated);
       continue;
     } else if (pBlock->info.type == STREAM_CREATE_CHILD_TABLE) {
@@ -610,6 +616,10 @@ void streamEventReloadState(SOperatorInfo* pOperator) {
     qDebug("===stream=== reload state. try process result %" PRId64 ", %" PRIu64 ", index:%d", pSeKeyBuf[i].win.skey,
            pSeKeyBuf[i].groupId, i);
     getSessionWindowInfoByKey(pAggSup, pSeKeyBuf + i, &curInfo.winInfo);
+    // the event window has been deleted
+    if (!IS_VALID_SESSION_WIN(curInfo.winInfo)) {
+      continue;
+    }
     setEventWindowFlag(pAggSup, &curInfo);
     if (!curInfo.pWinFlag->startFlag || curInfo.pWinFlag->endFlag) {
       continue;
@@ -672,6 +682,7 @@ SOperatorInfo* createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhys
       .calTrigger = pEventNode->window.triggerType,
       .maxTs = INT64_MIN,
       .minTs = INT64_MAX,
+      .deleteMark = getDeleteMark(&pEventNode->window, 0),
   };
 
   initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);
@@ -720,6 +731,7 @@ SOperatorInfo* createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhys
 
   pInfo->pCheckpointRes = createSpecialDataBlock(STREAM_CHECKPOINT);
   pInfo->reCkBlock = false;
+  pInfo->recvGetAll = false;
 
   // for stream
   void* buff = NULL;
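The `updateEventWindowInfo` fix above is an off-by-one in the returned row count. With `pTsData = {1, 2, 3}`, `start = 0`, and `maxTs = 3`, the loop exits at `i = 2`; rows 0 and 1 both belong to the window, so the correct count is 2 (`i - start`), while the old `i - 1 - start` reported 1. A worked check:

```c
// Mirror of the loop's exit condition with small literal inputs.
int64_t pTsData[] = {1, 2, 3};
int32_t start = 0, rows = 3, i;
int64_t maxTs = 3;
for (i = start; i < rows; ++i) {
  if (pTsData[i] >= maxTs) break;  // stops at i == 2
}
// old result: i - 1 - start == 1 (drops a row); new: i - start == 2
```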
pSDat
@@ -801,7 +798,7 @@ static void doStreamIntervalAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDat
   }
   while (1) {
     bool isClosed = isCloseWindow(&nextWin, &pInfo->twAggSup);
-    if ((!IS_FINAL_INTERVAL_OP(pOperator) && pInfo->ignoreExpiredData &&
+    if ((!IS_FINAL_INTERVAL_OP(pOperator) && pInfo->ignoreExpiredData && pSDataBlock->info.type != STREAM_PULL_DATA &&
          checkExpiredData(&pInfo->stateStore, pInfo->pUpdateInfo, &pInfo->twAggSup, pSDataBlock->info.id.uid,
                           nextWin.ekey)) ||
         !inSlidingWindow(&pInfo->interval, &nextWin, &pSDataBlock->info)) {
@@ -1345,12 +1342,12 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
   return buildIntervalResult(pOperator);
 }
 
-static int64_t getDeleteMark(SIntervalPhysiNode* pIntervalPhyNode) {
-  if (pIntervalPhyNode->window.deleteMark <= 0) {
+int64_t getDeleteMark(SWindowPhysiNode* pWinPhyNode, int64_t interval) {
+  if (pWinPhyNode->deleteMark <= 0) {
     return DEAULT_DELETE_MARK;
   }
-  int64_t deleteMark = TMAX(pIntervalPhyNode->window.deleteMark, pIntervalPhyNode->window.watermark);
-  deleteMark = TMAX(deleteMark, pIntervalPhyNode->interval);
+  int64_t deleteMark = TMAX(pWinPhyNode->deleteMark, pWinPhyNode->watermark);
+  deleteMark = TMAX(deleteMark, interval);
   return deleteMark;
 }
 
@@ -1442,7 +1439,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
       .calTrigger = pIntervalPhyNode->window.triggerType,
       .maxTs = INT64_MIN,
       .minTs = INT64_MAX,
-      .deleteMark = getDeleteMark(pIntervalPhyNode),
+      .deleteMark = getDeleteMark(&pIntervalPhyNode->window, pIntervalPhyNode->interval),
       .deleteMarkSaved = 0,
       .calTriggerSaved = 0,
   };
@@ -2565,7 +2562,7 @@ void doStreamSessionSaveCheckpoint(SOperatorInfo* pOperator) {
   taosMemoryFree(buf);
 }
 
-static void resetUnCloseSessionWinInfo(SSHashObj* winMap) {
+void resetUnCloseSessionWinInfo(SSHashObj* winMap) {
   void*   pIte = NULL;
   int32_t iter = 0;
   while ((pIte = tSimpleHashIterate(winMap, pIte, &iter)) != NULL) {
@@ -3963,7 +3960,7 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys
       .calTrigger = pIntervalPhyNode->window.triggerType,
       .maxTs = INT64_MIN,
       .minTs = INT64_MAX,
-      .deleteMark = getDeleteMark(pIntervalPhyNode)};
+      .deleteMark = getDeleteMark(&pIntervalPhyNode->window, pIntervalPhyNode->interval)};
 
   ASSERTS(pInfo->twAggSup.calTrigger != STREAM_TRIGGER_MAX_DELAY, "trigger type should not be max delay");
diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c
index 25a3c509a7..93f3b6c109 100644
--- a/source/libs/function/src/builtinsimpl.c
+++ b/source/libs/function/src/builtinsimpl.c
@@ -5466,13 +5466,12 @@ bool blockDistSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo) {
 }
 
 int32_t blockDistFunction(SqlFunctionCtx* pCtx) {
-  const int32_t BLOCK_DIST_RESULT_ROWS = 24;
+  const int32_t BLOCK_DIST_RESULT_ROWS = 25;
 
   SInputColumnInfoData* pInput = &pCtx->input;
   SColumnInfoData*      pInputCol = pInput->pData[0];
-
-  SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
-  STableBlockDistInfo* pDistInfo = GET_ROWCELL_INTERBUF(pResInfo);
+  SResultRowEntryInfo*  pResInfo = GET_RES_INFO(pCtx);
+  STableBlockDistInfo*  pDistInfo = GET_ROWCELL_INTERBUF(pResInfo);
 
   STableBlockDistInfo p1 = {0};
   tDeserializeBlockDistInfo(varDataVal(pInputCol->pData), varDataLen(pInputCol->pData), &p1);
diff --git a/source/libs/index/test/CMakeLists.txt b/source/libs/index/test/CMakeLists.txt
index 2bc7353aa5..1ee835573b 100644
--- a/source/libs/index/test/CMakeLists.txt
+++ b/source/libs/index/test/CMakeLists.txt
@@ -164,7 +164,7 @@ IF(NOT TD_DARWIN)
     )
 
     add_test(
-        NAME idxtest
+        NAME idxTest
         COMMAND idxTest
     )
     add_test(
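Hoisting `getDeleteMark` to take the common `SWindowPhysiNode` plus an explicit interval lets non-interval stream operators reuse it; event windows have no interval, so they pass 0 and the result reduces to `TMAX(deleteMark, watermark)`. The two call shapes:

```c
// Interval operators: the interval length participates in the max().
int64_t mark1 = getDeleteMark(&pIntervalPhyNode->window, pIntervalPhyNode->interval);

// Event-window operator: no interval, so only deleteMark/watermark matter.
int64_t mark2 = getDeleteMark(&pEventNode->window, 0);
```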
diff --git a/source/libs/index/test/jsonUT.cc b/source/libs/index/test/jsonUT.cc
index 0e76980799..bee8e0dd85 100644
--- a/source/libs/index/test/jsonUT.cc
+++ b/source/libs/index/test/jsonUT.cc
@@ -60,6 +60,7 @@ class JsonEnv : public ::testing::Test {
   }
   virtual void TearDown() {
     indexJsonClose(index);
+    indexOptsDestroy(opts);
     printf("destory\n");
     taosMsleep(1000);
   }
@@ -152,6 +153,7 @@ TEST_F(JsonEnv, testWrite) {
     indexMultiTermQueryAdd(mq, q, QUERY_TERM);
     indexJsonSearch(index, mq, result);
     EXPECT_EQ(100, taosArrayGetSize(result));
+    taosArrayDestroy(result);
     indexMultiTermQueryDestroy(mq);
   }
 }
@@ -211,6 +213,7 @@ TEST_F(JsonEnv, testWriteMillonData) {
     indexJsonSearch(index, mq, result);
     EXPECT_EQ(10, taosArrayGetSize(result));
     indexMultiTermQueryDestroy(mq);
+    taosArrayDestroy(result);
   }
   {
     {
@@ -226,6 +229,7 @@ TEST_F(JsonEnv, testWriteMillonData) {
       indexJsonSearch(index, mq, result);
       EXPECT_EQ(0, taosArrayGetSize(result));
       indexMultiTermQueryDestroy(mq);
+      taosArrayDestroy(result);
     }
     {
       {
@@ -241,6 +245,7 @@ TEST_F(JsonEnv, testWriteMillonData) {
         indexJsonSearch(index, mq, result);
         EXPECT_EQ(10, taosArrayGetSize(result));
         indexMultiTermQueryDestroy(mq);
+        taosArrayDestroy(result);
       }
     }
   }
@@ -311,6 +316,7 @@ TEST_F(JsonEnv, testWriteJsonNumberData) {
     indexJsonSearch(index, mq, result);
     EXPECT_EQ(1000, taosArrayGetSize(result));
     indexMultiTermQueryDestroy(mq);
+    taosArrayDestroy(result);
   }
   {
     std::string colName("test");
@@ -325,6 +331,7 @@ TEST_F(JsonEnv, testWriteJsonNumberData) {
     indexJsonSearch(index, mq, result);
     EXPECT_EQ(0, taosArrayGetSize(result));
     indexMultiTermQueryDestroy(mq);
+    taosArrayDestroy(result);
   }
   {
     std::string colName("test");
@@ -340,6 +347,7 @@ TEST_F(JsonEnv, testWriteJsonNumberData) {
     indexJsonSearch(index, mq, result);
     EXPECT_EQ(1000, taosArrayGetSize(result));
     indexMultiTermQueryDestroy(mq);
+    taosArrayDestroy(result);
   }
   {
     std::string colName("test");
@@ -355,6 +363,7 @@ TEST_F(JsonEnv, testWriteJsonNumberData) {
     indexJsonSearch(index, mq, result);
     EXPECT_EQ(0, taosArrayGetSize(result));
     indexMultiTermQueryDestroy(mq);
+    taosArrayDestroy(result);
   }
   {
     std::string colName("test");
@@ -370,6 +379,7 @@ TEST_F(JsonEnv, testWriteJsonNumberData) {
     indexJsonSearch(index, mq, result);
     EXPECT_EQ(1000, taosArrayGetSize(result));
     indexMultiTermQueryDestroy(mq);
+    taosArrayDestroy(result);
   }
 }
 
@@ -413,6 +423,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT) {
     indexJsonSearch(index, mq, result);
     EXPECT_EQ(1000, taosArrayGetSize(result));
     indexMultiTermQueryDestroy(mq);
+    taosArrayDestroy(result);
   }
   {
     std::string colName("test1");
@@ -427,6 +438,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT) {
     indexJsonSearch(index, mq, result);
     EXPECT_EQ(0, taosArrayGetSize(result));
     indexMultiTermQueryDestroy(mq);
+    taosArrayDestroy(result);
   }
   {
     std::string colName("test1");
@@ -442,6 +454,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT) {
     indexJsonSearch(index, mq, result);
     EXPECT_EQ(1000, taosArrayGetSize(result));
     indexMultiTermQueryDestroy(mq);
+    taosArrayDestroy(result);
   }
   {
     std::string colName("test1");
@@ -456,6 +469,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT) {
     indexJsonSearch(index, mq, result);
     EXPECT_EQ(0, taosArrayGetSize(result));
     indexMultiTermQueryDestroy(mq);
+    taosArrayDestroy(result);
   }
   {
     std::string colName("test1");
@@ -470,6 +484,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT) {
     indexJsonSearch(index, mq, result);
     EXPECT_EQ(1000, taosArrayGetSize(result));
     indexMultiTermQueryDestroy(mq);
+    taosArrayDestroy(result);
   }
   {
     std::string colName("other_column");
@@ -499,6 +514,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT) {
     indexJsonSearch(index, mq, result);
     EXPECT_EQ(0, taosArrayGetSize(result));
     indexMultiTermQueryDestroy(mq);
+    taosArrayDestroy(result);
   }
   {
     std::string colName("test1");
@@ -527,6 +543,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT) {
     indexJsonSearch(index, mq, result);
     EXPECT_EQ(2000, taosArrayGetSize(result));
     indexMultiTermQueryDestroy(mq);
+    taosArrayDestroy(result);
   }
 }
 TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT2) {
@@ -553,6 +570,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT2) {
     int val = 9;
     Search(index, colName, TSDB_DATA_TYPE_INT, &val, sizeof(val), QUERY_GREATER_EQUAL, &res);
     EXPECT_EQ(1000, taosArrayGetSize(res));
+    taosArrayDestroy(res);
   }
   {
     SArray* res = NULL;
@@ -560,6 +578,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT2) {
     std::string colVal("xxxxxxxxxxxxxxx");
     Search(index, colName, TSDB_DATA_TYPE_BINARY, (void*)(colVal.c_str()), colVal.size(), QUERY_TERM, &res);
     EXPECT_EQ(1000, taosArrayGetSize(res));
+    taosArrayDestroy(res);
   }
 }
 TEST_F(JsonEnv, testWriteJsonTfileAndCache_FLOAT) {
@@ -583,6 +602,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_FLOAT) {
     float val = 1.9;
     Search(index, colName, TSDB_DATA_TYPE_FLOAT, &val, sizeof(val), QUERY_GREATER_EQUAL, &res);
     EXPECT_EQ(2000, taosArrayGetSize(res));
+    taosArrayDestroy(res);
   }
   {
     SArray* res = NULL;
@@ -590,6 +610,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_FLOAT) {
     float val = 2.1;
     Search(index, colName, TSDB_DATA_TYPE_FLOAT, &val, sizeof(val), QUERY_GREATER_EQUAL, &res);
     EXPECT_EQ(1000, taosArrayGetSize(res));
+    taosArrayDestroy(res);
   }
   {
     std::string colName("test1");
@@ -597,6 +618,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_FLOAT) {
     float val = 2.1;
     Search(index, colName, TSDB_DATA_TYPE_FLOAT, &val, sizeof(val), QUERY_GREATER_EQUAL, &res);
     EXPECT_EQ(1000, taosArrayGetSize(res));
+    taosArrayDestroy(res);
   }
 }
 TEST_F(JsonEnv, testWriteJsonTfileAndCache_DOUBLE) {
@@ -618,29 +640,34 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_DOUBLE) {
     double val = 1.9;
     Search(index, "test1", TSDB_DATA_TYPE_DOUBLE, &val, sizeof(val), QUERY_GREATER_EQUAL, &res);
     EXPECT_EQ(2000, taosArrayGetSize(res));
+    taosArrayDestroy(res);
   }
   {
     SArray* res = NULL;
     double val = 2.1;
     Search(index, "test1", TSDB_DATA_TYPE_DOUBLE, &val, sizeof(val), QUERY_GREATER_EQUAL, &res);
     EXPECT_EQ(1000, taosArrayGetSize(res));
+    taosArrayDestroy(res);
   }
   {
     SArray* res = NULL;
     double val = 2.1;
     Search(index, "test1", TSDB_DATA_TYPE_DOUBLE, &val, sizeof(val), QUERY_GREATER_EQUAL, &res);
     EXPECT_EQ(1000, taosArrayGetSize(res));
+    taosArrayDestroy(res);
   }
   {
     SArray* res = NULL;
     double val = 10.0;
     Search(index, "test1", TSDB_DATA_TYPE_DOUBLE, &val, sizeof(val), QUERY_LESS_EQUAL, &res);
     EXPECT_EQ(2000, taosArrayGetSize(res));
+    taosArrayDestroy(res);
   }
   {
     SArray* res = NULL;
     double val = 10.0;
     Search(index, "test1", TSDB_DATA_TYPE_DOUBLE, &val, sizeof(val), QUERY_LESS_THAN, &res);
     EXPECT_EQ(1000, taosArrayGetSize(res));
+    taosArrayDestroy(res);
   }
 }
diff --git a/source/libs/nodes/test/nodesTestMain.cpp b/source/libs/nodes/test/nodesTestMain.cpp
index a7f9a06611..356b13f4a7 100644
--- a/source/libs/nodes/test/nodesTestMain.cpp
+++ b/source/libs/nodes/test/nodesTestMain.cpp
@@ -53,6 +53,7 @@ TEST(NodesTest, traverseTest) {
   EXPECT_EQ(res, DEAL_RES_CONTINUE);
   EXPECT_EQ(nodeType(pRoot), QUERY_NODE_VALUE);
   EXPECT_EQ(string(((SValueNode*)pRoot)->literal), "18");
+  nodesDestroyNode(pRoot);
 }
 
 int main(int argc, char* argv[]) {
diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c
index 90cec4522f..b1cd10eac9 100644
--- a/source/libs/scalar/src/scalar.c
+++ b/source/libs/scalar/src/scalar.c
@@ -1367,8 +1367,8 @@ EDealRes sclRewriteCaseWhen(SNode **pNode, SScalarCtx *ctx) {
     } else {
       int32_t type = output.columnData->info.type;
       if (IS_VAR_DATA_TYPE(type)) {  // todo refactor
-        res->datum.p = output.columnData->pData;
-        output.columnData->pData = NULL;
+        res->datum.p = taosMemoryCalloc(varDataTLen(output.columnData->pData) + 1, sizeof(char));  // append a trailing \0 so the JSON value can be printed
+        memcpy(res->datum.p, output.columnData->pData, varDataTLen(output.columnData->pData));
       } else {
         nodesSetValueNodeValue(res, output.columnData->pData);
       }
diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c
index 7b8decc007..f987420ece 100644
--- a/source/libs/scheduler/src/schRemote.c
+++ b/source/libs/scheduler/src/schRemote.c
@@ -455,7 +455,6 @@ int32_t schHandleCallback(void *param, SDataBuf *pMsg, int32_t rspCode) {
             tstrerror(rspCode));
 
   SCH_ERR_JRET(schProcessOnCbBegin(&pJob, &pTask, pParam->queryId, pParam->refId, pParam->taskId));
-
   code = schHandleResponseMsg(pJob, pTask, pParam->execId, pMsg, rspCode);
   pMsg->pData = NULL;
diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c
index e2561de841..9458539a9d 100644
--- a/source/libs/stream/src/streamCheckpoint.c
+++ b/source/libs/stream/src/streamCheckpoint.c
@@ -515,7 +515,7 @@ static int uploadCheckpointToS3(char* id, char* path) {
     char object[PATH_MAX] = {0};
     snprintf(object, sizeof(object), "%s%s%s", id, TD_DIRSEP, name);
 
-    if (s3PutObjectFromFile2(filename, object) != 0) {
+    if (s3PutObjectFromFile2(filename, object, 0) != 0) {
       taosCloseDir(&pDir);
       return -1;
     }
diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c
index 807f120cb7..23cb6f5a35 100644
--- a/source/libs/stream/src/streamMeta.c
+++ b/source/libs/stream/src/streamMeta.c
@@ -31,7 +31,6 @@ int32_t streamMetaId = 0;
 int32_t taskDbWrapperId = 0;
 
 static void    metaHbToMnode(void* param, void* tmrId);
-static void    streamMetaClear(SStreamMeta* pMeta);
 static int32_t streamMetaBegin(SStreamMeta* pMeta);
 static void    streamMetaCloseImpl(void* arg);
@@ -395,41 +394,6 @@ _err:
   return NULL;
 }
 
-int32_t streamMetaReopen(SStreamMeta* pMeta) {
-  streamMetaClear(pMeta);
-
-  // NOTE: role should not be changed during reopen meta
-  pMeta->streamBackendRid = -1;
-  pMeta->streamBackend = NULL;
-
-  char* defaultPath = taosMemoryCalloc(1, strlen(pMeta->path) + 128);
-  sprintf(defaultPath, "%s%s%s", pMeta->path, TD_DIRSEP, "state");
-  taosRemoveDir(defaultPath);
-
-  char* newPath = taosMemoryCalloc(1, strlen(pMeta->path) + 128);
-  sprintf(newPath, "%s%s%s", pMeta->path, TD_DIRSEP, "received");
-
-  int32_t code = taosStatFile(newPath, NULL, NULL, NULL);
-  if (code == 0) {
-    // directory exists
-    code = taosRenameFile(newPath, defaultPath);
-    if (code != 0) {
-      terrno = TAOS_SYSTEM_ERROR(code);
-      stError("vgId:%d failed to rename file, from %s to %s, code:%s", pMeta->vgId, newPath, defaultPath,
-              tstrerror(terrno));
-
-      taosMemoryFree(defaultPath);
-      taosMemoryFree(newPath);
-      return -1;
-    }
-  }
-
-  taosMemoryFree(defaultPath);
-  taosMemoryFree(newPath);
-
-  return 0;
-}
-
 // todo refactor: the lock shoud be restricted in one function
 void streamMetaInitBackend(SStreamMeta* pMeta) {
   pMeta->streamBackend = streamBackendInit(pMeta->path, pMeta->chkpId, pMeta->vgId);
@@ -829,28 +793,27 @@ static void doClear(void* pKey, void* pVal, TBC* pCur, SArray* pRecycleList) {
   taosArrayDestroy(pRecycleList);
 }
 
-int32_t streamMetaReloadAllTasks(SStreamMeta* pMeta) {
-  if (pMeta == NULL) return 0;
-
-  return streamMetaLoadAllTasks(pMeta);
-}
 int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta) {
   TBC*     pCur = NULL;
-  int32_t  vgId = pMeta->vgId;
-
-  stInfo("vgId:%d load stream tasks from meta files", vgId);
-
-  if (tdbTbcOpen(pMeta->pTaskDb, &pCur, NULL) < 0) {
-    stError("vgId:%d failed to open stream meta, code:%s", vgId, tstrerror(terrno));
-    return -1;
-  }
-
   void*    pKey = NULL;
   int32_t  kLen = 0;
   void*    pVal = NULL;
   int32_t  vLen = 0;
   SDecoder decoder;
-  SArray*  pRecycleList = taosArrayInit(4, sizeof(STaskId));
+
+  if (pMeta == NULL) {
+    return TSDB_CODE_SUCCESS;
+  }
+
+  SArray* pRecycleList = taosArrayInit(4, sizeof(STaskId));
+  int32_t vgId = pMeta->vgId;
+  stInfo("vgId:%d load stream tasks from meta files", vgId);
+
+  if (tdbTbcOpen(pMeta->pTaskDb, &pCur, NULL) < 0) {
+    stError("vgId:%d failed to open stream meta, code:%s", vgId, tstrerror(terrno));
+    taosArrayDestroy(pRecycleList);
+    return -1;
+  }
 
   tdbTbcMoveToFirst(pCur);
   while (tdbTbcNext(pCur, &pKey, &kLen, &pVal, &vLen) == 0) {
diff --git a/source/libs/sync/inc/syncRaftLog.h b/source/libs/sync/inc/syncRaftLog.h
index de8bd81b30..137baab558 100644
--- a/source/libs/sync/inc/syncRaftLog.h
+++ b/source/libs/sync/inc/syncRaftLog.h
@@ -46,6 +46,7 @@ SyncIndex raftLogBeginIndex(struct SSyncLogStore* pLogStore);
 SyncIndex raftLogEndIndex(struct SSyncLogStore* pLogStore);
 int32_t   raftLogEntryCount(struct SSyncLogStore* pLogStore);
 SyncIndex raftLogLastIndex(struct SSyncLogStore* pLogStore);
+SyncIndex raftLogIndexRetention(struct SSyncLogStore* pLogStore, int64_t bytes);
 SyncTerm  raftLogLastTerm(struct SSyncLogStore* pLogStore);
 
 int32_t raftLogGetEntry(struct SSyncLogStore* pLogStore, SyncIndex index, SSyncRaftEntry** ppEntry);
diff --git a/source/libs/sync/inc/syncSnapshot.h b/source/libs/sync/inc/syncSnapshot.h
index f8ee99e8a0..66d8edfdfc 100644
--- a/source/libs/sync/inc/syncSnapshot.h
+++ b/source/libs/sync/inc/syncSnapshot.h
@@ -63,7 +63,6 @@ typedef struct SSyncSnapshotSender {
   int64_t  sendingMS;
   SyncTerm term;
   int64_t  startTime;
-  int64_t  waitTime;
   int64_t  lastSendTime;
   bool     finish;
diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c
index 199c7a1445..b8740a2858 100644
--- a/source/libs/sync/src/syncMain.c
+++ b/source/libs/sync/src/syncMain.c
@@ -305,6 +305,10 @@ SyncIndex syncMinMatchIndex(SSyncNode* pSyncNode) {
   return minMatchIndex;
 }
 
+static SyncIndex syncLogRetentionIndex(SSyncNode* pSyncNode, int64_t bytes) {
+  return pSyncNode->pLogStore->syncLogIndexRetention(pSyncNode->pLogStore, bytes);
+}
+
 int32_t syncBeginSnapshot(int64_t rid, int64_t lastApplyIndex) {
   SSyncNode* pSyncNode = syncNodeAcquire(rid);
   if (pSyncNode == NULL) {
@@ -331,7 +335,6 @@ int32_t syncBeginSnapshot(int64_t rid, int64_t lastApplyIndex) {
     } else {
       // vnode
       if (pSyncNode->replicaNum > 1) {
-        // multi replicas
        logRetention = SYNC_VNODE_LOG_RETENTION;
       }
     }
@@ -344,7 +347,9 @@ int32_t syncBeginSnapshot(int64_t rid, int64_t lastApplyIndex) {
       syncNodeRelease(pSyncNode);
       return 0;
     }
-    logRetention = TMAX(logRetention, lastApplyIndex - pSyncNode->minMatchIndex + logRetention);
+    SyncIndex retentionIndex =
+        TMAX(pSyncNode->minMatchIndex, syncLogRetentionIndex(pSyncNode, SYNC_WAL_LOG_RETENTION_SIZE));
+    logRetention += TMAX(0, lastApplyIndex - retentionIndex);
   }
 
 _DEL_WAL:
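The `syncBeginSnapshot` change caps WAL retention: instead of keeping everything since `minMatchIndex` (and double-counting `logRetention`), it keeps log back to the later of `minMatchIndex` and the size-based index reported by the log store. With hypothetical numbers — `lastApplyIndex = 1000`, `minMatchIndex = 100`, a size-based index of 900, and a base `logRetention` of 0:

```c
// old: logRetention = TMAX(0, 1000 - 100 + 0)  ->  900 extra entries kept
// new:
SyncIndex retentionIndex = TMAX(100, 900);                      // 900
int64_t   logRetention   = 0 + TMAX(0, 1000 - retentionIndex);  // 100
```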
diff --git a/source/libs/sync/src/syncRaftLog.c b/source/libs/sync/src/syncRaftLog.c
index b167f2ecb6..b9c6838fda 100644
--- a/source/libs/sync/src/syncRaftLog.c
+++ b/source/libs/sync/src/syncRaftLog.c
@@ -70,6 +70,7 @@ SSyncLogStore* logStoreCreate(SSyncNode* pSyncNode) {
   pLogStore->syncLogIsEmpty = raftLogIsEmpty;
   pLogStore->syncLogEntryCount = raftLogEntryCount;
   pLogStore->syncLogLastIndex = raftLogLastIndex;
+  pLogStore->syncLogIndexRetention = raftLogIndexRetention;
   pLogStore->syncLogLastTerm = raftLogLastTerm;
   pLogStore->syncLogAppendEntry = raftLogAppendEntry;
   pLogStore->syncLogGetEntry = raftLogGetEntry;
@@ -154,6 +155,15 @@ SyncIndex raftLogLastIndex(struct SSyncLogStore* pLogStore) {
   return lastVer;
 }
 
+SyncIndex raftLogIndexRetention(struct SSyncLogStore* pLogStore, int64_t bytes) {
+  SyncIndex          lastIndex;
+  SSyncLogStoreData* pData = pLogStore->data;
+  SWal*              pWal = pData->pWal;
+  SyncIndex          lastVer = walGetVerRetention(pWal, bytes);
+
+  return lastVer;
+}
+
 SyncIndex raftLogWriteIndex(struct SSyncLogStore* pLogStore) {
   SSyncLogStoreData* pData = pLogStore->data;
   SWal*              pWal = pData->pWal;
diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c
index 93e81fd8e2..f060e9da13 100644
--- a/source/libs/sync/src/syncSnapshot.c
+++ b/source/libs/sync/src/syncSnapshot.c
@@ -23,8 +23,9 @@
 #include "syncReplication.h"
 #include "syncUtil.h"
 
+static SyncIndex syncNodeGetSnapBeginIndex(SSyncNode *ths);
+
 static void syncSnapBufferReset(SSyncSnapBuffer *pBuf) {
-  taosThreadMutexLock(&pBuf->mutex);
   for (int64_t i = pBuf->start; i < pBuf->end; ++i) {
     if (pBuf->entryDeleteCb) {
       pBuf->entryDeleteCb(pBuf->entries[i % pBuf->size]);
@@ -34,7 +35,6 @@ static void syncSnapBufferReset(SSyncSnapBuffer *pBuf) {
   pBuf->start = SYNC_SNAPSHOT_SEQ_BEGIN + 1;
   pBuf->end = pBuf->start;
   pBuf->cursor = pBuf->start - 1;
-  taosThreadMutexUnlock(&pBuf->mutex);
 }
 
 static void syncSnapBufferDestroy(SSyncSnapBuffer **ppBuf) {
@@ -81,7 +81,6 @@ SSyncSnapshotSender *snapshotSenderCreate(SSyncNode *pSyncNode, int32_t replicaI
   pSender->replicaIndex = replicaIndex;
   pSender->term = raftStoreGetTerm(pSyncNode);
   pSender->startTime = -1;
-  pSender->waitTime = -1;
   pSender->pSyncNode->pFsm->FpGetSnapshotInfo(pSender->pSyncNode->pFsm, &pSender->snapshot);
   pSender->finish = false;
 
@@ -108,6 +107,19 @@ void syncSnapBlockDestroy(void *ptr) {
   taosMemoryFree(pBlk);
 }
 
+static int32_t snapshotSenderClearInfoData(SSyncSnapshotSender *pSender) {
+  if (pSender->snapshotParam.data) {
+    taosMemoryFree(pSender->snapshotParam.data);
+    pSender->snapshotParam.data = NULL;
+  }
+
+  if (pSender->snapshot.data) {
+    taosMemoryFree(pSender->snapshot.data);
+    pSender->snapshot.data = NULL;
+  }
+  return 0;
+}
+
 void snapshotSenderDestroy(SSyncSnapshotSender *pSender) {
   if (pSender == NULL) return;
 
@@ -122,10 +134,8 @@ void snapshotSenderDestroy(SSyncSnapshotSender *pSender) {
     syncSnapBufferDestroy(&pSender->pSndBuf);
   }
 
-  if (pSender->snapshotParam.data) {
-    taosMemoryFree(pSender->snapshotParam.data);
-    pSender->snapshotParam.data = NULL;
-  }
+  snapshotSenderClearInfoData(pSender);
+
   // free sender
   taosMemoryFree(pSender);
 }
@@ -198,20 +208,24 @@ void snapshotSenderStop(SSyncSnapshotSender *pSender, bool finish) {
   // update flag
   int8_t stopped = !atomic_val_compare_exchange_8(&pSender->start, true, false);
   if (stopped) return;
+  taosThreadMutexLock(&pSender->pSndBuf->mutex);
+  {
+    pSender->finish = finish;
 
-  pSender->finish = finish;
-  pSender->waitTime = -1;
+    // close reader
+    if (pSender->pReader != NULL) {
+      pSender->pSyncNode->pFsm->FpSnapshotStopRead(pSender->pSyncNode->pFsm, pSender->pReader);
+      pSender->pReader = NULL;
+    }
 
-  // close reader
-  if (pSender->pReader != NULL) {
-    pSender->pSyncNode->pFsm->FpSnapshotStopRead(pSender->pSyncNode->pFsm, pSender->pReader);
-    pSender->pReader = NULL;
+    syncSnapBufferReset(pSender->pSndBuf);
+
+    snapshotSenderClearInfoData(pSender);
+
+    SRaftId destId = pSender->pSyncNode->replicasId[pSender->replicaIndex];
+    sSInfo(pSender, "snapshot sender stop, to dnode:%d, finish:%d", DID(&destId), finish);
   }
-
-  syncSnapBufferReset(pSender->pSndBuf);
-
-  SRaftId destId = pSender->pSyncNode->replicasId[pSender->replicaIndex];
-  sSInfo(pSender, "snapshot sender stop, to dnode:%d, finish:%d", DID(&destId), finish);
+  taosThreadMutexUnlock(&pSender->pSndBuf->mutex);
 }
 
 int32_t syncSnapSendMsg(SSyncSnapshotSender *pSender, int32_t seq, void *pBlock, int32_t blockLen, int32_t typ) {
@@ -324,6 +338,9 @@ int32_t snapshotReSend(SSyncSnapshotSender *pSender) {
   SSyncSnapBuffer *pSndBuf = pSender->pSndBuf;
   int32_t          code = -1;
   taosThreadMutexLock(&pSndBuf->mutex);
+  if (pSender->pReader == NULL || pSender->finish || !snapshotSenderIsStart(pSender)) {
+    goto _out;
+  }
 
   for (int32_t seq = pSndBuf->cursor + 1; seq < pSndBuf->end; ++seq) {
     SyncSnapBlock *pBlk = pSndBuf->entries[seq % pSndBuf->size];
@@ -338,6 +355,12 @@ int32_t snapshotReSend(SSyncSnapshotSender *pSender) {
     pBlk->sendTimeMs = nowMs;
   }
 
+  if (pSender->seq != SYNC_SNAPSHOT_SEQ_END && pSndBuf->end <= pSndBuf->start) {
+    if (snapshotSend(pSender) != 0) {
+      goto _out;
+    }
+  }
+
   if (pSender->seq == SYNC_SNAPSHOT_SEQ_END && pSndBuf->end <= pSndBuf->start) {
     if (syncSnapSendMsg(pSender, pSender->seq, NULL, 0, 0) != 0) {
       goto _out;
@@ -361,14 +384,7 @@ int32_t syncNodeStartSnapshot(SSyncNode *pSyncNode, SRaftId *pDestId) {
     return 0;
   }
 
-  int64_t timeNow = taosGetTimestampMs();
-  if (pSender->waitTime <= 0) {
-    pSender->waitTime = timeNow + SNAPSHOT_WAIT_MS;
-  }
-  if (timeNow < pSender->waitTime) {
-    sSDebug(pSender, "snapshot sender waitTime not expired yet, ignore");
-    return 0;
-  }
+  taosMsleep(1);
 
   int32_t code = snapshotSenderStart(pSender);
   if (code != 0) {
@@ -416,6 +432,19 @@ SSyncSnapshotReceiver *snapshotReceiverCreate(SSyncNode *pSyncNode, SRaftId from
   return pReceiver;
 }
 
+static int32_t snapshotReceiverClearInfoData(SSyncSnapshotReceiver *pReceiver) {
+  if (pReceiver->snapshotParam.data) {
+    taosMemoryFree(pReceiver->snapshotParam.data);
+    pReceiver->snapshotParam.data = NULL;
+  }
+
+  if (pReceiver->snapshot.data) {
+    taosMemoryFree(pReceiver->snapshot.data);
+    pReceiver->snapshot.data = NULL;
+  }
+  return 0;
+}
+
 void snapshotReceiverDestroy(SSyncSnapshotReceiver *pReceiver) {
   if (pReceiver == NULL) return;
 
@@ -429,22 +458,13 @@ void snapshotReceiverDestroy(SSyncSnapshotReceiver *pReceiver) {
     pReceiver->pWriter = NULL;
   }
 
-  // free data of snapshot info
-  if (pReceiver->snapshotParam.data) {
-    taosMemoryFree(pReceiver->snapshotParam.data);
-    pReceiver->snapshotParam.data = NULL;
-  }
-
-  if (pReceiver->snapshot.data) {
-    taosMemoryFree(pReceiver->snapshot.data);
-    pReceiver->snapshot.data = NULL;
-  }
-
   // free snap buf
   if (pReceiver->pRcvBuf) {
     syncSnapBufferDestroy(&pReceiver->pRcvBuf);
   }
 
+  snapshotReceiverClearInfoData(pReceiver);
+
   // free receiver
   taosMemoryFree(pReceiver);
 }
@@ -504,7 +524,9 @@ void snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *p
   pReceiver->term = pPreMsg->term;
   pReceiver->fromId = pPreMsg->srcId;
   pReceiver->startTime = pPreMsg->startTime;
-  ASSERT(pReceiver->startTime);
+
+  pReceiver->snapshotParam.start = syncNodeGetSnapBeginIndex(pReceiver->pSyncNode);
+  pReceiver->snapshotParam.end = -1;
 
   sRInfo(pReceiver, "snapshot receiver start, from dnode:%d.", DID(&pReceiver->fromId));
 }
@@ -514,19 +536,24 @@ void snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver) {
   int8_t stopped = !atomic_val_compare_exchange_8(&pReceiver->start, true, false);
   if (stopped) return;
-
-  if (pReceiver->pWriter != NULL) {
-    int32_t ret = pReceiver->pSyncNode->pFsm->FpSnapshotStopWrite(pReceiver->pSyncNode->pFsm, pReceiver->pWriter, false,
-                                                                  &pReceiver->snapshot);
-    if (ret != 0) {
-      sRError(pReceiver, "snapshot receiver stop write failed since %s", terrstr());
+  taosThreadMutexLock(&pReceiver->pRcvBuf->mutex);
+  {
+    if (pReceiver->pWriter != NULL) {
+      int32_t ret = pReceiver->pSyncNode->pFsm->FpSnapshotStopWrite(pReceiver->pSyncNode->pFsm, pReceiver->pWriter,
+                                                                    false, &pReceiver->snapshot);
+      if (ret != 0) {
+        sRError(pReceiver, "snapshot receiver stop write failed since %s", terrstr());
+      }
+      pReceiver->pWriter = NULL;
+    } else {
+      sRInfo(pReceiver, "snapshot receiver stop, writer is null");
    }
-    pReceiver->pWriter = NULL;
-  } else {
-    sRInfo(pReceiver, "snapshot receiver stop, writer is null");
-  }
 
-  syncSnapBufferReset(pReceiver->pRcvBuf);
+    syncSnapBufferReset(pReceiver->pRcvBuf);
+
+    snapshotReceiverClearInfoData(pReceiver);
+  }
+  taosThreadMutexUnlock(&pReceiver->pRcvBuf->mutex);
 }
 
 static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pMsg) {
@@ -656,7 +683,10 @@ static int32_t syncSnapReceiverExchgSnapInfo(SSyncNode *pSyncNode, SSyncSnapshot
   memcpy(pInfo->data, pMsg->data, pMsg->dataLen);
 
   // exchange snap info
-  pSyncNode->pFsm->FpGetSnapshotInfo(pSyncNode->pFsm, pInfo);
+  if (pSyncNode->pFsm->FpGetSnapshotInfo(pSyncNode->pFsm, pInfo) != 0) {
+    sRError(pReceiver, "failed to get snapshot info. type: %d", pMsg->payloadType);
+    goto _out;
+  }
   SSyncTLV *datHead = pInfo->data;
   if (datHead->typ != TDMT_SYNC_PREP_SNAPSHOT_REPLY) {
     sRError(pReceiver, "unexpected data typ in data of snapshot info. typ: %d", datHead->typ);
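Both stop paths in `syncSnapshot.c` now perform their cleanup inside the snap-buffer mutex, which is why `syncSnapBufferReset` dropped its internal locking: it is only ever called with the mutex already held. The resulting shape, sketched for the sender side:

```c
// Sketch of the stop path after this change: the caller owns the buffer
// mutex for the whole critical section.
taosThreadMutexLock(&pSender->pSndBuf->mutex);
{
  // ... stop the FSM reader ...
  syncSnapBufferReset(pSender->pSndBuf);  // no longer locks internally
  snapshotSenderClearInfoData(pSender);   // free snapshotParam/snapshot data
}
taosThreadMutexUnlock(&pSender->pSndBuf->mutex);
```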
typ: %d", datHead->typ); diff --git a/source/libs/tfs/test/tfsTest.cpp b/source/libs/tfs/test/tfsTest.cpp index 9bbf6bc729..1f16e585ae 100644 --- a/source/libs/tfs/test/tfsTest.cpp +++ b/source/libs/tfs/test/tfsTest.cpp @@ -218,6 +218,7 @@ TEST_F(TfsTest, 04_File) { EXPECT_STREQ(outfile.aname, file0.aname); EXPECT_STREQ(outfile.rname, "fname"); EXPECT_EQ(outfile.pTfs, pTfs); + taosMemoryFree(ret); } { diff --git a/source/libs/transport/test/CMakeLists.txt b/source/libs/transport/test/CMakeLists.txt index da4cda5dc7..e68e93c48e 100644 --- a/source/libs/transport/test/CMakeLists.txt +++ b/source/libs/transport/test/CMakeLists.txt @@ -106,4 +106,3 @@ add_test( NAME transUtilUt COMMAND transportTest ) - diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 933014466a..b897eb4922 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -654,6 +654,23 @@ _err: return -1; } +int64_t walGetVerRetention(SWal* pWal, int64_t bytes) { + int64_t ver = -1; + int64_t totSize = 0; + taosThreadMutexLock(&pWal->mutex); + int32_t fileIdx = taosArrayGetSize(pWal->fileInfoSet); + while (--fileIdx) { + SWalFileInfo* pInfo = taosArrayGet(pWal->fileInfoSet, fileIdx); + if (totSize >= bytes) { + ver = pInfo->lastVer; + break; + } + totSize += pInfo->fileSize; + } + taosThreadMutexUnlock(&pWal->mutex); + return ver + 1; +} + int walCheckAndRepairIdx(SWal* pWal) { int32_t sz = taosArrayGetSize(pWal->fileInfoSet); int32_t fileIdx = sz; diff --git a/source/libs/wal/test/walMetaTest.cpp b/source/libs/wal/test/walMetaTest.cpp index 70d8921be3..fb64bec722 100644 --- a/source/libs/wal/test/walMetaTest.cpp +++ b/source/libs/wal/test/walMetaTest.cpp @@ -441,4 +441,5 @@ TEST_F(WalRetentionEnv, repairMeta1) { EXPECT_EQ(newStr[j], pRead->pHead->head.body[j]); } } + walCloseReader(pRead); } diff --git a/source/os/src/osString.c b/source/os/src/osString.c index 8aac606473..9119c1d470 100644 --- a/source/os/src/osString.c +++ b/source/os/src/osString.c @@ -24,6 +24,14 @@ extern int wcwidth(wchar_t c); extern int wcswidth(const wchar_t *s, size_t n); +char *tstrdup(const char *str) { +#ifdef WINDOWS + return _strdup(str); +#else + return strdup(str); +#endif +} + #ifdef WINDOWS char *strsep(char **stringp, const char *delim) { char *s; diff --git a/source/os/test/osTests.cpp b/source/os/test/osTests.cpp index aad3eda9aa..16660a9477 100644 --- a/source/os/test/osTests.cpp +++ b/source/os/test/osTests.cpp @@ -111,11 +111,6 @@ void fileOperateOnBusy(void *param) { ret = taosUnLockFile(pFile); printf("On busy thread unlock file ret:%d\n", ret); -#ifdef _TD_DARWIN_64 - ASSERT_EQ(ret, 0); -#else - ASSERT_NE(ret, 0); -#endif ret = taosCloseFile(&pFile); printf("On busy thread close file ret:%d\n", ret); @@ -134,6 +129,8 @@ TEST(osTest, osFile) { printf("create file success\n"); taosCloseFile(&pOutFD); + taosCloseFile(&pOutFD); + TdFilePtr pFile = taosOpenFile(fname, TD_FILE_CREATE | TD_FILE_WRITE); printf("open file\n"); ASSERT_NE(pFile, nullptr); diff --git a/source/util/src/tarray.c b/source/util/src/tarray.c index 26d149b5b5..bc98461592 100644 --- a/source/util/src/tarray.c +++ b/source/util/src/tarray.c @@ -430,6 +430,7 @@ int32_t taosArraySearchIdx(const SArray* pArray, const void* key, __compar_fn_t return item == NULL ? 
-1 : (int32_t)((char*)item - (char*)pArray->pData) / pArray->elemSize; } +#ifdef BUILD_NO_CALL static int32_t taosArrayPartition(SArray* pArray, int32_t i, int32_t j, __ext_compar_fn_t fn, const void* userData) { void* key = taosArrayGetP(pArray, i); while (i < j) { @@ -485,6 +486,7 @@ static void taosArrayInsertSort(SArray* pArray, __ext_compar_fn_t fn, const void } } } +#endif int32_t taosEncodeArray(void** buf, const SArray* pArray, FEncode encode) { int32_t tlen = 0; diff --git a/source/util/src/tcompare.c b/source/util/src/tcompare.c index 4bacda48d2..b6b71855f3 100644 --- a/source/util/src/tcompare.c +++ b/source/util/src/tcompare.c @@ -1097,7 +1097,8 @@ int32_t patternMatch(const char *pattern, size_t psize, const char *str, size_t c1 = str[j++]; ++nMatchChar; - if (c == '\\' && pattern[i] == '_' && c1 == '_') { + if (c == '\\' && pattern[i] == c1 && + (c1 == '_' || c1 == '%')) { i++; continue; } diff --git a/source/util/test/CMakeLists.txt b/source/util/test/CMakeLists.txt index 94f8deee44..f4f3880388 100644 --- a/source/util/test/CMakeLists.txt +++ b/source/util/test/CMakeLists.txt @@ -33,6 +33,14 @@ ENDIF() INCLUDE_DIRECTORIES(${TD_SOURCE_DIR}/src/util/inc) +# arrayTest +add_executable(arrayTest "arrayTest.cpp") +target_link_libraries(arrayTest os util gtest_main) +add_test( + NAME arrayTest + COMMAND arrayTest +) + # # freelistTest # add_executable(freelistTest "") # target_sources(freelistTest diff --git a/source/util/test/arrayTest.cpp b/source/util/test/arrayTest.cpp index a579837791..307719b984 100644 --- a/source/util/test/arrayTest.cpp +++ b/source/util/test/arrayTest.cpp @@ -82,3 +82,138 @@ TEST(arrayTest, array_search_test) { taosArrayDestroy(pa); } + +// call taosArrayResize +TEST(arrayTest, array_data_correct) { + SArray* pa = (SArray*)taosArrayInit(1, sizeof(int32_t)); + SArray* pa1 = (SArray*)taosArrayInit(1, sizeof(int32_t)); + size_t cnt = 1000; + + + for (int32_t i = 0; i < cnt; ++i) { + taosArrayPush(pa, &i); + int32_t v = cnt + i; + taosArrayPush(pa1, &v); + } + ASSERT_EQ(taosArrayGetSize(pa), cnt); + ASSERT_EQ(taosArrayAddBatch(pa, NULL, 0), nullptr); + ASSERT_NE(taosArrayAddBatch(pa, taosArrayGet(pa1, 0), cnt), nullptr); + + int32_t* pv = NULL; + for (int32_t i = 0; i < cnt*2; i++) { + pv = (int32_t*)taosArrayGet(pa, i); + ASSERT_EQ(*pv, i); + } + + taosArrayDestroy(pa); + taosArrayDestroy(pa1); +} + +// free +static void arrayFree(void *param) { + void *pItem = *(void **)param; + if (pItem != NULL) { + taosMemoryFree(pItem); + } +} + +// string compare +static int32_t strCompare(const void *a, const void *b) { + const char *x = *(const char **)a; + const char *y = *(const char **)b; + + return strcmp(x, y); +} + +// int32 compare +static int int32Compare(const void* a, const void* b) { + int32_t l = *(int32_t*)a; + int32_t r = *(int32_t*)b; + return l - r; +} + +// no need free +TEST(arrayTest, check_duplicate_nofree) { + // no need free item + int32_t count = 5; + SArray* pa = taosArrayInit(1, sizeof(int32_t)); + for (int32_t i = 1; i <= count; i++) { + for (int32_t j = 0; j < i; j++) { + taosArrayPush(pa, &i); + //printf(" nofree put i=%d v=%d\n",i, i); + } + } + + taosArrayRemoveDuplicate(pa, int32Compare, NULL); + printf("nofree taosArrayRemoveDuplicate size=%d\n", (int32_t)taosArrayGetSize(pa)); + ASSERT_EQ(taosArrayGetSize(pa), count); + for (int32_t i = 1; i <= count; i++) { + int32_t v = *(int32_t*)taosArrayGet(pa, i-1); + //printf(" nofree get i=%d v=%d\n",i, v); + ASSERT_EQ(v, i); + } + + taosArrayDestroy(pa); +} + +// need free +TEST(arrayTest, 
check_duplicate_needfree) { + // no need free item + int32_t count = 5; + const char* format="hello-word-%d"; + SArray *pa = taosArrayInit(1, sizeof(char *)); + for (int32_t i = 1; i <= count; i++) { + for (int32_t j = 0; j < i; j++) { + char *v = (char *)taosMemoryCalloc(100, sizeof(char)); + sprintf(v, format, i); + //printf(" needfree put i=%d v=%s\n", i, v); + taosArrayPush(pa, &v); + } + } + + taosArrayRemoveDuplicate(pa, strCompare, arrayFree); + printf("needfree taosArrayRemoveDuplicate size=%d\n", (int32_t)taosArrayGetSize(pa)); + ASSERT_EQ(taosArrayGetSize(pa), count); + char value[100]; + for (int32_t i = 1; i <= count; i++) { + sprintf(value, format, i); + char * v = (char *)taosArrayGetP(pa, i - 1); + //printf(" needfree get i=%d v=%s\n", i, v); + ASSERT_STREQ(v, value); + } + + taosArrayClearP(pa, taosMemoryFree); + taosArrayDestroyP(pa, taosMemoryFree); +} + +// over all +TEST(arrayTest, check_overall) { + + ASSERT_EQ(taosArrayInit(1, 0), nullptr); + ASSERT_EQ(taosArrayGet(NULL, 1), nullptr); + ASSERT_EQ(taosArrayGetP(NULL, 1), nullptr); + ASSERT_EQ(taosArrayInsert(NULL, 1, NULL), nullptr); + + //ASSERT_EQ(taosArrayInit(0x10000000, 10000), nullptr); + //ASSERT_EQ(taosArrayInit_s(10000,0x10000000-1), nullptr); + + SArray* pa = taosArrayInit(1, sizeof(uint64_t)); + ASSERT_EQ(taosArrayGet(pa, 10), nullptr); + ASSERT_EQ(taosArrayGetLast(pa), nullptr); + + uint64_t v = 100; + taosArrayPush(pa, &v); + taosArrayPopFrontBatch(pa, 100); + FDelete fnNull = NULL; + taosArrayClearEx(pa, fnNull); + taosArrayDestroyEx(pa, fnNull); + + int32_t count = 5; + uint64_t list[5]= {1,2,3,4,5}; + + SArray* pb = taosArrayFromList(list, count, sizeof(uint64_t)); + for (int32_t i=0; i < count; i++) { + ASSERT_EQ(*(uint64_t*)taosArrayGet(pb, i), list[i]); + } + taosArrayDestroy(pb); +} \ No newline at end of file diff --git a/source/util/test/pageBufferTest.cpp b/source/util/test/pageBufferTest.cpp index 50d3656ccd..d31ad011d8 100644 --- a/source/util/test/pageBufferTest.cpp +++ b/source/util/test/pageBufferTest.cpp @@ -217,6 +217,7 @@ void testFlushAndReadBackBuffer() { pPg = (SFilePage*)getBufPage(pBuf, pageId); ASSERT_TRUE(checkBufVarData(pPg, rowData + 3, len)); destroyDiskbasedBuf(pBuf); + taosMemoryFree(rowData); } } // namespace diff --git a/source/util/test/utilTests.cpp b/source/util/test/utilTests.cpp index ff1d91aa9d..01a55ae710 100644 --- a/source/util/test/utilTests.cpp +++ b/source/util/test/utilTests.cpp @@ -204,6 +204,11 @@ TEST(utilTest, char_pattern_match_test) { const char* str12 = NULL; ret = patternMatch(pattern12, 4, str12, 0, &pInfo); ASSERT_EQ(ret, TSDB_PATTERN_NOMATCH); + + const char* pattern13 = "a\\%c"; + const char* str13 = "a%c"; + ret = patternMatch(pattern13, 5, str13, strlen(str13), &pInfo); + ASSERT_EQ(ret, TSDB_PATTERN_MATCH); } TEST(utilTest, char_pattern_match_no_terminated) { diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 59cbbb3147..6266946db3 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,4 +1,4 @@ -#ADD_SUBDIRECTORY(examples/c) -ADD_SUBDIRECTORY(tsim) -ADD_SUBDIRECTORY(test/c) -#ADD_SUBDIRECTORY(comparisonTest/tdengine) + +if(${BUILD_TEST}) + add_subdirectory(taosc_test) +endif(${BUILD_TEST}) \ No newline at end of file diff --git a/tests/army/empty.py b/tests/army/empty.py new file mode 100644 index 0000000000..e3a3f57c74 --- /dev/null +++ b/tests/army/empty.py @@ -0,0 +1,40 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. 
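Not part of the patch, but for context: the `patternMatch` change above means a backslash now escapes `%` as well as `_` in LIKE patterns. A rough end-to-end sketch of what that looks like at the SQL level, assuming a locally running server and the `taos` Python connector (table and database names are hypothetical, and exact escape behavior may vary by server version):

```python
import taos  # assumes the TDengine Python connector is installed

conn = taos.connect(host="localhost", user="root", password="taosdata")
conn.execute("create database if not exists pat_demo")
conn.execute("create table if not exists pat_demo.t (ts timestamp, c1 varchar(20))")
conn.execute("insert into pat_demo.t values (now, 'a%c') (now+1s, 'abc')")

# With the fix, '\%' should match a literal '%' instead of acting as a
# wildcard, so only the row containing 'a%c' is expected back.
result = conn.query(r"select c1 from pat_demo.t where c1 like 'a\%c'")
for row in result:
    print(row)  # expected: ('a%c',)
conn.close()
```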
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 59cbbb3147..6266946db3 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -1,4 +1,4 @@
-#ADD_SUBDIRECTORY(examples/c)
-ADD_SUBDIRECTORY(tsim)
-ADD_SUBDIRECTORY(test/c)
-#ADD_SUBDIRECTORY(comparisonTest/tdengine)
+
+if(${BUILD_TEST})
+    add_subdirectory(taosc_test)
+endif(${BUILD_TEST})
\ No newline at end of file
diff --git a/tests/army/empty.py b/tests/army/empty.py
new file mode 100644
index 0000000000..e3a3f57c74
--- /dev/null
+++ b/tests/army/empty.py
@@ -0,0 +1,40 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import time
+
+import taos
+from frame.log import *
+from frame.cases import *
+from frame.sql import *
+
+class TDTestCase:
+    # init
+    def init(self, conn, logSql, replicaVar=1):
+        self.replicaVar = int(replicaVar)
+        tdLog.debug(f"start to execute {__file__}")
+        tdSql.init(conn.cursor(), True)
+
+    # run
+    def run(self):
+        tdLog.info(f"hello world.")
+
+    # stop
+    def stop(self):
+        tdSql.close()
+        tdLog.success(f"{__file__} successfully executed")
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/army/enterprise/multi-level/mlevel_basic.py b/tests/army/enterprise/multi-level/mlevel_basic.py
new file mode 100644
index 0000000000..e3a3f57c74
--- /dev/null
+++ b/tests/army/enterprise/multi-level/mlevel_basic.py
@@ -0,0 +1,40 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import time
+
+import taos
+from frame.log import *
+from frame.cases import *
+from frame.sql import *
+
+class TDTestCase:
+    # init
+    def init(self, conn, logSql, replicaVar=1):
+        self.replicaVar = int(replicaVar)
+        tdLog.debug(f"start to execute {__file__}")
+        tdSql.init(conn.cursor(), True)
+
+    # run
+    def run(self):
+        tdLog.info(f"hello world.")
+
+    # stop
+    def stop(self):
+        tdSql.close()
+        tdLog.success(f"{__file__} successfully executed")
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/army/frame/__init__.py b/tests/army/frame/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/army/frame/autogen.py b/tests/army/frame/autogen.py
new file mode 100644
index 0000000000..9dca96e7b0
--- /dev/null
+++ b/tests/army/frame/autogen.py
@@ -0,0 +1,163 @@
+# -*- coding: utf-8 -*-
+
+import sys
+from frame.log import *
+from frame.cases import *
+from frame.sql import *
+import threading
+import random
+import string
+import time
+
+
+#
+# Auto Gen class
+#
+class AutoGen:
+    def __init__(self):
+        self.ts = 1600000000000
+        self.batch_size = 100
+        seed = time.time() % 10000
+        random.seed(seed)
+
+    # set start ts
+    def set_start_ts(self, ts):
+        self.ts = ts
+
+    # set batch size
+    def set_batch_size(self, batch_size):
+        self.batch_size = batch_size
+
+    # _columns_sql
+    def gen_columns_sql(self, pre, cnt, binary_len, nchar_len):
+        types = [
+            'timestamp',
+            'tinyint',
+            'smallint',
+            'tinyint unsigned',
+            'smallint unsigned',
+            'int',
+            'bigint',
+            'int unsigned',
+            'bigint unsigned',
+            'float',
+            'double',
+            'bool',
+            f'varchar({binary_len})',
+            f'nchar({nchar_len})'
+        ]
+
+        sqls = ""
+        metas = []
+        for i in range(cnt):
+            colname = f"{pre}{i}"
+            sel = i % len(types)
+            coltype = types[sel]
+            sql = f"{colname} {coltype}"
+            if sqls != "":
+                sqls += ","
+            sqls += sql
+            metas.append(sel)
+
+        return metas, sqls
+
+    # gen tags data
+    def gen_data(self, i, marr):
+        datas = ""
+        for c in marr:
+            data = ""
+            if c == 0 :    # timestamp
+                data = "%d" % (self.ts + i)
+            elif c <= 4 :  # small
+                data = "%d"%(i%128)
+            elif c <= 8 :  # int
+                data = f"{i}"
+            elif c <= 10 : # float
+                data = "%f"%(i+i/1000)
+            elif c <= 11 : # bool
+                data = "%d"%(i%2)
+            elif c == 12 : # binary
+                data = '"' + self.random_string(self.bin_len) + '"'
+            elif c == 13 : # nchar
+                data = '"' + self.random_string(self.nch_len) + '"'
+
+            if datas != "":
+                datas += ","
+            datas += data
+
+        return datas
+
+    # generate special wide random string
+    def random_string(self, count):
+        letters = string.ascii_letters
+        return ''.join(random.choice(letters) for i in range(count))
+
+    # create db
+    def create_db(self, dbname, vgroups = 2, replica = 1):
+        self.dbname = dbname
+        tdSql.execute(f'create database {dbname} vgroups {vgroups} replica {replica}')
+        tdSql.execute(f'use {dbname}')
+
+    # create table or stable
+    def create_stable(self, stbname, tag_cnt, column_cnt, binary_len, nchar_len):
+        self.bin_len = binary_len
+        self.nch_len = nchar_len
+        self.stbname = stbname
+        self.mtags, tags = self.gen_columns_sql("t", tag_cnt, binary_len, nchar_len)
+        self.mcols, cols = self.gen_columns_sql("c", column_cnt - 1, binary_len, nchar_len)
+
+        sql = f"create table {stbname} (ts timestamp, {cols}) tags({tags})"
+        tdSql.execute(sql)
+
+    # create child table
+    def create_child(self, stbname, prename, cnt):
+        self.child_cnt = cnt
+        self.child_name = prename
+        for i in range(cnt):
+            tags_data = self.gen_data(i, self.mtags)
+            sql = f"create table {prename}{i} using {stbname} tags({tags_data})"
+            tdSql.execute(sql)
+
+        tdLog.info(f"create child tables {cnt} ok")
+
+    def insert_data_child(self, child_name, cnt, batch_size, step):
+        values = ""
+        print("insert child data")
+        ts = self.ts
+
+        # loop do
+        for i in range(cnt):
+            value = self.gen_data(i, self.mcols)
+            ts += step
+            values += f"({ts},{value}) "
+            if batch_size == 1 or (i > 0 and i % batch_size == 0) :
+                sql = f"insert into {child_name} values {values}"
+                tdSql.execute(sql)
+                values = ""
+
+        # end batch
+        if values != "":
+            sql = f"insert into {child_name} values {values}"
+            tdSql.execute(sql)
+            tdLog.info(f" insert data i={i}")
+            values = ""
+
+        tdLog.info(f" insert child data {child_name} finished, insert rows={cnt}")
+
+    # insert data
+    def insert_data(self, cnt):
+        for i in range(self.child_cnt):
+            name = f"{self.child_name}{i}"
+            self.insert_data_child(name, cnt, self.batch_size, 1)
+
+        tdLog.info(f" insert data ok, child table={self.child_cnt} insert rows={cnt}")
+
+    # insert same timestamp to all children
+    def insert_samets(self, cnt):
+        for i in range(self.child_cnt):
+            name = f"{self.child_name}{i}"
+            self.insert_data_child(name, cnt, self.batch_size, 0)
+
+        tdLog.info(f" insert same timestamp ok, child table={self.child_cnt} insert rows={cnt}")
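For orientation, a minimal sketch of how a test case might drive this generator (database, stable, and prefix names are illustrative; it assumes `tdSql` was already initialized by the framework):

```python
from frame.autogen import AutoGen

# build one super table with 10 tags / 20 columns, 4 child tables,
# and 1000 rows per child table, batched 500 rows per insert
ag = AutoGen()
ag.set_batch_size(500)
ag.create_db("db_autogen", 2, 1)
ag.create_stable("st", 10, 20, 8, 8)
ag.create_child("st", "t", 4)
ag.insert_data(1000)
```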
diff --git a/tests/army/frame/boundary.py b/tests/army/frame/boundary.py
new file mode 100644
index 0000000000..086821e7cf
--- /dev/null
+++ b/tests/army/frame/boundary.py
@@ -0,0 +1,44 @@
+class DataBoundary:
+    def __init__(self):
+        self.TINYINT_BOUNDARY = [-128, 127]
+        self.SMALLINT_BOUNDARY = [-32768, 32767]
+        self.INT_BOUNDARY = [-2147483648, 2147483647]
+        self.BIGINT_BOUNDARY = [-9223372036854775808, 9223372036854775807]
+        self.UTINYINT_BOUNDARY = [0, 255]
+        self.USMALLINT_BOUNDARY = [0, 65535]
+        self.UINT_BOUNDARY = [0, 4294967295]
+        self.UBIGINT_BOUNDARY = [0, 18446744073709551615]
+        self.FLOAT_BOUNDARY = [-3.40E+38, 3.40E+38]
+        self.DOUBLE_BOUNDARY = [-1.7e+308, 1.7e+308]
+        self.BOOL_BOUNDARY = [True, False]
+        self.BINARY_MAX_LENGTH = 16374
+        self.NCHAR_MAX_LENGTH = 4093
+        self.DBNAME_MAX_LENGTH = 64
+        self.STBNAME_MAX_LENGTH = 192
+        self.TBNAME_MAX_LENGTH = 192
+        self.CHILD_TBNAME_MAX_LENGTH = 192
+        self.TAG_KEY_MAX_LENGTH = 64
+        self.COL_KEY_MAX_LENGTH = 64
+        self.MAX_TAG_COUNT = 128
+        self.MAX_TAG_COL_COUNT = 4096
+        self.mnodeShmSize = [6292480, 2147483647]
+        self.mnodeShmSize_default = 6292480
+        self.vnodeShmSize = [6292480, 2147483647]
+        self.vnodeShmSize_default = 31458304
+        self.DB_PARAM_BUFFER_CONFIG = {"create_name": "buffer", "query_name": "buffer", "vnode_json_key": "szBuf", "boundary": [3, 16384], "default": 96}
+        self.DB_PARAM_CACHELAST_CONFIG = {"create_name": "cachelast", "query_name": "cache_model", "vnode_json_key": "", "boundary": [0, 1, 2, 3], "default": 0}
+        self.DB_PARAM_COMP_CONFIG = {"create_name": "comp", "query_name": "compression", "vnode_json_key": "", "boundary": [0, 1, 2], "default": 2}
+        self.DB_PARAM_DURATION_CONFIG = {"create_name": "duration", "query_name": "duration", "vnode_json_key": "daysPerFile", "boundary": [1, 3650, '60m', '5256000m', '1h', '87600h', '1d', '3650d'], "default": "14400m"}
+        self.DB_PARAM_FSYNC_CONFIG = {"create_name": "fsync", "query_name": "fsync", "vnode_json_key": "", "boundary": [0, 180000], "default": 3000}
+        self.DB_PARAM_KEEP_CONFIG = {"create_name": "keep", "query_name": "fsync", "vnode_json_key": "", "boundary": [1, 365000,'1440m','525600000m','24h','8760000h','1d','365000d'], "default": "5256000m,5256000m,5256000m"}
+        self.DB_PARAM_MAXROWS_CONFIG = {"create_name": "maxrows", "query_name": "maxrows", "vnode_json_key": "maxRows", "boundary": [200, 10000], "default": 4096}
+        self.DB_PARAM_MINROWS_CONFIG = {"create_name": "minrows", "query_name": "minrows", "vnode_json_key": "minRows", "boundary": [10, 1000], "default": 100}
+        self.DB_PARAM_NTABLES_CONFIG = {"create_name": "ntables", "query_name": "ntables", "vnode_json_key": "", "boundary": 0, "default": 0}
+        self.DB_PARAM_PAGES_CONFIG = {"create_name": "pages", "query_name": "pages", "vnode_json_key": "szCache", "boundary": [64], "default": 256}
+        self.DB_PARAM_PAGESIZE_CONFIG = {"create_name": "pagesize", "query_name": "pagesize", "vnode_json_key": "szPage", "boundary": [1, 16384], "default": 4}
+        self.DB_PARAM_PRECISION_CONFIG = {"create_name": "precision", "query_name": "precision", "vnode_json_key": "", "boundary": ['ms', 'us', 'ns'], "default": "ms"}
+        self.DB_PARAM_REPLICA_CONFIG = {"create_name": "replica", "query_name": "replica", "vnode_json_key": "", "boundary": [1], "default": 1}
+        self.DB_PARAM_SINGLE_STABLE_CONFIG = {"create_name": "single_stable", "query_name": "single_stable_model", "vnode_json_key": "", "boundary": [0, 1], "default": 0}
+        self.DB_PARAM_STRICT_CONFIG = {"create_name": "strict", "query_name": "strict", "vnode_json_key": "", "boundary": {"off": 0, "strict": 1}, "default": "off"}
+        self.DB_PARAM_VGROUPS_CONFIG = {"create_name": "vgroups", "query_name": "vgroups", "vnode_json_key": "", "boundary": [1, 32], "default": 2}
+        self.DB_PARAM_WAL_CONFIG = {"create_name": "wal", "query_name": "wal", "vnode_json_key": "", "boundary": [1, 2], "default": 1}
\ No newline at end of file
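As an aside, this is how a case might consume the recorded boundaries to produce edge-case data (the table `db.t` is hypothetical):

```python
from frame.boundary import DataBoundary

# illustrative only: insert the INT extremes into a hypothetical table
bd = DataBoundary()
lo, hi = bd.INT_BOUNDARY
sql = f"insert into db.t values(now, {lo}) (now+1s, {hi})"
print(sql)  # pass to tdSql.execute(sql) inside a real case
```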
diff --git a/tests/army/frame/cases.py b/tests/army/frame/cases.py
new file mode 100644
index 0000000000..590c0d3711
--- /dev/null
+++ b/tests/army/frame/cases.py
@@ -0,0 +1,150 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+import time
+import datetime
+import inspect
+import importlib
+import traceback
+from frame.log import *
+
+
+class TDCase:
+    def __init__(self, name, case):
+        self.name = name
+        self.case = case
+        self._logSql = True
+
+
+class TDCases:
+    def __init__(self):
+        self.linuxCases = []
+        self.windowsCases = []
+        self.clusterCases = []
+
+    def __dynamicLoadModule(self, fileName):
+        moduleName = fileName.replace(".py", "").replace(os.sep, ".")
+        return importlib.import_module(moduleName, package='..')
+
+    def logSql(self, logSql):
+        self._logSql = logSql
+
+    def addWindows(self, name, case):
+        self.windowsCases.append(TDCase(name, case))
+
+    def addLinux(self, name, case):
+        self.linuxCases.append(TDCase(name, case))
+
+    def addCluster(self, name, case):
+        self.clusterCases.append(TDCase(name, case))
+
+    def runAllLinux(self, conn):
+        # TODO: load all Linux cases here
+        runNum = 0
+        for tmp in self.linuxCases:
+            if tmp.name.find(fileName) != -1:
+                case = testModule.TDTestCase()
+                case.init(conn)
+                case.run()
+                case.stop()
+                runNum += 1
+                continue
+
+        tdLog.info("total %d Linux test case(s) executed" % (runNum))
+
+    def runOneLinux(self, conn, fileName, replicaVar=1):
+        testModule = self.__dynamicLoadModule(fileName)
+
+        runNum = 0
+        for tmp in self.linuxCases:
+            if tmp.name.find(fileName) != -1:
+                case = testModule.TDTestCase()
+                case.init(conn, self._logSql, replicaVar)
+                try:
+                    case.run()
+                except Exception as e:
+                    tdLog.notice(repr(e))
+                    traceback.print_exc()
+                    tdLog.exit("%s failed" % (fileName))
+                case.stop()
+                runNum += 1
+                continue
+
+    def runAllWindows(self, conn):
+        # TODO: load all Windows cases here
+        runNum = 0
+        for tmp in self.windowsCases:
+            if tmp.name.find(fileName) != -1:
+                case = testModule.TDTestCase()
+                case.init(conn)
+                case.run()
+                case.stop()
+                runNum += 1
+                continue
+
+        tdLog.notice("total %d Windows test case(s) executed" % (runNum))
+
+    def runOneWindows(self, conn, fileName, replicaVar=1):
+        testModule = self.__dynamicLoadModule(fileName)
+
+        runNum = 0
+        for tmp in self.windowsCases:
+            if tmp.name.find(fileName) != -1:
+                case = testModule.TDTestCase()
+                case.init(conn, self._logSql, replicaVar)
+                try:
+                    case.run()
+                except Exception as e:
+                    tdLog.notice(repr(e))
+                    tdLog.exit("%s failed" % (fileName))
+                case.stop()
+                runNum += 1
+                continue
+        tdLog.notice("total %d Windows case(s) executed" % (runNum))
+
+    def runAllCluster(self):
+        # TODO: load all cluster case module here
+
+        runNum = 0
+        for tmp in self.clusterCases:
+            if tmp.name.find(fileName) != -1:
+                tdLog.notice("run cases like %s" % (fileName))
+                case = testModule.TDTestCase()
+                case.init()
+                case.run()
+                case.stop()
+                runNum += 1
+                continue
+
+        tdLog.notice("total %d Cluster test case(s) executed" % (runNum))
+
+    def runOneCluster(self, fileName):
+        testModule = self.__dynamicLoadModule(fileName)
+
+        runNum = 0
+        for tmp in self.clusterCases:
+            if tmp.name.find(fileName) != -1:
+                tdLog.notice("run cases like %s" % (fileName))
+                case = testModule.TDTestCase()
+                case.init()
+                case.run()
+                case.stop()
+                runNum += 1
+                continue
+
+        tdLog.notice("total %d Cluster test case(s) executed" % (runNum))
+
+
+tdCases = TDCases()
diff --git a/tests/army/frame/cluster.py b/tests/army/frame/cluster.py
new file mode 100644
index 0000000000..4da53840c0
--- /dev/null
+++ b/tests/army/frame/cluster.py
@@ -0,0 +1,108 @@
+from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
+import taos
+import sys
+import time
+import os
+import socket
+
+from frame.log import *
+from frame.sql import *
+from frame.cases import *
+from frame.dnodes import *
+from frame.common import *
+
+class ClusterDnodes(TDDnodes):
+    """Rewrite TDDnodes so this class can be used as a TDDnodes child class."""
+    def __init__(self, dnodes_lists):
+        super(ClusterDnodes, self).__init__()
+        self.dnodes = dnodes_lists  # every dnode must be a TDDnode instance
+        self.simDeployed = False
+        self.testCluster = False
+        self.valgrind = 0
+        self.killValgrind = 1
+
+
+class ConfigureyCluster:
+    """Create a defined number of dnodes and build a cluster from them;
+    it also returns the TDDnodes list: dnodes."""
+    hostname = socket.gethostname()
+
+    def __init__(self):
+        self.dnodes = []
+        self.dnodeNums = 5
+        self.independent = True
+        self.startPort = 6030
+        self.portStep = 100
+        self.mnodeNums = 0
+
+    def configure_cluster(self, dnodeNums=5, mnodeNums=0, independentMnode=True, startPort=6030, portStep=100, hostname="%s" % hostname):
+        self.startPort = int(startPort)
+        self.portStep = int(portStep)
+        self.hostname = hostname
+        self.dnodeNums = int(dnodeNums)
+        self.mnodeNums = int(mnodeNums)
+        self.dnodes = []
+        startPort_sec = int(startPort + portStep)
+        for num in range(1, (self.dnodeNums + 1)):
+            dnode = TDDnode(num)
+            dnode.addExtraCfg("firstEp", f"{hostname}:{self.startPort}")
+            dnode.addExtraCfg("fqdn", f"{hostname}")
+            dnode.addExtraCfg("serverPort", f"{self.startPort + (num - 1) * self.portStep}")
+            dnode.addExtraCfg("secondEp", f"{hostname}:{startPort_sec}")
+
+            # configure dnode of independent mnodes
+            if num <= self.mnodeNums and self.mnodeNums != 0 and independentMnode == True:
+                tdLog.info(f"set mnode:{num} supportVnodes 0")
+                dnode.addExtraCfg("supportVnodes", 0)
+            # print(dnode)
+            self.dnodes.append(dnode)
+        return self.dnodes
+
+    def create_dnode(self, conn, dnodeNum):
+        tdSql.init(conn.cursor())
+        dnodeNum = int(dnodeNum)
+        for dnode in self.dnodes[1:dnodeNum]:
+            # print(dnode.cfgDict)
+            dnode_id = dnode.cfgDict["fqdn"] + ":" + dnode.cfgDict["serverPort"]
+            tdSql.execute(" create dnode '%s';" % dnode_id)
+
+    def create_mnode(self, conn, mnodeNums):
+        tdSql.init(conn.cursor())
+        mnodeNums = int(mnodeNums)
+        for i in range(2, mnodeNums + 1):
+            tdLog.info("create mnode on dnode %d" % i)
+            tdSql.execute(" create mnode on dnode %d;" % i)
+
+    def check_dnode(self, conn):
+        tdSql.init(conn.cursor())
+        count = 0
+        while count < 5:
+            tdSql.query("select * from information_schema.ins_dnodes")
+            # tdLog.debug(tdSql.queryResult)
+            status = 0
+            for i in range(self.dnodeNums):
+                if tdSql.queryResult[i][4] == "ready":
+                    status += 1
+            # tdLog.debug(status)
+
+            if status == self.dnodeNums:
+                tdLog.debug(" create cluster with %d dnode and check cluster dnode all ready within 5s! " % self.dnodeNums)
+                break
+            count += 1
+            time.sleep(1)
+        else:
+            tdLog.exit("create cluster with %d dnode but check dnode not ready within 5s ! " % self.dnodeNums)
+
+    def checkConnectStatus(self, dnodeNo, hostname=hostname):
+        dnodeNo = int(dnodeNo)
+        tdLog.info("check dnode-%d connection" % (dnodeNo + 1))
+        hostname = socket.gethostname()
+        port = 6030 + dnodeNo * 100
+        connectToDnode = tdCom.newcon(host=hostname, port=port)
+        return connectToDnode
+
+cluster = ConfigureyCluster()
\ No newline at end of file
diff --git a/tests/army/frame/common.py b/tests/army/frame/common.py
new file mode 100644
index 0000000000..93059ff078
--- /dev/null
+++ b/tests/army/frame/common.py
@@ -0,0 +1,1867 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import random
+import string
+import requests
+import time
+import socket
+import json
+import toml
+from frame.boundary import DataBoundary
+import taos
+from frame.log import *
+from frame.sql import *
+from frame.cases import *
+from frame.dnodes import *
+from frame.common import *
+from frame.constant import *
+from dataclasses import dataclass, field
+from typing import List
+from datetime import datetime
+import re
+
+@dataclass
+class DataSet:
+    ts_data     : List[int]   = field(default_factory=list)
+    int_data    : List[int]   = field(default_factory=list)
+    bint_data   : List[int]   = field(default_factory=list)
+    sint_data   : List[int]   = field(default_factory=list)
+    tint_data   : List[int]   = field(default_factory=list)
+    uint_data   : List[int]   = field(default_factory=list)
+    ubint_data  : List[int]   = field(default_factory=list)
+    usint_data  : List[int]   = field(default_factory=list)
+    utint_data  : List[int]   = field(default_factory=list)
+    float_data  : List[float] = field(default_factory=list)
+    double_data : List[float] = field(default_factory=list)
+    bool_data   : List[int]   = field(default_factory=list)
+    vchar_data  : List[str]   = field(default_factory=list)
+    nchar_data  : List[str]   = field(default_factory=list)
+
+    def get_order_set(self,
+                      rows,
+                      int_step    :int   = 1,
+                      bint_step   :int   = 1,
+                      sint_step   :int   = 1,
+                      tint_step   :int   = 1,
+                      uint_step   :int   = 1,
+                      ubint_step  :int   = 1,
+                      usint_step  :int   = 1,
+                      utint_step  :int   = 1,
+                      float_step  :float = 1,
+                      double_step :float = 1,
+                      bool_start  :int   = 1,
+                      vchar_prefix:str   = "vachar_",
+                      vchar_step  :int   = 1,
+                      nchar_prefix:str   = "nchar_测试_",
+                      nchar_step  :int   = 1,
+                      ts_step     :int   = 1
+                      ):
+        for i in range(rows):
+            self.int_data.append(   int(i * int_step   % INT_MAX))
+            self.bint_data.append(  int(i * bint_step  % BIGINT_MAX))
+            self.sint_data.append(  int(i * sint_step  % SMALLINT_MAX))
+            self.tint_data.append(  int(i * tint_step  % TINYINT_MAX))
+            self.uint_data.append(  int(i * uint_step  % INT_UN_MAX))
+            self.ubint_data.append( int(i * ubint_step % BIGINT_UN_MAX))
+            self.usint_data.append( int(i * usint_step % SMALLINT_UN_MAX))
+            self.utint_data.append( int(i * utint_step % TINYINT_UN_MAX))
+            self.float_data.append( float(i * float_step  % FLOAT_MAX))
+            self.double_data.append(float(i * double_step % DOUBLE_MAX))
+            self.bool_data.append(  bool((i + bool_start) % 2))
+            self.vchar_data.append( f"{vchar_prefix}{i * vchar_step}")
+            self.nchar_data.append( f"{nchar_prefix}{i * nchar_step}")
+            self.ts_data.append(    int(datetime.timestamp(datetime.now()) * 1000 - i * ts_step))
+
+    def get_disorder_set(self, rows, **kwargs):
+        for k, v in kwargs.items():
+            int_low  = v if k == "int_low"  else INT_MIN
+            int_up   = v if k == "int_up"   else INT_MAX
+            bint_low = v if k == "bint_low" else BIGINT_MIN
+            bint_up  = v if k == "bint_up"  else BIGINT_MAX
+            sint_low = v if k == "sint_low" else SMALLINT_MIN
+            sint_up  = v if k == "sint_up"  else SMALLINT_MAX
+            tint_low = v if k == "tint_low" else TINYINT_MIN
+            tint_up  = v if k == "tint_up"  else TINYINT_MAX
+        pass
+
+
+class TDCom:
+    def __init__(self):
+        self.sml_type = None
+        self.env_setting = None
+        self.smlChildTableName_value = None
+        self.defaultJSONStrType_value = None
+        self.smlTagNullName_value = None
+        self.default_varchar_length = 6
+        self.default_nchar_length = 6
+        self.default_varchar_datatype = "letters"
+        self.default_nchar_datatype = "letters"
+        self.default_tagname_prefix = "t"
+        self.default_colname_prefix = "c"
+        self.default_stbname_prefix = "stb"
+        self.default_ctbname_prefix = "ctb"
+        self.default_tbname_prefix = "tb"
+        self.default_tag_index_start_num = 1
+        self.default_column_index_start_num = 1
+        self.default_stbname_index_start_num = 1
+        self.default_ctbname_index_start_num = 1
+        self.default_tbname_index_start_num = 1
+        self.default_tagts_name = "ts"
+        self.default_colts_name = "ts"
+        self.dbname = "test"
+        self.stb_name = "stb"
+        self.ctb_name = "ctb"
+        self.tb_name = "tb"
+        self.tbname = str()
+        self.need_tagts = False
+        self.tag_type_str = ""
+        self.column_type_str = ""
+        self.columns_str = None
+        self.ts_value = None
+        self.tag_value_list = list()
+        self.column_value_list = list()
+        self.full_type_list = ["tinyint", "smallint", "int", "bigint", "tinyint unsigned", "smallint unsigned", "int unsigned", "bigint unsigned", "float", "double", "binary", "nchar", "bool"]
+        self.white_list = ["statsd", "node_exporter", "collectd", "icinga2", "tcollector", "information_schema", "performance_schema"]
+        self.Boundary = DataBoundary()
+        self.white_list = ["statsd", "node_exporter", "collectd", "icinga2", "tcollector", "information_schema", "performance_schema"]
+        self.case_name = str()
+        self.des_table_suffix = "_output"
+        self.stream_suffix = "_stream"
+        self.range_count = 5
+        self.default_interval = 5
+        self.stream_timeout = 12
+        self.create_stream_sleep = 0.5
+        self.record_history_ts = str()
+        self.precision = "ms"
+        self.date_time = self.genTs(precision=self.precision)[0]
+        self.subtable = True
+        self.partition_tbname_alias = "ptn_alias" if self.subtable else ""
+        self.partition_col_alias = "pcol_alias" if self.subtable else ""
+        self.partition_tag_alias = "ptag_alias" if self.subtable else ""
+        self.partition_expression_alias = "pexp_alias" if self.subtable else ""
+        self.des_table_suffix = "_output"
+        self.stream_suffix = "_stream"
+        self.subtable_prefix = "prefix_" if self.subtable else ""
+        self.subtable_suffix = "_suffix" if self.subtable else ""
+        self.downsampling_function_list = ["min(c1)", "max(c2)", "sum(c3)", "first(c4)", "last(c5)", "apercentile(c6, 50)", "avg(c7)", "count(c8)", "spread(c1)",
+            "stddev(c2)", "hyperloglog(c11)", "timediff(1, 0, 1h)", "timezone()", "to_iso8601(1)", 'to_unixtimestamp("1970-01-01T08:00:00+08:00")', "min(t1)", "max(t2)", "sum(t3)",
+            "first(t4)", "last(t5)", "apercentile(t6, 50)", "avg(t7)", "count(t8)", "spread(t1)", "stddev(t2)", "hyperloglog(t11)"]
+        self.stb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.downsampling_function_list)))
+        self.tb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.downsampling_function_list[0:15])))
+        self.stb_source_select_str = ','.join(self.downsampling_function_list)
+        self.tb_source_select_str = ','.join(self.downsampling_function_list[0:15])
+        self.fill_function_list = ["min(c1)", "max(c2)", "sum(c3)", "apercentile(c6, 50)", "avg(c7)", "count(c8)", "spread(c1)",
+            "stddev(c2)", "hyperloglog(c11)", "timediff(1, 0, 1h)", "timezone()", "to_iso8601(1)", 'to_unixtimestamp("1970-01-01T08:00:00+08:00")', "min(t1)", "max(t2)", "sum(t3)",
+            "first(t4)", "last(t5)", "apercentile(t6, 50)", "avg(t7)", "count(t8)", "spread(t1)", "stddev(t2)", "hyperloglog(t11)"]
+        self.fill_stb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.fill_function_list)))
+        self.fill_stb_source_select_str = ','.join(self.fill_function_list)
+        self.fill_tb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.fill_function_list[0:13])))
+        self.fill_tb_source_select_str = ','.join(self.fill_function_list[0:13])
+        self.ext_tb_source_select_str = ','.join(self.downsampling_function_list[0:13])
+        self.stream_case_when_tbname = "tbname"
+
+        self.update = True
+        self.disorder = True
+        if self.disorder:
+            self.update = False
+        self.partition_by_downsampling_function_list = ["min(c1)", "max(c2)", "sum(c3)", "first(c4)", "last(c5)", "count(c8)", "spread(c1)",
+            "stddev(c2)", "hyperloglog(c11)", "min(t1)", "max(t2)", "sum(t3)", "first(t4)", "last(t5)", "count(t8)", "spread(t1)", "stddev(t2)"]
+
+        self.stb_data_filter_sql = f'ts >= {self.date_time}+1s and c1 = 1 or c2 > 1 and c3 != 4 or c4 <= 3 and c9 <> 0 or c10 is not Null or c11 is Null or \
+            c12 between "na" and "nchar4" and c11 not between "bi" and "binary" and c12 match "nchar[19]" and c12 nmatch "nchar[25]" or c13 = True or \
+            c5 in (1, 2, 3) or c6 not in (6, 7) and c12 like "nch%" and c11 not like "bina_" and c6 < 10 or c12 is Null or c8 >= 4 and t1 = 1 or t2 > 1 \
+            and t3 != 4 or c4 <= 3 and t9 <> 0 or t10 is not Null or t11 is Null or t12 between "na" and "nchar4" and t11 not between "bi" and "binary" \
+            or t12 match "nchar[19]" or t12 nmatch "nchar[25]" or t13 = True or t5 in (1, 2, 3) or t6 not in (6, 7) and t12 like "nch%" \
+            and t11 not like "bina_" and t6 <= 10 or t12 is Null or t8 >= 4'
+        self.tb_data_filter_sql = self.stb_data_filter_sql.partition(" and t1")[0]
+
+        self.filter_source_select_elm = "*"
+        self.stb_filter_des_select_elm = "ts, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13"
+        self.partitial_stb_filter_des_select_elm = ",".join(self.stb_filter_des_select_elm.split(",")[:3])
+        self.exchange_stb_filter_des_select_elm = ",".join([self.stb_filter_des_select_elm.split(",")[0], self.stb_filter_des_select_elm.split(",")[2], self.stb_filter_des_select_elm.split(",")[1]])
+        self.partitial_ext_tb_source_select_str = ','.join(self.downsampling_function_list[0:2])
+        self.tb_filter_des_select_elm = self.stb_filter_des_select_elm.partition(", t1")[0]
+        self.tag_filter_des_select_elm = self.stb_filter_des_select_elm.partition("c13, ")[2]
+        self.partition_by_stb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.partition_by_downsampling_function_list)))
+        self.partition_by_stb_source_select_str = ','.join(self.partition_by_downsampling_function_list)
+        self.exchange_tag_filter_des_select_elm = ",".join([self.stb_filter_des_select_elm.partition("c13, ")[2].split(",")[0], self.stb_filter_des_select_elm.partition("c13, ")[2].split(",")[2], self.stb_filter_des_select_elm.partition("c13, ")[2].split(",")[1]])
+        self.partitial_tag_filter_des_select_elm = ",".join(self.stb_filter_des_select_elm.partition("c13, ")[2].split(",")[:3])
+        self.partitial_tag_stb_filter_des_select_elm = "ts, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, t1, t3, t2, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13"
+        self.cast_tag_filter_des_select_elm = "t5,t11,t13"
+        self.cast_tag_stb_filter_des_select_elm = "ts, t1, t2, t3, t4, cast(t1 as TINYINT UNSIGNED), t6, t7, t8, t9, t10, cast(t2 as varchar(256)), t12, cast(t3 as bool)"
+        self.tag_count = len(self.tag_filter_des_select_elm.split(","))
+        self.state_window_range = list()
+    # def init(self, conn, logSql):
+    #     # tdSql.init(conn.cursor(), logSql)
+
+    def preDefine(self):
+        header = {'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='}
+        sql_url = "http://127.0.0.1:6041/rest/sql"
+        sqlt_url = "http://127.0.0.1:6041/rest/sqlt"
+        sqlutc_url = "http://127.0.0.1:6041/rest/sqlutc"
+        influx_url = "http://127.0.0.1:6041/influxdb/v1/write"
+        telnet_url = "http://127.0.0.1:6041/opentsdb/v1/put/telnet"
+        return header, sql_url, sqlt_url, sqlutc_url, influx_url, telnet_url
+
+    def genTcpParam(self):
+        MaxBytes = 1024*1024
+        host = '127.0.0.1'
+        port = 6046
+        return MaxBytes, host, port
+
+    def tcpClient(self, input):
+        MaxBytes = tdCom.genTcpParam()[0]
+        host = tdCom.genTcpParam()[1]
+        port = tdCom.genTcpParam()[2]
+        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        sock.connect((host, port))
+        sock.send(input.encode())
+        sock.close()
+
+    def restApiPost(self, sql):
+        requests.post(self.preDefine()[1], sql.encode("utf-8"), headers = self.preDefine()[0])
+
+    def createDb(self, dbname="test", db_update_tag=0, api_type="taosc"):
+        if api_type == "taosc":
+            if db_update_tag == 0:
+                tdSql.execute(f"drop database if exists {dbname}")
+                tdSql.execute(f"create database if not exists {dbname} precision 'us'")
+            else:
+                tdSql.execute(f"drop database if exists {dbname}")
+                tdSql.execute(f"create database if not exists {dbname} precision 'us' update 1")
+        elif api_type == "restful":
+            if db_update_tag == 0:
+                self.restApiPost(f"drop database if exists {dbname}")
+                self.restApiPost(f"create database if not exists {dbname} precision 'us'")
+            else:
+                self.restApiPost(f"drop database if exists {dbname}")
+                self.restApiPost(f"create database if not exists {dbname} precision 'us' update 1")
+        tdSql.execute(f'use {dbname}')
+
+    def genUrl(self, url_type, dbname, precision):
+        if url_type == "influxdb":
+            if precision is None:
+                url = self.preDefine()[4] + "?" + "db=" + dbname
+            else:
+                url = self.preDefine()[4] + "?" + "db=" + dbname + "&precision=" + precision
+        elif url_type == "telnet":
+            url = self.preDefine()[5] + "/" + dbname
+        else:
+            url = self.preDefine()[1]
+        return url
+
+    def schemalessApiPost(self, sql, url_type="influxdb", dbname="test", precision=None):
+        if url_type == "influxdb":
+            url = self.genUrl(url_type, dbname, precision)
+        elif url_type == "telnet":
+            url = self.genUrl(url_type, dbname, precision)
+        res = requests.post(url, sql.encode("utf-8"), headers = self.preDefine()[0])
+        return res
+
+    def cleanTb(self, type="taosc", dbname="db"):
+        '''
+        type is taosc or restful
+        '''
+        query_sql = f"show {dbname}.stables"
+        res_row_list = tdSql.query(query_sql, True)
+        stb_list = map(lambda x: x[0], res_row_list)
+        for stb in stb_list:
+            if type == "taosc":
+                tdSql.execute(f'drop table if exists {dbname}.`{stb}`')
+                if not stb[0].isdigit():
+                    tdSql.execute(f'drop table if exists {dbname}.{stb}')
+            elif type == "restful":
+                self.restApiPost(f"drop table if exists {dbname}.`{stb}`")
+                if not stb[0].isdigit():
+                    self.restApiPost(f"drop table if exists {dbname}.{stb}")
+
+    def dateToTs(self, datetime_input):
+        return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f")))
+
+    def genTs(self, precision="ms", ts="", protype="taosc", ns_tag=None):
+        """
+        protype = "taosc" or "restful"
+        gen ts and datetime
+        """
+        if precision == "ns":
+            if ts == "" or ts is None:
+                ts = time.time_ns()
+            else:
+                ts = ts
+            if ns_tag is None:
+                dt = ts
+            else:
+                dt = datetime.fromtimestamp(ts // 1000000000)
+                dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000000000)).zfill(9)
+            if protype == "restful":
+                dt = datetime.fromtimestamp(ts // 1000000000)
+                dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000000000)).zfill(9)
+        else:
+            if ts == "" or ts is None:
+                ts = time.time()
+            else:
+                ts = ts
+            if precision == "ms" or precision is None:
+                ts = int(round(ts * 1000))
+                dt = datetime.fromtimestamp(ts // 1000)
+                if protype == "taosc":
+                    dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000)).zfill(3) + '000'
+                elif protype == "restful":
+                    dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000)).zfill(3)
+                else:
+                    pass
+            elif precision == "us":
+                ts = int(round(ts * 1000000))
+                dt = datetime.fromtimestamp(ts // 1000000)
+                dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000000)).zfill(6)
+        return ts, dt
+
+    def get_long_name(self, length=10, mode="letters"):
+        """
+        generate long name
+        mode could be numbers/letters/letters_mixed/mixed
+        """
+        if mode == "numbers":
+            population = string.digits
+        elif mode == "letters":
+            population = string.ascii_letters.lower()
+        elif mode == "letters_mixed":
+            population = string.ascii_letters.upper() + string.ascii_letters.lower()
+        else:
+            population = string.ascii_letters.lower() + string.digits
+        return "".join(random.choices(population, k=length))
+
+    def getLongName(self, len, mode = "mixed"):
+        """
+        generate long name
+        mode could be numbers/letters/letters_mixed/mixed
+        """
+        if mode == "numbers":
+            chars = ''.join(random.choice(string.digits) for i in range(len))
+        elif mode == "letters":
+            chars = ''.join(random.choice(string.ascii_letters.lower()) for i in range(len))
+        elif mode == "letters_mixed":
+            chars = ''.join(random.choice(string.ascii_letters.upper() + string.ascii_letters.lower()) for i in range(len))
+        else:
+            chars = ''.join(random.choice(string.ascii_letters.lower() + string.digits) for i in range(len))
+        return chars
+
+    def restartTaosd(self, index=1, db_name="db"):
+        tdDnodes.stop(index)
+        tdDnodes.startWithoutSleep(index)
+        tdSql.execute(f"use {db_name}")
+
+    def typeof(self, variate):
+        v_type = None
+        if type(variate) is int:
+            v_type = "int"
+        elif type(variate) is str:
+            v_type = "str"
+        elif type(variate) is float:
+            v_type = "float"
+        elif type(variate) is bool:
+            v_type = "bool"
+        elif type(variate) is list:
+            v_type = "list"
+        elif type(variate) is tuple:
+            v_type = "tuple"
+        elif type(variate) is dict:
+            v_type = "dict"
+        elif type(variate) is set:
+            v_type = "set"
+        return v_type
+
+    def splitNumLetter(self, input_mix_str):
+        nums, letters = "", ""
+        for i in input_mix_str:
+            if i.isdigit():
+                nums += i
+            elif i.isspace():
+                pass
+            else:
+                letters += i
+        return nums, letters
+
+    def smlPass(self, func):
+        smlChildTableName = "no"
+        def wrapper(*args):
+            # if tdSql.getVariable("smlChildTableName")[0].upper() == "ID":
+            if smlChildTableName.upper() == "ID":
+                return func(*args)
+            else:
+                pass
+        return wrapper
+
+    def close(self):
+        self.cursor.close()
+
+    ########################################################################################################################################
+    # new common API
+    ########################################################################################################################################
+    def create_database(self, tsql, dbName='test', dropFlag=1, **kwargs):
+        if dropFlag == 1:
+            tsql.execute("drop database if exists %s" % (dbName))
+        '''
+        vgroups replica precision strict wal fsync comp cachelast single_stable buffer pagesize pages minrows maxrows duration keep retentions
+        '''
+        sqlString = f'create database if not exists {dbName} '
+
+        dbParams = ""
+        if len(kwargs) > 0:
+            for param, value in kwargs.items():
+                if param == "precision":
+                    dbParams += f'{param} "{value}" '
+                else:
+                    dbParams += f'{param} {value} '
+            sqlString += f'{dbParams}'
+
+        tdLog.debug("create db sql: %s" % sqlString)
+        tsql.execute(sqlString)
+        tdLog.debug("complete to create database %s" % (dbName))
+        return
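For reference, a sketch of how a case might call the helper above (names are illustrative; `tdSql` is the framework's SQL wrapper and any extra kwargs are appended verbatim to the CREATE DATABASE statement):

```python
# hypothetical usage of TDCom.create_database from a test case
com = TDCom()
com.create_database(tdSql, dbName="demo", dropFlag=1, vgroups=4, precision="us")
# issues: create database if not exists demo vgroups 4 precision "us"
```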
exists %s.%s (ts timestamp %s) tags(%s)"%(dbName, stbName, colSchema, tagSchema)) + # tdLog.debug("complete to create %s.%s" %(dbName, stbName)) + # return + + # def create_ctables(self,tsql, dbName,stbName,ctbNum,tagDict): + # tsql.execute("use %s" %dbName) + # tagsValues = '' + # for i in range(tagDict['int']): + # if i > 0: + # tagsValues += ',' + # tagsValues += '%d'%i + + # pre_create = "create table" + # sql = pre_create + # #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname)) + # for i in range(ctbNum): + # sql += " %s_%d using %s tags(%s)"%(stbName,i,stbName,tagsValues) + # if (i > 0) and (i%100 == 0): + # tsql.execute(sql) + # sql = pre_create + # if sql != pre_create: + # tsql.execute(sql) + + # tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) + # return + + # def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs=0): + # tdLog.debug("start to insert data ............") + # tsql.execute("use %s" %dbName) + # pre_insert = "insert into " + # sql = pre_insert + # if startTs == 0: + # t = time.time() + # startTs = int(round(t * 1000)) + # #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + # for i in range(ctbNum): + # sql += " %s_%d values "%(stbName,i) + # for j in range(rowsPerTbl): + # sql += "(%d, %d, %d)"%(startTs + j, j, j) + # if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)): + # tsql.execute(sql) + # if j < rowsPerTbl - 1: + # sql = "insert into %s_%d values " %(stbName,i) + # else: + # sql = "insert into " + # #end sql + # if sql != pre_insert: + # #print("insert sql:%s"%sql) + # tsql.execute(sql) + # tdLog.debug("insert data ............ [OK]") + # return + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files or "taosd.exe" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def getClientCfgPath(self): + buildPath = self.getBuildPath() + + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + return cfgPath + + def newcon(self,host='localhost',port=6030,user='root',password='taosdata'): + con=taos.connect(host=host, user=user, password=password, port=port) + # print(con) + return con + + def newcur(self,host='localhost',port=6030,user='root',password='taosdata'): + cfgPath = self.getClientCfgPath() + con=taos.connect(host=host, user=user, password=password, config=cfgPath, port=port) + cur=con.cursor() + # print(cur) + return cur + + def newTdSql(self, host='localhost',port=6030,user='root',password='taosdata'): + newTdSql = TDSql() + cur = self.newcur(host=host,port=port,user=user,password=password) + newTdSql.init(cur, False) + return newTdSql + + ################################################################################################################ + # port from the common.py of new test frame + ################################################################################################################ + def gen_default_tag_str(self): + default_tag_str = "" + for tag_type in self.full_type_list: + if 
tag_type.lower() not in ["varchar", "binary", "nchar"]: + default_tag_str += f" {self.default_tagname_prefix}{self.default_tag_index_start_num} {tag_type}," + else: + if tag_type.lower() in ["varchar", "binary"]: + default_tag_str += f" {self.default_tagname_prefix}{self.default_tag_index_start_num} {tag_type}({self.default_varchar_length})," + else: + default_tag_str += f" {self.default_tagname_prefix}{self.default_tag_index_start_num} {tag_type}({self.default_nchar_length})," + self.default_tag_index_start_num += 1 + if self.need_tagts: + default_tag_str = self.default_tagts_name + " timestamp," + default_tag_str + return default_tag_str[:-1].lstrip() + + def gen_default_column_str(self): + self.default_column_index_start_num = 1 + default_column_str = "" + for col_type in self.full_type_list: + if col_type.lower() not in ["varchar", "binary", "nchar"]: + default_column_str += f" {self.default_colname_prefix}{self.default_column_index_start_num} {col_type}," + else: + if col_type.lower() in ["varchar", "binary"]: + default_column_str += f" {self.default_colname_prefix}{self.default_column_index_start_num} {col_type}({self.default_varchar_length})," + else: + default_column_str += f" {self.default_colname_prefix}{self.default_column_index_start_num} {col_type}({self.default_nchar_length})," + self.default_column_index_start_num += 1 + default_column_str = self.default_colts_name + " timestamp," + default_column_str + return default_column_str[:-1].lstrip() + + def gen_tag_type_str(self, tagname_prefix, tag_elm_list): + tag_index_start_num = 1 + tag_type_str = "" + if tag_elm_list is None: + tag_type_str = self.gen_default_tag_str() + else: + for tag_elm in tag_elm_list: + if "count" in tag_elm: + total_count = int(tag_elm["count"]) + else: + total_count = 1 + if total_count > 0: + for _ in range(total_count): + tag_type_str += f'{tagname_prefix}{tag_index_start_num} {tag_elm["type"]}, ' + if tag_elm["type"] in ["varchar", "binary", "nchar"]: + tag_type_str = tag_type_str.rstrip()[:-1] + f'({tag_elm["len"]}), ' + tag_index_start_num += 1 + else: + continue + tag_type_str = tag_type_str.rstrip()[:-1] + + return tag_type_str + + def gen_column_type_str(self, colname_prefix, column_elm_list): + column_index_start_num = 1 + column_type_str = "" + if column_elm_list is None: + column_type_str = self.gen_default_column_str() + else: + for column_elm in column_elm_list: + if "count" in column_elm: + total_count = int(column_elm["count"]) + else: + total_count = 1 + if total_count > 0: + for _ in range(total_count): + column_type_str += f'{colname_prefix}{column_index_start_num} {column_elm["type"]}, ' + if column_elm["type"] in ["varchar", "binary", "nchar"]: + column_type_str = column_type_str.rstrip()[:-1] + f'({column_elm["len"]}), ' + column_index_start_num += 1 + else: + continue + column_type_str = self.default_colts_name + " timestamp, " + column_type_str.rstrip()[:-1] + return column_type_str + + def gen_random_type_value(self, type_name, binary_length, binary_type, nchar_length, nchar_type): + if type_name.lower() == "tinyint": + return random.randint(self.Boundary.TINYINT_BOUNDARY[0], self.Boundary.TINYINT_BOUNDARY[1]) + elif type_name.lower() == "smallint": + return random.randint(self.Boundary.SMALLINT_BOUNDARY[0], self.Boundary.SMALLINT_BOUNDARY[1]) + elif type_name.lower() == "int": + return random.randint(self.Boundary.INT_BOUNDARY[0], self.Boundary.INT_BOUNDARY[1]) + elif type_name.lower() == "bigint": + return random.randint(self.Boundary.BIGINT_BOUNDARY[0], 
self.Boundary.BIGINT_BOUNDARY[1]) + elif type_name.lower() == "tinyint unsigned": + return random.randint(self.Boundary.UTINYINT_BOUNDARY[0], self.Boundary.UTINYINT_BOUNDARY[1]) + elif type_name.lower() == "smallint unsigned": + return random.randint(self.Boundary.USMALLINT_BOUNDARY[0], self.Boundary.USMALLINT_BOUNDARY[1]) + elif type_name.lower() == "int unsigned": + return random.randint(self.Boundary.UINT_BOUNDARY[0], self.Boundary.UINT_BOUNDARY[1]) + elif type_name.lower() == "bigint unsigned": + return random.randint(self.Boundary.UBIGINT_BOUNDARY[0], self.Boundary.UBIGINT_BOUNDARY[1]) + elif type_name.lower() == "float": + return random.uniform(self.Boundary.FLOAT_BOUNDARY[0], self.Boundary.FLOAT_BOUNDARY[1]) + elif type_name.lower() == "double": + return random.uniform(self.Boundary.FLOAT_BOUNDARY[0], self.Boundary.FLOAT_BOUNDARY[1]) + elif type_name.lower() == "binary": + return f'{self.get_long_name(binary_length, binary_type)}' + elif type_name.lower() == "varchar": + return self.get_long_name(binary_length, binary_type) + elif type_name.lower() == "nchar": + return self.get_long_name(nchar_length, nchar_type) + elif type_name.lower() == "bool": + return random.choice(self.Boundary.BOOL_BOUNDARY) + elif type_name.lower() == "timestamp": + return self.genTs()[0] + else: + pass + + def gen_tag_value_list(self, tag_elm_list): + tag_value_list = list() + if tag_elm_list is None: + tag_value_list = list(map(lambda i: self.gen_random_type_value(i, self.default_varchar_length, self.default_varchar_datatype, self.default_nchar_length, self.default_nchar_datatype), self.full_type_list)) + else: + for tag_elm in tag_elm_list: + if "count" in tag_elm: + total_count = int(tag_elm["count"]) + else: + total_count = 1 + if total_count > 0: + for _ in range(total_count): + if tag_elm["type"] in ["varchar", "binary", "nchar"]: + tag_value_list.append(self.gen_random_type_value(tag_elm["type"], tag_elm["len"], self.default_varchar_datatype, tag_elm["len"], self.default_nchar_datatype)) + else: + tag_value_list.append(self.gen_random_type_value(tag_elm["type"], "", "", "", "")) + else: + continue + return tag_value_list + + def gen_column_value_list(self, column_elm_list, ts_value=None): + if ts_value is None: + ts_value = self.genTs()[0] + + column_value_list = list() + column_value_list.append(ts_value) + if column_elm_list is None: + column_value_list = list(map(lambda i: self.gen_random_type_value(i, self.default_varchar_length, self.default_varchar_datatype, self.default_nchar_length, self.default_nchar_datatype), self.full_type_list)) + else: + for column_elm in column_elm_list: + if "count" in column_elm: + total_count = int(column_elm["count"]) + else: + total_count = 1 + if total_count > 0: + for _ in range(total_count): + if column_elm["type"] in ["varchar", "binary", "nchar"]: + column_value_list.append(self.gen_random_type_value(column_elm["type"], column_elm["len"], self.default_varchar_datatype, column_elm["len"], self.default_nchar_datatype)) + else: + column_value_list.append(self.gen_random_type_value(column_elm["type"], "", "", "", "")) + else: + continue + # column_value_list = [self.ts_value] + self.column_value_list + return column_value_list + + def create_stable(self, tsql, dbname=None, stbname="stb", column_elm_list=None, tag_elm_list=None, + count=1, default_stbname_prefix="stb", **kwargs): + colname_prefix = 'c' + tagname_prefix = 't' + stbname_index_start_num = 1 + stb_params = "" + if len(kwargs) > 0: + for param, value in kwargs.items(): + stb_params += f'{param} 
"{value}" ' + column_type_str = self.gen_column_type_str(colname_prefix, column_elm_list) + tag_type_str = self.gen_tag_type_str(tagname_prefix, tag_elm_list) + + if int(count) <= 1: + create_stable_sql = f'create table {dbname}.{stbname} ({column_type_str}) tags ({tag_type_str}) {stb_params};' + tdLog.info("create stb sql: %s"%create_stable_sql) + tsql.execute(create_stable_sql) + else: + for _ in range(count): + create_stable_sql = f'create table {dbname}.{default_stbname_prefix}{stbname_index_start_num} ({column_type_str}) tags ({tag_type_str}) {stb_params};' + stbname_index_start_num += 1 + tsql.execute(create_stable_sql) + + def create_ctable(self, tsql, dbname=None, stbname=None, tag_elm_list=None, count=1, default_ctbname_prefix="ctb", **kwargs): + ctbname_index_start_num = 0 + ctb_params = "" + if len(kwargs) > 0: + for param, value in kwargs.items(): + ctb_params += f'{param} "{value}" ' + tag_value_list = self.gen_tag_value_list(tag_elm_list) + tag_value_str = "" + # tag_value_str = ", ".join(str(v) for v in self.tag_value_list) + for tag_value in tag_value_list: + if isinstance(tag_value, str): + tag_value_str += f'"{tag_value}", ' + else: + tag_value_str += f'{tag_value}, ' + tag_value_str = tag_value_str.rstrip()[:-1] + + if int(count) <= 1: + create_ctable_sql = f'create table {dbname}.{default_ctbname_prefix}{ctbname_index_start_num} using {dbname}.{stbname} tags ({tag_value_str}) {ctb_params};' + tsql.execute(create_ctable_sql) + else: + for _ in range(count): + create_ctable_sql = f'create table {dbname}.{default_ctbname_prefix}{ctbname_index_start_num} using {dbname}.{stbname} tags ({tag_value_str}) {ctb_params};' + ctbname_index_start_num += 1 + tdLog.info("create ctb sql: %s"%create_ctable_sql) + tsql.execute(create_ctable_sql) + + def create_table(self, tsql, dbname=None, tbname="ntb", column_elm_list=None, count=1, **kwargs): + tbname_index_start_num = 1 + tbname_prefix="ntb" + + tb_params = "" + if len(kwargs) > 0: + for param, value in kwargs.items(): + tb_params += f'{param} "{value}" ' + column_type_str = self.gen_column_type_str(tbname_prefix, column_elm_list) + + if int(count) <= 1: + create_table_sql = f'create table {dbname}.{tbname} ({column_type_str}) {tb_params};' + tsql.execute(create_table_sql) + else: + for _ in range(count): + create_table_sql = f'create table {dbname}.{tbname_prefix}{tbname_index_start_num} ({column_type_str}) {tb_params};' + tbname_index_start_num += 1 + tsql.execute(create_table_sql) + + def insert_rows(self, tsql, dbname=None, tbname=None, column_ele_list=None, start_ts_value=None, count=1): + if start_ts_value is None: + start_ts_value = self.genTs()[0] + + column_value_list = self.gen_column_value_list(column_ele_list, start_ts_value) + # column_value_str = ", ".join(str(v) for v in self.column_value_list) + column_value_str = "" + for column_value in column_value_list: + if isinstance(column_value, str): + column_value_str += f'"{column_value}", ' + else: + column_value_str += f'{column_value}, ' + column_value_str = column_value_str.rstrip()[:-1] + if int(count) <= 1: + insert_sql = f'insert into {self.tb_name} values ({column_value_str});' + tsql.execute(insert_sql) + else: + for num in range(count): + column_value_list = self.gen_column_value_list(column_ele_list, f'{start_ts_value}+{num}s') + # column_value_str = ", ".join(str(v) for v in column_value_list) + column_value_str = '' + idx = 0 + for column_value in column_value_list: + if isinstance(column_value, str) and idx != 0: + column_value_str += f'"{column_value}", ' + 
+                        column_value_str += f'{column_value}, '
+                    idx += 1
+                column_value_str = column_value_str.rstrip()[:-1]
+                insert_sql = f'insert into {dbname}.{tbname} values ({column_value_str});'
+                tsql.execute(insert_sql)
+
+    def getOneRow(self, location, containElm):
+        res_list = list()
+        if 0 <= location < tdSql.queryRows:
+            for row in tdSql.queryResult:
+                if row[location] == containElm:
+                    res_list.append(row)
+            return res_list
+        else:
+            tdLog.exit(f"getOneRow out of range: row_index={location} row_count={tdSql.queryRows}")
+
+    def killProcessor(self, processorName):
+        if (platform.system().lower() == 'windows'):
+            os.system("TASKKILL /F /IM %s.exe"%processorName)
+        else:
+            os.system("unset LD_PRELOAD; pkill %s " % processorName)
+
+    def gen_tag_col_str(self, gen_type, data_type, count):
+        """
+        gen multi tags or cols by gen_type
+        """
+        return ','.join(map(lambda i: f'{gen_type}{i} {data_type}', range(count)))
+
+    # stream
+    def create_stream(self, stream_name, des_table, source_sql, trigger_mode=None, watermark=None, max_delay=None, ignore_expired=None, ignore_update=None, subtable_value=None, fill_value=None, fill_history_value=None, stb_field_name_value=None, tag_value=None, use_exist_stb=False, use_except=False):
+        """create_stream
+
+        Args:
+            stream_name (str): stream_name
+            des_table (str): target stable
+            source_sql (str): stream sql
+            trigger_mode (str, optional): at_once/window_close/max_delay. Defaults to None.
+            watermark (str, optional): watermark time. Defaults to None.
+            max_delay (str, optional): max_delay time. Defaults to None.
+            ignore_expired (int, optional): ignore expired data. Defaults to None.
+            ignore_update (int, optional): ignore update data. Defaults to None.
+            subtable_value (str, optional): subtable. Defaults to None.
+            fill_value (str, optional): fill. Defaults to None.
+            fill_history_value (int, optional): 0/1. Defaults to None.
+            stb_field_name_value (str, optional): existed stb. Defaults to None.
+            tag_value (str, optional): custom tag. Defaults to None.
+            use_exist_stb (bool, optional): use existed stb tag. Defaults to False.
+            use_except (bool, optional): if True, return the SQL string instead of executing it. Defaults to False.
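+
+        Example (editor's sketch; the stream, table, and query names are
+        illustrative, not from the source):
+            create_stream("s1", "dst", "select _wstart, count(*) from stb interval(10s)",
+                          trigger_mode="window_close", watermark="5s")
+            # executes: create stream if not exists s1 trigger window_close watermark 5s
+            #           ignore expired 0 ignore update 0 into dst as select ...;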
+ + Returns: + str: stream + """ + if subtable_value is None: + subtable = "" + else: + subtable = f'subtable({subtable_value})' + + if fill_value is None: + fill = "" + else: + fill = f'fill({fill_value})' + + if fill_history_value is None: + fill_history = "" + else: + fill_history = f'fill_history {fill_history_value}' + + if use_exist_stb: + if stb_field_name_value is None: + stb_field_name = "" + else: + stb_field_name = f'({stb_field_name_value})' + + if tag_value is None: + tags = "" + else: + tags = f'tags({tag_value})' + else: + stb_field_name = "" + tags = "" + + + if trigger_mode is None: + stream_options = "" + if watermark is not None: + stream_options = f'watermark {watermark}' + if ignore_expired: + stream_options += f" ignore expired {ignore_expired}" + else: + stream_options += f" ignore expired 0" + if ignore_update: + stream_options += f" ignore update {ignore_update}" + else: + stream_options += f" ignore update 0" + if not use_except: + tdSql.execute(f'create stream if not exists {stream_name} trigger at_once {stream_options} {fill_history} into {des_table} {subtable} as {source_sql} {fill};') + time.sleep(self.create_stream_sleep) + return None + else: + return f'create stream if not exists {stream_name} {stream_options} {fill_history} into {des_table} {subtable} as {source_sql} {fill};' + else: + if watermark is None: + if trigger_mode == "max_delay": + stream_options = f'trigger {trigger_mode} {max_delay}' + else: + stream_options = f'trigger {trigger_mode}' + else: + if trigger_mode == "max_delay": + stream_options = f'trigger {trigger_mode} {max_delay} watermark {watermark}' + else: + stream_options = f'trigger {trigger_mode} watermark {watermark}' + if ignore_expired: + stream_options += f" ignore expired {ignore_expired}" + else: + stream_options += f" ignore expired 0" + + if ignore_update: + stream_options += f" ignore update {ignore_update}" + else: + stream_options += f" ignore update 0" + if not use_except: + tdSql.execute(f'create stream if not exists {stream_name} {stream_options} {fill_history} into {des_table}{stb_field_name} {tags} {subtable} as {source_sql} {fill};') + time.sleep(self.create_stream_sleep) + return None + else: + return f'create stream if not exists {stream_name} {stream_options} {fill_history} into {des_table}{stb_field_name} {tags} {subtable} as {source_sql} {fill};' + + def pause_stream(self, stream_name, if_exist=True, if_not_exist=False): + """pause_stream + + Args: + stream_name (str): stream_name + if_exist (bool, optional): Defaults to True. + if_not_exist (bool, optional): Defaults to False. + """ + if_exist_value = "if exists" if if_exist else "" + if_not_exist_value = "if not exists" if if_not_exist else "" + tdSql.execute(f'pause stream {if_exist_value} {if_not_exist_value} {stream_name}') + + def resume_stream(self, stream_name, if_exist=True, if_not_exist=False, ignore_untreated=False): + """resume_stream + + Args: + stream_name (str): stream_name + if_exist (bool, optional): Defaults to True. + if_not_exist (bool, optional): Defaults to False. + ignore_untreated (bool, optional): Defaults to False. 
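+
+        Example (editor's sketch; the stream name is illustrative):
+            resume_stream("s1", ignore_untreated=True)
+            # executes: resume stream if exists ignore untreated s1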
+ """ + if_exist_value = "if exists" if if_exist else "" + if_not_exist_value = "if not exists" if if_not_exist else "" + ignore_untreated_value = "ignore untreated" if ignore_untreated else "" + tdSql.execute(f'resume stream {if_exist_value} {if_not_exist_value} {ignore_untreated_value} {stream_name}') + + def drop_all_streams(self): + """drop all streams + """ + tdSql.query("show streams") + stream_name_list = list(map(lambda x: x[0], tdSql.queryResult)) + for stream_name in stream_name_list: + tdSql.execute(f'drop stream if exists {stream_name};') + + def drop_db(self, dbname="test"): + """drop a db + + Args: + dbname (str, optional): Defaults to "test". + """ + if dbname[0].isdigit(): + tdSql.execute(f'drop database if exists `{dbname}`') + else: + tdSql.execute(f'drop database if exists {dbname}') + + def drop_all_db(self): + """drop all databases + """ + tdSql.query("show databases;") + db_list = list(map(lambda x: x[0], tdSql.queryResult)) + for dbname in db_list: + if dbname not in self.white_list and "telegraf" not in dbname: + tdSql.execute(f'drop database if exists `{dbname}`') + + def time_cast(self, time_value, split_symbol="+"): + """cast bigint to timestamp + + Args: + time_value (bigint): ts + split_symbol (str, optional): split sympol. Defaults to "+". + + Returns: + _type_: timestamp + """ + ts_value = str(time_value).split(split_symbol)[0] + if split_symbol in str(time_value): + ts_value_offset = str(time_value).split(split_symbol)[1] + else: + ts_value_offset = "0s" + return f'cast({ts_value} as timestamp){split_symbol}{ts_value_offset}' + + def clean_env(self): + """drop all streams and databases + """ + self.drop_all_streams() + self.drop_all_db() + + def set_precision_offset(self, precision): + if precision == "ms": + self.offset = 1000 + elif precision == "us": + self.offset = 1000000 + elif precision == "ns": + self.offset = 1000000000 + else: + pass + + def genTs(self, precision="ms", ts="", protype="taosc", ns_tag=None): + """generate ts + + Args: + precision (str, optional): db precision. Defaults to "ms". + ts (str, optional): input ts. Defaults to "". + protype (str, optional): "taosc" or "restful". Defaults to "taosc". + ns_tag (_type_, optional): use ns. Defaults to None. + + Returns: + timestamp, datetime: timestamp and datetime + """ + if precision == "ns": + if ts == "" or ts is None: + ts = time.time_ns() + else: + ts = ts + if ns_tag is None: + dt = ts + else: + dt = datetime.fromtimestamp(ts // 1000000000) + dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000000000)).zfill(9) + if protype == "restful": + dt = datetime.fromtimestamp(ts // 1000000000) + dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000000000)).zfill(9) + else: + if ts == "" or ts is None: + ts = time.time() + else: + ts = ts + if precision == "ms" or precision is None: + ts = int(round(ts * 1000)) + dt = datetime.fromtimestamp(ts // 1000) + if protype == "taosc": + dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000)).zfill(3) + '000' + elif protype == "restful": + dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000)).zfill(3) + else: + pass + elif precision == "us": + ts = int(round(ts * 1000000)) + dt = datetime.fromtimestamp(ts // 1000000) + dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' 
+ str(int(ts % 1000000)).zfill(6)
+        return ts, dt
+
+    def sgen_column_type_str(self, column_elm_list):
+        """generate column type str
+
+        Args:
+            column_elm_list (list): column_elm_list
+        """
+        self.column_type_str = ""
+        if column_elm_list is None:
+            self.column_type_str = self.gen_default_column_str()
+        else:
+            for column_elm in column_elm_list:
+                if "count" in column_elm:
+                    total_count = int(column_elm["count"])
+                else:
+                    total_count = 1
+                if total_count > 0:
+                    for _ in range(total_count):
+                        self.column_type_str += f'{self.default_colname_prefix}{self.default_column_index_start_num} {column_elm["type"]}, '
+                        if column_elm["type"] in ["varchar", "binary", "nchar"]:
+                            self.column_type_str = self.column_type_str.rstrip()[:-1] + f'({column_elm["len"]}), '
+                        self.default_column_index_start_num += 1
+                else:
+                    continue
+            self.column_type_str = self.default_colts_name + " timestamp, " + self.column_type_str.rstrip()[:-1]
+
+    def sgen_tag_type_str(self, tag_elm_list):
+        """generate tag type str
+
+        Args:
+            tag_elm_list (list): tag_elm_list
+        """
+        self.tag_type_str = ""
+        if tag_elm_list is None:
+            self.tag_type_str = self.gen_default_tag_str()
+        else:
+            for tag_elm in tag_elm_list:
+                if "count" in tag_elm:
+                    total_count = int(tag_elm["count"])
+                else:
+                    total_count = 1
+                if total_count > 0:
+                    for _ in range(total_count):
+                        self.tag_type_str += f'{self.default_tagname_prefix}{self.default_tag_index_start_num} {tag_elm["type"]}, '
+                        if tag_elm["type"] in ["varchar", "binary", "nchar"]:
+                            self.tag_type_str = self.tag_type_str.rstrip()[:-1] + f'({tag_elm["len"]}), '
+                        self.default_tag_index_start_num += 1
+                else:
+                    continue
+            self.tag_type_str = self.tag_type_str.rstrip()[:-1]
+            if self.need_tagts:
+                self.tag_type_str = self.default_tagts_name + " timestamp, " + self.tag_type_str
+
+    def sgen_tag_value_list(self, tag_elm_list, ts_value=None):
+        """generate tag value list
+
+        Args:
+            tag_elm_list (list): tag element list
+            ts_value (timestamp, optional): Defaults to None.
+        """
+        if self.need_tagts:
+            self.ts_value = self.genTs()[0]
+        if ts_value is not None:
+            self.ts_value = ts_value
+
+        self.tag_value_list = list()
+        if tag_elm_list is None:
+            self.tag_value_list = list(map(lambda i: self.gen_random_type_value(i, self.default_varchar_length, self.default_varchar_datatype, self.default_nchar_length, self.default_nchar_datatype), self.full_type_list))
+        else:
+            for tag_elm in tag_elm_list:
+                if "count" in tag_elm:
+                    total_count = int(tag_elm["count"])
+                else:
+                    total_count = 1
+                if total_count > 0:
+                    for _ in range(total_count):
+                        if tag_elm["type"] in ["varchar", "binary", "nchar"]:
+                            self.tag_value_list.append(self.gen_random_type_value(tag_elm["type"], tag_elm["len"], self.default_varchar_datatype, tag_elm["len"], self.default_nchar_datatype))
+                        else:
+                            self.tag_value_list.append(self.gen_random_type_value(tag_elm["type"], "", "", "", ""))
+                else:
+                    continue
+        # if self.need_tagts and self.ts_value is not None and len(str(self.ts_value)) > 0:
+        if self.need_tagts:
+            self.tag_value_list = [self.ts_value] + self.tag_value_list
+
+    def screateDb(self, dbname="test", drop_db=True, **kwargs):
+        """create database
+
+        Args:
+            dbname (str, optional): Defaults to "test".
+            drop_db (bool, optional): Defaults to True.
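+
+        Example (editor's sketch; names and options are illustrative):
+            screateDb(dbname="db1", precision="us", vgroups=4)
+            # drops db1 if present, then executes:
+            # create database if not exists db1 precision "us" vgroups 4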
+ """ + tdLog.info("creating db ...") + db_params = "" + if len(kwargs) > 0: + for param, value in kwargs.items(): + if param == "precision": + db_params += f'{param} "{value}" ' + else: + db_params += f'{param} {value} ' + if drop_db: + self.drop_db(dbname) + tdSql.execute(f'create database if not exists {dbname} {db_params}') + tdSql.execute(f'use {dbname}') + + def screate_stable(self, dbname=None, stbname="stb", use_name="table", column_elm_list=None, tag_elm_list=None, + need_tagts=False, count=1, default_stbname_prefix="stb", default_stbname_index_start_num=1, + default_column_index_start_num=1, default_tag_index_start_num=1, **kwargs): + """_summary_ + + Args: + dbname (str, optional): Defaults to None. + stbname (str, optional): Defaults to "stb". + use_name (str, optional): stable/table, Defaults to "table". + column_elm_list (list, optional): use for sgen_column_type_str(), Defaults to None. + tag_elm_list (list, optional): use for sgen_tag_type_str(), Defaults to None. + need_tagts (bool, optional): tag use timestamp, Defaults to False. + count (int, optional): stable count, Defaults to 1. + default_stbname_prefix (str, optional): Defaults to "stb". + default_stbname_index_start_num (int, optional): Defaults to 1. + default_column_index_start_num (int, optional): Defaults to 1. + default_tag_index_start_num (int, optional): Defaults to 1. + """ + tdLog.info("creating stable ...") + if dbname is not None: + self.dbname = dbname + self.need_tagts = need_tagts + self.default_stbname_prefix = default_stbname_prefix + self.default_stbname_index_start_num = default_stbname_index_start_num + self.default_column_index_start_num = default_column_index_start_num + self.default_tag_index_start_num = default_tag_index_start_num + stb_params = "" + if len(kwargs) > 0: + for param, value in kwargs.items(): + stb_params += f'{param} "{value}" ' + self.sgen_column_type_str(column_elm_list) + self.sgen_tag_type_str(tag_elm_list) + if self.dbname is not None: + stb_name = f'{self.dbname}.{stbname}' + else: + stb_name = stbname + if int(count) <= 1: + create_stable_sql = f'create {use_name} {stb_name} ({self.column_type_str}) tags ({self.tag_type_str}) {stb_params};' + tdSql.execute(create_stable_sql) + else: + for _ in range(count): + create_stable_sql = f'create {use_name} {self.dbname}.{default_stbname_prefix}{default_stbname_index_start_num} ({self.column_type_str}) tags ({self.tag_type_str}) {stb_params};' + default_stbname_index_start_num += 1 + tdSql.execute(create_stable_sql) + + def screate_ctable(self, dbname=None, stbname=None, ctbname="ctb", use_name="table", tag_elm_list=None, ts_value=None, count=1, default_varchar_datatype="letters", default_nchar_datatype="letters", default_ctbname_prefix="ctb", default_ctbname_index_start_num=1, **kwargs): + """_summary_ + + Args: + dbname (str, optional): Defaults to None. + stbname (str, optional): Defaults to None. + ctbname (str, optional): Defaults to "ctb". + use_name (str, optional): Defaults to "table". + tag_elm_list (list, optional): use for sgen_tag_type_str(), Defaults to None. + ts_value (timestamp, optional): Defaults to None. + count (int, optional): ctb count, Defaults to 1. + default_varchar_datatype (str, optional): Defaults to "letters". + default_nchar_datatype (str, optional): Defaults to "letters". + default_ctbname_prefix (str, optional): Defaults to "ctb". + default_ctbname_index_start_num (int, optional): Defaults to 1. 
+ """ + tdLog.info("creating childtable ...") + self.default_varchar_datatype = default_varchar_datatype + self.default_nchar_datatype = default_nchar_datatype + self.default_ctbname_prefix = default_ctbname_prefix + self.default_ctbname_index_start_num = default_ctbname_index_start_num + ctb_params = "" + if len(kwargs) > 0: + for param, value in kwargs.items(): + ctb_params += f'{param} "{value}" ' + self.sgen_tag_value_list(tag_elm_list, ts_value) + tag_value_str = "" + # tag_value_str = ", ".join(str(v) for v in self.tag_value_list) + for tag_value in self.tag_value_list: + if isinstance(tag_value, str): + tag_value_str += f'"{tag_value}", ' + else: + tag_value_str += f'{tag_value}, ' + tag_value_str = tag_value_str.rstrip()[:-1] + if dbname is not None: + self.dbname = dbname + ctb_name = f'{self.dbname}.{ctbname}' + else: + ctb_name = ctbname + if stbname is not None: + stb_name = stbname + if int(count) <= 1: + create_ctable_sql = f'create {use_name} {ctb_name} using {stb_name} tags ({tag_value_str}) {ctb_params};' + tdSql.execute(create_ctable_sql) + else: + for _ in range(count): + create_stable_sql = f'create {use_name} {self.dbname}.{default_ctbname_prefix}{default_ctbname_index_start_num} using {self.stb_name} tags ({tag_value_str}) {ctb_params};' + default_ctbname_index_start_num += 1 + tdSql.execute(create_stable_sql) + + def sgen_column_value_list(self, column_elm_list, need_null, ts_value=None): + """_summary_ + + Args: + column_elm_list (list): gen_random_type_value() + need_null (bool): if insert null + ts_value (timestamp, optional): Defaults to None. + """ + self.column_value_list = list() + self.ts_value = self.genTs()[0] + if ts_value is not None: + self.ts_value = ts_value + + if column_elm_list is None: + self.column_value_list = list(map(lambda i: self.gen_random_type_value(i, self.default_varchar_length, self.default_varchar_datatype, self.default_nchar_length, self.default_nchar_datatype), self.full_type_list)) + else: + for column_elm in column_elm_list: + if "count" in column_elm: + total_count = int(column_elm["count"]) + else: + total_count = 1 + if total_count > 0: + for _ in range(total_count): + if column_elm["type"] in ["varchar", "binary", "nchar"]: + self.column_value_list.append(self.gen_random_type_value(column_elm["type"], column_elm["len"], self.default_varchar_datatype, column_elm["len"], self.default_nchar_datatype)) + else: + self.column_value_list.append(self.gen_random_type_value(column_elm["type"], "", "", "", "")) + else: + continue + if need_null: + for i in range(int(len(self.column_value_list)/2)): + index_num = random.randint(0, len(self.column_value_list)-1) + self.column_value_list[index_num] = None + self.column_value_list = [self.ts_value] + self.column_value_list + + def screate_table(self, dbname=None, tbname="tb", use_name="table", column_elm_list=None, + count=1, default_tbname_prefix="tb", default_tbname_index_start_num=1, + default_column_index_start_num=1, **kwargs): + """create ctable + + Args: + dbname (str, optional): Defaults to None. + tbname (str, optional): Defaults to "tb". + use_name (str, optional): Defaults to "table". + column_elm_list (list, optional): Defaults to None. + count (int, optional): Defaults to 1. + default_tbname_prefix (str, optional): Defaults to "tb". + default_tbname_index_start_num (int, optional): Defaults to 1. + default_column_index_start_num (int, optional): Defaults to 1. 
+ """ + tdLog.info("creating table ...") + if dbname is not None: + self.dbname = dbname + self.default_tbname_prefix = default_tbname_prefix + self.default_tbname_index_start_num = default_tbname_index_start_num + self.default_column_index_start_num = default_column_index_start_num + tb_params = "" + if len(kwargs) > 0: + for param, value in kwargs.items(): + tb_params += f'{param} "{value}" ' + self.sgen_column_type_str(column_elm_list) + if self.dbname is not None: + tb_name = f'{self.dbname}.{tbname}' + else: + tb_name = tbname + if int(count) <= 1: + create_table_sql = f'create {use_name} {tb_name} ({self.column_type_str}) {tb_params};' + tdSql.execute(create_table_sql) + else: + for _ in range(count): + create_table_sql = f'create {use_name} {self.dbname}.{default_tbname_prefix}{default_tbname_index_start_num} ({self.column_type_str}) {tb_params};' + default_tbname_index_start_num += 1 + tdSql.execute(create_table_sql) + + def sinsert_rows(self, dbname=None, tbname=None, column_ele_list=None, ts_value=None, count=1, need_null=False): + """insert rows + + Args: + dbname (str, optional): Defaults to None. + tbname (str, optional): Defaults to None. + column_ele_list (list, optional): Defaults to None. + ts_value (timestamp, optional): Defaults to None. + count (int, optional): Defaults to 1. + need_null (bool, optional): Defaults to False. + """ + tdLog.info("stream inserting ...") + if dbname is not None: + self.dbname = dbname + if tbname is not None: + self.tbname = f'{self.dbname}.{tbname}' + else: + if tbname is not None: + self.tbname = tbname + + self.sgen_column_value_list(column_ele_list, need_null, ts_value) + # column_value_str = ", ".join(str(v) for v in self.column_value_list) + column_value_str = "" + for column_value in self.column_value_list: + if column_value is None: + column_value_str += 'Null, ' + elif isinstance(column_value, str) and "+" not in column_value and "-" not in column_value: + column_value_str += f'"{column_value}", ' + else: + column_value_str += f'{column_value}, ' + column_value_str = column_value_str.rstrip()[:-1] + if int(count) <= 1: + insert_sql = f'insert into {self.tbname} values ({column_value_str});' + tdSql.execute(insert_sql) + else: + for num in range(count): + ts_value = self.genTs()[0] + self.sgen_column_value_list(column_ele_list, need_null, f'{ts_value}+{num}s') + column_value_str = "" + for column_value in self.column_value_list: + if column_value is None: + column_value_str += 'Null, ' + elif isinstance(column_value, str) and "+" not in column_value: + column_value_str += f'"{column_value}", ' + else: + column_value_str += f'{column_value}, ' + column_value_str = column_value_str.rstrip()[:-1] + insert_sql = f'insert into {self.tbname} values ({column_value_str});' + tdSql.execute(insert_sql) + + def sdelete_rows(self, dbname=None, tbname=None, start_ts=None, end_ts=None, ts_key=None): + """delete rows + + Args: + dbname (str, optional): Defaults to None. + tbname (str, optional): Defaults to None. + start_ts (timestamp, optional): range start. Defaults to None. + end_ts (timestamp, optional): range end. Defaults to None. + ts_key (str, optional): timestamp column name. Defaults to None. 
+ """ + if dbname is not None: + self.dbname = dbname + if tbname is not None: + self.tbname = f'{self.dbname}.{tbname}' + else: + if tbname is not None: + self.tbname = tbname + if ts_key is None: + ts_col_name = self.default_colts_name + else: + ts_col_name = ts_key + + base_del_sql = f'delete from {self.tbname} ' + if end_ts is not None: + if ":" in start_ts and "-" in start_ts: + start_ts = f"{start_ts}" + if ":" in end_ts and "-" in end_ts: + end_ts = f"{end_ts}" + base_del_sql += f'where {ts_col_name} between {start_ts} and {end_ts};' + else: + if start_ts is not None: + if ":" in start_ts and "-" in start_ts: + start_ts = f"{start_ts}" + base_del_sql += f'where {ts_col_name} = {start_ts};' + tdSql.execute(base_del_sql) + + def check_stream_field_type(self, sql, input_function): + """confirm stream field + + Args: + sql (str): input sql + input_function (str): scalar + """ + tdSql.query(sql) + res = tdSql.queryResult + if input_function in ["acos", "asin", "atan", "cos", "log", "pow", "sin", "sqrt", "tan"]: + tdSql.checkEqual(res[1][1], "DOUBLE") + tdSql.checkEqual(res[2][1], "DOUBLE") + elif input_function in ["lower", "ltrim", "rtrim", "upper"]: + tdSql.checkEqual(res[1][1], "VARCHAR") + tdSql.checkEqual(res[2][1], "VARCHAR") + tdSql.checkEqual(res[3][1], "NCHAR") + elif input_function in ["char_length", "length"]: + tdSql.checkEqual(res[1][1], "BIGINT") + tdSql.checkEqual(res[2][1], "BIGINT") + tdSql.checkEqual(res[3][1], "BIGINT") + elif input_function in ["concat", "concat_ws"]: + tdSql.checkEqual(res[1][1], "VARCHAR") + tdSql.checkEqual(res[2][1], "NCHAR") + tdSql.checkEqual(res[3][1], "NCHAR") + tdSql.checkEqual(res[4][1], "NCHAR") + elif input_function in ["substr"]: + tdSql.checkEqual(res[1][1], "VARCHAR") + tdSql.checkEqual(res[2][1], "VARCHAR") + tdSql.checkEqual(res[3][1], "VARCHAR") + tdSql.checkEqual(res[4][1], "NCHAR") + else: + tdSql.checkEqual(res[1][1], "INT") + tdSql.checkEqual(res[2][1], "DOUBLE") + + def round_handle(self, input_list): + """round list elem + + Args: + input_list (list): input value list + + Returns: + _type_: round list + """ + tdLog.info("round rows ...") + final_list = list() + for i in input_list: + tmpl = list() + for j in i: + if type(j) != datetime and type(j) != str: + tmpl.append(round(j, 1)) + else: + tmpl.append(j) + final_list.append(tmpl) + return final_list + + def float_handle(self, input_list): + """float list elem + + Args: + input_list (list): input value list + + Returns: + _type_: float list + """ + tdLog.info("float rows ...") + final_list = list() + for i in input_list: + tmpl = list() + for j_i,j_v in enumerate(i): + if type(j_v) != datetime and j_v is not None and str(j_v).isdigit() and j_i <= 12: + tmpl.append(float(j_v)) + else: + tmpl.append(j_v) + final_list.append(tuple(tmpl)) + return final_list + + def str_ts_trans_bigint(self, str_ts): + """trans str ts to bigint + + Args: + str_ts (str): human-date + + Returns: + bigint: bigint-ts + """ + tdSql.query(f'select cast({str_ts} as bigint)') + return tdSql.queryResult[0][0] + + def cast_query_data(self, query_data): + """cast query-result for existed-stb + + Args: + query_data (list): query data list + + Returns: + list: new list after cast + """ + tdLog.info("cast query data ...") + col_type_list = self.column_type_str.split(',') + tag_type_list = self.tag_type_str.split(',') + col_tag_type_list = col_type_list + tag_type_list + nl = list() + for query_data_t in query_data: + query_data_l = list(query_data_t) + for i,v in enumerate(query_data_l): + if v is not None: + 
if " ".join(col_tag_type_list[i].strip().split(" ")[1:]) == "nchar(6)": + tdSql.query(f'select cast("{v}" as binary(6))') + else: + tdSql.query(f'select cast("{v}" as {" ".join(col_tag_type_list[i].strip().split(" ")[1:])})') + query_data_l[i] = tdSql.queryResult[0][0] + else: + query_data_l[i] = v + nl.append(tuple(query_data_l)) + return nl + + def trans_time_to_s(self, runtime): + """trans time to s + + Args: + runtime (str): 1d/1h/1m... + + Returns: + int: second + """ + if "d" in str(runtime).lower(): + d_num = re.findall("\d+\.?\d*", runtime.replace(" ", ""))[0] + s_num = float(d_num) * 24 * 60 * 60 + elif "h" in str(runtime).lower(): + h_num = re.findall("\d+\.?\d*", runtime.replace(" ", ""))[0] + s_num = float(h_num) * 60 * 60 + elif "m" in str(runtime).lower(): + m_num = re.findall("\d+\.?\d*", runtime.replace(" ", ""))[0] + s_num = float(m_num) * 60 + elif "s" in str(runtime).lower(): + s_num = re.findall("\d+\.?\d*", runtime.replace(" ", ""))[0] + else: + s_num = 60 + return int(s_num) + + def check_query_data(self, sql1, sql2, sorted=False, fill_value=None, tag_value_list=None, defined_tag_count=None, partition=True, use_exist_stb=False, subtable=None, reverse_check=False): + """confirm query result + + Args: + sql1 (str): select .... + sql2 (str): select .... + sorted (bool, optional): if sort result list. Defaults to False. + fill_value (str, optional): fill. Defaults to None. + tag_value_list (list, optional): Defaults to None. + defined_tag_count (int, optional): Defaults to None. + partition (bool, optional): Defaults to True. + use_exist_stb (bool, optional): Defaults to False. + subtable (str, optional): Defaults to None. + reverse_check (bool, optional): not equal. Defaults to False. + + Returns: + bool: False if failed + """ + tdLog.info("checking query data ...") + if tag_value_list: + dvalue = len(self.tag_type_str.split(',')) - defined_tag_count + tdSql.query(sql1) + res1 = tdSql.queryResult + tdSql.query(sql2) + res2 = self.cast_query_data(tdSql.queryResult) if tag_value_list or use_exist_stb else tdSql.queryResult + tdSql.sql = sql1 + new_list = list() + if tag_value_list: + res1 = self.float_handle(res1) + res2 = self.float_handle(res2) + for i,v in enumerate(res2): + if i < len(tag_value_list): + if partition: + new_list.append(tuple(list(v)[:-(dvalue+defined_tag_count)] + list(tag_value_list[i]) + [None]*dvalue)) + else: + new_list.append(tuple(list(v)[:-(dvalue+defined_tag_count)] + [None]*len(self.tag_type_str.split(',')))) + res2 = new_list + else: + if use_exist_stb: + res1 = self.float_handle(res1) + res2 = self.float_handle(res2) + for i,v in enumerate(res2): + new_list.append(tuple(list(v)[:-(13)] + [None]*len(self.tag_type_str.split(',')))) + res2 = new_list + + latency = 0 + if sorted: + res1.sort() + res2.sort() + if fill_value == "LINEAR": + res1 = self.round_handle(res1) + res2 = self.round_handle(res2) + if not reverse_check: + while res1 != res2: + tdLog.info("query retrying ...") + new_list = list() + tdSql.query(sql1) + res1 = tdSql.queryResult + tdSql.query(sql2) + # res2 = tdSql.queryResult + res2 = self.cast_query_data(tdSql.queryResult) if tag_value_list or use_exist_stb else tdSql.queryResult + tdSql.sql = sql1 + + if tag_value_list: + res1 = self.float_handle(res1) + res2 = self.float_handle(res2) + for i,v in enumerate(res2): + if i < len(tag_value_list): + if partition: + new_list.append(tuple(list(v)[:-(dvalue+defined_tag_count)] + list(tag_value_list[i]) + [None]*dvalue)) + else: + 
new_list.append(tuple(list(v)[:-(dvalue+defined_tag_count)] + [None]*len(self.tag_type_str.split(',')))) + res2 = new_list + else: + if use_exist_stb: + res1 = self.float_handle(res1) + res2 = self.float_handle(res2) + for i,v in enumerate(res2): + new_list.append(tuple(list(v)[:-(13)] + [None]*len(self.tag_type_str.split(',')))) + res2 = new_list + if sorted or tag_value_list: + res1.sort() + res2.sort() + if fill_value == "LINEAR": + res1 = self.round_handle(res1) + res2 = self.round_handle(res2) + if latency < self.stream_timeout: + latency += 0.2 + time.sleep(0.2) + else: + if latency == 0: + return False + tdSql.checkEqual(res1, res2) + # tdSql.checkEqual(res1, res2) if not reverse_check else tdSql.checkNotEqual(res1, res2) + else: + while res1 == res2: + tdLog.info("query retrying ...") + new_list = list() + tdSql.query(sql1) + res1 = tdSql.queryResult + tdSql.query(sql2) + # res2 = tdSql.queryResult + res2 = self.cast_query_data(tdSql.queryResult) if tag_value_list or use_exist_stb else tdSql.queryResult + tdSql.sql = sql1 + + if tag_value_list: + res1 = self.float_handle(res1) + res2 = self.float_handle(res2) + for i,v in enumerate(res2): + if i < len(tag_value_list): + if partition: + new_list.append(tuple(list(v)[:-(dvalue+defined_tag_count)] + list(tag_value_list[i]) + [None]*dvalue)) + else: + new_list.append(tuple(list(v)[:-(dvalue+defined_tag_count)] + [None]*len(self.tag_type_str.split(',')))) + res2 = new_list + else: + if use_exist_stb: + res1 = self.float_handle(res1) + res2 = self.float_handle(res2) + for i,v in enumerate(res2): + new_list.append(tuple(list(v)[:-(13)] + [None]*len(self.tag_type_str.split(',')))) + res2 = new_list + if sorted or tag_value_list: + res1.sort() + res2.sort() + if fill_value == "LINEAR": + res1 = self.round_handle(res1) + res2 = self.round_handle(res2) + if latency < self.stream_timeout: + latency += 0.2 + time.sleep(0.2) + else: + if latency == 0: + return False + tdSql.checkNotEqual(res1, res2) + # tdSql.checkEqual(res1, res2) if not reverse_check else tdSql.checkNotEqual(res1, res2) + + def check_stream_res(self, sql, expected_res, max_delay): + """confirm stream result + + Args: + sql (str): select ... + expected_res (str): expected result + max_delay (int): max_delay value + + Returns: + bool: False if failed + """ + tdSql.query(sql) + latency = 0 + + while tdSql.queryRows != expected_res: + tdSql.query(sql) + if latency < self.stream_timeout: + latency += 0.2 + time.sleep(0.2) + else: + if max_delay is not None: + if latency == 0: + return False + tdSql.checkEqual(tdSql.queryRows, expected_res) + + def check_stream(self, sql1, sql2, expected_count, max_delay=None): + """confirm stream + + Args: + sql1 (str): select ... + sql2 (str): select ... + expected_count (int): expected_count + max_delay (int, optional): max_delay value. Defaults to None. + """ + self.check_stream_res(sql1, expected_count, max_delay) + self.check_query_data(sql1, sql2) + + def cal_watermark_window_close_session_endts(self, start_ts, watermark=None, session=None): + """cal endts for close window + + Args: + start_ts (epoch time): self.date_time + watermark (int, optional): > session. Defaults to None. + session (int, optional): Defaults to None. 
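+
+        Example (editor's sketch): with millisecond precision (self.offset == 1000),
+        cal_watermark_window_close_session_endts(1655903478508, watermark=5)
+        returns 1655903478508 + 5*1000 + 1 = 1655903483509.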
+
+        Returns:
+            int: end timestamp of the closed window
+        """
+        if watermark is not None:
+            return start_ts + watermark*self.offset + 1
+        else:
+            return start_ts + session*self.offset + 1
+
+    def cal_watermark_window_close_interval_endts(self, start_ts, interval, watermark=None):
+        """cal endts for close window
+
+        Args:
+            start_ts (epoch time): self.date_time
+            interval (int): [s]
+            watermark (int, optional): [s]. Defaults to None.
+
+        Returns:
+            int: end timestamp of the closed window
+        """
+        if watermark is not None:
+            return int(start_ts/self.offset)*self.offset + (interval - (int(start_ts/self.offset))%interval)*self.offset + watermark*self.offset
+        else:
+            return int(start_ts/self.offset)*self.offset + (interval - (int(start_ts/self.offset))%interval)*self.offset
+
+    def update_delete_history_data(self, delete):
+        """update and delete history data
+
+        Args:
+            delete (bool): True/False
+        """
+        self.sinsert_rows(tbname=self.ctb_name, ts_value=self.record_history_ts)
+        self.sinsert_rows(tbname=self.tb_name, ts_value=self.record_history_ts)
+        if delete:
+            self.sdelete_rows(tbname=self.ctb_name, start_ts=self.time_cast(self.record_history_ts, "-"))
+            self.sdelete_rows(tbname=self.tb_name, start_ts=self.time_cast(self.record_history_ts, "-"))
+
+    def prepare_data(self, interval=None, watermark=None, session=None, state_window=None, state_window_max=127, interation=3, range_count=None, precision="ms", fill_history_value=0, ext_stb=None):
+        """prepare stream data
+
+        Args:
+            interval (int, optional): Defaults to None.
+            watermark (int, optional): Defaults to None.
+            session (int, optional): Defaults to None.
+            state_window (str, optional): Defaults to None.
+            state_window_max (int, optional): Defaults to 127.
+            interation (int, optional): iteration count. Defaults to 3.
+            range_count (int, optional): Defaults to None.
+            precision (str, optional): Defaults to "ms".
+            fill_history_value (int, optional): Defaults to 0.
+            ext_stb (bool, optional): Defaults to None.
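+
+        Example (editor's sketch; argument values are illustrative):
+            prepare_data(interval=10, watermark=15, fill_history_value=1, range_count=5)
+            # recreates the test database, creates the stable/child-table/normal-table
+            # trio, and back-fills range_count history rows into the child and normal
+            # tables for fill_history streams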
+ """ + self.clean_env() + self.dataDict = { + "stb_name" : f"{self.case_name}_stb", + "ctb_name" : f"{self.case_name}_ct1", + "tb_name" : f"{self.case_name}_tb1", + "ext_stb_name" : f"ext_{self.case_name}_stb", + "ext_ctb_name" : f"ext_{self.case_name}_ct1", + "ext_tb_name" : f"ext_{self.case_name}_tb1", + "interval" : interval, + "watermark": watermark, + "session": session, + "state_window": state_window, + "state_window_max": state_window_max, + "iteration": interation, + "range_count": range_count, + "start_ts": 1655903478508, + } + if range_count is not None: + self.range_count = range_count + if precision is not None: + self.precision = precision + self.set_precision_offset(self.precision) + + self.stb_name = self.dataDict["stb_name"] + self.ctb_name = self.dataDict["ctb_name"] + self.tb_name = self.dataDict["tb_name"] + self.ext_stb_name = self.dataDict["ext_stb_name"] + self.ext_ctb_name = self.dataDict["ext_ctb_name"] + self.ext_tb_name = self.dataDict["ext_tb_name"] + self.stb_stream_des_table = f'{self.stb_name}{self.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.des_table_suffix}' + self.ext_stb_stream_des_table = f'{self.ext_stb_name}{self.des_table_suffix}' + self.ext_ctb_stream_des_table = f'{self.ext_ctb_name}{self.des_table_suffix}' + self.ext_tb_stream_des_table = f'{self.ext_tb_name}{self.des_table_suffix}' + self.date_time = self.genTs(precision=self.precision)[0] + + self.screateDb(dbname=self.dbname, precision=self.precision) + if ext_stb: + self.screate_stable(dbname=self.dbname, stbname=self.ext_stb_stream_des_table) + self.screate_ctable(dbname=self.dbname, stbname=self.ext_stb_stream_des_table, ctbname=self.ext_ctb_stream_des_table) + self.screate_table(dbname=self.dbname, tbname=self.ext_tb_stream_des_table) + self.screate_stable(dbname=self.dbname, stbname=self.stb_name) + self.screate_ctable(dbname=self.dbname, stbname=self.stb_name, ctbname=self.ctb_name) + self.screate_table(dbname=self.dbname, tbname=self.tb_name) + if fill_history_value == 1: + for i in range(self.range_count): + ts_value = str(self.date_time)+f'-{self.default_interval*(i+1)}s' + self.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + self.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + if i == 1: + self.record_history_ts = ts_value + +def is_json(msg): + if isinstance(msg, str): + try: + json.loads(msg) + return True + except: + return False + else: + return False + +def get_path(tool="taosd"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files or ("%s.exe"%tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + return "" + return paths[0] + +def dict2toml(in_dict: dict, file:str): + if not isinstance(in_dict, dict): + return "" + with open(file, 'w') as f: + toml.dump(in_dict, f) + +tdCom = TDCom() diff --git a/tests/army/frame/constant.py b/tests/army/frame/constant.py new file mode 100644 index 0000000000..c4541386b4 --- /dev/null +++ b/tests/army/frame/constant.py @@ -0,0 +1,197 @@ +# -*- coding: utf-8 -*- + +# basic type +TAOS_DATA_TYPE = [ + "INT", "BIGINT", "SMALLINT", "TINYINT", "INT UNSIGNED", "BIGINT UNSIGNED", "SMALLINT 
UNSIGNED", "TINYINT UNSIGNED", + "FLOAT", "DOUBLE", + "BOOL", + "BINARY", "NCHAR", "VARCHAR", + "TIMESTAMP", + # "MEDIUMBLOB", "BLOB", # add in 3.x + # "DECIMAL", "NUMERIC", # add in 3.x + "JSON", # only for tag +] + +TAOS_NUM_TYPE = [ + "INT", "BIGINT", "SMALLINT", "TINYINT", "INT UNSIGNED", "BIGINT UNSIGNED", "SMALLINT UNSIGNED", "TINYINT UNSIGNED", "FLOAT", "DOUBLE", + # "DECIMAL", "NUMERIC", # add in 3.x +] +TAOS_CHAR_TYPE = [ + "BINARY", "NCHAR", "VARCHAR", +] +TAOS_BOOL_TYPE = ["BOOL",] +TAOS_TS_TYPE = ["TIMESTAMP",] +TAOS_BIN_TYPE = [ + "MEDIUMBLOB", "BLOB", # add in 3.x +] + +TAOS_TIME_INIT = ["b", "u", "a", "s", "m", "h", "d", "w", "n", "y"] +TAOS_PRECISION = ["ms", "us", "ns"] +PRECISION_DEFAULT = "ms" +PRECISION = PRECISION_DEFAULT + +TAOS_KEYWORDS = [ + "ABORT", "CREATE", "IGNORE", "NULL", "STAR", + "ACCOUNT", "CTIME", "IMMEDIATE", "OF", "STATE", + "ACCOUNTS", "DATABASE", "IMPORT", "OFFSET", "STATEMENT", + "ADD", "DATABASES", "IN", "OR", "STATE_WINDOW", + "AFTER", "DAYS", "INITIALLY", "ORDER", "STORAGE", + "ALL", "DBS", "INSERT", "PARTITIONS", "STREAM", + "ALTER", "DEFERRED", "INSTEAD", "PASS", "STREAMS", + "AND", "DELIMITERS", "INT", "PLUS", "STRING", + "AS", "DESC", "INTEGER", "PPS", "SYNCDB", + "ASC", "DESCRIBE", "INTERVAL", "PRECISION", "TABLE", + "ATTACH", "DETACH", "INTO", "PREV", "TABLES", + "BEFORE", "DISTINCT", "IS", "PRIVILEGE", "TAG", + "BEGIN", "DIVIDE", "ISNULL", "QTIME", "TAGS", + "BETWEEN", "DNODE", "JOIN", "QUERIES", "TBNAME", + "BIGINT", "DNODES", "KEEP", "QUERY", "TIMES", + "BINARY", "DOT", "KEY", "QUORUM", "TIMESTAMP", + "BITAND", "DOUBLE", "KILL", "RAISE", "TINYINT", + "BITNOT", "DROP", "LE", "REM", "TOPIC", + "BITOR", "EACH", "LIKE", "REPLACE", "TOPICS", + "BLOCKS", "END", "LIMIT", "REPLICA", "TRIGGER", + "BOOL", "EQ", "LINEAR", "RESET", "TSERIES", + "BY", "EXISTS", "LOCAL", "RESTRICT", "UMINUS", + "CACHE", "EXPLAIN", "LP", "ROW", "UNION", + "CACHEMODEL", "FAIL", "LSHIFT", "RP", "UNSIGNED", + "CASCADE", "FILE", "LT", "RSHIFT", "UPDATE", + "CHANGE", "FILL", "MATCH", "SCORES", "UPLUS", + "CLUSTER", "FLOAT", "MAXROWS", "SELECT", "USE", + "COLON", "FOR", "MINROWS", "SEMI", "USER", + "COLUMN", "FROM", "MINUS", "SESSION", "USERS", + "COMMA", "FSYNC", "MNODES", "SET", "USING", + "COMP", "GE", "MODIFY", "SHOW", "VALUES", + "COMPACT", "GLOB", "MODULES", "SLASH", "VARIABLE", + "CONCAT", "GRANTS", "NCHAR", "SLIDING", "VARIABLES", + "CONFLICT", "GROUP", "NE", "SLIMIT", "VGROUPS", + "CONNECTION", "GT", "NONE", "SMALLINT", "VIEW", + "CONNECTIONS", "HAVING", "NOT", "SOFFSET", "VNODES", + "CONNS", "ID", "NOTNULL", "STABLE", "WAL", + "COPY", "IF", "NOW", "STABLES", "WHERE", +] + +NUM_FUNC = [ + "ABS", "ACOS", "ASIN", "ATAN", "CEIL", "COS", "FLOOR", "LOG", "POW", "ROUND", "SIN", "SQRT", "TAN", +] + +STR_FUNC = [ + "CHAR_LENGTH", "CONCAT", "CONCAT_WS", "LENGTH", "LOWER","LTRIM", "RTRIM", "SUBSTR", "UPPER", +] + +CONVER_FUNC = ["CASR", "TO_ISO8601", "TO_JSON", "TP_UNIXTIMESTAMP"] + +SELECT_FUNC = [ + "APERCENTILE", "BOTTOM", "FIRST", "INTERP", "LAST", "MAX", "MIN", "PERCENTILE", "TAIL", "TOP", "UNIQUE", +] + +AGG_FUNC = [ + "AVG", "COUNT", "ELAPSED", "LEASTSQUARES", "MODE", "SPREAD", "STDDEV", "SUM", "HYPERLOGLOG", "HISTOGRAM", +] + +TS_FUNC = [ + "CSUM", "DERIVATIVE", "DIFF", "IRATE", "MAVG", "SAMPLE", "STATECOUNT", "STATEDURATION", "TWA" +] + +SYSINFO_FUNC = [ + "DATABASE", "CLIENT_VERSION", "SERVER_VERSION", "SERVER_STATUS", "CURRENT_USER", "USER" +] + + + +# basic data type boundary +TINYINT_MAX = 127 +TINYINT_MIN = -128 + +TINYINT_UN_MAX = 255 +TINYINT_UN_MIN = 0 + 
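+# Example (editor's sketch, not part of the original file): a value generator
+# can pair these bounds with random.randint, e.g.
+#   random.randint(TINYINT_MIN, TINYINT_MAX)        -> legal TINYINT
+#   random.randint(TINYINT_UN_MIN, TINYINT_UN_MAX)  -> legal TINYINT UNSIGNED
+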
+SMALLINT_MAX = 32767 +SMALLINT_MIN = -32768 + +SMALLINT_UN_MAX = 65535 +SMALLINT_UN_MIN = 0 + +INT_MAX = 2_147_483_647 +INT_MIN = -2_147_483_648 + +INT_UN_MAX = 4_294_967_295 +INT_UN_MIN = 0 + +BIGINT_MAX = 9_223_372_036_854_775_807 +BIGINT_MIN = -9_223_372_036_854_775_808 + +BIGINT_UN_MAX = 18_446_744_073_709_551_615 +BIGINT_UN_MIN = 0 + +FLOAT_MAX = 3.40E+38 +FLOAT_MIN = -3.40E+38 + +DOUBLE_MAX = 1.7E+308 +DOUBLE_MIN = -1.7E+308 + +# schema boundary +BINARY_LENGTH_MAX = 16374 +NCAHR_LENGTH_MAX = 4093 +DBNAME_LENGTH_MAX = 64 + +STBNAME_LENGTH_MAX = 192 +STBNAME_LENGTH_MIN = 1 + +TBNAME_LENGTH_MAX = 192 +TBNAME_LENGTH_MIN = 1 + +CHILD_TBNAME_LENGTH_MAX = 192 +CHILD_TBNAME_LENGTH_MIN = 1 + +TAG_NAME_LENGTH_MAX = 64 +TAG_NAME_LENGTH_MIN = 1 + +COL_NAME_LENGTH_MAX = 64 +COL_NAME_LENGTH_MIN = 1 + +TAG_COUNT_MAX = 128 +TAG_COUNT_MIN = 1 + +COL_COUNT_MAX = 4096 +COL_COUNT_MIN = 2 + +TAG_COL_COUNT_MAX = 4096 +TAG_COL_COUNT_MIN = 3 + +MNODE_SHM_SIZE_MAX = 2_147_483_647 +MNODE_SHM_SIZE_MIN = 6_292_480 +MNODE_SHM_SIZE_DEFAULT = 6_292_480 + +VNODE_SHM_SIZE_MAX = 2_147_483_647 +VNODE_SHM_SIZE_MIN = 6_292_480 +VNODE_SHM_SIZE_DEFAULT = 31_458_304 + +# time_init +TIME_MS = 1 +TIME_US = TIME_MS/1000 +TIME_NS = TIME_US/1000 + +TIME_S = 1000 * TIME_MS +TIME_M = 60 * TIME_S +TIME_H = 60 * TIME_M +TIME_D = 24 * TIME_H +TIME_W = 7 * TIME_D +TIME_N = 30 * TIME_D +TIME_Y = 365 * TIME_D + + +# session parameters +INTERVAL_MIN = 1 * TIME_MS if PRECISION == PRECISION_DEFAULT else 1 * TIME_US + + +# streams and related agg-function +SMA_INDEX_FUNCTIONS = ["MIN", "MAX"] +ROLLUP_FUNCTIONS = ["AVG", "SUM", "MIN", "MAX", "LAST", "FIRST"] +BLOCK_FUNCTIONS = ["SUM", "MIN", "MAX"] +SMA_WATMARK_MAXDELAY_INIT = ['a', "s", "m"] +WATERMARK_MAX = 900000 +WATERMARK_MIN = 0 + +MAX_DELAY_MAX = 900000 +MAX_DELAY_MIN = 1 \ No newline at end of file diff --git a/tests/army/frame/dnodes-default.py b/tests/army/frame/dnodes-default.py new file mode 100644 index 0000000000..a1d1698b00 --- /dev/null +++ b/tests/army/frame/dnodes-default.py @@ -0,0 +1,502 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import os.path +import subprocess +from frame.log import * + + +class TDSimClient: + def __init__(self): + self.testCluster = False + + self.cfgDict = { + "numOfLogLines": "100000000", + "numOfThreadsPerCore": "2.0", + "locale": "en_US.UTF-8", + "charset": "UTF-8", + "asyncLog": "0", + "minTablesPerVnode": "4", + "maxTablesPerVnode": "1000", + "tableIncStepPerVnode": "10000", + "maxVgroupsPerDb": "1000", + "sdbDebugFlag": "143", + "rpcDebugFlag": "135", + "tmrDebugFlag": "131", + "cDebugFlag": "135", + "udebugFlag": "135", + "jnidebugFlag": "135", + "qdebugFlag": "135", + "telemetryReporting": "0", + } + def init(self, path): + self.__init__() + self.path = path + + def getLogDir(self): + self.logDir = os.path.join(self.path,"sim","psim","log") + return self.logDir + + def getCfgDir(self): + self.cfgDir = os.path.join(self.path,"sim","psim","cfg") + return self.cfgDir + + def setTestCluster(self, value): + self.testCluster = value + + def addExtraCfg(self, option, value): + self.cfgDict.update({option: value}) + + def cfg(self, option, value): + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def deploy(self): + self.logDir = os.path.join(self.path,"sim","psim","log") + self.cfgDir = os.path.join(self.path,"sim","psim","cfg") + self.cfgPath = os.path.join(self.path,"sim","psim","cfg","taos.cfg") + + cmd = "rm -rf " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "touch " + self.cfgPath + if os.system(cmd) != 0: + tdLog.exit(cmd) + + if self.testCluster: + self.cfg("masterIp", "192.168.0.1") + self.cfg("secondIp", "192.168.0.2") + self.cfg("logDir", self.logDir) + + for key, value in self.cfgDict.items(): + self.cfg(key, value) + + tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath)) + + +class TDDnode: + def __init__(self, index): + self.index = index + self.running = 0 + self.deployed = 0 + self.testCluster = False + self.valgrind = 0 + + def init(self, path): + self.path = path + + def setTestCluster(self, value): + self.testCluster = value + + def setValgrind(self, value): + self.valgrind = value + + def getDataSize(self): + totalSize = 0 + + if (self.deployed == 1): + for dirpath, dirnames, filenames in os.walk(self.dataDir): + for f in filenames: + fp = os.path.join(dirpath, f) + + if not os.path.islink(fp): + totalSize = totalSize + os.path.getsize(fp) + + return totalSize + + def deploy(self): + self.logDir = os.path.join(self.path,"sim","dnode%d" % self.index, "log") + self.dataDir = os.path.join(self.path,"sim","dnode%d" % self.index, "data") + self.cfgDir = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg") + self.cfgPath = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg","taos.cfg") + + cmd = "rm -rf " + self.dataDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = 
"mkdir -p " + self.dataDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "touch " + self.cfgPath + if os.system(cmd) != 0: + tdLog.exit(cmd) + + if self.testCluster: + self.startIP() + + if self.testCluster: + self.cfg("masterIp", "192.168.0.1") + self.cfg("secondIp", "192.168.0.2") + self.cfg("publicIp", "192.168.0.%d" % (self.index)) + self.cfg("internalIp", "192.168.0.%d" % (self.index)) + self.cfg("privateIp", "192.168.0.%d" % (self.index)) + self.cfg("dataDir", self.dataDir) + self.cfg("logDir", self.logDir) + self.cfg("numOfLogLines", "100000000") + self.cfg("mnodeEqualVnodeNum", "0") + self.cfg("walLevel", "2") + self.cfg("fsync", "1000") + self.cfg("statusInterval", "1") + self.cfg("numOfMnodes", "3") + self.cfg("numOfThreadsPerCore", "2.0") + self.cfg("monitor", "0") + self.cfg("maxVnodeConnections", "30000") + self.cfg("maxMgmtConnections", "30000") + self.cfg("maxMeterConnections", "30000") + self.cfg("maxShellConns", "30000") + self.cfg("locale", "en_US.UTF-8") + self.cfg("charset", "UTF-8") + self.cfg("asyncLog", "0") + self.cfg("anyIp", "0") + self.cfg("dDebugFlag", "135") + self.cfg("mDebugFlag", "135") + self.cfg("sdbDebugFlag", "135") + self.cfg("rpcDebugFlag", "135") + self.cfg("tmrDebugFlag", "131") + self.cfg("cDebugFlag", "135") + self.cfg("httpDebugFlag", "135") + self.cfg("monitorDebugFlag", "135") + self.cfg("udebugFlag", "135") + self.cfg("jnidebugFlag", "135") + self.cfg("qdebugFlag", "135") + self.deployed = 1 + tdLog.debug( + "dnode:%d is deployed and configured by %s" % + (self.index, self.cfgPath)) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def start(self): + buildPath = self.getBuildPath() + + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + + binPath = buildPath + "/build/bin/taosd" + + if self.deployed == 0: + tdLog.exit("dnode:%d is not deployed" % (self.index)) + + if self.valgrind == 0: + cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( + binPath, self.cfgDir) + else: + valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes" + + cmd = "nohup %s %s -c %s 2>&1 & " % ( + valgrindCmdline, binPath, self.cfgDir) + + print(cmd) + + if os.system(cmd) != 0: + tdLog.exit(cmd) + self.running = 1 + tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) + + tdLog.debug("wait 5 seconds for the dnode:%d to start." 
% (self.index)) + time.sleep(5) + + def stop(self): + if self.valgrind == 0: + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + if self.running != 0: + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + while(processID): + killCmd = "kill -INT %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + for port in range(6030, 6041): + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) + if self.valgrind: + time.sleep(2) + + self.running = 0 + tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index)) + + def forcestop(self): + if self.valgrind == 0: + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + if self.running != 0: + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + while(processID): + killCmd = "kill -KILL %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + for port in range(6030, 6041): + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) + if self.valgrind: + time.sleep(2) + + self.running = 0 + tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index)) + + def startIP(self): + cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def stopIP(self): + cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % ( + self.index, self.index) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def cfg(self, option, value): + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def getDnodeRootDir(self, index): + dnodeRootDir = os.path.join(self.path,"sim","psim","dnode%d" % index) + return dnodeRootDir + + def getDnodesRootDir(self): + dnodesRootDir = os.path.join(self.path,"sim","psim") + return dnodesRootDir + + +class TDDnodes: + def __init__(self): + self.dnodes = [] + self.dnodes.append(TDDnode(1)) + self.dnodes.append(TDDnode(2)) + self.dnodes.append(TDDnode(3)) + self.dnodes.append(TDDnode(4)) + self.dnodes.append(TDDnode(5)) + self.dnodes.append(TDDnode(6)) + self.dnodes.append(TDDnode(7)) + self.dnodes.append(TDDnode(8)) + self.dnodes.append(TDDnode(9)) + self.dnodes.append(TDDnode(10)) + self.simDeployed = False + + def init(self, path): + psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + binPath = os.path.dirname(os.path.realpath(__file__)) + binPath = binPath + "/../../../debug/" + tdLog.debug("binPath %s" % (binPath)) + binPath = os.path.realpath(binPath) + tdLog.debug("binPath real path %s" % (binPath)) + + # cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath) + # tdLog.debug(cmd) + # os.system(cmd) + + # 
cmd = "sudo cp %s/build/bin/taos /usr/local/bin/taos/" % (binPath) + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + # tdLog.debug("execute %s" % (cmd)) + + # cmd = "sudo cp %s/build/bin/taosd /usr/local/bin/taos/" % (binPath) + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + # tdLog.debug("execute %s" % (cmd)) + + if path == "": + # self.path = os.path.expanduser('~') + self.path = os.path.abspath(binPath + "../../") + else: + self.path = os.path.realpath(path) + + for i in range(len(self.dnodes)): + self.dnodes[i].init(self.path) + + self.sim = TDSimClient() + self.sim.init(self.path) + + def setTestCluster(self, value): + self.testCluster = value + + def setValgrind(self, value): + self.valgrind = value + + def deploy(self, index): + self.sim.setTestCluster(self.testCluster) + + if (self.simDeployed == False): + self.sim.deploy() + self.simDeployed = True + + self.check(index) + self.dnodes[index - 1].setTestCluster(self.testCluster) + self.dnodes[index - 1].setValgrind(self.valgrind) + self.dnodes[index - 1].deploy() + + def cfg(self, index, option, value): + self.check(index) + self.dnodes[index - 1].cfg(option, value) + + def start(self, index): + self.check(index) + self.dnodes[index - 1].start() + + def stop(self, index): + self.check(index) + self.dnodes[index - 1].stop() + + def getDataSize(self, index): + self.check(index) + return self.dnodes[index - 1].getDataSize() + + def forcestop(self, index): + self.check(index) + self.dnodes[index - 1].forcestop() + + def startIP(self, index): + self.check(index) + + if self.testCluster: + self.dnodes[index - 1].startIP() + + def stopIP(self, index): + self.check(index) + + if self.dnodes[index - 1].testCluster: + self.dnodes[index - 1].stopIP() + + def check(self, index): + if index < 1 or index > 10: + tdLog.exit("index:%d should on a scale of [1, 10]" % (index)) + + def stopAll(self): + tdLog.info("stop all dnodes") + for i in range(len(self.dnodes)): + self.dnodes[i].stop() + + psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + if processID: + cmd = "sudo systemctl stop taosd" + os.system(cmd) + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + + def getDnodesRootDir(self): + dnodesRootDir = "%s/sim" % (self.path) + return dnodesRootDir + + def getSimCfgPath(self): + return self.sim.getCfgDir() + + def getSimLogPath(self): + return self.sim.getLogDir() + + def addSimExtraCfg(self, option, value): + self.sim.addExtraCfg(option, value) + + +tdDnodes = TDDnodes() diff --git a/tests/army/frame/dnodes-no-random-fail.py b/tests/army/frame/dnodes-no-random-fail.py new file mode 100644 index 0000000000..3b3083396b --- /dev/null +++ b/tests/army/frame/dnodes-no-random-fail.py @@ -0,0 +1,500 @@ 
+################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import os.path +import subprocess +from frame.log import * + + +class TDSimClient: + def __init__(self): + self.testCluster = False + + self.cfgDict = { + "numOfLogLines": "100000000", + "numOfThreadsPerCore": "2.0", + "locale": "en_US.UTF-8", + "charset": "UTF-8", + "asyncLog": "0", + "anyIp": "0", + "sdbDebugFlag": "135", + "rpcDebugFlag": "135", + "tmrDebugFlag": "131", + "cDebugFlag": "135", + "udebugFlag": "135", + "jnidebugFlag": "135", + "qdebugFlag": "135", + "telemetryReporting": "0", + } + + def init(self, path): + self.__init__() + self.path = path + + def getLogDir(self): + self.logDir = os.path.join(self.path,"sim","psim","log") + return self.logDir + + def getCfgDir(self): + self.cfgDir = os.path.join(self.path,"sim","psim","cfg") + return self.cfgDir + + def setTestCluster(self, value): + self.testCluster = value + + def addExtraCfg(self, option, value): + self.cfgDict.update({option: value}) + + def cfg(self, option, value): + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def deploy(self): + self.logDir = os.path.join(self.path,"sim","psim","log") + self.cfgDir = os.path.join(self.path,"sim","psim","cfg") + self.cfgPath = os.path.join(self.path,"sim","psim","cfg","taos.cfg") + + cmd = "rm -rf " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "touch " + self.cfgPath + if os.system(cmd) != 0: + tdLog.exit(cmd) + + if self.testCluster: + self.cfg("masterIp", "192.168.0.1") + self.cfg("secondIp", "192.168.0.2") + self.cfg("logDir", self.logDir) + + for key, value in self.cfgDict.items(): + self.cfg(key, value) + + tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath)) + + +class TDDnode: + def __init__(self, index): + self.index = index + self.running = 0 + self.deployed = 0 + self.testCluster = False + self.valgrind = 0 + + def init(self, path): + self.path = path + + def setTestCluster(self, value): + self.testCluster = value + + def setValgrind(self, value): + self.valgrind = value + + def getDataSize(self): + totalSize = 0 + + if (self.deployed == 1): + for dirpath, dirnames, filenames in os.walk(self.dataDir): + for f in filenames: + fp = os.path.join(dirpath, f) + + if not os.path.islink(fp): + totalSize = totalSize + os.path.getsize(fp) + + return totalSize + + def deploy(self): + self.logDir = os.path.join(self.path,"sim","dnode%d" % self.index, "log") + self.dataDir = os.path.join(self.path,"sim","dnode%d" % self.index, "data") + self.cfgDir = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg") + self.cfgPath = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg","taos.cfg") + + cmd = "rm -rf " + self.dataDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.logDir + if os.system(cmd) != 0: 
+            tdLog.exit(cmd)
+
+        cmd = "rm -rf " + self.cfgDir
+        if os.system(cmd) != 0:
+            tdLog.exit(cmd)
+
+        cmd = "mkdir -p " + self.dataDir
+        if os.system(cmd) != 0:
+            tdLog.exit(cmd)
+
+        cmd = "mkdir -p " + self.logDir
+        if os.system(cmd) != 0:
+            tdLog.exit(cmd)
+
+        cmd = "mkdir -p " + self.cfgDir
+        if os.system(cmd) != 0:
+            tdLog.exit(cmd)
+
+        cmd = "touch " + self.cfgPath
+        if os.system(cmd) != 0:
+            tdLog.exit(cmd)
+
+        if self.testCluster:
+            self.startIP()
+
+        if self.testCluster:
+            self.cfg("masterIp", "192.168.0.1")
+            self.cfg("secondIp", "192.168.0.2")
+            self.cfg("publicIp", "192.168.0.%d" % (self.index))
+            self.cfg("internalIp", "192.168.0.%d" % (self.index))
+            self.cfg("privateIp", "192.168.0.%d" % (self.index))
+        self.cfg("dataDir", self.dataDir)
+        self.cfg("logDir", self.logDir)
+        self.cfg("numOfLogLines", "100000000")
+        self.cfg("mnodeEqualVnodeNum", "0")
+        self.cfg("walLevel", "2")
+        self.cfg("fsync", "1000")
+        self.cfg("statusInterval", "1")
+        self.cfg("numOfMnodes", "3")
+        self.cfg("numOfThreadsPerCore", "2.0")
+        self.cfg("monitor", "0")
+        self.cfg("maxVnodeConnections", "30000")
+        self.cfg("maxMgmtConnections", "30000")
+        self.cfg("maxMeterConnections", "30000")
+        self.cfg("maxShellConns", "30000")
+        self.cfg("locale", "en_US.UTF-8")
+        self.cfg("charset", "UTF-8")
+        self.cfg("asyncLog", "0")
+        self.cfg("anyIp", "0")
+        self.cfg("dDebugFlag", "135")
+        self.cfg("mDebugFlag", "135")
+        self.cfg("sdbDebugFlag", "135")
+        self.cfg("rpcDebugFlag", "135")
+        self.cfg("tmrDebugFlag", "131")
+        self.cfg("cDebugFlag", "135")
+        self.cfg("httpDebugFlag", "135")
+        self.cfg("monitorDebugFlag", "135")
+        self.cfg("udebugFlag", "135")
+        self.cfg("jnidebugFlag", "135")
+        self.cfg("qdebugFlag", "135")
+        self.deployed = 1
+        tdLog.debug(
+            "dnode:%d is deployed and configured by %s" %
+            (self.index, self.cfgPath))
+
+    def getBuildPath(self):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        # initialize so the walk below cannot leave buildPath unbound
+        buildPath = ""
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root)-len("/build/bin")]
+                    break
+        return buildPath
+
+    def start(self):
+        buildPath = self.getBuildPath()
+
+        if (buildPath == ""):
+            tdLog.exit("taosd not found!")
+        else:
+            tdLog.info("taosd found in %s" % buildPath)
+
+        binPath = buildPath + "/build/bin/taosd"
+
+        if self.deployed == 0:
+            tdLog.exit("dnode:%d is not deployed" % (self.index))
+
+        if self.valgrind == 0:
+            cmd = "nohup %s -c %s --random-file-fail-factor 0 > /dev/null 2>&1 & " % (
+                binPath, self.cfgDir)
+        else:
+            valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"
+
+            cmd = "nohup %s %s -c %s 2>&1 & " % (
+                valgrindCmdline, binPath, self.cfgDir)
+
+        print(cmd)
+
+        if os.system(cmd) != 0:
+            tdLog.exit(cmd)
+        self.running = 1
+        tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
+
+        tdLog.debug("wait 5 seconds for the dnode:%d to start."
+
% (self.index)) + time.sleep(5) + + def stop(self): + if self.valgrind == 0: + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + if self.running != 0: + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + while(processID): + killCmd = "kill -INT %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + for port in range(6030, 6041): + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) + if self.valgrind: + time.sleep(2) + + self.running = 0 + tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index)) + + def forcestop(self): + if self.valgrind == 0: + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + if self.running != 0: + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + while(processID): + killCmd = "kill -KILL %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + for port in range(6030, 6041): + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) + if self.valgrind: + time.sleep(2) + + self.running = 0 + tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index)) + + def startIP(self): + cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def stopIP(self): + cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % ( + self.index, self.index) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def cfg(self, option, value): + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def getDnodeRootDir(self, index): + dnodeRootDir = os.path.join(self.path,"sim","psim","dnode%d" % index) + return dnodeRootDir + + def getDnodesRootDir(self): + dnodesRootDir = os.path.join(self.path,"sim","psim") + return dnodesRootDir + + +class TDDnodes: + def __init__(self): + self.dnodes = [] + self.dnodes.append(TDDnode(1)) + self.dnodes.append(TDDnode(2)) + self.dnodes.append(TDDnode(3)) + self.dnodes.append(TDDnode(4)) + self.dnodes.append(TDDnode(5)) + self.dnodes.append(TDDnode(6)) + self.dnodes.append(TDDnode(7)) + self.dnodes.append(TDDnode(8)) + self.dnodes.append(TDDnode(9)) + self.dnodes.append(TDDnode(10)) + self.simDeployed = False + + def init(self, path): + psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + binPath = os.path.dirname(os.path.realpath(__file__)) + binPath = binPath + "/../../../debug/" + tdLog.debug("binPath %s" % (binPath)) + binPath = os.path.realpath(binPath) + tdLog.debug("binPath real path %s" % (binPath)) + + # cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath) + # tdLog.debug(cmd) + # os.system(cmd) + + # 
cmd = "sudo cp %s/build/bin/taos /usr/local/bin/taos/" % (binPath) + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + # tdLog.debug("execute %s" % (cmd)) + + # cmd = "sudo cp %s/build/bin/taosd /usr/local/bin/taos/" % (binPath) + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + # tdLog.debug("execute %s" % (cmd)) + + if path == "": + # self.path = os.path.expanduser('~') + self.path = os.path.abspath(binPath + "../../") + else: + self.path = os.path.realpath(path) + + for i in range(len(self.dnodes)): + self.dnodes[i].init(self.path) + + self.sim = TDSimClient() + self.sim.init(self.path) + + def setTestCluster(self, value): + self.testCluster = value + + def setValgrind(self, value): + self.valgrind = value + + def deploy(self, index): + self.sim.setTestCluster(self.testCluster) + + if (self.simDeployed == False): + self.sim.deploy() + self.simDeployed = True + + self.check(index) + self.dnodes[index - 1].setTestCluster(self.testCluster) + self.dnodes[index - 1].setValgrind(self.valgrind) + self.dnodes[index - 1].deploy() + + def cfg(self, index, option, value): + self.check(index) + self.dnodes[index - 1].cfg(option, value) + + def start(self, index): + self.check(index) + self.dnodes[index - 1].start() + + def stop(self, index): + self.check(index) + self.dnodes[index - 1].stop() + + def getDataSize(self, index): + self.check(index) + return self.dnodes[index - 1].getDataSize() + + def forcestop(self, index): + self.check(index) + self.dnodes[index - 1].forcestop() + + def startIP(self, index): + self.check(index) + + if self.testCluster: + self.dnodes[index - 1].startIP() + + def stopIP(self, index): + self.check(index) + + if self.dnodes[index - 1].testCluster: + self.dnodes[index - 1].stopIP() + + def check(self, index): + if index < 1 or index > 10: + tdLog.exit("index:%d should on a scale of [1, 10]" % (index)) + + def stopAll(self): + tdLog.info("stop all dnodes") + for i in range(len(self.dnodes)): + self.dnodes[i].stop() + + psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + if processID: + cmd = "sudo systemctl stop taosd" + os.system(cmd) + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + + def getDnodesRootDir(self): + dnodesRootDir = "%s/sim" % (self.path) + return dnodesRootDir + + def getSimCfgPath(self): + return self.sim.getCfgDir() + + def getSimLogPath(self): + return self.sim.getLogDir() + + def addSimExtraCfg(self, option, value): + self.sim.addExtraCfg(option, value) + + +tdDnodes = TDDnodes() diff --git a/tests/army/frame/dnodes-random-fail.py b/tests/army/frame/dnodes-random-fail.py new file mode 100644 index 0000000000..794adfd7ed --- /dev/null +++ b/tests/army/frame/dnodes-random-fail.py @@ -0,0 +1,497 @@ 
+################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import os.path +import subprocess +from frame.log import * + + +class TDSimClient: + def __init__(self): + self.testCluster = False + + self.cfgDict = { + "numOfLogLines": "100000000", + "locale": "en_US.UTF-8", + "charset": "UTF-8", + "asyncLog": "0", + "rpcDebugFlag": "135", + "tmrDebugFlag": "131", + "cDebugFlag": "135", + "udebugFlag": "135", + "jnidebugFlag": "135", + "qdebugFlag": "135", + "telemetryReporting": "0", + } + + def init(self, path): + self.__init__() + self.path = path + + def getLogDir(self): + self.logDir = os.path.join(self.path,"sim","psim","log") + return self.logDir + + def getCfgDir(self): + self.cfgDir = os.path.join(self.path,"sim","psim","cfg") + return self.cfgDir + + def setTestCluster(self, value): + self.testCluster = value + + def addExtraCfg(self, option, value): + self.cfgDict.update({option: value}) + + def cfg(self, option, value): + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def deploy(self): + self.logDir = os.path.join(self.path,"sim","psim","log") + self.cfgDir = os.path.join(self.path,"sim","psim","cfg") + self.cfgPath = os.path.join(self.path,"sim","psim","cfg","taos.cfg") + + cmd = "rm -rf " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "touch " + self.cfgPath + if os.system(cmd) != 0: + tdLog.exit(cmd) + + if self.testCluster: + self.cfg("masterIp", "192.168.0.1") + self.cfg("secondIp", "192.168.0.2") + self.cfg("logDir", self.logDir) + + for key, value in self.cfgDict.items(): + self.cfg(key, value) + + tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath)) + + +class TDDnode: + def __init__(self, index): + self.index = index + self.running = 0 + self.deployed = 0 + self.testCluster = False + self.valgrind = 0 + + def init(self, path): + self.path = path + + def setTestCluster(self, value): + self.testCluster = value + + def setValgrind(self, value): + self.valgrind = value + + def getDataSize(self): + totalSize = 0 + + if (self.deployed == 1): + for dirpath, dirnames, filenames in os.walk(self.dataDir): + for f in filenames: + fp = os.path.join(dirpath, f) + + if not os.path.islink(fp): + totalSize = totalSize + os.path.getsize(fp) + + return totalSize + + def deploy(self): + self.logDir = os.path.join(self.path,"sim","dnode%d" % self.index, "log") + self.dataDir = os.path.join(self.path,"sim","dnode%d" % self.index, "data") + self.cfgDir = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg") + self.cfgPath = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg","taos.cfg") + + cmd = "rm -rf " + self.dataDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.cfgDir + if os.system(cmd) 
!= 0:
+            tdLog.exit(cmd)
+
+        cmd = "mkdir -p " + self.dataDir
+        if os.system(cmd) != 0:
+            tdLog.exit(cmd)
+
+        cmd = "mkdir -p " + self.logDir
+        if os.system(cmd) != 0:
+            tdLog.exit(cmd)
+
+        cmd = "mkdir -p " + self.cfgDir
+        if os.system(cmd) != 0:
+            tdLog.exit(cmd)
+
+        cmd = "touch " + self.cfgPath
+        if os.system(cmd) != 0:
+            tdLog.exit(cmd)
+
+        if self.testCluster:
+            self.startIP()
+
+        if self.testCluster:
+            self.cfg("masterIp", "192.168.0.1")
+            self.cfg("secondIp", "192.168.0.2")
+            self.cfg("publicIp", "192.168.0.%d" % (self.index))
+            self.cfg("internalIp", "192.168.0.%d" % (self.index))
+            self.cfg("privateIp", "192.168.0.%d" % (self.index))
+        self.cfg("dataDir", self.dataDir)
+        self.cfg("logDir", self.logDir)
+        self.cfg("numOfLogLines", "100000000")
+        self.cfg("mnodeEqualVnodeNum", "0")
+        self.cfg("walLevel", "2")
+        self.cfg("fsync", "1000")
+        self.cfg("statusInterval", "1")
+        self.cfg("numOfMnodes", "3")
+        self.cfg("numOfThreadsPerCore", "2.0")
+        self.cfg("monitor", "0")
+        self.cfg("maxVnodeConnections", "30000")
+        self.cfg("maxMgmtConnections", "30000")
+        self.cfg("maxMeterConnections", "30000")
+        self.cfg("maxShellConns", "30000")
+        self.cfg("locale", "en_US.UTF-8")
+        self.cfg("charset", "UTF-8")
+        self.cfg("asyncLog", "0")
+        self.cfg("anyIp", "0")
+        self.cfg("dDebugFlag", "135")
+        self.cfg("mDebugFlag", "135")
+        self.cfg("sdbDebugFlag", "135")
+        self.cfg("rpcDebugFlag", "135")
+        self.cfg("tmrDebugFlag", "131")
+        self.cfg("cDebugFlag", "135")
+        self.cfg("httpDebugFlag", "135")
+        self.cfg("monitorDebugFlag", "135")
+        self.cfg("udebugFlag", "135")
+        self.cfg("jnidebugFlag", "135")
+        self.cfg("qdebugFlag", "135")
+        self.deployed = 1
+        tdLog.debug(
+            "dnode:%d is deployed and configured by %s" %
+            (self.index, self.cfgPath))
+
+    def getBuildPath(self):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        # initialize so the walk below cannot leave buildPath unbound
+        buildPath = ""
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root)-len("/build/bin")]
+                    break
+        return buildPath
+
+    def start(self):
+        buildPath = self.getBuildPath()
+
+        if (buildPath == ""):
+            tdLog.exit("taosd not found!")
+        else:
+            tdLog.info("taosd found in %s" % buildPath)
+
+        binPath = buildPath + "/build/bin/taosd"
+
+        if self.deployed == 0:
+            tdLog.exit("dnode:%d is not deployed" % (self.index))
+
+        if self.valgrind == 0:
+            cmd = "nohup %s -c %s --alloc-random-fail --random-file-fail-factor 5 > /dev/null 2>&1 & " % (
+                binPath, self.cfgDir)
+        else:
+            valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"
+
+            cmd = "nohup %s %s -c %s 2>&1 & " % (
+                valgrindCmdline, binPath, self.cfgDir)
+
+        print(cmd)
+
+        if os.system(cmd) != 0:
+            tdLog.exit(cmd)
+        self.running = 1
+        tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
+
+        tdLog.debug("wait 5 seconds for the dnode:%d to start."
+
% (self.index)) + time.sleep(5) + + def stop(self): + if self.valgrind == 0: + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + if self.running != 0: + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + while(processID): + killCmd = "kill -INT %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + for port in range(6030, 6041): + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) + if self.valgrind: + time.sleep(2) + + self.running = 0 + tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index)) + + def forcestop(self): + if self.valgrind == 0: + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + if self.running != 0: + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + while(processID): + killCmd = "kill -KILL %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + for port in range(6030, 6041): + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) + if self.valgrind: + time.sleep(2) + + self.running = 0 + tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index)) + + def startIP(self): + cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def stopIP(self): + cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % ( + self.index, self.index) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def cfg(self, option, value): + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def getDnodeRootDir(self, index): + dnodeRootDir = os.path.join(self.path,"sim","psim","dnode%d" % index) + return dnodeRootDir + + def getDnodesRootDir(self): + dnodesRootDir = os.path.join(self.path,"sim","psim") + return dnodesRootDir + + +class TDDnodes: + def __init__(self): + self.dnodes = [] + self.dnodes.append(TDDnode(1)) + self.dnodes.append(TDDnode(2)) + self.dnodes.append(TDDnode(3)) + self.dnodes.append(TDDnode(4)) + self.dnodes.append(TDDnode(5)) + self.dnodes.append(TDDnode(6)) + self.dnodes.append(TDDnode(7)) + self.dnodes.append(TDDnode(8)) + self.dnodes.append(TDDnode(9)) + self.dnodes.append(TDDnode(10)) + self.simDeployed = False + + def init(self, path): + psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + binPath = os.path.dirname(os.path.realpath(__file__)) + binPath = binPath + "/../../../debug/" + tdLog.debug("binPath %s" % (binPath)) + binPath = os.path.realpath(binPath) + tdLog.debug("binPath real path %s" % (binPath)) + + # cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath) + # tdLog.debug(cmd) + # os.system(cmd) + + # 
cmd = "sudo cp %s/build/bin/taos /usr/local/bin/taos/" % (binPath) + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + # tdLog.debug("execute %s" % (cmd)) + + # cmd = "sudo cp %s/build/bin/taosd /usr/local/bin/taos/" % (binPath) + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + # tdLog.debug("execute %s" % (cmd)) + + if path == "": + # self.path = os.path.expanduser('~') + self.path = os.path.abspath(binPath + "../../") + else: + self.path = os.path.realpath(path) + + for i in range(len(self.dnodes)): + self.dnodes[i].init(self.path) + + self.sim = TDSimClient() + self.sim.init(self.path) + + def setTestCluster(self, value): + self.testCluster = value + + def setValgrind(self, value): + self.valgrind = value + + def deploy(self, index): + self.sim.setTestCluster(self.testCluster) + + if (self.simDeployed == False): + self.sim.deploy() + self.simDeployed = True + + self.check(index) + self.dnodes[index - 1].setTestCluster(self.testCluster) + self.dnodes[index - 1].setValgrind(self.valgrind) + self.dnodes[index - 1].deploy() + + def cfg(self, index, option, value): + self.check(index) + self.dnodes[index - 1].cfg(option, value) + + def start(self, index): + self.check(index) + self.dnodes[index - 1].start() + + def stop(self, index): + self.check(index) + self.dnodes[index - 1].stop() + + def getDataSize(self, index): + self.check(index) + return self.dnodes[index - 1].getDataSize() + + def forcestop(self, index): + self.check(index) + self.dnodes[index - 1].forcestop() + + def startIP(self, index): + self.check(index) + + if self.testCluster: + self.dnodes[index - 1].startIP() + + def stopIP(self, index): + self.check(index) + + if self.dnodes[index - 1].testCluster: + self.dnodes[index - 1].stopIP() + + def check(self, index): + if index < 1 or index > 10: + tdLog.exit("index:%d should on a scale of [1, 10]" % (index)) + + def stopAll(self): + tdLog.info("stop all dnodes") + for i in range(len(self.dnodes)): + self.dnodes[i].stop() + + psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + if processID: + cmd = "sudo systemctl stop taosd" + os.system(cmd) + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + + def getDnodesRootDir(self): + dnodesRootDir = "%s/sim" % (self.path) + return dnodesRootDir + + def getSimCfgPath(self): + return self.sim.getCfgDir() + + def getSimLogPath(self): + return self.sim.getLogDir() + + def addSimExtraCfg(self, option, value): + self.sim.addExtraCfg(option, value) + + +tdDnodes = TDDnodes() diff --git a/tests/army/frame/dnodes.py b/tests/army/frame/dnodes.py new file mode 100644 index 0000000000..5581f29a57 --- /dev/null +++ b/tests/army/frame/dnodes.py @@ -0,0 +1,890 @@ +################################################################### +# Copyright (c) 
2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+import os.path
+import time
+import platform
+import distro
+import subprocess
+from time import sleep
+import base64
+import json
+import copy
+from fabric2 import Connection
+from frame.log import *
+from shutil import which
+
+
+class TDSimClient:
+    def __init__(self, path):
+        self.testCluster = False
+        self.path = path
+        self.cfgDict = {
+            "fqdn": "localhost",
+            "numOfLogLines": "100000000",
+            "locale": "en_US.UTF-8",
+            "charset": "UTF-8",
+            "asyncLog": "0",
+            "rpcDebugFlag": "135",
+            "tmrDebugFlag": "131",
+            "cDebugFlag": "135",
+            "uDebugFlag": "135",
+            "jniDebugFlag": "135",
+            "qDebugFlag": "135",
+            "supportVnodes": "1024",
+            "enableQueryHb": "1",
+            "telemetryReporting": "0",
+            "tqDebugFlag": "135",
+            "wDebugFlag": "135",
+        }
+
+    def getLogDir(self):
+        self.logDir = os.path.join(self.path,"sim","psim","log")
+        return self.logDir
+
+    def getCfgDir(self):
+        self.cfgDir = os.path.join(self.path,"sim","psim","cfg")
+        return self.cfgDir
+
+    def setTestCluster(self, value):
+        self.testCluster = value
+
+    def addExtraCfg(self, option, value):
+        self.cfgDict.update({option: value})
+
+    def cfg(self, option, value):
+        cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
+        if os.system(cmd) != 0:
+            tdLog.exit(cmd)
+
+    def deploy(self, *updatecfgDict):
+        self.logDir = os.path.join(self.path,"sim","psim","log")
+        self.cfgDir = os.path.join(self.path,"sim","psim","cfg")
+        self.cfgPath = os.path.join(self.path,"sim","psim","cfg","taos.cfg")
+
+        cmd = "rm -rf " + self.logDir
+        if os.system(cmd) != 0:
+            tdLog.exit(cmd)
+
+        # cmd = "mkdir -p " + self.logDir
+        # if os.system(cmd) != 0:
+        # tdLog.exit(cmd)
+        os.makedirs(self.logDir)
+
+        cmd = "rm -rf " + self.cfgDir
+        if os.system(cmd) != 0:
+            tdLog.exit(cmd)
+
+        # cmd = "mkdir -p " + self.cfgDir
+        # if os.system(cmd) != 0:
+        # tdLog.exit(cmd)
+        os.makedirs(self.cfgDir)
+
+        cmd = "touch " + self.cfgPath
+        if os.system(cmd) != 0:
+            tdLog.exit(cmd)
+
+        if self.testCluster:
+            self.cfg("masterIp", "192.168.0.1")
+            self.cfg("secondIp", "192.168.0.2")
+        self.cfg("logDir", self.logDir)
+
+        for key, value in self.cfgDict.items():
+            self.cfg(key, value)
+
+        try:
+            if bool(updatecfgDict) and updatecfgDict[0] and updatecfgDict[0][0]:
+                clientCfg = dict (updatecfgDict[0][0].get('clientCfg'))
+                for key, value in clientCfg.items():
+                    self.cfg(key, value)
+        except Exception:
+            pass
+
+        tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath))
+
+
+class TDDnode:
+    def __init__(self, index):
+        self.index = index
+        self.running = 0
+        self.deployed = 0
+        self.testCluster = False
+        self.valgrind = 0
+        self.asan = False
+        self.remoteIP = ""
+        self.cfgDict = {
+            "fqdn": "localhost",
+            "monitor": "0",
+            "maxShellConns": "30000",
+            "locale": "en_US.UTF-8",
+            "charset": "UTF-8",
+            "asyncLog": "0",
+            "mDebugFlag": "143",
+            "dDebugFlag": "143",
+            "vDebugFlag": "143",
+            "tqDebugFlag": "143",
+            "cDebugFlag": "143",
+            "stDebugFlag": "143",
+            "smaDebugFlag": "143",
+            "jniDebugFlag": "143",
+            "qDebugFlag": "143",
+            "rpcDebugFlag": "143",
+            "tmrDebugFlag": "131",
+            "uDebugFlag": "135",
+            "sDebugFlag": "135",
+            "wDebugFlag": "135",
+
"numOfLogLines": "100000000", + "statusInterval": "1", + "enableQueryHb": "1", + "supportVnodes": "1024", + "telemetryReporting": "0" + } + + def init(self, path, remoteIP = ""): + self.path = path + self.remoteIP = remoteIP + if (not self.remoteIP == ""): + try: + self.config = eval(self.remoteIP) + self.remote_conn = Connection(host=self.config["host"], port=self.config["port"], user=self.config["user"], connect_kwargs={'password':self.config["password"]}) + except Exception as r: + print(r) + + def setTestCluster(self, value): + self.testCluster = value + + def setValgrind(self, value): + self.valgrind = value + + def setAsan(self, value): + self.asan = value + if value: + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + self.execPath = os.path.abspath(self.path + "/community/tests/script/sh/exec.sh") + else: + self.execPath = os.path.abspath(self.path + "/tests/script/sh/exec.sh") + + def getDataSize(self): + totalSize = 0 + + if (self.deployed == 1): + for dirpath, dirnames, filenames in os.walk(self.dataDir): + for f in filenames: + fp = os.path.join(dirpath, f) + + if not os.path.islink(fp): + totalSize = totalSize + os.path.getsize(fp) + + return totalSize + + def addExtraCfg(self, option, value): + self.cfgDict.update({option: value}) + + def remoteExec(self, updateCfgDict, execCmd): + valgrindStr = '' + if (self.valgrind==1): + valgrindStr = '-g' + remoteCfgDict = copy.deepcopy(updateCfgDict) + if ("logDir" in remoteCfgDict): + del remoteCfgDict["logDir"] + if ("dataDir" in remoteCfgDict): + del remoteCfgDict["dataDir"] + if ("cfgDir" in remoteCfgDict): + del remoteCfgDict["cfgDir"] + remoteCfgDictStr = base64.b64encode(json.dumps(remoteCfgDict).encode()).decode() + execCmdStr = base64.b64encode(execCmd.encode()).decode() + with self.remote_conn.cd((self.config["path"]+sys.path[0].replace(self.path, '')).replace('\\','/')): + self.remote_conn.run("python3 ./test.py %s -d %s -e %s"%(valgrindStr,remoteCfgDictStr,execCmdStr)) + + def deploy(self, *updatecfgDict): + self.logDir = os.path.join(self.path,"sim","dnode%d" % self.index, "log") + self.dataDir = os.path.join(self.path,"sim","dnode%d" % self.index, "data") + self.cfgDir = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg") + self.cfgPath = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg","taos.cfg") + + cmd = "rm -rf " + self.dataDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + # cmd = "mkdir -p " + self.dataDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) + os.makedirs(self.dataDir) + + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) + os.makedirs(self.logDir) + + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) + os.makedirs(self.cfgDir) + + cmd = "touch " + self.cfgPath + if os.system(cmd) != 0: + tdLog.exit(cmd) + + if self.testCluster: + self.startIP() + + if self.testCluster: + self.cfg("masterIp", "192.168.0.1") + self.cfg("secondIp", "192.168.0.2") + self.cfg("publicIp", "192.168.0.%d" % (self.index)) + self.cfg("internalIp", "192.168.0.%d" % (self.index)) + self.cfg("privateIp", "192.168.0.%d" % (self.index)) + self.cfgDict["dataDir"] = self.dataDir + self.cfgDict["logDir"] = self.logDir + # self.cfg("dataDir",self.dataDir) + # self.cfg("logDir",self.logDir) + # print(updatecfgDict) + isFirstDir = 1 + if bool(updatecfgDict) and 
updatecfgDict[0] and updatecfgDict[0][0]: + for key, value in updatecfgDict[0][0].items(): + if key == "clientCfg" and self.remoteIP == "" and not platform.system().lower() == 'windows': + continue + if value == 'dataDir': + if isFirstDir: + self.cfgDict.pop('dataDir') + self.cfg(value, key) + isFirstDir = 0 + else: + self.cfg(value, key) + else: + self.addExtraCfg(key, value) + if (self.remoteIP == ""): + for key, value in self.cfgDict.items(): + self.cfg(key, value) + else: + self.remoteExec(self.cfgDict, "tdDnodes.deploy(%d,updateCfgDict)"%self.index) + + self.deployed = 1 + tdLog.debug( + "dnode:%d is deployed and configured by %s" % + (self.index, self.cfgPath)) + + def getPath(self, tool="taosd"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files or ("%s.exe"%tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + return "" + return paths[0] + + def starttaosd(self): + binPath = self.getPath() + + if (binPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found: %s" % binPath) + + if self.deployed == 0: + tdLog.exit("dnode:%d is not deployed" % (self.index)) + + if self.valgrind == 0: + if platform.system().lower() == 'windows': + cmd = "mintty -h never %s -c %s" % ( + binPath, self.cfgDir) + else: + if self.asan: + asanDir = "%s/sim/asan/dnode%d.asan" % ( + self.path, self.index) + cmd = "nohup %s -c %s > /dev/null 2> %s & " % ( + binPath, self.cfgDir, asanDir) + else: + cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( + binPath, self.cfgDir) + else: + valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir + + if platform.system().lower() == 'windows': + cmd = "mintty -h never %s %s -c %s" % ( + valgrindCmdline, binPath, self.cfgDir) + else: + cmd = "nohup %s %s -c %s 2>&1 & " % ( + valgrindCmdline, binPath, self.cfgDir) + + print(cmd) + + if (not self.remoteIP == ""): + self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].deployed=1\ntdDnodes.dnodes[%d].logDir=\"%%s/sim/dnode%%d/log\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.dnodes[%d].cfgDir=\"%%s/sim/dnode%%d/cfg\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.start(%d)"%(self.index-1,self.index-1,self.index-1,self.index,self.index-1,self.index-1,self.index,self.index)) + self.running = 1 + else: + if os.system(cmd) != 0: + tdLog.exit(cmd) + self.running = 1 + tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) + if self.valgrind == 0: + time.sleep(0.1) + key1 = 'from offline to online' + bkey1 = bytes(key1, encoding="utf8") + key2= 'TDengine initialized successfully' + bkey2 = bytes(key2, encoding="utf8") + logFile = self.logDir + "/taosdlog.0" + i = 0 + # while not os.path.exists(logFile): + # sleep(0.1) + # i += 1 + # if i > 10: + # break + # tailCmdStr = 'tail -f ' + # if platform.system().lower() == 'windows': + # tailCmdStr = 'tail -n +0 -f ' + # popen = subprocess.Popen( + # tailCmdStr + logFile, + # stdout=subprocess.PIPE, + # stderr=subprocess.PIPE, + # shell=True) + # pid = popen.pid + # # print('Popen.pid:' + str(pid)) + # timeout = time.time() + 60 * 2 + # while True: + # line = 
popen.stdout.readline().strip() + # print(line) + # if bkey1 in line: + # popen.kill() + # break + # elif bkey2 in line: + # popen.kill() + # break + # if time.time() > timeout: + # print(time.time(),timeout) + # tdLog.exit('wait too long for taosd start') + tdLog.debug("the dnode:%d has been started." % (self.index)) + else: + tdLog.debug( + "wait 10 seconds for the dnode:%d to start." % + (self.index)) + time.sleep(10) + + def start(self): + binPath = self.getPath() + + if (binPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found: %s" % binPath) + + if self.deployed == 0: + tdLog.exit("dnode:%d is not deployed" % (self.index)) + + if self.valgrind == 0: + if platform.system().lower() == 'windows': + cmd = "mintty -h never %s -c %s" % ( + binPath, self.cfgDir) + else: + if self.asan: + asanDir = "%s/sim/asan/dnode%d.asan" % ( + self.path, self.index) + cmd = "nohup %s -c %s > /dev/null 2> %s & " % ( + binPath, self.cfgDir, asanDir) + else: + cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( + binPath, self.cfgDir) + else: + valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir + + if platform.system().lower() == 'windows': + cmd = "mintty -h never %s %s -c %s" % ( + valgrindCmdline, binPath, self.cfgDir) + else: + cmd = "nohup %s %s -c %s 2>&1 & " % ( + valgrindCmdline, binPath, self.cfgDir) + + print(cmd) + + if (not self.remoteIP == ""): + self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].deployed=1\ntdDnodes.dnodes[%d].logDir=\"%%s/sim/dnode%%d/log\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.dnodes[%d].cfgDir=\"%%s/sim/dnode%%d/cfg\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.start(%d)"%(self.index-1,self.index-1,self.index-1,self.index,self.index-1,self.index-1,self.index,self.index)) + self.running = 1 + else: + os.system("rm -rf %s/taosdlog.0"%self.logDir) + if os.system(cmd) != 0: + tdLog.exit(cmd) + self.running = 1 + tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) + if self.valgrind == 0: + time.sleep(0.1) + key = 'from offline to online' + bkey = bytes(key, encoding="utf8") + logFile = self.logDir + "/taosdlog.0" + i = 0 + while not os.path.exists(logFile): + sleep(0.1) + i += 1 + if i > 50: + break + with open(logFile) as f: + timeout = time.time() + 10 * 2 + while True: + line = f.readline().encode('utf-8') + if bkey in line: + break + if time.time() > timeout: + tdLog.exit('wait too long for taosd start') + tdLog.debug("the dnode:%d has been started." % (self.index)) + else: + tdLog.debug( + "wait 10 seconds for the dnode:%d to start." 
% + (self.index)) + time.sleep(10) + + def startWithoutSleep(self): + binPath = self.getPath() + + if (binPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found: %s" % binPath) + + if self.deployed == 0: + tdLog.exit("dnode:%d is not deployed" % (self.index)) + + if self.valgrind == 0: + if platform.system().lower() == 'windows': + cmd = "mintty -h never %s -c %s" % (binPath, self.cfgDir) + else: + if self.asan: + asanDir = "%s/sim/asan/dnode%d.asan" % ( + self.path, self.index) + cmd = "nohup %s -c %s > /dev/null 2> %s & " % ( + binPath, self.cfgDir, asanDir) + else: + cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( + binPath, self.cfgDir) + else: + valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir + if platform.system().lower() == 'windows': + cmd = "mintty -h never %s %s -c %s" % ( + valgrindCmdline, binPath, self.cfgDir) + else: + cmd = "nohup %s %s -c %s 2>&1 & " % ( + valgrindCmdline, binPath, self.cfgDir) + print(cmd) + + if (self.remoteIP == ""): + if os.system(cmd) != 0: + tdLog.exit(cmd) + else: + self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].deployed=1\ntdDnodes.dnodes[%d].logDir=\"%%s/sim/dnode%%d/log\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.dnodes[%d].cfgDir=\"%%s/sim/dnode%%d/cfg\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.startWithoutSleep(%d)"%(self.index-1,self.index-1,self.index-1,self.index,self.index-1,self.index-1,self.index,self.index)) + + self.running = 1 + tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) + + def stop(self): + if self.asan: + stopCmd = "%s -s stop -n dnode%d" % (self.execPath, self.index) + tdLog.info("execute script: " + stopCmd) + os.system(stopCmd) + return + + if (not self.remoteIP == ""): + self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].running=1\ntdDnodes.dnodes[%d].stop()"%(self.index-1,self.index-1)) + tdLog.info("stop dnode%d"%self.index) + return + if self.valgrind == 0: + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + if self.running != 0: + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs" % toBeKilled + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8").strip() + + onlyKillOnceWindows = 0 + while(processID): + if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'): + killCmd = "kill -INT %s > /dev/null 2>&1" % processID + if platform.system().lower() == 'windows': + killCmd = "kill -INT %s > nul 2>&1" % processID + os.system(killCmd) + onlyKillOnceWindows = 1 + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8").strip() + if not platform.system().lower() == 'windows': + for port in range(6030, 6041): + fuserCmd = "fuser -k -n tcp %d > /dev/null" % port + os.system(fuserCmd) + if self.valgrind: + time.sleep(2) + + self.running = 0 + tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index)) + + + def stoptaosd(self): + tdLog.debug("start to stop taosd on dnode: %d "% (self.index)) + # print(self.asan,self.running,self.remoteIP,self.valgrind) + if self.asan: + stopCmd = "%s -s stop -n dnode%d" % (self.execPath, self.index) + tdLog.info("execute script: " + stopCmd) + os.system(stopCmd) + return + + if (not self.remoteIP == ""): + self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].running=1\ntdDnodes.dnodes[%d].stop()"%(self.index-1,self.index-1)) + tdLog.info("stop 
dnode%d"%self.index) + return + if self.valgrind == 0: + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + if self.running != 0: + if platform.system().lower() == 'windows': + psCmd = "for /f %%a in ('wmic process where \"name='taosd.exe' and CommandLine like '%%dnode%d%%'\" get processId ^| xargs echo ^| awk ^'{print $2}^' ^&^& echo aa') do @(ps | grep %%a | awk '{print $1}' | xargs)" % (self.index) + else: + psCmd = "ps -ef|grep -w %s| grep dnode%d|grep -v grep | awk '{print $2}' | xargs" % (toBeKilled,self.index) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8").strip() + + onlyKillOnceWindows = 0 + while(processID): + if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'): + killCmd = "kill -INT %s > /dev/null 2>&1" % processID + if platform.system().lower() == 'windows': + killCmd = "kill -INT %s > nul 2>&1" % processID + os.system(killCmd) + onlyKillOnceWindows = 1 + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8").strip() + if self.valgrind: + time.sleep(2) + + self.running = 0 + tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index)) + + def forcestop(self): + if self.asan: + stopCmd = "%s -s stop -n dnode%d -x SIGKILL" + \ + (self.execPath, self.index) + tdLog.info("execute script: " + stopCmd) + os.system(stopCmd) + return + + if (not self.remoteIP == ""): + self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].running=1\ntdDnodes.dnodes[%d].forcestop()"%(self.index-1,self.index-1)) + return + if self.valgrind == 0: + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + if self.running != 0: + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs" % toBeKilled + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8").strip() + + onlyKillOnceWindows = 0 + while(processID): + if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'): + killCmd = "kill -KILL %s > /dev/null 2>&1" % processID + os.system(killCmd) + onlyKillOnceWindows = 1 + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8").strip() + for port in range(6030, 6041): + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) + if self.valgrind: + time.sleep(2) + + self.running = 0 + tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index)) + + def startIP(self): + cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def stopIP(self): + cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % ( + self.index, self.index) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def cfg(self, option, value): + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def getDnodeRootDir(self, index): + dnodeRootDir = os.path.join(self.path,"sim","psim","dnode%d" % index) + return dnodeRootDir + + def getDnodesRootDir(self): + dnodesRootDir = os.path.join(self.path,"sim","psim") + return dnodesRootDir + + +class TDDnodes: + def __init__(self): + self.dnodes = [] + self.dnodes.append(TDDnode(1)) + self.dnodes.append(TDDnode(2)) + self.dnodes.append(TDDnode(3)) + self.dnodes.append(TDDnode(4)) + self.dnodes.append(TDDnode(5)) + self.dnodes.append(TDDnode(6)) + self.dnodes.append(TDDnode(7)) + self.dnodes.append(TDDnode(8)) + self.dnodes.append(TDDnode(9)) + self.dnodes.append(TDDnode(10)) + self.simDeployed = False + self.testCluster = 
+        self.valgrind = 0
+        self.asan = False
+        self.killValgrind = 0
+
+    def init(self, path, remoteIP = ""):
+        binPath = self.dnodes[0].getPath() + "/../../../"
+        # tdLog.debug("binPath %s" % (binPath))
+        binPath = os.path.realpath(binPath)
+        # tdLog.debug("binPath real path %s" % (binPath))
+
+        if path == "":
+            self.path = os.path.abspath(binPath + "../../")
+        else:
+            self.path = os.path.realpath(path)
+
+        for i in range(len(self.dnodes)):
+            self.dnodes[i].init(self.path, remoteIP)
+        self.sim = TDSimClient(self.path)
+
+    def setTestCluster(self, value):
+        self.testCluster = value
+
+    def setValgrind(self, value):
+        self.valgrind = value
+
+    def setAsan(self, value):
+        self.asan = value
+        if value:
+            selfPath = os.path.dirname(os.path.realpath(__file__))
+            if ("community" in selfPath):
+                self.stopDnodesPath = os.path.abspath(self.path + "/community/tests/script/sh/stop_dnodes.sh")
+                self.stopDnodesSigintPath = os.path.abspath(self.path + "/community/tests/script/sh/sigint_stop_dnodes.sh")
+            else:
+                self.stopDnodesPath = os.path.abspath(self.path + "/tests/script/sh/stop_dnodes.sh")
+                self.stopDnodesSigintPath = os.path.abspath(self.path + "/tests/script/sh/sigint_stop_dnodes.sh")
+            tdLog.info("run in address sanitizer mode")
+
+    def setKillValgrind(self, value):
+        self.killValgrind = value
+
+    def deploy(self, index, *updatecfgDict):
+        self.sim.setTestCluster(self.testCluster)
+
+        if (self.simDeployed == False):
+            self.sim.deploy(updatecfgDict)
+            self.simDeployed = True
+
+        self.check(index)
+        self.dnodes[index - 1].setTestCluster(self.testCluster)
+        self.dnodes[index - 1].setValgrind(self.valgrind)
+        self.dnodes[index - 1].setAsan(self.asan)
+        self.dnodes[index - 1].deploy(updatecfgDict)
+
+    def cfg(self, index, option, value):
+        self.check(index)
+        self.dnodes[index - 1].cfg(option, value)
+
+    def starttaosd(self, index):
+        self.check(index)
+        self.dnodes[index - 1].starttaosd()
+
+    def stoptaosd(self, index):
+        self.check(index)
+        self.dnodes[index - 1].stoptaosd()
+
+    def start(self, index):
+        self.check(index)
+        self.dnodes[index - 1].start()
+
+    def startWithoutSleep(self, index):
+        self.check(index)
+        self.dnodes[index - 1].startWithoutSleep()
+
+    def stop(self, index):
+        self.check(index)
+        self.dnodes[index - 1].stop()
+
+    def getDataSize(self, index):
+        self.check(index)
+        return self.dnodes[index - 1].getDataSize()
+
+    def forcestop(self, index):
+        self.check(index)
+        self.dnodes[index - 1].forcestop()
+
+    def startIP(self, index):
+        self.check(index)
+
+        if self.testCluster:
+            self.dnodes[index - 1].startIP()
+
+    def stopIP(self, index):
+        self.check(index)
+
+        if self.dnodes[index - 1].testCluster:
+            self.dnodes[index - 1].stopIP()
+
+    def check(self, index):
+        if index < 1 or index > 10:
+            tdLog.exit("index:%d should be in the range [1, 10]" % (index))
+
+    def StopAllSigint(self):
+        tdLog.info("stop all dnodes sigint, asan:%d" % self.asan)
+        if self.asan:
+            tdLog.info("execute script: %s" % self.stopDnodesSigintPath)
+            os.system(self.stopDnodesSigintPath)
+            tdLog.info("execute finished")
+            return
+
+    def killProcesser(self, processerName):
+        if platform.system().lower() == 'windows':
+            killCmd = ("wmic process where name=\"%s.exe\" call terminate > NUL 2>&1" % processerName)
+            psCmd = ("wmic process where name=\"%s.exe\" | findstr \"%s.exe\"" % (processerName, processerName))
+        else:
+            killCmd = (
+                "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1"
+                % processerName
+            )
+            psCmd = ("ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % processerName)
+
+        processID = ""
+
+        try:
+            processID = subprocess.check_output(psCmd, shell=True)
+            while processID:
+                os.system(killCmd)
+                time.sleep(1)
+                try:
+                    processID = subprocess.check_output(psCmd, shell=True)
+                except Exception as err:
+                    processID = ""
+                    tdLog.debug(f'**** kill pid warn: {err}')
+        except Exception as err:
+            processID = ""
+            tdLog.debug(f'**** find pid warn: {err}')
+
+
+
+    def stopAll(self):
+        tdLog.info("stop all dnodes, asan:%d" % self.asan)
+        if platform.system().lower() != 'windows':
+            distro_id = distro.id()
+        else:
+            distro_id = "not alpine"
+        if self.asan and distro_id != "alpine":
+            tdLog.info("execute script: %s" % self.stopDnodesPath)
+            os.system(self.stopDnodesPath)
+            tdLog.info("execute finished")
+            return
+
+        if (not self.dnodes[0].remoteIP == ""):
+            self.dnodes[0].remoteExec(self.dnodes[0].cfgDict, "for i in range(len(tdDnodes.dnodes)):\n tdDnodes.dnodes[i].running=1\ntdDnodes.stopAll()")
+            return
+        for i in range(len(self.dnodes)):
+            self.dnodes[i].stop()
+
+
+        if (distro_id == "alpine"):
+            psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}' | xargs"
+            processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
+            while(processID):
+                print(processID)
+                killCmd = "kill -9 %s > /dev/null 2>&1" % processID
+                os.system(killCmd)
+                time.sleep(1)
+                processID = subprocess.check_output(
+                    psCmd, shell=True).decode("utf-8").strip()
+        elif platform.system().lower() == 'windows':
+            self.killProcesser("taosd")
+            self.killProcesser("tmq_sim")
+            self.killProcesser("taosBenchmark")
+        else:
+            psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}' | xargs"
+            processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
+            if processID:
+                cmd = "sudo systemctl stop taosd"
+                os.system(cmd)
+                # if os.system(cmd) != 0 :
+                # tdLog.exit(cmd)
+            psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}' | xargs"
+            processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
+            while(processID):
+                killCmd = "kill -9 %s > /dev/null 2>&1" % processID
+                os.system(killCmd)
+                time.sleep(1)
+                processID = subprocess.check_output(
+                    psCmd, shell=True).decode("utf-8").strip()
+        if self.killValgrind == 1:
+            psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}' | xargs"
+            processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
+            while(processID):
+                if platform.system().lower() == 'windows':
+                    killCmd = "kill -TERM %s > nul 2>&1" % processID
+                else:
+                    killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
+                os.system(killCmd)
+                time.sleep(1)
+                processID = subprocess.check_output(
+                    psCmd, shell=True).decode("utf-8").strip()
+
+        # if os.system(cmd) != 0 :
+        # tdLog.exit(cmd)
+
+    def getDnodesRootDir(self):
+        dnodesRootDir = "%s/sim" % (self.path)
+        return dnodesRootDir
+
+    def getSimCfgPath(self):
+        return self.sim.getCfgDir()
+
+    def getSimLogPath(self):
+        return self.sim.getLogDir()
+
+    def addSimExtraCfg(self, option, value):
+        self.sim.addExtraCfg(option, value)
+
+    def getAsan(self):
+        return self.asan
+
+tdDnodes = TDDnodes()
\ No newline at end of file
diff --git a/tests/army/frame/gettime.py b/tests/army/frame/gettime.py
new file mode 100644
index 0000000000..d4a5e18dc9
--- /dev/null
+++ b/tests/army/frame/gettime.py
@@ -0,0 +1,65 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import time +from datetime import datetime + +class GetTime: + + def get_ms_timestamp(self,ts_str): + _ts_str = ts_str + if "+" in _ts_str: + timestamp = datetime.fromisoformat(_ts_str) + return int((timestamp-datetime.fromtimestamp(0,timestamp.tzinfo)).total_seconds())*1000+int(timestamp.microsecond / 1000) + if " " in ts_str: + p = ts_str.split(" ")[1] + if len(p) > 15 : + _ts_str = ts_str[:-3] + if ':' in _ts_str and '.' in _ts_str: + timestamp = datetime.strptime(_ts_str, "%Y-%m-%d %H:%M:%S.%f") + date_time = int(int(time.mktime(timestamp.timetuple()))*1000 + timestamp.microsecond/1000) + elif ':' in _ts_str and '.' not in _ts_str: + timestamp = datetime.strptime(_ts_str, "%Y-%m-%d %H:%M:%S") + date_time = int(int(time.mktime(timestamp.timetuple()))*1000 + timestamp.microsecond/1000) + else: + timestamp = datetime.strptime(_ts_str, "%Y-%m-%d") + date_time = int(int(time.mktime(timestamp.timetuple()))*1000 + timestamp.microsecond/1000) + return date_time + def get_us_timestamp(self,ts_str): + _ts = self.get_ms_timestamp(ts_str) * 1000 + if " " in ts_str: + p = ts_str.split(" ")[1] + if len(p) > 12: + us_ts = p[12:15] + _ts += int(us_ts) + return _ts + def get_ns_timestamp(self,ts_str): + _ts = self.get_us_timestamp(ts_str) *1000 + if " " in ts_str: + p = ts_str.split(" ")[1] + if len(p) > 15: + us_ts = p[15:] + _ts += int(us_ts) + return _ts + def time_transform(self,ts_str,precision): + date_time = [] + if precision == 'ms': + for i in ts_str: + date_time.append(self.get_ms_timestamp(i)) + elif precision == 'us': + for i in ts_str: + date_time.append(self.get_us_timestamp(i)) + elif precision == 'ns': + for i in ts_str: + date_time.append(self.get_ns_timestamp(i)) + return date_time \ No newline at end of file diff --git a/tests/army/frame/log.py b/tests/army/frame/log.py new file mode 100644 index 0000000000..000c907ea4 --- /dev/null +++ b/tests/army/frame/log.py @@ -0,0 +1,49 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+import time
+import datetime
+from distutils.log import warn as printf
+
+
+class TDLog:
+    def __init__(self):
+        self.path = ""
+
+    def info(self, info):
+        print("%s %s\n" % (datetime.datetime.now(), info))
+
+    def sleep(self, sec):
+        print("%s sleep %d seconds" % (datetime.datetime.now(), sec))
+        time.sleep(sec)
+
+    def debug(self, err):
+        print("\033[1;36m%s %s\033[0m" % (datetime.datetime.now(), err))
+
+    def success(self, info):
+        printf("\033[1;32m%s %s\033[0m" % (datetime.datetime.now(), info))
+
+    def notice(self, err):
+        print("\033[1;33m%s %s\033[0m" % (datetime.datetime.now(), err))
+
+    def exit(self, err):
+        print("\033[1;31m%s %s\033[0m" % (datetime.datetime.now(), err))
+        sys.exit(1)
+
+    def printNoPrefix(self, info):
+        print("\033[1;36m%s\033[0m" % (info))
+
+
+tdLog = TDLog()
diff --git a/tests/army/frame/pathFinding.py b/tests/army/frame/pathFinding.py
new file mode 100644
index 0000000000..df03f0ed68
--- /dev/null
+++ b/tests/army/frame/pathFinding.py
@@ -0,0 +1,83 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+
+import os
+from frame.log import *
+
+
+class TDFindPath:
+    """This class is for finding paths within TDengine
+    """
+    def __init__(self):
+        self.file = ""
+
+    def init(self, file):
+        """Record the file from which path searches start.
+
+        Args:
+            file (str): the file location you want to start the query from.
+                Generally __file__ is passed in.
+        """
+        self.file = file
+
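+    # Usage sketch (comments only, not executed):
+    #
+    #   tdFindPath.init(__file__)
+    #   bin_dir = tdFindPath.getTaosdemoPath()   # e.g. ".../build/bin/" under the build tree
+    #
+    # getTaosdemoPath() and getTDenginePath() below walk the project root
+    # resolved from the recorded file location.
+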
+    def getTaosdemoPath(self):
+        """for finding the path of the directory containing taosdemo
+
+        Returns:
+            str: the path to the directory containing taosdemo
+        """
+        selfPath = os.path.dirname(os.path.realpath(self.file))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        # initialize so the not-found branch below cannot hit an unbound name
+        buildPath = ""
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root)-len("/build/bin")]
+                    break
+        if (buildPath == ""):
+            tdLog.exit("taosd not found!")
+        else:
+            tdLog.info(f"taosd found in {buildPath}")
+        return buildPath + "/build/bin/"
+
+    def getTDenginePath(self):
+        """for finding the root path of TDengine
+
+        Returns:
+            str: the root path of TDengine
+        """
+        selfPath = os.path.dirname(os.path.realpath(self.file))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+        print(projPath)
+        for root, dirs, files in os.walk(projPath):
+            if ("sim" in dirs):
+                print(root)
+                rootRealPath = os.path.realpath(root)
+                if (rootRealPath == ""):
+                    tdLog.exit("TDengine not found!")
+                else:
+                    tdLog.info(f"TDengine found in {rootRealPath}")
+                    return rootRealPath
+
+tdFindPath = TDFindPath()
\ No newline at end of file
diff --git a/tests/army/frame/sql.py b/tests/army/frame/sql.py
new file mode 100644
index 0000000000..d7dce2bc3e
--- /dev/null
+++ b/tests/army/frame/sql.py
@@ -0,0 +1,655 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import time +import datetime +import inspect +import traceback +import psutil +import shutil +import pandas as pd +from frame.log import * +from frame.constant import * + +# from datetime import timezone +import time + +def _parse_ns_timestamp(timestr): + dt_obj = datetime.datetime.strptime(timestr[:len(timestr)-3], "%Y-%m-%d %H:%M:%S.%f") + tz = int(int((dt_obj-datetime.datetime.fromtimestamp(0,dt_obj.tzinfo)).total_seconds())*1e9) + int(dt_obj.microsecond * 1000) + int(timestr[-3:]) + return tz + + +def _parse_datetime(timestr): + try: + return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S.%f') + except ValueError: + pass + try: + return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S') + except ValueError: + pass + +class TDSql: + def __init__(self): + self.queryRows = 0 + self.queryCols = 0 + self.affectedRows = 0 + + def init(self, cursor, log=False): + self.cursor = cursor + + if (log): + caller = inspect.getframeinfo(inspect.stack()[1][0]) + self.cursor.log(caller.filename + ".sql") + + def close(self): + self.cursor.close() + + def prepare(self, dbname="db", drop=True, **kwargs): + tdLog.info(f"prepare database:{dbname}") + s = 'reset query cache' + try: + self.cursor.execute(s) + except: + tdLog.notice("'reset query cache' is not supported") + if drop: + s = f'drop database if exists {dbname}' + self.cursor.execute(s) + s = f'create database {dbname}' + for k, v in kwargs.items(): + s += f" {k} {v}" + if "duration" not in kwargs: + s += " duration 300" + self.cursor.execute(s) + s = f'use {dbname}' + self.cursor.execute(s) + time.sleep(2) + + def error(self, sql, expectedErrno = None, expectErrInfo = None): + caller = inspect.getframeinfo(inspect.stack()[1][0]) + expectErrNotOccured = True + + try: + self.cursor.execute(sql) + except BaseException as e: + expectErrNotOccured = False + self.errno = e.errno + error_info = repr(e) + self.error_info = ','.join(error_info[error_info.index('(')+1:-1].split(",")[:-1]).replace("'","") + # self.error_info = (','.join(error_info.split(",")[:-1]).split("(",1)[1:][0]).replace("'","") + if expectErrNotOccured: + tdLog.exit("%s(%d) failed: sql:%s, expect error not occured" % (caller.filename, caller.lineno, sql)) + else: + self.queryRows = 0 + self.queryCols = 0 + self.queryResult = None + + if expectedErrno != None: + if expectedErrno == self.errno: + tdLog.info("sql:%s, expected errno %s occured" % (sql, expectedErrno)) + else: + tdLog.exit("%s(%d) failed: sql:%s, errno %s occured, but not expected errno %s" % (caller.filename, caller.lineno, sql, self.errno, expectedErrno)) + else: + tdLog.info("sql:%s, expect error occured" % (sql)) + + if expectErrInfo != None: + if expectErrInfo == self.error_info or expectErrInfo in self.error_info: + tdLog.info("sql:%s, expected expectErrInfo %s occured" % (sql, expectErrInfo)) + else: + tdLog.exit("%s(%d) failed: sql:%s, expectErrInfo %s occured, but not expected errno %s" % (caller.filename, caller.lineno, sql, self.error_info, expectErrInfo)) + else: + tdLog.info("sql:%s, expect error occured" % (sql)) + + return self.error_info + + def query(self, sql, row_tag=None, queryTimes=10, count_expected_res=None): + self.sql = sql + i=1 + while i <= queryTimes: + try: + 
self.cursor.execute(sql) + self.queryResult = self.cursor.fetchall() + self.queryRows = len(self.queryResult) + self.queryCols = len(self.cursor.description) + + if count_expected_res is not None: + counter = 0 + while count_expected_res != self.queryResult[0][0]: + self.cursor.execute(sql) + self.queryResult = self.cursor.fetchall() + if counter < queryTimes: + counter += 0.5 + time.sleep(0.5) + else: + return False + if row_tag: + return self.queryResult + return self.queryRows + except Exception as e: + tdLog.notice("Try to query again, query times: %d "%i) + if i == queryTimes: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) + i+=1 + time.sleep(1) + pass + + + def is_err_sql(self, sql): + err_flag = True + try: + self.cursor.execute(sql) + except BaseException: + err_flag = False + + return False if err_flag else True + + def getVariable(self, search_attr): + ''' + get variable of search_attr access "show variables" + ''' + try: + sql = 'show variables' + param_list = self.query(sql, row_tag=True) + for param in param_list: + if param[0] == search_attr: + return param[1], param_list + except Exception as e: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) + + def getColNameList(self, sql, col_tag=None): + self.sql = sql + try: + col_name_list = [] + col_type_list = [] + self.cursor.execute(sql) + for query_col in self.cursor.description: + col_name_list.append(query_col[0]) + col_type_list.append(query_col[1]) + except Exception as e: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) + if col_tag: + return col_name_list, col_type_list + return col_name_list + + def waitedQuery(self, sql, expectRows, timeout): + tdLog.info("sql: %s, try to retrieve %d rows in %d seconds" % (sql, expectRows, timeout)) + self.sql = sql + try: + for i in range(timeout): + self.cursor.execute(sql) + self.queryResult = self.cursor.fetchall() + self.queryRows = len(self.queryResult) + self.queryCols = len(self.cursor.description) + tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows)) + if self.queryRows >= expectRows: + return (self.queryRows, i) + time.sleep(1) + except Exception as e: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) + return (self.queryRows, timeout) + + def getRows(self): + return self.queryRows + + def checkRows(self, expectRows): + if self.queryRows == expectRows: + tdLog.info("sql:%s, queryRows:%d == expect:%d" % (self.sql, self.queryRows, expectRows)) + return True + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, self.queryRows, expectRows) + tdLog.exit("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args) + + def checkRows_range(self, excepte_row_list): + if self.queryRows in excepte_row_list: + tdLog.info(f"sql:{self.sql}, queryRows:{self.queryRows} in expect:{excepte_row_list}") + return True + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + tdLog.exit(f"{caller.filename}({caller.lineno}) failed: 
sql:{self.sql}, queryRows:{self.queryRows} not in expect:{excepte_row_list}") + + def checkCols(self, expectCols): + if self.queryCols == expectCols: + tdLog.info("sql:%s, queryCols:%d == expect:%d" % (self.sql, self.queryCols, expectCols)) + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, self.queryCols, expectCols) + tdLog.exit("%s(%d) failed: sql:%s, queryCols:%d != expect:%d" % args) + + def checkRowCol(self, row, col): + caller = inspect.getframeinfo(inspect.stack()[2][0]) + if row < 0: + args = (caller.filename, caller.lineno, self.sql, row) + tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args) + if col < 0: + args = (caller.filename, caller.lineno, self.sql, row) + tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args) + if row > self.queryRows: + args = (caller.filename, caller.lineno, self.sql, row, self.queryRows) + tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args) + if col > self.queryCols: + args = (caller.filename, caller.lineno, self.sql, col, self.queryCols) + tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args) + + def checkDataType(self, row, col, dataType): + self.checkRowCol(row, col) + return self.cursor.istype(col, dataType) + + + def checkData(self, row, col, data, show = False): + if row >= self.queryRows: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, row+1, self.queryRows) + tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args) + if col >= self.queryCols: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, col+1, self.queryCols) + tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args) + + self.checkRowCol(row, col) + + if self.queryResult[row][col] != data: + if self.cursor.istype(col, "TIMESTAMP"): + # suppose user want to check nanosecond timestamp if a longer data passed`` + if isinstance(data,str) : + if (len(data) >= 28): + if self.queryResult[row][col] == _parse_ns_timestamp(data): + if(show): + tdLog.info("check successfully") + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) + tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) + else: + if self.queryResult[row][col].astimezone(datetime.timezone.utc) == _parse_datetime(data).astimezone(datetime.timezone.utc): + # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") + if(show): + tdLog.info("check successfully") + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) + tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) + return + elif isinstance(data,int): + if len(str(data)) == 16: + precision = 'us' + elif len(str(data)) == 13: + precision = 'ms' + elif len(str(data)) == 19: + precision = 'ns' + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) + tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) + return + success = False + if precision == 'ms': + dt_obj = self.queryResult[row][col] + ts = 
int(int((dt_obj-datetime.datetime.fromtimestamp(0,dt_obj.tzinfo)).total_seconds())*1000) + int(dt_obj.microsecond/1000) + if ts == data: + success = True + elif precision == 'us': + dt_obj = self.queryResult[row][col] + ts = int(int((dt_obj-datetime.datetime.fromtimestamp(0,dt_obj.tzinfo)).total_seconds())*1e6) + int(dt_obj.microsecond) + if ts == data: + success = True + elif precision == 'ns': + if data == self.queryResult[row][col]: + success = True + if success: + if(show): + tdLog.info("check successfully") + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) + tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) + return + elif isinstance(data,datetime.datetime): + dt_obj = self.queryResult[row][col] + delt_data = data-datetime.datetime.fromtimestamp(0,data.tzinfo) + delt_result = self.queryResult[row][col] - datetime.datetime.fromtimestamp(0,self.queryResult[row][col].tzinfo) + if delt_data == delt_result: + if(show): + tdLog.info("check successfully") + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) + tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) + return + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) + tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) + + if str(self.queryResult[row][col]) == str(data): + # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") + if(show): + tdLog.info("check successfully") + return + + elif isinstance(data, float): + if abs(data) >= 1 and abs((self.queryResult[row][col] - data) / data) <= 0.000001: + # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") + if(show): + tdLog.info("check successfully") + elif abs(data) < 1 and abs(self.queryResult[row][col] - data) <= 0.000001: + # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") + if(show): + tdLog.info("check successfully") + + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) + tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) + return + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) + tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) + if(show): + tdLog.info("check successfully") + + # return true or false replace exit, no print out + def checkRowColNoExit(self, row, col): + caller = inspect.getframeinfo(inspect.stack()[2][0]) + if row < 0: + args = (caller.filename, caller.lineno, self.sql, row) + return False + if col < 0: + args = (caller.filename, caller.lineno, self.sql, row) + return False + if row > self.queryRows: + args = (caller.filename, caller.lineno, self.sql, row, self.queryRows) + return False + if col > self.queryCols: + args = (caller.filename, caller.lineno, self.sql, col, self.queryCols) + return False + + return True + + + # return true or false replace exit, no print out + def checkDataNoExit(self, row, col, data): + if self.checkRowColNoExit(row, col) == 
False: + return False + if self.queryResult[row][col] != data: + if self.cursor.istype(col, "TIMESTAMP"): + # suppose user want to check nanosecond timestamp if a longer data passed + if (len(data) >= 28): + if pd.to_datetime(self.queryResult[row][col]) == pd.to_datetime(data): + return True + else: + if self.queryResult[row][col] == _parse_datetime(data): + return True + return False + + if str(self.queryResult[row][col]) == str(data): + return True + elif isinstance(data, float): + if abs(data) >= 1 and abs((self.queryResult[row][col] - data) / data) <= 0.000001: + return True + elif abs(data) < 1 and abs(self.queryResult[row][col] - data) <= 0.000001: + return True + else: + return False + else: + return False + + return True + + + # loop execute sql then sleep(waitTime) , if checkData ok break loop + def checkDataLoop(self, row, col, data, sql, loopCount, waitTime): + # loop check util checkData return true + for i in range(loopCount): + self.query(sql) + if self.checkDataNoExit(row, col, data) : + self.checkData(row, col, data) + return + time.sleep(waitTime) + + # last check + self.query(sql) + self.checkData(row, col, data) + + + def getData(self, row, col): + self.checkRowCol(row, col) + return self.queryResult[row][col] + + def getResult(self, sql): + self.sql = sql + try: + self.cursor.execute(sql) + self.queryResult = self.cursor.fetchall() + except Exception as e: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) + return self.queryResult + + def executeTimes(self, sql, times): + for i in range(times): + try: + return self.cursor.execute(sql) + except BaseException: + time.sleep(1) + continue + + def execute(self, sql, queryTimes=30, show=False): + self.sql = sql + if show: + tdLog.info(sql) + i=1 + while i <= queryTimes: + try: + self.affectedRows = self.cursor.execute(sql) + return self.affectedRows + except Exception as e: + tdLog.notice("Try to execute sql again, query times: %d "%i) + if i == queryTimes: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) + i+=1 + time.sleep(1) + pass + + def checkAffectedRows(self, expectAffectedRows): + if self.affectedRows != expectAffectedRows: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, self.affectedRows, expectAffectedRows) + tdLog.exit("%s(%d) failed: sql:%s, affectedRows:%d != expect:%d" % args) + + tdLog.info("sql:%s, affectedRows:%d == expect:%d" % (self.sql, self.affectedRows, expectAffectedRows)) + + def checkColNameList(self, col_name_list, expect_col_name_list): + if col_name_list == expect_col_name_list: + tdLog.info("sql:%s, col_name_list:%s == expect_col_name_list:%s" % (self.sql, col_name_list, expect_col_name_list)) + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, col_name_list, expect_col_name_list) + tdLog.exit("%s(%d) failed: sql:%s, col_name_list:%s != expect_col_name_list:%s" % args) + + def __check_equal(self, elm, expect_elm): + if elm == expect_elm: + return True + if type(elm) in(list, tuple) and type(expect_elm) in(list, tuple): + if len(elm) != len(expect_elm): + return False + if len(elm) == 0: + return True + for i in range(len(elm)): + flag = self.__check_equal(elm[i], expect_elm[i]) + if not flag: + return 
False
+            return True
+        return False
+
+    def checkEqual(self, elm, expect_elm):
+        if elm == expect_elm:
+            tdLog.info("sql:%s, elm:%s == expect_elm:%s" % (self.sql, elm, expect_elm))
+            return
+        if self.__check_equal(elm, expect_elm):
+            tdLog.info("sql:%s, elm:%s == expect_elm:%s" % (self.sql, elm, expect_elm))
+            return
+
+        caller = inspect.getframeinfo(inspect.stack()[1][0])
+        args = (caller.filename, caller.lineno, self.sql, elm, expect_elm)
+        # tdLog.info("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args)
+        raise Exception("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args)
+
+    def checkNotEqual(self, elm, expect_elm):
+        if elm != expect_elm:
+            tdLog.info("sql:%s, elm:%s != expect_elm:%s" % (self.sql, elm, expect_elm))
+        else:
+            caller = inspect.getframeinfo(inspect.stack()[1][0])
+            args = (caller.filename, caller.lineno, self.sql, elm, expect_elm)
+            tdLog.info("%s(%d) failed: sql:%s, elm:%s == expect_elm:%s" % args)
+            raise Exception("%s(%d) failed: sql:%s, elm:%s == expect_elm:%s" % args)
+
+    def get_times(self, time_str, precision="ms"):
+        caller = inspect.getframeinfo(inspect.stack()[1][0])
+        if time_str[-1] not in TAOS_TIME_INIT:
+            tdLog.exit(f"{caller.filename}({caller.lineno}) failed: {time_str} not a standard taos time init")
+        if precision not in TAOS_PRECISION:
+            tdLog.exit(f"{caller.filename}({caller.lineno}) failed: {precision} not a standard taos time precision")
+
+        if time_str[-1] == TAOS_TIME_INIT[0]:
+            times = int(time_str[:-1]) * TIME_NS
+        if time_str[-1] == TAOS_TIME_INIT[1]:
+            times = int(time_str[:-1]) * TIME_US
+        if time_str[-1] == TAOS_TIME_INIT[2]:
+            times = int(time_str[:-1]) * TIME_MS
+        if time_str[-1] == TAOS_TIME_INIT[3]:
+            times = int(time_str[:-1]) * TIME_S
+        if time_str[-1] == TAOS_TIME_INIT[4]:
+            times = int(time_str[:-1]) * TIME_M
+        if time_str[-1] == TAOS_TIME_INIT[5]:
+            times = int(time_str[:-1]) * TIME_H
+        if time_str[-1] == TAOS_TIME_INIT[6]:
+            times = int(time_str[:-1]) * TIME_D
+        if time_str[-1] == TAOS_TIME_INIT[7]:
+            times = int(time_str[:-1]) * TIME_W
+        if time_str[-1] == TAOS_TIME_INIT[8]:
+            times = int(time_str[:-1]) * TIME_N
+        if time_str[-1] == TAOS_TIME_INIT[9]:
+            times = int(time_str[:-1]) * TIME_Y
+
+        if precision == "ms":
+            return int(times)
+        elif precision == "us":
+            return int(times*1000)
+        elif precision == "ns":
+            return int(times*1000*1000)
+
+    def get_type(self, col):
+        if self.cursor.istype(col, "BOOL"):
+            return "BOOL"
+        if self.cursor.istype(col, "INT"):
+            return "INT"
+        if self.cursor.istype(col, "BIGINT"):
+            return "BIGINT"
+        if self.cursor.istype(col, "TINYINT"):
+            return "TINYINT"
+        if self.cursor.istype(col, "SMALLINT"):
+            return "SMALLINT"
+        if self.cursor.istype(col, "FLOAT"):
+            return "FLOAT"
+        if self.cursor.istype(col, "DOUBLE"):
+            return "DOUBLE"
+        if self.cursor.istype(col, "BINARY"):
+            return "BINARY"
+        if self.cursor.istype(col, "NCHAR"):
+            return "NCHAR"
+        if self.cursor.istype(col, "TIMESTAMP"):
+            return "TIMESTAMP"
+        if self.cursor.istype(col, "JSON"):
+            return "JSON"
+        if self.cursor.istype(col, "TINYINT UNSIGNED"):
+            return "TINYINT UNSIGNED"
+        if self.cursor.istype(col, "SMALLINT UNSIGNED"):
+            return "SMALLINT UNSIGNED"
+        if self.cursor.istype(col, "INT UNSIGNED"):
+            return "INT UNSIGNED"
+        if self.cursor.istype(col, "BIGINT UNSIGNED"):
+            return "BIGINT UNSIGNED"
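+
+    # Typical assertion flow in a test case (sketch, comments only, not
+    # executed; the table name and values are made up):
+    #
+    #   tdSql.query("select * from db.t1")
+    #   tdSql.checkRows(2)
+    #   tdSql.checkData(0, 1, 100)
+    #   tdSql.error("select * from db.not_a_table")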
+
+    def taosdStatus(self, state):
+        tdLog.sleep(5)
+        pstate = 0
+        for i in range(30):
+            pstate = 0
+            pl = psutil.pids()
+            for pid in pl:
+                try:
+                    if psutil.Process(pid).name() == 'taosd':
+                        print('have already started')
+                        pstate = 1
+                        break
+                except psutil.NoSuchProcess:
+                    pass
+            if pstate == state: break
+            if state or pstate:
+                tdLog.sleep(1)
+                continue
+            pstate = 0
+            break
+
+        args = (pstate, state)
+        if pstate == state:
+            tdLog.info("taosd state is %d == expect:%d" % args)
+        else:
+            tdLog.exit("taosd state is %d != expect:%d" % args)
+        pass
+
+    def haveFile(self, dir, state):
+        if os.path.exists(dir) and os.path.isdir(dir):
+            if not os.listdir(dir):
+                if state:
+                    tdLog.exit("dir: %s is empty, expect: not empty" % dir)
+                else:
+                    tdLog.info("dir: %s is empty, expect: empty" % dir)
+            else:
+                if state:
+                    tdLog.info("dir: %s is not empty, expect: not empty" % dir)
+                else:
+                    tdLog.exit("dir: %s is not empty, expect: empty" % dir)
+        else:
+            tdLog.exit("dir: %s doesn't exist" % dir)
+
+    def createDir(self, dir):
+        if os.path.exists(dir):
+            shutil.rmtree(dir)
+            tdLog.info("dir: %s is removed" % dir)
+        os.makedirs(dir, 0o755)
+        tdLog.info("dir: %s is created" % dir)
+        pass
+
+tdSql = TDSql()
diff --git a/tests/army/frame/sqlset.py b/tests/army/frame/sqlset.py
new file mode 100644
index 0000000000..3a56090750
--- /dev/null
+++ b/tests/army/frame/sqlset.py
@@ -0,0 +1,70 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+from frame.sql import tdSql
+
+class TDSetSql:
+    def init(self, conn, logSql):
+
+        self.stbname = 'stb'
+
+    def set_create_normaltable_sql(self, ntbname='ntb',
+                                   column_dict={'ts':'timestamp','col1':'tinyint','col2':'smallint','col3':'int','col4':'bigint','col5': 'unsigned int','col6': 'unsigned tinyint','col7': 'unsigned smallint',
+                                                'col8': 'unsigned int','col9': 'unsigned bigint','col10': 'float','col11': 'double','col12': 'bool','col13': 'binary(20)','col14': 'nchar(20)'}):
+        column_sql = ''
+        for k, v in column_dict.items():
+            column_sql += f"{k} {v},"
+        create_ntb_sql = f'create table {ntbname} ({column_sql[:-1]})'
+        return create_ntb_sql
+
+    def set_create_stable_sql(self, stbname='stb',
+                              column_dict={'ts':'timestamp','col1':'tinyint','col2':'smallint','col3':'int','col4':'bigint','col5': 'unsigned int','col6': 'unsigned tinyint','col7': 'unsigned smallint',
+                                           'col8': 'unsigned int','col9': 'unsigned bigint','col10': 'float','col11': 'double','col12': 'bool','col13': 'binary(20)','col14': 'nchar(20)'},
+                              tag_dict={'ts_tag':'timestamp','t1':'tinyint','t2':'smallint','t3':'int','t4':'bigint','t5': 'unsigned int','t6': 'unsigned tinyint','t7': 'unsigned smallint',
+                                        't8': 'unsigned int','t9': 'unsigned bigint','t10': 'float','t11': 'double','t12': 'bool','t13': 'binary(20)','t14': 'nchar(20)'}):
+        column_sql = ''
+        tag_sql = ''
+        for k, v in column_dict.items():
+            column_sql += f"{k} {v},"
+        for k, v in tag_dict.items():
+            tag_sql += f"{k} {v},"
+        create_stb_sql = f'create table {stbname} ({column_sql[:-1]}) tags({tag_sql[:-1]})'
+        return create_stb_sql
+
+    def set_insertsql(self, column_dict, tbname, binary_str=None, nchar_str=None):
+        sql = ''
+        for k, v in column_dict.items():
+            if v.lower() == 'timestamp' or v.lower() == 'tinyint' or v.lower() == 'smallint' or v.lower() == 'int' or v.lower() == 'bigint' or \
+                v.lower() == 'tinyint unsigned' or v.lower() == 'smallint unsigned' or v.lower() == 'int unsigned' or v.lower() == 'bigint unsigned' or v.lower() == 
'bool': + sql += '%d,' + elif v.lower() == 'float' or v.lower() == 'double': + sql += '%f,' + elif 'binary' in v.lower(): + sql += f'"{binary_str}%d",' + elif 'nchar' in v.lower(): + sql += f'"{nchar_str}%d",' + return (f'insert into {tbname} values({sql[:-1]})') + + def insert_values(self,column_dict,i,insert_sql,insert_list,ts): + for k, v in column_dict.items(): + if v.lower() in[ 'tinyint' , 'smallint' , 'int', 'bigint' , 'tinyint unsigned' , 'smallint unsigned' , 'int unsigned' , 'bigint unsigned'] or\ + 'binary' in v.lower() or 'nchar' in v.lower(): + insert_list.append(0 + i) + elif v.lower() == 'float' or v.lower() == 'double': + insert_list.append(0.1 + i) + elif v.lower() == 'bool': + insert_list.append(i % 2) + elif v.lower() == 'timestamp': + insert_list.append(ts + i) + tdSql.execute(insert_sql%(tuple(insert_list))) + \ No newline at end of file diff --git a/tests/army/frame/sub.py b/tests/army/frame/sub.py new file mode 100644 index 0000000000..1fa4dfa674 --- /dev/null +++ b/tests/army/frame/sub.py @@ -0,0 +1,44 @@ +################################################################### + # Copyright (c) 2020 by TAOS Technologies, Inc. + # All rights reserved. + # + # This file is proprietary and confidential to TAOS Technologies. + # No part of this file may be reproduced, stored, transmitted, + # disclosed or used in any form or by any means other than as + # expressly provided by the written permission from Jianhui Tao + # +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import time +import datetime +from frame.log import * + +class TDSub: + def __init__(self): + self.consumedRows = 0 + self.consumedCols = 0 + + def init(self, sub): + self.sub = sub + + def close(self, keepProgress): + self.sub.close(keepProgress) + + def consume(self): + self.result = self.sub.consume() + self.result.fetch_all() + self.consumedRows = self.result.row_count + self.consumedCols = self.result.field_count + return self.consumedRows + + def checkRows(self, expectRows): + if self.consumedRows != expectRows: + tdLog.exit("consumed rows:%d != expect:%d" % (self.consumedRows, expectRows)) + tdLog.info("consumed rows:%d == expect:%d" % (self.consumedRows, expectRows)) + + +tdSub = TDSub() diff --git a/tests/army/frame/taosadapter.py b/tests/army/frame/taosadapter.py new file mode 100644 index 0000000000..7830aab08c --- /dev/null +++ b/tests/army/frame/taosadapter.py @@ -0,0 +1,261 @@ +import requests +from fabric2 import Connection +from frame.log import * +from frame.common import * + + +class TAdapter: + def __init__(self): + self.running = 0 + self.deployed = 0 + self.remoteIP = "" + self.taosadapter_cfg_dict = { + "debug" : True, + "taosConfigDir" : "", + "port" : 6041, + "logLevel" : "error", + "cors" : { + "allowAllOrigins" : True, + }, + "pool" : { + "maxConnect" : 4000, + "maxIdle" : 4000, + "idleTimeout" : "1h" + }, + "ssl" : { + "enable" : False, + "certFile" : "", + "keyFile" : "", + }, + "log" : { + "path" : "", + "rotationCount" : 30, + "rotationTime" : "24h", + "rotationSize" : "1GB", + "enableRecordHttpSql" : True, + "sqlRotationCount" : 2, + "sqlRotationTime" : "24h", + "sqlRotationSize" : "1GB", + }, + "monitor" : { + "collectDuration" : "3s", + "incgroup" : False, + "pauseQueryMemoryThreshold" : 70, + "pauseAllMemoryThreshold" : 80, + "identity" : "", + "writeToTD" : True, + "user" : "root", + "password" : "taosdata", + "writeInterval" : "30s" + }, + "opentsdb" : { + "enable" : True + }, + "influxdb" : { + 
"enable" : True + }, + "statsd" : { + "enable" : True + }, + "collectd" : { + "enable" : True + }, + "opentsdb_telnet" : { + "enable" : True + }, + "node_exporter" : { + "enable" : True + }, + "prometheus" : { + "enable" : True + }, + } + # TODO: add taosadapter env: + # 1. init cfg.toml.dict :OK + # 2. dump dict to toml : OK + # 3. update cfg.toml.dict :OK + # 4. check adapter exists : OK + # 5. deploy adapter cfg : OK + # 6. adapter start : OK + # 7. adapter stop + + def init(self, path, remoteIP=""): + self.path = path + self.remoteIP = remoteIP + binPath = get_path() + "/../../../" + binPath = os.path.realpath(binPath) + + if path == "": + self.path = os.path.abspath(binPath + "../../") + else: + self.path = os.path.realpath(path) + + if self.remoteIP: + try: + self.config = eval(remoteIP) + self.remote_conn = Connection(host=self.config["host"], port=self.config["port"], user=self.config["user"], connect_kwargs={'password':self.config["password"]}) + except Exception as e: + tdLog.notice(e) + + def update_cfg(self, update_dict :dict): + if not isinstance(update_dict, dict): + return + if "log" in update_dict and "path" in update_dict["log"]: + del update_dict["log"]["path"] + for key, value in update_dict.items(): + if key in ["cors", "pool", "ssl", "log", "monitor", "opentsdb", "influxdb", "statsd", "collectd", "opentsdb_telnet", "node_exporter", "prometheus"]: + if isinstance(value, dict): + for k, v in value.items(): + self.taosadapter_cfg_dict[key][k] = v + else: + self.taosadapter_cfg_dict[key] = value + + def check_adapter(self): + if get_path(tool="taosadapter"): + return False + else: + return True + + def remote_exec(self, updateCfgDict, execCmd): + remoteCfgDict = copy.deepcopy(updateCfgDict) + if "log" in remoteCfgDict and "path" in remoteCfgDict["log"]: + del remoteCfgDict["log"]["path"] + + remoteCfgDictStr = base64.b64encode(toml.dumps(remoteCfgDict).encode()).decode() + execCmdStr = base64.b64encode(execCmd.encode()).decode() + with self.remote_conn.cd((self.config["path"]+sys.path[0].replace(self.path, '')).replace('\\','/')): + self.remote_conn.run(f"python3 ./test.py -D {remoteCfgDictStr} -e {execCmdStr}" ) + + def cfg(self, option, value): + cmd = f"echo {option} = {value} >> {self.cfg_path}" + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def deploy(self, *update_cfg_dict): + self.log_dir = os.path.join(self.path,"sim","dnode1","log") + self.cfg_dir = os.path.join(self.path,"sim","dnode1","cfg") + self.cfg_path = os.path.join(self.cfg_dir,"taosadapter.toml") + + cmd = f"touch {self.cfg_path}" + if os.system(cmd) != 0: + tdLog.exit(cmd) + + self.taosadapter_cfg_dict["log"]["path"] = self.log_dir + if bool(update_cfg_dict): + self.update_cfg(update_dict=update_cfg_dict) + + if (self.remoteIP == ""): + dict2toml(self.taosadapter_cfg_dict, self.cfg_path) + else: + self.remote_exec(self.taosadapter_cfg_dict, "tAdapter.deploy(update_cfg_dict)") + + self.deployed = 1 + + tdLog.debug(f"taosadapter is deployed and configured by {self.cfg_path}") + + def start(self): + bin_path = get_path(tool="taosadapter") + + if (bin_path == ""): + tdLog.exit("taosadapter not found!") + else: + tdLog.info(f"taosadapter found: {bin_path}") + + if platform.system().lower() == 'windows': + cmd = f"mintty -h never {bin_path} -c {self.cfg_path}" + else: + cmd = f"nohup {bin_path} -c {self.cfg_path} > /dev/null & " + + if self.remoteIP: + self.remote_exec(self.taosadapter_cfg_dict, f"tAdapter.deployed=1\ntAdapter.log_dir={self.log_dir}\ntAdapter.cfg_dir={self.cfg_dir}\ntAdapter.start()") + 
self.running = 1 + else: + os.system(f"rm -rf {self.log_dir}{os.sep}taosadapter*") + if os.system(cmd) != 0: + tdLog.exit(cmd) + self.running = 1 + tdLog.debug(f"taosadapter is running with {cmd} " ) + + time.sleep(0.1) + + taosadapter_port = self.taosadapter_cfg_dict["port"] + for i in range(5): + ip = 'localhost' + if self.remoteIP != "": + ip = self.remoteIP + url = f'http://{ip}:{taosadapter_port}/-/ping' + try: + r = requests.get(url) + if r.status_code == 200: + tdLog.info(f"the taosadapter has been started, using port:{taosadapter_port}") + break + except Exception: + tdLog.info(f"the taosadapter do not started!!!") + time.sleep(1) + + def start_taosadapter(self): + """ + use this method, must deploy taosadapter + """ + bin_path = get_path(tool="taosadapter") + + if (bin_path == ""): + tdLog.exit("taosadapter not found!") + else: + tdLog.info(f"taosadapter found: {bin_path}") + + if self.deployed == 0: + tdLog.exit("taosadapter is not deployed") + + if platform.system().lower() == 'windows': + cmd = f"mintty -h never {bin_path} -c {self.cfg_dir}" + else: + cmd = f"nohup {bin_path} -c {self.cfg_path} > /dev/null & " + + if self.remoteIP: + self.remote_exec(self.taosadapter_cfg_dict, f"tAdapter.deployed=1\ntAdapter.log_dir={self.log_dir}\ntAdapter.cfg_dir={self.cfg_dir}\ntAdapter.start()") + self.running = 1 + else: + if os.system(cmd) != 0: + tdLog.exit(cmd) + self.running = 1 + tdLog.debug(f"taosadapter is running with {cmd} " ) + + time.sleep(0.1) + + def stop(self, force_kill=False): + signal = "-9" if force_kill else "-15" + if self.remoteIP: + self.remote_exec(self.taosadapter_cfg_dict, "tAdapter.running=1\ntAdapter.stop()") + tdLog.info("stop taosadapter") + return + toBeKilled = "taosadapter" + if platform.system().lower() == 'windows': + psCmd = f"ps -ef|grep -w {toBeKilled}| grep -v grep | awk '{{print $2}}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip() + while(processID): + killCmd = "kill %s %s > nul 2>&1" % (signal, processID) + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip() + self.running = 0 + tdLog.debug(f"taosadapter is stopped by kill {signal}") + + else: + if self.running != 0: + psCmd = f"ps -ef|grep -w {toBeKilled}| grep -v grep | awk '{{print $2}}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip() + while(processID): + killCmd = "kill %s %s > /dev/null 2>&1" % (signal, processID) + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip() + port = 6041 + fuserCmd = f"fuser -k -n tcp {port} > /dev/null" + os.system(fuserCmd) + self.running = 0 + tdLog.debug(f"taosadapter is stopped by kill {signal}") + + + +tAdapter = TAdapter() diff --git a/tests/army/frame/taosdemoCfg.py b/tests/army/frame/taosdemoCfg.py new file mode 100644 index 0000000000..5f8198681e --- /dev/null +++ b/tests/army/frame/taosdemoCfg.py @@ -0,0 +1,465 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+import time
+import datetime
+import inspect
+import psutil
+import shutil
+import json
+from frame.log import *
+from multiprocessing import cpu_count
+
+
+# TODO: fully test the function. Handle exceptions.
+#       Handle json format not accepted by taosdemo
+
+### How to use TaosdemoCfg:
+# Before you start:
+# Make sure you understand how taosdemo's JSON file is structured. Because the
+# Python version used does not support dictionaries nested inside dictionaries
+# as self objects, the config is torn into different parts. Please make sure
+# you understand which dictionary represents which part of which type of file.
+# This module will reassemble the parts when creating the JSON file.
+#
+# Basic use example
+# step 1: use self.append_sql_stb() to append the insert/query/subscribe dictionary into the module;
+#         you can append many insert/query/subscribe dictionaries, but pay attention to taosdemo's limits
+# step 2: use the alter functions to alter the specific config
+# step 3: use the generation functions to generate the files
+#
+# step 1 and step 2 can be replaced with using import functions
+class TDTaosdemoCfg:
+    def __init__(self):
+        self.insert_cfg = {
+            "filetype": "insert",
+            "cfgdir": "/etc/taos",
+            "host": "127.0.0.1",
+            "port": 6030,
+            "user": "root",
+            "password": "taosdata",
+            "thread_count": cpu_count(),
+            "create_table_thread_count": cpu_count(),
+            "result_file": "./insert_res.txt",
+            "confirm_parameter_prompt": "no",
+            "insert_interval": 0,
+            "num_of_records_per_req": 32766,
+            "max_sql_len": 32766,
+            "databases": None
+        }
+
+        self.db = {
+            "name": 'db',
+            "drop": 'yes',
+            "replica": 1,
+            "duration": 10,
+            "cache": 16,
+            "blocks": 6,
+            "precision": "ms",
+            "keep": 3650,
+            "minRows": 100,
+            "maxRows": 4096,
+            "comp": 2,
+            "walLevel": 1,
+            "cachelast": 0,
+            "quorum": 1,
+            "fsync": 3000,
+            "update": 0
+        }
+
+        self.query_cfg = {
+            "filetype": "query",
+            "cfgdir": "/etc/taos",
+            "host": "127.0.0.1",
+            "port": 6030,
+            "user": "root",
+            "password": "taosdata",
+            "confirm_parameter_prompt": "no",
+            "databases": "db",
+            "query_times": 2,
+            "query_mode": "taosc",
+            "specified_table_query": None,
+            "super_table_query": None
+        }
+
+        self.table_query = {
+            "query_interval": 1,
+            "concurrent": 3,
+            "sqls": None
+        }
+
+        self.stable_query = {
+            "stblname": "stb",
+            "query_interval": 1,
+            "threads": 3,
+            "sqls": None
+        }
+
+        self.sub_cfg = {
+            "filetype": "subscribe",
+            "cfgdir": "/etc/taos",
+            "host": "127.0.0.1",
+            "port": 6030,
+            "user": "root",
+            "password": "taosdata",
+            "databases": "db",
+            "confirm_parameter_prompt": "no",
+            "specified_table_query": None,
+            "super_table_query": None
+        }
+
+        self.table_sub = {
+            "concurrent": 1,
+            "mode": "sync",
+            "interval": 10000,
+            "restart": "yes",
+            "keepProgress": "yes",
+            "sqls": None
+        }
+
+        self.stable_sub = {
+            "stblname": "stb",
+            "threads": 1,
+            "mode": "sync",
+            "interval": 10000,
+            "restart": "yes",
+            "keepProgress": "yes",
+            "sqls": None
+        }
+
+        self.stbs = []
+        self.stb_template = {
+            "name": "stb",
+            "child_table_exists": "no",
+            "childtable_count": 100,
+            "childtable_prefix": "stb_",
+            "auto_create_table": "no",
+            "batch_create_tbl_num": 5,
+            "data_source": "rand",
+            "insert_mode": "taosc",
+            "insert_rows": 100,
+            "childtable_limit": 10,
+            
"childtable_offset": 0, + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 32766, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT", "count": 1}], + "tags": [{"type": "BIGINT", "count": 1}] + } + + self.tb_query_sql = [] + self.tb_query_sql_template = { + "sql": "select last_row(*) from stb_0 ", + "result": "temp/query_res0.txt" + } + + self.stb_query_sql = [] + self.stb_query_sql_template = { + "sql": "select last_row(ts) from xxxx", + "result": "temp/query_res2.txt" + } + + self.tb_sub_sql = [] + self.tb_sub_sql_template = { + "sql": "select * from stb_0 ;", + "result": "temp/subscribe_res0.txt" + } + + self.stb_sub_sql = [] + self.stb_sub_sql_template = { + "sql": "select * from xxxx where ts > '2021-02-25 11:35:00.000' ;", + "result": "temp/subscribe_res1.txt" + } + + # The following functions are import functions for different dicts and lists + # except import_sql, all other import functions will a dict and overwrite the origional dict + # dict_in: the dict used to overwrite the target + def import_insert_cfg(self, dict_in): + self.insert_cfg = dict_in + + def import_db(self, dict_in): + self.db = dict_in + + def import_stbs(self, dict_in): + self.stbs = dict_in + + def import_query_cfg(self, dict_in): + self.query_cfg = dict_in + + def import_table_query(self, dict_in): + self.table_query = dict_in + + def import_stable_query(self, dict_in): + self.stable_query = dict_in + + def import_sub_cfg(self, dict_in): + self.sub_cfg = dict_in + + def import_table_sub(self, dict_in): + self.table_sub = dict_in + + def import_stable_sub(self, dict_in): + self.stable_sub = dict_in + + def import_sql(self, Sql_in, mode): + """used for importing the sql later used + + Args: + Sql_in (dict): the imported sql dict + mode (str): the sql storing location within TDTaosdemoCfg + format: 'fileType_tableType' + fileType: query, sub + tableType: table, stable + """ + if mode == 'query_table': + self.tb_query_sql = Sql_in + elif mode == 'query_stable': + self.stb_query_sql = Sql_in + elif mode == 'sub_table': + self.tb_sub_sql = Sql_in + elif mode == 'sub_stable': + self.stb_sub_sql = Sql_in + # import functions end + + # The following functions are alter functions for different dicts + # Args: + # key: the key that is going to be modified + # value: the value of the key that is going to be modified + # if key = 'databases' | "specified_table_query" | "super_table_query"|"sqls" + # value will not be used + + def alter_insert_cfg(self, key, value): + + if key == 'databases': + self.insert_cfg[key] = [ + { + 'dbinfo': self.db, + 'super_tables': self.stbs + } + ] + else: + self.insert_cfg[key] = value + + def alter_db(self, key, value): + self.db[key] = value + + def alter_query_tb(self, key, value): + if key == "sqls": + self.table_query[key] = self.tb_query_sql + else: + self.table_query[key] = value + + def alter_query_stb(self, key, value): + if key == "sqls": + self.stable_query[key] = self.stb_query_sql + else: + self.stable_query[key] = value + + def alter_query_cfg(self, key, value): + if key == "specified_table_query": + self.query_cfg["specified_table_query"] = self.table_query + elif key == "super_table_query": + self.query_cfg["super_table_query"] = self.stable_query + else: + self.query_cfg[key] = value + + def alter_sub_cfg(self, key, value): + if key == "specified_table_query": + 
self.sub_cfg["specified_table_query"] = self.table_sub + elif key == "super_table_query": + self.sub_cfg["super_table_query"] = self.stable_sub + else: + self.sub_cfg[key] = value + + def alter_sub_stb(self, key, value): + if key == "sqls": + self.stable_sub[key] = self.stb_sub_sql + else: + self.stable_sub[key] = value + + def alter_sub_tb(self, key, value): + if key == "sqls": + self.table_sub[key] = self.tb_sub_sql + else: + self.table_sub[key] = value + # alter function ends + + # the following functions are for handling the sql lists + def append_sql_stb(self, target, value): + """for appending sql dict into specific sql list + + Args: + target (str): the target append list + format: 'fileType_tableType' + fileType: query, sub + tableType: table, stable + unique: 'insert_stbs' + value (dict): the sql dict going to be appended + """ + if target == 'insert_stbs': + self.stbs.append(value) + elif target == 'query_table': + self.tb_query_sql.append(value) + elif target == 'query_stable': + self.stb_query_sql.append(value) + elif target == 'sub_table': + self.tb_sub_sql.append(value) + elif target == 'sub_stable': + self.stb_sub_sql.append(value) + + def pop_sql_stb(self, target, index): + """for poping a sql dict from specific sql list + + Args: + target (str): the target append list + format: 'fileType_tableType' + fileType: query, sub + tableType: table, stable + unique: 'insert_stbs' + index (int): the sql dict that is going to be popped + """ + if target == 'insert_stbs': + self.stbs.pop(index) + elif target == 'query_table': + self.tb_query_sql.pop(index) + elif target == 'query_stable': + self.stb_query_sql.pop(index) + elif target == 'sub_table': + self.tb_sub_sql.pop(index) + elif target == 'sub_stable': + self.stb_sub_sql.pop(index) + # sql list modification function end + + # The following functions are get functions for different dicts + def get_db(self): + return self.db + + def get_stb(self): + return self.stbs + + def get_insert_cfg(self): + return self.insert_cfg + + def get_query_cfg(self): + return self.query_cfg + + def get_tb_query(self): + return self.table_query + + def get_stb_query(self): + return self.stable_query + + def get_sub_cfg(self): + return self.sub_cfg + + def get_tb_sub(self): + return self.table_sub + + def get_stb_sub(self): + return self.stable_sub + + def get_sql(self, target): + """general get function for all sql lists + + Args: + target (str): the sql list want to get + format: 'fileType_tableType' + fileType: query, sub + tableType: table, stable + unique: 'insert_stbs' + """ + if target == 'query_table': + return self.tb_query_sql + elif target == 'query_stable': + return self.stb_query_sql + elif target == 'sub_table': + return self.tb_sub_sql + elif target == 'sub_stable': + return self.stb_sub_sql + + def get_template(self, target): + """general get function for the default sql template + + Args: + target (str): the sql list want to get + format: 'fileType_tableType' + fileType: query, sub + tableType: table, stable + unique: 'insert_stbs' + """ + if target == 'insert_stbs': + return self.stb_template + elif target == 'query_table': + return self.tb_query_sql_template + elif target == 'query_stable': + return self.stb_query_sql_template + elif target == 'sub_table': + return self.tb_sub_sql_template + elif target == 'sub_stable': + return self.stb_sub_sql_template + else: + print(f'did not find {target}') + + # the folloing are the file generation functions + """defalut document: + generator functio for generating taosdemo json file + will 
assemble the dicts and dump the final json
+
+        Args:
+            pathName (str): the directory wanting the json file to be
+            fileName (str): the name suffix of the json file
+        Returns:
+            str: [pathName]/[filetype]_[fileName].json
+    """
+
+    def generate_insert_cfg(self, pathName, fileName):
+        cfgFileName = f'{pathName}/insert_{fileName}.json'
+        self.alter_insert_cfg('databases', None)
+        with open(cfgFileName, 'w') as file:
+            json.dump(self.insert_cfg, file)
+        return cfgFileName
+
+    def generate_query_cfg(self, pathName, fileName):
+        cfgFileName = f'{pathName}/query_{fileName}.json'
+        self.alter_query_tb('sqls', None)
+        self.alter_query_stb('sqls', None)
+        self.alter_query_cfg('specified_table_query', None)
+        self.alter_query_cfg('super_table_query', None)
+        with open(cfgFileName, 'w') as file:
+            json.dump(self.query_cfg, file)
+        return cfgFileName
+
+    def generate_subscribe_cfg(self, pathName, fileName):
+        cfgFileName = f'{pathName}/subscribe_{fileName}.json'
+        self.alter_sub_tb('sqls', None)
+        self.alter_sub_stb('sqls', None)
+        self.alter_sub_cfg('specified_table_query', None)
+        self.alter_sub_cfg('super_table_query', None)
+        with open(cfgFileName, 'w') as file:
+            json.dump(self.sub_cfg, file)
+        return cfgFileName
+    # file generation functions ends
+
+    def drop_cfg_file(self, fileName):
+        os.remove(f'{fileName}')
+
+
+taosdemoCfg = TDTaosdemoCfg()
diff --git a/tests/army/frame/types.py b/tests/army/frame/types.py
new file mode 100644
index 0000000000..218a477026
--- /dev/null
+++ b/tests/army/frame/types.py
@@ -0,0 +1,38 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+from enum import Enum
+
+class TDSmlProtocolType(Enum):
+    '''
+    Schemaless Protocol types
+    0 - unknown
+    1 - InfluxDB Line Protocol
+    2 - OpenTSDB Telnet Protocol
+    3 - OpenTSDB JSON Protocol
+    '''
+    UNKNOWN = 0
+    LINE = 1
+    TELNET = 2
+    JSON = 3
+
+class TDSmlTimestampType(Enum):
+    NOT_CONFIGURED = 0
+    HOUR = 1
+    MINUTE = 2
+    SECOND = 3
+    MILLI_SECOND = 4
+    MICRO_SECOND = 5
+    NANO_SECOND = 6
+
+
diff --git a/tests/army/loop.sh b/tests/army/loop.sh
new file mode 100755
index 0000000000..b3bc9728de
--- /dev/null
+++ b/tests/army/loop.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+##################################################
+#
+# Do simulation test
+#
+##################################################
+
+set -e
+#set -x
+
+CMD_NAME=
+LOOP_TIMES=5
+SLEEP_TIME=0
+
+while getopts "hf:t:s:" arg
+do
+  case $arg in
+    f)
+      CMD_NAME=$OPTARG
+      ;;
+    t)
+      LOOP_TIMES=$OPTARG
+      ;;
+    s)
+      SLEEP_TIME=$OPTARG
+      ;;
+    h)
+      echo "Usage: $(basename $0) -f [cmd name] "
+      echo "                      -t [loop times] "
+      echo "                      -s [sleep time] "
+      exit 0
+      ;;
+    ?)
+      echo "unknown argument"
+      ;;
+  esac
+done
+
+echo LOOP_TIMES ${LOOP_TIMES}
+echo CMD_NAME ${CMD_NAME}
+echo SLEEP_TIME ${SLEEP_TIME}
+
+GREEN='\033[1;32m'
+GREEN_DARK='\033[0;32m'
+GREEN_UNDERLINE='\033[4;32m'
+NC='\033[0m'
+
+for ((i=0; i<$LOOP_TIMES; i++ ))
+do
+  echo -e $GREEN loop $i $NC
+  echo -e $GREEN cmd $CMD_NAME $NC
+  $CMD_NAME
+  sleep ${SLEEP_TIME}
+done
diff --git a/tests/army/pytest.sh b/tests/army/pytest.sh
new file mode 100755
index 0000000000..bae0fdf278
--- /dev/null
+++ b/tests/army/pytest.sh
@@ -0,0 +1,114 @@
+#!/bin/bash
+
+##################################################
+#
+# Do simulation test
+#
+##################################################
+
+set +e
+#set -x
+if [[ "$OSTYPE" == "darwin"* ]]; then
+    TD_OS="Darwin"
+else
+    OS=$(cat /etc/*-release | grep "^NAME=" | cut -d= -f2)
+    len=$(echo ${#OS})
+    len=$((len - 2))
+    TD_OS=$(echo -ne ${OS:1:${len}} | cut -d" " -f1)
+fi
+
+UNAME_BIN=$(which uname)
+OS_TYPE=$($UNAME_BIN)
+
+cd .
+
+# Get responsible directories
+CODE_DIR=$(dirname $0)
+CODE_DIR=$(pwd)
+
+IN_TDINTERNAL="community"
+if [[ "$CODE_DIR" == *"$IN_TDINTERNAL"* ]]; then
+    cd ../../..
+else
+    cd ../../
+fi
+
+TOP_DIR=$(pwd)
+TAOSD_DIR=$(find . -name "taosd" | grep bin | head -n1)
+
+cut_opt="-f "
+
+if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then
+    BIN_DIR=$(find . -name "taosd" | grep bin | head -n1 | cut -d '/' ${cut_opt}2,3)
+else
+    BIN_DIR=$(find . -name "taosd" | grep bin | head -n1 | cut -d '/' ${cut_opt}2)
+fi
+
+declare -x BUILD_DIR=$TOP_DIR/$BIN_DIR
+declare -x SIM_DIR=$TOP_DIR/sim
+PROGRAM=$BUILD_DIR/build/bin/tsim
+PRG_DIR=$SIM_DIR/tsim
+ASAN_DIR=$SIM_DIR/asan
+
+chmod -R 777 $PRG_DIR
+echo "------------------------------------------------------------------------"
+echo "Start TDengine Testing Case ..."
+echo "BUILD_DIR: $BUILD_DIR"
+echo "SIM_DIR  : $SIM_DIR"
+echo "CODE_DIR : $CODE_DIR"
+echo "ASAN_DIR : $ASAN_DIR"
+
+# prevent deleting the / folder or /usr/bin
+if [ ${#SIM_DIR} -lt 10 ]; then
+    echo "len(SIM_DIR) < 10 , danger so exit. SIM_DIR=$SIM_DIR"
+    exit 1
+fi
+
+rm -rf $SIM_DIR/*
+
+mkdir -p $PRG_DIR
+mkdir -p $ASAN_DIR
+
+cd $CODE_DIR
+ulimit -n 600000
+ulimit -c unlimited
+
+#sudo sysctl -w kernel.core_pattern=$TOP_DIR/core.%p.%e
+
+echo "ExecuteCmd:" $*
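+# Example invocation (illustrative; the case file name is made up):
+#   ./pytest.sh python3 ./test.py -f some_case.py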
+
+if [[ "$TD_OS" == "Alpine" ]]; then
+    $*
+else
+    AsanFile=$ASAN_DIR/psim.info
+    echo "AsanFile:" $AsanFile
+
+    unset LD_PRELOAD
+    #export LD_PRELOAD=libasan.so.5
+    #export LD_PRELOAD=$(gcc -print-file-name=libasan.so)
+    export LD_PRELOAD="$(realpath "$(gcc -print-file-name=libasan.so)") $(realpath "$(gcc -print-file-name=libstdc++.so)")"
+    echo "Preload AsanSo:" $?
+
+    $* -a 2>$AsanFile
+
+    unset LD_PRELOAD
+    for ((i = 1; i <= 20; i++)); do
+        AsanFileLen=$(cat $AsanFile | wc -l)
+        echo "AsanFileLen:" $AsanFileLen
+        if [ $AsanFileLen -gt 10 ]; then
+            break
+        fi
+        sleep 1
+    done
+    # check whether the case succeeded
+    AsanFileSuccessLen=$(grep -w "successfully executed" $AsanFile | wc -l)
+    echo "AsanFileSuccessLen:" $AsanFileSuccessLen
+
+    if [ $AsanFileSuccessLen -gt 0 ]; then
+        echo "Execute script successfully and check asan"
+        $CODE_DIR/../script/sh/checkAsan.sh
+    else
+        echo "Execute script failure"
+        exit 1
+    fi
+fi
diff --git a/tests/army/test.py b/tests/army/test.py
new file mode 100644
index 0000000000..cc114e0d16
--- /dev/null
+++ b/tests/army/test.py
@@ -0,0 +1,688 @@
+#!/usr/bin/python
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+# install pip
+# pip install src/connector/python/
+
+# -*- coding: utf-8 -*-
+import os
+import sys
+import getopt
+import subprocess
+import time
+import base64
+import json
+import platform
+import socket
+import threading
+import importlib
+
+import toml
+
+from frame.log import *
+from frame.dnodes import *
+from frame.cases import *
+from frame.cluster import *
+from frame.taosadapter import *
+
+import taos
+import taosrest
+import taosws
+
+def checkRunTimeError():
+    import win32gui
+    timeCount = 0
+    while 1:
+        time.sleep(1)
+        timeCount = timeCount + 1
+        print("checkRunTimeError", timeCount)
+        if (timeCount > 600):
+            print("stop the test.")
+            os.system("TASKKILL /F /IM taosd.exe")
+            os.system("TASKKILL /F /IM taos.exe")
+            os.system("TASKKILL /F /IM tmq_sim.exe")
+            os.system("TASKKILL /F /IM mintty.exe")
+            os.system("TASKKILL /F /IM python.exe")
+            quit(0)
+        hwnd = win32gui.FindWindow(None, "Microsoft Visual C++ Runtime Library")
+        if hwnd:
+            os.system("TASKKILL /F /IM taosd.exe")
+
+#
+# run case on previous cluster
+#
+def runOnPreviousCluster(host, config, fileName):
+    print("enter run on previous cluster")
+
+    # load case module
+    sep = "/"
+    if platform.system().lower() == 'windows':
+        sep = os.sep
+    moduleName = fileName.replace(".py", "").replace(sep, ".")
+    uModule = importlib.import_module(moduleName)
+    case = uModule.TDTestCase()
+
+    # create conn
+    conn = taos.connect(host, config)
+
+    # run case
+    case.init(conn, False)
+    try:
+        case.run()
+    except Exception as e:
+        tdLog.notice(repr(e))
+        tdLog.exit("%s failed" % (fileName))
+    # stop
+    case.stop()
+
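+# A case module passed to this runner is expected to expose a TDTestCase
+# class with the hooks used above (sketch, comments only):
+#
+#   class TDTestCase:
+#       def init(self, conn, logSql): ...
+#       def run(self): ...
+#       def stop(self): ...
+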
diff --git a/tests/army/test.py b/tests/army/test.py new file mode 100644 index 0000000000..cc114e0d16 --- /dev/null +++ b/tests/army/test.py @@ -0,0 +1,688 @@ +#!/usr/bin/python +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### +# install pip +# pip install src/connector/python/ + +# -*- coding: utf-8 -*- +import os +import sys +import getopt +import subprocess +import time +import base64 +import json +import platform +import socket +import threading +import importlib + +import toml + +from frame.log import * +from frame.dnodes import * +from frame.cases import * +from frame.cluster import * +from frame.taosadapter import * + +import taos +import taosrest +import taosws + +def checkRunTimeError(): + import win32gui + timeCount = 0 + while 1: + time.sleep(1) + timeCount = timeCount + 1 + print("checkRunTimeError",timeCount) + if (timeCount>600): + print("stop the test.") + os.system("TASKKILL /F /IM taosd.exe") + os.system("TASKKILL /F /IM taos.exe") + os.system("TASKKILL /F /IM tmq_sim.exe") + os.system("TASKKILL /F /IM mintty.exe") + os.system("TASKKILL /F /IM python.exe") + quit(0) + hwnd = win32gui.FindWindow(None, "Microsoft Visual C++ Runtime Library") + if hwnd: + os.system("TASKKILL /F /IM taosd.exe") + +# +# run case on previous cluster +# +def runOnPreviousCluster(host, config, fileName): + print("enter run on previous") + + # load case module + sep = "/" + if platform.system().lower() == 'windows': + sep = os.sep + moduleName = fileName.replace(".py", "").replace(sep, ".") + uModule = importlib.import_module(moduleName) + case = uModule.TDTestCase() + + # create conn + conn = taos.connect(host, config) + + # run case + case.init(conn, False) + try: + case.run() + except Exception as e: + tdLog.notice(repr(e)) + tdLog.exit("%s failed" % (fileName)) + # stop + case.stop() + + +if __name__ == "__main__": + + # + # parse parameters + # + fileName = "all" + deployPath = "" + masterIp = "" + testCluster = False + valgrind = 0 + killValgrind = 1 + logSql = True + stop = 0 + restart = False + dnodeNums = 1 + mnodeNums = 0 + updateCfgDict = {} + adapter_cfg_dict = {} + execCmd = "" + queryPolicy = 1 + createDnodeNums = 1 + restful = False + websocket = False + replicaVar = 1 + asan = False + independentMnode = False + previousCluster = False + opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:RWD:n:i:aP', [ + 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd','dnodeNums','mnodeNums','queryPolicy','createDnodeNums','restful','websocket','adaptercfgupdate','replicaVar','independentMnode','previous']) + for key, value in opts: + if key in ['-h', '--help']: + tdLog.printNoPrefix( + 'A collection of test cases written using Python') + tdLog.printNoPrefix('-f Name of test case file written by Python') + tdLog.printNoPrefix('-p Deploy Path for Simulator') + tdLog.printNoPrefix('-m Master Ip for Simulator') + tdLog.printNoPrefix('-l logSql Flag') + tdLog.printNoPrefix('-s stop All dnodes') + tdLog.printNoPrefix('-c Test Cluster Flag') + tdLog.printNoPrefix('-g valgrind Test Flag') + tdLog.printNoPrefix('-r taosd restart test') + tdLog.printNoPrefix('-d update cfg dict, base64 json str') + tdLog.printNoPrefix('-k do not kill valgrind process') + tdLog.printNoPrefix('-e eval str to run') + tdLog.printNoPrefix('-N start dnodes numbers in clusters') + tdLog.printNoPrefix('-M create mnode numbers in
clusters') + tdLog.printNoPrefix('-Q set queryPolicy in one dnode') + tdLog.printNoPrefix('-C create Dnode Numbers in one cluster') + tdLog.printNoPrefix('-R use restful connection') + tdLog.printNoPrefix('-W websocket connection') + tdLog.printNoPrefix('-D taosadapter update cfg dict ') + tdLog.printNoPrefix('-n the number of replicas') + tdLog.printNoPrefix('-i independent mnode mode') + tdLog.printNoPrefix('-a address sanitizer mode') + tdLog.printNoPrefix('-P run case on the [P]revious cluster; do not create a new cluster for the case.') + + sys.exit(0) + + if key in ['-r', '--restart']: + restart = True + + if key in ['-f', '--file']: + fileName = value + + if key in ['-p', '--path']: + deployPath = value + + if key in ['-m', '--master']: + masterIp = value + + if key in ['-l', '--logSql']: + if (value.upper() == "TRUE"): + logSql = True + elif (value.upper() == "FALSE"): + logSql = False + else: + tdLog.printNoPrefix("logSql value %s is invalid" % logSql) + sys.exit(0) + + if key in ['-c', '--cluster']: + testCluster = True + + if key in ['-g', '--valgrind']: + valgrind = 1 + + if key in ['-s', '--stop']: + stop = 1 + + if key in ['-d', '--updateCfgDict']: + try: + updateCfgDict = eval(base64.b64decode(value.encode()).decode()) + except: + print('updateCfgDict convert failed.') + sys.exit(0) + + if key in ['-k', '--killValgrind']: + killValgrind = 0 + + if key in ['-e', '--execCmd']: + try: + execCmd = base64.b64decode(value.encode()).decode() + except: + print('execCmd run failed.') + sys.exit(0) + + if key in ['-N', '--dnodeNums']: + dnodeNums = value + + if key in ['-M', '--mnodeNums']: + mnodeNums = value + + if key in ['-Q', '--queryPolicy']: + queryPolicy = value + + if key in ['-C', '--createDnodeNums']: + createDnodeNums = value + + if key in ['-i', '--independentMnode']: + independentMnode = value + + if key in ['-R', '--restful']: + restful = True + + if key in ['-W', '--websocket']: + websocket = True + + if key in ['-a', '--asan']: + asan = True + + if key in ['-D', '--adaptercfgupdate']: + try: + adaptercfgupdate = eval(base64.b64decode(value.encode()).decode()) + except: + print('adapter cfg update convert failed.') + sys.exit(0) + + if key in ['-n', '--replicaVar']: + replicaVar = value + + if key in ['-P', '--previous']: + previousCluster = True + + # + # do execCmd command + # + if not execCmd == "": + if restful or websocket: + tAdapter.init(deployPath) + else: + tdDnodes.init(deployPath) + print(execCmd) + exec(execCmd) + quit() + + # + # do stop option + # + if (stop != 0): + if (valgrind == 0): + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled + + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled + processID = subprocess.check_output(psCmd, shell=True) + + while(processID): + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output(psCmd, shell=True) + + for port in range(6030, 6041): + usePortPID = "lsof -i tcp:%d | grep LISTEN | awk '{print $2}'" % port + processID = subprocess.check_output(usePortPID, shell=True) + + if processID: + killCmd = "kill -TERM %s" % processID + os.system(killCmd) + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) + if valgrind: + time.sleep(2) + + if restful or websocket: + toBeKilled = "taosadapter" + + # killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled + killCmd = f"pkill {toBeKilled}" + + psCmd = "ps
-ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled + # psCmd = f"pgrep {toBeKilled}" + processID = subprocess.check_output(psCmd, shell=True) + + while(processID): + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output(psCmd, shell=True) + + port = 6041 + usePortPID = f"lsof -i tcp:{port} | grep LISTEN | awk '{{print $2}}'" + processID = subprocess.check_output(usePortPID, shell=True) + + if processID: + killCmd = f"kill -TERM {processID}" + os.system(killCmd) + fuserCmd = f"fuser -k -n tcp {port}" + os.system(fuserCmd) + + tdLog.info('stop taosadapter') + + tdLog.info('stop All dnodes') + + # + # get hostname + # + if masterIp == "": + host = socket.gethostname() + else: + try: + config = eval(masterIp) + host = config["host"] + except Exception as r: + host = masterIp + tdLog.info("Procedures for tdengine deployed in %s" % (host)) + + # + # do previousCluster option + # + if previousCluster: + tdDnodes.init(deployPath, masterIp) + runOnPreviousCluster(host, tdDnodes.getSimCfgPath(), fileName) + tdLog.info("run on previous cluster end.") + quit() + + # + # windows run + # + if platform.system().lower() == 'windows': + fileName = fileName.replace("/", os.sep) + if (masterIp == "" and not fileName == "0-others\\udf_create.py"): + threading.Thread(target=checkRunTimeError,daemon=True).start() + tdLog.info("Procedures for testing self-deployment") + tdDnodes.init(deployPath, masterIp) + tdDnodes.setTestCluster(testCluster) + tdDnodes.setValgrind(valgrind) + tdDnodes.stopAll() + key_word = 'tdCases.addWindows' + is_test_framework = 0 + try: + if key_word in open(fileName, encoding='UTF-8').read(): + is_test_framework = 1 + except Exception as r: + print(r) + updateCfgDictStr = '' + # adapter_cfg_dict_str = '' + if is_test_framework: + moduleName = fileName.replace(".py", "").replace(os.sep, ".") + uModule = importlib.import_module(moduleName) + try: + ucase = uModule.TDTestCase() + if ((json.dumps(updateCfgDict) == '{}') and hasattr(ucase, 'updatecfgDict')): + updateCfgDict = ucase.updatecfgDict + updateCfgDictStr = "-d %s"%base64.b64encode(json.dumps(updateCfgDict).encode()).decode() + if ((json.dumps(adapter_cfg_dict) == '{}') and hasattr(ucase, 'taosadapter_cfg_dict')): + adapter_cfg_dict = ucase.taosadapter_cfg_dict + # adapter_cfg_dict_str = f"-D {base64.b64encode(toml.dumps(adapter_cfg_dict).encode()).decode()}" + except Exception as r: + print(r) + else: + pass + # if restful: + tAdapter.init(deployPath, masterIp) + tAdapter.stop(force_kill=True) + + if dnodeNums == 1 : + tdDnodes.deploy(1,updateCfgDict) + tdDnodes.start(1) + tdCases.logSql(logSql) + if restful or websocket: + tAdapter.deploy(adapter_cfg_dict) + tAdapter.start() + + if queryPolicy != 1: + queryPolicy=int(queryPolicy) + if restful: + conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc") + elif websocket: + conn = taosws.connect(f"taosws://root:taosdata@{host}:6041") + else: + conn = taos.connect(host,config=tdDnodes.getSimCfgPath()) + + cursor = conn.cursor() + cursor.execute("create qnode on dnode 1") + cursor.execute(f'alter local "queryPolicy" "{queryPolicy}"') + cursor.execute("show local variables") + res = cursor.fetchall() + for i in range(cursor.rowcount): + if res[i][0] == "queryPolicy" : + if int(res[i][1]) == int(queryPolicy): + tdLog.info(f'alter queryPolicy to {queryPolicy} successfully') + else: + tdLog.debug(res) + tdLog.exit(f"alter queryPolicy to {queryPolicy} failed") + else : + tdLog.debug("create a cluster with %s nodes and make %s dnode as independent
mnode"%(dnodeNums,mnodeNums)) + dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums, mnodeNums=mnodeNums, independentMnode=independentMnode) + tdDnodes = ClusterDnodes(dnodeslist) + tdDnodes.init(deployPath, masterIp) + tdDnodes.setTestCluster(testCluster) + tdDnodes.setValgrind(valgrind) + tdDnodes.stopAll() + for dnode in tdDnodes.dnodes: + tdDnodes.deploy(dnode.index, updateCfgDict) + for dnode in tdDnodes.dnodes: + tdDnodes.starttaosd(dnode.index) + tdCases.logSql(logSql) + + if restful or websocket: + tAdapter.deploy(adapter_cfg_dict) + tAdapter.start() + + if restful: + conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc") + elif websocket: + conn = taosws.connect(f"taosws://root:taosdata@{host}:6041") + else: + conn = taos.connect(host,config=tdDnodes.getSimCfgPath()) + # tdLog.info(tdDnodes.getSimCfgPath(),host) + if createDnodeNums == 1: + createDnodeNums=dnodeNums + else: + createDnodeNums=createDnodeNums + cluster.create_dnode(conn,createDnodeNums) + cluster.create_mnode(conn,mnodeNums) + try: + if cluster.check_dnode(conn) : + print("check dnode ready") + except Exception as r: + print(r) + if queryPolicy != 1: + queryPolicy=int(queryPolicy) + if restful: + conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc") + elif websocket: + conn = taosws.connect(f"taosws://root:taosdata@{host}:6041") + else: + conn = taos.connect(host,config=tdDnodes.getSimCfgPath()) + + cursor = conn.cursor() + cursor.execute("create qnode on dnode 1") + cursor.execute(f'alter local "queryPolicy" "{queryPolicy}"') + cursor.execute("show local variables") + res = cursor.fetchall() + for i in range(cursor.rowcount): + if res[i][0] == "queryPolicy" : + if int(res[i][1]) == int(queryPolicy): + tdLog.info(f'alter queryPolicy to {queryPolicy} successfully') + else: + tdLog.debug(res) + tdLog.exit(f"alter queryPolicy to {queryPolicy} failed") + + if ucase is not None and hasattr(ucase, 'noConn') and ucase.noConn == True: + conn = None + else: + if restful: + conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc") + elif websocket: + conn = taosws.connect(f"taosws://root:taosdata@{host}:6041") + else: + conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath()) + + if testCluster: + tdLog.info("Procedures for testing cluster") + if fileName == "all": + tdCases.runAllCluster() + else: + tdCases.runOneCluster(fileName) + else: + tdLog.info("Procedures for testing self-deployment") + if restful: + conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc") + elif websocket: + conn = taosws.connect(f"taosws://root:taosdata@{host}:6041") + else: + conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath()) + + if fileName == "all": + tdCases.runAllWindows(conn) + else: + tdCases.runOneWindows(conn, fileName, replicaVar) + + if restart: + if fileName == "all": + tdLog.info("not need to query ") + else: + sp = fileName.rsplit(".", 1) + if len(sp) == 2 and sp[1] == "py": + tdDnodes.stopAll() + tdDnodes.start(1) + time.sleep(1) + if restful: + conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc") + elif websocket: + conn = taosws.connect(f"taosws://root:taosdata@{host}:6041") + else: + conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath()) + tdLog.info("Procedures for tdengine deployed in %s" % (host)) + tdLog.info("query test after taosd restart") + tdCases.runOneWindows(conn, sp[0] + "_" + "restart.py", replicaVar) + else: + tdLog.info("not need to query") + else: + tdDnodes.setKillValgrind(killValgrind) + 
+ tdDnodes.init(deployPath, masterIp) + tdDnodes.setTestCluster(testCluster) + tdDnodes.setValgrind(valgrind) + tdDnodes.setAsan(asan) + tdDnodes.stopAll() + is_test_framework = 0 + key_word = 'tdCases.addLinux' + try: + if key_word in open(fileName).read(): + is_test_framework = 1 + except: + pass + if is_test_framework: + moduleName = fileName.replace(".py", "").replace("/", ".") + uModule = importlib.import_module(moduleName) + try: + ucase = uModule.TDTestCase() + if (json.dumps(updateCfgDict) == '{}'): + updateCfgDict = ucase.updatecfgDict + if (json.dumps(adapter_cfg_dict) == '{}'): + adapter_cfg_dict = ucase.taosadapter_cfg_dict + except: + pass + + if restful or websocket: + tAdapter.init(deployPath, masterIp) + tAdapter.stop(force_kill=True) + + if dnodeNums == 1 : + # dnode is one + tdDnodes.deploy(1,updateCfgDict) + tdDnodes.start(1) + tdCases.logSql(logSql) + + if restful or websocket: + tAdapter.deploy(adapter_cfg_dict) + tAdapter.start() + + if queryPolicy != 1: + queryPolicy=int(queryPolicy) + if restful: + conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc") + elif websocket: + conn = taosws.connect(f"taosws://root:taosdata@{host}:6041") + else: + conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath()) + # tdSql.init(conn.cursor()) + # tdSql.execute("create qnode on dnode 1") + # tdSql.execute('alter local "queryPolicy" "%d"'%queryPolicy) + # tdSql.query("show local variables;") + # for i in range(tdSql.queryRows): + # if tdSql.queryResult[i][0] == "queryPolicy" : + # if int(tdSql.queryResult[i][1]) == int(queryPolicy): + # tdLog.info('alter queryPolicy to %d successfully'%queryPolicy) + # else : + # tdLog.debug(tdSql.queryResult) + # tdLog.exit("alter queryPolicy to %d failed"%queryPolicy) + + cursor = conn.cursor() + cursor.execute("create qnode on dnode 1") + cursor.execute(f'alter local "queryPolicy" "{queryPolicy}"') + cursor.execute("show local variables") + res = cursor.fetchall() + for i in range(cursor.rowcount): + if res[i][0] == "queryPolicy" : + if int(res[i][1]) == int(queryPolicy): + tdLog.info(f'alter queryPolicy to {queryPolicy} successfully') + else: + tdLog.debug(res) + tdLog.exit(f"alter queryPolicy to {queryPolicy} failed") + + else : + # dnode > 1 cluster + tdLog.debug("create a cluster with %s nodes and make %s dnode as independent mnode"%(dnodeNums,mnodeNums)) + print(independentMnode,"independentMnode value") + dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums, mnodeNums=mnodeNums, independentMnode=independentMnode) + tdDnodes = ClusterDnodes(dnodeslist) + tdDnodes.init(deployPath, masterIp) + tdDnodes.setTestCluster(testCluster) + tdDnodes.setValgrind(valgrind) + tdDnodes.setAsan(asan) + tdDnodes.stopAll() + for dnode in tdDnodes.dnodes: + tdDnodes.deploy(dnode.index,updateCfgDict) + for dnode in tdDnodes.dnodes: + tdDnodes.starttaosd(dnode.index) + tdCases.logSql(logSql) + + if restful or websocket: + tAdapter.deploy(adapter_cfg_dict) + tAdapter.start() + + # create taos connect + if restful: + conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc") + elif websocket: + conn = taosws.connect(f"taosws://root:taosdata@{host}:6041") + else: + conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath()) + print(tdDnodes.getSimCfgPath(),host) + if createDnodeNums == 1: + createDnodeNums=dnodeNums + else: + createDnodeNums=createDnodeNums + cluster.create_dnode(conn,createDnodeNums) + cluster.create_mnode(conn,mnodeNums) + + try: + if cluster.check_dnode(conn) : + print("check dnode ready") + except
Exception as r: + print(r) + + # do queryPolicy option + if queryPolicy != 1: + queryPolicy=int(queryPolicy) + if restful: + conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc") + elif websocket: + conn = taosws.connect(f"taosws://root:taosdata@{host}:6041") + else: + conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath()) + + cursor = conn.cursor() + cursor.execute("create qnode on dnode 1") + cursor.execute(f'alter local "queryPolicy" "{queryPolicy}"') + cursor.execute("show local variables") + res = cursor.fetchall() + for i in range(cursor.rowcount): + if res[i][0] == "queryPolicy" : + if int(res[i][1]) == int(queryPolicy): + tdLog.info(f'alter queryPolicy to {queryPolicy} successfully') + else: + tdLog.debug(res) + tdLog.exit(f"alter queryPolicy to {queryPolicy} failed") + + + # run case + if testCluster: + tdLog.info("Procedures for testing cluster") + if fileName == "all": + tdCases.runAllCluster() + else: + tdCases.runOneCluster(fileName) + else: + tdLog.info("Procedures for testing self-deployment") + if restful: + conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc") + elif websocket: + conn = taosws.connect(f"taosws://root:taosdata@{host}:6041") + else: + conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath()) + + if fileName == "all": + tdCases.runAllLinux(conn) + else: + tdCases.runOneLinux(conn, fileName, replicaVar) + + # do restart option + if restart: + if fileName == "all": + tdLog.info("no need to query") + else: + sp = fileName.rsplit(".", 1) + if len(sp) == 2 and sp[1] == "py": + tdDnodes.stopAll() + tdDnodes.start(1) + time.sleep(1) + if restful: + conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc") + elif websocket: + conn = taosws.connect(f"taosws://root:taosdata@{host}:6041") + else: + conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath()) + tdLog.info("Procedures for tdengine deployed in %s" % (host)) + tdLog.info("query test after taosd restart") + tdCases.runOneLinux(conn, sp[0] + "_" + "restart.py", replicaVar) + else: + tdLog.info("no need to query") + + # close connection at the end + if conn is not None: + conn.close() + if asan: + tdDnodes.StopAllSigint() + tdLog.info("Address sanitizer mode finished") + sys.exit(0) diff --git a/tests/docs-examples-test/csharp.sh b/tests/docs-examples-test/csharp.sh index c08ffd6d62..9d7867d829 100644 --- a/tests/docs-examples-test/csharp.sh +++ b/tests/docs-examples-test/csharp.sh @@ -11,10 +11,9 @@ dotnet run --project connect/connect.csproj taos -s "drop database if exists power" dotnet run --project sqlInsert/sqlinsert.csproj dotnet run --project query/query.csproj -dotnet run --project asyncQuery/asyncquery.csproj -dotnet run --project subscribe/subscribe.csproj +#dotnet run --project subscribe/subscribe.csproj -taos -s "drop topic if exists topic_example" +#taos -s "drop topic if exists topic_example" taos -s "drop database if exists power" dotnet run --project stmtInsert/stmtinsert.csproj @@ -28,10 +27,12 @@ taos -s "drop database if exists test" dotnet run --project optsJSON/optsJSON.csproj taos -s "create database if not exists test" +taos -s "drop database if exists power" dotnet run --project wsConnect/wsConnect.csproj dotnet run --project wsInsert/wsInsert.csproj -dotnet run --project wsStmt/wsStmt.csproj dotnet run --project wsQuery/wsQuery.csproj +taos -s "drop database if exists power" +dotnet run --project wsStmt/wsStmt.csproj taos -s "drop database if exists test" taos -s "drop database if exists power" diff --git
a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 549e24b0a6..d0961830e4 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -5,6 +5,9 @@ #unit-test ,,y,unit-test,bash test.sh +#army-test +,,y,army,./pytest.sh python3 ./test.py -f empty.py + #system test ,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/stream_basic.py ,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/scalar_function.py diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index c05df0a852..99d166ee33 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -78,7 +78,7 @@ class TDSql: self.cursor.execute(s) time.sleep(2) - def error(self, sql, expectedErrno = None, expectErrInfo = None): + def error(self, sql, expectedErrno = None, expectErrInfo = None, fullMatched = True): caller = inspect.getframeinfo(inspect.stack()[1][0]) expectErrNotOccured = True @@ -97,21 +97,30 @@ class TDSql: self.queryCols = 0 self.queryResult = None - if expectedErrno != None: - if expectedErrno == self.errno: - tdLog.info("sql:%s, expected errno %s occured" % (sql, expectedErrno)) - else: - tdLog.exit("%s(%d) failed: sql:%s, errno %s occured, but not expected errno %s" % (caller.filename, caller.lineno, sql, self.errno, expectedErrno)) - else: - tdLog.info("sql:%s, expect error occured" % (sql)) + if fullMatched: + if expectedErrno != None: + if expectedErrno == self.errno: + tdLog.info("sql:%s, expected errno %s occurred" % (sql, expectedErrno)) + else: + tdLog.exit("%s(%d) failed: sql:%s, errno '%s' occurred, but not expected errno '%s'" % (caller.filename, caller.lineno, sql, self.errno, expectedErrno)) - if expectErrInfo != None: - if expectErrInfo == self.error_info or expectErrInfo in self.error_info: - tdLog.info("sql:%s, expected expectErrInfo %s occured" % (sql, expectErrInfo)) - else: - tdLog.exit("%s(%d) failed: sql:%s, expectErrInfo %s occured, but not expected errno %s" % (caller.filename, caller.lineno, sql, self.error_info, expectErrInfo)) + if expectErrInfo != None: + if expectErrInfo == self.error_info: + tdLog.info("sql:%s, expected expectErrInfo '%s' occurred" % (sql, expectErrInfo)) + else: + tdLog.exit("%s(%d) failed: sql:%s, expectErrInfo '%s' occurred, but not expected expectErrInfo '%s'" % (caller.filename, caller.lineno, sql, self.error_info, expectErrInfo)) else: - tdLog.info("sql:%s, expect error occured" % (sql)) + if expectedErrno != None: + if expectedErrno == self.errno: + tdLog.info("sql:%s, expected errno %s occurred" % (sql, expectedErrno)) + else: + tdLog.exit("%s(%d) failed: sql:%s, errno '%s' occurred, but not expected errno '%s'" % (caller.filename, caller.lineno, sql, self.errno, expectedErrno)) + + if expectErrInfo != None: + if expectErrInfo in self.error_info: + tdLog.info("sql:%s, expected expectErrInfo '%s' occurred" % (sql, expectErrInfo)) + else: + tdLog.exit("%s(%d) failed: sql:%s, expectErrInfo '%s' occurred, but not expected expectErrInfo '%s'" % (caller.filename, caller.lineno, sql, self.error_info, expectErrInfo)) return self.error_info
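+ +# Illustrative use of the new fullMatched flag (comment only, not part of the +# original patch): with fullMatched=False the check passes when expectErrInfo is +# merely a substring of the reported error message, e.g. +# tdSql.error("insert into test_t values(today() + 2n, 7);", expectErrInfo="syntax error", fullMatched=False)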
diff --git a/tests/system-test/1-insert/insert_timestamp.py b/tests/system-test/1-insert/insert_timestamp.py index 621912f664..bc3b1c0275 100644 --- a/tests/system-test/1-insert/insert_timestamp.py +++ b/tests/system-test/1-insert/insert_timestamp.py @@ -1,4 +1,5 @@ -import sys +import datetime +import sys from util.log import * from util.cases import * from util.sql import * @@ -11,57 +12,138 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), True) - #def prepare_data(self): - - def run(self): - tdSql.execute("create database test_insert_timestamp;") + """ + Rules for timestamp values on insert: + For a column of timestamp type, only the following forms are accepted: + timestamp + timestamp +/- interval + interval + timestamp + A timestamp can be a string, e.g. "2023-12-05 00:00:00.000", or an integer, e.g. 1701619200000 + Supported interval units: b, u, a, s, m, h, d, w (n and y are not supported), e.g. 1h, 2d + + Only binary expressions are supported, e.g. timestamp + 2h; expressions with more operands, e.g. timestamp + 2h + 1d, are not + """
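+ # e.g. (exercised by the cases below): '2023-12-05 23:59:59.999' + 2a is + # accepted, while now + today() or today() + 1d + 1s fail with "syntax error".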
+ + tdSql.execute("create database test_insert_timestamp PRECISION 'ns';") + tdSql.execute("use test_insert_timestamp;") + tdSql.execute("create stable st(ts timestamp, c1 int) tags(id int);") + tdSql.execute("create table test_t using st tags(1);") - tdSql.error("insert into test_t values(now + today(), 1 ); ") - tdSql.error("insert into test_t values(now - today(), 1 ); ") - tdSql.error("insert into test_t values(today() + now(), 1 ); ") - tdSql.error("insert into test_t values(today() - now(), 1 ); ") - tdSql.error("insert into test_t values(2h - now(), 1 ); ") - tdSql.error("insert into test_t values(2h - today(), 1 ); ") - tdSql.error("insert into test_t values(2h - 1h, 1 ); ") - tdSql.error("insert into test_t values(2h + 1h, 1 ); ") - tdSql.error("insert into test_t values('2023-11-28 00:00:00.000' + '2023-11-28 00:00:00.000', 1 ); ") - tdSql.error("insert into test_t values('2023-11-28 00:00:00.000' + 1701111600000, 1 ); ") - tdSql.error("insert into test_t values(1701111500000 + 1701111600000, 1 ); ") - tdSql.error("insert into test_insert_timestamp.test_t values(1701111600000 + 1h + 1s, 4); ") + expectErrInfo = "syntax error" + # error case: timestamp + timestamp + tdSql.error("insert into test_t values(now + today(), 1 );", expectErrInfo=expectErrInfo, fullMatched=False) + tdSql.error("insert into test_t values(now - today(), 1 );", expectErrInfo=expectErrInfo, fullMatched=False) + tdSql.error("insert into test_t values(today() + now(), 1 ); ", expectErrInfo=expectErrInfo, fullMatched=False) + tdSql.error("insert into test_t values(today() - now(), 1 ); ", expectErrInfo=expectErrInfo, fullMatched=False) + tdSql.error("insert into test_t values('2023-11-28 00:00:00.000' + '2023-11-28 00:00:00.000', 1 ); ", expectErrInfo=expectErrInfo, fullMatched=False) + tdSql.error("insert into test_t values('2023-11-28 00:00:00.000' + 1701111600000, 1 ); ", expectErrInfo=expectErrInfo, fullMatched=False) + tdSql.error("insert into test_t values(1701111500000 + 1701111600000, 1 ); ", expectErrInfo=expectErrInfo, fullMatched=False) - tdSql.execute("insert into test_insert_timestamp.test_t values(1701111600000 + 1h, 4); ") - tdSql.execute("insert into test_insert_timestamp.test_t values(2h + 1701111600000, 5); ") - tdSql.execute("insert into test_insert_timestamp.test_t values('2023-11-28 00:00:00.000' + 1h, 1); ") - tdSql.execute("insert into test_insert_timestamp.test_t values(3h + '2023-11-28 00:00:00.000', 3); ") - tdSql.execute("insert into test_insert_timestamp.test_t values(1701111600000 - 1h, 2); ") - tdSql.execute("insert into test_insert_timestamp.test_t values(1701122400000, 6); ") - tdSql.execute("insert into test_insert_timestamp.test_t values('2023-11-28 07:00:00.000', 7); ") + # error case: timestamp + interval + interval + tdSql.error("insert into test_t values(today() + 1d + 1s, 1);", expectErrInfo=expectErrInfo, fullMatched=False) + + # error case: interval - timestamp + tdSql.error("insert into test_t values(2h - now(), 1 ); ", expectErrInfo=expectErrInfo, fullMatched=False) + tdSql.error("insert into test_t values(2h - today(), 1 ); ", expectErrInfo=expectErrInfo, fullMatched=False) + + # error case: interval + interval + tdSql.error("insert into test_t values(2h - 1h, 1 ); ", expectErrInfo=expectErrInfo, fullMatched=False) + tdSql.error("insert into test_t values(2h + 1h, 1 ); ", expectErrInfo=expectErrInfo, fullMatched=False) + + # error case: illegal interval unit n + tdSql.error("insert into test_t values(today() + 2n, 7); ", expectErrInfo=expectErrInfo, fullMatched=False) + + # error case: illegal interval unit y + tdSql.error("insert into test_t values(today() - 2y, 8);", expectErrInfo=expectErrInfo, fullMatched=False) + + # error case: wrong data type + tdSql.error("insert into test_t values('a1701619200000', 8);", expectErrInfo=expectErrInfo, fullMatched=False) + tdSql.error("insert into test_t values('ss2023-12-05 00:00:00.000' + '1701619200000', 1);", expectErrInfo=expectErrInfo, fullMatched=False) + tdSql.error("insert into test_t values(123456, 1);", expectErrInfo="Timestamp data out of range") + tdSql.error("insert into test_t values(123.456, 1);", expectErrInfo=expectErrInfo, fullMatched=False) + tdSql.error("insert into test_t values(True, 1);", expectErrInfo=expectErrInfo, fullMatched=False) + tdSql.error("insert into test_t values(None, 1);", expectErrInfo=expectErrInfo, fullMatched=False) + tdSql.error("insert into test_t values(null, 1);", expectErrInfo=expectErrInfo, fullMatched=False) + + # error case: malformed timestamp + tdSql.error("insert into test_t values('2023-122-05 00:00:00.000' + '1701619200000', 1);", expectErrInfo=expectErrInfo, fullMatched=False) + tdSql.error("insert into test_t values('2023-12--05 00:00:00.000' + '1701619200000', 1);", expectErrInfo=expectErrInfo, fullMatched=False) + tdSql.error("insert into test_t values('12/12/2023' + 10a, 1);", expectErrInfo=expectErrInfo, fullMatched=False) + tdSql.error("insert into test_t values(1701619200000111, 1);", expectErrInfo="Timestamp data out of range", fullMatched=False) + + # normal case: timestamp + interval + tdSql.execute("insert into test_t values(today() + 2b, 1);") + tdSql.execute("insert into test_t values(1701619200000000000 + 2u, 2);") + tdSql.execute("insert into test_t values(today + 2a, 3);") + tdSql.execute("insert into test_t values('2023-12-05 23:59:59.999' + 2a, 4);") + tdSql.execute("insert into test_t values(1701921599000000000 + 3a, 5);") + + # normal case: timestamp - interval + tdSql.execute("insert into test_t values(today() - 2s, 6);") + tdSql.execute("insert into test_t values(now() - 2m, 7);") + tdSql.execute("insert into test_t values(today - 2h, 8);") + tdSql.execute("insert into test_t values('2023-12-05 00:00:00.000000000' - 2a, 9);") + tdSql.execute("insert into test_t values(1701669000000000000 - 2a, 10);") + + # normal case: interval + timestamp + tdSql.execute("insert into test_t values(2d + now, 11);") + tdSql.execute("insert into test_t values(2w + today, 12);") + + # normal case: timestamp + tdSql.execute("insert into test_t values('2023-12-05 00:00:00.000', 13);") + tdSql.execute("insert into test_t values(1701629100000000000, 14);") + tdSql.execute("insert into test_t values(now() + 2s, 15);") + tdSql.execute("insert into test_t values('2023-12-05 00:00:59.999999999+07:00' + 10a, 16);") + tdSql.execute("insert into test_t values('2023-12-05T00:00:59.110+07:00' + 10a, 17);") + tdSql.execute("insert into test_t values('2023-12-05' + 10a, 18);") + tdSql.execute("insert into test_t values('2023-11-15', -15);") + tdSql.execute("insert into test_t values(1701619200000000000 - 2a, -10);") + tdSql.execute("insert into test_t values(1701619200000000000, -5);") + tdSql.execute("insert into test_t values('2023-12-05 12:12:12' + 10a, 19);") + + # verify data
+ tdSql.query(f'select ts,c1 from test_t order by c1;') + tdSql.checkRows(22) + tdSql.checkEqual(tdSql.queryResult[0][0], 1699977600000000000) # c1=-15 + tdSql.checkEqual(tdSql.queryResult[1][0], 1701619199998000000) # c1=-10 + tdSql.checkEqual(tdSql.queryResult[2][0], 1701619200000000000) # c1=-5 + tdSql.checkEqual(tdSql.queryResult[3][0], self.__get_today_ts() + 2) # c1=1 + tdSql.checkEqual(tdSql.queryResult[4][0], 1701619200000002000) # c1=2 + tdSql.checkEqual(tdSql.queryResult[5][0], self.__get_today_ts() + 2000000) # c1=3 + tdSql.checkEqual(tdSql.queryResult[6][0], 1701792000001000000) # c1=4 + tdSql.checkEqual(tdSql.queryResult[7][0], 1701921599003000000) # c1=5 + tdSql.checkEqual(tdSql.queryResult[8][0], self.__get_today_ts() - 2000000000) # c1=6 + tdSql.checkEqual(self.__convert_ts_to_date(tdSql.queryResult[9][0]), str(datetime.date.today())) # c1=7 + tdSql.checkEqual(tdSql.queryResult[10][0], self.__get_today_ts() - 7200000000000) # c1=8 + tdSql.checkEqual(tdSql.queryResult[11][0], 1701705599998000000) # c1=9 + tdSql.checkEqual(tdSql.queryResult[12][0], 1701668999998000000) # c1=10 + tdSql.checkEqual(self.__convert_ts_to_date(tdSql.queryResult[13][0]), str(datetime.date.today() + datetime.timedelta(days=2))) # c1=11 + tdSql.checkEqual(self.__convert_ts_to_date(tdSql.queryResult[14][0]), str(datetime.date.today() + datetime.timedelta(days=14))) # c1=12 + tdSql.checkEqual(tdSql.queryResult[15][0], 1701705600000000000) # c1=13 + tdSql.checkEqual(tdSql.queryResult[16][0], 1701629100000000000) # c1=14 + tdSql.checkEqual(self.__convert_ts_to_date(tdSql.queryResult[17][0]), str(datetime.date.today())) # c1=15 + tdSql.checkEqual(tdSql.queryResult[18][0], 1701709260009999999) # c1=16 + tdSql.checkEqual(tdSql.queryResult[19][0], 1701709259120000000) # c1=17 + tdSql.checkEqual(tdSql.queryResult[20][0], 1701705600010000000) # c1=18 + tdSql.checkEqual(tdSql.queryResult[21][0], 1701749532010000000) # c1=19 - tdSql.query(f'select ts, c1 from test_t order by ts;') - tdSql.checkRows(7) - tdSql.checkEqual(tdSql.queryResult[0][0], datetime.datetime(2023, 11, 28, 1, 0, 0) ) - tdSql.checkEqual(tdSql.queryResult[0][1], 1) - tdSql.checkEqual(tdSql.queryResult[1][0], datetime.datetime(2023, 11, 28, 2, 0, 0) ) - tdSql.checkEqual(tdSql.queryResult[1][1], 2) - tdSql.checkEqual(tdSql.queryResult[2][0], datetime.datetime(2023, 11, 28, 3, 0, 0) ) - tdSql.checkEqual(tdSql.queryResult[2][1], 3) - tdSql.checkEqual(tdSql.queryResult[3][0], datetime.datetime(2023, 11, 28, 4, 0, 0) ) - tdSql.checkEqual(tdSql.queryResult[3][1], 4) - tdSql.checkEqual(tdSql.queryResult[4][0], datetime.datetime(2023, 11, 28, 5, 0, 0) ) - tdSql.checkEqual(tdSql.queryResult[4][1], 5) - tdSql.checkEqual(tdSql.queryResult[5][0], datetime.datetime(2023, 11, 28, 6, 0, 0) ) - tdSql.checkEqual(tdSql.queryResult[5][1], 6) - tdSql.checkEqual(tdSql.queryResult[6][0], datetime.datetime(2023, 11, 28, 7, 0, 0) ) - tdSql.checkEqual(tdSql.queryResult[6][1], 7) - tdSql.execute("drop table if exists test_t ;") tdSql.execute("drop stable if exists st;") tdSql.execute("drop database if exists test_insert_timestamp;") - + + def __convert_ts_to_date(self, ts: int) -> str: + # build a datetime object from the nanosecond timestamp + dt_object = datetime.datetime.fromtimestamp(ts / 1e9) + + # format as a date string + formatted_date = dt_object.strftime('%Y-%m-%d') + # print("converted date:", formatted_date) + return formatted_date + + def __get_today_ts(self) -> int: + return int(time.mktime(time.strptime(str(datetime.date.today()), "%Y-%m-%d"))) * 1000000000 + def stop(self): tdSql.close() tdLog.success("%s 
successfully executed" % __file__) diff --git a/tests/system-test/pytest.sh b/tests/system-test/pytest.sh index 2c95516d10..bae0fdf278 100755 --- a/tests/system-test/pytest.sh +++ b/tests/system-test/pytest.sh @@ -58,6 +58,12 @@ echo "SIM_DIR : $SIM_DIR" echo "CODE_DIR : $CODE_DIR" echo "ASAN_DIR : $ASAN_DIR" +# prevent delete / folder or /usr/bin +if [ ${#SIM_DIR} -lt 10 ]; then + echo "len(SIM_DIR) < 10 , danger so exit. SIM_DIR=$SIM_DIR" + exit 1 +fi + rm -rf $SIM_DIR/* mkdir -p $PRG_DIR diff --git a/tests/taosc_test/CMakeLists.txt b/tests/taosc_test/CMakeLists.txt new file mode 100644 index 0000000000..3ea6964462 --- /dev/null +++ b/tests/taosc_test/CMakeLists.txt @@ -0,0 +1,28 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) +PROJECT(TDengine) + +FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest /usr/local/taos/include) +FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib /usr/lib64 /usr/local/taos/driver/) +FIND_LIBRARY(LIB_GTEST_SHARED_DIR libgtest.so /usr/lib/ /usr/local/lib /usr/lib64 /usr/local/taos/driver/) + +IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR)) + MESSAGE(STATUS "gTest library found, build os test") + + INCLUDE_DIRECTORIES(${HEADER_GTEST_INCLUDE_DIR}) + AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) + +ENDIF() + +aux_source_directory(src OS_SRC) +# taoscTest +add_executable(taoscTest "taoscTest.cpp") +target_link_libraries(taoscTest taos os gtest_main) +target_include_directories( + taoscTest + PUBLIC "${TD_SOURCE_DIR}/include/os" +) +add_test( + NAME taoscTest + COMMAND taoscTest +) + diff --git a/tests/taosc_test/taoscTest.cpp b/tests/taosc_test/taoscTest.cpp new file mode 100644 index 0000000000..fbdb152ab4 --- /dev/null +++ b/tests/taosc_test/taoscTest.cpp @@ -0,0 +1,265 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
diff --git a/tests/taosc_test/taoscTest.cpp b/tests/taosc_test/taoscTest.cpp new file mode 100644 index 0000000000..fbdb152ab4 --- /dev/null +++ b/tests/taosc_test/taoscTest.cpp @@ -0,0 +1,265 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <gtest/gtest.h> +#include <iostream> + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wwrite-strings" +#pragma GCC diagnostic ignored "-Wunused-function" +#pragma GCC diagnostic ignored "-Wunused-variable" +#pragma GCC diagnostic ignored "-Wsign-compare" +#pragma GCC diagnostic ignored "-Wformat" +#pragma GCC diagnostic ignored "-Wint-to-pointer-cast" +#pragma GCC diagnostic ignored "-Wpointer-arith" + +#include "os.h" +#include "taos.h" + +class taoscTest : public ::testing::Test { + protected: + static void SetUpTestCase() { + printf("start test setup.\n"); + TAOS* taos = taos_connect("localhost", "root", "taosdata", NULL, 0); + ASSERT_TRUE(taos != nullptr); + + TAOS_RES* res = taos_query(taos, "drop database IF EXISTS taosc_test_db;"); + if (taos_errno(res) != 0) { + printf("error in drop database taosc_test_db, reason:%s\n", taos_errstr(res)); + return; + } + taosSsleep(5); + taos_free_result(res); + printf("drop database taosc_test_db, finished.\n"); + + res = taos_query(taos, "create database taosc_test_db;"); + if (taos_errno(res) != 0) { + printf("error in create database taosc_test_db, reason:%s\n", taos_errstr(res)); + return; + } + taosSsleep(5); + taos_free_result(res); + printf("create database taosc_test_db, finished.\n"); + + taos_close(taos); + } + + static void TearDownTestCase() {} + + void SetUp() override {} + + void TearDown() override {} +}; + +tsem_t query_sem; +int getRecordCounts = 0; +int insertCounts = 100; +void* pUserParam = NULL; + +void fetchCallback(void* param, void* res, int32_t numOfRow) { + ASSERT_TRUE(numOfRow >= 0); + if (numOfRow == 0) { + printf("completed\n"); + taos_free_result(res); + tsem_post(&query_sem); + return; + } + + printf("numOfRow = %d \n", numOfRow); + int numFields = taos_num_fields(res); + TAOS_FIELD* fields = taos_fetch_fields(res); + + for (int i = 0; i < numOfRow; ++i) { + TAOS_ROW row = taos_fetch_row(res); + char temp[256] = {0}; + taos_print_row(temp, row, fields, numFields); + // printf("%s\n", temp); + } + + getRecordCounts += numOfRow; + taos_fetch_raw_block_a(res, fetchCallback, param); +} + +void queryCallback(void* param, void* res, int32_t code) { + ASSERT_TRUE(code == 0); + ASSERT_TRUE(param == pUserParam); + taos_fetch_raw_block_a(res, fetchCallback, param); +} + +TEST_F(taoscTest, taos_query_a_test) { + char sql[1024] = {0}; + int32_t code = 0; + TAOS* taos = taos_connect("localhost", "root", "taosdata", NULL, 0); + ASSERT_TRUE(taos != nullptr); + + const char* table_name = "taosc_test_table1"; + sprintf(sql, "create table taosc_test_db.%s (ts TIMESTAMP, val INT)", table_name); + TAOS_RES* res = taos_query(taos, sql); + if (taos_errno(res) != 0) { + printf("error in create table %s, reason:%s\n", table_name, taos_errstr(res)); + } + ASSERT_TRUE(taos_errno(res) == 0); + taos_free_result(res); + taosSsleep(2); + + for (int i = 0; i < insertCounts; i++) { + char sql[128]; + sprintf(sql, "insert into taosc_test_db.%s values(now() + %ds, %d)", table_name, i, i); + res = taos_query(taos, sql); + ASSERT_TRUE(taos_errno(res) == 0); + taos_free_result(res); + } + + pUserParam = NULL; + void* tmpParam = pUserParam; + tsem_init(&query_sem, 0, 0); + sprintf(sql, "select * from taosc_test_db.%s;", table_name); + taos_query_a(taos, sql, queryCallback, pUserParam); + tsem_wait(&query_sem); + ASSERT_TRUE(pUserParam == tmpParam); + + ASSERT_EQ(getRecordCounts, insertCounts); + taos_close(taos); + + printf("taos_query_a test finished.\n"); +} + +TEST_F(taoscTest,
taos_query_a_param_test) { + char sql[1024] = {0}; + int32_t code = 0; + TAOS* taos = taos_connect("localhost", "root", "taosdata", NULL, 0); + ASSERT_TRUE(taos != nullptr); + + int pArray[2] = {0, 1}; + pUserParam = NULL; + void* tmpParam = pUserParam; + getRecordCounts = 0; + const char* table_name = "taosc_test_table1"; + tsem_init(&query_sem, 0, 0); + sprintf(sql, "select * from taosc_test_db.%s;", table_name); + taos_query_a(taos, sql, queryCallback, pUserParam); + tsem_wait(&query_sem); + ASSERT_TRUE(pUserParam == tmpParam); + ASSERT_EQ(pArray[0], 0); + ASSERT_EQ(pArray[1], 1); + + ASSERT_EQ(getRecordCounts, insertCounts); + taos_close(taos); + + printf("taos_query_a_param test finished.\n"); +} + +TEST_F(taoscTest, taos_query_test) { + char sql[1024] = {0}; + int32_t code = 0; + TAOS* taos = taos_connect("localhost", "root", "taosdata", NULL, 0); + ASSERT_TRUE(taos != nullptr); + + const char* table_name = "taosc_test_table1"; + sprintf(sql, "select * from taosc_test_db.%s;", table_name); + TAOS_RES* res = taos_query(taos, sql); + ASSERT_TRUE(res != nullptr); + + getRecordCounts = 0; + TAOS_ROW row; + while ((row = taos_fetch_row(res))) { + getRecordCounts++; + } + ASSERT_EQ(getRecordCounts, insertCounts); + taos_free_result(res); + taos_close(taos); + + printf("taos_query test finished.\n"); +} + +void queryCallback2(void* param, void* res, int32_t code) { + ASSERT_TRUE(code == 0); + ASSERT_TRUE(param == pUserParam); + // After using taos_query_a to query, using taos_fetch_row in the callback will cause blocking. + // Reason: schProcessOnCbBegin SCH_LOCK_TASK(pTask) + /* TAOS_ROW row; + while ((row = taos_fetch_row(res))) { + getRecordCounts++; + } */ + tsem_post(&query_sem); + taos_free_result(res); +} + +TEST_F(taoscTest, taos_query_a_t2) { + char sql[1024] = {0}; + int32_t code = 0; + TAOS* taos = taos_connect("localhost", "root", "taosdata", NULL, 0); + ASSERT_TRUE(taos != nullptr); + + getRecordCounts = 0; + + const char* table_name = "taosc_test_table1"; + sprintf(sql, "select * from taosc_test_db.%s;", table_name); + + pUserParam = NULL; + tsem_init(&query_sem, 0, 0); + taos_query_a(taos, sql, queryCallback2, pUserParam); + tsem_timewait(&query_sem, 10 * 1000); + + ASSERT_NE(getRecordCounts, insertCounts); + taos_close(taos); + + printf("taos_query_a_t2 test finished.\n"); +} + +void queryCallbackStartFetchThread(void* param, void* res, int32_t code) { + printf("queryCallbackStartFetchThread start...\n"); + ASSERT_TRUE(code == 0); + void** tmp = (void**)param; + *tmp = res; + printf("queryCallbackStartFetchThread end. res:%p\n", res); + tsem_post(&query_sem); +} + +TEST_F(taoscTest, taos_query_a_fetch_row) { + char sql[1024] = {0}; + int32_t code = 0; + TAOS* taos = taos_connect("localhost", "root", "taosdata", NULL, 0); + ASSERT_TRUE(taos != nullptr); + + getRecordCounts = 0; + + const char* table_name = "taosc_test_table1"; + sprintf(sql, "select * from taosc_test_db.%s;", table_name); + + tsem_init(&query_sem, 0, 0); + printf("taos_query_a_fetch_row query start...\n"); + TAOS_RES *res; + TAOS_RES **pres = &res; + taos_query_a(taos, sql, queryCallbackStartFetchThread, pres); + tsem_wait(&query_sem); + + getRecordCounts = 0; + TAOS_ROW row; + printf("taos_query_a_fetch_row taos_fetch_row start...\n"); + + while ((row = taos_fetch_row(*pres))) { + getRecordCounts++; + } + printf("taos_query_a_fetch_row taos_fetch_row end. 
%p record count:%d.\n", *pres, getRecordCounts); + taos_free_result(*pres); + + ASSERT_EQ(getRecordCounts, insertCounts); + taos_close(taos); + + printf("taos_query_a_fetch_row test finished.\n"); +} + diff --git a/tests/unit-test/test.sh b/tests/unit-test/test.sh index 4122597717..292767e00c 100755 --- a/tests/unit-test/test.sh +++ b/tests/unit-test/test.sh @@ -6,7 +6,7 @@ function usage() { echo -e "\t -h help" } -ent=0 +ent=1 while getopts "eh" opt; do case $opt in e) @@ -34,7 +34,14 @@ else cd ../../../debug fi -ctest -j8 +set -e + +pgrep taosd || taosd >> /dev/null 2>&1 & + +sleep 10 + +ctest -E "smlTest|funcTest|profileTest|sdbTest|showTest|geomTest|idxFstUtilUT|idxTest|idxUtilUT|idxFstUT|parserTest|plannerTest|transUT|transUtilUt" -j8 + ret=$? exit $ret diff --git a/utils/test/c/tmqSim.c b/utils/test/c/tmqSim.c index 14e30008fe..006bd516d0 100644 --- a/utils/test/c/tmqSim.c +++ b/utils/test/c/tmqSim.c @@ -906,6 +906,7 @@ void* consumeThreadFunc(void* param) { pPrint("tmq_commit() manual commit when consume end.\n"); /*tmq_commit(pInfo->tmq, NULL, 0);*/ tmq_commit_sync(pInfo->tmq, NULL); + tmq_commit_cb_print(pInfo->tmq, 0, pInfo); taosFprintfFile(g_fp, "tmq_commit() manual commit over.\n"); pPrint("tmq_commit() manual commit over.\n"); }