diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 9a6a5329ae..9bbda8309f 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 3.0 + GIT_TAG main SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/docs/en/12-taos-sql/02-database.md b/docs/en/12-taos-sql/02-database.md index 24ccc440a6..865e9b8db0 100644 --- a/docs/en/12-taos-sql/02-database.md +++ b/docs/en/12-taos-sql/02-database.md @@ -41,7 +41,7 @@ database_option: { ## Parameters -- BUFFER: specifies the size (in MB) of the write buffer for each vnode. Enter a value between 3 and 16384. The default value is 96. +- BUFFER: specifies the size (in MB) of the write buffer for each vnode. Enter a value between 3 and 16384. The default value is 256. - CACHEMODEL: specifies how the latest data in subtables is stored in the cache. The default value is none. - none: The latest data is not cached. - last_row: The last row of each subtable is cached. This option significantly improves the performance of the LAST_ROW function. diff --git a/docs/en/12-taos-sql/19-limit.md b/docs/en/12-taos-sql/19-limit.md index 22ad2055e4..23bb8ce917 100644 --- a/docs/en/12-taos-sql/19-limit.md +++ b/docs/en/12-taos-sql/19-limit.md @@ -36,7 +36,7 @@ The following characters cannot occur in a password: single quotation marks ('), - Maximum numbers of databases, STables, tables are dependent only on the system resources. - The number of replicas can only be 1 or 3. - The maximum length of a username is 23 bytes. -- The maximum length of a password is 128 bytes. +- The maximum length of a password is 31 bytes. - The maximum number of rows depends on system resources. - The maximum number of vnodes in a database is 1024. diff --git a/docs/en/12-taos-sql/25-grant.md b/docs/en/12-taos-sql/25-grant.md index 8b4c439352..c214e11876 100644 --- a/docs/en/12-taos-sql/25-grant.md +++ b/docs/en/12-taos-sql/25-grant.md @@ -16,7 +16,7 @@ This statement creates a user account. The maximum length of user_name is 23 bytes. -The maximum length of password is 128 bytes. The password can include leters, digits, and special characters excluding single quotation marks, double quotation marks, backticks, backslashes, and spaces. The password cannot be empty. +The maximum length of password is 31 bytes. The password can include letters, digits, and special characters excluding single quotation marks, double quotation marks, backticks, backslashes, and spaces. The password cannot be empty. `SYSINFO` indicates whether the user is allowed to view system information. `1` means allowed, `0` means not allowed. System information includes server configuration, dnode, vnode, storage. The default value is `1`. diff --git a/docs/en/12-taos-sql/27-index.md b/docs/en/12-taos-sql/27-index.md index 3a253743d1..6d029bdd92 100644 --- a/docs/en/12-taos-sql/27-index.md +++ b/docs/en/12-taos-sql/27-index.md @@ -28,6 +28,24 @@ Performs pre-aggregation on the specified column over the time window defined by - WATERMARK: Enter a value between 0ms and 900000ms. The most precise unit supported is milliseconds. The default value is 5 seconds. This option can be used only on supertables. - MAX_DELAY: Enter a value between 1ms and 900000ms. The most precise unit supported is milliseconds. The default value is the value of interval provided that it does not exceed 900000ms.
This option can be used only on supertables. Note: Retain the default value if possible. Configuring a small MAX_DELAY may cause results to be frequently pushed, affecting storage and query performance. +```sql +DROP DATABASE IF EXISTS d0; +CREATE DATABASE d0; +USE d0; +CREATE TABLE IF NOT EXISTS st1 (ts timestamp, c1 int, c2 float, c3 double) TAGS (t1 int unsigned); +CREATE TABLE ct1 USING st1 TAGS(1000); +CREATE TABLE ct2 USING st1 TAGS(2000); +INSERT INTO ct1 VALUES(now+0s, 10, 2.0, 3.0); +INSERT INTO ct1 VALUES(now+1s, 11, 2.1, 3.1)(now+2s, 12, 2.2, 3.2)(now+3s, 13, 2.3, 3.3); +CREATE SMA INDEX sma_index_name1 ON st1 FUNCTION(max(c1),max(c2),min(c1)) INTERVAL(5m,10s) SLIDING(5m) WATERMARK 5s MAX_DELAY 1m; +-- query from SMA Index +ALTER LOCAL 'querySmaOptimize' '1'; +SELECT max(c2),min(c1) FROM st1 INTERVAL(5m,10s) SLIDING(5m); +SELECT _wstart,_wend,_wduration,max(c2),min(c1) FROM st1 INTERVAL(5m,10s) SLIDING(5m); +-- query from raw data +ALTER LOCAL 'querySmaOptimize' '0'; +``` + ### FULLTEXT Indexing Creates a text index for the specified column. FULLTEXT indexing improves performance for queries with text filtering. The index_option syntax is not supported for FULLTEXT indexing. FULLTEXT indexing is supported for JSON tag columns only. Multiple columns cannot be indexed together. However, separate indices can be created for each column. @@ -41,7 +59,6 @@ DROP INDEX index_name; ## View Indices ````sql -```sql SHOW INDEXES FROM tbl_name [FROM db_name]; SHOW INDEXES FROM [db_name.]tbl_name ; ```` diff --git a/docs/en/14-reference/05-taosbenchmark.md b/docs/en/14-reference/05-taosbenchmark.md index 8fc20c149f..2348810d9e 100644 --- a/docs/en/14-reference/05-taosbenchmark.md +++ b/docs/en/14-reference/05-taosbenchmark.md @@ -470,3 +470,26 @@ The configuration parameters for subscribing to a super table are set in `super_ - **sql**: The SQL command to be executed. For the query SQL of super table, keep "xxxx" in the SQL command. The program will automatically replace it with all the sub-table names of the super table. Replace it with all the sub-table names in the super table. - **result**: The file to save the query result. If not specified, taosBenchmark will not save result. + +#### Data types in taosBenchmark + +| # | **TDengine** | **taosBenchmark** +| --- | :----------------: | :---------------: +| 1 | TIMESTAMP | timestamp +| 2 | INT | int +| 3 | INT UNSIGNED | uint +| 4 | BIGINT | bigint +| 5 | BIGINT UNSIGNED | ubigint +| 6 | FLOAT | float +| 7 | DOUBLE | double +| 8 | BINARY | binary +| 9 | SMALLINT | smallint +| 10 | SMALLINT UNSIGNED | usmallint +| 11 | TINYINT | tinyint +| 12 | TINYINT UNSIGNED | utinyint +| 13 | BOOL | bool +| 14 | NCHAR | nchar +| 15 | VARCHAR | varchar +| 16 | JSON | json + +Note: Data type names must be written in lowercase in taosBenchmark. diff --git a/docs/en/20-third-party/14-dbeaver.md b/docs/en/20-third-party/14-dbeaver.md index 1882e12503..fd0a0672f2 100644 --- a/docs/en/20-third-party/14-dbeaver.md +++ b/docs/en/20-third-party/14-dbeaver.md @@ -12,50 +12,25 @@ To use DBeaver to manage TDengine, you need to prepare the following: - Install DBeaver. DBeaver supports mainstream operating systems including Windows, macOS, and Linux. Please make sure you download and install the correct version (23.1.1+) and platform package. Please refer to the [official DBeaver documentation](https://github.com/dbeaver/dbeaver/wiki/Installation) for detailed installation steps.
- If you use an on-premises TDengine cluster, please make sure that TDengine and taosAdapter are deployed and running properly. For detailed information, please refer to the taosAdapter User Manual. -- If you use TDengine Cloud, please [register](https://cloud.tdengine.com/) for an account. -## Usage - -### Use DBeaver to access on-premises TDengine cluster +## Use DBeaver to access on-premises TDengine cluster 1. Start the DBeaver application, click the button or menu item to choose **New Database Connection**, and then select **TDengine** in the **Timeseries** category. -![Connect TDengine with DBeaver](./dbeaver/dbeaver-connect-tdengine-en.webp) + ![Connect TDengine with DBeaver](./dbeaver/dbeaver-connect-tdengine-en.webp) 2. Configure the TDengine connection by filling in the host address, port number, username, and password. If TDengine is deployed on the local machine, you are only required to fill in the username and password. The default username is root and the default password is taosdata. Click **Test Connection** to check whether the connection is workable. If you do not have the TDengine Java connector installed on the local machine, DBeaver will prompt you to download and install it. -![Configure the TDengine connection](./dbeaver/dbeaver-config-tdengine-en.webp)) + ![Configure the TDengine connection](./dbeaver/dbeaver-config-tdengine-en.webp) 3. If the connection is successful, it will be displayed as shown in the following figure. If the connection fails, please check whether the TDengine service and taosAdapter are running correctly and whether the host address, port number, username, and password are correct. -![Connection successful](./dbeaver/dbeaver-connect-tdengine-test-en.webp) + ![Connection successful](./dbeaver/dbeaver-connect-tdengine-test-en.webp) 4. Use DBeaver to select databases and tables and browse your data stored in TDengine. -![Browse TDengine data with DBeaver](./dbeaver/dbeaver-browse-data-en.webp) + ![Browse TDengine data with DBeaver](./dbeaver/dbeaver-browse-data-en.webp) 5. You can also manipulate TDengine data by executing SQL commands. -![Use SQL commands to manipulate TDengine data in DBeaver](./dbeaver/dbeaver-sql-execution-en.webp) - -### Use DBeaver to access TDengine Cloud - -1. Log in to the TDengine Cloud service, select **Programming** > **Java** in the management console, and then copy the string value of `TDENGINE_JDBC_URL` displayed in the **Config** section. - -![Copy JDBC URL from TDengine Cloud](./dbeaver/tdengine-cloud-jdbc-dsn-en.webp) - -2. Start the DBeaver application, click the button or menu item to choose **New Database Connection**, and then select **TDengine Cloud** in the **Timeseries** category. - -![Connect TDengine Cloud with DBeaver](./dbeaver/dbeaver-connect-tdengine-cloud-en.webp) - -3. Configure the TDengine Cloud connection by filling in the JDBC URL value. Click **Test Connection**. If you do not have the TDengine Java connector installed on the local machine, DBeaver will prompt you to download and install it. If the connection is successful, it will be displayed as shown in the following figure. If the connection fails, please check whether the TDengine Cloud service is running properly and whether the JDBC URL is correct. - -![Configure the TDengine Cloud connection](./dbeaver/dbeaver-connect-tdengine-cloud-test-en.webp) - -4. Use DBeaver to select databases and tables and browse your data stored in TDengine Cloud.
- -![Browse TDengine Cloud data with DBeaver](./dbeaver/dbeaver-browse-data-cloud-en.webp) - -5. You can also manipulate TDengine Cloud data by executing SQL commands. - -![Use SQL commands to manipulate TDengine Cloud data in DBeaver](./dbeaver/dbeaver-sql-execution-cloud-en.webp) + ![Use SQL commands to manipulate TDengine data in DBeaver](./dbeaver/dbeaver-sql-execution-en.webp) diff --git a/docs/zh/08-connector/30-python.mdx b/docs/zh/08-connector/30-python.mdx index ec1ec4b7c7..15c11d05c3 100644 --- a/docs/zh/08-connector/30-python.mdx +++ b/docs/zh/08-connector/30-python.mdx @@ -1,4 +1,5 @@ --- +toc_max_heading_level: 4 sidebar_label: Python title: TDengine Python Connector description: "taospy 是 TDengine 的官方 Python 连接器。taospy 提供了丰富的 API, 使得 Python 应用可以很方便地使用 TDengine。tasopy 对 TDengine 的原生接口和 REST 接口都进行了封装, 分别对应 tasopy 的两个子模块:taos 和 taosrest。除了对原生接口和 REST 接口的封装,taospy 还提供了符合 Python 数据访问规范(PEP 249)的编程接口。这使得 taospy 和很多第三方工具集成变得简单,比如 SQLAlchemy 和 pandas" diff --git a/docs/zh/12-taos-sql/02-database.md b/docs/zh/12-taos-sql/02-database.md index ca1d616e71..de104b6834 100644 --- a/docs/zh/12-taos-sql/02-database.md +++ b/docs/zh/12-taos-sql/02-database.md @@ -41,7 +41,7 @@ database_option: { ### 参数说明 -- BUFFER: 一个 VNODE 写入内存池大小,单位为 MB,默认为 96,最小为 3,最大为 16384。 +- BUFFER: 一个 VNODE 写入内存池大小,单位为 MB,默认为 256,最小为 3,最大为 16384。 - CACHEMODEL:表示是否在内存中缓存子表的最近数据。默认为 none。 - none:表示不缓存。 - last_row:表示缓存子表最近一行数据。这将显著改善 LAST_ROW 函数的性能表现。 diff --git a/docs/zh/12-taos-sql/19-limit.md b/docs/zh/12-taos-sql/19-limit.md index e5a492580e..6c815fc5f0 100644 --- a/docs/zh/12-taos-sql/19-limit.md +++ b/docs/zh/12-taos-sql/19-limit.md @@ -36,7 +36,7 @@ description: 合法字符集和命名中的限制规则 - 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制 - 数据库的副本数只能设置为 1 或 3 - 用户名的最大长度是 23 字节 -- 用户密码的最大长度是 128 字节 +- 用户密码的最大长度是 31 字节 - 总数据行数取决于可用资源 - 单个数据库的虚拟结点数上限为 1024 diff --git a/docs/zh/12-taos-sql/25-grant.md b/docs/zh/12-taos-sql/25-grant.md index 7fb9447101..a9c3910500 100644 --- a/docs/zh/12-taos-sql/25-grant.md +++ b/docs/zh/12-taos-sql/25-grant.md @@ -16,7 +16,7 @@ CREATE USER use_name PASS 'password' [SYSINFO {1|0}]; use_name 最长为 23 字节。 -password 最长为 128 字节,合法字符包括"a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/",不可以出现单双引号、撇号、反斜杠和空格,且不可以为空。 +password 最长为 31 字节,合法字符包括"a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/",不可以出现单双引号、撇号、反斜杠和空格,且不可以为空。 SYSINFO 表示用户是否可以查看系统信息。1 表示可以查看,0 表示不可以查看。系统信息包括服务端配置信息、服务端各种节点信息(如 DNODE、QNODE等)、存储相关的信息等。默认为可以查看系统信息。 diff --git a/docs/zh/12-taos-sql/27-index.md b/docs/zh/12-taos-sql/27-index.md index 59aa5292e4..7c301a202d 100644 --- a/docs/zh/12-taos-sql/27-index.md +++ b/docs/zh/12-taos-sql/27-index.md @@ -28,6 +28,24 @@ functions: - WATERMARK: 最小单位毫秒,取值范围 [0ms, 900000ms],默认值为 5 秒,只可用于超级表。 - MAX_DELAY: 最小单位毫秒,取值范围 [1ms, 900000ms],默认值为 interval 的值(但不能超过最大值),只可用于超级表。注:不建议 MAX_DELAY 设置太小,否则会过于频繁的推送结果,影响存储和查询性能,如无特殊需求,取默认值即可。 +```sql +DROP DATABASE IF EXISTS d0; +CREATE DATABASE d0; +USE d0; +CREATE TABLE IF NOT EXISTS st1 (ts timestamp, c1 int, c2 float, c3 double) TAGS (t1 int unsigned); +CREATE TABLE ct1 USING st1 TAGS(1000); +CREATE TABLE ct2 USING st1 TAGS(2000); +INSERT INTO ct1 VALUES(now+0s, 10, 2.0, 3.0); +INSERT INTO ct1 VALUES(now+1s, 11, 2.1, 3.1)(now+2s, 12, 2.2, 3.2)(now+3s, 13, 2.3, 3.3); +CREATE SMA INDEX sma_index_name1 ON st1 FUNCTION(max(c1),max(c2),min(c1)) INTERVAL(5m,10s) SLIDING(5m) WATERMARK 5s MAX_DELAY 1m; +-- 从 SMA 索引查询 +ALTER LOCAL 'querySmaOptimize' '1'; +SELECT max(c2),min(c1) FROM st1 INTERVAL(5m,10s) SLIDING(5m); +SELECT _wstart,_wend,_wduration,max(c2),min(c1) FROM st1 INTERVAL(5m,10s) SLIDING(5m); +-- 
从原始数据查询 +ALTER LOCAL 'querySmaOptimize' '0'; +``` + ### FULLTEXT 索引 对指定列建立文本索引,可以提升含有文本过滤的查询的性能。FULLTEXT 索引不支持 index_option 语法。现阶段只支持对 JSON 类型的标签列创建 FULLTEXT 索引。不支持多列联合索引,但可以为每个列分布创建 FULLTEXT 索引。 @@ -41,7 +59,6 @@ DROP INDEX index_name; ## 查看索引 ````sql -```sql SHOW INDEXES FROM tbl_name [FROM db_name]; SHOW INDEXES FROM [db_name.]tbl_name; ```` diff --git a/docs/zh/14-reference/05-taosbenchmark.md b/docs/zh/14-reference/05-taosbenchmark.md index c5d98767f9..319046ba8f 100644 --- a/docs/zh/14-reference/05-taosbenchmark.md +++ b/docs/zh/14-reference/05-taosbenchmark.md @@ -437,3 +437,29 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) - **sqls** : - **sql** : 执行的 SQL 命令,必填。 + +#### 配置文件中数据类型书写对照表 + +| # | **引擎** | **taosBenchmark** +| --- | :----------------: | :---------------: +| 1 | TIMESTAMP | timestamp +| 2 | INT | int +| 3 | INT UNSIGNED | uint +| 4 | BIGINT | bigint +| 5 | BIGINT UNSIGNED | ubigint +| 6 | FLOAT | float +| 7 | DOUBLE | double +| 8 | BINARY | binary +| 9 | SMALLINT | smallint +| 10 | SMALLINT UNSIGNED | usmallint +| 11 | TINYINT | tinyint +| 12 | TINYINT UNSIGNED | utinyint +| 13 | BOOL | bool +| 14 | NCHAR | nchar +| 15 | VARCHAR | varchar +| 16 | JSON | json + +注意:taosBenchmark 配置文件中数据类型必须小写方可识别 + + + diff --git a/docs/zh/20-third-party/13-dbeaver.md b/docs/zh/20-third-party/13-dbeaver.md index 20c8baa7dc..c096fd41a5 100644 --- a/docs/zh/20-third-party/13-dbeaver.md +++ b/docs/zh/20-third-party/13-dbeaver.md @@ -8,21 +8,16 @@ DBeaver 是一款流行的跨平台数据库管理工具,方便开发者、数 ## 前置条件 -### 安装 DBeaver - 使用 DBeaver 管理 TDengine 需要以下几方面的准备工作。 - 安装 DBeaver。DBeaver 支持主流操作系统包括 Windows、macOS 和 Linux。请注意[下载](https://dbeaver.io/download/)正确平台和版本(23.1.1+)的安装包。详细安装步骤请参考 [DBeaver 官方文档](https://github.com/dbeaver/dbeaver/wiki/Installation)。 - 如果使用独立部署的 TDengine 集群,请确认 TDengine 正常运行,并且 taosAdapter 已经安装并正常运行,具体细节请参考 [taosAdapter 的使用手册](/reference/taosadapter)。 -- 如果使用 TDengine Cloud,请[注册](https://cloud.taosdata.com/)相应账号。 -## 使用步骤 - -### 使用 DBeaver 访问内部部署的 TDengine +## 使用 DBeaver 访问内部部署的 TDengine 1. 启动 DBeaver 应用,点击按钮或菜单项选择“连接到数据库”,然后在时间序列分类栏中选择 TDengine。 -![DBeaver 连接 TDengine](./dbeaver/dbeaver-connect-tdengine-zh.webp) + ![DBeaver 连接 TDengine](./dbeaver/dbeaver-connect-tdengine-zh.webp) 2. 配置 TDengine 连接,填入主机地址、端口号、用户名和密码。如果 TDengine 部署在本机,可以只填用户名和密码,默认用户名为 root,默认密码为 taosdata。点击“测试连接”可以对连接是否可用进行测试。如果本机没有安装 TDengine Java 连接器,DBeaver 会提示下载安装。 @@ -31,37 +26,12 @@ DBeaver 是一款流行的跨平台数据库管理工具,方便开发者、数 3. 连接成功将显示如下图所示。如果显示连接失败,请检查 TDengine 服务和 taosAdapter 是否正确运行,主机地址、端口号、用户名和密码是否正确。 -![连接成功](./dbeaver/dbeaver-connect-tdengine-test-zh.webp) + ![连接成功](./dbeaver/dbeaver-connect-tdengine-test-zh.webp) 4. 使用 DBeaver 选择数据库和表可以浏览 TDengine 服务的数据。 -![DBeaver 浏览 TDengine 数据](./dbeaver/dbeaver-browse-data-zh.webp) + ![DBeaver 浏览 TDengine 数据](./dbeaver/dbeaver-browse-data-zh.webp) 5. 也可以通过执行 SQL 命令的方式对 TDengine 数据进行操作。 -![DBeaver SQL 命令](./dbeaver/dbeaver-sql-execution-zh.webp) - -### 使用 DBeaver 访问 TDengine Cloud - -1. 登录 TDengine Cloud 服务,在管理界面中选择“编程”和“Java”,然后复制 TDENGINE_JDBC_URL 的字符串值。 - -![复制 TDengine Cloud DSN](./dbeaver/tdengine-cloud-jdbc-dsn-zh.webp) - -2. 启动 DBeaver 应用,点击按钮或菜单项选择“连接到数据库”,然后在时间序列分类栏中选择 TDengine Cloud。 - -![DBeaver 连接 TDengine Cloud](./dbeaver/dbeaver-connect-tdengine-cloud-zh.webp) - - -3. 配置 TDengine Cloud 连接,填入 JDBC_URL 值。点击“测试连接”,如果本机没有安装 TDengine Java - 连接器,DBeaver 会提示下载安装。连接成功将显示如下图所示。如果显示连接失败,请检查 TDengine Cloud 服务是否启动,JDBC_URL 是否正确。 - - ![配置 TDengine Cloud 连接](./dbeaver/dbeaver-connect-tdengine-cloud-test-zh.webp) -4. 
使用 DBeaver 选择数据库和表可以浏览 TDengine Cloud 服务的数据。 - -![DBeaver 浏览 TDengine Cloud 数据](./dbeaver/dbeaver-browse-cloud-data-zh.webp) - -5. 也可以通过执行 SQL 命令的方式对 TDengine Cloud 数据进行操作。 - -![DBeaver SQL 命令 操作 TDengine Cloud](./dbeaver/dbeaver-sql-execution-cloud-zh.webp) - + ![DBeaver SQL 命令](./dbeaver/dbeaver-sql-execution-zh.webp) diff --git a/examples/C#/taosdemo/README.md b/examples/C#/taosdemo/README.md index 3cba3529bf..970d5332ac 100644 --- a/examples/C#/taosdemo/README.md +++ b/examples/C#/taosdemo/README.md @@ -36,7 +36,11 @@ dotnet build -c Release ## Usage ``` -Usage: mono taosdemo.exe [OPTION...] +Usage with mono: +$ mono taosdemo.exe [OPTION...] + +Usage with dotnet: +Usage: .\bin\Release\net5.0\taosdemo.exe [OPTION...] --help Show usage. diff --git a/examples/C#/taosdemo/taosdemo.cs b/examples/C#/taosdemo/taosdemo.cs index e092c48f15..a48439d192 100644 --- a/examples/C#/taosdemo/taosdemo.cs +++ b/examples/C#/taosdemo/taosdemo.cs @@ -72,7 +72,7 @@ namespace TDengineDriver { if ("--help" == argv[i]) { - Console.WriteLine("Usage: mono taosdemo.exe [OPTION...]"); + Console.WriteLine("Usage: taosdemo.exe [OPTION...]"); Console.WriteLine(""); HelpPrint("--help", "Show usage."); Console.WriteLine(""); @@ -305,7 +305,7 @@ namespace TDengineDriver this.conn = TDengine.Connect(this.host, this.user, this.password, db, this.port); if (this.conn == IntPtr.Zero) { - Console.WriteLine("Connect to TDengine failed"); + Console.WriteLine("Connect to TDengine failed. Reason: {0}\n", TDengine.Error(0)); CleanAndExitProgram(1); } else diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 6506a45964..b9365172d5 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -106,7 +106,6 @@ enum { HEARTBEAT_KEY_DBINFO, HEARTBEAT_KEY_STBINFO, HEARTBEAT_KEY_TMQ, - HEARTBEAT_KEY_USER_PASSINFO, }; typedef enum _mgmt_table { @@ -636,6 +635,7 @@ typedef struct { SEpSet epSet; int32_t svrTimestamp; int32_t passVer; + int32_t authVer; char sVer[TSDB_VERSION_LEN]; char sDetailVer[128]; } SConnectRsp; @@ -720,14 +720,6 @@ int32_t tSerializeSGetUserAuthRsp(void* buf, int32_t bufLen, SGetUserAuthRsp* pR int32_t tDeserializeSGetUserAuthRsp(void* buf, int32_t bufLen, SGetUserAuthRsp* pRsp); void tFreeSGetUserAuthRsp(SGetUserAuthRsp* pRsp); -typedef struct SUserPassVersion { - char user[TSDB_USER_LEN]; - int32_t version; -} SUserPassVersion; - -typedef SGetUserAuthReq SGetUserPassReq; -typedef SUserPassVersion SGetUserPassRsp; - /* * for client side struct, only column id, type, bytes are necessary * But for data in vnode side, we need all the following information. 
@@ -1071,14 +1063,6 @@ int32_t tSerializeSUserAuthBatchRsp(void* buf, int32_t bufLen, SUserAuthBatchRsp int32_t tDeserializeSUserAuthBatchRsp(void* buf, int32_t bufLen, SUserAuthBatchRsp* pRsp); void tFreeSUserAuthBatchRsp(SUserAuthBatchRsp* pRsp); -typedef struct { - SArray* pArray; // Array of SGetUserPassRsp -} SUserPassBatchRsp; - -int32_t tSerializeSUserPassBatchRsp(void* buf, int32_t bufLen, SUserPassBatchRsp* pRsp); -int32_t tDeserializeSUserPassBatchRsp(void* buf, int32_t bufLen, SUserPassBatchRsp* pRsp); -void tFreeSUserPassBatchRsp(SUserPassBatchRsp* pRsp); - typedef struct { char db[TSDB_DB_FNAME_LEN]; STimeWindow timeRange; diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h index f253b47e50..58bdb77df3 100644 --- a/include/libs/parser/parser.h +++ b/include/libs/parser/parser.h @@ -58,6 +58,7 @@ typedef struct SParseContext { bool isSuperUser; bool enableSysInfo; bool async; + bool hasInvisibleCol; const char* svrVer; bool nodeOffline; SArray* pTableMetaPos; // sql table pos => catalog data pos diff --git a/include/libs/transport/trpc.h b/include/libs/transport/trpc.h index c73e5c127a..93e4d72ad7 100644 --- a/include/libs/transport/trpc.h +++ b/include/libs/transport/trpc.h @@ -46,6 +46,7 @@ typedef struct SRpcHandleInfo { int8_t noResp; // has response or not(default 0, 0: resp, 1: no resp) int8_t persistHandle; // persist handle or not int8_t hasEpSet; + int32_t cliVer; // app info void *ahandle; // app handle set by client @@ -83,6 +84,7 @@ typedef struct SRpcInit { int32_t sessions; // number of sessions allowed int8_t connType; // TAOS_CONN_UDP, TAOS_CONN_TCPC, TAOS_CONN_TCPS int32_t idleTime; // milliseconds, 0 means idle timer is disabled + int32_t compatibilityVer; int32_t retryMinInterval; // retry init interval int32_t retryStepFactor; // retry interval factor diff --git a/include/util/taoserror.h b/include/util/taoserror.h index fbeadd0f06..c747ba49c8 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -775,6 +775,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_TMQ_CONSUMER_ERROR TAOS_DEF_ERROR_CODE(0, 0x4003) #define TSDB_CODE_TMQ_TOPIC_OUT_OF_RANGE TAOS_DEF_ERROR_CODE(0, 0x4004) #define TSDB_CODE_TMQ_GROUP_OUT_OF_RANGE TAOS_DEF_ERROR_CODE(0, 0x4005) +#define TSDB_CODE_TMQ_SNAPSHOT_ERROR TAOS_DEF_ERROR_CODE(0, 0x4006) // stream #define TSDB_CODE_STREAM_TASK_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x4100) diff --git a/packaging/checkPackageRuning.py b/packaging/checkPackageRuning.py index 96e2378fb3..914ee83f29 100755 --- a/packaging/checkPackageRuning.py +++ b/packaging/checkPackageRuning.py @@ -87,7 +87,7 @@ os.system("rm -rf /tmp/dumpdata/*") # dump data out print("taosdump dump out data") -os.system("taosdump -o /tmp/dumpdata -D test -y -h %s "%serverHost) +os.system("taosdump -o /tmp/dumpdata -D test -h %s "%serverHost) # drop database of test print("drop database test") @@ -95,7 +95,7 @@ os.system(" taos -s ' drop database test ;' -h %s "%serverHost) # dump data in print("taosdump dump data in") -os.system("taosdump -i /tmp/dumpdata -y -h %s "%serverHost) +os.system("taosdump -i /tmp/dumpdata -h %s "%serverHost) result = conn.query("SELECT count(*) from test.meters") diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h index fa444779f3..736582dff2 100644 --- a/source/client/inc/clientInt.h +++ b/source/client/inc/clientInt.h @@ -63,7 +63,7 @@ typedef struct { // statistics int32_t reportCnt; int32_t connKeyCnt; - int32_t passKeyCnt; // with passVer call back + int8_t connHbFlag; // 0 init, 1 send req, 2 
get resp int64_t reportBytes; // not implemented int64_t startTime; // ctl @@ -83,8 +83,9 @@ typedef struct { int8_t threadStop; int8_t quitByKill; TdThread thread; - TdThreadMutex lock; // used when app init and cleanup + TdThreadMutex lock; // used when app init and cleanup SHashObj* appSummary; + SHashObj* appHbHash; // key: clusterId SArray* appHbMgrs; // SArray one for each cluster FHbReqHandle reqHandle[CONN_TYPE__MAX]; FHbRspHandle rspHandle[CONN_TYPE__MAX]; @@ -146,6 +147,7 @@ typedef struct STscObj { int64_t id; // ref ID returned by taosAddRef TdThreadMutex mutex; // used to protect the operation on db int32_t numOfReqs; // number of sqlObj bound to this connection + int32_t authVer; SAppInstInfo* pAppInfo; SHashObj* pRequests; SPassInfo passInfo; diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index c64bbfbdb6..238b3613f5 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -29,6 +29,7 @@ #include "trpc.h" #include "tsched.h" #include "ttime.h" +#include "tversion.h" #if defined(CUS_NAME) || defined(CUS_PROMPT) || defined(CUS_EMAIL) #include "cus_name.h" @@ -111,7 +112,8 @@ static void deregisterRequest(SRequestObj *pRequest) { atomic_add_fetch_64((int64_t *)&pActivity->numOfSlowQueries, 1); if (tsSlowLogScope & reqType) { taosPrintSlowLog("PID:%d, Conn:%u, QID:0x%" PRIx64 ", Start:%" PRId64 ", Duration:%" PRId64 "us, SQL:%s", - taosGetPId(), pTscObj->connId, pRequest->requestId, pRequest->metric.start, duration, pRequest->sqlstr); + taosGetPId(), pTscObj->connId, pRequest->requestId, pRequest->metric.start, duration, + pRequest->sqlstr); } } @@ -175,6 +177,8 @@ void *openTransporter(const char *user, const char *auth, int32_t numOfThread) { rpcInit.connLimitNum = connLimitNum; rpcInit.timeToGetConn = tsTimeToGetAvailableConn; + taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); + void *pDnodeConn = rpcOpen(&rpcInit); if (pDnodeConn == NULL) { tscError("failed to init connection to server"); @@ -358,17 +362,16 @@ int32_t releaseRequest(int64_t rid) { return taosReleaseRef(clientReqRefPool, ri int32_t removeRequest(int64_t rid) { return taosRemoveRef(clientReqRefPool, rid); } - void destroySubRequests(SRequestObj *pRequest) { - int32_t reqIdx = -1; + int32_t reqIdx = -1; SRequestObj *pReqList[16] = {NULL}; - uint64_t tmpRefId = 0; + uint64_t tmpRefId = 0; if (pRequest->relation.userRefId && pRequest->relation.userRefId != pRequest->self) { return; } - - SRequestObj* pTmp = pRequest; + + SRequestObj *pTmp = pRequest; while (pTmp->relation.prevRefId) { tmpRefId = pTmp->relation.prevRefId; pTmp = acquireRequest(tmpRefId); @@ -376,9 +379,9 @@ void destroySubRequests(SRequestObj *pRequest) { pReqList[++reqIdx] = pTmp; releaseRequest(tmpRefId); } else { - tscError("0x%" PRIx64 ", prev req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pTmp->self, - tmpRefId, pTmp->requestId); - break; + tscError("0x%" PRIx64 ", prev req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pTmp->self, tmpRefId, + pTmp->requestId); + break; } } @@ -391,16 +394,15 @@ void destroySubRequests(SRequestObj *pRequest) { pTmp = acquireRequest(tmpRefId); if (pTmp) { tmpRefId = pTmp->relation.nextRefId; - removeRequest(pTmp->self); + removeRequest(pTmp->self); releaseRequest(pTmp->self); } else { tscError("0x%" PRIx64 " is not there", tmpRefId); - break; + break; } } } - void doDestroyRequest(void *p) { if (NULL == p) { return; @@ -412,7 +414,7 @@ void doDestroyRequest(void *p) { tscTrace("begin to destroy request %" PRIx64 " p:%p", reqId, 
pRequest); destroySubRequests(pRequest); - + taosHashRemove(pRequest->pTscObj->pRequests, &pRequest->self, sizeof(pRequest->self)); schedulerFreeJob(&pRequest->body.queryJob, 0); @@ -473,15 +475,15 @@ void taosStopQueryImpl(SRequestObj *pRequest) { } void stopAllQueries(SRequestObj *pRequest) { - int32_t reqIdx = -1; + int32_t reqIdx = -1; SRequestObj *pReqList[16] = {NULL}; - uint64_t tmpRefId = 0; + uint64_t tmpRefId = 0; if (pRequest->relation.userRefId && pRequest->relation.userRefId != pRequest->self) { return; } - - SRequestObj* pTmp = pRequest; + + SRequestObj *pTmp = pRequest; while (pTmp->relation.prevRefId) { tmpRefId = pTmp->relation.prevRefId; pTmp = acquireRequest(tmpRefId); @@ -489,9 +491,9 @@ void stopAllQueries(SRequestObj *pRequest) { pReqList[++reqIdx] = pTmp; releaseRequest(tmpRefId); } else { - tscError("0x%" PRIx64 ", prev req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pTmp->self, - tmpRefId, pTmp->requestId); - break; + tscError("0x%" PRIx64 ", prev req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pTmp->self, tmpRefId, + pTmp->requestId); + break; } } @@ -510,12 +512,11 @@ void stopAllQueries(SRequestObj *pRequest) { releaseRequest(pTmp->self); } else { tscError("0x%" PRIx64 " is not there", tmpRefId); - break; + break; } } } - void crashReportThreadFuncUnexpectedStopped(void) { atomic_store_32(&clientStop, -1); } static void *tscCrashReportThreadFp(void *param) { diff --git a/source/client/src/clientHb.c b/source/client/src/clientHb.c index cbfa48b322..0c48049f0c 100644 --- a/source/client/src/clientHb.c +++ b/source/client/src/clientHb.c @@ -22,10 +22,10 @@ typedef struct { union { struct { - int64_t clusterId; - int32_t passKeyCnt; - int32_t passVer; - int32_t reqCnt; + SAppHbMgr *pAppHbMgr; + int64_t clusterId; + int32_t reqCnt; + int8_t connHbFlag; }; }; } SHbParam; @@ -34,12 +34,14 @@ static SClientHbMgr clientHbMgr = {0}; static int32_t hbCreateThread(); static void hbStopThread(); +static int32_t hbUpdateUserAuthInfo(SAppHbMgr *pAppHbMgr, SUserAuthBatchRsp *batchRsp); static int32_t hbMqHbReqHandle(SClientHbKey *connKey, void *param, SClientHbReq *req) { return 0; } static int32_t hbMqHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) { return 0; } -static int32_t hbProcessUserAuthInfoRsp(void *value, int32_t valueLen, struct SCatalog *pCatalog) { +static int32_t hbProcessUserAuthInfoRsp(void *value, int32_t valueLen, struct SCatalog *pCatalog, + SAppHbMgr *pAppHbMgr) { int32_t code = 0; SUserAuthBatchRsp batchRsp = {0}; @@ -56,54 +58,68 @@ static int32_t hbProcessUserAuthInfoRsp(void *value, int32_t valueLen, struct SC catalogUpdateUserAuthInfo(pCatalog, rsp); } + if (numOfBatchs > 0) hbUpdateUserAuthInfo(pAppHbMgr, &batchRsp); + + atomic_val_compare_exchange_8(&pAppHbMgr->connHbFlag, 1, 2); + taosArrayDestroy(batchRsp.pArray); return TSDB_CODE_SUCCESS; } -static int32_t hbProcessUserPassInfoRsp(void *value, int32_t valueLen, SClientHbKey *connKey, SAppHbMgr *pAppHbMgr) { - int32_t code = 0; - int32_t numOfBatchs = 0; - SUserPassBatchRsp batchRsp = {0}; - if (tDeserializeSUserPassBatchRsp(value, valueLen, &batchRsp) != 0) { - code = TSDB_CODE_INVALID_MSG; - return code; - } - - numOfBatchs = taosArrayGetSize(batchRsp.pArray); - - SClientHbReq *pReq = NULL; - while ((pReq = taosHashIterate(pAppHbMgr->activeInfo, pReq))) { - STscObj *pTscObj = (STscObj *)acquireTscObj(pReq->connKey.tscRid); - if (!pTscObj) { - continue; - } - SPassInfo *passInfo = &pTscObj->passInfo; - if (!passInfo->fp) { - releaseTscObj(pReq->connKey.tscRid); +static int32_t 
hbUpdateUserAuthInfo(SAppHbMgr *pAppHbMgr, SUserAuthBatchRsp *batchRsp) { + uint64_t clusterId = pAppHbMgr->pAppInstInfo->clusterId; + for (int i = 0; i < TARRAY_SIZE(clientHbMgr.appHbMgrs); ++i) { + SAppHbMgr *hbMgr = taosArrayGetP(clientHbMgr.appHbMgrs, i); + if (!hbMgr || hbMgr->pAppInstInfo->clusterId != clusterId) { continue; } - for (int32_t i = 0; i < numOfBatchs; ++i) { - SGetUserPassRsp *rsp = taosArrayGet(batchRsp.pArray, i); - if (0 == strncmp(rsp->user, pTscObj->user, TSDB_USER_LEN)) { - int32_t oldVer = atomic_load_32(&passInfo->ver); - if (oldVer < rsp->version) { - atomic_store_32(&passInfo->ver, rsp->version); - if (passInfo->fp) { - (*passInfo->fp)(passInfo->param, &passInfo->ver, TAOS_NOTIFY_PASSVER); + SClientHbReq *pReq = NULL; + SGetUserAuthRsp *pRsp = NULL; + while ((pReq = taosHashIterate(hbMgr->activeInfo, pReq))) { + STscObj *pTscObj = (STscObj *)acquireTscObj(pReq->connKey.tscRid); + if (!pTscObj) { + continue; + } + + if (!pRsp) { + for (int32_t j = 0; j < TARRAY_SIZE(batchRsp->pArray); ++j) { + SGetUserAuthRsp *rsp = TARRAY_GET_ELEM(batchRsp->pArray, j); + if (0 == strncmp(rsp->user, pTscObj->user, TSDB_USER_LEN)) { + pRsp = rsp; + break; } - tscDebug("update passVer of user %s from %d to %d, tscRid:%" PRIi64, rsp->user, oldVer, + } + if (!pRsp) { + releaseTscObj(pReq->connKey.tscRid); + break; + } + } + + pTscObj->authVer = pRsp->version; +#if 0 + if (pTscObj->sysInfo != pRsp->sysInfo) { + tscDebug("update sysInfo of user %s from %" PRIi8 " to %" PRIi8 ", tscRid:%" PRIi64, pRsp->user, + pTscObj->sysInfo, pRsp->sysInfo, pTscObj->id); + pTscObj->sysInfo = pRsp->sysInfo; + } +#endif + if (pTscObj->passInfo.fp) { + SPassInfo *passInfo = &pTscObj->passInfo; + int32_t oldVer = atomic_load_32(&passInfo->ver); + if (oldVer < pRsp->passVer) { + atomic_store_32(&passInfo->ver, pRsp->passVer); + if (passInfo->fp) { + (*passInfo->fp)(passInfo->param, &pRsp->passVer, TAOS_NOTIFY_PASSVER); + } + tscDebug("update passVer of user %s from %d to %d, tscRid:%" PRIi64, pRsp->user, oldVer, atomic_load_32(&passInfo->ver), pTscObj->id); } - break; } + releaseTscObj(pReq->connKey.tscRid); } - releaseTscObj(pReq->connKey.tscRid); } - - taosArrayDestroy(batchRsp.pArray); - - return code; + return 0; } static int32_t hbGenerateVgInfoFromRsp(SDBVgInfo **pInfo, SUseDbRsp *rsp) { @@ -121,7 +137,6 @@ static int32_t hbGenerateVgInfoFromRsp(SDBVgInfo **pInfo, SUseDbRsp *rsp) { vgInfo->hashSuffix = rsp->hashSuffix; vgInfo->vgHash = taosHashInit(rsp->vgNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK); if (NULL == vgInfo->vgHash) { - taosMemoryFree(vgInfo); tscError("hash init[%d] failed", rsp->vgNum); code = TSDB_CODE_OUT_OF_MEMORY; goto _return; @@ -131,8 +146,6 @@ static int32_t hbGenerateVgInfoFromRsp(SDBVgInfo **pInfo, SUseDbRsp *rsp) { SVgroupInfo *pInfo = taosArrayGet(rsp->pVgroupInfos, j); if (taosHashPut(vgInfo->vgHash, &pInfo->vgId, sizeof(int32_t), pInfo, sizeof(SVgroupInfo)) != 0) { tscError("hash push failed, errno:%d", errno); - taosHashCleanup(vgInfo->vgHash); - taosMemoryFree(vgInfo); code = TSDB_CODE_OUT_OF_MEMORY; goto _return; } @@ -316,7 +329,7 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) { break; } - hbProcessUserAuthInfoRsp(kv->value, kv->valueLen, pCatalog); + hbProcessUserAuthInfoRsp(kv->value, kv->valueLen, pCatalog, pAppHbMgr); break; } case HEARTBEAT_KEY_DBINFO: { @@ -353,15 +366,6 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) { hbProcessStbInfoRsp(kv->value, kv->valueLen, 
pCatalog); break; } - case HEARTBEAT_KEY_USER_PASSINFO: { - if (kv->valueLen <= 0 || NULL == kv->value) { - tscError("invalid hb user pass info, len:%d, value:%p", kv->valueLen, kv->value); - break; - } - - hbProcessUserPassInfoRsp(kv->value, kv->valueLen, &pRsp->connKey, pAppHbMgr); - break; - } default: tscError("invalid hb key type:%d", kv->key); break; @@ -479,7 +483,6 @@ int32_t hbBuildQueryDesc(SQueryHbReqBasic *hbBasic, STscObj *pObj) { if (code) { taosArrayDestroy(desc.subDesc); desc.subDesc = NULL; - desc.subPlanNum = 0; } desc.subPlanNum = taosArrayGetSize(desc.subDesc); } else { @@ -543,7 +546,7 @@ int32_t hbGetQueryBasicInfo(SClientHbKey *connKey, SClientHbReq *req) { return TSDB_CODE_SUCCESS; } -static int32_t hbGetUserBasicInfo(SClientHbKey *connKey, SHbParam *param, SClientHbReq *req) { +static int32_t hbGetUserAuthInfo(SClientHbKey *connKey, SHbParam *param, SClientHbReq *req) { STscObj *pTscObj = (STscObj *)acquireTscObj(connKey->tscRid); if (!pTscObj) { tscWarn("tscObj rid %" PRIx64 " not exist", connKey->tscRid); @@ -552,46 +555,61 @@ static int32_t hbGetUserBasicInfo(SClientHbKey *connKey, SHbParam *param, SClien int32_t code = 0; - if (param && (param->passVer != INT32_MIN) && (param->passVer <= pTscObj->passInfo.ver)) { - tscDebug("hb got user basic info, no need since passVer %d <= %d", param->passVer, pTscObj->passInfo.ver); + SKv kv = {.key = HEARTBEAT_KEY_USER_AUTHINFO}; + SKv *pKv = NULL; + if ((pKv = taosHashGet(req->info, &kv.key, sizeof(kv.key)))) { + int32_t userNum = pKv->valueLen / sizeof(SUserAuthVersion); + SUserAuthVersion *userAuths = (SUserAuthVersion *)pKv->value; + for (int32_t i = 0; i < userNum; ++i) { + SUserAuthVersion *pUserAuth = userAuths + i; + // both key and user exist, update version + if (strncmp(pUserAuth->user, pTscObj->user, TSDB_USER_LEN) == 0) { + pUserAuth->version = htonl(-1); // force get userAuthInfo + goto _return; + } + } + // key exists, user not exist, append user + SUserAuthVersion *qUserAuth = + (SUserAuthVersion *)taosMemoryRealloc(pKv->value, (userNum + 1) * sizeof(SUserAuthVersion)); + if (qUserAuth) { + strncpy((qUserAuth + userNum)->user, pTscObj->user, TSDB_USER_LEN); + (qUserAuth + userNum)->version = htonl(-1); // force get userAuthInfo + pKv->value = qUserAuth; + pKv->valueLen += sizeof(SUserAuthVersion); + } else { + code = TSDB_CODE_OUT_OF_MEMORY; + } goto _return; } - SUserPassVersion *user = taosMemoryMalloc(sizeof(SUserPassVersion)); + // key/user not exist, add user + SUserAuthVersion *user = taosMemoryMalloc(sizeof(SUserAuthVersion)); if (!user) { code = TSDB_CODE_OUT_OF_MEMORY; goto _return; } - strncpy(user->user, pTscObj->user, TSDB_USER_LEN); - user->version = htonl(pTscObj->passInfo.ver); + tstrncpy(user->user, pTscObj->user, TSDB_USER_LEN); + user->version = htonl(-1); // force get userAuthInfo + kv.valueLen = sizeof(SUserAuthVersion); + kv.value = user; - SKv kv = { - .key = HEARTBEAT_KEY_USER_PASSINFO, - .valueLen = sizeof(SUserPassVersion), - .value = user, - }; - - tscDebug("hb got user basic info, valueLen:%d, user:%s, passVer:%d, tscRid:%" PRIi64, kv.valueLen, user->user, - pTscObj->passInfo.ver, connKey->tscRid); + tscDebug("hb got user auth info, valueLen:%d, user:%s, authVer:%d, tscRid:%" PRIi64, kv.valueLen, user->user, + pTscObj->authVer, connKey->tscRid); if (!req->info) { req->info = taosHashInit(64, hbKeyHashFunc, 1, HASH_ENTRY_LOCK); } if (taosHashPut(req->info, &kv.key, sizeof(kv.key), &kv, sizeof(kv)) < 0) { + taosMemoryFree(user); code = terrno ? 
terrno : TSDB_CODE_APP_ERROR; goto _return; } - // assign the passVer - if (param) { - param->passVer = pTscObj->passInfo.ver; - } - _return: releaseTscObj(connKey->tscRid); if (code) { - tscError("hb got user basic info failed since %s", terrstr(code)); + tscError("hb got user auth info failed since %s", terrstr(code)); } return code; @@ -749,14 +767,21 @@ int32_t hbQueryHbReqHandle(SClientHbKey *connKey, void *param, SClientHbReq *req hbGetQueryBasicInfo(connKey, req); - if (hbParam->passKeyCnt > 0) { - hbGetUserBasicInfo(connKey, hbParam, req); - } - if (hbParam->reqCnt == 0) { - code = hbGetExpiredUserInfo(connKey, pCatalog, req); - if (TSDB_CODE_SUCCESS != code) { - return code; + if (!taosHashGet(clientHbMgr.appHbHash, &hbParam->clusterId, sizeof(hbParam->clusterId))) { + code = hbGetExpiredUserInfo(connKey, pCatalog, req); + if (TSDB_CODE_SUCCESS != code) { + return code; + } + } + + // invoke after hbGetExpiredUserInfo + if (2 != atomic_load_8(&hbParam->pAppHbMgr->connHbFlag)) { + code = hbGetUserAuthInfo(connKey, hbParam, req); + if (TSDB_CODE_SUCCESS != code) { + return code; + } + atomic_store_8(&hbParam->pAppHbMgr->connHbFlag, 1); } code = hbGetExpiredDBInfo(connKey, pCatalog, req); @@ -770,7 +795,7 @@ int32_t hbQueryHbReqHandle(SClientHbKey *connKey, void *param, SClientHbReq *req } } - ++hbParam->reqCnt; // success to get catalog info + ++hbParam->reqCnt; // success to get catalog info return TSDB_CODE_SUCCESS; } @@ -815,9 +840,9 @@ SClientHbBatchReq *hbGatherAllInfo(SAppHbMgr *pAppHbMgr) { if (param.clusterId == 0) { // init param.clusterId = pOneReq->clusterId; - param.passVer = INT32_MIN; + param.pAppHbMgr = pAppHbMgr; + param.connHbFlag = atomic_load_8(&pAppHbMgr->connHbFlag); } - param.passKeyCnt = atomic_load_32(&pAppHbMgr->passKeyCnt); break; } default: @@ -901,6 +926,10 @@ static void *hbThreadFunc(void *param) { int sz = taosArrayGetSize(clientHbMgr.appHbMgrs); if (sz > 0) { hbGatherAppInfo(); + if (sz > 1 && !clientHbMgr.appHbHash) { + clientHbMgr.appHbHash = taosHashInit(0, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, HASH_NO_LOCK); + } + taosHashClear(clientHbMgr.appHbHash); } for (int i = 0; i < sz; i++) { @@ -953,7 +982,7 @@ static void *hbThreadFunc(void *param) { asyncSendMsgToServer(pAppInstInfo->pTransporter, &epSet, &transporterId, pInfo); tFreeClientHbBatchReq(pReq); // hbClearReqInfo(pAppHbMgr); - + taosHashPut(clientHbMgr.appHbHash, &pAppHbMgr->pAppInstInfo->clusterId, sizeof(uint64_t), NULL, 0); atomic_add_fetch_32(&pAppHbMgr->reportCnt, 1); } @@ -961,6 +990,7 @@ static void *hbThreadFunc(void *param) { taosMsleep(HEARTBEAT_INTERVAL); } + taosHashCleanup(clientHbMgr.appHbHash); return NULL; } @@ -1009,7 +1039,7 @@ SAppHbMgr *appHbMgrInit(SAppInstInfo *pAppInstInfo, char *key) { // init stat pAppHbMgr->startTime = taosGetTimestampMs(); pAppHbMgr->connKeyCnt = 0; - pAppHbMgr->passKeyCnt = 0; + pAppHbMgr->connHbFlag = 0; pAppHbMgr->reportCnt = 0; pAppHbMgr->reportBytes = 0; pAppHbMgr->key = taosStrdup(key); @@ -1127,7 +1157,6 @@ void hbMgrCleanUp() { appHbMgrCleanup(); taosArrayDestroy(clientHbMgr.appHbMgrs); taosThreadMutexUnlock(&clientHbMgr.lock); - clientHbMgr.appHbMgrs = NULL; } @@ -1180,12 +1209,6 @@ void hbDeregisterConn(STscObj *pTscObj, SClientHbKey connKey) { } atomic_sub_fetch_32(&pAppHbMgr->connKeyCnt, 1); - - taosThreadMutexLock(&pTscObj->mutex); - if (pTscObj->passInfo.fp) { - atomic_sub_fetch_32(&pAppHbMgr->passKeyCnt, 1); - } - taosThreadMutexUnlock(&pTscObj->mutex); } // set heart beat thread quit mode , if quicByKill 1 then 
kill thread else quit from inner diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 955c90fc81..14d6394fc4 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -26,7 +26,7 @@ #include "tpagedbuf.h" #include "tref.h" #include "tsched.h" - +#include "tversion.h" static int32_t initEpSetFromCfg(const char* firstEp, const char* secondEp, SCorEpSet* pEpSet); static SMsgSendInfo* buildConnectMsg(SRequestObj* pRequest); @@ -237,8 +237,9 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param, return TSDB_CODE_SUCCESS; } -int32_t buildPreviousRequest(SRequestObj *pRequest, const char* sql, SRequestObj** pNewRequest) { - int32_t code = buildRequest(pRequest->pTscObj->id, sql, strlen(sql), pRequest, pRequest->validateOnly, pNewRequest, 0); +int32_t buildPreviousRequest(SRequestObj* pRequest, const char* sql, SRequestObj** pNewRequest) { + int32_t code = + buildRequest(pRequest->pTscObj->id, sql, strlen(sql), pRequest, pRequest->validateOnly, pNewRequest, 0); if (TSDB_CODE_SUCCESS == code) { pRequest->relation.prevRefId = (*pNewRequest)->self; (*pNewRequest)->relation.nextRefId = pRequest->self; @@ -502,8 +503,7 @@ void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t pResInfo->userFields[i].bytes = pSchema[i].bytes; pResInfo->userFields[i].type = pSchema[i].type; - if (pSchema[i].type == TSDB_DATA_TYPE_VARCHAR || - pSchema[i].type == TSDB_DATA_TYPE_GEOMETRY) { + if (pSchema[i].type == TSDB_DATA_TYPE_VARCHAR || pSchema[i].type == TSDB_DATA_TYPE_GEOMETRY) { pResInfo->userFields[i].bytes -= VARSTR_HEADER_SIZE; } else if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR || pSchema[i].type == TSDB_DATA_TYPE_JSON) { pResInfo->userFields[i].bytes = (pResInfo->userFields[i].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; @@ -891,7 +891,7 @@ static bool incompletaFileParsing(SNode* pStmt) { void continuePostSubQuery(SRequestObj* pRequest, TAOS_ROW row) { SSqlCallbackWrapper* pWrapper = pRequest->pWrapper; - int32_t code = nodesAcquireAllocator(pWrapper->pParseCtx->allocatorId); + int32_t code = nodesAcquireAllocator(pWrapper->pParseCtx->allocatorId); if (TSDB_CODE_SUCCESS == code) { int64_t analyseStart = taosGetTimestampUs(); code = qContinueParsePostQuery(pWrapper->pParseCtx, pRequest->pQuery, (void**)row); @@ -934,7 +934,7 @@ void postSubQueryFetchCb(void* param, TAOS_RES* res, int32_t rowNum) { TAOS_ROW row = NULL; if (rowNum > 0) { - row = taos_fetch_row(res); // for single row only now + row = taos_fetch_row(res); // for single row only now } SRequestObj* pNextReq = acquireRequest(pRequest->relation.nextRefId); @@ -2135,6 +2135,7 @@ TSDB_SERVER_STATUS taos_check_server_status(const char* fqdn, int port, char* de connLimitNum = TMIN(connLimitNum, 500); rpcInit.connLimitNum = connLimitNum; rpcInit.timeToGetConn = tsTimeToGetAvailableConn; + taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); clientRpc = rpcOpen(&rpcInit); if (clientRpc == NULL) { @@ -2494,11 +2495,10 @@ TAOS_RES* taosQueryImplWithReqid(TAOS* taos, const char* sql, bool validateOnly, return pRequest; } +static void fetchCallback(void* pResult, void* param, int32_t code) { + SRequestObj* pRequest = (SRequestObj*)param; -static void fetchCallback(void *pResult, void *param, int32_t code) { - SRequestObj *pRequest = (SRequestObj *)param; - - SReqResultInfo *pResultInfo = &pRequest->body.resInfo; + SReqResultInfo* pResultInfo = &pRequest->body.resInfo; tscDebug("0x%" PRIx64 " enter scheduler fetch cb, code:%d - %s, reqId:0x%" 
PRIx64, pRequest->self, code, tstrerror(code), pRequest->requestId); @@ -2520,7 +2520,7 @@ static void fetchCallback(void *pResult, void *param, int32_t code) { } pRequest->code = - setQueryResultFromRsp(pResultInfo, (const SRetrieveTableRsp *)pResultInfo->pData, pResultInfo->convertUcs4, true); + setQueryResultFromRsp(pResultInfo, (const SRetrieveTableRsp*)pResultInfo->pData, pResultInfo->convertUcs4, true); if (pRequest->code != TSDB_CODE_SUCCESS) { pResultInfo->numOfRows = 0; pRequest->code = code; @@ -2531,19 +2531,19 @@ static void fetchCallback(void *pResult, void *param, int32_t code) { pRequest->self, pResultInfo->numOfRows, pResultInfo->totalRows, pResultInfo->completed, pRequest->requestId); - STscObj *pTscObj = pRequest->pTscObj; - SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary; - atomic_add_fetch_64((int64_t *)&pActivity->fetchBytes, pRequest->body.resInfo.payloadLen); + STscObj* pTscObj = pRequest->pTscObj; + SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary; + atomic_add_fetch_64((int64_t*)&pActivity->fetchBytes, pRequest->body.resInfo.payloadLen); } pRequest->body.fetchFp(pRequest->body.param, pRequest, pResultInfo->numOfRows); } -void taosAsyncFetchImpl(SRequestObj *pRequest, __taos_async_fn_t fp, void *param) { +void taosAsyncFetchImpl(SRequestObj* pRequest, __taos_async_fn_t fp, void* param) { pRequest->body.fetchFp = fp; pRequest->body.param = param; - SReqResultInfo *pResultInfo = &pRequest->body.resInfo; + SReqResultInfo* pResultInfo = &pRequest->body.resInfo; // this query has no results or error exists, return directly if (taos_num_fields(pRequest) == 0 || pRequest->code != TSDB_CODE_SUCCESS) { @@ -2578,5 +2578,3 @@ void taosAsyncFetchImpl(SRequestObj *pRequest, __taos_async_fn_t fp, void *param schedulerFetchRows(pRequest->body.queryJob, &req); } - - diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 63b16a30c5..e262ee04b9 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -135,11 +135,6 @@ int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param, int type) switch (type) { case TAOS_NOTIFY_PASSVER: { taosThreadMutexLock(&pObj->mutex); - if (fp && !pObj->passInfo.fp) { - atomic_add_fetch_32(&pObj->pAppInfo->pAppHbMgr->passKeyCnt, 1); - } else if (!fp && pObj->passInfo.fp) { - atomic_sub_fetch_32(&pObj->pAppInfo->pAppHbMgr->passKeyCnt, 1); - } pObj->passInfo.fp = fp; pObj->passInfo.param = param; taosThreadMutexUnlock(&pObj->mutex); @@ -563,13 +558,12 @@ int taos_select_db(TAOS *taos, const char *db) { return code; } - void taos_stop_query(TAOS_RES *res) { if (res == NULL || TD_RES_TMQ(res) || TD_RES_TMQ_META(res) || TD_RES_TMQ_METADATA(res)) { return; } - stopAllQueries((SRequestObj*)res); + stopAllQueries((SRequestObj *)res); } bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col) { @@ -790,7 +784,7 @@ void destorySqlCallbackWrapper(SSqlCallbackWrapper *pWrapper) { taosMemoryFree(pWrapper); } -void destroyCtxInRequest(SRequestObj* pRequest) { +void destroyCtxInRequest(SRequestObj *pRequest) { schedulerFreeJob(&pRequest->body.queryJob, 0); qDestroyQuery(pRequest->pQuery); pRequest->pQuery = NULL; @@ -798,7 +792,6 @@ void destroyCtxInRequest(SRequestObj* pRequest) { pRequest->pWrapper = NULL; } - static void doAsyncQueryFromAnalyse(SMetaData *pResultMeta, void *param, int32_t code) { SSqlCallbackWrapper *pWrapper = (SSqlCallbackWrapper *)param; SRequestObj *pRequest = pWrapper->pRequest; @@ -812,15 +805,15 @@ static void doAsyncQueryFromAnalyse(SMetaData 
*pResultMeta, void *param, int32_t if (TSDB_CODE_SUCCESS == code) { code = qAnalyseSqlSemantic(pWrapper->pParseCtx, pWrapper->pCatalogReq, pResultMeta, pQuery); } - + pRequest->metric.analyseCostUs += taosGetTimestampUs() - analyseStart; - + handleQueryAnslyseRes(pWrapper, pResultMeta, code); } -int32_t cloneCatalogReq(SCatalogReq* * ppTarget, SCatalogReq* pSrc) { - int32_t code = TSDB_CODE_SUCCESS; - SCatalogReq* pTarget = taosMemoryCalloc(1, sizeof(SCatalogReq)); +int32_t cloneCatalogReq(SCatalogReq **ppTarget, SCatalogReq *pSrc) { + int32_t code = TSDB_CODE_SUCCESS; + SCatalogReq *pTarget = taosMemoryCalloc(1, sizeof(SCatalogReq)); if (pTarget == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; } else { @@ -847,17 +840,16 @@ int32_t cloneCatalogReq(SCatalogReq* * ppTarget, SCatalogReq* pSrc) { return code; } - -void handleSubQueryFromAnalyse(SSqlCallbackWrapper *pWrapper, SMetaData *pResultMeta, SNode* pRoot) { - SRequestObj* pNewRequest = NULL; - SSqlCallbackWrapper* pNewWrapper = NULL; - int32_t code = buildPreviousRequest(pWrapper->pRequest, pWrapper->pRequest->sqlstr, &pNewRequest); +void handleSubQueryFromAnalyse(SSqlCallbackWrapper *pWrapper, SMetaData *pResultMeta, SNode *pRoot) { + SRequestObj *pNewRequest = NULL; + SSqlCallbackWrapper *pNewWrapper = NULL; + int32_t code = buildPreviousRequest(pWrapper->pRequest, pWrapper->pRequest->sqlstr, &pNewRequest); if (code) { handleQueryAnslyseRes(pWrapper, pResultMeta, code); return; } - pNewRequest->pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY); + pNewRequest->pQuery = (SQuery *)nodesMakeNode(QUERY_NODE_QUERY); if (NULL == pNewRequest->pQuery) { code = TSDB_CODE_OUT_OF_MEMORY; } else { @@ -876,16 +868,16 @@ void handleSubQueryFromAnalyse(SSqlCallbackWrapper *pWrapper, SMetaData *pResult } void handleQueryAnslyseRes(SSqlCallbackWrapper *pWrapper, SMetaData *pResultMeta, int32_t code) { - SRequestObj *pRequest = pWrapper->pRequest; - SQuery *pQuery = pRequest->pQuery; + SRequestObj *pRequest = pWrapper->pRequest; + SQuery *pQuery = pRequest->pQuery; if (code == TSDB_CODE_SUCCESS && pQuery->pPrevRoot) { - SNode* prevRoot = pQuery->pPrevRoot; + SNode *prevRoot = pQuery->pPrevRoot; pQuery->pPrevRoot = NULL; handleSubQueryFromAnalyse(pWrapper, pResultMeta, prevRoot); return; } - + if (code == TSDB_CODE_SUCCESS) { pRequest->stableQuery = pQuery->stableQuery; if (pQuery->pRoot) { @@ -1048,7 +1040,7 @@ int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt) { } int32_t prepareAndParseSqlSyntax(SSqlCallbackWrapper **ppWrapper, SRequestObj *pRequest, bool updateMetaForce) { - int32_t code = TSDB_CODE_SUCCESS; + int32_t code = TSDB_CODE_SUCCESS; STscObj *pTscObj = pRequest->pTscObj; SSqlCallbackWrapper *pWrapper = taosMemoryCalloc(1, sizeof(SSqlCallbackWrapper)); if (pWrapper == NULL) { @@ -1086,7 +1078,6 @@ int32_t prepareAndParseSqlSyntax(SSqlCallbackWrapper **ppWrapper, SRequestObj *p return code; } - void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) { SSqlCallbackWrapper *pWrapper = NULL; int32_t code = TSDB_CODE_SUCCESS; @@ -1133,12 +1124,12 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) { } void restartAsyncQuery(SRequestObj *pRequest, int32_t code) { - int32_t reqIdx = 0; + int32_t reqIdx = 0; SRequestObj *pReqList[16] = {NULL}; SRequestObj *pUserReq = NULL; pReqList[0] = pRequest; - uint64_t tmpRefId = 0; - SRequestObj* pTmp = pRequest; + uint64_t tmpRefId = 0; + SRequestObj *pTmp = pRequest; while (pTmp->relation.prevRefId) { tmpRefId = pTmp->relation.prevRefId; pTmp = 
acquireRequest(tmpRefId); @@ -1146,9 +1137,9 @@ void restartAsyncQuery(SRequestObj *pRequest, int32_t code) { pReqList[++reqIdx] = pTmp; releaseRequest(tmpRefId); } else { - tscError("0x%" PRIx64 ", prev req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pTmp->self, - tmpRefId, pTmp->requestId); - break; + tscError("0x%" PRIx64 ", prev req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pTmp->self, tmpRefId, + pTmp->requestId); + break; } } @@ -1157,11 +1148,11 @@ void restartAsyncQuery(SRequestObj *pRequest, int32_t code) { pTmp = acquireRequest(tmpRefId); if (pTmp) { tmpRefId = pTmp->relation.nextRefId; - removeRequest(pTmp->self); + removeRequest(pTmp->self); releaseRequest(pTmp->self); } else { tscError("0x%" PRIx64 " is not there", tmpRefId); - break; + break; } } diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c index d6fdb29b59..9ab618cf3a 100644 --- a/source/client/src/clientMsgHandler.c +++ b/source/client/src/clientMsgHandler.c @@ -131,6 +131,7 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) { pTscObj->connType = connectRsp.connType; pTscObj->passInfo.ver = connectRsp.passVer; + pTscObj->authVer = connectRsp.authVer; hbRegisterConn(pTscObj->pAppInfo->pAppHbMgr, pTscObj->id, connectRsp.clusterId, connectRsp.connType); diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index cfc8ae9186..90b10e0920 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -1286,6 +1286,10 @@ static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) { taosArrayPush(pArray, &pVgData); pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY); + if (NULL == pQuery) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE; pQuery->msgType = TDMT_VND_ALTER_TABLE; pQuery->stableQuery = false; diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index c13f52a3d8..6a0c3171fb 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -151,7 +151,7 @@ typedef struct { int32_t vgId; int32_t vgStatus; int32_t vgSkipCnt; // here used to mark the slow vgroups - bool receivedInfoFromVnode; // has already received info from vnode +// bool receivedInfoFromVnode; // has already received info from vnode int64_t emptyBlockReceiveTs; // once empty block is received, idle for ignoreCnt then start to poll data bool seekUpdated; // offset is updated by seek operator, therefore, not update by vnode rsp. 
SEpSet epSet; @@ -636,6 +636,7 @@ static void asyncCommitOffset(tmq_t* tmq, const TAOS_RES* pRes, int32_t type, tm pParamSet->callbackFn = pCommitFp; pParamSet->userParam = userParam; + taosRLockLatch(&tmq->lock); int32_t numOfTopics = taosArrayGetSize(tmq->clientTopics); tscDebug("consumer:0x%" PRIx64 " do manual commit offset for %s, vgId:%d", tmq->consumerId, pTopicName, vgId); @@ -646,6 +647,7 @@ static void asyncCommitOffset(tmq_t* tmq, const TAOS_RES* pRes, int32_t type, tm pTopicName, numOfTopics); taosMemoryFree(pParamSet); pCommitFp(tmq, TSDB_CODE_SUCCESS, userParam); + taosRUnLockLatch(&tmq->lock); return; } @@ -663,6 +665,7 @@ static void asyncCommitOffset(tmq_t* tmq, const TAOS_RES* pRes, int32_t type, tm vgId, numOfVgroups, pTopicName); taosMemoryFree(pParamSet); pCommitFp(tmq, TSDB_CODE_SUCCESS, userParam); + taosRUnLockLatch(&tmq->lock); return; } @@ -675,10 +678,13 @@ static void asyncCommitOffset(tmq_t* tmq, const TAOS_RES* pRes, int32_t type, tm taosMemoryFree(pParamSet); pCommitFp(tmq, code, userParam); } + // update the offset value. + pVg->offsetInfo.committedOffset = pVg->offsetInfo.currentOffset; } else { // do not perform commit, callback user function directly. taosMemoryFree(pParamSet); pCommitFp(tmq, code, userParam); } + taosRUnLockLatch(&tmq->lock); } static void asyncCommitAllOffsets(tmq_t* tmq, tmq_commit_cb* pCommitFp, void* userParam) { @@ -696,6 +702,7 @@ static void asyncCommitAllOffsets(tmq_t* tmq, tmq_commit_cb* pCommitFp, void* us // init as 1 to prevent concurrency issue pParamSet->waitingRspNum = 1; + taosRLockLatch(&tmq->lock); int32_t numOfTopics = taosArrayGetSize(tmq->clientTopics); tscDebug("consumer:0x%" PRIx64 " start to commit offset for %d topics", tmq->consumerId, numOfTopics); @@ -725,6 +732,7 @@ static void asyncCommitAllOffsets(tmq_t* tmq, tmq_commit_cb* pCommitFp, void* us } } } + taosRUnLockLatch(&tmq->lock); tscDebug("consumer:0x%" PRIx64 " total commit:%d for %d topics", tmq->consumerId, pParamSet->waitingRspNum - 1, numOfTopics); @@ -799,6 +807,7 @@ void tmqSendHbReq(void* param, void* tmrId) { SMqHbReq req = {0}; req.consumerId = tmq->consumerId; req.epoch = tmq->epoch; + taosRLockLatch(&tmq->lock); // if(tmq->needReportOffsetRows){ req.topics = taosArrayInit(taosArrayGetSize(tmq->clientTopics), sizeof(TopicOffsetRows)); for(int i = 0; i < taosArrayGetSize(tmq->clientTopics); i++){ @@ -820,6 +829,7 @@ void tmqSendHbReq(void* param, void* tmrId) { } // tmq->needReportOffsetRows = false; // } + taosRUnLockLatch(&tmq->lock); int32_t tlen = tSerializeSMqHbReq(NULL, 0, &req); if (tlen < 0) { @@ -986,10 +996,12 @@ int32_t tmq_subscription(tmq_t* tmq, tmq_list_t** topics) { if (*topics == NULL) { *topics = tmq_list_new(); } + taosRLockLatch(&tmq->lock); for (int i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) { SMqClientTopic* topic = taosArrayGet(tmq->clientTopics, i); tmq_list_append(*topics, strchr(topic->topicName, '.') + 1); } + taosRUnLockLatch(&tmq->lock); return 0; } @@ -1414,7 +1426,7 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) { tDecoderClear(&decoder); memcpy(&pRspWrapper->dataRsp, pMsg->pData, sizeof(SMqRspHead)); - char buf[TSDB_OFFSET_LEN]; + char buf[TSDB_OFFSET_LEN] = {0}; tFormatOffset(buf, TSDB_OFFSET_LEN, &pRspWrapper->dataRsp.rspOffset); tscDebug("consumer:0x%" PRIx64 " recv poll rsp, vgId:%d, req ver:%" PRId64 ", rsp:%s type %d, reqId:0x%" PRIx64, tmq->consumerId, vgId, pRspWrapper->dataRsp.reqOffset.version, buf, rspType, requestId); @@ -1509,7 +1521,7 @@ static void 
initClientTopicFromRsp(SMqClientTopic* pTopic, SMqSubTopicEp* pTopic clientVg.offsetInfo.walVerBegin = -1; clientVg.offsetInfo.walVerEnd = -1; clientVg.seekUpdated = false; - clientVg.receivedInfoFromVnode = false; +// clientVg.receivedInfoFromVnode = false; taosArrayPush(pTopic->vgs, &clientVg); } @@ -1527,12 +1539,7 @@ static void freeClientVgInfo(void* param) { static bool doUpdateLocalEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) { bool set = false; - int32_t topicNumCur = taosArrayGetSize(tmq->clientTopics); int32_t topicNumGet = taosArrayGetSize(pRsp->topics); - - char vgKey[TSDB_TOPIC_FNAME_LEN + 22]; - tscInfo("consumer:0x%" PRIx64 " update ep epoch from %d to epoch %d, incoming topics:%d, existed topics:%d", - tmq->consumerId, tmq->epoch, epoch, topicNumGet, topicNumCur); if (epoch <= tmq->epoch) { return false; } @@ -1548,6 +1555,12 @@ static bool doUpdateLocalEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) return false; } + taosWLockLatch(&tmq->lock); + int32_t topicNumCur = taosArrayGetSize(tmq->clientTopics); + + char vgKey[TSDB_TOPIC_FNAME_LEN + 22]; + tscInfo("consumer:0x%" PRIx64 " update ep epoch from %d to epoch %d, incoming topics:%d, existed topics:%d", + tmq->consumerId, tmq->epoch, epoch, topicNumGet, topicNumCur); // todo extract method for (int32_t i = 0; i < topicNumCur; i++) { // find old topic @@ -1559,7 +1572,7 @@ static bool doUpdateLocalEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) SMqClientVg* pVgCur = taosArrayGet(pTopicCur->vgs, j); makeTopicVgroupKey(vgKey, pTopicCur->topicName, pVgCur->vgId); - char buf[TSDB_OFFSET_LEN]; + char buf[TSDB_OFFSET_LEN] = {0}; tFormatOffset(buf, TSDB_OFFSET_LEN, &pVgCur->offsetInfo.currentOffset); tscInfo("consumer:0x%" PRIx64 ", epoch:%d vgId:%d vgKey:%s, offset:%s", tmq->consumerId, epoch, pVgCur->vgId, vgKey, buf); @@ -1579,7 +1592,6 @@ static bool doUpdateLocalEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) taosHashCleanup(pVgOffsetHashMap); - taosWLockLatch(&tmq->lock); // destroy current buffered existed topics info if (tmq->clientTopics) { taosArrayDestroyEx(tmq->clientTopics, freeClientVgInfo); @@ -1788,7 +1800,7 @@ static int32_t doTmqPollImpl(tmq_t* pTmq, SMqClientTopic* pTopic, SMqClientVg* p sendInfo->msgType = TDMT_VND_TMQ_CONSUME; int64_t transporterId = 0; - char offsetFormatBuf[TSDB_OFFSET_LEN]; + char offsetFormatBuf[TSDB_OFFSET_LEN] = {0}; tFormatOffset(offsetFormatBuf, tListLen(offsetFormatBuf), &pVg->offsetInfo.currentOffset); tscDebug("consumer:0x%" PRIx64 " send poll to %s vgId:%d, epoch %d, req:%s, reqId:0x%" PRIx64, pTmq->consumerId, @@ -1807,6 +1819,9 @@ static int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) { if(atomic_load_8(&tmq->status) == TMQ_CONSUMER_STATUS__RECOVER){ return 0; } + int32_t code = 0; + + taosWLockLatch(&tmq->lock); int32_t numOfTopics = taosArrayGetSize(tmq->clientTopics); tscDebug("consumer:0x%" PRIx64 " start to poll data, numOfTopics:%d", tmq->consumerId, numOfTopics); @@ -1816,7 +1831,7 @@ static int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) { for (int j = 0; j < numOfVg; j++) { SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j); - if (taosGetTimestampMs() - pVg->emptyBlockReceiveTs < EMPTY_BLOCK_POLL_IDLE_DURATION) { // less than 100ms + if (taosGetTimestampMs() - pVg->emptyBlockReceiveTs < EMPTY_BLOCK_POLL_IDLE_DURATION) { // less than 10ms tscTrace("consumer:0x%" PRIx64 " epoch %d, vgId:%d idle for 10ms before start next poll", tmq->consumerId, tmq->epoch, pVg->vgId); continue; @@ -1831,15 +1846,17 @@ static int32_t tmqPollImpl(tmq_t* tmq, 
int64_t timeout) { } atomic_store_32(&pVg->vgSkipCnt, 0); - int32_t code = doTmqPollImpl(tmq, pTopic, pVg, timeout); + code = doTmqPollImpl(tmq, pTopic, pVg, timeout); if (code != TSDB_CODE_SUCCESS) { - return code; + goto end; } } } - tscDebug("consumer:0x%" PRIx64 " end to poll data", tmq->consumerId); - return 0; +end: + taosWUnLockLatch(&tmq->lock); + tscDebug("consumer:0x%" PRIx64 " end to poll data, code:%d", tmq->consumerId, code); + return code; } static int32_t tmqHandleNoPollRsp(tmq_t* tmq, SMqRspWrapper* rspWrapper, bool* pReset) { @@ -1862,6 +1879,23 @@ static int32_t tmqHandleNoPollRsp(tmq_t* tmq, SMqRspWrapper* rspWrapper, bool* p return 0; } +static void updateVgInfo(SMqClientVg* pVg, STqOffsetVal* offset, int64_t sver, int64_t ever, int64_t consumerId){ + if (!pVg->seekUpdated) { + tscDebug("consumer:0x%" PRIx64" local offset is update, since seekupdate not set", consumerId); + pVg->offsetInfo.currentOffset = *offset; + } else { + tscDebug("consumer:0x%" PRIx64" local offset is NOT update, since seekupdate is set", consumerId); + } + + // update the status + atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE); + + // update the valid wal version range + pVg->offsetInfo.walVerBegin = sver; + pVg->offsetInfo.walVerEnd = ever; +// pVg->receivedInfoFromVnode = true; +} + static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { tscDebug("consumer:0x%" PRIx64 " start to handle the rsp, total:%d", tmq->consumerId, tmq->qall->numOfItems); @@ -1891,12 +1925,14 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { SMqDataRsp* pDataRsp = &pollRspWrapper->dataRsp; if (pDataRsp->head.epoch == consumerEpoch) { + taosWLockLatch(&tmq->lock); SMqClientVg* pVg = getVgInfo(tmq, pollRspWrapper->topicName, pollRspWrapper->vgId); pollRspWrapper->vgHandle = pVg; pollRspWrapper->topicHandle = getTopicInfo(tmq, pollRspWrapper->topicName); if(pollRspWrapper->vgHandle == NULL || pollRspWrapper->topicHandle == NULL){ tscError("consumer:0x%" PRIx64 " get vg or topic error, topic:%s vgId:%d", tmq->consumerId, pollRspWrapper->topicName, pollRspWrapper->vgId); + taosWUnLockLatch(&tmq->lock); return NULL; } // update the epset @@ -1908,24 +1944,9 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { pVg->epSet = *pollRspWrapper->pEpset; } - // update the local offset value only for the returned values, only when the local offset is NOT updated - // by tmq_offset_seek function - if (!pVg->seekUpdated) { - tscDebug("consumer:0x%" PRIx64" local offset is update, since seekupdate not set", tmq->consumerId); - pVg->offsetInfo.currentOffset = pDataRsp->rspOffset; - } else { - tscDebug("consumer:0x%" PRIx64" local offset is NOT update, since seekupdate is set", tmq->consumerId); - } + updateVgInfo(pVg, &pDataRsp->rspOffset, pDataRsp->head.walsver, pDataRsp->head.walever, tmq->consumerId); - // update the status - atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE); - - // update the valid wal version range - pVg->offsetInfo.walVerBegin = pDataRsp->head.walsver; - pVg->offsetInfo.walVerEnd = pDataRsp->head.walever; - pVg->receivedInfoFromVnode = true; - - char buf[TSDB_OFFSET_LEN]; + char buf[TSDB_OFFSET_LEN] = {0}; tFormatOffset(buf, TSDB_OFFSET_LEN, &pDataRsp->rspOffset); if (pDataRsp->blockNum == 0) { tscDebug("consumer:0x%" PRIx64 " empty block received, vgId:%d, offset:%s, vg total:%" PRId64 @@ -1944,8 +1965,10 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { tmq->consumerId, pVg->vgId, buf, 
pDataRsp->blockNum, numOfRows, pVg->numOfRows, tmq->totalRows, pollRspWrapper->reqId); taosFreeQitem(pollRspWrapper); + taosWUnLockLatch(&tmq->lock); return pRsp; } + taosWUnLockLatch(&tmq->lock); } else { tscDebug("consumer:0x%" PRIx64 " vgId:%d msg discard since epoch mismatch: msg epoch %d, consumer epoch %d", tmq->consumerId, pollRspWrapper->vgId, pDataRsp->head.epoch, consumerEpoch); @@ -1960,23 +1983,22 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { tscDebug("consumer:0x%" PRIx64 " process meta rsp", tmq->consumerId); if (pollRspWrapper->metaRsp.head.epoch == consumerEpoch) { + taosWLockLatch(&tmq->lock); SMqClientVg* pVg = getVgInfo(tmq, pollRspWrapper->topicName, pollRspWrapper->vgId); pollRspWrapper->vgHandle = pVg; pollRspWrapper->topicHandle = getTopicInfo(tmq, pollRspWrapper->topicName); if(pollRspWrapper->vgHandle == NULL || pollRspWrapper->topicHandle == NULL){ tscError("consumer:0x%" PRIx64 " get vg or topic error, topic:%s vgId:%d", tmq->consumerId, pollRspWrapper->topicName, pollRspWrapper->vgId); + taosWUnLockLatch(&tmq->lock); return NULL; } - if(pollRspWrapper->metaRsp.rspOffset.type != 0){ // if offset is validate - pVg->offsetInfo.currentOffset = pollRspWrapper->metaRsp.rspOffset; - } - - atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE); + updateVgInfo(pVg, &pollRspWrapper->metaRsp.rspOffset, pollRspWrapper->metaRsp.head.walsver, pollRspWrapper->metaRsp.head.walever, tmq->consumerId); // build rsp SMqMetaRspObj* pRsp = tmqBuildMetaRspFromWrapper(pollRspWrapper); taosFreeQitem(pollRspWrapper); + taosWUnLockLatch(&tmq->lock); return pRsp; } else { tscDebug("consumer:0x%" PRIx64 " vgId:%d msg discard since epoch mismatch: msg epoch %d, consumer epoch %d", @@ -1989,27 +2011,18 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { int32_t consumerEpoch = atomic_load_32(&tmq->epoch); if (pollRspWrapper->taosxRsp.head.epoch == consumerEpoch) { + taosWLockLatch(&tmq->lock); SMqClientVg* pVg = getVgInfo(tmq, pollRspWrapper->topicName, pollRspWrapper->vgId); pollRspWrapper->vgHandle = pVg; pollRspWrapper->topicHandle = getTopicInfo(tmq, pollRspWrapper->topicName); if(pollRspWrapper->vgHandle == NULL || pollRspWrapper->topicHandle == NULL){ tscError("consumer:0x%" PRIx64 " get vg or topic error, topic:%s vgId:%d", tmq->consumerId, pollRspWrapper->topicName, pollRspWrapper->vgId); + taosWUnLockLatch(&tmq->lock); return NULL; } - // update the local offset value only for the returned values, only when the local offset is NOT updated - // by tmq_offset_seek function - if (!pVg->seekUpdated) { - if(pollRspWrapper->taosxRsp.rspOffset.type != 0) { // if offset is validate - tscDebug("consumer:0x%" PRIx64" local offset is update, since seekupdate not set", tmq->consumerId); - pVg->offsetInfo.currentOffset = pollRspWrapper->taosxRsp.rspOffset; - } - } else { - tscDebug("consumer:0x%" PRIx64" local offset is NOT update, since seekupdate is set", tmq->consumerId); - } - - atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE); + updateVgInfo(pVg, &pollRspWrapper->taosxRsp.rspOffset, pollRspWrapper->taosxRsp.head.walsver, pollRspWrapper->taosxRsp.head.walever, tmq->consumerId); if (pollRspWrapper->taosxRsp.blockNum == 0) { tscDebug("consumer:0x%" PRIx64 " taosx empty block received, vgId:%d, vg total:%" PRId64 ", reqId:0x%" PRIx64, @@ -2017,6 +2030,7 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { pVg->emptyBlockReceiveTs = taosGetTimestampMs(); pRspWrapper = tmqFreeRspWrapper(pRspWrapper); 
taosFreeQitem(pollRspWrapper); + taosWUnLockLatch(&tmq->lock); continue; } else { pVg->emptyBlockReceiveTs = 0; // reset the ts @@ -2033,16 +2047,16 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { tmq->totalRows += numOfRows; - char buf[TSDB_OFFSET_LEN]; + char buf[TSDB_OFFSET_LEN] = {0}; tFormatOffset(buf, TSDB_OFFSET_LEN, &pVg->offsetInfo.currentOffset); tscDebug("consumer:0x%" PRIx64 " process taosx poll rsp, vgId:%d, offset:%s, blocks:%d, rows:%" PRId64 - ", vg total:%" PRId64 ", total:%" PRId64 ", reqId:0x%" PRIx64, + ", vg total:%" PRId64 ", total:%" PRId64 ", reqId:0x%" PRIx64, tmq->consumerId, pVg->vgId, buf, pollRspWrapper->dataRsp.blockNum, numOfRows, pVg->numOfRows, tmq->totalRows, pollRspWrapper->reqId); taosFreeQitem(pollRspWrapper); + taosWUnLockLatch(&tmq->lock); return pRsp; - } else { tscDebug("consumer:0x%" PRIx64 " vgId:%d msg discard since epoch mismatch: msg epoch %d, consumer epoch %d", tmq->consumerId, pollRspWrapper->vgId, pollRspWrapper->taosxRsp.head.epoch, consumerEpoch); @@ -2121,7 +2135,8 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) { } } -static void displayConsumeStatistics(const tmq_t* pTmq) { +static void displayConsumeStatistics(tmq_t* pTmq) { + taosRLockLatch(&pTmq->lock); int32_t numOfTopics = taosArrayGetSize(pTmq->clientTopics); tscDebug("consumer:0x%" PRIx64 " closing poll:%" PRId64 " rows:%" PRId64 " topics:%d, final epoch:%d", pTmq->consumerId, pTmq->pollCnt, pTmq->totalRows, numOfTopics, pTmq->epoch); @@ -2137,7 +2152,7 @@ static void displayConsumeStatistics(const tmq_t* pTmq) { tscDebug("topic:%s, %d. vgId:%d rows:%" PRId64, pTopics->topicName, j, pVg->vgId, pVg->numOfRows); } } - + taosRUnLockLatch(&pTmq->lock); tscDebug("consumer:0x%" PRIx64 " rows dist end", pTmq->consumerId); } @@ -2533,6 +2548,9 @@ static int32_t tmqGetWalInfoCb(void* param, SDataBuf* pMsg, int32_t code) { } static void destroyCommonInfo(SMqVgCommon* pCommon) { + if(pCommon == NULL){ + return; + } taosArrayDestroy(pCommon->pList); tsem_destroy(&pCommon->rsp); taosThreadMutexDestroy(&pCommon->mutex); @@ -2540,56 +2558,75 @@ static void destroyCommonInfo(SMqVgCommon* pCommon) { taosMemoryFree(pCommon); } +static bool isInSnapshotMode(int8_t type, bool useSnapshot){ + if ((type < TMQ_OFFSET__LOG && useSnapshot) || type > TMQ_OFFSET__LOG) { + return true; + } + return false; +} + int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_assignment** assignment, int32_t* numOfAssignment) { *numOfAssignment = 0; *assignment = NULL; + SMqVgCommon* pCommon = NULL; int32_t accId = tmq->pTscObj->acctId; char tname[128] = {0}; sprintf(tname, "%d.%s", accId, pTopicName); + int32_t code = TSDB_CODE_SUCCESS; + taosWLockLatch(&tmq->lock); SMqClientTopic* pTopic = getTopicByName(tmq, tname); if (pTopic == NULL) { - return TSDB_CODE_INVALID_PARA; + code = TSDB_CODE_INVALID_PARA; + goto end; } // in case of snapshot is opened, no valid offset will return *numOfAssignment = taosArrayGetSize(pTopic->vgs); + for (int32_t j = 0; j < (*numOfAssignment); ++j) { + SMqClientVg* pClientVg = taosArrayGet(pTopic->vgs, j); + int32_t type = pClientVg->offsetInfo.currentOffset.type; + if (isInSnapshotMode(type, tmq->useSnapshot)) { + tscError("consumer:0x%" PRIx64 " offset type:%d not wal version, assignment not allowed", tmq->consumerId, type); + code = TSDB_CODE_TMQ_SNAPSHOT_ERROR; + goto end; + } + } *assignment = taosMemoryCalloc(*numOfAssignment, sizeof(tmq_topic_assignment)); if (*assignment == NULL) { tscError("consumer:0x%" PRIx64 
" failed to malloc buffer, size:%" PRIzu, tmq->consumerId, (*numOfAssignment) * sizeof(tmq_topic_assignment)); - return TSDB_CODE_OUT_OF_MEMORY; + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; } bool needFetch = false; for (int32_t j = 0; j < (*numOfAssignment); ++j) { SMqClientVg* pClientVg = taosArrayGet(pTopic->vgs, j); - if (!pClientVg->receivedInfoFromVnode) { + if (pClientVg->offsetInfo.currentOffset.type != TMQ_OFFSET__LOG) { needFetch = true; break; } tmq_topic_assignment* pAssignment = &(*assignment)[j]; - if (pClientVg->offsetInfo.currentOffset.type == TMQ_OFFSET__LOG) { - pAssignment->currentOffset = pClientVg->offsetInfo.currentOffset.version; - } else { - pAssignment->currentOffset = 0; - } - + pAssignment->currentOffset = pClientVg->offsetInfo.currentOffset.version; pAssignment->begin = pClientVg->offsetInfo.walVerBegin; pAssignment->end = pClientVg->offsetInfo.walVerEnd; pAssignment->vgId = pClientVg->vgId; + tscInfo("consumer:0x%" PRIx64 " get assignment from local:%d->%" PRId64, tmq->consumerId, + pAssignment->vgId, pAssignment->currentOffset); } if (needFetch) { - SMqVgCommon* pCommon = taosMemoryCalloc(1, sizeof(SMqVgCommon)); + pCommon = taosMemoryCalloc(1, sizeof(SMqVgCommon)); if (pCommon == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; - return terrno; + code = terrno; + goto end; } pCommon->pList= taosArrayInit(4, sizeof(tmq_topic_assignment)); @@ -2604,8 +2641,8 @@ int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_a SMqVgWalInfoParam* pParam = taosMemoryMalloc(sizeof(SMqVgWalInfoParam)); if (pParam == NULL) { - destroyCommonInfo(pCommon); - return terrno; + code = terrno; + goto end; } pParam->epoch = tmq->epoch; @@ -2619,30 +2656,30 @@ int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_a int32_t msgSize = tSerializeSMqPollReq(NULL, 0, &req); if (msgSize < 0) { taosMemoryFree(pParam); - destroyCommonInfo(pCommon); - return terrno; + code = terrno; + goto end; } char* msg = taosMemoryCalloc(1, msgSize); if (NULL == msg) { taosMemoryFree(pParam); - destroyCommonInfo(pCommon); - return terrno; + code = terrno; + goto end; } if (tSerializeSMqPollReq(msg, msgSize, &req) < 0) { taosMemoryFree(msg); taosMemoryFree(pParam); - destroyCommonInfo(pCommon); - return terrno; + code = terrno; + goto end; } SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo)); if (sendInfo == NULL) { taosMemoryFree(pParam); taosMemoryFree(msg); - destroyCommonInfo(pCommon); - return terrno; + code = terrno; + goto end; } sendInfo->msgInfo = (SDataBuf){.pData = msg, .len = msgSize, .handle = NULL}; @@ -2653,29 +2690,26 @@ int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_a sendInfo->msgType = TDMT_VND_TMQ_VG_WALINFO; int64_t transporterId = 0; - char offsetFormatBuf[TSDB_OFFSET_LEN]; + char offsetFormatBuf[TSDB_OFFSET_LEN] = {0}; tFormatOffset(offsetFormatBuf, tListLen(offsetFormatBuf), &pClientVg->offsetInfo.currentOffset); tscInfo("consumer:0x%" PRIx64 " %s retrieve wal info vgId:%d, epoch %d, req:%s, reqId:0x%" PRIx64, - tmq->consumerId, pTopic->topicName, pClientVg->vgId, tmq->epoch, offsetFormatBuf, req.reqId); + tmq->consumerId, pTopic->topicName, pClientVg->vgId, tmq->epoch, offsetFormatBuf, req.reqId); asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &pClientVg->epSet, &transporterId, sendInfo); } tsem_wait(&pCommon->rsp); - int32_t code = pCommon->code; + code = pCommon->code; terrno = code; if (code != TSDB_CODE_SUCCESS) { - taosMemoryFree(*assignment); - *assignment = NULL; - 
*numOfAssignment = 0; - } else { - int32_t num = taosArrayGetSize(pCommon->pList); - for(int32_t i = 0; i < num; ++i) { - (*assignment)[i] = *(tmq_topic_assignment*)taosArrayGet(pCommon->pList, i); - } - *numOfAssignment = num; + goto end; } + int32_t num = taosArrayGetSize(pCommon->pList); + for(int32_t i = 0; i < num; ++i) { + (*assignment)[i] = *(tmq_topic_assignment*)taosArrayGet(pCommon->pList, i); + } + *numOfAssignment = num; for (int32_t j = 0; j < (*numOfAssignment); ++j) { tmq_topic_assignment* p = &(*assignment)[j]; @@ -2687,26 +2721,23 @@ int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_a } SVgOffsetInfo* pOffsetInfo = &pClientVg->offsetInfo; - - pOffsetInfo->currentOffset.type = TMQ_OFFSET__LOG; - - char offsetBuf[TSDB_OFFSET_LEN] = {0}; - tFormatOffset(offsetBuf, tListLen(offsetBuf), &pOffsetInfo->currentOffset); - - tscInfo("vgId:%d offset is update to:%s", p->vgId, offsetBuf); + tscInfo("vgId:%d offset is update to:%"PRId64, p->vgId, p->currentOffset); pOffsetInfo->walVerBegin = p->begin; pOffsetInfo->walVerEnd = p->end; - pOffsetInfo->currentOffset.version = p->currentOffset; - pOffsetInfo->committedOffset.version = p->currentOffset; } } - - destroyCommonInfo(pCommon); - return code; - } else { - return TSDB_CODE_SUCCESS; } + +end: + if(code != TSDB_CODE_SUCCESS){ + taosMemoryFree(*assignment); + *assignment = NULL; + *numOfAssignment = 0; + } + destroyCommonInfo(pCommon); + taosWUnLockLatch(&tmq->lock); + return code; } void tmq_free_assignment(tmq_topic_assignment* pAssignment) { @@ -2727,9 +2758,11 @@ int32_t tmq_offset_seek(tmq_t* tmq, const char* pTopicName, int32_t vgId, int64_ char tname[128] = {0}; sprintf(tname, "%d.%s", accId, pTopicName); + taosWLockLatch(&tmq->lock); SMqClientTopic* pTopic = getTopicByName(tmq, tname); if (pTopic == NULL) { tscError("consumer:0x%" PRIx64 " invalid topic name:%s", tmq->consumerId, pTopicName); + taosWUnLockLatch(&tmq->lock); return TSDB_CODE_INVALID_PARA; } @@ -2745,56 +2778,58 @@ int32_t tmq_offset_seek(tmq_t* tmq, const char* pTopicName, int32_t vgId, int64_ if (pVg == NULL) { tscError("consumer:0x%" PRIx64 " invalid vgroup id:%d", tmq->consumerId, vgId); + taosWUnLockLatch(&tmq->lock); return TSDB_CODE_INVALID_PARA; } SVgOffsetInfo* pOffsetInfo = &pVg->offsetInfo; int32_t type = pOffsetInfo->currentOffset.type; - if (type != TMQ_OFFSET__LOG && !OFFSET_IS_RESET_OFFSET(type)) { + if (isInSnapshotMode(type, tmq->useSnapshot)) { tscError("consumer:0x%" PRIx64 " offset type:%d not wal version, seek not allowed", tmq->consumerId, type); - return TSDB_CODE_INVALID_PARA; + taosWUnLockLatch(&tmq->lock); + return TSDB_CODE_TMQ_SNAPSHOT_ERROR; } if (type == TMQ_OFFSET__LOG && (offset < pOffsetInfo->walVerBegin || offset > pOffsetInfo->walVerEnd)) { tscError("consumer:0x%" PRIx64 " invalid seek params, offset:%" PRId64 ", valid range:[%" PRId64 ", %" PRId64 "]", tmq->consumerId, offset, pOffsetInfo->walVerBegin, pOffsetInfo->walVerEnd); + taosWUnLockLatch(&tmq->lock); return TSDB_CODE_INVALID_PARA; } // update the offset, and then commit to vnode - if (pOffsetInfo->currentOffset.type == TMQ_OFFSET__LOG) { - pOffsetInfo->currentOffset.version = offset; - pOffsetInfo->committedOffset.version = INT64_MIN; - pVg->seekUpdated = true; - } - - SMqRspObj rspObj = {.resType = RES_TYPE__TMQ, .vgId = pVg->vgId}; - tstrncpy(rspObj.topic, tname, tListLen(rspObj.topic)); + pOffsetInfo->currentOffset.type = TMQ_OFFSET__LOG; + pOffsetInfo->currentOffset.version = offset >= 1 ? 
offset - 1 : 0; +// pOffsetInfo->committedOffset.version = INT64_MIN; + pVg->seekUpdated = true; tscInfo("consumer:0x%" PRIx64 " seek to %" PRId64 " on vgId:%d", tmq->consumerId, offset, pVg->vgId); + taosWUnLockLatch(&tmq->lock); - SSyncCommitInfo* pInfo = taosMemoryMalloc(sizeof(SSyncCommitInfo)); - if (pInfo == NULL) { - tscError("consumer:0x%"PRIx64" failed to prepare seek operation", tmq->consumerId); - return TSDB_CODE_OUT_OF_MEMORY; - } +// SMqRspObj rspObj = {.resType = RES_TYPE__TMQ, .vgId = pVg->vgId}; +// tstrncpy(rspObj.topic, tname, tListLen(rspObj.topic)); +// +// SSyncCommitInfo* pInfo = taosMemoryMalloc(sizeof(SSyncCommitInfo)); +// if (pInfo == NULL) { +// tscError("consumer:0x%"PRIx64" failed to prepare seek operation", tmq->consumerId); +// return TSDB_CODE_OUT_OF_MEMORY; +// } +// +// tsem_init(&pInfo->sem, 0, 0); +// pInfo->code = 0; +// +// asyncCommitOffset(tmq, &rspObj, TDMT_VND_TMQ_SEEK_TO_OFFSET, commitCallBackFn, pInfo); +// +// tsem_wait(&pInfo->sem); +// int32_t code = pInfo->code; +// +// tsem_destroy(&pInfo->sem); +// taosMemoryFree(pInfo); +// +// if (code != TSDB_CODE_SUCCESS) { +// tscError("consumer:0x%" PRIx64 " failed to send seek to vgId:%d, code:%s", tmq->consumerId, pVg->vgId, tstrerror(code)); +// } - tsem_init(&pInfo->sem, 0, 0); - pInfo->code = 0; - - asyncCommitOffset(tmq, &rspObj, TDMT_VND_TMQ_SEEK_TO_OFFSET, commitCallBackFn, pInfo); - - tsem_wait(&pInfo->sem); - int32_t code = pInfo->code; - - tsem_destroy(&pInfo->sem); - taosMemoryFree(pInfo); - - if (code != TSDB_CODE_SUCCESS) { - tscError("consumer:0x%" PRIx64 " failed to send seek to vgId:%d, code:%s", tmq->consumerId, pVg->vgId, - tstrerror(code)); - } - - return code; + return 0; } \ No newline at end of file diff --git a/source/client/test/clientTests.cpp b/source/client/test/clientTests.cpp index ccc17289b0..3c46d17802 100644 --- a/source/client/test/clientTests.cpp +++ b/source/client/test/clientTests.cpp @@ -1073,6 +1073,146 @@ TEST(clientCase, sub_db_test) { fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows); } +TEST(clientCase, td_25129) { +// taos_options(TSDB_OPTION_CONFIGDIR, "~/first/cfg"); + + TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); + ASSERT_NE(pConn, nullptr); + + tmq_conf_t* conf = tmq_conf_new(); + + tmq_conf_set(conf, "enable.auto.commit", "false"); + tmq_conf_set(conf, "auto.commit.interval.ms", "2000"); + tmq_conf_set(conf, "group.id", "group_id_2"); + tmq_conf_set(conf, "td.connect.user", "root"); + tmq_conf_set(conf, "td.connect.pass", "taosdata"); + tmq_conf_set(conf, "auto.offset.reset", "earliest"); + tmq_conf_set(conf, "msg.with.table.name", "true"); + + tmq_t* tmq = tmq_consumer_new(conf, NULL, 0); + tmq_conf_destroy(conf); + + // create the list of topics to subscribe to + tmq_list_t* topicList = tmq_list_new(); + tmq_list_append(topicList, "tp"); + + // start the subscription + tmq_subscribe(tmq, topicList); + tmq_list_destroy(topicList); + + TAOS_FIELD* fields = NULL; + int32_t numOfFields = 0; + int32_t precision = 0; + int32_t totalRows = 0; + int32_t msgCnt = 0; + int32_t timeout = 2000; + + int32_t count = 0; + + tmq_topic_assignment* pAssign = NULL; + int32_t numOfAssign = 0; + + int32_t code = tmq_get_topic_assignment(tmq, "tp", &pAssign, &numOfAssign); + if (code != 0) { + printf("error occurs:%s\n", tmq_err2str(code)); + tmq_free_assignment(pAssign); + tmq_consumer_close(tmq); + taos_close(pConn); + fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows); + return; + } + + for(int i = 0; i < numOfAssign; i++){ + printf("assign 
i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end); + } + +// tmq_offset_seek(tmq, "tp", pAssign[0].vgId, 4); + tmq_free_assignment(pAssign); + + code = tmq_get_topic_assignment(tmq, "tp", &pAssign, &numOfAssign); + if (code != 0) { + printf("error occurs:%s\n", tmq_err2str(code)); + tmq_free_assignment(pAssign); + tmq_consumer_close(tmq); + taos_close(pConn); + fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows); + return; + } + + for(int i = 0; i < numOfAssign; i++){ + printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end); + } + + tmq_free_assignment(pAssign); + + code = tmq_get_topic_assignment(tmq, "tp", &pAssign, &numOfAssign); + if (code != 0) { + printf("error occurs:%s\n", tmq_err2str(code)); + tmq_free_assignment(pAssign); + tmq_consumer_close(tmq); + taos_close(pConn); + fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows); + return; + } + + for(int i = 0; i < numOfAssign; i++){ + printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end); + } + + while (1) { + TAOS_RES* pRes = tmq_consumer_poll(tmq, timeout); + if (pRes) { + char buf[128]; + + const char* topicName = tmq_get_topic_name(pRes); +// const char* dbName = tmq_get_db_name(pRes); +// int32_t vgroupId = tmq_get_vgroup_id(pRes); +// +// printf("topic: %s\n", topicName); +// printf("db: %s\n", dbName); +// printf("vgroup id: %d\n", vgroupId); + + printSubResults(pRes, &totalRows); + } else { + tmq_offset_seek(tmq, "tp", pAssign[0].vgId, pAssign[0].currentOffset); + tmq_offset_seek(tmq, "tp", pAssign[1].vgId, pAssign[1].currentOffset); + continue; + } + +// tmq_commit_sync(tmq, pRes); + if (pRes != NULL) { + taos_free_result(pRes); + // if ((++count) > 1) { + // break; + // } + } else { + break; + } + +// tmq_offset_seek(tmq, "tp", pAssign[0].vgId, pAssign[0].begin); + } + + tmq_free_assignment(pAssign); + + code = tmq_get_topic_assignment(tmq, "tp", &pAssign, &numOfAssign); + if (code != 0) { + printf("error occurs:%s\n", tmq_err2str(code)); + tmq_free_assignment(pAssign); + tmq_consumer_close(tmq); + taos_close(pConn); + fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows); + return; + } + + for(int i = 0; i < numOfAssign; i++){ + printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end); + } + + tmq_consumer_close(tmq); + taos_close(pConn); + fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows); +} + TEST(clientCase, sub_tb_test) { taos_options(TSDB_OPTION_CONFIGDIR, "~/first/cfg"); diff --git a/source/common/src/systable.c b/source/common/src/systable.c index a767f829d1..6fdc74f692 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -315,7 +315,7 @@ static const SSysTableMeta infosMeta[] = { {TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema), true}, {TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema), true}, {TSDB_INS_TABLE_QNODES, qnodesSchema, tListLen(qnodesSchema), true}, - {TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema)}, + {TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema), true}, {TSDB_INS_TABLE_CLUSTER, clusterSchema, tListLen(clusterSchema), true}, {TSDB_INS_TABLE_DATABASES, userDBSchema, 
tListLen(userDBSchema), false}, {TSDB_INS_TABLE_FUNCTIONS, userFuncSchema, tListLen(userFuncSchema), false}, diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index adb3dd48c6..a25ee04a1b 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -1525,6 +1525,9 @@ int32_t tSerializeSGetUserAuthRspImpl(SEncoder *pEncoder, SGetUserAuthRsp *pRsp) useDb = taosHashIterate(pRsp->useDbs, useDb); } + // since 3.0.7.0 + if (tEncodeI32(pEncoder, pRsp->passVer) < 0) return -1; + return 0; } @@ -1646,6 +1649,12 @@ int32_t tDeserializeSGetUserAuthRspImpl(SDecoder *pDecoder, SGetUserAuthRsp *pRs taosHashPut(pRsp->useDbs, key, strlen(key), &ref, sizeof(ref)); taosMemoryFree(key); } + // since 3.0.7.0 + if (!tDecodeIsEnd(pDecoder)) { + if (tDecodeI32(pDecoder, &pRsp->passVer) < 0) return -1; + } else { + pRsp->passVer = 0; + } } return 0; @@ -3031,59 +3040,6 @@ void tFreeSUserAuthBatchRsp(SUserAuthBatchRsp *pRsp) { taosArrayDestroy(pRsp->pArray); } -int32_t tSerializeSUserPassBatchRsp(void *buf, int32_t bufLen, SUserPassBatchRsp *pRsp) { - SEncoder encoder = {0}; - tEncoderInit(&encoder, buf, bufLen); - - if (tStartEncode(&encoder) < 0) return -1; - - int32_t numOfBatch = taosArrayGetSize(pRsp->pArray); - if (tEncodeI32(&encoder, numOfBatch) < 0) return -1; - for (int32_t i = 0; i < numOfBatch; ++i) { - SGetUserPassRsp *pUserPassRsp = taosArrayGet(pRsp->pArray, i); - if (tEncodeCStr(&encoder, pUserPassRsp->user) < 0) return -1; - if (tEncodeI32(&encoder, pUserPassRsp->version) < 0) return -1; - } - tEndEncode(&encoder); - - int32_t tlen = encoder.pos; - tEncoderClear(&encoder); - return tlen; -} - -int32_t tDeserializeSUserPassBatchRsp(void *buf, int32_t bufLen, SUserPassBatchRsp *pRsp) { - SDecoder decoder = {0}; - tDecoderInit(&decoder, buf, bufLen); - - if (tStartDecode(&decoder) < 0) return -1; - - int32_t numOfBatch = taosArrayGetSize(pRsp->pArray); - if (tDecodeI32(&decoder, &numOfBatch) < 0) return -1; - - pRsp->pArray = taosArrayInit(numOfBatch, sizeof(SGetUserPassRsp)); - if (pRsp->pArray == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; - } - - for (int32_t i = 0; i < numOfBatch; ++i) { - SGetUserPassRsp rsp = {0}; - if (tDecodeCStrTo(&decoder, rsp.user) < 0) return -1; - if (tDecodeI32(&decoder, &rsp.version) < 0) return -1; - taosArrayPush(pRsp->pArray, &rsp); - } - tEndDecode(&decoder); - - tDecoderClear(&decoder); - return 0; -} - -void tFreeSUserPassBatchRsp(SUserPassBatchRsp *pRsp) { - if(pRsp) { - taosArrayDestroy(pRsp->pArray); - } -} - int32_t tSerializeSDbCfgReq(void *buf, int32_t bufLen, SDbCfgReq *pReq) { SEncoder encoder = {0}; tEncoderInit(&encoder, buf, bufLen); @@ -4161,6 +4117,7 @@ int32_t tSerializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) { if (tEncodeCStr(&encoder, pRsp->sVer) < 0) return -1; if (tEncodeCStr(&encoder, pRsp->sDetailVer) < 0) return -1; if (tEncodeI32(&encoder, pRsp->passVer) < 0) return -1; + if (tEncodeI32(&encoder, pRsp->authVer) < 0) return -1; tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -4190,6 +4147,12 @@ int32_t tDeserializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) { } else { pRsp->passVer = 0; } + // since 3.0.7.0 + if (!tDecodeIsEnd(&decoder)) { + if (tDecodeI32(&decoder, &pRsp->authVer) < 0) return -1; + } else { + pRsp->authVer = 0; + } tEndDecode(&decoder); diff --git a/source/common/src/trow.c b/source/common/src/trow.c index 8ae77bcd0a..039f436505 100644 --- a/source/common/src/trow.c +++ b/source/common/src/trow.c @@ -423,7 +423,7 @@ int32_t tdSTSRowNew(SArray 
*pArray, STSchema *pTSchema, STSRow **ppRow) { val = (const void *)&pColVal->value.val; } } else { - pColVal = NULL; + // pColVal = NULL; valType = TD_VTYPE_NONE; } diff --git a/source/dnode/mgmt/exe/dmMain.c b/source/dnode/mgmt/exe/dmMain.c index da9a57387d..01a9a245be 100644 --- a/source/dnode/mgmt/exe/dmMain.c +++ b/source/dnode/mgmt/exe/dmMain.c @@ -19,6 +19,9 @@ #include "tconfig.h" #include "tglobal.h" #include "version.h" +#ifdef TD_JEMALLOC_ENABLED +#include "jemalloc/jemalloc.h" +#endif #if defined(CUS_NAME) || defined(CUS_PROMPT) || defined(CUS_EMAIL) #include "cus_name.h" @@ -255,6 +258,10 @@ static void taosCleanupArgs() { } int main(int argc, char const *argv[]) { +#ifdef TD_JEMALLOC_ENABLED + bool jeBackgroundThread = true; + mallctl("background_thread", NULL, NULL, &jeBackgroundThread, sizeof(bool)); +#endif if (!taosCheckSystemIsLittleEnd()) { printf("failed to start since on non-little-end machines\n"); return -1; diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index ea46b70693..5d6d16ccf8 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -16,6 +16,7 @@ #define _DEFAULT_SOURCE #include "dmMgmt.h" #include "qworker.h" +#include "tversion.h" static inline void dmSendRsp(SRpcMsg *pMsg) { rpcSendResponse(pMsg); } @@ -73,6 +74,13 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) { dGTrace("msg:%s is received, handle:%p len:%d code:0x%x app:%p refId:%" PRId64, TMSG_INFO(pRpc->msgType), pRpc->info.handle, pRpc->contLen, pRpc->code, pRpc->info.ahandle, pRpc->info.refId); + int32_t svrVer = 0; + taosVersionStrToInt(version, &svrVer); + if (0 != taosCheckVersionCompatible(pRpc->info.cliVer, svrVer, 3)) { + dError("Version not compatible, cli ver: %d, svr ver: %d", pRpc->info.cliVer, svrVer); + goto _OVER; + } + switch (pRpc->msgType) { case TDMT_DND_NET_TEST: dmProcessNetTestReq(pDnode, pRpc); @@ -305,6 +313,7 @@ int32_t dmInitClient(SDnode *pDnode) { rpcInit.supportBatch = 1; rpcInit.batchSize = 8 * 1024; rpcInit.timeToGetConn = tsTimeToGetAvailableConn; + taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); pTrans->clientRpc = rpcOpen(&rpcInit); if (pTrans->clientRpc == NULL) { @@ -339,7 +348,7 @@ int32_t dmInitServer(SDnode *pDnode) { rpcInit.idleTime = tsShellActivityTimer * 1000; rpcInit.parent = pDnode; rpcInit.compressSize = tsCompressMsgSize; - + taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); pTrans->serverRpc = rpcOpen(&rpcInit); if (pTrans->serverRpc == NULL) { dError("failed to init dnode rpc server"); diff --git a/source/dnode/mgmt/test/sut/src/client.cpp b/source/dnode/mgmt/test/sut/src/client.cpp index a27a511651..95eea2359d 100644 --- a/source/dnode/mgmt/test/sut/src/client.cpp +++ b/source/dnode/mgmt/test/sut/src/client.cpp @@ -16,6 +16,7 @@ #include "sut.h" #include "tdatablock.h" #include "tmisce.h" +#include "tversion.h" static void processClientRsp(void* parent, SRpcMsg* pRsp, SEpSet* pEpSet) { TestClient* client = (TestClient*)parent; @@ -53,6 +54,7 @@ void TestClient::DoInit() { rpcInit.parent = this; // rpcInit.secret = (char*)secretEncrypt; // rpcInit.spi = 1; + taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); clientRpc = rpcOpen(&rpcInit); ASSERT(clientRpc); diff --git a/source/dnode/mnode/impl/inc/mndUser.h b/source/dnode/mnode/impl/inc/mndUser.h index 93ae38e554..8b930e7f18 100644 --- a/source/dnode/mnode/impl/inc/mndUser.h +++ b/source/dnode/mnode/impl/inc/mndUser.h @@ -35,8 
+35,6 @@ SHashObj *mndDupTableHash(SHashObj *pOld); SHashObj *mndDupTopicHash(SHashObj *pOld); int32_t mndValidateUserAuthInfo(SMnode *pMnode, SUserAuthVersion *pUsers, int32_t numOfUses, void **ppRsp, int32_t *pRspLen); -int32_t mndValidateUserPassInfo(SMnode *pMnode, SUserPassVersion *pUsers, int32_t numOfUses, void **ppRsp, - int32_t *pRspLen); int32_t mndUserRemoveDb(SMnode *pMnode, STrans *pTrans, char *db); int32_t mndUserRemoveTopic(SMnode *pMnode, STrans *pTrans, char *topic); diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index 47cc4a1ce7..bdf9931ca2 100644 --- a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -419,6 +419,9 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) { mDebug("heartbeat report offset rows.%s:%s", pConsumer->cgroup, data->topicName); SMqSubscribeObj *pSub = mndAcquireSubscribe(pMnode, pConsumer->cgroup, data->topicName); + if(pSub == NULL){ + continue; + } taosWLockLatch(&pSub->lock); SMqConsumerEp *pConsumerEp = taosHashGet(pSub->consumerHash, &consumerId, sizeof(int64_t)); if(pConsumerEp){ diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index 1a981362a8..2cc60e6fcc 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -382,6 +382,40 @@ static int32_t mndCheckDbCfg(SMnode *pMnode, SDbCfg *pCfg) { return terrno; } +static int32_t mndCheckInChangeDbCfg(SMnode *pMnode, SDbCfg *pCfg) { + terrno = TSDB_CODE_MND_INVALID_DB_OPTION; + if (pCfg->buffer < TSDB_MIN_BUFFER_PER_VNODE || pCfg->buffer > TSDB_MAX_BUFFER_PER_VNODE) return -1; + if (pCfg->pages < TSDB_MIN_PAGES_PER_VNODE || pCfg->pages > TSDB_MAX_PAGES_PER_VNODE) return -1; + if (pCfg->pageSize < TSDB_MIN_PAGESIZE_PER_VNODE || pCfg->pageSize > TSDB_MAX_PAGESIZE_PER_VNODE) return -1; + if (pCfg->daysPerFile < TSDB_MIN_DAYS_PER_FILE || pCfg->daysPerFile > TSDB_MAX_DAYS_PER_FILE) return -1; + if (pCfg->daysToKeep0 < TSDB_MIN_KEEP || pCfg->daysToKeep0 > TSDB_MAX_KEEP) return -1; + if (pCfg->daysToKeep1 < TSDB_MIN_KEEP || pCfg->daysToKeep1 > TSDB_MAX_KEEP) return -1; + if (pCfg->daysToKeep2 < TSDB_MIN_KEEP || pCfg->daysToKeep2 > TSDB_MAX_KEEP) return -1; + if (pCfg->daysToKeep0 < pCfg->daysPerFile) return -1; + if (pCfg->daysToKeep0 > pCfg->daysToKeep1) return -1; + if (pCfg->daysToKeep1 > pCfg->daysToKeep2) return -1; + if (pCfg->walFsyncPeriod < TSDB_MIN_FSYNC_PERIOD || pCfg->walFsyncPeriod > TSDB_MAX_FSYNC_PERIOD) return -1; + if (pCfg->walLevel < TSDB_MIN_WAL_LEVEL || pCfg->walLevel > TSDB_MAX_WAL_LEVEL) return -1; + if (pCfg->cacheLast < TSDB_CACHE_MODEL_NONE || pCfg->cacheLast > TSDB_CACHE_MODEL_BOTH) return -1; + if (pCfg->cacheLastSize < TSDB_MIN_DB_CACHE_SIZE || pCfg->cacheLastSize > TSDB_MAX_DB_CACHE_SIZE) return -1; + if (pCfg->replications < TSDB_MIN_DB_REPLICA || pCfg->replications > TSDB_MAX_DB_REPLICA) return -1; + if (pCfg->replications != 1 && pCfg->replications != 3) return -1; + if (pCfg->sstTrigger < TSDB_MIN_STT_TRIGGER || pCfg->sstTrigger > TSDB_MAX_STT_TRIGGER) return -1; + if (pCfg->minRows < TSDB_MIN_MINROWS_FBLOCK || pCfg->minRows > TSDB_MAX_MINROWS_FBLOCK) return -1; + if (pCfg->maxRows < TSDB_MIN_MAXROWS_FBLOCK || pCfg->maxRows > TSDB_MAX_MAXROWS_FBLOCK) return -1; + if (pCfg->minRows > pCfg->maxRows) return -1; + if (pCfg->walRetentionPeriod < TSDB_DB_MIN_WAL_RETENTION_PERIOD) return -1; + if (pCfg->walRetentionSize < TSDB_DB_MIN_WAL_RETENTION_SIZE) return -1; + if (pCfg->strict < TSDB_DB_STRICT_OFF || 
pCfg->strict > TSDB_DB_STRICT_ON) return -1; + if (pCfg->replications > mndGetDnodeSize(pMnode)) { + terrno = TSDB_CODE_MND_NO_ENOUGH_DNODES; + return -1; + } + + terrno = 0; + return terrno; +} + static void mndSetDefaultDbCfg(SDbCfg *pCfg) { if (pCfg->numOfVgroups < 0) pCfg->numOfVgroups = TSDB_DEFAULT_VN_PER_DB; if (pCfg->numOfStables < 0) pCfg->numOfStables = TSDB_DEFAULT_DB_SINGLE_STABLE; @@ -897,7 +931,7 @@ static int32_t mndProcessAlterDbReq(SRpcMsg *pReq) { code = mndSetDbCfgFromAlterDbReq(&dbObj, &alterReq); if (code != 0) goto _OVER; - code = mndCheckDbCfg(pMnode, &dbObj.cfg); + code = mndCheckInChangeDbCfg(pMnode, &dbObj.cfg); if (code != 0) goto _OVER; dbObj.cfgVersion++; diff --git a/source/dnode/mnode/impl/src/mndPrivilege.c b/source/dnode/mnode/impl/src/mndPrivilege.c index de0374c6e8..bec516b1ee 100644 --- a/source/dnode/mnode/impl/src/mndPrivilege.c +++ b/source/dnode/mnode/impl/src/mndPrivilege.c @@ -36,7 +36,9 @@ int32_t mndSetUserAuthRsp(SMnode *pMnode, SUserObj *pUser, SGetUserAuthRsp *pRsp memcpy(pRsp->user, pUser->user, TSDB_USER_LEN); pRsp->superAuth = 1; pRsp->enable = pUser->enable; + pRsp->sysInfo = pUser->sysInfo; pRsp->version = pUser->authVersion; + pRsp->passVer = pUser->passVersion; return 0; } #endif \ No newline at end of file diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c index 3c2335a6ee..524ea1a06b 100644 --- a/source/dnode/mnode/impl/src/mndProfile.c +++ b/source/dnode/mnode/impl/src/mndProfile.c @@ -288,6 +288,7 @@ _CONNECT: connectRsp.dnodeNum = mndGetDnodeSize(pMnode); connectRsp.svrTimestamp = taosGetTimestampSec(); connectRsp.passVer = pUser->passVersion; + connectRsp.authVer = pUser->authVersion; strcpy(connectRsp.sVer, version); snprintf(connectRsp.sDetailVer, sizeof(connectRsp.sDetailVer), "ver:%s\nbuild:%s\ngitinfo:%s", version, buildinfo, @@ -552,16 +553,6 @@ static int32_t mndProcessQueryHeartBeat(SMnode *pMnode, SRpcMsg *pMsg, SClientHb } break; } - case HEARTBEAT_KEY_USER_PASSINFO: { - void *rspMsg = NULL; - int32_t rspLen = 0; - mndValidateUserPassInfo(pMnode, kv->value, kv->valueLen / sizeof(SUserPassVersion), &rspMsg, &rspLen); - if (rspMsg && rspLen > 0) { - SKv kv1 = {.key = HEARTBEAT_KEY_USER_PASSINFO, .valueLen = rspLen, .value = rspMsg}; - taosArrayPush(hbRsp.info, &kv1); - } - break; - } default: mError("invalid kv key:%d", kv->key); hbRsp.status = TSDB_CODE_APP_ERROR; @@ -827,6 +818,9 @@ static int32_t packQueriesIntoBlock(SShowObj* pShow, SConnObj* pConn, SSDataBloc pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->stableQuery, false); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->isSubQuery, false); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->subPlanNum, false); diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index 7ecd994b5a..48de21199b 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -275,7 +275,7 @@ static void doAddNewConsumers(SMqRebOutputObj *pOutput, const SMqRebInputObj *pI taosHashPut(pOutput->pSub->consumerHash, &consumerId, sizeof(int64_t), &newConsumerEp, sizeof(SMqConsumerEp)); taosArrayPush(pOutput->newConsumers, &consumerId); - mInfo("sub:%s mq rebalance add new consumer:%" PRIx64, pSubKey, consumerId); + mInfo("sub:%s mq rebalance add new consumer:0x%" 
PRIx64, pSubKey, consumerId); } } diff --git a/source/dnode/mnode/impl/src/mndUser.c b/source/dnode/mnode/impl/src/mndUser.c index 90d16a0a81..1fc2e42b8c 100644 --- a/source/dnode/mnode/impl/src/mndUser.c +++ b/source/dnode/mnode/impl/src/mndUser.c @@ -801,7 +801,8 @@ static int32_t mndProcessAlterUserReq(SRpcMsg *pReq) { goto _OVER; } - if (TSDB_ALTER_USER_PASSWD == alterReq.alterType && alterReq.pass[0] == 0) { + if (TSDB_ALTER_USER_PASSWD == alterReq.alterType && + (alterReq.pass[0] == 0 || strlen(alterReq.pass) >= TSDB_PASSWORD_LEN)) { terrno = TSDB_CODE_MND_INVALID_PASS_FORMAT; goto _OVER; } @@ -824,7 +825,6 @@ static int32_t mndProcessAlterUserReq(SRpcMsg *pReq) { if (mndUserDupObj(pUser, &newUser) != 0) goto _OVER; - newUser.passVersion = pUser->passVersion; if (alterReq.alterType == TSDB_ALTER_USER_PASSWD) { char pass[TSDB_PASSWORD_LEN + 1] = {0}; taosEncryptPass_c((uint8_t *)alterReq.pass, strlen(alterReq.pass), pass); @@ -1431,69 +1431,6 @@ _OVER: return code; } -int32_t mndValidateUserPassInfo(SMnode *pMnode, SUserPassVersion *pUsers, int32_t numOfUses, void **ppRsp, - int32_t *pRspLen) { - int32_t code = 0; - SUserPassBatchRsp batchRsp = {0}; - - for (int32_t i = 0; i < numOfUses; ++i) { - SUserObj *pUser = mndAcquireUser(pMnode, pUsers[i].user); - if (pUser == NULL) { - mError("user:%s, failed to validate user pass since %s", pUsers[i].user, terrstr()); - continue; - } - - pUsers[i].version = ntohl(pUsers[i].version); - if (pUser->passVersion <= pUsers[i].version) { - mTrace("user:%s, not update since mnd passVer %d <= client passVer %d", pUsers[i].user, pUser->passVersion, - pUsers[i].version); - mndReleaseUser(pMnode, pUser); - continue; - } - - SGetUserPassRsp rsp = {0}; - memcpy(rsp.user, pUser->user, TSDB_USER_LEN); - rsp.version = pUser->passVersion; - - if (!batchRsp.pArray && !(batchRsp.pArray = taosArrayInit(numOfUses, sizeof(SGetUserPassRsp)))) { - code = TSDB_CODE_OUT_OF_MEMORY; - mndReleaseUser(pMnode, pUser); - goto _OVER; - } - - taosArrayPush(batchRsp.pArray, &rsp); - mndReleaseUser(pMnode, pUser); - } - - if (taosArrayGetSize(batchRsp.pArray) <= 0) { - goto _OVER; - } - - int32_t rspLen = tSerializeSUserPassBatchRsp(NULL, 0, &batchRsp); - if (rspLen < 0) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _OVER; - } - void *pRsp = taosMemoryMalloc(rspLen); - if (pRsp == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _OVER; - } - tSerializeSUserPassBatchRsp(pRsp, rspLen, &batchRsp); - - *ppRsp = pRsp; - *pRspLen = rspLen; - -_OVER: - if (code) { - *ppRsp = NULL; - *pRspLen = 0; - } - - tFreeSUserPassBatchRsp(&batchRsp); - return code; -} - int32_t mndUserRemoveDb(SMnode *pMnode, STrans *pTrans, char *db) { int32_t code = 0; SSdb *pSdb = pMnode->pSdb; diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h index 7f95e48c41..b77bb54714 100644 --- a/source/dnode/vnode/src/inc/tq.h +++ b/source/dnode/vnode/src/inc/tq.h @@ -134,7 +134,8 @@ int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, STaosxR int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t numOfCols, int8_t precision); int32_t tqSendDataRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqDataRsp* pRsp, int32_t type, int32_t vgId); -int32_t tqPushDataRsp(STqHandle* pHandle, int32_t vgId); +//int32_t tqPushDataRsp(STqHandle* pHandle, int32_t vgId); +int32_t tqPushEmptyDataRsp(STqHandle* pHandle, int32_t vgId); // tqMeta int32_t tqMetaOpen(STQ* pTq); diff --git a/source/dnode/vnode/src/meta/metaTable.c 
b/source/dnode/vnode/src/meta/metaTable.c index cb4b3231f6..670ab2643b 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -207,7 +207,10 @@ int metaCreateSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) { tb_uid_t uid = *(tb_uid_t *)pData; tdbFree(pData); SMetaInfo info; - metaGetInfo(pMeta, uid, &info, NULL); + if (metaGetInfo(pMeta, uid, &info, NULL) == TSDB_CODE_NOT_FOUND) { + terrno = TSDB_CODE_PAR_TABLE_NOT_EXIST; + return -1; + } if (info.uid == info.suid) { return 0; } else { @@ -939,7 +942,7 @@ int metaTtlDropTable(SMeta *pMeta, int64_t timePointMs, SArray *tbUids) { return 0; } - metaInfo("ttl find expired table count: %zu" , TARRAY_SIZE(tbUids)); + metaInfo("ttl find expired table count: %zu", TARRAY_SIZE(tbUids)); metaDropTables(pMeta, tbUids); return 0; diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 41e0a97d79..ec9a796cfd 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -266,26 +266,43 @@ void tqNotifyClose(STQ* pTq) { // return 0; //} -int32_t tqPushDataRsp(STqHandle* pHandle, int32_t vgId) { +int32_t tqPushEmptyDataRsp(STqHandle* pHandle, int32_t vgId) { + SMqPollReq req = {0}; + if (tDeserializeSMqPollReq(pHandle->msg->pCont, pHandle->msg->contLen, &req) < 0) { + tqError("tDeserializeSMqPollReq %d failed", pHandle->msg->contLen); + terrno = TSDB_CODE_INVALID_MSG; + return -1; + } + SMqDataRsp dataRsp = {0}; - dataRsp.head.consumerId = pHandle->consumerId; - dataRsp.head.epoch = pHandle->epoch; - dataRsp.head.mqMsgType = TMQ_MSG_TYPE__POLL_DATA_RSP; - - int64_t sver = 0, ever = 0; - walReaderValidVersionRange(pHandle->execHandle.pTqReader->pWalReader, &sver, &ever); - tqDoSendDataRsp(&pHandle->msg->info, &dataRsp, pHandle->epoch, pHandle->consumerId, TMQ_MSG_TYPE__POLL_DATA_RSP, sver, - ever); - - char buf1[TSDB_OFFSET_LEN] = {0}; - char buf2[TSDB_OFFSET_LEN] = {0}; - tFormatOffset(buf1, tListLen(buf1), &dataRsp.reqOffset); - tFormatOffset(buf2, tListLen(buf2), &dataRsp.rspOffset); - tqDebug("vgId:%d, from consumer:0x%" PRIx64 " (epoch %d) push rsp, block num: %d, req:%s, rsp:%s", vgId, - dataRsp.head.consumerId, dataRsp.head.epoch, dataRsp.blockNum, buf1, buf2); + tqInitDataRsp(&dataRsp, &req); + dataRsp.blockNum = 0; + dataRsp.rspOffset = dataRsp.reqOffset; + tqSendDataRsp(pHandle, pHandle->msg, &req, &dataRsp, TMQ_MSG_TYPE__POLL_DATA_RSP, vgId); + tDeleteMqDataRsp(&dataRsp); return 0; } +//int32_t tqPushDataRsp(STqHandle* pHandle, int32_t vgId) { +// SMqDataRsp dataRsp = {0}; +// dataRsp.head.consumerId = pHandle->consumerId; +// dataRsp.head.epoch = pHandle->epoch; +// dataRsp.head.mqMsgType = TMQ_MSG_TYPE__POLL_RSP; +// +// int64_t sver = 0, ever = 0; +// walReaderValidVersionRange(pHandle->execHandle.pTqReader->pWalReader, &sver, &ever); +// tqDoSendDataRsp(&pHandle->msg->info, &dataRsp, pHandle->epoch, pHandle->consumerId, TMQ_MSG_TYPE__POLL_RSP, sver, +// ever); +// +// char buf1[TSDB_OFFSET_LEN] = {0}; +// char buf2[TSDB_OFFSET_LEN] = {0}; +// tFormatOffset(buf1, tListLen(buf1), &dataRsp.reqOffset); +// tFormatOffset(buf2, tListLen(buf2), &dataRsp.rspOffset); +// tqDebug("vgId:%d, from consumer:0x%" PRIx64 " (epoch %d) push rsp, block num: %d, req:%s, rsp:%s", vgId, +// dataRsp.head.consumerId, dataRsp.head.epoch, dataRsp.blockNum, buf1, buf2); +// return 0; +//} + int32_t tqSendDataRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqDataRsp* pRsp, int32_t type, int32_t vgId) { int64_t sver = 0, ever = 0; @@ 
-524,7 +541,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) { if (!exec) { tqSetHandleExec(pHandle); // qSetTaskCode(pHandle->execHandle.task, TDB_CODE_SUCCESS); - tqDebug("tmq poll: consumer:0x%" PRIx64 "vgId:%d, topic:%s, set handle exec, pHandle:%p", consumerId, vgId, + tqDebug("tmq poll: consumer:0x%" PRIx64 " vgId:%d, topic:%s, set handle exec, pHandle:%p", consumerId, vgId, req.subKey, pHandle); taosWUnLockLatch(&pTq->lock); break; @@ -544,7 +561,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) { pHandle->epoch = reqEpoch; } - char buf[TSDB_OFFSET_LEN]; + char buf[TSDB_OFFSET_LEN] = {0}; tFormatOffset(buf, TSDB_OFFSET_LEN, &reqOffset); tqDebug("tmq poll: consumer:0x%" PRIx64 " (epoch %d), subkey %s, recv poll req vgId:%d, req:%s, reqId:0x%" PRIx64, consumerId, req.epoch, pHandle->subKey, vgId, buf, req.reqId); @@ -552,7 +569,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) { code = tqExtractDataForMq(pTq, pHandle, &req, pMsg); tqSetHandleIdle(pHandle); - tqDebug("tmq poll: consumer:0x%" PRIx64 "vgId:%d, topic:%s, , set handle idle, pHandle:%p", consumerId, vgId, + tqDebug("tmq poll: consumer:0x%" PRIx64 " vgId:%d, topic:%s, set handle idle, pHandle:%p", consumerId, vgId, req.subKey, pHandle); return code; } @@ -594,48 +611,47 @@ int32_t tqProcessVgWalInfoReq(STQ* pTq, SRpcMsg* pMsg) { SMqDataRsp dataRsp = {0}; tqInitDataRsp(&dataRsp, &req); - STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, req.subKey); - if (pOffset != NULL) { - if (pOffset->val.type != TMQ_OFFSET__LOG) { - tqError("consumer:0x%" PRIx64 " vgId:%d subkey:%s use snapshot, no valid wal info", consumerId, vgId, req.subKey); - terrno = TSDB_CODE_INVALID_PARA; - tDeleteMqDataRsp(&dataRsp); - return -1; - } + if (req.useSnapshot == true) { + tqError("consumer:0x%" PRIx64 " vgId:%d subkey:%s snapshot not support wal info", consumerId, vgId, req.subKey); + terrno = TSDB_CODE_INVALID_PARA; + tDeleteMqDataRsp(&dataRsp); + return -1; + } - dataRsp.rspOffset.type = TMQ_OFFSET__LOG; - dataRsp.rspOffset.version = pOffset->val.version; - } else { - if (req.useSnapshot == true) { - tqError("consumer:0x%" PRIx64 " vgId:%d subkey:%s snapshot not support wal info", consumerId, vgId, req.subKey); - terrno = TSDB_CODE_INVALID_PARA; - tDeleteMqDataRsp(&dataRsp); - return -1; - } + dataRsp.rspOffset.type = TMQ_OFFSET__LOG; - dataRsp.rspOffset.type = TMQ_OFFSET__LOG; - - if (reqOffset.type == TMQ_OFFSET__LOG) { - int64_t currentVer = walReaderGetCurrentVer(pHandle->execHandle.pTqReader->pWalReader); - if (currentVer == -1) { // not start to read data from wal yet, return req offset directly - dataRsp.rspOffset.version = reqOffset.version; - } else { - dataRsp.rspOffset.version = currentVer; // return current consume offset value + if (reqOffset.type == TMQ_OFFSET__LOG) { + dataRsp.rspOffset.version = reqOffset.version; + } else if(reqOffset.type < 0){ + STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, req.subKey); + if (pOffset != NULL) { + if (pOffset->val.type != TMQ_OFFSET__LOG) { + tqError("consumer:0x%" PRIx64 " vgId:%d subkey:%s, no valid wal info", consumerId, vgId, req.subKey); + terrno = TSDB_CODE_INVALID_PARA; + tDeleteMqDataRsp(&dataRsp); + return -1; } - } else if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEST) { - dataRsp.rspOffset.version = sver; // not consume yet, set the earliest position - } else if (reqOffset.type == TMQ_OFFSET__RESET_LATEST) { - dataRsp.rspOffset.version = ever; - } else { - tqError("consumer:0x%" PRIx64 " vgId:%d subkey:%s invalid offset type:%d", consumerId, vgId, req.subKey, 
- reqOffset.type); - terrno = TSDB_CODE_INVALID_PARA; - tDeleteMqDataRsp(&dataRsp); - return -1; + + dataRsp.rspOffset.version = pOffset->val.version; + tqInfo("consumer:0x%" PRIx64 " vgId:%d subkey:%s get assignment from store:%"PRId64, consumerId, vgId, req.subKey, dataRsp.rspOffset.version); + }else{ + if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEST) { + dataRsp.rspOffset.version = sver; // not consume yet, set the earliest position + } else if (reqOffset.type == TMQ_OFFSET__RESET_LATEST) { + dataRsp.rspOffset.version = ever; + } + tqInfo("consumer:0x%" PRIx64 " vgId:%d subkey:%s get assignment from init:%"PRId64, consumerId, vgId, req.subKey, dataRsp.rspOffset.version); } + } else { + tqError("consumer:0x%" PRIx64 " vgId:%d subkey:%s invalid offset type:%d", consumerId, vgId, req.subKey, + reqOffset.type); + terrno = TSDB_CODE_INVALID_PARA; + tDeleteMqDataRsp(&dataRsp); + return -1; } tqDoSendDataRsp(&pMsg->info, &dataRsp, req.epoch, req.consumerId, TMQ_MSG_TYPE__WALINFO_RSP, sver, ever); + tDeleteMqDataRsp(&dataRsp); return 0; } @@ -1300,7 +1316,7 @@ int32_t extractDelDataBlock(const void* pData, int32_t len, int64_t ver, SStream SDecoder* pCoder = &(SDecoder){0}; SDeleteRes* pRes = &(SDeleteRes){0}; - *pRefBlock = NULL; + (*pRefBlock) = NULL; pRes->uidList = taosArrayInit(0, sizeof(tb_uid_t)); if (pRes->uidList == NULL) { @@ -1340,7 +1356,7 @@ int32_t extractDelDataBlock(const void* pData, int32_t len, int64_t ver, SStream taosArrayDestroy(pRes->uidList); *pRefBlock = taosAllocateQitem(sizeof(SStreamRefDataBlock), DEF_QITEM, 0); - if (pRefBlock == NULL) { + if ((*pRefBlock) == NULL) { return TSDB_CODE_OUT_OF_MEMORY; } diff --git a/source/dnode/vnode/src/tq/tqOffset.c b/source/dnode/vnode/src/tq/tqOffset.c index 0a9905b544..11bb737225 100644 --- a/source/dnode/vnode/src/tq/tqOffset.c +++ b/source/dnode/vnode/src/tq/tqOffset.c @@ -104,7 +104,7 @@ STqOffsetStore* tqOffsetOpen(STQ* pTq) { pStore->needCommit = 0; pTq->pOffsetStore = pStore; - pStore->pHash = taosHashInit(64, MurmurHash3_32, true, HASH_NO_LOCK); + pStore->pHash = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK); if (pStore->pHash == NULL) { taosMemoryFree(pStore); return NULL; diff --git a/source/dnode/vnode/src/tq/tqPush.c b/source/dnode/vnode/src/tq/tqPush.c index 4048ebe3f9..06af53d453 100644 --- a/source/dnode/vnode/src/tq/tqPush.c +++ b/source/dnode/vnode/src/tq/tqPush.c @@ -64,7 +64,9 @@ int32_t tqRegisterPushHandle(STQ* pTq, void* handle, SRpcMsg* pMsg) { memcpy(pHandle->msg, pMsg, sizeof(SRpcMsg)); pHandle->msg->pCont = rpcMallocCont(pMsg->contLen); } else { - tqPushDataRsp(pHandle, vgId); +// tqPushDataRsp(pHandle, vgId); + tqPushEmptyDataRsp(pHandle, vgId); + void* tmp = pHandle->msg->pCont; memcpy(pHandle->msg, pMsg, sizeof(SRpcMsg)); pHandle->msg->pCont = tmp; @@ -89,7 +91,8 @@ int32_t tqUnregisterPushHandle(STQ* pTq, void *handle) { tqDebug("vgId:%d remove pHandle:%p,ret:%d consumer Id:0x%" PRIx64, vgId, pHandle, ret, pHandle->consumerId); if(pHandle->msg != NULL) { - tqPushDataRsp(pHandle, vgId); +// tqPushDataRsp(pHandle, vgId); + tqPushEmptyDataRsp(pHandle, vgId); rpcFreeCont(pHandle->msg->pCont); taosMemoryFree(pHandle->msg); diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c index c61d42d44e..11bfcf7fc5 100644 --- a/source/dnode/vnode/src/tq/tqUtil.c +++ b/source/dnode/vnode/src/tq/tqUtil.c @@ -93,7 +93,7 @@ static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHand if (pOffset != NULL) { *pOffsetVal = pOffset->val; - char 
formatBuf[TSDB_OFFSET_LEN]; + char formatBuf[TSDB_OFFSET_LEN] = {0}; tFormatOffset(formatBuf, TSDB_OFFSET_LEN, pOffsetVal); tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, existed offset found, offset reset to %s and continue. reqId:0x%" PRIx64, @@ -145,6 +145,7 @@ static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle, uint64_t consumerId = pRequest->consumerId; int32_t vgId = TD_VID(pTq->pVnode); int code = 0; + terrno = 0; SMqDataRsp dataRsp = {0}; tqInitDataRsp(&dataRsp, pRequest); diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index 4e69063941..52ad923fca 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -1026,7 +1026,7 @@ int32_t tsdbCacheGetBatch(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCache return code; } - +/* int32_t tsdbCacheGet(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsReader *pr, int8_t ltype) { int32_t code = 0; SLRUCache *pCache = pTsdb->lruCache; @@ -1074,7 +1074,7 @@ int32_t tsdbCacheGet(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsR return code; } - +*/ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKEY eKey) { int32_t code = 0; // fetch schema @@ -1824,10 +1824,11 @@ static int32_t getNextRowFromFSLast(void *iter, TSDBROW **ppRow, bool *pIgnoreEa } *pIgnoreEarlierTs = false; + /* if (!hasVal) { state->state = SFSLASTNEXTROW_FILESET; } - + */ if (!state->checkRemainingRow) { state->checkRemainingRow = true; } @@ -2015,10 +2016,9 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie tMapDataGetItemByIdx(&state->blockMap, state->iBlock, &block, tGetDataBlk); if (block.maxKey.ts <= state->lastTs) { *pIgnoreEarlierTs = true; - if (state->pBlockData) { - tBlockDataDestroy(state->pBlockData); - state->pBlockData = NULL; - } + + tBlockDataDestroy(state->pBlockData); + state->pBlockData = NULL; *ppRow = NULL; return code; @@ -3171,97 +3171,46 @@ static int32_t mergeLastRowCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, TSKEY rowTs = TSDBROW_TS(pRow); - if (lastRowTs == TSKEY_MAX) { - lastRowTs = rowTs; + lastRowTs = rowTs; - for (int16_t iCol = noneCol; iCol < nCols; ++iCol) { - if (iCol >= nLastCol) { - break; - } - SLastCol *pCol = taosArrayGet(pColArray, iCol); - if (pCol->colVal.cid != pTSchema->columns[slotIds[iCol]].colId) { - continue; - } - if (slotIds[iCol] == 0) { - STColumn *pTColumn = &pTSchema->columns[0]; - - *pColVal = COL_VAL_VALUE(pTColumn->colId, pTColumn->type, (SValue){.val = rowTs}); - taosArraySet(pColArray, 0, &(SLastCol){.ts = rowTs, .colVal = *pColVal}); - continue; - } - tsdbRowGetColVal(pRow, pTSchema, slotIds[iCol], pColVal); - - *pCol = (SLastCol){.ts = rowTs, .colVal = *pColVal}; - if (IS_VAR_DATA_TYPE(pColVal->type) /*&& pColVal->value.nData > 0*/) { - pCol->colVal.value.pData = taosMemoryMalloc(pCol->colVal.value.nData); - if (pCol->colVal.value.pData == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - code = TSDB_CODE_OUT_OF_MEMORY; - goto _err; - } - if (pColVal->value.nData > 0) { - memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData); - } - } - - /*if (COL_VAL_IS_NONE(pColVal)) { - if (!setNoneCol) { - noneCol = iCol; - setNoneCol = true; - } - } else {*/ - int32_t aColIndex = taosArraySearchIdx(aColArray, &pColVal->cid, compareInt16Val, TD_EQ); - if (aColIndex >= 0) { - taosArrayRemove(aColArray, aColIndex); - } - //} - } - if (!setNoneCol) { - // done, goto return pColArray - 
break; - } else { - continue; - } - } - - // merge into pColArray - setNoneCol = false; for (int16_t iCol = noneCol; iCol < nCols; ++iCol) { if (iCol >= nLastCol) { break; } - // high version's column value - SLastCol *lastColVal = (SLastCol *)taosArrayGet(pColArray, iCol); - if (lastColVal->colVal.cid != pTSchema->columns[slotIds[iCol]].colId) { + SLastCol *pCol = taosArrayGet(pColArray, iCol); + if (pCol->colVal.cid != pTSchema->columns[slotIds[iCol]].colId) { continue; } - SColVal *tColVal = &lastColVal->colVal; + if (slotIds[iCol] == 0) { + STColumn *pTColumn = &pTSchema->columns[0]; + *pColVal = COL_VAL_VALUE(pTColumn->colId, pTColumn->type, (SValue){.val = rowTs}); + taosArraySet(pColArray, 0, &(SLastCol){.ts = rowTs, .colVal = *pColVal}); + continue; + } tsdbRowGetColVal(pRow, pTSchema, slotIds[iCol], pColVal); - if (COL_VAL_IS_NONE(tColVal) && !COL_VAL_IS_NONE(pColVal)) { - SLastCol lastCol = {.ts = rowTs, .colVal = *pColVal}; - if (IS_VAR_DATA_TYPE(pColVal->type) && pColVal->value.nData > 0) { - SLastCol *pLastCol = (SLastCol *)taosArrayGet(pColArray, iCol); - taosMemoryFree(pLastCol->colVal.value.pData); - lastCol.colVal.value.pData = taosMemoryMalloc(lastCol.colVal.value.nData); - if (lastCol.colVal.value.pData == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - code = TSDB_CODE_OUT_OF_MEMORY; - goto _err; - } - memcpy(lastCol.colVal.value.pData, pColVal->value.pData, pColVal->value.nData); + *pCol = (SLastCol){.ts = rowTs, .colVal = *pColVal}; + if (IS_VAR_DATA_TYPE(pColVal->type) /*&& pColVal->value.nData > 0*/) { + pCol->colVal.value.pData = taosMemoryMalloc(pCol->colVal.value.nData); + if (pCol->colVal.value.pData == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; } + if (pColVal->value.nData > 0) { + memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData); + } + } - taosArraySet(pColArray, iCol, &lastCol); - int32_t aColIndex = taosArraySearchIdx(aColArray, &lastCol.colVal.cid, compareInt16Val, TD_EQ); + int32_t aColIndex = taosArraySearchIdx(aColArray, &pColVal->cid, compareInt16Val, TD_EQ); + if (aColIndex >= 0) { taosArrayRemove(aColArray, aColIndex); - } else if (COL_VAL_IS_NONE(tColVal) && !COL_VAL_IS_NONE(pColVal) && !setNoneCol) { - noneCol = iCol; - setNoneCol = true; } } - } while (setNoneCol); + + break; + } while (1); if (!hasRow) { if (ignoreEarlierTs) { diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 165448fb7b..cfeb1288d4 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -289,6 +289,10 @@ static int32_t setColumnIdSlotList(SBlockLoadSuppInfo* pSupInfo, SColumnInfo* pC static int32_t updateBlockSMAInfo(STSchema* pSchema, SBlockLoadSuppInfo* pSupInfo) { int32_t i = 0, j = 0; + if (j < pSupInfo->numOfCols && PRIMARYKEY_TIMESTAMP_COL_ID == pSupInfo->colId[j]) { + j += 1; + } + while (i < pSchema->numOfCols && j < pSupInfo->numOfCols) { STColumn* pTCol = &pSchema->columns[i]; if (pTCol->colId == pSupInfo->colId[j]) { @@ -3064,6 +3068,7 @@ static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum, SAr // only check here, since the iterate data in memory is very fast. 
if (pReader->code != TSDB_CODE_SUCCESS) { tsdbWarn("tsdb reader is stopped ASAP, code:%s, %s", strerror(pReader->code), pReader->idStr); + taosArrayDestroy(pIndexList); return pReader->code; } @@ -5586,4 +5591,3 @@ void tsdbReaderSetId(STsdbReader* pReader, const char* idstr) { } void tsdbReaderSetCloseFlag(STsdbReader* pReader) { pReader->code = TSDB_CODE_TSC_QUERY_CANCELLED; } - diff --git a/source/dnode/vnode/src/vnd/vnodeCommit.c b/source/dnode/vnode/src/vnd/vnodeCommit.c index f8b9451100..b2360a57da 100644 --- a/source/dnode/vnode/src/vnd/vnodeCommit.c +++ b/source/dnode/vnode/src/vnd/vnodeCommit.c @@ -356,7 +356,12 @@ static int32_t vnodeCommitTask(void *arg) { // commit code = vnodeCommitImpl(pInfo); - if (code) goto _exit; + if (code) { + vFatal("vgId:%d, failed to commit vnode since %s", TD_VID(pVnode), terrstr()); + taosMsleep(100); + exit(EXIT_FAILURE); + goto _exit; + } vnodeReturnBufPool(pVnode); diff --git a/source/dnode/vnode/src/vnd/vnodeRetention.c b/source/dnode/vnode/src/vnd/vnodeRetention.c index 71878ae4f4..1194abcbed 100644 --- a/source/dnode/vnode/src/vnd/vnodeRetention.c +++ b/source/dnode/vnode/src/vnd/vnodeRetention.c @@ -113,7 +113,7 @@ int32_t vnodeAsyncRentention(SVnode *pVnode, int64_t now) { _exit: if (code) { - vError("vgId:%d %s failed at line %d since %s", TD_VID(pInfo->pVnode), __func__, lino, tstrerror(code)); + vError("vgId:%d %s failed at line %d since %s", TD_VID(pVnode), __func__, lino, tstrerror(code)); if (pInfo) taosMemoryFree(pInfo); } else { vInfo("vgId:%d %s done", TD_VID(pInfo->pVnode), __func__); diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index d4efa38c3e..e4a7ed224c 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -567,7 +567,9 @@ int32_t vnodePreprocessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) { int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) { vTrace("message in vnode query queue is processing"); - if ((pMsg->msgType == TDMT_SCH_QUERY || pMsg->msgType == TDMT_VND_TMQ_CONSUME || pMsg->msgType == TDMT_VND_TMQ_CONSUME_PUSH) && !syncIsReadyForRead(pVnode->sync)) { + if ((pMsg->msgType == TDMT_SCH_QUERY || pMsg->msgType == TDMT_VND_TMQ_CONSUME || + pMsg->msgType == TDMT_VND_TMQ_CONSUME_PUSH) && + !syncIsReadyForRead(pVnode->sync)) { vnodeRedirectRpcMsg(pVnode, pMsg, terrno); return 0; } @@ -623,8 +625,8 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { return vnodeGetTableCfg(pVnode, pMsg, true); case TDMT_VND_BATCH_META: return vnodeGetBatchMeta(pVnode, pMsg); -// case TDMT_VND_TMQ_CONSUME: -// return tqProcessPollReq(pVnode->pTq, pMsg); + // case TDMT_VND_TMQ_CONSUME: + // return tqProcessPollReq(pVnode->pTq, pMsg); case TDMT_VND_TMQ_VG_WALINFO: return tqProcessVgWalInfoReq(pVnode->pTq, pMsg); default: @@ -1376,7 +1378,8 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t ver, void *pReq, in } if (info.suid) { - metaGetInfo(pVnode->pMeta, info.suid, &info, NULL); + code = metaGetInfo(pVnode->pMeta, info.suid, &info, NULL); + ASSERT(code == 0); } if (pSubmitTbData->sver != info.skmVer) { diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c index f736e9be98..f975517669 100644 --- a/source/libs/catalog/src/catalog.c +++ b/source/libs/catalog/src/catalog.c @@ -341,13 +341,10 @@ int32_t ctgChkAuth(SCatalog* pCtg, SRequestConnInfo* pConn, SUserAuthInfo *pReq, SCtgAuthReq req = {0}; req.pRawReq = pReq; req.pConn = pConn; - req.onlyCache = exists ? 
true : false; + req.onlyCache = false; CTG_ERR_RET(ctgGetUserDbAuthFromMnode(pCtg, pConn, pReq->user, &req.authInfo, NULL)); CTG_ERR_JRET(ctgChkSetAuthRes(pCtg, &req, &rsp)); - if (rsp.metaNotExists && exists) { - *exists = false; - } _return: diff --git a/source/libs/catalog/src/ctgCache.c b/source/libs/catalog/src/ctgCache.c index c856211635..605f5efeb4 100644 --- a/source/libs/catalog/src/ctgCache.c +++ b/source/libs/catalog/src/ctgCache.c @@ -1721,9 +1721,7 @@ int32_t ctgWriteTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFNam ctgDebug("stb 0x%" PRIx64 " updated to cache, dbFName:%s, tbName:%s, tbType:%d", meta->suid, dbFName, tbName, meta->tableType); - if (pCache) { - CTG_ERR_RET(ctgUpdateRentStbVersion(pCtg, dbFName, tbName, dbId, meta->suid, pCache)); - } + CTG_ERR_RET(ctgUpdateRentStbVersion(pCtg, dbFName, tbName, dbId, meta->suid, pCache)); return TSDB_CODE_SUCCESS; } diff --git a/source/libs/catalog/src/ctgUtil.c b/source/libs/catalog/src/ctgUtil.c index e7abbc5ead..86f6a51d9b 100644 --- a/source/libs/catalog/src/ctgUtil.c +++ b/source/libs/catalog/src/ctgUtil.c @@ -926,7 +926,6 @@ int32_t ctgGenerateVgList(SCatalog* pCtg, SHashObj* vgHash, SArray** pList) { } pIter = taosHashIterate(vgHash, pIter); - vgInfo = NULL; } *pList = vgList; diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c index d7f0702cb6..ecda1d596a 100644 --- a/source/libs/command/src/command.c +++ b/source/libs/command/src/command.c @@ -87,7 +87,7 @@ static int32_t buildDescResultDataBlock(SSDataBlock** pOutput) { return code; } -static void setDescResultIntoDataBlock(bool sysInfoUser, SSDataBlock* pBlock, int32_t numOfRows, STableMeta* pMeta) { +static int32_t setDescResultIntoDataBlock(bool sysInfoUser, SSDataBlock* pBlock, int32_t numOfRows, STableMeta* pMeta) { blockDataEnsureCapacity(pBlock, numOfRows); pBlock->info.rows = 0; @@ -114,6 +114,11 @@ static void setDescResultIntoDataBlock(bool sysInfoUser, SSDataBlock* pBlock, in colDataSetVal(pCol4, pBlock->info.rows, buf, false); ++(pBlock->info.rows); } + if (pBlock->info.rows <= 0) { + qError("no permission to view any columns"); + return TSDB_CODE_PAR_PERMISSION_DENIED; + } + return TSDB_CODE_SUCCESS; } static int32_t execDescribe(bool sysInfoUser, SNode* pStmt, SRetrieveTableRsp** pRsp) { @@ -123,7 +128,7 @@ static int32_t execDescribe(bool sysInfoUser, SNode* pStmt, SRetrieveTableRsp** SSDataBlock* pBlock = NULL; int32_t code = buildDescResultDataBlock(&pBlock); if (TSDB_CODE_SUCCESS == code) { - setDescResultIntoDataBlock(sysInfoUser, pBlock, numOfRows, pDesc->pMeta); + code = setDescResultIntoDataBlock(sysInfoUser, pBlock, numOfRows, pDesc->pMeta); } if (TSDB_CODE_SUCCESS == code) { code = buildRetrieveTableRsp(pBlock, DESCRIBE_RESULT_COLS, pRsp); diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index cfea233a1c..832750e967 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -1109,7 +1109,6 @@ int32_t getTableList(void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, S code = doFilterTag(pTagIndexCond, &metaArg, pUidList, &status, &pStorageAPI->metaFilter); if (code != 0 || status == SFLT_NOT_INDEX) { // temporarily disable it for performance sake qDebug("failed to get tableIds from index, suid:%" PRIu64, pScanNode->uid); - code = TSDB_CODE_SUCCESS; } else { qInfo("succ to get filter result, table num: %d", (int)taosArrayGetSize(pUidList)); } diff --git a/source/libs/executor/src/joinoperator.c 
b/source/libs/executor/src/joinoperator.c index 13ab5d05a5..f334ae02f6 100644 --- a/source/libs/executor/src/joinoperator.c +++ b/source/libs/executor/src/joinoperator.c @@ -150,9 +150,12 @@ static int32_t initTagColskeyBuf(int32_t* keyLen, char** keyBuf, const SArray* p int32_t nullFlagSize = sizeof(int8_t) * numOfGroupCols; (*keyLen) += nullFlagSize; - (*keyBuf) = taosMemoryCalloc(1, (*keyLen)); - if ((*keyBuf) == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; + if (*keyLen >= 0) { + + (*keyBuf) = taosMemoryCalloc(1, (*keyLen)); + if ((*keyBuf) == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } } return TSDB_CODE_SUCCESS; diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c index 7c009c942a..cb74392a10 100644 --- a/source/libs/executor/src/timesliceoperator.c +++ b/source/libs/executor/src/timesliceoperator.c @@ -315,7 +315,7 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp if (pDst->info.type == TSDB_DATA_TYPE_FLOAT) { float v = 0; if (!IS_VAR_DATA_TYPE(pVar->nType)) { - GET_TYPED_DATA(v, float, pVar->nType, &pVar->i); + GET_TYPED_DATA(v, float, pVar->nType, &pVar->f); } else { v = taosStr2Float(varDataVal(pVar->pz), NULL); } @@ -323,7 +323,7 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp } else if (pDst->info.type == TSDB_DATA_TYPE_DOUBLE) { double v = 0; if (!IS_VAR_DATA_TYPE(pVar->nType)) { - GET_TYPED_DATA(v, double, pVar->nType, &pVar->i); + GET_TYPED_DATA(v, double, pVar->nType, &pVar->d); } else { v = taosStr2Double(varDataVal(pVar->pz), NULL); } @@ -333,7 +333,15 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp if (!IS_VAR_DATA_TYPE(pVar->nType)) { GET_TYPED_DATA(v, int64_t, pVar->nType, &pVar->i); } else { - v = taosStr2int64(varDataVal(pVar->pz)); + v = taosStr2Int64(varDataVal(pVar->pz), NULL, 10); + } + colDataSetVal(pDst, rows, (char*)&v, false); + } else if (IS_UNSIGNED_NUMERIC_TYPE(pDst->info.type)) { + uint64_t v = 0; + if (!IS_VAR_DATA_TYPE(pVar->nType)) { + GET_TYPED_DATA(v, uint64_t, pVar->nType, &pVar->u); + } else { + v = taosStr2UInt64(varDataVal(pVar->pz), NULL, 10); } colDataSetVal(pDst, rows, (char*)&v, false); } else if (IS_BOOLEAN_TYPE(pDst->info.type)) { diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 21b36d69ec..3e16a40575 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -2702,13 +2702,12 @@ static int32_t doSetPrevVal(SDiffInfo* pDiffInfo, int32_t type, const char* pv, } static int32_t doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SColumnInfoData* pOutput, int32_t pos, - int32_t order, int64_t ts) { - int32_t factor = (order == TSDB_ORDER_ASC) ? 
1 : -1; + int64_t ts) { pDiffInfo->prevTs = ts; switch (type) { case TSDB_DATA_TYPE_INT: { int32_t v = *(int32_t*)pv; - int64_t delta = factor * (v - pDiffInfo->prev.i64); // direct previous may be null + int64_t delta = v - pDiffInfo->prev.i64; // direct previous may be null if (delta < 0 && pDiffInfo->ignoreNegative) { colDataSetNull_f_s(pOutput, pos); } else { @@ -2721,7 +2720,7 @@ static int32_t doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, case TSDB_DATA_TYPE_BOOL: case TSDB_DATA_TYPE_TINYINT: { int8_t v = *(int8_t*)pv; - int64_t delta = factor * (v - pDiffInfo->prev.i64); // direct previous may be null + int64_t delta = v - pDiffInfo->prev.i64; // direct previous may be null if (delta < 0 && pDiffInfo->ignoreNegative) { colDataSetNull_f_s(pOutput, pos); } else { @@ -2732,7 +2731,7 @@ static int32_t doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, } case TSDB_DATA_TYPE_SMALLINT: { int16_t v = *(int16_t*)pv; - int64_t delta = factor * (v - pDiffInfo->prev.i64); // direct previous may be null + int64_t delta = v - pDiffInfo->prev.i64; // direct previous may be null if (delta < 0 && pDiffInfo->ignoreNegative) { colDataSetNull_f_s(pOutput, pos); } else { @@ -2744,7 +2743,7 @@ static int32_t doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, case TSDB_DATA_TYPE_TIMESTAMP: case TSDB_DATA_TYPE_BIGINT: { int64_t v = *(int64_t*)pv; - int64_t delta = factor * (v - pDiffInfo->prev.i64); // direct previous may be null + int64_t delta = v - pDiffInfo->prev.i64; // direct previous may be null if (delta < 0 && pDiffInfo->ignoreNegative) { colDataSetNull_f_s(pOutput, pos); } else { @@ -2755,7 +2754,7 @@ static int32_t doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, } case TSDB_DATA_TYPE_FLOAT: { float v = *(float*)pv; - double delta = factor * (v - pDiffInfo->prev.d64); // direct previous may be null + double delta = v - pDiffInfo->prev.d64; // direct previous may be null if ((delta < 0 && pDiffInfo->ignoreNegative) || isinf(delta) || isnan(delta)) { // check for overflow colDataSetNull_f_s(pOutput, pos); } else { @@ -2766,7 +2765,7 @@ static int32_t doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, } case TSDB_DATA_TYPE_DOUBLE: { double v = *(double*)pv; - double delta = factor * (v - pDiffInfo->prev.d64); // direct previous may be null + double delta = v - pDiffInfo->prev.d64; // direct previous may be null if ((delta < 0 && pDiffInfo->ignoreNegative) || isinf(delta) || isnan(delta)) { // check for overflow colDataSetNull_f_s(pOutput, pos); } else { @@ -2797,82 +2796,42 @@ int32_t diffFunction(SqlFunctionCtx* pCtx) { SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput; - if (pCtx->order == TSDB_ORDER_ASC) { - for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { - int32_t pos = startOffset + numOfElems; + for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { + int32_t pos = startOffset + numOfElems; - if (colDataIsNull_f(pInputCol->nullbitmap, i)) { - if (pDiffInfo->includeNull) { - colDataSetNull_f_s(pOutput, pos); + if (colDataIsNull_f(pInputCol->nullbitmap, i)) { + if (pDiffInfo->includeNull) { + colDataSetNull_f_s(pOutput, pos); - numOfElems += 1; - } - continue; + numOfElems += 1; } - - char* pv = colDataGetData(pInputCol, i); - - if (pDiffInfo->hasPrev) { - if (tsList[i] == pDiffInfo->prevTs) { - return TSDB_CODE_FUNC_DUP_TIMESTAMP; - } - int32_t code = doHandleDiff(pDiffInfo, pInputCol->info.type, pv, pOutput, pos, 
pCtx->order, tsList[i]); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - // handle selectivity - if (pCtx->subsidiaries.num > 0) { - appendSelectivityValue(pCtx, i, pos); - } - - numOfElems++; - } else { - int32_t code = doSetPrevVal(pDiffInfo, pInputCol->info.type, pv, tsList[i]); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - } - - pDiffInfo->hasPrev = true; + continue; } - } else { - for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { - int32_t pos = startOffset + numOfElems; - if (colDataIsNull_f(pInputCol->nullbitmap, i)) { - if (pDiffInfo->includeNull) { - colDataSetNull_f_s(pOutput, pos); - numOfElems += 1; - } - continue; + char* pv = colDataGetData(pInputCol, i); + + if (pDiffInfo->hasPrev) { + if (tsList[i] == pDiffInfo->prevTs) { + return TSDB_CODE_FUNC_DUP_TIMESTAMP; + } + int32_t code = doHandleDiff(pDiffInfo, pInputCol->info.type, pv, pOutput, pos, tsList[i]); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + // handle selectivity + if (pCtx->subsidiaries.num > 0) { + appendSelectivityValue(pCtx, i, pos); } - char* pv = colDataGetData(pInputCol, i); - - // there is a row of previous data block to be handled in the first place. - if (pDiffInfo->hasPrev) { - if (tsList[i] == pDiffInfo->prevTs) { - return TSDB_CODE_FUNC_DUP_TIMESTAMP; - } - int32_t code = doHandleDiff(pDiffInfo, pInputCol->info.type, pv, pOutput, pos, pCtx->order, tsList[i]); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - // handle selectivity - if (pCtx->subsidiaries.num > 0) { - appendSelectivityValue(pCtx, i, pos); - } - - numOfElems++; - } else { - int32_t code = doSetPrevVal(pDiffInfo, pInputCol->info.type, pv, tsList[i]); - if (code != TSDB_CODE_SUCCESS) { - return code; - } + numOfElems++; + } else { + int32_t code = doSetPrevVal(pDiffInfo, pInputCol->info.type, pv, tsList[i]); + if (code != TSDB_CODE_SUCCESS) { + return code; } - - pDiffInfo->hasPrev = true; } + + pDiffInfo->hasPrev = true; } pResInfo->numOfRes = numOfElems; diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c index 93259924d5..7371017111 100644 --- a/source/libs/function/src/udfd.c +++ b/source/libs/function/src/udfd.c @@ -29,6 +29,7 @@ #include "tmsg.h" #include "trpc.h" #include "tmisce.h" +#include "tversion.h" // clang-format on #define UDFD_MAX_SCRIPT_PLUGINS 64 @@ -61,7 +62,6 @@ const char *udfdCPluginUdfInitLoadInitDestoryFuncs(SUdfCPluginCtx *udfCtx, const char destroyFuncName[TSDB_FUNC_NAME_LEN + 9] = {0}; char *destroySuffix = "_destroy"; - strcpy(destroyFuncName, udfName); snprintf(destroyFuncName, sizeof(destroyFuncName), "%s%s", udfName, destroySuffix); uv_dlsym(&udfCtx->lib, destroyFuncName, (void **)(&udfCtx->destroyFunc)); return udfName; @@ -69,7 +69,7 @@ const char *udfdCPluginUdfInitLoadInitDestoryFuncs(SUdfCPluginCtx *udfCtx, const void udfdCPluginUdfInitLoadAggFuncs(SUdfCPluginCtx *udfCtx, const char *udfName) { char processFuncName[TSDB_FUNC_NAME_LEN] = {0}; - strcpy(processFuncName, udfName); + strncpy(processFuncName, udfName, sizeof(processFuncName)); uv_dlsym(&udfCtx->lib, processFuncName, (void **)(&udfCtx->aggProcFunc)); char startFuncName[TSDB_FUNC_NAME_LEN + 7] = {0}; @@ -94,6 +94,7 @@ int32_t udfdCPluginUdfInit(SScriptUdfInfo *udf, void **pUdfCtx) { err = uv_dlopen(udf->path, &udfCtx->lib); if (err != 0) { fnError("can not load library %s. 
error: %s", udf->path, uv_strerror(err)); + taosMemoryFree(udfCtx); return TSDB_CODE_UDF_LOAD_UDF_FAILURE; } const char *udfName = udf->name; @@ -102,7 +103,7 @@ int32_t udfdCPluginUdfInit(SScriptUdfInfo *udf, void **pUdfCtx) { if (udf->funcType == UDF_FUNC_TYPE_SCALAR) { char processFuncName[TSDB_FUNC_NAME_LEN] = {0}; - strcpy(processFuncName, udfName); + strncpy(processFuncName, udfName, sizeof(processFuncName)); uv_dlsym(&udfCtx->lib, processFuncName, (void **)(&udfCtx->scalarProcFunc)); } else if (udf->funcType == UDF_FUNC_TYPE_AGG) { udfdCPluginUdfInitLoadAggFuncs(udfCtx, udfName); @@ -1038,7 +1039,7 @@ int32_t udfdOpenClientRpc() { connLimitNum = TMIN(connLimitNum, 500); rpcInit.connLimitNum = connLimitNum; rpcInit.timeToGetConn = tsTimeToGetAvailableConn; - + taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); global.clientRpc = rpcOpen(&rpcInit); if (global.clientRpc == NULL) { fnError("failed to init dnode rpc client"); diff --git a/source/libs/parser/src/parInsertSml.c b/source/libs/parser/src/parInsertSml.c index 0e5ffc57da..78b05b6df5 100644 --- a/source/libs/parser/src/parInsertSml.c +++ b/source/libs/parser/src/parInsertSml.c @@ -127,7 +127,7 @@ static int32_t smlBuildTagRow(SArray* cols, SBoundColInfo* tags, SSchema* pSchem if(kv->keyLen != strlen(pTagSchema->name) || memcmp(kv->key, pTagSchema->name, kv->keyLen) != 0 || kv->type != pTagSchema->type){ code = TSDB_CODE_SML_INVALID_DATA; - uError("SML smlBuildCol error col not same %s", pTagSchema->name); + uError("SML smlBuildTagRow error col not same %s", pTagSchema->name); goto end; } @@ -210,7 +210,7 @@ int32_t smlBuildCol(STableDataCxt* pTableCxt, SSchema* schema, void* data, int32 SSmlKv* kv = (SSmlKv*)data; if(kv->keyLen != strlen(pColSchema->name) || memcmp(kv->key, pColSchema->name, kv->keyLen) != 0 || kv->type != pColSchema->type){ ret = TSDB_CODE_SML_INVALID_DATA; - uError("SML smlBuildCol error col not same %s", pColSchema->name); + uInfo("SML smlBuildCol error col not same %s", pColSchema->name); goto end; } if (kv->type == TSDB_DATA_TYPE_NCHAR) { diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index ebf50f4784..1427ada6da 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -882,6 +882,7 @@ static int32_t createColumnsByTable(STranslateContext* pCxt, const STableNode* p (igTags ? 0 : ((TSDB_SUPER_TABLE == pMeta->tableType) ? 
pMeta->tableInfo.numOfTags : 0)); for (int32_t i = 0; i < nums; ++i) { if (invisibleColumn(pCxt->pParseCxt->enableSysInfo, pMeta->tableType, pMeta->schema[i].flags)) { + pCxt->pParseCxt->hasInvisibleCol = true; continue; } SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); @@ -3207,7 +3208,11 @@ static int32_t translateSelectList(STranslateContext* pCxt, SSelectStmt* pSelect code = translateFillValues(pCxt, pSelect); } if (NULL == pSelect->pProjectionList || 0 >= pSelect->pProjectionList->length) { - code = TSDB_CODE_PAR_INVALID_SELECTED_EXPR; + if (pCxt->pParseCxt->hasInvisibleCol) { + code = TSDB_CODE_PAR_PERMISSION_DENIED; + } else { + code = TSDB_CODE_PAR_INVALID_SELECTED_EXPR; + } } return code; } @@ -6106,6 +6111,9 @@ static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* // for (int32_t i = 0; i < pMeta->tableInfo.numOfColumns; ++i) { SSchema* column = &pMeta->schema[0]; SColumnNode* col = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); + if (NULL == col) { + return TSDB_CODE_OUT_OF_MEMORY; + } strcpy(col->colName, column->name); strcpy(col->node.aliasName, col->colName); strcpy(col->node.userAlias, col->colName); @@ -6216,7 +6224,7 @@ static int32_t translateAlterLocal(STranslateContext* pCxt, SAlterLocalStmt* pSt char* p = strchr(pStmt->config, ' '); if (NULL != p) { *p = 0; - strcpy(pStmt->value, p + 1); + tstrncpy(pStmt->value, p + 1, sizeof(pStmt->value)); } return TSDB_CODE_SUCCESS; } diff --git a/source/libs/scheduler/src/schJob.c b/source/libs/scheduler/src/schJob.c index e7bfe95795..78e0807775 100644 --- a/source/libs/scheduler/src/schJob.c +++ b/source/libs/scheduler/src/schJob.c @@ -135,6 +135,7 @@ int32_t schUpdateJobStatus(SSchJob *pJob, int8_t newStatus) { break; case JOB_TASK_STATUS_DROP: SCH_ERR_JRET(TSDB_CODE_QRY_JOB_FREED); + break; default: SCH_JOB_ELOG("invalid job status:%s", jobTaskStatusStr(oriStatus)); diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c index 80fdc7594c..01b4e7e9e6 100644 --- a/source/libs/scheduler/src/schRemote.c +++ b/source/libs/scheduler/src/schRemote.c @@ -392,6 +392,7 @@ int32_t schProcessResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SD // NEVER REACH HERE SCH_TASK_ELOG("invalid status to handle drop task rsp, refId:0x%" PRIx64, pJob->refId); SCH_ERR_JRET(TSDB_CODE_SCH_INTERNAL_ERROR); + break; } case TDMT_SCH_LINK_BROKEN: SCH_TASK_ELOG("link broken received, error:%x - %s", rspCode, tstrerror(rspCode)); diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c index d4ded2dd8b..9985e7d6a1 100644 --- a/source/libs/scheduler/src/schTask.c +++ b/source/libs/scheduler/src/schTask.c @@ -962,7 +962,6 @@ int32_t schHandleExplainRes(SArray *pExplainRes) { localRsp->rsp.numOfPlans = 0; localRsp->rsp.subplanInfo = NULL; pTask = NULL; - pJob = NULL; } _return: diff --git a/source/libs/stream/src/streamData.c b/source/libs/stream/src/streamData.c index bad104bc8e..92f1fc47ab 100644 --- a/source/libs/stream/src/streamData.c +++ b/source/libs/stream/src/streamData.c @@ -27,6 +27,7 @@ SStreamDataBlock* createStreamDataFromDispatchMsg(const SStreamDispatchReq* pReq int32_t blockNum = pReq->blockNum; SArray* pArray = taosArrayInit_s(sizeof(SSDataBlock), blockNum); if (pArray == NULL) { + taosFreeQitem(pData); return NULL; } diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 9adae2a2f5..c8aa6f5615 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ 
-232,8 +232,9 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) { } if (taosArrayGetSize(pRes) == 0) { + taosArrayDestroy(pRes); + if (finished) { - taosArrayDestroy(pRes); qDebug("s-task:%s finish recover exec task ", pTask->id.idStr); break; } else { diff --git a/source/libs/sync/test/sync_test_lib/src/syncIO.c b/source/libs/sync/test/sync_test_lib/src/syncIO.c index 2e00785586..4f8ae59348 100644 --- a/source/libs/sync/test/sync_test_lib/src/syncIO.c +++ b/source/libs/sync/test/sync_test_lib/src/syncIO.c @@ -21,6 +21,7 @@ #include "tglobal.h" #include "ttimer.h" #include "tutil.h" +#include "tversion.h" bool gRaftDetailLog = false; SSyncIO *gSyncIO = NULL; @@ -188,7 +189,7 @@ static int32_t syncIOStartInternal(SSyncIO *io) { rpcInit.idleTime = 100; rpcInit.user = "sync-io"; rpcInit.connType = TAOS_CONN_CLIENT; - + taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); io->clientRpc = rpcOpen(&rpcInit); if (io->clientRpc == NULL) { sError("failed to initialize RPC"); @@ -209,7 +210,7 @@ static int32_t syncIOStartInternal(SSyncIO *io) { rpcInit.idleTime = 2 * 1500; rpcInit.parent = io; rpcInit.connType = TAOS_CONN_SERVER; - + taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); void *pRpc = rpcOpen(&rpcInit); if (pRpc == NULL) { sError("failed to start RPC server"); @@ -470,11 +471,10 @@ static void syncIOTickPing(void *param, void *tmrId) { taosTmrReset(syncIOTickPing, io->pingTimerMS, io, io->timerMgr, &io->pingTimer); } -void syncEntryDestory(SSyncRaftEntry* pEntry) {} +void syncEntryDestory(SSyncRaftEntry *pEntry) {} - -void syncUtilMsgNtoH(void* msg) { - SMsgHead* pHead = msg; +void syncUtilMsgNtoH(void *msg) { + SMsgHead *pHead = msg; pHead->contLen = ntohl(pHead->contLen); pHead->vgId = ntohl(pHead->vgId); } @@ -487,9 +487,9 @@ static inline bool syncUtilCanPrint(char c) { } } -char* syncUtilPrintBin(char* ptr, uint32_t len) { +char *syncUtilPrintBin(char *ptr, uint32_t len) { int64_t memLen = (int64_t)(len + 1); - char* s = taosMemoryMalloc(memLen); + char *s = taosMemoryMalloc(memLen); ASSERT(s != NULL); memset(s, 0, len + 1); memcpy(s, ptr, len); @@ -502,13 +502,13 @@ char* syncUtilPrintBin(char* ptr, uint32_t len) { return s; } -char* syncUtilPrintBin2(char* ptr, uint32_t len) { +char *syncUtilPrintBin2(char *ptr, uint32_t len) { uint32_t len2 = len * 4 + 1; - char* s = taosMemoryMalloc(len2); + char *s = taosMemoryMalloc(len2); ASSERT(s != NULL); memset(s, 0, len2); - char* p = s; + char *p = s; for (int32_t i = 0; i < len; ++i) { int32_t n = sprintf(p, "%d,", ptr[i]); p += n; @@ -516,7 +516,7 @@ char* syncUtilPrintBin2(char* ptr, uint32_t len) { return s; } -void syncUtilU642Addr(uint64_t u64, char* host, int64_t len, uint16_t* port) { +void syncUtilU642Addr(uint64_t u64, char *host, int64_t len, uint16_t *port) { uint32_t hostU32 = (uint32_t)((u64 >> 32) & 0x00000000FFFFFFFF); struct in_addr addr = {.s_addr = hostU32}; @@ -524,7 +524,7 @@ void syncUtilU642Addr(uint64_t u64, char* host, int64_t len, uint16_t* port) { *port = (uint16_t)((u64 & 0x00000000FFFF0000) >> 16); } -uint64_t syncUtilAddr2U64(const char* host, uint16_t port) { +uint64_t syncUtilAddr2U64(const char *host, uint16_t port) { uint32_t hostU32 = taosGetIpv4FromFqdn(host); if (hostU32 == (uint32_t)-1) { sError("failed to resolve ipv4 addr, host:%s", host); diff --git a/source/libs/tdb/src/db/tdbBtree.c b/source/libs/tdb/src/db/tdbBtree.c index 08e61c2272..612179b205 100644 --- a/source/libs/tdb/src/db/tdbBtree.c +++ b/source/libs/tdb/src/db/tdbBtree.c @@ -360,7 +360,7 @@ int 
tdbBtreePGet(SBTree *pBt, const void *pKey, int kLen, void **ppKey, int *pkL } *ppKey = pTKey; *pkLen = cd.kLen; - memcpy(*ppKey, cd.pKey, cd.kLen); + memcpy(*ppKey, cd.pKey, (size_t)cd.kLen); } if (ppVal) { @@ -372,7 +372,7 @@ int tdbBtreePGet(SBTree *pBt, const void *pKey, int kLen, void **ppKey, int *pkL } *ppVal = pTVal; *vLen = cd.vLen; - memcpy(*ppVal, cd.pVal, cd.vLen); + memcpy(*ppVal, cd.pVal, (size_t)cd.vLen); } if (TDB_CELLDECODER_FREE_KEY(&cd)) { @@ -1866,7 +1866,7 @@ int tdbBtreeNext(SBTC *pBtc, void **ppKey, int *kLen, void **ppVal, int *vLen) { *ppKey = pKey; *kLen = cd.kLen; - memcpy(pKey, cd.pKey, cd.kLen); + memcpy(pKey, cd.pKey, (size_t)cd.kLen); if (ppVal) { if (cd.vLen > 0) { @@ -1925,7 +1925,7 @@ int tdbBtreePrev(SBTC *pBtc, void **ppKey, int *kLen, void **ppVal, int *vLen) { *ppKey = pKey; *kLen = cd.kLen; - memcpy(pKey, cd.pKey, cd.kLen); + memcpy(pKey, cd.pKey, (size_t)cd.kLen); if (ppVal) { // TODO: vLen may be zero @@ -1937,7 +1937,7 @@ int tdbBtreePrev(SBTC *pBtc, void **ppKey, int *kLen, void **ppVal, int *vLen) { *ppVal = pVal; *vLen = cd.vLen; - memcpy(pVal, cd.pVal, cd.vLen); + memcpy(pVal, cd.pVal, (size_t)cd.vLen); } ret = tdbBtcMoveToPrev(pBtc); diff --git a/source/libs/tdb/src/db/tdbDb.c b/source/libs/tdb/src/db/tdbDb.c index fe9d51dc82..4f595d8d4a 100644 --- a/source/libs/tdb/src/db/tdbDb.c +++ b/source/libs/tdb/src/db/tdbDb.c @@ -62,7 +62,10 @@ int32_t tdbOpen(const char *dbname, int32_t szPage, int32_t pages, TDB **ppDb, i } memset(pDb->pgrHash, 0, tsize); - taosMulModeMkDir(dbname, 0755); + ret = taosMulModeMkDir(dbname, 0755); + if (ret < 0) { + return -1; + } #ifdef USE_MAINDB // open main db diff --git a/source/libs/tdb/src/db/tdbPager.c b/source/libs/tdb/src/db/tdbPager.c index 896b0713df..474e5d2270 100644 --- a/source/libs/tdb/src/db/tdbPager.c +++ b/source/libs/tdb/src/db/tdbPager.c @@ -1092,6 +1092,7 @@ int tdbPagerRestoreJournals(SPager *pPager) { jname[dirLen] = '/'; sprintf(jname + dirLen + 1, TDB_MAINDB_NAME "-journal.%" PRId64, *pTxnId); if (tdbPagerRestore(pPager, jname) < 0) { + taosArrayDestroy(pTxnList); tdbCloseDir(&pDir); tdbError("failed to restore file due to %s. 
jFileName:%s", strerror(errno), jname); diff --git a/source/libs/transport/inc/transComm.h b/source/libs/transport/inc/transComm.h index a2c486767f..3b304e2c77 100644 --- a/source/libs/transport/inc/transComm.h +++ b/source/libs/transport/inc/transComm.h @@ -154,6 +154,7 @@ typedef struct { #pragma pack(push, 1) +#define TRANS_VER 2 typedef struct { char version : 4; // RPC version char comp : 2; // compression algorithm, 0:no compression 1:lz4 @@ -166,6 +167,7 @@ typedef struct { uint64_t timestamp; char user[TSDB_UNI_LEN]; + int32_t compatibilityVer; uint32_t magicNum; STraceId traceId; uint64_t ahandle; // ahandle assigned by client diff --git a/source/libs/transport/inc/transportInt.h b/source/libs/transport/inc/transportInt.h index 8ea0064d44..ca48da690b 100644 --- a/source/libs/transport/inc/transportInt.h +++ b/source/libs/transport/inc/transportInt.h @@ -46,10 +46,10 @@ typedef struct { int8_t connType; char label[TSDB_LABEL_LEN]; char user[TSDB_UNI_LEN]; // meter ID - - int32_t compressSize; // -1: no compress, 0 : all data compressed, size: compress data if larger than size - int8_t encryption; // encrypt or not - + int32_t compatibilityVer; + int32_t compressSize; // -1: no compress, 0 : all data compressed, size: compress data if larger than size + int8_t encryption; // encrypt or not + int32_t retryMinInterval; // retry init interval int32_t retryStepFactor; // retry interval factor int32_t retryMaxInterval; // retry max interval diff --git a/source/libs/transport/src/trans.c b/source/libs/transport/src/trans.c index 0771f9198a..08b0451982 100644 --- a/source/libs/transport/src/trans.c +++ b/source/libs/transport/src/trans.c @@ -50,6 +50,7 @@ void* rpcOpen(const SRpcInit* pInit) { } pRpc->encryption = pInit->encryption; + pRpc->compatibilityVer = pInit->compatibilityVer; pRpc->retryMinInterval = pInit->retryMinInterval; // retry init interval pRpc->retryStepFactor = pInit->retryStepFactor; diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 1709fc3cb1..8062a0618b 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -391,6 +391,7 @@ void cliHandleResp(SCliConn* conn) { transMsg.info.ahandle = NULL; transMsg.info.traceId = pHead->traceId; transMsg.info.hasEpSet = pHead->hasEpSet; + transMsg.info.cliVer = htonl(pHead->compatibilityVer); SCliMsg* pMsg = NULL; STransConnCtx* pCtx = NULL; @@ -488,6 +489,7 @@ void cliHandleExceptImpl(SCliConn* pConn, int32_t code) { transMsg.code = code == -1 ? (pConn->broken ? TSDB_CODE_RPC_BROKEN_LINK : TSDB_CODE_RPC_NETWORK_UNAVAIL) : code; transMsg.msgType = pMsg ? 
pMsg->msg.msgType + 1 : 0; transMsg.info.ahandle = NULL; + transMsg.info.cliVer = pTransInst->compatibilityVer; if (pMsg == NULL && !CONN_NO_PERSIST_BY_APP(pConn)) { transMsg.info.ahandle = transCtxDumpVal(&pConn->ctx, transMsg.msgType); @@ -984,11 +986,10 @@ void cliSendBatch(SCliConn* pConn) { SCliThrd* pThrd = pConn->hostThrd; STrans* pTransInst = pThrd->pTransInst; - SCliBatch* pBatch = pConn->pBatch; - SCliBatchList* pList = pBatch->pList; - pList->connCnt += 1; + SCliBatch* pBatch = pConn->pBatch; + int32_t wLen = pBatch->wLen; - int32_t wLen = pBatch->wLen; + pBatch->pList->connCnt += 1; uv_buf_t* wb = taosMemoryCalloc(wLen, sizeof(uv_buf_t)); int i = 0; @@ -1018,6 +1019,8 @@ void cliSendBatch(SCliConn* pConn) { memcpy(pHead->user, pTransInst->user, strlen(pTransInst->user)); pHead->traceId = pMsg->info.traceId; pHead->magicNum = htonl(TRANS_MAGIC_NUM); + pHead->version = TRANS_VER; + pHead->compatibilityVer = htonl(pTransInst->compatibilityVer); } pHead->timestamp = taosHton64(taosGetTimestampUs()); @@ -1074,6 +1077,8 @@ void cliSend(SCliConn* pConn) { memcpy(pHead->user, pTransInst->user, strlen(pTransInst->user)); pHead->traceId = pMsg->info.traceId; pHead->magicNum = htonl(TRANS_MAGIC_NUM); + pHead->version = TRANS_VER; + pHead->compatibilityVer = htonl(pTransInst->compatibilityVer); } pHead->timestamp = taosHton64(taosGetTimestampUs()); @@ -1346,6 +1351,7 @@ static void doNotifyApp(SCliMsg* pMsg, SCliThrd* pThrd) { transMsg.info.ahandle = pMsg->ctx->ahandle; transMsg.info.traceId = pMsg->msg.info.traceId; transMsg.info.hasEpSet = false; + transMsg.info.cliVer = pTransInst->compatibilityVer; if (pCtx->pSem != NULL) { if (pCtx->pRsp == NULL) { } else { @@ -1527,6 +1533,9 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { // persist conn already release by server STransMsg resp; cliBuildExceptResp(pMsg, &resp); + // refactorr later + resp.info.cliVer = pTransInst->compatibilityVer; + if (pMsg->type != Release) { pTransInst->cfp(pTransInst->parent, &resp, NULL); } @@ -1836,6 +1845,7 @@ void cliIteraConnMsgs(SCliConn* conn) { if (-1 == cliBuildExceptResp(cmsg, &resp)) { continue; } + resp.info.cliVer = pTransInst->compatibilityVer; pTransInst->cfp(pTransInst->parent, &resp, NULL); cmsg->ctx->ahandle = NULL; diff --git a/source/libs/transport/src/transComm.c b/source/libs/transport/src/transComm.c index 0dfc7677b3..b14db9497e 100644 --- a/source/libs/transport/src/transComm.c +++ b/source/libs/transport/src/transComm.c @@ -192,7 +192,7 @@ bool transReadComplete(SConnBuffer* connBuf) { memcpy((char*)&head, connBuf->buf, sizeof(head)); int32_t msgLen = (int32_t)htonl(head.msgLen); p->total = msgLen; - p->invalid = TRANS_NOVALID_PACKET(htonl(head.magicNum)); + p->invalid = TRANS_NOVALID_PACKET(htonl(head.magicNum)) || head.version != TRANS_VER; } if (p->total >= p->len) { p->left = p->total - p->len; diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c index da3b0ad626..f23e176c79 100644 --- a/source/libs/transport/src/transSvr.c +++ b/source/libs/transport/src/transSvr.c @@ -196,6 +196,7 @@ static bool uvHandleReq(SSvrConn* pConn) { tError("%s conn %p recv invalid packet, failed to decompress", transLabel(pTransInst), pConn); return false; } + tDebug("head version: %d 2", pHead->version); pHead->code = htonl(pHead->code); pHead->msgLen = htonl(pHead->msgLen); @@ -236,8 +237,8 @@ static bool uvHandleReq(SSvrConn* pConn) { if (pConn->status == ConnNormal && pHead->noResp == 0) { transRefSrvHandle(pConn); if (cost >= EXCEPTION_LIMIT_US) { - 
tGDebug("%s conn %p %s received from %s, local info:%s, len:%d, cost:%dus, recv exception", transLabel(pTransInst), - pConn, TMSG_INFO(transMsg.msgType), pConn->dst, pConn->src, msgLen, (int)cost); + tGDebug("%s conn %p %s received from %s, local info:%s, len:%d, cost:%dus, recv exception", + transLabel(pTransInst), pConn, TMSG_INFO(transMsg.msgType), pConn->dst, pConn->src, msgLen, (int)cost); } else { tGDebug("%s conn %p %s received from %s, local info:%s, len:%d, cost:%dus", transLabel(pTransInst), pConn, TMSG_INFO(transMsg.msgType), pConn->dst, pConn->src, msgLen, (int)cost); @@ -245,8 +246,8 @@ static bool uvHandleReq(SSvrConn* pConn) { } else { if (cost >= EXCEPTION_LIMIT_US) { tGDebug("%s conn %p %s received from %s, local info:%s, len:%d, noResp:%d, code:%d, cost:%dus, recv exception", - transLabel(pTransInst), pConn, TMSG_INFO(transMsg.msgType), pConn->dst, pConn->src, msgLen, pHead->noResp, - transMsg.code, (int)(cost)); + transLabel(pTransInst), pConn, TMSG_INFO(transMsg.msgType), pConn->dst, pConn->src, msgLen, pHead->noResp, + transMsg.code, (int)(cost)); } else { tGDebug("%s conn %p %s received from %s, local info:%s, len:%d, noResp:%d, code:%d, cost:%dus", transLabel(pTransInst), pConn, TMSG_INFO(transMsg.msgType), pConn->dst, pConn->src, msgLen, pHead->noResp, @@ -262,6 +263,7 @@ static bool uvHandleReq(SSvrConn* pConn) { transMsg.info.handle = (void*)transAcquireExHandle(transGetRefMgt(), pConn->refId); transMsg.info.refId = pConn->refId; transMsg.info.traceId = pHead->traceId; + transMsg.info.cliVer = htonl(pHead->compatibilityVer); tGTrace("%s handle %p conn:%p translated to app, refId:%" PRIu64, transLabel(pTransInst), transMsg.info.handle, pConn, pConn->refId); @@ -410,6 +412,8 @@ static int uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) { pHead->traceId = pMsg->info.traceId; pHead->hasEpSet = pMsg->info.hasEpSet; pHead->magicNum = htonl(TRANS_MAGIC_NUM); + pHead->compatibilityVer = htonl(((STrans*)pConn->pTransInst)->compatibilityVer); + pHead->version = TRANS_VER; // handle invalid drop_task resp, TD-20098 if (pConn->inType == TDMT_SCH_DROP_TASK && pMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) { diff --git a/source/libs/transport/test/cliBench.c b/source/libs/transport/test/cliBench.c index aaee162cd7..8a5276b814 100644 --- a/source/libs/transport/test/cliBench.c +++ b/source/libs/transport/test/cliBench.c @@ -19,6 +19,7 @@ #include "transLog.h" #include "trpc.h" #include "tutil.h" +#include "tversion.h" typedef struct { int index; @@ -155,7 +156,7 @@ int main(int argc, char *argv[]) { } initLogEnv(); - + taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); void *pRpc = rpcOpen(&rpcInit); if (pRpc == NULL) { tError("failed to initialize RPC"); diff --git a/source/libs/transport/test/svrBench.c b/source/libs/transport/test/svrBench.c index 4e2395b17b..a3fa81662c 100644 --- a/source/libs/transport/test/svrBench.c +++ b/source/libs/transport/test/svrBench.c @@ -13,12 +13,13 @@ * along with this program. If not, see . 
*/ -//#define _DEFAULT_SOURCE +// #define _DEFAULT_SOURCE #include "os.h" #include "tglobal.h" #include "tqueue.h" #include "transLog.h" #include "trpc.h" +#include "tversion.h" int msgSize = 128; int commit = 0; @@ -151,6 +152,8 @@ int main(int argc, char *argv[]) { rpcInit.numOfThreads = 1; rpcInit.cfp = processRequestMsg; rpcInit.idleTime = 2 * 1500; + + taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); rpcDebugFlag = 131; for (int i = 1; i < argc; ++i) { @@ -187,7 +190,7 @@ int main(int argc, char *argv[]) { rpcInit.connType = TAOS_CONN_SERVER; initLogEnv(); - + taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); void *pRpc = rpcOpen(&rpcInit); if (pRpc == NULL) { tError("failed to start RPC server"); diff --git a/source/libs/transport/test/transUT.cpp b/source/libs/transport/test/transUT.cpp index 88a1e2564f..2fa94c358f 100644 --- a/source/libs/transport/test/transUT.cpp +++ b/source/libs/transport/test/transUT.cpp @@ -18,10 +18,10 @@ #include "tdatablock.h" #include "tglobal.h" #include "tlog.h" +#include "tmisce.h" #include "transLog.h" #include "trpc.h" -#include "tmisce.h" - +#include "tversion.h" using namespace std; const char *label = "APP"; @@ -54,6 +54,8 @@ class Client { rpcInit_.user = (char *)user; rpcInit_.parent = this; rpcInit_.connType = TAOS_CONN_CLIENT; + + taosVersionStrToInt(version, &(rpcInit_.compatibilityVer)); this->transCli = rpcOpen(&rpcInit_); tsem_init(&this->sem, 0, 0); } @@ -66,6 +68,7 @@ class Client { void Restart(CB cb) { rpcClose(this->transCli); rpcInit_.cfp = cb; + taosVersionStrToInt(version, &(rpcInit_.compatibilityVer)); this->transCli = rpcOpen(&rpcInit_); } void Stop() { @@ -117,6 +120,7 @@ class Server { rpcInit_.cfp = processReq; rpcInit_.user = (char *)user; rpcInit_.connType = TAOS_CONN_SERVER; + taosVersionStrToInt(version, &(rpcInit_.compatibilityVer)); } void Start() { this->transSrv = rpcOpen(&this->rpcInit_); diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 1e70ce4a1c..01d23a7e96 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -596,18 +596,18 @@ int walCheckAndRepairIdxFile(SWal* pWal, int32_t fileIdx) { // ftruncate idx file if (offset < fileSize) { if (taosFtruncateFile(pIdxFile, offset) < 0) { - wError("vgId:%d, failed to ftruncate file due to %s. offset:%" PRId64 ", file:%s", pWal->cfg.vgId, - strerror(errno), offset, fnameStr); terrno = TAOS_SYSTEM_ERROR(errno); + wError("vgId:%d, failed to ftruncate file since %s. offset:%" PRId64 ", file:%s", pWal->cfg.vgId, terrstr(), + offset, fnameStr); goto _err; } } // rebuild idx file if (taosLSeekFile(pIdxFile, 0, SEEK_END) < 0) { - wError("vgId:%d, failed to seek file due to %s. offset:%" PRId64 ", file:%s", pWal->cfg.vgId, strerror(errno), - offset, fnameStr); terrno = TAOS_SYSTEM_ERROR(errno); + wError("vgId:%d, failed to seek file since %s. offset:%" PRId64 ", file:%s", pWal->cfg.vgId, terrstr(), offset, + fnameStr); goto _err; } @@ -619,11 +619,12 @@ int walCheckAndRepairIdxFile(SWal* pWal, int32_t fileIdx) { idxEntry.offset += sizeof(SWalCkHead) + ckHead.head.bodyLen; if (walReadLogHead(pLogFile, idxEntry.offset, &ckHead) < 0) { - wError("vgId:%d, failed to read wal log head since %s. offset:%" PRId64 ", file:%s", pWal->cfg.vgId, terrstr(), - idxEntry.offset, fLogNameStr); + wError("vgId:%d, failed to read wal log head since %s. 
index:%" PRId64 ", offset:%" PRId64 ", file:%s", + pWal->cfg.vgId, terrstr(), idxEntry.ver, idxEntry.offset, fLogNameStr); goto _err; } if (taosWriteFile(pIdxFile, &idxEntry, sizeof(SWalIdxEntry)) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); wError("vgId:%d, failed to append file since %s. file:%s", pWal->cfg.vgId, terrstr(), fnameStr); goto _err; } @@ -631,6 +632,7 @@ int walCheckAndRepairIdxFile(SWal* pWal, int32_t fileIdx) { } if (taosFsyncFile(pIdxFile) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); wError("vgId:%d, faild to fsync file since %s. file:%s", pWal->cfg.vgId, terrstr(), fnameStr); goto _err; } diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c index 1223e3756c..786f48ce88 100644 --- a/source/libs/wal/src/walRead.c +++ b/source/libs/wal/src/walRead.c @@ -82,6 +82,11 @@ int32_t walNextValidMsg(SWalReader *pReader) { ", applied index:%" PRId64", end index:%" PRId64, pReader->pWal->cfg.vgId, fetchVer, lastVer, committedVer, appliedVer, endVer); + if (fetchVer > endVer){ + terrno = TSDB_CODE_WAL_LOG_NOT_EXIST; + return -1; + } + while (fetchVer <= endVer) { if (walFetchHeadNew(pReader, fetchVer) < 0) { return -1; diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c index 9b7b3dfd50..ef97bff896 100644 --- a/source/libs/wal/src/walWrite.c +++ b/source/libs/wal/src/walWrite.c @@ -473,7 +473,10 @@ static int32_t walWriteIndex(SWal *pWal, int64_t ver, int64_t offset) { // check alignment of idx entries int64_t endOffset = taosLSeekFile(pWal->pIdxFile, 0, SEEK_END); if (endOffset < 0) { - wFatal("vgId:%d, failed to seek end of idxfile due to %s. ver:%" PRId64 "", pWal->cfg.vgId, strerror(errno), ver); + wFatal("vgId:%d, failed to seek end of WAL idxfile due to %s. ver:%" PRId64 "", pWal->cfg.vgId, strerror(errno), + ver); + taosMsleep(100); + exit(EXIT_FAILURE); } return 0; } @@ -533,16 +536,20 @@ static FORCE_INLINE int32_t walWriteImpl(SWal *pWal, int64_t index, tmsg_t msgTy END: // recover in a reverse order if (taosFtruncateFile(pWal->pLogFile, offset) < 0) { - wFatal("vgId:%d, failed to ftruncate logfile to offset:%" PRId64 " during recovery due to %s", pWal->cfg.vgId, - offset, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); + wFatal("vgId:%d, failed to recover WAL logfile from write error since %s, offset:%" PRId64, pWal->cfg.vgId, + terrstr(), offset); + taosMsleep(100); + exit(EXIT_FAILURE); } int64_t idxOffset = (index - pFileInfo->firstVer) * sizeof(SWalIdxEntry); if (taosFtruncateFile(pWal->pIdxFile, idxOffset) < 0) { - wFatal("vgId:%d, failed to ftruncate idxfile to offset:%" PRId64 "during recovery due to %s", pWal->cfg.vgId, - idxOffset, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); + wFatal("vgId:%d, failed to recover WAL idxfile from write error since %s, offset:%" PRId64, pWal->cfg.vgId, + terrstr(), idxOffset); + taosMsleep(100); + exit(EXIT_FAILURE); } return -1; } diff --git a/source/util/src/terror.c b/source/util/src/terror.c index f33fb71040..f9ccef8a6b 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -632,6 +632,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_SCALAR_CONVERT_ERROR, "Cannot convert to s //tmq TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_INVALID_MSG, "Invalid message") +TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_SNAPSHOT_ERROR, "Can not operate in snapshot mode") TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_MISMATCH, "Consumer mismatch") TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_CLOSED, "Consumer closed") TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_ERROR, "Consumer error, to see log") diff --git 
a/source/util/src/tlog.c b/source/util/src/tlog.c index 70588887a0..c07bafa1ea 100644 --- a/source/util/src/tlog.c +++ b/source/util/src/tlog.c @@ -486,24 +486,11 @@ static inline int32_t taosBuildLogHead(char *buffer, const char *flags) { static inline void taosPrintLogImp(ELogLevel level, int32_t dflag, const char *buffer, int32_t len) { if ((dflag & DEBUG_FILE) && tsLogObj.logHandle && tsLogObj.logHandle->pFile != NULL && osLogSpaceAvailable()) { taosUpdateLogNums(level); -#if 0 - // DEBUG_FATAL and DEBUG_ERROR are duplicated - // fsync will cause thread blocking and may also generate log misalignment in case of asyncLog - if (tsAsyncLog && level != DEBUG_FATAL) { - taosPushLogBuffer(tsLogObj.logHandle, buffer, len); - } else { - taosWriteFile(tsLogObj.logHandle->pFile, buffer, len); - if (level == DEBUG_FATAL) { - taosFsyncFile(tsLogObj.logHandle->pFile); - } - } -#else if (tsAsyncLog) { taosPushLogBuffer(tsLogObj.logHandle, buffer, len); } else { taosWriteFile(tsLogObj.logHandle->pFile, buffer, len); } -#endif if (tsLogObj.maxLines > 0) { atomic_add_fetch_32(&tsLogObj.lines, 1); diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 931364307f..620748bc26 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -33,8 +33,12 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/subscribeStb3.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/subscribeDb0.py -N 3 -n 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/ins_topics_test.py +,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqMaxTopic.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqParamsTest.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqClientConsLog.py +,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqMaxGroupIds.py +,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqConsumeDiscontinuousData.py +,,n,system-test,python3 ./test.py -f 7-tmq/tmqDropConsumer.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/delete_stable.py @@ -320,6 +324,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mode.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/Now.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/Now.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/orderBy.py -N 5 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/percentile.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/percentile.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/pow.py @@ -751,6 +756,8 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/odbc.py ,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TD-21561.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TD-20582.py +,,n,system-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/insertMix.py -N 3 +,,n,system-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/stt.py -N 3 #tsim test ,,y,script,./test.sh -f tsim/tmq/basic2Of2ConsOverlap.sim diff --git a/tests/script/api/passwdTest.c b/tests/script/api/passwdTest.c index 1bf4987689..d9cb2128ef 100644 --- a/tests/script/api/passwdTest.c +++ b/tests/script/api/passwdTest.c @@ -32,9 +32,21 @@ #define nRoot 10 #define nUser 10 #define USER_LEN 24 +#define BUF_LEN 1024 + +typedef uint16_t VarDataLenT; + +#define TSDB_NCHAR_SIZE sizeof(int32_t) +#define VARSTR_HEADER_SIZE sizeof(VarDataLenT) + +#define GET_FLOAT_VAL(x) (*(float *)(x)) +#define GET_DOUBLE_VAL(x) (*(double *)(x)) + +#define varDataLen(v) ((VarDataLenT *)(v))[0] 
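/*
 * Minimal illustrative sketch (not part of the patch hunk above): it shows how the
 * varData macros just added are meant to be used when reading a BINARY/NCHAR cell
 * returned by taos_fetch_row(). As in printRow() below, the cell pointer is assumed
 * to point just past the 2-byte length header, so the length is read from
 * (cell - VARSTR_HEADER_SIZE). The helper name copyVarCell is hypothetical and
 * assumes <string.h> is available, as elsewhere in this test file.
 */
static int copyVarCell(const char *cell, char *out, int outSize) {
  int32_t n = varDataLen(cell - VARSTR_HEADER_SIZE); /* payload length in bytes */
  if (n >= outSize) n = outSize - 1;                  /* clamp to the output buffer */
  memcpy(out, cell, n);
  out[n] = '\0';                                      /* NUL-terminate for printing */
  return n;
}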
void createUsers(TAOS *taos, const char *host, char *qstr); void passVerTestMulti(const char *host, char *qstr); +void sysInfoTest(TAOS *taos, const char *host, char *qstr); int nPassVerNotified = 0; TAOS *taosu[nRoot] = {0}; @@ -83,6 +95,95 @@ static void queryDB(TAOS *taos, char *command) { taos_free_result(pSql); } +int printRow(char *str, TAOS_ROW row, TAOS_FIELD *fields, int numFields) { + int len = 0; + char split = ' '; + + for (int i = 0; i < numFields; ++i) { + if (i > 0) { + str[len++] = split; + } + + if (row[i] == NULL) { + len += sprintf(str + len, "%s", "NULL"); + continue; + } + + switch (fields[i].type) { + case TSDB_DATA_TYPE_TINYINT: + len += sprintf(str + len, "%d", *((int8_t *)row[i])); + break; + case TSDB_DATA_TYPE_UTINYINT: + len += sprintf(str + len, "%u", *((uint8_t *)row[i])); + break; + case TSDB_DATA_TYPE_SMALLINT: + len += sprintf(str + len, "%d", *((int16_t *)row[i])); + break; + case TSDB_DATA_TYPE_USMALLINT: + len += sprintf(str + len, "%u", *((uint16_t *)row[i])); + break; + case TSDB_DATA_TYPE_INT: + len += sprintf(str + len, "%d", *((int32_t *)row[i])); + break; + case TSDB_DATA_TYPE_UINT: + len += sprintf(str + len, "%u", *((uint32_t *)row[i])); + break; + case TSDB_DATA_TYPE_BIGINT: + len += sprintf(str + len, "%" PRId64, *((int64_t *)row[i])); + break; + case TSDB_DATA_TYPE_UBIGINT: + len += sprintf(str + len, "%" PRIu64, *((uint64_t *)row[i])); + break; + case TSDB_DATA_TYPE_FLOAT: { + float fv = 0; + fv = GET_FLOAT_VAL(row[i]); + len += sprintf(str + len, "%f", fv); + } break; + case TSDB_DATA_TYPE_DOUBLE: { + double dv = 0; + dv = GET_DOUBLE_VAL(row[i]); + len += sprintf(str + len, "%lf", dv); + } break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + case TSDB_DATA_TYPE_GEOMETRY: { + int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE); + memcpy(str + len, row[i], charLen); + len += charLen; + } break; + case TSDB_DATA_TYPE_TIMESTAMP: + len += sprintf(str + len, "%" PRId64, *((int64_t *)row[i])); + break; + case TSDB_DATA_TYPE_BOOL: + len += sprintf(str + len, "%d", *((int8_t *)row[i])); + default: + break; + } + } + return len; +} + +static int printResult(TAOS_RES *res, char *output) { + int numFields = taos_num_fields(res); + TAOS_FIELD *fields = taos_fetch_fields(res); + char header[BUF_LEN] = {0}; + int len = 0; + for (int i = 0; i < numFields; ++i) { + len += sprintf(header + len, "%s ", fields[i].name); + } + puts(header); + if (output) { + strncpy(output, header, BUF_LEN); + } + + TAOS_ROW row = NULL; + while ((row = taos_fetch_row(res))) { + char temp[BUF_LEN] = {0}; + printRow(temp, row, fields, numFields); + puts(temp); + } +} + int main(int argc, char *argv[]) { char qstr[1024]; @@ -99,6 +200,7 @@ int main(int argc, char *argv[]) { } createUsers(taos, argv[1], qstr); passVerTestMulti(argv[1], qstr); + sysInfoTest(taos, argv[1], qstr); taos_close(taos); taos_cleanup(); @@ -167,6 +269,8 @@ void passVerTestMulti(const char *host, char *qstr) { int nConn = nRoot + nUser; for (int i = 0; i < 15; ++i) { + printf("%s:%d [%d] second(s) elasped, passVer notification received:%d, total:%d\n", __func__, __LINE__, i, + nPassVerNotified, nConn); if (nPassVerNotified >= nConn) break; sleep(1); } @@ -175,19 +279,100 @@ void passVerTestMulti(const char *host, char *qstr) { for (int i = 0; i < nRoot; ++i) { taos_close(taos[i]); printf("%s:%d close taos[%d]\n", __func__, __LINE__, i); - sleep(1); + // sleep(1); } for (int i = 0; i < nUser; ++i) { taos_close(taosu[i]); printf("%s:%d close taosu[%d]\n", __func__, __LINE__, i); + 
// sleep(1); + } + + fprintf(stderr, "######## %s #########\n", __func__); + if (nPassVerNotified >= nConn) { + fprintf(stderr, ">>> succeed to get passVer notification since nNotify %d >= nConn %d\n", nPassVerNotified, + nConn); + } else { + fprintf(stderr, ">>> failed to get passVer notification since nNotify %d < nConn %d\n", nPassVerNotified, nConn); + } + fprintf(stderr, "######## %s #########\n", __func__); + // sleep(300); +} + +void sysInfoTest(TAOS *taosRoot, const char *host, char *qstr) { + TAOS *taos[nRoot] = {0}; + char userName[USER_LEN] = "user0"; + + for (int i = 0; i < nRoot; ++i) { + taos[i] = taos_connect(host, "user0", "taos", NULL, 0); + if (taos[i] == NULL) { + fprintf(stderr, "failed to connect to server, reason:%s\n", "null taos" /*taos_errstr(taos)*/); + exit(1); + } + } + + queryDB(taosRoot, "create database if not exists demo11 vgroups 1 minrows 10"); + queryDB(taosRoot, "create database if not exists demo12 vgroups 1 minrows 10"); + queryDB(taosRoot, "create database if not exists demo13 vgroups 1 minrows 10"); + + queryDB(taosRoot, "create table demo11.stb (ts timestamp, c1 int) tags(t1 int)"); + queryDB(taosRoot, "create table demo12.stb (ts timestamp, c1 int) tags(t1 int)"); + queryDB(taosRoot, "create table demo13.stb (ts timestamp, c1 int) tags(t1 int)"); + + sprintf(qstr, "show grants"); + char output[BUF_LEN]; + TAOS_RES *res = NULL; + int32_t nRep = 0; + +_REP: + fprintf(stderr, "######## %s loop:%d #########\n", __func__, nRep); + res = taos_query(taos[0], qstr); + if (taos_errno(res) != 0) { + fprintf(stderr, "%s:%d failed to execute: %s since %s\n", __func__, __LINE__, qstr, taos_errstr(res)); + taos_free_result(res); + exit(EXIT_FAILURE); + } + printResult(res, output); + taos_free_result(res); + if (!strstr(output, "timeseries")) { + fprintf(stderr, "%s:%d expected output: 'timeseries' not occur\n", __func__, __LINE__); + exit(EXIT_FAILURE); + } + + queryDB(taosRoot, "alter user user0 sysinfo 0"); + + fprintf(stderr, "%s:%d sleep 2 seconds to wait HB take effect\n", __func__, __LINE__); + for (int i = 1; i <= 2; ++i) { sleep(1); } - if (nPassVerNotified >= nConn) { - fprintf(stderr, "succeed to get passVer notification since nNotify %d >= nConn %d\n", nPassVerNotified, nConn); - } else { - fprintf(stderr, "failed to get passVer notification since nNotify %d < nConn %d\n", nPassVerNotified, nConn); + res = taos_query(taos[0], qstr); + if (taos_errno(res) != 0) { + if (!strstr(taos_errstr(res), "Permission denied")) { + fprintf(stderr, "%s:%d expected error: 'Permission denied' not occur\n", __func__, __LINE__); + taos_free_result(res); + exit(EXIT_FAILURE); + } } - // sleep(300); + taos_free_result(res); + + queryDB(taosRoot, "alter user user0 sysinfo 1"); + fprintf(stderr, "%s:%d sleep 2 seconds to wait HB take effect\n", __func__, __LINE__); + for (int i = 1; i <= 2; ++i) { + sleep(1); + } + + if(++nRep < 5) { + goto _REP; + } + + // close the taos_conn + for (int i = 0; i < nRoot; ++i) { + taos_close(taos[i]); + fprintf(stderr, "%s:%d close taos[%d]\n", __func__, __LINE__, i); + } + + fprintf(stderr, "######## %s #########\n", __func__); + fprintf(stderr, ">>> succeed to run sysInfoTest\n"); + fprintf(stderr, "######## %s #########\n", __func__); } \ No newline at end of file diff --git a/tests/system-test/0-others/compatibility.py b/tests/system-test/0-others/compatibility.py index cd71de0c06..9dbfd7f0ea 100644 --- a/tests/system-test/0-others/compatibility.py +++ b/tests/system-test/0-others/compatibility.py @@ -152,6 +152,7 @@ class TDTestCase: 
os.system("LD_LIBRARY_PATH=/usr/lib taos -f 0-others/TS-3131.tsql") cmd = f" LD_LIBRARY_PATH={bPath}/build/lib {bPath}/build/bin/taos -h localhost ;" + tdLog.info(f"new client version connect to old version taosd, commad return value:{cmd}") if os.system(cmd) == 0: raise Exception("failed to execute system command. cmd: %s" % cmd) diff --git a/tests/system-test/0-others/user_privilege_all.py b/tests/system-test/0-others/user_privilege_all.py new file mode 100644 index 0000000000..2e796882c8 --- /dev/null +++ b/tests/system-test/0-others/user_privilege_all.py @@ -0,0 +1,409 @@ +from itertools import product +import taos +import time +from taos.tmq import * +from util.cases import * +from util.common import * +from util.log import * +from util.sql import * +from util.sqlset import * + + +class TDTestCase: + """This test case is used to veirfy the user privilege for insert and select operation on + stable、child table and table + """ + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + # init the tdsql + tdSql.init(conn.cursor()) + self.setsql = TDSetSql() + # user info + self.username = 'test' + self.password = 'test' + # db info + self.dbname = "user_privilege_all_db" + self.stbname = 'stb' + self.common_tbname = "tb" + self.ctbname_list = ["ct1", "ct2"] + self.common_table_dict = { + 'ts':'timestamp', + 'col1':'float', + 'col2':'int' + } + self.stable_column_dict = { + 'ts': 'timestamp', + 'col1': 'float', + 'col2': 'int', + } + self.tag_dict = { + 'ctbname': 'binary(10)' + } + + # case list + self.cases = { + "test_db_table_both_no_permission": { + "db_privilege": "none", + "stable_priviege": "none", + "child_table_ct1_privilege": "none", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "none", + "sql": ["insert into ct1 using stb tags('ct1') values(now, 1.1, 1)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 3.3, 3);", + "select * from tb;"], + "res": [False, False, False, False, False, False] + }, + "test_db_no_permission_table_read": { + "db_privilege": "none", + "stable_priviege": "none", + "child_table_ct1_privilege": "none", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "read", + "sql": ["insert into ct1 using stb tags('ct1') values(now, 1.1, 1)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 3.3, 3);", + "select * from tb;"], + "res": [False, False, False, False, False, True] + }, + "test_db_no_permission_childtable_read": { + "db_privilege": "none", + "stable_priviege": "none", + "child_table_ct1_privilege": "read", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "none", + "sql": ["insert into ct1 using stb tags('ct1') values(now, 1.1, 1)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 3.3, 3);", + "select * from tb;"], + "res": [False, True, True, False, False, False] + }, + "test_db_no_permission_table_write": { + "db_privilege": "none", + "stable_priviege": "none", + "child_table_ct1_privilege": "none", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "write", + "sql": ["insert into ct1 using stb tags('ct1') values(now, 1.1, 1)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 3.3, 3);", + "select * from tb;"], + "res": [False, False, False, False, True, False] + }, + "test_db_no_permission_childtable_write": { + 
"db_privilege": "none", + "stable_priviege": "none", + "child_table_ct1_privilege": "none", + "child_table_ct2_privilege": "write", + "table_tb_privilege": "none", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 1.1, 1)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 3.3, 3);", + "select * from tb;"], + "res": [True, False, False, False, False, False] + }, + "test_db_read_table_no_permission": { + "db_privilege": "read", + "stable_priviege": "none", + "child_table_ct1_privilege": "none", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "none", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 1.1, 1)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 3.3, 3);", + "select * from tb;"], + "res": [False, True, True, True, False, True] + }, + "test_db_read_table_read": { + "db_privilege": "read", + "stable_priviege": "none", + "child_table_ct1_privilege": "none", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "read", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 1.1, 1)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 3.3, 3);", + "select * from tb;"], + "res": [False, True, True, True, False, True] + }, + "test_db_read_childtable_read": { + "db_privilege": "read", + "stable_priviege": "none", + "child_table_ct1_privilege": "read", + "child_table_ct2_privilege": "read", + "table_tb_privilege": "none", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 1.1, 1)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 3.3, 3);", + "select * from tb;"], + "res": [False, True, True, True, False, True] + }, + "test_db_read_table_write": { + "db_privilege": "read", + "stable_priviege": "none", + "child_table_ct1_privilege": "none", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "write", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 1.1, 1)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 4.4, 4);", + "select * from tb;"], + "res": [False, True, True, True, True, True] + }, + "test_db_read_childtable_write": { + "db_privilege": "read", + "stable_priviege": "none", + "child_table_ct1_privilege": "write", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "none", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 1.1, 1)", + "insert into ct1 using stb tags('ct1') values(now, 5.5, 5)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 4.4, 4);", + "select * from tb;"], + "res": [False, True, True, True, True, False, True] + }, + "test_db_write_table_no_permission": { + "db_privilege": "write", + "stable_priviege": "none", + "child_table_ct1_privilege": "none", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "none", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 6.6, 6)", + "insert into ct1 using stb tags('ct1') values(now, 7.7, 7)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 8.8, 8);", + "select * from tb;"], + "res": [True, True, False, False, False, True, False] + }, + "test_db_write_table_write": { + "db_privilege": "write", + "stable_priviege": "none", + "child_table_ct1_privilege": "none", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "none", + "sql": 
["insert into ct2 using stb tags('ct2') values(now, 9.9, 9)", + "insert into ct1 using stb tags('ct1') values(now, 10.0, 10)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 11.1, 11);", + "select * from tb;"], + "res": [True, True, False, False, False, True, False] + }, + "test_db_write_childtable_write": { + "db_privilege": "write", + "stable_priviege": "none", + "child_table_ct1_privilege": "none", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "none", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 12.2, 12)", + "insert into ct1 using stb tags('ct1') values(now, 13.3, 13)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 14.4, 14);", + "select * from tb;"], + "res": [True, True, False, False, False, True, False] + }, + "test_db_write_table_read": { + "db_privilege": "write", + "stable_priviege": "none", + "child_table_ct1_privilege": "none", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "read", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 15.5, 15)", + "insert into ct1 using stb tags('ct1') values(now, 16.6, 16)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 17.7, 17);", + "select * from tb;"], + "res": [True, True, False, False, False, True, True] + }, + "test_db_write_childtable_read": { + "db_privilege": "write", + "stable_priviege": "none", + "child_table_ct1_privilege": "read", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "none", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 18.8, 18)", + "insert into ct1 using stb tags('ct1') values(now, 19.9, 19)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 20.0, 20);", + "select * from tb;"], + "res": [True, True, True, True, False, True, False] + } + } + + def prepare_data(self): + """Create the db and data for test + """ + tdLog.debug("Start to prepare the data for test") + # create datebase + tdSql.execute(f"create database {self.dbname}") + tdSql.execute(f"use {self.dbname}") + + # create stable + tdSql.execute(self.setsql.set_create_stable_sql(self.stbname, self.stable_column_dict, self.tag_dict)) + tdLog.debug("Create stable {} successfully".format(self.stbname)) + + # insert data into child table + for ctname in self.ctbname_list: + tdSql.execute(f"insert into {ctname} using {self.stbname} tags('{ctname}') values(now, 1.1, 1)") + tdSql.execute(f"insert into {ctname} using {self.stbname} tags('{ctname}') values(now, 2.1, 2)") + + # create common table + tdSql.execute(self.setsql.set_create_normaltable_sql(self.common_tbname, self.common_table_dict)) + tdLog.debug("Create common table {} successfully".format(self.common_tbname)) + + # insert data into common table + tdSql.execute(f"insert into {self.common_tbname} values(now, 1.1, 1)") + tdSql.execute(f"insert into {self.common_tbname} values(now, 2.2, 2)") + tdLog.debug("Finish to prepare the data") + + def create_user(self): + """Create the user for test + """ + tdSql.execute(f'create user {self.username} pass "{self.password}"') + tdLog.debug("sql:" + f'create user {self.username} pass "{self.password}" successfully') + + def grant_privilege(self, username, privilege, table, tag_condition=None): + """Add the privilege for the user + """ + try: + if tag_condition: + tdSql.execute(f'grant {privilege} on {self.dbname}.{table} with {tag_condition} to {username}') + else: + 
tdSql.execute(f'grant {privilege} on {self.dbname}.{table} to {username}') + time.sleep(2) + tdLog.debug("Grant {} privilege on {}.{} with condition {} to {} successfully".format(privilege, self.dbname, table, tag_condition, username)) + except Exception as ex: + tdLog.exit(ex) + + def remove_privilege(self, username, privilege, table, tag_condition=None): + """Remove the privilege for the user + """ + try: + if tag_condition: + tdSql.execute(f'revoke {privilege} on {self.dbname}.{table} with {tag_condition} from {username}') + else: + tdSql.execute(f'revoke {privilege} on {self.dbname}.{table} from {username}') + tdLog.debug("Revoke {} privilege on {}.{} with condition {} from {} successfully".format(privilege, self.dbname, table, tag_condition, username)) + except Exception as ex: + tdLog.exit(ex) + + def run(self): + self.create_user() + # prepare the test data + self.prepare_data() + + for case_name in self.cases.keys(): + tdLog.debug("Execute the case {} with params {}".format(case_name, str(self.cases[case_name]))) + # grant privilege for user test if case need + if self.cases[case_name]["db_privilege"] != "none": + self.grant_privilege(self.username, self.cases[case_name]["db_privilege"], "*") + if self.cases[case_name]["stable_priviege"] != "none": + self.grant_privilege(self.username, self.cases[case_name]["stable_priviege"], self.stbname) + if self.cases[case_name]["child_table_ct1_privilege"] != "none" and self.cases[case_name]["child_table_ct2_privilege"] != "none": + self.grant_privilege(self.username, self.cases[case_name]["child_table_ct1_privilege"], self.stbname, "ctbname='ct1' or ctbname='ct2'") + elif self.cases[case_name]["child_table_ct1_privilege"] != "none": + self.grant_privilege(self.username, self.cases[case_name]["child_table_ct1_privilege"], self.stbname, "ctbname='ct1'") + elif self.cases[case_name]["child_table_ct2_privilege"] != "none": + self.grant_privilege(self.username, self.cases[case_name]["child_table_ct2_privilege"], self.stbname, "ctbname='ct2'") + if self.cases[case_name]["table_tb_privilege"] != "none": + self.grant_privilege(self.username, self.cases[case_name]["table_tb_privilege"], self.common_tbname) + # connect db with user test + testconn = taos.connect(user=self.username, password=self.password) + if case_name != "test_db_table_both_no_permission": + testconn.execute("use %s;" % self.dbname) + # check privilege of user test from ins_user_privileges table + res = testconn.query("select * from information_schema.ins_user_privileges;") + tdLog.debug("Current information_schema.ins_user_privileges values: {}".format(res.fetch_all())) + # check privilege of user test by executing sql query + for index in range(len(self.cases[case_name]["sql"])): + tdLog.debug("Execute sql: {}".format(self.cases[case_name]["sql"][index])) + try: + # for write privilege + if "insert " in self.cases[case_name]["sql"][index]: + testconn.execute(self.cases[case_name]["sql"][index]) + # check the expected result + if self.cases[case_name]["res"][index]: + tdLog.debug("Write data with sql {} successfully".format(self.cases[case_name]["sql"][index])) + # for read privilege + elif "select " in self.cases[case_name]["sql"][index]: + res = testconn.query(self.cases[case_name]["sql"][index]) + data = res.fetch_all() + tdLog.debug("query result: {}".format(data)) + # check query results by cases + if case_name in ["test_db_no_permission_childtable_read", "test_db_write_childtable_read"] and self.cases[case_name]["sql"][index] == "select * from ct2;": + if not 
self.cases[case_name]["res"][index]: + if 0 == len(data): + tdLog.debug("Query with sql {} successfully as expected with empty result".format(self.cases[case_name]["sql"][index])) + continue + else: + tdLog.exit("Query with sql {} failed with result {}".format(self.cases[case_name]["sql"][index], data)) + # check the expected result + if self.cases[case_name]["res"][index]: + if len(data) > 0: + tdLog.debug("Query with sql {} successfully".format(self.cases[case_name]["sql"][index])) + else: + tdLog.exit("Query with sql {} failed with result {}".format(self.cases[case_name]["sql"][index], data)) + else: + tdLog.exit("Execute query sql {} successfully, but expected failed".format(self.cases[case_name]["sql"][index])) + except BaseException as ex: + # check the expect false result + if not self.cases[case_name]["res"][index]: + tdLog.debug("Execute sql {} failed with {} as expected".format(self.cases[case_name]["sql"][index], str(ex))) + continue + # unexpected exception + else: + tdLog.exit(ex) + # remove the privilege + if self.cases[case_name]["db_privilege"] != "none": + self.remove_privilege(self.username, self.cases[case_name]["db_privilege"], "*") + if self.cases[case_name]["stable_priviege"] != "none": + self.remove_privilege(self.username, self.cases[case_name]["stable_priviege"], self.stbname) + if self.cases[case_name]["child_table_ct1_privilege"] != "none": + self.remove_privilege(self.username, self.cases[case_name]["child_table_ct1_privilege"], self.stbname, "ctbname='ct1'") + if self.cases[case_name]["child_table_ct2_privilege"] != "none": + self.remove_privilege(self.username, self.cases[case_name]["child_table_ct2_privilege"], self.stbname, "ctbname='ct2'") + if self.cases[case_name]["table_tb_privilege"] != "none": + self.remove_privilege(self.username, self.cases[case_name]["table_tb_privilege"], self.common_tbname) + # close the connection of user test + testconn.close() + + def stop(self): + # remove the user + tdSql.execute(f'drop user {self.username}') + # close the connection + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/diff.py b/tests/system-test/2-query/diff.py index cdea8964b4..c6f233eefa 100644 --- a/tests/system-test/2-query/diff.py +++ b/tests/system-test/2-query/diff.py @@ -23,7 +23,7 @@ class TDTestCase: tdSql.execute( f"create table {dbname}.ntb(ts timestamp,c1 int,c2 double,c3 float)") tdSql.execute( - f"insert into {dbname}.ntb values(now,1,1.0,10.5)(now+1s,10,-100.0,5.1)(now+10s,-1,15.1,5.0)") + f"insert into {dbname}.ntb values('2023-01-01 00:00:01',1,1.0,10.5)('2023-01-01 00:00:02',10,-100.0,5.1)('2023-01-01 00:00:03',-1,15.1,5.0)") tdSql.query(f"select diff(c1,0) from {dbname}.ntb") tdSql.checkRows(2) @@ -233,6 +233,40 @@ class TDTestCase: tdSql.checkRows(19) tdSql.checkData(0,0,None) + # TD-25098 + + tdSql.query(f"select ts, diff(c1) from {dbname}.ntb order by ts") + tdSql.checkRows(2) + tdSql.checkData(0, 0, '2023-01-01 00:00:02.000') + tdSql.checkData(1, 0, '2023-01-01 00:00:03.000') + + tdSql.checkData(0, 1, 9) + tdSql.checkData(1, 1, -11) + + tdSql.query(f"select ts, diff(c1) from {dbname}.ntb order by ts desc") + tdSql.checkRows(2) + tdSql.checkData(0, 0, '2023-01-01 00:00:03.000') + tdSql.checkData(1, 0, '2023-01-01 00:00:02.000') + + tdSql.checkData(0, 1, -11) + tdSql.checkData(1, 1, 9) + + tdSql.query(f"select ts, diff(c1) from (select * from {dbname}.ntb order by ts)") + tdSql.checkRows(2) + 
tdSql.checkData(0, 0, '2023-01-01 00:00:02.000') + tdSql.checkData(1, 0, '2023-01-01 00:00:03.000') + + tdSql.checkData(0, 1, 9) + tdSql.checkData(1, 1, -11) + + tdSql.query(f"select ts, diff(c1) from (select * from {dbname}.ntb order by ts desc)") + tdSql.checkRows(2) + tdSql.checkData(0, 0, '2023-01-01 00:00:02.000') + tdSql.checkData(1, 0, '2023-01-01 00:00:01.000') + + tdSql.checkData(0, 1, 11) + tdSql.checkData(1, 1, -9) + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index 47a4bc4dcf..b6cefbe36f 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -44,7 +44,7 @@ class TDTestCase: tdSql.execute( f'''create table if not exists {dbname}.{tbname} - (ts timestamp, c0 tinyint, c1 smallint, c2 int, c3 bigint, c4 double, c5 float, c6 bool, c7 varchar(10), c8 nchar(10)) + (ts timestamp, c0 tinyint, c1 smallint, c2 int, c3 bigint, c4 double, c5 float, c6 bool, c7 varchar(10), c8 nchar(10), c9 tinyint unsigned, c10 smallint unsigned, c11 int unsigned, c12 bigint unsigned) ''' ) @@ -52,9 +52,9 @@ class TDTestCase: tdSql.execute(f"use db") - tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:05', 5, 5, 5, 5, 5.0, 5.0, true, 'varchar', 'nchar')") - tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:10', 10, 10, 10, 10, 10.0, 10.0, true, 'varchar', 'nchar')") - tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:15', 15, 15, 15, 15, 15.0, 15.0, true, 'varchar', 'nchar')") + tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:05', 5, 5, 5, 5, 5.0, 5.0, true, 'varchar', 'nchar', 5, 5, 5, 5)") + tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:10', 10, 10, 10, 10, 10.0, 10.0, true, 'varchar', 'nchar', 10, 10, 10, 10)") + tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:15', 15, 15, 15, 15, 15.0, 15.0, true, 'varchar', 'nchar', 15, 15, 15, 15)") tdLog.printNoPrefix("==========step3:fill null") @@ -129,21 +129,71 @@ class TDTestCase: tdLog.printNoPrefix("==========step4:fill value") ## {. . 
.} - tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(value, 1)") + col_list = {'c0', 'c1', 'c2', 'c3', 'c9', 'c10', 'c11', 'c12'} + for col in col_list: + tdSql.query(f"select interp({col}) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(value, 1)") + tdSql.checkRows(13) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, 5) + tdSql.checkData(2, 0, 1) + tdSql.checkData(3, 0, 1) + tdSql.checkData(4, 0, 1) + tdSql.checkData(5, 0, 1) + tdSql.checkData(6, 0, 10) + tdSql.checkData(7, 0, 1) + tdSql.checkData(8, 0, 1) + tdSql.checkData(9, 0, 1) + tdSql.checkData(10, 0, 1) + tdSql.checkData(11, 0, 15) + tdSql.checkData(12, 0, 1) + + tdSql.query(f"select interp(c4) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(value, 1)") tdSql.checkRows(13) - tdSql.checkData(0, 0, 1) - tdSql.checkData(1, 0, 5) - tdSql.checkData(2, 0, 1) - tdSql.checkData(3, 0, 1) - tdSql.checkData(4, 0, 1) - tdSql.checkData(5, 0, 1) - tdSql.checkData(6, 0, 10) - tdSql.checkData(7, 0, 1) - tdSql.checkData(8, 0, 1) - tdSql.checkData(9, 0, 1) - tdSql.checkData(10, 0, 1) - tdSql.checkData(11, 0, 15) - tdSql.checkData(12, 0, 1) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, 5.0) + tdSql.checkData(2, 0, 1.0) + tdSql.checkData(3, 0, 1.0) + tdSql.checkData(4, 0, 1.0) + tdSql.checkData(5, 0, 1.0) + tdSql.checkData(6, 0, 10.0) + tdSql.checkData(7, 0, 1.0) + tdSql.checkData(8, 0, 1.0) + tdSql.checkData(9, 0, 1.0) + tdSql.checkData(10, 0, 1.0) + tdSql.checkData(11, 0, 15.0) + tdSql.checkData(12, 0, 1.0) + + tdSql.query(f"select interp(c5) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(value, 1)") + tdSql.checkRows(13) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, 5.0) + tdSql.checkData(2, 0, 1.0) + tdSql.checkData(3, 0, 1.0) + tdSql.checkData(4, 0, 1.0) + tdSql.checkData(5, 0, 1.0) + tdSql.checkData(6, 0, 10.0) + tdSql.checkData(7, 0, 1.0) + tdSql.checkData(8, 0, 1.0) + tdSql.checkData(9, 0, 1.0) + tdSql.checkData(10, 0, 1.0) + tdSql.checkData(11, 0, 15.0) + tdSql.checkData(12, 0, 1.0) + + tdSql.query(f"select interp(c6) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(value, 1)") + tdSql.checkRows(13) + tdSql.checkData(0, 0, True) + tdSql.checkData(1, 0, True) + tdSql.checkData(2, 0, True) + tdSql.checkData(3, 0, True) + tdSql.checkData(4, 0, True) + tdSql.checkData(5, 0, True) + tdSql.checkData(6, 0, True) + tdSql.checkData(7, 0, True) + tdSql.checkData(8, 0, True) + tdSql.checkData(9, 0, True) + tdSql.checkData(10, 0, True) + tdSql.checkData(11, 0, True) + tdSql.checkData(12, 0, True) ## {} ... tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:01', '2020-02-01 00:00:04') every(1s) fill(value, 1)") diff --git a/tests/system-test/2-query/orderBy.py b/tests/system-test/2-query/orderBy.py new file mode 100644 index 0000000000..fed1651b3a --- /dev/null +++ b/tests/system-test/2-query/orderBy.py @@ -0,0 +1,298 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import random +import time +import copy + +import taos +from util.log import * +from util.cases import * +from util.sql import * + +class TDTestCase: + + # get col value and total max min ... + def getColsValue(self, i, j): + # c1 value + if random.randint(1, 10) == 5: + c1 = None + else: + c1 = 1 + + # c2 value + if j % 3200 == 0: + c2 = 8764231 + elif random.randint(1, 10) == 5: + c2 = None + else: + c2 = random.randint(-87654297, 98765321) + + # c3 is order + c3 = i * self.childRow + j + + value = f"({self.ts}, " + + # c1 + if c1 is None: + value += "null," + else: + self.c1Cnt += 1 + value += f"{c1}," + # c2 + if c2 is None: + value += "null," + else: + value += f"{c2}," + # total count + self.c2Cnt += 1 + # max + if self.c2Max is None: + self.c2Max = c2 + else: + if c2 > self.c2Max: + self.c2Max = c2 + # min + if self.c2Min is None: + self.c2Min = c2 + else: + if c2 < self.c2Min: + self.c2Min = c2 + # sum + if self.c2Sum is None: + self.c2Sum = c2 + else: + self.c2Sum += c2 + + # c3 + value += f"{c3}," + # ts1 same with ts + value += f"{self.ts})" + + # move next + self.ts += 1 + + return value + + # insert data + def insertData(self): + tdLog.info("insert data ....") + sqls = "" + for i in range(self.childCnt): + # insert child table + values = "" + pre_insert = f"insert into t{i} values " + for j in range(self.childRow): + if values == "": + values = self.getColsValue(i, j) + else: + values += "," + self.getColsValue(i, j) + + # batch insert + if j % self.batchSize == 0 and values != "": + sql = pre_insert + values + tdSql.execute(sql) + values = "" + # append last + if values != "": + sql = pre_insert + values + tdSql.execute(sql) + values = "" + + sql = "flush database db;" + tdLog.info(sql) + tdSql.execute(sql) + # insert finished + tdLog.info(f"insert data successfully.\n" + f" inserted child table = {self.childCnt}\n" + f" inserted child rows = {self.childRow}\n" + f" total inserted rows = {self.childCnt*self.childRow}\n") + return + + + # prepareEnv + def prepareEnv(self): + # init + self.ts = 1680000000000*1000 + self.childCnt = 10 + self.childRow = 100000 + self.batchSize = 5000 + + # total + self.c1Cnt = 0 + self.c2Cnt = 0 + self.c2Max = None + self.c2Min = None + self.c2Sum = None + + # create database db + sql = f"create database db vgroups 2 precision 'us' " + tdLog.info(sql) + tdSql.execute(sql) + sql = f"use db" + tdSql.execute(sql) + + # alter config + sql = "alter local 'querySmaOptimize 1';" + tdLog.info(sql) + tdSql.execute(sql) + + # create super talbe st + sql = f"create table st(ts timestamp, c1 int, c2 bigint, c3 bigint, ts1 timestamp) tags(area int)" + tdLog.info(sql) + tdSql.execute(sql) + + # create child table + for i in range(self.childCnt): + sql = f"create table t{i} using st tags({i}) " + tdSql.execute(sql) + + # insert data + self.insertData() + + # check data correct + def checkExpect(self, sql, expectVal): + tdSql.query(sql) + rowCnt = tdSql.getRows() + for i in range(rowCnt): + val = tdSql.getData(i,0) + if val != expectVal: + tdLog.exit(f"Not expect . query={val} expect={expectVal} i={i} sql={sql}") + return False + + tdLog.info(f"check expect ok. 
sql={sql} expect ={expectVal} rowCnt={rowCnt}") + return True + + # check query + def queryResultSame(self, sql1, sql2): + # sql + tdLog.info(sql1) + start1 = time.time() + rows1 = tdSql.query(sql1) + spend1 = time.time() - start1 + res1 = copy.copy(tdSql.queryResult) + + tdLog.info(sql2) + start2 = time.time() + tdSql.query(sql2) + spend2 = time.time() - start2 + res2 = tdSql.queryResult + + rowlen1 = len(res1) + rowlen2 = len(res2) + + if rowlen1 != rowlen2: + tdLog.exit(f"rowlen1={rowlen1} rowlen2={rowlen2} both not equal.") + return False + + for i in range(rowlen1): + row1 = res1[i] + row2 = res2[i] + collen1 = len(row1) + collen2 = len(row2) + if collen1 != collen2: + tdLog.exit(f"collen1={collen1} collen2={collen2} both not equal.") + return False + for j in range(collen1): + if row1[j] != row2[j]: + tdLog.exit(f"col={j} col1={row1[j]} col2={row2[j]} both col not equal.") + return False + + # warning performance + diff = (spend2 - spend1)*100/spend1 + tdLog.info("spend1=%.6fs spend2=%.6fs diff=%.1f%%"%(spend1, spend2, diff)) + if spend2 > spend1 and diff > 50: + tdLog.info("warning: the diff for performance after spliting is over 20%") + + return True + + + # init + def init(self, conn, logSql, replicaVar=1): + seed = time.clock_gettime(time.CLOCK_REALTIME) + random.seed(seed) + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), True) + + # check time macro + def queryBasic(self): + # check count + expectVal = self.childCnt * self.childRow + sql = f"select count(ts) from st " + self.checkExpect(sql, expectVal) + + # check diff + sql = f"select count(*) from (select diff(ts) as dif from st order by ts)" + self.checkExpect(sql, expectVal - 1) + + # check ts order count + sql = f"select count(*) from (select diff(ts) as dif from st order by ts) where dif!=1" + self.checkExpect(sql, 0) + + # check ts1 order count + sql = f"select count(*) from (select diff(ts1) as dif from st order by ts1) where dif!=1" + self.checkExpect(sql, 0) + + # check c3 order asc + sql = f"select count(*) from (select diff(c3) as dif from st order by c3) where dif!=1" + self.checkExpect(sql, 0) + + # check c3 order desc todo FIX + #sql = f"select count(*) from (select diff(c3) as dif from st order by c3 desc) where dif!=-1" + #self.checkExpect(sql, 0) + + + # advance + def queryAdvance(self): + # interval order todo FIX + #sql = f"select _wstart,count(ts),max(c2),min(c2) from st interval(100u) sliding(50u) order by _wstart limit 10" + #tdSql.query(sql) + #tdSql.checkRows(10) + + # simulate crash sql + sql = f"select _wstart,count(ts),max(c2),min(c2) from st interval(100a) sliding(10a) order by _wstart limit 10" + tdSql.query(sql) + tdSql.checkRows(10) + + # extent + sql = f"select _wstart,count(ts),max(c2),min(c2) from st interval(100a) sliding(10a) order by _wstart desc limit 5" + tdSql.query(sql) + tdSql.checkRows(5) + + # data correct checked + sql1 = "select sum(a),sum(b), max(c), min(d),sum(e) from (select _wstart,count(ts) as a,count(c2) as b ,max(c2) as c, min(c2) as d, sum(c2) as e from st interval(100a) sliding(100a) order by _wstart desc);" + sql2 = "select count(*) as a, count(c2) as b, max(c2) as c, min(c2) as d, sum(c2) as e from st;" + self.queryResultSame(sql1, sql2) + + # run + def run(self): + # prepare env + self.prepareEnv() + + # basic + self.queryBasic() + + # advance + self.queryAdvance() + + + # stop + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addLinux(__file__, TDTestCase()) 
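+# register the case for both Linux and Windows test runs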
+tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insertMix.py b/tests/system-test/5-taos-tools/taosbenchmark/insertMix.py new file mode 100644 index 0000000000..60daa8cdc2 --- /dev/null +++ b/tests/system-test/5-taos-tools/taosbenchmark/insertMix.py @@ -0,0 +1,102 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +import subprocess +import time + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + """ + [TD-13823] taosBenchmark test cases + """ + return + + def init(self, conn, logSql, replicaVar=1): + # comment off by Shuduo for CI self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getPath(self, tool="taosBenchmark"): + if (platform.system().lower() == 'windows'): + tool = tool + ".exe" + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if "community" in selfPath: + projPath = selfPath[: selfPath.find("community")] + else: + projPath = selfPath[: selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if (tool) in files: + rootRealPath = os.path.dirname(os.path.realpath(root)) + if "packaging" not in rootRealPath: + paths.append(os.path.join(root, tool)) + break + if len(paths) == 0: + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + + def checkDataCorrect(self): + sql = "select count(*) from meters" + tdSql.query(sql) + allCnt = tdSql.getData(0, 0) + if allCnt < 2000000: + tdLog.exit(f"taosbenchmark insert row small. row count={allCnt} sql={sql}") + return + + # group by 10 child table + rowCnt = tdSql.query("select count(*),tbname from meters group by tbname") + tdSql.checkRows(10) + + # interval + sql = "select count(*),max(ic),min(dc),last(*) from meters interval(1s)" + rowCnt = tdSql.query(sql) + if rowCnt < 10: + tdLog.exit(f"taosbenchmark interval(1s) count small. 
row cout={rowCnt} sql={sql}") + return + + # nest query + tdSql.query("select count(*) from (select * from meters order by ts desc)") + tdSql.checkData(0, 0, allCnt) + + + def run(self): + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/insertMix.json" % binPath + tdLog.info("%s" % cmd) + errcode = os.system("%s" % cmd) + if errcode != 0: + tdLog.exit(f"execute taosBenchmark ret error code={errcode}") + return + + tdSql.execute("use mixdb") + self.checkDataCorrect() + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/5-taos-tools/taosbenchmark/json/insertMix.json b/tests/system-test/5-taos-tools/taosbenchmark/json/insertMix.json new file mode 100644 index 0000000000..7f3b2103cc --- /dev/null +++ b/tests/system-test/5-taos-tools/taosbenchmark/json/insertMix.json @@ -0,0 +1,81 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "connection_pool_size": 8, + "num_of_records_per_req": 3000, + "thread_count": 10, + "create_table_thread_count": 2, + "result_file": "./insert_res_mix.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "check_sql": "yes", + "continue_if_fail": "no", + "databases": [ + { + "dbinfo": { + "name": "mixdb", + "drop": "yes", + "vgroups": 6, + "replica": 3, + "precision": "ms", + "keep": 3650, + "minRows": 100, + "maxRows": 4096 + }, + "super_tables": [ + { + "name": "meters", + "child_table_exists": "no", + "childtable_count": 10, + "insert_rows": 300000, + "childtable_prefix": "d", + "insert_mode": "taosc", + "insert_interval": 0, + "timestamp_step": 100, + "start_timestamp":1500000000000, + "disorder_ratio": 10, + "update_ratio": 5, + "delete_ratio": 1, + "disorder_fill_interval": 300, + "update_fill_interval": 25, + "generate_row_rule": 2, + "columns": [ + { "type": "bool", "name": "bc"}, + { "type": "float", "name": "fc", "max": 1, "min": 0 }, + { "type": "double", "name": "dc", "max": 1, "min": 0 }, + { "type": "tinyint", "name": "ti", "max": 100, "min": 0 }, + { "type": "smallint", "name": "si", "max": 100, "min": 0 }, + { "type": "int", "name": "ic", "max": 100, "min": 0 }, + { "type": "bigint", "name": "bi", "max": 100, "min": 0 }, + { "type": "utinyint", "name": "uti", "max": 100, "min": 0 }, + { "type": "usmallint", "name": "usi", "max": 100, "min": 0 }, + { "type": "uint", "name": "ui", "max": 100, "min": 0 }, + { "type": "ubigint", "name": "ubi", "max": 100, "min": 0 }, + { "type": "binary", "name": "bin", "len": 32}, + { "type": "nchar", "name": "nch", "len": 64} + ], + "tags": [ + { + "type": "tinyint", + "name": "groupid", + "max": 10, + "min": 1 + }, + { + "name": "location", + "type": "binary", + "len": 16, + "values": ["San Francisco", "Los Angles", "San Diego", + "San Jose", "Palo Alto", "Campbell", "Mountain View", + "Sunnyvale", "Santa Clara", "Cupertino"] + } + ] + } + ] + } + ] +} diff --git a/tests/system-test/5-taos-tools/taosbenchmark/json/stt.json b/tests/system-test/5-taos-tools/taosbenchmark/json/stt.json new file mode 100644 index 0000000000..27f32010ed --- /dev/null +++ b/tests/system-test/5-taos-tools/taosbenchmark/json/stt.json @@ -0,0 +1,81 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "connection_pool_size": 8, + "num_of_records_per_req": 3000, + 
"thread_count": 20, + "create_table_thread_count": 5, + "result_file": "./insert_res_wal.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "databases": [ + { + "dbinfo": { + "name": "db", + "drop": "yes", + "flush_each_batch": "yes", + "vgroups": 2, + "replica": 1, + "precision": "ms", + "keep": 3650, + "minRows": 100, + "maxRows": 4096 + }, + "super_tables": [ + { + "name": "meters", + "child_table_exists": "no", + "childtable_count": 1000, + "insert_rows": 2850, + "childtable_prefix": "d", + "insert_mode": "taosc", + "insert_interval": 0, + "timestamp_step": 10, + "disorder_ratio": 10, + "update_ratio": 5, + "delete_ratio": 1, + "disorder_fill_interval": 30, + "update_fill_interval": 25, + "generate_row_rule": 2, + "start_timestamp":"2022-01-01 10:00:00", + "columns": [ + { "type": "bool", "name": "bc"}, + { "type": "float", "name": "fc", "max": 1, "min": 0 }, + { "type": "double", "name": "dc", "max": 1, "min": 0 }, + { "type": "tinyint", "name": "ti", "max": 100, "min": 0 }, + { "type": "smallint", "name": "si", "max": 100, "min": 0 }, + { "type": "int", "name": "ic", "max": 100, "min": 0 }, + { "type": "bigint", "name": "bi", "max": 100, "min": 0 }, + { "type": "utinyint", "name": "uti", "max": 100, "min": 0 }, + { "type": "usmallint", "name": "usi", "max": 100, "min": 0 }, + { "type": "uint", "name": "ui", "max": 100, "min": 0 }, + { "type": "ubigint", "name": "ubi", "max": 100, "min": 0 }, + { "type": "binary", "name": "bin", "len": 32}, + { "type": "nchar", "name": "nch", "len": 64} + ], + "tags": [ + { + "type": "tinyint", + "name": "groupid", + "max": 10, + "min": 1 + }, + { + "name": "location", + "type": "binary", + "len": 16, + "values": ["San Francisco", "Los Angles", "San Diego", + "San Jose", "Palo Alto", "Campbell", "Mountain View", + "Sunnyvale", "Santa Clara", "Cupertino"] + } + ] + } + ] + } + ] +} + diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stt.py b/tests/system-test/5-taos-tools/taosbenchmark/stt.py new file mode 100644 index 0000000000..9b86bd8e40 --- /dev/null +++ b/tests/system-test/5-taos-tools/taosbenchmark/stt.py @@ -0,0 +1,102 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +import subprocess +import time + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + """ + [TD-13823] taosBenchmark test cases + """ + return + + def init(self, conn, logSql, replicaVar=1): + # comment off by Shuduo for CI self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getPath(self, tool="taosBenchmark"): + if (platform.system().lower() == 'windows'): + tool = tool + ".exe" + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if "community" in selfPath: + projPath = selfPath[: selfPath.find("community")] + else: + projPath = selfPath[: selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if (tool) in files: + rootRealPath = os.path.dirname(os.path.realpath(root)) + if "packaging" not in rootRealPath: + paths.append(os.path.join(root, tool)) + break + if len(paths) == 0: + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + + def checkDataCorrect(self): + sql = "select count(*) from meters" + tdSql.query(sql) + allCnt = tdSql.getData(0, 0) + if allCnt < 2000000: + tdLog.exit(f"taosbenchmark insert row small. row count={allCnt} sql={sql}") + return + + # group by 10 child table + rowCnt = tdSql.query("select count(*),tbname from meters group by tbname") + tdSql.checkRows(1000) + + # interval + sql = "select count(*),max(ic),min(dc),last(*) from meters interval(1s)" + rowCnt = tdSql.query(sql) + if rowCnt < 10: + tdLog.exit(f"taosbenchmark interval(1s) count small. 
row cout={rowCnt} sql={sql}") + return + + # nest query + tdSql.query("select count(*) from (select * from meters order by ts desc)") + tdSql.checkData(0, 0, allCnt) + + + def run(self): + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/stt.json" % binPath + tdLog.info("%s" % cmd) + errcode = os.system("%s" % cmd) + if errcode != 0: + tdLog.exit(f"execute taosBenchmark ret error code={errcode}") + return + + tdSql.execute("use db") + self.checkDataCorrect() + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/subscribeStb3.py b/tests/system-test/7-tmq/subscribeStb3.py index 6f3230e687..ed44ab1fb1 100644 --- a/tests/system-test/7-tmq/subscribeStb3.py +++ b/tests/system-test/7-tmq/subscribeStb3.py @@ -546,7 +546,7 @@ class TDTestCase: keyList = 'group.id:cgrp1,\ enable.auto.commit:false,\ auto.commit.interval.ms:6000,\ - auto.offset.reset:none' + auto.offset.reset:earliest' self.insertConsumerInfo(consumerId, expectrowcnt/2,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("again start consume processor") @@ -569,7 +569,7 @@ class TDTestCase: keyList = 'group.id:cgrp1,\ enable.auto.commit:false,\ auto.commit.interval.ms:6000,\ - auto.offset.reset:none' + auto.offset.reset:earliest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("again start consume processor") diff --git a/tests/system-test/7-tmq/tmqCommon.py b/tests/system-test/7-tmq/tmqCommon.py index 6b633fa193..3ea8273e7f 100644 --- a/tests/system-test/7-tmq/tmqCommon.py +++ b/tests/system-test/7-tmq/tmqCommon.py @@ -37,6 +37,9 @@ from util.common import * # INSERT_DATA = 3 class TMQCom: + def __init__(self): + self.g_end_insert_flag = 0 + def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdSql.init(conn.cursor()) @@ -330,8 +333,11 @@ class TMQCom: ctbDict[i] = 0 #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) - rowsOfCtb = 0 + rowsOfCtb = 0 while rowsOfCtb < rowsPerTbl: + if (0 != self.g_end_insert_flag): + tdLog.debug("get signal to stop insert data") + break for i in range(ctbNum): sql += " %s.%s%d values "%(dbName,ctbPrefix,i+ctbStartIdx) rowsBatched = 0 @@ -571,6 +577,20 @@ class TMQCom: tdLog.info(tsql.queryResult) tdLog.info("wait subscriptions exit for %d s"%wait_cnt) + def killProcesser(self, processerName): + killCmd = ( + "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" + % processerName + ) + + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % processerName + processID = subprocess.check_output(psCmd, shell=True) + + while processID: + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output(psCmd, shell=True) + def close(self): self.cursor.close() diff --git a/tests/system-test/7-tmq/tmqConsumeDiscontinuousData.py b/tests/system-test/7-tmq/tmqConsumeDiscontinuousData.py new file mode 100644 index 0000000000..3dabca4cd1 --- /dev/null +++ b/tests/system-test/7-tmq/tmqConsumeDiscontinuousData.py @@ -0,0 +1,248 @@ + +import sys +import time +import datetime +import threading +from taos.tmq import Consumer +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135} + + def 
__init__(self): + self.vgroups = 1 + self.ctbNum = 10 + self.rowsPerTbl = 100 + self.tmqMaxTopicNum = 1 + self.tmqMaxGroups = 1 + self.walRetentionPeriod = 3 + self.actConsumeTotalRows = 0 + self.retryPoll = 0 + self.lock = threading.Lock() + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def getPath(self, tool="taosBenchmark"): + if (platform.system().lower() == 'windows'): + tool = tool + ".exe" + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10, + 'batchNum': 1, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period %d" % (paraDict['dbName'], self.walRetentionPeriod)) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + # tdLog.info("insert data") + # tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + # tdLog.info("restart taosd to ensure that the data falls into the disk") + # tdDnodes.stop(1) + # tdDnodes.start(1) + # tdSql.query("flush database %s"%(paraDict['dbName'])) + return + + def tmqSubscribe(self, **inputDict): + consumer_dict = { + "group.id": inputDict['group_id'], + "client.id": "client", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "auto.commit.interval.ms": "100", + "enable.auto.commit": "true", + "auto.offset.reset": "earliest", + "experimental.snapshot.enable": "false", + "msg.with.table.name": "false" + } + + 
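+        # create the consumer from the settings above and poll the topic in a loop; polling is paused whenever self.retryPoll is set and resumes once the main thread clears it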
consumer = Consumer(consumer_dict) + consumer.subscribe([inputDict['topic_name']]) + onceFlag = 0 + try: + while True: + if (1 == self.retryPoll): + time.sleep(2) + continue + res = consumer.poll(inputDict['pollDelay']) + if not res: + break + err = res.error() + if err is not None: + raise err + + val = res.value() + for block in val: + # print(block.fetchall()) + data = block.fetchall() + for row in data: + # print("===================================") + # print(row) + self.actConsumeTotalRows += 1 + if (0 == onceFlag): + onceFlag = 1 + with self.lock: + self.retryPoll = 1 + currentTime = datetime.now() + print("%s temp stop consume"%(str(currentTime))) + + currentTime = datetime.now() + print("%s already consume rows: %d, and sleep for a while"%(str(currentTime), self.actConsumeTotalRows)) + # time.sleep(self.walRetentionPeriod * 3) + finally: + consumer.unsubscribe() + consumer.close() + + return + + def asyncSubscribe(self, inputDict): + pThread = threading.Thread(target=self.tmqSubscribe, kwargs=inputDict) + pThread.start() + return pThread + + def tmqCase1(self): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 100, + 'batchNum': 1, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + # create topic + topicNameList = ['dbtstb_0001'] + tdLog.info("create topics from stb") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + for i in range(len(topicNameList)): + sqlString = "create topic %s as %s" %(topicNameList[i], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + + + + # start consumer + inputDict = {'group_id': "grpid_0001", + 'topic_name': topicNameList[0], + 'pollDelay': 10 + } + + pThread2 = self.asyncSubscribe(inputDict) + + pThread1 = tmqCom.asyncInsertDataByInterlace(paraDict) + pThread1.join() + tdLog.info("firstly call to flash database") + tdSql.query("flush database %s"%(paraDict['dbName'])) + time.sleep(self.walRetentionPeriod + 1) + tdLog.info("secondely call to flash database") + tdSql.query("flush database %s"%(paraDict['dbName'])) + + # wait the consumer to complete one poll + while (0 == self.retryPoll): + time.sleep(1) + continue + + with self.lock: + self.retryPoll = 0 + currentTime = datetime.now() + print("%s restart consume"%(str(currentTime))) + + paraDict["startTs"] = 1640966400000 + paraDict["ctbNum"] * paraDict["rowsPerTbl"] + pThread3 = tmqCom.asyncInsertDataByInterlace(paraDict) + + + tdLog.debug("wait sub-thread to end insert data") + pThread3.join() + + totalInsertRows = paraDict["ctbNum"] * paraDict["rowsPerTbl"] * 2 + tdLog.debug("wait sub-thread to end consume data") + pThread2.join() + + tdLog.info("act consume total rows: %d, act insert total rows: %d"%(self.actConsumeTotalRows, totalInsertRows)) + + if (self.actConsumeTotalRows >= totalInsertRows): + tdLog.exit("act consume rows: %d not equal expect: 
%d"%(self.actConsumeTotalRows, totalInsertRows)) + + tdLog.printNoPrefix("======== test case 1 end ...... ") + + def run(self): + self.prepareTestEnv() + self.tmqCase1() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqDropConsumer.json b/tests/system-test/7-tmq/tmqDropConsumer.json new file mode 100644 index 0000000000..538e93ea5c --- /dev/null +++ b/tests/system-test/7-tmq/tmqDropConsumer.json @@ -0,0 +1,28 @@ +{ + "filetype": "subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "result_file": "tmq_res.txt", + "tmq_info": { + "concurrent": 2, + "poll_delay": 100000, + "group.id": "", + "group_mode": "independent", + "create_mode": "parallel", + "client.id": "cliid_0001", + "auto.offset.reset": "earliest", + "enable.manual.commit": "false", + "enable.auto.commit": "false", + "auto.commit.interval.ms": 1000, + "experimental.snapshot.enable": "false", + "msg.with.table.name": "false", + "rows_file": "", + "topic_list": [ + {"name": "dbtstb_0001", "sql": "select * from dbt.stb;"}, + {"name": "dbtstb_0002", "sql": "select * from dbt.stb;"} + ] + } +} diff --git a/tests/system-test/7-tmq/tmqDropConsumer.py b/tests/system-test/7-tmq/tmqDropConsumer.py new file mode 100644 index 0000000000..06ce4c0fd7 --- /dev/null +++ b/tests/system-test/7-tmq/tmqDropConsumer.py @@ -0,0 +1,293 @@ + +import sys +import time +import threading +from taos.tmq import Consumer +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135} + + def __init__(self): + self.vgroups = 2 + self.ctbNum = 10 + self.rowsPerTbl = 10 + self.tmqMaxTopicNum = 2 + self.tmqMaxGroups = 2 + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def getPath(self, tool="taosBenchmark"): + if (platform.system().lower() == 'windows'): + tool = tool + ".exe" + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 2, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 
'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 360000" % (paraDict['dbName'])) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("restart taosd to ensure that the data falls into the disk") + # tdDnodes.stop(1) + # tdDnodes.start(1) + tdSql.query("flush database %s"%(paraDict['dbName'])) + return + + def tmqSubscribe(self, topicName, newGroupId, expectResult): + # create new connector for new tdSql instance in my thread + # newTdSql = tdCom.newTdSql() + # topicName = inputDict['topic_name'] + # group_id = inputDict['group_id'] + + consumer_dict = { + "group.id": newGroupId, + "client.id": "client", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "auto.commit.interval.ms": "1000", + "enable.auto.commit": "true", + "auto.offset.reset": "earliest", + "experimental.snapshot.enable": "false", + "msg.with.table.name": "false" + } + + ret = 'success' + consumer = Consumer(consumer_dict) + # print("======%s"%(inputDict['topic_name'])) + try: + consumer.subscribe([topicName]) + except Exception as e: + tdLog.info("consumer.subscribe() fail ") + tdLog.info("%s"%(e)) + if (expectResult == "fail"): + consumer.close() + return 'success' + else: + consumer.close() + return 'fail' + + tdLog.info("consumer.subscribe() success ") + if (expectResult == "success"): + consumer.close() + return 'success' + else: + consumer.close() + return 'fail' + + def tmqCase1(self): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 100000000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + topicNameList = ['dbtstb_0001','dbtstb_0002'] + tdLog.info("create topics from stb") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + for i in range(len(topicNameList)): + sqlString = "create topic %s as %s" %(topicNameList[i], queryString) + 
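+            # one topic is created per entry in topicNameList, each selecting every column of the super table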
tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + + # tdSql.query('show topics;') + # topicNum = tdSql.queryRows + # tdLog.info(" topic count: %d"%(topicNum)) + # if topicNum != len(topicNameList): + # tdLog.exit("show topics %d not equal expect num: %d"%(topicNum, len(topicNameList))) + + pThread = tmqCom.asyncInsertDataByInterlace(paraDict) + + # use taosBenchmark to subscribe + binPath = self.getPath() + cmd = "nohup %s -f ./7-tmq/tmqDropConsumer.json > /dev/null 2>&1 & " % binPath + tdLog.info("%s"%(cmd)) + os.system(cmd) + + expectTopicNum = len(topicNameList) + consumerThreadNum = 2 + expectConsumerNUm = expectTopicNum * consumerThreadNum + expectSubscribeNum = self.vgroups * expectTopicNum * consumerThreadNum + + tdSql.query('show topics;') + topicNum = tdSql.queryRows + tdLog.info(" get topic count: %d"%(topicNum)) + if topicNum != expectTopicNum: + tdLog.exit("show topics %d not equal expect num: %d"%(topicNum, expectTopicNum)) + + flag = 0 + while (1): + tdSql.query('show consumers;') + consumerNUm = tdSql.queryRows + tdLog.info(" get consumers count: %d"%(consumerNUm)) + if consumerNUm == expectConsumerNUm: + flag = 1 + break + else: + time.sleep(1) + + if (0 == flag): + tmqCom.g_end_insert_flag = 1 + tdLog.exit("show consumers %d not equal expect num: %d"%(topicNum, expectConsumerNUm)) + + flag = 0 + for i in range(10): + tdSql.query('show subscriptions;') + subscribeNum = tdSql.queryRows + tdLog.info(" get subscriptions count: %d"%(subscribeNum)) + if subscribeNum == expectSubscribeNum: + flag = 1 + break + else: + time.sleep(1) + + if (0 == flag): + tmqCom.g_end_insert_flag = 1 + tdLog.exit("show subscriptions %d not equal expect num: %d"%(subscribeNum, expectSubscribeNum)) + + # get all consumer group id + tdSql.query('show consumers;') + consumerNUm = tdSql.queryRows + groupIdList = [] + for i in range(consumerNUm): + groupId = tdSql.getData(i,1) + existFlag = 0 + for j in range(len(groupIdList)): + if (groupId == groupIdList[j]): + existFlag = 1 + break + if (0 == existFlag): + groupIdList.append(groupId) + + # kill taosBenchmark + tmqCom.killProcesser("taosBenchmark") + tdLog.info("kill taosBenchmak end") + + # wait the status to "lost" + while (1): + exitFlag = 1 + tdSql.query('show consumers;') + consumerNUm = tdSql.queryRows + for i in range(consumerNUm): + status = tdSql.getData(i,3) + if (status != "lost"): + exitFlag = 0 + time.sleep(2) + break + if (1 == exitFlag): + break + + tdLog.info("all consumers status into 'lost'") + + # drop consumer groups + tdLog.info("drop all consumers") + for i in range(len(groupIdList)): + for j in range(len(topicNameList)): + sqlCmd = f"drop consumer group `%s` on %s"%(groupIdList[i], topicNameList[j]) + tdLog.info("drop consumer cmd: %s"%(sqlCmd)) + tdSql.execute(sqlCmd) + + tmqCom.g_end_insert_flag = 1 + tdLog.debug("notify sub-thread to stop insert data") + pThread.join() + + tdSql.query('show consumers;') + consumerNUm = tdSql.queryRows + + tdSql.query('show subscriptions;') + subscribeNum = tdSql.queryRows + + if (0 != consumerNUm or 0 != subscribeNum): + tdLog.exit("drop consumer fail! consumerNUm %d, subscribeNum: %d"%(consumerNUm, subscribeNum)) + + tdLog.info("drop consuer success, there is no consumers and subscribes") + tdLog.printNoPrefix("======== test case 1 end ...... 
") + + def run(self): + self.prepareTestEnv() + self.tmqCase1() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqMaxGroupIds.json b/tests/system-test/7-tmq/tmqMaxGroupIds.json new file mode 100644 index 0000000000..beb16576b0 --- /dev/null +++ b/tests/system-test/7-tmq/tmqMaxGroupIds.json @@ -0,0 +1,27 @@ +{ + "filetype": "subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "result_file": "tmq_res.txt", + "tmq_info": { + "concurrent": 99, + "poll_delay": 100000, + "group.id": "", + "group_mode": "independent", + "create_mode": "parallel", + "client.id": "cliid_0001", + "auto.offset.reset": "earliest", + "enable.manual.commit": "false", + "enable.auto.commit": "false", + "auto.commit.interval.ms": 1000, + "experimental.snapshot.enable": "false", + "msg.with.table.name": "false", + "rows_file": "", + "topic_list": [ + {"name": "dbtstb_0001", "sql": "select * from dbt.stb;"} + ] + } +} diff --git a/tests/system-test/7-tmq/tmqMaxGroupIds.py b/tests/system-test/7-tmq/tmqMaxGroupIds.py new file mode 100644 index 0000000000..d22b79a44c --- /dev/null +++ b/tests/system-test/7-tmq/tmqMaxGroupIds.py @@ -0,0 +1,246 @@ + +import sys +import time +import threading +from taos.tmq import Consumer +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135} + + def __init__(self): + self.vgroups = 1 + self.ctbNum = 10 + self.rowsPerTbl = 10 + self.tmqMaxTopicNum = 20 + self.tmqMaxGroups = 100 + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def getPath(self, tool="taosBenchmark"): + if (platform.system().lower() == 'windows'): + tool = tool + ".exe" + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 
'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 360000" % (paraDict['dbName'])) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("restart taosd to ensure that the data falls into the disk") + # tdDnodes.stop(1) + # tdDnodes.start(1) + tdSql.query("flush database %s"%(paraDict['dbName'])) + return + + def tmqSubscribe(self, topicName, newGroupId, expectResult): + # create new connector for new tdSql instance in my thread + # newTdSql = tdCom.newTdSql() + # topicName = inputDict['topic_name'] + # group_id = inputDict['group_id'] + + consumer_dict = { + "group.id": newGroupId, + "client.id": "client", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "auto.commit.interval.ms": "1000", + "enable.auto.commit": "true", + "auto.offset.reset": "earliest", + "experimental.snapshot.enable": "false", + "msg.with.table.name": "false" + } + + ret = 'success' + consumer = Consumer(consumer_dict) + # print("======%s"%(inputDict['topic_name'])) + try: + consumer.subscribe([topicName]) + except Exception as e: + tdLog.info("consumer.subscribe() fail ") + tdLog.info("%s"%(e)) + if (expectResult == "fail"): + consumer.close() + return 'success' + else: + consumer.close() + return 'fail' + + tdLog.info("consumer.subscribe() success ") + if (expectResult == "success"): + consumer.close() + return 'success' + else: + consumer.close() + return 'fail' + + def tmqCase1(self): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 100000000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + topicNameList = ['dbtstb_0001'] + tdLog.info("create topics from stb") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + for i in range(len(topicNameList)): + sqlString = "create topic %s as %s" %(topicNameList[i], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + + # tdSql.query('show topics;') + # topicNum = tdSql.queryRows + # tdLog.info(" topic count: %d"%(topicNum)) + # 
if topicNum != len(topicNameList):
+        #     tdLog.exit("show topics %d not equal expect num: %d"%(topicNum, len(topicNameList)))
+
+        pThread = tmqCom.asyncInsertDataByInterlace(paraDict)
+
+        # use taosBenchmark to subscribe
+        binPath = self.getPath()
+        cmd = "nohup %s -f ./7-tmq/tmqMaxGroupIds.json > /dev/null 2>&1 & " % binPath
+        tdLog.info("%s"%(cmd))
+        os.system(cmd)
+
+        expectTopicNum = 1
+        expectConsumerNUm = 99
+        expectSubscribeNum = 99
+
+        tdSql.query('show topics;')
+        topicNum = tdSql.queryRows
+        tdLog.info(" get topic count: %d"%(topicNum))
+        if topicNum != expectTopicNum:
+            tdLog.exit("show topics %d not equal expect num: %d"%(topicNum, expectTopicNum))
+
+        flag = 0
+        while (1):
+            tdSql.query('show consumers;')
+            consumerNUm = tdSql.queryRows
+            tdLog.info(" get consumers count: %d"%(consumerNUm))
+            if consumerNUm == expectConsumerNUm:
+                flag = 1
+                break
+            else:
+                time.sleep(1)
+
+        if (0 == flag):
+            tdLog.exit("show consumers %d not equal expect num: %d"%(consumerNUm, expectConsumerNUm))
+
+        flag = 0
+        for i in range(10):
+            tdSql.query('show subscriptions;')
+            subscribeNum = tdSql.queryRows
+            tdLog.info(" get subscriptions count: %d"%(subscribeNum))
+            if subscribeNum == expectSubscribeNum:
+                flag = 1
+                break
+            else:
+                time.sleep(1)
+
+        if (0 == flag):
+            tdLog.exit("show subscriptions %d not equal expect num: %d"%(subscribeNum, expectSubscribeNum))
+
+        res = self.tmqSubscribe(topicNameList[0], "newGroupId_001", "success")
+        if res != 'success':
+            tdLog.exit("subscribe with a new group id should have succeeded")
+
+        res = self.tmqSubscribe(topicNameList[0], "newGroupId_002", "fail")
+        if res != 'success':
+            tdLog.exit("subscribe beyond the max group id limit should have been rejected")
+
+        tmqCom.g_end_insert_flag = 1
+        tdLog.debug("notify sub-thread to stop insert data")
+        pThread.join()
+
+        tdLog.printNoPrefix("======== test case 1 end ...... 
") + + def run(self): + self.prepareTestEnv() + self.tmqCase1() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqMaxTopic.py b/tests/system-test/7-tmq/tmqMaxTopic.py new file mode 100644 index 0000000000..5dc49fe48f --- /dev/null +++ b/tests/system-test/7-tmq/tmqMaxTopic.py @@ -0,0 +1,262 @@ + +import sys +import time +import threading +from taos.tmq import Consumer +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135} + + def __init__(self): + self.vgroups = 1 + self.ctbNum = 10 + self.rowsPerTbl = 10 + self.tmqMaxTopicNum = 20 + self.tmqMaxGroups = 100 + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def modifyMaxTopics(self, tmqMaxTopicNum): + # single dnode + cfgDir = tdDnodes.dnodes[0].cfgDir + + # cluster dnodes + # tdDnodes[1].dataDir + # tdDnodes[1].logDir + # tdDnodes[1].cfgDir + + cfgFile = f"%s/taos.cfg"%(cfgDir) + shellCmd = 'echo "tmqMaxTopicNum %d" >> %s'%(tmqMaxTopicNum, cfgFile) + tdLog.info(" shell cmd: %s"%(shellCmd)) + os.system(shellCmd) + tdDnodes.stoptaosd(1) + tdDnodes.starttaosd(1) + time.sleep(5) + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("restart taosd to ensure that the data falls into the disk") + # tdDnodes.stop(1) + # tdDnodes.start(1) + tdSql.query("flush database 
%s"%(paraDict['dbName'])) + return + + def tmqSubscribe(self, **inputDict): + # create new connector for new tdSql instance in my thread + # newTdSql = tdCom.newTdSql() + # topicName = inputDict['topic_name'] + # group_id = inputDict['group_id'] + + consumer_dict = { + "group.id": inputDict['group_id_prefix'], + "client.id": "client", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "auto.commit.interval.ms": "1000", + "enable.auto.commit": "true", + "auto.offset.reset": "earliest", + "experimental.snapshot.enable": "false", + "msg.with.table.name": "false" + } + + for j in range(self.tmqMaxGroups): + consumer_dict["group.id"] = f"%s_%d"%(inputDict['group_id_prefix'], j) + consumer_dict["client.id"] = f"%s_%d"%(inputDict['group_id_prefix'], j) + print("======grpid: %s"%(consumer_dict["group.id"])) + consumer = Consumer(consumer_dict) + # print("======%s"%(inputDict['topic_name'])) + consumer.subscribe([inputDict['topic_name']]) + # res = consumer.poll(inputDict['pollDelay']) + return + + def asyncSubscribe(self, inputDict): + pThread = threading.Thread(target=self.tmqSubscribe, kwargs=inputDict) + pThread.start() + return pThread + + def tmqCase1(self): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNamePrefix = 'topicname_' + tdLog.info("create topics from stb") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + for i in range(self.tmqMaxTopicNum): + sqlString = "create topic %s%d as %s" %(topicNamePrefix, i, queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + + sqlString = "create topic %s%s as %s" %(topicNamePrefix, 'xyz', queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.error(sqlString) + + tdSql.query('show topics;') + topicNum = tdSql.queryRows + tdLog.info(" topic count: %d"%(topicNum)) + if topicNum != self.tmqMaxTopicNum: + tdLog.exit("show topics %d not equal expect num: %d"%(topicNum, self.tmqMaxTopicNum)) + + # self.updatecfgDict = {'tmqMaxTopicNum': 22} + # tdDnodes.stoptaosd(1) + # tdDnodes.deploy(1, self.updatecfgDict) + # tdDnodes.starttaosd(1) + # time.sleep(5) + + newTmqMaxTopicNum = 22 + self.modifyMaxTopics(newTmqMaxTopicNum) + + sqlString = "create topic %s%s as %s" %(topicNamePrefix, 'x', queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + + sqlString = "create topic %s%s as %s" %(topicNamePrefix, 'y', queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + + sqlString = "create topic %s%s as %s" %(topicNamePrefix, 'xyz', queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.error(sqlString) + + tdSql.query('show 
topics;') + topicNum = tdSql.queryRows + tdLog.info(" topic count: %d"%(topicNum)) + if topicNum != newTmqMaxTopicNum: + tdLog.exit("show topics %d not equal expect num: %d"%(topicNum, newTmqMaxTopicNum)) + + newTmqMaxTopicNum = 18 + self.modifyMaxTopics(newTmqMaxTopicNum) + + i = 0 + sqlString = "drop topic %s%d" %(topicNamePrefix, i) + tdLog.info("drop topic sql: %s"%sqlString) + tdSql.execute(sqlString) + + i = 1 + sqlString = "drop topic %s%d" %(topicNamePrefix, i) + tdLog.info("drop topic sql: %s"%sqlString) + tdSql.execute(sqlString) + + sqlString = "drop topic %s%s" %(topicNamePrefix, "x") + tdLog.info("drop topic sql: %s"%sqlString) + tdSql.execute(sqlString) + + sqlString = "drop topic %s%s" %(topicNamePrefix, "y") + tdLog.info("drop topic sql: %s"%sqlString) + tdSql.execute(sqlString) + + sqlString = "create topic %s%s as %s" %(topicNamePrefix, 'xyz', queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.error(sqlString) + + # pThreadList = [] + # for i in range(self.tmqMaxTopicNum): + # topic_name = f"%s%d" %(topicNamePrefix, i) + # print("======%s"%(topic_name)) + # group_id_prefix = f"grp_%d"%(i) + # inputDict = {'group_id_prefix': group_id_prefix, + # 'topic_name': topic_name, + # 'pollDelay': 1 + # } + + # pThread = self.asyncSubscribe(inputDict) + # pThreadList.append(pThread) + + # for j in range(self.tmqMaxGroups): + # pThreadList[j].join() + + # time.sleep(5) + # tdSql.query('show subscriptions;') + # subscribeNum = tdSql.queryRows + # expectNum = self.tmqMaxGroups * self.tmqMaxTopicNum + # tdLog.info("loop index: %d, ======subscriptions %d and expect num: %d"%(i, subscribeNum, expectNum)) + # if subscribeNum != expectNum: + # tdLog.exit("subscriptions %d not equal expect num: %d"%(subscribeNum, expectNum)) + + # # drop all topics + # for i in range(self.tmqMaxTopicNum): + # sqlString = "drop topic %s%d" %(topicNamePrefix, i) + # tdLog.info("drop topic sql: %s"%sqlString) + # tdSql.execute(sqlString) + + tdLog.printNoPrefix("======== test case 1 end ...... 
") + + def run(self): + self.prepareTestEnv() + self.tmqCase1() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index 865d4680a3..e9dd067ac4 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -361,11 +361,11 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i case TSDB_DATA_TYPE_FLOAT: width = SHELL_FLOAT_WIDTH; if (tsEnableScience) { - taosFprintfFile(pFile, "%*e", width, GET_FLOAT_VAL(val)); + taosFprintfFile(pFile, "%*.7e", width, GET_FLOAT_VAL(val)); } else { - n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.5f", width, GET_FLOAT_VAL(val)); + n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.7f", width, GET_FLOAT_VAL(val)); if (n > SHELL_FLOAT_WIDTH) { - taosFprintfFile(pFile, "%*e", width, GET_FLOAT_VAL(val)); + taosFprintfFile(pFile, "%*.7e", width, GET_FLOAT_VAL(val)); } else { taosFprintfFile(pFile, "%s", buf); } @@ -374,10 +374,10 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i case TSDB_DATA_TYPE_DOUBLE: width = SHELL_DOUBLE_WIDTH; if (tsEnableScience) { - snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%.9e", GET_DOUBLE_VAL(val)); - taosFprintfFile(pFile, "%*s", width, buf); + snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.15e", width, GET_DOUBLE_VAL(val)); + taosFprintfFile(pFile, "%s", buf); } else { - n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.9f", width, GET_DOUBLE_VAL(val)); + n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.15f", width, GET_DOUBLE_VAL(val)); if (n > SHELL_DOUBLE_WIDTH) { taosFprintfFile(pFile, "%*.15e", width, GET_DOUBLE_VAL(val)); } else { @@ -612,11 +612,12 @@ void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t break; case TSDB_DATA_TYPE_FLOAT: if (tsEnableScience) { - printf("%*e", width, GET_FLOAT_VAL(val)); + printf("%*.7e",width,GET_FLOAT_VAL(val)); } else { - n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.5f", width, GET_FLOAT_VAL(val)); + n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.7f", width, GET_FLOAT_VAL(val)); if (n > SHELL_FLOAT_WIDTH) { - printf("%*e", width, GET_FLOAT_VAL(val)); + + printf("%*.7e", width,GET_FLOAT_VAL(val)); } else { printf("%s", buf); } @@ -624,14 +625,14 @@ void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t break; case TSDB_DATA_TYPE_DOUBLE: if (tsEnableScience) { - snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%.9e", GET_DOUBLE_VAL(val)); - printf("%*s", width, buf); + snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.15e", width,GET_DOUBLE_VAL(val)); + printf("%s", buf); } else { - n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.9f", width, GET_DOUBLE_VAL(val)); + n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.15f", width, GET_DOUBLE_VAL(val)); if (n > SHELL_DOUBLE_WIDTH) { printf("%*.15e", width, GET_DOUBLE_VAL(val)); } else { - printf("%s", buf); + printf("%*s", width,buf); } } break; diff --git a/tools/shell/src/shellNettest.c b/tools/shell/src/shellNettest.c index 1a6ac3489d..9fe92212ca 100644 --- a/tools/shell/src/shellNettest.c +++ b/tools/shell/src/shellNettest.c @@ -15,6 +15,7 @@ #define _GNU_SOURCE #include "shellInt.h" +#include "tversion.h" static void shellWorkAsClient() { SShellArgs *pArgs = &shell.args; @@ -33,6 +34,7 @@ static void shellWorkAsClient() { rpcInit.user = "_dnd"; rpcInit.timeToGetConn = tsTimeToGetAvailableConn; + taosVersionStrToInt(version, 
&(rpcInit.compatibilityVer)); clientRpc = rpcOpen(&rpcInit); if (clientRpc == NULL) { printf("failed to init net test client since %s\r\n", terrstr()); @@ -123,6 +125,8 @@ static void shellWorkAsServer() { rpcInit.connType = TAOS_CONN_SERVER; rpcInit.idleTime = tsShellActivityTimer * 1000; + taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); + void *serverRpc = rpcOpen(&rpcInit); if (serverRpc == NULL) { printf("failed to init net test server since %s\r\n", terrstr());