diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000000..7d877987ac --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,5 @@ +# Pull Request Checklist + +- [ ] Is the user manual updated? +- [ ] Are the test cases passed and automated? +- [ ] Is there no significant decrease in test coverage? diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 086883dfac..88806222a0 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -5,7 +5,7 @@ node { } file_zh_changed = '' file_en_changed = '' -file_no_doc_changed = '' +file_no_doc_changed = '1' def abortPreviousBuilds() { def currentJobName = env.JOB_NAME def currentBuildNumber = env.BUILD_NUMBER.toInteger() @@ -355,7 +355,7 @@ def pre_test_build_win() { bat ''' cd %WIN_COMMUNITY_ROOT%/tests/ci pip3 install taospy==2.7.16 - pip3 install taos-ws-py==0.3.3 + pip3 install taos-ws-py==0.3.5 xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32 ''' return 1 @@ -451,8 +451,8 @@ pipeline { stage('run test') { when { - allOf { - not { expression { file_no_doc_changed == '' }} + expression { + file_no_doc_changed != '' && env.CHANGE_TARGET != 'docs-cloud' } } parallel { @@ -656,4 +656,4 @@ pipeline { ) } } -} \ No newline at end of file +} diff --git a/cmake/cmake.define b/cmake/cmake.define index ff582261b3..eb95feaf82 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -97,10 +97,13 @@ ELSE() SET(TD_TAOS_TOOLS TRUE) ENDIF() +SET(TAOS_LIB taos) +SET(TAOS_LIB_STATIC taos_static) + IF(${TD_WINDOWS}) - SET(TAOS_LIB taos_static) + SET(TAOS_LIB_PLATFORM_SPEC taos_static) ELSE() - SET(TAOS_LIB taos) + SET(TAOS_LIB_PLATFORM_SPEC taos) ENDIF() # build TSZ by default diff --git a/contrib/test/CMakeLists.txt b/contrib/test/CMakeLists.txt index f544baafde..5d613dfed2 100644 --- a/contrib/test/CMakeLists.txt +++ b/contrib/test/CMakeLists.txt @@ -28,6 +28,9 @@ if(${BUILD_WITH_TRAFT}) # add_subdirectory(traft) endif(${BUILD_WITH_TRAFT}) -add_subdirectory(azure) +if(${BUILD_S3}) + add_subdirectory(azure) +endif() + add_subdirectory(tdev) add_subdirectory(lz4) diff --git a/docs/en/08-develop/01-connect/index.md b/docs/en/08-develop/01-connect/index.md index f4dbccba24..5f0a462ec2 100644 --- a/docs/en/08-develop/01-connect/index.md +++ b/docs/en/08-develop/01-connect/index.md @@ -90,7 +90,7 @@ If `maven` is used to manage the projects, what needs to be done is only adding com.taosdata.jdbc taos-jdbcdriver - 3.3.3 + 3.4.0 ``` diff --git a/docs/en/14-reference/03-taos-sql/10-function.md b/docs/en/14-reference/03-taos-sql/10-function.md index f6c1ef24d0..3852783c10 100644 --- a/docs/en/14-reference/03-taos-sql/10-function.md +++ b/docs/en/14-reference/03-taos-sql/10-function.md @@ -422,7 +422,7 @@ CAST(expr AS type_name) TO_ISO8601(expr [, timezone]) ``` -**Description**: The ISO8601 date/time format converted from a UNIX timestamp, plus the timezone. You can specify any time zone with the timezone parameter. If you do not enter this parameter, the time zone on the client is used. +**Description**: The ISO8601 date/time format converted from a timestamp, plus the timezone. You can specify any time zone with the timezone parameter. If you do not enter this parameter, the time zone on the client is used. 
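For example (a minimal illustration; the integer literal below is assumed to be a millisecond-precision timestamp, and the fractional part of the output follows the input precision):

```sql
SELECT TO_ISO8601(1704067200000, '+08:00');
-- 2024-01-01T08:00:00.000+08:00
```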
**Return value type**: VARCHAR
@@ -466,7 +466,7 @@ return_timestamp: {
 }
 ```

-**Description**: UNIX timestamp converted from a string of date/time format
+**Description**: The timestamp converted from a string in date/time format

 **Return value type**: BIGINT, TIMESTAMP
diff --git a/docs/en/14-reference/03-taos-sql/31-compress.md b/docs/en/14-reference/03-taos-sql/31-compress.md
index 39abfe69bd..f726c8bbe6 100644
--- a/docs/en/14-reference/03-taos-sql/31-compress.md
+++ b/docs/en/14-reference/03-taos-sql/31-compress.md
@@ -30,11 +30,12 @@ In this article, it specifically refers to the level within the secondary compre
| Data Type | Optional Encoding Algorithm | Default Encoding Algorithm | Optional Compression Algorithm|Default Compression Algorithm| Default Compression Level|
| :-----------:|:----------:|:-------:|:-------:|:----------:|:----:|
-| tinyint/untinyint/smallint/usmallint/int/uint | simple8b| simple8b | lz4/zlib/zstd/xz| lz4 | medium|
+| int/uint | simple8b| simple8b | lz4/zlib/zstd/xz| lz4 | medium|
+| tinyint/utinyint/smallint/usmallint | simple8b| simple8b | lz4/zlib/zstd/xz| zlib| medium|
| bigint/ubigint/timestamp | simple8b/delta-i | delta-i |lz4/zlib/zstd/xz | lz4| medium|
|float/double | delta-d|delta-d |lz4/zlib/zstd/xz/tsz|lz4| medium|
-|binary/nchar| disabled| disabled|lz4/zlib/zstd/xz| lz4| medium|
-|bool| bit-packing| bit-packing| lz4/zlib/zstd/xz| lz4| medium|
+|binary/nchar| disabled| disabled|lz4/zlib/zstd/xz| zstd| medium|
+|bool| bit-packing| bit-packing| lz4/zlib/zstd/xz| zstd| medium|

## SQL
diff --git a/docs/en/14-reference/05-connectors/14-java.mdx b/docs/en/14-reference/05-connectors/14-java.mdx
index 1f4cf9895f..d064f37aeb 100644
--- a/docs/en/14-reference/05-connectors/14-java.mdx
+++ b/docs/en/14-reference/05-connectors/14-java.mdx
@@ -42,6 +42,9 @@ REST connection supports all platforms that can run Java.
| taos-jdbcdriver version | major changes | TDengine version |
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: |
+| 3.4.0 | 1. Replaced the fastjson library with the Jackson library; 2. the WebSocket connection protocol now uses an independent identifier; 3. optimized the use of backend pull threads to avoid timeouts caused by misuse.| - |
+| 3.3.4 | 1. Fixed a getInt error when the data type is FLOAT| - |
+| 3.3.3 | 1. Fixed a memory leak caused by WebSocket statements| - |
| 3.3.2 | 1. Optimized websocket prepareStatement performance; 2. Improved mybatis support| - |
| 3.3.0 | 1. Optimized data transmission performance under Websocket connection; 2. SSL validation skipping is supported but disabled by default| 3.3.2.0 or later |
| 3.2.11 | Fixed the result set closing bug when using a native connection.| - |
diff --git a/docs/en/14-reference/05-connectors/30-python.mdx b/docs/en/14-reference/05-connectors/30-python.mdx
index 7263a3caa6..4f17261b33 100644
--- a/docs/en/14-reference/05-connectors/30-python.mdx
+++ b/docs/en/14-reference/05-connectors/30-python.mdx
@@ -41,12 +41,18 @@ We recommend using the latest version of `taospy`, regardless of the version of
|Python Client Library Version|major changes|
|:-------------------:|:----:|
+|2.7.16|added subscription configuration (session.timeout.ms, max.poll.interval.ms)|
+|2.7.15|added support for VARBINARY and GEOMETRY types|
+|2.7.14|fixed known issues|
+|2.7.13|added a TMQ synchronous offset commit interface|
|2.7.12|1. 
added support for the `varbinary` type (not yet supported via STMT)
2. improved query performance (thanks to contributor [hadrianl](https://github.com/taosdata/taos-connector-python/pull/209))| |2.7.9|support for getting assignment and seek function on subscription| |2.7.8|add `execute_many` method| |Python Websocket Connection Version|major changes| |:----------------------------:|:-----:| +|0.3.5|1. added support for VARBINARY and GEOMETRY types
2. fixed known issues|
+|0.3.2|1. optimized WebSocket SQL query and insertion performance
2. fixed known issues
3. updated the readme and documentation|
|0.2.9|bug fixes|
|0.2.5|1. support for getting assignment and seek function on subscription
2. support schemaless
3. support STMT| |0.2.4|support `unsubscribe` on subscription| diff --git a/docs/en/14-reference/05-connectors/35-node.mdx b/docs/en/14-reference/05-connectors/35-node.mdx index 476f9bab71..2aeef7af1e 100644 --- a/docs/en/14-reference/05-connectors/35-node.mdx +++ b/docs/en/14-reference/05-connectors/35-node.mdx @@ -27,6 +27,8 @@ Node.js client library needs to be run with Node.js 14 or higher version. | Node.js connector version | major changes | TDengine 版本 | | :-----------------------: | :------------------: | :----------------:| +| 3.1.2 | Optimized the data protocol and parsing, resulting in a significant improvement in performance | 3.2.0.0 or later | +| 3.1.1 | Optimized data transmission performance | 3.2.0.0 or later | | 3.1.0 | new version, supports websocket | 3.2.0.0 or later | ## Supported features diff --git a/docs/en/14-reference/12-config/index.md b/docs/en/14-reference/12-config/index.md index 77d183a5ef..63aa6ed447 100755 --- a/docs/en/14-reference/12-config/index.md +++ b/docs/en/14-reference/12-config/index.md @@ -773,7 +773,7 @@ lossyColumns float|double 02/22 10:49:27.607990 00002933 UTL lossyColumns float|double ``` -### ifAdtFse +### ifAdtFse | Attribute | Description | | -------- | -------------------------------- | @@ -898,4 +898,4 @@ lossyColumns float|double | 53 | udf | Yes | Yes | | | 54 | enableCoreFile | Yes | Yes | | | 55 | ttlChangeOnWrite | No | Yes | | -| 56 | keepTimeOffset | Yes | Yes(discarded since 3.2.0.0) | | +| 56 | keepTimeOffset | Yes | Yes(discarded since 3.2.0.0) | see "KEEP_TIME_OFFSET" | diff --git a/docs/examples/JDBC/mybatisplus-demo/pom.xml b/docs/examples/JDBC/mybatisplus-demo/pom.xml index f792946c96..2077e31d8d 100644 --- a/docs/examples/JDBC/mybatisplus-demo/pom.xml +++ b/docs/examples/JDBC/mybatisplus-demo/pom.xml @@ -47,7 +47,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.2.4 + 3.4.0 diff --git a/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/domain/Meters.java b/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/domain/Meters.java new file mode 100644 index 0000000000..e886e56269 --- /dev/null +++ b/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/domain/Meters.java @@ -0,0 +1,16 @@ +package com.taosdata.example.mybatisplusdemo.domain; + +import lombok.Data; + +import java.sql.Timestamp; + +@Data +public class Meters { + private String tbname; + private Timestamp ts; + private float current; + private int voltage; + private float phase; + private int groupid; + private byte[] location; +} diff --git a/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/MetersMapper.java b/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/MetersMapper.java new file mode 100644 index 0000000000..441c340886 --- /dev/null +++ b/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/MetersMapper.java @@ -0,0 +1,31 @@ +package com.taosdata.example.mybatisplusdemo.mapper; + +import com.baomidou.mybatisplus.core.mapper.BaseMapper; +import com.taosdata.example.mybatisplusdemo.domain.Meters; +import org.apache.ibatis.annotations.Insert; +import org.apache.ibatis.annotations.Param; +import org.apache.ibatis.annotations.Update; + +import java.util.List; + +public interface MetersMapper extends BaseMapper { + + @Update("CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, 
location BINARY(24))") + int createTable(); + + @Insert("insert into meters (tbname, ts, groupid, location, current, voltage, phase) values(#{tbname}, #{ts}, #{groupid}, #{location}, #{current}, #{voltage}, #{phase})") + int insertOne(Meters one); + + @Insert({ + "" + }) + int insertBatch(@Param("list") List metersList); + + @Update("drop stable if exists meters") + void dropTable(); +} diff --git a/docs/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml b/docs/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml index 985ed1675e..e9855bf011 100644 --- a/docs/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml +++ b/docs/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml @@ -1,7 +1,7 @@ spring: datasource: - driver-class-name: com.taosdata.jdbc.TSDBDriver - url: jdbc:TAOS://localhost:6030/mp_test?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8 + driver-class-name: com.taosdata.jdbc.ws.WebSocketDriver + url: jdbc:TAOS-WS://localhost:6041/mp_test?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8 username: root password: taosdata diff --git a/docs/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/MetersMapperTest.java b/docs/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/MetersMapperTest.java new file mode 100644 index 0000000000..2d8458e9d9 --- /dev/null +++ b/docs/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/MetersMapperTest.java @@ -0,0 +1,112 @@ +package com.taosdata.example.mybatisplusdemo.mapper; + +import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper; +import com.baomidou.mybatisplus.core.metadata.IPage; +import com.baomidou.mybatisplus.extension.plugins.pagination.Page; +import com.taosdata.example.mybatisplusdemo.domain.Meters; +import com.taosdata.example.mybatisplusdemo.domain.Weather; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; + +import java.sql.Timestamp; +import java.util.LinkedList; +import java.util.List; +import java.util.Random; + +@RunWith(SpringJUnit4ClassRunner.class) +@SpringBootTest +public class MetersMapperTest { + + private static Random random = new Random(System.currentTimeMillis()); + + @Autowired + private MetersMapper mapper; + + @Before + public void createTable(){ + mapper.dropTable(); + mapper.createTable(); + Meters one = new Meters(); + one.setTbname("test_10001"); + one.setGroupid(10001); + one.setCurrent(random.nextFloat()); + one.setPhase(random.nextFloat()); + one.setCurrent(12345); + one.setTs(new Timestamp(1605024000000l)); + one.setLocation("望京".getBytes()); + mapper.insertOne(one); + } + + @Test + public void testSelectList() { + List meters = mapper.selectList(null); + meters.forEach(System.out::println); + } + + @Test + public void testInsertBatch() { + List metersList = new LinkedList<>(); + for (int i = 0; i < 100; i++){ + Meters one = new Meters(); + one.setTbname("tb_" + i); + one.setGroupid(i); + one.setCurrent(random.nextFloat()); + one.setPhase(random.nextFloat()); + one.setCurrent(random.nextInt()); + one.setTs(new Timestamp(1605024000000l + i)); + one.setLocation(("望京" + i).getBytes()); + metersList.add(one); + + } + int affectRows = mapper.insertBatch(metersList); + 
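        // every one of the 100 generated rows should be reported as affected by the batch insert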
Assert.assertEquals(100, affectRows); + } + + @Test + public void testSelectOne() { + QueryWrapper wrapper = new QueryWrapper<>(); + wrapper.eq("location", "望京".getBytes()); + Meters one = mapper.selectOne(wrapper); + System.out.println(one); + Assert.assertEquals(12345, one.getCurrent(), 0.00f); + Assert.assertArrayEquals("望京".getBytes(), one.getLocation()); + } + + // @Test + // public void testSelectByMap() { + // Map map = new HashMap<>(); + // map.put("location", "beijing"); + // List weathers = mapper.selectByMap(map); + // Assert.assertEquals(1, weathers.size()); + // } + + @Test + public void testSelectObjs() { + List ts = mapper.selectObjs(null); + System.out.println(ts); + } + + @Test + public void testSelectCount() { + int count = mapper.selectCount(null); +// Assert.assertEquals(5, count); + System.out.println(count); + } + + @Test + public void testSelectPage() { + IPage page = new Page(1, 2); + IPage metersIPage = mapper.selectPage(page, null); + System.out.println("total : " + metersIPage.getTotal()); + System.out.println("pages : " + metersIPage.getPages()); + for (Meters meters : metersIPage.getRecords()) { + System.out.println(meters); + } + } + +} \ No newline at end of file diff --git a/docs/examples/JDBC/springbootdemo/pom.xml b/docs/examples/JDBC/springbootdemo/pom.xml index ba75cdcec3..df8a3f5d61 100644 --- a/docs/examples/JDBC/springbootdemo/pom.xml +++ b/docs/examples/JDBC/springbootdemo/pom.xml @@ -70,7 +70,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.0.0 + 3.4.0 diff --git a/docs/examples/node/package.json b/docs/examples/node/package.json index 14303c8f37..d77c96fbb3 100644 --- a/docs/examples/node/package.json +++ b/docs/examples/node/package.json @@ -4,6 +4,12 @@ "main": "index.js", "license": "MIT", "dependencies": { - "@tdengine/websocket": "^3.1.1" - } + "@tdengine/websocket": "^3.1.2" + }, + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "keywords": [], + "author": "", + "description": "" } diff --git a/docs/examples/node/websocketexample/all_type_stmt.js b/docs/examples/node/websocketexample/all_type_stmt.js old mode 100644 new mode 100755 index f095bee090..2297923e75 --- a/docs/examples/node/websocketexample/all_type_stmt.js +++ b/docs/examples/node/websocketexample/all_type_stmt.js @@ -95,8 +95,8 @@ async function all_type_example() { tagParams.setBoolean([true]); tagParams.setVarchar(["hello"]); tagParams.setNchar(["stmt"]); - tagParams.setGeometry([geometryData]); tagParams.setVarBinary([vbData]); + tagParams.setGeometry([geometryData]); await stmt.setTags(tagParams); @@ -108,8 +108,8 @@ async function all_type_example() { bindParams.setBoolean([true]); bindParams.setVarchar(["hello"]); bindParams.setNchar(["stmt"]); - bindParams.setGeometry([geometryData]); bindParams.setVarBinary([vbData]); + bindParams.setGeometry([geometryData]); await stmt.bind(bindParams); await stmt.batch(); diff --git a/docs/zh/06-advanced/02-cache.md b/docs/zh/06-advanced/02-cache.md index 065adbf50a..875452205b 100644 --- a/docs/zh/06-advanced/02-cache.md +++ b/docs/zh/06-advanced/02-cache.md @@ -1,68 +1,44 @@ --- -sidebar_label: 数据缓存 -title: 数据缓存 +sidebar_label: 读缓存 +title: 读缓存 toc_max_heading_level: 4 --- -在工业互联网和物联网大数据应用场景中,时序数据库的性能表现尤为关键。这类应用程序不仅要求数据的实时写入能力,还需求能够迅速获取设备的最新状态或对最新数据进行实时计算。通常,大数据平台会通过部署 Redis 或类似的缓存技术来满足这些需求。然而,这种做法会增加系统的复杂性和运营成本。 +在物联网(IoT)和工业互联网(IIoT)大数据应用场景中,实时数据的价值往往远超历史数据。企业不仅需要数据处理系统具备高效的实时写入能力,更需要能快速获取设备的最新状态,或者对最新数据进行实时计算和分析。无论是工业设备的状态监控、车联网中的车辆位置追踪,还是智能仪表的实时读数,当前值都是业务运行中不可或缺的核心数据。这些数据直接关系到生产安全、运营效率以及用户体验。 
-为了解决这一问题,TDengine 采用了针对性的缓存优化策略。通过精心设计的缓存机制,TDengine 实现了数据的实时高效写入和快速查询,从而有效降低整个集群的复杂性和运营成本。这种优化不仅提升了性能,还为用户带来了更简洁、易用的解决方案,使他们能够更专注于核心业务的发展。 +例如,在工业生产中,生产线设备的当前运行状态至关重要。操作员需要实时监控温度、压力、转速等关键指标,一旦设备出现异常,这些数据必须即时呈现,以便迅速调整工艺参数,避免停产或更大的损失。在车联网领域,以滴滴为例,车辆的实时位置数据是滴滴平台优化派单策略、提升运营效率的关键,确保每位乘客快速上车并享受更高质量的出行体验。 -## 写缓存 +同时,看板系统和智能仪表作为现场操作和用户端的窗口,也需要实时数据支撑。无论是工厂管理者通过看板获取的实时生产指标,还是家庭用户随时查询智能水表、电表的用量,实时性不仅影响到运营和决策效率,更直接关系到用户对服务的满意程度。 -TDengine 采用了一种创新的时间驱动缓存管理策略,亦称为写驱动的缓存管理机制。这一策略与传统的读驱动的缓存模式有所不同,其核心思想是将最新写入的数据优先保存在缓存中。当缓存容量达到预设的临界值时,系统会将最早存储的数据批量写入硬盘,从而实现缓存与硬盘之间的动态平衡。 +## 传统缓存方案的局限性 -在物联网数据应用中,用户往往最关注最近产生的数据,即设备的当前状态。TDengine 充分利用了这一业务特性,将最近到达的当前状态数据优先存储在缓存中,以便用户能够快速获取所需信息。 +为了满足这些高频实时查询需求,许多企业选择将 Redis 等缓存技术集成到大数据平台中,通过在数据库和应用之间添加一层缓存来提升查询性能。然而,这种方法也带来了不少问题: +- 系统复杂性增加:需要额外部署和维护缓存集群,对系统架构提出了更高的要求。 +- 运营成本上升:需要额外的硬件资源来支撑缓存,增加了维护和管理的开销。 +- 一致性问题:缓存和数据库之间的数据同步需要额外的机制来保障,否则可能出现数据不一致的情况。 -为了实现数据的分布式存储和高可用性,TDengine 引入了虚拟节点(vnode)的概念。每个 vnode 可以拥有多达 3 个副本,这些副本共同组成一个 vnode group,简称 vgroup。在创建数据库时,用户需要确定每个 vnode 的写入缓存大小,以确保数据的合理分配和高效存储。 +## TDengine 的解决方案:内置读缓存 -创建数据库时的两个关键参数 `vgroups` 和 `buffer` 分别决定了数据库中的数据由多少个 vgroup 进行处理,以及为每个 vnode 分配多少写入缓存。通过合理配置这两个 -参数,用户可以根据实际需求调整数据库的性能和存储容量,从而实现最佳的性能和成本效益。 +为了解决这些问题,TDengine 针对物联网和工业互联网的高频实时查询场景,设计并实现了读缓存机制。这一机制能够自动将每张表的最后一条记录缓存到内存中,从而在不引入第三方缓存技术的情况下,直接满足用户对当前值的实时查询需求。 -例 如, 下面的 SQL 创建了包含 10 个 vgroup,每个 vnode 占 用 256MB 内存的数据库。 -```sql -CREATE DATABASE POWER VGROUPS 10 BUFFER 256 CACHEMODEL 'NONE' PAGES 128 PAGESIZE 16; -``` +TDengine 采用时间驱动的缓存管理策略,将最新数据优先存储在缓存中,查询时无需访问硬盘即可快速返回结果。当缓存容量达到设定上限时,系统会批量将最早的数据写入硬盘,既提升了查询效率,也有效减少了硬盘的写入负担,延长硬件使用寿命。 -缓存越大越好,但超过一定阈值后再增加缓存对写入性能提升并无帮助。 +用户可通过设置 cachemodel 参数,自定义缓存模式,包括缓存最新一行数据、每列最近的非 NULL 值,或同时缓存行和列的数据。这种灵活设计在物联网场景中尤为重要,使设备状态的实时查询更加高效精准。 -## 读缓存 +这种读缓存机制的内置化设计显著降低了查询延迟,避免了引入 Redis 等外部系统的复杂性和运维成本。同时,减少了频繁查询对存储系统的压力,大幅提升系统的整体吞吐能力,确保在高并发场景下依然稳定高效运行。通过读缓存,TDengine 为用户提供了一种更轻量化的实时数据处理方案,不仅优化了查询性能,还降低了整体运维成本,为物联网和工业互联网用户提供强有力的技术支持。 -在创建数据库时,用户可以选择是否启用缓存机制以存储该数据库中每张子表的最新数据。这一缓存机制由数据库创建参数 cachemodel 进行控制。参数 cachemodel 具有如 -下 4 种情况: -- none: 不缓存 -- last_row: 缓存子表最近一行数据,这将显著改善 last_row 函数的性能 -- last_value: 缓存子表每一列最近的非 NULL 值,这将显著改善无特殊影响(比如 WHERE, ORDER BY, GROUP BY, INTERVAL)时的 last 函数的性能 -- both: 同时缓存最近的行和列,即等同于上述 cachemodel 值为 last_row 和 last_value 的行为同时生效 +## TDengine 的读缓存配置 + +在创建数据库时,用户可以选择是否启用缓存机制以存储该数据库中每张子表的最新数据。这一缓存机制由数据库创建参数 cachemodel 进行控制。参数 cachemodel 具有如 下 4 种情况: +- none:不缓存 +- last_row:缓存子表最近一行数据,这将显著改善 last_row 函数的性能 +- last_value:缓存子表每一列最近的非 NULL 值,这将显著改善无特殊影响(比如 WHERE,ORDER BY,GROUP BY, INTERVAL)时的 last 函数的性能 +- both:同时缓存最近的行和列,即等同于上述 cachemodel 值为 last_row 和 last_value 的行为同时生效 当使用数据库读缓存时,可以使用参数 cachesize 来配置每个 vnode 的内存大小。 -- cachesize:表示每个 vnode 中用于缓存子表最近数据的内存大小。默认为 1 ,范围是[1, 65536],单位是 MB。需要根据机器内存合理配置。 +- cachesize:表示每个 vnode 中用于缓存子表最近数据的内存大小。默认为 1 ,范围是[1,65536],单位是 MB。需要根据机器内存合理配置。 -## 元数据缓存 - -为了提升查询和写入操作的效率,每个 vnode 都配备了缓存机制,用于存储其曾经获取过的元数据。这一元数据缓存的大小由创建数据库时的两个参数 pages 和 pagesize 共同决定。其中,pagesize 参数的单位是 KB,用于指定每个缓存页的大小。如下 SQL 会为数据库 power 的每个 vnode 创建 128 个 page、每个 page 16KB 的元数据缓存 - -```sql -CREATE DATABASE POWER PAGES 128 PAGESIZE 16; -``` - -## 文件系统缓存 - -TDengine 采用 WAL 技术作为基本的数据可靠性保障手段。WAL 是一种先进的数据保护机制,旨在确保在发生故障时能够迅速恢复数据。其核心原理在于,在数据实际写入数据存储层之前,先将其变更记录到一个日志文件中。这样一来,即便集群遭遇崩溃或其他故障,也能确保数据安全无损。 - -TDengine 利用这些日志文件实现故障前的状态恢复。在写入 WAL 的过程中,数据是以顺序追加的方式写入硬盘文件的。因此,文件系统缓存在此过程中发挥着关键作用,对写入性能产生显著影响。为了确保数据真正落盘,系统会调用 fsync 函数,该函数负责将文件系统缓存中的数据强制写入硬盘。 - -数据库参数 wal_level 和 wal_fsync_period 共同决定了 WAL 的保存行为。。 -- wal_level:此参数控制 WAL 的保存级别。级别 1 表示仅将数据写入 WAL,但不立即执行 fsync 函数;级别 2 则表示在写入 WAL 的同时执行 fsync 函数。默认情况下,wal_level 设为 1。虽然执行 fsync 
函数可以提高数据的持久性,但相应地也会降低写入性能。 -- wal_fsync_period:当 wal_level 设置为 2 时,这个参数控制执行 fsync 的频率。设置为 0 表示每次写入后立即执行 fsync,这可以确保数据的安全性,但可能会牺牲一些性能。当设置为大于 0 的数值时,表示 fsync 周期,默认为 3000,范围是[1, 180000],单位毫秒。 - -```sql -CREATE DATABASE POWER WAL_LEVEL 2 WAL_FSYNC_PERIOD 3000; -``` - -在创建数据库时可以选择不同的参数类型,来选择性能优先或者可靠性优先。 -- 1: 写 WAL 但不执行 fsync ,新写入 WAL 的数据保存在文件系统缓存中但并未写入磁盘,这种方式性能优先 -- 2: 写 WAL 且执行 fsync,新写入 WAL 的数据被立即同步到磁盘上,可靠性更高 +关于数据库的具体创建,相关参数和操作说明请参考[创建数据库](../../reference/taos-sql/database/) ## 实时数据查询的缓存实践 diff --git a/docs/zh/06-advanced/03-stream.md b/docs/zh/06-advanced/03-stream.md index c47831dde3..c26924561c 100644 --- a/docs/zh/06-advanced/03-stream.md +++ b/docs/zh/06-advanced/03-stream.md @@ -124,7 +124,7 @@ create stream if not exists count_history_s fill_history 1 into count_history as 窗口关闭是由事件时间决定的,如事件流中断、或持续延迟,此时事件时间无法更新,可能导致无法得到最新的计算结果。 -因此,流计算提供了以事件时间结合处理时间计算的 MAX_DELAY 触发模式。MAX_DELAY 模式在窗口关闭时会立即触发计算。此外,当数据写入后,计算触发的时间超过 max delay 指定的时间,则立即触发计算。 +因此,流计算提供了以事件时间结合处理时间计算的 MAX_DELAY 触发模式:MAX_DELAY 模式在窗口关闭时会立即触发计算,它的单位可以自行指定,具体单位:a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。此外,当数据写入后,计算触发的时间超过 MAX_DELAY 指定的时间,则立即触发计算。 ### 流计算的窗口关闭 @@ -259,4 +259,4 @@ flush database test1; 5.修改 taos.cfg,去掉 disableStream 1,或将 disableStream 改为 0 -6.启动 taosd \ No newline at end of file +6.启动 taosd diff --git a/docs/zh/06-advanced/05-data-in/05-opcua.md b/docs/zh/06-advanced/05-data-in/05-opcua.md index 5795528d01..5123dacd1b 100644 --- a/docs/zh/06-advanced/05-data-in/05-opcua.md +++ b/docs/zh/06-advanced/05-data-in/05-opcua.md @@ -150,7 +150,7 @@ CSV 文件中的每个 Row 配置一个 OPC 数据点位。Row 的规则如下 #### 5.2. 选择数据点位 -可以通过配置 **根节点ID**、**命名空间**、**正则匹配** 等条件,对点位进行筛选。 +可以通过配置 **根节点ID**、**命名空间**、**节点ID**、**节点名称** 等条件,对点位进行筛选。 通过配置 **超级表名**、**表名称**,指定数据要写入的超级表、子表。 diff --git a/docs/zh/06-advanced/05-data-in/06-opcda.md b/docs/zh/06-advanced/05-data-in/06-opcda.md index 7da5b89fe6..32ac1c1f8a 100644 --- a/docs/zh/06-advanced/05-data-in/06-opcda.md +++ b/docs/zh/06-advanced/05-data-in/06-opcda.md @@ -126,7 +126,7 @@ CSV 文件中的每个 Row 配置一个 OPC 数据点位。Row 的规则如下 #### 4.2. 
选择数据点位 -可以通过配置 **根节点ID** 和 **正则匹配** 作为过滤条件,对点位进行筛选。 +可以通过配置 **根节点ID**、**节点ID**、**节点名称** 作为过滤条件,对点位进行筛选。 通过配置 **超级表名**、**表名称**,指定数据要写入的超级表、子表。 diff --git a/docs/zh/06-advanced/05-data-in/pic/opcda-06-point.png b/docs/zh/06-advanced/05-data-in/pic/opcda-06-point.png index 9ab69fb386..b47463dfbb 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/opcda-06-point.png and b/docs/zh/06-advanced/05-data-in/pic/opcda-06-point.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/opcua-06-point.png b/docs/zh/06-advanced/05-data-in/pic/opcua-06-point.png index 783adae3be..18f01e8885 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/opcua-06-point.png and b/docs/zh/06-advanced/05-data-in/pic/opcua-06-point.png differ diff --git a/docs/zh/06-advanced/06-TDgpt/02-management.md b/docs/zh/06-advanced/06-TDgpt/02-management.md index b8261797f9..9aaa123299 100644 --- a/docs/zh/06-advanced/06-TDgpt/02-management.md +++ b/docs/zh/06-advanced/06-TDgpt/02-management.md @@ -4,10 +4,12 @@ sidebar_label: "安装部署" --- ### 环境准备 -为了使用 TDgpt 的高级时序数据分析功能功能,需要在 TDengine 集群中安装部署 AI node(Anode)。Anode 可以运行在 Linux/Windows/Mac 等操作系统之上。请确保安装部署 Anode之前,系统中已经具备 3.10 及以上版本的Python环境,以及相应的 Python 包自动安装组件 Pip,否则无法正常安装 Anode。 +使用 TDgpt 的高级时序数据分析功能需要在 TDengine 集群中安装部署 AI node(Anode)。Anode 可以运行在 Linux/Windows/MacOS 等平台上,同时需要 3.10 或以上版本的 Python 环境支持。 +> 部署 Anode 需要 TDengine Enterprise 3.3.4.3 及以后版本,请首先确认搭配 Anode 使用的 TDengine 能够支持 Anode。 ### 安装及卸载 -不同操作系统上安装及部署操作有细微的差异,主要是安装/卸载操作、安装路径、Anode服务的启停等几个方面。下面将以 Linux 系统为例,说明安装部署的整个流程。使用 Linux 环境下的安装包 TDengine-enterprise-anode-1.x.x.tar.gz 可进行 Anode 的安装部署工作,使用如下命令: +不同操作系统上安装及部署 Anode 有一些差异,主要是卸载操作、安装路径、服务启停等方面。本文以 Linux 系统为例,说明安装部署的流程。 +使用 Linux 环境下的安装包 TDengine-enterprise-anode-1.x.x.tar.gz 可进行 Anode 的安装部署工作,命令如下: ```bash tar -xzvf TDengine-enterprise-anode-1.0.0.tar.gz @@ -15,11 +17,11 @@ cd TDengine-enterprise-anode-1.0.0 sudo ./install.sh ``` -在安装完成 Anode 之后,执行命令 `rmtaosanode` 即可已经安装的 Anode。 -Anode 使用 Python 虚拟环境运行,避免影响安装环境中现有的 Python 库。安装后的默认 Python 虚拟环境目录位于 `/var/lib/taos/taosanode/venv/`。为了避免反复安装虚拟环境带来的开销,卸载 Anode 执行的命令 `rmtaosanode` 并不会自动删除该虚拟环境,如果您确认不需要 Python 的虚拟环境,手动删除即可。 +对于已经安装的 Anode,执行命令 `rmtaosanode` 即可完成卸载。 +为了避免影响系统已有的 Python 环境,Anode 使用虚拟环境运行。安装 Anode 会在目录 `/var/lib/taos/taosanode/venv/` 中创建默认的 Python 虚拟环境,Anode 运行所需要的库均安装在该目录下。为了避免反复安装虚拟环境带来的开销,卸载命令 `rmtaosanode` 并不会自动删除该虚拟环境,如果您确认不再需要 Python 的虚拟环境,手动删除该目录即可。 ### 启停服务 -在 Linux 系统中,安装 Anode 以后可以使用 `systemd` 来管理 Anode 服务。使用如下命令可以启动/停止/检查状态。 +在 Linux 系统中,安装 Anode 以后会自动创建 `taosanoded` 服务。可以使用 `systemd` 来管理 Anode 服务,使用如下命令启动/停止/检查 Anode。 ```bash systemctl start taosanoded @@ -28,6 +30,8 @@ systemctl status taosanoded ``` ### 目录及配置说明 +安装完成后,Anode 主体目录结构如下: + |目录/文件|说明| |---------------|------| |/usr/local/taos/taosanode/bin|可执行文件目录| @@ -39,13 +43,14 @@ systemctl status taosanoded #### 配置说明 -Anode 提供的服务使用 uWSGI 驱动,因此 Anode 和 uWSGI 的配置信息共同存放在相同的配置文件 `taosanode.ini`,该配置文件默认位于 `/etc/taos/`目录下,其具体内容及说明如下: +Anode 的服务需要使用 uWSGI 驱动驱动运行,因此 Anode 和 uWSGI 的配置信息共同存放在相同的配置文件 `taosanode.ini` 中,该配置文件默认位于 `/etc/taos/` 目录下。 +具体内容及配置项说明如下: ```ini [uwsgi] -# Anode HTTP service ip:port -http = 127.0.0.1:6050 +# Anode RESTful service ip:port +http = 127.0.0.1:6090 # base directory for Anode python files, do NOT modified this chdir = /usr/local/taos/taosanode/lib @@ -81,8 +86,8 @@ log-level = DEBUG ``` **提示** -请勿设置 `daemonize` 参数,该参数会导致 uWSGI 与 systemctl 冲突,从而无法正常启动。 -上面的示例配置文件 `taosanode.ini` 只包含了使用 Anode 提供服务的基础配置参数,对于 uWSGI 的其他配置参数设置及其含义和说明请参考 [uWSGIS官方文档](https://uwsgi-docs-zh.readthedocs.io/zh-cn/latest/Options.html)。 +请勿设置 
`daemonize` 参数,该参数会导致 uWSGI 与 systemctl 冲突,从而导致 Anode 无法正常启动。 +上面的示例配置文件 `taosanode.ini` 只包含了使用 Anode 提供服务的基础配置参数,对于 uWSGI 的其他配置参数的设置及其说明请参考 [uWSGIS官方文档](https://uwsgi-docs-zh.readthedocs.io/zh-cn/latest/Options.html)。 Anode 运行配置主要是以下: - app-log: Anode 服务运行产生的日志,用户可以调整其到需要的位置 @@ -91,14 +96,15 @@ Anode 运行配置主要是以下: ### Anode 基本操作 +对于 Anode 的管理,用户需要通过 TDengine 的命令行接口 taos 进行。因此下述介绍的管理命令都需要先打开 taos, 连接到 TDengine 运行实例。 #### 创建 Anode ```sql CREATE ANODE {node_url} ``` -node_url 是提供服务的 Anode 的 IP 和 PORT, 例如:`create anode 'http://localhost:6050'`。启动 Anode 以后如果不注册到 TDengine 集群中,则无法提供正常的服务。不建议 Anode 注册到两个或多个集群中。 +node_url 是提供服务的 Anode 的 IP 和 PORT组成的字符串, 例如:`create anode '127.0.0.1:6090'`。Anode 启动后还需要注册到 TDengine 集群中才能提供服务。不建议将 Anode 同时注册到两个集群中。 #### 查看 Anode -列出集群中所有的数据分析节点,包括其 `FQDN`, `PORT`, `STATUS`。 +列出集群中所有的数据分析节点,包括其 `FQDN`, `PORT`, `STATUS`等属性。 ```sql SHOW ANODES; ``` @@ -111,7 +117,7 @@ SHOW ANODES FULL; #### 刷新集群中的分析算法缓存 ```SQL -UPDATE ANODE {node_id} +UPDATE ANODE {anode_id} UPDATE ALL ANODES ``` @@ -119,4 +125,4 @@ UPDATE ALL ANODES ```sql DROP ANODE {anode_id} ``` -删除 Anode 只是将 Anode 从 TDengine 集群中删除,管理 Anode 的启停仍然需要使用`systemctl`命令。 +删除 Anode 只是将 Anode 从 TDengine 集群中删除,管理 Anode 的启停仍然需要使用 `systemctl` 命令。卸载 Anode 则需要使用上面提到的 `rmtaosanode` 命令。 diff --git a/docs/zh/06-advanced/06-TDgpt/03-preprocess.md b/docs/zh/06-advanced/06-TDgpt/03-preprocess.md index 77ddbb49a4..9efd2bdf11 100644 --- a/docs/zh/06-advanced/06-TDgpt/03-preprocess.md +++ b/docs/zh/06-advanced/06-TDgpt/03-preprocess.md @@ -7,27 +7,43 @@ import activity from './pic/activity.png'; import wndata from './pic/white-noise-data.png' ### 分析流程 -在针对时序数据进行高级分析之前,首先进行数据的白噪声检查(White Noise Data check, WND)。整体的流程如下图所示。 +时序数据分析之前需要有预处理的过程,为减轻分析算法的负担,TDgpt 在将时序数据发给具体分析算法进行分析时,已经对数据做了预处理,整体的流程如下图所示。 预处理流程 -- 对于时间序列数据预测分析,首先进行白噪声检查,不是白噪声数据,进行数据重采样和时间戳对齐的预处理,预处理完成后进行数据预测分析。 -- 对于时间序列异常检测,首先进行白噪声检查,检查通过以后无后续的处理流程,直接进行异常检测分析。 +TDgpt 首先对输入数据进行白噪声检查(White Noise Data check), 检查通过以后针对预测分析,还要进行输入(历史)数据的重采样和时间戳对齐处理(异常检测跳过数据重采样和时间戳对齐步骤)。 +预处理完成以后,再进行预测或异常检测操作。预处理过程部署于预测或异常检测处理逻辑的一部分。 ### 白噪声检查 white-noise-data -白噪声时序数据可以简单地认为是随机数构成的时序数据序列(如上图所示),随机数的时间序列没有分析的价值,因此会直接返回空结果。白噪声检查采用 `Ljung-Box` 检验,`Ljung-Box` 统计量的计算过程需遍历整个输入序列。如果用户能够明确输入序列一定不是白噪声序列,那么可以通过增加参数 `wncheck=0` 要求分析平台忽略白噪声输入时间序列检查,从而节省计算资源。 +白噪声时序数据可以简单地认为是随机数构成的时间数据序列(如上图所示的正态分布随机数序列),随机数构成的时间序列没有分析的价值,因此会直接返回。白噪声检查采用经典的 `Ljung-Box` 统计量检验,计算 `Ljung-Box` 统计量需遍历整个输入时间序列。如果用户能够明确输入序列一定不是白噪声序列,那么可以在参数列表中增加参数 `wncheck=0` 强制要求分析平台忽略白噪声检查,从而节省计算资源。 TDgpt 暂不提供独立的时间序列白噪声检测功能。 -### 数据重采样和时间戳对齐 +### 重采样和时间戳对齐 -对于输入的时间序列数据,在对齐进行预测分析之前需要进行必要的预处理流程。预处理解决以下两个方面的问题: +对于进行预测分析的时间序列数据,在进行预测分析前需要进行必要的预处理。预处理主要解决以下两个问题: -- 真实时间序列数据时间戳未对齐。由于数据生成的原因或者网关给时间序列数据赋值时间戳并不能保证按照严格的时间间隔赋值,此时 分析平台会自动将输入数据按照用户指定的采样频率对时间戳进行对齐处理。例如输入时间序列 [11, 22, 29, 41],用户指定时间间隔为 10,该时间序列的时间戳将被自动重整为以下时间戳序列 [10, 20, 30, 40]。 -- 数据时间重采样。用户输入时间序列的采样频率超过了输出结果的频率,例如输入时间序列的采样频率是 5,输出结果的频率是 10,输入时间序列 [0, 5, 10, 15, 20, 25, 30] 将被重采用为间隔 为 10 的序列 [0, 10, 20,30],[5, 15, 25] 处的数据将被丢弃。 +- 真实时间序列数据时间戳未对齐。由于数据生成设备的原因或网关赋值时间戳的时候并不能保证按照严格的时间间隔赋值,时间序列数据并不能保证是严格按照采样频率对齐。例如采样频率为 1Hz 的一个时间序列数据序列,其时间戳序列如下: -需要注意的是,数据输入平台不支持缺失数据补齐后进行的预测分析,如果输入时间序列数据 [11, 22, 29, 49],并且用户要求的时间间隔为 10,重整对齐后的序列是 [10, 20, 30, 50] 那么该序列进行预测分析将返回错误。 + > ['20:12:21.143', '20:12:22.187', '20:12:23.032', '20:12:24.384', '20:12:25.033'] + + 预测返回的时间序列时间戳会严格对齐,例如返回后续的两个预测结果的时间戳,其时间一定如下:['20:12:26.000', '20:12:27.000']。因此上述的输入时间戳序列要进行时间戳对齐,变换成为如下时间戳序列 + + > ['20:12:21.000', '20:12:22.000', '20:12:23.000', '20:12:24.000', '20:12:25.000'] + + +- 数据时间重采样。用户输入时间序列的采样频率超过了输出结果的频率,例如输入时间序列的采样时间间隔是 5 
sec,但是要求输出预测结果的采样时间间隔是 10sec + + > ['20:12:20.000', '20:12:25.000', '20:12:30.000', '20:12:35.000', '20:12:40.000'] + + 重采样为采样间隔为 10sec 的时间戳序列 + + > ['20:12:20.000', '20:12:30.000', '20:12:40.000'] + + 然后将其作为预测分析的输入, ['20:12:25.000', '20:12:35.000'] 数据被丢弃。 + +需要注意的是,预处理过程不支持缺失数据补齐操作,如果输入时间序列数据 ['20:12:10.113', '20:12:21.393', '20:12:29.143', '20:12:51.330'],并且要求的采样时间间隔为 10sec,重整对齐后的时间戳序列是 ['20:12:10.000', '20:12:20.000', '20:12:30.000', '20:12:50.000'] 那么对该序列进行预测分析将返回错误。 diff --git a/docs/zh/06-advanced/06-TDgpt/04-forecast/02-arima.md b/docs/zh/06-advanced/06-TDgpt/04-forecast/02-arima.md index 0b5a80ad71..469f557984 100644 --- a/docs/zh/06-advanced/06-TDgpt/04-forecast/02-arima.md +++ b/docs/zh/06-advanced/06-TDgpt/04-forecast/02-arima.md @@ -3,14 +3,14 @@ title: "ARIMA" sidebar_label: "ARIMA" --- -本节讲述 ARIMA 算法模型的使用方法。 +本节说明 ARIMA 算法模型的使用方法。 ## 功能概述 -ARIMA 即自回归移动平均模型(Autoregressive Integrated Moving Average, ARIMA),也记作 ARIMA(p,d,q),是统计模型中最常见的一种用来进行时间序列预测的模型。 +ARIMA:Autoregressive Integrated Moving Average,即自回归移动平均模型,记作 ARIMA(p,d,q),是统计模型中最常见的一种用来进行时间序列预测的模型。 ARIMA 模型是一种自回归模型,只需要自变量即可预测后续的值。ARIMA 模型要求时间序列**平稳**,或经过差分处理后平稳,如果是不平稳的数据,**无法**获得正确的结果。 ->平稳的时间序列:其性质不随观测时间的变化而变化。具有趋势或季节性的时间序列不是平稳时间序列——趋势和季节性使得时间序列在不同时段呈现不同性质。 +> 平稳的时间序列:其性质不随观测时间的变化而变化。具有趋势或季节性的时间序列不是平稳时间序列——趋势和季节性使得时间序列在不同时段呈现不同性质。 以下参数可以动态输入,控制预测过程中生成合适的 ARIMA 模型。 @@ -38,6 +38,11 @@ ARIMA 模型是一种自回归模型,只需要自变量即可预测后续的 FORECAST(i32, "algo=arima,alpha=95,period=10,start_p=1,max_p=5,start_q=1,max_q=5") ``` +完整的调用SQL语句如下: +```SQL +SELECT _frowts, FORECAST(i32, "algo=arima,alpha=95,period=10,start_p=1,max_p=5,start_q=1,max_q=5") from foo +``` + ```json5 { "rows": fc_rows, // 返回结果的行数 diff --git a/docs/zh/06-advanced/06-TDgpt/04-forecast/03-holtwinters.md b/docs/zh/06-advanced/06-TDgpt/04-forecast/03-holtwinters.md index 38662ca2b3..7e92a8ae1a 100644 --- a/docs/zh/06-advanced/06-TDgpt/04-forecast/03-holtwinters.md +++ b/docs/zh/06-advanced/06-TDgpt/04-forecast/03-holtwinters.md @@ -23,11 +23,16 @@ HoltWinters 有两种不同的季节性组成部分,当季节变化在该时 参数 `trend` 和 `seasonal`的均可以选择 `add` (加法模型)或 `mul`(乘法模型)。 ### 示例及结果 -针对 i32 列进行数据预测,输入列 i32 每 10 个点是一个周期,趋势采用乘法模型,季节采用乘法模型 +针对 i32 列进行数据预测,输入列 i32 每 10 个点是一个周期,趋势参数采用乘法模型,季节参数采用乘法模型 ``` FORECAST(i32, "algo=holtwinters,period=10,trend=mul,seasonal=mul") ``` +完整的调用SQL语句如下: +```SQL +SELECT _frowts, FORECAST(i32, "algo=holtwinters, peroid=10,trend=mul,seasonal=mul") from foo +``` + ```json5 { "rows": rows, // 返回结果的行数 diff --git a/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md b/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md index 8cc9cb5b6a..c7388ab9c0 100644 --- a/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md +++ b/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md @@ -3,9 +3,30 @@ title: 预测算法 description: 预测算法 --- -时序数据预测处理以持续一个时间段的时序数据作为输入,预测接下来一个连续时间区间内时间序列数据分布及运行的趋势。用户可以指定输出的(预测)时间序列数据点的数量,因此其输出的结果行数不确定。为此,我们引入了 `FORECAST` 函数提供预测服务。基础数据(用于预测的历史时间序列数据)是该函数的输入,预测结果是该函数的输出。用户可以通过 `FORECAST` 函数调用 Anode 提供的预测算法提供的服务。 +时序数据预测处理以持续一个时间段的时序数据作为输入,预测接下来一个连续时间区间内时间序列数据趋势。用户可以指定输出的(预测)时间序列数据点的数量,因此其输出的结果行数不确定。为此,TDengine 使用新 SQL 函数 `FORECAST` 提供时序数据预测服务。基础数据(用于预测的历史时间序列数据)是该函数的输入,预测结果是该函数的输出。用户可以通过 `FORECAST` 函数调用 Anode 提供的预测算法提供的服务。 -##### 语法 +在后续章节中,使用时序数据表`foo`作为示例,介绍预测和异常检测算法的使用方式,`foo` 表的模式如下: + +|列名称|类型|说明| +|---|---|---| +|ts| timestamp| 主时间戳列| +|i32| int32| 4字节整数,设备测量值 metric| + +```bash +taos> select * from foo; + ts | k | +======================================== + 2020-01-01 00:00:12.681 | 13 | + 2020-01-01 00:00:13.727 | 14 | + 2020-01-01 00:00:14.378 | 8 | + 2020-01-01 00:00:15.774 | 10 | + 
2020-01-01 00:00:16.170 | 16 | + 2020-01-01 00:00:17.558 | 26 | + 2020-01-01 00:00:18.938 | 32 | + 2020-01-01 00:00:19.308 | 27 | +``` + +### 语法 ```SQL FORECAST(column_expr, option_expr) @@ -23,7 +44,7 @@ algo=expr1 1. `column_expr`:预测的时序数据列。与异常检测相同,只支持数值类型列输入。 2. `options`:异常检测函数的参数,使用规则与 anomaly_window 相同。预测支持 `conf`, `every`, `rows`, `start`, `rows` 几个控制参数,其含义如下: -**参数说明** +### 参数说明 |参数|含义|默认值| |---|---|---| @@ -31,7 +52,7 @@ algo=expr1 |wncheck|白噪声(white noise data)检查|默认值为 1,0 表示不进行检查| |conf|预测数据的置信区间范围 ,取值范围 [0, 100]|95| |every|预测数据的采样间隔|输入数据的采样间隔| -|start|预测结果的开始时间戳|输入数据最后一个时间戳加上一个采样时间段| +|start|预测结果的开始时间戳|输入数据最后一个时间戳加上一个采样间隔时间区间| |rows|预测结果的记录数|10| 1. 预测查询结果新增三个伪列,具体如下:`_FROWTS`:预测结果的时间戳、`_FLOW`:置信区间下界、`_FHIGH`:置信区间上界, 对于没有置信区间的预测算法,其置信区间同预测结果 @@ -39,34 +60,34 @@ algo=expr1 3. `EVERY`:可以与输入数据的采样频率不同。采样频率只能低于或等于输入数据采样频率,不能**高于**输入数据的采样频率。 4. 对于某些不需要计算置信区间的算法,即使指定了置信区间,返回的结果中其上下界退化成为一个点。 -**示例** +### 示例 ```SQL --- 使用 arima 算法进行预测,预测结果是 10 条记录(默认值),数据进行白噪声检查,默认置信区间 95%. SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=arima") FROM foo; ---- 使用 arima 算法进行预测,输入数据的是周期数据,每 10 个采样点是一个周期。返回置信区间是 95%. -SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=arima,alpha=95,period=10") +--- 使用 arima 算法进行预测,输入数据的是周期数据,每 10 个采样点是一个周期,返回置信区间是95%的上下边界,同时忽略白噪声检查 +SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=arima,alpha=95,period=10,wncheck=0") FROM foo; ``` ``` taos> select _flow, _fhigh, _frowts, forecast(i32) from foo; _flow | _fhigh | _frowts | forecast(i32) | ======================================================================================== - 10.5286684 | 41.8038254 | 2020-01-01 00:01:35.001 | 26 | - -21.9861946 | 83.3938904 | 2020-01-01 00:01:36.001 | 30 | - -78.5686035 | 144.6729126 | 2020-01-01 00:01:37.001 | 33 | - -154.9797363 | 230.3057709 | 2020-01-01 00:01:38.001 | 37 | - -253.9852905 | 337.6083984 | 2020-01-01 00:01:39.001 | 41 | - -375.7857971 | 466.4594727 | 2020-01-01 00:01:40.001 | 45 | - -514.8043823 | 622.4426270 | 2020-01-01 00:01:41.001 | 53 | - -680.6343994 | 796.2861328 | 2020-01-01 00:01:42.001 | 57 | - -868.4956665 | 992.8603516 | 2020-01-01 00:01:43.001 | 62 | - -1076.1566162 | 1214.4498291 | 2020-01-01 00:01:44.001 | 69 | + 10.5286684 | 41.8038254 | 2020-01-01 00:01:35.000 | 26 | + -21.9861946 | 83.3938904 | 2020-01-01 00:01:36.000 | 30 | + -78.5686035 | 144.6729126 | 2020-01-01 00:01:37.000 | 33 | + -154.9797363 | 230.3057709 | 2020-01-01 00:01:38.000 | 37 | + -253.9852905 | 337.6083984 | 2020-01-01 00:01:39.000 | 41 | + -375.7857971 | 466.4594727 | 2020-01-01 00:01:40.000 | 45 | + -514.8043823 | 622.4426270 | 2020-01-01 00:01:41.000 | 53 | + -680.6343994 | 796.2861328 | 2020-01-01 00:01:42.000 | 57 | + -868.4956665 | 992.8603516 | 2020-01-01 00:01:43.000 | 62 | + -1076.1566162 | 1214.4498291 | 2020-01-01 00:01:44.000 | 69 | ``` -**可用预测算法** -- arima -- holtwinters +## 内置预测算法 +- [arima](./02-arima.md) +- [holtwinters](./03-holtwinters.md) diff --git a/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/02-anomaly-detection.md b/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/02-anomaly-detection.md deleted file mode 100644 index 511a9cef11..0000000000 --- a/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/02-anomaly-detection.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: "异常检测算法" -sidebar_label: "异常检测算法" ---- - -本节讲述异常检测算法模型的使用方法。 - -## 概述 -分析平台提供了 6 种异常检查模型,6 种异常检查模型分为 3 个类别,分别属于基于统计的异常检测模型、基于数据密度的检测模型、基于深度学习的异常检测模型。在不指定异常检测使用的方法的情况下,默认调用 iqr 的方法进行计算。 - - -### 统计学异常检测方法 - -- k-sigma[1]: 即 ***68–95–99.7 rule*** 。***k***值默认为 3,即序列均值的 3 倍标准差范围为边界,超过边界的是异常值。KSigma 
要求数据整体上服从正态分布,如果一个点偏离均值 K 倍标准差,则该点被视为异常点. - -|参数|说明|是否必选|默认值| -|---|---|---|---| -|k|标准差倍数|选填|3| - - -- IQR[2]:四分位距 (Interquartile range, IQR) 是一种衡量变异性的方法. 四分位数将一个按等级排序的数据集划分为四个相等的部分。即 Q1(第 1 个四分位数)、Q2(第 2 个四分位数)和 Q3(第 3 个四分位数)。IQR 定义为 $Q3–Q1$,位于 $Q3+1.5$。无输入参数。 - -- Grubbs[3]: 又称为 Grubbs' test,即最大标准残差测试。Grubbs 通常用作检验最大值、最小值偏离均值的程度是否为异常,该单变量数据集遵循近似标准正态分布。非正态分布数据集不能使用该方法。无输入参数。 - -- SHESD[4]: 带有季节性的 ESD 检测算法。ESD 可以检测时间序列数据的多异常点。需要指定异常点比例的上界***k***,最差的情况是至多 49.9%。数据集的异常比例一般不超过 5% - -|参数|说明|是否必选|默认值| -|---|---|---|---| -|k|异常点在输入数据集中占比,范围是 $1\le K \le 49.9$ |选填|5| - - -### 基于数据密度的检测方法 -LOF[5]: 局部离群因子(LOF,又叫局部异常因子)算法是 Breunig 于 2000 年提出的一种基于密度的局部离群点检测算法,该方法适用于不同类簇密度分散情况迥异的数据。根据数据点周围的数据密集情况,首先计算每个数据点的一个局部可达密度,然后通过局部可达密度进一步计算得到每个数据点的一个离群因子,该离群因子即标识了一个数据点的离群程度,因子值越大,表示离群程度越高,因子值越小,表示离群程度越低。最后,输出离群程度最大的 $top(n)$ 个点。 - - -### 基于自编码器的检测方法 -使用自动编码器的异常检测模型。可以对具有周期性的数据具有较好的检测结果。但是使用该模型需要针对输入的时序数据进行训练,同时将训练完成的模型部署到服务目录中,才能够运行与使用。 - - -### 参考文献 -1. [https://en.wikipedia.org/wiki/68–95–99.7 rule](https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule) -2. https://en.wikipedia.org/wiki/Interquartile_range -3. Adikaram, K. K. L. B.; Hussein, M. A.; Effenberger, M.; Becker, T. (2015-01-14). "Data Transformation Technique to Improve the Outlier Detection Power of Grubbs's Test for Data Expected to Follow Linear Relation". Journal of Applied Mathematics. 2015: 1–9. doi:10.1155/2015/708948. -4. Hochenbaum, O. S. Vallis, and A. Kejariwal. 2017. Automatic Anomaly Detection in the Cloud Via Statistical Learning. arXiv preprint arXiv:1704.07706 (2017). -5. Breunig, M. M.; Kriegel, H.-P.; Ng, R. T.; Sander, J. (2000). LOF: Identifying Density-based Local Outliers (PDF). Proceedings of the 2000 ACM SIGMOD International Conference on Management of Data. SIGMOD. pp. 93–104. doi:10.1145/335191.335388. ISBN 1-58113-217-4. - diff --git a/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/02-statistics-approach.md b/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/02-statistics-approach.md new file mode 100644 index 0000000000..d0d6815c25 --- /dev/null +++ b/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/02-statistics-approach.md @@ -0,0 +1,57 @@ +--- +title: "统计学算法" +sidebar_label: "统计学算法" +--- + +- k-sigma[1]: 即 ***68–95–99.7 rule*** 。***k***值默认为 3,即序列均值的 3 倍标准差范围为边界,超过边界的是异常值。KSigma 要求数据整体上服从正态分布,如果一个点偏离均值 K 倍标准差,则该点被视为异常点. 
+ +|参数|说明|是否必选|默认值| +|---|---|---|---| +|k|标准差倍数|选填|3| + +```SQL +--- 指定调用的算法为ksigma, 参数 k 为 2 +SELECT _WSTART, COUNT(*) +FROM foo +ANOMALY_WINDOW(foo.i32, "algo=ksigma,k=2") +``` + +- IQR[2]:Interquartile range(IQR),四分位距是一种衡量变异性的方法。四分位数将一个按等级排序的数据集划分为四个相等的部分。即 Q1(第 1 个四分位数)、Q2(第 2 个四分位数)和 Q3(第 3 个四分位数)。 $IQR=Q3-Q1$,对于 $v$, $Q1-(1.5 \times IQR) \le v \le Q3+(1.5 \times IQR)$ 是正常值,范围之外的是异常值。无输入参数。 + +```SQL +--- 指定调用的算法为 iqr, 无参数 +SELECT _WSTART, COUNT(*) +FROM foo +ANOMALY_WINDOW(foo.i32, "algo=iqr") +``` + +- Grubbs[3]: Grubbs' test,即最大标准残差测试。Grubbs 通常用作检验最大值、最小值偏离均值的程度是否为异常,要求单变量数据集遵循近似标准正态分布。非正态分布数据集不能使用该方法。无输入参数。 + +```SQL +--- 指定调用的算法为 grubbs, 无参数 +SELECT _WSTART, COUNT(*) +FROM foo +ANOMALY_WINDOW(foo.i32, "algo=grubbs") +``` + +- SHESD[4]: 带有季节性的 ESD 检测算法。ESD 可以检测时间序列数据的多异常点。需要指定异常检测方向('pos' / 'neg' / 'both'),异常值比例的上界***max_anoms***,最差的情况是至多 49.9%。数据集的异常比例一般不超过 5% + +|参数|说明|是否必选|默认值| +|---|---|---|---| +|direction|异常检测方向类型('pos' / 'neg' / 'both')|否|"both"| +|max_anoms|异常值比例 $0 < K \le 49.9$|否|0.05| +|period|一个周期包含的数据点|否|0| + + +```SQL +--- 指定调用的算法为 shesd, 参数 direction 为 both,异常值比例 5% +SELECT _WSTART, COUNT(*) +FROM foo +ANOMALY_WINDOW(foo.i32, "algo=shesd,direction=both,anoms=0.05") +``` + +### 参考文献 +1. [https://en.wikipedia.org/wiki/68–95–99.7 rule](https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule) +2. https://en.wikipedia.org/wiki/Interquartile_range +3. Adikaram, K. K. L. B.; Hussein, M. A.; Effenberger, M.; Becker, T. (2015-01-14). "Data Transformation Technique to Improve the Outlier Detection Power of Grubbs's Test for Data Expected to Follow Linear Relation". Journal of Applied Mathematics. 2015: 1–9. doi:10.1155/2015/708948. +4. Hochenbaum, O. S. Vallis, and A. Kejariwal. 2017. Automatic Anomaly Detection in the Cloud Via Statistical Learning. arXiv preprint arXiv:1704.07706 (2017). diff --git a/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/03-data-density.md b/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/03-data-density.md new file mode 100644 index 0000000000..7c0998c917 --- /dev/null +++ b/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/03-data-density.md @@ -0,0 +1,20 @@ +--- +title: "数据密度算法" +sidebar_label: "数据密度算法" +--- + +### 基于数据密度的检测方法 +LOF[1]: Local Outlier Factor(LOF),局部离群因子/局部异常因子, +是 Breunig 在 2000 年提出的一种基于密度的局部离群点检测算法,该方法适用于不同类簇密度分散情况迥异的数据。根据数据点周围的数据密集情况,首先计算每个数据点的一个局部可达密度,然后通过局部可达密度进一步计算得到每个数据点的一个离群因子, +该离群因子即标识了一个数据点的离群程度,因子值越大,表示离群程度越高,因子值越小,表示离群程度越低。最后,输出离群程度最大的 $topK$ 个点。 + +```SQL +--- 指定调用的算法为LOF,即可调用该算法 +SELECT count(*) +FROM foo +ANOMALY_WINDOW(foo.i32, "algo=lof") +``` + +### 参考文献 + +1. Breunig, M. M.; Kriegel, H.-P.; Ng, R. T.; Sander, J. (2000). LOF: Identifying Density-based Local Outliers (PDF). Proceedings of the 2000 ACM SIGMOD International Conference on Management of Data. SIGMOD. pp. 93–104. doi:10.1145/335191.335388. ISBN 1-58113-217-4. 
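补充示例(假设使用前文的数据表 foo):与其他异常检测算法一样,LOF 的检测结果也以异常窗口的形式返回,可结合窗口伪列查看窗口的边界:

```SQL
--- 输出 LOF 识别的异常窗口起始时间、结束时间及窗口内的记录数
SELECT _wstart, _wend, COUNT(*)
FROM foo
ANOMALY_WINDOW(foo.i32, "algo=lof")
```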
diff --git a/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/04-machine-learning.md b/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/04-machine-learning.md new file mode 100644 index 0000000000..d72b8e70a9 --- /dev/null +++ b/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/04-machine-learning.md @@ -0,0 +1,17 @@ +--- +title: "机器学习算法" +sidebar_label: "机器学习算法" +--- + +Autoencoder[1]: TDgpt 内置使用自编码器(Autoencoder)的异常检测算法,对周期性的时间序列数据具有较好的检测结果。使用该模型需要针对输入时序数据进行预训练,同时将训练完成的模型保存在到服务目录 `ad_autoencoder` 中,然后在 SQL 语句中指定调用该算法模型即可使用。 + +```SQL +--- 在 options 中增加 model 的名称,ad_autoencoder_foo, 针对 foo 数据集(表)训练的采用自编码器的异常检测模型进行异常检测 +SELECT COUNT(*), _WSTART +FROM foo +ANOMALY_DETECTION(col1, 'algo=encoder, model=ad_autoencoder_foo'); +``` + +### 参考文献 + +1. https://en.wikipedia.org/wiki/Autoencoder diff --git a/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/index.md b/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/index.md index c831b63668..632492ce72 100644 --- a/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/index.md +++ b/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/index.md @@ -5,12 +5,13 @@ description: 异常检测算法 import ad from '../pic/anomaly-detection.png'; -时序数据异常检测,在TDengine 查询处理中以异常窗口的形式服务。因此,可以将异常检测获得的窗口视为一种特殊的**事件窗口**,区别在于异常窗口的触发条件和结束条件不是用户指定,而是检测算法自动识别。因此,可以应用在事件窗口上的函数均可应用在异常窗口中。由于异常检测结果是一个时间窗口,因此调用异常检测的方式也与使用事件窗口的方式相同,在 `WHERE` 子句中使用 `ANOMALY_WINDOW` 关键词即可调用时序数据异常检测服务,同时窗口伪列(`_WSTART`, `_WEND`, `_WDURATION`)也能够像其他窗口函数一样使用。例如: +TDengine 中定义了异常(状态)窗口来提供异常检测服务。异常窗口可以视为一种特殊的**事件窗口(Event Window)**,即异常检测算法确定的连续异常时间序列数据所在的时间窗口。与普通事件窗口区别在于——时间窗口的起始时间和结束时间均是分析算法识别确定,不是用户给定的表达式进行判定。因此,在 `WHERE` 子句中使用 `ANOMALY_WINDOW` 关键词即可调用时序数据异常检测服务,同时窗口伪列(`_WSTART`, `_WEND`, `_WDURATION`)也能够像其他时间窗口一样用于描述异常窗口的起始时间(`_WSTART`)、结束时间(`_WEND`)、持续时间(`_WDURATION`)。例如: ```SQL -SELECT _wstart, _wend, SUM(i32) +--- 使用异常检测算法 IQR 对输入列 col_val 进行异常检测。同时输出异常窗口的起始时间、结束时间、以及异常窗口内 col 列的和。 +SELECT _wstart, _wend, SUM(col) FROM foo -ANOMALY_WINDOW(i32, "algo=iqr"); +ANOMALY_WINDOW(col_val, "algo=iqr"); ``` 如下图所示,Anode 将返回时序数据异常窗口 $[10:51:30, 10:53:40]$ @@ -36,18 +37,14 @@ algo=expr1 3. 异常检测的结果可以作为外层查询的子查询输入,在 `SELECT` 子句中使用的聚合函数或标量函数与其他类型的窗口查询相同。 4. 输入数据默认进行白噪声检查,如果输入数据是白噪声,将不会有任何(异常)窗口信息返回。 -**参数说明** +### 参数说明 |参数|含义|默认值| |---|---|---| |algo|异常检测调用的算法|iqr| -|wncheck|对输入数据列是否进行白噪声检查|取值为 0 或者 1,默认值为 1,表示进行白噪声检查| +|wncheck|对输入数据列是否进行白噪声检查,取值为0或1|1| -异常检测的返回结果以窗口形式呈现,因此窗口查询相关的伪列在这种场景下仍然可用。可用的伪列如下: -1. `_WSTART`: 异常窗口开始时间戳 -2. `_WEND`:异常窗口结束时间戳 -3. 
`_WDURATION`:异常窗口持续时间 -**示例** +### 示例 ```SQL --- 使用 iqr 算法进行异常检测,检测列 i32 列。 SELECT _wstart, _wend, SUM(i32) @@ -58,10 +55,8 @@ ANOMALY_WINDOW(i32, "algo=iqr"); SELECT _wstart, _wend, SUM(i32) FROM foo ANOMALY_WINDOW(i32, "algo=ksigma,k=2"); -``` -``` -taos> SELECT _wstart, _wend, count(*) FROM ai.atb ANOMAYL_WINDOW(i32); +taos> SELECT _wstart, _wend, count(*) FROM foo ANOMAYL_WINDOW(i32); _wstart | _wend | count(*) | ==================================================================== 2020-01-01 00:00:16.000 | 2020-01-01 00:00:17.000 | 2 | @@ -69,10 +64,6 @@ Query OK, 1 row(s) in set (0.028946s) ``` -**可用异常检测算法** -- iqr -- ksigma -- grubbs -- lof -- shesd -- tac +### 内置异常检测算法 +分析平台内置了6个异常检查模型,分为3个类别,分别是[基于统计学的算法](./02-statistics-approach.md)、[基于数据密度的算法](./03-data-density.md)、以及[基于机器学习的算法](./04-machine-learning.md)。在不指定异常检测使用的方法的情况下,默认调用 IQR 进行异常检测。 + diff --git a/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md b/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md index d375ad44b8..954076c8fd 100644 --- a/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md +++ b/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md @@ -10,8 +10,8 @@ sidebar_label: "预测算法" `execute` 方法执行完成后的返回一个如下字典对象, 预测返回结果如下: ```python return { - "mse": mse, # 预测算法的拟合数据最小均方误差(minimum squared error) - "res": res # 结果数组 [时间戳数组, 预测结果数组, 预测结果执行区间下界数组,预测结果执行区间上界数组] + "mse": mse, # 预测算法的拟合数据最小均方误差(minimum squared error) + "res": res # 结果数组 [时间戳数组, 预测结果数组, 预测结果执行区间下界数组,预测结果执行区间上界数组] } ``` @@ -77,14 +77,8 @@ class _MyForecastService(AbstractForecastService): """该算法无需任何输入参数,直接重载父类该函数,不处理算法参数设置逻辑""" pass ``` -将该文件保存在 `./taosanalytics/algo/ad/` 目录下,然后重启 taosanode 服务。然后就可以通过 SQL 语句调用该检测算法。 -```SQL ---- 对 col 列进行异常检测,通过指定 algo 参数为 myad 来调用新添加的异常检测类 -SELECT COUNT(*) FROM foo ANOMALY_DETECTION(col, 'algo=myad') -``` - -将该文件保存在 `./taosanalytics/algo/fc/` 目录下,然后重启 taosanode 服务。通过执行 `SHOW ANODES FULL` 能够看到新加入的算法,通过 SQL 语句调用该预测算法。 +将该文件保存在 `./taosanalytics/algo/fc/` 目录下,然后重启 taosanode 服务。在 TDengine 命令行接口中执行 `SHOW ANODES FULL` 能够看到新加入的算法。应用就可以通过 SQL 语句调用该预测算法。 ```SQL --- 对 col 列进行异常检测,通过指定 algo 参数为 myfc 来调用新添加的预测类 @@ -92,6 +86,7 @@ SELECT _flow, _fhigh, _frowts, FORECAST(col_name, "algo=myfc") FROM foo; ``` +如果是第一次启动该 Anode, 请按照 [TDgpt 安装部署](../../management/) 里的步骤先将该 Anode 添加到 TDengine 系统中。 ### 单元测试 diff --git a/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md b/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md index 8068931653..dc0a534706 100644 --- a/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md +++ b/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md @@ -48,13 +48,13 @@ class _MyAnomalyDetectionService(AbstractAnomalyDetectionService): pass ``` -将该文件保存在 `./taosanalytics/algo/ad/` 目录下,然后重启 taosanode 服务。然后就可以通过 SQL 语句调用该检测算法。 +将该文件保存在 `./taosanalytics/algo/ad/` 目录下,然后重启 taosanode 服务。在 TDengine 命令行接口 taos 中执行 `SHOW ANODES FULL` 就能够看到新加入的算法,然后应用就可以通过 SQL 语句调用该检测算法。 ```SQL --- 对 col 列进行异常检测,通过指定 algo 参数为 myad 来调用新添加的异常检测类 SELECT COUNT(*) FROM foo ANOMALY_DETECTION(col, 'algo=myad') ``` - +如果是第一次启动该 Anode, 请按照 [TDgpt 安装部署](../../management/) 里的步骤先将该 Anode 添加到 TDengine 系统中。 ### 单元测试 diff --git a/docs/zh/06-advanced/06-TDgpt/06-dev/index.md b/docs/zh/06-advanced/06-TDgpt/06-dev/index.md index 6ef9e67a20..b7f048cefc 100644 --- a/docs/zh/06-advanced/06-TDgpt/06-dev/index.md +++ b/docs/zh/06-advanced/06-TDgpt/06-dev/index.md @@ -2,14 +2,20 @@ title: "算法开发者指南" sidebar_label: "算法开发者指南" --- -TDgpt 是一个可扩展的时序数据高级分析平台,用户仅按照简易的步骤就能将新分析算法添加到分析平台中。将开发完成的算法代码文件放入对应的目录文件夹,然后重启 Anode 即可完成扩展升级。Anode 启动后会自动加载特定目录的分析算法。用户可以直接使用 SQL 语句调用添加到 TDgpt 系统中的分析算法。得益于 TDgpt 与 taosd 
的松散耦合关系,分析平台升级对 taosd 完全没有影响。应用系统也不需要做任何更改就能够完成分析功能和分析算法的升级。 +TDgpt 是一个可扩展的时序数据高级分析平台,用户遵循简易的步骤就能将自己开发的分析算法添加到分析平台, 各种应用就可以通过SQL语句直接调用, 让高级分析算法的使用门槛降到几乎为零。目前 TDpgt 平台只支持使用 Python 语言开发的分析算法。 +Anode 采用类动态加载模式,在启动的时候扫描特定目录内满足约定条件的所有代码文件,并将其加载到系统中。因此,开发者只需要遵循以下几步就能完成新算法的添加工作: +1. 开发完成符合要求的分析算法类 +2. 将代码文件放入对应目录,然后重启 Anode +3. 使用SQL命令"CREATE ANODE",将 Anode 添加到 TDengine + +此时就完成了新算法的添加工作,之后应用就可以直接使用SQL语句调用新算法。得益于 TDgpt 与 TDengine主进程 `taosd` 的松散耦合,Anode算法升级对 `taosd` 完全没有影响。应用系统只需要调整对应的SQL语句调用新(升级的)算法,就能够快速完成分析功能和分析算法的升级。 -这种方式能够按需扩展新分析算法,极大地拓展了 TDgpt 适应的范围,用户可以将契合业务场景开发的(预测、异常检测)分析算法嵌入到 TDgpt,并通过 SQL 语句进行调用。在不更改或更改非常少的应用系统代码的前提下,就能够快速完成分析功能的平滑升级。 +这种方式能够按需扩展分析算法,极大地拓展 TDgpt 的适应范围,用户可以按需将更契合业务场景的、更准确的(预测、异常检测)分析算法动态嵌入到 TDgpt,并通过 SQL 语句进行调用。在基本不用更改应用系统代码的前提下,就能够快速完成分析功能的平滑升级。 -本节说明如何将预测算法和异常检测算法添加到 TDengine 分析平台。 +以下内容将说明如何将分析算法添加到 Anode 中并能够通过SQL语句进行调用。 ## 目录结构 -首先需要了解TDgpt的目录结构。其主体目录结构如下图: +Anode的主要目录结构如下图所示 ```bash . @@ -42,7 +48,7 @@ TDgpt 是一个可扩展的时序数据高级分析平台,用户仅按照简 ### 类命名规范 -由于算法采用自动加载,因此其只识别按照特定命名方式的类。算法类的名称需要以下划线开始,以 Service 结尾。例如:`_KsigmaService` 是 KSigma 异常检测算法类。 +Anode采用算法自动加载模式,因此只识别符合命名约定的 Python 类。需要加载的算法类名称需要以下划线 `_` 开始并以 `Service` 结尾。例如:`_KsigmaService` 是 KSigma 异常检测算法类。 ### 类继承约定 @@ -50,33 +56,27 @@ TDgpt 是一个可扩展的时序数据高级分析平台,用户仅按照简 - 预测算法需要从 `AbstractForecastService` 继承,同样需要实现其核心抽象方法 `execute` ### 类属性初始化 -每个算法实现的类需要静态初始化两个类属性,分别是: +实现的类需要初始化以下两个类属性: -- `name`:触发调用的关键词,全小写英文字母。该名称也是通过 `SHOW` 命令查看可用分析算法是显示的名称。 -- `desc`:算法的描述信息 +- `name`:识别该算法的关键词,全小写英文字母。通过 `SHOW` 命令查看可用算法显示的名称即为该名称。 +- `desc`:算法的基础描述信息 ```SQL ---- algo 后面的参数 algo_name 即为类名称 `name` -SELECT COUNT(*) FROM foo ANOMALY_DETECTION(col_name, 'algo=algo_name') -``` - -## 添加具有训练模型的分析算法 - -某些深度学习的分析算法需要使用输入时间序列数据进行训练,然后生成针对训练数据集的分析模型。这种情况下,同一个分析算法对应不同的输入数据集有不同的分析模型。 -这种类型的分析算法要添加到 TDgpt 中,首先需要在 `model` 目录中建立目录,将采用该算法针对不同的输入时间序列数据生成的训练模型均保存在该目录下。如下图所示,针对不同的数据集,采用自编码器训练的数据异常检测算法生成的模型均保存在该目录下。为了确保模型能够正常读取加载,要求存储的模型使用`joblib`库进行序列化保存。 -采用训练-保存模型的方式可以一次训练,多次调用的优势。避免动态训练调用所带来的反复训练开销。 - -调用已经保存的模型,需要首先调用`set_params`方法,并在参数中指定调用模型的名称 `{"model": "ad_encoder_keras"}` 即可调用该模型进行计算。调用方式如下: - -```python -def test_autoencoder_ad(self): - # 获取特定的算法对象 - # ... 
- - # 指定调用的模型,该模型是之前针对该数据集进行训练获得 - s.set_params({"model": "ad_encoder_keras"}) - - # 执行检查动作,并返回结果 - r = s.execute() +--- algo 后面的参数 name 即为类属性 `name` +SELECT COUNT(*) FROM foo ANOMALY_DETECTION(col_name, 'algo=name') ``` +## 添加具有模型的分析算法 + +基于统计学的分析算法可以直接针对输入时间序列数据进行分析,但是某些深度学习算法对于输入数据需要较长的时间训练,并且生成相应的模型。这种情况下,同一个分析算法对应不同的输入数据集有不同的分析模型。 +将具有模型的分析算法添加到 Anode 中,首先需要在 `model` 目录中建立该算法对应的目录(目录名称可自拟),将采用该算法针对不同的输入时间序列数据生成的训练模型均需要保存在该目录下,同时目录名称要在分析算法中确定,以便能够固定加载该目录下的分析模型。为了确保模型能够正常读取加载,存储的模型使用`joblib`库进行序列化保存。 + +下面以自编码器(Autoencoder)为例,说明如何添加要预先训练的模型进行异常检测。 +首先我们在`model`目录中创建一个目录 -- `ad_detection`,该目录将用来保存所有使用自编码器训练的模型。然后,我们使用自编码器对 foo 表的时间序列数据进行训练,得到模型 ad_autoencoder_foo,使用 `joblib`序列化以后保存在`ad_detection` 目录中。 + +使用 SQL 调用已经保存的模型,需要在调用参数中指定模型名称``model=ad_autoencoder_foo`,而 `algo=encoder` 是确定调用的自编码器生成的模型(这里的`encoder`说明调用的是自编码器算法模型,该名称是添加算法的时候在代码中定义)以便能够调用该模型。 + +```SQL +--- 在 options 中增加 model 的名称,ad_autoencoder_foo, 针对 foo 数据集(表)训练的采用自编码器的异常检测模型进行异常检测 +SELECT COUNT(*), _WSTART FROM foo ANOMALY_DETECTION(col1, 'algo=encoder, model=ad_autoencoder_foo'); +``` diff --git a/docs/zh/06-advanced/06-TDgpt/index.md b/docs/zh/06-advanced/06-TDgpt/index.md index 96b654b068..3f650b196b 100644 --- a/docs/zh/06-advanced/06-TDgpt/index.md +++ b/docs/zh/06-advanced/06-TDgpt/index.md @@ -6,17 +6,20 @@ title: TDgpt import TDgpt from './pic/data-analysis.png'; -TDgpt 是 TDengine Enterprise 中针对时序数据提供高级分析功能的企业级组件,能够独立于 TDengine 主进程部署和运行,不消耗和占用 TDengine 主进程的资源,通过内置接口向 TDengine 提供运行时动态扩展的高级时序数据分析功能。TDgpt 具有服务无状态、功能易扩展、快速弹性部署、应用轻量化、高安全性等特点。 -TDgpt 运行在部署于 TDengine 集群中的 AI Node (Anode)中。每个 TDengine 集群中可以部署一个或若干个 Anode 节点,不同的 Anode 节点之间不相关,无同步或协同的要求。Anode 注册到 TDengine 集群以后,就可以通过内部接口提供服务。TDgpt 提供的高级时序数据分析服务可分为时序数据异常检测和时序数据预测分析两个类别。 +TDgpt 是 TDengine Enterprise 中针对时序数据提供高级分析功能的企业级组件,通过内置接口向 TDengine 提供运行时动态扩展的时序数据分析服务。TDgpt 能够独立于 TDengine 主进程部署和运行,因此可避免消耗占用 TDengine 集群的主进程资源。 +TDgpt 具有服务无状态、功能易扩展、快速弹性部署、应用轻量化、高安全性等优势。 +TDgpt 运行在集群中的 AI Node (Anode)中,集群中可以部署若干个 Anode 节点,不同的 Anode 节点之间无同步依赖或协同的要求。Anode 注册到 TDengine 集群以后,立即就可以提供服务。TDgpt 提供的高级时序数据分析服务可分为时序数据异常检测和时序数据预测分析两大类。 下图是部署 TDgpt 的 TDengine 集群示意图。 TDgpt架构图 -通过注册指令将 Anode 注册到 Mnode 中以后,就加入到 TDengine 集群,并可被查询引擎动态调用执行。在查询处理过程中,查询引擎根据生成的物理执行计划,**按需**向 Anode 请求高级时序数据分析服务。用户可通过SQL语句与 Anode 节点交互,并使用其提供的全部分析服务。需要注意的是 Anode 不直接接受用户的数据分析请求。同时 Anode 提供高效的动态注册机制,其注册和卸载过程完全不影响 TDengine 集群的服务,只影响提供对应的查询服务能力。 +在查询处理过程中,Vnode中运行的查询引擎会根据查询处理物理执行计划,按需向 Anode 请求高级时序数据分析服务。因此用户可通过 SQL 语句与 Anode 节点交互并使用其提供的全部分析服务。需要注意的是 Anode 不直接接受用户的数据分析请求。同时 Anode 具备分析算法动态注册机制,其算法扩展过程完全不影响 TDengine 集群的服务,仅在非常小的(秒级)时间窗口内影响涉及高级分析的查询服务。 -TDgpt 提供的高级数据分析功能分为时序数据异常检测和时序数据预测。 -- 时序数据异常检测的结果采用异常窗口的形式提供,即分析系统自动将算法检测到的连续异常数据以时间窗口的形式返回,其使用方式与 TDengine 中其他类型的时间窗口(例如状态窗口、事件窗口)类似。特别地,可以将异常数据窗口视作为一种特殊的**事件窗口(Event Window)**,因此状态窗口可使用的所有查询操作均可应用在异常窗口上。 -- 时序数据预测是基于输入的时间序列数据,使用指定(或默认)预测算法给出输入时序数据后续时间序列的**预测**观测值数据。因此,不同于异常检测是以窗口的形式存在,时序数据预测在 TDengine 中是一个(不确定输出)函数。 +目前 TDgpt 提供如下的高级分析服务: +- 时序数据异常检测。TDengine 中定义了新的时间窗口——异常(状态)窗口——来提供异常检测服务。异常窗口可以视为一种特殊的**事件窗口(Event Window)**,即异常检测算法确定的连续异常时间序列数据所在的时间窗口。与普通事件窗口区别在于——时间窗口的起始时间和结束时间均是分析算法确定,不是用户指定的表达式判定。异常窗口使用方式与其他类型的时间窗口(例如状态窗口、会话窗口等)类似。因此时间窗口内可使用的查询操作均可应用在异常窗口上。 +- 时序数据预测。定义了一个新函数`FORECAST`,基于输入的(历史)时间序列数据调用指定(或默认)预测算法给出输入时序数据后续时间序列的**预测**数据。 + +TDgpt 还为算法开发者提供了一 SDK。任何开发者只需要按照[算法开发者指南](./dev)的步骤,就可以将自己独有的时序数据预测或时序数据异常检测算法无缝集成到 TDgpt, 这样 TDengine 用户就可以通过一条 SQL 获得时序数据预测结果或是异常窗口了, 大幅降低了用户使用新的时序数据分析算法的门槛,而且让 TDengine 成为一开放的系统。 diff --git a/docs/zh/07-develop/01-connect/index.md b/docs/zh/07-develop/01-connect/index.md index bd26bea46d..94f55967ec 100644 --- 
a/docs/zh/07-develop/01-connect/index.md +++ b/docs/zh/07-develop/01-connect/index.md @@ -89,7 +89,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速 com.taosdata.jdbc taos-jdbcdriver - 3.3.3 + 3.4.0 ``` diff --git a/docs/zh/07-develop/07-tmq.md b/docs/zh/07-develop/07-tmq.md index a91a764c67..8b272bdfff 100644 --- a/docs/zh/07-develop/07-tmq.md +++ b/docs/zh/07-develop/07-tmq.md @@ -16,7 +16,7 @@ TDengine 提供了类似于消息队列产品的数据订阅和消费接口。 **注意** 在 TDengine 连接器实现中,对于订阅查询,有以下限制。 -- 查询语句限制:订阅查询只能使用 select 语句,不支持其他类型的SQL,如 insert、update 或 delete 等。 +- 查询语句限制:订阅查询只能使用 select 语句,并不支持其他类型的SQL,如订阅库,订阅超级表(非 select 方式),insert、update 或 delete 等。 - 原始始数据查询:订阅查询只能查询原始数据,而不能查询聚合或计算结果。 - 时间顺序限制:订阅查询只能按照时间正序查询数据。 diff --git a/docs/zh/14-reference/01-components/01-taosd.md b/docs/zh/14-reference/01-components/01-taosd.md index dc35d40b07..101058c2a8 100644 --- a/docs/zh/14-reference/01-components/01-taosd.md +++ b/docs/zh/14-reference/01-components/01-taosd.md @@ -27,65 +27,65 @@ taosd 命令行参数如下 ### 连接相关 |参数名称|支持版本|参数含义| -|-----------------------|-----------|-| -|firstEp | |taosd 启动时,主动连接的集群中首个 dnode 的 end point,默认值 localhost:6030| -|secondEp | |taosd 启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint,无默认值| -|fqdn | |taosd 监听的服务地址,默认为所在服务器上配置的第一个 hostname| -|serverPort | |taosd 监听的端口,默认值 6030| -|compressMsgSize | |是否对 RPC 消息进行压缩;-1:所有消息都不压缩;0:所有消息都压缩;N (N>0):只有大于 N 个字节的消息才压缩;默认值 -1| -|shellActivityTimer | |客户端向 mnode 发送心跳的时长,单位为秒,取值范围 1-120,默认值 3| -|numOfRpcSessions | |RPC 支持的最大连接数,取值范围 100-100000,默认值 30000| -|numOfRpcThreads | |RPC 线程数目,默认值为 CPU 核数的一半| -|numOfTaskQueueThreads | |dnode 处理 RPC 消息的线程数| -|statusInterval | |dnode 与 mnode 之间的心跳间隔| -|rpcQueueMemoryAllowed | |dnode 允许的 rpc 消息占用的内存最大值,单位 bytes,取值范围 104857600-INT64_MAX,默认值 服务器内存的 1/10 | -|resolveFQDNRetryTime | |FQDN 解析失败时的重试次数| -|timeToGetAvailableConn | |获得可用连接的最长等待时间,取值范围 10-50000000,单位为毫秒,默认值 500000| -|maxShellConns | |允许创建的最大链接数| -|maxRetryWaitTime | |重连最大超时时间| -|shareConnLimit |3.3.4.3 之后|内部参数,一个链接可以共享的查询数目,取值范围 1-256,默认值 10| -|readTimeout |3.3.4.3 之后|内部参数,最小超时时间,取值范围 64-604800,单位为秒,默认值 900| +|-----------------------|----------|-| +|firstEp | |taosd 启动时,主动连接的集群中首个 dnode 的 end point,默认值 localhost:6030| +|secondEp | |taosd 启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint,无默认值| +|fqdn | |taosd 监听的服务地址,默认为所在服务器上配置的第一个 hostname| +|serverPort | |taosd 监听的端口,默认值 6030| +|compressMsgSize | |是否对 RPC 消息进行压缩;-1:所有消息都不压缩;0:所有消息都压缩;N (N>0):只有大于 N 个字节的消息才压缩;默认值 -1| +|shellActivityTimer | |客户端向 mnode 发送心跳的时长,单位为秒,取值范围 1-120,默认值 3| +|numOfRpcSessions | |RPC 支持的最大连接数,取值范围 100-100000,默认值 30000| +|numOfRpcThreads | |RPC 线程数目,默认值为 CPU 核数的一半| +|numOfTaskQueueThreads | |dnode 处理 RPC 消息的线程数| +|statusInterval | |dnode 与 mnode 之间的心跳间隔| +|rpcQueueMemoryAllowed | |dnode 允许的 rpc 消息占用的内存最大值,单位 bytes,取值范围 104857600-INT64_MAX,默认值 服务器内存的 1/10 | +|resolveFQDNRetryTime | |FQDN 解析失败时的重试次数| +|timeToGetAvailableConn | |获得可用连接的最长等待时间,取值范围 10-50000000,单位为毫秒,默认值 500000| +|maxShellConns | |允许创建的最大链接数| +|maxRetryWaitTime | |重连最大超时时间| +|shareConnLimit |3.3.4.3 后|内部参数,一个链接可以共享的查询数目,取值范围 1-256,默认值 10| +|readTimeout |3.3.4.3 后|内部参数,最小超时时间,取值范围 64-604800,单位为秒,默认值 900| ### 监控相关 |参数名称|支持版本|参数含义| -|-----------------------|-----------|-| -|monitor | |是否收集监控数据并上报,0:关闭;1:打开;默认值 0| -|monitorFqdn | |taosKeeper 服务所在服务器的 FQDN,默认值 无| -|monitorPort | |taosKeeper 服务所监听的端口号,默认值 6043| -|monitorInterval | |监控数据库记录系统参数(CPU/内存)的时间间隔,单位是秒,取值范围 1-200000 ,默认值 30| -|monitorMaxLogs | |缓存的待上报日志条数| -|monitorComp | |是否采用压缩方式上报监控日志时| -|monitorLogProtocol | |是否打印监控日志| -|monitorForceV2 | |是否使用 V2 版本协议上报| -|telemetryReporting | |是否上传 
telemetry,0:不上传,1:上传,默认值 1| -|telemetryServer | |telemetry 服务器地址| -|telemetryPort | |telemetry 服务器端口编号| -|telemetryInterval | |telemetry 上传时间间隔,单位为秒,默认 43200| -|crashReporting | |是否上传 crash 信息;0:不上传,1:上传;默认值 1| +|-----------------------|----------|-| +|monitor | |是否收集监控数据并上报,0:关闭;1:打开;默认值 0| +|monitorFqdn | |taosKeeper 服务所在服务器的 FQDN,默认值 无| +|monitorPort | |taosKeeper 服务所监听的端口号,默认值 6043| +|monitorInterval | |监控数据库记录系统参数(CPU/内存)的时间间隔,单位是秒,取值范围 1-200000 ,默认值 30| +|monitorMaxLogs | |缓存的待上报日志条数| +|monitorComp | |是否采用压缩方式上报监控日志时| +|monitorLogProtocol | |是否打印监控日志| +|monitorForceV2 | |是否使用 V2 版本协议上报| +|telemetryReporting | |是否上传 telemetry,0:不上传,1:上传,默认值 1| +|telemetryServer | |telemetry 服务器地址| +|telemetryPort | |telemetry 服务器端口编号| +|telemetryInterval | |telemetry 上传时间间隔,单位为秒,默认 43200| +|crashReporting | |是否上传 crash 信息;0:不上传,1:上传;默认值 1| ### 查询相关 |参数名称|支持版本|参数含义| -|------------------------|-----------|-| -|countAlwaysReturnValue | |count/hyperloglog 函数在输入数据为空或者 NULL 的情况下是否返回值;0:返回空行,1:返回;默认值 1;该参数设置为 1 时,如果查询中含有 INTERVAL 子句或者该查询使用了 TSMA 时,且相应的组或窗口内数据为空或者 NULL,对应的组或窗口将不返回查询结果;注意此参数客户端和服务端值应保持一致| -|tagFilterCache | |是否缓存标签过滤结果| -|maxNumOfDistinctRes | |允许返回的 distinct 结果最大行数,默认值 10 万,最大允许值 1 亿| -|queryBufferSize | |暂不生效| -|queryRspPolicy | |查询响应策略| -|filterScalarMode | |强制使用标量过滤模式,0:关闭;1:开启,默认值 0| -|queryPlannerTrace | |内部参数,查询计划是否输出详细日志| -|queryNodeChunkSize | |内部参数,查询计划的块大小| -|queryUseNodeAllocator | |内部参数,查询计划的分配方法| -|queryMaxConcurrentTables| |内部参数,查询计划的并发数目| -|queryRsmaTolerance | |内部参数,用于判定查询哪一级 rsma 数据时的容忍时间,单位为毫秒| -|enableQueryHb | |内部参数,是否发送查询心跳消息| -|pqSortMemThreshold | |内部参数,排序使用的内存阈值| +|------------------------|----------|-| +|countAlwaysReturnValue | |count/hyperloglog 函数在输入数据为空或者 NULL 的情况下是否返回值;0:返回空行,1:返回;默认值 1;该参数设置为 1 时,如果查询中含有 INTERVAL 子句或者该查询使用了 TSMA 时,且相应的组或窗口内数据为空或者 NULL,对应的组或窗口将不返回查询结果;注意此参数客户端和服务端值应保持一致| +|tagFilterCache | |是否缓存标签过滤结果| +|maxNumOfDistinctRes | |允许返回的 distinct 结果最大行数,默认值 10 万,最大允许值 1 亿| +|queryBufferSize | |暂不生效| +|queryRspPolicy | |查询响应策略| +|filterScalarMode | |强制使用标量过滤模式,0:关闭;1:开启,默认值 0| +|queryPlannerTrace | |内部参数,查询计划是否输出详细日志| +|queryNodeChunkSize | |内部参数,查询计划的块大小| +|queryUseNodeAllocator | |内部参数,查询计划的分配方法| +|queryMaxConcurrentTables| |内部参数,查询计划的并发数目| +|queryRsmaTolerance | |内部参数,用于判定查询哪一级 rsma 数据时的容忍时间,单位为毫秒| +|enableQueryHb | |内部参数,是否发送查询心跳消息| +|pqSortMemThreshold | |内部参数,排序使用的内存阈值| ### 区域相关 |参数名称|支持版本|参数含义| -|-----------------|-----------|-| -|timezone | |时区;缺省从系统中动态获取当前的时区设置| -|locale | |系统区位信息及编码格式,缺省从系统中获取| -|charset | |字符集编码,缺省从系统中获取| +|-----------------|----------|-| +|timezone | |时区;缺省从系统中动态获取当前的时区设置| +|locale | |系统区位信息及编码格式,缺省从系统中获取| +|charset | |字符集编码,缺省从系统中获取| :::info 1. 
为应对多时区的数据写入和查询问题,TDengine 采用 Unix 时间戳(Unix Timestamp)来记录和存储时间戳。Unix 时间戳的特点决定了任一时刻不论在任何时区,产生的时间戳均一致。需要注意的是,Unix 时间戳是在客户端完成转换和记录。为了确保客户端其他形式的时间转换为正确的 Unix 时间戳,需要设置正确的时区。 @@ -164,147 +164,148 @@ charset 的有效值是 UTF-8。 ### 存储相关 |参数名称|支持版本|参数含义| -|--------------------|-----------|-| -|dataDir | |数据文件目录,所有的数据文件都将写入该目录,默认值 /var/lib/taos| -|tempDir | |指定所有系统运行过程中的临时文件生成的目录,默认值 /tmp| -|minimalDataDirGB | |dataDir 指定的时序数据存储目录所需要保留的最小空间,单位 GB,默认值 2| -|minimalTmpDirGB | |tempDir 所指定的临时文件目录所需要保留的最小空间,单位 GB,默认值 1| -|minDiskFreeSize |3.1.1.0 之后|当某块磁盘上的可用空间小于等于这个阈值时,该磁盘将不再被选择用于生成新的数据文件,单位为字节,取值范围 52428800-1073741824,默认值为 52428800;企业版参数| -|s3MigrateIntervalSec|3.3.4.3 之后|本地数据文件自动上传 S3 的触发周期,单位为秒。最小值:600;最大值:100000。默认值 3600;企业版参数| -|s3MigrateEnabled |3.3.4.3 之后|是否自动进行 S3 迁移,默认值为 0,表示关闭自动 S3 迁移,可配置为 1;企业版参数| -|s3Accesskey |3.3.4.3 之后|冒号分隔的用户 SecretId:SecretKey,例如 AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E;企业版参数| -|s3Endpoint |3.3.4.3 之后|用户所在地域的 COS 服务域名,支持 http 和 https,bucket 的区域需要与 endpoint 保持一致,否则无法访问;企业版参数| -|s3BucketName |3.3.4.3 之后|存储桶名称,减号后面是用户注册 COS 服务的 AppId,其中 AppId 是 COS 特有,AWS 和阿里云都没有,配置时需要作为 bucket name 的一部分,使用减号分隔;参数值均为字符串类型,但不需要引号;例如 test0711-1309024725;企业版参数| -|s3PageCacheSize |3.3.4.3 之后|S3 page cache 缓存页数目,取值范围 4-1048576,单位为页,默认值 4096;企业版参数| -|s3UploadDelaySec |3.3.4.3 之后|data 文件持续多长时间不再变动后上传至 S3,取值范围 1-2592000 (30天),单位为秒,默认值 60;企业版参数| -|cacheLazyLoadThreshold | |内部参数,缓存的装载策略| +|--------------------|----------|-| +|dataDir | |数据文件目录,所有的数据文件都将写入该目录,默认值 /var/lib/taos| +|tempDir | |指定所有系统运行过程中的临时文件生成的目录,默认值 /tmp| +|minimalDataDirGB | |dataDir 指定的时序数据存储目录所需要保留的最小空间,单位 GB,默认值 2| +|minimalTmpDirGB | |tempDir 所指定的临时文件目录所需要保留的最小空间,单位 GB,默认值 1| +|minDiskFreeSize |3.1.1.0 后|当某块磁盘上的可用空间小于等于这个阈值时,该磁盘将不再被选择用于生成新的数据文件,单位为字节,取值范围 52428800-1073741824,默认值为 52428800;企业版参数| +|s3MigrateIntervalSec|3.3.4.3 后|本地数据文件自动上传 S3 的触发周期,单位为秒。最小值:600;最大值:100000。默认值 3600;企业版参数| +|s3MigrateEnabled |3.3.4.3 后|是否自动进行 S3 迁移,默认值为 0,表示关闭自动 S3 迁移,可配置为 1;企业版参数| +|s3Accesskey |3.3.4.3 后|冒号分隔的用户 SecretId:SecretKey,例如 AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E;企业版参数| +|s3Endpoint |3.3.4.3 后|用户所在地域的 COS 服务域名,支持 http 和 https,bucket 的区域需要与 endpoint 保持一致,否则无法访问;企业版参数| +|s3BucketName |3.3.4.3 后|存储桶名称,减号后面是用户注册 COS 服务的 AppId,其中 AppId 是 COS 特有,AWS 和阿里云都没有,配置时需要作为 bucket name 的一部分,使用减号分隔;参数值均为字符串类型,但不需要引号;例如 test0711-1309024725;企业版参数| +|s3PageCacheSize |3.3.4.3 后|S3 page cache 缓存页数目,取值范围 4-1048576,单位为页,默认值 4096;企业版参数| +|s3UploadDelaySec |3.3.4.3 后|data 文件持续多长时间不再变动后上传至 S3,取值范围 1-2592000 (30天),单位为秒,默认值 60;企业版参数| +|cacheLazyLoadThreshold| |内部参数,缓存的装载策略| ### 集群相关 |参数名称|支持版本|参数含义| -|--------------------------|-----------|-| -|supportVnodes | |dnode 支持的最大 vnode 数目,取值范围 0-4096,默认值 CPU 核数的 2 倍 + 5| -|numOfCommitThreads | |落盘线程的最大数量,取值范围 0-1024,默认值为 4| -|numOfMnodeReadThreads | |mnode 的 Read 线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不超过 4)| -|numOfVnodeQueryThreads | |vnode 的 Query 线程数目,取值范围 0-1024,默认值为 CPU 核数的两倍(不超过 16)| -|numOfVnodeFetchThreads | |vnode 的 Fetch 线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不超过 4)| -|numOfVnodeRsmaThreads | |vnode 的 Rsma 线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不超过 4)| -|numOfQnodeQueryThreads | |qnode 的 Query 线程数目,取值范围 0-1024,默认值为 CPU 核数的两倍(不超过 16)| -|numOfSnodeSharedThreads | |snode 的共享线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不小于 2,不超过 4)| -|numOfSnodeUniqueThreads | |snode 的独占线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不小于 2,不超过 4)| -|ratioOfVnodeStreamThreads | |流计算使用 vnode 线程的比例,取值范围 0.01-4,默认值 4| -|ttlUnit | |ttl 参数的单位,取值范围 1-31572500,单位为秒,默认值 86400| -|ttlPushInterval | |ttl 检测超时频率,取值范围 
1-100000,单位为秒,默认值 10| -|ttlChangeOnWrite | |ttl 到期时间是否伴随表的修改操作改变;0:不改变,1:改变;默认值为 0| -|ttlBatchDropNum | |ttl 一批删除子表的数目,最小值为 0,默认值 10000| -|retentionSpeedLimitMB | |数据在不同级别硬盘上迁移时的速度限制,取值范围 0-1024,单位 MB,默认值 0,表示不限制| -|maxTsmaNum | |集群内可创建的TSMA个数;取值范围 0-3;默认值 3| -|tmqMaxTopicNum | |订阅最多可建立的 topic 数量;取值范围 1-10000;默认值为 20| -|tmqRowSize | |订阅数据块的最大记录条数,取值范围 1-1000000,默认值 4096| -|audit | |审计功能开关;企业版参数| -|auditInterval | |审计数据上报的时间间隔;企业版参数| -|auditCreateTable | |是否针对创建子表开启申计功能;企业版参数| -|encryptAlgorithm | |数据加密算法;企业版参数| -|encryptScope | |加密范围;企业版参数| -|enableWhiteList | |白名单功能开关;企业版参数| -|syncLogBufferMemoryAllowed| |一个 dnode 允许的 sync 日志缓存消息占用的内存最大值,单位 bytes,取值范围 104857600-INT64_MAX,默认值 服务器内存的 1/10,3.1.3.2/3.3.2.13 版本开始生效 | -|syncElectInterval | |内部参数,用于同步模块调试| -|syncHeartbeatInterval | |内部参数,用于同步模块调试| -|syncHeartbeatTimeout | |内部参数,用于同步模块调试| -|syncSnapReplMaxWaitN | |内部参数,用于同步模块调试| -|syncSnapReplMaxWaitN | |内部参数,用于同步模块调试| -|arbHeartBeatIntervalSec | |内部参数,用于同步模块调试| -|arbCheckSyncIntervalSec | |内部参数,用于同步模块调试| -|arbSetAssignedTimeoutSec | |内部参数,用于同步模块调试| -|mndSdbWriteDelta | |内部参数,用于 mnode 模块调试| -|mndLogRetention | |内部参数,用于 mnode 模块调试| -|skipGrant | |内部参数,用于授权检查| -|trimVDbIntervalSec | |内部参数,用于删除过期数据| -|ttlFlushThreshold | |内部参数,ttl 定时器的频率| -|compactPullupInterval | |内部参数,数据重整定时器的频率| -|walFsyncDataSizeLimit | |内部参数,WAL 进行 FSYNC 的阈值| -|transPullupInterval | |内部参数,mnode 执行事务的重试间隔| -|mqRebalanceInterval | |内部参数,消费者再平衡的时间间隔| -|uptimeInterval | |内部参数,用于记录系统启动时间| -|timeseriesThreshold | |内部参数,用于统计用量| -|udf | |是否启动 UDF 服务;0:不启动,1:启动;默认值为 0 | -|udfdResFuncs | |内部参数,用于 UDF 结果集设置| -|udfdLdLibPath | |内部参数,表示 UDF 装载的库路径| +|--------------------------|----------|-| +|supportVnodes | |dnode 支持的最大 vnode 数目,取值范围 0-4096,默认值 CPU 核数的 2 倍 + 5| +|numOfCommitThreads | |落盘线程的最大数量,取值范围 0-1024,默认值为 4| +|numOfMnodeReadThreads | |mnode 的 Read 线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不超过 4)| +|numOfVnodeQueryThreads | |vnode 的 Query 线程数目,取值范围 0-1024,默认值为 CPU 核数的两倍(不超过 16)| +|numOfVnodeFetchThreads | |vnode 的 Fetch 线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不超过 4)| +|numOfVnodeRsmaThreads | |vnode 的 Rsma 线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不超过 4)| +|numOfQnodeQueryThreads | |qnode 的 Query 线程数目,取值范围 0-1024,默认值为 CPU 核数的两倍(不超过 16)| +|numOfSnodeSharedThreads | |snode 的共享线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不小于 2,不超过 4)| +|numOfSnodeUniqueThreads | |snode 的独占线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不小于 2,不超过 4)| +|ratioOfVnodeStreamThreads | |流计算使用 vnode 线程的比例,取值范围 0.01-4,默认值 4| +|ttlUnit | |ttl 参数的单位,取值范围 1-31572500,单位为秒,默认值 86400| +|ttlPushInterval | |ttl 检测超时频率,取值范围 1-100000,单位为秒,默认值 10| +|ttlChangeOnWrite | |ttl 到期时间是否伴随表的修改操作改变;0:不改变,1:改变;默认值为 0| +|ttlBatchDropNum | |ttl 一批删除子表的数目,最小值为 0,默认值 10000| +|retentionSpeedLimitMB | |数据在不同级别硬盘上迁移时的速度限制,取值范围 0-1024,单位 MB,默认值 0,表示不限制| +|maxTsmaNum | |集群内可创建的TSMA个数;取值范围 0-3;默认值 3| +|tmqMaxTopicNum | |订阅最多可建立的 topic 数量;取值范围 1-10000;默认值为 20| +|tmqRowSize | |订阅数据块的最大记录条数,取值范围 1-1000000,默认值 4096| +|audit | |审计功能开关;企业版参数| +|auditInterval | |审计数据上报的时间间隔;企业版参数| +|auditCreateTable | |是否针对创建子表开启申计功能;企业版参数| +|encryptAlgorithm | |数据加密算法;企业版参数| +|encryptScope | |加密范围;企业版参数| +|enableWhiteList | |白名单功能开关;企业版参数| +|syncLogBufferMemoryAllowed| |一个 dnode 允许的 sync 日志缓存消息占用的内存最大值,单位 bytes,取值范围 104857600-INT64_MAX,默认值 服务器内存的 1/10,3.1.3.2/3.3.2.13 版本开始生效 | +|syncElectInterval | |内部参数,用于同步模块调试| +|syncHeartbeatInterval | |内部参数,用于同步模块调试| +|syncHeartbeatTimeout | |内部参数,用于同步模块调试| +|syncSnapReplMaxWaitN | |内部参数,用于同步模块调试| +|syncSnapReplMaxWaitN | |内部参数,用于同步模块调试| +|arbHeartBeatIntervalSec | |内部参数,用于同步模块调试| +|arbCheckSyncIntervalSec | |内部参数,用于同步模块调试| 
+|arbSetAssignedTimeoutSec | |内部参数,用于同步模块调试| +|mndSdbWriteDelta | |内部参数,用于 mnode 模块调试| +|mndLogRetention | |内部参数,用于 mnode 模块调试| +|skipGrant | |内部参数,用于授权检查| +|trimVDbIntervalSec | |内部参数,用于删除过期数据| +|ttlFlushThreshold | |内部参数,ttl 定时器的频率| +|compactPullupInterval | |内部参数,数据重整定时器的频率| +|walFsyncDataSizeLimit | |内部参数,WAL 进行 FSYNC 的阈值| +|transPullupInterval | |内部参数,mnode 执行事务的重试间隔| +|mqRebalanceInterval | |内部参数,消费者再平衡的时间间隔| +|uptimeInterval | |内部参数,用于记录系统启动时间| +|timeseriesThreshold | |内部参数,用于统计用量| +|udf | |是否启动 UDF 服务;0:不启动,1:启动;默认值为 0 | +|udfdResFuncs | |内部参数,用于 UDF 结果集设置| +|udfdLdLibPath | |内部参数,表示 UDF 装载的库路径| ### 流计算参数 |参数名称|支持版本|参数含义| -|-----------------------|-----------|-| -|disableStream | |流计算的启动开关| -|streamBufferSize | |控制内存中窗口状态缓存的大小,默认值为 128MB| -|streamAggCnt | |内部参数,并发进行聚合计算的数目| -|checkpointInterval | |内部参数,checkponit 同步间隔| -|concurrentCheckpoint | |内部参数,是否并发检查 checkpoint| -|maxStreamBackendCache | |内部参数,流计算使用的最大缓存| -|streamSinkDataRate | |内部参数,用于控制流计算结果的写入速度| +|-----------------------|----------|-| +|disableStream | |流计算的启动开关| +|streamBufferSize | |控制内存中窗口状态缓存的大小,默认值为 128MB| +|streamAggCnt | |内部参数,并发进行聚合计算的数目| +|checkpointInterval | |内部参数,checkponit 同步间隔| +|concurrentCheckpoint | |内部参数,是否并发检查 checkpoint| +|maxStreamBackendCache | |内部参数,流计算使用的最大缓存| +|streamSinkDataRate | |内部参数,用于控制流计算结果的写入速度| ### 日志相关 |参数名称|支持版本|参数含义| -|----------------|-----------|-| -|logDir | |日志文件目录,运行日志将写入该目录,默认值 /var/log/taos| -|minimalLogDirGB | |日志文件夹所在磁盘可用空间大小小于该值时,停止写日志,单位 GB,默认值 1| -|numOfLogLines | |单个日志文件允许的最大行数,默认值 10,000,000| -|asyncLog | |日志写入模式,0:同步,1:异步,默认值 1| -|logKeepDays | |日志文件的最长保存时间,单位:天,默认值 0,意味着无限保存,日志文件不会被重命名,也不会有新的日志文件滚动产生,但日志文件的内容有可能会不断滚动,取决于日志文件大小的设置;当设置为大于 0 的值时,当日志文件大小达到设置的上限时会被重命名为 taosdlog.yyy,其中 yyy 为日志文件最后修改的时间戳,并滚动产生新的日志文件| -|slowLogThreshold|3.3.3.0 之后|慢查询门限值,大于等于门限值认为是慢查询,单位秒,默认值 3 | -|slowLogMaxLen |3.3.3.0 之后|慢查询日志最大长度,取值范围 1-16384,默认值 4096| -|slowLogScope |3.3.3.0 之后|慢查询记录类型,取值范围 ALL/QUERY/INSERT/OTHERS/NONE,默认值 QUERY| -|slowLogExceptDb |3.3.3.0 之后|指定的数据库不上报慢查询,仅支持配置换一个数据库| -|debugFlag | |运行日志开关,131(输出错误和警告日志),135(输出错误、警告和调试日志),143(输出错误、警告、调试和跟踪日志);默认值 131 或 135 (取决于不同模块)| -|tmrDebugFlag | |定时器模块的日志开关,取值范围同上| -|uDebugFlag | |共用功能模块的日志开关,取值范围同上| -|rpcDebugFlag | |rpc 模块的日志开关,取值范围同上| -|qDebugFlag | |query 模块的日志开关,取值范围同上| -|dDebugFlag | |dnode 模块的日志开关,取值范围同上| -|vDebugFlag | |vnode 模块的日志开关,取值范围同上| -|mDebugFlag | |mnode 模块的日志开关,取值范围同上| -|azDebugFlag |3.3.4.3 之后|S3 模块的日志开关,取值范围同上| -|sDebugFlag | |sync 模块的日志开关,取值范围同上| -|tsdbDebugFlag | |tsdb 模块的日志开关,取值范围同上| -|tqDebugFlag | |tq 模块的日志开关,取值范围同上| -|fsDebugFlag | |fs 模块的日志开关,取值范围同上| -|udfDebugFlag | |udf 模块的日志开关,取值范围同上| -|smaDebugFlag | |sma 模块的日志开关,取值范围同上| -|idxDebugFlag | |index 模块的日志开关,取值范围同上| -|tdbDebugFlag | |tdb 模块的日志开关,取值范围同上| -|metaDebugFlag | |meta 模块的日志开关,取值范围同上| -|stDebugFlag | |stream 模块的日志开关,取值范围同上| -|sndDebugFlag | |snode 模块的日志开关,取值范围同上| +|----------------|----------|-| +|logDir | |日志文件目录,运行日志将写入该目录,默认值 /var/log/taos| +|minimalLogDirGB | |日志文件夹所在磁盘可用空间大小小于该值时,停止写日志,单位 GB,默认值 1| +|numOfLogLines | |单个日志文件允许的最大行数,默认值 10,000,000| +|asyncLog | |日志写入模式,0:同步,1:异步,默认值 1| +|logKeepDays | |日志文件的最长保存时间,单位:天,默认值 0,意味着无限保存,日志文件不会被重命名,也不会有新的日志文件滚动产生,但日志文件的内容有可能会不断滚动,取决于日志文件大小的设置;当设置为大于 0 的值时,当日志文件大小达到设置的上限时会被重命名为 taosdlog.yyy,其中 yyy 为日志文件最后修改的时间戳,并滚动产生新的日志文件| +|slowLogThreshold|3.3.3.0 后|慢查询门限值,大于等于门限值认为是慢查询,单位秒,默认值 3 | +|slowLogMaxLen |3.3.3.0 后|慢查询日志最大长度,取值范围 1-16384,默认值 4096| +|slowLogScope |3.3.3.0 后|慢查询记录类型,取值范围 ALL/QUERY/INSERT/OTHERS/NONE,默认值 QUERY| +|slowLogExceptDb |3.3.3.0 后|指定的数据库不上报慢查询,仅支持配置换一个数据库| +|debugFlag | 
|运行日志开关,131(输出错误和警告日志),135(输出错误、警告和调试日志),143(输出错误、警告、调试和跟踪日志);默认值 131 或 135 (取决于不同模块)| +|tmrDebugFlag | |定时器模块的日志开关,取值范围同上| +|uDebugFlag | |共用功能模块的日志开关,取值范围同上| +|rpcDebugFlag | |rpc 模块的日志开关,取值范围同上| +|qDebugFlag | |query 模块的日志开关,取值范围同上| +|dDebugFlag | |dnode 模块的日志开关,取值范围同上| +|vDebugFlag | |vnode 模块的日志开关,取值范围同上| +|mDebugFlag | |mnode 模块的日志开关,取值范围同上| +|azDebugFlag |3.3.4.3 后|S3 模块的日志开关,取值范围同上| +|sDebugFlag | |sync 模块的日志开关,取值范围同上| +|tsdbDebugFlag | |tsdb 模块的日志开关,取值范围同上| +|tqDebugFlag | |tq 模块的日志开关,取值范围同上| +|fsDebugFlag | |fs 模块的日志开关,取值范围同上| +|udfDebugFlag | |udf 模块的日志开关,取值范围同上| +|smaDebugFlag | |sma 模块的日志开关,取值范围同上| +|idxDebugFlag | |index 模块的日志开关,取值范围同上| +|tdbDebugFlag | |tdb 模块的日志开关,取值范围同上| +|metaDebugFlag | |meta 模块的日志开关,取值范围同上| +|stDebugFlag | |stream 模块的日志开关,取值范围同上| +|sndDebugFlag | |snode 模块的日志开关,取值范围同上| ### 调试相关 |参数名称|支持版本|参数含义| -|--------------------|-----------|-| -|enableCoreFile | |crash 时是否生成 core 文件,0:不生成,1:生成;默认值 1| -|configDir | |配置文件所在目录| -|scriptDir | |内部测试工具的脚本目录| -|assert | |断言控制开关,默认值 0| -|randErrorChance | |内部参数,用于随机失败测试| -|randErrorDivisor | |内部参数,用于随机失败测试| -|randErrorScope | |内部参数,用于随机失败测试| -|safetyCheckLevel | |内部参数,用于随机失败测试| -|experimental | |内部参数,用于一些实验特性| -|simdEnable |3.3.4.3 之后|内部参数,用于测试 SIMD 加速| -|AVX512Enable |3.3.4.3 之后|内部参数,用于测试 AVX512 加速| -|rsyncPort | |内部参数,用于调试流计算| -|snodeAddress | |内部参数,用于调试流计算| -|checkpointBackupDir | |内部参数,用于恢复 snode 数据| -|enableAuditDelete | |内部参数,用于测试审计功能| -|slowLogThresholdTest| |内部参数,用于测试慢日志| +|--------------------|----------|-| +|enableCoreFile | |crash 时是否生成 core 文件,0:不生成,1:生成;默认值 1| +|configDir | |配置文件所在目录| +|scriptDir | |内部测试工具的脚本目录| +|assert | |断言控制开关,默认值 0| +|randErrorChance | |内部参数,用于随机失败测试| +|randErrorDivisor | |内部参数,用于随机失败测试| +|randErrorScope | |内部参数,用于随机失败测试| +|safetyCheckLevel | |内部参数,用于随机失败测试| +|experimental | |内部参数,用于一些实验特性| +|simdEnable |3.3.4.3 后|内部参数,用于测试 SIMD 加速| +|AVX512Enable |3.3.4.3 后|内部参数,用于测试 AVX512 加速| +|rsyncPort | |内部参数,用于调试流计算| +|snodeAddress | |内部参数,用于调试流计算| +|checkpointBackupDir | |内部参数,用于恢复 snode 数据| +|enableAuditDelete | |内部参数,用于测试审计功能| +|slowLogThresholdTest| |内部参数,用于测试慢日志| +|bypassFlag |3.3.4.5 后|内部参数,用于短路测试,0:正常写入,1:写入消息在 taos 客户端发送 RPC 消息前返回,2:写入消息在 taosd 服务端收到 RPC 消息后返回,4:写入消息在 taosd 服务端写入内存缓存前返回,8:写入消息在 taosd 服务端数据落盘前返回;默认值 0| ### 压缩参数 |参数名称|支持版本|参数含义| -|------------|-----------|-| -|fPrecision | |设置 float 类型浮点数压缩精度 ,取值范围 0.1 ~ 0.00000001 ,默认值 0.00000001 , 小于此值的浮点数尾数部分将被截断| -|dPrecision | |设置 double 类型浮点数压缩精度 , 取值范围 0.1 ~ 0.0000000000000001 , 默认值 0.0000000000000001 , 小于此值的浮点数尾数部分将被截取| -|lossyColumn |3.3.0.0 之前|对 float 和/或 double 类型启用 TSZ 有损压缩;取值范围 float/double/none;默认值 none,表示关闭无损压缩| -|ifAdtFse | |在启用 TSZ 有损压缩时,使用 FSE 算法替换 HUFFMAN 算法,FSE 算法压缩速度更快,但解压稍慢,追求压缩速度可选用此算法;0:关闭,1:打开;默认值为 0| -|maxRange | |内部参数,用于有损压缩设置| -|curRange | |内部参数,用于有损压缩设置| -|compressor | |内部参数,用于有损压缩设置| +|------------|----------|-| +|fPrecision | |设置 float 类型浮点数压缩精度 ,取值范围 0.1 ~ 0.00000001 ,默认值 0.00000001 , 小于此值的浮点数尾数部分将被截断| +|dPrecision | |设置 double 类型浮点数压缩精度 , 取值范围 0.1 ~ 0.0000000000000001 , 默认值 0.0000000000000001 , 小于此值的浮点数尾数部分将被截取| +|lossyColumn |3.3.0.0 前|对 float 和/或 double 类型启用 TSZ 有损压缩;取值范围 float/double/none;默认值 none,表示关闭无损压缩| +|ifAdtFse | |在启用 TSZ 有损压缩时,使用 FSE 算法替换 HUFFMAN 算法,FSE 算法压缩速度更快,但解压稍慢,追求压缩速度可选用此算法;0:关闭,1:打开;默认值为 0| +|maxRange | |内部参数,用于有损压缩设置| +|curRange | |内部参数,用于有损压缩设置| +|compressor | |内部参数,用于有损压缩设置| **补充说明** 1. 
在 3.2.0.0 ~ 3.3.0.0(不包含)版本生效,启用该参数后不能回退到升级前的版本 diff --git a/docs/zh/14-reference/01-components/02-taosc.md b/docs/zh/14-reference/01-components/02-taosc.md index 3efaddee26..631f457391 100755 --- a/docs/zh/14-reference/01-components/02-taosc.md +++ b/docs/zh/14-reference/01-components/02-taosc.md @@ -10,99 +10,99 @@ TDengine 客户端驱动提供了应用编程所需要的全部 API,并且在 ### 连接相关 |参数名称|支持版本|参数含义| -|----------------------|-----------|-| -|firstEp | |启动时,主动连接的集群中首个 dnode 的 endpoint,缺省值:hostname:6030,若无法获取该服务器的 hostname,则赋值为 localhost| -|secondEp | |启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint,没有缺省值| -|compressMsgSize | |是否对 RPC 消息进行压缩;-1:所有消息都不压缩;0:所有消息都压缩;N (N>0):只有大于 N 个字节的消息才压缩;缺省值 -1| -|shellActivityTimer | |客户端向 mnode 发送心跳的时长,单位为秒,取值范围 1-120,默认值 3| -|numOfRpcSessions | |RPC 支持的最大连接数,取值范围 100-100000,缺省值 30000| -|numOfRpcThreads | |RPC 线程数目,默认值为 CPU 核数的一半| -|timeToGetAvailableConn| |获得可用连接的最长等待时间,取值范围 10-50000000,单位为毫秒,缺省值 500000| -|useAdapter | |内部参数,是否使用 taosadapter,影响 CSV 文件导入| -|shareConnLimit |3.3.4.3 之后|内部参数,一个链接可以共享的查询数目,取值范围 1-256,默认值 10| -|readTimeout |3.3.4.3 之后|内部参数,最小超时时间,取值范围 64-604800,单位为秒,默认值 900| +|----------------------|----------|-| +|firstEp | |启动时,主动连接的集群中首个 dnode 的 endpoint,缺省值:hostname:6030,若无法获取该服务器的 hostname,则赋值为 localhost| +|secondEp | |启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint,没有缺省值| +|compressMsgSize | |是否对 RPC 消息进行压缩;-1:所有消息都不压缩;0:所有消息都压缩;N (N>0):只有大于 N 个字节的消息才压缩;缺省值 -1| +|shellActivityTimer | |客户端向 mnode 发送心跳的时长,单位为秒,取值范围 1-120,默认值 3| +|numOfRpcSessions | |RPC 支持的最大连接数,取值范围 100-100000,缺省值 30000| +|numOfRpcThreads | |RPC 线程数目,默认值为 CPU 核数的一半| +|timeToGetAvailableConn| |获得可用连接的最长等待时间,取值范围 10-50000000,单位为毫秒,缺省值 500000| +|useAdapter | |内部参数,是否使用 taosadapter,影响 CSV 文件导入| +|shareConnLimit |3.3.4.3 后|内部参数,一个链接可以共享的查询数目,取值范围 1-256,默认值 10| +|readTimeout |3.3.4.3 后|内部参数,最小超时时间,取值范围 64-604800,单位为秒,默认值 900| ### 查询相关 |参数名称|支持版本|参数含义| -|---------------------------------|-----------|-| -|countAlwaysReturnValue | |count/hyperloglog 函数在输入数据为空或者 NULL 的情况下是否返回值;0:返回空行,1:返回;默认值 1;该参数设置为 1 时,如果查询中含有 INTERVAL 子句或者该查询使用了 TSMA 时,且相应的组或窗口内数据为空或者 NULL,对应的组或窗口将不返回查询结果;注意此参数客户端和服务端值应保持一致| -|keepColumnName | |Last、First、LastRow 函数查询且未指定别名时,自动设置别名为列名(不含函数名),因此 order by 子句如果引用了该列名将自动引用该列对应的函数;1:表示自动设置别名为列名(不包含函数名),0:表示不自动设置别名;缺省值:0| -|multiResultFunctionStarReturnTags|3.3.3.0 以后|查询超级表时,last(\*)/last_row(\*)/first(\*) 是否返回标签列;查询普通表、子表时,不受该参数影响;0:不返回标签列,1:返回标签列;缺省值:0;该参数设置为 0 时,last(\*)/last_row(\*)/first(\*) 只返回超级表的普通列;为 1 时,返回超级表的普通列和标签列| -| - | |指定单个客户端元数据缓存大小的最大值,单位 MB;缺省值 -1,表示无限制| -|maxTsmaCalcDelay | |查询时客户端可允许的 tsma 计算延迟,若 tsma 的计算延迟大于配置值,则该 TSMA 将不会被使用;取值范围 600s - 86400s,即 10 分钟 - 1 小时;缺省值:600 秒| -|tsmaDataDeleteMark | |TSMA 计算的历史数据中间结果保存时间,单位为毫秒;取值范围 >= 3600000,即大于等于1h;缺省值:86400000,即 1d | -|queryPolicy | |查询语句的执行策略,1:只使用 vnode,不使用 qnode;2:没有扫描算子的子任务在 qnode 执行,带扫描算子的子任务在 vnode 执行;3:vnode 只运行扫描算子,其余算子均在 qnode 执行;缺省值:1| -|queryTableNotExistAsEmpty | |查询表不存在时是否返回空结果集;false:返回错误;true:返回空结果集;缺省值 false| -|querySmaOptimize | |sma index 的优化策略,0:表示不使用 sma index,永远从原始数据进行查询;1:表示使用 sma index,对符合的语句,直接从预计算的结果进行查询;缺省值:0| -|queryPlannerTrace | |内部参数,查询计划是否输出详细日志| -|queryNodeChunkSize | |内部参数,查询计划的块大小| -|queryUseNodeAllocator | |内部参数,查询计划的分配方法| -|queryMaxConcurrentTables | |内部参数,查询计划的并发数目| -|enableQueryHb | |内部参数,是否发送查询心跳消息| -|minSlidingTime | |内部参数,sliding 的最小允许值| -|minIntervalTime | |内部参数,interval 的最小允许值| +|---------------------------------|---------|-| +|countAlwaysReturnValue | |count/hyperloglog 函数在输入数据为空或者 NULL 的情况下是否返回值;0:返回空行,1:返回;默认值 1;该参数设置为 1 时,如果查询中含有 INTERVAL 子句或者该查询使用了 TSMA 时,且相应的组或窗口内数据为空或者 
NULL,对应的组或窗口将不返回查询结果;注意此参数客户端和服务端值应保持一致| +|keepColumnName | |Last、First、LastRow 函数查询且未指定别名时,自动设置别名为列名(不含函数名),因此 order by 子句如果引用了该列名将自动引用该列对应的函数;1:表示自动设置别名为列名(不包含函数名),0:表示不自动设置别名;缺省值:0| +|multiResultFunctionStarReturnTags|3.3.3.0 后|查询超级表时,last(\*)/last_row(\*)/first(\*) 是否返回标签列;查询普通表、子表时,不受该参数影响;0:不返回标签列,1:返回标签列;缺省值:0;该参数设置为 0 时,last(\*)/last_row(\*)/first(\*) 只返回超级表的普通列;为 1 时,返回超级表的普通列和标签列| +|metaCacheMaxSize | |指定单个客户端元数据缓存大小的最大值,单位 MB;缺省值 -1,表示无限制| +|maxTsmaCalcDelay | |查询时客户端可允许的 tsma 计算延迟,若 tsma 的计算延迟大于配置值,则该 TSMA 将不会被使用;取值范围 600s - 86400s,即 10 分钟 - 1 小时;缺省值:600 秒| +|tsmaDataDeleteMark | |TSMA 计算的历史数据中间结果保存时间,单位为毫秒;取值范围 >= 3600000,即大于等于1h;缺省值:86400000,即 1d | +|queryPolicy | |查询语句的执行策略,1:只使用 vnode,不使用 qnode;2:没有扫描算子的子任务在 qnode 执行,带扫描算子的子任务在 vnode 执行;3:vnode 只运行扫描算子,其余算子均在 qnode 执行;缺省值:1| +|queryTableNotExistAsEmpty | |查询表不存在时是否返回空结果集;false:返回错误;true:返回空结果集;缺省值 false| +|querySmaOptimize | |sma index 的优化策略,0:表示不使用 sma index,永远从原始数据进行查询;1:表示使用 sma index,对符合的语句,直接从预计算的结果进行查询;缺省值:0| +|queryPlannerTrace | |内部参数,查询计划是否输出详细日志| +|queryNodeChunkSize | |内部参数,查询计划的块大小| +|queryUseNodeAllocator | |内部参数,查询计划的分配方法| +|queryMaxConcurrentTables | |内部参数,查询计划的并发数目| +|enableQueryHb | |内部参数,是否发送查询心跳消息| +|minSlidingTime | |内部参数,sliding 的最小允许值| +|minIntervalTime | |内部参数,interval 的最小允许值| ### 写入相关 |参数名称|支持版本|参数含义| -|------------------------------|-----------|-| -|smlChildTableName | |schemaless 自定义的子表名的 key,无缺省值| -|smlAutoChildTableNameDelimiter| |schemaless tag 之间的连接符,连起来作为子表名,无缺省值| -|smlTagName | |schemaless tag 为空时默认的 tag 名字,缺省值 "_tag_null"| -|smlTsDefaultName | |schemaless 自动建表的时间列名字通过该配置设置,缺省值 "_ts"| -|smlDot2Underline | |schemaless 把超级表名中的 dot 转成下划线| -|maxInsertBatchRows | |内部参数,一批写入的最大条数| +|------------------------------|----------|-| +|smlChildTableName | |schemaless 自定义的子表名的 key,无缺省值| +|smlAutoChildTableNameDelimiter| |schemaless tag 之间的连接符,连起来作为子表名,无缺省值| +|smlTagName | |schemaless tag 为空时默认的 tag 名字,缺省值 "_tag_null"| +|smlTsDefaultName | |schemaless 自动建表的时间列名字通过该配置设置,缺省值 "_ts"| +|smlDot2Underline | |schemaless 把超级表名中的 dot 转成下划线| +|maxInsertBatchRows | |内部参数,一批写入的最大条数| ### 区域相关 |参数名称|支持版本|参数含义| -|-----------------|-----------|-| -|timezone | |时区;缺省从系统中动态获取当前的时区设置| -|locale | |系统区位信息及编码格式,缺省从系统中获取| -|charset | |字符集编码,缺省从系统中获取| +|-----------------|----------|-| +|timezone | |时区;缺省从系统中动态获取当前的时区设置| +|locale | |系统区位信息及编码格式,缺省从系统中获取| +|charset | |字符集编码,缺省从系统中获取| ### 存储相关 |参数名称|支持版本|参数含义| -|-----------------|-----------|-| -|tempDir | |指定所有运行过程中的临时文件生成的目录,Linux 平台默认值为 /tmp| -|minimalTmpDirGB | |tempDir 所指定的临时文件目录所需要保留的最小空间,单位 GB,缺省值:1| +|-----------------|----------|-| +|tempDir | |指定所有运行过程中的临时文件生成的目录,Linux 平台默认值为 /tmp| +|minimalTmpDirGB | |tempDir 所指定的临时文件目录所需要保留的最小空间,单位 GB,缺省值:1| ### 日志相关 |参数名称|支持版本|参数含义| -|-----------------|-----------|-| -|logDir | |日志文件目录,运行日志将写入该目录,缺省值:/var/log/taos| -|minimalLogDirGB | |日志文件夹所在磁盘可用空间大小小于该值时,停止写日志,单位 GB,缺省值:1| -|numOfLogLines | |单个日志文件允许的最大行数,缺省值:10,000,000| -|asyncLog | |日志写入模式,0:同步,1:异步,缺省值:1| -|logKeepDays | |日志文件的最长保存时间,单位:天,缺省值:0,意味着无限保存,日志文件不会被重命名,也不会有新的日志文件滚动产生,但日志文件的内容有可能会不断滚动,取决于日志文件大小的设置;当设置为大于 0 的值时,当日志文件大小达到设置的上限时会被重命名为 taoslogx.yyy,其中 yyy 为日志文件最后修改的时间戳,并滚动产生新的日志文件| -|debugFlag | |运行日志开关,131(输出错误和警告日志),135(输出错误、警告和调试日志),143(输出错误、警告、调试和跟踪日志);默认值 131 或 135 (取决于不同模块)| -|tmrDebugFlag | |定时器模块的日志开关,取值范围同上| -|uDebugFlag | |共用功能模块的日志开关,取值范围同上| -|rpcDebugFlag | |rpc 模块的日志开关,取值范围同上| -|jniDebugFlag | |jni 模块的日志开关,取值范围同上| -|qDebugFlag | |query 模块的日志开关,取值范围同上| -|cDebugFlag | |客户端模块的日志开关,取值范围同上| -|simDebugFlag | |内部参数,测试工具的日志开关,取值范围同上| -|tqClientDebugFlag|3.3.4.3 
之后|客户端模块的日志开关,取值范围同上| +|-----------------|----------|-| +|logDir | |日志文件目录,运行日志将写入该目录,缺省值:/var/log/taos| +|minimalLogDirGB | |日志文件夹所在磁盘可用空间大小小于该值时,停止写日志,单位 GB,缺省值:1| +|numOfLogLines | |单个日志文件允许的最大行数,缺省值:10,000,000| +|asyncLog | |日志写入模式,0:同步,1:异步,缺省值:1| +|logKeepDays | |日志文件的最长保存时间,单位:天,缺省值:0,意味着无限保存,日志文件不会被重命名,也不会有新的日志文件滚动产生,但日志文件的内容有可能会不断滚动,取决于日志文件大小的设置;当设置为大于 0 的值时,当日志文件大小达到设置的上限时会被重命名为 taoslogx.yyy,其中 yyy 为日志文件最后修改的时间戳,并滚动产生新的日志文件| +|debugFlag | |运行日志开关,131(输出错误和警告日志),135(输出错误、警告和调试日志),143(输出错误、警告、调试和跟踪日志);默认值 131 或 135 (取决于不同模块)| +|tmrDebugFlag | |定时器模块的日志开关,取值范围同上| +|uDebugFlag | |共用功能模块的日志开关,取值范围同上| +|rpcDebugFlag | |rpc 模块的日志开关,取值范围同上| +|jniDebugFlag | |jni 模块的日志开关,取值范围同上| +|qDebugFlag | |query 模块的日志开关,取值范围同上| +|cDebugFlag | |客户端模块的日志开关,取值范围同上| +|simDebugFlag | |内部参数,测试工具的日志开关,取值范围同上| +|tqClientDebugFlag|3.3.4.3 后|客户端模块的日志开关,取值范围同上| ### 调试相关 |参数名称|支持版本|参数含义| |-----------------|-----------|-| -|crashReporting | |是否上传 crash 到 telemetry,0:不上传,1:上传;缺省值:1| -|enableCoreFile | |crash 时是否生成 core 文件,0:不生成,1:生成;缺省值:1| -|assert | |断言控制开关,缺省值:0| -|configDir | |配置文件所在目录| -|scriptDir | |内部参数,测试用例的目录| -|randErrorChance |3.3.3.0 之后|内部参数,用于随机失败测试| -|randErrorDivisor |3.3.3.0 之后|内部参数,用于随机失败测试| -|randErrorScope |3.3.3.0 之后|内部参数,用于随机失败测试| -|safetyCheckLevel |3.3.3.0 之后|内部参数,用于随机失败测试| -|simdEnable |3.3.4.3 之后|内部参数,用于测试 SIMD 加速| -|AVX512Enable |3.3.4.3 之后|内部参数,用于测试 AVX512 加速| +|crashReporting | |是否上传 crash 到 telemetry,0:不上传,1:上传;缺省值:1| +|enableCoreFile | |crash 时是否生成 core 文件,0:不生成,1:生成;缺省值:1| +|assert | |断言控制开关,缺省值:0| +|configDir | |配置文件所在目录| +|scriptDir | |内部参数,测试用例的目录| +|randErrorChance |3.3.3.0 后|内部参数,用于随机失败测试| +|randErrorDivisor |3.3.3.0 后|内部参数,用于随机失败测试| +|randErrorScope |3.3.3.0 后|内部参数,用于随机失败测试| +|safetyCheckLevel |3.3.3.0 后|内部参数,用于随机失败测试| +|simdEnable |3.3.4.3 后|内部参数,用于测试 SIMD 加速| +|AVX512Enable |3.3.4.3 后|内部参数,用于测试 AVX512 加速| +|bypassFlag |3.3.4.5 后|内部参数,用于短路测试,0:正常写入,1:写入消息在 taos 客户端发送 RPC 消息前返回,2:写入消息在 taosd 服务端收到 RPC 消息后返回,4:写入消息在 taosd 服务端写入内存缓存前返回,8:写入消息在 taosd 服务端数据落盘前返回;缺省值:0| ### SHELL 相关 |参数名称|支持版本|参数含义| -|-----------------|-----------|-| -|enableScience | |是否开启科学计数法显示浮点数;0:不开始,1:开启;缺省值:1| +|-----------------|----------|-| +|enableScience | |是否开启科学计数法显示浮点数;0:不开始,1:开启;缺省值:1| ## API diff --git a/docs/zh/14-reference/03-taos-sql/02-database.md b/docs/zh/14-reference/03-taos-sql/02-database.md index 4f799bdde9..1ccc5071f0 100644 --- a/docs/zh/14-reference/03-taos-sql/02-database.md +++ b/docs/zh/14-reference/03-taos-sql/02-database.md @@ -8,10 +8,10 @@ description: "创建、删除数据库,查看、修改数据库参数" ```sql CREATE DATABASE [IF NOT EXISTS] db_name [database_options] - + database_options: database_option ... 
- + database_option: { VGROUPS value | PRECISION {'ms' | 'us' | 'ns'} @@ -26,6 +26,7 @@ database_option: { | MAXROWS value | MINROWS value | KEEP value + | KEEP_TIME_OFFSET value | STT_TRIGGER value | SINGLE_STABLE {0 | 1} | TABLE_PREFIX value @@ -43,7 +44,7 @@ database_option: { - VGROUPS:数据库中初始 vgroup 的数目。 - PRECISION:数据库的时间戳精度。ms 表示毫秒,us 表示微秒,ns 表示纳秒,默认 ms 毫秒。 -- REPLICA:表示数据库副本数,取值为 1、2 或 3,默认为 1; 2 仅在企业版 3.3.0.0 及以后版本中可用。在集群中使用,副本数必须小于或等于 DNODE 的数目。且使用时存在以下限制: +- REPLICA:表示数据库副本数,取值为 1、2 或 3,默认为 1; 2 仅在企业版 3.3.0.0 及以后版本中可用。在集群中使用,副本数必须小于或等于 DNODE 的数目。且使用时存在以下限制: - 暂不支持对双副本数据库相关 Vgroup 进行 SPLITE VGROUP 或 REDISTRIBUTE VGROUP 操作 - 单副本数据库可变更为双副本数据库,但不支持从双副本变更为其它副本数,也不支持从三副本变更为双副本 - BUFFER: 一个 VNODE 写入内存池大小,单位为 MB,默认为 256,最小为 3,最大为 16384。 @@ -63,7 +64,8 @@ database_option: { - DURATION:数据文件存储数据的时间跨度。可以使用加单位的表示形式,如 DURATION 100h、DURATION 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。不加时间单位时默认单位为天,如 DURATION 50 表示 50 天。 - MAXROWS:文件块中记录的最大条数,默认为 4096 条。 - MINROWS:文件块中记录的最小条数,默认为 100 条。 -- KEEP:表示数据文件保存的天数,缺省值为 3650,取值范围 [1, 365000],且必须大于或等于3倍的 DURATION 参数值。数据库会自动删除保存时间超过 KEEP 值的数据。KEEP 可以使用加单位的表示形式,如 KEEP 100h、KEEP 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。也可以不写单位,如 KEEP 50,此时默认单位为天。企业版支持[多级存储](https://docs.taosdata.com/tdinternal/arch/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8)功能, 因此, 可以设置多个保存时间(多个以英文逗号分隔,最多 3 个,满足 keep 0 \<= keep 1 \<= keep 2,如 KEEP 100h,100d,3650d); 社区版不支持多级存储功能(即使配置了多个保存时间, 也不会生效, KEEP 会取最大的保存时间)。 +- KEEP:表示数据文件保存的天数,缺省值为 3650,取值范围 [1, 365000],且必须大于或等于 3 倍的 DURATION 参数值。数据库会自动删除保存时间超过 KEEP 值的数据从而释放存储空间。KEEP 可以使用加单位的表示形式,如 KEEP 100h、KEEP 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。也可以不写单位,如 KEEP 50,此时默认单位为天。企业版支持[多级存储](https://docs.taosdata.com/tdinternal/arch/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8)功能, 因此, 可以设置多个保存时间(多个以英文逗号分隔,最多 3 个,满足 keep 0 \<= keep 1 \<= keep 2,如 KEEP 100h,100d,3650d); 社区版不支持多级存储功能(即使配置了多个保存时间, 也不会生效, KEEP 会取最大的保存时间)。了解更多,请点击 [关于主键时间戳](https://docs.taosdata.com/reference/taos-sql/insert/) +- KEEP_TIME_OFFSET:自 3.2.0.0 版本生效。删除或迁移保存时间超过 KEEP 值的数据的延迟执行时间,默认值为 0 (小时)。在数据文件保存时间超过 KEEP 后,删除或迁移操作不会立即执行,而会额外等待本参数指定的时间间隔,以实现与业务高峰期错开的目的。 - STT_TRIGGER:表示落盘文件触发文件合并的个数。开源版本固定为 1,企业版本可设置范围为 1 到 16。对于少表高频写入场景,此参数建议使用默认配置;而对于多表低频写入场景,此参数建议配置较大的值。 - SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表,用于超级表列非常多的情况。 - 0:表示可以创建多张超级表。 @@ -78,6 +80,7 @@ database_option: { - WAL_FSYNC_PERIOD:当 WAL_LEVEL 参数设置为 2 时,用于设置落盘的周期。默认为 3000,单位毫秒。最小为 0,表示每次写入立即落盘;最大为 180000,即三分钟。 - WAL_RETENTION_PERIOD: 为了数据订阅消费,需要 WAL 日志文件额外保留的最大时长策略。WAL 日志清理,不受订阅客户端消费状态影响。单位为 s。默认为 3600,表示在 WAL 保留最近 3600 秒的数据,请根据数据订阅的需要修改这个参数为适当值。 - WAL_RETENTION_SIZE:为了数据订阅消费,需要 WAL 日志文件额外保留的最大累计大小策略。单位为 KB。默认为 0,表示累计大小无上限。 + ### 创建数据库示例 ```sql @@ -88,7 +91,7 @@ create database if not exists db vgroups 10 buffer 10 ### 使用数据库 -``` +```sql USE db_name; ``` @@ -96,7 +99,7 @@ USE db_name; ## 删除数据库 -``` +```sql DROP DATABASE [IF EXISTS] db_name ``` @@ -126,7 +129,7 @@ alter_database_option: { } ``` -### 修改 CACHESIZE +### 修改 CACHESIZE 修改数据库参数的命令使用简单,难的是如何确定是否需要修改以及如何修改。本小节描述如何判断数据库的 cachesize 是否够用。 @@ -155,13 +158,13 @@ alter_database_option: { ### 查看系统中的所有数据库 -``` +```sql SHOW DATABASES; ``` ### 显示一个数据库的创建语句 -``` +```sql SHOW CREATE DATABASE db_name \G; ``` diff --git a/docs/zh/14-reference/03-taos-sql/03-table.md b/docs/zh/14-reference/03-taos-sql/03-table.md index 81ad60e3d2..40e2802fcd 100644 --- a/docs/zh/14-reference/03-taos-sql/03-table.md +++ b/docs/zh/14-reference/03-taos-sql/03-table.md @@ -227,7 +227,7 @@ DROP TABLE [IF EXISTS] [db_name.]tb_name [, [IF EXISTS] [db_name.]tb_name] ... 
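+例如,按照上述语法,下面的示意 SQL 一次删除两张表(表名 d1001、d1002 仅为示例假设,IF EXISTS 可逐表指定):
+
+```sql
+DROP TABLE IF EXISTS d1001, IF EXISTS d1002;
+```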
如下 SQL 语句可以列出当前数据库中的所有表名。 ```sql -SHOW TABLES [LIKE tb_name_wildchar]; +SHOW TABLES [LIKE tb_name_wildcard]; ``` ### 显示表创建语句 diff --git a/docs/zh/14-reference/03-taos-sql/05-insert.md b/docs/zh/14-reference/03-taos-sql/05-insert.md index 40f8e95006..ccf24e882c 100644 --- a/docs/zh/14-reference/03-taos-sql/05-insert.md +++ b/docs/zh/14-reference/03-taos-sql/05-insert.md @@ -5,9 +5,11 @@ description: 写入数据的详细语法 --- ## 写入语法 + 写入记录支持两种语法, 正常语法和超级表语法. 正常语法下, 紧跟INSERT INTO后名的表名是子表名或者普通表名. 超级表语法下, 紧跟INSERT INTO后名的表名是超级表名 ### 正常语法 + ```sql INSERT INTO tb_name @@ -22,7 +24,9 @@ INSERT INTO INSERT INTO tb_name [(field1_name, ...)] subquery ``` + ### 超级表语法 + ```sql INSERT INTO stb1_name [(field1_name, ...)] @@ -32,16 +36,18 @@ INSERT INTO ...]; ``` -**关于时间戳** +#### 关于主键时间戳 -1. TDengine 要求插入的数据必须要有时间戳,插入数据的时间戳要注意以下几点: +TDengine 要求插入的数据必须要有时间戳,插入数据的时间戳要注意以下几点: -2. 时间戳不同的格式语法会有不同的精度影响。字符串格式的时间戳写法不受所在 DATABASE 的时间精度设置影响;而长整形格式的时间戳写法会受到所在 DATABASE 的时间精度设置影响。例如,时间戳"2021-07-13 16:16:48"的 UNIX 秒数为 1626164208。则其在毫秒精度下需要写作 1626164208000,在微秒精度设置下就需要写为 1626164208000000,纳秒精度设置下需要写为 1626164208000000000。 +1. 时间戳不同的格式语法会有不同的精度影响。字符串格式的时间戳写法不受所在 DATABASE 的时间精度设置影响;而长整形格式的时间戳写法会受到所在 DATABASE 的时间精度设置影响。例如,时间戳"2021-07-13 16:16:48"的 UNIX 秒数为 1626164208。则其在毫秒精度下需要写作 1626164208000,在微秒精度设置下就需要写为 1626164208000000,纳秒精度设置下需要写为 1626164208000000000。 -3. 一次插入多行数据时,不要把首列的时间戳的值都写 NOW。否则会导致语句中的多条记录使用相同的时间戳,于是就可能出现相互覆盖以致这些数据行无法全部被正确保存。其原因在于,NOW 函数在执行中会被解析为所在 SQL 语句的客户端执行时间,出现在同一语句中的多个 NOW 标记也就会被替换为完全相同的时间戳取值。 - 允许插入的最老记录的时间戳,是相对于当前服务器时间,减去配置的 KEEP 值(数据保留的天数, 可以在创建数据库时指定,缺省值是 3650 天)。允许插入的最新记录的时间戳,取决于数据库的 PRECISION 值(时间戳精度, 可以在创建数据库时指定, ms 表示毫秒,us 表示微秒,ns 表示纳秒,默认毫秒):如果是毫秒或微秒, 取值为 1970 年 1 月 1 日 00:00:00.000 UTC 加上 1000 年, 即 2970 年 1 月 1 日 00:00:00.000 UTC; 如果是纳秒, 取值为 1970 年 1 月 1 日 00:00:00.000000000 UTC 加上 292 年, 即 2262 年 1 月 1 日 00:00:00.000000000 UTC。 +2. 一次插入多行数据时,不要把首列的时间戳的值都写 NOW。否则会导致语句中的多条记录使用相同的时间戳,于是就可能出现相互覆盖以致这些数据行无法全部被正确保存。其原因在于,NOW 函数在执行中会被解析为所在 SQL 语句的客户端执行时间,出现在同一语句中的多个 NOW 标记也就会被替换为完全相同的时间戳取值。 -**语法说明** +3. 允许插入的最大时间戳为当前时间加上 100 年, 比如当前时间为`2024-11-11 12:00:00`,则允许插入的最大时间戳为`2124-11-11 12:00:00`。允许插入的最小时间戳取决于数据库的 KEEP 设置。企业版支持三级存储,可以设置多个 KEEP 时间,如下图所示,如果数据库的 KEEP 配置为`100h,100d,3650d`,则允许的最小时间戳为当前时间减去 3650 天。那么时间戳在`[Now - 100h, Now + 100y)`内的会保存在一级存储,时间戳在`[Now - 100d, Now - 100h)`内的会保存在二级存储,时间戳在`[Now - 3650d, Now - 100d)`内的会保存在三级存储。社区版不支持多级存储功能,只能配置一个 KEEP 值,如果配置多个,则取其最大者。如果时间戳不在有效时间范围内,TDengine 将返回错误“Timestamp out of range"。 +![Keep timerange 示意图](./pic/database-keep.jpg) + +#### 语法说明 1. 可以指定要插入值的列,对于未指定的列数据库将自动填充为 NULL。 @@ -56,22 +62,24 @@ INSERT INTO ```sql INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a'); ``` + 6. 对于向多个子表插入数据的情况,依然会有部分数据写入失败,部分数据写入成功的情况。这是因为多个子表可能分布在不同的 VNODE 上,客户端将 INSERT 语句完整解析后,将数据发往各个涉及的 VNODE 上,每个 VNODE 独立进行写入操作。如果某个 VNODE 因为某些原因(比如网络问题或磁盘故障)导致写入失败,并不会影响其他 VNODE 节点的写入。 7. 主键列值必须指定且不能为 NULL。 -**正常语法说明** +#### 正常语法说明 1. USING 子句是自动建表语法。如果用户在写数据时并不确定某个表是否存在,此时可以在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表。自动建表时,要求必须以超级表为模板,并写明数据表的 TAGS 取值。可以只是指定部分 TAGS 列的取值,未被指定的 TAGS 列将置为 NULL。 2. 可以使用 `INSERT ... subquery` 语句将 TDengine 中的数据插入到指定表中。subquery 可以是任意的查询语句。此语法只能用于子表和普通表,且不支持自动建表。 -**超级表语法说明** +#### 超级表语法说明 1. 在 field_name 列表中必须指定 tbname 列,否则报错. tbname列是子表名, 类型是字符串. 其中字符不用转义, 不能包含点‘.‘ 2. 在 field_name 列表中支持标签列,当子表已经存在时,指定标签值并不会触发标签值的修改;当子表不存在时会使用所指定的标签值建立子表. 如果没有指定任何标签列,则把所有标签列的值设置为NULL 3. 
不支持参数绑定写入 + ## 插入一条记录 指定已经创建好的数据子表的表名,并通过 VALUES 关键字提供一行或多行数据,即可向数据库写入这些数据。例如,执行如下语句可以写入一行记录: @@ -154,15 +162,18 @@ INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/c INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv' d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv'; ``` + ## 向超级表插入数据并自动创建子表 自动建表, 表名通过 tbname 列指定 + ```sql INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase) VALUES ('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:35.779', 10.15, 217, 0.33) ('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 217, 0.33) ``` + ## 通过 CSV 文件向超级表插入数据并自动创建子表 根据 csv 文件内容,为 超级表创建子表,并填充相应 column 与 tag diff --git a/docs/zh/14-reference/03-taos-sql/10-function.md b/docs/zh/14-reference/03-taos-sql/10-function.md index ae256a4ac0..2f4b739447 100644 --- a/docs/zh/14-reference/03-taos-sql/10-function.md +++ b/docs/zh/14-reference/03-taos-sql/10-function.md @@ -1065,7 +1065,7 @@ CAST(expr AS type_name) TO_ISO8601(expr [, timezone]) ``` -**功能说明**:将 UNIX 时间戳转换成为 ISO8601 标准的日期时间格式,并附加时区信息。timezone 参数允许用户为输出结果指定附带任意时区信息。如果 timezone 参数省略,输出结果则附带当前客户端的系统时区信息。 +**功能说明**:将时间戳转换成为 ISO8601 标准的日期时间格式,并附加时区信息。timezone 参数允许用户为输出结果指定附带任意时区信息。如果 timezone 参数省略,输出结果则附带当前客户端的系统时区信息。 **返回结果数据类型**:VARCHAR 类型。 @@ -1109,7 +1109,7 @@ return_timestamp: { } ``` -**功能说明**:将日期时间格式的字符串转换成为 UNIX 时间戳。 +**功能说明**:将日期时间格式的字符串转换成为时间戳。 **返回结果数据类型**:BIGINT, TIMESTAMP。 @@ -1257,8 +1257,8 @@ TIMEDIFF(expr1, expr2 [, time_unit]) **返回结果类型**:BIGINT。 **适用数据类型**: -- `expr1`:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。 -- `expr2`:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。 +- `expr1`:表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。 +- `expr2`:表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。 - `time_unit`:见使用说明。 **嵌套子查询支持**:适用于内层查询和外层查询。 @@ -1301,7 +1301,7 @@ use_current_timezone: { **返回结果数据类型**:TIMESTAMP。 -**应用字段**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。 +**应用字段**:表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。 **适用于**:表和超级表。 @@ -1364,7 +1364,7 @@ WEEK(expr [, mode]) **返回结果类型**:BIGINT。 **适用数据类型**: -- `expr`:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。 +- `expr`:表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。 - `mode`:0 - 7 之间的整数。 **嵌套子查询支持**:适用于内层查询和外层查询。 @@ -1424,7 +1424,7 @@ WEEKOFYEAR(expr) **返回结果类型**:BIGINT。 -**适用数据类型**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。 +**适用数据类型**:表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。 **嵌套子查询支持**:适用于内层查询和外层查询。 @@ -1451,7 +1451,7 @@ WEEKDAY(expr) **返回结果类型**:BIGINT。 -**适用数据类型**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。 +**适用数据类型**:表示 表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。 **嵌套子查询支持**:适用于内层查询和外层查询。 @@ -1478,7 +1478,7 @@ DAYOFWEEK(expr) **返回结果类型**:BIGINT。 -**适用数据类型**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。 +**适用数据类型**:表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。 **嵌套子查询支持**:适用于内层查询和外层查询。 diff --git a/docs/zh/14-reference/03-taos-sql/28-index.md b/docs/zh/14-reference/03-taos-sql/28-tsma.md similarity index 100% rename from docs/zh/14-reference/03-taos-sql/28-index.md rename to docs/zh/14-reference/03-taos-sql/28-tsma.md diff --git 
a/docs/zh/14-reference/03-taos-sql/32-compress.md b/docs/zh/14-reference/03-taos-sql/32-compress.md index 0f2b260832..51650c9123 100644 --- a/docs/zh/14-reference/03-taos-sql/32-compress.md +++ b/docs/zh/14-reference/03-taos-sql/32-compress.md @@ -31,11 +31,12 @@ description: 可配置压缩算法 | 数据类型 | 可选编码算法 | 编码算法默认值 | 可选压缩算法|压缩算法默认值| 压缩等级默认值| | :-----------:|:----------:|:-------:|:-------:|:----------:|:----:| -| tinyint/untinyint/smallint/usmallint/int/uint | simple8b| simple8b | lz4/zlib/zstd/xz| lz4 | medium| -| bigint/ubigint/timestamp | simple8b/delta-i | delta-i |lz4/zlib/zstd/xz | lz4| medium| +| int/uint | simple8b| simple8b | lz4/zlib/zstd/xz| lz4 | medium| +| tinyint/untinyint/smallint/usmallint | simple8b| simple8b | lz4/zlib/zstd/xz| zlib | medium| +| bigint/ubigint/timestamp | simple8b/delta-i | delta-i |lz4/zlib/zstd/xz | lz4| medium| |float/double | delta-d|delta-d |lz4/zlib/zstd/xz/tsz|lz4| medium| -|binary/nchar| disabled| disabled|lz4/zlib/zstd/xz| lz4| medium| -|bool| bit-packing| bit-packing| lz4/zlib/zstd/xz| lz4| medium| +|binary/nchar| disabled| disabled|lz4/zlib/zstd/xz| zstd| medium| +|bool| bit-packing| bit-packing| lz4/zlib/zstd/xz| zstd| medium| ## SQL 语法 diff --git a/docs/zh/14-reference/03-taos-sql/pic/database-keep.jpg b/docs/zh/14-reference/03-taos-sql/pic/database-keep.jpg new file mode 100644 index 0000000000..248a9041d3 Binary files /dev/null and b/docs/zh/14-reference/03-taos-sql/pic/database-keep.jpg differ diff --git a/docs/zh/14-reference/05-connector/14-java.mdx b/docs/zh/14-reference/05-connector/14-java.mdx index 0a167dd5ee..e8554ae668 100644 --- a/docs/zh/14-reference/05-connector/14-java.mdx +++ b/docs/zh/14-reference/05-connector/14-java.mdx @@ -34,6 +34,7 @@ REST 连接支持所有能运行 Java 的平台。 | taos-jdbcdriver 版本 | 主要变化 | TDengine 版本 | | :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: | | 3.4.0 | 1. 使用 jackson 库替换 fastjson 库;2. WebSocket 采用独立协议标识;3. 优化后台拉取线程使用,避免用户误用导致超时。 | - | +| 3.3.4 | 1. 解决了 getInt 在数据类型为 float 报错 | - | | 3.3.3 | 1. 解决了 WebSocket statement 关闭导致的内存泄漏 | - | | 3.3.2 | 1. 优化 WebSocket 连接下的参数绑定性能;2. 优化了对 mybatis 的支持 | - | | 3.3.0 | 1. 优化 WebSocket 连接下的数据传输性能;2. 支持跳过 SSL 验证,默认关闭 | 3.3.2.0 及更高版本 | diff --git a/docs/zh/14-reference/05-connector/30-python.mdx b/docs/zh/14-reference/05-connector/30-python.mdx index 8436c30249..3991477635 100644 --- a/docs/zh/14-reference/05-connector/30-python.mdx +++ b/docs/zh/14-reference/05-connector/30-python.mdx @@ -41,6 +41,7 @@ Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-con |Python Connector 版本|主要变化| |:-------------------:|:----:| +|2.7.16|新增订阅配置 (session.timeout.ms, max.poll.interval.ms)| |2.7.15|新增 VARBINARY 和 GEOMETRY 类型支持| |2.7.14|修复已知问题| |2.7.13|新增 tmq 同步提交 offset 接口| @@ -50,6 +51,7 @@ Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-con |Python WebSocket Connector 版本|主要变化| |:----------------------------:|:-----:| +|0.3.5|新增 VARBINARY 和 GEOMETRY 类型支持,修复已知问题| |0.3.2|优化 WebSocket sql 查询和插入性能,修改 readme 和 文档,修复已知问题| |0.2.9|已知问题修复| |0.2.5|1. 数据订阅支持获取消费进度和重置消费进度
2. 支持 schemaless
3. 支持 STMT| diff --git a/docs/zh/14-reference/05-connector/35-node.mdx b/docs/zh/14-reference/05-connector/35-node.mdx index d9512eae78..df2abfab3d 100644 --- a/docs/zh/14-reference/05-connector/35-node.mdx +++ b/docs/zh/14-reference/05-connector/35-node.mdx @@ -26,6 +26,7 @@ Node.js 连接器目前仅支持 WebSocket 连接器, 其通过 taosAdapter | Node.js 连接器 版本 | 主要变化 | TDengine 版本 | | :------------------: | :----------------------: | :----------------: | +| 3.1.2 | 对数据协议和解析进行了优化,性能得到大幅提升| 3.3.2.0 及更高版本 | | 3.1.1 | 优化了数据传输性能 | 3.3.2.0 及更高版本 | | 3.1.0 | 新版本发布,支持 WebSocket 连接 | 3.2.0.0 及更高版本 | diff --git a/docs/zh/26-tdinternal/01-arch.md b/docs/zh/26-tdinternal/01-arch.md index 8aa69e45d5..7091ca9661 100644 --- a/docs/zh/26-tdinternal/01-arch.md +++ b/docs/zh/26-tdinternal/01-arch.md @@ -293,6 +293,14 @@ TDengine 采纳了一种独特的时间驱动缓存管理策略,亦称为写 此外,考虑到物联网数据的特点,用户通常最关注的是数据的实时性,即最新产生的数据。TDengine 很好地利用了这一特点,优先将最新到达的(即当前状态)数据存储在缓存中。具体而言,TDengine 会将最新到达的数据直接存入缓存,以便快速响应用户对最新一条或多条数据的查询和分析需求,从而在整体上提高数据库查询的响应速度。从这个角度来看,通过合理设置数据库参数,TDengine 完全可以作为数据缓存来使用,这样就无须再部署 Redis 或其他额外的缓存系统。这种做法不仅有效简化了系统架构,还有助于降低运维成本。需要注意的是,一旦 TDengine 重启,缓存中的数据将被清除,所有先前缓存的数据都会被批量写入硬盘,而不会像专业的 Key-Value 缓存系统那样自动将之前缓存的数据重新加载回缓存。 +### last/last_row 缓存 + +在时序数据的场景中,查询表的最后一条记录(last_row)或最后一条非 NULL 记录(last)是一个常见的需求。为了提高 TDengine 对这种查询的响应速度,TSDB 为每张表的 last 和 last_row 数据提供了 LRU 缓存。LRU 缓存采用延迟加载策略,当首次查询某张表的 last 或 last_row 时,缓存模块会去内存池和磁盘文件加载数据,处理后放入LRU 缓存,并返回给查询模块继续处理;当有新的数据插入或删除时,如果缓存需要更新,会进行相应的更新操作;如果缓存中没有当前被写入表的数据,则直接跳过,无需其它操作。 + +此外在缓存配置更新的时候,也会更新缓存数据。比如,缓存功能默认是关闭的,用户使用命令开启缓存功能之后,就会在首次查询时加载数据;当关闭缓存开关时,会释放之前的缓存区。当查询某一个子表的 last 或 last_row 数据时,如果缓存中没有,则从内存池和磁盘文件加载对应的 last 或 last_row 数据到缓存中;当查询某一个超级表的 last 或 last_row 数据时,这个超级表对应的所有子表都需要加载到缓存中。 + +通过数据库参数 cachemodel 可以配置某一个数据库的缓存参数,默认值为 "none",表示不开启缓存,另外三个值为 "last_row","last_value","both";分别是开启 last_row 缓存,开启 last 缓存,和两个同时开启。缓存当前所使用的内存数量,可在通过 show vgroups; 命令,在 cacheload 列中进行查看,单位为字节。 + ### 持久化存储 TDengine 采用了一种数据驱动的策略来实现缓存数据的持久化存储。当 vnode 中的缓存数据积累到一定量时,为了避免阻塞后续数据的写入,TDengine 会启动落盘线程,将这些缓存数据写入持久化存储设备。在此过程中,TDengine 会创建新的数据库日志文件用于数据落盘,并在落盘成功后删除旧的日志文件,以防止日志文件无限制增长。 diff --git a/docs/zh/26-tdinternal/10-cache.md b/docs/zh/26-tdinternal/10-cache.md new file mode 100644 index 0000000000..698f4ee87a --- /dev/null +++ b/docs/zh/26-tdinternal/10-cache.md @@ -0,0 +1,62 @@ +--- +sidebar_label: 数据缓存 +title: 数据缓存 +toc_max_heading_level: 4 +--- +在现代物联网(IoT)和工业互联网(IIoT)应用中,数据的高效管理对系统性能和用户体验至关重要。为了应对高并发环境下的实时读写需求,TDengine 设计了一套完整的缓存机制,包括写缓存、读缓存、元数据缓存和文件系统缓存。这些缓存机制紧密结合,既能优化数据查询的响应速度,又能提高数据写入的效率,同时保障数据的可靠性和系统的高可用性。通过灵活配置缓存参数,TDengine 为用户提供了性能与成本之间的最佳平衡。 + +## 写缓存 + +TDengine 采用了一种创新的时间驱动缓存管理策略,亦称为写驱动的缓存管理机制。这一策略与传统的读驱动的缓存模式有所不同,其核心思想是将最新写入的数据优先保存在缓存中。当缓存容量达到预设的临界值时,系统会将最早存储的数据批量写入硬盘,从而实现缓存与硬盘之间的动态平衡。 + +在物联网数据应用中,用户往往最关注最近产生的数据,即设备的当前状态。TDengine 充分利用了这一业务特性,将最近到达的当前状态数据优先存储在缓存中,以便用户能够快速获取所需信息。 + +为了实现数据的分布式存储和高可用性,TDengine 引入了虚拟节点(vnode)的概念。每个 vnode 可以拥有多达 3 个副本,这些副本共同组成一个 vnode group,简称 vgroup。在创建数据库时,用户需要确定每个 vnode 的写入缓存大小,以确保数据的合理分配和高效存储。 + +创建数据库时的两个关键参数 `vgroups` 和 `buffer` 分别决定了数据库中的数据由多少个 vgroup 进行处理,以及为每个 vnode 分配多少写入缓存。通过合理配置这两个 +参数,用户可以根据实际需求调整数据库的性能和存储容量,从而实现最佳的性能和成本效益。 + +例 如, 下面的 SQL 创建了包含 10 个 vgroup,每个 vnode 占 用 256MB 内存的数据库。 +```sql +CREATE DATABASE POWER VGROUPS 10 BUFFER 256 CACHEMODEL 'NONE' PAGES 128 PAGESIZE 16; +``` + +缓存越大越好,但超过一定阈值后再增加缓存对写入性能提升并无帮助。 + +## 读缓存 + +TDengine 的读缓存机制专为高频实时查询场景设计,尤其适用于物联网和工业互联网等需要实时掌握设备状态的业务场景。在这些场景中,用户往往最关心最新的数据,如设备的当前读数或状态。 + +通过设置 cachemodel 参数,TDengine 用户可以灵活选择适合的缓存模式,包括缓存最新一行数据、每列最近的非 NULL 值,或同时缓存行和列的数据。这种灵活性使 TDengine 能根据具体业务需求提供精准优化,在物联网场景下尤为突出,助力用户快速访问设备的最新状态。 + 
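+例如,下面给出一个示意 SQL 片段(沿用上文写缓存示例中创建的 power 数据库,库名仅为示例),展示如何开启读缓存并查看其内存占用:
+
+```sql
+-- 将 power 数据库的缓存模式调整为同时缓存最近一行与每列最近的非 NULL 值
+ALTER DATABASE power CACHEMODEL 'both';
+-- 在 show vgroups 结果的 cacheload 列中查看缓存当前占用的内存(单位:字节)
+USE power;
+SHOW VGROUPS;
+```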
+这种设计不仅降低了查询的响应延迟,还能有效缓解存储系统的 I/O 压力。在高并发场景下,读缓存能够帮助系统维持更高的吞吐量,确保查询性能的稳定性。借助 TDengine 读缓存,用户无需再集成如 Redis 一类的外部缓存系统,避免了系统架构的复杂化,显著降低运维和部署成本。 + +此外,TDengine 的读缓存机制还能够根据实际业务场景灵活调整。在数据访问热点集中在最新记录的场景中,这种内置缓存能够显著提高用户体验,让关键数据的获取更加快速高效。相比传统缓存方案,这种无缝集成的缓存策略不仅简化了开发流程,还为用户提供了更高的性能保障。 + +关于 TDengine 读缓存的更多详细内容请看[读缓存](../../advanced/cache/) + +## 元数据缓存 + +为了提升查询和写入操作的效率,每个 vnode 都配备了缓存机制,用于存储其曾经获取过的元数据。这一元数据缓存的大小由创建数据库时的两个参数 pages 和 pagesize 共同决定。其中,pagesize 参数的单位是 KB,用于指定每个缓存页的大小。如下 SQL 会为数据库 power 的每个 vnode 创建 128 个 page、每个 page 16KB 的元数据缓存 + +```sql +CREATE DATABASE POWER PAGES 128 PAGESIZE 16; +``` + +## 文件系统缓存 + +TDengine 采用 WAL 技术作为基本的数据可靠性保障手段。WAL 是一种先进的数据保护机制,旨在确保在发生故障时能够迅速恢复数据。其核心原理在于,在数据实际写入数据存储层之前,先将其变更记录到一个日志文件中。这样一来,即便集群遭遇崩溃或其他故障,也能确保数据安全无损。 + +TDengine 利用这些日志文件实现故障前的状态恢复。在写入 WAL 的过程中,数据是以顺序追加的方式写入硬盘文件的。因此,文件系统缓存在此过程中发挥着关键作用,对写入性能产生显著影响。为了确保数据真正落盘,系统会调用 fsync 函数,该函数负责将文件系统缓存中的数据强制写入硬盘。 + +数据库参数 wal_level 和 wal_fsync_period 共同决定了 WAL 的保存行为。。 +- wal_level:此参数控制 WAL 的保存级别。级别 1 表示仅将数据写入 WAL,但不立即执行 fsync 函数;级别 2 则表示在写入 WAL 的同时执行 fsync 函数。默认情况下,wal_level 设为 1。虽然执行 fsync 函数可以提高数据的持久性,但相应地也会降低写入性能。 +- wal_fsync_period:当 wal_level 设置为 2 时,这个参数控制执行 fsync 的频率。设置为 0 则表示每次写入后立即执行 fsync,这可以确保数据的安全性,但可能会牺牲一些性能。当设置为大于 0 的数值时,则表示 fsync 周期,默认为 3000,范围是[1, 180000],单位毫秒。 + +```sql +CREATE DATABASE POWER WAL_LEVEL 2 WAL_FSYNC_PERIOD 3000; +``` + +在创建数据库时,用户可以根据需求选择不同的参数设置,以在性能和可靠性之间找到最佳平衡: +- 性能优先:将数据写入 WAL,但不立即执行 fsync 操作,此时新写入的数据仅保存在文件系统缓存中,尚未同步到磁盘。这种配置能够显著提高写入性能。 +- 可靠性优先:将数据写入 WAL 的同时执行 fsync 操作,将数据立即同步到磁盘,确保数据持久化,可靠性更高。 diff --git a/docs/zh/28-releases/03-notes/3.3.4.3.md b/docs/zh/28-releases/03-notes/3.3.4.3.md index 9ded536872..8ffd5802ed 100644 --- a/docs/zh/28-releases/03-notes/3.3.4.3.md +++ b/docs/zh/28-releases/03-notes/3.3.4.3.md @@ -3,6 +3,12 @@ title: 3.3.4.3 版本说明 sidebar_label: 3.3.4.3 description: 3.3.4.3 版本说明 --- + +### 行为变更及兼容性 +1. 多副本流计算中必须使用 snode +1. 增加了流计算的兼容性保证机制,避免后续函数变更产生新的兼容性问题,但之前版本的流计算必须重建,具体参见 https://docs.taosdata.com/advanced/stream/#流计算升级故障恢复 +1. 调整 case when 语句结果类型的判断方法 + ### 新特性 1. 新功能:流计算的 TWA 函数支持时间驱动的结果推送模式 1. 新功能:流计算的 Interp 函数支持时间驱动的结果推送模式 diff --git a/docs/zh/28-releases/03-notes/index.md b/docs/zh/28-releases/03-notes/index.md index 3268c088b2..d1a48ab9a8 100644 --- a/docs/zh/28-releases/03-notes/index.md +++ b/docs/zh/28-releases/03-notes/index.md @@ -3,6 +3,7 @@ title: 版本说明 sidebar_label: 版本说明 description: 各版本版本说明 --- -[3.3.4.3](./3.3.4.3) + +[3.3.4.3](./3.3.4.3) [3.3.3.0](./3.3.3.0) [3.3.2.0](./3.3.2.0) diff --git a/examples/c/CMakeLists.txt b/examples/c/CMakeLists.txt index 07fc2fd71b..e3c992f53f 100644 --- a/examples/c/CMakeLists.txt +++ b/examples/c/CMakeLists.txt @@ -1,7 +1,7 @@ PROJECT(TDengine) -IF (TD_LINUX) - INCLUDE_DIRECTORIES(. ${TD_SOURCE_DIR}/src/inc ${TD_SOURCE_DIR}/src/client/inc ${TD_SOURCE_DIR}/inc) +IF(TD_LINUX) + INCLUDE_DIRECTORIES(. ${TD_SOURCE_DIR}/src/inc ${TD_SOURCE_DIR}/src/client/inc ${TD_SOURCE_DIR}/inc) AUX_SOURCE_DIRECTORY(. 
SRC) add_executable(tmq "") @@ -12,58 +12,58 @@ IF (TD_LINUX) add_executable(asyncdemo "") target_sources(tmq - PRIVATE - "tmq.c" - ) + PRIVATE + "tmq.c" + ) target_sources(stream_demo - PRIVATE - "stream_demo.c" - ) + PRIVATE + "stream_demo.c" + ) target_sources(schemaless - PRIVATE - "schemaless.c" - ) + PRIVATE + "schemaless.c" + ) target_sources(prepare - PRIVATE + PRIVATE "prepare.c" - ) - + ) + target_sources(demo - PRIVATE + PRIVATE "demo.c" - ) + ) target_sources(asyncdemo - PRIVATE + PRIVATE "asyncdemo.c" - ) + ) target_link_libraries(tmq - taos - ) + ${TAOS_LIB} + ) target_link_libraries(stream_demo - taos - ) + ${TAOS_LIB} + ) target_link_libraries(schemaless - taos - ) + ${TAOS_LIB} + ) target_link_libraries(prepare - taos - ) - + ${TAOS_LIB} + ) + target_link_libraries(demo - taos - ) + ${TAOS_LIB} + ) target_link_libraries(asyncdemo - taos - ) + ${TAOS_LIB} + ) SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq) SET_TARGET_PROPERTIES(stream_demo PROPERTIES OUTPUT_NAME stream_demo) @@ -71,8 +71,9 @@ IF (TD_LINUX) SET_TARGET_PROPERTIES(prepare PROPERTIES OUTPUT_NAME prepare) SET_TARGET_PROPERTIES(demo PROPERTIES OUTPUT_NAME demo) SET_TARGET_PROPERTIES(asyncdemo PROPERTIES OUTPUT_NAME asyncdemo) -ENDIF () -IF (TD_DARWIN) - INCLUDE_DIRECTORIES(. ${TD_SOURCE_DIR}/src/inc ${TD_SOURCE_DIR}/src/client/inc ${TD_SOURCE_DIR}/inc) +ENDIF() + +IF(TD_DARWIN) + INCLUDE_DIRECTORIES(. ${TD_SOURCE_DIR}/src/inc ${TD_SOURCE_DIR}/src/client/inc ${TD_SOURCE_DIR}/inc) AUX_SOURCE_DIRECTORY(. SRC) -ENDIF () +ENDIF() diff --git a/include/libs/stream/streamMsg.h b/include/common/streamMsg.h similarity index 85% rename from include/libs/stream/streamMsg.h rename to include/common/streamMsg.h index 0ceaa93a72..3db92ba58d 100644 --- a/include/libs/stream/streamMsg.h +++ b/include/common/streamMsg.h @@ -17,12 +17,23 @@ #define TDENGINE_STREAMMSG_H #include "tmsg.h" -#include "trpc.h" +//#include "trpc.h" #ifdef __cplusplus extern "C" { #endif +typedef struct SStreamRetrieveReq SStreamRetrieveReq; +typedef struct SStreamDispatchReq SStreamDispatchReq; +typedef struct STokenBucket STokenBucket; +typedef struct SMetaHbInfo SMetaHbInfo; + +typedef struct SNodeUpdateInfo { + int32_t nodeId; + SEpSet prevEp; + SEpSet newEp; +} SNodeUpdateInfo; + typedef struct SStreamUpstreamEpInfo { int32_t nodeId; int32_t childId; @@ -170,8 +181,8 @@ typedef struct SStreamHbMsg { SArray* pUpdateNodes; // SArray, needs update the epsets in stream tasks for those nodes. 
} SStreamHbMsg; -int32_t tEncodeStreamHbMsg(SEncoder* pEncoder, const SStreamHbMsg* pRsp); -int32_t tDecodeStreamHbMsg(SDecoder* pDecoder, SStreamHbMsg* pRsp); +int32_t tEncodeStreamHbMsg(SEncoder* pEncoder, const SStreamHbMsg* pReq); +int32_t tDecodeStreamHbMsg(SDecoder* pDecoder, SStreamHbMsg* pReq); void tCleanupStreamHbMsg(SStreamHbMsg* pMsg); typedef struct { @@ -179,6 +190,9 @@ typedef struct { int32_t msgId; } SMStreamHbRspMsg; +int32_t tEncodeStreamHbRsp(SEncoder* pEncoder, const SMStreamHbRspMsg* pRsp); +int32_t tDecodeStreamHbRsp(SDecoder* pDecoder, SMStreamHbRspMsg* pRsp); + typedef struct SRetrieveChkptTriggerReq { SMsgHead head; int64_t streamId; @@ -189,6 +203,9 @@ typedef struct SRetrieveChkptTriggerReq { int64_t downstreamTaskId; } SRetrieveChkptTriggerReq; +int32_t tEncodeRetrieveChkptTriggerReq(SEncoder* pEncoder, const SRetrieveChkptTriggerReq* pReq); +int32_t tDecodeRetrieveChkptTriggerReq(SDecoder* pDecoder, SRetrieveChkptTriggerReq* pReq); + typedef struct SCheckpointTriggerRsp { int64_t streamId; int64_t checkpointId; @@ -198,6 +215,9 @@ typedef struct SCheckpointTriggerRsp { int32_t rspCode; } SCheckpointTriggerRsp; +int32_t tEncodeCheckpointTriggerRsp(SEncoder* pEncoder, const SCheckpointTriggerRsp* pRsp); +int32_t tDecodeCheckpointTriggerRsp(SDecoder* pDecoder, SCheckpointTriggerRsp* pRsp); + typedef struct SCheckpointReport { int64_t streamId; int32_t taskId; @@ -222,7 +242,7 @@ typedef struct SRestoreCheckpointInfo { int32_t nodeId; } SRestoreCheckpointInfo; -int32_t tEncodeRestoreCheckpointInfo (SEncoder* pEncoder, const SRestoreCheckpointInfo* pReq); +int32_t tEncodeRestoreCheckpointInfo(SEncoder* pEncoder, const SRestoreCheckpointInfo* pReq); int32_t tDecodeRestoreCheckpointInfo(SDecoder* pDecoder, SRestoreCheckpointInfo* pReq); typedef struct { @@ -232,10 +252,8 @@ typedef struct { int32_t reqType; } SStreamTaskRunReq; -typedef struct SCheckpointConsensusEntry { - SRestoreCheckpointInfo req; - int64_t ts; -} SCheckpointConsensusEntry; +int32_t tEncodeStreamTaskRunReq(SEncoder* pEncoder, const SStreamTaskRunReq* pReq); +int32_t tDecodeStreamTaskRunReq(SDecoder* pDecoder, SStreamTaskRunReq* pReq); #ifdef __cplusplus } diff --git a/include/common/tanalytics.h b/include/common/tanalytics.h index 85eb963129..d0af84ecfb 100644 --- a/include/common/tanalytics.h +++ b/include/common/tanalytics.h @@ -39,14 +39,14 @@ typedef struct { } SAnalyticsUrl; typedef enum { - ANAL_BUF_TYPE_JSON = 0, - ANAL_BUF_TYPE_JSON_COL = 1, - ANAL_BUF_TYPE_OTHERS, + ANALYTICS_BUF_TYPE_JSON = 0, + ANALYTICS_BUF_TYPE_JSON_COL = 1, + ANALYTICS_BUF_TYPE_OTHERS, } EAnalBufType; typedef enum { - ANAL_HTTP_TYPE_GET = 0, - ANAL_HTTP_TYPE_POST, + ANALYTICS_HTTP_TYPE_GET = 0, + ANALYTICS_HTTP_TYPE_POST, } EAnalHttpType; typedef struct { @@ -61,11 +61,11 @@ typedef struct { char fileName[TSDB_FILENAME_LEN]; int32_t numOfCols; SAnalyticsColBuf *pCols; -} SAnalBuf; +} SAnalyticBuf; int32_t taosAnalyticsInit(); void taosAnalyticsCleanup(); -SJson *taosAnalSendReqRetJson(const char *url, EAnalHttpType type, SAnalBuf *pBuf); +SJson *taosAnalSendReqRetJson(const char *url, EAnalHttpType type, SAnalyticBuf *pBuf); int32_t taosAnalGetAlgoUrl(const char *algoName, EAnalAlgoType type, char *url, int32_t urlLen); bool taosAnalGetOptStr(const char *option, const char *optName, char *optValue, int32_t optMaxLen); @@ -73,18 +73,18 @@ bool taosAnalGetOptInt(const char *option, const char *optName, int64_t *optV int64_t taosAnalGetVersion(); void taosAnalUpdate(int64_t newVer, SHashObj *pHash); -int32_t 
tsosAnalBufOpen(SAnalBuf *pBuf, int32_t numOfCols); -int32_t taosAnalBufWriteOptStr(SAnalBuf *pBuf, const char *optName, const char *optVal); -int32_t taosAnalBufWriteOptInt(SAnalBuf *pBuf, const char *optName, int64_t optVal); -int32_t taosAnalBufWriteOptFloat(SAnalBuf *pBuf, const char *optName, float optVal); -int32_t taosAnalBufWriteColMeta(SAnalBuf *pBuf, int32_t colIndex, int32_t colType, const char *colName); -int32_t taosAnalBufWriteDataBegin(SAnalBuf *pBuf); -int32_t taosAnalBufWriteColBegin(SAnalBuf *pBuf, int32_t colIndex); -int32_t taosAnalBufWriteColData(SAnalBuf *pBuf, int32_t colIndex, int32_t colType, void *colValue); -int32_t taosAnalBufWriteColEnd(SAnalBuf *pBuf, int32_t colIndex); -int32_t taosAnalBufWriteDataEnd(SAnalBuf *pBuf); -int32_t taosAnalBufClose(SAnalBuf *pBuf); -void taosAnalBufDestroy(SAnalBuf *pBuf); +int32_t tsosAnalBufOpen(SAnalyticBuf *pBuf, int32_t numOfCols); +int32_t taosAnalBufWriteOptStr(SAnalyticBuf *pBuf, const char *optName, const char *optVal); +int32_t taosAnalBufWriteOptInt(SAnalyticBuf *pBuf, const char *optName, int64_t optVal); +int32_t taosAnalBufWriteOptFloat(SAnalyticBuf *pBuf, const char *optName, float optVal); +int32_t taosAnalBufWriteColMeta(SAnalyticBuf *pBuf, int32_t colIndex, int32_t colType, const char *colName); +int32_t taosAnalBufWriteDataBegin(SAnalyticBuf *pBuf); +int32_t taosAnalBufWriteColBegin(SAnalyticBuf *pBuf, int32_t colIndex); +int32_t taosAnalBufWriteColData(SAnalyticBuf *pBuf, int32_t colIndex, int32_t colType, void *colValue); +int32_t taosAnalBufWriteColEnd(SAnalyticBuf *pBuf, int32_t colIndex); +int32_t taosAnalBufWriteDataEnd(SAnalyticBuf *pBuf); +int32_t taosAnalBufClose(SAnalyticBuf *pBuf); +void taosAnalBufDestroy(SAnalyticBuf *pBuf); const char *taosAnalAlgoStr(EAnalAlgoType algoType); EAnalAlgoType taosAnalAlgoInt(const char *algoName); diff --git a/include/common/tcommon.h b/include/common/tcommon.h index 1d9a9bcc61..4189d1ebdb 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -251,6 +251,7 @@ typedef struct SQueryTableDataCond { int32_t type; // data block load type: bool skipRollup; STimeWindow twindows; + STimeWindow extTwindows[2]; int64_t startVersion; int64_t endVersion; bool notLoadData; // response the actual data, not only the rows in the attribute of info.row of ssdatablock diff --git a/include/common/tdataformat.h b/include/common/tdataformat.h index 19f3e222d1..f899fc5589 100644 --- a/include/common/tdataformat.h +++ b/include/common/tdataformat.h @@ -57,9 +57,9 @@ const static uint8_t BIT2_MAP[4] = {0b11111100, 0b11110011, 0b11001111, 0b001111 #define ONE ((uint8_t)1) #define THREE ((uint8_t)3) #define DIV_8(i) ((i) >> 3) -#define MOD_8(i) ((i)&7) +#define MOD_8(i) ((i) & 7) #define DIV_4(i) ((i) >> 2) -#define MOD_4(i) ((i)&3) +#define MOD_4(i) ((i) & 3) #define MOD_4_TIME_2(i) (MOD_4(i) << 1) #define BIT1_SIZE(n) (DIV_8((n)-1) + 1) #define BIT2_SIZE(n) (DIV_4((n)-1) + 1) @@ -173,6 +173,8 @@ typedef struct { } SColDataCompressInfo; typedef void *(*xMallocFn)(void *, int32_t); +typedef int32_t (*checkWKBGeometryFn)(const unsigned char *geoWKB, size_t nGeom); +typedef int32_t (*initGeosFn)(); void tColDataDestroy(void *ph); void tColDataInit(SColData *pColData, int16_t cid, int8_t type, int8_t cflag); @@ -191,7 +193,8 @@ int32_t tColDataCompress(SColData *colData, SColDataCompressInfo *info, SBuffer int32_t tColDataDecompress(void *input, SColDataCompressInfo *info, SColData *colData, SBuffer *assist); // for stmt bind -int32_t tColDataAddValueByBind(SColData *pColData, 
TAOS_MULTI_BIND *pBind, int32_t buffMaxLen); +int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind, int32_t buffMaxLen, initGeosFn igeos, + checkWKBGeometryFn cgeos); int32_t tColDataSortMerge(SArray **arr); // for raw block @@ -378,7 +381,8 @@ int32_t tRowBuildFromBind(SBindInfo *infos, int32_t numOfInfos, bool infoSorted, SArray *rowArray); // stmt2 binding -int32_t tColDataAddValueByBind2(SColData *pColData, TAOS_STMT2_BIND *pBind, int32_t buffMaxLen); +int32_t tColDataAddValueByBind2(SColData *pColData, TAOS_STMT2_BIND *pBind, int32_t buffMaxLen, initGeosFn igeos, + checkWKBGeometryFn cgeos); typedef struct { int32_t columnId; diff --git a/include/common/tglobal.h b/include/common/tglobal.h index e6c471eaf1..5125c1caef 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -67,6 +67,7 @@ extern int64_t tsTickPerHour[3]; extern int32_t tsCountAlwaysReturnValue; extern float tsSelectivityRatio; extern int32_t tsTagFilterResCacheSize; +extern int32_t tsBypassFlag; // queue & threads extern int32_t tsNumOfRpcThreads; diff --git a/include/common/tmsg.h b/include/common/tmsg.h index bdf333b635..2294cf6f73 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -2187,8 +2187,9 @@ int32_t tSerializeSShowVariablesReq(void* buf, int32_t bufLen, SShowVariablesReq typedef struct { char name[TSDB_CONFIG_OPTION_LEN + 1]; - char value[TSDB_CONFIG_VALUE_LEN + 1]; + char value[TSDB_CONFIG_PATH_LEN + 1]; char scope[TSDB_CONFIG_SCOPE_LEN + 1]; + char info[TSDB_CONFIG_INFO_LEN + 1]; } SVariablesInfo; typedef struct { @@ -3797,7 +3798,14 @@ typedef struct { SMsgHead head; int64_t streamId; int32_t taskId; -} SVPauseStreamTaskReq, SVResetStreamTaskReq; +} SVPauseStreamTaskReq; + +typedef struct { + SMsgHead head; + int64_t streamId; + int32_t taskId; + int64_t chkptId; +} SVResetStreamTaskReq; typedef struct { char name[TSDB_STREAM_FNAME_LEN]; diff --git a/include/libs/catalog/catalog.h b/include/libs/catalog/catalog.h index df3f87973f..7c6f02513e 100644 --- a/include/libs/catalog/catalog.h +++ b/include/libs/catalog/catalog.h @@ -102,6 +102,7 @@ typedef struct SCatalogReq { bool svrVerRequired; bool forceUpdate; bool cloned; + bool forceFetchViewMeta; } SCatalogReq; typedef struct SMetaRes { diff --git a/include/libs/geometry/geosWrapper.h b/include/libs/geometry/geosWrapper.h index a5bc0cec17..d27d300b82 100644 --- a/include/libs/geometry/geosWrapper.h +++ b/include/libs/geometry/geosWrapper.h @@ -35,6 +35,7 @@ int32_t doGeomFromText(const char *inputWKT, unsigned char **outputGeom, size_t int32_t initCtxAsText(); int32_t doAsText(const unsigned char *inputGeom, size_t size, char **outputWKT); +int32_t checkWKB(const unsigned char *wkb, size_t size); int32_t initCtxRelationFunc(); int32_t doIntersects(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1, const GEOSGeometry *geom2, @@ -47,11 +48,12 @@ int32_t doCovers(const GEOSGeometry *geom1, const GEOSPreparedGeometry *prepared bool swapped, char *res); int32_t doContains(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1, const GEOSGeometry *geom2, bool swapped, char *res); -int32_t doContainsProperly(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1, const GEOSGeometry *geom2, - bool swapped, char *res); +int32_t doContainsProperly(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1, + const GEOSGeometry *geom2, bool swapped, char *res); -int32_t readGeometry(const unsigned char *input, GEOSGeometry **outputGeom, const 
GEOSPreparedGeometry **outputPreparedGeom); -void destroyGeometry(GEOSGeometry **geom, const GEOSPreparedGeometry **preparedGeom); +int32_t readGeometry(const unsigned char *input, GEOSGeometry **outputGeom, + const GEOSPreparedGeometry **outputPreparedGeom); +void destroyGeometry(GEOSGeometry **geom, const GEOSPreparedGeometry **preparedGeom); #ifdef __cplusplus } diff --git a/include/libs/nodes/cmdnodes.h b/include/libs/nodes/cmdnodes.h index 514eddbc24..867f8c8efc 100644 --- a/include/libs/nodes/cmdnodes.h +++ b/include/libs/nodes/cmdnodes.h @@ -42,10 +42,11 @@ extern "C" { #define SHOW_CREATE_VIEW_RESULT_FIELD1_LEN (TSDB_VIEW_FNAME_LEN + 4 + VARSTR_HEADER_SIZE) #define SHOW_CREATE_VIEW_RESULT_FIELD2_LEN (TSDB_MAX_ALLOWED_SQL_LEN + VARSTR_HEADER_SIZE) -#define SHOW_LOCAL_VARIABLES_RESULT_COLS 3 +#define SHOW_LOCAL_VARIABLES_RESULT_COLS 4 #define SHOW_LOCAL_VARIABLES_RESULT_FIELD1_LEN (TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE) -#define SHOW_LOCAL_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE) +#define SHOW_LOCAL_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_PATH_LEN + VARSTR_HEADER_SIZE) #define SHOW_LOCAL_VARIABLES_RESULT_FIELD3_LEN (TSDB_CONFIG_SCOPE_LEN + VARSTR_HEADER_SIZE) +#define SHOW_LOCAL_VARIABLES_RESULT_FIELD4_LEN (TSDB_CONFIG_INFO_LEN + VARSTR_HEADER_SIZE) #define COMPACT_DB_RESULT_COLS 3 #define COMPACT_DB_RESULT_FIELD1_LEN 32 @@ -321,7 +322,7 @@ typedef struct SAlterDnodeStmt { typedef struct { ENodeType type; - char url[TSDB_ANAL_ANODE_URL_LEN + 3]; + char url[TSDB_ANALYTIC_ANODE_URL_LEN + 3]; } SCreateAnodeStmt; typedef struct { diff --git a/include/libs/nodes/nodes.h b/include/libs/nodes/nodes.h index 72dd3ef3e0..6384c536ce 100644 --- a/include/libs/nodes/nodes.h +++ b/include/libs/nodes/nodes.h @@ -174,6 +174,7 @@ char* nodesGetNameFromColumnNode(SNode* pNode); int32_t nodesGetOutputNumFromSlotList(SNodeList* pSlots); void nodesSortList(SNodeList** pList, int32_t (*)(SNode* pNode1, SNode* pNode2)); void destroyFuncParam(void* pFuncStruct); +int32_t nodesListDeduplicate(SNodeList** pList); #ifdef __cplusplus } diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 48852e5552..89bc27a1fa 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -334,7 +334,7 @@ typedef struct SWindowLogicNode { int64_t windowSliding; SNodeList* pTsmaSubplans; SNode* pAnomalyExpr; - char anomalyOpt[TSDB_ANAL_ALGO_OPTION_LEN]; + char anomalyOpt[TSDB_ANALYTIC_ALGO_OPTION_LEN]; } SWindowLogicNode; typedef struct SFillLogicNode { @@ -740,7 +740,7 @@ typedef SCountWinodwPhysiNode SStreamCountWinodwPhysiNode; typedef struct SAnomalyWindowPhysiNode { SWindowPhysiNode window; SNode* pAnomalyKey; - char anomalyOpt[TSDB_ANAL_ALGO_OPTION_LEN]; + char anomalyOpt[TSDB_ANALYTIC_ALGO_OPTION_LEN]; } SAnomalyWindowPhysiNode; typedef struct SSortPhysiNode { diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h index 763882ab3a..7af74a347a 100644 --- a/include/libs/nodes/querynodes.h +++ b/include/libs/nodes/querynodes.h @@ -351,7 +351,7 @@ typedef struct SAnomalyWindowNode { ENodeType type; // QUERY_NODE_ANOMALY_WINDOW SNode* pCol; // timestamp primary key SNode* pExpr; - char anomalyOpt[TSDB_ANAL_ALGO_OPTION_LEN]; + char anomalyOpt[TSDB_ANALYTIC_ALGO_OPTION_LEN]; } SAnomalyWindowNode; typedef enum EFillMode { diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 2cf791c8da..6b8e9f12a6 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -319,11 
+319,6 @@ typedef struct SSTaskBasicInfo { SInterval interval; } SSTaskBasicInfo; -typedef struct SStreamRetrieveReq SStreamRetrieveReq; -typedef struct SStreamDispatchReq SStreamDispatchReq; -typedef struct STokenBucket STokenBucket; -typedef struct SMetaHbInfo SMetaHbInfo; - typedef struct SDispatchMsgInfo { SStreamDispatchReq* pData; // current dispatch data @@ -626,11 +621,11 @@ typedef struct STaskStatusEntry { STaskCkptInfo checkpointInfo; } STaskStatusEntry; -typedef struct SNodeUpdateInfo { - int32_t nodeId; - SEpSet prevEp; - SEpSet newEp; -} SNodeUpdateInfo; +//typedef struct SNodeUpdateInfo { +// int32_t nodeId; +// SEpSet prevEp; +// SEpSet newEp; +//} SNodeUpdateInfo; typedef struct SStreamTaskState { ETaskStatus state; @@ -643,6 +638,11 @@ typedef struct SCheckpointConsensusInfo { int64_t streamId; } SCheckpointConsensusInfo; +typedef struct SCheckpointConsensusEntry { + SRestoreCheckpointInfo req; + int64_t ts; +} SCheckpointConsensusEntry; + void streamSetupScheduleTrigger(SStreamTask* pTask); // dispatch related @@ -718,6 +718,7 @@ int32_t streamTaskInitTriggerDispatchInfo(SStreamTask* pTask); void streamTaskSetTriggerDispatchConfirmed(SStreamTask* pTask, int32_t vgId); int32_t streamTaskSendCheckpointTriggerMsg(SStreamTask* pTask, int32_t dstTaskId, int32_t downstreamNodeId, SRpcHandleInfo* pInfo, int32_t code); +void streamTaskSetFailedCheckpointId(SStreamTask* pTask, int64_t failedId); int32_t streamQueueGetNumOfItems(const SStreamQueue* pQueue); int32_t streamQueueGetNumOfUnAccessedItems(const SStreamQueue* pQueue); diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 2c811495fd..6cedaeeef1 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -491,13 +491,14 @@ int32_t taosGetErrSize(); #define TSDB_CODE_MND_ANODE_TOO_MANY_ALGO_TYPE TAOS_DEF_ERROR_CODE(0, 0x0438) // analysis -#define TSDB_CODE_ANAL_URL_RSP_IS_NULL TAOS_DEF_ERROR_CODE(0, 0x0440) -#define TSDB_CODE_ANAL_URL_CANT_ACCESS TAOS_DEF_ERROR_CODE(0, 0x0441) -#define TSDB_CODE_ANAL_ALGO_NOT_FOUND TAOS_DEF_ERROR_CODE(0, 0x0442) -#define TSDB_CODE_ANAL_ALGO_NOT_LOAD TAOS_DEF_ERROR_CODE(0, 0x0443) -#define TSDB_CODE_ANAL_BUF_INVALID_TYPE TAOS_DEF_ERROR_CODE(0, 0x0444) -#define TSDB_CODE_ANAL_ANODE_RETURN_ERROR TAOS_DEF_ERROR_CODE(0, 0x0445) -#define TSDB_CODE_ANAL_ANODE_TOO_MANY_ROWS TAOS_DEF_ERROR_CODE(0, 0x0446) +#define TSDB_CODE_ANA_URL_RSP_IS_NULL TAOS_DEF_ERROR_CODE(0, 0x0440) +#define TSDB_CODE_ANA_URL_CANT_ACCESS TAOS_DEF_ERROR_CODE(0, 0x0441) +#define TSDB_CODE_ANA_ALGO_NOT_FOUND TAOS_DEF_ERROR_CODE(0, 0x0442) +#define TSDB_CODE_ANA_ALGO_NOT_LOAD TAOS_DEF_ERROR_CODE(0, 0x0443) +#define TSDB_CODE_ANA_BUF_INVALID_TYPE TAOS_DEF_ERROR_CODE(0, 0x0444) +#define TSDB_CODE_ANA_ANODE_RETURN_ERROR TAOS_DEF_ERROR_CODE(0, 0x0445) +#define TSDB_CODE_ANA_ANODE_TOO_MANY_ROWS TAOS_DEF_ERROR_CODE(0, 0x0446) +#define TSDB_CODE_ANA_WN_DATA TAOS_DEF_ERROR_CODE(0, 0x0447) // mnode-sma #define TSDB_CODE_MND_SMA_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0480) @@ -1011,6 +1012,7 @@ int32_t taosGetErrSize(); #define TSDB_CODE_STREAM_CONFLICT_EVENT TAOS_DEF_ERROR_CODE(0, 0x4106) #define TSDB_CODE_STREAM_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x4107) #define TSDB_CODE_STREAM_INPUTQ_FULL TAOS_DEF_ERROR_CODE(0, 0x4108) +#define TSDB_CODE_STREAM_INVLD_CHKPT TAOS_DEF_ERROR_CODE(0, 0x4109) // TDLite #define TSDB_CODE_TDLITE_IVLD_OPEN_FLAGS TAOS_DEF_ERROR_CODE(0, 0x5100) diff --git a/include/util/tbuffer.inc b/include/util/tbuffer.inc index 39090fb7fa..633517ca58 100644 --- a/include/util/tbuffer.inc +++ 
b/include/util/tbuffer.inc @@ -186,11 +186,25 @@ static int32_t tBufferGetI16(SBufferReader *reader, int16_t *value) { } static int32_t tBufferGetI32(SBufferReader *reader, int32_t *value) { - return tBufferGet(reader, sizeof(*value), value); + if (reader->offset + sizeof(int32_t) > reader->buffer->size) { + return TSDB_CODE_OUT_OF_RANGE; + } + if (value) { + *value = *(int32_t*)BR_PTR(reader); + } + reader->offset += sizeof(int32_t); + return 0; } static int32_t tBufferGetI64(SBufferReader *reader, int64_t *value) { - return tBufferGet(reader, sizeof(*value), value); + if (reader->offset + sizeof(int64_t) > reader->buffer->size) { + return TSDB_CODE_OUT_OF_RANGE; + } + if (value) { + *value = *(int64_t*)BR_PTR(reader); + } + reader->offset += sizeof(int64_t); + return 0; } static int32_t tBufferGetU8(SBufferReader *reader, uint8_t *value) { return tBufferGet(reader, sizeof(*value), value); } diff --git a/include/util/tdef.h b/include/util/tdef.h index ba30e78c59..c69d4f8f19 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -195,9 +195,9 @@ static const EOperatorType OPERATOR_ARRAY[] = { OP_TYPE_MULTI, OP_TYPE_DIV, OP_TYPE_REM, - + OP_TYPE_MINUS, - + OP_TYPE_BIT_AND, OP_TYPE_BIT_OR, @@ -213,7 +213,7 @@ static const EOperatorType OPERATOR_ARRAY[] = { OP_TYPE_NOT_LIKE, OP_TYPE_MATCH, OP_TYPE_NMATCH, - + OP_TYPE_IS_NULL, OP_TYPE_IS_NOT_NULL, OP_TYPE_IS_TRUE, @@ -222,7 +222,7 @@ static const EOperatorType OPERATOR_ARRAY[] = { OP_TYPE_IS_NOT_TRUE, OP_TYPE_IS_NOT_FALSE, OP_TYPE_IS_NOT_UNKNOWN, - //OP_TYPE_COMPARE_MAX_VALUE, + //OP_TYPE_COMPARE_MAX_VALUE, OP_TYPE_JSON_GET_VALUE, OP_TYPE_JSON_CONTAINS, @@ -335,12 +335,13 @@ typedef enum ELogicConditionType { #define TSDB_SLOW_QUERY_SQL_LEN 512 #define TSDB_SHOW_SUBQUERY_LEN 1000 #define TSDB_LOG_VAR_LEN 32 -#define TSDB_ANAL_ANODE_URL_LEN 128 -#define TSDB_ANAL_ALGO_NAME_LEN 64 -#define TSDB_ANAL_ALGO_TYPE_LEN 24 -#define TSDB_ANAL_ALGO_KEY_LEN (TSDB_ANAL_ALGO_NAME_LEN + 9) -#define TSDB_ANAL_ALGO_URL_LEN (TSDB_ANAL_ANODE_URL_LEN + TSDB_ANAL_ALGO_TYPE_LEN + 1) -#define TSDB_ANAL_ALGO_OPTION_LEN 256 + +#define TSDB_ANALYTIC_ANODE_URL_LEN 128 +#define TSDB_ANALYTIC_ALGO_NAME_LEN 64 +#define TSDB_ANALYTIC_ALGO_TYPE_LEN 24 +#define TSDB_ANALYTIC_ALGO_KEY_LEN (TSDB_ANALYTIC_ALGO_NAME_LEN + 9) +#define TSDB_ANALYTIC_ALGO_URL_LEN (TSDB_ANALYTIC_ANODE_URL_LEN + TSDB_ANALYTIC_ALGO_TYPE_LEN + 1) +#define TSDB_ANALYTIC_ALGO_OPTION_LEN 256 #define TSDB_MAX_EP_NUM 10 @@ -620,6 +621,16 @@ enum { enum { RAND_ERR_MEMORY = 1, RAND_ERR_FILE = 2, RAND_ERR_NETWORK = 4 }; +/** + * RB: return before + * RA: return after + * NR: not return, skip and go on following steps + */ +#define TSDB_BYPASS_RB_RPC_SEND_SUBMIT 0x01u +#define TSDB_BYPASS_RA_RPC_RECV_SUBMIT 0x02u +#define TSDB_BYPASS_RB_TSDB_WRITE_MEM 0x04u +#define TSDB_BYPASS_RB_TSDB_COMMIT 0x08u + #define DEFAULT_HANDLE 0 #define MNODE_HANDLE 1 #define QNODE_HANDLE -1 @@ -631,6 +642,8 @@ enum { RAND_ERR_MEMORY = 1, RAND_ERR_FILE = 2, RAND_ERR_NETWORK = 4 }; #define TSDB_CONFIG_VALUE_LEN 64 #define TSDB_CONFIG_SCOPE_LEN 8 #define TSDB_CONFIG_NUMBER 16 +#define TSDB_CONFIG_PATH_LEN 4096 +#define TSDB_CONFIG_INFO_LEN 64 #define QUERY_ID_SIZE 20 #define QUERY_OBJ_ID_SIZE 18 diff --git a/source/client/CMakeLists.txt b/source/client/CMakeLists.txt index 6d5f006517..2113aa7921 100644 --- a/source/client/CMakeLists.txt +++ b/source/client/CMakeLists.txt @@ -5,24 +5,24 @@ if(TD_ENTERPRISE) endif() if(TD_WINDOWS) - add_library(taos SHARED ${CLIENT_SRC} ${CMAKE_CURRENT_SOURCE_DIR}/src/taos.rc.in) + 
add_library(${TAOS_LIB} SHARED ${CLIENT_SRC} ${CMAKE_CURRENT_SOURCE_DIR}/src/taos.rc.in) else() - add_library(taos SHARED ${CLIENT_SRC}) + add_library(${TAOS_LIB} SHARED ${CLIENT_SRC}) endif() if(${TD_DARWIN}) - target_compile_options(taos PRIVATE -Wno-error=deprecated-non-prototype) + target_compile_options(${TAOS_LIB} PRIVATE -Wno-error=deprecated-non-prototype) endif() INCLUDE_DIRECTORIES(jni) target_include_directories( - taos + ${TAOS_LIB} PUBLIC "${TD_SOURCE_DIR}/include/client" PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" ) target_link_libraries( - taos + ${TAOS_LIB} INTERFACE api PRIVATE os util common transport monitor nodes parser command planner catalog scheduler function qcom geometry ) @@ -36,32 +36,32 @@ else() endif() set_target_properties( - taos + ${TAOS_LIB} PROPERTIES CLEAN_DIRECT_OUTPUT 1 ) set_target_properties( - taos + ${TAOS_LIB} PROPERTIES VERSION ${TD_VER_NUMBER} SOVERSION 1 ) -add_library(taos_static STATIC ${CLIENT_SRC}) +add_library(${TAOS_LIB_STATIC} STATIC ${CLIENT_SRC}) if(${TD_DARWIN}) - target_compile_options(taos_static PRIVATE -Wno-error=deprecated-non-prototype) + target_compile_options(${TAOS_LIB_STATIC} PRIVATE -Wno-error=deprecated-non-prototype) endif() target_include_directories( - taos_static + ${TAOS_LIB_STATIC} PUBLIC "${TD_SOURCE_DIR}/include/client" PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" ) target_link_libraries( - taos_static + ${TAOS_LIB_STATIC} INTERFACE api PRIVATE os util common transport monitor nodes parser command planner catalog scheduler function qcom geometry ) diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h index 90505ed25a..da56dcf75e 100644 --- a/source/client/inc/clientInt.h +++ b/source/client/inc/clientInt.h @@ -47,10 +47,11 @@ enum { RES_TYPE__TMQ_BATCH_META, }; -#define SHOW_VARIABLES_RESULT_COLS 3 +#define SHOW_VARIABLES_RESULT_COLS 4 #define SHOW_VARIABLES_RESULT_FIELD1_LEN (TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE) #define SHOW_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE) #define SHOW_VARIABLES_RESULT_FIELD3_LEN (TSDB_CONFIG_SCOPE_LEN + VARSTR_HEADER_SIZE) +#define SHOW_VARIABLES_RESULT_FIELD4_LEN (TSDB_CONFIG_INFO_LEN + VARSTR_HEADER_SIZE) #define TD_RES_QUERY(res) (*(int8_t*)(res) == RES_TYPE__QUERY) #define TD_RES_TMQ(res) (*(int8_t*)(res) == RES_TYPE__TMQ) diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 8a0b1ddaab..2c98ecac1b 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -1816,7 +1816,7 @@ int32_t doProcessMsgFromServerImpl(SRpcMsg* pMsg, SEpSet* pEpSet) { .handleRefId = pMsg->info.refId, .pEpSet = pEpSet}; - if (pMsg->contLen > 0) { + if (pMsg->code != TSDB_CODE_OUT_OF_MEMORY && pMsg->contLen > 0) { buf.pData = taosMemoryCalloc(1, pMsg->contLen); if (buf.pData == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -2211,7 +2211,7 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo) { static int32_t doConvertJson(SReqResultInfo* pResultInfo) { int32_t numOfRows = pResultInfo->numOfRows; int32_t numOfCols = pResultInfo->numOfCols; - bool needConvert = false; + bool needConvert = false; for (int32_t i = 0; i < numOfCols; ++i) { if (pResultInfo->fields[i].type == TSDB_DATA_TYPE_JSON) { needConvert = true; @@ -2400,8 +2400,8 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4) { p += sizeof(int32_t); if (rows != pResultInfo->numOfRows || cols != pResultInfo->numOfCols) { - tscError("setResultDataPtr paras error:rows;%d numOfRows:%" PRId64 " cols:%d numOfCols:%d", rows, 
pResultInfo->numOfRows, cols, - pResultInfo->numOfCols); + tscError("setResultDataPtr paras error:rows;%d numOfRows:%" PRId64 " cols:%d numOfCols:%d", rows, + pResultInfo->numOfRows, cols, pResultInfo->numOfCols); return TSDB_CODE_TSC_INTERNAL_ERROR; } @@ -2577,8 +2577,7 @@ int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableR // TODO handle the compressed case pResultInfo->totalRows += pResultInfo->numOfRows; - int32_t code = - setResultDataPtr(pResultInfo, convertUcs4); + int32_t code = setResultDataPtr(pResultInfo, convertUcs4); return code; } @@ -3032,13 +3031,13 @@ static void fetchCallback(void* pResult, void* param, int32_t code) { if (code != TSDB_CODE_SUCCESS) { pRequest->code = code; taosMemoryFreeClear(pResultInfo->pData); - pRequest->body.fetchFp(((SSyncQueryParam*)pRequest->body.interParam)->userParam, pRequest, 0); + pRequest->body.fetchFp(((SSyncQueryParam*)pRequest->body.interParam)->userParam, pRequest, code); return; } if (pRequest->code != TSDB_CODE_SUCCESS) { taosMemoryFreeClear(pResultInfo->pData); - pRequest->body.fetchFp(((SSyncQueryParam*)pRequest->body.interParam)->userParam, pRequest, 0); + pRequest->body.fetchFp(((SSyncQueryParam*)pRequest->body.interParam)->userParam, pRequest, pRequest->code); return; } diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c index 9a723218ff..e182cd97ee 100644 --- a/source/client/src/clientMsgHandler.c +++ b/source/client/src/clientMsgHandler.c @@ -541,6 +541,10 @@ static int32_t buildShowVariablesBlock(SArray* pVars, SSDataBlock** block) { infoData.info.bytes = SHOW_VARIABLES_RESULT_FIELD3_LEN; TSDB_CHECK_NULL(taosArrayPush(pBlock->pDataBlock, &infoData), code, line, END, terrno); + infoData.info.type = TSDB_DATA_TYPE_VARCHAR; + infoData.info.bytes = SHOW_VARIABLES_RESULT_FIELD4_LEN; + TSDB_CHECK_NULL(taosArrayPush(pBlock->pDataBlock, &infoData), code, line, END, terrno); + int32_t numOfCfg = taosArrayGetSize(pVars); code = blockDataEnsureCapacity(pBlock, numOfCfg); TSDB_CHECK_CODE(code, line, END); @@ -569,6 +573,13 @@ static int32_t buildShowVariablesBlock(SArray* pVars, SSDataBlock** block) { TSDB_CHECK_NULL(pColInfo, code, line, END, terrno); code = colDataSetVal(pColInfo, i, scope, false); TSDB_CHECK_CODE(code, line, END); + + char info[TSDB_CONFIG_INFO_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_WITH_MAXSIZE_TO_VARSTR(info, pInfo->info, TSDB_CONFIG_INFO_LEN + VARSTR_HEADER_SIZE); + pColInfo = taosArrayGet(pBlock->pDataBlock, c++); + TSDB_CHECK_NULL(pColInfo, code, line, END, terrno); + code = colDataSetVal(pColInfo, i, info, false); + TSDB_CHECK_CODE(code, line, END); } pBlock->info.rows = numOfCfg; @@ -825,7 +836,7 @@ int32_t processCompactDbRsp(void* param, SDataBuf* pMsg, int32_t code) { tscError("failed to post semaphore"); } } - return code; + return code; } __async_send_cb_fn_t getMsgRspHandle(int32_t msgType) { @@ -845,7 +856,7 @@ __async_send_cb_fn_t getMsgRspHandle(int32_t msgType) { case TDMT_MND_SHOW_VARIABLES: return processShowVariablesRsp; case TDMT_MND_COMPACT_DB: - return processCompactDbRsp; + return processCompactDbRsp; default: return genericRspCallback; } diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 4d6a6fbff7..902e581bfd 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -1872,13 +1872,17 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) { if (tmq->epTimer == NULL){ tmq->epTimer = taosTmrStart(tmqAssignAskEpTask, DEFAULT_ASKEP_INTERVAL, (void*)(tmq->refId), 
tmqMgmt.timer); + if (tmq->epTimer == NULL) { + code = TSDB_CODE_TSC_INTERNAL_ERROR; + goto END; + } } - if (tmq->commitTimer == NULL){ + if (tmq->autoCommit && tmq->commitTimer == NULL){ tmq->commitTimer = taosTmrStart(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, (void*)(tmq->refId), tmqMgmt.timer); - } - if (tmq->epTimer == NULL || tmq->commitTimer == NULL) { - code = TSDB_CODE_TSC_INTERNAL_ERROR; - goto END; + if (tmq->commitTimer == NULL) { + code = TSDB_CODE_TSC_INTERNAL_ERROR; + goto END; + } } END: diff --git a/source/client/test/CMakeLists.txt b/source/client/test/CMakeLists.txt index 7ca3086871..9393bfc449 100644 --- a/source/client/test/CMakeLists.txt +++ b/source/client/test/CMakeLists.txt @@ -8,31 +8,31 @@ AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) ADD_EXECUTABLE(clientTest clientTests.cpp) TARGET_LINK_LIBRARIES( clientTest - os util common transport parser catalog scheduler gtest taos_static qcom executor function + os util common transport parser catalog scheduler gtest ${TAOS_LIB_STATIC} qcom executor function ) ADD_EXECUTABLE(tmqTest tmqTest.cpp) TARGET_LINK_LIBRARIES( tmqTest - PUBLIC os util common transport parser catalog scheduler function gtest taos_static qcom + PUBLIC os util common transport parser catalog scheduler function gtest ${TAOS_LIB_STATIC} qcom ) ADD_EXECUTABLE(smlTest smlTest.cpp) TARGET_LINK_LIBRARIES( smlTest - PUBLIC os util common transport parser catalog scheduler function gtest taos_static qcom geometry + PUBLIC os util common transport parser catalog scheduler function gtest ${TAOS_LIB_STATIC} qcom geometry ) #ADD_EXECUTABLE(clientMonitorTest clientMonitorTests.cpp) #TARGET_LINK_LIBRARIES( # clientMonitorTest -# PUBLIC os util common transport monitor parser catalog scheduler function gtest taos_static qcom executor +# PUBLIC os util common transport monitor parser catalog scheduler function gtest ${TAOS_LIB_STATIC} qcom executor #) ADD_EXECUTABLE(userOperTest ../../../tests/script/api/passwdTest.c) TARGET_LINK_LIBRARIES( userOperTest - PUBLIC taos + PUBLIC ${TAOS_LIB} ) TARGET_INCLUDE_DIRECTORIES( diff --git a/source/common/CMakeLists.txt b/source/common/CMakeLists.txt index f10eb6a611..39380a0644 100644 --- a/source/common/CMakeLists.txt +++ b/source/common/CMakeLists.txt @@ -1,4 +1,7 @@ aux_source_directory(src COMMON_SRC) +aux_source_directory(src/msg COMMON_MSG_SRC) + +LIST(APPEND COMMON_SRC ${COMMON_MSG_SRC}) if(TD_ENTERPRISE) LIST(APPEND COMMON_SRC ${TD_ENTERPRISE_DIR}/src/plugins/common/src/tglobal.c) diff --git a/source/libs/stream/src/streamMsg.c b/source/common/src/msg/streamMsg.c similarity index 75% rename from source/libs/stream/src/streamMsg.c rename to source/common/src/msg/streamMsg.c index 193daa0cc4..c92ab52ac1 100644 --- a/source/libs/stream/src/streamMsg.c +++ b/source/common/src/msg/streamMsg.c @@ -15,8 +15,48 @@ #include "streamMsg.h" #include "os.h" -#include "tstream.h" -#include "streamInt.h" +#include "tcommon.h" + +typedef struct STaskId { + int64_t streamId; + int64_t taskId; +} STaskId; + +typedef struct STaskCkptInfo { + int64_t latestId; // saved checkpoint id + int64_t latestVer; // saved checkpoint ver + int64_t latestTime; // latest checkpoint time + int64_t latestSize; // latest checkpoint size + int8_t remoteBackup; // latest checkpoint backup done + int64_t activeId; // current active checkpoint id + int32_t activeTransId; // checkpoint trans id + int8_t failed; // denote if the checkpoint is failed or not + int8_t consensusChkptId; // required the consensus-checkpointId + int64_t 
consensusTs; // +} STaskCkptInfo; + +typedef struct STaskStatusEntry { + STaskId id; + int32_t status; + int32_t statusLastDuration; // to record the last duration of current status + int64_t stage; + int32_t nodeId; + SVersionRange verRange; // start/end version in WAL, only valid for source task + int64_t processedVer; // only valid for source task + double inputQUsed; // in MiB + double inputRate; + double procsThroughput; // duration between one element put into input queue and being processed. + double procsTotal; // duration between one element put into input queue and being processed. + double outputThroughput; // the size of dispatched result blocks in bytes + double outputTotal; // the size of dispatched result blocks in bytes + double sinkQuota; // existed quota size for sink task + double sinkDataSize; // sink to dst data size + int64_t startTime; + int64_t startCheckpointId; + int64_t startCheckpointVer; + int64_t hTaskId; + STaskCkptInfo checkpointInfo; +} STaskStatusEntry; int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamUpstreamEpInfo* pInfo) { TAOS_CHECK_RETURN(tEncodeI32(pEncoder, pInfo->taskId)); @@ -289,7 +329,7 @@ int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatchReq* p TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pReq->totalLen)); if (taosArrayGetSize(pReq->data) != pReq->blockNum || taosArrayGetSize(pReq->dataLen) != pReq->blockNum) { - stError("invalid dispatch req msg"); + uError("invalid dispatch req msg"); TAOS_CHECK_EXIT(TSDB_CODE_INVALID_MSG); } @@ -605,173 +645,92 @@ void tCleanupStreamHbMsg(SStreamHbMsg* pMsg) { pMsg->numOfTasks = -1; } -int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) { +int32_t tEncodeStreamHbRsp(SEncoder* pEncoder, const SMStreamHbRspMsg* pRsp) { int32_t code = 0; int32_t lino; TAOS_CHECK_EXIT(tStartEncode(pEncoder)); - TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->ver)); - TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->id.streamId)); - TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->id.taskId)); - TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->info.trigger)); - TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->info.taskLevel)); - TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->outputInfo.type)); - TAOS_CHECK_EXIT(tEncodeI16(pEncoder, pTask->msgInfo.msgType)); - - TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->status.taskStatus)); - TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->status.schedStatus)); - - TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->info.selfChildId)); - TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->info.nodeId)); - TAOS_CHECK_EXIT(tEncodeSEpSet(pEncoder, &pTask->info.epSet)); - TAOS_CHECK_EXIT(tEncodeSEpSet(pEncoder, &pTask->info.mnodeEpset)); - - TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->chkInfo.checkpointId)); - TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->chkInfo.checkpointVer)); - TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->info.fillHistory)); - - TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->hTaskInfo.id.streamId)); - int32_t taskId = pTask->hTaskInfo.id.taskId; - TAOS_CHECK_EXIT(tEncodeI32(pEncoder, taskId)); - - TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->streamTaskId.streamId)); - taskId = pTask->streamTaskId.taskId; - TAOS_CHECK_EXIT(tEncodeI32(pEncoder, taskId)); - - TAOS_CHECK_EXIT(tEncodeU64(pEncoder, pTask->dataRange.range.minVer)); - TAOS_CHECK_EXIT(tEncodeU64(pEncoder, pTask->dataRange.range.maxVer)); - TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->dataRange.window.skey)); - TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->dataRange.window.ekey)); - - int32_t epSz = 
taosArrayGetSize(pTask->upstreamInfo.pList); - TAOS_CHECK_EXIT(tEncodeI32(pEncoder, epSz)); - for (int32_t i = 0; i < epSz; i++) { - SStreamUpstreamEpInfo* pInfo = taosArrayGetP(pTask->upstreamInfo.pList, i); - TAOS_CHECK_EXIT(tEncodeStreamEpInfo(pEncoder, pInfo)); - } - - if (pTask->info.taskLevel != TASK_LEVEL__SINK) { - TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, pTask->exec.qmsg)); - } - - if (pTask->outputInfo.type == TASK_OUTPUT__TABLE) { - TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->outputInfo.tbSink.stbUid)); - TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, pTask->outputInfo.tbSink.stbFullName)); - TAOS_CHECK_EXIT(tEncodeSSchemaWrapper(pEncoder, pTask->outputInfo.tbSink.pSchemaWrapper)); - } else if (pTask->outputInfo.type == TASK_OUTPUT__SMA) { - TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->outputInfo.smaSink.smaId)); - } else if (pTask->outputInfo.type == TASK_OUTPUT__FETCH) { - TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->outputInfo.fetchSink.reserved)); - } else if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) { - TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->outputInfo.fixedDispatcher.taskId)); - TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->outputInfo.fixedDispatcher.nodeId)); - TAOS_CHECK_EXIT(tEncodeSEpSet(pEncoder, &pTask->outputInfo.fixedDispatcher.epSet)); - } else if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) { - TAOS_CHECK_EXIT(tSerializeSUseDbRspImp(pEncoder, &pTask->outputInfo.shuffleDispatcher.dbInfo)); - TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, pTask->outputInfo.shuffleDispatcher.stbFullName)); - } - TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->info.delaySchedParam)); - TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->subtableWithoutMd5)); - TAOS_CHECK_EXIT(tEncodeCStrWithLen(pEncoder, pTask->reserve, sizeof(pTask->reserve) - 1)); - + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pRsp->msgId)); tEndEncode(pEncoder); + _exit: return code; } -int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask) { - int32_t taskId = 0; +int32_t tDecodeStreamHbRsp(SDecoder* pDecoder, SMStreamHbRspMsg* pRsp) { int32_t code = 0; int32_t lino; TAOS_CHECK_EXIT(tStartDecode(pDecoder)); - TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->ver)); - if (pTask->ver <= SSTREAM_TASK_INCOMPATIBLE_VER || pTask->ver > SSTREAM_TASK_VER) { - TAOS_CHECK_EXIT(TSDB_CODE_INVALID_MSG); - } + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pRsp->msgId)); + tEndDecode(pDecoder); - TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->id.streamId)); - TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->id.taskId)); - TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->info.trigger)); - TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->info.taskLevel)); - TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->outputInfo.type)); - TAOS_CHECK_EXIT(tDecodeI16(pDecoder, &pTask->msgInfo.msgType)); +_exit: + return code; +} - TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->status.taskStatus)); - TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->status.schedStatus)); +int32_t tEncodeRetrieveChkptTriggerReq(SEncoder* pEncoder, const SRetrieveChkptTriggerReq* pReq) { + int32_t code = 0; + int32_t lino; - TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->info.selfChildId)); - TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->info.nodeId)); - TAOS_CHECK_EXIT(tDecodeSEpSet(pDecoder, &pTask->info.epSet)); - TAOS_CHECK_EXIT(tDecodeSEpSet(pDecoder, &pTask->info.mnodeEpset)); + TAOS_CHECK_EXIT(tStartEncode(pEncoder)); + TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pReq->streamId)); + TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pReq->checkpointId)); + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pReq->upstreamNodeId)); + 
TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pReq->upstreamTaskId)); + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pReq->downstreamNodeId)); + TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pReq->downstreamTaskId)); + tEndEncode(pEncoder); - TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->chkInfo.checkpointId)); - TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->chkInfo.checkpointVer)); - TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->info.fillHistory)); +_exit: + return code; +} - TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->hTaskInfo.id.streamId)); - TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &taskId)); - pTask->hTaskInfo.id.taskId = taskId; +int32_t tDecodeRetrieveChkptTriggerReq(SDecoder* pDecoder, SRetrieveChkptTriggerReq* pReq) { + int32_t code = 0; + int32_t lino; - TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->streamTaskId.streamId)); - TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &taskId)); - pTask->streamTaskId.taskId = taskId; + TAOS_CHECK_EXIT(tStartDecode(pDecoder)); + TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pReq->streamId)); + TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pReq->checkpointId)); + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pReq->upstreamNodeId)); + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pReq->upstreamTaskId)); + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pReq->downstreamNodeId)); + TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pReq->downstreamTaskId)); + tEndDecode(pDecoder); - TAOS_CHECK_EXIT(tDecodeU64(pDecoder, (uint64_t*)&pTask->dataRange.range.minVer)); - TAOS_CHECK_EXIT(tDecodeU64(pDecoder, (uint64_t*)&pTask->dataRange.range.maxVer)); - TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->dataRange.window.skey)); - TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->dataRange.window.ekey)); +_exit: + return code; +} - int32_t epSz = -1; - TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &epSz) < 0); +int32_t tEncodeCheckpointTriggerRsp(SEncoder* pEncoder, const SCheckpointTriggerRsp* pRsp) { + int32_t code = 0; + int32_t lino; - if ((pTask->upstreamInfo.pList = taosArrayInit(epSz, POINTER_BYTES)) == NULL) { - TAOS_CHECK_EXIT(terrno); - } - for (int32_t i = 0; i < epSz; i++) { - SStreamUpstreamEpInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamUpstreamEpInfo)); - if (pInfo == NULL) { - TAOS_CHECK_EXIT(terrno); - } - if ((code = tDecodeStreamEpInfo(pDecoder, pInfo)) < 0) { - taosMemoryFreeClear(pInfo); - goto _exit; - } - if (taosArrayPush(pTask->upstreamInfo.pList, &pInfo) == NULL) { - TAOS_CHECK_EXIT(terrno); - } - } + TAOS_CHECK_EXIT(tStartEncode(pEncoder)); + TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pRsp->streamId)); + TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pRsp->checkpointId)); + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pRsp->upstreamTaskId)); + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pRsp->taskId)); + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pRsp->transId)); + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pRsp->rspCode)); + tEndEncode(pEncoder); - if (pTask->info.taskLevel != TASK_LEVEL__SINK) { - TAOS_CHECK_EXIT(tDecodeCStrAlloc(pDecoder, &pTask->exec.qmsg)); - } +_exit: + return code; +} - if (pTask->outputInfo.type == TASK_OUTPUT__TABLE) { - TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->outputInfo.tbSink.stbUid)); - TAOS_CHECK_EXIT(tDecodeCStrTo(pDecoder, pTask->outputInfo.tbSink.stbFullName)); - pTask->outputInfo.tbSink.pSchemaWrapper = taosMemoryCalloc(1, sizeof(SSchemaWrapper)); - if (pTask->outputInfo.tbSink.pSchemaWrapper == NULL) { - TAOS_CHECK_EXIT(terrno); - } - TAOS_CHECK_EXIT(tDecodeSSchemaWrapper(pDecoder, pTask->outputInfo.tbSink.pSchemaWrapper)); - } else if (pTask->outputInfo.type == TASK_OUTPUT__SMA) { - TAOS_CHECK_EXIT(tDecodeI64(pDecoder, 
&pTask->outputInfo.smaSink.smaId)); - } else if (pTask->outputInfo.type == TASK_OUTPUT__FETCH) { - TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->outputInfo.fetchSink.reserved)); - } else if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) { - TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->outputInfo.fixedDispatcher.taskId)); - TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->outputInfo.fixedDispatcher.nodeId)); - TAOS_CHECK_EXIT(tDecodeSEpSet(pDecoder, &pTask->outputInfo.fixedDispatcher.epSet)); - } else if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) { - TAOS_CHECK_EXIT(tDeserializeSUseDbRspImp(pDecoder, &pTask->outputInfo.shuffleDispatcher.dbInfo)); - TAOS_CHECK_EXIT(tDecodeCStrTo(pDecoder, pTask->outputInfo.shuffleDispatcher.stbFullName)); - } - TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->info.delaySchedParam)); - if (pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER) { - TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->subtableWithoutMd5)); - } - TAOS_CHECK_EXIT(tDecodeCStrTo(pDecoder, pTask->reserve)); +int32_t tDecodeCheckpointTriggerRsp(SDecoder* pDecoder, SCheckpointTriggerRsp* pRsp) { + int32_t code = 0; + int32_t lino; + TAOS_CHECK_EXIT(tStartDecode(pDecoder)); + TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pRsp->streamId)); + TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pRsp->checkpointId)); + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pRsp->upstreamTaskId)); + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pRsp->taskId)); + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pRsp->transId)); + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pRsp->rspCode)); tEndDecode(pDecoder); _exit: @@ -830,11 +789,7 @@ int32_t tEncodeRestoreCheckpointInfo(SEncoder* pEncoder, const SRestoreCheckpoin tEndEncode(pEncoder); _exit: - if (code) { - return code; - } else { - return pEncoder->pos; - } + return code; } int32_t tDecodeRestoreCheckpointInfo(SDecoder* pDecoder, SRestoreCheckpointInfo* pReq) { @@ -853,3 +808,31 @@ int32_t tDecodeRestoreCheckpointInfo(SDecoder* pDecoder, SRestoreCheckpointInfo* _exit: return code; } + +int32_t tEncodeStreamTaskRunReq (SEncoder* pEncoder, const SStreamTaskRunReq* pReq) { + int32_t code = 0; + int32_t lino; + + TAOS_CHECK_EXIT(tStartEncode(pEncoder)); + TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pReq->streamId)); + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pReq->taskId)); + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pReq->reqType)); + tEndEncode(pEncoder); + +_exit: + return code; +} + +int32_t tDecodeStreamTaskRunReq(SDecoder* pDecoder, SStreamTaskRunReq* pReq) { + int32_t code = 0; + int32_t lino; + + TAOS_CHECK_EXIT(tStartDecode(pDecoder)); + TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pReq->streamId)); + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pReq->taskId)); + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pReq->reqType)); + tEndDecode(pDecoder); + +_exit: + return code; +} \ No newline at end of file diff --git a/source/common/src/tmsg.c b/source/common/src/msg/tmsg.c similarity index 99% rename from source/common/src/tmsg.c rename to source/common/src/msg/tmsg.c index ca5af34e15..2e997218ac 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/msg/tmsg.c @@ -2169,7 +2169,7 @@ int32_t tSerializeRetrieveAnalAlgoRsp(void *buf, int32_t bufLen, SRetrieveAnalAl SAnalyticsUrl *pUrl = pIter; size_t nameLen = 0; const char *name = taosHashGetKey(pIter, &nameLen); - if (nameLen > 0 && nameLen <= TSDB_ANAL_ALGO_KEY_LEN && pUrl->urlLen > 0) { + if (nameLen > 0 && nameLen <= TSDB_ANALYTIC_ALGO_KEY_LEN && pUrl->urlLen > 0) { numOfAlgos++; } pIter = taosHashIterate(pRsp->hash, pIter); @@ -2224,7 +2224,7 @@ int32_t 
tDeserializeRetrieveAnalAlgoRsp(void *buf, int32_t bufLen, SRetrieveAnal int32_t numOfAlgos = 0; int32_t nameLen; int32_t type; - char name[TSDB_ANAL_ALGO_KEY_LEN]; + char name[TSDB_ANALYTIC_ALGO_KEY_LEN]; SAnalyticsUrl url = {0}; TAOS_CHECK_EXIT(tStartDecode(&decoder)); @@ -2233,7 +2233,7 @@ int32_t tDeserializeRetrieveAnalAlgoRsp(void *buf, int32_t bufLen, SRetrieveAnal for (int32_t f = 0; f < numOfAlgos; ++f) { TAOS_CHECK_EXIT(tDecodeI32(&decoder, &nameLen)); - if (nameLen > 0 && nameLen <= TSDB_ANAL_ALGO_NAME_LEN) { + if (nameLen > 0 && nameLen <= TSDB_ANALYTIC_ALGO_NAME_LEN) { TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, name)); } @@ -5642,6 +5642,12 @@ int32_t tSerializeSShowVariablesRsp(void *buf, int32_t bufLen, SShowVariablesRsp SVariablesInfo *pInfo = taosArrayGet(pRsp->variables, i); TAOS_CHECK_EXIT(tEncodeSVariablesInfo(&encoder, pInfo)); } + + for (int32_t i = 0; i < varNum; ++i) { + SVariablesInfo *pInfo = taosArrayGet(pRsp->variables, i); + TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pInfo->info)); + } + tEndEncode(&encoder); _exit: @@ -5675,6 +5681,13 @@ int32_t tDeserializeSShowVariablesRsp(void *buf, int32_t bufLen, SShowVariablesR TAOS_CHECK_EXIT(terrno); } } + + if (!tDecodeIsEnd(&decoder)) { + for (int32_t i = 0; i < varNum; ++i) { + SVariablesInfo *pInfo = taosArrayGet(pRsp->variables, i); + TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pInfo->info)); + } + } } tEndDecode(&decoder); diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 4993ece7c1..bfe82aa7ae 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -327,8 +327,9 @@ static const SSysDbTableSchema configSchema[] = { static const SSysDbTableSchema variablesSchema[] = { {.name = "dnode_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, {.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, - {.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "value", .bytes = TSDB_CONFIG_PATH_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, {.name = "scope", .bytes = TSDB_CONFIG_SCOPE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "info", .bytes = TSDB_CONFIG_INFO_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, }; static const SSysDbTableSchema topicSchema[] = { @@ -401,7 +402,7 @@ static const SSysDbTableSchema userCompactsDetailSchema[] = { static const SSysDbTableSchema anodesSchema[] = { {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, - {.name = "url", .bytes = TSDB_ANAL_ANODE_URL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "url", .bytes = TSDB_ANALYTIC_ANODE_URL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, {.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true}, {.name = "update_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true}, @@ -409,8 +410,8 @@ static const SSysDbTableSchema anodesSchema[] = { static const SSysDbTableSchema anodesFullSchema[] = { {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, - {.name = "type", .bytes = TSDB_ANAL_ALGO_TYPE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, - {.name = "algo", .bytes =
TSDB_ANAL_ALGO_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "type", .bytes = TSDB_ANALYTIC_ALGO_TYPE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "algo", .bytes = TSDB_ANALYTIC_ALGO_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, }; static const SSysDbTableSchema tsmaSchema[] = { diff --git a/source/common/src/tcol.c b/source/common/src/tcol.c index a23385aba0..55a4b21208 100644 --- a/source/common/src/tcol.c +++ b/source/common/src/tcol.c @@ -81,26 +81,42 @@ const char* getDefaultEncodeStr(uint8_t type) { return columnEncodeStr(getDefaul uint16_t getDefaultCompress(uint8_t type) { switch (type) { case TSDB_DATA_TYPE_NULL: + return TSDB_COLVAL_COMPRESS_LZ4; case TSDB_DATA_TYPE_BOOL: + return TSDB_COLVAL_COMPRESS_ZSTD; case TSDB_DATA_TYPE_TINYINT: case TSDB_DATA_TYPE_SMALLINT: + return TSDB_COLVAL_COMPRESS_ZLIB; case TSDB_DATA_TYPE_INT: case TSDB_DATA_TYPE_BIGINT: + return TSDB_COLVAL_COMPRESS_LZ4; case TSDB_DATA_TYPE_FLOAT: case TSDB_DATA_TYPE_DOUBLE: + return TSDB_COLVAL_COMPRESS_LZ4; case TSDB_DATA_TYPE_VARCHAR: // TSDB_DATA_TYPE_BINARY + return TSDB_COLVAL_COMPRESS_ZSTD; case TSDB_DATA_TYPE_TIMESTAMP: + return TSDB_COLVAL_COMPRESS_LZ4; case TSDB_DATA_TYPE_NCHAR: + return TSDB_COLVAL_COMPRESS_ZSTD; case TSDB_DATA_TYPE_UTINYINT: case TSDB_DATA_TYPE_USMALLINT: + return TSDB_COLVAL_COMPRESS_ZLIB; case TSDB_DATA_TYPE_UINT: case TSDB_DATA_TYPE_UBIGINT: + return TSDB_COLVAL_COMPRESS_LZ4; case TSDB_DATA_TYPE_JSON: + return TSDB_COLVAL_COMPRESS_LZ4; case TSDB_DATA_TYPE_VARBINARY: + return TSDB_COLVAL_COMPRESS_ZSTD; case TSDB_DATA_TYPE_DECIMAL: + return TSDB_COLVAL_COMPRESS_LZ4; case TSDB_DATA_TYPE_BLOB: + return TSDB_COLVAL_COMPRESS_LZ4; case TSDB_DATA_TYPE_MEDIUMBLOB: + return TSDB_COLVAL_COMPRESS_LZ4; case TSDB_DATA_TYPE_GEOMETRY: + return TSDB_COLVAL_COMPRESS_LZ4; case TSDB_DATA_TYPE_MAX: return TSDB_COLVAL_COMPRESS_LZ4; default: diff --git a/source/common/src/tdataformat.c b/source/common/src/tdataformat.c index e580ad33bd..a38842735c 100644 --- a/source/common/src/tdataformat.c +++ b/source/common/src/tdataformat.c @@ -3036,7 +3036,8 @@ _exit: return code; } -int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind, int32_t buffMaxLen) { +int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind, int32_t buffMaxLen, initGeosFn igeos, + checkWKBGeometryFn cgeos) { int32_t code = 0; if (!(pBind->num == 1 && pBind->is_null && *pBind->is_null)) { @@ -3046,6 +3047,12 @@ int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind, int32 } if (IS_VAR_DATA_TYPE(pColData->type)) { // var-length data type + if (pColData->type == TSDB_DATA_TYPE_GEOMETRY) { + code = igeos(); + if (code) { + return code; + } + } for (int32_t i = 0; i < pBind->num; ++i) { if (pBind->is_null && pBind->is_null[i]) { if (pColData->cflag & COL_IS_KEY) { @@ -3055,9 +3062,12 @@ int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind, int32 code = tColDataAppendValueImpl[pColData->flag][CV_FLAG_NULL](pColData, NULL, 0); if (code) goto _exit; } else if (pBind->length[i] > buffMaxLen) { - uError("var data length too big, len:%d, max:%d", pBind->length[i], buffMaxLen); - return TSDB_CODE_INVALID_PARA; + return TSDB_CODE_PAR_VALUE_TOO_LONG; } else { + if (pColData->type == TSDB_DATA_TYPE_GEOMETRY) { + code = cgeos((char *)pBind->buffer + pBind->buffer_length * i, (size_t)pBind->length[i]); + if (code) goto _exit; + } code = 
tColDataAppendValueImpl[pColData->flag][CV_FLAG_VALUE]( pColData, (uint8_t *)pBind->buffer + pBind->buffer_length * i, pBind->length[i]); } @@ -3108,7 +3118,8 @@ _exit: return code; } -int32_t tColDataAddValueByBind2(SColData *pColData, TAOS_STMT2_BIND *pBind, int32_t buffMaxLen) { +int32_t tColDataAddValueByBind2(SColData *pColData, TAOS_STMT2_BIND *pBind, int32_t buffMaxLen, initGeosFn igeos, + checkWKBGeometryFn cgeos) { int32_t code = 0; if (!(pBind->num == 1 && pBind->is_null && *pBind->is_null)) { @@ -3118,6 +3129,13 @@ int32_t tColDataAddValueByBind2(SColData *pColData, TAOS_STMT2_BIND *pBind, int3 } if (IS_VAR_DATA_TYPE(pColData->type)) { // var-length data type + if (pColData->type == TSDB_DATA_TYPE_GEOMETRY) { + code = igeos(); + if (code) { + return code; + } + } + uint8_t *buf = pBind->buffer; for (int32_t i = 0; i < pBind->num; ++i) { if (pBind->is_null && pBind->is_null[i]) { @@ -3133,9 +3151,12 @@ int32_t tColDataAddValueByBind2(SColData *pColData, TAOS_STMT2_BIND *pBind, int3 if (code) goto _exit; } } else if (pBind->length[i] > buffMaxLen) { - uError("var data length too big, len:%d, max:%d", pBind->length[i], buffMaxLen); - return TSDB_CODE_INVALID_PARA; + return TSDB_CODE_PAR_VALUE_TOO_LONG; } else { + if (pColData->type == TSDB_DATA_TYPE_GEOMETRY) { + code = cgeos(buf, pBind->length[i]); + if (code) goto _exit; + } code = tColDataAppendValueImpl[pColData->flag][CV_FLAG_VALUE](pColData, buf, pBind->length[i]); buf += pBind->length[i]; } diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 0133428c53..93c86a2dcd 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -217,6 +217,8 @@ float tsSelectivityRatio = 1.0; int32_t tsTagFilterResCacheSize = 1024 * 10; char tsTagFilterCache = 0; +int32_t tsBypassFlag = 0; + // the maximum allowed query buffer size during query processing for each data node. 
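The tsBypassFlag global added here is consumed together with the TSDB_BYPASS_* bit masks introduced in tdef.h above (per the comment there: RB bits return before the step runs, RA bits return after it, NR bits skip the step and continue). A minimal sketch of how a write path could honor these bits; demoSubmitToTsdb and its body are illustrative assumptions, not code from this patch:

extern int32_t tsBypassFlag;  // set via the new "bypassFlag" config option

// Hypothetical submit handler: RB bits short-circuit the step before any work
// is done, which helps isolate ingestion bottlenecks stage by stage.
static int32_t demoSubmitToTsdb(void *pSubmitReq) {
  (void)pSubmitReq;
  if (tsBypassFlag & TSDB_BYPASS_RB_TSDB_WRITE_MEM) return 0;  // skip the memtable write
  /* ... insert rows into the memtable ... */
  if (tsBypassFlag & TSDB_BYPASS_RB_TSDB_COMMIT) return 0;     // skip the commit trigger
  /* ... schedule a commit when the memtable fills up ... */
  return 0;
}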
// -1 no limit (default) // 0 no query allowed, queries are disabled @@ -612,6 +614,7 @@ static int32_t taosAddClientCfg(SConfig *pCfg) { cfgAddInt64(pCfg, "randErrorDivisor", tsRandErrDivisor, 1, INT64_MAX, CFG_SCOPE_BOTH, CFG_DYN_BOTH)); TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "randErrorScope", tsRandErrScope, 0, INT64_MAX, CFG_SCOPE_BOTH, CFG_DYN_BOTH)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "safetyCheckLevel", tsSafetyCheckLevel, 0, 5, CFG_SCOPE_BOTH, CFG_DYN_BOTH)); + TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "bypassFlag", tsBypassFlag, 0, INT32_MAX, CFG_SCOPE_BOTH, CFG_DYN_BOTH)); tsNumOfRpcThreads = tsNumOfCores / 2; tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, TSDB_MAX_RPC_THREADS); @@ -1303,6 +1306,10 @@ static int32_t taosSetClientCfg(SConfig *pCfg) { TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "safetyCheckLevel"); tsSafetyCheckLevel = pItem->i32; + + TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "bypassFlag"); + tsBypassFlag = pItem->i32; + TAOS_RETURN(TSDB_CODE_SUCCESS); } @@ -2046,7 +2053,8 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) { {"supportVnodes", &tsNumOfSupportVnodes}, {"experimental", &tsExperimental}, {"maxTsmaNum", &tsMaxTsmaNum}, - {"safetyCheckLevel", &tsSafetyCheckLevel}}; + {"safetyCheckLevel", &tsSafetyCheckLevel}, + {"bypassFlag", &tsBypassFlag}}; if ((code = taosCfgSetOption(debugOptions, tListLen(debugOptions), pItem, true)) != TSDB_CODE_SUCCESS) { code = taosCfgSetOption(options, tListLen(options), pItem, false); @@ -2302,7 +2310,8 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, const char *name) { {"multiResultFunctionStarReturnTags", &tsMultiResultFunctionStarReturnTags}, {"maxTsmaCalcDelay", &tsMaxTsmaCalcDelay}, {"tsmaDataDeleteMark", &tsmaDataDeleteMark}, - {"safetyCheckLevel", &tsSafetyCheckLevel}}; + {"safetyCheckLevel", &tsSafetyCheckLevel}, + {"bypassFlag", &tsBypassFlag}}; if ((code = taosCfgSetOption(debugOptions, tListLen(debugOptions), pItem, true)) != TSDB_CODE_SUCCESS) { code = taosCfgSetOption(options, tListLen(options), pItem, false); diff --git a/source/common/src/tmisce.c b/source/common/src/tmisce.c index 4df458c2bb..8988fab56a 100644 --- a/source/common/src/tmisce.c +++ b/source/common/src/tmisce.c @@ -267,7 +267,14 @@ int32_t dumpConfToDataBlock(SSDataBlock* pBlock, int32_t startCol) { int8_t locked = 0; - TAOS_CHECK_GOTO(blockDataEnsureCapacity(pBlock, cfgGetSize(pConf)), NULL, _exit); + size_t exSize = 0; + size_t index = 0; + SConfigItem* pDataDirItem = cfgGetItem(pConf, "dataDir"); + if (pDataDirItem) { + exSize = TMAX(taosArrayGetSize(pDataDirItem->array), 1) - 1; + } + + TAOS_CHECK_GOTO(blockDataEnsureCapacity(pBlock, cfgGetSize(pConf) + exSize), NULL, _exit); TAOS_CHECK_GOTO(cfgCreateIter(pConf, &pIter), NULL, _exit); @@ -275,6 +282,7 @@ int32_t dumpConfToDataBlock(SSDataBlock* pBlock, int32_t startCol) { locked = 1; while ((pItem = cfgNextIter(pIter)) != NULL) { +_start: col = startCol; // GRANT_CFG_SKIP; @@ -289,9 +297,18 @@ int32_t dumpConfToDataBlock(SSDataBlock* pBlock, int32_t startCol) { TAOS_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, name, false), NULL, _exit); - char value[TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE] = {0}; + char value[TSDB_CONFIG_PATH_LEN + VARSTR_HEADER_SIZE] = {0}; int32_t valueLen = 0; - TAOS_CHECK_GOTO(cfgDumpItemValue(pItem, &value[VARSTR_HEADER_SIZE], TSDB_CONFIG_VALUE_LEN, &valueLen), NULL, _exit); + SDiskCfg* pDiskCfg = NULL; + if (strcasecmp(pItem->name, "dataDir") == 0 && exSize > 0) { + char* buf = &value[VARSTR_HEADER_SIZE]; + pDiskCfg = 
taosArrayGet(pItem->array, index); + valueLen = tsnprintf(buf, TSDB_CONFIG_PATH_LEN, "%s", pDiskCfg->dir); + index++; + } else { + TAOS_CHECK_GOTO(cfgDumpItemValue(pItem, &value[VARSTR_HEADER_SIZE], TSDB_CONFIG_PATH_LEN, &valueLen), NULL, + _exit); + } varDataSetLen(value, valueLen); pColInfo = taosArrayGet(pBlock->pDataBlock, col++); @@ -313,8 +330,28 @@ int32_t dumpConfToDataBlock(SSDataBlock* pBlock, int32_t startCol) { } TAOS_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, scope, false), NULL, _exit); + char info[TSDB_CONFIG_INFO_LEN + VARSTR_HEADER_SIZE] = {0}; + if (strcasecmp(pItem->name, "dataDir") == 0 && pDiskCfg) { + char* buf = &info[VARSTR_HEADER_SIZE]; + valueLen = tsnprintf(buf, TSDB_CONFIG_INFO_LEN, "level %d primary %d disabled %" PRIi8, pDiskCfg->level, + pDiskCfg->primary, pDiskCfg->disable); + } else { + valueLen = 0; + } + varDataSetLen(info, valueLen); + + pColInfo = taosArrayGet(pBlock->pDataBlock, col++); + if (pColInfo == NULL) { + code = terrno; + TAOS_CHECK_GOTO(code, NULL, _exit); + } + TAOS_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, info, false), NULL, _exit); + numOfRows++; - } + if (index > 0 && index <= exSize) { + goto _start; + } +} pBlock->info.rows = numOfRows; _exit: if (locked) cfgUnLock(pConf); diff --git a/source/common/test/CMakeLists.txt b/source/common/test/CMakeLists.txt index 2fe3ef652d..bb12612273 100644 --- a/source/common/test/CMakeLists.txt +++ b/source/common/test/CMakeLists.txt @@ -46,7 +46,7 @@ if (${TD_LINUX}) target_sources(tmsgTest PRIVATE "tmsgTest.cpp" - "../src/tmsg.c" + "../src/msg/tmsg.c" ) target_include_directories(tmsgTest PUBLIC "${TD_SOURCE_DIR}/include/common/") target_link_libraries(tmsgTest PUBLIC os util gtest gtest_main) diff --git a/source/dnode/mgmt/mgmt_snode/src/smWorker.c b/source/dnode/mgmt/mgmt_snode/src/smWorker.c index 8c33c5bb4b..1e882fc656 100644 --- a/source/dnode/mgmt/mgmt_snode/src/smWorker.c +++ b/source/dnode/mgmt/mgmt_snode/src/smWorker.c @@ -36,14 +36,15 @@ static void smProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO dTrace("msg:%p, get from snode-write queue", pMsg); int32_t code = sndProcessWriteMsg(pMgmt->pSnode, pMsg, NULL); - if (code < 0) { - dGError("snd, msg:%p failed to process write since %s", pMsg, tstrerror(code)); - if (pMsg->info.handle != NULL) { - tmsgSendRsp(pMsg); - } - } else { - smSendRsp(pMsg, 0); - } + // if (code < 0) { + // dGError("snd, msg:%p failed to process write since %s", pMsg, tstrerror(code)); + // if (pMsg->info.handle != NULL) { + // tmsgSendRsp(pMsg); + // } + // } else { + // smSendRsp(pMsg, 0); + // } + smSendRsp(pMsg, code); dTrace("msg:%p, is freed", pMsg); rpcFreeCont(pMsg->pCont); diff --git a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h index 989adf84ac..7842077d88 100644 --- a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h +++ b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h @@ -37,7 +37,9 @@ typedef struct SVnodeMgmt { SSingleWorker mgmtMultiWorker; SHashObj *hash; SHashObj *closedHash; + SHashObj *creatingHash; TdThreadRwlock lock; + TdThreadMutex mutex; SVnodesStat state; STfs *pTfs; TdThread thread; @@ -96,6 +98,7 @@ SVnodeObj *vmAcquireVnodeImpl(SVnodeMgmt *pMgmt, int32_t vgId, bool strict); void vmReleaseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode); int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl); void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal, bool keepClosed); +void vmRemoveFromCreatingHash(SVnodeMgmt *pMgmt, int32_t vgId); // vmHandle.c SArray 
*vmGetMsgHandles(); @@ -113,6 +116,7 @@ int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt); int32_t vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes); int32_t vmGetAllVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes); +int32_t vmGetAllVnodeListFromHashWithCreating(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes); // vmWorker.c int32_t vmStartWorker(SVnodeMgmt *pMgmt); diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c index 7566b69c02..b4453ad6fc 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c @@ -67,6 +67,54 @@ int32_t vmGetAllVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnod return 0; } +int32_t vmGetAllVnodeListFromHashWithCreating(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes) { + (void)taosThreadRwlockRdlock(&pMgmt->lock); + + int32_t num = 0; + int32_t size = taosHashGetSize(pMgmt->hash); + int32_t creatingSize = taosHashGetSize(pMgmt->creatingHash); + size += creatingSize; + SVnodeObj **pVnodes = taosMemoryCalloc(size, sizeof(SVnodeObj *)); + if (pVnodes == NULL) { + (void)taosThreadRwlockUnlock(&pMgmt->lock); + return terrno; + } + + void *pIter = taosHashIterate(pMgmt->hash, NULL); + while (pIter) { + SVnodeObj **ppVnode = pIter; + SVnodeObj *pVnode = *ppVnode; + if (pVnode && num < size) { + int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1); + dTrace("vgId:%d,acquire vnode, vnode:%p, ref:%d", pVnode->vgId, pVnode, refCount); + pVnodes[num++] = (*ppVnode); + pIter = taosHashIterate(pMgmt->hash, pIter); + } else { + taosHashCancelIterate(pMgmt->hash, pIter); + } + } + + pIter = taosHashIterate(pMgmt->creatingHash, NULL); + while (pIter) { + SVnodeObj **ppVnode = pIter; + SVnodeObj *pVnode = *ppVnode; + if (pVnode && num < size) { + int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1); + dTrace("vgId:%d, acquire vnode, vnode:%p, ref:%d", pVnode->vgId, pVnode, refCount); + pVnodes[num++] = (*ppVnode); + pIter = taosHashIterate(pMgmt->creatingHash, pIter); + } else { + taosHashCancelIterate(pMgmt->creatingHash, pIter); + } + } + (void)taosThreadRwlockUnlock(&pMgmt->lock); + + *numOfVnodes = num; + *ppVnodes = pVnodes; + + return 0; +} + int32_t vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes) { (void)taosThreadRwlockRdlock(&pMgmt->lock); diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index 006f44b349..90b3f0025d 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -381,6 +381,7 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { if (vnodeCreate(path, &vnodeCfg, diskPrimary, pMgmt->pTfs) < 0) { dError("vgId:%d, failed to create vnode since %s", req.vgId, terrstr()); vmReleaseVnode(pMgmt, pVnode); + vmRemoveFromCreatingHash(pMgmt, req.vgId); (void)tFreeSCreateVnodeReq(&req); code = terrno != 0 ? 
terrno : -1; return code; @@ -422,6 +423,8 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { } _OVER: + vmRemoveFromCreatingHash(pMgmt, req.vgId); + if (code != 0) { int32_t r = 0; r = taosThreadRwlockWrlock(&pMgmt->lock); diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 682c179270..c0f15b8877 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -16,6 +16,7 @@ #define _DEFAULT_SOURCE #include "vmInt.h" #include "libs/function/tudf.h" +#include "osMemory.h" #include "tfs.h" #include "vnd.h" @@ -62,10 +63,20 @@ int32_t vmAllocPrimaryDisk(SVnodeMgmt *pMgmt, int32_t vgId) { int32_t numOfVnodes = 0; SVnodeObj **ppVnodes = NULL; - code = vmGetVnodeListFromHash(pMgmt, &numOfVnodes, &ppVnodes); + code = taosThreadMutexLock(&pMgmt->mutex); if (code != 0) { return code; } + + code = vmGetAllVnodeListFromHashWithCreating(pMgmt, &numOfVnodes, &ppVnodes); + if (code != 0) { + int32_t r = taosThreadMutexUnlock(&pMgmt->mutex); + if (r != 0) { + dError("vgId:%d, failed to unlock mutex since %s", vgId, tstrerror(r)); + } + return code; + } + for (int32_t v = 0; v < numOfVnodes; v++) { SVnodeObj *pVnode = ppVnodes[v]; disks[pVnode->diskPrimary] += 1; @@ -81,6 +92,51 @@ int32_t vmAllocPrimaryDisk(SVnodeMgmt *pMgmt, int32_t vgId) { } } + SVnodeObj *pCreatingVnode = taosMemoryCalloc(1, sizeof(SVnodeObj)); + if (pCreatingVnode == NULL) { + code = -1; + if (terrno != 0) code = terrno; + dError("failed to alloc vnode since %s", tstrerror(code)); + int32_t r = taosThreadMutexUnlock(&pMgmt->mutex); + if (r != 0) { + dError("vgId:%d, failed to unlock mutex since %s", vgId, tstrerror(r)); + } + goto _OVER; + } + (void)memset(pCreatingVnode, 0, sizeof(SVnodeObj)); + + pCreatingVnode->vgId = vgId; + pCreatingVnode->diskPrimary = diskId; + + code = taosThreadRwlockWrlock(&pMgmt->lock); + if (code != 0) { + int32_t r = taosThreadMutexUnlock(&pMgmt->mutex); + if (r != 0) { + dError("vgId:%d, failed to unlock mutex since %s", vgId, tstrerror(r)); + } + taosMemoryFree(pCreatingVnode); + goto _OVER; + } + + dTrace("vgId:%d, put vnode into creating hash, pCreatingVnode:%p", vgId, pCreatingVnode); + code = taosHashPut(pMgmt->creatingHash, &vgId, sizeof(int32_t), &pCreatingVnode, sizeof(SVnodeObj *)); + if (code != 0) { + dError("vgId:%d, failed to put vnode to creatingHash", vgId); + taosMemoryFree(pCreatingVnode); + } + + int32_t r = taosThreadRwlockUnlock(&pMgmt->lock); + if (r != 0) { + dError("vgId:%d, failed to unlock since %s", vgId, tstrerror(r)); + } + + code = taosThreadMutexUnlock(&pMgmt->mutex); + if (code != 0) { + goto _OVER; + } + +_OVER: + for (int32_t i = 0; i < numOfVnodes; ++i) { if (ppVnodes == NULL || ppVnodes[i] == NULL) continue; vmReleaseVnode(pMgmt, ppVnodes[i]); @@ -89,8 +145,13 @@ int32_t vmAllocPrimaryDisk(SVnodeMgmt *pMgmt, int32_t vgId) { taosMemoryFree(ppVnodes); } - dInfo("vgId:%d, alloc disk:%d of level 0. ndisk:%d, vnodes: %d", vgId, diskId, ndisk, numOfVnodes); - return diskId; + if (code != 0) { + dError("vgId:%d, failed to alloc disk since %s", vgId, tstrerror(code)); + return code; + } else { + dInfo("vgId:%d, alloc disk:%d of level 0. 
ndisk:%d, vnodes: %d", vgId, diskId, ndisk, numOfVnodes);
+    return diskId;
+  }
 }
 
 SVnodeObj *vmAcquireVnodeImpl(SVnodeMgmt *pMgmt, int32_t vgId, bool strict) {
@@ -172,7 +233,9 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) {
   }
 
   (void)taosThreadRwlockWrlock(&pMgmt->lock);
+
   SVnodeObj *pOld = NULL;
+
   int32_t r = taosHashGetDup(pMgmt->hash, &pVnode->vgId, sizeof(int32_t), (void *)&pOld);
   if (r != 0) {
     dError("vgId:%d, failed to get vnode from hash", pVnode->vgId);
@@ -187,15 +250,15 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) {
   if (r != 0) {
     dError("vgId:%d, failed to get vnode from closedHash", pVnode->vgId);
   }
-  if (pOld) {
+  if (pOld != NULL) {
     vmFreeVnodeObj(&pOld);
+    dInfo("vgId:%d, remove from closedHash", pVnode->vgId);
+    r = taosHashRemove(pMgmt->closedHash, &pVnode->vgId, sizeof(int32_t));
+    if (r != 0) {
+      dError("vgId:%d, failed to remove vnode from closedHash", pVnode->vgId);
+    }
   }
-  dInfo("vgId:%d, remove from closedHash", pVnode->vgId);
-  r = taosHashRemove(pMgmt->closedHash, &pVnode->vgId, sizeof(int32_t));
-  if (r != 0) {
-    dError("vgId:%d, failed to remove vnode from hash", pVnode->vgId);
-  }
 
   (void)taosThreadRwlockUnlock(&pMgmt->lock);
 
   return code;
@@ -216,12 +279,12 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal,
   }
 
   if (keepClosed) {
     SVnodeObj *pClosedVnode = taosMemoryCalloc(1, sizeof(SVnodeObj));
-    (void)memset(pClosedVnode, 0, sizeof(SVnodeObj));
-    if (pVnode == NULL) {
-      dError("vgId:%d, failed to alloc vnode since %s", pVnode->vgId, terrstr());
+    if (pClosedVnode == NULL) {
+      dError("failed to alloc vnode since %s", terrstr());
       (void)taosThreadRwlockUnlock(&pMgmt->lock);
       return;
     }
+    (void)memset(pClosedVnode, 0, sizeof(SVnodeObj));
 
     pClosedVnode->vgId = pVnode->vgId;
     pClosedVnode->dropped = pVnode->dropped;
@@ -427,11 +490,18 @@ static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) {
   pMgmt->closedHash =
       taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
-  if (pMgmt->hash == NULL) {
+  if (pMgmt->closedHash == NULL) {
     dError("failed to init vnode closed hash since %s", terrstr());
     return TSDB_CODE_OUT_OF_MEMORY;
   }
 
+  pMgmt->creatingHash =
+      taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
+  if (pMgmt->creatingHash == NULL) {
+    dError("failed to init vnode creating hash since %s", terrstr());
+    return TSDB_CODE_OUT_OF_MEMORY;
+  }
+
   SWrapperCfg *pCfgs = NULL;
   int32_t      numOfVnodes = 0;
   if (vmGetVnodeListFromFile(pMgmt, &pCfgs, &numOfVnodes) != 0) {
@@ -509,6 +579,27 @@ static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) {
   return 0;
 }
 
+void vmRemoveFromCreatingHash(SVnodeMgmt *pMgmt, int32_t vgId) {
+  SVnodeObj *pOld = NULL;
+
+  (void)taosThreadRwlockWrlock(&pMgmt->lock);
+  int32_t r = taosHashGetDup(pMgmt->creatingHash, &vgId, sizeof(int32_t), (void *)&pOld);
+  if (r != 0) {
+    dError("vgId:%d, failed to get vnode from creatingHash", vgId);
+  }
+  dTrace("vgId:%d, remove from creatingHash", vgId);
+  r = taosHashRemove(pMgmt->creatingHash, &vgId, sizeof(int32_t));
+  if (r != 0) {
+    dError("vgId:%d, failed to remove vnode from creatingHash since %s", vgId, tstrerror(r));
+  }
+  (void)taosThreadRwlockUnlock(&pMgmt->lock);
+
+  if (pOld) {
+    dTrace("vgId:%d, free vnode pOld:%p", vgId, pOld);
+    vmFreeVnodeObj(&pOld);
+  }
+}
+
 static void *vmCloseVnodeInThread(void *param) {
   SVnodeThread *pThread = param;
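// ------------------------------------------------------------------------
// Editor's note: the creatingHash changes above implement "count in-flight
// creates when picking the least-loaded disk". A minimal standalone sketch of
// that pattern follows; it is NOT TDengine source. All names here (diskLoad,
// pendingLoad, allocPrimaryDisk, finishCreate, NDISK) are invented for
// illustration; the real code tracks SVnodeObj entries in pMgmt->hash plus
// pMgmt->creatingHash and serializes allocation with pMgmt->mutex.
#include <pthread.h>
#include <stdio.h>

#define NDISK 4

static int             diskLoad[NDISK];     // vnodes already open, per disk
static int             pendingLoad[NDISK];  // creations still in flight, per disk
static pthread_mutex_t allocLock = PTHREAD_MUTEX_INITIALIZER;

// Pick the disk with the fewest open + in-flight vnodes and reserve a slot on
// it before releasing the lock, so two concurrent creates cannot both choose
// the same "emptiest" disk.
static int allocPrimaryDisk(void) {
  pthread_mutex_lock(&allocLock);
  int best = 0;
  for (int d = 1; d < NDISK; d++) {
    if (diskLoad[d] + pendingLoad[d] < diskLoad[best] + pendingLoad[best]) best = d;
  }
  pendingLoad[best]++;  // analogous to inserting the placeholder into creatingHash
  pthread_mutex_unlock(&allocLock);
  return best;
}

// Called when the create finishes, on success or failure, like
// vmRemoveFromCreatingHash above.
static void finishCreate(int disk, int ok) {
  pthread_mutex_lock(&allocLock);
  pendingLoad[disk]--;
  if (ok) diskLoad[disk]++;
  pthread_mutex_unlock(&allocLock);
}

int main(void) {
  int d = allocPrimaryDisk();
  finishCreate(d, 1);
  printf("vnode placed on disk %d\n", d);
  return 0;
}
// ------------------------------------------------------------------------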
SVnodeMgmt *pMgmt = pThread->pMgmt; @@ -614,6 +710,18 @@ static void vmCloseVnodes(SVnodeMgmt *pMgmt) { pMgmt->closedHash = NULL; } + pIter = taosHashIterate(pMgmt->creatingHash, NULL); + while (pIter) { + SVnodeObj **ppVnode = pIter; + vmFreeVnodeObj(ppVnode); + pIter = taosHashIterate(pMgmt->creatingHash, pIter); + } + + if (pMgmt->creatingHash != NULL) { + taosHashCleanup(pMgmt->creatingHash); + pMgmt->creatingHash = NULL; + } + dInfo("total vnodes:%d are all closed", numOfVnodes); } @@ -622,6 +730,7 @@ static void vmCleanup(SVnodeMgmt *pMgmt) { vmStopWorker(pMgmt); vnodeCleanup(); (void)taosThreadRwlockDestroy(&pMgmt->lock); + (void)taosThreadMutexDestroy(&pMgmt->mutex); (void)taosThreadMutexDestroy(&pMgmt->fileLock); taosMemoryFree(pMgmt); } @@ -714,6 +823,12 @@ static int32_t vmInit(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { goto _OVER; } + code = taosThreadMutexInit(&pMgmt->mutex, NULL); + if (code != 0) { + code = TAOS_SYSTEM_ERROR(errno); + goto _OVER; + } + code = taosThreadMutexInit(&pMgmt->fileLock, NULL); if (code != 0) { code = TAOS_SYSTEM_ERROR(errno); diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index 61543e619e..5f396a520a 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -214,8 +214,6 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) { } else if ((pRpc->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || pRpc->code == TSDB_CODE_RPC_BROKEN_LINK) && (!IsReq(pRpc)) && (pRpc->pCont == NULL)) { dGError("msg:%p, type:%s pCont is NULL, err: %s", pRpc, TMSG_INFO(pRpc->msgType), tstrerror(pRpc->code)); - code = pRpc->code; - goto _OVER; } if (pHandle->defaultNtype == NODE_END) { diff --git a/source/dnode/mgmt/node_util/CMakeLists.txt b/source/dnode/mgmt/node_util/CMakeLists.txt index d882d784de..320da45065 100644 --- a/source/dnode/mgmt/node_util/CMakeLists.txt +++ b/source/dnode/mgmt/node_util/CMakeLists.txt @@ -6,5 +6,5 @@ target_include_directories( PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/inc" ) target_link_libraries( - node_util cjson mnode vnode qnode snode wal sync taos_static tfs monitor monitorfw + node_util cjson mnode vnode qnode snode wal sync ${TAOS_LIB_STATIC} tfs monitor monitorfw ) \ No newline at end of file diff --git a/source/dnode/mnode/impl/inc/mndStream.h b/source/dnode/mnode/impl/inc/mndStream.h index c9155f536c..b519d8509a 100644 --- a/source/dnode/mnode/impl/inc/mndStream.h +++ b/source/dnode/mnode/impl/inc/mndStream.h @@ -56,6 +56,7 @@ typedef struct SStreamTransMgmt { typedef struct SStreamTaskResetMsg { int64_t streamId; int32_t transId; + int64_t checkpointId; } SStreamTaskResetMsg; typedef struct SChkptReportInfo { @@ -142,9 +143,9 @@ int32_t mndStreamSetResumeAction(STrans *pTrans, SMnode *pMnode, SStreamObj *pSt int32_t mndStreamSetPauseAction(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream); int32_t mndStreamSetDropAction(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream); int32_t mndStreamSetDropActionFromList(SMnode *pMnode, STrans *pTrans, SArray *pList); -int32_t mndStreamSetResetTaskAction(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream); +int32_t mndStreamSetResetTaskAction(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream, int64_t chkptId); int32_t mndStreamSetUpdateChkptAction(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream); -int32_t mndCreateStreamResetStatusTrans(SMnode *pMnode, SStreamObj *pStream); +int32_t mndCreateStreamResetStatusTrans(SMnode *pMnode, SStreamObj *pStream, 
int64_t chkptId); int32_t mndStreamSetChkptIdAction(SMnode *pMnode, STrans *pTrans, SStreamTask* pTask, int64_t checkpointId, int64_t ts); int32_t mndStreamSetRestartAction(SMnode* pMnode, STrans *pTrans, SStreamObj* pStream); int32_t mndStreamSetCheckpointAction(SMnode *pMnode, STrans *pTrans, SStreamTask *pTask, int64_t checkpointId, diff --git a/source/dnode/mnode/impl/src/mndAnode.c b/source/dnode/mnode/impl/src/mndAnode.c index 87bfe9f7af..c64208600a 100644 --- a/source/dnode/mnode/impl/src/mndAnode.c +++ b/source/dnode/mnode/impl/src/mndAnode.c @@ -309,7 +309,7 @@ static int32_t mndCreateAnode(SMnode *pMnode, SRpcMsg *pReq, SMCreateAnodeReq *p anodeObj.updateTime = anodeObj.createdTime; anodeObj.version = 0; anodeObj.urlLen = pCreate->urlLen; - if (anodeObj.urlLen > TSDB_ANAL_ANODE_URL_LEN) { + if (anodeObj.urlLen > TSDB_ANALYTIC_ANODE_URL_LEN) { code = TSDB_CODE_MND_ANODE_TOO_LONG_URL; goto _OVER; } @@ -491,23 +491,24 @@ static int32_t mndSetDropAnodeRedoLogs(STrans *pTrans, SAnodeObj *pObj) { int32_t code = 0; SSdbRaw *pRedoRaw = mndAnodeActionEncode(pObj); if (pRedoRaw == NULL) { - code = TSDB_CODE_MND_RETURN_VALUE_NULL; - if (terrno != 0) code = terrno; - TAOS_RETURN(code); + code = terrno; + return code; } + TAOS_CHECK_RETURN(mndTransAppendRedolog(pTrans, pRedoRaw)); TAOS_CHECK_RETURN(sdbSetRawStatus(pRedoRaw, SDB_STATUS_DROPPING)); - TAOS_RETURN(code); + + return code; } static int32_t mndSetDropAnodeCommitLogs(STrans *pTrans, SAnodeObj *pObj) { int32_t code = 0; SSdbRaw *pCommitRaw = mndAnodeActionEncode(pObj); if (pCommitRaw == NULL) { - code = TSDB_CODE_MND_RETURN_VALUE_NULL; - if (terrno != 0) code = terrno; - TAOS_RETURN(code); + code = terrno; + return code; } + TAOS_CHECK_RETURN(mndTransAppendCommitlog(pTrans, pCommitRaw)); TAOS_CHECK_RETURN(sdbSetRawStatus(pCommitRaw, SDB_STATUS_DROPPED)); TAOS_RETURN(code); @@ -521,25 +522,25 @@ static int32_t mndSetDropAnodeInfoToTrans(SMnode *pMnode, STrans *pTrans, SAnode } static int32_t mndDropAnode(SMnode *pMnode, SRpcMsg *pReq, SAnodeObj *pObj) { - int32_t code = -1; + int32_t code = 0; + int32_t lino = 0; STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pReq, "drop-anode"); - if (pTrans == NULL) { - code = TSDB_CODE_MND_RETURN_VALUE_NULL; - if (terrno != 0) code = terrno; - goto _OVER; - } + TSDB_CHECK_NULL(pTrans, code, lino, _OVER, terrno); + mndTransSetSerial(pTrans); + mInfo("trans:%d, to drop anode:%d", pTrans->id, pObj->id); - mInfo("trans:%d, used to drop anode:%d", pTrans->id, pObj->id); - TAOS_CHECK_GOTO(mndSetDropAnodeInfoToTrans(pMnode, pTrans, pObj, false), NULL, _OVER); - TAOS_CHECK_GOTO(mndTransPrepare(pMnode, pTrans), NULL, _OVER); + code = mndSetDropAnodeInfoToTrans(pMnode, pTrans, pObj, false); + mndReleaseAnode(pMnode, pObj); - code = 0; + TSDB_CHECK_CODE(code, lino, _OVER); + + code = mndTransPrepare(pMnode, pTrans); _OVER: mndTransDrop(pTrans); - TAOS_RETURN(code); + return code; } static int32_t mndProcessDropAnodeReq(SRpcMsg *pReq) { @@ -560,20 +561,20 @@ static int32_t mndProcessDropAnodeReq(SRpcMsg *pReq) { pObj = mndAcquireAnode(pMnode, dropReq.anodeId); if (pObj == NULL) { - code = TSDB_CODE_MND_RETURN_VALUE_NULL; - if (terrno != 0) code = terrno; + code = terrno; goto _OVER; } code = mndDropAnode(pMnode, pReq, pObj); - if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS; + if (code == 0) { + code = TSDB_CODE_ACTION_IN_PROGRESS; + } _OVER: if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { mError("anode:%d, failed to drop since %s", dropReq.anodeId, tstrerror(code)); 
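// ------------------------------------------------------------------------
// Editor's note: the mndDropAnode rewrite above converges on the
// code/lino/goto-_OVER idiom used across the mnode. A compilable toy version
// of the idiom is sketched below; it is NOT TDengine source. CHECK_NULL and
// CHECK_CODE are simplified stand-ins for TSDB_CHECK_NULL/TSDB_CHECK_CODE,
// and doStep() is a placeholder for a real transaction-building call.
#include <stdio.h>
#include <stdlib.h>

#define CHECK_NULL(p, c, l, label, e) \
  do { if ((p) == NULL) { (c) = (e); (l) = __LINE__; goto label; } } while (0)
#define CHECK_CODE(c, l, label) \
  do { if ((c) != 0) { (l) = __LINE__; goto label; } } while (0)

static int doStep(void) { return 0; }  // 0 means success

static int dropThing(void) {
  int   code = 0;
  int   lino = 0;
  void *trans = malloc(16);  // stands in for mndTransCreate()

  CHECK_NULL(trans, code, lino, _OVER, -1);
  code = doStep();           // stands in for mndSetDropAnodeInfoToTrans()
  CHECK_CODE(code, lino, _OVER);
  code = doStep();           // stands in for mndTransPrepare()
  CHECK_CODE(code, lino, _OVER);

_OVER:  // single exit point: failures report the recorded line, cleanup runs once
  if (code != 0) printf("failed at line %d, code %d\n", lino, code);
  free(trans);  // free(NULL) is safe, so the NULL path also lands here
  return code;
}

int main(void) { return dropThing(); }
// ------------------------------------------------------------------------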
} - mndReleaseAnode(pMnode, pObj); tFreeSMDropAnodeReq(&dropReq); TAOS_RETURN(code); } @@ -584,7 +585,7 @@ static int32_t mndRetrieveAnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB int32_t numOfRows = 0; int32_t cols = 0; SAnodeObj *pObj = NULL; - char buf[TSDB_ANAL_ANODE_URL_LEN + VARSTR_HEADER_SIZE]; + char buf[TSDB_ANALYTIC_ANODE_URL_LEN + VARSTR_HEADER_SIZE]; char status[64]; int32_t code = 0; @@ -642,7 +643,7 @@ static int32_t mndRetrieveAnodesFull(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock int32_t numOfRows = 0; int32_t cols = 0; SAnodeObj *pObj = NULL; - char buf[TSDB_ANAL_ALGO_NAME_LEN + VARSTR_HEADER_SIZE]; + char buf[TSDB_ANALYTIC_ALGO_NAME_LEN + VARSTR_HEADER_SIZE]; int32_t code = 0; while (numOfRows < rows) { @@ -693,7 +694,7 @@ static int32_t mndDecodeAlgoList(SJson *pJson, SAnodeObj *pObj) { int32_t code = 0; int32_t protocol = 0; double tmp = 0; - char buf[TSDB_ANAL_ALGO_NAME_LEN + 1] = {0}; + char buf[TSDB_ANALYTIC_ALGO_NAME_LEN + 1] = {0}; code = tjsonGetDoubleValue(pJson, "protocol", &tmp); if (code < 0) return TSDB_CODE_INVALID_JSON_FORMAT; @@ -753,10 +754,10 @@ static int32_t mndDecodeAlgoList(SJson *pJson, SAnodeObj *pObj) { } static int32_t mndGetAnodeAlgoList(const char *url, SAnodeObj *pObj) { - char anodeUrl[TSDB_ANAL_ANODE_URL_LEN + 1] = {0}; - snprintf(anodeUrl, TSDB_ANAL_ANODE_URL_LEN, "%s/%s", url, "list"); + char anodeUrl[TSDB_ANALYTIC_ANODE_URL_LEN + 1] = {0}; + snprintf(anodeUrl, TSDB_ANALYTIC_ANODE_URL_LEN, "%s/%s", url, "list"); - SJson *pJson = taosAnalSendReqRetJson(anodeUrl, ANAL_HTTP_TYPE_GET, NULL); + SJson *pJson = taosAnalSendReqRetJson(anodeUrl, ANALYTICS_HTTP_TYPE_GET, NULL); if (pJson == NULL) return terrno; int32_t code = mndDecodeAlgoList(pJson, pObj); @@ -769,10 +770,10 @@ static int32_t mndGetAnodeStatus(SAnodeObj *pObj, char *status, int32_t statusLe int32_t code = 0; int32_t protocol = 0; double tmp = 0; - char anodeUrl[TSDB_ANAL_ANODE_URL_LEN + 1] = {0}; - snprintf(anodeUrl, TSDB_ANAL_ANODE_URL_LEN, "%s/%s", pObj->url, "status"); + char anodeUrl[TSDB_ANALYTIC_ANODE_URL_LEN + 1] = {0}; + snprintf(anodeUrl, TSDB_ANALYTIC_ANODE_URL_LEN, "%s/%s", pObj->url, "status"); - SJson *pJson = taosAnalSendReqRetJson(anodeUrl, ANAL_HTTP_TYPE_GET, NULL); + SJson *pJson = taosAnalSendReqRetJson(anodeUrl, ANALYTICS_HTTP_TYPE_GET, NULL); if (pJson == NULL) return terrno; code = tjsonGetDoubleValue(pJson, "protocol", &tmp); @@ -808,7 +809,7 @@ static int32_t mndProcessAnalAlgoReq(SRpcMsg *pReq) { SAnodeObj *pObj = NULL; SAnalyticsUrl url; int32_t nameLen; - char name[TSDB_ANAL_ALGO_KEY_LEN]; + char name[TSDB_ANALYTIC_ALGO_KEY_LEN]; SRetrieveAnalAlgoReq req = {0}; SRetrieveAnalAlgoRsp rsp = {0}; @@ -847,13 +848,13 @@ static int32_t mndProcessAnalAlgoReq(SRpcMsg *pReq) { goto _OVER; } } - url.url = taosMemoryMalloc(TSDB_ANAL_ANODE_URL_LEN + TSDB_ANAL_ALGO_TYPE_LEN + 1); + url.url = taosMemoryMalloc(TSDB_ANALYTIC_ANODE_URL_LEN + TSDB_ANALYTIC_ALGO_TYPE_LEN + 1); if (url.url == NULL) { sdbRelease(pSdb, pAnode); goto _OVER; } - url.urlLen = 1 + tsnprintf(url.url, TSDB_ANAL_ANODE_URL_LEN + TSDB_ANAL_ALGO_TYPE_LEN, "%s/%s", pAnode->url, + url.urlLen = 1 + tsnprintf(url.url, TSDB_ANALYTIC_ANODE_URL_LEN + TSDB_ANALYTIC_ALGO_TYPE_LEN, "%s/%s", pAnode->url, taosAnalAlgoUrlStr(url.type)); if (taosHashPut(rsp.hash, name, nameLen, &url, sizeof(SAnalyticsUrl)) != 0) { taosMemoryFree(url.url); diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index 406128e232..0011c11b0a 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c 
+++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -1103,6 +1103,7 @@ static int32_t mndProcessShowVariablesReq(SRpcMsg *pReq) { (void)strcpy(info.name, "statusInterval"); (void)snprintf(info.value, TSDB_CONFIG_VALUE_LEN, "%d", tsStatusInterval); (void)strcpy(info.scope, "server"); + // fill info.info if (taosArrayPush(rsp.variables, &info) == NULL) { code = terrno; goto _OVER; diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c index 6c30193ea7..9dd43225b1 100644 --- a/source/dnode/mnode/impl/src/mndMain.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -53,7 +53,7 @@ static inline int32_t mndAcquireRpc(SMnode *pMnode) { if (pMnode->stopped) { code = TSDB_CODE_APP_IS_STOPPING; } else if (!mndIsLeader(pMnode)) { - code = -1; + code = 1; } else { #if 1 (void)atomic_add_fetch_32(&pMnode->rpcRef, 1); @@ -1002,8 +1002,12 @@ int64_t mndGenerateUid(const char *name, int32_t len) { int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgroupInfo *pVgroupInfo, SMonStbInfo *pStbInfo, SMonGrantInfo *pGrantInfo) { - int32_t code = 0; - TAOS_CHECK_RETURN(mndAcquireRpc(pMnode)); + int32_t code = mndAcquireRpc(pMnode); + if (code < 0) { + TAOS_RETURN(code); + } else if (code == 1) { + TAOS_RETURN(TSDB_CODE_SUCCESS); + } SSdb *pSdb = pMnode->pSdb; int64_t ms = taosGetTimestampMs(); diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 81db427afd..6336cd6e49 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -2434,7 +2434,12 @@ static void doAddReportStreamTask(SArray *pList, int64_t reportChkptId, const SC mDebug("s-task:0x%x expired checkpoint-report msg in checkpoint-report list update from %" PRId64 "->%" PRId64, pReport->taskId, p->checkpointId, pReport->checkpointId); - memcpy(p, pReport, sizeof(STaskChkptInfo)); + // update the checkpoint report info + p->checkpointId = pReport->checkpointId; + p->ts = pReport->checkpointTs; + p->version = pReport->checkpointVer; + p->transId = pReport->transId; + p->dropHTask = pReport->dropHTask; } else { mWarn("taskId:0x%x already in checkpoint-report list", pReport->taskId); } diff --git a/source/dnode/mnode/impl/src/mndStreamErrorInjection.c b/source/dnode/mnode/impl/src/mndStreamErrorInjection.c new file mode 100644 index 0000000000..c68416369d --- /dev/null +++ b/source/dnode/mnode/impl/src/mndStreamErrorInjection.c @@ -0,0 +1,72 @@ +#include "mndTrans.h" + +uint32_t seed = 0; + +static SRpcMsg createRpcMsg(STransAction* pAction, int64_t traceId, int64_t signature) { + SRpcMsg rpcMsg = {.msgType = pAction->msgType, .contLen = pAction->contLen, .info.ahandle = (void *)signature}; + rpcMsg.pCont = rpcMallocCont(pAction->contLen); + if (rpcMsg.pCont == NULL) { + return rpcMsg; + } + + rpcMsg.info.traceId.rootId = traceId; + rpcMsg.info.notFreeAhandle = 1; + + memcpy(rpcMsg.pCont, pAction->pCont, pAction->contLen); + return rpcMsg; +} + +void streamTransRandomErrorGen(STransAction *pAction, STrans *pTrans, int64_t signature) { + if ((pAction->msgType == TDMT_STREAM_TASK_UPDATE_CHKPT && pAction->id > 2) || + (pAction->msgType == TDMT_STREAM_CONSEN_CHKPT) || + (pAction->msgType == TDMT_VND_STREAM_CHECK_POINT_SOURCE && pAction->id > 2)) { + if (seed == 0) { + seed = taosGetTimestampSec(); + } + + uint32_t v = taosRandR(&seed); + int32_t choseItem = v % 5; + + if (choseItem == 0) { + // 1. 
one of update-checkpoint not send, restart and send it again + taosMsleep(5000); + if (pAction->msgType == TDMT_STREAM_TASK_UPDATE_CHKPT) { + mError( + "***sleep 5s and core dump, following tasks will not recv update-checkpoint info, so the checkpoint will " + "rollback***"); + exit(-1); + } else if (pAction->msgType == TDMT_STREAM_CONSEN_CHKPT) { // pAction->msgType == TDMT_STREAM_CONSEN_CHKPT + mError( + "***sleep 5s and core dump, following tasks will not recv consen-checkpoint info, so the tasks will " + "not started***"); + } else { // pAction->msgType == TDMT_VND_STREAM_CHECK_POINT_SOURCE + mError( + "***sleep 5s and core dump, following tasks will not recv checkpoint-source info, so the tasks will " + "started after restart***"); + exit(-1); + } + } else if (choseItem == 1) { + // 2. repeat send update chkpt msg + mError("***repeat send update-checkpoint/consensus/checkpoint trans msg 3times to vnode***"); + + mError("***repeat 1***"); + SRpcMsg rpcMsg1 = createRpcMsg(pAction, pTrans->mTraceId, signature); + int32_t code = tmsgSendReq(&pAction->epSet, &rpcMsg1); + + mError("***repeat 2***"); + SRpcMsg rpcMsg2 = createRpcMsg(pAction, pTrans->mTraceId, signature); + code = tmsgSendReq(&pAction->epSet, &rpcMsg2); + + mError("***repeat 3***"); + SRpcMsg rpcMsg3 = createRpcMsg(pAction, pTrans->mTraceId, signature); + code = tmsgSendReq(&pAction->epSet, &rpcMsg3); + } else if (choseItem == 2) { + // 3. sleep 40s and then send msg + mError("***idle for 30s, and then send msg***"); + taosMsleep(30000); + } else { + // do nothing + // mInfo("no error triggered"); + } + } +} diff --git a/source/dnode/mnode/impl/src/mndStreamHb.c b/source/dnode/mnode/impl/src/mndStreamHb.c index 941956ae2b..4b3db28aa1 100644 --- a/source/dnode/mnode/impl/src/mndStreamHb.c +++ b/source/dnode/mnode/impl/src/mndStreamHb.c @@ -24,7 +24,7 @@ typedef struct SFailedCheckpointInfo { static int32_t mndStreamSendUpdateChkptInfoMsg(SMnode *pMnode); static int32_t mndSendDropOrphanTasksMsg(SMnode *pMnode, SArray *pList); -static int32_t mndSendResetFromCheckpointMsg(SMnode *pMnode, int64_t streamId, int32_t transId); +static int32_t mndSendResetFromCheckpointMsg(SMnode *pMnode, int64_t streamId, int32_t transId, int64_t checkpointId); static void updateStageInfo(STaskStatusEntry *pTaskEntry, int64_t stage); static void addIntoFailedChkptList(SArray *pList, const SFailedCheckpointInfo *pInfo); static int32_t setNodeEpsetExpiredFlag(const SArray *pNodeList); @@ -68,7 +68,7 @@ void addIntoFailedChkptList(SArray *pList, const SFailedCheckpointInfo *pInfo) { } } -int32_t mndCreateStreamResetStatusTrans(SMnode *pMnode, SStreamObj *pStream) { +int32_t mndCreateStreamResetStatusTrans(SMnode *pMnode, SStreamObj *pStream, int64_t chkptId) { STrans *pTrans = NULL; int32_t code = doCreateTrans(pMnode, pStream, NULL, TRN_CONFLICT_NOTHING, MND_STREAM_TASK_RESET_NAME, " reset from failed checkpoint", &pTrans); @@ -84,7 +84,7 @@ int32_t mndCreateStreamResetStatusTrans(SMnode *pMnode, SStreamObj *pStream) { return code; } - code = mndStreamSetResetTaskAction(pMnode, pTrans, pStream); + code = mndStreamSetResetTaskAction(pMnode, pTrans, pStream, chkptId); if (code) { sdbRelease(pMnode->pSdb, pStream); mndTransDrop(pTrans); @@ -115,7 +115,7 @@ int32_t mndCreateStreamResetStatusTrans(SMnode *pMnode, SStreamObj *pStream) { return code; } -int32_t mndSendResetFromCheckpointMsg(SMnode *pMnode, int64_t streamId, int32_t transId) { +int32_t mndSendResetFromCheckpointMsg(SMnode *pMnode, int64_t streamId, int32_t transId, int64_t checkpointId) { 
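// ------------------------------------------------------------------------
// Editor's note: mndSendResetFromCheckpointMsg below keeps a bounded list of
// already-killed checkpoint transactions, dropping the oldest record before
// appending when the list is full. A standalone sketch of that bounded-FIFO
// pattern follows; it is NOT TDengine source, and MAX_KILLED plus the struct
// fields are illustrative only (the real code uses taosArrayRemove(..., 0)
// and taosArrayPush on execInfo.pKilledChkptTrans).
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_KILLED 8

typedef struct {
  int64_t streamId;
  int32_t transId;
  int64_t checkpointId;
} SKilledRec;

static SKilledRec killed[MAX_KILLED];
static int        nKilled = 0;

static void rememberKilled(int64_t streamId, int32_t transId, int64_t chkptId) {
  if (nKilled == MAX_KILLED) {  // full: drop the oldest entry first
    memmove(killed, killed + 1, (MAX_KILLED - 1) * sizeof(SKilledRec));
    nKilled--;
  }
  killed[nKilled++] = (SKilledRec){streamId, transId, chkptId};
}

int main(void) {
  for (int32_t i = 0; i < 10; i++) rememberKilled(0xABCD, i, 1000 + i);
  // capacity 8, so the two oldest records were evicted:
  printf("kept %d records, oldest transId:%d\n", nKilled, killed[0].transId);
  return 0;
}
// ------------------------------------------------------------------------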
  int32_t size = sizeof(SStreamTaskResetMsg);
 
   int32_t num = taosArrayGetSize(execInfo.pKilledChkptTrans);
@@ -135,8 +135,9 @@ int32_t mndSendResetFromCheckpointMsg(SMnode *pMnode, int64_t streamId, int32_t
     taosArrayRemove(execInfo.pKilledChkptTrans, 0);  // remove this first, append new reset trans in the tail
   }
 
-  SStreamTaskResetMsg p = {.streamId = streamId, .transId = transId};
+  SStreamTaskResetMsg p = {.streamId = streamId, .transId = transId, .checkpointId = checkpointId};
+  // remember that this trans has already been killed
   void *px = taosArrayPush(execInfo.pKilledChkptTrans, &p);
   if (px == NULL) {
     mError("failed to push reset-msg trans:%d into the killed chkpt trans list, size:%d", transId, num - 1);
@@ -150,6 +151,7 @@ int32_t mndSendResetFromCheckpointMsg(SMnode *pMnode, int64_t streamId, int32_t
 
   pReq->streamId = streamId;
   pReq->transId = transId;
+  pReq->checkpointId = checkpointId;
 
   SRpcMsg rpcMsg = {.msgType = TDMT_MND_STREAM_TASK_RESET, .pCont = pReq, .contLen = size};
   int32_t code = tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg);
@@ -234,7 +236,7 @@ int32_t mndProcessResetStatusReq(SRpcMsg *pReq) {
   } else {
     mDebug("stream:%s (0x%" PRIx64 ") reset checkpoint procedure, transId:%d, create reset trans", pStream->name,
            pStream->uid, pMsg->transId);
-    code = mndCreateStreamResetStatusTrans(pMnode, pStream);
+    code = mndCreateStreamResetStatusTrans(pMnode, pStream, pMsg->checkpointId);
   }
 }
 
@@ -379,9 +381,10 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) {
   }
 
   if ((pEntry->lastHbMsgId == req.msgId) && (pEntry->lastHbMsgTs == req.ts)) {
-    mError("vgId:%d HbMsgId:%d already handled, bh msg discard", pEntry->nodeId, req.msgId);
+    mError("vgId:%d HbMsgId:%d already handled, duplicated msg discarded, still send HbRsp", pEntry->nodeId, req.msgId);
 
-    terrno = TSDB_CODE_INVALID_MSG;
+    // return success directly so that the vnode continues to send the next HbMsg.
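// ------------------------------------------------------------------------
// Editor's note: the heartbeat change above is about idempotency. Answering a
// replayed (msgId, ts) pair with an error can stall the sender; acknowledging
// it with success lets the vnode move on to the next heartbeat. A minimal
// sketch of that dedupe-then-ACK logic follows; it is NOT TDengine source and
// the types/names (SHbState, onHeartbeat) are invented for illustration.
#include <stdint.h>
#include <stdio.h>

typedef struct {
  int32_t lastMsgId;
  int64_t lastTs;
} SHbState;

// Returns 0 (success) for both fresh and duplicate messages; only a fresh
// message advances the state. The caller sends an ACK carrying this code
// either way, mirroring doSendHbMsgRsp in the patch.
static int32_t onHeartbeat(SHbState *st, int32_t msgId, int64_t ts) {
  if (st->lastMsgId == msgId && st->lastTs == ts) {
    printf("msgId:%d duplicate, body discarded, still ACKed\n", msgId);
    return 0;
  }
  st->lastMsgId = msgId;
  st->lastTs = ts;
  printf("msgId:%d processed\n", msgId);
  return 0;
}

int main(void) {
  SHbState st = {0};
  (void)onHeartbeat(&st, 7, 111);
  (void)onHeartbeat(&st, 7, 111);  // replay: discarded but still ACKed
  return 0;
}
// ------------------------------------------------------------------------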
+ terrno = TSDB_CODE_SUCCESS; doSendHbMsgRsp(terrno, &pReq->info, req.vgId, req.msgId); streamMutexUnlock(&execInfo.lock); @@ -495,10 +498,11 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) { continue; } - mInfo("checkpointId:%" PRId64 " transId:%d failed, issue task-reset trans to reset all tasks status", - pInfo->checkpointId, pInfo->transId); + mInfo("stream:0x%" PRIx64 " checkpointId:%" PRId64 + " transId:%d failed issue task-reset trans to reset all tasks status", + pInfo->streamUid, pInfo->checkpointId, pInfo->transId); - code = mndSendResetFromCheckpointMsg(pMnode, pInfo->streamUid, pInfo->transId); + code = mndSendResetFromCheckpointMsg(pMnode, pInfo->streamUid, pInfo->transId, pInfo->checkpointId); if (code) { mError("failed to create reset task trans, code:%s", tstrerror(code)); } @@ -549,12 +553,37 @@ void cleanupAfterProcessHbMsg(SStreamHbMsg *pReq, SArray *pFailedChkptList, SArr } void doSendHbMsgRsp(int32_t code, SRpcHandleInfo *pRpcInfo, int32_t vgId, int32_t msgId) { - SRpcMsg rsp = {.code = code, .info = *pRpcInfo, .contLen = sizeof(SMStreamHbRspMsg)}; - rsp.pCont = rpcMallocCont(rsp.contLen); + int32_t ret = 0; + int32_t tlen = 0; + void *buf = NULL; - SMStreamHbRspMsg *pMsg = rsp.pCont; - pMsg->head.vgId = htonl(vgId); - pMsg->msgId = msgId; + const SMStreamHbRspMsg msg = {.msgId = msgId}; + + tEncodeSize(tEncodeStreamHbRsp, &msg, tlen, ret); + if (ret < 0) { + mError("encode stream hb msg rsp failed, code:%s", tstrerror(code)); + } + + buf = rpcMallocCont(tlen + sizeof(SMsgHead)); + if (buf == NULL) { + mError("encode stream hb msg rsp failed, code:%s", tstrerror(terrno)); + return; + } + + ((SMStreamHbRspMsg*)buf)->head.vgId = htonl(vgId); + void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead)); + + SEncoder encoder; + tEncoderInit(&encoder, abuf, tlen); + if ((code = tEncodeStreamHbRsp(&encoder, &msg)) < 0) { + rpcFreeCont(buf); + tEncoderClear(&encoder); + mError("encode stream hb msg rsp failed, code:%s", tstrerror(code)); + return; + } + tEncoderClear(&encoder); + + SRpcMsg rsp = {.code = code, .info = *pRpcInfo, .contLen = tlen + sizeof(SMsgHead), .pCont = buf}; tmsgSendRsp(&rsp); pRpcInfo->handle = NULL; // disable auto rsp diff --git a/source/dnode/mnode/impl/src/mndStreamTransAct.c b/source/dnode/mnode/impl/src/mndStreamTransAct.c index 139ea4f147..5ccb626609 100644 --- a/source/dnode/mnode/impl/src/mndStreamTransAct.c +++ b/source/dnode/mnode/impl/src/mndStreamTransAct.c @@ -295,7 +295,7 @@ static int32_t doSetUpdateChkptAction(SMnode *pMnode, STrans *pTrans, SStreamTas return code; } -static int32_t doSetResetAction(SMnode *pMnode, STrans *pTrans, SStreamTask *pTask) { +static int32_t doSetResetAction(SMnode *pMnode, STrans *pTrans, SStreamTask *pTask, int64_t chkptId) { SVResetStreamTaskReq *pReq = taosMemoryCalloc(1, sizeof(SVResetStreamTaskReq)); if (pReq == NULL) { mError("failed to malloc in reset stream, size:%" PRIzu ", code:%s", sizeof(SVResetStreamTaskReq), @@ -306,6 +306,7 @@ static int32_t doSetResetAction(SMnode *pMnode, STrans *pTrans, SStreamTask *pTa pReq->head.vgId = htonl(pTask->info.nodeId); pReq->taskId = pTask->id.taskId; pReq->streamId = pTask->id.streamId; + pReq->chkptId = chkptId; SEpSet epset = {0}; bool hasEpset = false; @@ -544,7 +545,7 @@ int32_t mndStreamSetDropActionFromList(SMnode *pMnode, STrans *pTrans, SArray* p return 0; } -int32_t mndStreamSetResetTaskAction(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream) { +int32_t mndStreamSetResetTaskAction(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream, int64_t chkptId) { 
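// ------------------------------------------------------------------------
// Editor's note: doSendHbMsgRsp above switches from casting a fixed struct
// into the rpc buffer to a two-pass "measure, then encode" scheme. The sketch
// below shows that pattern in isolation; it is NOT TDengine source. The toy
// SEnc encoder, SMsgHeadToy, and buildRsp stand in for tEncodeSize/
// tEncoderInit/tEncodeStreamHbRsp and SMsgHead; the real code additionally
// converts head.vgId with htonl().
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  uint8_t *buf;  // NULL => size-only pass
  size_t   pos;
} SEnc;

static void encI32(SEnc *e, int32_t v) {
  if (e->buf != NULL) memcpy(e->buf + e->pos, &v, sizeof(v));
  e->pos += sizeof(v);
}

typedef struct { int32_t vgId; } SMsgHeadToy;  // illustrative routing header

static void *buildRsp(int32_t vgId, int32_t msgId, size_t *outLen) {
  SEnc sizer = {0};
  encI32(&sizer, msgId);  // pass 1: run the encoder to measure the payload
  size_t tlen = sizeof(SMsgHeadToy) + sizer.pos;

  uint8_t *buf = malloc(tlen);  // header + payload in one contiguous block
  if (buf == NULL) return NULL;
  ((SMsgHeadToy *)buf)->vgId = vgId;  // header is written directly
  SEnc w = {.buf = buf + sizeof(SMsgHeadToy), .pos = 0};
  encI32(&w, msgId);  // pass 2: encode the payload just past the header
  *outLen = tlen;
  return buf;
}

int main(void) {
  size_t len = 0;
  void  *rsp = buildRsp(2, 42, &len);
  if (rsp != NULL) printf("rsp len:%zu\n", len);
  free(rsp);
  return 0;
}
// ------------------------------------------------------------------------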
SStreamTaskIter *pIter = NULL; taosWLockLatch(&pStream->lock); @@ -564,7 +565,7 @@ int32_t mndStreamSetResetTaskAction(SMnode *pMnode, STrans *pTrans, SStreamObj * return code; } - code = doSetResetAction(pMnode, pTrans, pTask); + code = doSetResetAction(pMnode, pTrans, pTask, chkptId); if (code != TSDB_CODE_SUCCESS) { destroyStreamTaskIter(pIter); taosWUnLockLatch(&pStream->lock); @@ -606,7 +607,7 @@ int32_t mndStreamSetChkptIdAction(SMnode *pMnode, STrans *pTrans, SStreamTask* p tEncoderInit(&encoder, abuf, tlen); code = tEncodeRestoreCheckpointInfo(&encoder, &req); tEncoderClear(&encoder); - if (code == -1) { + if (code < 0) { taosMemoryFree(pBuf); return code; } diff --git a/source/dnode/mnode/impl/src/mndStreamUtil.c b/source/dnode/mnode/impl/src/mndStreamUtil.c index f9b7644af4..615c383f07 100644 --- a/source/dnode/mnode/impl/src/mndStreamUtil.c +++ b/source/dnode/mnode/impl/src/mndStreamUtil.c @@ -1521,74 +1521,4 @@ int32_t mndCheckForSnode(SMnode *pMnode, SDbObj *pSrcDb) { mError("snode not existed when trying to create stream in db with multiple replica"); return TSDB_CODE_SNODE_NOT_DEPLOYED; } -} - -uint32_t seed = 0; -static SRpcMsg createRpcMsg(STransAction* pAction, int64_t traceId, int64_t signature) { - SRpcMsg rpcMsg = {.msgType = pAction->msgType, .contLen = pAction->contLen, .info.ahandle = (void *)signature}; - rpcMsg.pCont = rpcMallocCont(pAction->contLen); - if (rpcMsg.pCont == NULL) { - return rpcMsg; - } - - rpcMsg.info.traceId.rootId = traceId; - rpcMsg.info.notFreeAhandle = 1; - - memcpy(rpcMsg.pCont, pAction->pCont, pAction->contLen); - return rpcMsg; -} - -void streamTransRandomErrorGen(STransAction *pAction, STrans *pTrans, int64_t signature) { - if ((pAction->msgType == TDMT_STREAM_TASK_UPDATE_CHKPT && pAction->id > 2) || - (pAction->msgType == TDMT_STREAM_CONSEN_CHKPT) || - (pAction->msgType == TDMT_VND_STREAM_CHECK_POINT_SOURCE && pAction->id > 2)) { - if (seed == 0) { - seed = taosGetTimestampSec(); - } - - uint32_t v = taosRandR(&seed); - int32_t choseItem = v % 5; - - if (choseItem == 0) { - // 1. one of update-checkpoint not send, restart and send it again - taosMsleep(5000); - if (pAction->msgType == TDMT_STREAM_TASK_UPDATE_CHKPT) { - mError( - "***sleep 5s and core dump, following tasks will not recv update-checkpoint info, so the checkpoint will " - "rollback***"); - exit(-1); - } else if (pAction->msgType == TDMT_STREAM_CONSEN_CHKPT) { // pAction->msgType == TDMT_STREAM_CONSEN_CHKPT - mError( - "***sleep 5s and core dump, following tasks will not recv consen-checkpoint info, so the tasks will " - "not started***"); - } else { // pAction->msgType == TDMT_VND_STREAM_CHECK_POINT_SOURCE - mError( - "***sleep 5s and core dump, following tasks will not recv checkpoint-source info, so the tasks will " - "started after restart***"); - exit(-1); - } - } else if (choseItem == 1) { - // 2. repeat send update chkpt msg - mError("***repeat send update-checkpoint/consensus/checkpoint trans msg 3times to vnode***"); - - mError("***repeat 1***"); - SRpcMsg rpcMsg1 = createRpcMsg(pAction, pTrans->mTraceId, signature); - int32_t code = tmsgSendReq(&pAction->epSet, &rpcMsg1); - - mError("***repeat 2***"); - SRpcMsg rpcMsg2 = createRpcMsg(pAction, pTrans->mTraceId, signature); - code = tmsgSendReq(&pAction->epSet, &rpcMsg2); - - mError("***repeat 3***"); - SRpcMsg rpcMsg3 = createRpcMsg(pAction, pTrans->mTraceId, signature); - code = tmsgSendReq(&pAction->epSet, &rpcMsg3); - } else if (choseItem == 2) { - // 3. 
sleep 40s and then send msg - mError("***idle for 30s, and then send msg***"); - taosMsleep(30000); - } else { - // do nothing - // mInfo("no error triggered"); - } - } } \ No newline at end of file diff --git a/source/dnode/mnode/impl/test/stream/stream.cpp b/source/dnode/mnode/impl/test/stream/stream.cpp index d508cf7390..45bc4c2ce2 100644 --- a/source/dnode/mnode/impl/test/stream/stream.cpp +++ b/source/dnode/mnode/impl/test/stream/stream.cpp @@ -246,7 +246,7 @@ TEST_F(StreamTest, kill_checkpoint_trans) { px = taosArrayPush(pStream->tasks, &pLevel); ASSERT(px != NULL); - code = mndCreateStreamResetStatusTrans(pMnode, pStream); + code = mndCreateStreamResetStatusTrans(pMnode, pStream, 1); ASSERT(code != 0); tFreeStreamObj(pStream); diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c index 659ba3f777..9a5bea33e3 100644 --- a/source/dnode/vnode/src/meta/metaOpen.c +++ b/source/dnode/vnode/src/meta/metaOpen.c @@ -324,7 +324,11 @@ static int32_t metaGenerateNewMeta(SMeta **ppMeta) { SMetaEntry me = {0}; tDecoderInit(&dc, value, valueSize); if (metaDecodeEntry(&dc, &me) == 0) { - if (metaHandleEntry(pNewMeta, &me) != 0) { + if (me.type == TSDB_CHILD_TABLE && + tdbTbGet(pMeta->pUidIdx, &me.ctbEntry.suid, sizeof(me.ctbEntry.suid), NULL, NULL) != 0) { + metaError("vgId:%d failed to get super table uid:%" PRId64 " for child table uid:%" PRId64, + TD_VID(pVnode), me.ctbEntry.suid, uid); + } else if (metaHandleEntry(pNewMeta, &me) != 0) { metaError("vgId:%d failed to handle entry, uid:%" PRId64, TD_VID(pVnode), uid); } } diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 6195899566..a234777441 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -1009,21 +1009,34 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { } int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) { - SStreamTaskRunReq* pReq = pMsg->pCont; + int32_t code = 0; + char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); + int32_t len = pMsg->contLen - sizeof(SMsgHead); + SDecoder decoder; + + SStreamTaskRunReq req = {0}; + tDecoderInit(&decoder, (uint8_t*)msg, len); + if ((code = tDecodeStreamTaskRunReq(&decoder, &req)) < 0) { + tqError("vgId:%d failed to decode task run req, code:%s", pTq->pStreamMeta->vgId, tstrerror(code)); + tDecoderClear(&decoder); + return TSDB_CODE_SUCCESS; + } + + tDecoderClear(&decoder); // extracted submit data from wal files for all tasks - if (pReq->reqType == STREAM_EXEC_T_EXTRACT_WAL_DATA) { + if (req.reqType == STREAM_EXEC_T_EXTRACT_WAL_DATA) { return tqScanWal(pTq); } - int32_t code = tqStreamTaskProcessRunReq(pTq->pStreamMeta, pMsg, vnodeIsRoleLeader(pTq->pVnode)); + code = tqStreamTaskProcessRunReq(pTq->pStreamMeta, pMsg, vnodeIsRoleLeader(pTq->pVnode)); if (code) { tqError("vgId:%d failed to create task run req, code:%s", TD_VID(pTq->pVnode), tstrerror(code)); return code; } // let's continue scan data in the wal files - if (pReq->reqType >= 0 || pReq->reqType == STREAM_EXEC_T_RESUME_TASK) { + if (req.reqType >= 0 || req.reqType == STREAM_EXEC_T_RESUME_TASK) { code = tqScanWalAsync(pTq, false); // it's ok to failed if (code) { tqError("vgId:%d failed to start scan wal file, code:%s", pTq->pStreamMeta->vgId, tstrerror(code)); @@ -1297,7 +1310,7 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) int32_t tqProcessTaskCheckpointReadyMsg(STQ* pTq, SRpcMsg* pMsg) { int32_t vgId = TD_VID(pTq->pVnode); - SRetrieveChkptTriggerReq* pReq = 
(SRetrieveChkptTriggerReq*)pMsg->pCont; + SStreamCheckpointReadyMsg* pReq = (SStreamCheckpointReadyMsg*)pMsg->pCont; if (!vnodeIsRoleLeader(pTq->pVnode)) { tqError("vgId:%d not leader, ignore the retrieve checkpoint-trigger msg from 0x%x", vgId, (int32_t)pReq->downstreamTaskId); @@ -1318,10 +1331,23 @@ int32_t tqProcessTaskResetReq(STQ* pTq, SRpcMsg* pMsg) { int32_t tqProcessTaskRetrieveTriggerReq(STQ* pTq, SRpcMsg* pMsg) { int32_t vgId = TD_VID(pTq->pVnode); - SRetrieveChkptTriggerReq* pReq = (SRetrieveChkptTriggerReq*)pMsg->pCont; if (!vnodeIsRoleLeader(pTq->pVnode)) { - tqError("vgId:%d not leader, ignore the retrieve checkpoint-trigger msg from 0x%x", vgId, - (int32_t)pReq->downstreamTaskId); + SRetrieveChkptTriggerReq req = {0}; + + char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); + int32_t len = pMsg->contLen - sizeof(SMsgHead); + SDecoder decoder = {0}; + + tDecoderInit(&decoder, (uint8_t*)msg, len); + if (tDecodeRetrieveChkptTriggerReq(&decoder, &req) < 0) { + tDecoderClear(&decoder); + tqError("vgId:%d invalid retrieve checkpoint-trigger req received", vgId); + return TSDB_CODE_INVALID_MSG; + } + tDecoderClear(&decoder); + + tqError("vgId:%d not leader, ignore the retrieve checkpoint-trigger msg from s-task:0x%" PRId64, vgId, + req.downstreamTaskId); return TSDB_CODE_STREAM_NOT_LEADER; } diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c index f31dd28847..3f67503454 100644 --- a/source/dnode/vnode/src/tqCommon/tqCommon.c +++ b/source/dnode/vnode/src/tqCommon/tqCommon.c @@ -828,14 +828,25 @@ static int32_t restartStreamTasks(SStreamMeta* pMeta, bool isLeader) { } int32_t tqStreamTaskProcessRunReq(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLeader) { - SStreamTaskRunReq* pReq = pMsg->pCont; + int32_t code = 0; + int32_t vgId = pMeta->vgId; + char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); + int32_t len = pMsg->contLen - sizeof(SMsgHead); + SDecoder decoder; - int32_t type = pReq->reqType; - int32_t vgId = pMeta->vgId; - int32_t code = 0; + SStreamTaskRunReq req = {0}; + tDecoderInit(&decoder, (uint8_t*)msg, len); + if ((code = tDecodeStreamTaskRunReq(&decoder, &req)) < 0) { + tqError("vgId:%d failed to decode task run req, code:%s", pMeta->vgId, tstrerror(code)); + tDecoderClear(&decoder); + return TSDB_CODE_SUCCESS; + } + tDecoderClear(&decoder); + + int32_t type = req.reqType; if (type == STREAM_EXEC_T_START_ONE_TASK) { - code = streamMetaStartOneTask(pMeta, pReq->streamId, pReq->taskId); + code = streamMetaStartOneTask(pMeta, req.streamId, req.taskId); return 0; } else if (type == STREAM_EXEC_T_START_ALL_TASKS) { code = streamMetaStartAllTasks(pMeta); @@ -847,11 +858,11 @@ int32_t tqStreamTaskProcessRunReq(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLead code = streamMetaStopAllTasks(pMeta); return 0; } else if (type == STREAM_EXEC_T_ADD_FAILED_TASK) { - code = streamMetaAddFailedTask(pMeta, pReq->streamId, pReq->taskId); + code = streamMetaAddFailedTask(pMeta, req.streamId, req.taskId); return code; } else if (type == STREAM_EXEC_T_RESUME_TASK) { // task resume to run after idle for a while SStreamTask* pTask = NULL; - code = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId, &pTask); + code = streamMetaAcquireTask(pMeta, req.streamId, req.taskId, &pTask); if (pTask != NULL && (code == 0)) { char* pStatus = NULL; @@ -873,7 +884,7 @@ int32_t tqStreamTaskProcessRunReq(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLead } SStreamTask* pTask = NULL; - code = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId, 
&pTask); + code = streamMetaAcquireTask(pMeta, req.streamId, req.taskId, &pTask); if ((pTask != NULL) && (code == 0)) { // even in halt status, the data in inputQ must be processed char* p = NULL; if (streamTaskReadyToRun(pTask, &p)) { @@ -890,7 +901,7 @@ int32_t tqStreamTaskProcessRunReq(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLead return 0; } else { // NOTE: pTask->status.schedStatus is not updated since it is not be handled by the run exec. // todo add one function to handle this - tqError("vgId:%d failed to found s-task, taskId:0x%x may have been dropped", vgId, pReq->taskId); + tqError("vgId:%d failed to found s-task, taskId:0x%x may have been dropped", vgId, req.taskId); return code; } } @@ -939,7 +950,7 @@ int32_t tqStartTaskCompleteCallback(SStreamMeta* pMeta) { } int32_t tqStreamTaskProcessTaskResetReq(SStreamMeta* pMeta, char* pMsg) { - SVPauseStreamTaskReq* pReq = (SVPauseStreamTaskReq*)pMsg; + SVResetStreamTaskReq* pReq = (SVResetStreamTaskReq*)pMsg; SStreamTask* pTask = NULL; int32_t code = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId, &pTask); @@ -954,17 +965,13 @@ int32_t tqStreamTaskProcessTaskResetReq(SStreamMeta* pMeta, char* pMsg) { streamMutexLock(&pTask->lock); streamTaskClearCheckInfo(pTask, true); + streamTaskSetFailedCheckpointId(pTask, pReq->chkptId); + // clear flag set during do checkpoint, and open inputQ for all upstream tasks SStreamTaskState pState = streamTaskGetStatus(pTask); if (pState.state == TASK_STATUS__CK) { - int32_t tranId = 0; - int64_t activeChkId = 0; - streamTaskGetActiveCheckpointInfo(pTask, &tranId, &activeChkId); - - tqDebug("s-task:%s reset task status from checkpoint, current checkpointingId:%" PRId64 ", transId:%d", - pTask->id.idStr, activeChkId, tranId); - streamTaskSetStatusReady(pTask); + tqDebug("s-task:%s reset checkpoint status to ready", pTask->id.idStr); } else if (pState.state == TASK_STATUS__UNINIT) { // tqDebug("s-task:%s start task by checking downstream tasks", pTask->id.idStr); // tqStreamTaskRestoreCheckpoint(pMeta, pTask->id.streamId, pTask->id.taskId); @@ -980,25 +987,36 @@ int32_t tqStreamTaskProcessTaskResetReq(SStreamMeta* pMeta, char* pMsg) { } int32_t tqStreamTaskProcessRetrieveTriggerReq(SStreamMeta* pMeta, SRpcMsg* pMsg) { - SRetrieveChkptTriggerReq* pReq = (SRetrieveChkptTriggerReq*)pMsg->pCont; + SRetrieveChkptTriggerReq req = {0}; + SStreamTask* pTask = NULL; + char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); + int32_t len = pMsg->contLen - sizeof(SMsgHead); + SDecoder decoder = {0}; - SStreamTask* pTask = NULL; - int32_t code = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->upstreamTaskId, &pTask); + tDecoderInit(&decoder, (uint8_t*)msg, len); + if (tDecodeRetrieveChkptTriggerReq(&decoder, &req) < 0) { + tDecoderClear(&decoder); + tqError("vgId:%d invalid retrieve checkpoint-trigger req received", pMeta->vgId); + return TSDB_CODE_INVALID_MSG; + } + tDecoderClear(&decoder); + + int32_t code = streamMetaAcquireTask(pMeta, req.streamId, req.upstreamTaskId, &pTask); if (pTask == NULL || (code != 0)) { - tqError("vgId:%d process retrieve checkpoint trigger, checkpointId:%" PRId64 + tqError("vgId:%d process retrieve checkpoint-trigger, checkpointId:%" PRId64 " from s-task:0x%x, failed to acquire task:0x%x, it may have been dropped already", - pMeta->vgId, pReq->checkpointId, (int32_t)pReq->downstreamTaskId, pReq->upstreamTaskId); + pMeta->vgId, req.checkpointId, (int32_t)req.downstreamTaskId, req.upstreamTaskId); return TSDB_CODE_STREAM_TASK_NOT_EXIST; } tqDebug("s-task:0x%x recv retrieve 
checkpoint-trigger msg from downstream s-task:0x%x, checkpointId:%" PRId64, - pReq->upstreamTaskId, (int32_t)pReq->downstreamTaskId, pReq->checkpointId); + req.upstreamTaskId, (int32_t)req.downstreamTaskId, req.checkpointId); if (pTask->status.downstreamReady != 1) { tqError("s-task:%s not ready for checkpoint-trigger retrieve from 0x%x, since downstream not ready", - pTask->id.idStr, (int32_t)pReq->downstreamTaskId); + pTask->id.idStr, (int32_t)req.downstreamTaskId); - code = streamTaskSendCheckpointTriggerMsg(pTask, pReq->downstreamTaskId, pReq->downstreamNodeId, &pMsg->info, + code = streamTaskSendCheckpointTriggerMsg(pTask, req.downstreamTaskId, req.downstreamNodeId, &pMsg->info, TSDB_CODE_STREAM_TASK_IVLD_STATUS); streamMetaReleaseTask(pMeta, pTask); return code; @@ -1010,19 +1028,19 @@ int32_t tqStreamTaskProcessRetrieveTriggerReq(SStreamMeta* pMeta, SRpcMsg* pMsg) int64_t checkpointId = 0; streamTaskGetActiveCheckpointInfo(pTask, &transId, &checkpointId); - if (checkpointId != pReq->checkpointId) { + if (checkpointId != req.checkpointId) { tqError("s-task:%s invalid checkpoint-trigger retrieve msg from 0x%" PRIx64 ", current checkpointId:%" PRId64 " req:%" PRId64, - pTask->id.idStr, pReq->downstreamTaskId, checkpointId, pReq->checkpointId); + pTask->id.idStr, req.downstreamTaskId, checkpointId, req.checkpointId); streamMetaReleaseTask(pMeta, pTask); return TSDB_CODE_INVALID_MSG; } - if (streamTaskAlreadySendTrigger(pTask, pReq->downstreamNodeId)) { + if (streamTaskAlreadySendTrigger(pTask, req.downstreamNodeId)) { // re-send the lost checkpoint-trigger msg to downstream task tqDebug("s-task:%s re-send checkpoint-trigger to:0x%x, checkpointId:%" PRId64 ", transId:%d", pTask->id.idStr, - (int32_t)pReq->downstreamTaskId, checkpointId, transId); - code = streamTaskSendCheckpointTriggerMsg(pTask, pReq->downstreamTaskId, pReq->downstreamNodeId, &pMsg->info, + (int32_t)req.downstreamTaskId, checkpointId, transId); + code = streamTaskSendCheckpointTriggerMsg(pTask, req.downstreamTaskId, req.downstreamNodeId, &pMsg->info, TSDB_CODE_SUCCESS); } else { // not send checkpoint-trigger yet, wait int32_t recv = 0, total = 0; @@ -1036,7 +1054,7 @@ int32_t tqStreamTaskProcessRetrieveTriggerReq(SStreamMeta* pMeta, SRpcMsg* pMsg) "sending checkpoint-source/trigger", pTask->id.idStr, recv, total); } - code = streamTaskSendCheckpointTriggerMsg(pTask, pReq->downstreamTaskId, pReq->downstreamNodeId, &pMsg->info, + code = streamTaskSendCheckpointTriggerMsg(pTask, req.downstreamTaskId, req.downstreamNodeId, &pMsg->info, TSDB_CODE_ACTION_IN_PROGRESS); } } else { // upstream not recv the checkpoint-source/trigger till now @@ -1048,7 +1066,7 @@ int32_t tqStreamTaskProcessRetrieveTriggerReq(SStreamMeta* pMeta, SRpcMsg* pMsg) "s-task:%s not recv checkpoint-source from mnode or checkpoint-trigger from upstream yet, wait for all " "upstream sending checkpoint-source/trigger", pTask->id.idStr); - code = streamTaskSendCheckpointTriggerMsg(pTask, pReq->downstreamTaskId, pReq->downstreamNodeId, &pMsg->info, + code = streamTaskSendCheckpointTriggerMsg(pTask, req.downstreamTaskId, req.downstreamNodeId, &pMsg->info, TSDB_CODE_ACTION_IN_PROGRESS); } @@ -1057,23 +1075,34 @@ int32_t tqStreamTaskProcessRetrieveTriggerReq(SStreamMeta* pMeta, SRpcMsg* pMsg) } int32_t tqStreamTaskProcessRetrieveTriggerRsp(SStreamMeta* pMeta, SRpcMsg* pMsg) { - SCheckpointTriggerRsp* pRsp = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); + SCheckpointTriggerRsp rsp = {0}; + SStreamTask* pTask = NULL; + char* msg = POINTER_SHIFT(pMsg->pCont, 
sizeof(SMsgHead)); + int32_t len = pMsg->contLen - sizeof(SMsgHead); + SDecoder decoder = {0}; - SStreamTask* pTask = NULL; - int32_t code = streamMetaAcquireTask(pMeta, pRsp->streamId, pRsp->taskId, &pTask); + tDecoderInit(&decoder, (uint8_t*)msg, len); + if (tDecodeCheckpointTriggerRsp(&decoder, &rsp) < 0) { + tDecoderClear(&decoder); + tqError("vgId:%d invalid retrieve checkpoint-trigger rsp received", pMeta->vgId); + return TSDB_CODE_INVALID_MSG; + } + tDecoderClear(&decoder); + + int32_t code = streamMetaAcquireTask(pMeta, rsp.streamId, rsp.taskId, &pTask); if (pTask == NULL || (code != 0)) { tqError( "vgId:%d process retrieve checkpoint-trigger, failed to acquire task:0x%x, it may have been dropped already", - pMeta->vgId, pRsp->taskId); + pMeta->vgId, rsp.taskId); return code; } tqDebug( - "s-task:%s recv re-send checkpoint-trigger msg from through retrieve/rsp channel, upstream:0x%x, " - "checkpointId:%" PRId64 ", transId:%d", - pTask->id.idStr, pRsp->upstreamTaskId, pRsp->checkpointId, pRsp->transId); + "s-task:%s recv re-send checkpoint-trigger msg through retrieve/rsp channel, upstream:0x%x, checkpointId:%" PRId64 + ", transId:%d", + pTask->id.idStr, rsp.upstreamTaskId, rsp.checkpointId, rsp.transId); - code = streamTaskProcessCheckpointTriggerRsp(pTask, pRsp); + code = streamTaskProcessCheckpointTriggerRsp(pTask, &rsp); streamMetaReleaseTask(pMeta, pTask); return code; } @@ -1186,10 +1215,12 @@ int32_t tqStreamTaskProcessTaskResumeReq(void* handle, int64_t sversion, char* m streamMutexUnlock(&pHTask->lock); code = tqProcessTaskResumeImpl(handle, pHTask, sversion, pReq->igUntreated, fromVnode); + tqDebug("s-task:%s resume complete, code:%s", pHTask->id.idStr, tstrerror(code)); + streamMetaReleaseTask(pMeta, pHTask); } - return code; + return TSDB_CODE_SUCCESS; } int32_t tqStreamTasksGetTotalNum(SStreamMeta* pMeta) { return taosArrayGetSize(pMeta->pTaskList); } @@ -1201,7 +1232,23 @@ int32_t doProcessDummyRspMsg(SStreamMeta* UNUSED_PARAM(pMeta), SRpcMsg* pMsg) { } int32_t tqStreamProcessStreamHbRsp(SStreamMeta* pMeta, SRpcMsg* pMsg) { - return streamProcessHeartbeatRsp(pMeta, pMsg->pCont); + SMStreamHbRspMsg rsp = {0}; + int32_t code = 0; + SDecoder decoder; + char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); + int32_t len = pMsg->contLen - sizeof(SMsgHead); + + tDecoderInit(&decoder, (uint8_t*)msg, len); + code = tDecodeStreamHbRsp(&decoder, &rsp); + if (code < 0) { + terrno = TSDB_CODE_INVALID_MSG; + tDecoderClear(&decoder); + tqError("vgId:%d failed to parse hb rsp msg, code:%s", pMeta->vgId, tstrerror(terrno)); + return terrno; + } + + tDecoderClear(&decoder); + return streamProcessHeartbeatRsp(pMeta, &rsp); } int32_t tqStreamProcessReqCheckpointRsp(SStreamMeta* pMeta, SRpcMsg* pMsg) { return doProcessDummyRspMsg(pMeta, pMsg); } @@ -1235,7 +1282,7 @@ int32_t tqStreamTaskProcessConsenChkptIdReq(SStreamMeta* pMeta, SRpcMsg* pMsg) { SRestoreCheckpointInfo req = {0}; tDecoderInit(&decoder, (uint8_t*)msg, len); - if (tDecodeRestoreCheckpointInfo(&decoder, &req) < 0) { + if ((code = tDecodeRestoreCheckpointInfo(&decoder, &req)) < 0) { tqError("vgId:%d failed to decode set consensus checkpointId req, code:%s", vgId, tstrerror(code)); tDecoderClear(&decoder); return TSDB_CODE_SUCCESS; diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c index 0f524e22d7..f5aeb609d5 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c @@ -346,7 +346,8 @@ int32_t tsdbCacherowsReaderOpen(void* 
pVnode, int32_t type, void* pTableIdList, p->rowKey.pks[0].pData = taosMemoryCalloc(1, pPkCol->bytes); if (p->rowKey.pks[0].pData == NULL) { taosMemoryFreeClear(p); - TSDB_CHECK_NULL(p->rowKey.pks[0].pData, code, lino, _end, terrno); + code = terrno; + TSDB_CHECK_CODE(code, lino, _end); } } diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit2.c b/source/dnode/vnode/src/tsdb/tsdbCommit2.c index 95c5daf842..e3c75760c8 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCommit2.c +++ b/source/dnode/vnode/src/tsdb/tsdbCommit2.c @@ -667,7 +667,7 @@ int32_t tsdbCommitBegin(STsdb *tsdb, SCommitInfo *info) { int64_t nRow = imem->nRow; int64_t nDel = imem->nDel; - if (nRow == 0 && nDel == 0) { + if ((nRow == 0 && nDel == 0) || (tsBypassFlag & TSDB_BYPASS_RB_TSDB_COMMIT)) { (void)taosThreadMutexLock(&tsdb->mutex); tsdb->imem = NULL; (void)taosThreadMutexUnlock(&tsdb->mutex); diff --git a/source/dnode/vnode/src/tsdb/tsdbDataFileRW.c b/source/dnode/vnode/src/tsdb/tsdbDataFileRW.c index 720ba68414..f51ffe0c83 100644 --- a/source/dnode/vnode/src/tsdb/tsdbDataFileRW.c +++ b/source/dnode/vnode/src/tsdb/tsdbDataFileRW.c @@ -972,7 +972,7 @@ static int32_t tsdbDataFileWriteBrinRecord(SDataFileWriter *writer, const SBrinR break; } - if ((writer->brinBlock->numOfRecords) >= writer->config->maxRow) { + if ((writer->brinBlock->numOfRecords) >= 256) { TAOS_CHECK_GOTO(tsdbDataFileWriteBrinBlock(writer), &lino, _exit); } diff --git a/source/dnode/vnode/src/tsdb/tsdbMemTable.c b/source/dnode/vnode/src/tsdb/tsdbMemTable.c index eb22335311..5b26d17519 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMemTable.c +++ b/source/dnode/vnode/src/tsdb/tsdbMemTable.c @@ -122,6 +122,10 @@ int32_t tsdbInsertTableData(STsdb *pTsdb, int64_t version, SSubmitTbData *pSubmi tb_uid_t suid = pSubmitTbData->suid; tb_uid_t uid = pSubmitTbData->uid; + if (tsBypassFlag & TSDB_BYPASS_RB_TSDB_WRITE_MEM) { + goto _err; + } + // create/get STbData to op code = tsdbGetOrCreateTbData(pMemTable, suid, uid, &pTbData); if (code) { diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index f30f7eb310..05ae4be74b 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -210,7 +210,7 @@ static int32_t setColumnIdSlotList(SBlockLoadSuppInfo* pSupInfo, SColumnInfo* pC pSupInfo->smaValid = true; pSupInfo->numOfCols = numOfCols; - pSupInfo->colId = taosMemoryMalloc(numOfCols * (sizeof(int16_t) * 2 + POINTER_BYTES)); + pSupInfo->colId = taosMemoryCalloc(numOfCols, sizeof(int16_t) * 2 + POINTER_BYTES); TSDB_CHECK_NULL(pSupInfo->colId, code, lino, _end, terrno); pSupInfo->slotId = (int16_t*)((char*)pSupInfo->colId + (sizeof(int16_t) * numOfCols)); @@ -836,6 +836,7 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFileReader* pFileRead pList = &pReader->status.uidList; int32_t i = 0; + int32_t j = 0; while (i < TARRAY2_SIZE(pBlkArray)) { pBrinBlk = &pBlkArray->data[i]; if (pBrinBlk->maxTbid.suid < pReader->info.suid) { @@ -851,7 +852,7 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFileReader* pFileRead (pBrinBlk->minTbid.suid <= pReader->info.suid) && (pBrinBlk->maxTbid.suid >= pReader->info.suid), code, lino, _end, TSDB_CODE_INTERNAL_ERROR); - if (pBrinBlk->maxTbid.suid == pReader->info.suid && pBrinBlk->maxTbid.uid < pList->tableUidList[0]) { + if (pBrinBlk->maxTbid.suid == pReader->info.suid && pBrinBlk->maxTbid.uid < pList->tableUidList[j]) { i += 1; continue; } @@ -864,6 +865,14 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFileReader* 
pFileRead TSDB_CHECK_NULL(p1, code, lino, _end, terrno); i += 1; + if (pBrinBlk->maxTbid.suid == pReader->info.suid) { + while (j < numOfTables && pList->tableUidList[j] < pBrinBlk->maxTbid.uid) { + j++; + } + if (j >= numOfTables) { + break; + } + } } et2 = taosGetTimestampUs(); @@ -1134,7 +1143,12 @@ static int32_t getCurrentBlockInfo(SDataBlockIter* pBlockIter, SFileDataBlockInf *pInfo = NULL; size_t num = TARRAY_SIZE(pBlockIter->blockList); - TSDB_CHECK_CONDITION(num != 0, code, lino, _end, TSDB_CODE_INVALID_PARA); + if (num == 0) { + // Some callers would attempt to call this function. Filter out certain normal cases and return directly to avoid + // generating excessive unnecessary error logs. + TSDB_CHECK_CONDITION(num == pBlockIter->numOfBlocks, code, lino, _end, TSDB_CODE_INVALID_PARA); + return TSDB_CODE_INVALID_PARA; + } *pInfo = taosArrayGet(pBlockIter->blockList, pBlockIter->index); TSDB_CHECK_NULL(*pInfo, code, lino, _end, TSDB_CODE_INVALID_PARA); @@ -4807,7 +4821,7 @@ static int32_t checkForNeighborFileBlock(STsdbReader* pReader, STableBlockScanIn pBlockData = &pReader->status.fileBlockData; asc = ASCENDING_TRAVERSE(pReader->info.order); pVerRange = &pReader->info.verRange; - ASCENDING_TRAVERSE(pReader->info.order) ? 1 : -1; + step = ASCENDING_TRAVERSE(pReader->info.order) ? 1 : -1; *state = CHECK_FILEBLOCK_QUIT; code = loadNeighborIfOverlap(pFBlock, pScanInfo, pReader, &loadNeighbor); @@ -5530,12 +5544,10 @@ int32_t tsdbReaderOpen2(void* pVnode, SQueryTableDataCond* pCond, void* pTableLi // update the SQueryTableDataCond to create inner reader int32_t order = pCond->order; if (order == TSDB_ORDER_ASC) { - pCond->twindows.ekey = window.skey - 1; - pCond->twindows.skey = INT64_MIN; + pCond->twindows = pCond->extTwindows[0]; pCond->order = TSDB_ORDER_DESC; } else { - pCond->twindows.skey = window.ekey + 1; - pCond->twindows.ekey = INT64_MAX; + pCond->twindows = pCond->extTwindows[1]; pCond->order = TSDB_ORDER_ASC; } @@ -5544,11 +5556,9 @@ int32_t tsdbReaderOpen2(void* pVnode, SQueryTableDataCond* pCond, void* pTableLi TSDB_CHECK_CODE(code, lino, _end); if (order == TSDB_ORDER_ASC) { - pCond->twindows.skey = window.ekey + 1; - pCond->twindows.ekey = INT64_MAX; + pCond->twindows = pCond->extTwindows[1]; } else { - pCond->twindows.skey = INT64_MIN; - pCond->twindows.ekey = window.ekey - 1; + pCond->twindows = pCond->extTwindows[0]; } pCond->order = order; @@ -6115,7 +6125,7 @@ int32_t tsdbNextDataBlock2(STsdbReader* pReader, bool* hasNext) { TSDB_CHECK_CODE(code, lino, _end); } - goto _end; + return code; } } @@ -6142,7 +6152,7 @@ int32_t tsdbNextDataBlock2(STsdbReader* pReader, bool* hasNext) { acquired = false; TSDB_CHECK_CODE(code, lino, _end); } - goto _end; + return code; } if (pReader->step == EXTERNAL_ROWS_MAIN && pReader->innerReader[1] != NULL) { @@ -6168,7 +6178,7 @@ int32_t tsdbNextDataBlock2(STsdbReader* pReader, bool* hasNext) { TSDB_CHECK_CODE(code, lino, _end); } - goto _end; + return code; } } diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c index 53365303b0..2d2446415e 100644 --- a/source/dnode/vnode/src/vnd/vnodeOpen.c +++ b/source/dnode/vnode/src/vnd/vnodeOpen.c @@ -360,6 +360,7 @@ SVnode *vnodeOpen(const char *path, int32_t diskPrimary, STfs *pTfs, SMsgCb msgC info.config = vnodeCfgDefault; // load vnode info + vInfo("vgId:%d, start to vnode load info %s", info.config.vgId, dir); ret = vnodeLoadInfo(dir, &info); if (ret < 0) { vError("failed to open vnode from %s since %s", path, tstrerror(terrno)); @@ -429,22 
+430,26 @@ SVnode *vnodeOpen(const char *path, int32_t diskPrimary, STfs *pTfs, SMsgCb msgC int8_t rollback = vnodeShouldRollback(pVnode); // open buffer pool + vInfo("vgId:%d, start to open vnode buffer pool", TD_VID(pVnode)); if (vnodeOpenBufPool(pVnode) < 0) { vError("vgId:%d, failed to open vnode buffer pool since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; } // open meta + vInfo("vgId:%d, start to open vnode meta", TD_VID(pVnode)); if (metaOpen(pVnode, &pVnode->pMeta, rollback) < 0) { vError("vgId:%d, failed to open vnode meta since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; } + vInfo("vgId:%d, start to upgrade meta", TD_VID(pVnode)); if (metaUpgrade(pVnode, &pVnode->pMeta) < 0) { vError("vgId:%d, failed to upgrade meta since %s", TD_VID(pVnode), tstrerror(terrno)); } // open tsdb + vInfo("vgId:%d, start to open vnode tsdb", TD_VID(pVnode)); if (!VND_IS_RSMA(pVnode) && tsdbOpen(pVnode, &VND_TSDB(pVnode), VNODE_TSDB_DIR, NULL, rollback, force) < 0) { vError("vgId:%d, failed to open vnode tsdb since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; @@ -455,6 +460,7 @@ SVnode *vnodeOpen(const char *path, int32_t diskPrimary, STfs *pTfs, SMsgCb msgC ret = taosRealPath(tdir, NULL, sizeof(tdir)); TAOS_UNUSED(ret); + vInfo("vgId:%d, start to open vnode wal", TD_VID(pVnode)); pVnode->pWal = walOpen(tdir, &(pVnode->config.walCfg)); if (pVnode->pWal == NULL) { vError("vgId:%d, failed to open vnode wal since %s. wal:%s", TD_VID(pVnode), tstrerror(terrno), tdir); @@ -467,6 +473,7 @@ SVnode *vnodeOpen(const char *path, int32_t diskPrimary, STfs *pTfs, SMsgCb msgC TAOS_UNUSED(ret); // open query + vInfo("vgId:%d, start to open vnode query", TD_VID(pVnode)); if (vnodeQueryOpen(pVnode)) { vError("vgId:%d, failed to open vnode query since %s", TD_VID(pVnode), tstrerror(terrno)); terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -474,18 +481,21 @@ SVnode *vnodeOpen(const char *path, int32_t diskPrimary, STfs *pTfs, SMsgCb msgC } // sma required the tq is initialized before the vnode open + vInfo("vgId:%d, start to open vnode tq", TD_VID(pVnode)); if (tqOpen(tdir, pVnode)) { vError("vgId:%d, failed to open vnode tq since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; } // open sma + vInfo("vgId:%d, start to open vnode sma", TD_VID(pVnode)); if (smaOpen(pVnode, rollback, force)) { vError("vgId:%d, failed to open vnode sma since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; } // vnode begin + vInfo("vgId:%d, start to begin vnode", TD_VID(pVnode)); if (vnodeBegin(pVnode) < 0) { vError("vgId:%d, failed to begin since %s", TD_VID(pVnode), tstrerror(terrno)); terrno = TSDB_CODE_OUT_OF_MEMORY; diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 6702b8b588..16c5e026d1 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -362,6 +362,10 @@ static int32_t vnodePreProcessSubmitMsg(SVnode *pVnode, SRpcMsg *pMsg) { int32_t code = 0; int32_t lino = 0; + if (tsBypassFlag & TSDB_BYPASS_RA_RPC_RECV_SUBMIT) { + return TSDB_CODE_MSG_PREPROCESSED; + } + SDecoder *pCoder = &(SDecoder){0}; if (taosHton64(((SSubmitReq2Msg *)pMsg->pCont)->version) != 1) { diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h index e757163ba8..b581e31919 100644 --- a/source/libs/catalog/inc/catalogInt.h +++ b/source/libs/catalog/inc/catalogInt.h @@ -271,6 +271,7 @@ typedef struct SCtgViewsCtx { SArray* pNames; SArray* pResList; SArray* pFetchs; + bool forceFetch; } SCtgViewsCtx; typedef enum { @@ -831,12 
+832,12 @@ typedef struct SCtgCacheItemInfo { #define ctgDebug(param, ...) qDebug("CTG:%p " param, pCtg, __VA_ARGS__) #define ctgTrace(param, ...) qTrace("CTG:%p " param, pCtg, __VA_ARGS__) -#define ctgTaskFatal(param, ...) qFatal("qid:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__) -#define ctgTaskError(param, ...) qError("qid:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__) -#define ctgTaskWarn(param, ...) qWarn("qid:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__) -#define ctgTaskInfo(param, ...) qInfo("qid:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__) -#define ctgTaskDebug(param, ...) qDebug("qid:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__) -#define ctgTaskTrace(param, ...) qTrace("qid:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__) +#define ctgTaskFatal(param, ...) qFatal("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__) +#define ctgTaskError(param, ...) qError("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__) +#define ctgTaskWarn(param, ...) qWarn("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__) +#define ctgTaskInfo(param, ...) qInfo("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__) +#define ctgTaskDebug(param, ...) qDebug("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__) +#define ctgTaskTrace(param, ...) qTrace("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__) #define CTG_LOCK_DEBUG(...) \ do { \ diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c index c1dcdf2741..9bfb4102aa 100644 --- a/source/libs/catalog/src/ctgAsync.c +++ b/source/libs/catalog/src/ctgAsync.c @@ -20,6 +20,11 @@ #include "tref.h" #include "trpc.h" +typedef struct SCtgViewTaskParam { + bool forceFetch; + SArray* pTableReqs; +} SCtgViewTaskParam; + void ctgIsTaskDone(SCtgJob* pJob, CTG_TASK_TYPE type, bool* done) { SCtgTask* pTask = NULL; @@ -500,7 +505,7 @@ int32_t ctgInitGetTbTagTask(SCtgJob* pJob, int32_t taskIdx, void* param) { int32_t ctgInitGetViewsTask(SCtgJob* pJob, int32_t taskIdx, void* param) { SCtgTask task = {0}; - + SCtgViewTaskParam* p = param; task.type = CTG_TASK_GET_VIEW; task.taskId = taskIdx; task.pJob = pJob; @@ -511,7 +516,8 @@ int32_t ctgInitGetViewsTask(SCtgJob* pJob, int32_t taskIdx, void* param) { } SCtgViewsCtx* ctx = task.taskCtx; - ctx->pNames = param; + ctx->pNames = p->pTableReqs; + ctx->forceFetch = p->forceFetch; ctx->pResList = taosArrayInit(pJob->viewNum, sizeof(SMetaRes)); if (NULL == ctx->pResList) { qError("QID:0x%" PRIx64 " taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->viewNum, @@ -849,13 +855,12 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgJob** job, const int32_t tbCfgNum = (int32_t)taosArrayGetSize(pReq->pTableCfg); int32_t tbTagNum = (int32_t)taosArrayGetSize(pReq->pTableTag); int32_t viewNum = (int32_t)ctgGetTablesReqNum(pReq->pView); - int32_t tbTsmaNum = (int32_t)taosArrayGetSize(pReq->pTableTSMAs); + int32_t tbTsmaNum = tsQuerySmaOptimize ? 
(int32_t)taosArrayGetSize(pReq->pTableTSMAs) : 0; int32_t tsmaNum = (int32_t)taosArrayGetSize(pReq->pTSMAs); int32_t tbNameNum = (int32_t)ctgGetTablesReqNum(pReq->pTableName); int32_t taskNum = tbMetaNum + dbVgNum + udfNum + tbHashNum + qnodeNum + dnodeNum + svrVerNum + dbCfgNum + indexNum + userNum + dbInfoNum + tbIndexNum + tbCfgNum + tbTagNum + viewNum + tbTsmaNum + tbNameNum; - *job = taosMemoryCalloc(1, sizeof(SCtgJob)); if (NULL == *job) { ctgError("failed to calloc, size:%d,QID:0x%" PRIx64, (int32_t)sizeof(SCtgJob), pConn->requestId); @@ -1014,7 +1019,8 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgJob** job, const } if (viewNum > 0) { - CTG_ERR_JRET(ctgInitTask(pJob, CTG_TASK_GET_VIEW, pReq->pView, NULL)); + SCtgViewTaskParam param = {.forceFetch = pReq->forceFetchViewMeta, .pTableReqs = pReq->pView}; + CTG_ERR_JRET(ctgInitTask(pJob, CTG_TASK_GET_VIEW, &param, NULL)); } if (tbTsmaNum > 0) { CTG_ERR_JRET(ctgInitTask(pJob, CTG_TASK_GET_TB_TSMA, pReq->pTableTSMAs, NULL)); } @@ -3712,16 +3718,14 @@ int32_t ctgLaunchGetViewsTask(SCtgTask* pTask) { bool tbMetaDone = false; SName* pName = NULL; - /* - ctgIsTaskDone(pJob, CTG_TASK_GET_TB_META_BATCH, &tbMetaDone); - if (tbMetaDone) { - CTG_ERR_RET(ctgBuildViewNullRes(pTask, pCtx)); - TSWAP(pTask->res, pCtx->pResList); + ctgIsTaskDone(pJob, CTG_TASK_GET_TB_META_BATCH, &tbMetaDone); + if (tbMetaDone && !pCtx->forceFetch) { + CTG_ERR_RET(ctgBuildViewNullRes(pTask, pCtx)); + TSWAP(pTask->res, pCtx->pResList); - CTG_ERR_RET(ctgHandleTaskEnd(pTask, 0)); - return TSDB_CODE_SUCCESS; - } - */ + CTG_ERR_RET(ctgHandleTaskEnd(pTask, 0)); + return TSDB_CODE_SUCCESS; + } int32_t dbNum = taosArrayGetSize(pCtx->pNames); int32_t fetchIdx = 0; diff --git a/source/libs/catalog/test/CMakeLists.txt b/source/libs/catalog/test/CMakeLists.txt index de4d08835c..f23a6beaee 100644 --- a/source/libs/catalog/test/CMakeLists.txt +++ b/source/libs/catalog/test/CMakeLists.txt @@ -9,7 +9,7 @@ IF(NOT TD_DARWIN) ADD_EXECUTABLE(catalogTest ${SOURCE_LIST}) TARGET_LINK_LIBRARIES( catalogTest - PUBLIC os util common nodes catalog transport gtest qcom taos_static + PUBLIC os util common nodes catalog transport gtest qcom ${TAOS_LIB_STATIC} ) TARGET_INCLUDE_DIRECTORIES( diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c index 3c332a6b06..353bc1fa18 100644 --- a/source/libs/command/src/command.c +++ b/source/libs/command/src/command.c @@ -954,6 +954,12 @@ static int32_t buildLocalVariablesResultDataBlock(SSDataBlock** pOutput) { goto _exit; } + infoData.info.type = TSDB_DATA_TYPE_VARCHAR; + infoData.info.bytes = SHOW_LOCAL_VARIABLES_RESULT_FIELD4_LEN; + if (taosArrayPush(pBlock->pDataBlock, &infoData) == NULL) { + goto _exit; + } + *pOutput = pBlock; _exit: diff --git a/source/libs/executor/src/anomalywindowoperator.c b/source/libs/executor/src/anomalywindowoperator.c index 94cc5d9129..3bc9c806b0 100644 --- a/source/libs/executor/src/anomalywindowoperator.c +++ b/source/libs/executor/src/anomalywindowoperator.c @@ -44,9 +44,9 @@ typedef struct { SExprSupp scalarSup; int32_t tsSlotId; STimeWindowAggSupp twAggSup; - char algoName[TSDB_ANAL_ALGO_NAME_LEN]; - char algoUrl[TSDB_ANAL_ALGO_URL_LEN]; - char anomalyOpt[TSDB_ANAL_ALGO_OPTION_LEN]; + char algoName[TSDB_ANALYTIC_ALGO_NAME_LEN]; + char algoUrl[TSDB_ANALYTIC_ALGO_URL_LEN]; + char anomalyOpt[TSDB_ANALYTIC_ALGO_OPTION_LEN]; SAnomalyWindowSupp anomalySup; SWindowRowsSup anomalyWinRowSup; SColumn anomalyCol; @@ -75,13 +75,13 @@ int32_t createAnomalywindowOperatorInfo(SOperatorInfo* 
downstream, SPhysiNode* p if (!taosAnalGetOptStr(pAnomalyNode->anomalyOpt, "algo", pInfo->algoName, sizeof(pInfo->algoName))) { qError("failed to get anomaly_window algorithm name from %s", pAnomalyNode->anomalyOpt); - code = TSDB_CODE_ANAL_ALGO_NOT_FOUND; + code = TSDB_CODE_ANA_ALGO_NOT_FOUND; goto _error; } if (taosAnalGetAlgoUrl(pInfo->algoName, ANAL_ALGO_TYPE_ANOMALY_DETECT, pInfo->algoUrl, sizeof(pInfo->algoUrl)) != 0) { qError("failed to get anomaly_window algorithm url from %s", pInfo->algoName); - code = TSDB_CODE_ANAL_ALGO_NOT_LOAD; + code = TSDB_CODE_ANA_ALGO_NOT_LOAD; goto _error; } @@ -262,7 +262,7 @@ static void anomalyDestroyOperatorInfo(void* param) { static int32_t anomalyCacheBlock(SAnomalyWindowOperatorInfo* pInfo, SSDataBlock* pSrc) { if (pInfo->anomalySup.cachedRows > ANAL_ANOMALY_WINDOW_MAX_ROWS) { - return TSDB_CODE_ANAL_ANODE_TOO_MANY_ROWS; + return TSDB_CODE_ANA_ANODE_TOO_MANY_ROWS; } SSDataBlock* pDst = NULL; @@ -287,7 +287,7 @@ static int32_t anomalyFindWindow(SAnomalyWindowSupp* pSupp, TSKEY key) { return -1; } -static int32_t anomalyParseJson(SJson* pJson, SArray* pWindows) { +static int32_t anomalyParseJson(SJson* pJson, SArray* pWindows, const char* pId) { int32_t code = 0; int32_t rows = 0; STimeWindow win = {0}; @@ -295,8 +295,23 @@ static int32_t anomalyParseJson(SJson* pJson, SArray* pWindows) { taosArrayClear(pWindows); tjsonGetInt32ValueFromDouble(pJson, "rows", rows, code); - if (code < 0) return TSDB_CODE_INVALID_JSON_FORMAT; - if (rows <= 0) return 0; + if (code < 0) { + return TSDB_CODE_INVALID_JSON_FORMAT; + } + + if (rows < 0) { + char pMsg[1024] = {0}; + code = tjsonGetStringValue(pJson, "msg", pMsg); + if (code) { + qError("%s failed to get error msg from rsp, unknown error", pId); + } else { + qError("%s failed to exec anomaly_window, msg:%s", pId, pMsg); + } + + return TSDB_CODE_ANA_WN_DATA; + } else if (rows == 0) { + return TSDB_CODE_SUCCESS; + } SJson* res = tjsonGetObjectItem(pJson, "res"); if (res == NULL) return TSDB_CODE_INVALID_JSON_FORMAT; @@ -313,7 +328,10 @@ static int32_t anomalyParseJson(SJson* pJson, SArray* pWindows) { SJson* start = tjsonGetArrayItem(row, 0); SJson* end = tjsonGetArrayItem(row, 1); - if (start == NULL || end == NULL) return TSDB_CODE_INVALID_JSON_FORMAT; + if (start == NULL || end == NULL) { + qError("%s invalid res from analytic sys, code:%s", pId, tstrerror(TSDB_CODE_INVALID_JSON_FORMAT)); + return TSDB_CODE_INVALID_JSON_FORMAT; + } tjsonGetObjectValueBigInt(start, &win.skey); tjsonGetObjectValueBigInt(end, &win.ekey); @@ -322,52 +340,57 @@ static int32_t anomalyParseJson(SJson* pJson, SArray* pWindows) { win.ekey = win.skey + 1; } - if (taosArrayPush(pWindows, &win) == NULL) return TSDB_CODE_OUT_OF_BUFFER; + if (taosArrayPush(pWindows, &win) == NULL) { + qError("%s out of memory while generating anomaly_window", pId); + return TSDB_CODE_OUT_OF_BUFFER; + } } int32_t numOfWins = taosArrayGetSize(pWindows); - qDebug("anomaly window recevied, total:%d", numOfWins); + qDebug("%s anomaly window received, total:%d", pId, numOfWins); for (int32_t i = 0; i < numOfWins; ++i) { STimeWindow* pWindow = taosArrayGet(pWindows, i); - qDebug("anomaly win:%d [%" PRId64 ", %" PRId64 ")", i, pWindow->skey, pWindow->ekey); + qDebug("%s anomaly win:%d [%" PRId64 ", %" PRId64 ")", pId, i, pWindow->skey, pWindow->ekey); } - return 0; + return code; } static int32_t anomalyAnalysisWindow(SOperatorInfo* pOperator) { SAnomalyWindowOperatorInfo* pInfo = pOperator->info; SAnomalyWindowSupp* pSupp = &pInfo->anomalySup; SJson* pJson = NULL; - 
SAnalBuf analBuf = {.bufType = ANAL_BUF_TYPE_JSON}; + SAnalyticBuf analBuf = {.bufType = ANALYTICS_BUF_TYPE_JSON}; char dataBuf[64] = {0}; int32_t code = 0; int64_t ts = 0; + int32_t lino = 0; + const char* pId = GET_TASKID(pOperator->pTaskInfo); - // int64_t ts = taosGetTimestampMs(); snprintf(analBuf.fileName, sizeof(analBuf.fileName), "%s/tdengine-anomaly-%" PRId64 "-%" PRId64, tsTempDir, ts, pSupp->groupId); code = tsosAnalBufOpen(&analBuf, 2); - if (code != 0) goto _OVER; + QUERY_CHECK_CODE(code, lino, _OVER); const char* prec = TSDB_TIME_PRECISION_MILLI_STR; if (pInfo->anomalyCol.precision == TSDB_TIME_PRECISION_MICRO) prec = TSDB_TIME_PRECISION_MICRO_STR; if (pInfo->anomalyCol.precision == TSDB_TIME_PRECISION_NANO) prec = TSDB_TIME_PRECISION_NANO_STR; code = taosAnalBufWriteColMeta(&analBuf, 0, TSDB_DATA_TYPE_TIMESTAMP, "ts"); - if (code != 0) goto _OVER; + QUERY_CHECK_CODE(code, lino, _OVER); code = taosAnalBufWriteColMeta(&analBuf, 1, pInfo->anomalyCol.type, "val"); - if (code != 0) goto _OVER; + QUERY_CHECK_CODE(code, lino, _OVER); code = taosAnalBufWriteDataBegin(&analBuf); - if (code != 0) goto _OVER; + QUERY_CHECK_CODE(code, lino, _OVER); int32_t numOfBlocks = (int32_t)taosArrayGetSize(pSupp->blocks); // timestamp code = taosAnalBufWriteColBegin(&analBuf, 0); - if (code != 0) goto _OVER; + QUERY_CHECK_CODE(code, lino, _OVER); + for (int32_t i = 0; i < numOfBlocks; ++i) { SSDataBlock* pBlock = taosArrayGetP(pSupp->blocks, i); if (pBlock == NULL) break; @@ -375,15 +398,17 @@ static int32_t anomalyAnalysisWindow(SOperatorInfo* pOperator) { if (pTsCol == NULL) break; for (int32_t j = 0; j < pBlock->info.rows; ++j) { code = taosAnalBufWriteColData(&analBuf, 0, TSDB_DATA_TYPE_TIMESTAMP, &((TSKEY*)pTsCol->pData)[j]); - if (code != 0) goto _OVER; + QUERY_CHECK_CODE(code, lino, _OVER); } } + code = taosAnalBufWriteColEnd(&analBuf, 0); - if (code != 0) goto _OVER; + QUERY_CHECK_CODE(code, lino, _OVER); // data code = taosAnalBufWriteColBegin(&analBuf, 1); - if (code != 0) goto _OVER; + QUERY_CHECK_CODE(code, lino, _OVER); + for (int32_t i = 0; i < numOfBlocks; ++i) { SSDataBlock* pBlock = taosArrayGetP(pSupp->blocks, i); if (pBlock == NULL) break; @@ -392,48 +417,47 @@ static int32_t anomalyAnalysisWindow(SOperatorInfo* pOperator) { for (int32_t j = 0; j < pBlock->info.rows; ++j) { code = taosAnalBufWriteColData(&analBuf, 1, pValCol->info.type, colDataGetData(pValCol, j)); - if (code != 0) goto _OVER; - if (code != 0) goto _OVER; + QUERY_CHECK_CODE(code, lino, _OVER); } } code = taosAnalBufWriteColEnd(&analBuf, 1); - if (code != 0) goto _OVER; + QUERY_CHECK_CODE(code, lino, _OVER); code = taosAnalBufWriteDataEnd(&analBuf); - if (code != 0) goto _OVER; + QUERY_CHECK_CODE(code, lino, _OVER); code = taosAnalBufWriteOptStr(&analBuf, "option", pInfo->anomalyOpt); - if (code != 0) goto _OVER; + QUERY_CHECK_CODE(code, lino, _OVER); code = taosAnalBufWriteOptStr(&analBuf, "algo", pInfo->algoName); - if (code != 0) goto _OVER; + QUERY_CHECK_CODE(code, lino, _OVER); code = taosAnalBufWriteOptStr(&analBuf, "prec", prec); - if (code != 0) goto _OVER; + QUERY_CHECK_CODE(code, lino, _OVER); int64_t wncheck = ANAL_FORECAST_DEFAULT_WNCHECK; bool hasWncheck = taosAnalGetOptInt(pInfo->anomalyOpt, "wncheck", &wncheck); if (!hasWncheck) { qDebug("anomaly_window wncheck not found from %s, use default:%" PRId64, pInfo->anomalyOpt, wncheck); } + code = taosAnalBufWriteOptInt(&analBuf, "wncheck", wncheck); - if (code != 0) goto _OVER; + QUERY_CHECK_CODE(code, lino, _OVER); code = taosAnalBufClose(&analBuf); - 
if (code != 0) goto _OVER; + QUERY_CHECK_CODE(code, lino, _OVER); - pJson = taosAnalSendReqRetJson(pInfo->algoUrl, ANAL_HTTP_TYPE_POST, &analBuf); + pJson = taosAnalSendReqRetJson(pInfo->algoUrl, ANALYTICS_HTTP_TYPE_POST, &analBuf); if (pJson == NULL) { code = terrno; goto _OVER; } - code = anomalyParseJson(pJson, pSupp->windows); - if (code != 0) goto _OVER; + code = anomalyParseJson(pJson, pSupp->windows, pId); _OVER: if (code != 0) { - qError("failed to analysis window since %s", tstrerror(code)); + qError("%s failed to analyze window since %s, lino:%d", pId, tstrerror(code), lino); } taosAnalBufDestroy(&analBuf); diff --git a/source/libs/executor/src/forecastoperator.c b/source/libs/executor/src/forecastoperator.c index 20dc9e28ba..bf1efc54ca 100644 --- a/source/libs/executor/src/forecastoperator.c +++ b/source/libs/executor/src/forecastoperator.c @@ -29,9 +29,9 @@ #ifdef USE_ANALYTICS typedef struct { - char algoName[TSDB_ANAL_ALGO_NAME_LEN]; - char algoUrl[TSDB_ANAL_ALGO_URL_LEN]; - char algoOpt[TSDB_ANAL_ALGO_OPTION_LEN]; + char algoName[TSDB_ANALYTIC_ALGO_NAME_LEN]; + char algoUrl[TSDB_ANALYTIC_ALGO_URL_LEN]; + char algoOpt[TSDB_ANALYTIC_ALGO_OPTION_LEN]; int64_t maxTs; int64_t minTs; int64_t numOfRows; @@ -47,7 +47,7 @@ typedef struct { int16_t inputValSlot; int8_t inputValType; int8_t inputPrecision; - SAnalBuf analBuf; + SAnalyticBuf analBuf; } SForecastSupp; typedef struct SForecastOperatorInfo { @@ -74,12 +74,12 @@ static FORCE_INLINE int32_t forecastEnsureBlockCapacity(SSDataBlock* pBlock, int static int32_t forecastCacheBlock(SForecastSupp* pSupp, SSDataBlock* pBlock) { if (pSupp->cachedRows > ANAL_FORECAST_MAX_ROWS) { - return TSDB_CODE_ANAL_ANODE_TOO_MANY_ROWS; + return TSDB_CODE_ANA_ANODE_TOO_MANY_ROWS; } int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; - SAnalBuf* pBuf = &pSupp->analBuf; + SAnalyticBuf* pBuf = &pSupp->analBuf; qDebug("block:%d, %p rows:%" PRId64, pSupp->numOfBlocks, pBlock, pBlock->info.rows); pSupp->numOfBlocks++; @@ -108,7 +108,7 @@ static int32_t forecastCacheBlock(SForecastSupp* pSupp, SSDataBlock* pBlock) { } static int32_t forecastCloseBuf(SForecastSupp* pSupp) { - SAnalBuf* pBuf = &pSupp->analBuf; + SAnalyticBuf* pBuf = &pSupp->analBuf; int32_t code = 0; for (int32_t i = 0; i < 2; ++i) { @@ -180,8 +180,8 @@ static int32_t forecastCloseBuf(SForecastSupp* pSupp) { return code; } -static int32_t forecastAnalysis(SForecastSupp* pSupp, SSDataBlock* pBlock) { - SAnalBuf* pBuf = &pSupp->analBuf; +static int32_t forecastAnalysis(SForecastSupp* pSupp, SSDataBlock* pBlock, const char* pId) { + SAnalyticBuf* pBuf = &pSupp->analBuf; int32_t resCurRow = pBlock->info.rows; int8_t tmpI8; int16_t tmpI16; @@ -192,28 +192,45 @@ static int32_t forecastAnalysis(SForecastSupp* pSupp, SSDataBlock* pBlock) { int32_t code = 0; SColumnInfoData* pResValCol = taosArrayGet(pBlock->pDataBlock, pSupp->resValSlot); - if (NULL == pResValCol) return TSDB_CODE_OUT_OF_RANGE; + if (NULL == pResValCol) { + return terrno; + } SColumnInfoData* pResTsCol = (pSupp->resTsSlot != -1 ? taosArrayGet(pBlock->pDataBlock, pSupp->resTsSlot) : NULL); SColumnInfoData* pResLowCol = (pSupp->resLowSlot != -1 ? taosArrayGet(pBlock->pDataBlock, pSupp->resLowSlot) : NULL); SColumnInfoData* pResHighCol = (pSupp->resHighSlot != -1 ? 
taosArrayGet(pBlock->pDataBlock, pSupp->resHighSlot) : NULL); - SJson* pJson = taosAnalSendReqRetJson(pSupp->algoUrl, ANAL_HTTP_TYPE_POST, pBuf); - if (pJson == NULL) return terrno; + SJson* pJson = taosAnalSendReqRetJson(pSupp->algoUrl, ANALYTICS_HTTP_TYPE_POST, pBuf); + if (pJson == NULL) { + return terrno; + } int32_t rows = 0; tjsonGetInt32ValueFromDouble(pJson, "rows", rows, code); - if (code < 0) goto _OVER; - if (rows <= 0) goto _OVER; + if (rows < 0 && code == 0) { + char pMsg[1024] = {0}; + code = tjsonGetStringValue(pJson, "msg", pMsg); + if (code != 0) { + qError("%s failed to get msg from rsp, unknown error", pId); + } else { + qError("%s failed to exec forecast, msg:%s", pId, pMsg); + } + + tjsonDelete(pJson); + return TSDB_CODE_ANA_WN_DATA; + } + + if (code < 0) { + goto _OVER; + } SJson* res = tjsonGetObjectItem(pJson, "res"); if (res == NULL) goto _OVER; int32_t ressize = tjsonGetArraySize(res); bool returnConf = (pSupp->resHighSlot != -1 || pSupp->resLowSlot != -1); - if (returnConf) { - if (ressize != 4) goto _OVER; - } else if (ressize != 2) { + + if ((returnConf && (ressize != 4)) || ((!returnConf) && (ressize != 2))) { goto _OVER; } @@ -313,41 +330,25 @@ static int32_t forecastAnalysis(SForecastSupp* pSupp, SSDataBlock* pBlock) { resCurRow++; } - // for (int32_t i = rows; i < pSupp->optRows; ++i) { - // colDataSetNNULL(pResValCol, rows, (pSupp->optRows - rows)); - // if (pResTsCol != NULL) { - // colDataSetNNULL(pResTsCol, rows, (pSupp->optRows - rows)); - // } - // if (pResLowCol != NULL) { - // colDataSetNNULL(pResLowCol, rows, (pSupp->optRows - rows)); - // } - // if (pResHighCol != NULL) { - // colDataSetNNULL(pResHighCol, rows, (pSupp->optRows - rows)); - // } - // } - - // if (rows == pSupp->optRows) { - // pResValCol->hasNull = false; - // } - pBlock->info.rows += rows; if (pJson != NULL) tjsonDelete(pJson); return 0; _OVER: - if (pJson != NULL) tjsonDelete(pJson); + tjsonDelete(pJson); if (code == 0) { code = TSDB_CODE_INVALID_JSON_FORMAT; } - qError("failed to perform forecast finalize since %s", tstrerror(code)); - return TSDB_CODE_INVALID_JSON_FORMAT; + + qError("%s failed to perform forecast finalize since %s", pId, tstrerror(code)); + return code; } -static int32_t forecastAggregateBlocks(SForecastSupp* pSupp, SSDataBlock* pResBlock) { +static int32_t forecastAggregateBlocks(SForecastSupp* pSupp, SSDataBlock* pResBlock, const char* pId) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; - SAnalBuf* pBuf = &pSupp->analBuf; + SAnalyticBuf* pBuf = &pSupp->analBuf; code = forecastCloseBuf(pSupp); QUERY_CHECK_CODE(code, lino, _end); @@ -355,10 +356,10 @@ static int32_t forecastAggregateBlocks(SForecastSupp* pSupp, SSDataBlock* pResBl code = forecastEnsureBlockCapacity(pResBlock, 1); QUERY_CHECK_CODE(code, lino, _end); - code = forecastAnalysis(pSupp, pResBlock); + code = forecastAnalysis(pSupp, pResBlock, pId); QUERY_CHECK_CODE(code, lino, _end); - uInfo("block:%d, forecast finalize", pSupp->numOfBlocks); + uInfo("%s block:%d, forecast finalize", pId, pSupp->numOfBlocks); _end: pSupp->numOfBlocks = 0; @@ -373,9 +374,10 @@ static int32_t forecastNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { SForecastOperatorInfo* pInfo = pOperator->info; SSDataBlock* pResBlock = pInfo->pRes; SForecastSupp* pSupp = &pInfo->forecastSupp; - SAnalBuf* pBuf = &pSupp->analBuf; + SAnalyticBuf* pBuf = &pSupp->analBuf; int64_t st = taosGetTimestampUs(); int32_t numOfBlocks = pSupp->numOfBlocks; + const char* pId = GET_TASKID(pOperator->pTaskInfo); blockDataCleanup(pResBlock); 
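+  // A sketch of the anode response shape implied by the parsing above; this payload is an
+  // assumption reconstructed from the fields actually read ("rows", "msg", and the
+  // column-major "res" array of 2 or 4 columns):
+  //   success: {"rows": 10, "res": [[ts...], [val...], [low...], [high...]]}
+  //            (the "low"/"high" columns appear only when confidence bounds are requested)
+  //   failure: {"rows": -1, "msg": "some error description"}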
@@ -389,45 +391,46 @@ static int32_t forecastNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { pSupp->groupId = pBlock->info.id.groupId; numOfBlocks++; pSupp->cachedRows += pBlock->info.rows; - qDebug("group:%" PRId64 ", blocks:%d, rows:%" PRId64 ", total rows:%" PRId64, pSupp->groupId, numOfBlocks, + qDebug("%s group:%" PRId64 ", blocks:%d, rows:%" PRId64 ", total rows:%" PRId64, pId, pSupp->groupId, numOfBlocks, pBlock->info.rows, pSupp->cachedRows); code = forecastCacheBlock(pSupp, pBlock); QUERY_CHECK_CODE(code, lino, _end); } else { - qDebug("group:%" PRId64 ", read finish for new group coming, blocks:%d", pSupp->groupId, numOfBlocks); - code = forecastAggregateBlocks(pSupp, pResBlock); + qDebug("%s group:%" PRId64 ", read finish for new group coming, blocks:%d", pId, pSupp->groupId, numOfBlocks); + code = forecastAggregateBlocks(pSupp, pResBlock, pId); QUERY_CHECK_CODE(code, lino, _end); pSupp->groupId = pBlock->info.id.groupId; numOfBlocks = 1; pSupp->cachedRows = pBlock->info.rows; - qDebug("group:%" PRId64 ", new group, rows:%" PRId64 ", total rows:%" PRId64, pSupp->groupId, pBlock->info.rows, - pSupp->cachedRows); + qDebug("%s group:%" PRId64 ", new group, rows:%" PRId64 ", total rows:%" PRId64, pId, pSupp->groupId, + pBlock->info.rows, pSupp->cachedRows); code = forecastCacheBlock(pSupp, pBlock); QUERY_CHECK_CODE(code, lino, _end); } if (pResBlock->info.rows > 0) { (*ppRes) = pResBlock; - qDebug("group:%" PRId64 ", return to upstream, blocks:%d", pResBlock->info.id.groupId, numOfBlocks); + qDebug("%s group:%" PRId64 ", return to upstream, blocks:%d", pId, pResBlock->info.id.groupId, numOfBlocks); return code; } } if (numOfBlocks > 0) { - qDebug("group:%" PRId64 ", read finish, blocks:%d", pSupp->groupId, numOfBlocks); - code = forecastAggregateBlocks(pSupp, pResBlock); + qDebug("%s group:%" PRId64 ", read finish, blocks:%d", pId, pSupp->groupId, numOfBlocks); + code = forecastAggregateBlocks(pSupp, pResBlock, pId); QUERY_CHECK_CODE(code, lino, _end); } int64_t cost = taosGetTimestampUs() - st; - qDebug("all groups finished, cost:%" PRId64 "us", cost); + qDebug("%s all groups finished, cost:%" PRId64 "us", pId, cost); _end: if (code != TSDB_CODE_SUCCESS) { - qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + qError("%s %s failed at line %d since %s", pId, __func__, lino, tstrerror(code)); pTaskInfo->code = code; T_LONG_JMP(pTaskInfo->env, code); } + (*ppRes) = (pResBlock->info.rows == 0) ? 
NULL : pResBlock; return code; } @@ -498,7 +501,7 @@ static int32_t forecastParseInput(SForecastSupp* pSupp, SNodeList* pFuncs) { pSupp->inputPrecision = pTsNode->node.resType.precision; pSupp->inputValSlot = pValNode->slotId; pSupp->inputValType = pValNode->node.resType.type; - tstrncpy(pSupp->algoOpt, "algo=arima", TSDB_ANAL_ALGO_OPTION_LEN); + tstrncpy(pSupp->algoOpt, "algo=arima", TSDB_ANALYTIC_ALGO_OPTION_LEN); } else { return TSDB_CODE_PLAN_INTERNAL_ERROR; } @@ -516,22 +519,22 @@ static int32_t forecastParseAlgo(SForecastSupp* pSupp) { if (!taosAnalGetOptStr(pSupp->algoOpt, "algo", pSupp->algoName, sizeof(pSupp->algoName))) { qError("failed to get forecast algorithm name from %s", pSupp->algoOpt); - return TSDB_CODE_ANAL_ALGO_NOT_FOUND; + return TSDB_CODE_ANA_ALGO_NOT_FOUND; } if (taosAnalGetAlgoUrl(pSupp->algoName, ANAL_ALGO_TYPE_FORECAST, pSupp->algoUrl, sizeof(pSupp->algoUrl)) != 0) { qError("failed to get forecast algorithm url from %s", pSupp->algoName); - return TSDB_CODE_ANAL_ALGO_NOT_LOAD; + return TSDB_CODE_ANA_ALGO_NOT_LOAD; } return 0; } static int32_t forecastCreateBuf(SForecastSupp* pSupp) { - SAnalBuf* pBuf = &pSupp->analBuf; + SAnalyticBuf* pBuf = &pSupp->analBuf; int64_t ts = 0; // taosGetTimestampMs(); - pBuf->bufType = ANAL_BUF_TYPE_JSON_COL; + pBuf->bufType = ANALYTICS_BUF_TYPE_JSON_COL; snprintf(pBuf->fileName, sizeof(pBuf->fileName), "%s/tdengine-forecast-%" PRId64, tsTempDir, ts); int32_t code = tsosAnalBufOpen(pBuf, 2); if (code != 0) goto _OVER; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 84dde6a579..b128fe41ed 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -3040,7 +3040,6 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock pBlockInfo->rows, pTaskInfo, &pTableScanInfo->base.metaCache); // ignore the table not exists error, since this table may have been dropped during the scan procedure. if (code) { - blockDataFreeRes((SSDataBlock*)pBlock); QUERY_CHECK_CODE(code, lino, _end); } @@ -3408,6 +3407,8 @@ int32_t streamScanOperatorEncode(SStreamScanInfo* pInfo, void** pBuff, int32_t* QUERY_CHECK_CODE(code, lino, _end); } + qDebug("%s last scan range %d. %" PRId64 ",%" PRId64, __func__, __LINE__, pInfo->lastScanRange.skey, pInfo->lastScanRange.ekey); + *pLen = len; _end: @@ -3465,11 +3466,6 @@ void streamScanOperatorDecode(void* pBuff, int32_t len, SStreamScanInfo* pInfo) goto _end; } - void* pUpInfo = taosMemoryCalloc(1, sizeof(SUpdateInfo)); - if (!pUpInfo) { - lino = __LINE__; - goto _end; - } SDecoder decoder = {0}; pDeCoder = &decoder; tDecoderInit(pDeCoder, buf, tlen); @@ -3478,14 +3474,20 @@ void streamScanOperatorDecode(void* pBuff, int32_t len, SStreamScanInfo* pInfo) goto _end; } + void* pUpInfo = taosMemoryCalloc(1, sizeof(SUpdateInfo)); + if (!pUpInfo) { + lino = __LINE__; + goto _end; + } code = pInfo->stateStore.updateInfoDeserialize(pDeCoder, pUpInfo); if (code == TSDB_CODE_SUCCESS) { pInfo->stateStore.updateInfoDestroy(pInfo->pUpdateInfo); pInfo->pUpdateInfo = pUpInfo; + qDebug("%s line:%d. stream scan updateinfo deserialize success", __func__, __LINE__); } else { taosMemoryFree(pUpInfo); - lino = __LINE__; - goto _end; + code = TSDB_CODE_SUCCESS; + qDebug("%s line:%d. 
stream scan did not have updateinfo", __func__, __LINE__); } if (tDecodeIsEnd(pDeCoder)) { @@ -3505,6 +3507,7 @@ void streamScanOperatorDecode(void* pBuff, int32_t len, SStreamScanInfo* pInfo) lino = __LINE__; goto _end; } + qDebug("%s last scan range %d. %" PRId64 ",%" PRId64, __func__, __LINE__, pInfo->lastScanRange.skey, pInfo->lastScanRange.ekey); _end: if (pDeCoder != NULL) { diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c index f77aa8f34a..50deba932f 100644 --- a/source/libs/executor/src/timesliceoperator.c +++ b/source/libs/executor/src/timesliceoperator.c @@ -1131,6 +1131,47 @@ static int32_t extractPkColumnFromFuncs(SNodeList* pFuncs, bool* pHasPk, SColumn return TSDB_CODE_SUCCESS; } +/** + * @brief Determine the actual time range for reading data based on the RANGE clause and the WHERE conditions. + * @param[in] cond The range specified by WHERE condition. + * @param[in] range The range specified by RANGE clause. + * @param[out] twindow The actual range to be scanned, i.e. the intersection of `cond` and `range`. + * @param[out] extTwindows The external ranges before and after `twindow`, each read for only one record, which is used for the FILL clause. + * @note `cond` and `twindow` may be the same address. + */ +static int32_t getQueryExtWindow(const STimeWindow* cond, const STimeWindow* range, STimeWindow* twindow, + STimeWindow* extTwindows) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + STimeWindow tempWindow; + + if (cond->skey > cond->ekey || range->skey > range->ekey) { + *twindow = extTwindows[0] = extTwindows[1] = TSWINDOW_DESC_INITIALIZER; + return code; + } + + if (range->ekey < cond->skey) { + extTwindows[1] = *cond; + *twindow = extTwindows[0] = TSWINDOW_DESC_INITIALIZER; + return code; + } + + if (cond->ekey < range->skey) { + extTwindows[0] = *cond; + *twindow = extTwindows[1] = TSWINDOW_DESC_INITIALIZER; + return code; + } + + // Only scan data in the time range intersection. 
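+  // Worked example (illustrative): cond = [0, 200], range = [50, 150]
+  //   => twindow = [50, 150], extTwindows[0] = [0, 49], extTwindows[1] = [151, 200]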
+ extTwindows[0] = extTwindows[1] = *cond; + twindow->skey = TMAX(cond->skey, range->skey); + twindow->ekey = TMIN(cond->ekey, range->ekey); + extTwindows[0].ekey = twindow->skey - 1; + extTwindows[1].skey = twindow->ekey + 1; + + return code; +} + int32_t createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SOperatorInfo** pOptrInfo) { QRY_PARAM_CHECK(pOptrInfo); @@ -1206,8 +1247,10 @@ int32_t createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyN if (downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) { STableScanInfo* pScanInfo = (STableScanInfo*)downstream->info; - pScanInfo->base.cond.twindows = pInfo->win; - pScanInfo->base.cond.type = TIMEWINDOW_RANGE_EXTERNAL; + SQueryTableDataCond *cond = &pScanInfo->base.cond; + cond->type = TIMEWINDOW_RANGE_EXTERNAL; + code = getQueryExtWindow(&cond->twindows, &pInfo->win, &cond->twindows, cond->extTwindows); + QUERY_CHECK_CODE(code, lino, _error); } setOperatorInfo(pOperator, "TimeSliceOperator", QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC, false, OP_NOT_OPENED, pInfo, diff --git a/source/libs/executor/test/CMakeLists.txt b/source/libs/executor/test/CMakeLists.txt index c75de23c32..cb1f951c94 100644 --- a/source/libs/executor/test/CMakeLists.txt +++ b/source/libs/executor/test/CMakeLists.txt @@ -9,7 +9,7 @@ MESSAGE(STATUS "build parser unit test") # ADD_EXECUTABLE(executorTest ${SOURCE_LIST}) # TARGET_LINK_LIBRARIES( # executorTest -# PRIVATE os util common transport gtest taos_static qcom executor function planner scalar nodes vnode +# PRIVATE os util common transport gtest ${TAOS_LIB_STATIC} qcom executor function planner scalar nodes vnode # ) # # TARGET_INCLUDE_DIRECTORIES( diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index c2e2e9c17c..83227dea9e 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -3037,61 +3037,60 @@ int32_t lastRowFunction(SqlFunctionCtx* pCtx) { TSKEY startKey = getRowPTs(pInput->pPTS, 0); TSKEY endKey = getRowPTs(pInput->pPTS, pInput->totalRows - 1); -#if 0 - int32_t blockDataOrder = (startKey <= endKey) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC; - - // the optimized version only valid if all tuples in one block are monotonious increasing or descreasing. - // this assumption is NOT always works if project operator exists in downstream. - if (blockDataOrder == TSDB_ORDER_ASC) { + if (pCtx->order == TSDB_ORDER_ASC && !pCtx->hasPrimaryKey) { for (int32_t i = pInput->numOfRows + pInput->startRowIndex - 1; i >= pInput->startRowIndex; --i) { - char* data = colDataGetData(pInputCol, i); + bool isNull = colDataIsNull(pInputCol, pInput->numOfRows, i, NULL); + char* data = isNull ? NULL : colDataGetData(pInputCol, i); TSKEY cts = getRowPTs(pInput->pPTS, i); numOfElems++; if (pResInfo->numOfRes == 0 || pInfo->ts < cts) { - doSaveLastrow(pCtx, data, i, cts, pInfo); + int32_t code = doSaveLastrow(pCtx, data, i, cts, pInfo); + if (code != TSDB_CODE_SUCCESS) return code; } break; } - } else { // descending order + } else if (!pCtx->hasPrimaryKey && pCtx->order == TSDB_ORDER_DESC) { + // the optimized version is only valid if all tuples in one block are monotonically increasing or decreasing. + // this assumption does NOT always hold if a project operator exists downstream. 
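+    // In DESC order the newest tuple comes first, so the loop below can save the
+    // first row it visits and break immediately.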
for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; ++i) { - char* data = colDataGetData(pInputCol, i); + bool isNull = colDataIsNull(pInputCol, pInput->numOfRows, i, NULL); + char* data = isNull ? NULL : colDataGetData(pInputCol, i); TSKEY cts = getRowPTs(pInput->pPTS, i); numOfElems++; if (pResInfo->numOfRes == 0 || pInfo->ts < cts) { - doSaveLastrow(pCtx, data, i, cts, pInfo); + int32_t code = doSaveLastrow(pCtx, data, i, cts, pInfo); + if (code != TSDB_CODE_SUCCESS) return code; } break; } - } -#else + } else { + int64_t* pts = (int64_t*)pInput->pPTS->pData; + int from = -1; + int32_t i = -1; + while (funcInputGetNextRowIndex(pInput, from, false, &i, &from)) { + bool isNull = colDataIsNull(pInputCol, pInput->numOfRows, i, NULL); + char* data = isNull ? NULL : colDataGetData(pInputCol, i); + TSKEY cts = pts[i]; - int64_t* pts = (int64_t*)pInput->pPTS->pData; - int from = -1; - int32_t i = -1; - while (funcInputGetNextRowIndex(pInput, from, false, &i, &from)) { - bool isNull = colDataIsNull(pInputCol, pInput->numOfRows, i, NULL); - char* data = isNull ? NULL : colDataGetData(pInputCol, i); - TSKEY cts = pts[i]; - - numOfElems++; - char* pkData = NULL; - if (pCtx->hasPrimaryKey) { - pkData = colDataGetData(pkCol, i); - } - if (pResInfo->numOfRes == 0 || pInfo->ts < cts || - (pInfo->ts == pts[i] && pkCompareFn && pkCompareFn(pkData, pInfo->pkData) < 0)) { - int32_t code = doSaveLastrow(pCtx, data, i, cts, pInfo); - if (code != TSDB_CODE_SUCCESS) { - return code; + numOfElems++; + char* pkData = NULL; + if (pCtx->hasPrimaryKey) { + pkData = colDataGetData(pkCol, i); + } + if (pResInfo->numOfRes == 0 || pInfo->ts < cts || + (pInfo->ts == pts[i] && pkCompareFn && pkCompareFn(pkData, pInfo->pkData) < 0)) { + int32_t code = doSaveLastrow(pCtx, data, i, cts, pInfo); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + pResInfo->numOfRes = 1; } - pResInfo->numOfRes = 1; } - } -#endif + } SET_VAL(pResInfo, numOfElems, 1); return TSDB_CODE_SUCCESS; diff --git a/source/libs/geometry/src/geosWrapper.c b/source/libs/geometry/src/geosWrapper.c index 13c5f7208e..8789762a85 100644 --- a/source/libs/geometry/src/geosWrapper.c +++ b/source/libs/geometry/src/geosWrapper.c @@ -63,7 +63,7 @@ int32_t initCtxMakePoint() { int32_t doMakePoint(double x, double y, unsigned char **outputGeom, size_t *size) { int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = NULL; - + TAOS_CHECK_RETURN(getThreadLocalGeosCtx(&geosCtx)); GEOSGeometry *geom = NULL; @@ -170,7 +170,7 @@ static int32_t initWktRegex(pcre2_code **ppRegex, pcre2_match_data **ppMatchData int32_t initCtxGeomFromText() { int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = NULL; - + TAOS_CHECK_RETURN(getThreadLocalGeosCtx(&geosCtx)); if (geosCtx->handle == NULL) { @@ -208,7 +208,7 @@ int32_t initCtxGeomFromText() { int32_t doGeomFromText(const char *inputWKT, unsigned char **outputGeom, size_t *size) { int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = NULL; - + TAOS_CHECK_RETURN(getThreadLocalGeosCtx(&geosCtx)); GEOSGeometry *geom = NULL; @@ -245,7 +245,7 @@ _exit: int32_t initCtxAsText() { int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = NULL; - + TAOS_CHECK_RETURN(getThreadLocalGeosCtx(&geosCtx)); if (geosCtx->handle == NULL) { @@ -283,11 +283,11 @@ int32_t initCtxAsText() { int32_t doAsText(const unsigned char *inputGeom, size_t size, char **outputWKT) { int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = NULL; - + TAOS_CHECK_RETURN(getThreadLocalGeosCtx(&geosCtx)); - GEOSGeometry *geom = 
NULL; - char *wkt = NULL; + GEOSGeometry *geom = NULL; + char *wkt = NULL; geom = GEOSWKBReader_read_r(geosCtx->handle, geosCtx->WKBReader, inputGeom, size); if (geom == NULL) { @@ -313,10 +313,35 @@ _exit: return code; } +int32_t checkWKB(const unsigned char *wkb, size_t size) { + int32_t code = TSDB_CODE_SUCCESS; + GEOSGeometry *geom = NULL; + SGeosContext *geosCtx = NULL; + + TAOS_CHECK_RETURN(getThreadLocalGeosCtx(&geosCtx)); + + geom = GEOSWKBReader_read_r(geosCtx->handle, geosCtx->WKBReader, wkb, size); + if (geom == NULL) { + return TSDB_CODE_FUNC_FUNTION_PARA_VALUE; + } + + if (!GEOSisValid_r(geosCtx->handle, geom)) { + code = TSDB_CODE_FUNC_FUNTION_PARA_VALUE; + goto _exit; + } + +_exit: + if (geom) { + GEOSGeom_destroy_r(geosCtx->handle, geom); + geom = NULL; + } + return code; +} + int32_t initCtxRelationFunc() { int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = NULL; - + TAOS_CHECK_RETURN(getThreadLocalGeosCtx(&geosCtx)); if (geosCtx->handle == NULL) { @@ -343,7 +368,7 @@ int32_t doGeosRelation(const GEOSGeometry *geom1, const GEOSPreparedGeometry *pr _geosPreparedRelationFunc_t preparedRelationFn, _geosPreparedRelationFunc_t swappedPreparedRelationFn) { SGeosContext *geosCtx = NULL; - + TAOS_CHECK_RETURN(getThreadLocalGeosCtx(&geosCtx)); if (!preparedGeom1) { diff --git a/source/libs/nodes/src/nodesEqualFuncs.c b/source/libs/nodes/src/nodesEqualFuncs.c index 241da85267..891843761a 100644 --- a/source/libs/nodes/src/nodesEqualFuncs.c +++ b/source/libs/nodes/src/nodesEqualFuncs.c @@ -153,6 +153,12 @@ static bool caseWhenNodeEqual(const SCaseWhenNode* a, const SCaseWhenNode* b) { return true; } +static bool groupingSetNodeEqual(const SGroupingSetNode* a, const SGroupingSetNode* b) { + COMPARE_SCALAR_FIELD(groupingSetType); + COMPARE_NODE_LIST_FIELD(pParameterList); + return true; +} + bool nodesEqualNode(const SNode* a, const SNode* b) { if (a == b) { return true; @@ -181,10 +187,11 @@ bool nodesEqualNode(const SNode* a, const SNode* b) { return whenThenNodeEqual((const SWhenThenNode*)a, (const SWhenThenNode*)b); case QUERY_NODE_CASE_WHEN: return caseWhenNodeEqual((const SCaseWhenNode*)a, (const SCaseWhenNode*)b); + case QUERY_NODE_GROUPING_SET: + return groupingSetNodeEqual((const SGroupingSetNode*)a, (const SGroupingSetNode*)b); case QUERY_NODE_REAL_TABLE: case QUERY_NODE_TEMP_TABLE: case QUERY_NODE_JOIN_TABLE: - case QUERY_NODE_GROUPING_SET: case QUERY_NODE_ORDER_BY_EXPR: case QUERY_NODE_LIMIT: return false; diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index a9d0aa2924..30cc552761 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -2948,3 +2948,46 @@ void nodesSortList(SNodeList** pList, int32_t (*comp)(SNode* pNode1, SNode* pNod inSize *= 2; } } + +static SNode* nodesListFindNode(SNodeList* pList, SNode* pNode) { + SNode* pFound = NULL; + FOREACH(pFound, pList) { + if (nodesEqualNode(pFound, pNode)) { + break; + } + } + return pFound; +} + +int32_t nodesListDeduplicate(SNodeList** ppList) { + if (!ppList || LIST_LENGTH(*ppList) <= 1) return TSDB_CODE_SUCCESS; + if (LIST_LENGTH(*ppList) == 2) { + SNode* pNode1 = nodesListGetNode(*ppList, 0); + SNode* pNode2 = nodesListGetNode(*ppList, 1); + if (nodesEqualNode(pNode1, pNode2)) { + SListCell* pCell = nodesListGetCell(*ppList, 1); + (void)nodesListErase(*ppList, pCell); + } + return TSDB_CODE_SUCCESS; + } + SNodeList* pTmp = NULL; + int32_t code = nodesMakeList(&pTmp); + if (TSDB_CODE_SUCCESS == code) { + SNode* pNode = NULL; + 
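+    // Quadratic de-duplication: for every node, linearly search the temporary list and
+    // append a clone only when no equal node exists yet. This is assumed acceptable
+    // because GROUP BY lists are typically short.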
FOREACH(pNode, *ppList) { + SNode* pFound = nodesListFindNode(pTmp, pNode); + if (NULL == pFound) { + code = nodesCloneNode(pNode, &pFound); + if (TSDB_CODE_SUCCESS == code) code = nodesListStrictAppend(pTmp, pFound); + if (TSDB_CODE_SUCCESS != code) break; + } + } + } + if (TSDB_CODE_SUCCESS == code) { + nodesDestroyList(*ppList); + *ppList = pTmp; + } else { + nodesDestroyList(pTmp); + } + return code; +} diff --git a/source/libs/parser/inc/parUtil.h b/source/libs/parser/inc/parUtil.h index 857c7604a9..7298b04eb0 100644 --- a/source/libs/parser/inc/parUtil.h +++ b/source/libs/parser/inc/parUtil.h @@ -115,6 +115,7 @@ typedef struct SParseMetaCache { SHashObj* pTableName; // key is tbFUid, elements is STableMeta*(append with tbName) SArray* pDnodes; // element is SEpSet bool dnodeRequired; + bool forceFetchViewMeta; } SParseMetaCache; int32_t generateSyntaxErrMsg(SMsgBuf* pBuf, int32_t errCode, ...); diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index 245346273f..1a5e3444c0 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -1377,7 +1377,7 @@ SNode* createAnomalyWindowNode(SAstCreateContext* pCxt, SNode* pExpr, const STok CHECK_MAKE_NODE(pAnomaly->pCol); pAnomaly->pExpr = pExpr; if (pFuncOpt == NULL) { - tstrncpy(pAnomaly->anomalyOpt, "algo=iqr", TSDB_ANAL_ALGO_OPTION_LEN); + tstrncpy(pAnomaly->anomalyOpt, "algo=iqr", TSDB_ANALYTIC_ALGO_OPTION_LEN); } else { (void)trimString(pFuncOpt->z, pFuncOpt->n, pAnomaly->anomalyOpt, sizeof(pAnomaly->anomalyOpt)); } diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c index eecc04658b..b78e10768f 100644 --- a/source/libs/parser/src/parAstParser.c +++ b/source/libs/parser/src/parAstParser.c @@ -810,7 +810,7 @@ static int32_t collectMetaKeyFromShowCreateView(SCollectMetaKeyCxt* pCxt, SShowC if (TSDB_CODE_SUCCESS == code) { code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->viewName, pCxt->pMetaCache); } - + pCxt->pMetaCache->forceFetchViewMeta = true; return code; } @@ -888,6 +888,7 @@ static int32_t collectMetaKeyFromCreateViewStmt(SCollectMetaKeyCxt* pCxt, SCreat static int32_t collectMetaKeyFromDropViewStmt(SCollectMetaKeyCxt* pCxt, SDropViewStmt* pStmt) { int32_t code = reserveViewUserAuthInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->pUser, pStmt->dbName, pStmt->viewName, AUTH_TYPE_ALTER, pCxt->pMetaCache); + pCxt->pMetaCache->forceFetchViewMeta = true; return code; } diff --git a/source/libs/parser/src/parCalcConst.c b/source/libs/parser/src/parCalcConst.c index e757ec8b24..a2e98bece7 100644 --- a/source/libs/parser/src/parCalcConst.c +++ b/source/libs/parser/src/parCalcConst.c @@ -329,14 +329,23 @@ static int32_t calcConstGroupBy(SCalcConstContext* pCxt, SSelectStmt* pSelect) { if (TSDB_CODE_SUCCESS == code) { SNode* pNode = NULL; FOREACH(pNode, pSelect->pGroupByList) { + bool hasNotValue = false; SNode* pGroupPara = NULL; FOREACH(pGroupPara, ((SGroupingSetNode*)pNode)->pParameterList) { if (QUERY_NODE_VALUE != nodeType(pGroupPara)) { - return code; + hasNotValue = true; + break; + } + } + if (!hasNotValue) { + if (pSelect->hasAggFuncs) { + ERASE_NODE(pSelect->pGroupByList); + } else { + if (!cell->pPrev && !cell->pNext) continue; + ERASE_NODE(pSelect->pGroupByList); } } } - NODES_DESTORY_LIST(pSelect->pGroupByList); } return code; } diff --git a/source/libs/parser/src/parInsertStmt.c b/source/libs/parser/src/parInsertStmt.c index 0979028e6d..c6951d229d 100644 --- 
a/source/libs/parser/src/parInsertStmt.c +++ b/source/libs/parser/src/parInsertStmt.c @@ -13,6 +13,7 @@ * along with this program. If not, see . */ +#include "geosWrapper.h" #include "os.h" #include "parInsertUtil.h" #include "parInt.h" @@ -192,6 +193,12 @@ int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const ch // strcpy(val.colName, pTagSchema->name); if (pTagSchema->type == TSDB_DATA_TYPE_BINARY || pTagSchema->type == TSDB_DATA_TYPE_VARBINARY || pTagSchema->type == TSDB_DATA_TYPE_GEOMETRY) { + if (pTagSchema->type == TSDB_DATA_TYPE_GEOMETRY) { + if (initCtxAsText() || checkWKB(bind[c].buffer, colLen)) { + code = buildSyntaxErrMsg(&pBuf, "invalid geometry tag", bind[c].buffer); + goto end; + } + } val.pData = (uint8_t*)bind[c].buffer; val.nData = colLen; } else if (pTagSchema->type == TSDB_DATA_TYPE_NCHAR) { @@ -409,7 +416,8 @@ int32_t qBindStmtColsValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bind, c } code = tColDataAddValueByBind(pCol, pBind, - IS_VAR_DATA_TYPE(pColSchema->type) ? pColSchema->bytes - VARSTR_HEADER_SIZE : -1); + IS_VAR_DATA_TYPE(pColSchema->type) ? pColSchema->bytes - VARSTR_HEADER_SIZE : -1, + initCtxAsText, checkWKB); if (code) { goto _return; } @@ -461,7 +469,8 @@ int32_t qBindStmtSingleColValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bi } code = tColDataAddValueByBind(pCol, pBind, - IS_VAR_DATA_TYPE(pColSchema->type) ? pColSchema->bytes - VARSTR_HEADER_SIZE : -1); + IS_VAR_DATA_TYPE(pColSchema->type) ? pColSchema->bytes - VARSTR_HEADER_SIZE : -1, + initCtxAsText, checkWKB); qDebug("stmt col %d bind %d rows data", colIdx, rowNum); @@ -544,6 +553,12 @@ int32_t qBindStmtTagsValue2(void* pBlock, void* boundTags, int64_t suid, const c // strcpy(val.colName, pTagSchema->name); if (pTagSchema->type == TSDB_DATA_TYPE_BINARY || pTagSchema->type == TSDB_DATA_TYPE_VARBINARY || pTagSchema->type == TSDB_DATA_TYPE_GEOMETRY) { + if (pTagSchema->type == TSDB_DATA_TYPE_GEOMETRY) { + if (initCtxAsText() || checkWKB(bind[c].buffer, colLen)) { + code = buildSyntaxErrMsg(&pBuf, "invalid geometry tag", bind[c].buffer); + goto end; + } + } val.pData = (uint8_t*)bind[c].buffer; val.nData = colLen; } else if (pTagSchema->type == TSDB_DATA_TYPE_NCHAR) { @@ -666,11 +681,25 @@ int32_t qBindStmtStbColsValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bin int32_t code = 0; int16_t lastColId = -1; bool colInOrder = true; + int ncharColNums = 0; if (NULL == *pTSchema) { *pTSchema = tBuildTSchema(pSchema, pDataBlock->pMeta->tableInfo.numOfColumns, pDataBlock->pMeta->sversion); } + for (int c = 0; c < boundInfo->numOfBound; ++c) { + if (TSDB_DATA_TYPE_NCHAR == pSchema[boundInfo->pColIndex[c]].type) { + ncharColNums++; + } + } + if (ncharColNums > 0) { + ncharBinds = taosArrayInit(ncharColNums, sizeof(ncharBind)); + if (!ncharBinds) { + code = terrno; + goto _return; + } + } + for (int c = 0; c < boundInfo->numOfBound; ++c) { SSchema* pColSchema = &pSchema[boundInfo->pColIndex[c]]; if (pColSchema->colId <= lastColId) { @@ -695,13 +724,6 @@ int32_t qBindStmtStbColsValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bin if (code) { goto _return; } - if (!ncharBinds) { - ncharBinds = taosArrayInit(1, sizeof(ncharBind)); - if (!ncharBinds) { - code = terrno; - goto _return; - } - } if (!taosArrayPush(ncharBinds, &ncharBind)) { code = terrno; goto _return; @@ -824,7 +846,8 @@ int32_t qBindStmtColsValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bind, } code = tColDataAddValueByBind2(pCol, pBind, - IS_VAR_DATA_TYPE(pColSchema->type) ? 
pColSchema->bytes - VARSTR_HEADER_SIZE : -1); + IS_VAR_DATA_TYPE(pColSchema->type) ? pColSchema->bytes - VARSTR_HEADER_SIZE : -1, + initCtxAsText, checkWKB); if (code) { goto _return; } @@ -876,7 +899,8 @@ int32_t qBindStmtSingleColValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* b } code = tColDataAddValueByBind2(pCol, pBind, - IS_VAR_DATA_TYPE(pColSchema->type) ? pColSchema->bytes - VARSTR_HEADER_SIZE : -1); + IS_VAR_DATA_TYPE(pColSchema->type) ? pColSchema->bytes - VARSTR_HEADER_SIZE : -1, + initCtxAsText, checkWKB); qDebug("stmt col %d bind %d rows data", colIdx, rowNum); diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 99c03c412c..fcb6361a6b 100755 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -3311,11 +3311,11 @@ static int32_t selectCommonType(SDataType* commonType, const SDataType* newType) } else { resultType = gDisplyTypes[type2][type1]; } - + if (resultType == -1) { return TSDB_CODE_SCALAR_CONVERT_ERROR; } - + if (commonType->type == newType->type) { commonType->bytes = TMAX(commonType->bytes, newType->bytes); return TSDB_CODE_SUCCESS; @@ -3328,9 +3328,9 @@ static int32_t selectCommonType(SDataType* commonType, const SDataType* newType) } else { commonType->bytes = TMAX(TMAX(commonType->bytes, newType->bytes), TYPE_BYTES[resultType]); } - + commonType->type = resultType; - + return TSDB_CODE_SUCCESS; } @@ -9652,7 +9652,7 @@ static int32_t translateDropUser(STranslateContext* pCxt, SDropUserStmt* pStmt) static int32_t translateCreateAnode(STranslateContext* pCxt, SCreateAnodeStmt* pStmt) { SMCreateAnodeReq createReq = {0}; createReq.urlLen = strlen(pStmt->url) + 1; - if (createReq.urlLen > TSDB_ANAL_ANODE_URL_LEN) { + if (createReq.urlLen > TSDB_ANALYTIC_ANODE_URL_LEN) { return TSDB_CODE_MND_ANODE_TOO_LONG_URL; } @@ -13127,7 +13127,7 @@ static int32_t extractShowCreateViewResultSchema(int32_t* numOfCols, SSchema** p } static int32_t extractShowVariablesResultSchema(int32_t* numOfCols, SSchema** pSchema) { - *numOfCols = 3; + *numOfCols = SHOW_LOCAL_VARIABLES_RESULT_COLS; // SHOW_VARIABLES_RESULT_COLS *pSchema = taosMemoryCalloc((*numOfCols), sizeof(SSchema)); if (NULL == (*pSchema)) { return terrno; @@ -13138,13 +13138,17 @@ static int32_t extractShowVariablesResultSchema(int32_t* numOfCols, SSchema** pS strcpy((*pSchema)[0].name, "name"); (*pSchema)[1].type = TSDB_DATA_TYPE_BINARY; - (*pSchema)[1].bytes = TSDB_CONFIG_VALUE_LEN; + (*pSchema)[1].bytes = TSDB_CONFIG_PATH_LEN; strcpy((*pSchema)[1].name, "value"); (*pSchema)[2].type = TSDB_DATA_TYPE_BINARY; (*pSchema)[2].bytes = TSDB_CONFIG_SCOPE_LEN; strcpy((*pSchema)[2].name, "scope"); + (*pSchema)[3].type = TSDB_DATA_TYPE_BINARY; + (*pSchema)[3].bytes = TSDB_CONFIG_INFO_LEN; + strcpy((*pSchema)[3].name, "info"); + return TSDB_CODE_SUCCESS; } diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c index e35eea9e72..44e44982a3 100644 --- a/source/libs/parser/src/parUtil.c +++ b/source/libs/parser/src/parUtil.c @@ -817,6 +817,7 @@ int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalog } #endif pCatalogReq->dNodeRequired = pMetaCache->dnodeRequired; + pCatalogReq->forceFetchViewMeta = pMetaCache->forceFetchViewMeta; return code; } diff --git a/source/libs/parser/src/parser.c b/source/libs/parser/src/parser.c index c2714659ec..e2135bfd63 100644 --- a/source/libs/parser/src/parser.c +++ b/source/libs/parser/src/parser.c @@ -154,6 +154,9 @@ static int32_t parseSqlSyntax(SParseContext* 
pCxt, SQuery** pQuery, SParseMetaCa } static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) { + if (!pParam || IS_NULL_TYPE(pParam->buffer_type)) { + return TSDB_CODE_APP_ERROR; + } if (IS_VAR_DATA_TYPE(pVal->node.resType.type)) { taosMemoryFreeClear(pVal->datum.p); } @@ -441,6 +444,9 @@ int32_t qStmtBindParams(SQuery* pQuery, TAOS_MULTI_BIND* pParams, int32_t colIdx } static int32_t setValueByBindParam2(SValueNode* pVal, TAOS_STMT2_BIND* pParam) { + if (!pParam || IS_NULL_TYPE(pParam->buffer_type)) { + return TSDB_CODE_APP_ERROR; + } if (IS_VAR_DATA_TYPE(pVal->node.resType.type)) { taosMemoryFreeClear(pVal->datum.p); } diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index 09a4b9c593..fd4e42f256 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -838,8 +838,11 @@ static int32_t createAggLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect, } if (NULL != pSelect->pGroupByList) { - pAgg->pGroupKeys = NULL; - code = nodesCloneList(pSelect->pGroupByList, &pAgg->pGroupKeys); + code = nodesListDeduplicate(&pSelect->pGroupByList); + if (TSDB_CODE_SUCCESS == code) { + pAgg->pGroupKeys = NULL; + code = nodesCloneList(pSelect->pGroupByList, &pAgg->pGroupKeys); + } } // rewrite the expression in subsequent clauses diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 885faa5461..a1809ff137 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -204,6 +204,7 @@ static void optSetParentOrder(SLogicNode* pNode, EOrder order, SLogicNode* pNode // case QUERY_NODE_LOGIC_PLAN_WINDOW: case QUERY_NODE_LOGIC_PLAN_AGG: case QUERY_NODE_LOGIC_PLAN_SORT: + case QUERY_NODE_LOGIC_PLAN_FILL: if (pNode == pNodeForcePropagate) { pNode->outputTsOrder = order; break; @@ -3491,37 +3492,77 @@ static void eliminateProjPushdownProjIdx(SNodeList* pParentProjects, SNodeList* } } +static int32_t eliminateProjOptFindProjPrefixWithOrderCheck(SProjectLogicNode* pProj, SProjectLogicNode* pChild, SNodeList** pNewChildTargets, bool *orderMatch) { + int32_t code = 0; + SNode* pProjection = NULL, *pChildTarget = NULL; + *orderMatch = true; + FORBOTH(pProjection, pProj->pProjections, pChildTarget, pChild->node.pTargets) { + if (!pProjection) break; + if (0 != strcmp(((SColumnNode*)pProjection)->colName, ((SColumnNode*)pChildTarget)->colName)) { + *orderMatch = false; + break; + } + if (pNewChildTargets) { + SNode* pNew = NULL; + code = nodesCloneNode(pChildTarget, &pNew); + if (TSDB_CODE_SUCCESS == code) { + code = nodesListMakeStrictAppend(pNewChildTargets, pNew); + } + if (TSDB_CODE_SUCCESS != code && pNewChildTargets) { + nodesDestroyList(*pNewChildTargets); + *pNewChildTargets = NULL; + break; + } + } + } + return code; +} + +static int32_t eliminateProjOptPushTargetsToSetOpChildren(SProjectLogicNode* pSetOp) { + SNode* pChildProj = NULL; + int32_t code = 0; + bool orderMatch = false; + FOREACH(pChildProj, pSetOp->node.pChildren) { + if (QUERY_NODE_LOGIC_PLAN_PROJECT == nodeType(pChildProj)) { + SProjectLogicNode* pChildLogic = (SProjectLogicNode*)pChildProj; + SNodeList* pNewChildTargetsForChild = NULL; + code = eliminateProjOptFindProjPrefixWithOrderCheck(pSetOp, pChildLogic, &pNewChildTargetsForChild, &orderMatch); + if (TSDB_CODE_SUCCESS != code) break; + nodesDestroyList(pChildLogic->node.pTargets); + pChildLogic->node.pTargets = pNewChildTargetsForChild; + 
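+        // The pruned target list now replaces the child's targets; the recursion below
+        // propagates the same pruning into nested set-operation projections.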
alignProjectionWithTarget((SLogicNode*)pChildLogic); + if (pChildLogic->isSetOpProj) { + code = eliminateProjOptPushTargetsToSetOpChildren(pChildLogic); + if (TSDB_CODE_SUCCESS != code) break; + } + } + } + return code; +} + static int32_t eliminateProjOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan, SProjectLogicNode* pProjectNode) { SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pProjectNode->node.pChildren, 0); int32_t code = 0; + bool isSetOpProj = false; + bool orderMatch = false; + bool sizeMatch = LIST_LENGTH(pProjectNode->pProjections) == LIST_LENGTH(pChild->pTargets); + bool needReplaceTargets = true; if (NULL == pProjectNode->node.pParent) { SNodeList* pNewChildTargets = NULL; - code = nodesMakeList(&pNewChildTargets); - if (TSDB_CODE_SUCCESS != code) { - return code; - } SNode * pProjection = NULL, *pChildTarget = NULL; - bool orderMatch = true; - bool needOrderMatch = + isSetOpProj = QUERY_NODE_LOGIC_PLAN_PROJECT == nodeType(pChild) && ((SProjectLogicNode*)pChild)->isSetOpProj; - if (needOrderMatch) { + if (isSetOpProj) { // For sql: select ... from (select ... union all select ...); // When eliminating the outer proj (the outer select), we have to make sure that the outer proj projections and // union all project targets have same columns in the same order. See detail in TD-30188 - FORBOTH(pProjection, pProjectNode->pProjections, pChildTarget, pChild->pTargets) { - if (!pProjection) break; - if (0 != strcmp(((SColumnNode*)pProjection)->colName, ((SColumnNode*)pChildTarget)->colName)) { - orderMatch = false; - break; - } - SNode* pNew = NULL; - code = nodesCloneNode(pChildTarget, &pNew); - if (TSDB_CODE_SUCCESS == code) { - code = nodesListStrictAppend(pNewChildTargets, pNew); - } - if (TSDB_CODE_SUCCESS != code) break; + code = eliminateProjOptFindProjPrefixWithOrderCheck(pProjectNode, (SProjectLogicNode*)pChild, + sizeMatch ? NULL : &pNewChildTargets, &orderMatch); + if (TSDB_CODE_SUCCESS == code && sizeMatch && orderMatch) { + pNewChildTargets = pChild->pTargets; + needReplaceTargets = false; } } else { FOREACH(pProjection, pProjectNode->pProjections) { @@ -3530,7 +3571,7 @@ static int32_t eliminateProjOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan* SNode* pNew = NULL; code = nodesCloneNode(pChildTarget, &pNew); if (TSDB_CODE_SUCCESS == code) { - code = nodesListStrictAppend(pNewChildTargets, pNew); + code = nodesListMakeStrictAppend(&pNewChildTargets, pNew); } break; } @@ -3545,12 +3586,13 @@ static int32_t eliminateProjOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan* return code; } - if (eliminateProjOptCanChildConditionUseChildTargets(pChild, pNewChildTargets) && - (!needOrderMatch || (needOrderMatch && orderMatch))) { - nodesDestroyList(pChild->pTargets); - pChild->pTargets = pNewChildTargets; + if (eliminateProjOptCanChildConditionUseChildTargets(pChild, pNewChildTargets) && (!isSetOpProj || orderMatch)) { + if (needReplaceTargets) { + nodesDestroyList(pChild->pTargets); + pChild->pTargets = pNewChildTargets; + } } else { - nodesDestroyList(pNewChildTargets); + if (needReplaceTargets) nodesDestroyList(pNewChildTargets); OPTIMIZE_FLAG_SET_MASK(pProjectNode->node.optimizedFlag, OPTIMIZE_FLAG_ELIMINATE_PROJ); pCxt->optimized = true; return TSDB_CODE_SUCCESS; @@ -3573,7 +3615,11 @@ static int32_t eliminateProjOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan* NODES_CLEAR_LIST(pProjectNode->node.pChildren); nodesDestroyNode((SNode*)pProjectNode); // if pChild is a project logic node, remove its projection which is not reference by its target. 
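+  // Illustrative case for the set-op branch above (hypothetical query):
+  //   SELECT c1 FROM (SELECT c1, c2 FROM t1 UNION ALL SELECT c1, c2 FROM t2);
+  // The outer projection keeps an ordered prefix (c1) of the union's targets, so the
+  // outer proj node can be eliminated and the pruned target list pushed into both children.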
- alignProjectionWithTarget(pChild); + if (needReplaceTargets) { + alignProjectionWithTarget(pChild); + // Since we have eliminated the outer proj, we need to push down the new targets to the children of the set operation. + if (isSetOpProj && orderMatch && !sizeMatch) code = eliminateProjOptPushTargetsToSetOpChildren((SProjectLogicNode*)pChild); + } } pCxt->optimized = true; return code; diff --git a/source/libs/qworker/inc/qwInt.h b/source/libs/qworker/inc/qwInt.h index 708c285aea..6d81baf91a 100644 --- a/source/libs/qworker/inc/qwInt.h +++ b/source/libs/qworker/inc/qwInt.h @@ -313,29 +313,29 @@ typedef struct SQWorkerMgmt { #define QW_SCH_DLOG(param, ...) qDebug("QW:%p SID:%" PRIx64 " " param, mgmt, sId, __VA_ARGS__) #define QW_TASK_ELOG(param, ...) \ - qError("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId, __VA_ARGS__) + qError("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId, __VA_ARGS__) #define QW_TASK_WLOG(param, ...) \ - qWarn("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId, __VA_ARGS__) + qWarn("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId, __VA_ARGS__) #define QW_TASK_DLOG(param, ...) \ - qDebug("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId, __VA_ARGS__) + qDebug("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId, __VA_ARGS__) #define QW_TASK_DLOGL(param, ...) \ - qDebugL("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId, __VA_ARGS__) + qDebugL("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId, __VA_ARGS__) #define QW_TASK_ELOG_E(param) \ - qError("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId) + qError("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId) #define QW_TASK_WLOG_E(param) \ - qWarn("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId) + qWarn("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId) #define QW_TASK_DLOG_E(param) \ - qDebug("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId) + qDebug("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId) #define QW_SCH_TASK_ELOG(param, ...) \ - qError("QW:%p SID:0x%" PRIx64 ",qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, \ + qError("QW:%p SID:0x%" PRIx64 ",QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, \ qId, cId, tId, eId, __VA_ARGS__) #define QW_SCH_TASK_WLOG(param, ...) \ - qWarn("QW:%p SID:0x%" PRIx64 ",qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, qId, \ + qWarn("QW:%p SID:0x%" PRIx64 ",QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, qId, \ cId, tId, eId, __VA_ARGS__) #define QW_SCH_TASK_DLOG(param, ...) \ - qDebug("QW:%p SID:0x%" PRIx64 ",qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, \ + qDebug("QW:%p SID:0x%" PRIx64 ",QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, \ qId, cId, tId, eId, __VA_ARGS__) #define QW_LOCK_DEBUG(...) 
\ diff --git a/source/libs/scheduler/inc/schInt.h b/source/libs/scheduler/inc/schInt.h index 6a910453f0..ef643852ea 100644 --- a/source/libs/scheduler/inc/schInt.h +++ b/source/libs/scheduler/inc/schInt.h @@ -62,7 +62,7 @@ typedef enum { #define SCH_DEFAULT_MAX_RETRY_NUM 6 #define SCH_MIN_AYSNC_EXEC_NUM 3 #define SCH_DEFAULT_RETRY_TOTAL_ROUND 3 -#define SCH_DEFAULT_TASK_CAPACITY_NUM 1000 +#define SCH_DEFAULT_TASK_CAPACITY_NUM 1000 typedef struct SSchDebug { bool lockEnable; @@ -333,12 +333,13 @@ extern SSchedulerMgmt schMgmt; #define SCH_UNLOCK_TASK(_task) SCH_UNLOCK(SCH_WRITE, &(_task)->lock) #define SCH_CLIENT_ID(_task) ((_task) ? (_task)->clientId : -1) -#define SCH_TASK_ID(_task) ((_task) ? (_task)->taskId : -1) -#define SCH_TASK_EID(_task) ((_task) ? (_task)->execId : -1) +#define SCH_TASK_ID(_task) ((_task) ? (_task)->taskId : -1) +#define SCH_TASK_EID(_task) ((_task) ? (_task)->execId : -1) #define SCH_IS_DATA_BIND_QRY_TASK(task) ((task)->plan->subplanType == SUBPLAN_TYPE_SCAN) -#define SCH_IS_DATA_BIND_PLAN(_plan) (((_plan)->subplanType == SUBPLAN_TYPE_SCAN) || ((_plan)->subplanType == SUBPLAN_TYPE_MODIFY)) -#define SCH_IS_DATA_BIND_TASK(task) SCH_IS_DATA_BIND_PLAN((task)->plan) +#define SCH_IS_DATA_BIND_PLAN(_plan) \ + (((_plan)->subplanType == SUBPLAN_TYPE_SCAN) || ((_plan)->subplanType == SUBPLAN_TYPE_MODIFY)) +#define SCH_IS_DATA_BIND_TASK(task) SCH_IS_DATA_BIND_PLAN((task)->plan) #define SCH_IS_LEAF_TASK(_job, _task) (((_task)->level->level + 1) == (_job)->levelNum) #define SCH_IS_DATA_MERGE_TASK(task) (!SCH_IS_DATA_BIND_TASK(task)) #define SCH_IS_LOCAL_EXEC_TASK(_job, _task) \ @@ -419,15 +420,15 @@ extern SSchedulerMgmt schMgmt; #define SCH_SWITCH_EPSET(_addr) ((_addr)->epSet.inUse = ((_addr)->epSet.inUse + 1) % (_addr)->epSet.numOfEps) #define SCH_TASK_NUM_OF_EPS(_addr) ((_addr)->epSet.numOfEps) -#define SCH_LOG_TASK_START_TS(_task) \ - do { \ - int64_t us = taosGetTimestampUs(); \ - if (NULL == taosArrayPush((_task)->profile.execTime, &us)) { \ - qError("taosArrayPush task execTime failed, error:%s", tstrerror(terrno)); \ - } \ - if (0 == (_task)->execId) { \ - (_task)->profile.startTs = us; \ - } \ +#define SCH_LOG_TASK_START_TS(_task) \ + do { \ + int64_t us = taosGetTimestampUs(); \ + if (NULL == taosArrayPush((_task)->profile.execTime, &us)) { \ + qError("taosArrayPush task execTime failed, error:%s", tstrerror(terrno)); \ + } \ + if (0 == (_task)->execId) { \ + (_task)->profile.startTs = us; \ + } \ } while (0) #define SCH_LOG_TASK_WAIT_TS(_task) \ @@ -450,23 +451,23 @@ extern SSchedulerMgmt schMgmt; (_task)->profile.endTs = us; \ } while (0) -#define SCH_JOB_ELOG(param, ...) qError("qid:0x%" PRIx64 " " param, pJob->queryId, __VA_ARGS__) -#define SCH_JOB_DLOG(param, ...) qDebug("qid:0x%" PRIx64 " " param, pJob->queryId, __VA_ARGS__) +#define SCH_JOB_ELOG(param, ...) qError("QID:0x%" PRIx64 " " param, pJob->queryId, __VA_ARGS__) +#define SCH_JOB_DLOG(param, ...) qDebug("QID:0x%" PRIx64 " " param, pJob->queryId, __VA_ARGS__) #define SCH_TASK_ELOG(param, ...) \ - qError("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_CLIENT_ID(pTask), \ + qError("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_CLIENT_ID(pTask), \ SCH_TASK_ID(pTask), SCH_TASK_EID(pTask), __VA_ARGS__) #define SCH_TASK_DLOG(param, ...) 
\ - qDebug("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_CLIENT_ID(pTask), \ + qDebug("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_CLIENT_ID(pTask), \ SCH_TASK_ID(pTask), SCH_TASK_EID(pTask), __VA_ARGS__) #define SCH_TASK_TLOG(param, ...) \ - qTrace("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_CLIENT_ID(pTask), \ + qTrace("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_CLIENT_ID(pTask), \ SCH_TASK_ID(pTask), SCH_TASK_EID(pTask), __VA_ARGS__) #define SCH_TASK_DLOGL(param, ...) \ - qDebugL("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_CLIENT_ID(pTask), \ + qDebugL("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_CLIENT_ID(pTask), \ SCH_TASK_ID(pTask), SCH_TASK_EID(pTask), __VA_ARGS__) #define SCH_TASK_WLOG(param, ...) \ - qWarn("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_CLIENT_ID(pTask), \ + qWarn("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_CLIENT_ID(pTask), \ SCH_TASK_ID(pTask), SCH_TASK_EID(pTask), __VA_ARGS__) #define SCH_SET_ERRNO(_err) \ @@ -580,7 +581,7 @@ int32_t schDelayLaunchTask(SSchJob *pJob, SSchTask *pTask); int32_t schBuildAndSendMsg(SSchJob *job, SSchTask *task, SQueryNodeAddr *addr, int32_t msgType, void *param); int32_t schAcquireJob(int64_t refId, SSchJob **ppJob); int32_t schReleaseJob(int64_t refId); -int32_t schReleaseJobEx(int64_t refId, int32_t* released); +int32_t schReleaseJobEx(int64_t refId, int32_t *released); void schFreeFlowCtrl(SSchJob *pJob); int32_t schChkJobNeedFlowCtrl(SSchJob *pJob, SSchLevel *pLevel); int32_t schDecTaskFlowQuota(SSchJob *pJob, SSchTask *pTask); @@ -648,7 +649,7 @@ void schDropTaskInHashList(SSchJob *pJob, SHashObj *list); int32_t schNotifyTaskInHashList(SSchJob *pJob, SHashObj *list, ETaskNotifyType type, SSchTask *pTask); int32_t schLaunchLevelTasks(SSchJob *pJob, SSchLevel *level); void schGetTaskFromList(SHashObj *pTaskList, uint64_t taskId, SSchTask **pTask); -int32_t schValidateSubplan(SSchJob *pJob, SSubplan* pSubplan, int32_t level, int32_t idx, int32_t taskNum); +int32_t schValidateSubplan(SSchJob *pJob, SSubplan *pSubplan, int32_t level, int32_t idx, int32_t taskNum); int32_t schInitTask(SSchJob *pJob, SSchTask *pTask, SSubplan *pPlan, SSchLevel *pLevel); int32_t schSwitchTaskCandidateAddr(SSchJob *pJob, SSchTask *pTask); void schDirectPostJobRes(SSchedulerReq *pReq, int32_t errCode); diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c index eefb32f783..3321fdb4b5 100644 --- a/source/libs/scheduler/src/schRemote.c +++ b/source/libs/scheduler/src/schRemote.c @@ -1345,30 +1345,19 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr, SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); } -#if 1 - SSchTrans trans = {.pTrans = pJob->conn.pTrans, .pHandle = SCH_GET_TASK_HANDLE(pTask)}; - code = schAsyncSendMsg(pJob, pTask, &trans, addr, msgType, msg, (uint32_t)msgSize, persistHandle, (rpcCtx.args ? 
&rpcCtx : NULL)); - msg = NULL; - SCH_ERR_JRET(code); - - if (msgType == TDMT_SCH_QUERY || msgType == TDMT_SCH_MERGE_QUERY) { - SCH_ERR_RET(schAppendTaskExecNode(pJob, pTask, addr, pTask->execId)); - } -#else - if (TDMT_VND_SUBMIT != msgType) { + if ((tsBypassFlag & TSDB_BYPASS_RB_RPC_SEND_SUBMIT) && (TDMT_VND_SUBMIT == msgType)) { + taosMemoryFree(msg); + SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); + } else { SSchTrans trans = {.pTrans = pJob->conn.pTrans, .pHandle = SCH_GET_TASK_HANDLE(pTask)}; - code = schAsyncSendMsg(pJob, pTask, &trans, addr, msgType, msg, msgSize, persistHandle, (rpcCtx.args ? &rpcCtx : NULL)); + code = schAsyncSendMsg(pJob, pTask, &trans, addr, msgType, msg, (uint32_t)msgSize, persistHandle, (rpcCtx.args ? &rpcCtx : NULL)); msg = NULL; SCH_ERR_JRET(code); if (msgType == TDMT_SCH_QUERY || msgType == TDMT_SCH_MERGE_QUERY) { SCH_ERR_RET(schAppendTaskExecNode(pJob, pTask, addr, pTask->execId)); } - } else { - taosMemoryFree(msg); - SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); } -#endif return TSDB_CODE_SUCCESS; diff --git a/source/libs/scheduler/test/CMakeLists.txt b/source/libs/scheduler/test/CMakeLists.txt index 9605cc7a1c..d9572e8dec 100644 --- a/source/libs/scheduler/test/CMakeLists.txt +++ b/source/libs/scheduler/test/CMakeLists.txt @@ -11,12 +11,12 @@ IF(NOT TD_DARWIN) IF (TD_GRANT) TARGET_LINK_LIBRARIES( schedulerTest - PUBLIC os util common catalog transport gtest qcom taos_static planner scheduler grant + PUBLIC os util common catalog transport gtest qcom ${TAOS_LIB_STATIC} planner scheduler grant ) ELSE () TARGET_LINK_LIBRARIES( schedulerTest - PUBLIC os util common catalog transport gtest qcom taos_static planner scheduler + PUBLIC os util common catalog transport gtest qcom ${TAOS_LIB_STATIC} planner scheduler ) ENDIF() diff --git a/source/libs/stream/inc/streamInt.h b/source/libs/stream/inc/streamInt.h index 863bc76c79..427733e9ec 100644 --- a/source/libs/stream/inc/streamInt.h +++ b/source/libs/stream/inc/streamInt.h @@ -192,7 +192,6 @@ int32_t streamTaskSendCheckpointReadyMsg(SStreamTask* pTask); int32_t streamTaskSendCheckpointSourceRsp(SStreamTask* pTask); int32_t streamTaskSendCheckpointReq(SStreamTask* pTask); -void streamTaskSetFailedCheckpointId(SStreamTask* pTask); int32_t streamTaskGetNumOfDownstream(const SStreamTask* pTask); int32_t streamTaskGetNumOfUpstream(const SStreamTask* pTask); int32_t streamTaskInitTokenBucket(STokenBucket* pBucket, int32_t numCap, int32_t numRate, float quotaRate, const char*); @@ -245,6 +244,9 @@ int32_t streamCreateSinkResTrigger(SStreamTrigger** pTrigger); int32_t streamCreateForcewindowTrigger(SStreamTrigger** pTrigger, int32_t trigger, SInterval* pInterval, STimeWindow* pLatestWindow, const char* id); +// inject stream errors +void chkptFailedByRetrieveReqToSource(SStreamTask* pTask, int64_t checkpointId); + #ifdef __cplusplus } #endif diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c index 7724d1c5ff..d8ddd0fd02 100644 --- a/source/libs/stream/src/streamCheckpoint.c +++ b/source/libs/stream/src/streamCheckpoint.c @@ -161,33 +161,52 @@ int32_t streamTaskProcessCheckpointTriggerRsp(SStreamTask* pTask, SCheckpointTri int32_t streamTaskSendCheckpointTriggerMsg(SStreamTask* pTask, int32_t dstTaskId, int32_t downstreamNodeId, SRpcHandleInfo* pRpcInfo, int32_t code) { - int32_t size = sizeof(SMsgHead) + sizeof(SCheckpointTriggerRsp); - void* pBuf = rpcMallocCont(size); - if (pBuf == NULL) { + int32_t ret = 0; + int32_t tlen = 0; + void* buf = NULL; + SEncoder 
encoder; + + SCheckpointTriggerRsp req = {.streamId = pTask->id.streamId, + .upstreamTaskId = pTask->id.taskId, + .taskId = dstTaskId, + .rspCode = code}; + + if (code == TSDB_CODE_SUCCESS) { + req.checkpointId = pTask->chkInfo.pActiveInfo->activeId; + req.transId = pTask->chkInfo.pActiveInfo->transId; + } else { + req.checkpointId = -1; + req.transId = -1; + } + + tEncodeSize(tEncodeCheckpointTriggerRsp, &req, tlen, ret); + if (ret < 0) { + stError("s-task:%s encode checkpoint-trigger rsp msg failed, code:%s", pTask->id.idStr, tstrerror(ret)); + return ret; + } + + buf = rpcMallocCont(tlen + sizeof(SMsgHead)); + if (buf == NULL) { + stError("s-task:%s malloc chkpt-trigger rsp failed for task:0x%x since out of memory", pTask->id.idStr, dstTaskId); return terrno; } - SCheckpointTriggerRsp* pRsp = POINTER_SHIFT(pBuf, sizeof(SMsgHead)); + ((SMsgHead*)buf)->vgId = htonl(downstreamNodeId); + void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead)); - ((SMsgHead*)pBuf)->vgId = htonl(downstreamNodeId); - - pRsp->streamId = pTask->id.streamId; - pRsp->upstreamTaskId = pTask->id.taskId; - pRsp->taskId = dstTaskId; - pRsp->rspCode = code; - - if (code == TSDB_CODE_SUCCESS) { - pRsp->checkpointId = pTask->chkInfo.pActiveInfo->activeId; - pRsp->transId = pTask->chkInfo.pActiveInfo->transId; - } else { - pRsp->checkpointId = -1; - pRsp->transId = -1; + tEncoderInit(&encoder, abuf, tlen); + if ((ret = tEncodeCheckpointTriggerRsp(&encoder, &req)) < 0) { + rpcFreeCont(buf); + tEncoderClear(&encoder); + stError("encode checkpoint-trigger rsp failed, code:%s", tstrerror(ret)); + return ret; + } + tEncoderClear(&encoder); - SRpcMsg rspMsg = {.code = 0, .pCont = pBuf, .contLen = size, .info = *pRpcInfo}; + SRpcMsg rspMsg = {.code = 0, .pCont = buf, .contLen = tlen + sizeof(SMsgHead), .info = *pRpcInfo}; tmsgSendRsp(&rspMsg); - return 0; + return ret; } int32_t continueDispatchCheckpointTriggerBlock(SStreamDataBlock* pBlock, SStreamTask* pTask) { @@ -222,14 +241,14 @@ static int32_t doCheckBeforeHandleChkptTrigger(SStreamTask* pTask, int64_t check stError("s-task:%s vgId:%d current checkpointId:%" PRId64 " recv expired checkpoint-trigger block, checkpointId:%" PRId64 " transId:%d, discard", id, vgId, pTask->chkInfo.checkpointId, checkpointId, transId); - return code; + return TSDB_CODE_STREAM_INVLD_CHKPT; } if (pActiveInfo->failedId >= checkpointId) { stError("s-task:%s vgId:%d checkpointId:%" PRId64 " transId:%d, has been marked failed, failedId:%" PRId64 " discard the checkpoint-trigger block", id, vgId, checkpointId, transId, pActiveInfo->failedId); - return code; + return TSDB_CODE_STREAM_INVLD_CHKPT; } if (pTask->chkInfo.checkpointId == checkpointId) { @@ -255,8 +274,7 @@ static int32_t doCheckBeforeHandleChkptTrigger(SStreamTask* pTask, int64_t check "the interrupted checkpoint", id, vgId, pBlock->srcTaskId); - streamTaskOpenUpstreamInput(pTask, pBlock->srcTaskId); - return code; + return TSDB_CODE_STREAM_INVLD_CHKPT; } if (streamTaskGetStatus(pTask).state == TASK_STATUS__CK) { @@ -264,14 +282,14 @@ static int32_t doCheckBeforeHandleChkptTrigger(SStreamTask* pTask, int64_t check stError("s-task:%s vgId:%d active checkpointId:%" PRId64 ", recv invalid checkpoint-trigger checkpointId:%" PRId64 " discard", id, vgId, pActiveInfo->activeId, checkpointId); - return code; + return TSDB_CODE_STREAM_INVLD_CHKPT; } else { // checkpointId == pActiveInfo->activeId if (pActiveInfo->allUpstreamTriggerRecv == 1) { stDebug( "s-task:%s vgId:%d all upstream checkpoint-trigger recv, discard this checkpoint-trigger, "
"checkpointId:%" PRId64 " transId:%d", id, vgId, checkpointId, transId); - return code; + return TSDB_CODE_STREAM_INVLD_CHKPT; } if (taskLevel == TASK_LEVEL__SINK || taskLevel == TASK_LEVEL__AGG) { @@ -283,17 +301,17 @@ static int32_t doCheckBeforeHandleChkptTrigger(SStreamTask* pTask, int64_t check } if (p->upstreamTaskId == pBlock->srcTaskId) { - stWarn("s-task:%s repeatly recv checkpoint-source msg from task:0x%x vgId:%d, checkpointId:%" PRId64 + stWarn("s-task:%s repeatly recv checkpoint-trigger msg from task:0x%x vgId:%d, checkpointId:%" PRId64 ", prev recvTs:%" PRId64 " discard", pTask->id.idStr, p->upstreamTaskId, p->upstreamNodeId, p->checkpointId, p->recvTs); - return code; + return TSDB_CODE_STREAM_INVLD_CHKPT; } } } } } - return 0; + return TSDB_CODE_SUCCESS; } int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock* pBlock) { @@ -317,6 +335,9 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock code = doCheckBeforeHandleChkptTrigger(pTask, checkpointId, pBlock, transId); streamMutexUnlock(&pTask->lock); if (code) { + if (taskLevel != TASK_LEVEL__SOURCE) { // the checkpoint-trigger is discard, open the inputQ for upstream tasks + streamTaskOpenUpstreamInput(pTask, pBlock->srcTaskId); + } streamFreeQitem((SStreamQueueItem*)pBlock); return code; } @@ -330,6 +351,11 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock pActiveInfo->activeId = checkpointId; pActiveInfo->transId = transId; + if (pTask->chkInfo.startTs == 0) { + pTask->chkInfo.startTs = taosGetTimestampMs(); + pTask->execInfo.checkpoint += 1; + } + code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_GEN_CHECKPOINT); if (code != TSDB_CODE_SUCCESS) { stError("s-task:%s handle checkpoint-trigger block failed, code:%s", id, tstrerror(code)); @@ -359,6 +385,10 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock } } +#if 0 + taosMsleep(20*1000); +#endif + if (taskLevel == TASK_LEVEL__SOURCE) { int8_t type = pTask->outputInfo.type; pActiveInfo->allUpstreamTriggerRecv = 1; @@ -373,6 +403,10 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock return code; } +#if 0 + chkptFailedByRetrieveReqToSource(pTask, checkpointId); +#endif + if (type == TASK_OUTPUT__FIXED_DISPATCH || type == TASK_OUTPUT__SHUFFLE_DISPATCH) { stDebug("s-task:%s set childIdx:%d, and add checkpoint-trigger block into outputQ", id, pTask->info.selfChildId); code = continueDispatchCheckpointTriggerBlock(pBlock, pTask); // todo handle this failure @@ -382,11 +416,6 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock streamFreeQitem((SStreamQueueItem*)pBlock); } } else if (taskLevel == TASK_LEVEL__SINK || taskLevel == TASK_LEVEL__AGG) { - if (pTask->chkInfo.startTs == 0) { - pTask->chkInfo.startTs = taosGetTimestampMs(); - pTask->execInfo.checkpoint += 1; - } - // todo: handle this // update the child Id for downstream tasks code = streamAddCheckpointReadyMsg(pTask, pBlock->srcTaskId, pTask->info.selfChildId, checkpointId); @@ -562,7 +591,7 @@ void streamTaskClearCheckInfo(SStreamTask* pTask, bool clearChkpReadyMsg) { } streamMutexUnlock(&pInfo->lock); - stDebug("s-task:%s clear active checkpointInfo, failed checkpointId:%" PRId64 ", current checkpointId:%" PRId64, + stDebug("s-task:%s clear active checkpointInfo, failed checkpointId:%" PRId64 ", latest checkpointId:%" PRId64, pTask->id.idStr, pInfo->failedId, pTask->chkInfo.checkpointId); } @@ -682,15 +711,22 @@ int32_t 
streamTaskUpdateTaskCheckpointInfo(SStreamTask* pTask, bool restored, SV return TSDB_CODE_SUCCESS; } -void streamTaskSetFailedCheckpointId(SStreamTask* pTask) { +void streamTaskSetFailedCheckpointId(SStreamTask* pTask, int64_t failedId) { struct SActiveCheckpointInfo* pInfo = pTask->chkInfo.pActiveInfo; - if (pInfo->activeId <= 0) { - stWarn("s-task:%s checkpoint-info is cleared now, not set the failed checkpoint info", pTask->id.idStr); + if (failedId <= 0) { + stWarn("s-task:%s failedId is invalid, not updating the failed checkpoint info, current failedId:%" PRId64 + " activeId:%" PRId64, + pTask->id.idStr, pInfo->failedId, pInfo->activeId); } else { - pInfo->failedId = pInfo->activeId; - stDebug("s-task:%s mark and set the failed checkpointId:%" PRId64 " (transId:%d)", pTask->id.idStr, pInfo->activeId, - pInfo->transId); + if (failedId <= pInfo->failedId) { + stDebug("s-task:%s failedId:%" PRId64 " not updated to:%" PRId64, pTask->id.idStr, pInfo->failedId, failedId); + } else { + stDebug("s-task:%s mark and set the failed checkpointId:%" PRId64 " (transId:%d) activeId:%" PRId64 + " prev failedId:%" PRId64, + pTask->id.idStr, failedId, pInfo->transId, pInfo->activeId, pInfo->failedId); + pInfo->failedId = failedId; + } } } @@ -698,7 +734,7 @@ void streamTaskSetCheckpointFailed(SStreamTask* pTask) { streamMutexLock(&pTask->lock); ETaskStatus status = streamTaskGetStatus(pTask).state; if (status == TASK_STATUS__CK) { - streamTaskSetFailedCheckpointId(pTask); + streamTaskSetFailedCheckpointId(pTask, pTask->chkInfo.pActiveInfo->activeId); } streamMutexUnlock(&pTask->lock); } @@ -876,8 +912,9 @@ int32_t streamTaskBuildCheckpoint(SStreamTask* pTask) { code = streamSendChkptReportMsg(pTask, &pTask->chkInfo, dropRelHTask); } } else { // clear the checkpoint info if failed + // set failed checkpoint id before clearing the checkpoint info streamMutexLock(&pTask->lock); - streamTaskSetFailedCheckpointId(pTask); // set failed checkpoint id before clear the checkpoint info + streamTaskSetFailedCheckpointId(pTask, ckId); streamMutexUnlock(&pTask->lock); code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_CHECKPOINT_DONE); @@ -1101,23 +1138,43 @@ int32_t doSendRetrieveTriggerMsg(SStreamTask* pTask, SArray* pNotSendList) { return TSDB_CODE_INVALID_PARA; } - SRetrieveChkptTriggerReq* pReq = rpcMallocCont(sizeof(SRetrieveChkptTriggerReq)); - if (pReq == NULL) { - code = terrno; - stError("vgId:%d failed to create msg to retrieve trigger msg for task:%s exec, code:out of memory", vgId, pId); + int32_t ret = 0; + int32_t tlen = 0; + void* buf = NULL; + SRpcMsg rpcMsg = {0}; + SEncoder encoder; + + SRetrieveChkptTriggerReq req = {.streamId = pTask->id.streamId, + .downstreamTaskId = pTask->id.taskId, + .downstreamNodeId = vgId, + .upstreamTaskId = pUpstreamTask->taskId, + .upstreamNodeId = pUpstreamTask->nodeId, + .checkpointId = checkpointId}; + + tEncodeSize(tEncodeRetrieveChkptTriggerReq, &req, tlen, ret); + if (ret < 0) { + stError("encode retrieve checkpoint-trigger msg failed, code:%s", tstrerror(ret)); + } + + buf = rpcMallocCont(tlen + sizeof(SMsgHead)); + if (buf == NULL) { + stError("vgId:%d failed to create retrieve checkpoint-trigger msg for task:%s exec, code:out of memory", vgId, pId); continue; } - pReq->head.vgId = htonl(pUpstreamTask->nodeId); - pReq->streamId = pTask->id.streamId; - pReq->downstreamTaskId = pTask->id.taskId; - pReq->downstreamNodeId = vgId; - pReq->upstreamTaskId = pUpstreamTask->taskId; - pReq->upstreamNodeId = pUpstreamTask->nodeId; - pReq->checkpointId = checkpointId; +
((SRetrieveChkptTriggerReq*)buf)->head.vgId = htonl(pUpstreamTask->nodeId); + void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead)); - SRpcMsg rpcMsg = {0}; - initRpcMsg(&rpcMsg, TDMT_STREAM_RETRIEVE_TRIGGER, pReq, sizeof(SRetrieveChkptTriggerReq)); + tEncoderInit(&encoder, abuf, tlen); + if ((code = tEncodeRetrieveChkptTriggerReq(&encoder, &req)) < 0) { + rpcFreeCont(buf); + tEncoderClear(&encoder); + stError("encode retrieve checkpoint-trigger req failed, code:%s", tstrerror(code)); + continue; + } + tEncoderClear(&encoder); + + initRpcMsg(&rpcMsg, TDMT_STREAM_RETRIEVE_TRIGGER, buf, tlen + sizeof(SMsgHead)); code = tmsgSendReq(&pUpstreamTask->epSet, &rpcMsg); if (code == TSDB_CODE_SUCCESS) { diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index e0fa199199..5807240f5e 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -1170,6 +1170,7 @@ int32_t streamTaskSendCheckpointReadyMsg(SStreamTask* pTask) { if (taosArrayGetSize(pTask->upstreamInfo.pList) != num) { stError("s-task:%s invalid number of sent readyMsg:%d to upstream:%d", id, num, (int32_t)taosArrayGetSize(pTask->upstreamInfo.pList)); + streamMutexUnlock(&pActiveInfo->lock); return TSDB_CODE_STREAM_INTERNAL_ERROR; } @@ -1412,6 +1413,7 @@ int32_t streamAddCheckpointSourceRspMsg(SStreamCheckpointSourceReq* pReq, SRpcHa if (size > 0) { STaskCheckpointReadyInfo* pReady = taosArrayGet(pActiveInfo->pReadyMsgList, 0); if (pReady == NULL) { + streamMutexUnlock(&pActiveInfo->lock); return terrno; } diff --git a/source/libs/stream/src/streamErrorInjection.c b/source/libs/stream/src/streamErrorInjection.c new file mode 100644 index 0000000000..515845ba2b --- /dev/null +++ b/source/libs/stream/src/streamErrorInjection.c @@ -0,0 +1,17 @@ +#include "streamInt.h" + +/** + * prerequisite: checkpoint interval should be 60s + * @param pTask + * @param checkpointId + */ +void chkptFailedByRetrieveReqToSource(SStreamTask* pTask, int64_t checkpointId) { + streamMutexLock(&pTask->lock); + + // set current checkpoint failed immediately, set failed checkpoint id before clearing the checkpoint info + streamTaskSetFailedCheckpointId(pTask, checkpointId); + streamMutexUnlock(&pTask->lock); + + // the checkpoint interval should be 60s, and the next checkpoint req should be issued by mnode + taosMsleep(65*1000); +} \ No newline at end of file diff --git a/source/libs/stream/src/streamSched.c b/source/libs/stream/src/streamSched.c index 8c79abfd02..9e131fd526 100644 --- a/source/libs/stream/src/streamSched.c +++ b/source/libs/stream/src/streamSched.c @@ -83,13 +83,37 @@ int32_t streamTrySchedExec(SStreamTask* pTask) { } int32_t streamTaskSchedTask(SMsgCb* pMsgCb, int32_t vgId, int64_t streamId, int32_t taskId, int32_t execType) { - SStreamTaskRunReq* pRunReq = rpcMallocCont(sizeof(SStreamTaskRunReq)); - if (pRunReq == NULL) { + int32_t code = 0; + int32_t tlen = 0; + + SStreamTaskRunReq req = {.streamId = streamId, .taskId = taskId, .reqType = execType}; + + tEncodeSize(tEncodeStreamTaskRunReq, &req, tlen, code); + if (code < 0) { + stError("s-task:0x%" PRIx64 " vgId:%d encode stream task run req failed, code:%s", streamId, vgId, tstrerror(code)); + return code; + } + + void* buf = rpcMallocCont(tlen + sizeof(SMsgHead)); + if (buf == NULL) { stError("vgId:%d failed to create msg to start stream task:0x%x exec, type:%d, code:%s", vgId, taskId, execType, tstrerror(terrno)); return terrno; } + ((SMsgHead*)buf)->vgId = vgId; + char* bufx = POINTER_SHIFT(buf, sizeof(SMsgHead)); + +
SEncoder encoder; + tEncoderInit(&encoder, (uint8_t*)bufx, tlen); + if ((code = tEncodeStreamTaskRunReq(&encoder, &req)) < 0) { + rpcFreeCont(buf); + tEncoderClear(&encoder); + stError("s-task:0x%x vgId:%d encode run task msg failed, code:%s", taskId, vgId, tstrerror(code)); + return code; + } + tEncoderClear(&encoder); + if (streamId != 0) { stDebug("vgId:%d create msg to for task:0x%x, exec type:%d, %s", vgId, taskId, execType, streamTaskGetExecType(execType)); @@ -97,13 +121,8 @@ int32_t streamTaskSchedTask(SMsgCb* pMsgCb, int32_t vgId, int64_t streamId, int3 stDebug("vgId:%d create msg to exec, type:%d, %s", vgId, execType, streamTaskGetExecType(execType)); } - pRunReq->head.vgId = vgId; - pRunReq->streamId = streamId; - pRunReq->taskId = taskId; - pRunReq->reqType = execType; - - SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)}; - int32_t code = tmsgPutToQueue(pMsgCb, STREAM_QUEUE, &msg); + SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = buf, .contLen = tlen + sizeof(SMsgHead)}; + code = tmsgPutToQueue(pMsgCb, STREAM_QUEUE, &msg); if (code) { stError("vgId:%d failed to put msg into stream queue, code:%s, %x", vgId, tstrerror(code), taskId); } diff --git a/source/libs/stream/src/streamStartTask.c b/source/libs/stream/src/streamStartTask.c index ed12687e41..9c16ff036e 100644 --- a/source/libs/stream/src/streamStartTask.c +++ b/source/libs/stream/src/streamStartTask.c @@ -433,6 +433,7 @@ int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) { // send hb msg to mnode before closing all tasks. int32_t code = streamMetaSendMsgBeforeCloseTasks(pMeta, &pTaskList); if (code != TSDB_CODE_SUCCESS) { + streamMetaRUnLock(pMeta); return code; } diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index a044859b80..f46228fd47 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -22,6 +22,7 @@ #include "tstream.h" #include "ttimer.h" #include "wal.h" +#include "streamMsg.h" static void streamTaskDestroyUpstreamInfo(SUpstreamInfo* pUpstreamInfo); static int32_t streamTaskUpdateUpstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpSet* pEpSet, bool* pUpdated); @@ -1246,13 +1247,13 @@ void streamTaskDestroyActiveChkptInfo(SActiveCheckpointInfo* pInfo) { taosMemoryFree(pInfo); } -//NOTE: clear the checkpoint id, and keep the failed id +// NOTE: clear the checkpoint id, and keep the failed id +// failedId for a task will increase as the checkpoint ID increases.
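/* A minimal sketch of the invariant stated in the note above: failedId is
 * monotonically non-decreasing. streamTaskClearActiveInfo() below resets the
 * transient checkpoint fields but deliberately leaves failedId untouched, and
 * streamTaskSetFailedCheckpointId() only ever moves it forward. The helper name
 * below is illustrative only, not part of this patch. */
static void advanceFailedIdMonotonic(SActiveCheckpointInfo* pInfo, int64_t failedId) {
  if (failedId > pInfo->failedId) {
    pInfo->failedId = failedId;  // advance only; never reset or moved backwards
  }
}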
void streamTaskClearActiveInfo(SActiveCheckpointInfo* pInfo) { pInfo->activeId = 0; pInfo->transId = 0; pInfo->allUpstreamTriggerRecv = 0; pInfo->dispatchTrigger = false; -// pInfo->failedId = 0; taosArrayClear(pInfo->pDispatchTriggerList); taosArrayClear(pInfo->pCheckpointReadyRecvList); @@ -1303,4 +1304,178 @@ void streamTaskFreeRefId(int64_t* pRefId) { } metaRefMgtRemove(pRefId); +} + + +int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) { + int32_t code = 0; + int32_t lino; + + TAOS_CHECK_EXIT(tStartEncode(pEncoder)); + TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->ver)); + TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->id.streamId)); + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->id.taskId)); + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->info.trigger)); + TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->info.taskLevel)); + TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->outputInfo.type)); + TAOS_CHECK_EXIT(tEncodeI16(pEncoder, pTask->msgInfo.msgType)); + + TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->status.taskStatus)); + TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->status.schedStatus)); + + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->info.selfChildId)); + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->info.nodeId)); + TAOS_CHECK_EXIT(tEncodeSEpSet(pEncoder, &pTask->info.epSet)); + TAOS_CHECK_EXIT(tEncodeSEpSet(pEncoder, &pTask->info.mnodeEpset)); + + TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->chkInfo.checkpointId)); + TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->chkInfo.checkpointVer)); + TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->info.fillHistory)); + + TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->hTaskInfo.id.streamId)); + int32_t taskId = pTask->hTaskInfo.id.taskId; + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, taskId)); + + TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->streamTaskId.streamId)); + taskId = pTask->streamTaskId.taskId; + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, taskId)); + + TAOS_CHECK_EXIT(tEncodeU64(pEncoder, pTask->dataRange.range.minVer)); + TAOS_CHECK_EXIT(tEncodeU64(pEncoder, pTask->dataRange.range.maxVer)); + TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->dataRange.window.skey)); + TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->dataRange.window.ekey)); + + int32_t epSz = taosArrayGetSize(pTask->upstreamInfo.pList); + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, epSz)); + for (int32_t i = 0; i < epSz; i++) { + SStreamUpstreamEpInfo* pInfo = taosArrayGetP(pTask->upstreamInfo.pList, i); + TAOS_CHECK_EXIT(tEncodeStreamEpInfo(pEncoder, pInfo)); + } + + if (pTask->info.taskLevel != TASK_LEVEL__SINK) { + TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, pTask->exec.qmsg)); + } + + if (pTask->outputInfo.type == TASK_OUTPUT__TABLE) { + TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->outputInfo.tbSink.stbUid)); + TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, pTask->outputInfo.tbSink.stbFullName)); + TAOS_CHECK_EXIT(tEncodeSSchemaWrapper(pEncoder, pTask->outputInfo.tbSink.pSchemaWrapper)); + } else if (pTask->outputInfo.type == TASK_OUTPUT__SMA) { + TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->outputInfo.smaSink.smaId)); + } else if (pTask->outputInfo.type == TASK_OUTPUT__FETCH) { + TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->outputInfo.fetchSink.reserved)); + } else if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) { + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->outputInfo.fixedDispatcher.taskId)); + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->outputInfo.fixedDispatcher.nodeId)); + TAOS_CHECK_EXIT(tEncodeSEpSet(pEncoder, &pTask->outputInfo.fixedDispatcher.epSet)); + } else if (pTask->outputInfo.type == 
TASK_OUTPUT__SHUFFLE_DISPATCH) { + TAOS_CHECK_EXIT(tSerializeSUseDbRspImp(pEncoder, &pTask->outputInfo.shuffleDispatcher.dbInfo)); + TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, pTask->outputInfo.shuffleDispatcher.stbFullName)); + } + TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->info.delaySchedParam)); + TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->subtableWithoutMd5)); + TAOS_CHECK_EXIT(tEncodeCStrWithLen(pEncoder, pTask->reserve, sizeof(pTask->reserve) - 1)); + + tEndEncode(pEncoder); +_exit: + return code; +} + +int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask) { + int32_t taskId = 0; + int32_t code = 0; + int32_t lino; + + TAOS_CHECK_EXIT(tStartDecode(pDecoder)); + TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->ver)); + if (pTask->ver <= SSTREAM_TASK_INCOMPATIBLE_VER || pTask->ver > SSTREAM_TASK_VER) { + TAOS_CHECK_EXIT(TSDB_CODE_INVALID_MSG); + } + + TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->id.streamId)); + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->id.taskId)); + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->info.trigger)); + TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->info.taskLevel)); + TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->outputInfo.type)); + TAOS_CHECK_EXIT(tDecodeI16(pDecoder, &pTask->msgInfo.msgType)); + + TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->status.taskStatus)); + TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->status.schedStatus)); + + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->info.selfChildId)); + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->info.nodeId)); + TAOS_CHECK_EXIT(tDecodeSEpSet(pDecoder, &pTask->info.epSet)); + TAOS_CHECK_EXIT(tDecodeSEpSet(pDecoder, &pTask->info.mnodeEpset)); + + TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->chkInfo.checkpointId)); + TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->chkInfo.checkpointVer)); + TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->info.fillHistory)); + + TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->hTaskInfo.id.streamId)); + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &taskId)); + pTask->hTaskInfo.id.taskId = taskId; + + TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->streamTaskId.streamId)); + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &taskId)); + pTask->streamTaskId.taskId = taskId; + + TAOS_CHECK_EXIT(tDecodeU64(pDecoder, (uint64_t*)&pTask->dataRange.range.minVer)); + TAOS_CHECK_EXIT(tDecodeU64(pDecoder, (uint64_t*)&pTask->dataRange.range.maxVer)); + TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->dataRange.window.skey)); + TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->dataRange.window.ekey)); + + int32_t epSz = -1; + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &epSz)); + + if ((pTask->upstreamInfo.pList = taosArrayInit(epSz, POINTER_BYTES)) == NULL) { + TAOS_CHECK_EXIT(terrno); + } + for (int32_t i = 0; i < epSz; i++) { + SStreamUpstreamEpInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamUpstreamEpInfo)); + if (pInfo == NULL) { + TAOS_CHECK_EXIT(terrno); + } + if ((code = tDecodeStreamEpInfo(pDecoder, pInfo)) < 0) { + taosMemoryFreeClear(pInfo); + goto _exit; + } + if (taosArrayPush(pTask->upstreamInfo.pList, &pInfo) == NULL) { + TAOS_CHECK_EXIT(terrno); + } + } + + if (pTask->info.taskLevel != TASK_LEVEL__SINK) { + TAOS_CHECK_EXIT(tDecodeCStrAlloc(pDecoder, &pTask->exec.qmsg)); + } + + if (pTask->outputInfo.type == TASK_OUTPUT__TABLE) { + TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->outputInfo.tbSink.stbUid)); + TAOS_CHECK_EXIT(tDecodeCStrTo(pDecoder, pTask->outputInfo.tbSink.stbFullName)); + pTask->outputInfo.tbSink.pSchemaWrapper = taosMemoryCalloc(1, sizeof(SSchemaWrapper)); + if (pTask->outputInfo.tbSink.pSchemaWrapper ==
NULL) { + TAOS_CHECK_EXIT(terrno); + } + TAOS_CHECK_EXIT(tDecodeSSchemaWrapper(pDecoder, pTask->outputInfo.tbSink.pSchemaWrapper)); + } else if (pTask->outputInfo.type == TASK_OUTPUT__SMA) { + TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->outputInfo.smaSink.smaId)); + } else if (pTask->outputInfo.type == TASK_OUTPUT__FETCH) { + TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->outputInfo.fetchSink.reserved)); + } else if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) { + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->outputInfo.fixedDispatcher.taskId)); + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->outputInfo.fixedDispatcher.nodeId)); + TAOS_CHECK_EXIT(tDecodeSEpSet(pDecoder, &pTask->outputInfo.fixedDispatcher.epSet)); + } else if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) { + TAOS_CHECK_EXIT(tDeserializeSUseDbRspImp(pDecoder, &pTask->outputInfo.shuffleDispatcher.dbInfo)); + TAOS_CHECK_EXIT(tDecodeCStrTo(pDecoder, pTask->outputInfo.shuffleDispatcher.stbFullName)); + } + TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->info.delaySchedParam)); + if (pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER) { + TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->subtableWithoutMd5)); + } + TAOS_CHECK_EXIT(tDecodeCStrTo(pDecoder, pTask->reserve)); + + tEndDecode(pDecoder); + +_exit: + return code; } \ No newline at end of file diff --git a/source/libs/stream/src/streamUpdate.c b/source/libs/stream/src/streamUpdate.c index a3cfa00127..49d5041369 100644 --- a/source/libs/stream/src/streamUpdate.c +++ b/source/libs/stream/src/streamUpdate.c @@ -445,6 +445,11 @@ int32_t updateInfoSerialize(SEncoder* pEncoder, const SUpdateInfo* pInfo) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; if (!pInfo) { + if (tEncodeI32(pEncoder, -1) < 0) { + code = TSDB_CODE_FAILED; + QUERY_CHECK_CODE(code, lino, _end); + } + uDebug("%s line:%d. 
updateInfo is NULL", __func__, __LINE__); return TSDB_CODE_SUCCESS; } @@ -550,6 +555,10 @@ int32_t updateInfoDeserialize(SDecoder* pDeCoder, SUpdateInfo* pInfo) { int32_t size = 0; if (tDecodeI32(pDeCoder, &size) < 0) return -1; + + if (size < 0) { + return -1; + } pInfo->pTsBuckets = taosArrayInit(size, sizeof(TSKEY)); QUERY_CHECK_NULL(pInfo->pTsBuckets, code, lino, _error, terrno); diff --git a/source/libs/sync/inc/syncIndexMgr.h b/source/libs/sync/inc/syncIndexMgr.h index 3c372a3b12..ed7a17b4c7 100644 --- a/source/libs/sync/inc/syncIndexMgr.h +++ b/source/libs/sync/inc/syncIndexMgr.h @@ -29,6 +29,7 @@ typedef struct SSyncIndexMgr { SyncTerm privateTerm[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA]; // for advanced function int64_t startTimeArr[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA]; int64_t recvTimeArr[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA]; + int64_t sentTimeArr[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA]; int32_t replicaNum; int32_t totalReplicaNum; SSyncNode *pNode; @@ -45,7 +46,9 @@ void syncIndexMgrCopyIfExist(SSyncIndexMgr * pNewIndex, SSyncIndexMgr void syncIndexMgrSetStartTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId, int64_t startTime); int64_t syncIndexMgrGetStartTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId); void syncIndexMgrSetRecvTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId, int64_t recvTime); +void syncIndexMgrSetSentTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId, int64_t sentTime); int64_t syncIndexMgrGetRecvTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId); +int64_t syncIndexMgrGetSentTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId); void syncIndexMgrSetTerm(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId, SyncTerm term); SyncTerm syncIndexMgrGetTerm(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId); diff --git a/source/libs/sync/inc/syncInt.h b/source/libs/sync/inc/syncInt.h index 0b653ddbe9..b19d1184a7 100644 --- a/source/libs/sync/inc/syncInt.h +++ b/source/libs/sync/inc/syncInt.h @@ -234,6 +234,10 @@ struct SSyncNode { bool isStart; + // stats + int32_t sendCount; + int32_t recvCount; + int32_t slowCount; }; // open/close -------------- diff --git a/source/libs/sync/inc/syncPipeline.h b/source/libs/sync/inc/syncPipeline.h index 427a3690f2..eeb24d2f16 100644 --- a/source/libs/sync/inc/syncPipeline.h +++ b/source/libs/sync/inc/syncPipeline.h @@ -39,6 +39,7 @@ typedef struct SSyncLogReplMgr { int64_t peerStartTime; int32_t retryBackoff; int32_t peerId; + int32_t sendCount; } SSyncLogReplMgr; typedef struct SSyncLogBufEntry { diff --git a/source/libs/sync/inc/syncUtil.h b/source/libs/sync/inc/syncUtil.h index 1606f47592..7b71491f47 100644 --- a/source/libs/sync/inc/syncUtil.h +++ b/source/libs/sync/inc/syncUtil.h @@ -46,12 +46,12 @@ extern "C" { #define sLDebug(...) if (sDebugFlag & DEBUG_DEBUG) { taosPrintLongString("SYN ", DEBUG_DEBUG, sDebugFlag, __VA_ARGS__); } #define sLTrace(...) if (sDebugFlag & DEBUG_TRACE) { taosPrintLongString("SYN ", DEBUG_TRACE, sDebugFlag, __VA_ARGS__); } -#define sNFatal(pNode, ...) if (sDebugFlag & DEBUG_FATAL) { syncPrintNodeLog("SYN FATAL ", DEBUG_FATAL, 255, pNode, __VA_ARGS__); } -#define sNError(pNode, ...) if (sDebugFlag & DEBUG_ERROR) { syncPrintNodeLog("SYN ERROR ", DEBUG_ERROR, 255, pNode, __VA_ARGS__); } -#define sNWarn(pNode, ...) if (sDebugFlag & DEBUG_WARN) { syncPrintNodeLog("SYN WARN ", DEBUG_WARN, 255, pNode, __VA_ARGS__); } -#define sNInfo(pNode, ...)
if (sDebugFlag & DEBUG_INFO) { syncPrintNodeLog("SYN ", DEBUG_INFO, 255, pNode, __VA_ARGS__); } -#define sNDebug(pNode, ...) if (sDebugFlag & DEBUG_DEBUG) { syncPrintNodeLog("SYN ", DEBUG_DEBUG, sDebugFlag, pNode, __VA_ARGS__); } -#define sNTrace(pNode, ...) if (sDebugFlag & DEBUG_TRACE) { syncPrintNodeLog("SYN ", DEBUG_TRACE, sDebugFlag, pNode, __VA_ARGS__); } +#define sNFatal(pNode, ...) if (sDebugFlag & DEBUG_FATAL) { syncPrintNodeLog("SYN FATAL ", DEBUG_FATAL, 255, true, pNode, __VA_ARGS__); } +#define sNError(pNode, ...) if (sDebugFlag & DEBUG_ERROR) { syncPrintNodeLog("SYN ERROR ", DEBUG_ERROR, 255, true, pNode, __VA_ARGS__); } +#define sNWarn(pNode, ...) if (sDebugFlag & DEBUG_WARN) { syncPrintNodeLog("SYN WARN ", DEBUG_WARN, 255, true, pNode, __VA_ARGS__); } +#define sNInfo(pNode, ...) if (sDebugFlag & DEBUG_INFO) { syncPrintNodeLog("SYN ", DEBUG_INFO, 255, true, pNode, __VA_ARGS__); } +#define sNDebug(pNode, ...) if (sDebugFlag & DEBUG_DEBUG) { syncPrintNodeLog("SYN ", DEBUG_DEBUG, sDebugFlag, false, pNode, __VA_ARGS__); } +#define sNTrace(pNode, ...) if (sDebugFlag & DEBUG_TRACE) { syncPrintNodeLog("SYN ", DEBUG_TRACE, sDebugFlag, false, pNode, __VA_ARGS__); } #define sSFatal(pSender, ...) if (sDebugFlag & DEBUG_FATAL) { syncPrintSnapshotSenderLog("SYN FATAL ", DEBUG_FATAL, 255, pSender, __VA_ARGS__); } #define sSError(pSender, ...) if (sDebugFlag & DEBUG_ERROR) { syncPrintSnapshotSenderLog("SYN ERROR ", DEBUG_ERROR, 255, pSender, __VA_ARGS__); } @@ -85,7 +85,8 @@ void syncUtilMsgHtoN(void* msg); void syncUtilGenerateArbToken(int32_t nodeId, int32_t groupId, char* buf); -void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNode* pNode, const char* format, ...); +void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, bool formatTime, SSyncNode* pNode, + const char* format, ...); void syncPrintSnapshotSenderLog(const char* flags, ELogLevel level, int32_t dflag, SSyncSnapshotSender* pSender, const char* format, ...); void syncPrintSnapshotReceiverLog(const char* flags, ELogLevel level, int32_t dflag, SSyncSnapshotReceiver* pReceiver, diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c index 0345880874..9fc39ec463 100644 --- a/source/libs/sync/src/syncAppendEntries.c +++ b/source/libs/sync/src/syncAppendEntries.c @@ -104,6 +104,11 @@ int32_t syncNodeOnAppendEntries(SSyncNode* ths, const SRpcMsg* pRpcMsg) { goto _IGNORE; } + int32_t nRef = atomic_add_fetch_32(&ths->recvCount, 1); + if (nRef <= 0) { + sError("vgId:%d, recv count is %d", ths->vgId, nRef); + } + int32_t code = syncBuildAppendEntriesReply(&rpcRsp, ths->vgId); if (code != 0) { syncLogRecvAppendEntries(ths, pMsg, "build rsp error"); diff --git a/source/libs/sync/src/syncIndexMgr.c b/source/libs/sync/src/syncIndexMgr.c index 4946912941..ec7354040f 100644 --- a/source/libs/sync/src/syncIndexMgr.c +++ b/source/libs/sync/src/syncIndexMgr.c @@ -155,6 +155,18 @@ void syncIndexMgrSetRecvTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId, i DID(pRaftId), CID(pRaftId)); } +void syncIndexMgrSetSentTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId, int64_t sentTime) { + for (int i = 0; i < pIndexMgr->totalReplicaNum; ++i) { + if (syncUtilSameId(&((*(pIndexMgr->replicas))[i]), pRaftId)) { + (pIndexMgr->sentTimeArr)[i] = sentTime; + return; + } + } + + sError("vgId:%d, indexmgr set sent-time:%" PRId64 " for dnode:%d cluster:%d failed", pIndexMgr->pNode->vgId, sentTime, + DID(pRaftId), CID(pRaftId)); +} + int64_t 
syncIndexMgrGetRecvTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId) { for (int i = 0; i < pIndexMgr->totalReplicaNum; ++i) { if (syncUtilSameId(&((*(pIndexMgr->replicas))[i]), pRaftId)) { @@ -168,6 +180,19 @@ int64_t syncIndexMgrGetRecvTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId return TSDB_CODE_SYN_INVALID_ID; } +int64_t syncIndexMgrGetSentTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId) { + for (int i = 0; i < pIndexMgr->totalReplicaNum; ++i) { + if (syncUtilSameId(&((*(pIndexMgr->replicas))[i]), pRaftId)) { + int64_t sentTime = (pIndexMgr->sentTimeArr)[i]; + return sentTime; + } + } + + sError("vgId:%d, indexmgr get sent-time from dnode:%d cluster:%d failed", pIndexMgr->pNode->vgId, DID(pRaftId), + CID(pRaftId)); + return TSDB_CODE_SYN_INVALID_ID; +} + void syncIndexMgrSetTerm(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId, SyncTerm term) { for (int i = 0; i < pIndexMgr->totalReplicaNum; ++i) { if (syncUtilSameId(&((*(pIndexMgr->replicas))[i]), pRaftId)) { diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 3d37cdb560..5bdac16f42 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -980,8 +980,12 @@ static int32_t syncHbTimerStart(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer) { sTrace("vgId:%d, start hb timer, rid:%" PRId64 " addr:%" PRId64 " at %d", pSyncNode->vgId, pData->rid, pData->destId.addr, pSyncTimer->timerMS); - TAOS_CHECK_RETURN(taosTmrReset(pSyncTimer->timerCb, pSyncTimer->timerMS, (void*)(pData->rid), - syncEnv()->pTimerManager, &pSyncTimer->pTimer)); + bool stopped = taosTmrReset(pSyncTimer->timerCb, pSyncTimer->timerMS, (void*)(pData->rid), syncEnv()->pTimerManager, + &pSyncTimer->pTimer); + if (stopped) { + sError("vgId:%d, failed to reset hb timer", pSyncNode->vgId); + return TSDB_CODE_SYN_INTERNAL_ERROR; + } } else { code = TSDB_CODE_SYN_INTERNAL_ERROR; sError("vgId:%d, start ctrl hb timer error, sync env is stop", pSyncNode->vgId); @@ -1624,8 +1628,12 @@ ESyncStrategy syncNodeStrategy(SSyncNode* pSyncNode) { return pSyncNode->raftCfg int32_t syncNodeStartPingTimer(SSyncNode* pSyncNode) { int32_t code = 0; if (syncIsInit()) { - TAOS_CHECK_RETURN(taosTmrReset(pSyncNode->FpPingTimerCB, pSyncNode->pingTimerMS, (void*)pSyncNode->rid, - syncEnv()->pTimerManager, &pSyncNode->pPingTimer)); + bool stopped = taosTmrReset(pSyncNode->FpPingTimerCB, pSyncNode->pingTimerMS, (void*)pSyncNode->rid, + syncEnv()->pTimerManager, &pSyncNode->pPingTimer); + if (stopped) { + sError("vgId:%d, failed to reset ping timer, ms:%d", pSyncNode->vgId, pSyncNode->pingTimerMS); + return TSDB_CODE_SYN_INTERNAL_ERROR; + } atomic_store_64(&pSyncNode->pingTimerLogicClock, pSyncNode->pingTimerLogicClockUser); } else { sError("vgId:%d, start ping timer error, sync env is stop", pSyncNode->vgId); @@ -1653,8 +1661,9 @@ int32_t syncNodeStartElectTimer(SSyncNode* pSyncNode, int32_t ms) { pSyncNode->electTimerParam.pSyncNode = pSyncNode; pSyncNode->electTimerParam.pData = NULL; - TAOS_CHECK_RETURN(taosTmrReset(pSyncNode->FpElectTimerCB, pSyncNode->electTimerMS, (void*)(pSyncNode->rid), - syncEnv()->pTimerManager, &pSyncNode->pElectTimer)); + bool stopped = taosTmrReset(pSyncNode->FpElectTimerCB, pSyncNode->electTimerMS, (void*)(pSyncNode->rid), + syncEnv()->pTimerManager, &pSyncNode->pElectTimer); + if (stopped) sError("vgId:%d, failed to reset elect timer, ms:%d", pSyncNode->vgId, ms); } else { sError("vgId:%d, start elect timer error, sync env is stop", pSyncNode->vgId); } @@ -1690,7 +1699,7 @@ void
syncNodeResetElectTimer(SSyncNode* pSyncNode) { // TODO check return value if ((code = syncNodeRestartElectTimer(pSyncNode, electMS)) != 0) { - sError("vgId:%d, failed to restart elect timer since %s", pSyncNode->vgId, terrstr()); + sError("vgId:%d, failed to restart elect timer since %s", pSyncNode->vgId, tstrerror(code)); return; }; @@ -2586,10 +2595,9 @@ static void syncNodeEqPingTimer(void* param, void* tmrId) { } _out: - if ((code = taosTmrReset(syncNodeEqPingTimer, pNode->pingTimerMS, (void*)pNode->rid, syncEnv()->pTimerManager, - &pNode->pPingTimer)) != 0) { - sError("failed to reset ping timer since %s", tstrerror(code)); - }; + if (taosTmrReset(syncNodeEqPingTimer, pNode->pingTimerMS, (void*)pNode->rid, syncEnv()->pTimerManager, + &pNode->pPingTimer)) + sError("failed to reset ping timer"); } syncNodeRelease(pNode); } @@ -2759,13 +2767,10 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) { if (syncIsInit()) { sTrace("vgId:%d, reset peer hb timer at %d", pSyncNode->vgId, pSyncTimer->timerMS); - if ((code = taosTmrReset(syncNodeEqPeerHeartbeatTimer, pSyncTimer->timerMS, (void*)hbDataRid, - syncEnv()->pTimerManager, &pSyncTimer->pTimer)) != 0) { - sError("vgId:%d, reset peer hb timer error, %s", pSyncNode->vgId, tstrerror(code)); - syncNodeRelease(pSyncNode); - syncHbTimerDataRelease(pData); - return; - } + bool stopped = taosTmrReset(syncNodeEqPeerHeartbeatTimer, pSyncTimer->timerMS, (void*)hbDataRid, + syncEnv()->pTimerManager, &pSyncTimer->pTimer); + if (stopped) sError("vgId:%d, failed to reset peer hb timer", pSyncNode->vgId); + } else { sError("sync env is stop, reset peer hb timer error"); } diff --git a/source/libs/sync/src/syncReplication.c b/source/libs/sync/src/syncReplication.c index 247b5624c3..7466aaf66e 100644 --- a/source/libs/sync/src/syncReplication.c +++ b/source/libs/sync/src/syncReplication.c @@ -88,11 +88,33 @@ int32_t syncNodeSendAppendEntries(SSyncNode* pSyncNode, const SRaftId* destRaftI pMsg->destId = *destRaftId; TAOS_CHECK_RETURN(syncNodeSendMsgById(destRaftId, pSyncNode, pRpcMsg)); + int32_t nRef = 0; + if (pSyncNode != NULL) { + nRef = atomic_add_fetch_32(&pSyncNode->sendCount, 1); + if (nRef <= 0) { + sError("vgId:%d, send count is %d", pSyncNode->vgId, nRef); + } + } + + SSyncLogReplMgr* mgr = syncNodeGetLogReplMgr(pSyncNode, (SRaftId*)destRaftId); + if (mgr != NULL) { + nRef = atomic_add_fetch_32(&mgr->sendCount, 1); + if (nRef <= 0) { + sError("vgId:%d, send count is %d", pSyncNode->vgId, nRef); + } + } + TAOS_RETURN(TSDB_CODE_SUCCESS); } int32_t syncNodeSendHeartbeat(SSyncNode* pSyncNode, const SRaftId* destId, SRpcMsg* pMsg) { - return syncNodeSendMsgById(destId, pSyncNode, pMsg); + SRaftId destIdTmp = *destId; + TAOS_CHECK_RETURN(syncNodeSendMsgById(destId, pSyncNode, pMsg)); + + int64_t tsMs = taosGetTimestampMs(); + syncIndexMgrSetSentTime(pSyncNode->pMatchIndex, &destIdTmp, tsMs); + + return TSDB_CODE_SUCCESS; } int32_t syncNodeHeartbeatPeers(SSyncNode* pSyncNode) { diff --git a/source/libs/sync/src/syncUtil.c b/source/libs/sync/src/syncUtil.c index 9058b6ecef..65c7f9761e 100644 --- a/source/libs/sync/src/syncUtil.c +++ b/source/libs/sync/src/syncUtil.c @@ -22,6 +22,7 @@ #include "syncRaftStore.h" #include "syncSnapshot.h" #include "tglobal.h" +#include "ttime.h" static void syncCfg2SimpleStr(const SSyncCfg* pCfg, char* buf, int32_t bufLen) { int32_t len = tsnprintf(buf, bufLen, "{num:%d, as:%d, [", pCfg->replicaNum, pCfg->myIndex); @@ -108,13 +109,40 @@ void syncUtilGenerateArbToken(int32_t nodeId, int32_t
groupId, char* buf) { (void)snprintf(buf, TSDB_ARB_TOKEN_SIZE, "d%d#g%d#%" PRId64 "#%d", nodeId, groupId, currentMs, randVal); } +static void syncPrintTime(bool formatTime, int32_t* len, int64_t tsMs, int32_t i, char* buf, int32_t bufLen) { + if (formatTime) { + char pBuf[TD_TIME_STR_LEN] = {0}; + if (tsMs > 0) { + if (taosFormatUtcTime(pBuf, TD_TIME_STR_LEN, tsMs, TSDB_TIME_PRECISION_MILLI) != 0) { + pBuf[0] = '\0'; + } + } + (*len) += tsnprintf(buf + (*len), bufLen - (*len), "%d:%s", i, pBuf); + } else { + (*len) += tsnprintf(buf + (*len), bufLen - (*len), "%d:%" PRId64, i, tsMs); + } +} + // for leader -static void syncHearbeatReplyTime2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen) { +static void syncHearbeatReplyTime2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen, bool formatTime) { int32_t len = 0; len += tsnprintf(buf + len, bufLen - len, "%s", "{"); for (int32_t i = 0; i < pSyncNode->replicaNum; ++i) { int64_t tsMs = syncIndexMgrGetRecvTime(pSyncNode->pMatchIndex, &(pSyncNode->replicasId[i])); - len += tsnprintf(buf + len, bufLen - len, "%d:%" PRId64, i, tsMs); + syncPrintTime(formatTime, &len, tsMs, i, buf, bufLen); + if (i < pSyncNode->replicaNum - 1) { + len += tsnprintf(buf + len, bufLen - len, "%s", ","); + } + } + len += tsnprintf(buf + len, bufLen - len, "%s", "}"); +} + +static void syncSentHearbeatTime2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen, bool formatTime) { + int32_t len = 0; + len += tsnprintf(buf + len, bufLen - len, "%s", "{"); + for (int32_t i = 0; i < pSyncNode->replicaNum; ++i) { + int64_t tsMs = syncIndexMgrGetSentTime(pSyncNode->pMatchIndex, &(pSyncNode->replicasId[i])); + syncPrintTime(formatTime, &len, tsMs, i, buf, bufLen); if (i < pSyncNode->replicaNum - 1) { len += tsnprintf(buf + len, bufLen - len, "%s", ","); } @@ -123,12 +151,12 @@ static void syncHearbeatReplyTime2Str(SSyncNode* pSyncNode, char* buf, int32_t b } // for follower -static void syncHearbeatTime2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen) { +static void syncHearbeatTime2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen, bool formatTime) { int32_t len = 0; len += tsnprintf(buf + len, bufLen - len, "%s", "{"); for (int32_t i = 0; i < pSyncNode->replicaNum; ++i) { int64_t tsMs = syncIndexMgrGetRecvTime(pSyncNode->pNextIndex, &(pSyncNode->replicasId[i])); - len += tsnprintf(buf + len, bufLen - len, "%d:%" PRId64, i, tsMs); + syncPrintTime(formatTime, &len, tsMs, i, buf, bufLen); if (i < pSyncNode->replicaNum - 1) { len += tsnprintf(buf + len, bufLen - len, "%s", ","); } @@ -152,8 +180,9 @@ static void syncLogReplStates2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLe for (int32_t i = 0; i < pSyncNode->replicaNum; i++) { SSyncLogReplMgr* pMgr = pSyncNode->logReplMgrs[i]; if (pMgr == NULL) break; - len += tsnprintf(buf + len, bufLen - len, "%d:%d [%" PRId64 ", %" PRId64 ", %" PRId64 "]", i, pMgr->restored, + len += tsnprintf(buf + len, bufLen - len, "%d:%d [%" PRId64 ", %" PRId64 ", %" PRId64 "] ", i, pMgr->restored, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex); + len += tsnprintf(buf + len, bufLen - len, "%d", pMgr->sendCount); if (i + 1 < pSyncNode->replicaNum) { len += tsnprintf(buf + len, bufLen - len, "%s", ", "); } @@ -173,7 +202,8 @@ static void syncPeerState2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen) { len += tsnprintf(buf + len, bufLen - len, "%s", "}"); } -void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNode* pNode, const char* format, ...) 
{ +void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, bool formatTime, SSyncNode* pNode, + const char* format, ...) { if (pNode == NULL || pNode->pLogStore == NULL) return; int64_t currentTerm = raftStoreGetTerm(pNode); @@ -205,10 +235,13 @@ void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNo syncLogBufferStates2Str(pNode, bufferStatesStr, sizeof(bufferStatesStr)); char hbrTimeStr[256] = ""; - syncHearbeatReplyTime2Str(pNode, hbrTimeStr, sizeof(hbrTimeStr)); + syncHearbeatReplyTime2Str(pNode, hbrTimeStr, sizeof(hbrTimeStr), formatTime); char hbTimeStr[256] = ""; - syncHearbeatTime2Str(pNode, hbTimeStr, sizeof(hbTimeStr)); + syncHearbeatTime2Str(pNode, hbTimeStr, sizeof(hbTimeStr), formatTime); + + char sentHbTimeStr[512] = ""; + syncSentHearbeatTime2Str(pNode, sentHbTimeStr, sizeof(sentHbTimeStr), formatTime); char eventLog[512]; // {0}; va_list argpointer; @@ -234,14 +267,15 @@ void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNo ", elect-times:%d, as-leader-times:%d, as-assigned-leader-times:%d, cfg-ch-times:%d, hb-slow:%d, hbr-slow:%d, " "aq-items:%d, snaping:%" PRId64 ", replicas:%d, last-cfg:%" PRId64 ", chging:%d, restore:%d, quorum:%d, elect-lc-timer:%" PRId64 ", hb:%" PRId64 - ", buffer:%s, repl-mgrs:%s, members:%s, hb:%s, hb-reply:%s, arb-token:%s", + ", buffer:%s, repl-mgrs:%s, members:%s, send hb:%s, recv hb:%s, recv hb-reply:%s, arb-token:%s, msg[sent:%d, recv:%d, slow-recv:%d]", pNode->vgId, eventLog, syncStr(pNode->state), currentTerm, pNode->commitIndex, pNode->assignedCommitIndex, appliedIndex, logBeginIndex, logLastIndex, pNode->minMatchIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm, pNode->electNum, pNode->becomeLeaderNum, pNode->becomeAssignedLeaderNum, pNode->configChangeNum, pNode->hbSlowNum, pNode->hbrSlowNum, aqItems, pNode->snapshottingIndex, pNode->replicaNum, pNode->raftCfg.lastConfigIndex, pNode->changing, pNode->restoreFinish, syncNodeDynamicQuorum(pNode), pNode->electTimerLogicClock, pNode->heartbeatTimerLogicClockUser, bufferStatesStr, - replMgrStatesStr, cfgStr, hbTimeStr, hbrTimeStr, pNode->arbToken); + replMgrStatesStr, cfgStr, sentHbTimeStr, hbTimeStr, hbrTimeStr, pNode->arbToken, pNode->sendCount, pNode->recvCount, + pNode->slowCount); } } diff --git a/source/libs/transport/inc/transComm.h b/source/libs/transport/inc/transComm.h index 5c79b379ed..2ba88cdcc6 100644 --- a/source/libs/transport/inc/transComm.h +++ b/source/libs/transport/inc/transComm.h @@ -96,7 +96,7 @@ typedef void* queue[2]; // #define TRANS_RETRY_COUNT_LIMIT 100 // retry count limit // #define TRANS_RETRY_INTERVAL 15 // retry interval (ms) -#define TRANS_CONN_TIMEOUT 3000 // connect timeout (ms) +#define TRANS_CONN_TIMEOUT 5000 // connect timeout (ms) #define TRANS_READ_TIMEOUT 3000 // read timeout (ms) #define TRANS_PACKET_LIMIT 1024 * 1024 * 512 @@ -452,6 +452,7 @@ void transPrintEpSet(SEpSet* pEpSet); void transFreeMsg(void* msg); int32_t transCompressMsg(char* msg, int32_t len); int32_t transDecompressMsg(char** msg, int32_t* len); +int32_t transDecompressMsgExt(char const* msg, int32_t len, char** out, int32_t* outLen); int32_t transOpenRefMgt(int size, void (*func)(void*)); void transCloseRefMgt(int32_t refMgt); diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index c03d3418fa..c62b8d21c9 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -725,7 +725,8 @@ void cliConnTimeout(uv_timer_t* handle) { return; } -
tTrace("%s conn %p conn timeout", CONN_GET_INST_LABEL(conn), conn); + cliMayUpdateFqdnCache(pThrd->fqdn2ipCache, conn->dstAddr); + tTrace("%s conn %p failed to connect %s since conn timeout", CONN_GET_INST_LABEL(conn), conn, conn->dstAddr); TAOS_UNUSED(transUnrefCliHandle(conn)); } @@ -1334,13 +1335,31 @@ static void cliBatchSendCb(uv_write_t* req, int status) { } } bool cliConnMayAddUserInfo(SCliConn* pConn, STransMsgHead** ppHead, int32_t* msgLen) { + int32_t code = 0; SCliThrd* pThrd = pConn->hostThrd; STrans* pInst = pThrd->pInst; if (pConn->userInited == 1) { return false; } STransMsgHead* pHead = *ppHead; - STransMsgHead* tHead = taosMemoryCalloc(1, *msgLen + sizeof(pInst->user)); + int32_t len = *msgLen; + char* oriMsg = NULL; + int32_t oriLen = 0; + + if (pHead->comp == 1) { + int32_t msgLen = htonl(pHead->msgLen); + code = transDecompressMsgExt((char*)(pHead), msgLen, &oriMsg, &oriLen); + if (code < 0) { + tError("failed to decompress since %s", tstrerror(code)); + return false; + } else { + tDebug("decompress msg and resent, compress size %d, raw size %d", msgLen, oriLen); + } + + pHead = (STransMsgHead*)oriMsg; + len = oriLen; + } + STransMsgHead* tHead = taosMemoryCalloc(1, len + sizeof(pInst->user)); if (tHead == NULL) { return false; } @@ -1348,14 +1367,17 @@ bool cliConnMayAddUserInfo(SCliConn* pConn, STransMsgHead** ppHead, int32_t* msg memcpy((char*)tHead + TRANS_MSG_OVERHEAD, pInst->user, sizeof(pInst->user)); memcpy((char*)tHead + TRANS_MSG_OVERHEAD + sizeof(pInst->user), (char*)pHead + TRANS_MSG_OVERHEAD, - *msgLen - TRANS_MSG_OVERHEAD); + len - TRANS_MSG_OVERHEAD); tHead->withUserInfo = 1; *ppHead = tHead; - *msgLen += sizeof(pInst->user); + *msgLen = len + sizeof(pInst->user); pConn->pInitUserReq = tHead; pConn->userInited = 1; + if (oriMsg != NULL) { + taosMemoryFree(oriMsg); + } return true; } int32_t cliBatchSend(SCliConn* pConn, int8_t direct) { @@ -1421,9 +1443,8 @@ int32_t cliBatchSend(SCliConn* pConn, int8_t direct) { pReq->contLen = 0; } - int32_t msgLen = transMsgLenFromCont(pReq->contLen); - STransMsgHead* pHead = transHeadFromCont(pReq->pCont); + int32_t msgLen = transMsgLenFromCont(pReq->contLen); char* content = pReq->pCont; int32_t contLen = pReq->contLen; @@ -1679,7 +1700,7 @@ void cliConnCb(uv_connect_t* req, int status) { STUB_RAND_NETWORK_ERR(status); if (status != 0) { - tDebug("%s conn %p failed to connect to %s since %s", CONN_GET_INST_LABEL(pConn), pConn, pConn->dstAddr, + tError("%s conn %p failed to connect to %s since %s", CONN_GET_INST_LABEL(pConn), pConn, pConn->dstAddr, uv_strerror(status)); cliMayUpdateFqdnCache(pThrd->fqdn2ipCache, pConn->dstAddr); TAOS_UNUSED(transUnrefCliHandle(pConn)); @@ -1832,15 +1853,20 @@ static FORCE_INLINE int32_t cliUpdateFqdnCache(SHashObj* cache, char* fqdn) { if (code == 0) { size_t len = strlen(fqdn); uint32_t* v = taosHashGet(cache, fqdn, len); - if (addr != *v) { - char old[TSDB_FQDN_LEN] = {0}, new[TSDB_FQDN_LEN] = {0}; - tinet_ntoa(old, *v); - tinet_ntoa(new, addr); - tWarn("update ip of fqdn:%s, old: %s, new: %s", fqdn, old, new); - code = taosHashPut(cache, fqdn, strlen(fqdn), &addr, sizeof(addr)); + if (v != NULL) { + if (addr != *v) { + char old[TSDB_FQDN_LEN] = {0}, new[TSDB_FQDN_LEN] = {0}; + tinet_ntoa(old, *v); + tinet_ntoa(new, addr); + tWarn("update ip of fqdn:%s, old: %s, new: %s", fqdn, old, new); + code = taosHashPut(cache, fqdn, len, &addr, sizeof(addr)); + } + } else { + code = taosHashPut(cache, fqdn, len, &addr, sizeof(addr)); } } else { code = TSDB_CODE_RPC_FQDN_ERROR; // 
@@ -1832,15 +1853,20 @@ static FORCE_INLINE int32_t cliUpdateFqdnCache(SHashObj* cache, char* fqdn) { if (code == 0) { size_t len = strlen(fqdn); uint32_t* v = taosHashGet(cache, fqdn, len); - if (addr != *v) { - char old[TSDB_FQDN_LEN] = {0}, new[TSDB_FQDN_LEN] = {0}; - tinet_ntoa(old, *v); - tinet_ntoa(new, addr); - tWarn("update ip of fqdn:%s, old: %s, new: %s", fqdn, old, new); - code = taosHashPut(cache, fqdn, strlen(fqdn), &addr, sizeof(addr)); + if (v != NULL) { + if (addr != *v) { + char old[TSDB_FQDN_LEN] = {0}, new[TSDB_FQDN_LEN] = {0}; + tinet_ntoa(old, *v); + tinet_ntoa(new, addr); + tWarn("update ip of fqdn:%s, old: %s, new: %s", fqdn, old, new); + code = taosHashPut(cache, fqdn, len, &addr, sizeof(addr)); + } + } else { + code = taosHashPut(cache, fqdn, len, &addr, sizeof(addr)); } } else { code = TSDB_CODE_RPC_FQDN_ERROR; // TSDB_CODE_RPC_INVALID_FQDN; + tWarn("failed to get ip from fqdn:%s since %s", fqdn, tstrerror(code)); } return code; } @@ -1991,7 +2017,9 @@ void cliHandleBatchReq(SCliThrd* pThrd, SCliReq* pReq) { tWarn("%s conn %p failed to added to heap cache since %s", pInst->label, pConn, tstrerror(code)); } } else { - // TAOS_CHECK_GOTO(code, &lino, _exception); + if (code == TSDB_CODE_OUT_OF_MEMORY && pConn == NULL) { + TAOS_CHECK_GOTO(code, &lino, _exception); + } return; } } @@ -2470,10 +2498,6 @@ static int32_t createThrdObj(void* trans, SCliThrd** ppThrd) { _end: if (pThrd) { TAOS_UNUSED(taosThreadMutexDestroy(&pThrd->msgMtx)); - - TAOS_UNUSED(uv_loop_close(pThrd->loop)); - taosMemoryFree(pThrd->loop); - TAOS_UNUSED((taosThreadMutexDestroy(&pThrd->msgMtx))); transAsyncPoolDestroy(pThrd->asyncPool); for (int i = 0; i < taosArrayGetSize(pThrd->timerList); i++) { uv_timer_t* timer = taosArrayGetP(pThrd->timerList, i); @@ -2483,6 +2507,9 @@ _end: taosArrayDestroy(pThrd->timerList); TAOS_UNUSED(destroyConnPool(pThrd)); + TAOS_UNUSED(uv_loop_close(pThrd->loop)); + taosMemoryFree(pThrd->loop); + transDQDestroy(pThrd->delayQueue, NULL); transDQDestroy(pThrd->timeoutQueue, NULL); transDQDestroy(pThrd->waitConnQueue, NULL); @@ -2901,6 +2928,7 @@ bool cliMayRetry(SCliConn* pConn, SCliReq* pReq, STransMsg* pResp) { transFreeMsg(pResp->pCont); } pResp->pCont = NULL; + pResp->info.hasEpSet = 0; if (code != TSDB_CODE_RPC_BROKEN_LINK && code != TSDB_CODE_RPC_NETWORK_UNAVAIL && code != TSDB_CODE_SUCCESS) { // save one internal code pCtx->retryCode = code; @@ -2933,10 +2961,8 @@ void cliMayResetRespCode(SCliReq* pReq, STransMsg* pResp) { // check whole vnodes is offline on this vgroup if (((pCtx->epSet != NULL) && pCtx->epsetRetryCnt >= pCtx->epSet->numOfEps) || pCtx->retryStep > 0) { - if (pResp->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) { - pResp->code = TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED; - } else if (pResp->code == TSDB_CODE_RPC_BROKEN_LINK) { - pResp->code = TSDB_CODE_RPC_SOMENODE_BROKEN_LINK; + if (pResp->code == TSDB_CODE_RPC_BROKEN_LINK) { + pResp->code = TSDB_CODE_RPC_NETWORK_UNAVAIL; // TSDB_CODE_RPC_SOMENODE_BROKEN_LINK; } } }
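Two defensive fixes stand out in the transCli.c hunks above. First, cliUpdateFqdnCache previously dereferenced the result of taosHashGet without a NULL check, so an fqdn that had never been cached would crash instead of being inserted; the fix splits the logic into insert-if-missing and update-if-changed. Distilled into a self-contained decision function (the hash calls themselves are elided; fqdn_cache_needs_put is an illustrative name):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* cached is the hash lookup result (NULL when the fqdn was never seen);
     * returns true when the caller should put the resolved address. */
    static bool fqdn_cache_needs_put(const uint32_t *cached, uint32_t resolved) {
      if (cached == NULL) {
        return true;               /* first sighting: insert */
      }
      if (*cached != resolved) {   /* address changed: log and overwrite */
        fprintf(stderr, "ip of fqdn changed: %u -> %u\n", *cached, resolved);
        return true;
      }
      return false;                /* unchanged: leave the cache alone */
    }

Second, the teardown path in createThrdObj now closes the uv loop only after the connection pool is destroyed, matching the order in which those objects depend on each other.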
diff --git a/source/libs/transport/src/transComm.c b/source/libs/transport/src/transComm.c index 66bd4a08f3..c0edcd54e4 100644 --- a/source/libs/transport/src/transComm.c +++ b/source/libs/transport/src/transComm.c @@ -77,6 +77,11 @@ int32_t transDecompressMsg(char** msg, int32_t* len) { STransMsgHead* pNewHead = (STransMsgHead*)buf; int32_t decompLen = LZ4_decompress_safe(pCont + sizeof(STransCompMsg), (char*)pNewHead->content, tlen - sizeof(STransMsgHead) - sizeof(STransCompMsg), oriLen); + + if (decompLen != oriLen) { + taosMemoryFree(buf); + return TSDB_CODE_INVALID_MSG; + } memcpy((char*)pNewHead, (char*)pHead, sizeof(STransMsgHead)); *len = oriLen + sizeof(STransMsgHead); @@ -84,9 +89,36 @@ taosMemoryFree(pHead); *msg = buf; + return 0; +} +int32_t transDecompressMsgExt(char const* msg, int32_t len, char** out, int32_t* outLen) { + STransMsgHead* pHead = (STransMsgHead*)msg; + char* pCont = transContFromHead(pHead); + + STransCompMsg* pComp = (STransCompMsg*)pCont; + int32_t oriLen = htonl(pComp->contLen); + + int32_t tlen = len; + char* buf = taosMemoryCalloc(1, oriLen + sizeof(STransMsgHead)); + if (buf == NULL) { + return terrno; + } + + STransMsgHead* pNewHead = (STransMsgHead*)buf; + int32_t decompLen = LZ4_decompress_safe(pCont + sizeof(STransCompMsg), (char*)pNewHead->content, + tlen - sizeof(STransMsgHead) - sizeof(STransCompMsg), oriLen); if (decompLen != oriLen) { + tError("msgLen:%d, originLen:%d, decompLen:%d", len, oriLen, decompLen); + taosMemoryFree(buf); return TSDB_CODE_INVALID_MSG; } + memcpy((char*)pNewHead, (char*)pHead, sizeof(STransMsgHead)); + + *out = buf; + *outLen = oriLen + sizeof(STransMsgHead); + pNewHead->msgLen = *outLen; + pNewHead->comp = 0; + return 0; }
diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c index d02bfb8281..ecbdd0db84 100644 --- a/source/libs/transport/src/transSvr.c +++ b/source/libs/transport/src/transSvr.c @@ -1320,8 +1320,6 @@ static FORCE_INLINE SSvrConn* createConn(void* hThrd) { TAOS_CHECK_GOTO(TSDB_CODE_REF_INVALID_ID, &lino, _end); } - QUEUE_INIT(&exh->q); - SExHandle* pSelf = transAcquireExHandle(uvGetConnRefOfThrd(pThrd), exh->refId); if (pSelf != exh) { TAOS_CHECK_GOTO(TSDB_CODE_REF_INVALID_ID, NULL, _end); } @@ -1369,6 +1367,12 @@ static FORCE_INLINE SSvrConn* createConn(void* hThrd) { return pConn; _end: if (pConn) { + if (pConn->refId > 0) { + transReleaseExHandle(uvGetConnRefOfThrd(pThrd), pConn->refId); + transRemoveExHandle(uvGetConnRefOfThrd(pThrd), pConn->refId); + pConn->refId = -1; + } + transQueueDestroy(&pConn->resps); transDestroyBuffer(&pConn->readBuf); taosHashCleanup(pConn->pQTable); @@ -1378,7 +1382,7 @@ _end: taosMemoryFree(pConn); pConn = NULL; } - tError("%s failed to create conn since %s" PRId64, transLabel(pInst), tstrerror(code)); + tError("%s failed to create conn since %s", transLabel(pInst), tstrerror(code)); return NULL; }
diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index da26ddae3a..ce2b9218b5 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -415,10 +415,10 @@ static void printFileSet(int32_t vgId, SArray* fileSet, const char* str) { int32_t sz = taosArrayGetSize(fileSet); for (int32_t i = 0; i < sz; i++) { SWalFileInfo* pFileInfo = taosArrayGet(fileSet, i); - wInfo("vgId:%d, %s-%d, firstVer:%" PRId64 ", lastVer:%" PRId64 ", fileSize:%" PRId64 ", syncedOffset:%" PRId64 - ", createTs:%" PRId64 ", closeTs:%" PRId64, - vgId, str, i, pFileInfo->firstVer, pFileInfo->lastVer, pFileInfo->fileSize, pFileInfo->syncedOffset, - pFileInfo->createTs, pFileInfo->closeTs); + wTrace("vgId:%d, %s-%d, firstVer:%" PRId64 ", lastVer:%" PRId64 ", fileSize:%" PRId64 ", syncedOffset:%" PRId64 + ", createTs:%" PRId64 ", closeTs:%" PRId64, + vgId, str, i, pFileInfo->firstVer, pFileInfo->lastVer, pFileInfo->fileSize, pFileInfo->syncedOffset, + pFileInfo->createTs, pFileInfo->closeTs); } }
diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c index 1a9652b3bb..66ead2fd26 100644 --- a/source/libs/wal/src/walWrite.c +++ b/source/libs/wal/src/walWrite.c @@ -376,6 +376,10 @@ static FORCE_INLINE int32_t walCheckAndRoll(SWal *pWal) { int32_t walBeginSnapshot(SWal *pWal, int64_t ver, int64_t logRetention) { int32_t code = 0; + if (pWal->cfg.level == TAOS_WAL_SKIP) { + TAOS_RETURN(TSDB_CODE_SUCCESS); + } + if (logRetention < 0) { TAOS_RETURN(TSDB_CODE_FAILED); } @@ -404,6 +408,10 @@ _exit: int32_t walEndSnapshot(SWal *pWal) { int32_t code = 0, lino = 0; + if (pWal->cfg.level == TAOS_WAL_SKIP) { + TAOS_RETURN(TSDB_CODE_SUCCESS); + } + TAOS_UNUSED(taosThreadRwlockWrlock(&pWal->mutex)); int64_t ver = pWal->vers.verInSnapshotting;
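The walWrite.c hunks make walBeginSnapshot and walEndSnapshot return success immediately when the WAL level is TAOS_WAL_SKIP, so snapshot bookkeeping is never attempted against log files that were never written; the new WalSkipLevel.roll test that follows drives exactly this path across two snapshot cycles. The guard-clause shape, as a minimal sketch (the enum values and wal_t layout are illustrative, not the real SWal):

    typedef enum { WAL_SKIP = 0, WAL_WRITE = 1, WAL_FSYNC = 2 } wal_level_t;

    typedef struct {
      wal_level_t level;  /* persistence level from the vnode config */
      /* ... file set, locks, version bookkeeping ... */
    } wal_t;

    /* Begin a snapshot: when persistence is disabled there is nothing on
     * disk to roll or retain, so bail out before validating or locking. */
    static int wal_begin_snapshot(wal_t *wal, long long ver, long long retention) {
      if (wal->level == WAL_SKIP) return 0;  /* no-op by design */
      if (retention < 0) return -1;          /* invalid argument */
      /* ... take the write lock, record ver, roll the current file ... */
      return 0;
    }

Putting the skip check first keeps every later invariant (locks, file-set state) out of the skip path entirely.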
diff --git a/source/libs/wal/test/walMetaTest.cpp b/source/libs/wal/test/walMetaTest.cpp index 3e6fab116f..a958ad74e0 100644 --- a/source/libs/wal/test/walMetaTest.cpp +++ b/source/libs/wal/test/walMetaTest.cpp @@ -510,4 +510,27 @@ TEST_F(WalSkipLevel, restart) { TearDown(); SetUp(); +} + +TEST_F(WalSkipLevel, roll) { + int code; + int i; + for (i = 0; i < 100; i++) { + code = walAppendLog(pWal, i, 0, syncMeta, (void*)ranStr, ranStrLen); + ASSERT_EQ(code, 0); + code = walCommit(pWal, i); + } + walBeginSnapshot(pWal, i - 1, 0); + walEndSnapshot(pWal); + code = walAppendLog(pWal, 5, 0, syncMeta, (void*)ranStr, ranStrLen); + ASSERT_NE(code, 0); + for (; i < 200; i++) { + code = walAppendLog(pWal, i, 0, syncMeta, (void*)ranStr, ranStrLen); + ASSERT_EQ(code, 0); + code = walCommit(pWal, i); + } + code = walBeginSnapshot(pWal, i - 1, 0); + ASSERT_EQ(code, 0); + code = walEndSnapshot(pWal); + ASSERT_EQ(code, 0); } \ No newline at end of file
diff --git a/source/util/src/tanalytics.c b/source/util/src/tanalytics.c index 99d91700a2..68bbbb7e99 100644 --- a/source/util/src/tanalytics.c +++ b/source/util/src/tanalytics.c @@ -34,7 +34,7 @@ typedef struct { } SCurlResp; static SAlgoMgmt tsAlgos = {0}; -static int32_t taosAnalBufGetCont(SAnalBuf *pBuf, char **ppCont, int64_t *pContLen); +static int32_t taosAnalBufGetCont(SAnalyticBuf *pBuf, char **ppCont, int64_t *pContLen); const char *taosAnalAlgoStr(EAnalAlgoType type) { switch (type) { @@ -127,28 +143,44 @@ void taosAnalUpdate(int64_t newVer, SHashObj *pHash) { } bool taosAnalGetOptStr(const char *option, const char *optName, char *optValue, int32_t optMaxLen) { - char buf[TSDB_ANAL_ALGO_OPTION_LEN] = {0}; - int32_t bufLen = tsnprintf(buf, sizeof(buf), "%s=", optName); + char buf[TSDB_ANALYTIC_ALGO_OPTION_LEN] = {0}; + char *pStart = NULL; + char *pEnd = NULL; - char *pos1 = strstr(option, buf); - char *pos2 = strstr(option, ANAL_ALGO_SPLIT); - if (pos1 != NULL) { - if (optMaxLen > 0) { - int32_t copyLen = optMaxLen; - if (pos2 != NULL) { - copyLen = (int32_t)(pos2 - pos1 - strlen(optName)); - copyLen = MIN(copyLen, optMaxLen); - } - tstrncpy(optValue, pos1 + bufLen, copyLen); - } - return true; - } else { + pStart = strstr(option, optName); + if (pStart == NULL) { return false; } + + pEnd = strstr(pStart, ANAL_ALGO_SPLIT); + if (optMaxLen > 0) { + if (pEnd > pStart) { + int32_t len = (int32_t)(pEnd - pStart); + len = MIN(len + 1, TSDB_ANALYTIC_ALGO_OPTION_LEN); + tstrncpy(buf, pStart, len); + } else { + int32_t len = MIN(tListLen(buf), strlen(pStart) + 1); + tstrncpy(buf, pStart, len); + } + + char *pRight = strstr(buf, "="); + if (pRight == NULL) { + return false; + } else { + pRight += 1; + } + + int32_t unused = strtrim(pRight); + + int32_t vLen = MIN(optMaxLen, strlen(pRight) + 1); + tstrncpy(optValue, pRight, vLen); + } + + return true; } bool taosAnalGetOptInt(const char *option, const char *optName, int64_t *optValue) { - char buf[TSDB_ANAL_ALGO_OPTION_LEN] = {0}; + char buf[TSDB_ANALYTIC_ALGO_OPTION_LEN] = {0}; int32_t bufLen = tsnprintf(buf, sizeof(buf), "%s=", optName); char *pos1 = strstr(option, buf); @@ -163,7 +179,7 @@ bool taosAnalGetOptInt(const char *option, const char *optName, int64_t *optValu int32_t taosAnalGetAlgoUrl(const char *algoName, EAnalAlgoType type, char *url, int32_t urlLen) { int32_t code = 0; - char name[TSDB_ANAL_ALGO_KEY_LEN] = {0}; + char name[TSDB_ANALYTIC_ALGO_KEY_LEN] = {0}; int32_t nameLen = 1 + tsnprintf(name, sizeof(name) - 1, "%d:%s", type, algoName); char *unused = strntolower(name, name, nameLen);
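The taosAnalGetOptStr rewrite above changes how name=value options are extracted from a delimiter-separated string: the old code searched for the delimiter from the start of the whole string rather than from the matched name, so the computed copy length could be wrong, while the new code cuts the entire name=value token into a scratch buffer, splits on '=', and trims the value. A standalone version of that approach (get_opt, the comma delimiter, and the buffer sizes are illustrative; trimming is omitted for brevity):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Extract the value of `key` from "k1=v1,k2=v2,..." into val. */
    static bool get_opt(const char *opts, const char *key, char *val, size_t cap) {
      const char *start = strstr(opts, key);
      if (start == NULL) return false;

      char        token[128] = {0};
      const char *end = strchr(start, ',');               /* end of this token */
      size_t      len = end ? (size_t)(end - start) : strlen(start);
      if (len >= sizeof(token)) len = sizeof(token) - 1;
      memcpy(token, start, len);                          /* "key=value" */

      char *eq = strchr(token, '=');
      if (eq == NULL) return false;
      snprintf(val, cap, "%s", eq + 1);                   /* NUL-terminated value */
      return true;
    }

As in the patch, the initial match is substring-based (strstr on the bare name), so option names that are prefixes of one another still need care from callers.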
@@ -175,7 +191,7 @@ int32_t taosAnalGetAlgoUrl(const char *algoName, EAnalAlgoType type, char *url, uDebug("algo:%s, type:%s, url:%s", algoName, taosAnalAlgoStr(type), url); } else { url[0] = 0; - terrno = TSDB_CODE_ANAL_ALGO_NOT_FOUND; + terrno = TSDB_CODE_ANA_ALGO_NOT_FOUND; code = terrno; uError("algo:%s, type:%s, url not found", algoName, taosAnalAlgoStr(type)); } @@ -276,16 +292,16 @@ _OVER: return code; } -SJson *taosAnalSendReqRetJson(const char *url, EAnalHttpType type, SAnalBuf *pBuf) { +SJson *taosAnalSendReqRetJson(const char *url, EAnalHttpType type, SAnalyticBuf *pBuf) { int32_t code = -1; char *pCont = NULL; int64_t contentLen; SJson *pJson = NULL; SCurlResp curlRsp = {0}; - if (type == ANAL_HTTP_TYPE_GET) { + if (type == ANALYTICS_HTTP_TYPE_GET) { if (taosCurlGetRequest(url, &curlRsp) != 0) { - terrno = TSDB_CODE_ANAL_URL_CANT_ACCESS; + terrno = TSDB_CODE_ANA_URL_CANT_ACCESS; goto _OVER; } } else { @@ -295,20 +311,20 @@ SJson *taosAnalSendReqRetJson(const char *url, EAnalHttpType type, SAnalBu goto _OVER; } if (taosCurlPostRequest(url, &curlRsp, pCont, contentLen) != 0) { - terrno = TSDB_CODE_ANAL_URL_CANT_ACCESS; + terrno = TSDB_CODE_ANA_URL_CANT_ACCESS; goto _OVER; } } if (curlRsp.data == NULL || curlRsp.dataLen == 0) { - terrno = TSDB_CODE_ANAL_URL_RSP_IS_NULL; + terrno = TSDB_CODE_ANA_URL_RSP_IS_NULL; goto _OVER; } pJson = tjsonParse(curlRsp.data); if (pJson == NULL) { if (curlRsp.data[0] == '<') { - terrno = TSDB_CODE_ANAL_ANODE_RETURN_ERROR; + terrno = TSDB_CODE_ANA_ANODE_RETURN_ERROR; } else { terrno = TSDB_CODE_INVALID_JSON_FORMAT; } @@ -360,7 +376,7 @@ _OVER: return code; } -static int32_t taosAnalJsonBufWriteOptInt(SAnalBuf *pBuf, const char *optName, int64_t optVal) { +static int32_t taosAnalJsonBufWriteOptInt(SAnalyticBuf *pBuf, const char *optName, int64_t optVal) { char buf[64] = {0}; int32_t bufLen = tsnprintf(buf, sizeof(buf), "\"%s\": %" PRId64 ",\n", optName, optVal); if (taosWriteFile(pBuf->filePtr, buf, bufLen) != bufLen) { @@ -369,7 +385,7 @@ static int32_t taosAnalJsonBufWriteOptInt(SAnalBuf *pBuf, const char *optName, i return 0; } -static int32_t taosAnalJsonBufWriteOptStr(SAnalBuf *pBuf, const char *optName, const char *optVal) { +static int32_t taosAnalJsonBufWriteOptStr(SAnalyticBuf *pBuf, const char *optName, const char *optVal) { char buf[128] = {0}; int32_t bufLen = tsnprintf(buf, sizeof(buf), "\"%s\": \"%s\",\n", optName, optVal); if (taosWriteFile(pBuf->filePtr, buf, bufLen) != bufLen) { @@ -378,7 +394,7 @@ static int32_t taosAnalJsonBufWriteOptStr(SAnalBuf *pBuf, const char *optName, c return 0; } -static int32_t taosAnalJsonBufWriteOptFloat(SAnalBuf *pBuf, const char *optName, float optVal) { +static int32_t taosAnalJsonBufWriteOptFloat(SAnalyticBuf *pBuf, const char *optName, float optVal) { char buf[128] = {0}; int32_t bufLen = tsnprintf(buf, sizeof(buf), "\"%s\": %f,\n", optName, optVal); if (taosWriteFile(pBuf->filePtr, buf, bufLen) != bufLen) { @@ -387,7 +403,7 @@ static int32_t taosAnalJsonBufWriteOptFloat(SAnalBuf *pBuf, const char *optName, return 0; } -static int32_t taosAnalJsonBufWriteStr(SAnalBuf *pBuf, const char *buf, int32_t bufLen) { +static int32_t taosAnalJsonBufWriteStr(SAnalyticBuf *pBuf, const char *buf, int32_t bufLen) { if (bufLen <= 0) { bufLen = strlen(buf); } @@ -397,9 +413,9 @@ static int32_t taosAnalJsonBufWriteStr(SAnalBuf *pBuf, const char *buf, int32_t return 0; } -static int32_t taosAnalJsonBufWriteStart(SAnalBuf *pBuf) { return taosAnalJsonBufWriteStr(pBuf, "{\n", 0); } +static int32_t taosAnalJsonBufWriteStart(SAnalyticBuf *pBuf) { return taosAnalJsonBufWriteStr(pBuf, "{\n", 0); } -static int32_t tsosAnalJsonBufOpen(SAnalBuf *pBuf, int32_t numOfCols) { +static int32_t 
tsosAnalJsonBufOpen(SAnalyticBuf *pBuf, int32_t numOfCols) { pBuf->filePtr = taosOpenFile(pBuf->fileName, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_WRITE_THROUGH); if (pBuf->filePtr == NULL) { return terrno; @@ -409,7 +425,7 @@ static int32_t tsosAnalJsonBufOpen(SAnalBuf *pBuf, int32_t numOfCols) { if (pBuf->pCols == NULL) return TSDB_CODE_OUT_OF_MEMORY; pBuf->numOfCols = numOfCols; - if (pBuf->bufType == ANAL_BUF_TYPE_JSON) { + if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON) { return taosAnalJsonBufWriteStart(pBuf); } @@ -426,7 +442,7 @@ static int32_t tsosAnalJsonBufOpen(SAnalBuf *pBuf, int32_t numOfCols) { return taosAnalJsonBufWriteStart(pBuf); } -static int32_t taosAnalJsonBufWriteColMeta(SAnalBuf *pBuf, int32_t colIndex, int32_t colType, const char *colName) { +static int32_t taosAnalJsonBufWriteColMeta(SAnalyticBuf *pBuf, int32_t colIndex, int32_t colType, const char *colName) { char buf[128] = {0}; bool first = (colIndex == 0); bool last = (colIndex == pBuf->numOfCols - 1); @@ -452,16 +468,16 @@ static int32_t taosAnalJsonBufWriteColMeta(SAnalBuf *pBuf, int32_t colIndex, int return 0; } -static int32_t taosAnalJsonBufWriteDataBegin(SAnalBuf *pBuf) { +static int32_t taosAnalJsonBufWriteDataBegin(SAnalyticBuf *pBuf) { return taosAnalJsonBufWriteStr(pBuf, "\"data\": [\n", 0); } -static int32_t taosAnalJsonBufWriteStrUseCol(SAnalBuf *pBuf, const char *buf, int32_t bufLen, int32_t colIndex) { +static int32_t taosAnalJsonBufWriteStrUseCol(SAnalyticBuf *pBuf, const char *buf, int32_t bufLen, int32_t colIndex) { if (bufLen <= 0) { bufLen = strlen(buf); } - if (pBuf->bufType == ANAL_BUF_TYPE_JSON) { + if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON) { if (taosWriteFile(pBuf->filePtr, buf, bufLen) != bufLen) { return terrno; } @@ -474,11 +490,11 @@ static int32_t taosAnalJsonBufWriteStrUseCol(SAnalBuf *pBuf, const char *buf, in return 0; } -static int32_t taosAnalJsonBufWriteColBegin(SAnalBuf *pBuf, int32_t colIndex) { +static int32_t taosAnalJsonBufWriteColBegin(SAnalyticBuf *pBuf, int32_t colIndex) { return taosAnalJsonBufWriteStrUseCol(pBuf, "[\n", 0, colIndex); } -static int32_t taosAnalJsonBufWriteColEnd(SAnalBuf *pBuf, int32_t colIndex) { +static int32_t taosAnalJsonBufWriteColEnd(SAnalyticBuf *pBuf, int32_t colIndex) { if (colIndex == pBuf->numOfCols - 1) { return taosAnalJsonBufWriteStrUseCol(pBuf, "\n]\n", 0, colIndex); @@ -487,7 +503,7 @@ static int32_t taosAnalJsonBufWriteColEnd(SAnalBuf *pBuf, int32_t colIndex) { } } -static int32_t taosAnalJsonBufWriteColData(SAnalBuf *pBuf, int32_t colIndex, int32_t colType, void *colValue) { +static int32_t taosAnalJsonBufWriteColData(SAnalyticBuf *pBuf, int32_t colIndex, int32_t colType, void *colValue) { char buf[64]; int32_t bufLen = 0; @@ -541,12 +557,12 @@ static int32_t taosAnalJsonBufWriteColData(SAnalBuf *pBuf, int32_t colIndex, int return taosAnalJsonBufWriteStrUseCol(pBuf, buf, bufLen, colIndex); } -static int32_t taosAnalJsonBufWriteDataEnd(SAnalBuf *pBuf) { +static int32_t taosAnalJsonBufWriteDataEnd(SAnalyticBuf *pBuf) { int32_t code = 0; char *pCont = NULL; int64_t contLen = 0; - if (pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) { + if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) { for (int32_t i = 0; i < pBuf->numOfCols; ++i) { SAnalyticsColBuf *pCol = &pBuf->pCols[i]; @@ -570,14 +586,14 @@ static int32_t taosAnalJsonBufWriteDataEnd(SAnalBuf *pBuf) { return taosAnalJsonBufWriteStr(pBuf, "],\n", 0); } -static int32_t taosAnalJsonBufWriteEnd(SAnalBuf *pBuf) { +static int32_t taosAnalJsonBufWriteEnd(SAnalyticBuf *pBuf) { 
int32_t code = taosAnalJsonBufWriteOptInt(pBuf, "rows", pBuf->pCols[0].numOfRows); if (code != 0) return code; return taosAnalJsonBufWriteStr(pBuf, "\"protocol\": 1.0\n}", 0); } -int32_t taosAnalJsonBufClose(SAnalBuf *pBuf) { +int32_t taosAnalJsonBufClose(SAnalyticBuf *pBuf) { int32_t code = taosAnalJsonBufWriteEnd(pBuf); if (code != 0) return code; @@ -588,7 +604,7 @@ int32_t taosAnalJsonBufClose(SAnalBuf *pBuf) { if (code != 0) return code; } - if (pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) { + if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) { for (int32_t i = 0; i < pBuf->numOfCols; ++i) { SAnalyticsColBuf *pCol = &pBuf->pCols[i]; if (pCol->filePtr != NULL) { @@ -603,14 +619,14 @@ int32_t taosAnalJsonBufClose(SAnalBuf *pBuf) { return 0; } -void taosAnalBufDestroy(SAnalBuf *pBuf) { +void taosAnalBufDestroy(SAnalyticBuf *pBuf) { if (pBuf->fileName[0] != 0) { if (pBuf->filePtr != NULL) (void)taosCloseFile(&pBuf->filePtr); // taosRemoveFile(pBuf->fileName); pBuf->fileName[0] = 0; } - if (pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) { + if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) { for (int32_t i = 0; i < pBuf->numOfCols; ++i) { SAnalyticsColBuf *pCol = &pBuf->pCols[i]; if (pCol->fileName[0] != 0) { @@ -627,102 +643,102 @@ void taosAnalBufDestroy(SAnalBuf *pBuf) { pBuf->numOfCols = 0; } -int32_t tsosAnalBufOpen(SAnalBuf *pBuf, int32_t numOfCols) { - if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) { +int32_t tsosAnalBufOpen(SAnalyticBuf *pBuf, int32_t numOfCols) { + if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) { return tsosAnalJsonBufOpen(pBuf, numOfCols); } else { - return TSDB_CODE_ANAL_BUF_INVALID_TYPE; + return TSDB_CODE_ANA_BUF_INVALID_TYPE; } } -int32_t taosAnalBufWriteOptStr(SAnalBuf *pBuf, const char *optName, const char *optVal) { - if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) { +int32_t taosAnalBufWriteOptStr(SAnalyticBuf *pBuf, const char *optName, const char *optVal) { + if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) { return taosAnalJsonBufWriteOptStr(pBuf, optName, optVal); } else { - return TSDB_CODE_ANAL_BUF_INVALID_TYPE; + return TSDB_CODE_ANA_BUF_INVALID_TYPE; } } -int32_t taosAnalBufWriteOptInt(SAnalBuf *pBuf, const char *optName, int64_t optVal) { - if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) { +int32_t taosAnalBufWriteOptInt(SAnalyticBuf *pBuf, const char *optName, int64_t optVal) { + if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) { return taosAnalJsonBufWriteOptInt(pBuf, optName, optVal); } else { - return TSDB_CODE_ANAL_BUF_INVALID_TYPE; + return TSDB_CODE_ANA_BUF_INVALID_TYPE; } } -int32_t taosAnalBufWriteOptFloat(SAnalBuf *pBuf, const char *optName, float optVal) { - if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) { +int32_t taosAnalBufWriteOptFloat(SAnalyticBuf *pBuf, const char *optName, float optVal) { + if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) { return taosAnalJsonBufWriteOptFloat(pBuf, optName, optVal); } else { - return TSDB_CODE_ANAL_BUF_INVALID_TYPE; + return TSDB_CODE_ANA_BUF_INVALID_TYPE; } } -int32_t taosAnalBufWriteColMeta(SAnalBuf *pBuf, int32_t colIndex, int32_t colType, const char *colName) { - if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) { +int32_t 
taosAnalBufWriteColMeta(SAnalyticBuf *pBuf, int32_t colIndex, int32_t colType, const char *colName) { + if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) { return taosAnalJsonBufWriteColMeta(pBuf, colIndex, colType, colName); } else { - return TSDB_CODE_ANAL_BUF_INVALID_TYPE; + return TSDB_CODE_ANA_BUF_INVALID_TYPE; } } -int32_t taosAnalBufWriteDataBegin(SAnalBuf *pBuf) { - if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) { +int32_t taosAnalBufWriteDataBegin(SAnalyticBuf *pBuf) { + if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) { return taosAnalJsonBufWriteDataBegin(pBuf); } else { - return TSDB_CODE_ANAL_BUF_INVALID_TYPE; + return TSDB_CODE_ANA_BUF_INVALID_TYPE; } } -int32_t taosAnalBufWriteColBegin(SAnalBuf *pBuf, int32_t colIndex) { - if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) { +int32_t taosAnalBufWriteColBegin(SAnalyticBuf *pBuf, int32_t colIndex) { + if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) { return taosAnalJsonBufWriteColBegin(pBuf, colIndex); } else { - return TSDB_CODE_ANAL_BUF_INVALID_TYPE; + return TSDB_CODE_ANA_BUF_INVALID_TYPE; } } -int32_t taosAnalBufWriteColData(SAnalBuf *pBuf, int32_t colIndex, int32_t colType, void *colValue) { - if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) { +int32_t taosAnalBufWriteColData(SAnalyticBuf *pBuf, int32_t colIndex, int32_t colType, void *colValue) { + if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) { return taosAnalJsonBufWriteColData(pBuf, colIndex, colType, colValue); } else { - return TSDB_CODE_ANAL_BUF_INVALID_TYPE; + return TSDB_CODE_ANA_BUF_INVALID_TYPE; } } -int32_t taosAnalBufWriteColEnd(SAnalBuf *pBuf, int32_t colIndex) { - if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) { +int32_t taosAnalBufWriteColEnd(SAnalyticBuf *pBuf, int32_t colIndex) { + if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) { return taosAnalJsonBufWriteColEnd(pBuf, colIndex); } else { - return TSDB_CODE_ANAL_BUF_INVALID_TYPE; + return TSDB_CODE_ANA_BUF_INVALID_TYPE; } } -int32_t taosAnalBufWriteDataEnd(SAnalBuf *pBuf) { - if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) { +int32_t taosAnalBufWriteDataEnd(SAnalyticBuf *pBuf) { + if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) { return taosAnalJsonBufWriteDataEnd(pBuf); } else { - return TSDB_CODE_ANAL_BUF_INVALID_TYPE; + return TSDB_CODE_ANA_BUF_INVALID_TYPE; } } -int32_t taosAnalBufClose(SAnalBuf *pBuf) { - if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) { +int32_t taosAnalBufClose(SAnalyticBuf *pBuf) { + if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) { return taosAnalJsonBufClose(pBuf); } else { - return TSDB_CODE_ANAL_BUF_INVALID_TYPE; + return TSDB_CODE_ANA_BUF_INVALID_TYPE; } } -static int32_t taosAnalBufGetCont(SAnalBuf *pBuf, char **ppCont, int64_t *pContLen) { +static int32_t taosAnalBufGetCont(SAnalyticBuf *pBuf, char **ppCont, int64_t *pContLen) { *ppCont = NULL; *pContLen = 0; - if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) { + if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) { 
return taosAnalJsonBufGetCont(pBuf->fileName, ppCont, pContLen); } else { - return TSDB_CODE_ANAL_BUF_INVALID_TYPE; + return TSDB_CODE_ANA_BUF_INVALID_TYPE; } } @@ -730,7 +746,7 @@ static int32_t taosAnalBufGetCont(SAnalBuf *pBuf, char **ppCont, int64_t *pContL int32_t taosAnalyticsInit() { return 0; } void taosAnalyticsCleanup() {} -SJson *taosAnalSendReqRetJson(const char *url, EAnalHttpType type, SAnalBuf *pBuf) { return NULL; } +SJson *taosAnalSendReqRetJson(const char *url, EAnalHttpType type, SAnalyticBuf *pBuf) { return NULL; } int32_t taosAnalGetAlgoUrl(const char *algoName, EAnalAlgoType type, char *url, int32_t urlLen) { return 0; } bool taosAnalGetOptStr(const char *option, const char *optName, char *optValue, int32_t optMaxLen) { return true; } @@ -738,18 +754,18 @@ bool taosAnalGetOptInt(const char *option, const char *optName, int64_t *optV int64_t taosAnalGetVersion() { return 0; } void taosAnalUpdate(int64_t newVer, SHashObj *pHash) {} -int32_t tsosAnalBufOpen(SAnalBuf *pBuf, int32_t numOfCols) { return 0; } -int32_t taosAnalBufWriteOptStr(SAnalBuf *pBuf, const char *optName, const char *optVal) { return 0; } -int32_t taosAnalBufWriteOptInt(SAnalBuf *pBuf, const char *optName, int64_t optVal) { return 0; } -int32_t taosAnalBufWriteOptFloat(SAnalBuf *pBuf, const char *optName, float optVal) { return 0; } -int32_t taosAnalBufWriteColMeta(SAnalBuf *pBuf, int32_t colIndex, int32_t colType, const char *colName) { return 0; } -int32_t taosAnalBufWriteDataBegin(SAnalBuf *pBuf) { return 0; } -int32_t taosAnalBufWriteColBegin(SAnalBuf *pBuf, int32_t colIndex) { return 0; } -int32_t taosAnalBufWriteColData(SAnalBuf *pBuf, int32_t colIndex, int32_t colType, void *colValue) { return 0; } -int32_t taosAnalBufWriteColEnd(SAnalBuf *pBuf, int32_t colIndex) { return 0; } -int32_t taosAnalBufWriteDataEnd(SAnalBuf *pBuf) { return 0; } -int32_t taosAnalBufClose(SAnalBuf *pBuf) { return 0; } -void taosAnalBufDestroy(SAnalBuf *pBuf) {} +int32_t tsosAnalBufOpen(SAnalyticBuf *pBuf, int32_t numOfCols) { return 0; } +int32_t taosAnalBufWriteOptStr(SAnalyticBuf *pBuf, const char *optName, const char *optVal) { return 0; } +int32_t taosAnalBufWriteOptInt(SAnalyticBuf *pBuf, const char *optName, int64_t optVal) { return 0; } +int32_t taosAnalBufWriteOptFloat(SAnalyticBuf *pBuf, const char *optName, float optVal) { return 0; } +int32_t taosAnalBufWriteColMeta(SAnalyticBuf *pBuf, int32_t colIndex, int32_t colType, const char *colName) { return 0; } +int32_t taosAnalBufWriteDataBegin(SAnalyticBuf *pBuf) { return 0; } +int32_t taosAnalBufWriteColBegin(SAnalyticBuf *pBuf, int32_t colIndex) { return 0; } +int32_t taosAnalBufWriteColData(SAnalyticBuf *pBuf, int32_t colIndex, int32_t colType, void *colValue) { return 0; } +int32_t taosAnalBufWriteColEnd(SAnalyticBuf *pBuf, int32_t colIndex) { return 0; } +int32_t taosAnalBufWriteDataEnd(SAnalyticBuf *pBuf) { return 0; } +int32_t taosAnalBufClose(SAnalyticBuf *pBuf) { return 0; } +void taosAnalBufDestroy(SAnalyticBuf *pBuf) {} const char *taosAnalAlgoStr(EAnalAlgoType algoType) { return 0; } EAnalAlgoType taosAnalAlgoInt(const char *algoName) { return 0; } diff --git a/source/util/src/tconfig.c b/source/util/src/tconfig.c index f9bd41a400..d2a2b1fb9a 100644 --- a/source/util/src/tconfig.c +++ b/source/util/src/tconfig.c @@ -881,11 +881,26 @@ void cfgDumpCfg(SConfig *pCfg, bool tsc, bool dump) { case CFG_DTYPE_CHARSET: case CFG_DTYPE_TIMEZONE: case CFG_DTYPE_NONE: + if (strcasecmp(pItem->name, "dataDir") == 0) { + size_t sz = taosArrayGetSize(pItem->array); + if 
(sz > 1) { + for (size_t j = 0; j < sz; ++j) { + SDiskCfg *pCfg = taosArrayGet(pItem->array, j); + if (dump) { + (void)printf("%s %s %s l:%d p:%d d:%"PRIi8"\n", src, name, pCfg->dir, pCfg->level, pCfg->primary, pCfg->disable); + } else { + uInfo("%s %s %s l:%d p:%d d:%"PRIi8, src, name, pCfg->dir, pCfg->level, pCfg->primary, pCfg->disable); + } + } + break; + } + } if (dump) { (void)printf("%s %s %s\n", src, name, pItem->str); } else { uInfo("%s %s %s", src, name, pItem->str); } + break; } } diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 0d8a85155a..9e8a85d301 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -361,13 +361,14 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_ANODE_TOO_MANY_ALGO, "Anode too many algori TAOS_DEFINE_ERROR(TSDB_CODE_MND_ANODE_TOO_LONG_ALGO_NAME, "Anode too long algorithm name") TAOS_DEFINE_ERROR(TSDB_CODE_MND_ANODE_TOO_MANY_ALGO_TYPE, "Anode too many algorithm type") -TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_URL_RSP_IS_NULL, "Analysis service response is NULL") -TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_URL_CANT_ACCESS, "Analysis service can't access") -TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_ALGO_NOT_FOUND, "Analysis algorithm not found") -TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_ALGO_NOT_LOAD, "Analysis algorithm not loaded") -TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_BUF_INVALID_TYPE, "Analysis invalid buffer type") -TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_ANODE_RETURN_ERROR, "Analysis failed since anode return error") -TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_ANODE_TOO_MANY_ROWS, "Analysis failed since too many input rows for anode") +TAOS_DEFINE_ERROR(TSDB_CODE_ANA_URL_RSP_IS_NULL, "Analysis service response is NULL") +TAOS_DEFINE_ERROR(TSDB_CODE_ANA_URL_CANT_ACCESS, "Analysis service can't access") +TAOS_DEFINE_ERROR(TSDB_CODE_ANA_ALGO_NOT_FOUND, "Analysis algorithm is missing") +TAOS_DEFINE_ERROR(TSDB_CODE_ANA_ALGO_NOT_LOAD, "Analysis algorithm not loaded") +TAOS_DEFINE_ERROR(TSDB_CODE_ANA_BUF_INVALID_TYPE, "Analysis invalid buffer type") +TAOS_DEFINE_ERROR(TSDB_CODE_ANA_ANODE_RETURN_ERROR, "Analysis failed since anode return error") +TAOS_DEFINE_ERROR(TSDB_CODE_ANA_ANODE_TOO_MANY_ROWS, "Analysis failed since too many input rows for anode") +TAOS_DEFINE_ERROR(TSDB_CODE_ANA_WN_DATA, "white-noise data not processed") // mnode-sma TAOS_DEFINE_ERROR(TSDB_CODE_MND_SMA_ALREADY_EXIST, "SMA already exists") @@ -853,7 +854,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_STREAM_TASK_IVLD_STATUS, "Invalid task status TAOS_DEFINE_ERROR(TSDB_CODE_STREAM_CONFLICT_EVENT, "Stream conflict event") TAOS_DEFINE_ERROR(TSDB_CODE_STREAM_INTERNAL_ERROR, "Stream internal error") TAOS_DEFINE_ERROR(TSDB_CODE_STREAM_NOT_LEADER, "Stream task not on leader vnode") -TAOS_DEFINE_ERROR(TSDB_CODE_STREAM_INPUTQ_FULL, "Task input queue is full") +TAOS_DEFINE_ERROR(TSDB_CODE_STREAM_INPUTQ_FULL, "Task input queue is full") +TAOS_DEFINE_ERROR(TSDB_CODE_STREAM_INVLD_CHKPT, "Invalid checkpoint trigger msg") // TDLite TAOS_DEFINE_ERROR(TSDB_CODE_TDLITE_IVLD_OPEN_FLAGS, "Invalid TDLite open flags") diff --git a/tests/army/alter/alterConfig.py b/tests/army/alter/alterConfig.py index f8c52551e3..6a22dd014f 100644 --- a/tests/army/alter/alterConfig.py +++ b/tests/army/alter/alterConfig.py @@ -100,6 +100,79 @@ class TDTestCase(TBase): tdSql.query('show dnodes') tdSql.checkData(0, 3, "64") + def checkKeyValue(self, res, key, value, ikey = 0, ival = 1): + result = False + for row in res: + if row[ikey] == key: + if row[ival] != value: + raise Exception(f"key:{key} value:{row[ival]} != {value}") + else: + tdLog.info(f"key:{key} 
value:{row[ival]} == {value}") + result = True + break + if not result: + raise Exception(f"key:{key} not found") + + def checkRows(self, sql, nExpect, nRetry): + for i in range(nRetry): + res = tdSql.getResult(sql) + if len(res) == nExpect: + break + time.sleep(1) + if len(res) != nExpect: + raise Exception(f"rows:{len(res)} != {nExpect}") + + def alterBypassFlag(self): + """Add test case for altering bypassFlag(TD-32907) + """ + tdSql.execute(f"drop database if exists db") + tdSql.execute(f"create database db") + tdSql.execute("use db") + self.checkKeyValue(tdSql.getResult("show local variables;"), "bypassFlag", "0") + self.checkKeyValue(tdSql.getResult("show dnode 1 variables like 'bypassFlag'"), "bypassFlag", "0", 1, 2) + tdSql.execute("alter local 'bypassFlag 1'") + self.checkKeyValue(tdSql.getResult("show local variables;"), "bypassFlag", "1") + self.checkKeyValue(tdSql.getResult("show dnode 1 variables like 'bypassFlag'"), "bypassFlag", "0", 1, 2) + tdSql.execute("create table stb0(ts timestamp, c0 int) tags(t0 int)") + tdSql.execute("create table ctb0 using stb0 tags(0)") + tdSql.execute("insert into ctb0 values(now, 1)") + tdSql.query("select * from stb0") + tdSql.checkRows(0) + tdSql.execute("alter local 'bypassFlag 0'") + tdSql.execute("alter all dnodes 'bypassFlag 2'") + self.checkKeyValue(tdSql.getResult("show local variables"), "bypassFlag", "0") + self.checkKeyValue(tdSql.getResult("show dnode 1 variables like 'bypassFlag'"), "bypassFlag", "2", 1, 2) + tdSql.execute("insert into ctb0 values(now, 2)") + tdSql.query("select * from stb0") + tdSql.checkRows(0) + tdSql.execute("alter all dnodes 'bypassFlag 4'") + self.checkKeyValue(tdSql.getResult("show dnode 1 variables like 'bypassFlag'"), "bypassFlag", "4", 1, 2) + tdSql.execute("insert into ctb0 values(now, 4)") + tdSql.execute("insert into ctb1 using stb0 tags(1) values(now, 10)") + tdSql.query("select * from stb0") + tdSql.checkRows(0) + tdSql.query("show db.tables") + tdSql.checkRows(2) + tdSql.execute("alter all dnodes 'bypassFlag 8'") + self.checkKeyValue(tdSql.getResult("show dnode 1 variables like 'bypassFlag'"), "bypassFlag", "8", 1, 2) + tdSql.execute("insert into ctb0 values(now, 8)") + tdSql.execute("insert into ctb1 values(now, 18)") + tdSql.query("select * from stb0") + tdSql.checkRows(2) + tdSql.execute("flush database db") + self.checkRows("select * from stb0", 0, 10) + tdSql.execute("alter all dnodes 'bypassFlag 0'") + self.checkKeyValue(tdSql.getResult("show local variables"), "bypassFlag", "0") + self.checkKeyValue(tdSql.getResult("show dnode 1 variables like 'bypassFlag'"), "bypassFlag", "0", 1, 2) + tdSql.execute("insert into ctb0 values(now, 80)") + tdSql.execute("insert into ctb1 values(now, 180)") + tdSql.query("select * from stb0") + tdSql.checkRows(2) + tdSql.execute("flush database db") + for i in range(5): + self.checkRows("select * from stb0", 2, 1) + time.sleep(1) + # run def run(self): tdLog.debug(f"start to excute {__file__}") @@ -110,6 +183,8 @@ class TDTestCase(TBase): self.alterTtlConfig() # TS-5390 self.alterCachemodel() + # TD-32907 + self.alterBypassFlag() tdLog.success(f"{__file__} successfully executed") diff --git a/tests/army/output.txt b/tests/army/output.txt new file mode 100644 index 0000000000..ed3bd5da1a --- /dev/null +++ b/tests/army/output.txt @@ -0,0 +1,91 @@ +[10/28 19:12:21.666563] SUCC: created database (db_sub) +[10/28 19:12:21.694603] INFO: start creating 1000 table(s) with 8 thread(s) +[10/28 19:12:21.823202] SUCC: Spent 0.1290 seconds to create 1000 table(s) with 8 
diff --git a/tests/army/query/function/ans/interp.csv b/tests/army/query/function/ans/interp.csv index e1ba236aa1..3eaccd887a 100644 --- a/tests/army/query/function/ans/interp.csv +++ b/tests/army/query/function/ans/interp.csv @@ -366,3 +366,652 @@ taos> select _irowts as irowts ,tbname as table_name, c2 as c_c2, c3 as c_c3, _i 2020-02-01 00:00:16.000 | td32727 | 10 | 10 | true | 1 | 2020-02-01 00:00:16.000 | td32727 | 15 | 15 | true | 1 | +taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(null); + +taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(value, 1); + +taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(prev); + +taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(next); + +taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(linear); + +taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(null); + +taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(value, 1); + +taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(prev); + +taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(next); + +taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(linear); + +taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(null); + +taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(value, 1); + +taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' 
range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(prev); + +taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(next); + +taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(linear); + +taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(null); + _irowts | _isfilled | interp(c1) | +==================================================== + 2020-01-01 00:00:21.000 | false | 21 | + 2020-01-01 00:00:22.000 | true | NULL | + 2020-01-01 00:00:23.000 | true | NULL | + 2020-01-01 00:00:24.000 | true | NULL | + 2020-01-01 00:00:25.000 | true | NULL | + 2020-01-01 00:00:26.000 | true | NULL | + 2020-01-01 00:00:27.000 | true | NULL | + 2020-01-01 00:00:28.000 | true | NULL | + 2020-01-01 00:00:29.000 | true | NULL | + 2020-01-01 00:00:30.000 | true | NULL | + +taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(value, 1); + _irowts | _isfilled | interp(c1) | +==================================================== + 2020-01-01 00:00:21.000 | false | 21 | + 2020-01-01 00:00:22.000 | true | 1 | + 2020-01-01 00:00:23.000 | true | 1 | + 2020-01-01 00:00:24.000 | true | 1 | + 2020-01-01 00:00:25.000 | true | 1 | + 2020-01-01 00:00:26.000 | true | 1 | + 2020-01-01 00:00:27.000 | true | 1 | + 2020-01-01 00:00:28.000 | true | 1 | + 2020-01-01 00:00:29.000 | true | 1 | + 2020-01-01 00:00:30.000 | true | 1 | + +taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(prev); + _irowts | _isfilled | interp(c1) | +==================================================== + 2020-01-01 00:00:21.000 | false | 21 | + 2020-01-01 00:00:22.000 | true | 21 | + 2020-01-01 00:00:23.000 | true | 21 | + 2020-01-01 00:00:24.000 | true | 21 | + 2020-01-01 00:00:25.000 | true | 21 | + 2020-01-01 00:00:26.000 | true | 21 | + 2020-01-01 00:00:27.000 | true | 21 | + 2020-01-01 00:00:28.000 | true | 21 | + 2020-01-01 00:00:29.000 | true | 21 | + 2020-01-01 00:00:30.000 | true | 21 | + +taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(next); + _irowts | _isfilled | interp(c1) | +==================================================== + 2020-01-01 00:00:21.000 | false | 21 | + +taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(linear); + _irowts | _isfilled | interp(c1) | +==================================================== + 2020-01-01 00:00:21.000 | false | 21 | + +taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(null); + _irowts | _isfilled | interp(c1) | +==================================================== 
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | NULL |
+ 2020-01-01 00:00:17.000 | true | NULL |
+ 2020-01-01 00:00:18.000 | true | NULL |
+ 2020-01-01 00:00:19.000 | true | NULL |
+ 2020-01-01 00:00:20.000 | true | NULL |
+ 2020-01-01 00:00:21.000 | false | 21 |
+ 2020-01-01 00:00:22.000 | true | NULL |
+ 2020-01-01 00:00:23.000 | true | NULL |
+ 2020-01-01 00:00:24.000 | true | NULL |
+ 2020-01-01 00:00:25.000 | true | NULL |
+ 2020-01-01 00:00:26.000 | true | NULL |
+ 2020-01-01 00:00:27.000 | true | NULL |
+ 2020-01-01 00:00:28.000 | true | NULL |
+ 2020-01-01 00:00:29.000 | true | NULL |
+ 2020-01-01 00:00:30.000 | true | NULL |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(value, 1);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 1 |
+ 2020-01-01 00:00:17.000 | true | 1 |
+ 2020-01-01 00:00:18.000 | true | 1 |
+ 2020-01-01 00:00:19.000 | true | 1 |
+ 2020-01-01 00:00:20.000 | true | 1 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+ 2020-01-01 00:00:22.000 | true | 1 |
+ 2020-01-01 00:00:23.000 | true | 1 |
+ 2020-01-01 00:00:24.000 | true | 1 |
+ 2020-01-01 00:00:25.000 | true | 1 |
+ 2020-01-01 00:00:26.000 | true | 1 |
+ 2020-01-01 00:00:27.000 | true | 1 |
+ 2020-01-01 00:00:28.000 | true | 1 |
+ 2020-01-01 00:00:29.000 | true | 1 |
+ 2020-01-01 00:00:30.000 | true | 1 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(prev);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 15 |
+ 2020-01-01 00:00:17.000 | true | 15 |
+ 2020-01-01 00:00:18.000 | true | 15 |
+ 2020-01-01 00:00:19.000 | true | 15 |
+ 2020-01-01 00:00:20.000 | true | 15 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+ 2020-01-01 00:00:22.000 | true | 21 |
+ 2020-01-01 00:00:23.000 | true | 21 |
+ 2020-01-01 00:00:24.000 | true | 21 |
+ 2020-01-01 00:00:25.000 | true | 21 |
+ 2020-01-01 00:00:26.000 | true | 21 |
+ 2020-01-01 00:00:27.000 | true | 21 |
+ 2020-01-01 00:00:28.000 | true | 21 |
+ 2020-01-01 00:00:29.000 | true | 21 |
+ 2020-01-01 00:00:30.000 | true | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(next);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 21 |
+ 2020-01-01 00:00:17.000 | true | 21 |
+ 2020-01-01 00:00:18.000 | true | 21 |
+ 2020-01-01 00:00:19.000 | true | 21 |
+ 2020-01-01 00:00:20.000 | true | 21 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(linear);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 16 |
+ 2020-01-01 00:00:17.000 | true | 17 |
+ 2020-01-01 00:00:18.000 | true | 18 |
+ 2020-01-01 00:00:19.000 | true | 19 |
+ 2020-01-01 00:00:20.000 | true | 20 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(null);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | false | 0 |
+ 2020-01-01 00:00:01.000 | false | 1 |
+ 2020-01-01 00:00:02.000 | true | NULL |
+ 2020-01-01 00:00:03.000 | false | 3 |
+ 2020-01-01 00:00:04.000 | true | NULL |
+ 2020-01-01 00:00:05.000 | true | NULL |
+ 2020-01-01 00:00:06.000 | false | 6 |
+ 2020-01-01 00:00:07.000 | true | NULL |
+ 2020-01-01 00:00:08.000 | true | NULL |
+ 2020-01-01 00:00:09.000 | true | NULL |
+ 2020-01-01 00:00:10.000 | false | 10 |
+ 2020-01-01 00:00:11.000 | true | NULL |
+ 2020-01-01 00:00:12.000 | true | NULL |
+ 2020-01-01 00:00:13.000 | true | NULL |
+ 2020-01-01 00:00:14.000 | true | NULL |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | NULL |
+ 2020-01-01 00:00:17.000 | true | NULL |
+ 2020-01-01 00:00:18.000 | true | NULL |
+ 2020-01-01 00:00:19.000 | true | NULL |
+ 2020-01-01 00:00:20.000 | true | NULL |
+ 2020-01-01 00:00:21.000 | false | 21 |
+ 2020-01-01 00:00:22.000 | true | NULL |
+ 2020-01-01 00:00:23.000 | true | NULL |
+ 2020-01-01 00:00:24.000 | true | NULL |
+ 2020-01-01 00:00:25.000 | true | NULL |
+ 2020-01-01 00:00:26.000 | true | NULL |
+ 2020-01-01 00:00:27.000 | true | NULL |
+ 2020-01-01 00:00:28.000 | true | NULL |
+ 2020-01-01 00:00:29.000 | true | NULL |
+ 2020-01-01 00:00:30.000 | true | NULL |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(value, 1);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | false | 0 |
+ 2020-01-01 00:00:01.000 | false | 1 |
+ 2020-01-01 00:00:02.000 | true | 1 |
+ 2020-01-01 00:00:03.000 | false | 3 |
+ 2020-01-01 00:00:04.000 | true | 1 |
+ 2020-01-01 00:00:05.000 | true | 1 |
+ 2020-01-01 00:00:06.000 | false | 6 |
+ 2020-01-01 00:00:07.000 | true | 1 |
+ 2020-01-01 00:00:08.000 | true | 1 |
+ 2020-01-01 00:00:09.000 | true | 1 |
+ 2020-01-01 00:00:10.000 | false | 10 |
+ 2020-01-01 00:00:11.000 | true | 1 |
+ 2020-01-01 00:00:12.000 | true | 1 |
+ 2020-01-01 00:00:13.000 | true | 1 |
+ 2020-01-01 00:00:14.000 | true | 1 |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 1 |
+ 2020-01-01 00:00:17.000 | true | 1 |
+ 2020-01-01 00:00:18.000 | true | 1 |
+ 2020-01-01 00:00:19.000 | true | 1 |
+ 2020-01-01 00:00:20.000 | true | 1 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+ 2020-01-01 00:00:22.000 | true | 1 |
+ 2020-01-01 00:00:23.000 | true | 1 |
+ 2020-01-01 00:00:24.000 | true | 1 |
+ 2020-01-01 00:00:25.000 | true | 1 |
+ 2020-01-01 00:00:26.000 | true | 1 |
+ 2020-01-01 00:00:27.000 | true | 1 |
+ 2020-01-01 00:00:28.000 | true | 1 |
+ 2020-01-01 00:00:29.000 | true | 1 |
+ 2020-01-01 00:00:30.000 | true | 1 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(prev);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | false | 0 |
+ 2020-01-01 00:00:01.000 | false | 1 |
+ 2020-01-01 00:00:02.000 | true | 1 |
+ 2020-01-01 00:00:03.000 | false | 3 |
+ 2020-01-01 00:00:04.000 | true | 3 |
+ 2020-01-01 00:00:05.000 | true | 3 |
+ 2020-01-01 00:00:06.000 | false | 6 |
+ 2020-01-01 00:00:07.000 | true | 6 |
+ 2020-01-01 00:00:08.000 | true | 6 |
+ 2020-01-01 00:00:09.000 | true | 6 |
+ 2020-01-01 00:00:10.000 | false | 10 |
+ 2020-01-01 00:00:11.000 | true | 10 |
+ 2020-01-01 00:00:12.000 | true | 10 |
+ 2020-01-01 00:00:13.000 | true | 10 |
+ 2020-01-01 00:00:14.000 | true | 10 |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 15 |
+ 2020-01-01 00:00:17.000 | true | 15 |
+ 2020-01-01 00:00:18.000 | true | 15 |
+ 2020-01-01 00:00:19.000 | true | 15 |
+ 2020-01-01 00:00:20.000 | true | 15 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+ 2020-01-01 00:00:22.000 | true | 21 |
+ 2020-01-01 00:00:23.000 | true | 21 |
+ 2020-01-01 00:00:24.000 | true | 21 |
+ 2020-01-01 00:00:25.000 | true | 21 |
+ 2020-01-01 00:00:26.000 | true | 21 |
+ 2020-01-01 00:00:27.000 | true | 21 |
+ 2020-01-01 00:00:28.000 | true | 21 |
+ 2020-01-01 00:00:29.000 | true | 21 |
+ 2020-01-01 00:00:30.000 | true | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(next);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | false | 0 |
+ 2020-01-01 00:00:01.000 | false | 1 |
+ 2020-01-01 00:00:02.000 | true | 3 |
+ 2020-01-01 00:00:03.000 | false | 3 |
+ 2020-01-01 00:00:04.000 | true | 6 |
+ 2020-01-01 00:00:05.000 | true | 6 |
+ 2020-01-01 00:00:06.000 | false | 6 |
+ 2020-01-01 00:00:07.000 | true | 10 |
+ 2020-01-01 00:00:08.000 | true | 10 |
+ 2020-01-01 00:00:09.000 | true | 10 |
+ 2020-01-01 00:00:10.000 | false | 10 |
+ 2020-01-01 00:00:11.000 | true | 15 |
+ 2020-01-01 00:00:12.000 | true | 15 |
+ 2020-01-01 00:00:13.000 | true | 15 |
+ 2020-01-01 00:00:14.000 | true | 15 |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 21 |
+ 2020-01-01 00:00:17.000 | true | 21 |
+ 2020-01-01 00:00:18.000 | true | 21 |
+ 2020-01-01 00:00:19.000 | true | 21 |
+ 2020-01-01 00:00:20.000 | true | 21 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(linear);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | false | 0 |
+ 2020-01-01 00:00:01.000 | false | 1 |
+ 2020-01-01 00:00:02.000 | true | 2 |
+ 2020-01-01 00:00:03.000 | false | 3 |
+ 2020-01-01 00:00:04.000 | true | 4 |
+ 2020-01-01 00:00:05.000 | true | 5 |
+ 2020-01-01 00:00:06.000 | false | 6 |
+ 2020-01-01 00:00:07.000 | true | 7 |
+ 2020-01-01 00:00:08.000 | true | 8 |
+ 2020-01-01 00:00:09.000 | true | 9 |
+ 2020-01-01 00:00:10.000 | false | 10 |
+ 2020-01-01 00:00:11.000 | true | 11 |
+ 2020-01-01 00:00:12.000 | true | 12 |
+ 2020-01-01 00:00:13.000 | true | 13 |
+ 2020-01-01 00:00:14.000 | true | 14 |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 16 |
+ 2020-01-01 00:00:17.000 | true | 17 |
+ 2020-01-01 00:00:18.000 | true | 18 |
+ 2020-01-01 00:00:19.000 | true | 19 |
+ 2020-01-01 00:00:20.000 | true | 20 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(null);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | false | 0 |
+ 2020-01-01 00:00:01.000 | false | 1 |
+ 2020-01-01 00:00:02.000 | true | NULL |
+ 2020-01-01 00:00:03.000 | false | 3 |
+ 2020-01-01 00:00:04.000 | true | NULL |
+ 2020-01-01 00:00:05.000 | true | NULL |
+ 2020-01-01 00:00:06.000 | false | 6 |
+ 2020-01-01 00:00:07.000 | true | NULL |
+ 2020-01-01 00:00:08.000 | true | NULL |
+ 2020-01-01 00:00:09.000 | true | NULL |
+ 2020-01-01 00:00:10.000 | false | 10 |
+ 2020-01-01 00:00:11.000 | true | NULL |
+ 2020-01-01 00:00:12.000 | true | NULL |
+ 2020-01-01 00:00:13.000 | true | NULL |
+ 2020-01-01 00:00:14.000 | true | NULL |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | NULL |
+ 2020-01-01 00:00:17.000 | true | NULL |
+ 2020-01-01 00:00:18.000 | true | NULL |
+ 2020-01-01 00:00:19.000 | true | NULL |
+ 2020-01-01 00:00:20.000 | true | NULL |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(value, 1);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | false | 0 |
+ 2020-01-01 00:00:01.000 | false | 1 |
+ 2020-01-01 00:00:02.000 | true | 1 |
+ 2020-01-01 00:00:03.000 | false | 3 |
+ 2020-01-01 00:00:04.000 | true | 1 |
+ 2020-01-01 00:00:05.000 | true | 1 |
+ 2020-01-01 00:00:06.000 | false | 6 |
+ 2020-01-01 00:00:07.000 | true | 1 |
+ 2020-01-01 00:00:08.000 | true | 1 |
+ 2020-01-01 00:00:09.000 | true | 1 |
+ 2020-01-01 00:00:10.000 | false | 10 |
+ 2020-01-01 00:00:11.000 | true | 1 |
+ 2020-01-01 00:00:12.000 | true | 1 |
+ 2020-01-01 00:00:13.000 | true | 1 |
+ 2020-01-01 00:00:14.000 | true | 1 |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 1 |
+ 2020-01-01 00:00:17.000 | true | 1 |
+ 2020-01-01 00:00:18.000 | true | 1 |
+ 2020-01-01 00:00:19.000 | true | 1 |
+ 2020-01-01 00:00:20.000 | true | 1 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(prev);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | false | 0 |
+ 2020-01-01 00:00:01.000 | false | 1 |
+ 2020-01-01 00:00:02.000 | true | 1 |
+ 2020-01-01 00:00:03.000 | false | 3 |
+ 2020-01-01 00:00:04.000 | true | 3 |
+ 2020-01-01 00:00:05.000 | true | 3 |
+ 2020-01-01 00:00:06.000 | false | 6 |
+ 2020-01-01 00:00:07.000 | true | 6 |
+ 2020-01-01 00:00:08.000 | true | 6 |
+ 2020-01-01 00:00:09.000 | true | 6 |
+ 2020-01-01 00:00:10.000 | false | 10 |
+ 2020-01-01 00:00:11.000 | true | 10 |
+ 2020-01-01 00:00:12.000 | true | 10 |
+ 2020-01-01 00:00:13.000 | true | 10 |
+ 2020-01-01 00:00:14.000 | true | 10 |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 15 |
+ 2020-01-01 00:00:17.000 | true | 15 |
+ 2020-01-01 00:00:18.000 | true | 15 |
+ 2020-01-01 00:00:19.000 | true | 15 |
+ 2020-01-01 00:00:20.000 | true | 15 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(next);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | false | 0 |
+ 2020-01-01 00:00:01.000 | false | 1 |
+ 2020-01-01 00:00:02.000 | true | 3 |
+ 2020-01-01 00:00:03.000 | false | 3 |
+ 2020-01-01 00:00:04.000 | true | 6 |
+ 2020-01-01 00:00:05.000 | true | 6 |
+ 2020-01-01 00:00:06.000 | false | 6 |
+ 2020-01-01 00:00:07.000 | true | 10 |
+ 2020-01-01 00:00:08.000 | true | 10 |
+ 2020-01-01 00:00:09.000 | true | 10 |
+ 2020-01-01 00:00:10.000 | false | 10 |
+ 2020-01-01 00:00:11.000 | true | 15 |
+ 2020-01-01 00:00:12.000 | true | 15 |
+ 2020-01-01 00:00:13.000 | true | 15 |
+ 2020-01-01 00:00:14.000 | true | 15 |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 21 |
+ 2020-01-01 00:00:17.000 | true | 21 |
+ 2020-01-01 00:00:18.000 | true | 21 |
+ 2020-01-01 00:00:19.000 | true | 21 |
+ 2020-01-01 00:00:20.000 | true | 21 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(linear);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | false | 0 |
+ 2020-01-01 00:00:01.000 | false | 1 |
+ 2020-01-01 00:00:02.000 | true | 2 |
+ 2020-01-01 00:00:03.000 | false | 3 |
+ 2020-01-01 00:00:04.000 | true | 4 |
+ 2020-01-01 00:00:05.000 | true | 5 |
+ 2020-01-01 00:00:06.000 | false | 6 |
+ 2020-01-01 00:00:07.000 | true | 7 |
+ 2020-01-01 00:00:08.000 | true | 8 |
+ 2020-01-01 00:00:09.000 | true | 9 |
+ 2020-01-01 00:00:10.000 | false | 10 |
+ 2020-01-01 00:00:11.000 | true | 11 |
+ 2020-01-01 00:00:12.000 | true | 12 |
+ 2020-01-01 00:00:13.000 | true | 13 |
+ 2020-01-01 00:00:14.000 | true | 14 |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 16 |
+ 2020-01-01 00:00:17.000 | true | 17 |
+ 2020-01-01 00:00:18.000 | true | 18 |
+ 2020-01-01 00:00:19.000 | true | 19 |
+ 2020-01-01 00:00:20.000 | true | 20 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(null);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | true | NULL |
+ 2020-01-01 00:00:01.000 | true | NULL |
+ 2020-01-01 00:00:02.000 | true | NULL |
+ 2020-01-01 00:00:03.000 | true | NULL |
+ 2020-01-01 00:00:04.000 | true | NULL |
+ 2020-01-01 00:00:05.000 | true | NULL |
+ 2020-01-01 00:00:06.000 | true | NULL |
+ 2020-01-01 00:00:07.000 | true | NULL |
+ 2020-01-01 00:00:08.000 | true | NULL |
+ 2020-01-01 00:00:09.000 | true | NULL |
+ 2020-01-01 00:00:10.000 | true | NULL |
+ 2020-01-01 00:00:11.000 | true | NULL |
+ 2020-01-01 00:00:12.000 | true | NULL |
+ 2020-01-01 00:00:13.000 | true | NULL |
+ 2020-01-01 00:00:14.000 | true | NULL |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | NULL |
+ 2020-01-01 00:00:17.000 | true | NULL |
+ 2020-01-01 00:00:18.000 | true | NULL |
+ 2020-01-01 00:00:19.000 | true | NULL |
+ 2020-01-01 00:00:20.000 | true | NULL |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(value, 1);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | true | 1 |
+ 2020-01-01 00:00:01.000 | true | 1 |
+ 2020-01-01 00:00:02.000 | true | 1 |
+ 2020-01-01 00:00:03.000 | true | 1 |
+ 2020-01-01 00:00:04.000 | true | 1 |
+ 2020-01-01 00:00:05.000 | true | 1 |
+ 2020-01-01 00:00:06.000 | true | 1 |
+ 2020-01-01 00:00:07.000 | true | 1 |
+ 2020-01-01 00:00:08.000 | true | 1 |
+ 2020-01-01 00:00:09.000 | true | 1 |
+ 2020-01-01 00:00:10.000 | true | 1 |
+ 2020-01-01 00:00:11.000 | true | 1 |
+ 2020-01-01 00:00:12.000 | true | 1 |
+ 2020-01-01 00:00:13.000 | true | 1 |
+ 2020-01-01 00:00:14.000 | true | 1 |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 1 |
+ 2020-01-01 00:00:17.000 | true | 1 |
+ 2020-01-01 00:00:18.000 | true | 1 |
+ 2020-01-01 00:00:19.000 | true | 1 |
+ 2020-01-01 00:00:20.000 | true | 1 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(prev);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 15 |
+ 2020-01-01 00:00:17.000 | true | 15 |
+ 2020-01-01 00:00:18.000 | true | 15 |
+ 2020-01-01 00:00:19.000 | true | 15 |
+ 2020-01-01 00:00:20.000 | true | 15 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(next);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | true | 15 |
+ 2020-01-01 00:00:01.000 | true | 15 |
+ 2020-01-01 00:00:02.000 | true | 15 |
+ 2020-01-01 00:00:03.000 | true | 15 |
+ 2020-01-01 00:00:04.000 | true | 15 |
+ 2020-01-01 00:00:05.000 | true | 15 |
+ 2020-01-01 00:00:06.000 | true | 15 |
+ 2020-01-01 00:00:07.000 | true | 15 |
+ 2020-01-01 00:00:08.000 | true | 15 |
+ 2020-01-01 00:00:09.000 | true | 15 |
+ 2020-01-01 00:00:10.000 | true | 15 |
+ 2020-01-01 00:00:11.000 | true | 15 |
+ 2020-01-01 00:00:12.000 | true | 15 |
+ 2020-01-01 00:00:13.000 | true | 15 |
+ 2020-01-01 00:00:14.000 | true | 15 |
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 21 |
+ 2020-01-01 00:00:17.000 | true | 21 |
+ 2020-01-01 00:00:18.000 | true | 21 |
+ 2020-01-01 00:00:19.000 | true | 21 |
+ 2020-01-01 00:00:20.000 | true | 21 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(linear);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:15.000 | false | 15 |
+ 2020-01-01 00:00:16.000 | true | 16 |
+ 2020-01-01 00:00:17.000 | true | 17 |
+ 2020-01-01 00:00:18.000 | true | 18 |
+ 2020-01-01 00:00:19.000 | true | 19 |
+ 2020-01-01 00:00:20.000 | true | 20 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(null);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | true | NULL |
+ 2020-01-01 00:00:01.000 | true | NULL |
+ 2020-01-01 00:00:02.000 | true | NULL |
+ 2020-01-01 00:00:03.000 | true | NULL |
+ 2020-01-01 00:00:04.000 | true | NULL |
+ 2020-01-01 00:00:05.000 | true | NULL |
+ 2020-01-01 00:00:06.000 | true | NULL |
+ 2020-01-01 00:00:07.000 | true | NULL |
+ 2020-01-01 00:00:08.000 | true | NULL |
+ 2020-01-01 00:00:09.000 | true | NULL |
+ 2020-01-01 00:00:10.000 | true | NULL |
+ 2020-01-01 00:00:11.000 | true | NULL |
+ 2020-01-01 00:00:12.000 | true | NULL |
+ 2020-01-01 00:00:13.000 | true | NULL |
+ 2020-01-01 00:00:14.000 | true | NULL |
+ 2020-01-01 00:00:15.000 | true | NULL |
+ 2020-01-01 00:00:16.000 | true | NULL |
+ 2020-01-01 00:00:17.000 | true | NULL |
+ 2020-01-01 00:00:18.000 | true | NULL |
+ 2020-01-01 00:00:19.000 | true | NULL |
+ 2020-01-01 00:00:20.000 | true | NULL |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(value, 1);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | true | 1 |
+ 2020-01-01 00:00:01.000 | true | 1 |
+ 2020-01-01 00:00:02.000 | true | 1 |
+ 2020-01-01 00:00:03.000 | true | 1 |
+ 2020-01-01 00:00:04.000 | true | 1 |
+ 2020-01-01 00:00:05.000 | true | 1 |
+ 2020-01-01 00:00:06.000 | true | 1 |
+ 2020-01-01 00:00:07.000 | true | 1 |
+ 2020-01-01 00:00:08.000 | true | 1 |
+ 2020-01-01 00:00:09.000 | true | 1 |
+ 2020-01-01 00:00:10.000 | true | 1 |
+ 2020-01-01 00:00:11.000 | true | 1 |
+ 2020-01-01 00:00:12.000 | true | 1 |
+ 2020-01-01 00:00:13.000 | true | 1 |
+ 2020-01-01 00:00:14.000 | true | 1 |
+ 2020-01-01 00:00:15.000 | true | 1 |
+ 2020-01-01 00:00:16.000 | true | 1 |
+ 2020-01-01 00:00:17.000 | true | 1 |
+ 2020-01-01 00:00:18.000 | true | 1 |
+ 2020-01-01 00:00:19.000 | true | 1 |
+ 2020-01-01 00:00:20.000 | true | 1 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(prev);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(next);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:00.000 | true | 21 |
+ 2020-01-01 00:00:01.000 | true | 21 |
+ 2020-01-01 00:00:02.000 | true | 21 |
+ 2020-01-01 00:00:03.000 | true | 21 |
+ 2020-01-01 00:00:04.000 | true | 21 |
+ 2020-01-01 00:00:05.000 | true | 21 |
+ 2020-01-01 00:00:06.000 | true | 21 |
+ 2020-01-01 00:00:07.000 | true | 21 |
+ 2020-01-01 00:00:08.000 | true | 21 |
+ 2020-01-01 00:00:09.000 | true | 21 |
+ 2020-01-01 00:00:10.000 | true | 21 |
+ 2020-01-01 00:00:11.000 | true | 21 |
+ 2020-01-01 00:00:12.000 | true | 21 |
+ 2020-01-01 00:00:13.000 | true | 21 |
+ 2020-01-01 00:00:14.000 | true | 21 |
+ 2020-01-01 00:00:15.000 | true | 21 |
+ 2020-01-01 00:00:16.000 | true | 21 |
+ 2020-01-01 00:00:17.000 | true | 21 |
+ 2020-01-01 00:00:18.000 | true | 21 |
+ 2020-01-01 00:00:19.000 | true | 21 |
+ 2020-01-01 00:00:20.000 | true | 21 |
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(linear);
+ _irowts | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:21.000 | false | 21 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(null);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(value, 1);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(prev);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(next);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(linear);
+
diff --git a/tests/army/query/function/in/interp.in b/tests/army/query/function/in/interp.in
index 4825ab46b1..97a9936b8d 100644
--- a/tests/army/query/function/in/interp.in
+++ b/tests/army/query/function/in/interp.in
@@ -13,3 +13,53 @@
 select _irowts as irowts ,tbname as table_name, c2 as c_c2, c3 as c_c3, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2,c3 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (prev) order by irowts, c2, c3;
 select _irowts as irowts ,tbname as table_name, c2 as c_c2, c3 as c_c3, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2,c3 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (linear) order by irowts, c2, c3;
 select _irowts as irowts ,tbname as table_name, c2 as c_c2, c3 as c_c3, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2,c3 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (value, 1) order by irowts, c2, c3;
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(null);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(value, 1);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(prev);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(next);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(linear);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(null);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(value, 1);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(prev);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(next);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(linear);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(null);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(value, 1);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(prev);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(next);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(linear);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(null);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(value, 1);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(prev);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(next);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(linear);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(null);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(value, 1);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(prev);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(next);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(linear);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(null);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(value, 1);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(prev);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(next);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(linear);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(null);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(value, 1);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(prev);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(next);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(linear);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(null);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(value, 1);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(prev);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(next);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(linear);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(null);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(value, 1);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(prev);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(next);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(linear);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(null);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(value, 1);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(prev);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(next);
+select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(linear);
diff --git a/tests/army/query/function/test_interp.py b/tests/army/query/function/test_interp.py
index f903e7be73..106ef1e58e 100644
--- a/tests/army/query/function/test_interp.py
+++ b/tests/army/query/function/test_interp.py
@@ -38,6 +38,7 @@ class TDTestCase(TBase):
             (ts timestamp, c0 tinyint, c1 smallint, c2 int, c3 bigint, c4 double, c5 float, c6 bool, c7 varchar(10), c8 nchar(10), c9 tinyint unsigned, c10 smallint unsigned, c11 int unsigned, c12 bigint unsigned)
             '''
         )
+        tdSql.execute("create table if not exists test.td32861(ts timestamp, c1 int);")
 
         tdLog.printNoPrefix("==========step2:insert data")
 
@@ -45,6 +46,16 @@ class TDTestCase(TBase):
         tdSql.execute(f"insert into test.td32727 values ('2020-02-01 00:00:10', 10, 10, 10, 10, 10.0, 10.0, true, 'varchar', 'nchar', 10, 10, 10, 10)")
         tdSql.execute(f"insert into test.td32727 values ('2020-02-01 00:00:15', 15, 15, 15, 15, 15.0, 15.0, true, 'varchar', 'nchar', 15, 15, 15, 15)")
 
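+        # td32861 is deliberately sparse (rows at 0, 1, 3, 6, 10, 15 and 21 seconds),
+        # so the fill modes driven by in/interp.in are exercised over gaps of varying width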
+        tdSql.execute(
+            """insert into test.td32861 values
+            ('2020-01-01 00:00:00', 0),
+            ('2020-01-01 00:00:01', 1),
+            ('2020-01-01 00:00:03', 3),
+            ('2020-01-01 00:00:06', 6),
+            ('2020-01-01 00:00:10', 10),
+            ('2020-01-01 00:00:15', 15),
+            ('2020-01-01 00:00:21', 21);"""
+        )
 
     def test_normal_query_new(self, testCase):
         # read sql from .sql file and execute
diff --git a/tests/army/storage/compressBasic.py b/tests/army/storage/compressBasic.py
index f24c4dd288..446cb920fb 100644
--- a/tests/army/storage/compressBasic.py
+++ b/tests/army/storage/compressBasic.py
@@ -33,7 +33,26 @@ class TDTestCase(TBase):
         "compressMsgSize" : "100",
     }
     # compress
-    compresses = ["lz4","tsz","zlib","zstd","disabled","xz"]
+    compresses = ["lz4","zlib","zstd","disabled","xz"]
+
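+    # assumed mapping of column type -> default second-stage compression, mirroring
+    # the documented defaults (ints/floats/timestamps: lz4, small ints: zlib,
+    # bool and string types: zstd); used below in place of the single defCompress value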
+    compressDefaultDict = {}
+    compressDefaultDict["BOOL"] = "zstd"
+    compressDefaultDict["TINYINT"] = "zlib"
+    compressDefaultDict["SMALLINT"] = "zlib"
+    compressDefaultDict["INT"] = "lz4"
+    compressDefaultDict["BIGINT"] = "lz4"
+    compressDefaultDict["FLOAT"] = "lz4"
+    compressDefaultDict["DOUBLE"] = "lz4"
+    compressDefaultDict["VARCHAR"] = "zstd"
+    compressDefaultDict["TIMESTAMP"] = "lz4"
+    compressDefaultDict["NCHAR"] = "zstd"
+    compressDefaultDict["TINYINT UNSIGNED"] = "zlib"
+    compressDefaultDict["SMALLINT UNSIGNED"] = "zlib"
+    compressDefaultDict["INT UNSIGNED"] = "lz4"
+    compressDefaultDict["BIGINT UNSIGNED"] = "lz4"
+    compressDefaultDict["BLOB"] = "lz4"
+    compressDefaultDict["VARBINARY"] = "zstd"
+
     # level
     levels = ["high","medium","low"]
@@ -137,15 +156,20 @@ class TDTestCase(TBase):
         defEncodes = [ "delta-i","delta-i","simple8b","simple8b","simple8b","simple8b","simple8b","simple8b",
                        "simple8b","simple8b","delta-d","delta-d","bit-packing",
                        "disabled","disabled","disabled","disabled"]
-
+
         count = tdSql.getRows()
         for i in range(count):
             node = tdSql.getData(i, 3)
             if node == "TAG":
                 break
             # check
-            tdSql.checkData(i, 4, defEncodes[i])
-            tdSql.checkData(i, 5, self.defCompress)
+            tdLog.info(f"check default encode {tdSql.getData(i, 1)}")
+            defaultValue = self.compressDefaultDict.get(tdSql.getData(i, 1))
+            if defaultValue is None:
+                defaultValue = self.defCompress
+            tdLog.info(f"check default compress {tdSql.getData(i, 1)} {defaultValue}")
+            tdSql.checkData(i, 5, defaultValue)
             tdSql.checkData(i, 6, self.defLevel)
 
         # geometry encode is disabled
@@ -185,10 +209,6 @@ class TDTestCase(TBase):
             comps.append(self.compresses[0]) # add lz4
         for comp in comps:
             for i in range(self.colCnt - 1):
-                col = f"c{i}"
-                sql = f"alter table {tbname} modify column {col} COMPRESS '{comp}';"
-                tdSql.execute(sql, show=True)
-                self.checkDataDesc(tbname, i + 1, 5, comp)
         self.writeData(1000)
 
         # alter float(c9) double(c10) to tsz
@@ -326,6 +346,7 @@ class TDTestCase(TBase):
 
         while offset < count:
             sql = f"select * from {tbname} limit {step} offset {offset}"
+            tdLog.info(sql)
             tdSql.query(sql)
             self.autoGen.dataCorrect(tdSql.res, tdSql.getRows(), step)
             offset += step
diff --git a/tests/army/storage/s3/s3Basic.py b/tests/army/storage/s3/s3Basic.py
index 273a6129e1..cefd4ef60d 100644
--- a/tests/army/storage/s3/s3Basic.py
+++ b/tests/army/storage/s3/s3Basic.py
@@ -47,7 +47,7 @@ for test:
 
 class TDTestCase(TBase):
-    index = eutil.cpuRand(20) + 1
+    index = eutil.cpuRand(40) + 1
     bucketName = f"ci-bucket{index}"
     updatecfgDict = {
         "supportVnodes":"1000",
@@ -63,6 +63,10 @@ class TDTestCase(TBase):
     tdLog.info(f"assign bucketName is {bucketName}\n")
     maxFileSize = (128 + 10) * 1014 * 1024  # add 10M buffer
 
+    def exit(self, log):
+        self.dropDb(True)
+        tdLog.exit(log)
+
     def insertData(self):
         tdLog.info(f"insert data.")
         # taosBenchmark run
@@ -107,8 +111,8 @@ class TDTestCase(TBase):
         loop = 0
         rets = []
         overCnt = 0
-        while loop < 200:
-            time.sleep(3)
+        while loop < 150:
+            time.sleep(2)
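+            # assumption: 150 polls at 2s apiece caps the S3 upload wait near five minutes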
 
             # check upload to s3
             rets = eos.runRetList(cmd)
@@ -134,7 +138,7 @@ class TDTestCase(TBase):
 
         # check can pass
         if overCnt > 0:
-            tdLog.exit(f"s3 have {overCnt} files over size.")
+            self.exit(f"s3 has {overCnt} files over the size limit.")
 
 
     def doAction(self):
@@ -159,7 +163,7 @@ class TDTestCase(TBase):
                 return True
             time.sleep(1)
 
-        tdLog.exit(f"stream count is not expect . expect = 100000 or 100001 real={count} . sql={sql}")
+        self.exit(f"stream count is not as expected. expect = 100000 or 100001, real={count}, sql={sql}")
 
 
     def checkCreateDb(self, keepLocal, chunkSize, compact):
diff --git a/tests/ci/Dockerfile b/tests/ci/Dockerfile
index d3d574b484..1caa6fea9e 100644
--- a/tests/ci/Dockerfile
+++ b/tests/ci/Dockerfile
@@ -7,7 +7,7 @@ RUN apt-get install -y locales psmisc sudo tree libgeos-dev libgflags2.2 libgfl
 RUN sed -i 's/# en_US.UTF-8/en_US.UTF-8/' /etc/locale.gen && locale-gen
 RUN pip3 config set global.index-url http://admin:123456@192.168.0.212:3141/admin/dev/+simple/
 RUN pip3 config set global.trusted-host 192.168.0.212
-RUN pip3 install taospy==2.7.16 taos-ws-py==0.3.3 pandas psutil fabric2 requests faker simplejson toml pexpect tzlocal distro decorator loguru hyperloglog
+RUN pip3 install taospy==2.7.16 taos-ws-py==0.3.5 pandas psutil fabric2 requests faker simplejson toml pexpect tzlocal distro decorator loguru hyperloglog
 ENV LANG=en_US.UTF-8 LANGUAGE=en_US.UTF-8 LC_ALL=en_US.UTF-8
 RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E298A3A825C0D65DFD57CBB651716619E084DAB9
 RUN add-apt-repository 'deb https://cloud.r-project.org/bin/linux/ubuntu focal-cran40/'
diff --git a/tests/docs-examples-test/python.sh b/tests/docs-examples-test/python.sh
index 6a25683b58..3a9812637c 100644
--- a/tests/docs-examples-test/python.sh
+++ b/tests/docs-examples-test/python.sh
@@ -130,7 +130,7 @@ pip3 install kafka-python
 python3 kafka_example_consumer.py
 
 # 21
-pip3 install taos-ws-py==0.3.3
+pip3 install taos-ws-py==0.3.5
 python3 conn_websocket_pandas.py
 
 # 22
diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task
index d52a32adc7..b7298d359b 100644
--- a/tests/parallel_test/cases.task
+++ b/tests/parallel_test/cases.task
@@ -232,6 +232,14 @@
 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/agg_group_NotReturnValue.py -Q 4
 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/td-32548.py
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stddev_test.py
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stddev_test.py -Q 2
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stddev_test.py -Q 3
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stddev_test.py -Q 4
+,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/checkpoint_info.py -N 4
+,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/checkpoint_info2.py -N 4
+,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/test_multi_insert.py
+
 ,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreDnode.py -N 5 -M 3 -i False
 ,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreVnode.py -N 5 -M 3 -i False
 ,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreMnode.py -N 5 -M 3 -i False
@@ -353,6 +361,7 @@
 ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege_all.py
 ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/fsync.py
 ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/multilevel.py
+,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/multilevel_createdb.py
 ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/ttl.py
 ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/ttlChangeOnWrite.py
 ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/compress_tsz1.py
@@ -1294,6 +1303,7 @@
 #,,y,script,./test.sh -f tsim/mnode/basic3.sim
 ,,y,script,./test.sh -f tsim/mnode/basic4.sim
 ,,y,script,./test.sh -f tsim/mnode/basic5.sim
+,,y,script,./test.sh -f tsim/mnode/basic6.sim
 ,,y,script,./test.sh -f tsim/show/basic.sim
 ,,y,script,./test.sh -f tsim/table/autocreate.sim
 ,,y,script,./test.sh -f tsim/table/basic1.sim
diff --git a/tests/parallel_test/run_case.sh b/tests/parallel_test/run_case.sh
index 5b0d34fc0a..a78d0aa4a4 100755
--- a/tests/parallel_test/run_case.sh
+++ b/tests/parallel_test/run_case.sh
@@ -76,9 +76,9 @@ ulimit -c unlimited
 md5sum /usr/lib/libtaos.so.1
 md5sum /home/TDinternal/debug/build/lib/libtaos.so
-#get python connector and update: taospy 2.7.16 taos-ws-py 0.3.3
+#get python connector and update: taospy 2.7.16 taos-ws-py 0.3.5
 pip3 install taospy==2.7.16
-pip3 install taos-ws-py==0.3.3
+pip3 install taos-ws-py==0.3.5
 $TIMEOUT_CMD $cmd
 RET=$?
 echo "cmd exit code: $RET"
diff --git a/tests/pytest/auto_crash_gen.py b/tests/pytest/auto_crash_gen.py
index 4e4679db6a..f6b31b4691 100755
--- a/tests/pytest/auto_crash_gen.py
+++ b/tests/pytest/auto_crash_gen.py
@@ -16,7 +16,18 @@ msg_dict = {0: "success", 1: "failed", 2: "other errors", 3: "crash occured", 4:
 
 # formal
 hostname = socket.gethostname()
-group_url = 'https://open.feishu.cn/open-apis/bot/v2/hook/56c333b5-eae9-4c18-b0b6-7e4b7174f5c9'
+group_url_test = (
+    'https://open.feishu.cn/open-apis/bot/v2/hook/7e409a8e-4390-4043-80d0-4e0dd2cbae7d'
+)
+
+notification_robot_url = (
+    "https://open.feishu.cn/open-apis/bot/v2/hook/56c333b5-eae9-4c18-b0b6-7e4b7174f5c9"
+)
+
+alert_robot_url = (
+    "https://open.feishu.cn/open-apis/bot/v2/hook/02363732-91f1-49c4-879c-4e98cf31a5f3"
+)
+
 
 def get_msg(text):
     return {
@@ -37,12 +48,12 @@ def get_msg(text):
     }
 
 
-def send_msg(json):
+def send_msg(url: str, json: dict):
     headers = {
         'Content-Type': 'application/json'
     }
 
-    req = requests.post(url=group_url, headers=headers, json=json)
+    req = requests.post(url=url, headers=headers, json=json)
     inf = req.json()
     if "StatusCode" in inf and inf["StatusCode"] == 0:
         pass
@@ -355,18 +366,27 @@ def main():
         core_dir = "none"
 
     text = f'''
-    exit status: {msg_dict[status]}
-    test scope: crash_gen
-    owner: pxiao
-    hostname: {hostname}
-    start time: {starttime}
-    end time: {endtime}
-    git commit : {git_commit}
-    log dir: {log_dir}
-    core dir: {core_dir}
-    cmd: {cmd}'''
+Result: {msg_dict[status]}
+
+Details
+Owner: Jayden Jia
+Start time: {starttime}
+End time: {endtime}
+Hostname: {hostname}
+Commit: {git_commit}
+Cmd: {cmd}
+Log dir: {log_dir}
+Core dir: {core_dir}
+'''
 
-    send_msg(get_msg(text))
+    text_result = text.split("Result: ")[1].split("Details")[0].strip()
+    print(text_result)
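+    # route by outcome: success goes to the notification robot, anything else to the
+    # alert robot (routing assumed from the webhook variable names)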
"https://open.feishu.cn/open-apis/bot/v2/hook/02363732-91f1-49c4-879c-4e98cf31a5f3" +) + def get_msg(text): return { @@ -40,13 +51,12 @@ def get_msg(text): } -def send_msg(json): +def send_msg(url:str,json:dict): headers = { 'Content-Type': 'application/json' } - - req = requests.post(url=group_url, headers=headers, json=json) + req = requests.post(url=url, headers=headers, json=json) inf = req.json() if "StatusCode" in inf and inf["StatusCode"] == 0: pass @@ -389,18 +399,28 @@ def main(): core_dir = "none" text = f''' - exit status: {msg_dict[status]} - test scope: crash_gen - owner: pxiao - hostname: {hostname} - start time: {starttime} - end time: {endtime} - git commit : {git_commit} - log dir: {log_dir} - core dir: {core_dir} - cmd: {cmd}''' +Result: {msg_dict[status]} - send_msg(get_msg(text)) +Details +Owner: Jayden Jia +Start time: {starttime} +End time: {endtime} +Hostname: {hostname} +Commit: {git_commit} +Cmd: {cmd} +Log dir: {log_dir} +Core dir: {core_dir} +''' + + text_result=text.split("Result: ")[1].split("Details")[0].strip() + print(text_result) + + if text_result == "success": + send_msg(notification_robot_url, get_msg(text)) + else: + send_msg(alert_robot_url, get_msg(text)) + + #send_msg(get_msg(text)) except Exception as e: print("exception:", e) exit(status) diff --git a/tests/pytest/auto_crash_gen_valgrind_cluster.py b/tests/pytest/auto_crash_gen_valgrind_cluster.py index 22f453e51e..522ad48640 100755 --- a/tests/pytest/auto_crash_gen_valgrind_cluster.py +++ b/tests/pytest/auto_crash_gen_valgrind_cluster.py @@ -16,7 +16,18 @@ msg_dict = {0: "success", 1: "failed", 2: "other errors", 3: "crash occured", 4: # formal hostname = socket.gethostname() -group_url = 'https://open.feishu.cn/open-apis/bot/v2/hook/56c333b5-eae9-4c18-b0b6-7e4b7174f5c9' +group_url_test = ( + 'https://open.feishu.cn/open-apis/bot/v2/hook/7e409a8e-4390-4043-80d0-4e0dd2cbae7d' +) + +notification_robot_url = ( + "https://open.feishu.cn/open-apis/bot/v2/hook/56c333b5-eae9-4c18-b0b6-7e4b7174f5c9" +) + +alert_robot_url = ( + "https://open.feishu.cn/open-apis/bot/v2/hook/02363732-91f1-49c4-879c-4e98cf31a5f3" +) + def get_msg(text): return { @@ -37,12 +48,12 @@ def get_msg(text): } -def send_msg(json): +def send_msg(url:str,json:dict): headers = { 'Content-Type': 'application/json' } - req = requests.post(url=group_url, headers=headers, json=json) + req = requests.post(url=url, headers=headers, json=json) inf = req.json() if "StatusCode" in inf and inf["StatusCode"] == 0: pass @@ -376,18 +387,28 @@ def main(): core_dir = "none" text = f''' - exit status: {msg_dict[status]} - test scope: crash_gen - owner: pxiao - hostname: {hostname} - start time: {starttime} - end time: {endtime} - git commit : {git_commit} - log dir: {log_dir} - core dir: {core_dir} - cmd: {cmd}''' - - send_msg(get_msg(text)) +Result: {msg_dict[status]} + +Details +Owner: Jayden Jia +Start time: {starttime} +End time: {endtime} +Hostname: {hostname} +Commit: {git_commit} +Cmd: {cmd} +Log dir: {log_dir} +Core dir: {core_dir} +''' + + text_result=text.split("Result: ")[1].split("Details")[0].strip() + print(text_result) + + if text_result == "success": + send_msg(notification_robot_url, get_msg(text)) + else: + send_msg(alert_robot_url, get_msg(text)) + + #send_msg(get_msg(text)) except Exception as e: print("exception:", e) exit(status) diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index 1d3333264a..46b7e1f795 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -843,9 +843,10 @@ class TDSql: 
tdSql.query("select * from information_schema.ins_vnodes") #result: dnode_id|vgroup_id|db_name|status|role_time|start_time|restored| + results = list(tdSql.queryResult) for vnode_group_id in db_vgroups_list: - print(tdSql.queryResult) - for result in tdSql.queryResult: + for result in results: + print(f'result[2] is {result[2]}, db_name is {db_name}, result[1] is {result[1]}, vnode_group_id is {vnode_group_id}') if result[2] == db_name and result[1] == vnode_group_id: tdLog.debug(f"dbname: {db_name}, vgroup :{vnode_group_id}, dnode is {result[0]}") print(useful_trans_dnodes_list) diff --git a/tests/requirements.txt b/tests/requirements.txt index c6dd044c86..a036c2b3d0 100644 --- a/tests/requirements.txt +++ b/tests/requirements.txt @@ -9,4 +9,5 @@ requests pexpect faker pyopenssl -hyperloglog \ No newline at end of file +hyperloglog +tzlocal \ No newline at end of file diff --git a/tests/script/api/stmt2-geometry-test.c b/tests/script/api/stmt2-geometry-test.c new file mode 100644 index 0000000000..46fd9081ae --- /dev/null +++ b/tests/script/api/stmt2-geometry-test.c @@ -0,0 +1,107 @@ +#include +#include +#include +#include "taos.h" + +int8_t byteArray[21] = {0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x59, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59, 0x40}; +int8_t worngArray[21] = {0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x59, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59, 0x40}; + +void do_query(TAOS* taos, const char* sql) { + printf("[sql]%s\n", sql); + TAOS_RES* result = taos_query(taos, sql); + int code = taos_errno(result); + if (code) { + printf(" failed to query: %s, reason:%s\n", sql, taos_errstr(result)); + taos_free_result(result); + return; + } + taos_free_result(result); +} + +void execute_test(TAOS* taos, const char* tbname1, const char* tbname2, int8_t* tag2, int8_t* col2, + const char* case_desc, int size) { + // prepare stmt + TAOS_STMT2_OPTION option = {0, true, false, NULL, NULL}; + TAOS_STMT2* stmt = taos_stmt2_init(taos, &option); + const char* sql; + if (tbname1 == "tb41") { + sql = "insert into db.? using db.stb2 tags(?, ?) values(?,?)"; + } else { + sql = "insert into db.? using db.stb tags(?, ?) values(?,?)"; + } + int code = taos_stmt2_prepare(stmt, sql, 0); + printf("\n%s\n insert into db.? using db.stb tags(?, ?) values(?,?)\n", case_desc); + if (code != 0) { + printf(" failed to execute taos_stmt2_prepare. error:%s\n", taos_stmt2_error(stmt)); + taos_stmt2_close(stmt); + return; + } + + // prepare data + int t1_val = 0; + int64_t ts = 1591060628000; + const char* tbname[2] = {tbname1, tbname2}; + int32_t length[5] = {sizeof(int), 2, sizeof(int64_t), size, 20, sizeof(col2)}; + + TAOS_STMT2_BIND tags[2][2] = { + {{TSDB_DATA_TYPE_INT, &t1_val, &length[0], NULL, 2}, {TSDB_DATA_TYPE_GEOMETRY, tag2, &length[3], NULL, 2}}, + {{TSDB_DATA_TYPE_INT, &t1_val, &length[0], NULL, 2}, {TSDB_DATA_TYPE_GEOMETRY, tag2, &length[3], NULL, 2}}}; + TAOS_STMT2_BIND params[2][2] = { + {{TSDB_DATA_TYPE_TIMESTAMP, &ts, &length[2], NULL, 1}, {TSDB_DATA_TYPE_GEOMETRY, col2, &length[3], NULL, 1}}, + {{TSDB_DATA_TYPE_TIMESTAMP, &ts, &length[2], NULL, 1}, {TSDB_DATA_TYPE_GEOMETRY, col2, &length[3], NULL, 1}}}; + TAOS_STMT2_BIND* tagv[2] = {&tags[0][0], &tags[1][0]}; + TAOS_STMT2_BIND* paramv[2] = {¶ms[0][0], ¶ms[1][0]}; + + TAOS_STMT2_BINDV bindv = {2, &tbname[0], &tagv[0], ¶mv[0]}; + code = taos_stmt2_bind_param(stmt, &bindv, -1); + if (code != 0) { + printf(" failed to bind param. 
error:%s\n", taos_stmt2_error(stmt)); + taos_stmt2_close(stmt); + return; + } + + if (taos_stmt2_exec(stmt, NULL)) { + printf(" failed to execute insert statement.error:%s\n", taos_stmt2_error(stmt)); + taos_stmt2_close(stmt); + return; + } + printf("[ok]\n"); + + taos_stmt2_close(stmt); +} + +void test1(TAOS* taos) { execute_test(taos, "tb11", "tb12", &byteArray[0], &byteArray[0], "[normal]case 1", 21); } + +void test2(TAOS* taos) { + execute_test(taos, "tb21", "tb22", &worngArray[0], &byteArray[0], "[wrong WKB tag]case 2", 21); +} + +void test3(TAOS* taos) { + execute_test(taos, "tb31", "tb32", "POLYGON((0 0, 4 0, 4 4, 0 4, 0 0))", "POLYGON((0 0, 4 0, 4 4, 0 4, 0 0))", + "[wrong WKT col]case 3", sizeof("POLYGON((0 0, 4 0, 4 4, 0 4, 0 0))")); +} + +void test4(TAOS* taos) { execute_test(taos, "tb41", "tb42", &byteArray[0], &byteArray[0], "[wrong size]case 4", 21); } + +int main() { + TAOS* taos = taos_connect("localhost", "root", "taosdata", "", 0); + if (!taos) { + printf("failed to connect to db, reason:%s\n", taos_errstr(taos)); + exit(1); + } + // init test db & stb table + do_query(taos, "drop database if exists db"); + do_query(taos, "create database db"); + do_query(taos, "create table db.stb (ts timestamp, b geometry(100)) tags(t1 int, t2 geometry(100))"); + do_query(taos, "create table db.stb2 (ts timestamp, b geometry(100)) tags(t1 int, t2 geometry(10))"); + + test1(taos); + test2(taos); + test3(taos); + test4(taos); + + taos_close(taos); + taos_cleanup(); +} diff --git a/tests/script/api/stmt2-nchar.c b/tests/script/api/stmt2-nchar.c new file mode 100644 index 0000000000..3952bd5898 --- /dev/null +++ b/tests/script/api/stmt2-nchar.c @@ -0,0 +1,273 @@ +// sample code to verify all TDengine API +// to compile: gcc -o apitest apitest.c -ltaos + +#include +#include +#include +#include +#include "taos.h" +static int64_t count = 10000; + +int64_t genReqid() { + count += 100; + return count; +} + +void stmtAsyncQueryCb(void* param, TAOS_RES* pRes, int code) { + int affected_rows = taos_affected_rows(pRes); + return; + /* + SSP_CB_PARAM* qParam = (SSP_CB_PARAM*)param; + if (code == 0 && pRes) { + if (qParam->fetch) { + taos_fetch_rows_a(pRes, sqAsyncFetchCb, param); + } else { + if (qParam->free) { + taos_free_result(pRes); + } + *qParam->end = 1; + } + } else { + sqError("select", taos_errstr(pRes)); + *qParam->end = 1; + taos_free_result(pRes); + } + */ +} + +void veriry_stmt(TAOS* taos) { + TAOS_RES* result = taos_query(taos, "drop database if exists test;"); + taos_free_result(result); + usleep(100000); + result = taos_query(taos, "create database test;"); + + int code = taos_errno(result); + if (code != 0) { + printf("\033[31mfailed to create database, reason:%s\033[0m\n", taos_errstr(result)); + taos_free_result(result); + return; + } + taos_free_result(result); + + usleep(100000); + taos_select_db(taos, "test"); + + // create table + /* + const char* sql = + "create table m1 (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin " + "binary(40), blob nchar(10))"; + */ + const char* sql = + "create table m1 (ts timestamp, blob2 nchar(10), blob nchar(10),blob3 nchar(10),blob4 nchar(10),blob5 " + "nchar(10))"; + result = taos_query(taos, sql); + code = taos_errno(result); + if (code != 0) { + printf("\033[31mfailed to create table, reason:%s\033[0m\n", taos_errstr(result)); + taos_free_result(result); + return; + } + taos_free_result(result); + + // insert 10 records + struct { + int64_t ts[10]; + char blob[10][1]; + char blob2[10][1]; + char 
+  int code = taos_errno(result);
+  if (code != 0) {
+    printf("\033[31mfailed to create database, reason:%s\033[0m\n", taos_errstr(result));
+    taos_free_result(result);
+    return;
+  }
+  taos_free_result(result);
+
+  usleep(100000);
+  taos_select_db(taos, "test");
+
+  // create table
+  /*
+  const char* sql =
+      "create table m1 (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin "
+      "binary(40), blob nchar(10))";
+  */
+  const char* sql =
+      "create table m1 (ts timestamp, blob2 nchar(10), blob nchar(10),blob3 nchar(10),blob4 nchar(10),blob5 "
+      "nchar(10))";
+  result = taos_query(taos, sql);
+  code = taos_errno(result);
+  if (code != 0) {
+    printf("\033[31mfailed to create table, reason:%s\033[0m\n", taos_errstr(result));
+    taos_free_result(result);
+    return;
+  }
+  taos_free_result(result);
+
+  // insert 10 records
+  struct {
+    int64_t ts[10];
+    char    blob[10][1];
+    char    blob2[10][1];
+    char    blob3[10][1];
+    char    blob4[10][1];
+    char    blob5[10][1];
+  } v;
+
+  int32_t* t64_len = malloc(sizeof(int32_t) * 10);
+  int32_t* blob_len = malloc(sizeof(int32_t) * 10);
+  int32_t* blob_len2 = malloc(sizeof(int32_t) * 10);
+  int32_t* blob_len3 = malloc(sizeof(int32_t) * 10);
+  int32_t* blob_len4 = malloc(sizeof(int32_t) * 10);
+  int32_t* blob_len5 = malloc(sizeof(int32_t) * 10);
+
+#include "time.h"
+  clock_t           start, end;
+  TAOS_STMT2_OPTION option = {0, true, true, stmtAsyncQueryCb, NULL};
+
+  start = clock();
+  TAOS_STMT2* stmt = taos_stmt2_init(taos, &option);
+  end = clock();
+  printf("init time:%f\n", (double)(end - start) / CLOCKS_PER_SEC);
+
+  TAOS_STMT2_BIND params[10];
+  char            is_null[10] = {0};
+
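+  // Binding is column-wise: one TAOS_STMT2_BIND per column, each spanning all
+  // 10 rows, with the shared is_null array marking every row as non-null.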
error:%s\033[0m\n", taos_stmt_errstr(stmt)); + taos_stmt_close(stmt); + return; + } + */ + + int64_t ts = 1591060628000; + for (int i = 0; i < 10; ++i) { + is_null[i] = 0; + + v.ts[i] = ts++; + + v.blob[i][0] = 'a' + i; + v.blob2[i][0] = 'f' + i; + v.blob3[i][0] = 't' + i; + v.blob4[i][0] = 'A' + i; + v.blob5[i][0] = 'G' + i; + + // v.blob2[i] = malloc(strlen("一二三四五六七十九八")); + // v.blob[i] = malloc(strlen("十九八七六五四三二一")); + + // strcpy(v.blob2[i], "一二三四五六七十九八"); + // strcpy(v.blob[i], "十九八七六五四三二一"); + + blob_len[i] = sizeof(char); + blob_len2[i] = sizeof(char); + blob_len3[i] = sizeof(char); + blob_len4[i] = sizeof(char); + blob_len5[i] = sizeof(char); + } + + char* tbname = "m1"; + TAOS_STMT2_BIND* bind_cols[1] = {¶ms[0]}; + TAOS_STMT2_BINDV bindv = {1, &tbname, NULL, &bind_cols[0]}; + start = clock(); + // taos_stmt2_bind_param(stmt, "m1", NULL, params, -1); + taos_stmt2_bind_param(stmt, &bindv, -1); + end = clock(); + printf("bind time:%f\n", (double)(end - start) / CLOCKS_PER_SEC); + // taos_stmt_bind_param_batch(stmt, params); + // taos_stmt_add_batch(stmt); + /* + int param_count = -1; + code = taos_stmt2_param_count(stmt, ¶m_count); + if (code != 0) { + printf("\033[31mfailed to execute taos_stmt_param_count. error:%s\033[0m\n", taos_stmt_errstr(stmt)); + taos_stmt_close(stmt); + return; + } + printf("param_count: %d\n", param_count); + */ + TAOS_FIELD_E* fields = NULL; + int field_count = -1; + start = clock(); + code = taos_stmt2_get_fields(stmt, TAOS_FIELD_COL, &field_count, NULL); + end = clock(); + printf("get fields time:%f\n", (double)(end - start) / CLOCKS_PER_SEC); + if (code != 0) { + printf("\033[31mfailed to execute taos_stmt_param_count. error:%s\033[0m\n", taos_stmt_errstr(stmt)); + taos_stmt_close(stmt); + return; + } + printf("col field_count: %d\n", field_count); + start = clock(); + taos_stmt2_free_fields(stmt, fields); + end = clock(); + printf("free time:%f\n", (double)(end - start) / CLOCKS_PER_SEC); + /* + code = taos_stmt2_get_fields(stmt, TAOS_FIELD_TAG, &field_count, &fields); + if (code != 0) { + printf("\033[31mfailed to execute taos_stmt_param_count. 
error:%s\033[0m\n", taos_stmt_errstr(stmt)); + taos_stmt_close(stmt); + return; + } + printf("tag field_count: %d\n", field_count); + taos_stmt2_free_fields(stmt, fields); + */ + // if (taos_stmt_execute(stmt) != 0) { + start = clock(); + // if (taos_stmt2_exec(stmt, NULL, stmtAsyncQueryCb, NULL) != 0) { + if (taos_stmt2_exec(stmt, NULL) != 0) { + printf("\033[31mfailed to execute insert statement.error:%s\033[0m\n", taos_stmt_errstr(stmt)); + taos_stmt2_close(stmt); + return; + } + end = clock(); + printf("exec time:%f\n", (double)(end - start) / CLOCKS_PER_SEC); + + taos_stmt2_close(stmt); + + free(blob_len); + free(blob_len2); + free(blob_len5); + free(blob_len3); + free(blob_len4); +} + +int main(int argc, char* argv[]) { + const char* host = "127.0.0.1"; + const char* user = "root"; + const char* passwd = "taosdata"; + + taos_options(TSDB_OPTION_TIMEZONE, "GMT-8"); + TAOS* taos = taos_connect(host, user, passwd, "", 0); + if (taos == NULL) { + printf("\033[31mfailed to connect to db, reason:%s\033[0m\n", taos_errstr(taos)); + exit(1); + } + + printf("********* verify stmt query **********\n"); + veriry_stmt(taos); + + printf("done\n"); + taos_close(taos); + taos_cleanup(); +} diff --git a/tests/script/api/stmt2.c b/tests/script/api/stmt2.c index 82537905dc..5b9f41baff 100644 --- a/tests/script/api/stmt2.c +++ b/tests/script/api/stmt2.c @@ -81,8 +81,8 @@ void veriry_stmt(TAOS* taos) { float f4[10]; double f8[10]; char bin[10][40]; - char blob[10][80]; - char blob2[10][80]; + char blob[10][1]; + char blob2[10][1]; } v; int32_t* t8_len = malloc(sizeof(int32_t) * 10); @@ -218,8 +218,14 @@ void veriry_stmt(TAOS* taos) { for (int j = 0; j < sizeof(v.bin[0]); ++j) { v.bin[i][j] = (char)(i + '0'); } - strcpy(v.blob2[i], "一二三四五六七十九八"); - strcpy(v.blob[i], "一二三四五六七八九十"); + v.blob[i][0] = 'a' + i; + v.blob2[i][0] = 'A' + i; + + // v.blob2[i] = malloc(strlen("一二三四五六七十九八")); + // v.blob[i] = malloc(strlen("十九八七六五四三二一")); + + // strcpy(v.blob2[i], "一二三四五六七十九八"); + // strcpy(v.blob[i], "十九八七六五四三二一"); t8_len[i] = sizeof(int8_t); t16_len[i] = sizeof(int16_t); @@ -228,10 +234,9 @@ void veriry_stmt(TAOS* taos) { float_len[i] = sizeof(float); double_len[i] = sizeof(double); bin_len[i] = sizeof(v.bin[0]); - blob_len[i] = (int32_t)strlen(v.blob[i]); - blob_len2[i] = (int32_t)strlen(v.blob2[i]); + blob_len[i] = sizeof(char); + blob_len2[i] = sizeof(char); } - char* tbname = "m1"; TAOS_STMT2_BIND* bind_cols[1] = {¶ms[0]}; TAOS_STMT2_BINDV bindv = {1, &tbname, NULL, &bind_cols[0]}; diff --git a/tests/script/tsim/compress/compress2.sim b/tests/script/tsim/compress/compress2.sim index 0af6f87de4..179317dfbb 100644 --- a/tests/script/tsim/compress/compress2.sim +++ b/tests/script/tsim/compress/compress2.sim @@ -38,7 +38,7 @@ sql alter table $tb modify column b level 'm' sql_error alter table $tb modify column b level 'l' # already exist -sql_error alter table $tb modify column b compress 'lz4' +sql alter table $tb modify column b compress 'lz4' sql alter table $tb modify column b compress 'xz' sql alter table $tb modify column b compress 'zstd' sql_error alter table $tb modify column b compress 'tsz' @@ -147,7 +147,7 @@ sql alter table $stb modify column b level 'm' sql_error alter table $stb modify column b level 'l' # already exist sql desc $stb -sql_error alter table $stb modify column b compress 'lz4' +sql alter table $stb modify column b compress 'lz4' sql alter table $stb modify column b compress 'xz' sql alter table $stb modify column b compress 'zstd' sql_error alter table $stb modify column b compress 
'tsz' diff --git a/tests/script/tsim/db/basic1.sim b/tests/script/tsim/db/basic1.sim index 8eb6dce759..f3239957d3 100644 --- a/tests/script/tsim/db/basic1.sim +++ b/tests/script/tsim/db/basic1.sim @@ -53,6 +53,8 @@ if $rows != 5 then return -1 endi +sleep 500 + print =============== show vgroups2 sql show d2.vgroups if $rows != 2 then @@ -126,13 +128,14 @@ if $data12 != d2 then endi if $data13 != leader then + print expect leader , actual $13 return -1 endi -print $data14 -print $data15 +print $data14 , $data15 if $data16 != 1 then + print expect 1, acutal $data16 return -1 endi diff --git a/tests/script/tsim/mnode/basic6.sim b/tests/script/tsim/mnode/basic6.sim new file mode 100644 index 0000000000..4ee56ff555 --- /dev/null +++ b/tests/script/tsim/mnode/basic6.sim @@ -0,0 +1,413 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 +system sh/deploy.sh -n dnode4 -i 4 +system sh/cfg.sh -n dnode1 -c compressMsgSize -v 0 +system sh/cfg.sh -n dnode2 -c compressMsgSize -v 0 +system sh/cfg.sh -n dnode3 -c compressMsgSize -v 0 +system sh/cfg.sh -n dnode4 -c compressMsgSize -v 0 +system sh/exec.sh -n dnode1 -s start +sql connect + +print =============== step1: create dnodes +sql create dnode $hostname port 7200 +sql create dnode $hostname port 7300 +sql create dnode $hostname port 7400 + +$x = 0 +step1: + $x = $x + 1 + sleep 1000 + if $x == 5 then + return -1 + endi +sql select * from information_schema.ins_dnodes +if $data(1)[4] != ready then + goto step1 +endi + +print =============== step2: create dnodes - with error +sql_error create mnode on dnode 1; +sql_error create mnode on dnode 2; +sql_error create mnode on dnode 3; +sql_error create mnode on dnode 4; +sql_error create mnode on dnode 5; +sql_error create mnode on dnode 6; + +print =============== step3: create mnode 2 and 3 +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +system sh/exec.sh -n dnode4 -s start +$x = 0 +step3: + $x = $x + 1 + sleep 1000 + if $x == 5 then + return -1 + endi +sql select * from information_schema.ins_dnodes +if $data(2)[4] != ready then + goto step3 +endi +if $data(3)[4] != ready then + goto step3 +endi +if $data(4)[4] != ready then + goto step3 +endi + +sql create mnode on dnode 2 +sql create mnode on dnode 3 + +$x = 0 +step31: + $x = $x + 1 + sleep 1000 + if $x == 50 then + return -1 + endi +sql select * from information_schema.ins_mnodes +$leaderNum = 0 +if $data(1)[2] == leader then + $leaderNum = 1 +endi +if $data(2)[2] == leader then + $leaderNum = 1 +endi +if $data(3)[2] == leader then + $leaderNum = 1 +endi +if $leaderNum == 0 then + goto step31 +endi + +print =============== step4: create dnodes - with error +sql_error create mnode on dnode 1 +sql_error create mnode on dnode 2; +sql_error create mnode on dnode 3; +sql_error create mnode on dnode 4; +sql_error create mnode on dnode 5; +sql_error create mnode on dnode 6; + +print =============== step5: drop mnodes - with error +sql_error drop mnode on dnode 1 +sql_error drop mnode on dnode 4 +sql_error drop mnode on dnode 5 +sql_error drop mnode on dnode 6 + +system sh/exec.sh -n dnode2 -s stop +$x = 0 +step5: + $x = $x + 1 + sleep 1000 + if $x == 10 then + return -1 + endi +sql select * from information_schema.ins_dnodes +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +print ===> $data10 $data11 $data12 $data13 $data14 $data15 +print ===> $data20 $data21 $data22 $data23 $data24 $data25 +print ===> $data30 $data31 $data32 $data33 
$data34 $data35 +if $data(1)[4] != ready then + goto step5 +endi +if $data(2)[4] != offline then + goto step5 +endi +if $data(3)[4] != ready then + goto step5 +endi +if $data(4)[4] != ready then + goto step5 +endi + +sql_error drop mnode on dnode 2 + +system sh/exec.sh -n dnode2 -s start +$x = 0 +step51: + $x = $x + 1 + sleep 1000 + if $x == 10 then + return -1 + endi +sql select * from information_schema.ins_dnodes +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +print ===> $data10 $data11 $data12 $data13 $data14 $data15 +print ===> $data20 $data21 $data22 $data23 $data24 $data25 +print ===> $data30 $data31 $data32 $data33 $data34 $data35 +if $data(1)[4] != ready then + goto step51 +endi +if $data(2)[4] != ready then + goto step51 +endi +if $data(3)[4] != ready then + goto step51 +endi +if $data(4)[4] != ready then + goto step51 +endi + +print =============== step6: stop mnode1 +system sh/exec.sh -n dnode1 -s stop +# sql_error drop mnode on dnode 1 + +$x = 0 +step61: + $x = $x + 1 + sleep 1000 + if $x == 10 then + return -1 + endi +sql select * from information_schema.ins_mnodes -x step61 +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +print ===> $data10 $data11 $data12 $data13 $data14 $data15 +print ===> $data20 $data21 $data22 $data23 $data24 $data25 +$leaderNum = 0 +if $data(2)[2] == leader then + $leaderNum = 1 +endi +if $data(3)[2] == leader then + $leaderNum = 1 +endi +if $leaderNum != 1 then + goto step61 +endi + +print =============== step7: start mnode1 and wait it online +system sh/exec.sh -n dnode1 -s start + +$x = 0 +step71: + $x = $x + 1 + sleep 1000 + if $x == 50 then + return -1 + endi +sql select * from information_schema.ins_dnodes +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +print ===> $data10 $data11 $data12 $data13 $data14 $data15 +print ===> $data20 $data21 $data22 $data23 $data24 $data25 +print ===> $data30 $data31 $data32 $data33 $data34 $data35 +if $data(1)[4] != ready then + goto step71 +endi +if $data(2)[4] != ready then + goto step71 +endi +if $data(3)[4] != ready then + goto step71 +endi +if $data(4)[4] != ready then + goto step71 +endi + +print =============== step8: stop mnode1 and drop it +system sh/exec.sh -n dnode1 -s stop + +$x = 0 +step81: + $x = $x + 1 + sleep 1000 + if $x == 10 then + return -1 + endi +sql select * from information_schema.ins_mnodes +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +print ===> $data10 $data11 $data12 $data13 $data14 $data15 +print ===> $data20 $data21 $data22 $data23 $data24 $data25 +$leaderNum = 0 +if $data(1)[2] == leader then + $leaderNum = 1 +endi +if $data(2)[2] == leader then + $leaderNum = 1 +endi +if $data(3)[2] == leader then + $leaderNum = 1 +endi +if $leaderNum != 1 then + goto step81 +endi + +print =============== step9: start mnode1 and wait it dropped +print check mnode has leader step9a +$x = 0 +step9a: + $x = $x + 1 + sleep 1000 + if $x == 10 then + return -1 + endi +print check mnode leader +sql select * from information_schema.ins_mnodes +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +print ===> $data10 $data11 $data12 $data13 $data14 $data15 +print ===> $data20 $data21 $data22 $data23 $data24 $data25 +$leaderNum = 0 +if $data(1)[2] == leader then + $leaderNum = 1 +endi +if $data(2)[2] == leader then + $leaderNum = 1 +endi +if $data(3)[2] == leader then + $leaderNum = 1 +endi +if $leaderNum != 1 then + goto step9a +endi + +print start dnode1 step9b +system sh/exec.sh -n dnode1 -s start +$x = 0 +step9b: + $x = $x + 1 + sleep 1000 + if $x 
== 10 then + return -1 + endi +print check dnode1 ready +sql select * from information_schema.ins_dnodes +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +print ===> $data10 $data11 $data12 $data13 $data14 $data15 +print ===> $data20 $data21 $data22 $data23 $data24 $data25 +print ===> $data30 $data31 $data32 $data33 $data34 $data35 +if $data(1)[4] != ready then + goto step9b +endi +if $data(2)[4] != ready then + goto step9b +endi +if $data(3)[4] != ready then + goto step9b +endi +if $data(4)[4] != ready then + goto step9b +endi + +sleep 4000 +print check mnode has leader step9c +$x = 0 +step9c: + $x = $x + 1 + sleep 1000 + if $x == 10 then + return -1 + endi +print check mnode leader +sql select * from information_schema.ins_mnodes +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +print ===> $data10 $data11 $data12 $data13 $data14 $data15 +print ===> $data20 $data21 $data22 $data23 $data24 $data25 +$leaderNum = 0 +if $data(1)[2] == leader then + $leaderNum = 1 +endi +if $data(2)[2] == leader then + $leaderNum = 1 +endi +if $data(3)[2] == leader then + $leaderNum = 1 +endi +if $leaderNum != 1 then + goto step9c +endi + +print drop mnode step9d +sql drop mnode on dnode 1 + +$x = 0 +step9d: + $x = $x + 1 + sleep 1000 + if $x == 20 then + return -1 + endi +print check mnode leader +sql select * from information_schema.ins_mnodes +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +print ===> $data10 $data11 $data12 $data13 $data14 $data15 +print ===> $data20 $data21 $data22 $data23 $data24 $data25 +$leaderNum = 0 +if $data(1)[2] == leader then + $leaderNum = 1 +endi +if $data(2)[2] == leader then + $leaderNum = 1 +endi +if $data(3)[2] == leader then + $leaderNum = 1 +endi +if $leaderNum != 1 then + goto step9d +endi +if $rows != 2 then + goto step9d +endi + +print =============== stepa: create mnode1 again +sql create mnode on dnode 1 +$x = 0 +stepa: + $x = $x + 1 + sleep 1000 + if $x == 10 then + return -1 + endi +sql select * from information_schema.ins_mnodes +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +print ===> $data10 $data11 $data12 $data13 $data14 $data15 +print ===> $data20 $data21 $data22 $data23 $data24 $data25 +$leaderNum = 0 +if $data(1)[2] == leader then + $leaderNum = 1 +endi +if $data(2)[2] == leader then + $leaderNum = 1 +endi +if $data(3)[2] == leader then + $leaderNum = 1 +endi +if $leaderNum == 0 then + goto stepa +endi +if $leaderNum != 1 then + return -1 +endi + +$x = 0 +stepb: + $x = $x + 1 + sleep 1000 + if $x == 10 then + print ====> dnode not ready! 
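+  # all four dnodes must report ready within 10 one-second polls of information_schema.ins_dnodes; otherwise the case fails here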
+ return -1 + endi +sql select * from information_schema.ins_dnodes +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +print ===> $data10 $data11 $data12 $data13 $data14 $data15 +print ===> $data20 $data21 $data22 $data23 $data24 $data25 +print ===> $data30 $data31 $data32 $data33 $data34 $data35 +if $rows != 4 then + return -1 +endi +if $data(1)[4] != ready then + goto stepb +endi +if $data(2)[4] != ready then + goto stepb +endi +if $data(3)[4] != ready then + goto stepb +endi +if $data(4)[4] != ready then + goto stepb +endi + +system sh/exec.sh -n dnode1 -s stop +system sh/exec.sh -n dnode2 -s stop +system sh/exec.sh -n dnode3 -s stop +system sh/exec.sh -n dnode4 -s stop diff --git a/tests/script/tsim/stream/streamFwcIntervalCheckpoint.sim b/tests/script/tsim/stream/streamFwcIntervalCheckpoint.sim new file mode 100644 index 0000000000..ed72d87e9a --- /dev/null +++ b/tests/script/tsim/stream/streamFwcIntervalCheckpoint.sim @@ -0,0 +1,67 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 + +system sh/cfg.sh -n dnode1 -c checkpointInterval -v 60 + +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 4; +sql use test; + +sql create stable st(ts timestamp, a int, b int , c int)tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stream streams1 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt1 as select _wstart, count(a) from st partition by tbname interval(2s); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2 as select _wstart, count(a) from st interval(2s); + +run tsim/stream/checkTaskStatus.sim + +sleep 70000 + + +print restart taosd 01 ...... + +system sh/stop_dnodes.sh + +system sh/exec.sh -n dnode1 -s start + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(now + 3000a,1,1,1); + +$loop_count = 0 +loop0: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print select * from streamt1; +sql select * from streamt1; + +print $data00 $data01 $data02 + +if $rows == 0 then + goto loop0 +endi + +print select * from streamt2; +sql select * from streamt2; + +print $data00 $data01 $data02 + +if $rows == 0 then + goto loop0 +endi + +print end + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/system-test/0-others/information_schema.py b/tests/system-test/0-others/information_schema.py index c1a3942db6..538aa1ad63 100644 --- a/tests/system-test/0-others/information_schema.py +++ b/tests/system-test/0-others/information_schema.py @@ -222,7 +222,7 @@ class TDTestCase: tdSql.query("select * from information_schema.ins_columns where db_name ='information_schema'") tdLog.info(len(tdSql.queryResult)) - tdSql.checkEqual(True, len(tdSql.queryResult) in range(281, 282)) + tdSql.checkEqual(True, len(tdSql.queryResult) in range(282, 283)) tdSql.query("select * from information_schema.ins_columns where db_name ='performance_schema'") tdSql.checkEqual(56, len(tdSql.queryResult)) diff --git a/tests/system-test/0-others/multilevel_createdb.py b/tests/system-test/0-others/multilevel_createdb.py new file mode 100644 index 0000000000..70131a760b --- /dev/null +++ b/tests/system-test/0-others/multilevel_createdb.py @@ -0,0 +1,89 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. 
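+#
+# This case configures four dataDir mount points (/mnt/data1..4) and verifies
+# that creating a 20-vgroup database spreads the vnode wal directories across
+# all of them.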
+# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + + +from util.log import * +from util.cases import * +from util.sql import * +from util.common import * +from util.sqlset import * +import glob + +def scanFiles(pattern): + res = [] + for f in glob.iglob(pattern): + res += [f] + return res + +def checkFiles(pattern, state): + res = scanFiles(pattern) + tdLog.info(res) + num = len(res) + if num: + if state: + tdLog.info("%s: %d files exist. expect: files exist" % (pattern, num)) + else: + tdLog.exit("%s: %d files exist. expect: files not exist." % (pattern, num)) + else: + if state: + tdLog.exit("%s: %d files exist. expect: files exist" % (pattern, num)) + else: + tdLog.info("%s: %d files exist. expect: files not exist." % (pattern, num)) + +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.setsql = TDSetSql() + + def basic(self): + tdLog.info("============== basic test ===============") + cfg={ + '/mnt/data1 0 1 0' : 'dataDir', + '/mnt/data2 0 0 0' : 'dataDir', + '/mnt/data3 0 0 0' : 'dataDir', + '/mnt/data4 0 0 0' : 'dataDir' + } + tdSql.createDir('/mnt/data1') + tdSql.createDir('/mnt/data2') + tdSql.createDir('/mnt/data3') + tdSql.createDir('/mnt/data4') + + tdDnodes.stop(1) + tdDnodes.deploy(1,cfg) + tdDnodes.start(1) + + checkFiles(r'/mnt/data1/*/*',1) + checkFiles(r'/mnt/data2/*/*',0) + + tdSql.execute('create database nws vgroups 20 stt_trigger 1 wal_level 1 wal_retention_period 0') + + checkFiles(r'/mnt/data1/vnode/*/wal',5) + checkFiles(r'/mnt/data2/vnode/*/wal',5) + checkFiles(r'/mnt/data3/vnode/*/wal',5) + checkFiles(r'/mnt/data4/vnode/*/wal',5) + + def run(self): + self.basic() + + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/1-insert/test_multi_insert.py b/tests/system-test/1-insert/test_multi_insert.py new file mode 100644 index 0000000000..d1b6d28ffd --- /dev/null +++ b/tests/system-test/1-insert/test_multi_insert.py @@ -0,0 +1,32 @@ +from util.sql import * +from util.common import * +import taos +taos.taos_connect +class TDTestCase: + def init(self, conn, logSql, replicaVar = 1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + self.conn = conn + tdSql.init(conn.cursor(), logSql) + def initdb(self): + tdSql.execute("drop database if exists d0") + tdSql.execute("create database d0") + tdSql.execute("use d0") + tdSql.execute("create stable stb0 (ts timestamp, w_ts timestamp, opc nchar(100), quality int) tags(t0 int)") + tdSql.execute("create table t0 using stb0 tags(1)") + tdSql.execute("create table t1 using stb0 tags(2)") + def multi_insert(self): + for i in range(5): + tdSql.execute(f"insert into t1 values(1721265436000, now() + {i + 1}s, '0', 12) t1(opc, quality, ts) values ('opc2', 192, now()+ {i + 2}s) t1(ts, opc, quality) values(now() + {i + 3}s, 'opc4', 10) t1 values(1721265436000, now() + {i + 4}s, '1', 191) t1(opc, quality, ts) values('opc5', 192, now() + {i + 5}s) t1 values(now(), now() + {i + 6}s, '2', 
192)") + tdSql.execute("insert into t0 values(1721265436000,now(),'0',192) t0(quality,w_ts,ts) values(192,now(),1721265326000) t0(quality,w_t\ +s,ts) values(190,now()+1s,1721265326000) t0 values(1721265436000,now()+2s,'1',191) t0(quality,w_ts,ts) values(192,now()+3s,\ +1721265326002) t0(ts,w_ts,opc,quality) values(1721265436003,now()+4s,'3',193) t0 values(now(), now() + 4s , '2', 192)") + def run(self): + self.initdb() + self.multi_insert() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/2-query/group_partition.py b/tests/system-test/2-query/group_partition.py index 7ee528841c..74f5e86267 100644 --- a/tests/system-test/2-query/group_partition.py +++ b/tests/system-test/2-query/group_partition.py @@ -422,21 +422,36 @@ class TDTestCase: def test_TS5567(self): tdSql.query(f"select const_col from (select 1 as const_col from {self.dbname}.{self.stable}) t group by const_col") - tdSql.checkRows(50) + tdSql.checkRows(1) tdSql.query(f"select const_col from (select 1 as const_col from {self.dbname}.{self.stable}) t partition by const_col") tdSql.checkRows(50) tdSql.query(f"select const_col from (select 1 as const_col, count(c1) from {self.dbname}.{self.stable} t group by c1) group by const_col") - tdSql.checkRows(10) + tdSql.checkRows(1) tdSql.query(f"select const_col from (select 1 as const_col, count(c1) from {self.dbname}.{self.stable} t group by c1) partition by const_col") tdSql.checkRows(10) tdSql.query(f"select const_col as c_c from (select 1 as const_col from {self.dbname}.{self.stable}) t group by c_c") - tdSql.checkRows(50) + tdSql.checkRows(1) tdSql.query(f"select const_col as c_c from (select 1 as const_col from {self.dbname}.{self.stable}) t partition by c_c") tdSql.checkRows(50) tdSql.query(f"select const_col from (select 1 as const_col, count(c1) from {self.dbname}.{self.stable} t group by c1) group by 1") - tdSql.checkRows(10) + tdSql.checkRows(1) tdSql.query(f"select const_col from (select 1 as const_col, count(c1) from {self.dbname}.{self.stable} t group by c1) partition by 1") tdSql.checkRows(10) + + def test_TD_32883(self): + sql = "select avg(c1), t9 from db.stb group by t9,t9, tbname" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(5) + sql = "select avg(c1), t10 from db.stb group by t10,t10, tbname" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(5) + sql = "select avg(c1), t10 from db.stb partition by t10,t10, tbname" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(5) + sql = "select avg(c1), concat(t9,t10) from db.stb group by concat(t9,t10), concat(t9,t10),tbname" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(5) + def run(self): tdSql.prepare() self.prepare_db() @@ -470,6 +485,7 @@ class TDTestCase: self.test_event_window(nonempty_tb_num) self.test_TS5567() + self.test_TD_32883() ## test old version before changed # self.test_groupby('group', 0, 0) diff --git a/tests/system-test/2-query/partition_by_col.py b/tests/system-test/2-query/partition_by_col.py index ef88e88cbd..da7fe78124 100644 --- a/tests/system-test/2-query/partition_by_col.py +++ b/tests/system-test/2-query/partition_by_col.py @@ -313,7 +313,21 @@ class TDTestCase: order_by_list = 'ts,c1,c2,c3,c4,c5,c6,c7,c8,c9,t1,t2,t3,t4,t5,t6' self.prepare_and_query_and_compare(sqls, order_by_list, compare_what=COMPARE_LEN) + + def test_tsdb_read(self): + tdSql.execute('delete from t0') + tdSql.execute('flush database test') + 
for i in range(0, 4096): + tdSql.execute(f"insert into test.t0 values({1537146000000 + i}, 1,1,1,1,1,1,1,'a','1')") + tdSql.execute("flush database test") + tdSql.execute(f"insert into t0 values({1537146000000 + 4095}, 1,1,1,1,1,1,1,'a','1')") + for i in range(4095, 4096*2 + 100): + tdSql.execute(f"insert into test.t0 values({1537146000000 + i}, 1,1,1,1,1,1,1,'a','1')") + tdSql.execute("flush database test") + time.sleep(5) + tdSql.query('select first(ts), last(ts) from t0', queryTimes=1) + tdSql.checkRows(1) def run(self): self.prepareTestEnv() @@ -323,6 +337,8 @@ class TDTestCase: self.test_sort_for_partition_res() self.test_sort_for_partition_interval() self.test_sort_for_partition_no_agg_limit() + self.test_tsdb_read() + def stop(self): tdSql.close() diff --git a/tests/system-test/2-query/project_group.py b/tests/system-test/2-query/project_group.py index 19fe8b1cf0..a251854213 100644 --- a/tests/system-test/2-query/project_group.py +++ b/tests/system-test/2-query/project_group.py @@ -15,6 +15,30 @@ class TDTestCase: self.batchNum = 5 self.ts = 1537146000000 + def groupby_value(self): + tdSql.query('select 1 from stb group by now') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + tdSql.query('select 1 from stb group by "1"') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + tdSql.query('select count(*) from stb group by now') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 12) + tdSql.query('select count(*) from stb group by now+1') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 12) + tdSql.query('select 1, count(*) from stb group by now, "1"') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 12) + tdSql.query('select count(*) as cc from sta1 as a join sta2 as b on a.ts = b.ts group by now') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 3) + tdSql.query('select a.tbname, count(*) as cc from sta1 as a join sta2 as b on a.ts = b.ts group by a.tbname, "1"') + tdSql.checkRows(1) + tdSql.checkData(0, 1, 3) + def run(self): dbname = "db" tdSql.prepare() @@ -59,6 +83,9 @@ class TDTestCase: tdSql.checkRows(2) tdSql.query('select col1 > 0 and col2 > 0 from stb') tdSql.checkRows(12) + + self.groupby_value() + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/system-test/2-query/stddev_test.py b/tests/system-test/2-query/stddev_test.py new file mode 100644 index 0000000000..c0cb51fe57 --- /dev/null +++ b/tests/system-test/2-query/stddev_test.py @@ -0,0 +1,54 @@ +import numpy as np +from util.log import * +from util.cases import * +from util.sql import * +from util.common import * +from util.sqlset import * + +''' +Test case for TS-5150 +''' +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.ts = 1537146000000 + def initdabase(self): + tdSql.execute('create database if not exists db_test vgroups 2 buffer 10') + tdSql.execute('use db_test') + tdSql.execute('create stable stb(ts timestamp, delay int) tags(groupid int)') + tdSql.execute('create table t1 using stb tags(1)') + tdSql.execute('create table t2 using stb tags(2)') + tdSql.execute('create table t3 using stb tags(3)') + tdSql.execute('create table t4 using stb tags(4)') + tdSql.execute('create table t5 using stb tags(5)') + tdSql.execute('create table t6 using stb tags(6)') + def insert_data(self): + for i in range(5000): + tdSql.execute(f"insert into t1 values({self.ts + i * 1000}, {i%5})") + tdSql.execute(f"insert into t2 values({self.ts + 
i * 1000}, {i%5})") + tdSql.execute(f"insert into t3 values({self.ts + i * 1000}, {i%5})") + + def verify_stddev(self): + for i in range(20): + tdSql.query(f'SELECT MAX(CASE WHEN delay != 0 THEN delay ELSE NULL END) AS maxDelay,\ + MIN(CASE WHEN delay != 0 THEN delay ELSE NULL END) AS minDelay,\ + AVG(CASE WHEN delay != 0 THEN delay ELSE NULL END) AS avgDelay,\ + STDDEV(CASE WHEN delay != 0 THEN delay ELSE NULL END) AS jitter,\ + COUNT(CASE WHEN delay = 0 THEN 1 ELSE NULL END) AS timeoutCount,\ + COUNT(*) AS totalCount from stb where ts between {1537146000000 + i * 1000} and {1537146000000 + (i+10) * 1000}') + res = tdSql.queryResult[0][3] + assert res > 0.8 + def run(self): + self.initdabase() + self.insert_data() + self.verify_stddev() + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) + diff --git a/tests/system-test/2-query/union.py b/tests/system-test/2-query/union.py index 2aa01f2c02..5104489592 100644 --- a/tests/system-test/2-query/union.py +++ b/tests/system-test/2-query/union.py @@ -369,8 +369,46 @@ class TDTestCase: ''' ) + def test_TS_5630(self): + sql = "CREATE DATABASE `ep_iot` BUFFER 256 CACHESIZE 20 CACHEMODEL 'both' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 2 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 3 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0" + tdSql.execute(sql, queryTimes=1) + tdLog.info("database ep_iot created") + sql = "CREATE STABLE `ep_iot`.`sldc_dp` (`ts` TIMESTAMP, `data_write_time` TIMESTAMP, `jz1fdgl` DOUBLE, `jz1ssfdfh` DOUBLE, `jz1fdmh` DOUBLE, `jz1gdmh` DOUBLE, `jz1qjrhl` DOUBLE, `jz1zhcydl` DOUBLE, `jz1zkby` DOUBLE, `jz1zzqyl` DOUBLE, `jz1zzqwda` DOUBLE, `jz1zzqwdb` DOUBLE, `jz1zzqll` DOUBLE, `jz1gswd` DOUBLE, `jz1gsll` DOUBLE, `jz1glxl` DOUBLE, `jz1qjrh` DOUBLE, `jz1zhrxl` DOUBLE, `jz1gmjassllfk` DOUBLE, `jz1gmjasslllj` DOUBLE, `jz1gmjbssllfk` DOUBLE, `jz1gmjbsslllj` DOUBLE, `jz1gmjcssllfk` DOUBLE, `jz1gmjcsslllj` DOUBLE, `jz1gmjdssllfk` DOUBLE, `jz1gmjdsslllj` DOUBLE, `jz1gmjessllfk` DOUBLE, `jz1gmjesslllj` DOUBLE, `jz1gmjfssllfk` DOUBLE, `jz1gmjfsslllj` DOUBLE, `jz1zrqwda` DOUBLE, `jz1zrqwdb` DOUBLE, `jz1zrzqyl` DOUBLE, `jz1mmjadl` DOUBLE, `jz1mmjbdl` DOUBLE, `jz1mmjcdl` DOUBLE, `jz1mmjddl` DOUBLE, `jz1mmjedl` DOUBLE, `jz1mmjfdl` DOUBLE, `jz1cyqckwda` DOUBLE, `jz1cyqckwdb` DOUBLE, `jz1njswd` DOUBLE, `jz1nqqxhsckawd` DOUBLE, `jz1nqqxhsckbwd` DOUBLE, `jz1nqqxhsrkawd` DOUBLE, `jz1nqqxhsrkbwd` DOUBLE, `jz1kyqackyqwdsel` DOUBLE, `jz1kyqbckyqwdsel` DOUBLE, `jz1yfjackyqwd` DOUBLE, `jz1yfjbckyqwd` DOUBLE, `jz1trkyqwd` DOUBLE, `jz1trkyqwd1` DOUBLE, `jz1trkyqwd2` DOUBLE, `jz1trkyqwd3` DOUBLE, `jz1tckjyqwd1` DOUBLE, `jz1tckjyqwd2` DOUBLE, `jz1tckyqwd1` DOUBLE, `jz1bya` DOUBLE, `jz1byb` DOUBLE, `jz1pqwda` DOUBLE, `jz1pqwdb` DOUBLE, `jz1gmjadl` DOUBLE, `jz1gmjbdl` DOUBLE, `jz1gmjcdl` DOUBLE, `jz1gmjddl` DOUBLE, `jz1gmjedl` DOUBLE, `jz1gmjfdl` DOUBLE, `jz1yfjadl` DOUBLE, `jz1yfjbdl` DOUBLE, `jz1ycfjadl` DOUBLE, `jz1ycfjbdl` DOUBLE, `jz1sfjadl` DOUBLE, `jz1sfjbdl` DOUBLE, `jz1fdjyggl` DOUBLE, `jz1fdjwggl` DOUBLE, `jz1sjzs` DOUBLE, `jz1zfl` DOUBLE, `jz1ltyl` DOUBLE, `jz1smb` DOUBLE, `jz1rll` DOUBLE, `jz1grd` DOUBLE, `jz1zjwd` DOUBLE, `jz1yl` DOUBLE, `jz1kyqckwd` DOUBLE, `jz1abmfsybrkcy` DOUBLE, `jz1bbmfsybrkcy` DOUBLE, `jz1abjcsdmfytwdzdz` DOUBLE, `jz1bbjcsdmfytwdzdz` DOUBLE, `jz2fdgl` 
DOUBLE, `jz2ssfdfh` DOUBLE, `jz2fdmh` DOUBLE, `jz2gdmh` DOUBLE, `jz2qjrhl` DOUBLE, `jz2zhcydl` DOUBLE, `jz2zkby` DOUBLE, `jz2zzqyl` DOUBLE, `jz2zzqwda` DOUBLE, `jz2zzqwdb` DOUBLE, `jz2zzqll` DOUBLE, `jz2gswd` DOUBLE, `jz2gsll` DOUBLE, `jz2glxl` DOUBLE, `jz2qjrh` DOUBLE, `jz2zhrxl` DOUBLE, `jz2gmjassllfk` DOUBLE, `jz2gmjasslllj` DOUBLE, `jz2gmjbssllfk` DOUBLE, `jz2gmjbsslllj` DOUBLE, `jz2gmjcssllfk` DOUBLE, `jz2gmjcsslllj` DOUBLE, `jz2gmjdssllfk` DOUBLE, `jz2gmjdsslllj` DOUBLE, `jz2gmjessllfk` DOUBLE, `jz2gmjesslllj` DOUBLE, `jz2gmjfssllfk` DOUBLE, `jz2gmjfsslllj` DOUBLE, `jz2zrqwda` DOUBLE, `jz2zrqwdb` DOUBLE, `jz2zrzqyl` DOUBLE, `jz2mmjadl` DOUBLE, `jz2mmjbdl` DOUBLE, `jz2mmjcdl` DOUBLE, `jz2mmjddl` DOUBLE, `jz2mmjedl` DOUBLE, `jz2mmjfdl` DOUBLE, `jz2cyqckwda` DOUBLE, `jz2cyqckwdb` DOUBLE, `jz2njswd` DOUBLE, `jz2nqqxhsckawd` DOUBLE, `jz2nqqxhsckbwd` DOUBLE, `jz2nqqxhsrkawd` DOUBLE, `jz2nqqxhsrkbwd` DOUBLE, `jz2kyqackyqwdsel` DOUBLE, `jz2kyqbckyqwdsel` DOUBLE, `jz2yfjackyqwd` DOUBLE, `jz2yfjbckyqwd` DOUBLE, `jz2trkyqwd` DOUBLE, `jz2trkyqwd1` DOUBLE, `jz2trkyqwd2` DOUBLE, `jz2trkyqwd3` DOUBLE, `jz2tckjyqwd1` DOUBLE, `jz2tckjyqwd2` DOUBLE, `jz2tckyqwd1` DOUBLE, `jz2bya` DOUBLE, `jz2byb` DOUBLE, `jz2pqwda` DOUBLE, `jz2pqwdb` DOUBLE, `jz2gmjadl` DOUBLE, `jz2gmjbdl` DOUBLE, `jz2gmjcdl` DOUBLE, `jz2gmjddl` DOUBLE, `jz2gmjedl` DOUBLE, `jz2gmjfdl` DOUBLE, `jz2yfjadl` DOUBLE, `jz2yfjbdl` DOUBLE, `jz2ycfjadl` DOUBLE, `jz2ycfjbdl` DOUBLE, `jz2sfjadl` DOUBLE, `jz2sfjbdl` DOUBLE, `jz2fdjyggl` DOUBLE, `jz2fdjwggl` DOUBLE, `jz2sjzs` DOUBLE, `jz2zfl` DOUBLE, `jz2ltyl` DOUBLE, `jz2smb` DOUBLE, `jz2rll` DOUBLE, `jz2grd` DOUBLE, `jz2zjwd` DOUBLE, `jz2yl` DOUBLE, `jz2kyqckwd` DOUBLE, `jz2abmfsybrkcy` DOUBLE, `jz2bbmfsybrkcy` DOUBLE, `jz2abjcsdmfytwdzdz` DOUBLE, `jz2bbjcsdmfytwdzdz` DOUBLE) TAGS (`iot_hub_id` VARCHAR(100), `device_group_code` VARCHAR(100), `device_code` VARCHAR(100))" + tdLog.info("stable ep_iot.sldc_dp created") + tdSql.execute(sql, queryTimes=1) + sql = "insert into ep_iot.sldc_dp_t1 using ep_iot.sldc_dp tags('a','a','a') values(now, now, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9,0,1);" + tdSql.execute(sql, queryTimes=1) + sql = "insert into ep_iot.sldc_dp_t1 using ep_iot.sldc_dp tags('b','b','b') values(now, now, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9,0,1);" + tdSql.execute(sql, queryTimes=1) + sql = "insert into ep_iot.sldc_dp_t1 using ep_iot.sldc_dp tags('c','c','c') values(now, now, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9,0,1);" + tdSql.execute(sql, queryTimes=1) + sql = "insert into ep_iot.sldc_dp_t1 using ep_iot.sldc_dp tags('d','d','d') values(now, now, 
0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9,0,1);" + tdSql.execute(sql, queryTimes=1) + sql = "insert into ep_iot.sldc_dp_t1 using ep_iot.sldc_dp tags('e','e','e') values(now, now, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9,0,1);" + tdSql.execute(sql, queryTimes=1) + sql = "select scdw_code, scdw_name, jzmc, fdgl, jzzt from ((select '01072016' as scdw_code, '盛鲁电厂' as scdw_name, '机组1' as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '01072016' as scdw_code, '盛鲁电厂' as scdw_name, '机组2' as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '00103673' as scdw_code, '鲁西电厂' as scdw_name, '机组1'as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '00103673' as scdw_code, '鲁西电厂' as scdw_name, '机组2'as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '01061584' as scdw_code, '富源热电' as scdw_name, '机组1'as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt ,last(ts) as ts from ep_iot.sldc_dp) union all ( select '01061584' as scdw_code, '富源热电' as scdw_name, '机组2'as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt ,last(ts) as ts from ep_iot.sldc_dp)) where scdw_code like '%%';" + tdSql.query(sql, queryTimes=1) + tdSql.checkCols(5) + tdSql.checkRows(6) + + sql = "select scdw_name, scdw_code, jzmc, fdgl, jzzt from ((select '01072016' as scdw_code, '盛鲁电厂' as scdw_name, '机组1' as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '01072016' as scdw_code, '盛鲁电厂' as scdw_name, '机组2' as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '00103673' as scdw_code, '鲁西电厂' as scdw_name, '机组1'as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '00103673' as scdw_code, '鲁西电厂' as scdw_name, '机组2'as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '01061584' as scdw_code, '富源热电' as scdw_name, '机组1'as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt ,last(ts) as ts from ep_iot.sldc_dp) union all ( select '01061584' as scdw_code, '富源热电' as scdw_name, '机组2'as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt ,last(ts) as ts from ep_iot.sldc_dp)) where scdw_code like '%%';" + tdSql.query(sql, queryTimes=1) + tdSql.checkCols(5) + tdSql.checkRows(6) + sql = "select scdw_name, scdw_code, jzzt from ((select '01072016' as scdw_code, '盛鲁电厂' as scdw_name, '机组1' as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '01072016' as scdw_code, '盛鲁电厂' as scdw_name, '机组2' as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '00103673' as scdw_code, '鲁西电厂' as scdw_name, '机组1'as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from 
ep_iot.sldc_dp) union all ( select '00103673' as scdw_code, '鲁西电厂' as scdw_name, '机组2'as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '01061584' as scdw_code, '富源热电' as scdw_name, '机组1'as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt ,last(ts) as ts from ep_iot.sldc_dp) union all ( select '01061584' as scdw_code, '富源热电' as scdw_name, '机组2'as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt ,last(ts) as ts from ep_iot.sldc_dp)) where scdw_code like '%%';" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(6) + tdSql.checkCols(3) + + sql = "select scdw_code, scdw_name, jzmc, fdgl, jzzt,ts from ((select '01072016' as scdw_code, '盛鲁电厂' as scdw_name, '机组1' as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '01072016' as scdw_code, '盛鲁电厂' as scdw_name, '机组2' as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '00103673' as scdw_code, '鲁西电厂' as scdw_name, '机组1'as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '00103673' as scdw_code, '鲁西电厂' as scdw_name, '机组2'as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '01061584' as scdw_code, '富源热电' as scdw_name, '机组1'as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt ,last(ts) as ts from ep_iot.sldc_dp) union all ( select '01061584' as scdw_code, '富源热电' as scdw_name, '机组2'as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt ,last(ts) as ts from ep_iot.sldc_dp)) where scdw_code like '%%';" + tdSql.query(sql, queryTimes=1) + tdSql.checkCols(6) + tdSql.checkRows(6) + ##tdSql.execute("drop database ep_iot") + def run(self): tdSql.prepare() + self.test_TS_5630() tdLog.printNoPrefix("==========step1:create table") self.__create_tb() diff --git a/tests/system-test/7-tmq/tmq_offset.py b/tests/system-test/7-tmq/tmq_offset.py index 07d1a4bc04..7eabb50be2 100644 --- a/tests/system-test/7-tmq/tmq_offset.py +++ b/tests/system-test/7-tmq/tmq_offset.py @@ -45,6 +45,11 @@ class TDTestCase: tdLog.exit("tmq_offset_test error!") else: buildPath = tdCom.getBuildPath() + cmdStr0 = '%s/build/bin/tmq_offset_test 5679'%(buildPath) + tdLog.info(cmdStr0) + if os.system(cmdStr0) != 0: + tdLog.exit(cmdStr0) + cmdStr1 = '%s/build/bin/taosBenchmark -i 50 -B 1 -t 1000 -n 100000 -y &'%(buildPath) tdLog.info(cmdStr1) os.system(cmdStr1) diff --git a/tests/system-test/8-stream/checkpoint_info.py b/tests/system-test/8-stream/checkpoint_info.py new file mode 100644 index 0000000000..522017a702 --- /dev/null +++ b/tests/system-test/8-stream/checkpoint_info.py @@ -0,0 +1,140 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
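+# (case note: this test cross-checks each stream task's checkpoint_id and
+# checkpoint_ver from information_schema.ins_stream_tasks against the on-disk
+# checkpoint<id>/info file of the owning vnode, after stream restarts and a
+# vgroup redistribution)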
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + + +from util.log import * +from util.cases import * +from util.sql import * +from util.common import * +from util.sqlset import * +from util.cluster import * +import threading +# should be used by -N option +class TDTestCase: + + #updatecfgDict = {'checkpointInterval': 60 ,} + def init(self, conn, logSql, replicaVar=1): + print("========init========") + + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + def find_checkpoint_info_file(self, dirpath, checkpointid, task_id): + for root, dirs, files in os.walk(dirpath): + if f'checkpoint{checkpointid}' in dirs: + info_path = os.path.join(root, f'checkpoint{checkpointid}', 'info') + if os.path.exists(info_path): + if task_id in info_path: + return info_path + else: + continue + else: + return None + def get_dnode_info(self): + ''' + get a dict from vnode to dnode + ''' + self.vnode_dict = {} + sql = 'select dnode_id, vgroup_id from information_schema.ins_vnodes' + result = tdSql.getResult(sql) + for (dnode,vnode) in result: + self.vnode_dict[vnode] = dnode + def print_time_info(self): + ''' + sometimes, we need to wait for a while to check the info (for example, the checkpoint info file won't be created immediately after the redistribute) + ''' + times= 0 + while(True): + if(self.check_info()): + tdLog.success(f'Time to finish is {times}') + return + else: + if times > 200: + tdLog.exit("time out") + times += 10 + time.sleep(10) + def check_info(self): + ''' + first, check if the vnode is restored + ''' + while(True): + if(self.check_vnodestate()): + break + sql = 'select task_id, node_id, checkpoint_id, checkpoint_ver from information_schema.ins_stream_tasks where `level` = "source" or `level` = "agg" and node_type == "vnode"' + for task_id, vnode, checkpoint_id, checkpoint_ver in tdSql.getResult(sql): + dirpath = f"{cluster.dnodes[self.vnode_dict[vnode]-1].dataDir}/vnode/vnode{vnode}/" + info_path = self.find_checkpoint_info_file(dirpath, checkpoint_id, task_id) + if info_path is None: + return False + with open(info_path, 'r') as f: + info_id, info_ver = f.read().split() + if int(info_id) != int(checkpoint_id) or int(info_ver) != int(checkpoint_ver): + return False + return True + + def restart_stream(self): + tdLog.debug("========restart stream========") + time.sleep(10) + for i in range(5): + tdSql.execute("pause stream s1") + time.sleep(2) + tdSql.execute("resume stream s1") + def initstream(self): + tdLog.debug("========case1 start========") + os.system("nohup taosBenchmark -y -B 1 -t 4 -S 500 -n 1000 -v 3 > /dev/null 2>&1 &") + time.sleep(5) + tdSql.execute("create snode on dnode 1") + tdSql.execute("use test") + tdSql.execute("create stream if not exists s1 trigger at_once ignore expired 0 ignore update 0 fill_history 1 into st1 as select _wstart,sum(voltage),groupid from meters partition by groupid interval(1s)") + tdLog.debug("========create stream using snode and insert data ok========") + self.get_dnode_info() + def redistribute_vnode(self): + tdLog.debug("========redistribute vnode========") + tdSql.redistribute_db_all_vgroups() + self.get_dnode_info() + def replicate_db(self): + tdLog.debug("========replicate db========") + while True: + res = tdSql.getResult("SHOW 
TRANSACTIONS") + if res == []: + tdLog.debug("========== no transaction, begin to replicate db =========") + tdSql.execute("alter database test replica 3") + return + else: + time.sleep(5) + continue + def check_vnodestate(self): + sql = 'select distinct restored from information_schema.ins_vnodes' + if tdSql.getResult(sql) != [(True,)]: + tdLog.debug(f"vnode not restored, wait 5s") + time.sleep(5) + return False + else: + return True + def run(self): + print("========run========") + self.initstream() + self.restart_stream() + time.sleep(60) + self.print_time_info() + self.redistribute_vnode() + self.restart_stream() + time.sleep(60) + self.print_time_info() + + def stop(self): + print("========stop========") + tdSql.close() + tdLog.success(f"{__file__} successfully executed") +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/8-stream/checkpoint_info2.py b/tests/system-test/8-stream/checkpoint_info2.py new file mode 100644 index 0000000000..3dc57477f7 --- /dev/null +++ b/tests/system-test/8-stream/checkpoint_info2.py @@ -0,0 +1,141 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + + +from util.log import * +from util.cases import * +from util.sql import * +from util.common import * +from util.sqlset import * +from util.cluster import * + +# should be used by -N option +class TDTestCase: + updatecfgDict = {'checkpointInterval': 60 , + } + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), True) + def find_checkpoint_info_file(self, dirpath, checkpointid, task_id): + for root, dirs, files in os.walk(dirpath): + if f'checkpoint{checkpointid}' in dirs: + info_path = os.path.join(root, f'checkpoint{checkpointid}', 'info') + if os.path.exists(info_path): + if task_id in info_path: + tdLog.info(f"info file found in {info_path}") + return info_path + else: + continue + else: + tdLog.info(f"info file not found in {info_path}") + return None + else: + tdLog.info(f"no checkpoint{checkpointid} in {dirpath}") + def get_dnode_info(self): + ''' + get a dict from vnode to dnode + ''' + self.vnode_dict = {} + sql = 'select dnode_id, vgroup_id from information_schema.ins_vnodes where status = "leader"' + result = tdSql.getResult(sql) + for (dnode,vnode) in result: + self.vnode_dict[vnode] = dnode + def print_time_info(self): + ''' + sometimes, we need to wait for a while to check the info (for example, the checkpoint info file won't be created immediately after the redistribute) + ''' + times= 0 + while(True): + if(self.check_info()): + tdLog.success(f'Time to finish is {times}') + return + else: + if times > 400: + tdLog.exit("time out") + times += 10 + time.sleep(10) + def check_info(self): + ''' + first, check if the vnode is restored + ''' + while(True): + if(self.check_vnodestate()): + break + self.get_dnode_info() + sql = 'select task_id, node_id, checkpoint_id, checkpoint_ver from information_schema.ins_stream_tasks where `level` = "source" or `level` = "agg" and node_type == 
"vnode"' + for task_id, vnode, checkpoint_id, checkpoint_ver in tdSql.getResult(sql): + dirpath = f"{cluster.dnodes[self.vnode_dict[vnode]-1].dataDir}/vnode/vnode{vnode}/" + info_path = self.find_checkpoint_info_file(dirpath, checkpoint_id, task_id) + if info_path is None: + tdLog.info(f"info path: {dirpath} is null") + return False + with open(info_path, 'r') as f: + info_id, info_ver = f.read().split() + if int(info_id) != int(checkpoint_id) or int(info_ver) != int(checkpoint_ver): + tdLog.info(f"infoId: {info_id}, checkpointId: {checkpoint_id}, infoVer: {info_ver}, checkpointVer: {checkpoint_ver}") + return False + return True + + def restart_stream(self): + tdLog.debug("========restart stream========") + for i in range(5): + tdSql.execute("pause stream s1") + time.sleep(2) + tdSql.execute("resume stream s1") + def initstream(self): + tdLog.debug("========case1 start========") + os.system("nohup taosBenchmark -y -B 1 -t 4 -S 500 -n 1000 -v 3 > /dev/null 2>&1 &") + time.sleep(5) + tdSql.execute("create snode on dnode 1") + tdSql.execute("use test") + tdSql.execute("create stream if not exists s1 trigger at_once ignore expired 0 ignore update 0 fill_history 1 into st1 as select _wstart,sum(voltage),groupid from meters partition by groupid interval(1s)") + tdLog.debug("========create stream using snode and insert data ok========") + self.get_dnode_info() + def redistribute_vnode(self): + tdLog.debug("========redistribute vnode========") + tdSql.redistribute_db_all_vgroups() + self.get_dnode_info() + def replicate_db(self): + tdLog.debug("========replicate db========") + while True: + res = tdSql.getResult("SHOW TRANSACTIONS") + if res == []: + tdLog.debug("========== no transaction, begin to replicate db =========") + tdSql.execute("alter database test replica 3") + return + else: + time.sleep(5) + continue + def check_vnodestate(self): + sql = 'select distinct restored from information_schema.ins_vnodes' + if tdSql.getResult(sql) != [(True,)]: + tdLog.debug(f"vnode not restored, wait 5s") + time.sleep(5) + return False + else: + return True + def run(self): + self.initstream() + self.replicate_db() + self.print_time_info() + self.restart_stream() + time.sleep(60) + self.print_time_info() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/taosc_test/CMakeLists.txt b/tests/taosc_test/CMakeLists.txt index c16fe59271..45c14f84bf 100644 --- a/tests/taosc_test/CMakeLists.txt +++ b/tests/taosc_test/CMakeLists.txt @@ -16,7 +16,7 @@ aux_source_directory(src OS_SRC) # taoscTest add_executable(taoscTest "taoscTest.cpp") -target_link_libraries(taoscTest taos os gtest_main) +target_link_libraries(taoscTest ${TAOS_LIB} os gtest_main) target_include_directories( taoscTest PUBLIC "${TD_SOURCE_DIR}/include/os" diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 5e93be695d..d058d7a52f 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -13,7 +13,7 @@ IF(TD_WEBSOCKET) PREFIX "taosws-rs" SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosws-rs BUILD_ALWAYS off - DEPENDS taos + DEPENDS ${TAOS_LIB} BUILD_IN_SOURCE 1 CONFIGURE_COMMAND cmake -E echo "taosws-rs no need cmake to config" PATCH_COMMAND @@ -32,7 +32,7 @@ IF(TD_WEBSOCKET) PREFIX "taosws-rs" SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosws-rs BUILD_ALWAYS off - DEPENDS taos + DEPENDS ${TAOS_LIB} BUILD_IN_SOURCE 1 CONFIGURE_COMMAND cmake -E echo "taosws-rs no need cmake to config" PATCH_COMMAND @@ 
-52,7 +52,7 @@ IF(TD_WEBSOCKET) PREFIX "taosws-rs" SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosws-rs BUILD_ALWAYS off - DEPENDS taos + DEPENDS ${TAOS_LIB} BUILD_IN_SOURCE 1 CONFIGURE_COMMAND cmake -E echo "taosws-rs no need cmake to config" PATCH_COMMAND @@ -139,7 +139,7 @@ ELSE() PREFIX "taosadapter" SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter BUILD_ALWAYS off - DEPENDS taos + DEPENDS ${TAOS_LIB} BUILD_IN_SOURCE 1 CONFIGURE_COMMAND cmake -E echo "taosadapter no need cmake to config" PATCH_COMMAND @@ -168,7 +168,7 @@ ELSE() PREFIX "taosadapter" SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter BUILD_ALWAYS off - DEPENDS taos + DEPENDS ${TAOS_LIB} BUILD_IN_SOURCE 1 CONFIGURE_COMMAND cmake -E echo "taosadapter no need cmake to config" PATCH_COMMAND @@ -193,7 +193,7 @@ ELSE() PREFIX "taosadapter" SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter BUILD_ALWAYS off - DEPENDS taos + DEPENDS ${TAOS_LIB} BUILD_IN_SOURCE 1 CONFIGURE_COMMAND cmake -E echo "taosadapter no need cmake to config" PATCH_COMMAND diff --git a/tools/shell/CMakeLists.txt b/tools/shell/CMakeLists.txt index 4a8e0b9d34..2301f33803 100644 --- a/tools/shell/CMakeLists.txt +++ b/tools/shell/CMakeLists.txt @@ -33,9 +33,9 @@ ELSE() ENDIF() if(TD_WINDOWS) - target_link_libraries(shell PUBLIC taos_static ${LINK_WEBSOCKET}) + target_link_libraries(shell PUBLIC ${TAOS_LIB_STATIC} ${LINK_WEBSOCKET}) else() - target_link_libraries(shell PUBLIC taos ${LINK_WEBSOCKET} ${LINK_JEMALLOC} ${LINK_ARGP}) + target_link_libraries(shell PUBLIC ${TAOS_LIB} ${LINK_WEBSOCKET} ${LINK_JEMALLOC} ${LINK_ARGP}) endif() target_link_libraries( @@ -63,7 +63,7 @@ IF(TD_LINUX) IF(TD_WEBSOCKET) ADD_DEPENDENCIES(shell_ut taosws-rs) ENDIF() - target_link_libraries(shell_ut PUBLIC taos ${LINK_WEBSOCKET} ${LINK_JEMALLOC} ${LINK_ARGP}) + target_link_libraries(shell_ut PUBLIC ${TAOS_LIB} ${LINK_WEBSOCKET} ${LINK_JEMALLOC} ${LINK_ARGP}) target_link_libraries(shell_ut PRIVATE os common transport geometry util) # util depends diff --git a/utils/test/c/CMakeLists.txt b/utils/test/c/CMakeLists.txt index 7589d11840..7054eb218f 100644 --- a/utils/test/c/CMakeLists.txt +++ b/utils/test/c/CMakeLists.txt @@ -1,5 +1,5 @@ add_executable(tmq_demo tmqDemo.c) -add_dependencies(tmq_demo taos) +add_dependencies(tmq_demo ${TAOS_LIB}) add_executable(tmq_sim tmqSim.c) add_executable(create_table createTable.c) add_executable(tmq_taosx_ci tmq_taosx_ci.c) @@ -22,7 +22,7 @@ endif(${TD_LINUX}) target_link_libraries( tmq_offset - PUBLIC taos + PUBLIC ${TAOS_LIB} PUBLIC util PUBLIC common PUBLIC os @@ -30,7 +30,7 @@ target_link_libraries( target_link_libraries( tmq_multi_thread_test - PUBLIC taos + PUBLIC ${TAOS_LIB} PUBLIC util PUBLIC common PUBLIC os @@ -38,56 +38,56 @@ target_link_libraries( target_link_libraries( create_table - PUBLIC taos - PUBLIC util - PUBLIC common - PUBLIC os -) -target_link_libraries( - tmq_demo - PUBLIC taos - PUBLIC util - PUBLIC common - PUBLIC os -) -target_link_libraries( - tmq_sim PUBLIC ${TAOS_LIB} PUBLIC util PUBLIC common PUBLIC os ) +target_link_libraries( + tmq_demo + PUBLIC ${TAOS_LIB} + PUBLIC util + PUBLIC common + PUBLIC os +) +target_link_libraries( + tmq_sim + PUBLIC ${TAOS_LIB_PLATFORM_SPEC} + PUBLIC util + PUBLIC common + PUBLIC os +) target_link_libraries( tmq_ts5466 - PUBLIC taos + PUBLIC ${TAOS_LIB} PUBLIC util PUBLIC common PUBLIC os ) target_link_libraries( tmq_td32187 - PUBLIC taos + PUBLIC ${TAOS_LIB} PUBLIC util PUBLIC common PUBLIC os ) target_link_libraries( tmq_td32526 - PUBLIC taos + PUBLIC ${TAOS_LIB} PUBLIC util PUBLIC common 
PUBLIC os ) target_link_libraries( tmq_taosx_ci - PUBLIC taos + PUBLIC ${TAOS_LIB} PUBLIC util PUBLIC common PUBLIC os ) target_link_libraries( tmq_offset_test - PUBLIC taos + PUBLIC ${TAOS_LIB} PUBLIC util PUBLIC common PUBLIC os @@ -95,7 +95,7 @@ target_link_libraries( target_link_libraries( replay_test - PUBLIC taos + PUBLIC ${TAOS_LIB} PUBLIC util PUBLIC common PUBLIC os @@ -103,7 +103,7 @@ target_link_libraries( target_link_libraries( write_raw_block_test - PUBLIC taos + PUBLIC ${TAOS_LIB} PUBLIC util PUBLIC common PUBLIC os @@ -111,7 +111,7 @@ target_link_libraries( target_link_libraries( tmq_write_raw_test - PUBLIC taos + PUBLIC ${TAOS_LIB} PUBLIC util PUBLIC common PUBLIC os @@ -119,7 +119,7 @@ target_link_libraries( target_link_libraries( sml_test - PUBLIC taos + PUBLIC ${TAOS_LIB} PUBLIC util PUBLIC common PUBLIC os @@ -128,7 +128,7 @@ target_link_libraries( target_link_libraries( get_db_name_test - PUBLIC taos + PUBLIC ${TAOS_LIB} PUBLIC util PUBLIC common PUBLIC os @@ -136,7 +136,7 @@ target_link_libraries( target_link_libraries( varbinary_test - PUBLIC taos + PUBLIC ${TAOS_LIB} PUBLIC util PUBLIC common PUBLIC os @@ -145,7 +145,7 @@ target_link_libraries( if(${TD_LINUX}) target_link_libraries( tsz_test - PUBLIC taos + PUBLIC ${TAOS_LIB} PUBLIC util PUBLIC common PUBLIC os diff --git a/utils/test/c/tmq_offset_test.c b/utils/test/c/tmq_offset_test.c index 6be9b38979..25f048bab2 100644 --- a/utils/test/c/tmq_offset_test.c +++ b/utils/test/c/tmq_offset_test.c @@ -80,6 +80,78 @@ int buildData(TAOS* pConn){ return 0; } +void test_ts5679(TAOS* pConn){ + TAOS_RES* pRes = taos_query(pConn, "drop topic if exists t_5679"); + ASSERT(taos_errno(pRes) == 0); + taos_free_result(pRes); + + pRes = taos_query(pConn, "drop database if exists db_ts5679"); + ASSERT(taos_errno(pRes) == 0); + taos_free_result(pRes); + + pRes = taos_query(pConn, "create database if not exists db_ts5679 vgroups 1 wal_retention_period 3600"); + ASSERT(taos_errno(pRes) == 0); + taos_free_result(pRes); + + pRes = taos_query(pConn, "create topic t_5679 as database db_ts5679"); + ASSERT(taos_errno(pRes) == 0); + taos_free_result(pRes); + + pRes = taos_query(pConn, "use db_ts5679"); + ASSERT(taos_errno(pRes) == 0); + taos_free_result(pRes); + + pRes = taos_query(pConn, "CREATE TABLE `t1` (`ts` TIMESTAMP, `voltage` INT)"); + ASSERT(taos_errno(pRes) == 0); + taos_free_result(pRes); + + pRes = taos_query(pConn, "insert into t1 values(now, 1)"); + ASSERT(taos_errno(pRes) == 0); + taos_free_result(pRes); + + tmq_conf_t* conf = tmq_conf_new(); + + tmq_conf_set(conf, "enable.auto.commit", "false"); + tmq_conf_set(conf, "auto.commit.interval.ms", "2000"); + tmq_conf_set(conf, "group.id", "group_id_2"); + tmq_conf_set(conf, "td.connect.user", "root"); + tmq_conf_set(conf, "td.connect.pass", "taosdata"); + tmq_conf_set(conf, "auto.offset.reset", "earliest"); + tmq_conf_set(conf, "msg.with.table.name", "false"); + + tmq_t* tmq = tmq_consumer_new(conf, NULL, 0); + tmq_conf_destroy(conf); + + // build the list of topics to subscribe to + tmq_list_t* topicList = tmq_list_new(); + tmq_list_append(topicList, "t_5679"); + + // start the subscription + tmq_subscribe(tmq, topicList); + tmq_list_destroy(topicList); + + while(1){ + pRes = tmq_consumer_poll(tmq, 1000); + if (pRes == NULL){ + break; + } + taos_free_result(pRes); + taosSsleep(3); + } + tmq_topic_assignment* pAssign = NULL; + int32_t numOfAssign = 0; + int32_t code = tmq_get_topic_assignment(tmq, "t_5679", &pAssign, &numOfAssign); + ASSERT(code == 0); + + for(int i = 0; i < numOfAssign; i++){ + int64_t committed = tmq_committed(tmq, "t_5679",
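/* no offset was ever committed (enable.auto.commit is false and no manual commit is issued), so each per-vgroup lookup should report TSDB_CODE_TMQ_NO_COMMITTED, as asserted below */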
pAssign[i].vgId); + printf("committed offset:%"PRId64"\n", committed); + ASSERT(committed == TSDB_CODE_TMQ_NO_COMMITTED); + } + + tmq_consumer_close(tmq); +} + void test_offset(TAOS* pConn){ if(buildData(pConn) != 0){ ASSERT(0); @@ -304,8 +376,13 @@ void test_ts3756(TAOS* pConn){ int main(int argc, char* argv[]) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); - test_offset(pConn); - test_ts3756(pConn); + if (argc == 2) { + test_ts5679(pConn); + } else { + test_offset(pConn); + test_ts3756(pConn); + } + taos_close(pConn); return 0; } diff --git a/utils/test/c/tmq_taosx_ci.c b/utils/test/c/tmq_taosx_ci.c index cd70dd88f5..117f9fa2e1 100644 --- a/utils/test/c/tmq_taosx_ci.c +++ b/utils/test/c/tmq_taosx_ci.c @@ -65,12 +65,12 @@ static void msg_process(TAOS_RES* msg) { if (g_fp && strcmp(result, "") != 0) { // RES_TYPE__TMQ_BATCH_META if ((*(int8_t*)msg) == 5) { - cJSON* pJson = cJSON_Parse(result); - cJSON* pJsonArray = cJSON_GetObjectItem(pJson, "metas"); + cJSON* pJson = cJSON_Parse(result); + cJSON* pJsonArray = cJSON_GetObjectItem(pJson, "metas"); int32_t num = cJSON_GetArraySize(pJsonArray); for (int32_t i = 0; i < num; i++) { cJSON* pJsonItem = cJSON_GetArrayItem(pJsonArray, i); - char* itemStr = cJSON_PrintUnformatted(pJsonItem); + char* itemStr = cJSON_PrintUnformatted(pJsonItem); taosFprintfFile(g_fp, itemStr); tmq_free_json_meta(itemStr); taosFprintfFile(g_fp, "\n"); @@ -489,10 +489,11 @@ int buildStable(TAOS* pConn, TAOS_RES* pRes) { } taos_free_result(pRes); #else - pRes = taos_query(pConn, - "create stream meters_summary_s trigger at_once IGNORE EXPIRED 0 fill_history 1 into meters_summary as select " - "_wstart, max(current) as current, " - "groupid, location from meters partition by groupid, location interval(10m)"); + pRes = taos_query( + pConn, + "create stream meters_summary_s trigger at_once IGNORE EXPIRED 0 fill_history 1 into meters_summary as select " + "_wstart, max(current) as current, " + "groupid, location from meters partition by groupid, location interval(10m)"); if (taos_errno(pRes) != 0) { printf("failed to create super table meters_summary, reason:%s\n", taos_errstr(pRes)); return -1; @@ -632,8 +633,8 @@ tmq_t* build_consumer() { tmq_conf_set(conf, "enable.auto.commit", "true"); tmq_conf_set(conf, "auto.offset.reset", "earliest"); tmq_conf_set(conf, "msg.consume.excluded", "1"); -// tmq_conf_set(conf, "session.timeout.ms", "1000000"); -// tmq_conf_set(conf, "max.poll.interval.ms", "20000"); + // tmq_conf_set(conf, "session.timeout.ms", "1000000"); + // tmq_conf_set(conf, "max.poll.interval.ms", "20000"); if (g_conf.snapShot) { tmq_conf_set(conf, "experimental.snapshot.enable", "true"); @@ -722,7 +723,7 @@ void initLogFile() { "\"level\":\"medium\"},{" "\"name\":\"groupid\",\"type\":4,\"isPrimarykey\":false,\"encode\":\"simple8b\",\"compress\":\"lz4\"," "\"level\":\"medium\"},{\"name\":" - "\"location\",\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"lz4\"," + "\"location\",\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"zstd\"," "\"level\":\"medium\"}],\"tags\":[{\"name\":\"group_id\"," "\"type\":14}" "]}", @@ -750,7 +751,7 @@ void initLogFile() { "\"level\":\"medium\"}" ",{" "\"name\":\"c3\",\"type\":8,\"length\":64,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":" - "\"lz4\",\"level\":\"medium\"},{" + "\"zstd\",\"level\":\"medium\"},{" "\"name\":\"c4\",\"type\":5,\"isPrimarykey\":false,\"encode\":\"simple8b\",\"compress\":\"lz4\",\"level\":"
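/* in the expected meta JSON below, the "compress" attribute of varchar/nchar columns (types 8 and 10) changes from lz4 to zstd */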
"\"medium\"}],\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":" "\"t3\"," @@ -772,7 +773,7 @@ void initLogFile() { "{\"type\":\"create\",\"tableType\":\"normal\",\"tableName\":\"n1\",\"columns\":[{\"name\":\"ts\"," "\"type\":9," "\"isPrimarykey\":false,\"encode\":\"delta-i\",\"compress\":\"lz4\",\"level\":\"medium\"},{\"name\":\"c2\"," - "\"type\":10,\"length\":8,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"lz4\",\"level\":" + "\"type\":10,\"length\":8,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"zstd\",\"level\":" "\"medium\"},{\"name\":\"cc3\",\"type\":5," "\"isPrimarykey\":false,\"encode\":\"simple8b\",\"compress\":\"lz4\",\"level\":\"medium\"}],\"tags\":[]}", "{\"type\":\"create\",\"tableType\":\"super\",\"tableName\":\"jt\",\"columns\":[{\"name\":\"ts\"," @@ -794,7 +795,7 @@ void initLogFile() { "\"medium\"},{\"name\":\"c2\",\"type\":6,\"isPrimarykey\":" "false,\"encode\":\"delta-d\",\"compress\":\"lz4\",\"level\":\"medium\"},{" "\"name\":\"c3\",\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":" - "\"lz4\",\"level\":\"medium\"}]," + "\"zstd\",\"level\":\"medium\"}]," "\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\"," "\"type\":" "1}]}", @@ -806,7 +807,7 @@ void initLogFile() { "\"name\":\"c2\",\"type\":6," "\"isPrimarykey\":false,\"encode\":\"delta-d\",\"compress\":\"lz4\",\"level\":\"medium\"},{\"name\":" "\"c3\"," - "\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"lz4\",\"level\":" + "\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"zstd\",\"level\":" "\"medium\"}]," "\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\"," "\"type\":" @@ -863,7 +864,7 @@ void initLogFile() { "\"level\":\"medium\"},{\"name\":\"c2\",\"type\":6,\"isPrimarykey\":false,\"encode\":\"delta-d\"," "\"compress\":\"lz4\",\"level\":\"medium\"},{" "\"name\":\"c3\",\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":" - "\"lz4\",\"level\":\"medium\"}]," + "\"zstd\",\"level\":\"medium\"}]," "\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\"," "\"type\":" "1}]}", @@ -883,7 +884,8 @@ void initLogFile() { "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":5,\"colName\":\"c4\"," "\"colType\":5}", "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":6,\"colName\":\"c4\"}", - "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":5,\"colName\":\"c4\",\"colType\":5}", + "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":5,\"colName\":\"c4\"," + "\"colType\":5}", "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":7,\"colName\":\"c3\"," "\"colType\":8,\"colLength\":64}", "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":1,\"colName\":\"t2\"," @@ -896,7 +898,7 @@ void initLogFile() { "9,\"isPrimarykey\":false,\"encode\":\"delta-i\",\"compress\":\"lz4\",\"level\":\"medium\"},{\"name\":" "\"c1\",\"type\":4,\"isPrimarykey\":false,\"encode\":\"simple8b\",\"compress\":\"lz4\",\"level\":" "\"medium\"},{\"name\":\"c2\",\"type\":10,\"length\":4," - "\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"lz4\",\"level\":\"medium\"}],\"tags\":[]}", + 
"\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"zstd\",\"level\":\"medium\"}],\"tags\":[]}", "{\"type\":\"alter\",\"tableType\":\"normal\",\"tableName\":\"n1\",\"alterType\":5,\"colName\":\"c3\"," "\"colType\":5}", "{\"type\":\"alter\",\"tableType\":\"normal\",\"tableName\":\"n1\",\"alterType\":7,\"colName\":\"c2\"," @@ -921,7 +923,7 @@ void initLogFile() { "{\"name\":\"c1\",\"type\":4,\"isPrimarykey\":false,\"encode\":\"simple8b\",\"compress\":\"lz4\",\"level\":" "\"medium\"},{\"name\":\"c2\",\"type\":6,\"isPrimarykey\":" "false,\"encode\":\"delta-d\",\"compress\":\"lz4\",\"level\":\"medium\"},{\"name\":\"c3\",\"type\":8," - "\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"lz4\",\"level\":\"medium\"}]," + "\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"zstd\",\"level\":\"medium\"}]," "\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\"," "\"type\":1}]}", "{\"type\":\"drop\",\"tableType\":\"super\",\"tableName\":\"st1\"}", @@ -931,7 +933,7 @@ void initLogFile() { "\"level\":\"medium\"},{\"name\":\"c2\",\"type\":6,\"isPrimarykey\":" "false,\"encode\":\"delta-d\",\"compress\":\"lz4\",\"level\":\"medium\"},{" "\"name\":\"c3\",\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":" - "\"lz4\",\"level\":\"medium\"}]," + "\"zstd\",\"level\":\"medium\"}]," "\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\"," "\"type\":" "1}]}", @@ -941,7 +943,7 @@ void initLogFile() { "\"c1\",\"type\":4,\"isPrimarykey\":false,\"encode\":\"simple8b\",\"compress\":\"lz4\",\"level\":" "\"medium\"},{\"name\":\"c2\",\"type\":6," "\"isPrimarykey\":false,\"encode\":\"delta-d\",\"compress\":\"lz4\",\"level\":\"medium\"},{\"name\":\"c3\"," - "\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"lz4\",\"level\":" + "\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"zstd\",\"level\":" "\"medium\"}]," "\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\"," "\"type\":" @@ -985,7 +987,8 @@ void initLogFile() { "\"lz4\",\"level\":\"medium\"},{" "\"name\":\"groupid\",\"type\":4,\"isPrimarykey\":false,\"encode\":\"simple8b\",\"compress\":\"lz4\"," "\"level\":\"medium\"},{\"name\":" - "\"location\",\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"lz4\"," + "\"location\",\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":" + "\"zstd\"," "\"level\":\"medium\"}],\"tags\":[{\"name\":\"group_id\"," "\"type\":" "14}]}", @@ -1012,7 +1015,7 @@ void initLogFile() { "\"level\":\"medium\"},{\"name\":\"c2\",\"type\":6,\"isPrimarykey\":" "false,\"encode\":\"delta-d\",\"compress\":\"lz4\",\"level\":\"medium\"},{" "\"name\":\"c3\",\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":" - "\"lz4\",\"level\":\"medium\"}]," + "\"zstd\",\"level\":\"medium\"}]," "\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\"," "\"type\":" "1}]}", @@ -1032,7 +1035,8 @@ void initLogFile() { "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":5,\"colName\":\"c4\"," "\"colType\":5}", "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":6,\"colName\":\"c4\"}", - 
"{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":5,\"colName\":\"c4\",\"colType\":5}", + "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":5,\"colName\":\"c4\"," + "\"colType\":5}", "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":7,\"colName\":\"c3\"," "\"colType\":8,\"colLength\":64}", "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":1,\"colName\":\"t2\"," @@ -1044,7 +1048,7 @@ void initLogFile() { "9,\"isPrimarykey\":false,\"encode\":\"delta-i\",\"compress\":\"lz4\",\"level\":\"medium\"}" ",{\"name\":\"c1\",\"type\":4,\"isPrimarykey\":false,\"encode\":\"simple8b\",\"compress\":\"lz4\"," "\"level\":\"medium\"},{\"name\":\"c2\",\"type\":10,\"length\":4," - "\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"lz4\",\"level\":\"medium\"}],\"tags\":[]}", + "\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"zstd\",\"level\":\"medium\"}],\"tags\":[]}", "{\"type\":\"alter\",\"tableType\":\"normal\",\"tableName\":\"n1\",\"alterType\":5,\"colName\":\"c3\"," "\"colType\":5}", "{\"type\":\"alter\",\"tableType\":\"normal\",\"tableName\":\"n1\",\"alterType\":7,\"colName\":\"c2\"," @@ -1069,7 +1073,7 @@ void initLogFile() { "\"level\":\"medium\"},{\"name\":\"c2\",\"type\":6,\"isPrimarykey\":" "false,\"encode\":\"delta-d\",\"compress\":\"lz4\",\"level\":\"medium\"},{" "\"name\":\"c3\",\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":" - "\"lz4\",\"level\":\"medium\"}]," + "\"zstd\",\"level\":\"medium\"}]," "\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\"," "\"type\":" "1}]}", @@ -1079,7 +1083,7 @@ void initLogFile() { "\"c1\",\"type\":4,\"isPrimarykey\":false,\"encode\":\"simple8b\",\"compress\":\"lz4\",\"level\":" "\"medium\"},{\"name\":\"c2\",\"type\":6," "\"isPrimarykey\":false,\"encode\":\"delta-d\",\"compress\":\"lz4\",\"level\":\"medium\"},{\"name\":\"c3\"," - "\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"lz4\",\"level\":" + "\"type\":8,\"length\":16,\"isPrimarykey\":false,\"encode\":\"disabled\",\"compress\":\"zstd\",\"level\":" "\"medium\"}]," "\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\"," "\"type\":"