Merge branch '3.0' of https://github.com/taosdata/TDengine into feat/TS-5776

This commit is contained in:
wangmm0220 2025-02-07 18:25:08 +08:00
commit 94b74cc20f
2866 changed files with 538835 additions and 4793 deletions

View File

@ -2,7 +2,7 @@
# addr2line
ExternalProject_Add(addr2line
GIT_REPOSITORY https://github.com/davea42/libdwarf-addr2line.git
GIT_TAG master
GIT_TAG main
SOURCE_DIR "${TD_CONTRIB_DIR}/addr2line"
BINARY_DIR "${TD_CONTRIB_DIR}/addr2line"
CONFIGURE_COMMAND ""

View File

@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.3.5.0.alpha")
SET(TD_VER_NUMBER "3.3.5.2.alpha")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)

View File

@ -12,7 +12,7 @@ ExternalProject_Add(curl2
BUILD_IN_SOURCE TRUE
BUILD_ALWAYS 1
UPDATE_COMMAND ""
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.2 --with-ssl=$ENV{HOME}/.cos-local.2 --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd --without-libidn2 --without-nghttp2 --without-libpsl #--enable-debug
CONFIGURE_COMMAND ${CONTRIB_CONFIG_ENV} ./configure --prefix=$ENV{HOME}/.cos-local.2 --with-ssl=$ENV{HOME}/.cos-local.2 --enable-websockets --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd --without-libidn2 --without-nghttp2 --without-libpsl #--enable-debug
BUILD_COMMAND make -j
INSTALL_COMMAND make install
TEST_COMMAND ""

View File

@ -6,9 +6,9 @@ ExternalProject_Add(openssl
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
SOURCE_DIR "${TD_CONTRIB_DIR}/openssl"
BUILD_IN_SOURCE TRUE
#BUILD_ALWAYS 1
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ./Configure --prefix=$ENV{HOME}/.cos-local.2 no-shared
BUILD_ALWAYS 1
UPDATE_COMMAND ""
CONFIGURE_COMMAND ${CONTRIB_CONFIG_ENV} ./Configure --prefix=$ENV{HOME}/.cos-local.2 no-shared
BUILD_COMMAND make -j
INSTALL_COMMAND make install_sw -j
TEST_COMMAND ""

View File

@ -17,7 +17,6 @@ elseif(${BUILD_WITH_COS})
file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/)
cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
endif(${BUILD_WITH_COS})
configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
@ -43,11 +42,6 @@ endif()
set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
# taos-tools
if(${BUILD_TOOLS})
cat("${TD_SUPPORT_DIR}/taostools_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif()
# taosws-rs
if(${WEBSOCKET})
cat("${TD_SUPPORT_DIR}/taosws_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@ -146,11 +140,16 @@ if(${BUILD_WITH_SQLITE})
cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${BUILD_WITH_SQLITE})
# libcurl
if(NOT ${TD_WINDOWS})
file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.2/)
cat("${TD_SUPPORT_DIR}/ssl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(NOT ${TD_WINDOWS})
# s3
if(${BUILD_WITH_S3})
cat("${TD_SUPPORT_DIR}/ssl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/xml2_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/libs3_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/azure_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_S3)
@ -160,7 +159,6 @@ elseif(${BUILD_WITH_COS})
# cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
# cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
# cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
# cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_COS)
endif()
@ -199,6 +197,11 @@ endif()
# lemon
cat("${TD_SUPPORT_DIR}/lemon_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
# Force specify CC=cc on MacOS. Because the default CC setting in the generated Makefile has issues finding standard library headers
IF(${TD_DARWIN})
SET(CONTRIB_CONFIG_ENV "CC=cc")
ENDIF()
# download dependencies
configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .

View File

@ -26,7 +26,8 @@ Flink Connector supports all platforms that can run Flink 1.19 and above version
| Flink Connector Version | Major Changes | TDengine Version|
|-------------------------| ------------------------------------ | ---------------- |
| 2.0.0 | 1.Support SQL queries on data in TDengine database. <br/> 2. Support CDC subscription to data in TDengine database.<br/> 3. Supports reading and writing to TDengine database using Table SQL. | 3.3.5.0 and higher|
| 2.0.1 | Sink supports writing all implemented types that extend RowData. | - |
| 2.0.0 | 1.Support SQL queries on data in TDengine database. <br/> 2. Support CDC subscription to data in TDengine database.<br/> 3. Supports reading and writing to TDengine database using Table SQL. | 3.3.5.1 and higher|
| 1.0.0 | Support Sink function to write data from other sources to TDengine in the future.| 3.3.2.0 and higher|
## Exception and error codes
@ -114,7 +115,7 @@ If using Maven to manage a project, simply add the following dependencies in pom
<dependency>
<groupId>com.taosdata.flink</groupId>
<artifactId>flink-connector-tdengine</artifactId>
<version>2.0.0</version>
<version>2.0.1</version>
</dependency>
```

View File

@ -246,13 +246,14 @@ The query performance test mainly outputs the QPS indicator of query request spe
``` bash
complete query with 3 threads and 10000 query delay avg: 0.002686s min: 0.001182s max: 0.012189s p90: 0.002977s p95: 0.003493s p99: 0.004645s SQL command: select ...
INFO: Total specified queries: 30000
INFO: Spend 26.9530 second completed total queries: 30000, the QPS of all threads: 1113.049
```
- The first line shows the query latency distribution (avg, min, max, and percentiles) for the 3 threads that each executed 10000 queries; `SQL command` is the query statement under test
- The second line indicates that a total of 10000 * 3 = 30000 queries have been completed
- The third line indicates that the total query time is 26.9653 seconds, and the query rate per second (QPS) is 1113.049 times/second
- The second line indicates that the total query time is 26.9530 seconds, the total number of queries is 10000 * 3 = 30000, and the query rate per second (QPS) is 1113.049 times/second
- If the `continue_if_fail` option is set to `yes` in the query, the last line will output the number of failed requests and error rate, the format like "error + number of failed requests (error rate)"
- QPS = number of successful requests / time spent (in seconds)
- Error rate = number of failed requests / (number of successful requests + number of failed requests)
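For reference, the two formulas above can be expressed directly in code. The following is a minimal, illustrative C sketch (not taosBenchmark source; the function name and counters are made up, and the sample numbers come from the output shown above):

```c
#include <stdio.h>

/* Minimal sketch: derive QPS and error rate from raw counters using the
 * definitions above. Not taosBenchmark code; names are illustrative only. */
static void report_query_metrics(long succeeded, long failed, double elapsed_sec) {
  double qps = elapsed_sec > 0 ? (double)succeeded / elapsed_sec : 0.0;  /* successes / seconds */
  long total = succeeded + failed;
  double error_rate = total > 0 ? (double)failed / (double)total : 0.0;  /* failures / all requests */
  printf("QPS: %.3f, error rate: %.4f\n", qps, error_rate);
}

int main(void) {
  /* Numbers from the sample output above: 30000 successful queries in 26.9530 s. */
  report_query_metrics(30000, 0, 26.9530);
  return 0;
}
```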
#### Subscription metrics
@ -334,9 +335,9 @@ Parameters related to supertable creation are configured in the `super_tables` s
- **child_table_exists**: Whether the child table already exists, default is "no", options are "yes" or "no".
- **child_table_count**: Number of child tables, default is 10.
- **childtable_count**: Number of child tables, default is 10.
- **child_table_prefix**: Prefix for child table names, mandatory, no default value.
- **childtable_prefix**: Prefix for child table names, mandatory, no default value.
- **escape_character**: Whether the supertable and child table names contain escape characters, default is "no", options are "yes" or "no".
@ -403,7 +404,7 @@ Specify the configuration parameters for tag and data columns in `super_tables`
- **min**: The minimum value for the data type of the column/tag. Generated values will be greater than or equal to the minimum value.
- **max**: The maximum value for the data type of the column/tag. Generated values will be less than the minimum value.
- **max**: The maximum value for the data type of the column/tag. Generated values will be less than the maximum value.
- **scalingFactor**: Floating-point precision enhancement factor, effective only when the data type is float/double; valid values are positive integers from 1 to 1000000. It is used to enhance the precision of generated floating-point numbers, especially when the min or max values are small. This attribute enhances the precision after the decimal point by powers of 10: a scalingFactor of 10 adds 1 decimal place, 100 adds 2 places, and so on.
@ -431,11 +432,9 @@ Specify the configuration parameters for tag and data columns in `super_tables`
- **create_table_thread_count** : The number of threads for creating tables, default is 8.
- **connection_pool_size** : The number of pre-established connections with the TDengine server. If not configured, it defaults to the specified number of threads.
- **result_file** : The path to the result output file, default is ./output.txt.
- **confirm_parameter_prompt** : A toggle parameter that requires user confirmation after a prompt to continue. The default value is false.
- **confirm_parameter_prompt** : A toggle parameter that requires user confirmation after a prompt to continue. The value can be "yes" or "no", by default "no".
- **interlace_rows** : Enables interleaved insertion mode and specifies the number of rows to insert into each subtable at a time. Interleaved insertion mode refers to inserting the specified number of rows into each subtable in sequence and repeating this process until all subtable data has been inserted. The default value is 0, meaning data is inserted into one subtable completely before moving to the next.
This parameter can also be configured in `super_tables`; if configured, the settings in `super_tables` take higher priority and override the global settings.
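To make the interleaved and sequential insertion orders concrete, here is a small, self-contained C sketch (illustrative only, not taosBenchmark code; the table and row counts are invented) that prints the order in which batches would be written for `interlace_rows = 2` versus `interlace_rows = 0`:

```c
#include <stdio.h>

/* Prints the write order for `tables` child tables with `rows_per_table` rows
 * each: interlace_rows > 0 rotates across tables in batches of that size,
 * while interlace_rows = 0 fills one table completely before the next. */
static void show_order(int tables, int rows_per_table, int interlace_rows) {
  int batch = interlace_rows > 0 ? interlace_rows : rows_per_table;
  for (int written = 0; written < rows_per_table; written += batch) {
    for (int t = 0; t < tables; t++) {
      int n = (rows_per_table - written) < batch ? (rows_per_table - written) : batch;
      printf("  table d%d: rows %d-%d\n", t, written + 1, written + n);
    }
  }
}

int main(void) {
  printf("interlace_rows = 2:\n");
  show_order(3, 6, 2);
  printf("interlace_rows = 0:\n");
  show_order(3, 6, 0);
  return 0;
}
```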
@ -464,12 +463,12 @@ For other common parameters, see Common Configuration Parameters.
Configuration parameters for querying specified tables (can specify supertables, subtables, or regular tables) are set in `specified_table_query`.
- **mixed_query** "yes": `Mixed Query` "no": `Normal Query`, default is "no"
`Mixed Query`: All SQL statements in `sqls` are grouped by the number of threads, with each thread executing one group. Each SQL statement in a thread needs to perform `query_times` queries.
`Normal Query `: Each SQL in `sqls` starts `threads` and exits after executing `query_times` times. The next SQL can only be executed after all previous SQL threads have finished executing and exited.
Regardless of whether it is a `Normal Query` or `Mixed Query`, the total number of query executions is the same. The total number of queries = `sqls` * `threads` * `query_times`. The difference is that `Normal Query` starts `threads` for each SQL query, while ` Mixed Query` only starts `threads` once to complete all SQL queries. The number of thread startups for the two is different.
- `General Query`: For each SQL statement in `sqls`, `threads` threads are started to run that statement; each thread exits after executing `query_times` queries, and the next SQL statement starts only after all threads running the current one have finished.
The total number of queries (`General Query`) = the number of `sqls` * `query_times` * `threads`
- `Mixed Query`: All SQL statements in `sqls` are divided into `threads` groups, and each thread executes one group; each SQL statement is executed `query_times` times.
The total number of queries (`Mixed Query`) = the number of `sqls` * `query_times`
- **query_interval** : Query interval, in seconds, default is 0.
- **query_interval** : Query interval, in milliseconds, default is 0.
- **threads** : Number of threads executing the SQL query, default is 1.
@ -491,6 +490,7 @@ The thread mode of the super table query is the same as the `Normal Query` mode
- **sqls** :
- **sql** : The SQL command to execute, required; for supertable queries, keep "xxxx" in the SQL command, and the program automatically replaces it with all subtable names of the supertable.
- **result** : File to save the query results; if not specified, results are not saved.
- **Note**: At most 100 SQL statements can be configured in the `sqls` array.
### Configuration Parameters for Subscription Scenarios

View File

@ -43,7 +43,8 @@ TDengine supports `UNION ALL` and `UNION` operators. UNION ALL combines the resu
| 9 | LIKE | BINARY, NCHAR, and VARCHAR | Matches the specified pattern string with wildcard |
| 10 | NOT LIKE | BINARY, NCHAR, and VARCHAR | Does not match the specified pattern string with wildcard |
| 11 | MATCH, NMATCH | BINARY, NCHAR, and VARCHAR | Regular expression match |
| 12 | CONTAINS | JSON | Whether a key exists in JSON |
| 12 | REGEXP, NOT REGEXP | BINARY, NCHAR, and VARCHAR | Regular expression match |
| 13 | CONTAINS | JSON | Whether a key exists in JSON |
LIKE conditions use wildcard strings for matching checks, with the following rules:
@ -51,7 +52,7 @@ LIKE conditions use wildcard strings for matching checks, with the following rul
- If you want to match an underscore character that is originally in the string, you can write it as \_ in the wildcard string, i.e., add a backslash to escape it.
- The wildcard string cannot exceed 100 bytes in length. It is not recommended to use too long wildcard strings, as it may severely affect the performance of the LIKE operation.
MATCH and NMATCH conditions use regular expressions for matching, with the following rules:
MATCH/REGEXP and NMATCH/NOT REGEXP conditions use regular expressions for matching, with the following rules:
- Supports regular expressions that comply with the POSIX standard, see Regular Expressions for specific standards.
- When MATCH matches a regular expression, it returns TRUE. When NMATCH does not match a regular expression, it returns TRUE.
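As an illustration of the REGEXP alias, the following C-client sketch runs the same filter once with MATCH and once with REGEXP. It uses only standard TDengine C API calls (taos_connect, taos_query, taos_errno, taos_errstr, taos_free_result, taos_close); the connection parameters and the `meters`/`location` names are assumptions for the example:

```c
#include <stdio.h>
#include "taos.h"  // TDengine C client header

int main(void) {
  // Hypothetical connection; adjust host, credentials, and database as needed.
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "test", 0);
  if (conn == NULL) return 1;

  const char *sqls[] = {
      "SELECT ts, location FROM meters WHERE location MATCH '^San.*'",
      "SELECT ts, location FROM meters WHERE location REGEXP '^San.*'",  // same semantics as MATCH
  };
  for (int i = 0; i < 2; i++) {
    TAOS_RES *res = taos_query(conn, sqls[i]);
    if (taos_errno(res) != 0) {
      fprintf(stderr, "query failed: %s\n", taos_errstr(res));
    }
    taos_free_result(res);
  }
  taos_close(conn);
  return 0;
}
```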

View File

@ -25,6 +25,10 @@ Download links for TDengine 3.x version installation packages are as follows:
import Release from "/components/ReleaseV3";
## 3.3.5.2
<Release type="tdengine" version="3.3.5.2" />
## 3.3.5.0
<Release type="tdengine" version="3.3.5.0" />

View File

@ -0,0 +1,43 @@
---
title: TDengine 3.3.5.2 Release Notes
sidebar_label: 3.3.5.2
description: Version 3.3.5.2 Notes
slug: /release-history/release-notes/3.3.5.2
---
## Features
1. feat: taosX now supports creating multiple supertables from templates for MQTT data sources
## Enhancements
1. enh: improve taosX error messages when the database is invalid
2. enh: use Poetry dependency groups and reduce the dependencies required for installation [#251](https://github.com/taosdata/taos-connector-python/issues/251)
3. enh: improve backup and restore via taosX
4. enh: during multi-level storage data migration, an overly long migration may cause the vnode to switch leader
5. enh: adjust the systemctl strategy for managing the taosd process: if three consecutive restarts fail within 60 seconds, the next restart is delayed until 900 seconds later
## Fixes
1. fix: the maxRetryWaitTime parameter controls the client's maximum reconnection timeout when the cluster cannot provide services, but it does not take effect when a Sync timeout error is encountered
2. fix: subscriptions now see the updated tag value immediately after a subtable's tag value is modified
3. fix: the tmq_consumer_poll function for data subscription does not return an error code when the call fails
4. fix: taosd may crash when more than 100 views are created and the show views command is executed
5. fix: when using stmt2 to insert data, if not all data columns are bound, the insertion operation will fail
6. fix: when using stmt2 to insert data, if the database name or table name is enclosed in backticks, the insertion operation will fail
7. fix: when closing a vnode, if there are ongoing file merge tasks, taosd may crash
8. fix: frequent execution of the “drop table with tb_uid” statement may lead to a deadlock in taosd
9. fix: the potential deadlock during the switching of log files
10. fix: prohibit the creation of databases with the same names as system databases (information_schema, performance_schema)
11. fix: when the inner query of a nested query comes from a supertable, the sorting information cannot be pushed up
12. fix: incorrect error reporting when attempting to write Geometry data types that do not conform to topological specifications through the STMT interface
13. fix: when using the percentile function and session window in a query statement, if an error occurs, taosd may crash
14. fix: the issue of being unable to dynamically modify system parameters
15. fix: occasional "Translict transaction" errors in replication
16. fix: when the same consumer unsubscribes and immediately attempts to subscribe to other topics, the subscription API returns an error
17. fix: CVE-2022-28948 security issue in the Go connector
18. fix: when a subquery in a view contains an ORDER BY clause with an alias, and the query function itself also has an alias, querying the view will result in an error
19. fix: when changing a database from single replica to multi-replica, the modification fails if there is metadata generated by earlier versions that is no longer used in the new version
20. fix: column names were not correctly copied when using SELECT * FROM subqueries
21. fix: applying the max/min functions to string-type data produces inaccurate results and may crash taosd
22. fix: stream computing does not support the use of the HAVING clause, but no error is reported during creation
23. fix: the version information displayed by taos shell for the server is inaccurate, such as being unable to correctly distinguish between the community edition and the enterprise edition
24. fix: in certain specific query scenarios, when JOIN and CAST are used together, taosd may crash

View File

@ -5,6 +5,7 @@ slug: /release-history/release-notes
[3.3.5.2](./3.3.5.2)
[3.3.5.0](./3-3-5-0/)
[3.3.4.8](./3-3-4-8/)
[3.3.4.3](./3-3-4-3/)

View File

@ -263,7 +263,7 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location")
Class<SourceRecords<RowData>> typeClass = (Class<SourceRecords<RowData>>) (Class<?>) SourceRecords.class;
SourceSplitSql sql = new SourceSplitSql("select ts, `current`, voltage, phase, tbname from meters");
TDengineSource<SourceRecords<RowData>> source = new TDengineSource<>(connProps, sql, typeClass);
DataStreamSource<SourceRecords<RowData>> input = env.fromSource(source, WatermarkStrategy.noWatermarks(), "kafka-source");
DataStreamSource<SourceRecords<RowData>> input = env.fromSource(source, WatermarkStrategy.noWatermarks(), "tdengine-source");
DataStream<String> resultStream = input.map((MapFunction<SourceRecords<RowData>, String>) records -> {
StringBuilder sb = new StringBuilder();
Iterator<RowData> iterator = records.iterator();
@ -304,7 +304,7 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location")
config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER, "RowData");
config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER_ENCODING, "UTF-8");
TDengineCdcSource<RowData> tdengineSource = new TDengineCdcSource<>("topic_meters", config, RowData.class);
DataStreamSource<RowData> input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "kafka-source");
DataStreamSource<RowData> input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "tdengine-source");
DataStream<String> resultStream = input.map((MapFunction<RowData, String>) rowData -> {
StringBuilder sb = new StringBuilder();
sb.append("tsxx: " + rowData.getTimestamp(0, 0) +
@ -343,7 +343,7 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location")
Class<ConsumerRecords<RowData>> typeClass = (Class<ConsumerRecords<RowData>>) (Class<?>) ConsumerRecords.class;
TDengineCdcSource<ConsumerRecords<RowData>> tdengineSource = new TDengineCdcSource<>("topic_meters", config, typeClass);
DataStreamSource<ConsumerRecords<RowData>> input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "kafka-source");
DataStreamSource<ConsumerRecords<RowData>> input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "tdengine-source");
DataStream<String> resultStream = input.map((MapFunction<ConsumerRecords<RowData>, String>) records -> {
Iterator<ConsumerRecord<RowData>> iterator = records.iterator();
StringBuilder sb = new StringBuilder();
@ -388,7 +388,7 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location")
config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER, "com.taosdata.flink.entity.ResultDeserializer");
config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER_ENCODING, "UTF-8");
TDengineCdcSource<ResultBean> tdengineSource = new TDengineCdcSource<>("topic_meters", config, ResultBean.class);
DataStreamSource<ResultBean> input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "kafka-source");
DataStreamSource<ResultBean> input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "tdengine-source");
DataStream<String> resultStream = input.map((MapFunction<ResultBean, String>) rowData -> {
StringBuilder sb = new StringBuilder();
sb.append("ts: " + rowData.getTs() +

View File

@ -91,3 +91,18 @@ taos> select _flow, _fhigh, _frowts, forecast(i32) from foo;
## Built-in Forecasting Algorithms
- [arima](./02-arima.md)
- [holtwinters](./03-holtwinters.md)
- CES (Complex Exponential Smoothing)
- Theta
- Prophet
- XGBoost
- LightGBM
- Multiple Seasonal-Trend decomposition using LOESS (MSTL)
- ETS (Error, Trend, Seasonal)
- Long Short-Term Memory (LSTM)
- Multilayer Perceptron (MLP)
- DeepAR
- N-BEATS
- N-HiTS
- PatchTST (Patch Time Series Transformer)
- Temporal Fusion Transformer
- TimesNet

View File

@ -50,6 +50,13 @@ FROM foo
ANOMALY_WINDOW(foo.i32, "algo=shesd,direction=both,anoms=0.05")
```
Anomaly detection algorithms to be added later:
- Gaussian Process Regression
Anomaly detection algorithms based on change-point detection:
- CUSUM (Cumulative Sum Control Chart)
- PELT (Pruned Exact Linear Time)
### References
1. [68-95-99.7 rule](https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule)
2. https://en.wikipedia.org/wiki/Interquartile_range

View File

@ -3,7 +3,7 @@ title: "数据密度算法"
sidebar_label: "数据密度算法"
---
### Detection Methods Based on Data Density
### Detection Algorithms Based on Data Density / Data Mining
LOF<sup>[1]</sup>: Local Outlier Factor (LOF) is a density-based local outlier detection algorithm proposed by Breunig in 2000; it is suited to data whose clusters have very different densities. Based on how densely the data around each point is distributed, it first computes a local reachability density for every point, and from that derives a local outlier factor for each point.
This factor indicates how anomalous a point is: the larger the factor, the more anomalous the point; the smaller the factor, the less anomalous. Finally, the $topK$ points with the largest factors are returned.
@ -15,6 +15,14 @@ FROM foo
ANOMALY_WINDOW(foo.i32, "algo=lof")
```
Data-mining-based detection algorithms to be added later:
- DBSCAN (Density-Based Spatial Clustering of Applications with Noise)
- K-Nearest Neighbors (KNN)
- Principal Component Analysis (PCA)
Third-party anomaly detection algorithm libraries:
- PyOD
### References
1. Breunig, M. M.; Kriegel, H.-P.; Ng, R. T.; Sander, J. (2000). LOF: Identifying Density-based Local Outliers (PDF). Proceedings of the 2000 ACM SIGMOD International Conference on Management of Data. SIGMOD. pp. 93-104. doi:10.1145/335191.335388. ISBN 1-58113-217-4.

View File

@ -12,6 +12,11 @@ FROM foo
ANOMALY_WINDOW(col1, 'algo=encoder, model=ad_autoencoder_foo');
```
Machine learning (deep learning) anomaly detection algorithms to be added later:
- Isolation Forest
- One-Class Support Vector Machines (SVM)
- Prophet
### References
1. https://en.wikipedia.org/wiki/Autoencoder

View File

@ -0,0 +1,40 @@
---
title: "常见问题"
sidebar_label: "常见问题"
---
<b>1. 创建 anode 失败,返回指定服务无法访问</b>
```bash
taos> create anode '127.0.0.1:6090';
DB error: Analysis service can't access[0x80000441] (0.117446s)
```
Use the `curl` command to check whether the anode service is running properly: a healthy anode service responds to `curl '127.0.0.1:6090'` with the output below.
```bash
TDengine© Time Series Data Analytics Platform (ver 1.0.x)
```
If you see the following result instead, the anode service is not running properly.
```bash
curl: (7) Failed to connect to 127.0.0.1 port 6090: Connection refused
```
If the anode service fails to start or run properly, check the uWSGI log `/var/log/taos/taosanode/taosanode.log` and resolve the problem according to the error messages it contains.
> Do not use `systemctl status taosanode` to check whether taosanode is running properly.
<b>2. The service is running, but queries return "service unavailable"</b>
```bash
taos> select _frowts,forecast(current, 'algo=arima, alpha=95, wncheck=0, rows=20') from d1 where ts<='2017-07-14 10:40:09.999';
DB error: Analysis service can't access[0x80000441] (60.195613s)
```
The default timeout for data analysis is 60 seconds. This error means the analysis took longer than the maximum wait time; try reducing the amount of input data or switching to another analysis algorithm, then run the query again.
<b>3. The result contains an invalid JSON format error (Invalid json format)</b>
The analysis result returned from the anode to TDengine is malformed. Check the anode log `/var/log/taos/taosanode/taosanode.app.log` for the specific error information.

View File

@ -24,7 +24,8 @@ Flink Connector 支持所有能运行 Flink 1.19 及以上版本的平台。
## Version History
| Flink Connector Version | Major Changes | TDengine Version |
| ------------------| ------------------------------------ | ---------------- |
| 2.0.0 | 1. Supports SQL queries on data in the TDengine database<br/> 2. Supports CDC subscription to data in the TDengine database<br/> 3. Supports reading from and writing to the TDengine database via Table SQL | 3.3.5.0 and higher |
| 2.0.1 | Sink supports writing all implemented types that extend RowData | - |
| 2.0.0 | 1. Supports SQL queries on data in the TDengine database<br/> 2. Supports CDC subscription to data in the TDengine database<br/> 3. Supports reading from and writing to the TDengine database via Table SQL | 3.3.5.1 and higher |
| 1.0.0 | Supports the Sink function to write data from other sources into TDengine | 3.3.2.0 and higher |
## Exceptions and Error Codes
@ -111,7 +112,7 @@ env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.AT_LEAST_ONCE);
<dependency>
<groupId>com.taosdata.flink</groupId>
<artifactId>flink-connector-tdengine</artifactId>
<version>2.0.0</version>
<version>2.0.1</version>
</dependency>
```

View File

@ -169,6 +169,9 @@ INFO: Spend 26.9530 second completed total queries: 30000, the QPS of all thread
- The first line shows the latency percentile distribution for the 3 threads that each executed 10000 queries; `SQL command` is the query statement under test
- The second line indicates that a total of 10000 * 3 = 30000 queries were completed
- The third line indicates that the total query time is 26.9530 seconds and the query rate per second (QPS) is 1113.049 queries/second
- If the `continue_if_fail` option is set to `yes` for the query, the last line also reports the number of failed requests and the error rate, in the format "error + number of failed requests (error rate)"
- QPS = number of successful requests / elapsed time (in seconds)
- Error rate = number of failed requests / (number of successful requests + number of failed requests)
#### Subscription Metrics
@ -207,12 +210,12 @@ INFO: Consumed total msgs: 3000, total rows: 30000000
In insert scenarios, `filetype` must be set to `insert`. For this and other common parameters, see [Common Configuration Parameters](#通用配置参数).
- ** keep_trying ** : Number of retries after a failure; no retry by default. Requires v3.0.9 or later.
- **keep_trying** : Number of retries after a failure; no retry by default. Requires v3.0.9 or later.
- ** trying_interval ** : Interval between retries after a failure, in milliseconds; effective only when keep_trying specifies retries. Requires v3.0.9 or later.
- ** childtable_from and childtable_to ** : Specify the range of child tables to write to, as the half-open interval [childtable_from, childtable_to).
- **trying_interval** : Interval between retries after a failure, in milliseconds; effective only when keep_trying specifies retries. Requires v3.0.9 or later.
- **childtable_from and childtable_to** : Specify the range of child tables to write to, as the half-open interval [childtable_from, childtable_to).

- ** continue_if_fail ** : Lets the user define the behavior after a failure
- **continue_if_fail** : Lets the user define the behavior after a failure
"continue_if_fail": "no", taosBenchmark exits automatically on failure (default behavior)
"continue_if_fail": "yes", taosBenchmark warns the user on failure and continues writing
@ -224,7 +227,7 @@ INFO: Consumed total msgs: 3000, total rows: 30000000
- **name** : Database name.
- **drop** : Whether to drop and recreate the database if it already exists; options are "yes" or "no", default "yes"
- **drop** : Whether to drop the database if it already exists; options are "yes" or "no", default "yes"
#### Stream Computing Configuration Parameters
@ -250,9 +253,9 @@ INFO: Consumed total msgs: 3000, total rows: 30000000
- **child_table_exists** : Whether the child tables already exist; default "no", options "yes" or "no".
- **child_table_count** : Number of child tables, default 10.
- **childtable_count** : Number of child tables, default 10.
- **child_table_prefix** : Prefix for child table names; mandatory, no default value.
- **childtable_prefix** : Prefix for child table names; mandatory, no default value.
- **escape_character** : Whether supertable and child table names contain escape characters; default "no", options "yes" or "no".
@ -319,7 +322,7 @@ INFO: Consumed total msgs: 3000, total rows: 30000000
- **min** : Minimum value for the column/tag of this data type. Generated values will be greater than or equal to the minimum value.
- **max** : Maximum value for the column/tag of this data type. Generated values will be less than the minimum value.
- **max** : Maximum value for the column/tag of this data type. Generated values will be less than the maximum value.
- **scalingFactor** : Floating-point precision enhancement factor, effective only when the data type is float/double; valid values are positive integers from 1 to 1000000. It is used to enhance the precision of generated floating-point numbers, especially when min or max is small. This attribute enhances the precision after the decimal point by powers of 10: a scalingFactor of 10 adds 1 decimal place, 100 adds 2 places, and so on.
@ -343,15 +346,13 @@ INFO: Consumed total msgs: 3000, total rows: 30000000
- **thread_count** : Number of threads for inserting data, default 8.
- **thread_bind_vgroup** : Whether vgroups are bound to write threads during writing; binding can improve write speed. Values are "yes" or "no", default "no" (the original behavior). When set to "yes", if thread_count is greater than the number of vgroups in the target database, thread_count is automatically adjusted down to the number of vgroups; if thread_count is less than the number of vgroups, the number of write threads is not adjusted: a thread finishes writing one vgroup before moving on to the next, and a vgroup is only ever written by one thread at a time.
- **thread_bind_vgroup** : Whether vgroups are bound to write threads during writing; binding can improve write speed. Values are "yes" or "no", default "no" (the original behavior). When set to "yes", if thread_count is greater than the number of vgroups in the target database, thread_count is automatically adjusted to the number of vgroups; if thread_count is less than the number of vgroups, the number of write threads is not adjusted: a thread finishes writing one vgroup before moving on to the next, and a vgroup is only ever written by one thread at a time.
- **create_table_thread_count** : Number of threads for creating tables, default 8.
- **connection_pool_size** : Number of pre-established connections to the TDengine server. If not configured, it defaults to the specified number of threads.
- **result_file** : Path of the result output file, default ./output.txt.
- **confirm_parameter_prompt** : A toggle parameter that requires user confirmation after a prompt before continuing. Default is false.
- **confirm_parameter_prompt** : A toggle parameter that requires user confirmation after a prompt before continuing. Values are "yes" or "no", default "no".
- **interlace_rows** : Enables interleaved insertion mode and specifies the number of rows to insert into each child table at a time. Interleaved insertion means inserting the specified number of rows into each child table in turn and repeating this process until all data has been inserted. The default value is 0, i.e. data is fully inserted into one child table before moving on to the next.
This parameter can also be configured in `super_tables`; if configured there, the `super_tables` setting takes precedence and overrides the global setting.
@ -381,12 +382,16 @@ interval 控制休眠时间,避免持续查询慢查询消耗 CPU ,单位为
Configuration parameters for querying specified tables (supertables, child tables, or regular tables) are set in `specified_table_query`.
- **mixed_query** : Query mode; "yes" means `Mixed Query`, "no" means `Normal Query`, default "no"
`Mixed Query`: all SQL statements in `sqls` are grouped by the number of threads (`threads`), each thread executes one group, and each SQL statement in a thread is executed `query_times` times
`Normal Query`: each SQL in `sqls` starts `threads` threads, each thread exits after executing `query_times` queries, and the next SQL runs only after all threads of the previous SQL have finished and exited
Whether `Normal Query` or `Mixed Query`, the total number of executed queries is the same: total queries = number of `sqls` * `threads` * `query_times`. The difference is that `Normal Query` starts `threads` threads for every SQL statement, while `Mixed Query` starts `threads` threads only once to execute all SQL statements, so the number of thread startups differs.
- **mixed_query** : Query mode
"yes": `Mixed Query`
"no" (default): `Normal Query`
`Normal Query`: each SQL in `sqls` starts `threads` threads to run that SQL; each thread exits after executing `query_times` queries, and the next SQL starts only after all threads running the current SQL have finished
`Total queries` (`Normal Query`) = number of `sqls` * `query_times` * `threads`
`Mixed Query`: all SQL statements in `sqls` are divided into `threads` groups, each thread executes one group, and each SQL statement is executed `query_times` times
`Total queries` (`Mixed Query`) = number of `sqls` * `query_times`
- **query_interval** : Query interval, in seconds, default 0.
- **query_interval** : Query interval, in milliseconds, default 0.
- **threads** : Number of threads executing the SQL query, default 1.
@ -406,9 +411,9 @@ interval 控制休眠时间,避免持续查询慢查询消耗 CPU ,单位为
- **threads** : Number of threads executing the SQL query, default 1.
- **sqls** :
- **sql** : The SQL command to execute, required; for supertable queries, keep "xxxx" in the SQL command, and the program automatically replaces it with all child table names of the supertable.
replaced with all child table names of the supertable.
- **sql** : The SQL command to execute, required; for supertable queries, "xxxx" must be kept in the SQL command, and it is replaced with all child table names of the supertable before execution.
- **result** : File to save the query results; if not specified, results are not saved.
- **Limit** : At most 100 SQL statements can be configured in the sqls array
### Subscription Scenario Configuration Parameters

View File

@ -45,7 +45,8 @@ TDengine 支持 `UNION ALL` 和 `UNION` 操作符。UNION ALL 将查询返回的
| 9 | LIKE | BINARY, NCHAR, and VARCHAR | Matches the specified pattern string with wildcards |
| 10 | NOT LIKE | BINARY, NCHAR, and VARCHAR | Does not match the specified pattern string with wildcards |
| 11 | MATCH, NMATCH | BINARY, NCHAR, and VARCHAR | Regular expression match |
| 12 | CONTAINS | JSON | Whether a key exists in JSON |
| 12 | REGEXP, NOT REGEXP | BINARY, NCHAR, and VARCHAR | Regular expression match |
| 13 | CONTAINS | JSON | Whether a key exists in JSON |
LIKE conditions use wildcard strings for matching checks, with the following rules:
@ -53,7 +54,7 @@ LIKE 条件使用通配符字符串进行匹配检查,规则如下:
- To match an underscore character that is literally part of the string, write it as \_ in the wildcard string, i.e., escape it with a backslash.
- The wildcard string cannot exceed 100 bytes in length. Very long wildcard strings are not recommended, as they may severely degrade the performance of the LIKE operation.
MATCH and NMATCH conditions use regular expressions for matching, with the following rules:
MATCH/REGEXP and NMATCH/NOT REGEXP conditions use regular expressions for matching, with the following rules:
- Regular expressions conforming to the POSIX standard are supported; see Regular Expressions for the specification.
- MATCH returns TRUE when the regular expression matches; NMATCH returns TRUE when the regular expression does not match.

View File

@ -24,6 +24,10 @@ TDengine 3.x 各版本安装包下载链接如下:
import Release from "/components/ReleaseV3";
## 3.3.5.2
<Release type="tdengine" version="3.3.5.2" />
## 3.3.5.0
<Release type="tdengine" version="3.3.5.0" />

View File

@ -0,0 +1,42 @@
---
title: 3.3.5.2 Release Notes
sidebar_label: 3.3.5.2
description: Release notes for version 3.3.5.2
---
## Features
1. feat: the taosX MQTT data source supports creating multiple supertables from templates
## Enhancements
1. enh: improve taosX error messages when the database is unavailable
2. enh: use standard Poetry dependency management and reduce the Python connector's installation dependencies [#251](https://github.com/taosdata/taos-connector-python/issues/251)
3. enh: optimize taosX incremental backup and restore
4. enh: during multi-level storage data migration, an overly long migration may cause the vnode to switch leader
5. enh: adjust the systemctl strategy for supervising the taosd process: if three consecutive restarts fail within 60 seconds, the next restart is delayed until 900 seconds later
## Fixes
1. fix: the maxRetryWaitTime parameter controls the client's maximum reconnection timeout when the cluster cannot provide services, but it does not take effect when a Sync timeout error is encountered
2. fix: subscriptions now see the updated tag value immediately after a subtable's tag value is modified
3. fix: the tmq_consumer_poll function for data subscription does not return an error code when the call fails
4. fix: taosd may crash when more than 100 views are created and the show views command is executed
5. fix: when using stmt2 to insert data, the insertion fails if not all data columns are bound
6. fix: when using stmt2 to insert data, the insertion fails if the database name or table name is enclosed in backticks
7. fix: taosd may crash if file merge tasks are still in progress when a vnode is closed
8. fix: frequent execution of the drop table with `tb_uid` statement may cause a deadlock in taosd
9. fix: a potential deadlock during log file rotation
10. fix: prohibit creating databases with the same names as the system databases (information_schema, performance_schema)
11. fix: when the inner query of a nested query comes from a supertable, the sorting information cannot be pushed up
12. fix: incorrect error reporting when writing Geometry data that does not conform to topological specifications through the STMT interface
13. fix: taosd may crash when an error occurs in a query that uses the percentile function together with a session window
14. fix: the issue that system parameters could not be modified dynamically
15. fix: occasional "Translict transaction" errors during subscription synchronization
16. fix: when the same consumer unsubscribes and immediately attempts to subscribe to other topics, the subscription API returns an error
17. fix: CVE-2022-28948 security issue in the Go connector
18. fix: when a subquery in a view contains an ORDER BY clause with an alias and the query function itself also has an alias, querying the view results in an error
19. fix: when changing a database from single replica to multi-replica, the modification fails if there is metadata generated by earlier versions that is no longer used in the new version
20. fix: column names were not correctly copied when using SELECT * FROM subqueries
21. fix: applying the max/min functions to string-type data produces inaccurate results and may crash taosd
22. fix: stream computing does not support the HAVING clause, but no error is reported during creation
23. fix: the server version shown by the taos shell is inaccurate, e.g. it cannot correctly distinguish the community edition from the enterprise edition
24. fix: in certain query scenarios, taosd may crash when JOIN and CAST are used together

View File

@ -4,6 +4,7 @@ sidebar_label: 版本说明
description: Release notes for each version
---
[3.3.5.2](./3.3.5.2)
[3.3.5.0](./3.3.5.0)
[3.3.4.8](./3.3.4.8)
[3.3.4.3](./3.3.4.3)

View File

@ -161,6 +161,7 @@ typedef enum EStreamType {
STREAM_PARTITION_DELETE_DATA,
STREAM_GET_RESULT,
STREAM_DROP_CHILD_TABLE,
STREAM_NOTIFY_EVENT,
} EStreamType;
#pragma pack(push, 1)
@ -409,6 +410,9 @@ typedef struct STUidTagInfo {
#define UD_GROUPID_COLUMN_INDEX 1
#define UD_TAG_COLUMN_INDEX 2
// stream notify event block column
#define NOTIFY_EVENT_STR_COLUMN_INDEX 0
int32_t taosGenCrashJsonMsg(int signum, char** pMsg, int64_t clusterId, int64_t startTime);
int32_t dumpConfToDataBlock(SSDataBlock* pBlock, int32_t startCol);

View File

@ -285,6 +285,8 @@ bool isAutoTableName(char* ctbName);
int32_t buildCtbNameAddGroupId(const char* stbName, char* ctbName, uint64_t groupId, size_t cap);
int32_t buildCtbNameByGroupId(const char* stbName, uint64_t groupId, char** pName);
int32_t buildCtbNameByGroupIdImpl(const char* stbName, uint64_t groupId, char* pBuf);
int32_t buildSinkDestTableName(char* parTbName, const char* stbFullName, uint64_t gid, bool newSubTableRule,
char** dstTableName);
int32_t trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolList);

View File

@ -34,6 +34,9 @@ extern "C" {
#define GLOBAL_CONFIG_FILE_VERSION 1
#define LOCAL_CONFIG_FILE_VERSION 1
#define RPC_MEMORY_USAGE_RATIO 0.1
#define QUEUE_MEMORY_USAGE_RATIO 0.6
typedef enum {
DND_CA_SM4 = 1,
} EEncryptAlgor;
@ -110,6 +113,7 @@ extern int32_t tsNumOfQnodeFetchThreads;
extern int32_t tsNumOfSnodeStreamThreads;
extern int32_t tsNumOfSnodeWriteThreads;
extern int64_t tsQueueMemoryAllowed;
extern int64_t tsApplyMemoryAllowed;
extern int32_t tsRetentionSpeedLimitMB;
extern int32_t tsNumOfCompactThreads;

View File

@ -269,6 +269,7 @@ typedef enum ENodeType {
QUERY_NODE_TSMA_OPTIONS,
QUERY_NODE_ANOMALY_WINDOW,
QUERY_NODE_RANGE_AROUND,
QUERY_NODE_STREAM_NOTIFY_OPTIONS,
// Statement nodes are used in parser and planner module.
QUERY_NODE_SET_OPERATOR = 100,
@ -2960,6 +2961,11 @@ typedef struct {
// 3.3.0.0
SArray* pCols; // array of SField
int64_t smaId;
// 3.3.6.0
SArray* pNotifyAddrUrls;
int32_t notifyEventTypes;
int32_t notifyErrorHandle;
int8_t notifyHistory;
} SCMCreateStreamReq;
typedef struct {

View File

@ -98,6 +98,9 @@ int32_t qSetTaskId(qTaskInfo_t tinfo, uint64_t taskId, uint64_t queryId);
int32_t qSetStreamOpOpen(qTaskInfo_t tinfo);
int32_t qSetStreamNotifyInfo(qTaskInfo_t tinfo, int32_t eventTypes, const SSchemaWrapper* pSchemaWrapper,
const char* stbFullName, bool newSubTableRule);
/**
* Set multiple input data blocks for the stream scan.
* @param tinfo

View File

@ -566,19 +566,44 @@ typedef struct SStreamOptions {
int64_t setFlag;
} SStreamOptions;
typedef enum EStreamNotifyOptionSetFlag {
SNOTIFY_OPT_ERROR_HANDLE_SET = BIT_FLAG_MASK(0),
SNOTIFY_OPT_NOTIFY_HISTORY_SET = BIT_FLAG_MASK(1),
} EStreamNotifyOptionSetFlag;
typedef enum EStreamNotifyEventType {
SNOTIFY_EVENT_WINDOW_OPEN = BIT_FLAG_MASK(0),
SNOTIFY_EVENT_WINDOW_CLOSE = BIT_FLAG_MASK(1),
} EStreamNotifyEventType;
typedef enum EStreamNotifyErrorHandleType {
SNOTIFY_ERROR_HANDLE_PAUSE,
SNOTIFY_ERROR_HANDLE_DROP,
} EStreamNotifyErrorHandleType;
typedef struct SStreamNotifyOptions {
ENodeType type;
SNodeList* pAddrUrls;
EStreamNotifyEventType eventTypes;
EStreamNotifyErrorHandleType errorHandle;
bool notifyHistory;
EStreamNotifyOptionSetFlag setFlag;
} SStreamNotifyOptions;
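A minimal, self-contained sketch of how bit-flag event types in the style of EStreamNotifyEventType are typically combined and tested; this mirrors the header's approach but is not TDengine code, and BIT_FLAG_MASK is redefined locally for the example:

```c
#include <stdio.h>

/* Local stand-ins for the header's bit-flag helpers and event types. */
#define BIT_FLAG_MASK(n)         (1 << (n))
#define BIT_FLAG_TEST_MASK(v, m) (((v) & (m)) != 0)

enum {
  EVT_WINDOW_OPEN  = BIT_FLAG_MASK(0),  /* analogous to SNOTIFY_EVENT_WINDOW_OPEN */
  EVT_WINDOW_CLOSE = BIT_FLAG_MASK(1),  /* analogous to SNOTIFY_EVENT_WINDOW_CLOSE */
};

int main(void) {
  int eventTypes = EVT_WINDOW_OPEN | EVT_WINDOW_CLOSE;  /* notify on both events */
  printf("notify on window open:  %d\n", BIT_FLAG_TEST_MASK(eventTypes, EVT_WINDOW_OPEN));
  printf("notify on window close: %d\n", BIT_FLAG_TEST_MASK(eventTypes, EVT_WINDOW_CLOSE));
  return 0;
}
```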
typedef struct SCreateStreamStmt {
ENodeType type;
char streamName[TSDB_TABLE_NAME_LEN];
char targetDbName[TSDB_DB_NAME_LEN];
char targetTabName[TSDB_TABLE_NAME_LEN];
bool ignoreExists;
SStreamOptions* pOptions;
SNode* pQuery;
SNode* pPrevQuery;
SNodeList* pTags;
SNode* pSubtable;
SNodeList* pCols;
SCMCreateStreamReq* pReq;
ENodeType type;
char streamName[TSDB_TABLE_NAME_LEN];
char targetDbName[TSDB_DB_NAME_LEN];
char targetTabName[TSDB_TABLE_NAME_LEN];
bool ignoreExists;
SStreamOptions* pOptions;
SNode* pQuery;
SNode* pPrevQuery;
SNodeList* pTags;
SNode* pSubtable;
SNodeList* pCols;
SStreamNotifyOptions* pNotifyOptions;
SCMCreateStreamReq* pReq;
} SCreateStreamStmt;
typedef struct SDropStreamStmt {

View File

@ -65,10 +65,14 @@ typedef struct SStreamTaskSM SStreamTaskSM;
typedef struct SStreamQueueItem SStreamQueueItem;
typedef struct SActiveCheckpointInfo SActiveCheckpointInfo;
#define SSTREAM_TASK_VER 4
#define SSTREAM_TASK_INCOMPATIBLE_VER 1
#define SSTREAM_TASK_NEED_CONVERT_VER 2
#define SSTREAM_TASK_SUBTABLE_CHANGED_VER 3
#define SSTREAM_TASK_VER 5
#define SSTREAM_TASK_INCOMPATIBLE_VER 1
#define SSTREAM_TASK_NEED_CONVERT_VER 2
#define SSTREAM_TASK_SUBTABLE_CHANGED_VER 3 // Append subtable name with groupId
#define SSTREAM_TASK_APPEND_STABLE_NAME_VER 4 // Append subtable name with stableName and groupId
#define SSTREAM_TASK_ADD_NOTIFY_VER 5 // Support event notification at window open/close
#define IS_NEW_SUBTB_RULE(_t) (((_t)->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER) && ((_t)->subtableWithoutMd5 != 1))
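For context, a hedged sketch of how a task's serialized version is usually checked against constants like the ones above before newer behaviour is enabled; the struct and macros below are simplified stand-ins, not the actual SStreamTask handling code:

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for SStreamTask with only the fields needed here. */
typedef struct {
  int64_t ver;
  int8_t  subtableWithoutMd5;
} DemoStreamTask;

#define DEMO_SUBTABLE_CHANGED_VER 3  /* cf. SSTREAM_TASK_SUBTABLE_CHANGED_VER */
#define DEMO_ADD_NOTIFY_VER       5  /* cf. SSTREAM_TASK_ADD_NOTIFY_VER */
#define DEMO_IS_NEW_SUBTB_RULE(t) (((t)->ver >= DEMO_SUBTABLE_CHANGED_VER) && ((t)->subtableWithoutMd5 != 1))

int main(void) {
  DemoStreamTask task = {.ver = 5, .subtableWithoutMd5 = 0};
  printf("new subtable rule: %d\n", DEMO_IS_NEW_SUBTB_RULE(&task));
  /* Assumption for the sketch: notification info is only present for tasks
   * serialized at version 5 or later. */
  printf("has notify info:   %d\n", task.ver >= DEMO_ADD_NOTIFY_VER);
  return 0;
}
```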
extern int32_t streamMetaRefPool;
extern int32_t streamTaskRefPool;
@ -427,6 +431,15 @@ typedef struct STaskCheckInfo {
TdThreadMutex checkInfoLock;
} STaskCheckInfo;
typedef struct SNotifyInfo {
SArray* pNotifyAddrUrls;
int32_t notifyEventTypes;
int32_t notifyErrorHandle;
char* streamName;
char* stbFullName;
SSchemaWrapper* pSchemaWrapper;
} SNotifyInfo;
struct SStreamTask {
int64_t ver;
SStreamTaskId id;
@ -449,6 +462,7 @@ struct SStreamTask {
SStreamState* pState; // state backend
SUpstreamInfo upstreamInfo;
STaskCheckInfo taskCheckInfo;
SNotifyInfo notifyInfo;
// the followings attributes don't be serialized
SScanhistorySchedInfo schedHistoryInfo;

View File

@ -245,6 +245,7 @@ typedef enum ELogicConditionType {
#define TSDB_OFFSET_LEN 64 // it is a null-terminated string
#define TSDB_USER_CGROUP_LEN (TSDB_USER_LEN + TSDB_CGROUP_LEN) // it is a null-terminated string
#define TSDB_STREAM_NAME_LEN 193 // it is a null-terminated string
#define TSDB_STREAM_NOTIFY_URL_LEN 128 // it includes the terminating '\0'
#define TSDB_DB_NAME_LEN 65
#define TSDB_DB_FNAME_LEN (TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN + TSDB_NAME_DELIMITER_LEN)
#define TSDB_PRIVILEDGE_CONDITION_LEN 48 * 1024

View File

@ -79,6 +79,9 @@ void taosResetLog();
void taosDumpData(uint8_t *msg, int32_t len);
void taosSetNoNewFile();
// Fast uint64_t to string conversion, equivalent to sprintf(buf, "%lu", val) but with 10x better performance.
char *u64toaFastLut(uint64_t val, char *buf);
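The comment above only names the technique; the following is an independent, hedged sketch of a two-digits-per-step lookup-table conversion (not the TDengine implementation, and the real function's return convention may differ):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* "00".."99" packed into one table so two digits are emitted per division. */
static const char kDigitPairs[201] =
    "0001020304050607080910111213141516171819"
    "2021222324252627282930313233343536373839"
    "4041424344454647484950515253545556575859"
    "6061626364656667686970717273747576777879"
    "8081828384858687888990919293949596979899";

static char *u64toa_lut_demo(uint64_t val, char *buf) {
  char tmp[20];
  int  pos = 20;
  while (val >= 100) {
    unsigned idx = (unsigned)(val % 100) * 2;
    val /= 100;
    tmp[--pos] = kDigitPairs[idx + 1];
    tmp[--pos] = kDigitPairs[idx];
  }
  if (val >= 10) {
    unsigned idx = (unsigned)val * 2;
    tmp[--pos] = kDigitPairs[idx + 1];
    tmp[--pos] = kDigitPairs[idx];
  } else {
    tmp[--pos] = (char)('0' + val);
  }
  int len = 20 - pos;
  memcpy(buf, tmp + pos, (size_t)len);
  buf[len] = '\0';
  return buf + len;  /* assumption: returns the end of the written string */
}

int main(void) {
  char buf[21];
  u64toa_lut_demo(18446744073709551615ULL, buf);
  printf("%s\n", buf);  /* prints 18446744073709551615 */
  return 0;
}
```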
void taosPrintLog(const char *flags, int32_t level, int32_t dflag, const char *format, ...)
#ifdef __GNUC__
__attribute__((format(printf, 4, 5)))

View File

@ -55,6 +55,7 @@ typedef struct {
typedef enum {
DEF_QITEM = 0,
RPC_QITEM = 1,
APPLY_QITEM = 2,
} EQItype;
typedef void (*FItem)(SQueueInfo *pInfo, void *pItem);

View File

@ -131,6 +131,8 @@ typedef struct SStmtQueue {
SStmtQNode* head;
SStmtQNode* tail;
uint64_t qRemainNum;
TdThreadMutex mutex;
TdThreadCond waitCond;
} SStmtQueue;
typedef struct STscStmt {

View File

@ -253,7 +253,7 @@ void taos_cleanup(void) {
taosCloseRef(id);
nodesDestroyAllocatorSet();
// cleanupAppInfo();
cleanupAppInfo();
rpcCleanup();
tscDebug("rpc cleanup");

View File

@ -39,31 +39,39 @@ static FORCE_INLINE int32_t stmtAllocQNodeFromBuf(STableBufInfo* pTblBuf, void**
}
bool stmtDequeue(STscStmt* pStmt, SStmtQNode** param) {
while (0 == atomic_load_64(&pStmt->queue.qRemainNum)) {
taosUsleep(1);
return false;
(void)taosThreadMutexLock(&pStmt->queue.mutex);
while (0 == atomic_load_64((int64_t*)&pStmt->queue.qRemainNum)) {
(void)taosThreadCondWait(&pStmt->queue.waitCond, &pStmt->queue.mutex);
if (atomic_load_8((int8_t*)&pStmt->queue.stopQueue)) {
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
return false;
}
}
SStmtQNode* orig = pStmt->queue.head;
SStmtQNode* node = pStmt->queue.head->next;
pStmt->queue.head = pStmt->queue.head->next;
// taosMemoryFreeClear(orig);
*param = node;
(void)atomic_sub_fetch_64(&pStmt->queue.qRemainNum, 1);
(void)atomic_sub_fetch_64((int64_t*)&pStmt->queue.qRemainNum, 1);
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
*param = node;
return true;
}
void stmtEnqueue(STscStmt* pStmt, SStmtQNode* param) {
(void)taosThreadMutexLock(&pStmt->queue.mutex);
pStmt->queue.tail->next = param;
pStmt->queue.tail = param;
pStmt->stat.bindDataNum++;
(void)atomic_add_fetch_64(&pStmt->queue.qRemainNum, 1);
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
}
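For readers unfamiliar with the pattern the new queue code follows, here is a generic, self-contained C sketch of a condition-variable-backed queue with a dummy head node and a stop flag; it is illustrative only, and the names and types are not from the TDengine source:

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct DemoNode { struct DemoNode *next; int payload; } DemoNode;

typedef struct {
  DemoNode       *head, *tail;  /* head is a dummy node, as in SStmtQueue */
  long            remain;
  bool            stop;
  pthread_mutex_t mutex;
  pthread_cond_t  cond;
} DemoQueue;

static void demo_enqueue(DemoQueue *q, DemoNode *node) {
  pthread_mutex_lock(&q->mutex);
  q->tail->next = node;
  q->tail = node;
  q->remain++;
  pthread_cond_signal(&q->cond);   /* wake a blocked consumer */
  pthread_mutex_unlock(&q->mutex);
}

static bool demo_dequeue(DemoQueue *q, DemoNode **out) {
  pthread_mutex_lock(&q->mutex);
  while (q->remain == 0) {
    pthread_cond_wait(&q->cond, &q->mutex);
    if (q->stop) {                 /* woken for shutdown, not for data */
      pthread_mutex_unlock(&q->mutex);
      return false;
    }
  }
  *out = q->head->next;            /* skip the dummy head */
  q->head = q->head->next;
  q->remain--;
  pthread_mutex_unlock(&q->mutex);
  return true;
}

int main(void) {
  DemoNode dummy = {0}, item = {.next = NULL, .payload = 42};
  DemoQueue q = {.head = &dummy, .tail = &dummy, .remain = 0, .stop = false};
  pthread_mutex_init(&q.mutex, NULL);
  pthread_cond_init(&q.cond, NULL);

  demo_enqueue(&q, &item);
  DemoNode *got = NULL;
  if (demo_dequeue(&q, &got)) printf("dequeued payload: %d\n", got->payload);

  pthread_cond_destroy(&q.cond);
  pthread_mutex_destroy(&q.mutex);
  return 0;
}
```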
static int32_t stmtCreateRequest(STscStmt* pStmt) {
@ -415,9 +423,11 @@ void stmtResetQueueTableBuf(STableBufInfo* pTblBuf, SStmtQueue* pQueue) {
pTblBuf->buffIdx = 1;
pTblBuf->buffOffset = sizeof(*pQueue->head);
(void)taosThreadMutexLock(&pQueue->mutex);
pQueue->head = pQueue->tail = pTblBuf->pCurBuff;
pQueue->qRemainNum = 0;
pQueue->head->next = NULL;
(void)taosThreadMutexUnlock(&pQueue->mutex);
}
int32_t stmtCleanExecInfo(STscStmt* pStmt, bool keepTable, bool deepClean) {
@ -809,6 +819,8 @@ int32_t stmtStartBindThread(STscStmt* pStmt) {
}
int32_t stmtInitQueue(STscStmt* pStmt) {
(void)taosThreadCondInit(&pStmt->queue.waitCond, NULL);
(void)taosThreadMutexInit(&pStmt->queue.mutex, NULL);
STMT_ERR_RET(stmtAllocQNodeFromBuf(&pStmt->sql.siInfo.tbBuf, (void**)&pStmt->queue.head));
pStmt->queue.tail = pStmt->queue.head;
@ -1619,11 +1631,18 @@ int stmtClose(TAOS_STMT* stmt) {
pStmt->queue.stopQueue = true;
(void)taosThreadMutexLock(&pStmt->queue.mutex);
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
if (pStmt->bindThreadInUse) {
(void)taosThreadJoin(pStmt->bindThread, NULL);
pStmt->bindThreadInUse = false;
}
(void)taosThreadCondDestroy(&pStmt->queue.waitCond);
(void)taosThreadMutexDestroy(&pStmt->queue.mutex);
STMT_DLOG("stmt %p closed, stbInterlaceMode: %d, statInfo: ctgGetTbMetaNum=>%" PRId64 ", getCacheTbInfo=>%" PRId64
", parseSqlNum=>%" PRId64 ", pStmt->stat.bindDataNum=>%" PRId64
", settbnameAPI:%u, bindAPI:%u, addbatchAPI:%u, execAPI:%u"
@ -1757,7 +1776,9 @@ _return:
}
int stmtGetParamNum(TAOS_STMT* stmt, int* nums) {
int code = 0;
STscStmt* pStmt = (STscStmt*)stmt;
int32_t preCode = pStmt->errCode;
STMT_DLOG_E("start to get param num");
@ -1765,7 +1786,7 @@ int stmtGetParamNum(TAOS_STMT* stmt, int* nums) {
return pStmt->errCode;
}
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS));
STMT_ERRI_JRET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS));
if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 &&
STMT_TYPE_MULTI_INSERT != pStmt->sql.type) {
@ -1777,23 +1798,29 @@ int stmtGetParamNum(TAOS_STMT* stmt, int* nums) {
pStmt->exec.pRequest = NULL;
}
STMT_ERR_RET(stmtCreateRequest(pStmt));
STMT_ERRI_JRET(stmtCreateRequest(pStmt));
if (pStmt->bInfo.needParse) {
STMT_ERR_RET(stmtParseSql(pStmt));
STMT_ERRI_JRET(stmtParseSql(pStmt));
}
if (STMT_TYPE_QUERY == pStmt->sql.type) {
*nums = taosArrayGetSize(pStmt->sql.pQuery->pPlaceholderValues);
} else {
STMT_ERR_RET(stmtFetchColFields(stmt, nums, NULL));
STMT_ERRI_JRET(stmtFetchColFields(stmt, nums, NULL));
}
return TSDB_CODE_SUCCESS;
_return:
pStmt->errCode = preCode;
return code;
}
int stmtGetParam(TAOS_STMT* stmt, int idx, int* type, int* bytes) {
int code = 0;
STscStmt* pStmt = (STscStmt*)stmt;
int32_t preCode = pStmt->errCode;
STMT_DLOG_E("start to get param");
@ -1802,10 +1829,10 @@ int stmtGetParam(TAOS_STMT* stmt, int idx, int* type, int* bytes) {
}
if (STMT_TYPE_QUERY == pStmt->sql.type) {
STMT_RET(TSDB_CODE_TSC_STMT_API_ERROR);
STMT_ERRI_JRET(TSDB_CODE_TSC_STMT_API_ERROR);
}
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS));
STMT_ERRI_JRET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS));
if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 &&
STMT_TYPE_MULTI_INSERT != pStmt->sql.type) {
@ -1817,27 +1844,29 @@ int stmtGetParam(TAOS_STMT* stmt, int idx, int* type, int* bytes) {
pStmt->exec.pRequest = NULL;
}
STMT_ERR_RET(stmtCreateRequest(pStmt));
STMT_ERRI_JRET(stmtCreateRequest(pStmt));
if (pStmt->bInfo.needParse) {
STMT_ERR_RET(stmtParseSql(pStmt));
STMT_ERRI_JRET(stmtParseSql(pStmt));
}
int32_t nums = 0;
TAOS_FIELD_E* pField = NULL;
STMT_ERR_RET(stmtFetchColFields(stmt, &nums, &pField));
STMT_ERRI_JRET(stmtFetchColFields(stmt, &nums, &pField));
if (idx >= nums) {
tscError("idx %d is too big", idx);
taosMemoryFree(pField);
STMT_ERR_RET(TSDB_CODE_INVALID_PARA);
STMT_ERRI_JRET(TSDB_CODE_INVALID_PARA);
}
*type = pField[idx].type;
*bytes = pField[idx].bytes;
taosMemoryFree(pField);
_return:
return TSDB_CODE_SUCCESS;
taosMemoryFree(pField);
pStmt->errCode = preCode;
return code;
}
TAOS_RES* stmtUseResult(TAOS_STMT* stmt) {

View File

@ -39,31 +39,35 @@ static FORCE_INLINE int32_t stmtAllocQNodeFromBuf(STableBufInfo* pTblBuf, void**
}
static bool stmtDequeue(STscStmt2* pStmt, SStmtQNode** param) {
(void)taosThreadMutexLock(&pStmt->queue.mutex);
while (0 == atomic_load_64((int64_t*)&pStmt->queue.qRemainNum)) {
taosUsleep(1);
return false;
(void)taosThreadCondWait(&pStmt->queue.waitCond, &pStmt->queue.mutex);
if (atomic_load_8((int8_t*)&pStmt->queue.stopQueue)) {
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
return false;
}
}
SStmtQNode* orig = pStmt->queue.head;
SStmtQNode* node = pStmt->queue.head->next;
pStmt->queue.head = pStmt->queue.head->next;
// taosMemoryFreeClear(orig);
*param = node;
(void)atomic_sub_fetch_64((int64_t*)&pStmt->queue.qRemainNum, 1);
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
return true;
}
static void stmtEnqueue(STscStmt2* pStmt, SStmtQNode* param) {
(void)taosThreadMutexLock(&pStmt->queue.mutex);
pStmt->queue.tail->next = param;
pStmt->queue.tail = param;
pStmt->stat.bindDataNum++;
(void)atomic_add_fetch_64((int64_t*)&pStmt->queue.qRemainNum, 1);
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
}
static int32_t stmtCreateRequest(STscStmt2* pStmt) {
@ -339,9 +343,11 @@ static void stmtResetQueueTableBuf(STableBufInfo* pTblBuf, SStmtQueue* pQueue) {
pTblBuf->buffIdx = 1;
pTblBuf->buffOffset = sizeof(*pQueue->head);
(void)taosThreadMutexLock(&pQueue->mutex);
pQueue->head = pQueue->tail = pTblBuf->pCurBuff;
pQueue->qRemainNum = 0;
pQueue->head->next = NULL;
(void)taosThreadMutexUnlock(&pQueue->mutex);
}
static int32_t stmtCleanExecInfo(STscStmt2* pStmt, bool keepTable, bool deepClean) {
@ -735,6 +741,8 @@ static int32_t stmtStartBindThread(STscStmt2* pStmt) {
}
static int32_t stmtInitQueue(STscStmt2* pStmt) {
(void)taosThreadCondInit(&pStmt->queue.waitCond, NULL);
(void)taosThreadMutexInit(&pStmt->queue.mutex, NULL);
STMT_ERR_RET(stmtAllocQNodeFromBuf(&pStmt->sql.siInfo.tbBuf, (void**)&pStmt->queue.head));
pStmt->queue.tail = pStmt->queue.head;
@ -1066,13 +1074,16 @@ static int stmtFetchColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIELD_E
}
static int stmtFetchStbColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIELD_ALL** fields) {
int32_t code = 0;
int32_t preCode = pStmt->errCode;
if (pStmt->errCode != TSDB_CODE_SUCCESS) {
return pStmt->errCode;
}
if (STMT_TYPE_QUERY == pStmt->sql.type) {
tscError("invalid operation to get query column fileds");
STMT_ERR_RET(TSDB_CODE_TSC_STMT_API_ERROR);
STMT_ERRI_JRET(TSDB_CODE_TSC_STMT_API_ERROR);
}
STableDataCxt** pDataBlock = NULL;
@ -1084,21 +1095,25 @@ static int stmtFetchStbColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIEL
(STableDataCxt**)taosHashGet(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName));
if (NULL == pDataBlock) {
tscError("table %s not found in exec blockHash", pStmt->bInfo.tbFName);
STMT_ERR_RET(TSDB_CODE_APP_ERROR);
STMT_ERRI_JRET(TSDB_CODE_APP_ERROR);
}
}
STMT_ERR_RET(qBuildStmtStbColFields(*pDataBlock, pStmt->bInfo.boundTags, pStmt->bInfo.preCtbname, fieldNum, fields));
STMT_ERRI_JRET(qBuildStmtStbColFields(*pDataBlock, pStmt->bInfo.boundTags, pStmt->bInfo.preCtbname, fieldNum, fields));
if (pStmt->bInfo.tbType == TSDB_SUPER_TABLE) {
pStmt->bInfo.needParse = true;
qDestroyStmtDataBlock(*pDataBlock);
if (taosHashRemove(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName)) != 0) {
tscError("get fileds %s remove exec blockHash fail", pStmt->bInfo.tbFName);
STMT_ERR_RET(TSDB_CODE_APP_ERROR);
STMT_ERRI_JRET(TSDB_CODE_APP_ERROR);
}
}
return TSDB_CODE_SUCCESS;
_return:
pStmt->errCode = preCode;
return code;
}
/*
SArray* stmtGetFreeCol(STscStmt2* pStmt, int32_t* idx) {
@ -1748,11 +1763,18 @@ int stmtClose2(TAOS_STMT2* stmt) {
pStmt->queue.stopQueue = true;
(void)taosThreadMutexLock(&pStmt->queue.mutex);
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
if (pStmt->bindThreadInUse) {
(void)taosThreadJoin(pStmt->bindThread, NULL);
pStmt->bindThreadInUse = false;
}
(void)taosThreadCondDestroy(&pStmt->queue.waitCond);
(void)taosThreadMutexDestroy(&pStmt->queue.mutex);
if (pStmt->options.asyncExecFn && !pStmt->semWaited) {
if (tsem_wait(&pStmt->asyncQuerySem) != 0) {
tscError("failed to wait asyncQuerySem");
@ -1824,7 +1846,7 @@ int stmtParseColFields2(TAOS_STMT2* stmt) {
if (pStmt->exec.pRequest && STMT_TYPE_QUERY == pStmt->sql.type && pStmt->sql.runTimes) {
taos_free_result(pStmt->exec.pRequest);
pStmt->exec.pRequest = NULL;
STMT_ERR_RET(stmtCreateRequest(pStmt));
STMT_ERRI_JRET(stmtCreateRequest(pStmt));
}
STMT_ERRI_JRET(stmtCreateRequest(pStmt));
@ -1850,7 +1872,9 @@ int stmtGetStbColFields2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_ALL** fields) {
}
int stmtGetParamNum2(TAOS_STMT2* stmt, int* nums) {
int32_t code = 0;
STscStmt2* pStmt = (STscStmt2*)stmt;
int32_t preCode = pStmt->errCode;
STMT_DLOG_E("start to get param num");
@ -1858,7 +1882,7 @@ int stmtGetParamNum2(TAOS_STMT2* stmt, int* nums) {
return pStmt->errCode;
}
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS));
STMT_ERRI_JRET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS));
if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 &&
STMT_TYPE_MULTI_INSERT != pStmt->sql.type) {
@ -1870,19 +1894,23 @@ int stmtGetParamNum2(TAOS_STMT2* stmt, int* nums) {
pStmt->exec.pRequest = NULL;
}
STMT_ERR_RET(stmtCreateRequest(pStmt));
STMT_ERRI_JRET(stmtCreateRequest(pStmt));
if (pStmt->bInfo.needParse) {
STMT_ERR_RET(stmtParseSql(pStmt));
STMT_ERRI_JRET(stmtParseSql(pStmt));
}
if (STMT_TYPE_QUERY == pStmt->sql.type) {
*nums = taosArrayGetSize(pStmt->sql.pQuery->pPlaceholderValues);
} else {
STMT_ERR_RET(stmtFetchColFields2(stmt, nums, NULL));
STMT_ERRI_JRET(stmtFetchColFields2(stmt, nums, NULL));
}
return TSDB_CODE_SUCCESS;
_return:
pStmt->errCode = preCode;
return code;
}
TAOS_RES* stmtUseResult2(TAOS_STMT2* stmt) {

View File

@ -74,8 +74,9 @@ enum {
};
typedef struct {
tmr_h timer;
int32_t rsetId;
tmr_h timer;
int32_t rsetId;
TdThreadMutex lock;
} SMqMgmt;
struct tmq_list_t {
@ -1614,13 +1615,21 @@ static void tmqMgmtInit(void) {
tmqMgmt.timer = taosTmrInit(1000, 100, 360000, "TMQ");
if (tmqMgmt.timer == NULL) {
tmqInitRes = terrno;
goto END;
}
tmqMgmt.rsetId = taosOpenRef(10000, tmqFreeImpl);
if (tmqMgmt.rsetId < 0) {
tmqInitRes = terrno;
goto END;
}
if (taosThreadMutexInit(&tmqMgmt.lock, NULL) != 0){
goto END;
}
return;
END:
tmqInitRes = terrno;
}
void tmqMgmtClose(void) {
@ -1629,10 +1638,28 @@ void tmqMgmtClose(void) {
tmqMgmt.timer = NULL;
}
(void) taosThreadMutexLock(&tmqMgmt.lock);
if (tmqMgmt.rsetId >= 0) {
tmq_t *tmq = taosIterateRef(tmqMgmt.rsetId, 0);
int64_t refId = 0;
while (tmq) {
refId = tmq->refId;
if (refId == 0) {
break;
}
atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__CLOSED);
if (taosRemoveRef(tmqMgmt.rsetId, tmq->refId) != 0) {
qWarn("taosRemoveRef tmq refId:%" PRId64 " failed, error:%s", refId, tstrerror(terrno));
}
tmq = taosIterateRef(tmqMgmt.rsetId, refId);
}
taosCloseRef(tmqMgmt.rsetId);
tmqMgmt.rsetId = -1;
}
(void)taosThreadMutexUnlock(&tmqMgmt.lock);
}
tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
@ -2640,8 +2667,13 @@ int32_t tmq_unsubscribe(tmq_t* tmq) {
int32_t tmq_consumer_close(tmq_t* tmq) {
if (tmq == NULL) return TSDB_CODE_INVALID_PARA;
int32_t code = 0;
(void) taosThreadMutexLock(&tmqMgmt.lock);
if (atomic_load_8(&tmq->status) == TMQ_CONSUMER_STATUS__CLOSED){
goto end;
}
tqInfoC("consumer:0x%" PRIx64 " start to close consumer, status:%d", tmq->consumerId, tmq->status);
int32_t code = tmq_unsubscribe(tmq);
code = tmq_unsubscribe(tmq);
if (code == 0) {
atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__CLOSED);
code = taosRemoveRef(tmqMgmt.rsetId, tmq->refId);
@ -2649,6 +2681,9 @@ int32_t tmq_consumer_close(tmq_t* tmq) {
tqErrorC("tmq close failed to remove ref:%" PRId64 ", code:%d", tmq->refId, code);
}
}
end:
(void)taosThreadMutexUnlock(&tmqMgmt.lock);
return code;
}

View File

@ -735,7 +735,7 @@ TEST(stmt2Case, insert_ntb_get_fields_Test) {
{
const char* sql = "insert into stmt2_testdb_4.? values(?,?)";
printf("case 2 : %s\n", sql);
getFieldsError(taos, sql, TSDB_CODE_PAR_TABLE_NOT_EXIST);
getFieldsError(taos, sql, TSDB_CODE_TSC_STMT_TBNAME_ERROR);
}
// case 3 : wrong para nums
@ -1496,8 +1496,51 @@ TEST(stmt2Case, geometry) {
checkError(stmt, code);
ASSERT_EQ(affected_rows, 3);
// test wrong wkb input
unsigned char wkb2[3][61] = {
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xF0, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40,
},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40}};
params[1].buffer = wkb2;
code = taos_stmt2_bind_param(stmt, &bindv, -1);
ASSERT_EQ(code, TSDB_CODE_FUNC_FUNTION_PARA_VALUE);
taos_stmt2_close(stmt);
do_query(taos, "DROP DATABASE IF EXISTS stmt2_testdb_13");
taos_close(taos);
}
// TD-33582
TEST(stmt2Case, errcode) {
TAOS* taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(taos, nullptr);
do_query(taos, "DROP DATABASE IF EXISTS stmt2_testdb_14");
do_query(taos, "CREATE DATABASE IF NOT EXISTS stmt2_testdb_14");
do_query(taos, "use stmt2_testdb_14");
TAOS_STMT2_OPTION option = {0};
TAOS_STMT2* stmt = taos_stmt2_init(taos, &option);
ASSERT_NE(stmt, nullptr);
char* sql = "select * from t where ts > ? and name = ? foo = ?";
int code = taos_stmt2_prepare(stmt, sql, 0);
checkError(stmt, code);
int fieldNum = 0;
TAOS_FIELD_ALL* pFields = NULL;
code = taos_stmt2_get_fields(stmt, &fieldNum, &pFields);
ASSERT_EQ(code, TSDB_CODE_PAR_SYNTAX_ERROR);
// a failed get must not influence the next stmt prepare
sql = "nsert into ? (ts, name) values (?, ?)";
code = taos_stmt_prepare(stmt, sql, 0);
checkError(stmt, code);
}
#pragma GCC diagnostic pop

View File

@ -212,15 +212,6 @@ void insertData(TAOS *taos, TAOS_STMT_OPTIONS *option, const char *sql, int CTB_
void getFields(TAOS *taos, const char *sql, int expectedALLFieldNum, TAOS_FIELD_E *expectedTagFields,
int expectedTagFieldNum, TAOS_FIELD_E *expectedColFields, int expectedColFieldNum) {
// create database and table
do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_3");
do_query(taos, "CREATE DATABASE IF NOT EXISTS stmt_testdb_3");
do_query(taos, "USE stmt_testdb_3");
do_query(
taos,
"CREATE STABLE IF NOT EXISTS stmt_testdb_3.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS "
"(groupId INT, location BINARY(24))");
TAOS_STMT *stmt = taos_stmt_init(taos);
ASSERT_NE(stmt, nullptr);
int code = taos_stmt_prepare(stmt, sql, 0);
@ -267,6 +258,24 @@ void getFields(TAOS *taos, const char *sql, int expectedALLFieldNum, TAOS_FIELD_
taos_stmt_close(stmt);
}
void getFieldsError(TAOS *taos, const char *sql, int expectedErrocode) {
TAOS_STMT *stmt = taos_stmt_init(taos);
ASSERT_NE(stmt, nullptr);
STscStmt *pStmt = (STscStmt *)stmt;
int code = taos_stmt_prepare(stmt, sql, 0);
int fieldNum = 0;
TAOS_FIELD_E *pFields = NULL;
code = taos_stmt_get_tag_fields(stmt, &fieldNum, &pFields);
ASSERT_EQ(code, expectedErrocode);
ASSERT_EQ(pStmt->errCode, TSDB_CODE_SUCCESS);
taosMemoryFree(pFields);
taos_stmt_close(stmt);
}
} // namespace
int main(int argc, char **argv) {
@ -298,6 +307,15 @@ TEST(stmtCase, get_fields) {
TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(taos, nullptr);
// create database and table
do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_3");
do_query(taos, "CREATE DATABASE IF NOT EXISTS stmt_testdb_3");
do_query(taos, "USE stmt_testdb_3");
do_query(
taos,
"CREATE STABLE IF NOT EXISTS stmt_testdb_3.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS "
"(groupId INT, location BINARY(24))");
// normal test
{
TAOS_FIELD_E tagFields[2] = {{"groupid", TSDB_DATA_TYPE_INT, 0, 0, sizeof(int)},
{"location", TSDB_DATA_TYPE_BINARY, 0, 0, 24}};
@ -307,6 +325,12 @@ TEST(stmtCase, get_fields) {
{"phase", TSDB_DATA_TYPE_FLOAT, 0, 0, sizeof(float)}};
getFields(taos, "INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)", 7, &tagFields[0], 2, &colFields[0], 4);
}
// error case [TD-33570]
{ getFieldsError(taos, "INSERT INTO ? VALUES (?,?,?,?)", TSDB_CODE_TSC_STMT_TBNAME_ERROR); }
{ getFieldsError(taos, "INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)", TSDB_CODE_TSC_STMT_TBNAME_ERROR); }
do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_3");
taos_close(taos);
}
@ -520,9 +544,6 @@ TEST(stmtCase, geometry) {
int code = taos_stmt_prepare(stmt, stmt_sql, 0);
checkError(stmt, code);
// code = taos_stmt_set_tbname(stmt, "tb1");
// checkError(stmt, code);
code = taos_stmt_bind_param_batch(stmt, params);
checkError(stmt, code);
@ -532,11 +553,58 @@ TEST(stmtCase, geometry) {
code = taos_stmt_execute(stmt);
checkError(stmt, code);
// test wrong wkb input
unsigned char wkb2[3][61] = {
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xF0, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40,
},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40}};
params[1].buffer = wkb2;
code = taos_stmt_bind_param_batch(stmt, params);
ASSERT_EQ(code, TSDB_CODE_FUNC_FUNTION_PARA_VALUE);
taosMemoryFree(t64_len);
taosMemoryFree(wkb_len);
taos_stmt_close(stmt);
do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_5");
taos_close(taos);
}
// TD-33582
TEST(stmtCase, errcode) {
TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(taos, nullptr);
do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_4");
do_query(taos, "CREATE DATABASE IF NOT EXISTS stmt_testdb_4");
do_query(taos, "USE stmt_testdb_4");
do_query(
taos,
"CREATE STABLE IF NOT EXISTS stmt_testdb_4.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS "
"(groupId INT, location BINARY(24))");
TAOS_STMT *stmt = taos_stmt_init(taos);
ASSERT_NE(stmt, nullptr);
char *sql = "select * from t where ts > ? and name = ? foo = ?";
int code = taos_stmt_prepare(stmt, sql, 0);
checkError(stmt, code);
int fieldNum = 0;
TAOS_FIELD_E *pFields = NULL;
code = stmtGetParamNum(stmt, &fieldNum);
ASSERT_EQ(code, TSDB_CODE_PAR_SYNTAX_ERROR);
code = taos_stmt_get_tag_fields(stmt, &fieldNum, &pFields);
ASSERT_EQ(code, TSDB_CODE_PAR_SYNTAX_ERROR);
// a failed get_fields call should not affect the next stmt prepare
sql = "nsert into ? (ts, name) values (?, ?)";
code = taos_stmt_prepare(stmt, sql, 0);
checkError(stmt, code);
}
#pragma GCC diagnostic pop

View File

@ -54,6 +54,23 @@ target_link_libraries(
INTERFACE api
)
if(NOT ${TD_WINDOWS})
target_include_directories(
common
PUBLIC "$ENV{HOME}/.cos-local.2/include"
)
find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
target_link_libraries(
common
PUBLIC ${CURL_LIBRARY}
PUBLIC ${SSL_LIBRARY}
PUBLIC ${CRYPTO_LIBRARY}
)
endif()
if(${BUILD_S3})
if(${BUILD_WITH_S3})
target_include_directories(
@ -65,9 +82,6 @@ if(${BUILD_S3})
set(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.2)
find_library(S3_LIBRARY s3)
find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
target_link_libraries(
common
@ -87,7 +101,6 @@ if(${BUILD_S3})
find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/)
find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/)
find_library(MINIXML_LIBRARY mxml)
find_library(CURL_LIBRARY curl)
target_link_libraries(
common

View File

@ -9970,6 +9970,16 @@ int32_t tSerializeSCMCreateStreamReq(void *buf, int32_t bufLen, const SCMCreateS
}
TAOS_CHECK_EXIT(tEncodeI64(&encoder, pReq->smaId));
int32_t addrSize = taosArrayGetSize(pReq->pNotifyAddrUrls);
TAOS_CHECK_EXIT(tEncodeI32(&encoder, addrSize));
for (int32_t i = 0; i < addrSize; ++i) {
const char *url = taosArrayGetP(pReq->pNotifyAddrUrls, i);
TAOS_CHECK_EXIT((tEncodeCStr(&encoder, url)));
}
TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->notifyEventTypes));
TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->notifyErrorHandle));
TAOS_CHECK_EXIT(tEncodeI8(&encoder, pReq->notifyHistory));
tEndEncode(&encoder);
_exit:
@ -10104,6 +10114,30 @@ int32_t tDeserializeSCMCreateStreamReq(void *buf, int32_t bufLen, SCMCreateStrea
TAOS_CHECK_EXIT(tDecodeI64(&decoder, &pReq->smaId));
}
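// notify-related fields were appended to the request later; decode them only when present so requests from older encoders still deserialize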
if (!tDecodeIsEnd(&decoder)) {
int32_t addrSize = 0;
TAOS_CHECK_EXIT(tDecodeI32(&decoder, &addrSize));
pReq->pNotifyAddrUrls = taosArrayInit(addrSize, POINTER_BYTES);
if (pReq->pNotifyAddrUrls == NULL) {
TAOS_CHECK_EXIT(terrno);
}
for (int32_t i = 0; i < addrSize; ++i) {
char *url = NULL;
TAOS_CHECK_EXIT(tDecodeCStr(&decoder, &url));
url = taosStrndup(url, TSDB_STREAM_NOTIFY_URL_LEN);
if (url == NULL) {
TAOS_CHECK_EXIT(terrno);
}
if (taosArrayPush(pReq->pNotifyAddrUrls, &url) == NULL) {
taosMemoryFree(url);
TAOS_CHECK_EXIT(terrno);
}
}
TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pReq->notifyEventTypes));
TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pReq->notifyErrorHandle));
TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pReq->notifyHistory));
}
tEndDecode(&decoder);
_exit:
tDecoderClear(&decoder);
@ -10166,6 +10200,7 @@ void tFreeSCMCreateStreamReq(SCMCreateStreamReq *pReq) {
taosArrayDestroy(pReq->fillNullCols);
taosArrayDestroy(pReq->pVgroupVerList);
taosArrayDestroy(pReq->pCols);
taosArrayDestroyP(pReq->pNotifyAddrUrls, NULL);
}
int32_t tEncodeSRSmaParam(SEncoder *pCoder, const SRSmaParam *pRSmaParam) {

View File

@ -3061,6 +3061,33 @@ int32_t buildCtbNameByGroupIdImpl(const char* stbFullName, uint64_t groupId, cha
return code;
}
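// Build the destination child-table name for the sink: reuse the block's parent-table name (appending the group id when the new sub-table naming rule applies), or derive a name from the group id when no parent-table name is set.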
int32_t buildSinkDestTableName(char* parTbName, const char* stbFullName, uint64_t gid, bool newSubTableRule,
char** dstTableName) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
if (parTbName[0]) {
if (newSubTableRule && !isAutoTableName(parTbName) && !alreadyAddGroupId(parTbName, gid) && gid != 0 &&
stbFullName) {
*dstTableName = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN);
TSDB_CHECK_NULL(*dstTableName, code, lino, _end, terrno);
tstrncpy(*dstTableName, parTbName, TSDB_TABLE_NAME_LEN);
code = buildCtbNameAddGroupId(stbFullName, *dstTableName, gid, TSDB_TABLE_NAME_LEN);
TSDB_CHECK_CODE(code, lino, _end);
} else {
*dstTableName = taosStrdup(parTbName);
TSDB_CHECK_NULL(*dstTableName, code, lino, _end, terrno);
}
} else {
code = buildCtbNameByGroupId(stbFullName, gid, dstTableName);
TSDB_CHECK_CODE(code, lino, _end);
}
_end:
return code;
}
// return length of encoded data, return -1 if failed
int32_t blockEncode(const SSDataBlock* pBlock, char* data, size_t dataBuflen, int32_t numOfCols) {
int32_t code = blockDataCheck(pBlock);

View File

@ -500,7 +500,9 @@ int32_t taosSetS3Cfg(SConfig *pCfg) {
TAOS_RETURN(TSDB_CODE_SUCCESS);
}
struct SConfig *taosGetCfg() { return tsCfg; }
struct SConfig *taosGetCfg() {
return tsCfg;
}
static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *inputCfgDir, const char *envFile,
char *apolloUrl) {
@ -818,8 +820,13 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
tsNumOfSnodeWriteThreads = tsNumOfCores / 4;
tsNumOfSnodeWriteThreads = TRANGE(tsNumOfSnodeWriteThreads, 2, 4);
tsQueueMemoryAllowed = tsTotalMemoryKB * 1024 * 0.1;
tsQueueMemoryAllowed = TRANGE(tsQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10LL, TSDB_MAX_MSG_SIZE * 10000LL);
tsQueueMemoryAllowed = tsTotalMemoryKB * 1024 * RPC_MEMORY_USAGE_RATIO * QUEUE_MEMORY_USAGE_RATIO;
tsQueueMemoryAllowed = TRANGE(tsQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * QUEUE_MEMORY_USAGE_RATIO * 10LL,
TSDB_MAX_MSG_SIZE * QUEUE_MEMORY_USAGE_RATIO * 10000LL);
tsApplyMemoryAllowed = tsTotalMemoryKB * 1024 * RPC_MEMORY_USAGE_RATIO * (1 - QUEUE_MEMORY_USAGE_RATIO);
tsApplyMemoryAllowed = TRANGE(tsApplyMemoryAllowed, TSDB_MAX_MSG_SIZE * (1 - QUEUE_MEMORY_USAGE_RATIO) * 10LL,
TSDB_MAX_MSG_SIZE * (1 - QUEUE_MEMORY_USAGE_RATIO) * 10000LL);
tsLogBufferMemoryAllowed = tsTotalMemoryKB * 1024 * 0.1;
tsLogBufferMemoryAllowed = TRANGE(tsLogBufferMemoryAllowed, TSDB_MAX_MSG_SIZE * 10LL, TSDB_MAX_MSG_SIZE * 10000LL);
@ -857,7 +864,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "numOfSnodeSharedThreads", tsNumOfSnodeStreamThreads, 2, 1024, CFG_SCOPE_SERVER, CFG_DYN_SERVER_LAZY,CFG_CATEGORY_LOCAL));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "numOfSnodeUniqueThreads", tsNumOfSnodeWriteThreads, 2, 1024, CFG_SCOPE_SERVER, CFG_DYN_SERVER_LAZY,CFG_CATEGORY_LOCAL));
TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, INT64_MAX, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL));
TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * RPC_MEMORY_USAGE_RATIO * 10L, INT64_MAX, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncElectInterval", tsElectInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncHeartbeatInterval", tsHeartbeatInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncHeartbeatTimeout", tsHeartbeatTimeout, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL));
@ -1569,7 +1576,8 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsNumOfSnodeWriteThreads = pItem->i32;
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "rpcQueueMemoryAllowed");
tsQueueMemoryAllowed = pItem->i64;
tsQueueMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64 * QUEUE_MEMORY_USAGE_RATIO;
tsApplyMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64 * (1 - QUEUE_MEMORY_USAGE_RATIO);
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "simdEnable");
tsSIMDEnable = (bool)pItem->bval;
@ -2392,6 +2400,12 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) {
code = TSDB_CODE_SUCCESS;
goto _exit;
}
if (strcasecmp("rpcQueueMemoryAllowed", name) == 0) {
tsQueueMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64 * QUEUE_MEMORY_USAGE_RATIO;
tsApplyMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64 * (1 - QUEUE_MEMORY_USAGE_RATIO);
code = TSDB_CODE_SUCCESS;
goto _exit;
}
if (strcasecmp(name, "numOfCompactThreads") == 0) {
#ifdef TD_ENTERPRISE
@ -2497,7 +2511,6 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) {
{"experimental", &tsExperimental},
{"numOfRpcSessions", &tsNumOfRpcSessions},
{"rpcQueueMemoryAllowed", &tsQueueMemoryAllowed},
{"shellActivityTimer", &tsShellActivityTimer},
{"readTimeout", &tsReadTimeout},
{"safetyCheckLevel", &tsSafetyCheckLevel},

View File

@ -181,7 +181,7 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) {
req.numOfSupportVnodes = tsNumOfSupportVnodes;
req.numOfDiskCfg = tsDiskCfgNum;
req.memTotal = tsTotalMemoryKB * 1024;
req.memAvail = req.memTotal - tsQueueMemoryAllowed - 16 * 1024 * 1024;
req.memAvail = req.memTotal - tsQueueMemoryAllowed - tsApplyMemoryAllowed - 16 * 1024 * 1024;
tstrncpy(req.dnodeEp, tsLocalEp, TSDB_EP_LEN);
tstrncpy(req.machineId, pMgmt->pData->machineId, TSDB_MACHINE_ID_LEN + 1);

View File

@ -323,7 +323,7 @@ int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
return TSDB_CODE_INVALID_MSG;
}
EQItype itype = APPLY_QUEUE == qtype ? DEF_QITEM : RPC_QITEM;
EQItype itype = APPLY_QUEUE == qtype ? APPLY_QITEM : RPC_QITEM;
SRpcMsg *pMsg;
code = taosAllocateQitem(sizeof(SRpcMsg), itype, pRpc->contLen, (void **)&pMsg);
if (code) {

View File

@ -36,7 +36,8 @@ void Testbase::InitLog(const char* path) {
tstrncpy(tsLogDir, path, PATH_MAX);
taosGetSystemInfo();
tsQueueMemoryAllowed = tsTotalMemoryKB * 0.1;
tsQueueMemoryAllowed = tsTotalMemoryKB * 0.06;
tsApplyMemoryAllowed = tsTotalMemoryKB * 0.04;
if (taosInitLog("taosdlog", 1, false) != 0) {
printf("failed to init log file\n");
}

View File

@ -753,6 +753,77 @@ static int32_t doStreamCheck(SMnode *pMnode, SStreamObj *pStreamObj) {
return TSDB_CODE_SUCCESS;
}
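// copy the notification settings from the create-stream request into a single stream task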
static void *notifyAddrDup(void *p) { return taosStrdup((char *)p); }
static int32_t addStreamTaskNotifyInfo(const SCMCreateStreamReq *createReq, const SStreamObj *pStream,
SStreamTask *pTask) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
TSDB_CHECK_NULL(createReq, code, lino, _end, TSDB_CODE_INVALID_PARA);
TSDB_CHECK_NULL(pTask, code, lino, _end, TSDB_CODE_INVALID_PARA);
pTask->notifyInfo.pNotifyAddrUrls = taosArrayDup(createReq->pNotifyAddrUrls, notifyAddrDup);
TSDB_CHECK_NULL(pTask->notifyInfo.pNotifyAddrUrls, code, lino, _end, terrno);
pTask->notifyInfo.notifyEventTypes = createReq->notifyEventTypes;
pTask->notifyInfo.notifyErrorHandle = createReq->notifyErrorHandle;
pTask->notifyInfo.streamName = taosStrdup(createReq->name);
TSDB_CHECK_NULL(pTask->notifyInfo.streamName, code, lino, _end, terrno);
pTask->notifyInfo.stbFullName = taosStrdup(createReq->targetStbFullName);
TSDB_CHECK_NULL(pTask->notifyInfo.stbFullName, code, lino, _end, terrno);
pTask->notifyInfo.pSchemaWrapper = tCloneSSchemaWrapper(&pStream->outputSchema);
TSDB_CHECK_NULL(pTask->notifyInfo.pSchemaWrapper, code, lino, _end, terrno);
_end:
if (code != TSDB_CODE_SUCCESS) {
mError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
return code;
}
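// propagate the notification settings to every task on every level of the stream, including the fill-history tasks when notifyHistory is set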
static int32_t addStreamNotifyInfo(SCMCreateStreamReq *createReq, SStreamObj *pStream) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
int32_t level = 0;
int32_t nTasks = 0;
SArray *pLevel = NULL;
TSDB_CHECK_NULL(createReq, code, lino, _end, TSDB_CODE_INVALID_PARA);
TSDB_CHECK_NULL(pStream, code, lino, _end, TSDB_CODE_INVALID_PARA);
if (taosArrayGetSize(createReq->pNotifyAddrUrls) == 0) {
goto _end;
}
level = taosArrayGetSize(pStream->tasks);
for (int32_t i = 0; i < level; ++i) {
pLevel = taosArrayGetP(pStream->tasks, i);
nTasks = taosArrayGetSize(pLevel);
for (int32_t j = 0; j < nTasks; ++j) {
code = addStreamTaskNotifyInfo(createReq, pStream, taosArrayGetP(pLevel, j));
TSDB_CHECK_CODE(code, lino, _end);
}
}
if (pStream->conf.fillHistory && createReq->notifyHistory) {
level = taosArrayGetSize(pStream->pHTasksList);
for (int32_t i = 0; i < level; ++i) {
pLevel = taosArrayGetP(pStream->pHTasksList, i);
nTasks = taosArrayGetSize(pLevel);
for (int32_t j = 0; j < nTasks; ++j) {
code = addStreamTaskNotifyInfo(createReq, pStream, taosArrayGetP(pLevel, j));
TSDB_CHECK_CODE(code, lino, _end);
}
}
}
_end:
if (code != TSDB_CODE_SUCCESS) {
mError("%s for stream %s failed at line %d since %s", __func__, pStream->name, lino, tstrerror(code));
}
return code;
}
static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
SStreamObj *pStream = NULL;
@ -850,6 +921,14 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
goto _OVER;
}
// add notify info into all stream tasks
code = addStreamNotifyInfo(&createReq, &streamObj);
if (code != TSDB_CODE_SUCCESS) {
mError("stream:%s failed to add stream notify info since %s", createReq.name, tstrerror(code));
mndTransDrop(pTrans);
goto _OVER;
}
// add stream to trans
code = mndPersistStream(pTrans, &streamObj);
if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_ACTION_IN_PROGRESS) {

View File

@ -75,6 +75,7 @@ set(
"src/tq/tqSnapshot.c"
"src/tq/tqStreamStateSnap.c"
"src/tq/tqStreamTaskSnap.c"
"src/tq/tqStreamNotify.c"
)
aux_source_directory("src/tsdb/" TSDB_SOURCE_FILES)

View File

@ -159,6 +159,11 @@ int32_t buildAutoCreateTableReq(const char* stbFullName, int64_t suid, int32_t n
SArray* pTagArray, bool newSubTableRule, SVCreateTbReq** pReq);
int32_t tqExtractDropCtbDataBlock(const void* data, int32_t len, int64_t ver, void** pRefBlock, int32_t type);
// tq send notifications
int32_t tqInitNotifyHandleMap(SStreamNotifyHandleMap** ppMap);
void tqDestroyNotifyHandleMap(SStreamNotifyHandleMap** ppMap);
int32_t tqSendAllNotifyEvents(const SArray* pBlocks, SStreamTask* pTask, SVnode* pVnode);
#define TQ_ERR_GO_TO_END(c) \
do { \
code = c; \

View File

@ -81,6 +81,8 @@ typedef struct SCommitInfo SCommitInfo;
typedef struct SCompactInfo SCompactInfo;
typedef struct SQueryNode SQueryNode;
typedef struct SStreamNotifyHandleMap SStreamNotifyHandleMap;
#define VNODE_META_TMP_DIR "meta.tmp"
#define VNODE_META_BACKUP_DIR "meta.backup"
@ -255,6 +257,9 @@ int32_t tqProcessTaskCheckpointReadyRsp(STQ* pTq, SRpcMsg* pMsg);
int32_t tqBuildStreamTask(void* pTq, SStreamTask* pTask, int64_t ver);
int32_t tqScanWal(STQ* pTq);
// error injection
void streamMetaFreeTQDuringScanWalError(STQ* pTq);
int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd);
int32_t tqCheckColModifiable(STQ* pTq, int64_t tbUid, int32_t colId);
// tq-mq
@ -496,6 +501,9 @@ struct SVnode {
int64_t blockSeq;
SQHandle* pQuery;
SVMonitorObj monitor;
// Notification Handles
SStreamNotifyHandleMap* pNotifyHandleMap;
};
#define TD_VID(PVNODE) ((PVNODE)->config.vgId)

View File

@ -75,12 +75,14 @@ int32_t tqOpen(const char* path, SVnode* pVnode) {
if (pTq == NULL) {
return terrno;
}
pVnode->pTq = pTq;
pTq->pVnode = pVnode;
pTq->path = taosStrdup(path);
if (pTq->path == NULL) {
return terrno;
}
pTq->pVnode = pVnode;
pTq->pHandle = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK);
if (pTq->pHandle == NULL) {
@ -131,11 +133,19 @@ void tqClose(STQ* pTq) {
return;
}
int32_t vgId = 0;
if (pTq->pVnode != NULL) {
vgId = TD_VID(pTq->pVnode);
} else if (pTq->pStreamMeta != NULL) {
vgId = pTq->pStreamMeta->vgId;
}
// close the stream meta firstly
streamMetaClose(pTq->pStreamMeta);
void* pIter = taosHashIterate(pTq->pPushMgr, NULL);
while (pIter) {
STqHandle* pHandle = *(STqHandle**)pIter;
int32_t vgId = TD_VID(pTq->pVnode);
if (pHandle->msg != NULL) {
tqPushEmptyDataRsp(pHandle, vgId);
rpcFreeCont(pHandle->msg->pCont);
@ -151,8 +161,12 @@ void tqClose(STQ* pTq) {
taosHashCleanup(pTq->pOffset);
taosMemoryFree(pTq->path);
tqMetaClose(pTq);
qDebug("vgId:%d end to close tq", pTq->pStreamMeta != NULL ? pTq->pStreamMeta->vgId : -1);
streamMetaClose(pTq->pStreamMeta);
qDebug("vgId:%d end to close tq", vgId);
#if 0
streamMetaFreeTQDuringScanWalError(pTq);
#endif
taosMemoryFree(pTq);
}

View File

@ -16,8 +16,6 @@
#include "tcommon.h"
#include "tq.h"
#define IS_NEW_SUBTB_RULE(_t) (((_t)->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER) && ((_t)->subtableWithoutMd5 != 1))
typedef struct STableSinkInfo {
uint64_t uid;
tstr name;
@ -983,7 +981,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
tqDebug("s-task:%s append groupId:%" PRId64 " for generated dstTable:%s", id, groupId, dstTableName);
if (pTask->ver == SSTREAM_TASK_SUBTABLE_CHANGED_VER) {
code = buildCtbNameAddGroupId(NULL, dstTableName, groupId, sizeof(pDataBlock->info.parTbName));
} else if (pTask->ver > SSTREAM_TASK_SUBTABLE_CHANGED_VER && stbFullName) {
} else if (pTask->ver >= SSTREAM_TASK_APPEND_STABLE_NAME_VER && stbFullName) {
code = buildCtbNameAddGroupId(stbFullName, dstTableName, groupId, sizeof(pDataBlock->info.parTbName));
}
if (code != TSDB_CODE_SUCCESS) {
@ -1150,6 +1148,12 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
return;
}
code = tqSendAllNotifyEvents(pBlocks, pTask, pVnode);
if (code != TSDB_CODE_SUCCESS) {
tqError("vgId: %d, s-task:%s failed to send all event notifications", vgId, id);
// continue processing even if notification fails
}
bool onlySubmitData = hasOnlySubmitData(pBlocks, numOfBlocks);
if (!onlySubmitData || pTask->subtableWithoutMd5 == 1) {
tqDebug("vgId:%d, s-task:%s write %d stream resBlock(s) into table, has delete block, submit one-by-one", vgId, id,
@ -1173,6 +1177,8 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
continue;
} else if (pDataBlock->info.type == STREAM_DROP_CHILD_TABLE && pTask->subtableWithoutMd5) {
code = doBuildAndSendDropTableMsg(pVnode, stbFullName, pDataBlock, pTask, suid);
} else if (pDataBlock->info.type == STREAM_NOTIFY_EVENT) {
continue;
} else {
code = handleResultBlockMsg(pTask, pDataBlock, i, pVnode, earlyTs);
}
@ -1317,6 +1323,10 @@ void rebuildAndSendMultiResBlock(SStreamTask* pTask, const SArray* pBlocks, SVno
continue;
}
if (pDataBlock->info.type == STREAM_NOTIFY_EVENT) {
continue;
}
hasSubmit = true;
pTask->execInfo.sink.numOfBlocks += 1;
uint64_t groupId = pDataBlock->info.id.groupId;

View File

@ -0,0 +1,445 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "cmdnodes.h"
#include "tq.h"
#ifndef WINDOWS
#include "curl/curl.h"
#endif
#define STREAM_EVENT_NOTIFY_RETRY_MS 50 // 50ms
typedef struct SStreamNotifyHandle {
TdThreadMutex mutex;
#ifndef WINDOWS
CURL* curl;
#endif
char* url;
} SStreamNotifyHandle;
struct SStreamNotifyHandleMap {
TdThreadMutex gMutex;
SHashObj* handleMap;
};
static void stopStreamNotifyConn(SStreamNotifyHandle* pHandle) {
#ifndef WINDOWS
if (pHandle == NULL || pHandle->curl == NULL) {
return;
}
// status code 1000 means normal closure
size_t len = 0;
uint16_t status = htons(1000);
CURLcode res = curl_ws_send(pHandle->curl, &status, sizeof(status), &len, 0, CURLWS_CLOSE);
if (res != CURLE_OK) {
tqWarn("failed to send ws-close msg to %s for %d", pHandle->url ? pHandle->url : "", res);
}
// TODO: add wait mechanism for peer connection close response
curl_easy_cleanup(pHandle->curl);
#endif
}
static void destroyStreamNotifyHandle(void* ptr) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
SStreamNotifyHandle** ppHandle = ptr;
if (ppHandle == NULL || *ppHandle == NULL) {
return;
}
code = taosThreadMutexDestroy(&(*ppHandle)->mutex);
stopStreamNotifyConn(*ppHandle);
taosMemoryFreeClear((*ppHandle)->url);
taosMemoryFreeClear(*ppHandle);
}
static void releaseStreamNotifyHandle(SStreamNotifyHandle** ppHandle) {
if (ppHandle == NULL || *ppHandle == NULL) {
return;
}
(void)taosThreadMutexUnlock(&(*ppHandle)->mutex);
*ppHandle = NULL;
}
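// look up the notify handle for this URL (creating it on first use), lock it, and lazily open the websocket connection with curl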
static int32_t acquireStreamNotifyHandle(SStreamNotifyHandleMap* pMap, const char* url,
SStreamNotifyHandle** ppHandle) {
#ifndef WINDOWS
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
bool gLocked = false;
SStreamNotifyHandle** ppFindHandle = NULL;
SStreamNotifyHandle* pNewHandle = NULL;
CURL* newCurl = NULL;
CURLcode res = CURLE_OK;
TSDB_CHECK_NULL(pMap, code, lino, _end, TSDB_CODE_INVALID_PARA);
TSDB_CHECK_NULL(url, code, lino, _end, TSDB_CODE_INVALID_PARA);
TSDB_CHECK_NULL(ppHandle, code, lino, _end, TSDB_CODE_INVALID_PARA);
*ppHandle = NULL;
code = taosThreadMutexLock(&pMap->gMutex);
TSDB_CHECK_CODE(code, lino, _end);
gLocked = true;
ppFindHandle = taosHashGet(pMap->handleMap, url, strlen(url));
if (ppFindHandle == NULL) {
pNewHandle = taosMemoryCalloc(1, sizeof(SStreamNotifyHandle));
TSDB_CHECK_NULL(pNewHandle, code, lino, _end, terrno);
code = taosThreadMutexInit(&pNewHandle->mutex, NULL);
TSDB_CHECK_CODE(code, lino, _end);
code = taosHashPut(pMap->handleMap, url, strlen(url), &pNewHandle, POINTER_BYTES);
TSDB_CHECK_CODE(code, lino, _end);
*ppHandle = pNewHandle;
pNewHandle = NULL;
} else {
*ppHandle = *ppFindHandle;
}
code = taosThreadMutexLock(&(*ppHandle)->mutex);
TSDB_CHECK_CODE(code, lino, _end);
(void)taosThreadMutexUnlock(&pMap->gMutex);
gLocked = false;
if ((*ppHandle)->curl == NULL) {
newCurl = curl_easy_init();
TSDB_CHECK_NULL(newCurl, code, lino, _end, TSDB_CODE_FAILED);
res = curl_easy_setopt(newCurl, CURLOPT_URL, url);
TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED);
res = curl_easy_setopt(newCurl, CURLOPT_SSL_VERIFYPEER, 0L);
TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED);
res = curl_easy_setopt(newCurl, CURLOPT_SSL_VERIFYHOST, 0L);
TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED);
res = curl_easy_setopt(newCurl, CURLOPT_TIMEOUT, 3L);
TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED);
res = curl_easy_setopt(newCurl, CURLOPT_CONNECT_ONLY, 2L);
TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED);
res = curl_easy_perform(newCurl);
TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED);
(*ppHandle)->curl = newCurl;
newCurl = NULL;
}
if ((*ppHandle)->url == NULL) {
(*ppHandle)->url = taosStrdup(url);
TSDB_CHECK_NULL((*ppHandle)->url, code, lino, _end, terrno);
}
_end:
if (code != TSDB_CODE_SUCCESS) {
tqError("%s failed at line %d since %d, %s", __func__, lino, res, tstrerror(code));
if (*ppHandle) {
releaseStreamNotifyHandle(ppHandle);
}
*ppHandle = NULL;
}
if (newCurl) {
curl_easy_cleanup(newCurl);
}
if (pNewHandle) {
destroyStreamNotifyHandle(&pNewHandle);
}
if (gLocked) {
(void)taosThreadMutexUnlock(&pMap->gMutex);
}
return code;
#else
tqError("stream notify events is not supported on windows");
return TSDB_CODE_NOT_SUPPORTTED_IN_WINDOWS;
#endif
}
int32_t tqInitNotifyHandleMap(SStreamNotifyHandleMap** ppMap) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
SStreamNotifyHandleMap* pMap = NULL;
TSDB_CHECK_NULL(ppMap, code, lino, _end, TSDB_CODE_INVALID_PARA);
*ppMap = NULL;
pMap = taosMemoryCalloc(1, sizeof(SStreamNotifyHandleMap));
TSDB_CHECK_NULL(pMap, code, lino, _end, terrno);
code = taosThreadMutexInit(&pMap->gMutex, NULL);
TSDB_CHECK_CODE(code, lino, _end);
pMap->handleMap = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
TSDB_CHECK_NULL(pMap->handleMap, code, lino, _end, terrno);
taosHashSetFreeFp(pMap->handleMap, destroyStreamNotifyHandle);
*ppMap = pMap;
pMap = NULL;
_end:
if (code != TSDB_CODE_SUCCESS) {
tqError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
if (pMap != NULL) {
tqDestroyNotifyHandleMap(&pMap);
}
return code;
}
void tqDestroyNotifyHandleMap(SStreamNotifyHandleMap** ppMap) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
if (*ppMap == NULL) {
return;
}
taosHashCleanup((*ppMap)->handleMap);
code = taosThreadMutexDestroy(&(*ppMap)->gMutex);
taosMemoryFreeClear((*ppMap));
}
#define JSON_CHECK_ADD_ITEM(obj, str, item) \
TSDB_CHECK_CONDITION(cJSON_AddItemToObjectCS(obj, str, item), code, lino, _end, TSDB_CODE_OUT_OF_MEMORY)
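// build the JSON envelope for one notification message: messageId, timestamp, and a streams array holding this stream's (initially empty) events array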
static int32_t getStreamNotifyEventHeader(const char* streamName, char** pHeader) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
cJSON* obj = NULL;
cJSON* streams = NULL;
cJSON* stream = NULL;
char msgId[37];
TSDB_CHECK_NULL(streamName, code, lino, _end, TSDB_CODE_INVALID_PARA);
TSDB_CHECK_NULL(pHeader, code, lino, _end, TSDB_CODE_INVALID_PARA);
*pHeader = NULL;
code = taosGetSystemUUIDLimit36(msgId, sizeof(msgId));
TSDB_CHECK_CODE(code, lino, _end);
stream = cJSON_CreateObject();
TSDB_CHECK_NULL(stream, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY);
JSON_CHECK_ADD_ITEM(stream, "streamName", cJSON_CreateStringReference(streamName));
JSON_CHECK_ADD_ITEM(stream, "events", cJSON_CreateArray());
streams = cJSON_CreateArray();
TSDB_CHECK_CONDITION(cJSON_AddItemToArray(streams, stream), code, lino, _end, TSDB_CODE_OUT_OF_MEMORY)
stream = NULL;
obj = cJSON_CreateObject();
TSDB_CHECK_NULL(obj, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY);
JSON_CHECK_ADD_ITEM(obj, "messageId", cJSON_CreateStringReference(msgId));
JSON_CHECK_ADD_ITEM(obj, "timestamp", cJSON_CreateNumber(taosGetTimestampMs()));
JSON_CHECK_ADD_ITEM(obj, "streams", streams);
streams = NULL;
*pHeader = cJSON_PrintUnformatted(obj);
TSDB_CHECK_NULL(*pHeader, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY);
_end:
if (code != TSDB_CODE_SUCCESS) {
tqError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
if (stream != NULL) {
cJSON_Delete(stream);
}
if (streams != NULL) {
cJSON_Delete(streams);
}
if (obj != NULL) {
cJSON_Delete(obj);
}
return code;
}
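// pack all STREAM_NOTIFY_EVENT rows of the result blocks into one JSON message: a first pass sums the payload sizes, then each event string is copied into the envelope comma-separated and the trailing comma is replaced by the closing tail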
static int32_t packupStreamNotifyEvent(const char* streamName, const SArray* pBlocks, char** pMsg,
int32_t* nNotifyEvents) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
int32_t numOfBlocks = 0;
int32_t msgHeaderLen = 0;
int32_t msgTailLen = 0;
int32_t msgLen = 0;
char* msgHeader = NULL;
const char* msgTail = "]}]}";
char* msg = NULL;
TSDB_CHECK_NULL(pMsg, code, lino, _end, TSDB_CODE_INVALID_PARA);
*pMsg = NULL;
numOfBlocks = taosArrayGetSize(pBlocks);
*nNotifyEvents = 0;
for (int32_t i = 0; i < numOfBlocks; ++i) {
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
if (pDataBlock == NULL || pDataBlock->info.type != STREAM_NOTIFY_EVENT) {
continue;
}
SColumnInfoData* pEventStrCol = taosArrayGet(pDataBlock->pDataBlock, NOTIFY_EVENT_STR_COLUMN_INDEX);
for (int32_t j = 0; j < pDataBlock->info.rows; ++j) {
char* val = colDataGetVarData(pEventStrCol, j);
msgLen += varDataLen(val) + 1;
}
*nNotifyEvents += pDataBlock->info.rows;
}
if (msgLen == 0) {
// skip since no notification events were found
goto _end;
}
code = getStreamNotifyEventHeader(streamName, &msgHeader);
TSDB_CHECK_CODE(code, lino, _end);
msgHeaderLen = strlen(msgHeader);
msgTailLen = strlen(msgTail);
msgLen += msgHeaderLen;
msg = taosMemoryMalloc(msgLen);
TSDB_CHECK_NULL(msg, code, lino, _end, terrno);
char* p = msg;
TAOS_STRNCPY(p, msgHeader, msgHeaderLen);
p += msgHeaderLen - msgTailLen;
for (int32_t i = 0; i < numOfBlocks; ++i) {
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
if (pDataBlock == NULL || pDataBlock->info.type != STREAM_NOTIFY_EVENT) {
continue;
}
SColumnInfoData* pEventStrCol = taosArrayGet(pDataBlock->pDataBlock, NOTIFY_EVENT_STR_COLUMN_INDEX);
for (int32_t j = 0; j < pDataBlock->info.rows; ++j) {
char* val = colDataGetVarData(pEventStrCol, j);
TAOS_STRNCPY(p, varDataVal(val), varDataLen(val));
p += varDataLen(val);
*(p++) = ',';
}
}
p -= 1;
TAOS_STRNCPY(p, msgTail, msgTailLen);
*(p + msgTailLen) = '\0';
*pMsg = msg;
msg = NULL;
_end:
if (code != TSDB_CODE_SUCCESS) {
tqError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
if (msgHeader != NULL) {
cJSON_free(msgHeader);
}
if (msg != NULL) {
taosMemoryFreeClear(msg);
}
return code;
}
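// send one complete message over the websocket, looping until every byte has been written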
static int32_t sendSingleStreamNotify(SStreamNotifyHandle* pHandle, char* msg) {
#ifndef WINDOWS
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
CURLcode res = CURLE_OK;
uint64_t sentLen = 0;
uint64_t totalLen = 0;
size_t nbytes = 0;
TSDB_CHECK_NULL(pHandle, code, lino, _end, TSDB_CODE_INVALID_PARA);
TSDB_CHECK_NULL(pHandle->curl, code, lino, _end, TSDB_CODE_INVALID_PARA);
totalLen = strlen(msg);
while (sentLen < totalLen) {
res = curl_ws_send(pHandle->curl, msg + sentLen, totalLen - sentLen, &nbytes, 0, CURLWS_TEXT);
TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED);
sentLen += nbytes;
}
_end:
if (code != TSDB_CODE_SUCCESS) {
tqError("%s failed at line %d since %d, %s", __func__, lino, res, tstrerror(code));
stopStreamNotifyConn(pHandle);
}
return code;
#else
tqError("stream notify events is not supported on windows");
return TSDB_CODE_NOT_SUPPORTTED_IN_WINDOWS;
#endif
}
int32_t tqSendAllNotifyEvents(const SArray* pBlocks, SStreamTask* pTask, SVnode* pVnode) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
char* msg = NULL;
int32_t nNotifyAddr = 0;
int32_t nNotifyEvents = 0;
SStreamNotifyHandle* pHandle = NULL;
TSDB_CHECK_NULL(pTask, code, lino, _end, TSDB_CODE_INVALID_PARA);
TSDB_CHECK_NULL(pVnode, code, lino, _end, TSDB_CODE_INVALID_PARA);
nNotifyAddr = taosArrayGetSize(pTask->notifyInfo.pNotifyAddrUrls);
if (nNotifyAddr == 0) {
goto _end;
}
code = packupStreamNotifyEvent(pTask->notifyInfo.streamName, pBlocks, &msg, &nNotifyEvents);
TSDB_CHECK_CODE(code, lino, _end);
if (msg == NULL) {
goto _end;
}
tqDebug("stream task %s prepare to send %d notify events, total msg length: %" PRIu64, pTask->notifyInfo.streamName,
nNotifyEvents, (uint64_t)strlen(msg));
for (int32_t i = 0; i < nNotifyAddr; ++i) {
if (streamTaskShouldStop(pTask)) {
break;
}
const char* url = taosArrayGetP(pTask->notifyInfo.pNotifyAddrUrls, i);
code = acquireStreamNotifyHandle(pVnode->pNotifyHandleMap, url, &pHandle);
if (code != TSDB_CODE_SUCCESS) {
tqError("failed to get stream notify handle of %s", url);
if (pTask->notifyInfo.notifyErrorHandle == SNOTIFY_ERROR_HANDLE_PAUSE) {
// retry sending the event message in PAUSE error-handling mode
taosMsleep(STREAM_EVENT_NOTIFY_RETRY_MS);
--i;
continue;
} else {
// simply ignore the failure in DROP error handling mode
code = TSDB_CODE_SUCCESS;
continue;
}
}
code = sendSingleStreamNotify(pHandle, msg);
if (code != TSDB_CODE_SUCCESS) {
tqError("failed to send stream notify handle to %s since %s", url, tstrerror(code));
if (pTask->notifyInfo.notifyErrorHandle == SNOTIFY_ERROR_HANDLE_PAUSE) {
// retry sending the event message in PAUSE error-handling mode
taosMsleep(STREAM_EVENT_NOTIFY_RETRY_MS);
--i;
} else {
// simply ignore the failure in DROP error handling mode
code = TSDB_CODE_SUCCESS;
}
} else {
tqDebug("stream task %s send %d notify events to %s successfully", pTask->notifyInfo.streamName, nNotifyEvents,
url);
}
releaseStreamNotifyHandle(&pHandle);
}
_end:
if (code != TSDB_CODE_SUCCESS) {
tqError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
if (msg) {
taosMemoryFreeClear(msg);
}
return code;
}

View File

@ -22,6 +22,8 @@
typedef struct SBuildScanWalMsgParam {
int64_t metaId;
int32_t numOfTasks;
int8_t restored;
SMsgCb msgCb;
} SBuildScanWalMsgParam;
static int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta);
@ -74,7 +76,6 @@ int32_t tqScanWal(STQ* pTq) {
static void doStartScanWal(void* param, void* tmrId) {
int32_t vgId = 0;
STQ* pTq = NULL;
int32_t code = 0;
SBuildScanWalMsgParam* pParam = (SBuildScanWalMsgParam*)param;
@ -86,13 +87,29 @@ static void doStartScanWal(void* param, void* tmrId) {
return;
}
if (pMeta->closeFlag) {
code = taosReleaseRef(streamMetaRefPool, pParam->metaId);
if (code == TSDB_CODE_SUCCESS) {
tqDebug("vgId:%d jump out of scan wal timer since closed", vgId);
} else {
tqError("vgId:%d failed to release ref for streamMeta, rid:%" PRId64 " code:%s", vgId, pParam->metaId,
tstrerror(code));
}
taosMemoryFree(pParam);
return;
}
vgId = pMeta->vgId;
pTq = pMeta->ahandle;
tqDebug("vgId:%d create msg to start wal scan, numOfTasks:%d, vnd restored:%d", vgId, pParam->numOfTasks,
pTq->pVnode->restored);
pParam->restored);
#if 0
// sleep so that the vnode gets freed in the meantime and an invalid read may occur (error injection)
taosMsleep(10000);
#endif
code = streamTaskSchedTask(&pTq->pVnode->msgCb, vgId, 0, 0, STREAM_EXEC_T_EXTRACT_WAL_DATA);
code = streamTaskSchedTask(&pParam->msgCb, vgId, 0, 0, STREAM_EXEC_T_EXTRACT_WAL_DATA);
if (code) {
tqError("vgId:%d failed sched task to scan wal, code:%s", vgId, tstrerror(code));
}
@ -120,6 +137,8 @@ int32_t tqScanWalInFuture(STQ* pTq, int32_t numOfTasks, int32_t idleDuration) {
pParam->metaId = pMeta->rid;
pParam->numOfTasks = numOfTasks;
pParam->restored = pTq->pVnode->restored;
pParam->msgCb = pTq->pVnode->msgCb;
code = streamTimerGetInstance(&pTimer);
if (code) {
@ -330,13 +349,13 @@ int32_t doPutDataIntoInputQ(SStreamTask* pTask, int64_t maxVer, int32_t* numOfIt
int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta) {
int32_t vgId = pStreamMeta->vgId;
SArray* pTaskList = NULL;
int32_t numOfTasks = taosArrayGetSize(pStreamMeta->pTaskList);
if (numOfTasks == 0) {
return TSDB_CODE_SUCCESS;
}
// clone the task list to avoid task updates while scanning the WAL files
SArray* pTaskList = NULL;
streamMetaWLock(pStreamMeta);
pTaskList = taosArrayDup(pStreamMeta->pTaskList, NULL);
streamMetaWUnLock(pStreamMeta);
@ -447,3 +466,11 @@ int32_t doScanWalAsync(STQ* pTq, bool ckPause) {
return streamTaskSchedTask(&pTq->pVnode->msgCb, vgId, 0, 0, STREAM_EXEC_T_EXTRACT_WAL_DATA);
}
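// error-injection helper: drives doStartScanWal directly (used from tqClose under #if 0)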
void streamMetaFreeTQDuringScanWalError(STQ* pTq) {
SBuildScanWalMsgParam* p = taosMemoryCalloc(1, sizeof(SBuildScanWalMsgParam));
p->metaId = pTq->pStreamMeta->rid;
p->numOfTasks = 0;
doStartScanWal(p, 0);
}

View File

@ -86,6 +86,14 @@ int32_t tqExpandStreamTask(SStreamTask* pTask) {
if (code) {
return code;
}
code =
qSetStreamNotifyInfo(pTask->exec.pExecutor, pTask->notifyInfo.notifyEventTypes,
pTask->notifyInfo.pSchemaWrapper, pTask->notifyInfo.stbFullName, IS_NEW_SUBTB_RULE(pTask));
if (code) {
tqError("s-task:%s failed to set stream notify info, code:%s", pTask->id.idStr, tstrerror(code));
return code;
}
}
streamSetupScheduleTrigger(pTask);
@ -1357,4 +1365,4 @@ int32_t tqStreamTaskProcessConsenChkptIdReq(SStreamMeta* pMeta, SRpcMsg* pMsg) {
streamMetaReleaseTask(pMeta, pTask);
return 0;
}
}

View File

@ -13,8 +13,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "tsdbDataFileRW.h"
#include "meta.h"
#include "tsdbDataFileRW.h"
// SDataFileReader =============================================
struct SDataFileReader {
@ -299,6 +299,7 @@ extern int32_t tBlockDataDecompress(SBufferReader *br, SBlockData *blockData, SB
int32_t tsdbDataFileReadBlockData(SDataFileReader *reader, const SBrinRecord *record, SBlockData *bData) {
int32_t code = 0;
int32_t lino = 0;
int32_t fid = reader->config->files[TSDB_FTYPE_DATA].file.fid;
SBuffer *buffer = reader->buffers + 0;
SBuffer *assist = reader->buffers + 1;
@ -321,8 +322,8 @@ int32_t tsdbDataFileReadBlockData(SDataFileReader *reader, const SBrinRecord *re
_exit:
if (code) {
tsdbError("vgId:%d %s failed at %s:%d since %s", TD_VID(reader->config->tsdb->pVnode), __func__, __FILE__, lino,
tstrerror(code));
tsdbError("vgId:%d %s fid %d failed at %s:%d since %s", TD_VID(reader->config->tsdb->pVnode), __func__, fid,
__FILE__, lino, tstrerror(code));
}
return code;
}
@ -331,6 +332,7 @@ int32_t tsdbDataFileReadBlockDataByColumn(SDataFileReader *reader, const SBrinRe
STSchema *pTSchema, int16_t cids[], int32_t ncid) {
int32_t code = 0;
int32_t lino = 0;
int32_t fid = reader->config->files[TSDB_FTYPE_DATA].file.fid;
SDiskDataHdr hdr;
SBuffer *buffer0 = reader->buffers + 0;
@ -505,8 +507,8 @@ int32_t tsdbDataFileReadBlockDataByColumn(SDataFileReader *reader, const SBrinRe
_exit:
if (code) {
tsdbError("vgId:%d %s failed at %s:%d since %s", TD_VID(reader->config->tsdb->pVnode), __func__, __FILE__, lino,
tstrerror(code));
tsdbError("vgId:%d %s fid:%d failed at %s:%d since %s", TD_VID(reader->config->tsdb->pVnode), __func__, fid,
__FILE__, lino, tstrerror(code));
}
return code;
}

View File

@ -15,6 +15,7 @@
#include "sync.h"
#include "tcs.h"
#include "tq.h"
#include "tsdb.h"
#include "vnd.h"
@ -483,6 +484,14 @@ SVnode *vnodeOpen(const char *path, int32_t diskPrimary, STfs *pTfs, SMsgCb msgC
ret = taosRealPath(tdir, NULL, sizeof(tdir));
TAOS_UNUSED(ret);
// init handle map for stream event notification
ret = tqInitNotifyHandleMap(&pVnode->pNotifyHandleMap);
if (ret != TSDB_CODE_SUCCESS) {
vError("vgId:%d, failed to init StreamNotifyHandleMap", TD_VID(pVnode));
terrno = ret;
goto _err;
}
// open query
vInfo("vgId:%d, start to open vnode query", TD_VID(pVnode));
if (vnodeQueryOpen(pVnode)) {
@ -555,6 +564,7 @@ void vnodeClose(SVnode *pVnode) {
vnodeAWait(&pVnode->commitTask);
vnodeSyncClose(pVnode);
vnodeQueryClose(pVnode);
tqDestroyNotifyHandleMap(&pVnode->pNotifyHandleMap);
tqClose(pVnode->pTq);
walClose(pVnode->pWal);
if (pVnode->pTsdb) tsdbClose(&pVnode->pTsdb);

View File

@ -449,9 +449,17 @@ typedef struct STimeWindowAggSupp {
SColumnInfoData timeWindowData; // query time window info for scalar function execution.
} STimeWindowAggSupp;
typedef struct SStreamNotifyEventSupp {
SArray* pWindowEvents; // Array of SStreamNotifyEvent, storing window events and trigger values.
SHashObj* pTableNameHashMap; // Hash map from groupid to the dest child table name.
SHashObj* pResultHashMap; // Hash map from groupid+skey to the window agg result.
SSDataBlock* pEventBlock; // The datablock contains all window events and results.
} SStreamNotifyEventSupp;
typedef struct SSteamOpBasicInfo {
int32_t primaryPkIndex;
bool updateOperatorInfo;
int32_t primaryPkIndex;
bool updateOperatorInfo;
SStreamNotifyEventSupp windowEventSup;
} SSteamOpBasicInfo;
typedef struct SStreamFillSupporter {
@ -767,6 +775,8 @@ typedef struct SStreamEventAggOperatorInfo {
SSHashObj* pPkDeleted;
bool destHasPrimaryKey;
struct SOperatorInfo* pOperator;
SNodeList* pStartCondCols;
SNodeList* pEndCondCols;
} SStreamEventAggOperatorInfo;
typedef struct SStreamCountAggOperatorInfo {

View File

@ -71,6 +71,10 @@ typedef struct {
SVersionRange fillHistoryVer;
STimeWindow fillHistoryWindow;
SStreamState* pState;
int32_t eventTypes; // event types to notify
SSchemaWrapper* notifyResultSchema; // agg result to notify
char* stbFullName; // used to generate dest child table name
bool newSubTableRule; // used to generate dest child table name
} SStreamTaskInfo;
struct SExecTaskInfo {

View File

@ -19,7 +19,10 @@
extern "C" {
#endif
#include "cJSON.h"
#include "cmdnodes.h"
#include "executorInt.h"
#include "querytask.h"
#include "tutil.h"
#define FILL_POS_INVALID 0
@ -57,7 +60,8 @@ typedef struct SSlicePoint {
void setStreamOperatorState(SSteamOpBasicInfo* pBasicInfo, EStreamType type);
bool needSaveStreamOperatorInfo(SSteamOpBasicInfo* pBasicInfo);
void saveStreamOperatorStateComplete(SSteamOpBasicInfo* pBasicInfo);
void initStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo);
int32_t initStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo);
void destroyStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo);
int64_t getDeleteMarkFromOption(SStreamNodeOption* pOption);
void removeDeleteResults(SSHashObj* pUpdatedMap, SArray* pDelWins);
@ -106,6 +110,13 @@ int32_t buildAllResultKey(SStateStore* pStateStore, SStreamState* pState, TSKEY
int32_t initOffsetInfo(int32_t** ppOffset, SSDataBlock* pRes);
TSKEY compareTs(void* pKey);
int32_t addEventAggNotifyEvent(EStreamNotifyEventType eventType, const SSessionKey* pSessionKey,
const SSDataBlock* pInputBlock, const SNodeList* pCondCols, int32_t ri,
SStreamNotifyEventSupp* sup);
int32_t addAggResultNotifyEvent(const SSDataBlock* pResultBlock, const SSchemaWrapper* pSchemaWrapper,
SStreamNotifyEventSupp* sup);
int32_t buildNotifyEventBlock(const SExecTaskInfo* pTaskInfo, SStreamNotifyEventSupp* sup);
#ifdef __cplusplus
}
#endif

View File

@ -250,6 +250,28 @@ int32_t qSetStreamOpOpen(qTaskInfo_t tinfo) {
return code;
}
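// store the notification settings in the executor task so stream operators can generate notify-event blocks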
int32_t qSetStreamNotifyInfo(qTaskInfo_t tinfo, int32_t eventTypes, const SSchemaWrapper* pSchemaWrapper,
const char* stbFullName, bool newSubTableRule) {
int32_t code = TSDB_CODE_SUCCESS;
SStreamTaskInfo *pStreamInfo = NULL;
if (tinfo == 0 || eventTypes == 0 || pSchemaWrapper == NULL || stbFullName == NULL) {
goto _end;
}
pStreamInfo = &((SExecTaskInfo*)tinfo)->streamInfo;
pStreamInfo->eventTypes = eventTypes;
pStreamInfo->notifyResultSchema = tCloneSSchemaWrapper(pSchemaWrapper);
if (pStreamInfo->notifyResultSchema == NULL) {
code = terrno;
}
pStreamInfo->stbFullName = taosStrdup(stbFullName);
pStreamInfo->newSubTableRule = newSubTableRule;
_end:
return code;
}
int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type) {
if (tinfo == NULL) {
return TSDB_CODE_APP_ERROR;

View File

@ -262,6 +262,8 @@ SSchemaWrapper* extractQueriedColumnSchema(SScanPhysiNode* pScanNode) {
static void cleanupStreamInfo(SStreamTaskInfo* pStreamInfo) {
tDeleteSchemaWrapper(pStreamInfo->schema);
tOffsetDestroy(&pStreamInfo->currentOffset);
tDeleteSchemaWrapper(pStreamInfo->notifyResultSchema);
taosMemoryFree(pStreamInfo->stbFullName);
}
static void freeBlock(void* pParam) {

View File

@ -12,6 +12,8 @@
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "cmdnodes.h"
#include "executorInt.h"
#include "filter.h"
#include "function.h"
@ -53,6 +55,8 @@ void destroyStreamEventOperatorInfo(void* param) {
&pInfo->groupResInfo);
pInfo->pOperator = NULL;
}
destroyStreamBasicInfo(&pInfo->basic);
destroyStreamAggSupporter(&pInfo->streamAggSup);
clearGroupResInfo(&pInfo->groupResInfo);
taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos);
@ -89,6 +93,16 @@ void destroyStreamEventOperatorInfo(void* param) {
pInfo->pEndCondInfo = NULL;
}
if (pInfo->pStartCondCols != NULL) {
nodesDestroyList(pInfo->pStartCondCols);
pInfo->pStartCondCols = NULL;
}
if (pInfo->pEndCondCols != NULL) {
nodesDestroyList(pInfo->pEndCondCols);
pInfo->pEndCondCols = NULL;
}
taosMemoryFreeClear(param);
}
@ -121,7 +135,7 @@ void reuseOutputBuf(void* pState, SRowBuffPos* pPos, SStateStore* pAPI) {
}
int32_t setEventOutputBuf(SStreamAggSupporter* pAggSup, TSKEY* pTs, uint64_t groupId, bool* pStart, bool* pEnd,
int32_t index, int32_t rows, SEventWindowInfo* pCurWin, SSessionKey* pNextWinKey) {
int32_t index, int32_t rows, SEventWindowInfo* pCurWin, SSessionKey* pNextWinKey, int32_t* pWinCode) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
int32_t winCode = TSDB_CODE_SUCCESS;
@ -143,6 +157,7 @@ int32_t setEventOutputBuf(SStreamAggSupporter* pAggSup, TSKEY* pTs, uint64_t gro
setEventWindowInfo(pAggSup, &leftWinKey, pVal, pCurWin);
if (inWin || (pCurWin->pWinFlag->startFlag && !pCurWin->pWinFlag->endFlag)) {
pCurWin->winInfo.isOutput = !isWindowIncomplete(pCurWin);
(*pWinCode) = TSDB_CODE_SUCCESS;
goto _end;
}
}
@ -156,6 +171,7 @@ int32_t setEventOutputBuf(SStreamAggSupporter* pAggSup, TSKEY* pTs, uint64_t gro
if (endi < 0 || pTs[endi] >= rightWinKey.win.skey) {
setEventWindowInfo(pAggSup, &rightWinKey, pVal, pCurWin);
pCurWin->winInfo.isOutput = !isWindowIncomplete(pCurWin);
(*pWinCode) = TSDB_CODE_SUCCESS;
goto _end;
}
}
@ -163,6 +179,7 @@ int32_t setEventOutputBuf(SStreamAggSupporter* pAggSup, TSKEY* pTs, uint64_t gro
SSessionKey winKey = {.win.skey = ts, .win.ekey = ts, .groupId = groupId};
code = pAggSup->stateStore.streamStateSessionAllocWinBuffByNextPosition(pAggSup->pState, pCur, &winKey, &pVal, &len);
QUERY_CHECK_CODE(code, lino, _error);
(*pWinCode) = TSDB_CODE_FAILED;
setEventWindowInfo(pAggSup, &winKey, pVal, pCurWin);
pCurWin->pWinFlag->startFlag = start;
@ -373,10 +390,18 @@ static void doStreamEventAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl
bool allEqual = true;
SEventWindowInfo curWin = {0};
SSessionKey nextWinKey = {0};
int32_t winCode = TSDB_CODE_SUCCESS;
code = setEventOutputBuf(pAggSup, tsCols, groupId, (bool*)pColStart->pData, (bool*)pColEnd->pData, i, rows, &curWin,
&nextWinKey);
&nextWinKey, &winCode);
QUERY_CHECK_CODE(code, lino, _end);
if (BIT_FLAG_TEST_MASK(pTaskInfo->streamInfo.eventTypes, SNOTIFY_EVENT_WINDOW_OPEN) &&
*(bool*)colDataGetNumData(pColStart, i) && winCode != TSDB_CODE_SUCCESS) {
code = addEventAggNotifyEvent(SNOTIFY_EVENT_WINDOW_OPEN, &curWin.winInfo.sessionWin, pSDataBlock,
pInfo->pStartCondCols, i, &pInfo->basic.windowEventSup);
QUERY_CHECK_CODE(code, lino, _end);
}
setSessionWinOutputInfo(pSeUpdated, &curWin.winInfo);
bool rebuild = false;
code = updateEventWindowInfo(pAggSup, &curWin, &nextWinKey, tsCols, (bool*)pColStart->pData, (bool*)pColEnd->pData,
@ -443,6 +468,12 @@ static void doStreamEventAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl
tSimpleHashPut(pAggSup->pResultRows, &key, sizeof(SSessionKey), &curWin.winInfo, sizeof(SResultWindowInfo));
QUERY_CHECK_CODE(code, lino, _end);
}
if (BIT_FLAG_TEST_MASK(pTaskInfo->streamInfo.eventTypes, SNOTIFY_EVENT_WINDOW_CLOSE)) {
code = addEventAggNotifyEvent(SNOTIFY_EVENT_WINDOW_CLOSE, &curWin.winInfo.sessionWin, pSDataBlock,
pInfo->pEndCondCols, i + winRows - 1, &pInfo->basic.windowEventSup);
QUERY_CHECK_CODE(code, lino, _end);
}
}
_end:
@ -563,6 +594,7 @@ void doStreamEventSaveCheckpoint(SOperatorInfo* pOperator) {
static int32_t buildEventResult(SOperatorInfo* pOperator, SSDataBlock** ppRes) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
SStreamEventAggOperatorInfo* pInfo = pOperator->info;
SOptrBasicInfo* pBInfo = &pInfo->binfo;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
@ -577,10 +609,27 @@ static int32_t buildEventResult(SOperatorInfo* pOperator, SSDataBlock** ppRes) {
doBuildSessionResult(pOperator, pInfo->streamAggSup.pState, &pInfo->groupResInfo, pBInfo->pRes);
if (pBInfo->pRes->info.rows > 0) {
printDataBlock(pBInfo->pRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo));
if (BIT_FLAG_TEST_MASK(pTaskInfo->streamInfo.eventTypes, SNOTIFY_EVENT_WINDOW_CLOSE)) {
code = addAggResultNotifyEvent(pBInfo->pRes, pTaskInfo->streamInfo.notifyResultSchema, &pInfo->basic.windowEventSup);
QUERY_CHECK_CODE(code, lino, _end);
}
(*ppRes) = pBInfo->pRes;
return code;
}
code = buildNotifyEventBlock(pTaskInfo, &pInfo->basic.windowEventSup);
QUERY_CHECK_CODE(code, lino, _end);
if (pInfo->basic.windowEventSup.pEventBlock->info.rows > 0) {
printDataBlock(pInfo->basic.windowEventSup.pEventBlock, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo));
(*ppRes) = pInfo->basic.windowEventSup.pEventBlock;
return code;
}
_end:
(*ppRes) = NULL;
if (code != TSDB_CODE_SUCCESS) {
qError("%s failed at line %d since %s. task:%s", __func__, lino, tstrerror(code), GET_TASKID(pTaskInfo));
}
return code;
}
@ -957,6 +1006,7 @@ int32_t createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode*
pInfo->pPkDeleted = tSimpleHashInit(64, hashFn);
QUERY_CHECK_NULL(pInfo->pPkDeleted, code, lino, _error, terrno);
pInfo->destHasPrimaryKey = pEventNode->window.destHasPrimaryKey;
initStreamBasicInfo(&pInfo->basic);
pInfo->pOperator = pOperator;
setOperatorInfo(pOperator, "StreamEventAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT, true, OP_NOT_OPENED,
@ -989,6 +1039,12 @@ int32_t createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode*
code = filterInitFromNode((SNode*)pEventNode->pEndCond, &pInfo->pEndCondInfo, 0);
QUERY_CHECK_CODE(code, lino, _error);
code =
nodesCollectColumnsFromNode((SNode*)pEventNode->pStartCond, NULL, COLLECT_COL_TYPE_ALL, &pInfo->pStartCondCols);
QUERY_CHECK_CODE(code, lino, _error);
code = nodesCollectColumnsFromNode((SNode*)pEventNode->pEndCond, NULL, COLLECT_COL_TYPE_ALL, &pInfo->pEndCondCols);
QUERY_CHECK_CODE(code, lino, _error);
*pOptrInfo = pOperator;
return TSDB_CODE_SUCCESS;

View File

@ -13,7 +13,19 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "streamexecutorInt.h"
#include "executorInt.h"
#include "tdatablock.h"
#define NOTIFY_EVENT_NAME_CACHE_LIMIT_MB 16
typedef struct SStreamNotifyEvent {
uint64_t gid;
TSKEY skey;
char* content;
bool isEnd;
} SStreamNotifyEvent;
void setStreamOperatorState(SSteamOpBasicInfo* pBasicInfo, EStreamType type) {
if (type != STREAM_GET_ALL && type != STREAM_CHECKPOINT) {
@ -29,7 +41,509 @@ void saveStreamOperatorStateComplete(SSteamOpBasicInfo* pBasicInfo) {
pBasicInfo->updateOperatorInfo = false;
}
void initStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo) {
static void destroyStreamWindowEvent(void* ptr) {
SStreamNotifyEvent* pEvent = ptr;
if (pEvent == NULL || pEvent->content == NULL) return;
cJSON_free(pEvent->content);
}
static void destroyStreamNotifyEventSupp(SStreamNotifyEventSupp* sup) {
if (sup == NULL) return;
taosArrayDestroyEx(sup->pWindowEvents, destroyStreamWindowEvent);
taosHashCleanup(sup->pTableNameHashMap);
taosHashCleanup(sup->pResultHashMap);
blockDataDestroy(sup->pEventBlock);
*sup = (SStreamNotifyEventSupp){0};
}
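// set up the per-operator buffers for window notifications: the event array, the table-name and result caches, and an event block with a single varchar column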
static int32_t initStreamNotifyEventSupp(SStreamNotifyEventSupp *sup) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
SSDataBlock* pBlock = NULL;
SColumnInfoData infoData = {0};
if (sup == NULL) {
goto _end;
}
code = createDataBlock(&pBlock);
QUERY_CHECK_CODE(code, lino, _end);
pBlock->info.type = STREAM_NOTIFY_EVENT;
pBlock->info.watermark = INT64_MIN;
infoData.info.type = TSDB_DATA_TYPE_VARCHAR;
infoData.info.bytes = tDataTypes[infoData.info.type].bytes;
code = blockDataAppendColInfo(pBlock, &infoData);
QUERY_CHECK_CODE(code, lino, _end);
sup->pWindowEvents = taosArrayInit(0, sizeof(SStreamNotifyEvent));
QUERY_CHECK_NULL(sup->pWindowEvents, code, lino, _end, terrno);
sup->pTableNameHashMap = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, HASH_NO_LOCK);
QUERY_CHECK_NULL(sup->pTableNameHashMap, code, lino, _end, terrno);
sup->pResultHashMap = taosHashInit(4096, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
QUERY_CHECK_NULL(sup->pResultHashMap, code, lino, _end, terrno);
taosHashSetFreeFp(sup->pResultHashMap, destroyStreamWindowEvent);
sup->pEventBlock = pBlock;
pBlock = NULL;
_end:
if (code != TSDB_CODE_SUCCESS) {
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
if (sup) {
destroyStreamNotifyEventSupp(sup);
}
}
if (pBlock != NULL) {
blockDataDestroy(pBlock);
}
return code;
}
int32_t initStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo) {
pBasicInfo->primaryPkIndex = -1;
pBasicInfo->updateOperatorInfo = false;
return initStreamNotifyEventSupp(&pBasicInfo->windowEventSup);
}
void destroyStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo) {
destroyStreamNotifyEventSupp(&pBasicInfo->windowEventSup);
}
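// derive a stable window id string by hashing the (group id, window start key) pair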
static void streamNotifyGetEventWindowId(const SSessionKey* pSessionKey, char *buf) {
uint64_t hash = 0;
uint64_t ar[2];
ar[0] = pSessionKey->groupId;
ar[1] = pSessionKey->win.skey;
hash = MurmurHash3_64((char*)ar, sizeof(ar));
buf = u64toaFastLut(hash, buf);
}
#define JSON_CHECK_ADD_ITEM(obj, str, item) \
QUERY_CHECK_CONDITION(cJSON_AddItemToObjectCS(obj, str, item), code, lino, _end, TSDB_CODE_OUT_OF_MEMORY)
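// append the value of one column at row `ri` to the JSON object, mapping TDengine types onto JSON booleans, numbers, and strings; NULL cells become JSON null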
static int32_t jsonAddColumnField(const char* colName, const SColumnInfoData* pColData, int32_t ri, cJSON* obj) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
char* temp = NULL;
QUERY_CHECK_NULL(colName, code, lino, _end, TSDB_CODE_INVALID_PARA);
QUERY_CHECK_NULL(pColData, code, lino, _end, TSDB_CODE_INVALID_PARA);
QUERY_CHECK_NULL(obj, code, lino, _end, TSDB_CODE_INVALID_PARA);
if (colDataIsNull_s(pColData, ri)) {
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNull());
goto _end;
}
switch (pColData->info.type) {
case TSDB_DATA_TYPE_BOOL: {
bool val = *(bool*)colDataGetNumData(pColData, ri);
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateBool(val));
break;
}
case TSDB_DATA_TYPE_TINYINT: {
int8_t val = *(int8_t*)colDataGetNumData(pColData, ri);
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val));
break;
}
case TSDB_DATA_TYPE_SMALLINT: {
int16_t val = *(int16_t*)colDataGetNumData(pColData, ri);
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val));
break;
}
case TSDB_DATA_TYPE_INT: {
int32_t val = *(int32_t*)colDataGetNumData(pColData, ri);
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val));
break;
}
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_TIMESTAMP: {
int64_t val = *(int64_t*)colDataGetNumData(pColData, ri);
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val));
break;
}
case TSDB_DATA_TYPE_FLOAT: {
float val = *(float*)colDataGetNumData(pColData, ri);
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val));
break;
}
case TSDB_DATA_TYPE_DOUBLE: {
double val = *(double*)colDataGetNumData(pColData, ri);
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val));
break;
}
case TSDB_DATA_TYPE_VARCHAR:
case TSDB_DATA_TYPE_NCHAR: {
// cJSON requires null-terminated strings, but this data is not null-terminated,
// so we need to manually copy the string and add null termination.
const char* src = varDataVal(colDataGetVarData(pColData, ri));
int32_t len = varDataLen(colDataGetVarData(pColData, ri));
temp = cJSON_malloc(len + 1);
QUERY_CHECK_NULL(temp, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY);
memcpy(temp, src, len);
temp[len] = '\0';
cJSON* item = cJSON_CreateStringReference(temp);
JSON_CHECK_ADD_ITEM(obj, colName, item);
// let the cJSON object free the memory later
item->type &= ~cJSON_IsReference;
temp = NULL;
break;
}
case TSDB_DATA_TYPE_UTINYINT: {
uint8_t val = *(uint8_t*)colDataGetNumData(pColData, ri);
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val));
break;
}
case TSDB_DATA_TYPE_USMALLINT: {
uint16_t val = *(uint16_t*)colDataGetNumData(pColData, ri);
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val));
break;
}
case TSDB_DATA_TYPE_UINT: {
uint32_t val = *(uint32_t*)colDataGetNumData(pColData, ri);
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val));
break;
}
case TSDB_DATA_TYPE_UBIGINT: {
uint64_t val = *(uint64_t*)colDataGetNumData(pColData, ri);
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val));
break;
}
default: {
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateStringReference("<Unable to display this data type>"));
break;
}
}
_end:
if (code != TSDB_CODE_SUCCESS) {
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
if (temp) {
cJSON_free(temp);
}
return code;
}
int32_t addEventAggNotifyEvent(EStreamNotifyEventType eventType, const SSessionKey* pSessionKey,
const SSDataBlock* pInputBlock, const SNodeList* pCondCols, int32_t ri,
SStreamNotifyEventSupp* sup) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
SNode* node = NULL;
cJSON* event = NULL;
cJSON* fields = NULL;
cJSON* cond = NULL;
SStreamNotifyEvent item = {0};
char windowId[32];
QUERY_CHECK_NULL(pSessionKey, code, lino, _end, TSDB_CODE_INVALID_PARA);
QUERY_CHECK_NULL(pInputBlock, code, lino, _end, TSDB_CODE_INVALID_PARA);
QUERY_CHECK_NULL(pInputBlock->pDataBlock, code, lino, _end, TSDB_CODE_INVALID_PARA);
QUERY_CHECK_NULL(pCondCols, code, lino, _end, TSDB_CODE_INVALID_PARA);
QUERY_CHECK_NULL(sup, code, lino, _end, TSDB_CODE_INVALID_PARA);
qDebug("add stream notify event from event window, type: %s, start: %" PRId64 ", end: %" PRId64,
(eventType == SNOTIFY_EVENT_WINDOW_OPEN) ? "WINDOW_OPEN" : "WINDOW_CLOSE", pSessionKey->win.skey,
pSessionKey->win.ekey);
event = cJSON_CreateObject();
QUERY_CHECK_NULL(event, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY);
// add basic info
streamNotifyGetEventWindowId(pSessionKey, windowId);
if (eventType == SNOTIFY_EVENT_WINDOW_OPEN) {
JSON_CHECK_ADD_ITEM(event, "eventType", cJSON_CreateStringReference("WINDOW_OPEN"));
} else if (eventType == SNOTIFY_EVENT_WINDOW_CLOSE) {
JSON_CHECK_ADD_ITEM(event, "eventType", cJSON_CreateStringReference("WINDOW_CLOSE"));
}
JSON_CHECK_ADD_ITEM(event, "eventTime", cJSON_CreateNumber(taosGetTimestampMs()));
JSON_CHECK_ADD_ITEM(event, "windowId", cJSON_CreateStringReference(windowId));
JSON_CHECK_ADD_ITEM(event, "windowType", cJSON_CreateStringReference("Event"));
JSON_CHECK_ADD_ITEM(event, "windowStart", cJSON_CreateNumber(pSessionKey->win.skey));
if (eventType == SNOTIFY_EVENT_WINDOW_CLOSE) {
JSON_CHECK_ADD_ITEM(event, "windowEnd", cJSON_CreateNumber(pSessionKey->win.ekey));
}
// create fields object to store matched column values
fields = cJSON_CreateObject();
QUERY_CHECK_NULL(fields, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY);
FOREACH(node, pCondCols) {
SColumnNode* pColDef = (SColumnNode*)node;
SColumnInfoData* pColData = taosArrayGet(pInputBlock->pDataBlock, pColDef->slotId);
code = jsonAddColumnField(pColDef->colName, pColData, ri, fields);
QUERY_CHECK_CODE(code, lino, _end);
}
// add trigger condition
cond = cJSON_CreateObject();
QUERY_CHECK_NULL(cond, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY);
JSON_CHECK_ADD_ITEM(cond, "conditionIndex", cJSON_CreateNumber(0));
JSON_CHECK_ADD_ITEM(cond, "fieldValues", fields);
fields = NULL;
JSON_CHECK_ADD_ITEM(event, "triggerConditions", cond);
cond = NULL;
// convert json object to string value
item.gid = pSessionKey->groupId;
item.skey = pSessionKey->win.skey;
item.isEnd = (eventType == SNOTIFY_EVENT_WINDOW_CLOSE);
item.content = cJSON_PrintUnformatted(event);
QUERY_CHECK_NULL(taosArrayPush(sup->pWindowEvents, &item), code, lino, _end, terrno);
item.content = NULL;
_end:
if (code != TSDB_CODE_SUCCESS) {
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
destroyStreamWindowEvent(&item);
if (cond != NULL) {
cJSON_Delete(cond);
}
if (fields != NULL) {
cJSON_Delete(fields);
}
if (event != NULL) {
cJSON_Delete(event);
}
return code;
}
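/* Rough sketch (illustration only, not part of the original sources) of the JSON built above for a
 * WINDOW_CLOSE event; timestamps, the window id and the condition column are placeholder values:
 * {
 *   "eventType": "WINDOW_CLOSE", "eventTime": 1700000000000,
 *   "windowId": "9223372036854775807", "windowType": "Event",
 *   "windowStart": 1699999990000, "windowEnd": 1699999999999,
 *   "triggerConditions": { "conditionIndex": 0, "fieldValues": { "voltage": 220 } }
 * }
 * A WINDOW_OPEN event carries the same fields except "windowEnd". */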
int32_t addAggResultNotifyEvent(const SSDataBlock* pResultBlock, const SSchemaWrapper* pSchemaWrapper,
SStreamNotifyEventSupp* sup) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
SNode* node = NULL;
cJSON* event = NULL;
cJSON* result = NULL;
SStreamNotifyEvent item = {0};
SColumnInfoData* pWstartCol = NULL;
QUERY_CHECK_NULL(pResultBlock, code, lino, _end, TSDB_CODE_INVALID_PARA);
QUERY_CHECK_NULL(pSchemaWrapper, code, lino, _end, TSDB_CODE_INVALID_PARA);
QUERY_CHECK_NULL(sup, code, lino, _end, TSDB_CODE_INVALID_PARA);
qDebug("add %" PRId64 " stream notify results from window agg", pResultBlock->info.rows);
pWstartCol = taosArrayGet(pResultBlock->pDataBlock, 0);
for (int32_t i = 0; i < pResultBlock->info.rows; ++i) {
event = cJSON_CreateObject();
QUERY_CHECK_NULL(event, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY);
// convert the result row into json
result = cJSON_CreateObject();
QUERY_CHECK_NULL(result, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY);
for (int32_t j = 0; j < pSchemaWrapper->nCols; ++j) {
SSchema *pCol = pSchemaWrapper->pSchema + j;
SColumnInfoData *pColData = taosArrayGet(pResultBlock->pDataBlock, pCol->colId - 1);
code = jsonAddColumnField(pCol->name, pColData, i, result);
QUERY_CHECK_CODE(code, lino, _end);
}
JSON_CHECK_ADD_ITEM(event, "result", result);
result = NULL;
item.gid = pResultBlock->info.id.groupId;
item.skey = *(uint64_t*)colDataGetNumData(pWstartCol, i);
item.content = cJSON_PrintUnformatted(event);
code = taosHashPut(sup->pResultHashMap, &item.gid, sizeof(item.gid) + sizeof(item.skey), &item, sizeof(item));
TSDB_CHECK_CODE(code, lino, _end);
item.content = NULL;
cJSON_Delete(event);
event = NULL;
}
_end:
if (code != TSDB_CODE_SUCCESS) {
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
destroyStreamWindowEvent(&item);
if (result != NULL) {
cJSON_Delete(result);
}
if (event != NULL) {
cJSON_Delete(event);
}
return code;
}
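/* Sketch only: each entry put into pResultHashMap above is keyed by the adjacent (gid, skey) pair and
 * its content looks roughly like {"result":{"_wstart":1699999990000,"count(*)":42}}, with the field
 * names taken from the schema wrapper; the concrete names and values here are illustrative. */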
static int32_t streamNotifyGetDestTableName(const SExecTaskInfo* pTaskInfo, uint64_t gid, char** pTableName) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
const SStorageAPI* pAPI = NULL;
void* tbname = NULL;
int32_t winCode = TSDB_CODE_SUCCESS;
char parTbName[TSDB_TABLE_NAME_LEN];
const SStreamTaskInfo* pStreamInfo = NULL;
QUERY_CHECK_NULL(pTaskInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
QUERY_CHECK_NULL(pTableName, code, lino, _end, TSDB_CODE_INVALID_PARA);
*pTableName = NULL;
pAPI = &pTaskInfo->storageAPI;
code = pAPI->stateStore.streamStateGetParName((void*)pTaskInfo->streamInfo.pState, gid, &tbname, false, &winCode);
QUERY_CHECK_CODE(code, lino, _end);
if (winCode != TSDB_CODE_SUCCESS) {
parTbName[0] = '\0';
} else {
tstrncpy(parTbName, tbname, sizeof(parTbName));
}
pAPI->stateStore.streamStateFreeVal(tbname);
pStreamInfo = &pTaskInfo->streamInfo;
code = buildSinkDestTableName(parTbName, pStreamInfo->stbFullName, gid, pStreamInfo->newSubTableRule, pTableName);
QUERY_CHECK_CODE(code, lino, _end);
_end:
if (code != TSDB_CODE_SUCCESS) {
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
return code;
}
static int32_t streamNotifyFillTableName(const char* tableName, const SStreamNotifyEvent* pEvent,
const SStreamNotifyEvent* pResult, char** pVal) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
static const char* prefix = "{\"tableName\":\"";
uint64_t prefixLen = 0;
uint64_t nameLen = 0;
uint64_t eventLen = 0;
uint64_t resultLen = 0;
uint64_t valLen = 0;
char* val = NULL;
char* p = NULL;
QUERY_CHECK_NULL(tableName, code, lino, _end, TSDB_CODE_INVALID_PARA);
QUERY_CHECK_NULL(pEvent, code, lino, _end, TSDB_CODE_INVALID_PARA);
QUERY_CHECK_NULL(pVal, code, lino, _end, TSDB_CODE_INVALID_PARA);
*pVal = NULL;
prefixLen = strlen(prefix);
nameLen = strlen(tableName);
eventLen = strlen(pEvent->content);
if (pResult != NULL) {
resultLen = strlen(pResult->content);
valLen = VARSTR_HEADER_SIZE + prefixLen + nameLen + eventLen + resultLen;
} else {
valLen = VARSTR_HEADER_SIZE + prefixLen + nameLen + eventLen + 1;
}
val = taosMemoryMalloc(valLen);
QUERY_CHECK_NULL(val, code, lino, _end, terrno);
varDataSetLen(val, valLen - VARSTR_HEADER_SIZE);
p = varDataVal(val);
TAOS_STRNCPY(p, prefix, prefixLen);
p += prefixLen;
TAOS_STRNCPY(p, tableName, nameLen);
p += nameLen;
*(p++) = '\"';
TAOS_STRNCPY(p, pEvent->content, eventLen);
*p = ',';
if (pResult != NULL) {
p += eventLen - 1;
TAOS_STRNCPY(p, pResult->content, resultLen);
*p = ',';
}
*pVal = val;
val = NULL;
_end:
if (code != TSDB_CODE_SUCCESS) {
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
if (val != NULL) {
taosMemoryFreeClear(val);
}
return code;
}
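/* Illustrative note (assumption, not from the original sources): the splicing above overwrites the
 * opening '{' of the event JSON (and, when present, of the result JSON) with a ',' so that the pieces
 * merge into one object, e.g.
 *   {"tableName":"t_1","eventType":"WINDOW_CLOSE",...,"result":{...}}
 * where "t_1" stands for whatever destination table name was resolved for the group id. */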
int32_t buildNotifyEventBlock(const SExecTaskInfo* pTaskInfo, SStreamNotifyEventSupp* sup) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
SColumnInfoData* pEventStrCol = NULL;
int32_t nWindowEvents = 0;
int32_t nWindowResults = 0;
char* val = NULL;
if (pTaskInfo == NULL || sup == NULL) {
goto _end;
}
QUERY_CHECK_NULL(sup->pEventBlock, code, lino, _end, TSDB_CODE_INVALID_PARA);
blockDataCleanup(sup->pEventBlock);
nWindowEvents = taosArrayGetSize(sup->pWindowEvents);
nWindowResults = taosHashGetSize(sup->pResultHashMap);
qDebug("start to build stream notify event block, nWindowEvents: %d, nWindowResults: %d", nWindowEvents,
nWindowResults);
if (nWindowEvents == 0) {
goto _end;
}
code = blockDataEnsureCapacity(sup->pEventBlock, nWindowEvents);
QUERY_CHECK_CODE(code, lino, _end);
pEventStrCol = taosArrayGet(sup->pEventBlock->pDataBlock, NOTIFY_EVENT_STR_COLUMN_INDEX);
QUERY_CHECK_NULL(pEventStrCol, code, lino, _end, terrno);
for (int32_t i = 0; i < nWindowEvents; ++i) {
SStreamNotifyEvent* pResult = NULL;
SStreamNotifyEvent* pEvent = taosArrayGet(sup->pWindowEvents, i);
char* tableName = taosHashGet(sup->pTableNameHashMap, &pEvent->gid, sizeof(pEvent->gid));
if (tableName == NULL) {
code = streamNotifyGetDestTableName(pTaskInfo, pEvent->gid, &tableName);
QUERY_CHECK_CODE(code, lino, _end);
code = taosHashPut(sup->pTableNameHashMap, &pEvent->gid, sizeof(pEvent->gid), tableName, strlen(tableName) + 1);
taosMemoryFreeClear(tableName);
QUERY_CHECK_CODE(code, lino, _end);
tableName = taosHashGet(sup->pTableNameHashMap, &pEvent->gid, sizeof(pEvent->gid));
QUERY_CHECK_NULL(tableName, code, lino, _end, TSDB_CODE_INTERNAL_ERROR);
}
if (pEvent->isEnd) {
pResult = taosHashGet(sup->pResultHashMap, &pEvent->gid, sizeof(pEvent->gid) + sizeof(pEvent->skey));
QUERY_CHECK_NULL(pResult, code, lino, _end, TSDB_CODE_INTERNAL_ERROR);
}
code = streamNotifyFillTableName(tableName, pEvent, pResult, &val);
QUERY_CHECK_CODE(code, lino, _end);
code = colDataSetVal(pEventStrCol, i, val, false);
QUERY_CHECK_CODE(code, lino, _end);
taosMemoryFreeClear(val);
sup->pEventBlock->info.rows++;
}
if (taosHashGetMemSize(sup->pTableNameHashMap) >= NOTIFY_EVENT_NAME_CACHE_LIMIT_MB * 1024 * 1024) {
taosHashClear(sup->pTableNameHashMap);
}
_end:
if (code != TSDB_CODE_SUCCESS) {
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
if (val != NULL) {
taosMemoryFreeClear(val);
}
if (sup != NULL) {
taosArrayClearEx(sup->pWindowEvents, destroyStreamWindowEvent);
taosHashClear(sup->pResultHashMap);
}
return code;
}

View File

@ -55,6 +55,7 @@ void destroyStreamIntervalSliceOperatorInfo(void* param) {
pInfo->pOperator = NULL;
}
destroyStreamBasicInfo(&pInfo->basic);
clearGroupResInfo(&pInfo->groupResInfo);
taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos);
pInfo->pUpdated = NULL;
@ -651,7 +652,8 @@ int32_t createStreamIntervalSliceOperatorInfo(SOperatorInfo* downstream, SPhysiN
optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL);
setOperatorStreamStateFn(pOperator, streamIntervalSliceReleaseState, streamIntervalSliceReloadState);
initStreamBasicInfo(&pInfo->basic);
code = initStreamBasicInfo(&pInfo->basic);
QUERY_CHECK_CODE(code, lino, _error);
if (downstream) {
code = initIntervalSliceDownStream(downstream, &pInfo->streamAggSup, pPhyNode->type, pInfo->primaryTsIndex,
&pInfo->twAggSup, &pInfo->basic, &pInfo->interval, pInfo->hasInterpoFunc);

View File

@ -150,6 +150,7 @@ void destroyStreamTimeSliceOperatorInfo(void* param) {
&pInfo->groupResInfo);
pInfo->pOperator = NULL;
}
destroyStreamBasicInfo(&pInfo->basic);
colDataDestroy(&pInfo->twAggSup.timeWindowData);
destroyStreamAggSupporter(&pInfo->streamAggSup);
resetPrevAndNextWindow(pInfo->pFillSup);
@ -2201,7 +2202,8 @@ int32_t createStreamTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode*
optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL);
setOperatorStreamStateFn(pOperator, streamTimeSliceReleaseState, streamTimeSliceReloadState);
initStreamBasicInfo(&pInfo->basic);
code = initStreamBasicInfo(&pInfo->basic);
QUERY_CHECK_CODE(code, lino, _error);
if (downstream) {
code = initTimeSliceDownStream(downstream, &pInfo->streamAggSup, pOperator->operatorType, pInfo->primaryTsIndex,
&pInfo->twAggSup, &pInfo->basic, pInfo->pFillSup);

View File

@ -784,6 +784,7 @@ static bool funcNotSupportStringSma(SFunctionNode* pFunc) {
case FUNCTION_TYPE_SPREAD_PARTIAL:
case FUNCTION_TYPE_SPREAD_MERGE:
case FUNCTION_TYPE_TWA:
case FUNCTION_TYPE_ELAPSED:
pParam = nodesListGetNode(pFunc->pParameterList, 0);
if (pParam && nodesIsExprNode(pParam) && (IS_VAR_DATA_TYPE(((SExprNode*)pParam)->resType.type))) {
return true;

View File

@ -99,6 +99,8 @@ const char* nodesNodeName(ENodeType type) {
return "CountWindow";
case QUERY_NODE_ANOMALY_WINDOW:
return "AnomalyWindow";
case QUERY_NODE_STREAM_NOTIFY_OPTIONS:
return "StreamNotifyOptions";
case QUERY_NODE_SET_OPERATOR:
return "SetOperator";
case QUERY_NODE_SELECT_STMT:
@ -5812,6 +5814,45 @@ static int32_t jsonToStreamOptions(const SJson* pJson, void* pObj) {
return code;
}
static const char* jkStreamNotifyOptionsAddrUrls = "AddrUrls";
static const char* jkStreamNotifyOptionsEventType = "EventType";
static const char* jkStreamNotifyOptionsErrorHandle = "ErrorHandle";
static const char* jkStreamNotifyOptionsNotifyHistory = "NotifyHistory";
static int32_t streamNotifyOptionsToJson(const void* pObj, SJson* pJson) {
const SStreamNotifyOptions* pNotifyOption = (const SStreamNotifyOptions*)pObj;
int32_t code = nodeListToJson(pJson, jkStreamNotifyOptionsAddrUrls, pNotifyOption->pAddrUrls);
if (code == TSDB_CODE_SUCCESS) {
code = tjsonAddIntegerToObject(pJson, jkStreamNotifyOptionsEventType, pNotifyOption->eventTypes);
}
if (code == TSDB_CODE_SUCCESS) {
code = tjsonAddIntegerToObject(pJson, jkStreamNotifyOptionsErrorHandle, pNotifyOption->errorHandle);
}
if (code == TSDB_CODE_SUCCESS) {
code = tjsonAddBoolToObject(pJson, jkStreamNotifyOptionsNotifyHistory, pNotifyOption->notifyHistory);
}
return code;
}
static int32_t jsonToStreamNotifyOptions(const SJson* pJson, void* pObj) {
SStreamNotifyOptions* pNotifyOption = (SStreamNotifyOptions*)pObj;
int32_t code = jsonToNodeList(pJson, jkStreamNotifyOptionsAddrUrls, &pNotifyOption->pAddrUrls);
int32_t val = 0;
if (code == TSDB_CODE_SUCCESS) {
code = tjsonGetIntValue(pJson, jkStreamNotifyOptionsEventType, &val);
pNotifyOption->eventTypes = val;
}
if (code == TSDB_CODE_SUCCESS) {
code = tjsonGetIntValue(pJson, jkStreamNotifyOptionsErrorHandle, &val);
pNotifyOption->errorHandle = val;
}
if (code == TSDB_CODE_SUCCESS) {
code = tjsonGetBoolValue(pJson, jkStreamNotifyOptionsNotifyHistory, &pNotifyOption->notifyHistory);
}
return code;
}
static const char* jkWhenThenWhen = "When";
static const char* jkWhenThenThen = "Then";
@ -7207,6 +7248,7 @@ static const char* jkCreateStreamStmtOptions = "Options";
static const char* jkCreateStreamStmtQuery = "Query";
static const char* jkCreateStreamStmtTags = "Tags";
static const char* jkCreateStreamStmtSubtable = "Subtable";
static const char* jkCreateStreamStmtNotifyOptions = "NotifyOptions";
static int32_t createStreamStmtToJson(const void* pObj, SJson* pJson) {
const SCreateStreamStmt* pNode = (const SCreateStreamStmt*)pObj;
@ -7233,6 +7275,9 @@ static int32_t createStreamStmtToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkCreateStreamStmtSubtable, nodeToJson, pNode->pSubtable);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkCreateStreamStmtNotifyOptions, nodeToJson, pNode->pNotifyOptions);
}
return code;
}
@ -7262,6 +7307,9 @@ static int32_t jsonToCreateStreamStmt(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkCreateStreamStmtSubtable, &pNode->pSubtable);
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkCreateStreamStmtNotifyOptions, (SNode**)&pNode->pNotifyOptions);
}
return code;
}
@ -8029,6 +8077,8 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
return countWindowNodeToJson(pObj, pJson);
case QUERY_NODE_ANOMALY_WINDOW:
return anomalyWindowNodeToJson(pObj, pJson);
case QUERY_NODE_STREAM_NOTIFY_OPTIONS:
return streamNotifyOptionsToJson(pObj, pJson);
case QUERY_NODE_SET_OPERATOR:
return setOperatorToJson(pObj, pJson);
case QUERY_NODE_SELECT_STMT:
@ -8402,6 +8452,8 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
return jsonToCountWindowNode(pJson, pObj);
case QUERY_NODE_ANOMALY_WINDOW:
return jsonToAnomalyWindowNode(pJson, pObj);
case QUERY_NODE_STREAM_NOTIFY_OPTIONS:
return jsonToStreamNotifyOptions(pJson, pObj);
case QUERY_NODE_SET_OPERATOR:
return jsonToSetOperator(pJson, pObj);
case QUERY_NODE_SELECT_STMT:

View File

@ -467,6 +467,9 @@ int32_t nodesMakeNode(ENodeType type, SNode** ppNodeOut) {
case QUERY_NODE_WINDOW_OFFSET:
code = makeNode(type, sizeof(SWindowOffsetNode), &pNode);
break;
case QUERY_NODE_STREAM_NOTIFY_OPTIONS:
code = makeNode(type, sizeof(SStreamNotifyOptions), &pNode);
break;
case QUERY_NODE_SET_OPERATOR:
code = makeNode(type, sizeof(SSetOperator), &pNode);
break;
@ -1267,6 +1270,11 @@ void nodesDestroyNode(SNode* pNode) {
nodesDestroyNode(pAround->pTimepoint);
break;
}
case QUERY_NODE_STREAM_NOTIFY_OPTIONS: {
SStreamNotifyOptions* pNotifyOptions = (SStreamNotifyOptions*)pNode;
nodesDestroyList(pNotifyOptions->pAddrUrls);
break;
}
case QUERY_NODE_SET_OPERATOR: {
SSetOperator* pStmt = (SSetOperator*)pNode;
nodesDestroyList(pStmt->pProjectionList);
@ -1479,6 +1487,7 @@ void nodesDestroyNode(SNode* pNode) {
nodesDestroyNode(pStmt->pQuery);
nodesDestroyList(pStmt->pTags);
nodesDestroyNode(pStmt->pSubtable);
nodesDestroyNode((SNode*)pStmt->pNotifyOptions);
tFreeSCMCreateStreamReq(pStmt->pReq);
taosMemoryFreeClear(pStmt->pReq);
break;

View File

@ -296,8 +296,12 @@ SNode* createDropFunctionStmt(SAstCreateContext* pCxt, bool ignoreNotExists, con
SNode* createStreamOptions(SAstCreateContext* pCxt);
SNode* setStreamOptions(SAstCreateContext* pCxt, SNode* pOptions, EStreamOptionsSetFlag setflag, SToken* pToken,
SNode* pNode);
SNode* createStreamNotifyOptions(SAstCreateContext *pCxt, SNodeList* pAddrUrls, SNodeList* pEventTypes);
SNode* setStreamNotifyOptions(SAstCreateContext* pCxt, SNode* pNode, EStreamNotifyOptionSetFlag setFlag,
SToken* pToken);
SNode* createCreateStreamStmt(SAstCreateContext* pCxt, bool ignoreExists, SToken* pStreamName, SNode* pRealTable,
SNode* pOptions, SNodeList* pTags, SNode* pSubtable, SNode* pQuery, SNodeList* pCols);
SNode* pOptions, SNodeList* pTags, SNode* pSubtable, SNode* pQuery, SNodeList* pCols,
SNode* pNotifyOptions);
SNode* createDropStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pStreamName);
SNode* createPauseStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pStreamName);
SNode* createResumeStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, bool ignoreUntreated, SToken* pStreamName);

View File

@ -785,7 +785,7 @@ full_view_name(A) ::= db_name(B) NK_DOT view_name(C).
/************************************************ create/drop stream **************************************************/
cmd ::= CREATE STREAM not_exists_opt(E) stream_name(A) stream_options(B) INTO
full_table_name(C) col_list_opt(H) tag_def_or_ref_opt(F) subtable_opt(G)
AS query_or_subquery(D). { pCxt->pRootNode = createCreateStreamStmt(pCxt, E, &A, C, B, F, G, D, H); }
AS query_or_subquery(D) notify_opt(I). { pCxt->pRootNode = createCreateStreamStmt(pCxt, E, &A, C, B, F, G, D, H, I); }
cmd ::= DROP STREAM exists_opt(A) stream_name(B). { pCxt->pRootNode = createDropStreamStmt(pCxt, A, &B); }
cmd ::= PAUSE STREAM exists_opt(A) stream_name(B). { pCxt->pRootNode = createPauseStreamStmt(pCxt, A, &B); }
cmd ::= RESUME STREAM exists_opt(A) ignore_opt(C) stream_name(B). { pCxt->pRootNode = createResumeStreamStmt(pCxt, A, C, &B); }
@ -832,6 +832,26 @@ subtable_opt(A) ::= SUBTABLE NK_LP expression(B) NK_RP.
ignore_opt(A) ::= . { A = false; }
ignore_opt(A) ::= IGNORE UNTREATED. { A = true; }
notify_opt(A) ::= . { A = NULL; }
notify_opt(A) ::= notify_def(B). { A = B; }
notify_def(A) ::= NOTIFY NK_LP url_def_list(B) NK_RP ON NK_LP event_def_list(C) NK_RP. { A = createStreamNotifyOptions(pCxt, B, C); }
notify_def(A) ::= notify_def(B) ON_FAILURE DROP(C). { A = setStreamNotifyOptions(pCxt, B, SNOTIFY_OPT_ERROR_HANDLE_SET, &C); }
notify_def(A) ::= notify_def(B) ON_FAILURE PAUSE(C). { A = setStreamNotifyOptions(pCxt, B, SNOTIFY_OPT_ERROR_HANDLE_SET, &C); }
notify_def(A) ::= notify_def(B) NOTIFY_HISTORY NK_INTEGER(C). { A = setStreamNotifyOptions(pCxt, B, SNOTIFY_OPT_NOTIFY_HISTORY_SET, &C); }
%type url_def_list { SNodeList* }
%destructor url_def_list { nodesDestroyList($$); }
url_def_list(A) ::= NK_STRING(B). { A = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &B)); }
url_def_list(A) ::= url_def_list(B) NK_COMMA NK_STRING(C). { A = addNodeToList(pCxt, B, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &C)); }
%type event_def_list { SNodeList* }
%destructor event_def_list { nodesDestroyList($$); }
event_def_list(A) ::= NK_STRING(B). { A = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &B)); }
event_def_list(A) ::= event_def_list(B) NK_COMMA NK_STRING(C). { A = addNodeToList(pCxt, B, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &C)); }
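/* An illustrative, unverified example of the notify clause accepted by the rules above; the stream,
   table and URL names are placeholders:
     CREATE STREAM s1 INTO st1 AS SELECT _wstart, COUNT(*) FROM meters INTERVAL(10s)
     NOTIFY ('ws://localhost:6041/notify', 'wss://backup:6041/notify') ON ('WINDOW_OPEN', 'WINDOW_CLOSE')
     ON_FAILURE PAUSE NOTIFY_HISTORY 1; */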
/************************************************ kill connection/query ***********************************************/
cmd ::= KILL CONNECTION NK_INTEGER(A). { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_CONNECTION_STMT, &A); }
cmd ::= KILL QUERY NK_STRING(A). { pCxt->pRootNode = createKillQueryStmt(pCxt, &A); }
@ -1391,6 +1411,8 @@ compare_op(A) ::= LIKE.
compare_op(A) ::= NOT LIKE. { A = OP_TYPE_NOT_LIKE; }
compare_op(A) ::= MATCH. { A = OP_TYPE_MATCH; }
compare_op(A) ::= NMATCH. { A = OP_TYPE_NMATCH; }
compare_op(A) ::= REGEXP. { A = OP_TYPE_MATCH; }
compare_op(A) ::= NOT REGEXP. { A = OP_TYPE_NMATCH; }
compare_op(A) ::= CONTAINS. { A = OP_TYPE_JSON_CONTAINS; }
%type in_op { EOperatorType }

View File

@ -1526,8 +1526,8 @@ SNode* createCaseWhenNode(SAstCreateContext* pCxt, SNode* pCase, SNodeList* pWhe
pCaseWhen->pCase = pCase;
pCaseWhen->pWhenThenList = pWhenThenList;
pCaseWhen->pElse = pElse;
pCaseWhen->tz = pCxt->pQueryCxt->timezone;
pCaseWhen->charsetCxt = pCxt->pQueryCxt->charsetCxt;
pCaseWhen->tz = pCxt->pQueryCxt->timezone;
pCaseWhen->charsetCxt = pCxt->pQueryCxt->charsetCxt;
return (SNode*)pCaseWhen;
_err:
nodesDestroyNode(pCase);
@ -3657,8 +3657,115 @@ SNode* setStreamOptions(SAstCreateContext* pCxt, SNode* pOptions, EStreamOptions
return pOptions;
}
static bool validateNotifyUrl(const char* url) {
const char* prefix[] = {"http://", "https://", "ws://", "wss://"};
const char* host = NULL;
if (!url || *url == '\0') return false;
for (int32_t i = 0; i < ARRAY_SIZE(prefix); ++i) {
if (strncasecmp(url, prefix[i], strlen(prefix[i])) == 0) {
host = url + strlen(prefix[i]);
break;
}
}
return (host != NULL) && (*host != '\0') && (*host != '/');
}
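/* For instance (illustration only): "ws://localhost:6041/notify" and "HTTPS://example.com" pass the
 * check above, while "ftp://host", "http://" and "http:///path" are rejected because the scheme is
 * unknown, the host is empty, or the host part starts with '/'. */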
SNode* createStreamNotifyOptions(SAstCreateContext* pCxt, SNodeList* pAddrUrls, SNodeList* pEventTypes) {
SNode* pNode = NULL;
EStreamNotifyEventType eventTypes = 0;
const char* eWindowOpenStr = "WINDOW_OPEN";
const char* eWindowCloseStr = "WINDOW_CLOSE";
CHECK_PARSER_STATUS(pCxt);
if (LIST_LENGTH(pAddrUrls) == 0) {
pCxt->errCode =
generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, "notification address cannot be empty");
goto _err;
}
FOREACH(pNode, pAddrUrls) {
char *url = ((SValueNode*)pNode)->literal;
if (strlen(url) >= TSDB_STREAM_NOTIFY_URL_LEN) {
pCxt->errCode =
generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR,
"notification address \"%s\" exceed maximum length %d", url, TSDB_STREAM_NOTIFY_URL_LEN);
goto _err;
}
if (!validateNotifyUrl(url)) {
pCxt->errCode = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR,
"invalid notification address \"%s\"", url);
goto _err;
}
}
if (LIST_LENGTH(pEventTypes) == 0) {
pCxt->errCode = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR,
"event types must be specified for notification");
goto _err;
}
FOREACH(pNode, pEventTypes) {
char *eventStr = ((SValueNode *)pNode)->literal;
if (strncasecmp(eventStr, eWindowOpenStr, strlen(eWindowOpenStr) + 1) == 0) {
BIT_FLAG_SET_MASK(eventTypes, SNOTIFY_EVENT_WINDOW_OPEN);
} else if (strncasecmp(eventStr, eWindowCloseStr, strlen(eWindowCloseStr) + 1) == 0) {
BIT_FLAG_SET_MASK(eventTypes, SNOTIFY_EVENT_WINDOW_CLOSE);
} else {
pCxt->errCode = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR,
"invalid event type '%s' for notification", eventStr);
goto _err;
}
}
SStreamNotifyOptions* pNotifyOptions = NULL;
pCxt->errCode = nodesMakeNode(QUERY_NODE_STREAM_NOTIFY_OPTIONS, (SNode**)&pNotifyOptions);
CHECK_MAKE_NODE(pNotifyOptions);
pNotifyOptions->pAddrUrls = pAddrUrls;
pNotifyOptions->eventTypes = eventTypes;
pNotifyOptions->errorHandle = SNOTIFY_ERROR_HANDLE_PAUSE;
pNotifyOptions->notifyHistory = false;
nodesDestroyList(pEventTypes);
return (SNode*)pNotifyOptions;
_err:
nodesDestroyList(pAddrUrls);
nodesDestroyList(pEventTypes);
return NULL;
}
SNode* setStreamNotifyOptions(SAstCreateContext* pCxt, SNode* pNode, EStreamNotifyOptionSetFlag setFlag,
SToken* pToken) {
CHECK_PARSER_STATUS(pCxt);
SStreamNotifyOptions* pNotifyOption = (SStreamNotifyOptions*)pNode;
if (BIT_FLAG_TEST_MASK(pNotifyOption->setFlag, setFlag)) {
pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR,
"stream notify options each item can only be set once");
goto _err;
}
switch (setFlag) {
case SNOTIFY_OPT_ERROR_HANDLE_SET:
pNotifyOption->errorHandle = (pToken->type == TK_DROP) ? SNOTIFY_ERROR_HANDLE_DROP : SNOTIFY_ERROR_HANDLE_PAUSE;
break;
case SNOTIFY_OPT_NOTIFY_HISTORY_SET:
pNotifyOption->notifyHistory = taosStr2Int8(pToken->z, NULL, 10);
break;
default:
break;
}
BIT_FLAG_SET_MASK(pNotifyOption->setFlag, setFlag);
return pNode;
_err:
nodesDestroyNode(pNode);
return NULL;
}
SNode* createCreateStreamStmt(SAstCreateContext* pCxt, bool ignoreExists, SToken* pStreamName, SNode* pRealTable,
SNode* pOptions, SNodeList* pTags, SNode* pSubtable, SNode* pQuery, SNodeList* pCols) {
SNode* pOptions, SNodeList* pTags, SNode* pSubtable, SNode* pQuery, SNodeList* pCols,
SNode* pNotifyOptions) {
CHECK_PARSER_STATUS(pCxt);
CHECK_NAME(checkStreamName(pCxt, pStreamName));
SCreateStreamStmt* pStmt = NULL;
@ -3674,6 +3781,7 @@ SNode* createCreateStreamStmt(SAstCreateContext* pCxt, bool ignoreExists, SToken
pStmt->pTags = pTags;
pStmt->pSubtable = pSubtable;
pStmt->pCols = pCols;
pStmt->pNotifyOptions = (SStreamNotifyOptions*)pNotifyOptions;
return (SNode*)pStmt;
_err:
nodesDestroyNode(pRealTable);
@ -3682,6 +3790,7 @@ _err:
nodesDestroyList(pTags);
nodesDestroyNode(pSubtable);
nodesDestroyList(pCols);
nodesDestroyNode(pNotifyOptions);
return NULL;
}

View File

@ -2751,6 +2751,9 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pS
if (TSDB_CODE_SUCCESS == code && hasData) {
code = parseInsertTableClause(pCxt, pStmt, &token);
}
if (TSDB_CODE_PAR_TABLE_NOT_EXIST == code && pCxt->preCtbname) {
code = TSDB_CODE_TSC_STMT_TBNAME_ERROR;
}
}
if (TSDB_CODE_SUCCESS == code && !pCxt->missCache) {

View File

@ -355,6 +355,10 @@ static SKeyword keywordTable[] = {
{"FORCE_WINDOW_CLOSE", TK_FORCE_WINDOW_CLOSE},
{"DISK_INFO", TK_DISK_INFO},
{"AUTO", TK_AUTO},
{"NOTIFY", TK_NOTIFY},
{"ON_FAILURE", TK_ON_FAILURE},
{"NOTIFY_HISTORY", TK_NOTIFY_HISTORY},
{"REGEXP", TK_REGEXP},
};
// clang-format on

View File

@ -12192,6 +12192,45 @@ static int32_t translateStreamOptions(STranslateContext* pCxt, SCreateStreamStmt
return TSDB_CODE_SUCCESS;
}
static int32_t buildStreamNotifyOptions(STranslateContext* pCxt, SStreamNotifyOptions* pNotifyOptions,
SCMCreateStreamReq* pReq) {
int32_t code = TSDB_CODE_SUCCESS;
SNode* pNode = NULL;
if (pNotifyOptions == NULL || pNotifyOptions->pAddrUrls->length == 0) {
return code;
}
pReq->pNotifyAddrUrls = taosArrayInit(pNotifyOptions->pAddrUrls->length, POINTER_BYTES);
if (pReq->pNotifyAddrUrls != NULL) {
FOREACH(pNode, pNotifyOptions->pAddrUrls) {
char *url = taosStrndup(((SValueNode*)pNode)->literal, TSDB_STREAM_NOTIFY_URL_LEN);
if (url == NULL) {
code = terrno;
break;
}
if (taosArrayPush(pReq->pNotifyAddrUrls, &url) == NULL) {
code = terrno;
taosMemoryFreeClear(url);
break;
}
}
} else {
code = terrno;
}
if (code == TSDB_CODE_SUCCESS) {
pReq->notifyEventTypes = pNotifyOptions->eventTypes;
pReq->notifyErrorHandle = pNotifyOptions->errorHandle;
pReq->notifyHistory = pNotifyOptions->notifyHistory;
} else {
taosArrayDestroyP(pReq->pNotifyAddrUrls, NULL);
pReq->pNotifyAddrUrls = NULL;
}
return code;
}
static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* pStmt, SCMCreateStreamReq* pReq) {
pReq->igExists = pStmt->ignoreExists;
@ -12238,6 +12277,10 @@ static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt*
}
}
if (TSDB_CODE_SUCCESS == code) {
code = buildStreamNotifyOptions(pCxt, pStmt->pNotifyOptions, pReq);
}
return code;
}

View File

@ -735,7 +735,7 @@ int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, S
!alreadyAddGroupId(pDataBlock->info.parTbName, groupId) && groupId != 0) {
if (pTask->ver == SSTREAM_TASK_SUBTABLE_CHANGED_VER) {
code = buildCtbNameAddGroupId(NULL, pDataBlock->info.parTbName, groupId, sizeof(pDataBlock->info.parTbName));
} else if (pTask->ver > SSTREAM_TASK_SUBTABLE_CHANGED_VER) {
} else if (pTask->ver >= SSTREAM_TASK_APPEND_STABLE_NAME_VER) {
code = buildCtbNameAddGroupId(pTask->outputInfo.shuffleDispatcher.stbFullName, pDataBlock->info.parTbName,
groupId, sizeof(pDataBlock->info.parTbName));
}

View File

@ -14,4 +14,4 @@ void chkptFailedByRetrieveReqToSource(SStreamTask* pTask, int64_t checkpointId)
// the checkpoint interval should be 60s, and the next checkpoint req should be issued by mnode
taosMsleep(65*1000);
}
}

View File

@ -331,7 +331,6 @@ void streamMetaHbToMnode(void* param, void* tmrId) {
} else {
stError("vgId:%d jump out of meta timer, failed to release the meta rid:%" PRId64, vgId, rid);
}
// taosMemoryFree(param);
return;
}

View File

@ -198,6 +198,7 @@ int32_t streamMetaCheckBackendCompatible(SStreamMeta* pMeta) {
SCheckpointInfo info;
tDecoderInit(&decoder, (uint8_t*)pVal, vLen);
if (tDecodeStreamTaskChkInfo(&decoder, &info) < 0) {
tDecoderClear(&decoder);
continue;
}
@ -576,6 +577,7 @@ void streamMetaClose(SStreamMeta* pMeta) {
if (pMeta == NULL) {
return;
}
int32_t code = taosRemoveRef(streamMetaRefPool, pMeta->rid);
if (code) {
stError("vgId:%d failed to remove meta ref:%" PRId64 ", code:%s", pMeta->vgId, pMeta->rid, tstrerror(code));
@ -1031,6 +1033,7 @@ int64_t streamMetaGetLatestCheckpointId(SStreamMeta* pMeta) {
SCheckpointInfo info;
tDecoderInit(&decoder, (uint8_t*)pVal, vLen);
if (tDecodeStreamTaskChkInfo(&decoder, &info) < 0) {
tDecoderClear(&decoder);
continue;
}
tDecoderClear(&decoder);

View File

@ -326,6 +326,11 @@ void tFreeStreamTask(void* pParam) {
streamTaskDestroyActiveChkptInfo(pTask->chkInfo.pActiveInfo);
pTask->chkInfo.pActiveInfo = NULL;
taosArrayDestroyP(pTask->notifyInfo.pNotifyAddrUrls, NULL);
taosMemoryFreeClear(pTask->notifyInfo.streamName);
taosMemoryFreeClear(pTask->notifyInfo.stbFullName);
tDeleteSchemaWrapper(pTask->notifyInfo.pSchemaWrapper);
taosMemoryFree(pTask);
stDebug("s-task:0x%x free task completed", taskId);
}
@ -1318,6 +1323,78 @@ void streamTaskFreeRefId(int64_t* pRefId) {
metaRefMgtRemove(pRefId);
}
static int32_t tEncodeStreamNotifyInfo(SEncoder* pEncoder, const SNotifyInfo* info) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
QUERY_CHECK_NULL(pEncoder, code, lino, _exit, TSDB_CODE_INVALID_PARA);
QUERY_CHECK_NULL(info, code, lino, _exit, TSDB_CODE_INVALID_PARA);
int32_t addrSize = taosArrayGetSize(info->pNotifyAddrUrls);
TAOS_CHECK_EXIT(tEncodeI32(pEncoder, addrSize));
for (int32_t i = 0; i < addrSize; ++i) {
const char* url = taosArrayGetP(info->pNotifyAddrUrls, i);
TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, url));
}
TAOS_CHECK_EXIT(tEncodeI32(pEncoder, info->notifyEventTypes));
TAOS_CHECK_EXIT(tEncodeI32(pEncoder, info->notifyErrorHandle));
if (addrSize > 0) {
TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, info->streamName));
TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, info->stbFullName));
TAOS_CHECK_EXIT(tEncodeSSchemaWrapper(pEncoder, info->pSchemaWrapper));
}
_exit:
if (code != TSDB_CODE_SUCCESS) {
stError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
return code;
}
static int32_t tDecodeStreamNotifyInfo(SDecoder* pDecoder, SNotifyInfo* info) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
QUERY_CHECK_NULL(pDecoder, code, lino, _exit, TSDB_CODE_INVALID_PARA);
QUERY_CHECK_NULL(info, code, lino, _exit, TSDB_CODE_INVALID_PARA);
int32_t addrSize = 0;
TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &addrSize));
info->pNotifyAddrUrls = taosArrayInit(addrSize, POINTER_BYTES);
QUERY_CHECK_NULL(info->pNotifyAddrUrls, code, lino, _exit, terrno);
for (int32_t i = 0; i < addrSize; ++i) {
char *url = NULL;
TAOS_CHECK_EXIT(tDecodeCStr(pDecoder, &url));
url = taosStrndup(url, TSDB_STREAM_NOTIFY_URL_LEN);
QUERY_CHECK_NULL(url, code, lino, _exit, terrno);
if (taosArrayPush(info->pNotifyAddrUrls, &url) == NULL) {
taosMemoryFree(url);
TAOS_CHECK_EXIT(terrno);
}
}
TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &info->notifyEventTypes));
TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &info->notifyErrorHandle));
if (addrSize > 0) {
char* name = NULL;
TAOS_CHECK_EXIT(tDecodeCStr(pDecoder, &name));
info->streamName = taosStrndup(name, TSDB_STREAM_FNAME_LEN + 1);
QUERY_CHECK_NULL(info->streamName, code, lino, _exit, terrno);
TAOS_CHECK_EXIT(tDecodeCStr(pDecoder, &name));
info->stbFullName = taosStrndup(name, TSDB_STREAM_FNAME_LEN + 1);
QUERY_CHECK_NULL(info->stbFullName, code, lino, _exit, terrno);
info->pSchemaWrapper = taosMemoryCalloc(1, sizeof(SSchemaWrapper));
if (info->pSchemaWrapper == NULL) {
TAOS_CHECK_EXIT(terrno);
}
TAOS_CHECK_EXIT(tDecodeSSchemaWrapper(pDecoder, info->pSchemaWrapper));
}
_exit:
if (code != TSDB_CODE_SUCCESS) {
stError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
return code;
}
int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) {
int32_t code = 0;
@ -1388,6 +1465,10 @@ int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) {
TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->subtableWithoutMd5));
TAOS_CHECK_EXIT(tEncodeCStrWithLen(pEncoder, pTask->reserve, sizeof(pTask->reserve) - 1));
if (pTask->ver >= SSTREAM_TASK_ADD_NOTIFY_VER) {
TAOS_CHECK_EXIT(tEncodeStreamNotifyInfo(pEncoder, &pTask->notifyInfo));
}
tEndEncode(pEncoder);
_exit:
return code;
@ -1486,8 +1567,12 @@ int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask) {
}
TAOS_CHECK_EXIT(tDecodeCStrTo(pDecoder, pTask->reserve));
if (pTask->ver >= SSTREAM_TASK_ADD_NOTIFY_VER) {
TAOS_CHECK_EXIT(tDecodeStreamNotifyInfo(pDecoder, &pTask->notifyInfo));
}
tEndDecode(pDecoder);
_exit:
return code;
}
}

View File

@ -732,7 +732,11 @@ int32_t syncFsmExecute(SSyncNode* pNode, SSyncFSM* pFsm, ESyncState role, SyncTe
pEntry->index, pEntry->term, TMSG_INFO(pEntry->originalRpcType), code, retry);
if (retry) {
taosMsleep(10);
sError("vgId:%d, retry on fsm commit since %s. index:%" PRId64, pNode->vgId, tstrerror(code), pEntry->index);
if (code == TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE) {
sError("vgId:%d, failed to execute fsm since %s. index:%" PRId64, pNode->vgId, terrstr(), pEntry->index);
} else {
sDebug("vgId:%d, retry on fsm commit since %s. index:%" PRId64, pNode->vgId, terrstr(), pEntry->index);
}
}
} while (retry);

View File

@ -1490,3 +1490,32 @@ bool taosAssertRelease(bool condition) {
return true;
}
#endif
char* u64toaFastLut(uint64_t val, char* buf) {
static const char* lut =
"0001020304050607080910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455"
"5657585960616263646566676869707172737475767778798081828384858687888990919293949596979899";
char temp[24];
char* p = temp;
while (val >= 100) {
strncpy(p, lut + (val % 100) * 2, 2);
val /= 100;
p += 2;
}
if (val >= 10) {
strncpy(p, lut + val * 2, 2);
p += 2;
} else if (val > 0 || p == temp) {
*(p++) = val + '0';
}
while (p != temp) {
*buf++ = *--p;
}
*buf = '\0';
return buf;
}
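/* Minimal usage sketch (assumption, not from the original sources):
 *   char buf[21];                       // 20 digits of UINT64_MAX plus the terminating NUL
 *   u64toaFastLut(1234567890ULL, buf);  // buf now holds "1234567890"
 * The returned pointer points at the written NUL terminator, as relied on by
 * streamNotifyGetEventWindowId above. */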

View File

@ -14,14 +14,16 @@
*/
#define _DEFAULT_SOURCE
#include "tqueue.h"
#include "taoserror.h"
#include "tlog.h"
#include "tqueue.h"
#include "tutil.h"
int64_t tsQueueMemoryAllowed = 0;
int64_t tsQueueMemoryUsed = 0;
int64_t tsApplyMemoryAllowed = 0;
int64_t tsApplyMemoryUsed = 0;
struct STaosQueue {
STaosQnode *head;
STaosQnode *tail;
@ -148,20 +150,34 @@ int64_t taosQueueMemorySize(STaosQueue *queue) {
}
int32_t taosAllocateQitem(int32_t size, EQItype itype, int64_t dataSize, void **item) {
int64_t alloced = atomic_add_fetch_64(&tsQueueMemoryUsed, size + dataSize);
int64_t alloced = -1;
if (alloced > tsQueueMemoryAllowed) {
alloced = atomic_add_fetch_64(&tsQueueMemoryUsed, size + dataSize);
if (itype == RPC_QITEM) {
uError("failed to alloc qitem, size:%" PRId64 " alloc:%" PRId64 " allowed:%" PRId64, size + dataSize, alloced,
tsQueueMemoryAllowed);
(void)atomic_sub_fetch_64(&tsQueueMemoryUsed, size + dataSize);
return (terrno = TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE);
}
} else if (itype == APPLY_QITEM) {
alloced = atomic_add_fetch_64(&tsApplyMemoryUsed, size + dataSize);
if (alloced > tsApplyMemoryAllowed) {
uDebug("failed to alloc qitem, size:%" PRId64 " alloc:%" PRId64 " allowed:%" PRId64, size + dataSize, alloced,
tsApplyMemoryAllowed);
(void)atomic_sub_fetch_64(&tsApplyMemoryUsed, size + dataSize);
return (terrno = TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE);
}
}
*item = NULL;
STaosQnode *pNode = taosMemoryCalloc(1, sizeof(STaosQnode) + size);
if (pNode == NULL) {
(void)atomic_sub_fetch_64(&tsQueueMemoryUsed, size + dataSize);
if (itype == RPC_QITEM) {
(void)atomic_sub_fetch_64(&tsQueueMemoryUsed, size + dataSize);
} else if (itype == APPLY_QITEM) {
(void)atomic_sub_fetch_64(&tsApplyMemoryUsed, size + dataSize);
}
return terrno;
}
@ -178,7 +194,12 @@ void taosFreeQitem(void *pItem) {
if (pItem == NULL) return;
STaosQnode *pNode = (STaosQnode *)((char *)pItem - sizeof(STaosQnode));
int64_t alloced = atomic_sub_fetch_64(&tsQueueMemoryUsed, pNode->size + pNode->dataSize);
int64_t alloced = -1;
if (pNode->itype == RPC_QITEM) {
alloced = atomic_sub_fetch_64(&tsQueueMemoryUsed, pNode->size + pNode->dataSize);
} else if (pNode->itype == APPLY_QITEM) {
alloced = atomic_sub_fetch_64(&tsApplyMemoryUsed, pNode->size + pNode->dataSize);
}
uTrace("item:%p, node:%p is freed, alloc:%" PRId64, pItem, pNode, alloced);
taosMemoryFree(pNode);

View File

@ -37,6 +37,7 @@ class TBase:
# save param
self.replicaVar = int(replicaVar)
tdSql.init(conn.cursor(), True)
self.tmpdir = "tmp"
# record server information
self.dnodeNum = 0

View File

@ -23,12 +23,31 @@ import frame.epath
import frame.eos
from frame.log import *
# run taosBenchmark with command or json file mode
def benchMark(command = "", json = "") :
# get taosBenchmark path
# taosdump
def taosDumpFile():
bmFile = frame.epath.binFile("taosdump")
if frame.eos.isWin():
bmFile += ".exe"
return bmFile
# taosBenchmark
def benchMarkFile():
bmFile = frame.epath.binFile("taosBenchmark")
if frame.eos.isWin():
bmFile += ".exe"
return bmFile
# taosAdapter
def taosAdapterFile():
bmFile = frame.epath.binFile("taosAdapter")
if frame.eos.isWin():
bmFile += ".exe"
return bmFile
# run taosBenchmark with command or json file mode
def benchMark(command = "", json = "") :
# get taosBenchmark path
bmFile = benchMarkFile()
# run
if command != "":

View File

@ -113,10 +113,11 @@ if __name__ == "__main__":
previousCluster = False
level = 1
disk = 1
taosAdapter = False
opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:RWU:n:i:aPL:D:', [
opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:RWBU:n:i:aPL:D:', [
'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd','dnodeNums','mnodeNums',
'queryPolicy','createDnodeNums','restful','websocket','adaptercfgupdate','replicaVar','independentMnode',"asan",'previous','level','disk'])
'queryPolicy','createDnodeNums','restful','websocket','adapter','adaptercfgupdate','replicaVar','independentMnode',"asan",'previous','level','disk'])
for key, value in opts:
if key in ['-h', '--help']:
tdLog.printNoPrefix(
@ -145,7 +146,7 @@ if __name__ == "__main__":
tdLog.printNoPrefix('-P run case with [P]revious cluster, do not create new cluster to run case.')
tdLog.printNoPrefix('-L set multiple level number. range 1 ~ 3')
tdLog.printNoPrefix('-D set disk number on each level. range 1 ~ 10')
tdLog.printNoPrefix('-B start taosadapter process')
sys.exit(0)
if key in ['-r', '--restart']:
@ -238,11 +239,14 @@ if __name__ == "__main__":
if key in ['-D', '--disk']:
disk = value
if key in ['-B']:
taosAdapter = True
#
# do exeCmd command
#
if not execCmd == "":
if restful or websocket:
if taosAdapter or restful or websocket:
tAdapter.init(deployPath)
else:
tdDnodes.init(deployPath)
@ -281,7 +285,7 @@ if __name__ == "__main__":
if valgrind:
time.sleep(2)
if restful or websocket:
if taosAdapter or restful or websocket:
toBeKilled = "taosadapter"
# killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled
@ -377,7 +381,7 @@ if __name__ == "__main__":
tdDnodes.deploy(1,updateCfgDict)
tdDnodes.start(1)
tdCases.logSql(logSql)
if restful or websocket:
if taosAdapter or restful or websocket:
tAdapter.deploy(adapter_cfg_dict)
tAdapter.start()
@ -416,7 +420,7 @@ if __name__ == "__main__":
clusterDnodes.starttaosd(dnode.index)
tdCases.logSql(logSql)
if restful or websocket:
if taosAdapter or restful or websocket:
tAdapter.deploy(adapter_cfg_dict)
tAdapter.start()
@ -537,7 +541,7 @@ if __name__ == "__main__":
except:
pass
if restful or websocket:
if taosAdapter or restful or websocket:
tAdapter.init(deployPath, masterIp)
tAdapter.stop(force_kill=True)
@ -548,7 +552,7 @@ if __name__ == "__main__":
tdDnodes.start(1)
tdCases.logSql(logSql)
if restful or websocket:
if taosAdapter or restful or websocket:
tAdapter.deploy(adapter_cfg_dict)
tAdapter.start()
@ -591,7 +595,7 @@ if __name__ == "__main__":
clusterDnodes.starttaosd(dnode.index)
tdCases.logSql(logSql)
if restful or websocket:
if taosAdapter or restful or websocket:
tAdapter.deploy(adapter_cfg_dict)
tAdapter.start()

View File

@ -17,37 +17,33 @@ import os
import sys
import time
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
from util.dnodes import tdDnodes
from util.dnodes import *
import frame
import frame.etool
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
from frame.srvCtl import *
import itertools
from itertools import product
from itertools import combinations
from faker import Faker
import subprocess
class TDTestCase:
class TDTestCase(TBase):
def caseDescription(self):
'''
case1<xyguo>[TD-12434]:taosdump null nchar/binary length can cause core:taos-tools/src/taosdump.c
case2<xyguo>[TD-12478]:taos_stmt_execute() failed! reason: WAL size exceeds limit
'''
return
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
os.system("rm -rf 5-taos-tools/TD-12478.py.sql")
os.system("rm db*")
os.system("rm dump_result.txt*")
def restartDnodes(self):
tdDnodes.stop(1)
tdDnodes.start(1)
sc.dnodeStop(1)
sc.dnodeStart(1)
def dropandcreateDB_random(self,n):
self.ts = 1630000000000

View File

@ -0,0 +1,169 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import os
import json
import frame
import frame.etool
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
# remove single and double quotation marks
def removeQuotation(origin):
value = ""
for c in origin:
if c != '\'' and c != '"':
value += c
return value
class TDTestCase(TBase):
def caseDescription(self):
"""
[TD-11510] taosBenchmark test cases
"""
def benchmarkQuery(self, benchmark, jsonFile, keys, options=""):
# exe insert
result = "query.log"
os.system(f"rm -f {result}")
cmd = f"{benchmark} {options} -f {jsonFile} >> {result}"
os.system(cmd)
tdLog.info(cmd)
with open(result) as file:
content = file.read()
for key in keys:
if content.find(key) == -1:
tdLog.exit(f"not found key: {key} in content={content}")
else:
tdLog.info(f"found key:{key} successful.")
def testBenchmarkJson(self, benchmark, jsonFile, options="", checkStep=False):
# exe insert
cmd = f"{benchmark} {options} -f {jsonFile}"
os.system(cmd)
#
# check insert result
#
with open(jsonFile, "r") as file:
data = json.load(file)
db = data["databases"][0]["dbinfo"]["name"]
stb = data["databases"][0]["super_tables"][0]["name"]
child_count = data["databases"][0]["super_tables"][0]["childtable_count"]
insert_rows = data["databases"][0]["super_tables"][0]["insert_rows"]
timestamp_step = data["databases"][0]["super_tables"][0]["timestamp_step"]
# drop
try:
drop = data["databases"][0]["dbinfo"]["drop"]
except:
drop = "yes"
# command line option takes precedence
if options.find("-Q") != -1:
drop = "no"
# cachemodel
try:
cachemode = data["databases"][0]["dbinfo"]["cachemodel"]
except:
cachemode = None
# vgroups
try:
vgroups = data["databases"][0]["dbinfo"]["vgroups"]
except:
vgroups = None
tdLog.info(f"get json info: db={db} stb={stb} child_count={child_count} insert_rows={insert_rows} \n")
# total rows should equal child_count * insert_rows
sql = f"select * from {db}.{stb}"
tdSql.query(sql)
tdSql.checkRows(child_count * insert_rows)
# timestamp step
if checkStep:
sql = f"select * from (select diff(ts) as dif from {db}.{stb} partition by tbname) where dif != {timestamp_step};"
tdSql.query(sql)
tdSql.checkRows(0)
if drop.lower() == "yes":
# check database options
sql = f"select `vgroups`,`cachemodel` from information_schema.ins_databases where name='{db}';"
tdSql.query(sql)
if cachemode != None:
value = removeQuotation(cachemode)
tdLog.info(f" deal both origin={cachemode} after={value}")
tdSql.checkData(0, 1, value)
if vgroups != None:
tdSql.checkData(0, 0, vgroups)
# bugs ts
def bugsTS(self, benchmark):
self.testBenchmarkJson(benchmark, "./tools/benchmark/basic/json/TS-5002.json")
# TS-5234
self.testBenchmarkJson(benchmark, "./tools/benchmark/basic/json/TS-5234-1.json")
self.testBenchmarkJson(benchmark, "./tools/benchmark/basic/json/TS-5234-2.json")
self.testBenchmarkJson(benchmark, "./tools/benchmark/basic/json/TS-5234-3.json")
# TS-5846
keys = ["completed total queries: 40"]
self.benchmarkQuery(benchmark, "./tools/benchmark/basic/json/TS-5846-Query.json", keys)
keys = ["completed total queries: 20"]
self.benchmarkQuery(benchmark, "./tools/benchmark/basic/json/TS-5846-Mixed-Query.json", keys)
# bugs td
def bugsTD(self, benchmark):
self.testBenchmarkJson(benchmark, "./tools/benchmark/basic/json/TD-31490.json", checkStep = False)
self.testBenchmarkJson(benchmark, "./tools/benchmark/basic/json/TD-31575.json")
self.testBenchmarkJson(benchmark, "./tools/benchmark/basic/json/TD-32846.json")
# no drop
db = "td32913db"
vgroups = 4
tdSql.execute(f"create database {db} vgroups {vgroups}")
self.testBenchmarkJson(benchmark, "./tools/benchmark/basic/json/TD-32913.json", options="-Q")
tdSql.query(f"select `vgroups` from information_schema.ins_databases where name='{db}';")
tdSql.checkData(0, 0, vgroups)
# other
self.testBenchmarkJson(benchmark, "./tools/benchmark/basic/json/TD-32913-1.json")
self.testBenchmarkJson(benchmark, "./tools/benchmark/basic/json/TD-32913-2.json", options="-T 6")
self.testBenchmarkJson(benchmark, "./tools/benchmark/basic/json/TD-32913-3.json")
def run(self):
benchmark = etool.benchMarkFile()
# ts
self.bugsTS(benchmark)
# td
self.bugsTD(benchmark)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,53 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import os
import frame
import frame.etool
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
class TDTestCase(TBase):
def caseDescription(self):
"""
[TD-19387] taosBenchmark support partial columns num
"""
def run(self):
binPath = etool.benchMarkFile()
cmd = "%s -t 1 -n 1 -y -L 2 " % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.query("select * from test.meters")
dbresult = tdSql.res
for i in range(len(dbresult[0])):
if i in (1, 2) and dbresult[0][i] is None:
tdLog.exit("result[0][%d] is NULL, which should not be" % i)
else:
tdLog.info("result[0][{0}] is {1}".format(i, dbresult[0][i]))
tdSql.checkData(0, 0, 1500000000000)
tdSql.checkData(0, 3, None)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,63 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import os
import subprocess
import frame
import frame.etool
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
from frame.srvCtl import *
class TDTestCase(TBase):
def caseDescription(self):
"""
[TD-19985] taosBenchmark retry test cases
"""
def run(self):
binPath = etool.benchMarkFile()
cmd = (
"%s -t 1 -n 10 -i 1000 -r 1 -k 10 -z 1000 -y &"
# "%s -t 1 -n 10 -i 5000 -r 1 -y &"
% binPath
)
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
time.sleep(2)
sc.dnodeStopAll()
time.sleep(2)
sc.dnodeStart(1)
time.sleep(2)
psCmd = "ps -ef|grep -w taosBenchmark| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True)
while processID:
time.sleep(1)
processID = subprocess.check_output(psCmd, shell=True)
tdSql.query("select count(*) from test.meters")
tdSql.checkData(0, 0, 10)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,74 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import os
import frame
import frame.etool
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
class TDTestCase(TBase):
def caseDescription(self):
"""
[TD-21063] taosBenchmark single table test cases
"""
def run(self):
binPath = etool.benchMarkFile()
cmd = "%s -N -I taosc -t 1 -n 1 -y -E" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("use test")
tdSql.query("show stables")
tdSql.checkRows(0)
tdSql.query("show tables")
tdSql.checkRows(1)
tdSql.query("select count(*) from `meters`")
tdSql.checkData(0, 0, 1)
cmd = "%s -N -I rest -t 1 -n 1 -y" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("use test")
tdSql.query("show stables")
tdSql.checkRows(0)
tdSql.query("show tables")
tdSql.checkRows(1)
tdSql.query("select count(*) from meters")
tdSql.checkData(0, 0, 1)
cmd = "%s -N -I stmt -t 1 -n 1 -y" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("use test")
tdSql.query("show stables")
tdSql.checkRows(0)
tdSql.query("show tables")
tdSql.checkRows(1)
tdSql.query("select count(*) from meters")
tdSql.checkData(0, 0, 1)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,75 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import os
import frame
import frame.etool
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
class TDTestCase(TBase):
def caseDescription(self):
"""
[TD-22334] taosBenchmark sml rest test cases
"""
def run(self):
binPath = etool.benchMarkFile()
cmd = "%s -I sml-rest -t 1 -n 1 -y" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.query("select count(*) from test.meters")
tdSql.checkData(0, 0, 1)
cmd = "%s -I sml-rest-line -t 1 -n 1 -y" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.query("select count(*) from test.meters")
tdSql.checkData(0, 0, 1)
cmd = "%s -I sml-rest-telnet -t 1 -n 1 -y" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.query("select count(*) from test.meters")
tdSql.checkData(0, 0, 1)
cmd = "%s -I sml-rest-json -t 1 -n 1 -y" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.query("select count(*) from test.meters")
tdSql.checkData(0, 0, 1)
cmd = "%s -I sml-rest-taosjson -t 1 -n 1 -y" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.query("select count(*) from test.meters")
tdSql.checkData(0, 0, 1)
cmd = "%s -N -I sml-rest -y" % binPath
tdLog.info("%s" % cmd)
assert os.system("%s" % cmd) != 0
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,76 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import os
import time
import frame
import frame.etool
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
class TDTestCase(TBase):
def caseDescription(self):
"""
[TD-21932] taosBenchmark sml test cases
"""
def run(self):
binPath = etool.benchMarkFile()
cmd = "%s -I sml -t 1 -n 1 -y" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.query("select count(*) from test.meters")
tdSql.checkData(0, 0, 1)
cmd = "%s -I sml-line -t 1 -n 1 -y" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.query("select count(*) from test.meters")
tdSql.checkData(0, 0, 1)
cmd = "%s -I sml-telnet -t 1 -n 1 -y" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.query("select count(*) from test.meters")
tdSql.checkData(0, 0, 1)
cmd = "%s -I sml-json -t 1 -n 1 -y" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.query("select count(*) from test.meters")
tdSql.checkData(0, 0, 1)
cmd = "%s -I sml-taosjson -t 1 -n 1 -y" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.query("select count(*) from test.meters")
tdSql.checkData(0, 0, 1)
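# bulk run: 10 child tables x 10000 rows each, so 100000 rows are expected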
cmd = "%s -I sml -t 10 -n 10000 -y" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.query("select count(*) from test.meters")
tdSql.checkData(0, 0, 10*10000)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,54 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import os
import frame
import frame.etool
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
class TDTestCase(TBase):
def caseDescription(self):
"""
[TD-19352] taosBenchmark supplement insert test cases
"""
def run(self):
binPath = etool.benchMarkFile()
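# First insert a single row with the default start timestamp
# (1500000000000), then rerun with -U (presumably supplement-insert
# mode, per the case description) adding 10 rows from 1600000000000,
# so the table should hold 11 rows and keep the original first row.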
cmd = "%s -t 1 -n 1 -y" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
cmd = "%s -t 1 -n 10 -U -s 1600000000000 -y" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.query("select count(*) from test.meters")
tdSql.checkData(0, 0, 11)
tdSql.query("select * from test.meters")
tdSql.checkData(0, 0, 1500000000000)
tdSql.checkData(1, 0, 1600000000000)
tdSql.checkData(10, 0, 1600000000009)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,51 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import os
import time
import frame
import frame.etool
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
class TDTestCase(TBase):
def caseDescription(self):
"""
[TD-21806] taosBenchmark specifying vgroups test cases
"""
def run(self):
binPath = etool.benchMarkFile()
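# create the test database with 3 vgroups (-v 3), run in the background,
# then read information_schema.ins_databases to confirm the setting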
cmd = (
"%s -t 1 -n 1 -v 3 -y &"
% binPath
)
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
time.sleep(2)
tdSql.query("select `vgroups` from information_schema.ins_databases where name='test'")
tdSql.checkData(0, 0, 3)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -12,53 +12,30 @@
# -*- coding: utf-8 -*-
import os
import subprocess
import time
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
import frame
import frame.etool
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
class TDTestCase:
class TDTestCase(TBase):
def caseDescription(self):
'''
"""
[TD-11510] taosBenchmark test cases
'''
return
"""
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def getPath(self, tool="taosBenchmark"):
if (platform.system().lower() == 'windows'):
tool = tool + ".exe"
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
paths = []
for root, dirs, files in os.walk(projPath):
if ((tool) in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
paths.append(os.path.join(root, tool))
break
if (len(paths) == 0):
tdLog.exit("taosBenchmark not found!")
return
else:
tdLog.info("taosBenchmark found in %s" % paths[0])
return paths[0]
def run(self):
binPath = self.getPath()
cmd = "%s -F 7 -n 10 -t 2 -x -y -M -C -d newtest -l 5 -A binary,nchar\(31\) -b tinyint,binary\(23\),bool,nchar -w 29 -E -m $%%^*" %binPath
binPath = etool.benchMarkFile()
cmd = (
"%s -F 7 -n 10 -t 2 -x -y -M -C -d newtest -l 5 -A binary,nchar\(31\) -b tinyint,binary\(23\),bool,nchar -w 29 -E -m $%%^*"
% binPath
)
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("use newtest")
@ -68,79 +45,127 @@ class TDTestCase:
tdSql.checkRows(8)
tdSql.checkData(0, 1, "TIMESTAMP")
tdSql.checkData(1, 1, "TINYINT")
tdSql.checkData(2, 1, "VARCHAR")
# 2.x is binary and 3.x is varchar
# tdSql.checkData(2, 1, "BINARY")
tdSql.checkData(2, 2, 23)
tdSql.checkData(3, 1, "BOOL")
tdSql.checkData(4, 1, "NCHAR")
tdSql.checkData(4, 2, 29)
tdSql.checkData(5, 1, "INT")
tdSql.checkData(6, 1, "VARCHAR")
# 2.x is binary and 3.x is varchar
# tdSql.checkData(6, 1, "BINARY")
tdSql.checkData(6, 2, 29)
tdSql.checkData(6, 3, "TAG")
tdSql.checkData(7, 1, "NCHAR")
tdSql.checkData(7, 2, 31)
tdSql.checkData(7, 3, "TAG")
tdSql.query("select distinct(tbname) from meters where tbname like '$%^*%'")
tdSql.query("show tables")
tdSql.checkRows(2)
tdSql.execute("drop database if exists newtest")
cmd = "%s -F 7 -n 10 -t 2 -y -M -I stmt" %binPath
cmd = (
"%s -t 2 -n 10 -b bool,tinyint,smallint,int,bigint,float,double,utinyint,usmallint,uint,ubigint,binary,nchar,timestamp -A bool,tinyint,smallint,int,bigint,float,double,utinyint,usmallint,uint,ubigint,binary,nchar,timestamp -y"
% binPath
)
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.query("select count(*) from (select distinct(tbname) from test.meters)")
tdSql.checkData(0, 0, 2)
tdSql.query("show test.tables")
tdSql.checkRows(2)
tdSql.query("select count(*) from test.meters")
tdSql.checkData(0, 0, 20)
cmd = "%s -n 3 -t 3 -B 2 -i 1 -G -y -T 1 2>&1 | grep sleep | wc -l" %binPath
cmd = (
"%s -I stmt -t 2 -n 10 -b bool,tinyint,smallint,int,bigint,float,double,utinyint,usmallint,uint,ubigint,binary,nchar,timestamp -A bool,tinyint,smallint,int,bigint,float,double,utinyint,usmallint,uint,ubigint,binary,nchar,timestamp -y"
% binPath
)
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.query("show test.tables")
tdSql.checkRows(2)
tdSql.query("select count(*) from test.meters")
tdSql.checkData(0, 0, 20)
cmd = "%s -F 7 -n 10 -t 2 -y -M -I stmt" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.query("show test.tables")
tdSql.checkRows(2)
tdSql.query("select count(*) from test.meters")
tdSql.checkData(0, 0, 20)
# add stmt2
cmd = "%s -F 700 -n 1000 -t 4 -y -M -I stmt2" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.query("show test.tables")
tdSql.checkRows(4)
tdSql.query("select count(*) from test.meters")
tdSql.checkData(0, 0, 4000)
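# The blocks below count "sleep" lines in the benchmark output to check
# insert-interval pacing (-B interlace rows, -i interval, -T 1 thread)
# for the default, sml and stmt interfaces; the expected counts (2 or 3)
# depend on whether -r 1 is given.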
cmd = "%s -n 3 -t 3 -B 2 -i 1 -G -y -T 1 2>&1 | grep sleep | wc -l" % binPath
sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8")
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
if (int(sleepTimes) != 2):
if int(sleepTimes) != 2:
tdLog.exit("expected sleep times 2, actual %d" % int(sleepTimes))
cmd = "%s -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -r 1 2>&1 | grep sleep | wc -l" %binPath
cmd = (
"%s -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -r 1 2>&1 | grep sleep | wc -l" % binPath
)
sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8")
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
if (int(sleepTimes) != 3):
if int(sleepTimes) != 3:
tdLog.exit("expected sleep times 3, actual %d" % int(sleepTimes))
cmd = "%s -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -I sml 2>&1 | grep sleep | wc -l" %binPath
cmd = (
"%s -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -I sml 2>&1 | grep sleep | wc -l"
% binPath
)
sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8")
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
if (int(sleepTimes) != 2):
if int(sleepTimes) != 2:
tdLog.exit("expected sleep times 2, actual %d" % int(sleepTimes))
cmd = "%s -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -r 1 -I sml 2>&1 | grep sleep | wc -l" %binPath
cmd = (
"%s -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -r 1 -I sml 2>&1 | grep sleep | wc -l"
% binPath
)
sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8")
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
if (int(sleepTimes) != 3):
if int(sleepTimes) != 3:
tdLog.exit("expected sleep times 3, actual %d" % int(sleepTimes))
cmd = "%s -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -I stmt 2>&1 | grep sleep | wc -l" %binPath
cmd = (
"%s -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -I stmt 2>&1 | grep sleep | wc -l"
% binPath
)
sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8")
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
if (int(sleepTimes) != 2):
if int(sleepTimes) != 2:
tdLog.exit("expected sleep times 2, actual %d" % int(sleepTimes))
cmd = "%s -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -r 1 -I stmt 2>&1 | grep sleep | wc -l" %binPath
cmd = (
"%s -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -r 1 -I stmt 2>&1 | grep sleep | wc -l"
% binPath
)
sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8")
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
if (int(sleepTimes) != 3):
if int(sleepTimes) != 3:
tdLog.exit("expected sleep times 3, actual %d" % int(sleepTimes))
cmd = "%s -S 17 -n 3 -t 1 -y -x" %binPath
cmd = "%s -S 17 -n 3 -t 1 -y -x" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
time.sleep(2) # to avoid invalid vgroup id
tdSql.query("select last(ts) from test.meters")
tdSql.checkData(0, 0 , "2017-07-14 10:40:00.034")
tdSql.checkData(0, 0, "2017-07-14 10:40:00.034")
cmd = "%s -N -I taosc -t 11 -n 11 -y -x -E" %binPath
cmd = "%s -N -I taosc -t 11 -n 11 -y -x -E" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("use test")
@ -151,7 +176,7 @@ class TDTestCase:
tdSql.query("select count(*) from `d10`")
tdSql.checkData(0, 0, 11)
cmd = "%s -N -I rest -t 11 -n 11 -y -x" %binPath
cmd = "%s -N -I rest -t 11 -n 11 -y -x" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("use test")
@ -162,7 +187,7 @@ class TDTestCase:
tdSql.query("select count(*) from d10")
tdSql.checkData(0, 0, 11)
cmd = "%s -N -I stmt -t 11 -n 11 -y -x" %binPath
cmd = "%s -N -I stmt -t 11 -n 11 -y -x" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("use test")
@ -173,139 +198,131 @@ class TDTestCase:
tdSql.query("select count(*) from d10")
tdSql.checkData(0, 0, 11)
cmd = "%s -N -I sml -y" %binPath
tdLog.info("%s" % cmd)
assert(os.system("%s" % cmd) !=0 )
cmd = "%s -n 1 -t 1 -y -b bool" %binPath
cmd = "%s -n 1 -t 1 -y -b bool" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "BOOL")
cmd = "%s -n 1 -t 1 -y -b tinyint" %binPath
cmd = "%s -n 1 -t 1 -y -b tinyint" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "TINYINT")
cmd = "%s -n 1 -t 1 -y -b utinyint" %binPath
cmd = "%s -n 1 -t 1 -y -b utinyint" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "TINYINT UNSIGNED")
cmd = "%s -n 1 -t 1 -y -b smallint" %binPath
cmd = "%s -n 1 -t 1 -y -b smallint" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "SMALLINT")
cmd = "%s -n 1 -t 1 -y -b usmallint" %binPath
cmd = "%s -n 1 -t 1 -y -b usmallint" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "SMALLINT UNSIGNED")
cmd = "%s -n 1 -t 1 -y -b int" %binPath
cmd = "%s -n 1 -t 1 -y -b int" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "INT")
cmd = "%s -n 1 -t 1 -y -b uint" %binPath
cmd = "%s -n 1 -t 1 -y -b uint" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "INT UNSIGNED")
cmd = "%s -n 1 -t 1 -y -b bigint" %binPath
cmd = "%s -n 1 -t 1 -y -b bigint" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "BIGINT")
cmd = "%s -n 1 -t 1 -y -b ubigint" %binPath
cmd = "%s -n 1 -t 1 -y -b ubigint" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "BIGINT UNSIGNED")
cmd = "%s -n 1 -t 1 -y -b timestamp" %binPath
cmd = "%s -n 1 -t 1 -y -b timestamp" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "TIMESTAMP")
cmd = "%s -n 1 -t 1 -y -b float" %binPath
cmd = "%s -n 1 -t 1 -y -b float" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "FLOAT")
cmd = "%s -n 1 -t 1 -y -b double" %binPath
cmd = "%s -n 1 -t 1 -y -b double" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "DOUBLE")
cmd = "%s -n 1 -t 1 -y -b nchar" %binPath
cmd = "%s -n 1 -t 1 -y -b nchar" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "NCHAR")
cmd = "%s -n 1 -t 1 -y -b nchar\(7\)" %binPath
cmd = "%s -n 1 -t 1 -y -b nchar\(7\)" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "NCHAR")
cmd = "%s -n 1 -t 1 -y -b binary" %binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "VARCHAR")
# 2.x is binary and 3.x is varchar
# cmd = "%s -n 1 -t 1 -y -b binary" %binPath
# tdLog.info("%s" % cmd)
# os.system("%s" % cmd)
# tdSql.execute("reset query cache")
# tdSql.query("describe test.meters")
# tdSql.checkData(1, 1, "BINARY")
cmd = "%s -n 1 -t 1 -y -b binary\(7\)" %binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "VARCHAR")
# cmd = "%s -n 1 -t 1 -y -b binary\(7\)" %binPath
# tdLog.info("%s" % cmd)
# os.system("%s" % cmd)
# tdSql.execute("reset query cache")
# tdSql.query("describe test.meters")
# tdSql.checkData(1, 1, "BINARY")
cmd = "%s -n 1 -t 1 -y -A json\(7\)" %binPath
cmd = "%s -n 1 -t 1 -y -A json" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(4, 1, "JSON")
cmd = "%s -n 1 -t 1 -y -b int,x" %binPath
tdLog.info("%s" % cmd)
assert(os.system("%s" % cmd) != 0)
cmd = "%s -n 1 -t 1 -y -A int,json" %binPath
tdLog.info("%s" % cmd)
assert(os.system("%s" % cmd) != 0)
cmd = "%s -n 1 -t 1 -y -b int,x" % binPath
ret = os.system("%s" % cmd)
if ret == 0:
tdLog.exit(f"expect failed, but successful cmd= {cmd} ")
tdLog.info(f"test except ok, cmd={cmd} ret={ret}")
def stop(self):
tdSql.close()

View File

@ -0,0 +1,50 @@
0,300.18991105956627
800,301.17345392774644
1200,301.55375211473387
1600,302.36356862640656
2000,303.1406323751084
2800,303.9159543118504
3600,304.483725379133
4000,305.05543735790496
4400,305.3220278809733
5600,306.5161529126947
6000,307.39147847940967
6400,308.3087978607504
6800,309.15934507811176
7200,309.2209920259603
7600,309.803587911068
8400,310.5335562951462
8800,311.48930624122
9200,311.759662137853
9600,311.99896141075567
10000,312.1610220373193
10400,312.52790040358923
11200,313.86419859052074
11600,314.58161206236935
12000,314.7287519381517
12400,315.7150471030763
12800,316.56835897615736
13200,316.9096732936423
14400,318.4992420459544
14800,319.1871240892884
15600,320.75835046846015
16000,321.3268224700194
16800,322.7216015334804
17600,323.1404560641849
18000,323.18247165088445
18800,324.41639977875917
20000,324.33270037314605
20400,323.84187335783747
20800,322.9354968771805
21600,322.20467685333284
22000,321.737580118744
22400,322.1692179980207
22800,322.4327523497001
24000,323.9630653580819
24400,324.90425359822837
25200,326.1863442959756
26000,327.02028556809154
26400,327.71615254870494
26800,328.6975738298687
27200,329.26002057438444
27600,329.92161585019835

View File

@ -0,0 +1,103 @@
"100000000001",1,200,630,"A07","1","100000000001","100000000001U","100000000001U","A07","100000000001","100000000001","13141"
"100000000001.001",1,200,630,"A07","0","100000000001","100000000001.001U","100000000001.001U","A07","100000000001","100000000001","13141"
"100000000001.002",1,200,630,"A07","0","100000000001","100000000001.002U","100000000001.002U","A07","100000000001","100000000001","13141"
"100000000001.003",1,200,630,"A07","0","100000000001","100000000001.003U","100000000001.003U","A07","100000000001","100000000001","13141"
"100000000001.004",1,200,630,"A07","0","100000000001","100000000001.004U","100000000001.004U","A07","100000000001","100000000001","13141"
"100000000001.005",1,200,630,"A07","0","100000000001","100000000001.005U","100000000001.005U","A07","100000000001","100000000001","13141"
"100000000001.006",1,200,630,"A07","0","100000000001","100000000001.006U","100000000001.006U","A07","100000000001","100000000001","13141"
"100000000001.007",1,200,630,"A07","0","100000000001","100000000001.007U","100000000001.007U","A07","100000000001","100000000001","13141"
"100000000001.008",1,200,630,"A07","0","100000000001","100000000001.008U","100000000001.008U","A07","100000000001","100000000001","13141"
"100000000001.009",1,200,630,"A07","0","100000000001","100000000001.009U","100000000001.009U","A07","100000000001","100000000001","13141"
"100000000001.010",1,200,630,"A07","0","100000000001","100000000001.010U","100000000001.010U","A07","100000000001","100000000001","13141"
"100000000001.011",1,200,630,"A07","0","100000000001","100000000001.011U","100000000001.011U","A07","100000000001","100000000001","13141"
"100000000001.012",1,200,630,"A07","0","100000000001","100000000001.012U","100000000001.012U","A07","100000000001","100000000001","13141"
"100000000001.013",1,200,630,"A07","0","100000000001","100000000001.013U","100000000001.013U","A07","100000000001","100000000001","13141"
"100000000001.014",1,200,630,"A07","0","100000000001","100000000001.014U","100000000001.014U","A07","100000000001","100000000001","13141"
"100000000001.015",1,200,630,"A07","0","100000000001","100000000001.015U","100000000001.015U","A07","100000000001","100000000001","13141"
"100000000001.016",1,200,630,"A07","0","100000000001","100000000001.016U","100000000001.016U","A07","100000000001","100000000001","13141"
"100000000001.017",1,200,630,"A07","0","100000000001","100000000001.017U","100000000001.017U","A07","100000000001","100000000001","13141"
"100000000001.018",1,200,630,"A07","0","100000000001","100000000001.018U","100000000001.018U","A07","100000000001","100000000001","13141"
"100000000001.019",1,200,630,"A07","0","100000000001","100000000001.019U","100000000001.019U","A07","100000000001","100000000001","13141"
"100000000001.020",1,200,630,"A07","0","100000000001","100000000001.020U","100000000001.020U","A07","100000000001","100000000001","13141"
"100000000001.021",1,200,630,"A07","0","100000000001","100000000001.021U","100000000001.021U","A07","100000000001","100000000001","13141"
"100000000001.022",1,200,630,"A07","0","100000000001","100000000001.022U","100000000001.022U","A07","100000000001","100000000001","13141"
"100000000001.023",1,200,630,"A07","0","100000000001","100000000001.023U","100000000001.023U","A07","100000000001","100000000001","13141"
"100000000001.024",1,200,630,"A07","0","100000000001","100000000001.024U","100000000001.024U","A07","100000000001","100000000001","13141"
"100000000001.025",1,200,630,"A07","0","100000000001","100000000001.025U","100000000001.025U","A07","100000000001","100000000001","13141"
"100000000001.026",1,200,630,"A07","0","100000000001","100000000001.026U","100000000001.026U","A07","100000000001","100000000001","13141"
"100000000001.027",1,200,630,"A07","0","100000000001","100000000001.027U","100000000001.027U","A07","100000000001","100000000001","13141"
"100000000001.028",1,200,630,"A07","0","100000000001","100000000001.028U","100000000001.028U","A07","100000000001","100000000001","13141"
"100000000001.029",1,200,630,"A07","0","100000000001","100000000001.029U","100000000001.029U","A07","100000000001","100000000001","13141"
"100000000001.030",1,200,630,"A07","0","100000000001","100000000001.030U","100000000001.030U","A07","100000000001","100000000001","13141"
"100000000001.031",1,200,630,"A07","0","100000000001","100000000001.031U","100000000001.031U","A07","100000000001","100000000001","13141"
"100000000001.032",1,200,630,"A07","0","100000000001","100000000001.032U","100000000001.032U","A07","100000000001","100000000001","13141"
"100000000001.033",1,200,630,"A07","0","100000000001","100000000001.033U","100000000001.033U","A07","100000000001","100000000001","13141"
"100000000001.034",1,200,630,"A07","0","100000000001","100000000001.034U","100000000001.034U","A07","100000000001","100000000001","13141"
"100000000001.035",1,200,630,"A07","0","100000000001","100000000001.035U","100000000001.035U","A07","100000000001","100000000001","13141"
"100000000001.036",1,200,630,"A07","0","100000000001","100000000001.036U","100000000001.036U","A07","100000000001","100000000001","13141"
"100000000001.037",1,200,630,"A07","0","100000000001","100000000001.037U","100000000001.037U","A07","100000000001","100000000001","13141"
"100000000001.038",1,200,630,"A07","0","100000000001","100000000001.038U","100000000001.038U","A07","100000000001","100000000001","13141"
"100000000001.039",1,200,630,"A07","0","100000000001","100000000001.039U","100000000001.039U","A07","100000000001","100000000001","13141"
"100000000001.040",1,200,630,"A07","0","100000000001","100000000001.040U","100000000001.040U","A07","100000000001","100000000001","13141"
"100000000001.041",1,200,630,"A07","0","100000000001","100000000001.041U","100000000001.041U","A07","100000000001","100000000001","13141"
"100000000001.042",1,200,630,"A07","0","100000000001","100000000001.042U","100000000001.042U","A07","100000000001","100000000001","13141"
"100000000001.043",1,200,630,"A07","0","100000000001","100000000001.043U","100000000001.043U","A07","100000000001","100000000001","13141"
"100000000001.044",1,200,630,"A07","0","100000000001","100000000001.044U","100000000001.044U","A07","100000000001","100000000001","13141"
"100000000001.045",1,200,630,"A07","0","100000000001","100000000001.045U","100000000001.045U","A07","100000000001","100000000001","13141"
"100000000001.046",1,200,630,"A07","0","100000000001","100000000001.046U","100000000001.046U","A07","100000000001","100000000001","13141"
"100000000001.047",1,200,630,"A07","0","100000000001","100000000001.047U","100000000001.047U","A07","100000000001","100000000001","13141"
"100000000001.048",1,200,630,"A07","0","100000000001","100000000001.048U","100000000001.048U","A07","100000000001","100000000001","13141"
"100000000001.049",1,200,630,"A07","0","100000000001","100000000001.049U","100000000001.049U","A07","100000000001","100000000001","13141"
"100000000001.050",1,200,630,"A07","0","100000000001","100000000001.050U","100000000001.050U","A07","100000000001","100000000001","13141"
"100000000001.051",1,200,630,"A07","0","100000000001","100000000001.051U","100000000001.051U","A07","100000000001","100000000001","13141"
"100000000001.052",1,200,630,"A07","0","100000000001","100000000001.052U","100000000001.052U","A07","100000000001","100000000001","13141"
"100000000001.053",1,200,630,"A07","0","100000000001","100000000001.053U","100000000001.053U","A07","100000000001","100000000001","13141"
"100000000001.054",1,200,630,"A07","0","100000000001","100000000001.054U","100000000001.054U","A07","100000000001","100000000001","13141"
"100000000001.055",1,200,630,"A07","0","100000000001","100000000001.055U","100000000001.055U","A07","100000000001","100000000001","13141"
"100000000001.056",1,200,630,"A07","0","100000000001","100000000001.056U","100000000001.056U","A07","100000000001","100000000001","13141"
"100000000001.057",1,200,630,"A07","0","100000000001","100000000001.057U","100000000001.057U","A07","100000000001","100000000001","13141"
"100000000001.058",1,200,630,"A07","0","100000000001","100000000001.058U","100000000001.058U","A07","100000000001","100000000001","13141"
"100000000001.059",1,200,630,"A07","0","100000000001","100000000001.059U","100000000001.059U","A07","100000000001","100000000001","13141"
"100000000001.060",1,200,630,"A07","0","100000000001","100000000001.060U","100000000001.060U","A07","100000000001","100000000001","13141"
"100000000001.061",1,200,630,"A07","0","100000000001","100000000001.061U","100000000001.061U","A07","100000000001","100000000001","13141"
"100000000001.062",1,200,630,"A07","0","100000000001","100000000001.062U","100000000001.062U","A07","100000000001","100000000001","13141"
"100000000001.063",1,200,630,"A07","0","100000000001","100000000001.063U","100000000001.063U","A07","100000000001","100000000001","13141"
"100000000001.064",1,200,630,"A07","0","100000000001","100000000001.064U","100000000001.064U","A07","100000000001","100000000001","13141"
"100000000001.065",1,200,630,"A07","0","100000000001","100000000001.065U","100000000001.065U","A07","100000000001","100000000001","13141"
"100000000001.066",1,200,630,"A07","0","100000000001","100000000001.066U","100000000001.066U","A07","100000000001","100000000001","13141"
"100000000001.067",1,200,630,"A07","0","100000000001","100000000001.067U","100000000001.067U","A07","100000000001","100000000001","13141"
"100000000001.068",1,200,630,"A07","0","100000000001","100000000001.068U","100000000001.068U","A07","100000000001","100000000001","13141"
"100000000001.069",1,200,630,"A07","0","100000000001","100000000001.069U","100000000001.069U","A07","100000000001","100000000001","13141"
"100000000001.070",1,200,630,"A07","0","100000000001","100000000001.070U","100000000001.070U","A07","100000000001","100000000001","13141"
"100000000001.071",1,200,630,"A07","0","100000000001","100000000001.071U","100000000001.071U","A07","100000000001","100000000001","13141"
"100000000001.072",1,200,630,"A07","0","100000000001","100000000001.072U","100000000001.072U","A07","100000000001","100000000001","13141"
"100000000001.073",1,200,630,"A07","0","100000000001","100000000001.073U","100000000001.073U","A07","100000000001","100000000001","13141"
"100000000001.074",1,200,630,"A07","0","100000000001","100000000001.074U","100000000001.074U","A07","100000000001","100000000001","13141"
"100000000001.075",1,200,630,"A07","0","100000000001","100000000001.075U","100000000001.075U","A07","100000000001","100000000001","13141"
"100000000001.076",1,200,630,"A07","0","100000000001","100000000001.076U","100000000001.076U","A07","100000000001","100000000001","13141"
"100000000001.077",1,200,630,"A07","0","100000000001","100000000001.077U","100000000001.077U","A07","100000000001","100000000001","13141"
"100000000001.078",1,200,630,"A07","0","100000000001","100000000001.078U","100000000001.078U","A07","100000000001","100000000001","13141"
"100000000001.079",1,200,630,"A07","0","100000000001","100000000001.079U","100000000001.079U","A07","100000000001","100000000001","13141"
"100000000001.080",1,200,630,"A07","0","100000000001","100000000001.080U","100000000001.080U","A07","100000000001","100000000001","13141"
"100000000001.081",1,200,630,"A07","0","100000000001","100000000001.081U","100000000001.081U","A07","100000000001","100000000001","13141"
"100000000001.082",1,200,630,"A07","0","100000000001","100000000001.082U","100000000001.082U","A07","100000000001","100000000001","13141"
"100000000001.083",1,200,630,"A07","0","100000000001","100000000001.083U","100000000001.083U","A07","100000000001","100000000001","13141"
"100000000001.084",1,200,630,"A07","0","100000000001","100000000001.084U","100000000001.084U","A07","100000000001","100000000001","13141"
"100000000001.085",1,200,630,"A07","0","100000000001","100000000001.085U","100000000001.085U","A07","100000000001","100000000001","13141"
"100000000001.086",1,200,630,"A07","0","100000000001","100000000001.086U","100000000001.086U","A07","100000000001","100000000001","13141"
"100000000001.087",1,200,630,"A07","0","100000000001","100000000001.087U","100000000001.087U","A07","100000000001","100000000001","13141"
"100000000001.088",1,200,630,"A07","0","100000000001","100000000001.088U","100000000001.088U","A07","100000000001","100000000001","13141"
"100000000001.089",1,200,630,"A07","0","100000000001","100000000001.089U","100000000001.089U","A07","100000000001","100000000001","13141"
"100000000001.090",1,200,630,"A07","0","100000000001","100000000001.090U","100000000001.090U","A07","100000000001","100000000001","13141"
"100000000001.091",1,200,630,"A07","0","100000000001","100000000001.091U","100000000001.091U","A07","100000000001","100000000001","13141"
"100000000001.092",1,200,630,"A07","0","100000000001","100000000001.092U","100000000001.092U","A07","100000000001","100000000001","13141"
"100000000001.093",1,200,630,"A07","0","100000000001","100000000001.093U","100000000001.093U","A07","100000000001","100000000001","13141"
"100000000001.094",1,200,630,"A07","0","100000000001","100000000001.094U","100000000001.094U","A07","100000000001","100000000001","13141"
"100000000001.095",1,200,630,"A07","0","100000000001","100000000001.095U","100000000001.095U","A07","100000000001","100000000001","13141"
"100000000001.096",1,200,630,"A07","0","100000000001","100000000001.096U","100000000001.096U","A07","100000000001","100000000001","13141"
"100000000001.097",1,200,630,"A07","0","100000000001","100000000001.097U","100000000001.097U","A07","100000000001","100000000001","13141"
"100000000001.098",1,200,630,"A07","0","100000000001","100000000001.098U","100000000001.098U","A07","100000000001","100000000001","13141"
"100000000001.099",1,200,630,"A07","0","100000000001","100000000001.099U","100000000001.099U","A07","100000000001","100000000001","13141"
"100000000001.100",1,200,630,"A07","0","100000000001","100000000001.100U","100000000001.100U","A07","100000000001","100000000001","13141"
"100000000002",1,200,630,"A07","1","100000000002","100000000002U","100000000002U","A07","100000000002","100000000002","13169"
"100000000002.001",1,200,630,"A07","0","100000000002","100000000002.001U","100000000002.001U","A07","100000000002","100000000002","13169"

View File

@ -0,0 +1,200 @@
"2023-11-15 06:13:20.000", 11.1370001,216, 0.2763700
"2023-11-15 06:13:21.000", 8.8470001,215, 0.5411610
"2023-11-15 06:13:22.000", 8.4510002,215, 0.7852990
"2023-11-15 06:13:23.000", 9.1129999,218, 0.4466600
"2023-11-15 06:13:24.000", 10.9060001,222, 0.1409650
"2023-11-15 06:13:25.000", 8.1940002,217, 0.1292390
"2023-11-15 06:13:26.000", 8.2639999,223, 0.5075400
"2023-11-15 06:13:27.000", 10.1599998,223, 0.0787010
"2023-11-15 06:13:28.000", 9.5030003,219, 0.7912880
"2023-11-15 06:13:29.000", 11.9480000,216, 0.7197460
"2023-11-15 06:13:30.000", 11.1370001,216, 0.2763700
"2023-11-15 06:13:31.000", 8.8470001,215, 0.5411610
"2023-11-15 06:13:32.000", 8.4510002,215, 0.7852990
"2023-11-15 06:13:33.000", 9.1129999,218, 0.4466600
"2023-11-15 06:13:34.000", 10.9060001,222, 0.1409650
"2023-11-15 06:13:35.000", 8.1940002,217, 0.1292390
"2023-11-15 06:13:36.000", 8.2639999,223, 0.5075400
"2023-11-15 06:13:37.000", 10.1599998,223, 0.0787010
"2023-11-15 06:13:38.000", 9.5030003,219, 0.7912880
"2023-11-15 06:13:39.000", 11.9480000,216, 0.7197460
"2023-11-15 06:13:40.000", 11.1370001,216, 0.2763700
"2023-11-15 06:13:41.000", 8.8470001,215, 0.5411610
"2023-11-15 06:13:42.000", 8.4510002,215, 0.7852990
"2023-11-15 06:13:43.000", 9.1129999,218, 0.4466600
"2023-11-15 06:13:44.000", 10.9060001,222, 0.1409650
"2023-11-15 06:13:45.000", 8.1940002,217, 0.1292390
"2023-11-15 06:13:46.000", 8.2639999,223, 0.5075400
"2023-11-15 06:13:47.000", 10.1599998,223, 0.0787010
"2023-11-15 06:13:48.000", 9.5030003,219, 0.7912880
"2023-11-15 06:13:49.000", 11.9480000,216, 0.7197460
"2023-11-15 06:13:50.000", 11.1370001,216, 0.2763700
"2023-11-15 06:13:51.000", 8.8470001,215, 0.5411610
"2023-11-15 06:13:52.000", 8.4510002,215, 0.7852990
"2023-11-15 06:13:53.000", 9.1129999,218, 0.4466600
"2023-11-15 06:13:54.000", 10.9060001,222, 0.1409650
"2023-11-15 06:13:55.000", 8.1940002,217, 0.1292390
"2023-11-15 06:13:56.000", 8.2639999,223, 0.5075400
"2023-11-15 06:13:57.000", 10.1599998,223, 0.0787010
"2023-11-15 06:13:58.000", 9.5030003,219, 0.7912880
"2023-11-15 06:13:59.000", 11.9480000,216, 0.7197460
"2023-11-15 06:14:00.000", 11.1370001,216, 0.2763700
"2023-11-15 06:14:01.000", 8.8470001,215, 0.5411610
"2023-11-15 06:14:02.000", 8.4510002,215, 0.7852990
"2023-11-15 06:14:03.000", 9.1129999,218, 0.4466600
"2023-11-15 06:14:04.000", 10.9060001,222, 0.1409650
"2023-11-15 06:14:05.000", 8.1940002,217, 0.1292390
"2023-11-15 06:14:06.000", 8.2639999,223, 0.5075400
"2023-11-15 06:14:07.000", 10.1599998,223, 0.0787010
"2023-11-15 06:14:08.000", 9.5030003,219, 0.7912880
"2023-11-15 06:14:09.000", 11.9480000,216, 0.7197460
"2023-11-15 06:14:10.000", 11.1370001,216, 0.2763700
"2023-11-15 06:14:11.000", 8.8470001,215, 0.5411610
"2023-11-15 06:14:12.000", 8.4510002,215, 0.7852990
"2023-11-15 06:14:13.000", 9.1129999,218, 0.4466600
"2023-11-15 06:14:14.000", 10.9060001,222, 0.1409650
"2023-11-15 06:14:15.000", 8.1940002,217, 0.1292390
"2023-11-15 06:14:16.000", 8.2639999,223, 0.5075400
"2023-11-15 06:14:17.000", 10.1599998,223, 0.0787010
"2023-11-15 06:14:18.000", 9.5030003,219, 0.7912880
"2023-11-15 06:14:19.000", 11.9480000,216, 0.7197460
"2023-11-15 06:14:20.000", 11.1370001,216, 0.2763700
"2023-11-15 06:14:21.000", 8.8470001,215, 0.5411610
"2023-11-15 06:14:22.000", 8.4510002,215, 0.7852990
"2023-11-15 06:14:23.000", 9.1129999,218, 0.4466600
"2023-11-15 06:14:24.000", 10.9060001,222, 0.1409650
"2023-11-15 06:14:25.000", 8.1940002,217, 0.1292390
"2023-11-15 06:14:26.000", 8.2639999,223, 0.5075400
"2023-11-15 06:14:27.000", 10.1599998,223, 0.0787010
"2023-11-15 06:14:28.000", 9.5030003,219, 0.7912880
"2023-11-15 06:14:29.000", 11.9480000,216, 0.7197460
"2023-11-15 06:14:30.000", 11.1370001,216, 0.2763700
"2023-11-15 06:14:31.000", 8.8470001,215, 0.5411610
"2023-11-15 06:14:32.000", 8.4510002,215, 0.7852990
"2023-11-15 06:14:33.000", 9.1129999,218, 0.4466600
"2023-11-15 06:14:34.000", 10.9060001,222, 0.1409650
"2023-11-15 06:14:35.000", 8.1940002,217, 0.1292390
"2023-11-15 06:14:36.000", 8.2639999,223, 0.5075400
"2023-11-15 06:14:37.000", 10.1599998,223, 0.0787010
"2023-11-15 06:14:38.000", 9.5030003,219, 0.7912880
"2023-11-15 06:14:39.000", 11.9480000,216, 0.7197460
"2023-11-15 06:14:40.000", 11.1370001,216, 0.2763700
"2023-11-15 06:14:41.000", 8.8470001,215, 0.5411610
"2023-11-15 06:14:42.000", 8.4510002,215, 0.7852990
"2023-11-15 06:14:43.000", 9.1129999,218, 0.4466600
"2023-11-15 06:14:44.000", 10.9060001,222, 0.1409650
"2023-11-15 06:14:45.000", 8.1940002,217, 0.1292390
"2023-11-15 06:14:46.000", 8.2639999,223, 0.5075400
"2023-11-15 06:14:47.000", 10.1599998,223, 0.0787010
"2023-11-15 06:14:48.000", 9.5030003,219, 0.7912880
"2023-11-15 06:14:49.000", 11.9480000,216, 0.7197460
"2023-11-15 06:14:50.000", 11.1370001,216, 0.2763700
"2023-11-15 06:14:51.000", 8.8470001,215, 0.5411610
"2023-11-15 06:14:52.000", 8.4510002,215, 0.7852990
"2023-11-15 06:14:53.000", 9.1129999,218, 0.4466600
"2023-11-15 06:14:54.000", 10.9060001,222, 0.1409650
"2023-11-15 06:14:55.000", 8.1940002,217, 0.1292390
"2023-11-15 06:14:56.000", 8.2639999,223, 0.5075400
"2023-11-15 06:14:57.000", 10.1599998,223, 0.0787010
"2023-11-15 06:14:58.000", 9.5030003,219, 0.7912880
"2023-11-15 06:14:59.000", 11.9480000,216, 0.7197460
"2023-11-15 06:15:00.000", 11.1370001,216, 0.2763700
"2023-11-15 06:15:01.000", 8.8470001,215, 0.5411610
"2023-11-15 06:15:02.000", 8.4510002,215, 0.7852990
"2023-11-15 06:15:03.000", 9.1129999,218, 0.4466600
"2023-11-15 06:15:04.000", 10.9060001,222, 0.1409650
"2023-11-15 06:15:05.000", 8.1940002,217, 0.1292390
"2023-11-15 06:15:06.000", 8.2639999,223, 0.5075400
"2023-11-15 06:15:07.000", 10.1599998,223, 0.0787010
"2023-11-15 06:15:08.000", 9.5030003,219, 0.7912880
"2023-11-15 06:15:09.000", 11.9480000,216, 0.7197460
"2023-11-15 06:15:10.000", 11.1370001,216, 0.2763700
"2023-11-15 06:15:11.000", 8.8470001,215, 0.5411610
"2023-11-15 06:15:12.000", 8.4510002,215, 0.7852990
"2023-11-15 06:15:13.000", 9.1129999,218, 0.4466600
"2023-11-15 06:15:14.000", 10.9060001,222, 0.1409650
"2023-11-15 06:15:15.000", 8.1940002,217, 0.1292390
"2023-11-15 06:15:16.000", 8.2639999,223, 0.5075400
"2023-11-15 06:15:17.000", 10.1599998,223, 0.0787010
"2023-11-15 06:15:18.000", 9.5030003,219, 0.7912880
"2023-11-15 06:15:19.000", 11.9480000,216, 0.7197460
"2023-11-15 06:15:20.000", 11.1370001,216, 0.2763700
"2023-11-15 06:15:21.000", 8.8470001,215, 0.5411610
"2023-11-15 06:15:22.000", 8.4510002,215, 0.7852990
"2023-11-15 06:15:23.000", 9.1129999,218, 0.4466600
"2023-11-15 06:15:24.000", 10.9060001,222, 0.1409650
"2023-11-15 06:15:25.000", 8.1940002,217, 0.1292390
"2023-11-15 06:15:26.000", 8.2639999,223, 0.5075400
"2023-11-15 06:15:27.000", 10.1599998,223, 0.0787010
"2023-11-15 06:15:28.000", 9.5030003,219, 0.7912880
"2023-11-15 06:15:29.000", 11.9480000,216, 0.7197460
"2023-11-15 06:15:30.000", 11.1370001,216, 0.2763700
"2023-11-15 06:15:31.000", 8.8470001,215, 0.5411610
"2023-11-15 06:15:32.000", 8.4510002,215, 0.7852990
"2023-11-15 06:15:33.000", 9.1129999,218, 0.4466600
"2023-11-15 06:15:34.000", 10.9060001,222, 0.1409650
"2023-11-15 06:15:35.000", 8.1940002,217, 0.1292390
"2023-11-15 06:15:36.000", 8.2639999,223, 0.5075400
"2023-11-15 06:15:37.000", 10.1599998,223, 0.0787010
"2023-11-15 06:15:38.000", 9.5030003,219, 0.7912880
"2023-11-15 06:15:39.000", 11.9480000,216, 0.7197460
"2023-11-15 06:15:40.000", 11.1370001,216, 0.2763700
"2023-11-15 06:15:41.000", 8.8470001,215, 0.5411610
"2023-11-15 06:15:42.000", 8.4510002,215, 0.7852990
"2023-11-15 06:15:43.000", 9.1129999,218, 0.4466600
"2023-11-15 06:15:44.000", 10.9060001,222, 0.1409650
"2023-11-15 06:15:45.000", 8.1940002,217, 0.1292390
"2023-11-15 06:15:46.000", 8.2639999,223, 0.5075400
"2023-11-15 06:15:47.000", 10.1599998,223, 0.0787010
"2023-11-15 06:15:48.000", 9.5030003,219, 0.7912880
"2023-11-15 06:15:49.000", 11.9480000,216, 0.7197460
"2023-11-15 06:15:50.000", 11.1370001,216, 0.2763700
"2023-11-15 06:15:51.000", 8.8470001,215, 0.5411610
"2023-11-15 06:15:52.000", 8.4510002,215, 0.7852990
"2023-11-15 06:15:53.000", 9.1129999,218, 0.4466600
"2023-11-15 06:15:54.000", 10.9060001,222, 0.1409650
"2023-11-15 06:15:55.000", 8.1940002,217, 0.1292390
"2023-11-15 06:15:56.000", 8.2639999,223, 0.5075400
"2023-11-15 06:15:57.000", 10.1599998,223, 0.0787010
"2023-11-15 06:15:58.000", 9.5030003,219, 0.7912880
"2023-11-15 06:15:59.000", 11.9480000,216, 0.7197460
"2023-11-15 06:16:00.000", 11.1370001,216, 0.2763700
"2023-11-15 06:16:01.000", 8.8470001,215, 0.5411610
"2023-11-15 06:16:02.000", 8.4510002,215, 0.7852990
"2023-11-15 06:16:03.000", 9.1129999,218, 0.4466600
"2023-11-15 06:16:04.000", 10.9060001,222, 0.1409650
"2023-11-15 06:16:05.000", 8.1940002,217, 0.1292390
"2023-11-15 06:16:06.000", 8.2639999,223, 0.5075400
"2023-11-15 06:16:07.000", 10.1599998,223, 0.0787010
"2023-11-15 06:16:08.000", 9.5030003,219, 0.7912880
"2023-11-15 06:16:09.000", 11.9480000,216, 0.7197460
"2023-11-15 06:16:10.000", 11.1370001,216, 0.2763700
"2023-11-15 06:16:11.000", 8.8470001,215, 0.5411610
"2023-11-15 06:16:12.000", 8.4510002,215, 0.7852990
"2023-11-15 06:16:13.000", 9.1129999,218, 0.4466600
"2023-11-15 06:16:14.000", 10.9060001,222, 0.1409650
"2023-11-15 06:16:15.000", 8.1940002,217, 0.1292390
"2023-11-15 06:16:16.000", 8.2639999,223, 0.5075400
"2023-11-15 06:16:17.000", 10.1599998,223, 0.0787010
"2023-11-15 06:16:18.000", 9.5030003,219, 0.7912880
"2023-11-15 06:16:19.000", 11.9480000,216, 0.7197460
"2023-11-15 06:16:20.000", 11.1370001,216, 0.2763700
"2023-11-15 06:16:21.000", 8.8470001,215, 0.5411610
"2023-11-15 06:16:22.000", 8.4510002,215, 0.7852990
"2023-11-15 06:16:23.000", 9.1129999,218, 0.4466600
"2023-11-15 06:16:24.000", 10.9060001,222, 0.1409650
"2023-11-15 06:16:25.000", 8.1940002,217, 0.1292390
"2023-11-15 06:16:26.000", 8.2639999,223, 0.5075400
"2023-11-15 06:16:27.000", 10.1599998,223, 0.0787010
"2023-11-15 06:16:28.000", 9.5030003,219, 0.7912880
"2023-11-15 06:16:29.000", 11.9480000,216, 0.7197460
"2023-11-15 06:16:30.000", 11.1370001,216, 0.2763700
"2023-11-15 06:16:31.000", 8.8470001,215, 0.5411610
"2023-11-15 06:16:32.000", 8.4510002,215, 0.7852990
"2023-11-15 06:16:33.000", 9.1129999,218, 0.4466600
"2023-11-15 06:16:34.000", 10.9060001,222, 0.1409650
"2023-11-15 06:16:35.000", 8.1940002,217, 0.1292390
"2023-11-15 06:16:36.000", 8.2639999,223, 0.5075400
"2023-11-15 06:16:37.000", 10.1599998,223, 0.0787010
"2023-11-15 06:16:38.000", 9.5030003,219, 0.7912880
"2023-11-15 06:16:39.000", 11.9480000,216, 0.7197460
93 2023-11-15 06:14:52.000 8.4510002 215 0.7852990
94 2023-11-15 06:14:53.000 9.1129999 218 0.4466600
95 2023-11-15 06:14:54.000 10.9060001 222 0.1409650
96 2023-11-15 06:14:55.000 8.1940002 217 0.1292390
97 2023-11-15 06:14:56.000 8.2639999 223 0.5075400
98 2023-11-15 06:14:57.000 10.1599998 223 0.0787010
99 2023-11-15 06:14:58.000 9.5030003 219 0.7912880
100 2023-11-15 06:14:59.000 11.9480000 216 0.7197460
101 2023-11-15 06:15:00.000 11.1370001 216 0.2763700
102 2023-11-15 06:15:01.000 8.8470001 215 0.5411610
103 2023-11-15 06:15:02.000 8.4510002 215 0.7852990
104 2023-11-15 06:15:03.000 9.1129999 218 0.4466600
105 2023-11-15 06:15:04.000 10.9060001 222 0.1409650
106 2023-11-15 06:15:05.000 8.1940002 217 0.1292390
107 2023-11-15 06:15:06.000 8.2639999 223 0.5075400
108 2023-11-15 06:15:07.000 10.1599998 223 0.0787010
109 2023-11-15 06:15:08.000 9.5030003 219 0.7912880
110 2023-11-15 06:15:09.000 11.9480000 216 0.7197460
111 2023-11-15 06:15:10.000 11.1370001 216 0.2763700
112 2023-11-15 06:15:11.000 8.8470001 215 0.5411610
113 2023-11-15 06:15:12.000 8.4510002 215 0.7852990
114 2023-11-15 06:15:13.000 9.1129999 218 0.4466600
115 2023-11-15 06:15:14.000 10.9060001 222 0.1409650
116 2023-11-15 06:15:15.000 8.1940002 217 0.1292390
117 2023-11-15 06:15:16.000 8.2639999 223 0.5075400
118 2023-11-15 06:15:17.000 10.1599998 223 0.0787010
119 2023-11-15 06:15:18.000 9.5030003 219 0.7912880
120 2023-11-15 06:15:19.000 11.9480000 216 0.7197460
121 2023-11-15 06:15:20.000 11.1370001 216 0.2763700
122 2023-11-15 06:15:21.000 8.8470001 215 0.5411610
123 2023-11-15 06:15:22.000 8.4510002 215 0.7852990
124 2023-11-15 06:15:23.000 9.1129999 218 0.4466600
125 2023-11-15 06:15:24.000 10.9060001 222 0.1409650
126 2023-11-15 06:15:25.000 8.1940002 217 0.1292390
127 2023-11-15 06:15:26.000 8.2639999 223 0.5075400
128 2023-11-15 06:15:27.000 10.1599998 223 0.0787010
129 2023-11-15 06:15:28.000 9.5030003 219 0.7912880
130 2023-11-15 06:15:29.000 11.9480000 216 0.7197460
131 2023-11-15 06:15:30.000 11.1370001 216 0.2763700
132 2023-11-15 06:15:31.000 8.8470001 215 0.5411610
133 2023-11-15 06:15:32.000 8.4510002 215 0.7852990
134 2023-11-15 06:15:33.000 9.1129999 218 0.4466600
135 2023-11-15 06:15:34.000 10.9060001 222 0.1409650
136 2023-11-15 06:15:35.000 8.1940002 217 0.1292390
137 2023-11-15 06:15:36.000 8.2639999 223 0.5075400
138 2023-11-15 06:15:37.000 10.1599998 223 0.0787010
139 2023-11-15 06:15:38.000 9.5030003 219 0.7912880
140 2023-11-15 06:15:39.000 11.9480000 216 0.7197460
141 2023-11-15 06:15:40.000 11.1370001 216 0.2763700
142 2023-11-15 06:15:41.000 8.8470001 215 0.5411610
143 2023-11-15 06:15:42.000 8.4510002 215 0.7852990
144 2023-11-15 06:15:43.000 9.1129999 218 0.4466600
145 2023-11-15 06:15:44.000 10.9060001 222 0.1409650
146 2023-11-15 06:15:45.000 8.1940002 217 0.1292390
147 2023-11-15 06:15:46.000 8.2639999 223 0.5075400
148 2023-11-15 06:15:47.000 10.1599998 223 0.0787010
149 2023-11-15 06:15:48.000 9.5030003 219 0.7912880
150 2023-11-15 06:15:49.000 11.9480000 216 0.7197460
151 2023-11-15 06:15:50.000 11.1370001 216 0.2763700
152 2023-11-15 06:15:51.000 8.8470001 215 0.5411610
153 2023-11-15 06:15:52.000 8.4510002 215 0.7852990
154 2023-11-15 06:15:53.000 9.1129999 218 0.4466600
155 2023-11-15 06:15:54.000 10.9060001 222 0.1409650
156 2023-11-15 06:15:55.000 8.1940002 217 0.1292390
157 2023-11-15 06:15:56.000 8.2639999 223 0.5075400
158 2023-11-15 06:15:57.000 10.1599998 223 0.0787010
159 2023-11-15 06:15:58.000 9.5030003 219 0.7912880
160 2023-11-15 06:15:59.000 11.9480000 216 0.7197460
161 2023-11-15 06:16:00.000 11.1370001 216 0.2763700
162 2023-11-15 06:16:01.000 8.8470001 215 0.5411610
163 2023-11-15 06:16:02.000 8.4510002 215 0.7852990
164 2023-11-15 06:16:03.000 9.1129999 218 0.4466600
165 2023-11-15 06:16:04.000 10.9060001 222 0.1409650
166 2023-11-15 06:16:05.000 8.1940002 217 0.1292390
167 2023-11-15 06:16:06.000 8.2639999 223 0.5075400
168 2023-11-15 06:16:07.000 10.1599998 223 0.0787010
169 2023-11-15 06:16:08.000 9.5030003 219 0.7912880
170 2023-11-15 06:16:09.000 11.9480000 216 0.7197460
171 2023-11-15 06:16:10.000 11.1370001 216 0.2763700
172 2023-11-15 06:16:11.000 8.8470001 215 0.5411610
173 2023-11-15 06:16:12.000 8.4510002 215 0.7852990
174 2023-11-15 06:16:13.000 9.1129999 218 0.4466600
175 2023-11-15 06:16:14.000 10.9060001 222 0.1409650
176 2023-11-15 06:16:15.000 8.1940002 217 0.1292390
177 2023-11-15 06:16:16.000 8.2639999 223 0.5075400
178 2023-11-15 06:16:17.000 10.1599998 223 0.0787010
179 2023-11-15 06:16:18.000 9.5030003 219 0.7912880
180 2023-11-15 06:16:19.000 11.9480000 216 0.7197460
181 2023-11-15 06:16:20.000 11.1370001 216 0.2763700
182 2023-11-15 06:16:21.000 8.8470001 215 0.5411610
183 2023-11-15 06:16:22.000 8.4510002 215 0.7852990
184 2023-11-15 06:16:23.000 9.1129999 218 0.4466600
185 2023-11-15 06:16:24.000 10.9060001 222 0.1409650
186 2023-11-15 06:16:25.000 8.1940002 217 0.1292390
187 2023-11-15 06:16:26.000 8.2639999 223 0.5075400
188 2023-11-15 06:16:27.000 10.1599998 223 0.0787010
189 2023-11-15 06:16:28.000 9.5030003 219 0.7912880
190 2023-11-15 06:16:29.000 11.9480000 216 0.7197460
191 2023-11-15 06:16:30.000 11.1370001 216 0.2763700
192 2023-11-15 06:16:31.000 8.8470001 215 0.5411610
193 2023-11-15 06:16:32.000 8.4510002 215 0.7852990
194 2023-11-15 06:16:33.000 9.1129999 218 0.4466600
195 2023-11-15 06:16:34.000 10.9060001 222 0.1409650
196 2023-11-15 06:16:35.000 8.1940002 217 0.1292390
197 2023-11-15 06:16:36.000 8.2639999 223 0.5075400
198 2023-11-15 06:16:37.000 10.1599998 223 0.0787010
199 2023-11-15 06:16:38.000 9.5030003 219 0.7912880
200 2023-11-15 06:16:39.000 11.9480000 216 0.7197460

Some files were not shown because too many files have changed in this diff