diff --git a/.github/workflows/taosd-ci-build.yml b/.github/workflows/taosd-ci-build.yml
new file mode 100644
index 0000000000..0876f5b731
--- /dev/null
+++ b/.github/workflows/taosd-ci-build.yml
@@ -0,0 +1,66 @@
+name: TDengine Build
+
+on:
+  pull_request:
+    branches:
+      - 'main'
+      - '3.0'
+      - '3.1'
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    name: Run unit tests
+
+    steps:
+      - name: Checkout the repository
+        uses: actions/checkout@v4
+
+      - name: Set up Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: 1.18
+
+      - name: Install system dependencies
+        run: |
+          sudo apt update -y
+          sudo apt install -y build-essential cmake \
+            libgeos-dev libjansson-dev libsnappy-dev liblzma-dev libz-dev \
+            zlib1g pkg-config libssl-dev gawk
+
+
+      - name: Build and install TDengine
+        run: |
+          mkdir debug && cd debug
+          cmake .. -DBUILD_HTTP=false -DBUILD_JDBC=false \
+            -DBUILD_TOOLS=true -DBUILD_TEST=off \
+            -DBUILD_KEEPER=true -DBUILD_DEPENDENCY_TESTS=false
+          make -j 4
+          sudo make install
+          which taosd
+          which taosadapter
+          which taoskeeper
+
+      - name: Start taosd
+        run: |
+          cp /etc/taos/taos.cfg ./
+          sudo echo "supportVnodes 256" >> taos.cfg
+          nohup sudo taosd -c taos.cfg &
+
+      - name: Start taosadapter
+        run: nohup sudo taosadapter &
+
+      - name: Run tests with taosBenchmark
+        run: |
+          taosBenchmark -t 10 -n 10 -y
+          taos -s "select count(*) from test.meters"
+
+      - name: Clean up
+        if: always()
+        run: |
+          if pgrep taosd; then sudo pkill taosd; fi
+          if pgrep taosadapter; then sudo pkill taosadapter; fi
diff --git a/Jenkinsfile2 b/Jenkinsfile2
index 6fa3483099..1b2f28908c 100644
--- a/Jenkinsfile2
+++ b/Jenkinsfile2
@@ -7,6 +7,9 @@ file_zh_changed = ''
 file_en_changed = ''
 file_no_doc_changed = '1'
 file_only_tdgpt_change_except = '1'
+tdgpt_file = "forecastoperator.c|anomalywindowoperator.c|tanalytics.h|tanalytics.c|tdgpt_cases.task|analytics"
+
+
 def abortPreviousBuilds() {
     def currentJobName = env.JOB_NAME
     def currentBuildNumber = env.BUILD_NUMBER.toInteger()
@@ -67,7 +70,7 @@ def check_docs(){
                 returnStdout: true
             )
 
-            file_no_doc_changed = sh (
+            def file_no_doc_changed = sh (
                 script: '''
                     cd ${WKC}
                     git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" || :
@@ -78,7 +81,7 @@ def check_docs(){
             file_only_tdgpt_change_except = sh (
                 script: '''
                     cd ${WKC}
-                    git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -v "forecastoperator.c\\|anomalywindowoperator.c\\|tanalytics.h\\|tanalytics.c" |grep -v "tsim/analytics" |grep -v "tdgpt_cases.task" || :
+                    git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -Ev "forecastoperator.c|anomalywindowoperator.c|tanalytics.h|tanalytics.c|tdgpt_cases.task|analytics" ||:
                 ''',
                 returnStdout: true
            ).trim()
@@ -570,7 +573,7 @@ pipeline {
                        cd ${WKC}/tests/parallel_test
                        ./run_scan_container.sh -d ${WKDIR} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + '''
                    '''
-                    if ( file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c|tsim\/analytics|tdgpt_cases.task/ ) {
+                    if ( file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c|tanalytics.h|tanalytics.c|tdgpt_cases.task|analytics/ ) {
                        sh '''
                        cd ${WKC}/tests/parallel_test
                        export DEFAULT_RETRY_TIME=2
diff --git a/README.md b/README.md
index f827c38975..ff72412434 100644
--- a/README.md
+++ b/README.md
@@ -10,6 +10,7 @@
 
+[![Build Status](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml/badge.svg)](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml)
 [![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=3.0)](https://coveralls.io/github/taosdata/TDengine?branch=3.0)
 [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201)
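Editorial note on the workflow above: taosd and taosadapter are started in the background with `nohup ... &`, and the taosBenchmark step runs immediately afterwards, so on a slow runner the test can race services that are not yet accepting connections. A minimal readiness gate could sit between those steps; this is a sketch only, and the 30-attempt budget and the use of `taos -s "show databases"` as a liveness probe are assumptions, not part of this PR:

```shell
# Poll until taosd answers a trivial query, or give up after 30 attempts.
for i in $(seq 1 30); do
  if taos -s "show databases" >/dev/null 2>&1; then
    echo "taosd ready after ${i} attempt(s)"
    break
  fi
  sleep 1
done
```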
diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in
index 13826a1a74..ef6ed4af1d 100644
--- a/cmake/taosadapter_CMakeLists.txt.in
+++ b/cmake/taosadapter_CMakeLists.txt.in
@@ -2,7 +2,7 @@
 # taosadapter
 ExternalProject_Add(taosadapter
        GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
-        GIT_TAG main
+        GIT_TAG 3.0
        SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
        BINARY_DIR ""
        #BUILD_IN_SOURCE TRUE
diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in
index 9bbda8309f..9a6a5329ae 100644
--- a/cmake/taostools_CMakeLists.txt.in
+++ b/cmake/taostools_CMakeLists.txt.in
@@ -2,7 +2,7 @@
 # taos-tools
ExternalProject_Add(taos-tools
        GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
-        GIT_TAG main
+        GIT_TAG 3.0
        SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
        BINARY_DIR ""
        #BUILD_IN_SOURCE TRUE
diff --git a/cmake/taosws_CMakeLists.txt.in b/cmake/taosws_CMakeLists.txt.in
index b013d45911..17446d184d 100644
--- a/cmake/taosws_CMakeLists.txt.in
+++ b/cmake/taosws_CMakeLists.txt.in
@@ -2,7 +2,7 @@
 # taosws-rs
ExternalProject_Add(taosws-rs
        GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git
-        GIT_TAG main
+        GIT_TAG 3.0
        SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
        BINARY_DIR ""
        #BUILD_IN_SOURCE TRUE
diff --git a/docs/en/08-operation/04-maintenance.md b/docs/en/08-operation/04-maintenance.md
index 970ee40d18..2f6afbf9df 100644
--- a/docs/en/08-operation/04-maintenance.md
+++ b/docs/en/08-operation/04-maintenance.md
@@ -17,7 +17,7 @@ TDengine is designed for various writing scenarios, and many of these scenarios
 
 ```sql
 COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY'];
-SHOW COMPACTS [compact_id];
+SHOW COMPACT [compact_id];
 KILL COMPACT compact_id;
 ```
 
diff --git a/docs/en/10-third-party/05-bi/11-superset.md b/docs/en/10-third-party/05-bi/11-superset.md
index aa56648b99..be3e3aa08d 100644
--- a/docs/en/10-third-party/05-bi/11-superset.md
+++ b/docs/en/10-third-party/05-bi/11-superset.md
@@ -9,14 +9,13 @@ Apache Superset provides an intuitive user interface that makes creating, sharin
 
 Through the Python connector of TDengine, Superset can support TDengine data sources and provide functions such as data presentation and analysis
 
-## Install Apache Superset
-
-Ensure that Apache Superset v2.1.0 or above is installed. If not, please visit [official website](https://superset.apache.org/) to install
-
-## Install TDengine
-
-Both TDengine Enterprise Edition and Community Edition are supported, with version requirements of 3.0 or higher
+## Prerequisites
+Prepare the following environment:
+- TDengine is installed and running normally (both the Enterprise and Community editions are supported)
+- taosAdapter is running normally; refer to [taosAdapter](../../../reference/components/taosAdapter)
+- Apache Superset v2.1.0 or above is installed; refer to [Apache Superset](https://superset.apache.org/)
+
 ## Install TDengine Python Connector
 
 The Python connector of TDengine comes with a connection driver that supports Superset in versions 2.1.18 and later, which will be automatically installed in the Superset directory and provide data source services.
 
diff --git a/docs/en/14-reference/01-components/01-taosd.md b/docs/en/14-reference/01-components/01-taosd.md
index 1b7f63510b..c86b631df4 100644
--- a/docs/en/14-reference/01-components/01-taosd.md
+++ b/docs/en/14-reference/01-components/01-taosd.md
@@ -190,7 +190,8 @@ The effective value of charset is UTF-8.
 |Parameter Name |Supported Version |Dynamic Modification|Description|
 |-----------------------|-------------------------|--------------------|------------|
 |supportVnodes | |Supported, effective immediately |Maximum number of vnodes supported by a dnode, range 0-4096, default value is twice the number of CPU cores + 5|
-|numOfCommitThreads | |Supported, effective after restart|Maximum number of commit threads, range 0-1024, default value 4|
+|numOfCommitThreads | |Supported, effective after restart|Maximum number of commit threads, range 1-1024, default value 4|
+|numOfCompactThreads | |Supported, effective after restart|Maximum number of compact threads, range 1-16, default value 2|
 |numOfMnodeReadThreads | |Supported, effective after restart|Number of Read threads for mnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
 |numOfVnodeQueryThreads | |Supported, effective after restart|Number of Query threads for vnode, range 0-1024, default value is twice the number of CPU cores (not exceeding 16)|
 |numOfVnodeFetchThreads | |Supported, effective after restart|Number of Fetch threads for vnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
diff --git a/docs/en/14-reference/02-tools/09-taosdump.md b/docs/en/14-reference/02-tools/09-taosdump.md
index d336f66c02..75747f2f57 100644
--- a/docs/en/14-reference/02-tools/09-taosdump.md
+++ b/docs/en/14-reference/02-tools/09-taosdump.md
@@ -4,22 +4,17 @@ sidebar_label: taosdump
 slug: /tdengine-reference/tools/taosdump
 ---
 
-taosdump is a tool application that supports backing up data from a running TDengine cluster and restoring the backed-up data to the same or another running TDengine cluster.
-
-taosdump can back up data using databases, supertables, or basic tables as logical data units, and can also back up data records within a specified time period from databases, supertables, and basic tables. You can specify the directory path for data backup; if not specified, taosdump defaults to backing up data to the current directory.
-
-If the specified location already has data files, taosdump will prompt the user and exit immediately to avoid data being overwritten. This means the same path can only be used for one backup.
-If you see related prompts, please operate carefully.
-
-taosdump is a logical backup tool, it should not be used to back up any raw data, environment settings, hardware information, server configuration, or cluster topology. taosdump uses [Apache AVRO](https://avro.apache.org/) as the data file format to store backup data.
+`taosdump` is a data backup and recovery tool for TDengine. Backup files are stored in the standard
+[Apache AVRO](https://avro.apache.org/) format, which makes it convenient to exchange data with the external ecosystem.
+taosdump provides a variety of backup and recovery options to meet different needs; all supported options can be viewed with `--help`.
 
 ## Installation
 
-There are two ways to install taosdump:
+taosdump can be installed in two ways:
 
-- Install the official taosTools package, please find taosTools on the [release history page](../../../release-history/taostools/) and download it for installation.
+- taosdump is a default component of the TDengine installation package and can be used directly after installing TDengine. For how to install TDengine, please refer to [TDengine Installation](../../../get-started/).
-- Compile taos-tools separately and install, please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.
+- Compile and install taos-tools separately; for details, refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository.
 
 ## Common Use Cases
@@ -30,6 +25,9 @@ There are two ways to install taosdump:
 
 3. Backup certain supertables or basic tables in a specified database: use the `dbname stbname1 stbname2 tbname1 tbname2 ...` parameter, note that this input sequence starts with the database name, supports only one database, and the second and subsequent parameters are the names of the supertables or basic tables in that database, separated by spaces;
 4. Backup the system log database: TDengine clusters usually include a system database named `log`, which contains data for TDengine's own operation, taosdump does not back up the log database by default. If there is a specific need to back up the log database, you can use the `-a` or `--allow-sys` command line parameter.
 5. "Tolerant" mode backup: Versions after taosdump 1.4.1 provide the `-n` and `-L` parameters, used for backing up data without using escape characters and in "tolerant" mode, which can reduce backup data time and space occupied when table names, column names, and label names do not use escape characters. If unsure whether to use `-n` and `-L`, use the default parameters for "strict" mode backup. For an explanation of escape characters, please refer to the [official documentation](../../sql-manual/escape-characters/).
+6. If backup files already exist in the directory specified by the `-o` parameter, taosdump reports an error and exits to prevent data from being overwritten. Use a different, empty directory, or clear the existing data before backing up.
+7. Currently, taosdump cannot resume an interrupted backup; once a backup is interrupted, it must be restarted from scratch.
+   If a backup is expected to take a long time, consider using the `-S`/`-E` options to specify a start/end time and back up in segments.
 
 :::tip
@@ -42,7 +40,8 @@ There are two ways to install taosdump:
 
 ### taosdump Restore Data
 
-Restore data files from a specified path: use the `-i` parameter along with the data file path. As mentioned earlier, the same directory should not be used to back up different data sets, nor should the same path be used to back up the same data set multiple times, otherwise, the backup data will cause overwriting or multiple backups.
+- Restore data files from a specified path: use the `-i` parameter along with the data file path. As mentioned earlier, the same directory should not be used to back up different data sets, nor should the same path be used to back up the same data set multiple times; otherwise, the backup data will be overwritten or duplicated.
+- taosdump supports restoring data into a database with a new name via the `-W` parameter; see the command line parameters description for details.
 
 :::tip
 taosdump internally uses the TDengine stmt binding API to write restored data, currently using 16384 as a batch for writing. If there are many columns in the backup data, it may cause a "WAL size exceeds limit" error, in which case you can try adjusting the `-B` parameter to a smaller value.
 :::
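To make the backup and restore options above concrete, here is a hedged sketch of a segmented backup followed by a rename-on-restore. The database name, paths, and time bounds are illustrative assumptions; the flags used are the documented `-o`, `-i`, `-S`/`-E`, and `-W` options plus standard connection parameters:

```shell
# Back up database "power" in a time-bounded segment (-S/-E); segmenting limits
# how much work is lost if a backup is interrupted, since backups cannot resume.
taosdump -h localhost -P 6030 -D power \
  -S "2024-01-01 00:00:00.000" -E "2024-06-30 23:59:59.999" \
  -o /data/backup/power_h1

# Restore the segment into a database with a new name via -W.
taosdump -h localhost -P 6030 -i /data/backup/power_h1 -W "power=power_h1_restored"
```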
@@ -105,6 +104,13 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
                              the table name.(Version 2.5.3)
   -T, --thread-num=THREAD_NUM Number of thread for dump in file.
                              Default is 8.
+  -W, --rename=RENAME-LIST   Rename database name with new name during
+                             importing data. RENAME-LIST:
+                             "db1=newDB1|db2=newDB2" means rename db1 to newDB1
+                             and rename db2 to newDB2 (Version 2.5.4)
+  -k, --retry-count=VALUE    Set the number of retry attempts for connection or
+                             query failures
+  -z, --retry-sleep-ms=VALUE retry interval sleep time, unit ms
   -C, --cloud=CLOUD_DSN      specify a DSN to access TDengine cloud service
   -R, --restful              Use RESTful interface to connect TDengine
   -t, --timeout=SECONDS      The timeout seconds for websocket to interact.
@@ -112,10 +118,6 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
   -?, --help                 Give this help list
       --usage                Give a short usage message
   -V, --version              Print program version
-  -W, --rename=RENAME-LIST   Rename database name with new name during
-                             importing data. RENAME-LIST:
-                             "db1=newDB1|db2=newDB2" means rename db1 to newDB1
-                             and rename db2 to newDB2 (Version 2.5.4)
 
 Mandatory or optional arguments to long options are also mandatory or optional
 for any corresponding short options.
diff --git a/docs/en/14-reference/02-tools/10-taosbenchmark.md b/docs/en/14-reference/02-tools/10-taosbenchmark.md
index 09227f210b..d1a18b5d1c 100644
--- a/docs/en/14-reference/02-tools/10-taosbenchmark.md
+++ b/docs/en/14-reference/02-tools/10-taosbenchmark.md
@@ -4,35 +4,38 @@ sidebar_label: taosBenchmark
 slug: /tdengine-reference/tools/taosbenchmark
 ---
 
-taosBenchmark (formerly known as taosdemo) is a tool for testing the performance of the TDengine product. taosBenchmark can test the performance of TDengine's insert, query, and subscription functions. It can simulate massive data generated by a large number of devices and flexibly control the number of databases, supertables, types and number of tag columns, types and number of data columns, number of subtables, data volume per subtable, data insertion interval, number of working threads in taosBenchmark, whether and how to insert out-of-order data, etc. To accommodate the usage habits of past users, the installation package provides taosdemo as a soft link to taosBenchmark.
+taosBenchmark is a performance benchmarking tool for TDengine products. It tests the insertion, query, and subscription performance of TDengine and reports the measured performance indicators.
 
 ## Installation
 
-There are two ways to install taosBenchmark:
+taosBenchmark can be installed in two ways:
 
-- taosBenchmark is automatically installed with the official TDengine installation package, for details please refer to [TDengine Installation](../../../get-started/).
+- taosBenchmark is a default component of the TDengine installation package and can be used directly after installing TDengine. For how to install TDengine, please refer to [TDengine Installation](../../../get-started/).
 
-- Compile and install taos-tools separately, for details please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository.
+- Compile and install taos-tools separately; for details, refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository.
 
 ## Operation
 
 ### Configuration and Operation Methods
 
-taosBenchmark needs to be executed in the operating system's terminal, and this tool supports two configuration methods: Command Line Arguments and JSON Configuration File. These two methods are mutually exclusive; when using a configuration file, only one command line argument `-f <json file>` can be used to specify the configuration file.
-When using command line arguments to run taosBenchmark and control its behavior, the `-f` parameter cannot be used; instead, other parameters must be used for configuration. In addition, taosBenchmark also offers a special mode of operation, which is running without any parameters.
-
-taosBenchmark supports comprehensive performance testing for TDengine, and the TDengine features it supports are divided into three categories: writing, querying, and subscribing. These three functions are mutually exclusive, and each run of taosBenchmark can only select one of them. It is important to note that the type of function to be tested is not configurable when using the command line configuration method; the command line configuration method can only test writing performance. To test TDengine's query and subscription performance, you must use the configuration file method and specify the type of function to be tested through the `filetype` parameter in the configuration file.
+taosBenchmark supports three operating modes:
+- No-parameter mode
+- Command-line mode
+- JSON configuration file mode
+The command-line mode covers a subset of the functionality of the configuration file mode. When both the command line and a configuration file are used at the same time, the parameters specified on the command line take precedence.
 
 **Ensure that the TDengine cluster is running correctly before running taosBenchmark.**
 
 ### Running Without Command Line Arguments
 
-Execute the following command to quickly experience taosBenchmark performing a write performance test on TDengine based on the default configuration.
+Execute the following command to quickly experience taosBenchmark performing a write performance test on TDengine based on the default configuration.
 
 ```shell
 taosBenchmark
 ```
 
-When running without parameters, taosBenchmark by default connects to the TDengine cluster specified under `/etc/taos`, and creates a database named `test` in TDengine, under which a supertable named `meters` is created, and 10,000 tables are created under the supertable, each table having 10,000 records inserted. Note that if a `test` database already exists, this command will delete the existing database and create a new `test` database.
+When running without parameters, taosBenchmark connects by default to the TDengine cluster specified in `/etc/taos/taos.cfg`.
+After a successful connection, it creates a smart-meter example database `test` containing a supertable `meters` with 10,000 subtables, and inserts 10,000 records into each subtable. If the `test` database already exists, it is dropped before a new one is created.
 
 ### Running Using Command Line Configuration Parameters
 
@@ -46,9 +49,7 @@ The above command `taosBenchmark` will create a database named `test`, establish
 
 ### Running Using a Configuration File
 
-The taosBenchmark installation package includes examples of configuration files, located in `/examples/taosbenchmark-json`
-
-Use the following command line to run taosBenchmark and control its behavior through a configuration file.
+Configuration file mode provides access to all features; the parameters for a run are set in the configuration file.
 
 ```shell
 taosBenchmark -f <json file>
 ```
 
@@ -214,6 +215,61 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
 
 - **-?/--help**: Displays help information and exits. Cannot be used with other parameters.
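A concrete invocation may help before the output-metrics section that follows. This is a sketch with illustrative values; `-t` (subtable count), `-n` (rows per subtable), `-T` (writer threads), and `-y` (answer yes to all prompts) are taosBenchmark command-line options:

```shell
# Command-line write test: 100 subtables with 10000 rows each, 8 writer threads.
taosBenchmark -t 100 -n 10000 -T 8 -y

# Spot-check the inserted rows in the default "test" database.
taos -s "select count(*) from test.meters"
```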
+
+## Output Performance Indicators
+
+### Write Indicators
+
+After writing completes, the last two lines of output summarize the write performance in the following format:
+```bash
+SUCC: Spent 8.527298 (real 8.117379) seconds to insert rows: 10000000 with 8 thread(s) into test 1172704.41 (real 1231924.74) records/second
+SUCC: insert delay, min: 19.6780ms, avg: 64.9390ms, p90: 94.6900ms, p95: 105.1870ms, p99: 130.6660ms, max: 157.0830ms
+```
+The first line reports write-speed statistics:
+- Spent: total write time in seconds, counted from the first row written to the last; here, 8.527298 seconds
+- real: total write time spent in engine calls, excluding the time the test framework spends preparing data; here, 8.117379 seconds. The difference, 8.527298 - 8.117379 = 0.409919 seconds, is the data-preparation time of the test framework
+- rows: total number of rows written; here, 10 million rows
+- threads: number of writing threads; here, 8 threads writing concurrently
+- records/second: write speed = `total rows written` / `total write time`, here 10000000 / 8.527298 ≈ 1172704.41; the "real" value in parentheses is, as above, the pure engine write speed
+
+The second line reports per-request write-latency statistics:
+- min: minimum write latency
+- avg: average write latency
+- p90: 90th-percentile write latency
+- p95: 95th-percentile write latency
+- p99: 99th-percentile write latency
+- max: maximum write latency
+Together, these indicators show the distribution of write-request latency.
+
+### Query Indicators
+
+The query performance test mainly reports QPS, the rate of query requests, in the following format:
+
+```bash
+complete query with 3 threads and 10000 query delay avg: 0.002686s min: 0.001182s max: 0.012189s p90: 0.002977s p95: 0.003493s p99: 0.004645s SQL command: select ...
+INFO: Total specified queries: 30000
+INFO: Spend 26.9530 second completed total queries: 30000, the QPS of all threads: 1113.049
+```
+
+- The first line reports, for each of the 3 threads executing 10000 queries, the average, minimum, maximum, and percentile query latencies; the SQL command at the end is the query statement under test
+- The second line indicates that a total of 10000 * 3 = 30000 queries were completed
+- The third line indicates that the queries took 26.9530 seconds in total, for an overall query rate (QPS) of 1113.049 queries per second
+
+### Subscription Metrics
+
+The subscription performance test mainly reports consumer consumption-speed metrics, in the following format:
+```bash
+INFO: consumer id 0 has poll total msgs: 376, period rate: 37.592 msgs/s, total rows: 3760000, period rate: 375924.815 rows/s
+INFO: consumer id 1 has poll total msgs: 362, period rate: 36.131 msgs/s, total rows: 3620000, period rate: 361313.504 rows/s
+INFO: consumer id 2 has poll total msgs: 364, period rate: 36.378 msgs/s, total rows: 3640000, period rate: 363781.731 rows/s
+INFO: consumerId: 0, consume msgs: 1000, consume rows: 10000000
+INFO: consumerId: 1, consume msgs: 1000, consume rows: 10000000
+INFO: consumerId: 2, consume msgs: 1000, consume rows: 10000000
+INFO: Consumed total msgs: 3000, total rows: 30000000
+```
+- Lines 1 to 3 report the current consumption speed of each consumer in real time: msgs/s is the number of messages consumed per second (each message contains multiple rows of data), and rows/s is the consumption speed measured in rows
+- Lines 4 to 6 report the overall statistics of each consumer after the test completes: the total number of messages and the total number of rows consumed
+- Line 7 reports the overall statistics across all consumers: `msgs` is the total number of messages consumed, `rows` is the total number of rows consumed
+
 ## Configuration File Parameters Detailed Explanation
 
 ### General Configuration Parameters
 
@@ -331,21 +387,6 @@ Parameters related to supertable creation are configured in the `super_tables` s
 
 - **repeat_ts_max** : Numeric type, when composite primary key is enabled, specifies the maximum number of records with the same timestamp to be generated
 - **sqls** : Array of strings type, specifies the array of sql to be executed after the supertable is successfully created, the table name specified in sql must be prefixed with the database name, otherwise an unspecified database error will occur
 
-#### tsma Configuration Parameters
-
-Specify the configuration parameters for tsma in `super_tables` under `tsmas`, with the following specific parameters:
-
-- **name**: Specifies the name of the tsma, mandatory.
-
-- **function**: Specifies the function of the tsma, mandatory.
-
-- **interval**: Specifies the time interval for the tsma, mandatory.
-
-- **sliding**: Specifies the window time shift for the tsma, mandatory.
-
-- **custom**: Specifies custom configuration appended at the end of the tsma creation statement, optional.
-
-- **start_when_inserted**: Specifies when to create the tsma after how many rows are inserted, optional, default is 0.
 
 #### Tag and Data Column Configuration Parameters
 
@@ -423,6 +464,11 @@ For other common parameters, see Common Configuration Parameters.
 
 Configuration parameters for querying specified tables (can specify supertables, subtables, or regular tables) are set in `specified_table_query`.
 
+
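Before the parameter-by-parameter description, a minimal `specified_table_query` configuration may help orient the reader. This is a sketch; the connection values, file name, and SQL statement are illustrative assumptions:

```shell
# Sketch: write a minimal query-test configuration, then run it.
cat > query.json <<'EOF'
{
  "filetype": "query",
  "host": "127.0.0.1",
  "port": 6030,
  "user": "root",
  "password": "taosdata",
  "confirm_parameter_prompt": "no",
  "databases": "test",
  "query_times": 10000,
  "specified_table_query": {
    "query_interval": 0,
    "threads": 3,
    "mixed_query": "no",
    "sqls": [
      { "sql": "select count(*) from test.meters", "result": "./query_res0.txt" }
    ]
  }
}
EOF
taosBenchmark -f query.json
```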
+- **mixed_query** : "yes" for `Mixed Query`, "no" for `Normal Query`; default is "no".
+`Mixed Query`: All SQL statements in `sqls` are divided into groups by the number of threads, and each thread executes one group; each SQL statement in a thread executes `query_times` queries.
+`Normal Query`: For each SQL statement in `sqls`, `threads` threads are started, and a thread exits after executing the statement `query_times` times; the next SQL statement starts only after all threads of the previous statement have finished and exited.
+Regardless of whether `Normal Query` or `Mixed Query` is used, the total number of query executions is the same: total queries = number of statements in `sqls` * `threads` * `query_times`. The difference is that `Normal Query` starts `threads` threads for each SQL statement, while `Mixed Query` starts `threads` threads only once for all statements, so the number of thread startups differs between the two.
+
 - **query_interval** : Query interval, in seconds, default is 0.
 
 - **threads** : Number of threads executing the SQL query, default is 1.
 
@@ -433,7 +479,8 @@ Configuration parameters for querying specified tables (can specify supertables,
 
 #### Configuration Parameters for Querying Supertables
 
-Configuration parameters for querying supertables are set in `super_table_query`.
+Configuration parameters for querying supertables are set in `super_table_query`.
+The threading model of a supertable query is the same as the `Normal Query` mode of the specified queries described above, except that `sqls` is automatically populated with all subtables.
 
 - **stblname** : The name of the supertable to query, required.
 
diff --git a/docs/en/14-reference/03-taos-sql/10-function.md b/docs/en/14-reference/03-taos-sql/10-function.md
index e6cfa20bd4..ab5c48bce2 100644
--- a/docs/en/14-reference/03-taos-sql/10-function.md
+++ b/docs/en/14-reference/03-taos-sql/10-function.md
@@ -943,6 +943,7 @@ CHAR(expr1 [, expr2] [, expr3] ...)
 - NULL values in input parameters will be skipped.
 - If the input parameters are of string type, they will be converted to numeric type for processing.
 - If the character corresponding to the input parameter is a non-printable character, the return value will still contain the character corresponding to that parameter, but it may not be displayed.
+- This function can have at most 2^31 - 1 input parameters.
 
 **Examples**:
 
diff --git a/docs/en/14-reference/03-taos-sql/12-distinguished.md b/docs/en/14-reference/03-taos-sql/12-distinguished.md
index 4c09f140a1..e98b654be3 100644
--- a/docs/en/14-reference/03-taos-sql/12-distinguished.md
+++ b/docs/en/14-reference/03-taos-sql/12-distinguished.md
@@ -148,6 +148,7 @@ When using time windows, note:
 
 - The window width of the aggregation period is specified by the keyword INTERVAL, with the shortest interval being 10 milliseconds (10a); it also supports an offset (the offset must be less than the interval), which is the offset of the time window division compared to "UTC moment 0". The SLIDING statement is used to specify the forward increment of the aggregation period, i.e., the duration of each window slide forward.
 - When using the INTERVAL statement, unless in very special cases, it is required to configure the timezone parameter in the taos.cfg configuration files of both the client and server to the same value to avoid frequent cross-time zone conversions by time processing functions, which can cause severe performance impacts.
 - The returned results have a strictly monotonically increasing time-series.
+- When using AUTO as the window offset, if the WHERE time condition is complex, such as multiple AND/OR/IN combinations, AUTO may not take effect. In such cases, you can manually specify the window offset to resolve the issue.
 - When using AUTO as the window offset, if the window width unit is d (day), n (month), w (week), y (year), such as: INTERVAL(1d, AUTO), INTERVAL(3w, AUTO), the TSMA optimization cannot take effect. If TSMA is manually created on the target table, the statement will report an error and exit; in this case, you can explicitly specify the Hint SKIP_TSMA or not use AUTO as the window offset.
 
 ### State Window
 
diff --git a/docs/en/14-reference/03-taos-sql/24-show.md b/docs/en/14-reference/03-taos-sql/24-show.md
index 36c20df0b4..b46fb41fa0 100644
--- a/docs/en/14-reference/03-taos-sql/24-show.md
+++ b/docs/en/14-reference/03-taos-sql/24-show.md
@@ -304,9 +304,10 @@ Displays information about all topics in the current database.
 
 ```sql
 SHOW TRANSACTIONS;
+SHOW TRANSACTION [transaction_id];
 ```
 
-Displays information about transactions currently being executed in the system (these transactions are only for metadata level, not for regular tables).
+Displays information about one or all transactions currently being executed in the system (these transactions are only at the metadata level, not for regular tables).
 
 ## SHOW USERS
 
diff --git a/docs/en/26-tdinternal/01-arch.md b/docs/en/26-tdinternal/01-arch.md
index 55c56a7681..ef689e0b74 100644
--- a/docs/en/26-tdinternal/01-arch.md
+++ b/docs/en/26-tdinternal/01-arch.md
@@ -328,8 +328,35 @@ In addition to precomputation, TDengine also supports various downsampling stora
 
 ### Multi-Level Storage and Object Storage
 
-By default, TDengine stores all data in the /var/lib/taos directory. To expand storage capacity, reduce potential bottlenecks caused by file reading, and enhance data throughput, TDengine allows the use of the configuration parameter `dataDir` to enable the cluster to utilize multiple mounted hard drives simultaneously.
+By default, TDengine saves all data under the /var/lib/taos directory, with the data files of each vnode stored in their own subdirectory. To expand storage capacity, reduce file-read bottlenecks, and improve data throughput, TDengine can be configured through the system parameter "dataDir" to use multiple mounted hard disks at the same time. In addition, TDengine provides tiered data storage, i.e. data is placed on different storage media according to the timestamps of its data files. For example, the latest data can be stored on SSD, data older than a week on local hard disk, and data older than four weeks on a network storage device. This reduces storage costs while keeping data access efficient. Data movement across storage media is performed automatically by the system and is completely transparent to applications. Tiered storage is likewise configured through the system parameter "dataDir".
+
+The dataDir format is as follows:
+
+```
+dataDir data_path [tier_level] [primary] [disable_create_new_file]
+```
+
+Where `data_path` is the folder path of the mount point and `tier_level` is the storage tier of the media; the higher the tier, the older the data files it holds. Multiple hard disks can be mounted at the same tier, and data files on a given tier are distributed across all disks within that tier. TDengine supports up to 3 storage tiers, so tier_level can be 0, 1, or 2. When configuring dataDir, exactly one mount path must be specified without a tier_level; it is called the special mount disk (path). This mount path defaults to tier 0 storage media and contains special file links, which must not be removed; otherwise the written data will be irreparably damaged. `primary` indicates whether the data directory is the primary mount point; enter 0 for false or 1 for true, with a default of 1. A TDengine cluster can have only one primary mount point, which must be on tier 0. `disable_create_new_file` indicates whether creating new file sets on the specified mount point is prohibited; enter 0 for false or 1 for true, with a default of 0. Tier 0 storage must have at least one mount point with disable_create_new_file set to 0; tier 1 and tier 2 storage have no such restriction.
+
+Suppose a physical node has six mountable hard disks /mnt/disk1, /mnt/disk2, ..., /mnt/disk6, where disk1 and disk2 are to serve as tier 0 storage media, disk3 and disk4 as tier 1, and disk5 and disk6 as tier 2. Disk1 is the special mount disk. The corresponding configuration in /etc/taos/taos.cfg is as follows:
+
+```
+dataDir /mnt/disk1/taos 0 1 0
+dataDir /mnt/disk2/taos 0 0 0
+dataDir /mnt/disk3/taos 1 0 0
+dataDir /mnt/disk4/taos 1 0 1
+dataDir /mnt/disk5/taos 2 0 0
+dataDir /mnt/disk6/taos 2 0 0
+```
+
+A mounted disk can also be a non-local network disk, as long as the system can access it.
+
+You can use the following command to dynamically modify dataDir and control whether disable_create_new_file is enabled for a given directory.
+
+```
+alter dnode 1 "/mnt/disk2/taos 1";
+```
+
+Note: Tiered storage is only supported in the Enterprise Edition.
 
-Additionally, TDengine offers tiered data storage functionality, allowing users to store data from different time periods in directories on different storage devices. This facilitates the separation of "hot" data (frequently accessed) and "cold" data (less frequently accessed), making full use of various storage resources while saving costs. For example, data that is recently collected and requires frequent access can be stored on high-performance solid-state drives due to their high read performance requirements. Data that exceeds a certain age and has lower query demands can be stored on mechanically driven hard disks, which are relatively cheaper.
-To further reduce storage costs, TDengine also supports storing time-series data in object storage systems. Through its innovative design, in most cases, the performance of querying time-series data from object storage systems is close to half that of local disks, and in some scenarios, the performance can even be comparable to local disks. Additionally, TDengine allows users to perform delete and update operations on time-series data stored in object storage.
diff --git a/docs/zh/06-advanced/05-data-in/07-mqtt.md b/docs/zh/06-advanced/05-data-in/07-mqtt.mdx
similarity index 98%
rename from docs/zh/06-advanced/05-data-in/07-mqtt.md
rename to docs/zh/06-advanced/05-data-in/07-mqtt.mdx
index a0e121f632..3ffab4dfbf 100644
--- a/docs/zh/06-advanced/05-data-in/07-mqtt.md
+++ b/docs/zh/06-advanced/05-data-in/07-mqtt.mdx
@@ -166,6 +166,12 @@ json 数据支持 JSONObject 或者 JSONArray,使用 json 解析器可以解
 
 ![mqtt-14](./mqtt-14.png)
 
-### 8. 创建完成
+### 8. 异常处理策略
+
+import Contributing from './_03-exception-handling-strategy.mdx'
+
+<Contributing />
+
+### 9. 创建完成
 
 点击 **提交** 按钮,完成创建 MQTT 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。
 
diff --git a/docs/zh/06-advanced/05-data-in/08-kafka.md b/docs/zh/06-advanced/05-data-in/08-kafka.mdx
similarity index 97%
rename from docs/zh/06-advanced/05-data-in/08-kafka.md
rename to docs/zh/06-advanced/05-data-in/08-kafka.mdx
index b605f84c7a..71070b271c 100644
--- a/docs/zh/06-advanced/05-data-in/08-kafka.md
+++ b/docs/zh/06-advanced/05-data-in/08-kafka.mdx
@@ -196,12 +196,16 @@ json 数据支持 JSONObject 或者 JSONArray,使用 json 解析器可以解
 
 ### 8. 配置高级选项
 
-**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示:
+import AdvancedOptions from './_02-advanced_options.mdx'
 
-![kafka-15.png](./kafka-15.png)
+<AdvancedOptions />
 
-![kafka-16.png](./kafka-16.png)
+### 9. 异常处理策略
 
-### 9. 创建完成
+import Contributing from './_03-exception-handling-strategy.mdx'
+
+<Contributing />
+
+### 10. 创建完成
 
 点击 **提交** 按钮,完成创建 Kafka 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。
 
diff --git a/docs/zh/06-advanced/05-data-in/09-influxdb.md b/docs/zh/06-advanced/05-data-in/09-influxdb.mdx
similarity index 94%
rename from docs/zh/06-advanced/05-data-in/09-influxdb.md
rename to docs/zh/06-advanced/05-data-in/09-influxdb.mdx
index d0b781667d..b88bcdf3c6 100644
--- a/docs/zh/06-advanced/05-data-in/09-influxdb.md
+++ b/docs/zh/06-advanced/05-data-in/09-influxdb.mdx
@@ -75,9 +75,9 @@ InfluxDB 是一种流行的开源时间序列数据库,它针对处理大量
 
 ### 6. 配置高级选项
 
-**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示:
-![InfluxDB-09zh-AdvancedOptionsExpandButton.png](./pic/InfluxDB-09zh-AdvancedOptionsExpandButton.png "高级选项展开按钮")
-![InfluxDB-10zh-AdvancedOptionsExpand.png](./pic/InfluxDB-10zh-AdvancedOptionsExpand.png "高级选项展开按钮")
+import AdvancedOptions from './_02-advanced_options.mdx'
+
+<AdvancedOptions />
 
 ### 7. 创建完成
 
diff --git a/docs/zh/06-advanced/05-data-in/10-opentsdb.md b/docs/zh/06-advanced/05-data-in/10-opentsdb.mdx
similarity index 92%
rename from docs/zh/06-advanced/05-data-in/10-opentsdb.md
rename to docs/zh/06-advanced/05-data-in/10-opentsdb.mdx
index 3737f2a415..eeb4e37988 100644
--- a/docs/zh/06-advanced/05-data-in/10-opentsdb.md
+++ b/docs/zh/06-advanced/05-data-in/10-opentsdb.mdx
@@ -58,9 +58,9 @@ OpenTSDB 是一个架构在 HBase 系统之上的实时监控信息收集和展
 
 ### 5. 配置高级选项
 
-**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示:
-![OpenTSDB-07zh-AdvancedOptionsExpandButton.png](./pic/OpenTSDB-07zh-AdvancedOptionsExpandButton.png "高级选项展开按钮")
-![OpenTSDB-08zh-AdvancedOptionsExpand.png](./pic/OpenTSDB-08zh-AdvancedOptionsExpand.png "高级选项展开按钮")
+import AdvancedOptions from './_02-advanced_options.mdx'
+
+<AdvancedOptions />
 
 ### 6. 创建完成
 
diff --git a/docs/zh/06-advanced/05-data-in/11-csv.md b/docs/zh/06-advanced/05-data-in/11-csv.mdx
similarity index 95%
rename from docs/zh/06-advanced/05-data-in/11-csv.md
rename to docs/zh/06-advanced/05-data-in/11-csv.mdx
index 4924ed2fbd..5737fc8b79 100644
--- a/docs/zh/06-advanced/05-data-in/11-csv.md
+++ b/docs/zh/06-advanced/05-data-in/11-csv.mdx
@@ -107,13 +107,25 @@ sidebar_label: "CSV"
 
 ![csv-09.png](./csv-09.png)
 
-### 5. 创建完成
+### 5. 配置高级选项
+
+import AdvancedOptions from './_02-advanced_options.mdx'
+
+<AdvancedOptions />
+
+### 6. 异常处理策略
+
+import Contributing from './_03-exception-handling-strategy.mdx'
+
+<Contributing />
+
+### 7. 创建完成
 
 点击 **提交** 按钮,完成创建 CSV 到 TDengine 的数据同步任务,回到数据写入任务列表页面,可查看任务执行情况,也可以进行任务的“启动/停止”操作与“查看/编辑/删除/复制”操作。
 
 ![csv-10.png](./csv-10.png)
 
-### 6. 查看运行指标
+### 8. 查看运行指标
 
 点击 **查看** 按钮,查看任务的运行指标,同时也可以查看任务中所有文件的处理情况。
 
diff --git a/docs/zh/06-advanced/05-data-in/12-aveva-historian.md b/docs/zh/06-advanced/05-data-in/12-aveva-historian.mdx
similarity index 97%
rename from docs/zh/06-advanced/05-data-in/12-aveva-historian.md
rename to docs/zh/06-advanced/05-data-in/12-aveva-historian.mdx
index ee04194dea..e8ab4c839e 100644
--- a/docs/zh/06-advanced/05-data-in/12-aveva-historian.md
+++ b/docs/zh/06-advanced/05-data-in/12-aveva-historian.mdx
@@ -134,6 +134,12 @@ split 提取器,seperator 填写分割符 `,`, number 填写 2。
 
 ![aveva-historian-08.png](pic/aveva-historian-08.png)
 
-### 7. 创建完成
+### 7. 异常处理策略
+
+import Contributing from './_03-exception-handling-strategy.mdx'
+
+<Contributing />
+
+### 8. 创建完成
 
 点击 **提交** 按钮,完成创建任务。提交任务后,回到**数据写入**页面可以查看任务状态。
 
diff --git a/docs/zh/06-advanced/05-data-in/13-mysql.md b/docs/zh/06-advanced/05-data-in/13-mysql.mdx
similarity index 93%
rename from docs/zh/06-advanced/05-data-in/13-mysql.md
rename to docs/zh/06-advanced/05-data-in/13-mysql.mdx
index 4cc84fbfa2..f1894190cb 100644
--- a/docs/zh/06-advanced/05-data-in/13-mysql.md
+++ b/docs/zh/06-advanced/05-data-in/13-mysql.mdx
@@ -98,14 +98,16 @@ MySQL 是最流行的关系型数据库之一。很多系统都曾经或正在
 
 ### 8. 配置高级选项
 
-**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示:
+import AdvancedOptions from './_02-advanced_options.mdx'
 
-**最大读取并发数** 数据源连接数或读取线程数限制,当默认参数不满足需要或需要调整资源使用量时修改此参数。
+<AdvancedOptions />
 
-**批次大小** 单次发送的最大消息数或行数。默认是 10000。
+### 9. 异常处理策略
 
-![mysql-07.png](pic/mysql-07.png)
+import Contributing from './_03-exception-handling-strategy.mdx'
 
-### 9. 创建完成
+
+<Contributing />
+
+### 10. 创建完成
 
 点击 **提交** 按钮,完成创建 MySQL 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。
 
diff --git a/docs/zh/06-advanced/05-data-in/14-postgres.md b/docs/zh/06-advanced/05-data-in/14-postgres.mdx
similarity index 93%
rename from docs/zh/06-advanced/05-data-in/14-postgres.md
rename to docs/zh/06-advanced/05-data-in/14-postgres.mdx
index af8297bfff..7651db68f2 100644
--- a/docs/zh/06-advanced/05-data-in/14-postgres.md
+++ b/docs/zh/06-advanced/05-data-in/14-postgres.mdx
@@ -99,14 +99,16 @@ TDengine 可以高效地从 PostgreSQL 读取数据并将其写入 TDengine,
 
 ### 8. 配置高级选项
 
-**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示:
+import AdvancedOptions from './_02-advanced_options.mdx'
 
-**最大读取并发数** 数据源连接数或读取线程数限制,当默认参数不满足需要或需要调整资源使用量时修改此参数。
+<AdvancedOptions />
 
-**批次大小** 单次发送的最大消息数或行数。默认是 10000。
+### 9. 异常处理策略
 
-![postgres-07.png](pic/postgres-07.png)
+import Contributing from './_03-exception-handling-strategy.mdx'
 
-### 9. 创建完成
+
+<Contributing />
+
+### 10. 创建完成
 
 点击 **提交** 按钮,完成创建 PostgreSQL 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。
 
diff --git a/docs/zh/06-advanced/05-data-in/15-oracle.md b/docs/zh/06-advanced/05-data-in/15-oracle.mdx
similarity index 93%
rename from docs/zh/06-advanced/05-data-in/15-oracle.md
rename to docs/zh/06-advanced/05-data-in/15-oracle.mdx
index 39bbab32d3..484365415e 100644
--- a/docs/zh/06-advanced/05-data-in/15-oracle.md
+++ b/docs/zh/06-advanced/05-data-in/15-oracle.mdx
@@ -91,14 +91,16 @@ TDengine 可以高效地从 Oracle 读取数据并将其写入 TDengine,以实
 
 ### 7. 配置高级选项
 
-**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示:
+import AdvancedOptions from './_02-advanced_options.mdx'
 
-**最大读取并发数** 数据源连接数或读取线程数限制,当默认参数不满足需要或需要调整资源使用量时修改此参数。
+<AdvancedOptions />
 
-**批次大小** 单次发送的最大消息数或行数。默认是 10000。
+### 8. 异常处理策略
 
-![oracle-06.png](pic/oracle-06.png)
+import Contributing from './_03-exception-handling-strategy.mdx'
 
-### 8. 创建完成
+
+<Contributing />
+
+### 9. 创建完成
 
 点击 **提交** 按钮,完成创建 Oracle 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。
 
diff --git a/docs/zh/06-advanced/05-data-in/16-mssql.md b/docs/zh/06-advanced/05-data-in/16-mssql.mdx
similarity index 94%
rename from docs/zh/06-advanced/05-data-in/16-mssql.md
rename to docs/zh/06-advanced/05-data-in/16-mssql.mdx
index 81e9e98013..1e6b9928be 100644
--- a/docs/zh/06-advanced/05-data-in/16-mssql.md
+++ b/docs/zh/06-advanced/05-data-in/16-mssql.mdx
@@ -105,14 +105,16 @@ Microsoft SQL Server 是最流行的关系型数据库之一。很多系统都
 
 ### 8. 配置高级选项
 
-**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示:
+import AdvancedOptions from './_02-advanced_options.mdx'
 
-**最大读取并发数** 数据源连接数或读取线程数限制,当默认参数不满足需要或需要调整资源使用量时修改此参数。
+<AdvancedOptions />
 
-**批次大小** 单次发送的最大消息数或行数。默认是 10000。
+### 9. 异常处理策略
 
-![mssql-07.png](pic/mssql-07.png)
+import Contributing from './_03-exception-handling-strategy.mdx'
 
-### 9. 创建完成
+
+<Contributing />
+
+### 10. 创建完成
 
 点击 **提交** 按钮,完成创建 Microsoft SQL Server 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。
 
diff --git a/docs/zh/06-advanced/05-data-in/17-mongodb.md b/docs/zh/06-advanced/05-data-in/17-mongodb.mdx
similarity index 94%
rename from docs/zh/06-advanced/05-data-in/17-mongodb.md
rename to docs/zh/06-advanced/05-data-in/17-mongodb.mdx
index 5311bc43c6..e92f37a6f0 100644
--- a/docs/zh/06-advanced/05-data-in/17-mongodb.md
+++ b/docs/zh/06-advanced/05-data-in/17-mongodb.mdx
@@ -122,14 +122,16 @@ MongoDB 是一个介于关系型数据库与非关系型数据库之间的产品
 
 ### 8. 配置高级选项
 
-**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示:
+import AdvancedOptions from './_02-advanced_options.mdx'
 
-**最大读取并发数** 数据源连接数或读取线程数限制,当默认参数不满足需要或需要调整资源使用量时修改此参数。
+<AdvancedOptions />
 
-**批次大小** 单次发送的最大消息数或行数。默认是 10000。
+### 9. 异常处理策略
 
-![mongodb-07.png](pic/mongodb-07.png)
+import Contributing from './_03-exception-handling-strategy.mdx'
 
-### 9. 创建完成
+
+<Contributing />
+
+### 10. 创建完成
 
 点击 **提交** 按钮,完成创建 MongoDB 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。
 
diff --git a/docs/zh/06-advanced/05-data-in/_02-advanced_options.mdx b/docs/zh/06-advanced/05-data-in/_02-advanced_options.mdx
new file mode 100644
index 0000000000..f37de063c0
--- /dev/null
+++ b/docs/zh/06-advanced/05-data-in/_02-advanced_options.mdx
@@ -0,0 +1,7 @@
+**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示:
+
+**最大读取并发数** 数据源连接数或读取线程数限制,当默认参数不满足需要或需要调整资源使用量时修改此参数。
+
+**批次大小** 单次发送的最大消息数或行数。默认是 10000。
+
+![advanced_options.png](pic/advanced_options.png)
\ No newline at end of file
diff --git a/docs/zh/06-advanced/05-data-in/_03-exception-handling-strategy.mdx b/docs/zh/06-advanced/05-data-in/_03-exception-handling-strategy.mdx
new file mode 100644
index 0000000000..470c304ff3
--- /dev/null
+++ b/docs/zh/06-advanced/05-data-in/_03-exception-handling-strategy.mdx
@@ -0,0 +1,23 @@
+异常处理策略区域是对数据异常时的处理策略进行配置,默认折叠,点击右侧 `>` 可以展开,如下图所示:
+
+![exception-handling-strategy.png](pic/exception-handling-strategy.png)
+
+各异常项说明及相应可选处理策略如下:
+
+> 通用处理策略说明:
+> 归档:将异常数据写入归档文件(默认路径为 `${data_dir}/tasks/_id/.datetime`),不写入目标库
+> 丢弃:将异常数据忽略,不写入目标库
+> 报错:任务报错
+
+- **主键时间戳溢出** 检查数据中第一列时间戳是否在正确的时间范围内(now - keep1, now + 100y),可选处理策略:归档、丢弃、报错
+- **主键时间戳空** 检查数据中第一列时间戳是否为空,可选处理策略:归档、丢弃、报错、使用当前时间
+  > 使用当前时间:使用当前时间填充到空的时间戳字段中
+- **表名长度溢出** 检查子表表名的长度是否超出限制(最大 192 字符),可选处理策略:归档、丢弃、报错、截断、截断且归档
+  > 截断:截取原始表名的前 192 个字符作为新的表名
+  > 截断且归档:截取原始表名的前 192 个字符作为新的表名,并且将此行记录写入归档文件
+- **表名非法字符** 检查子表表名中是否包含特殊字符(符号 `.` 等),可选处理策略:归档、丢弃、报错、非法字符替换为指定字符串
+  > 非法字符替换为指定字符串:将原始表名中的特殊字符替换为后方输入框中的指定字符串,例如 `a.b` 替换为 `a_b`
+- **表名模板变量空值** 检查子表表名模板中的变量是否为空,可选处理策略:丢弃、留空、变量替换为指定字符串
+  > 留空:变量位置不做任何特殊处理,例如 `a_{x}` 转换为 `a_`
+  > 变量替换为指定字符串:变量位置使用后方输入框中的指定字符串,例如 `a_{x}` 转换为 `a_b`
+- **列名长度溢出** 检查列名的长度是否超出限制(最大 64 字符),可选处理策略:归档、丢弃、报错
\ No newline at end of file
diff --git a/docs/zh/06-advanced/05-data-in/kafka-15.png b/docs/zh/06-advanced/05-data-in/kafka-15.png
deleted file mode 100644
index 96d593dad9..0000000000
Binary files a/docs/zh/06-advanced/05-data-in/kafka-15.png and /dev/null differ
diff --git a/docs/zh/06-advanced/05-data-in/kafka-16.png b/docs/zh/06-advanced/05-data-in/kafka-16.png
deleted file mode 100644
index 395453c410..0000000000
Binary files a/docs/zh/06-advanced/05-data-in/kafka-16.png and /dev/null differ
diff --git a/docs/zh/06-advanced/05-data-in/pic/InfluxDB-09zh-AdvancedOptionsExpandButton.png b/docs/zh/06-advanced/05-data-in/pic/InfluxDB-09zh-AdvancedOptionsExpandButton.png
deleted file mode 100644
index f12692c506..0000000000
Binary files a/docs/zh/06-advanced/05-data-in/pic/InfluxDB-09zh-AdvancedOptionsExpandButton.png and /dev/null differ
diff --git a/docs/zh/06-advanced/05-data-in/pic/InfluxDB-10zh-AdvancedOptionsExpand.png b/docs/zh/06-advanced/05-data-in/pic/InfluxDB-10zh-AdvancedOptionsExpand.png
deleted file mode 100644
index dbb188852c..0000000000
Binary files a/docs/zh/06-advanced/05-data-in/pic/InfluxDB-10zh-AdvancedOptionsExpand.png and /dev/null differ
diff --git a/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-07zh-AdvancedOptionsExpandButton.png b/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-07zh-AdvancedOptionsExpandButton.png
deleted file mode 100644
index 65d6344e56..0000000000
Binary files a/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-07zh-AdvancedOptionsExpandButton.png and /dev/null differ
diff --git a/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-08zh-AdvancedOptionsExpand.png b/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-08zh-AdvancedOptionsExpand.png
deleted file mode 100644
index ea5dc538e5..0000000000
Binary files a/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-08zh-AdvancedOptionsExpand.png and /dev/null differ
diff --git a/docs/zh/06-advanced/05-data-in/pic/advanced_options.png b/docs/zh/06-advanced/05-data-in/pic/advanced_options.png
new file mode 100644
index 0000000000..8ef9b8d35a
Binary files /dev/null and b/docs/zh/06-advanced/05-data-in/pic/advanced_options.png differ
diff --git a/docs/zh/06-advanced/05-data-in/pic/exception-handling-strategy.png b/docs/zh/06-advanced/05-data-in/pic/exception-handling-strategy.png
new file mode 100644
index 0000000000..1e1d55d85c
Binary files /dev/null and b/docs/zh/06-advanced/05-data-in/pic/exception-handling-strategy.png differ
diff --git a/docs/zh/06-advanced/05-data-in/pic/mongodb-07.png b/docs/zh/06-advanced/05-data-in/pic/mongodb-07.png
deleted file mode 100644
index 2305ec3d2e..0000000000
Binary files a/docs/zh/06-advanced/05-data-in/pic/mongodb-07.png and /dev/null differ
diff --git a/docs/zh/06-advanced/05-data-in/pic/mssql-07.png b/docs/zh/06-advanced/05-data-in/pic/mssql-07.png
deleted file mode 100644
index 6c1668481c..0000000000
Binary files a/docs/zh/06-advanced/05-data-in/pic/mssql-07.png and /dev/null differ
diff --git a/docs/zh/06-advanced/05-data-in/pic/mysql-07.png b/docs/zh/06-advanced/05-data-in/pic/mysql-07.png
deleted file mode 100644
index 6c1668481c..0000000000
Binary files a/docs/zh/06-advanced/05-data-in/pic/mysql-07.png and /dev/null differ
diff --git a/docs/zh/06-advanced/05-data-in/pic/oracle-06.png b/docs/zh/06-advanced/05-data-in/pic/oracle-06.png
deleted file mode 100644
index 0de5443f08..0000000000
Binary files a/docs/zh/06-advanced/05-data-in/pic/oracle-06.png and /dev/null differ
diff --git a/docs/zh/06-advanced/05-data-in/pic/postgres-07.png b/docs/zh/06-advanced/05-data-in/pic/postgres-07.png
deleted file mode 100644
index 6c1668481c..0000000000
Binary files a/docs/zh/06-advanced/05-data-in/pic/postgres-07.png and /dev/null differ
diff --git a/docs/zh/06-advanced/06-TDgpt/02-management.md b/docs/zh/06-advanced/06-TDgpt/02-management.md
index b37c39944f..d977e25dc3 100644
--- a/docs/zh/06-advanced/06-TDgpt/02-management.md
+++ b/docs/zh/06-advanced/06-TDgpt/02-management.md
@@ -66,7 +66,7 @@ pidfile = /usr/local/taos/taosanode/taosanode.pid
 # uWSGI log files
 logto = /var/log/taos/taosanode/taosanode.log
 
-# wWSGI monitor port
+# uWSGI monitor port
 stats = 127.0.0.1:8387
 
 # python virtual environment directory, used by Anode
@@ -86,7 +86,7 @@ log-level = DEBUG
 
 **提示** 请勿设置 `daemonize` 参数,该参数会导致 uWSGI 与 systemctl 冲突,从而导致 Anode 无法正常启动。
 
-上面的示例配置文件 `taosanode.ini` 只包含了使用 Anode 提供服务的基础配置参数,对于 uWSGI 的其他配置参数的设置及其说明请参考 [uWSGIS官方文档](https://uwsgi-docs-zh.readthedocs.io/zh-cn/latest/Options.html)。
+上面的示例配置文件 `taosanode.ini` 只包含了使用 Anode 提供服务的基础配置参数,对于 uWSGI 的其他配置参数的设置及其说明请参考 [uWSGI 官方文档](https://uwsgi-docs-zh.readthedocs.io/zh-cn/latest/Options.html)。
 
 Anode 运行配置主要是以下:
 - app-log: Anode 服务运行产生的日志,用户可以调整其到需要的位置
@@ -110,7 +110,7 @@ SHOW ANODES;
 taos> show anodes;
      id |              url |    status | create_time             | update_time             |
 ==================================================================================================================
-      1 | 192.168.0.1:6090 |     ready | 2024-11-28 18:44:27.089 | 2024-11-28 18:44:27.089 |
+      1 | 192.168.0.1:6090 |     ready | 2024-11-28 18:44:27.089 | 2024-11-28 18:44:27.089 |
 Query OK, 1 row(s) in set (0.037205s)
 
diff --git a/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md b/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md
index 3981fff8c6..71b97aa996 100644
--- a/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md
+++ b/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md
@@ -41,7 +41,7 @@ algo=expr1
 "}
 ```
 
-1. `column_expr`:预测的时序数据列。与异常检测相同,只支持数值类型列输入。
+1. `column_expr`:预测的时序数据列,只支持数值类型列输入。
 2. `options`:预测函数的参数。字符串类型,其中使用 K=V 方式调用算法及相关参数。采用逗号分隔的 K=V 字符串表示,其中的字符串不需要使用单引号、双引号、或转义号等符号,不能使用中文及其他宽字符。预测支持 `conf`、`every`、`rows`、`start` 几个控制参数,其含义如下:
 
 ### 参数说明
 
diff --git a/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md b/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md
index 5395dc374b..841722c6a2 100644
--- a/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md
+++ b/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md
@@ -99,7 +99,7 @@ def test_myfc(self):
     s = loader.get_service("myfc")
 
     # 设置用于预测分析的数据
-    s.set_input_list(self.get_input_list())
+    s.set_input_list(self.get_input_list(), None)
 
     # 检查预测结果应该全部为 1
     r = s.set_params(
         {"fc_rows": 10, "start_ts": 171000000, "time_step": 86400 * 30, "start_p": 0}
diff --git a/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md b/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md
index 5a9ac20140..7c85d41c50 100644
--- a/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md
+++ b/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md
@@ -44,10 +44,10 @@ class _MyAnomalyDetectionService(AbstractAnomalyDetectionService):
 
     def set_params(self, params):
         """该算法无需任何输入参数,直接重载父类该函数,不处理算法参数设置逻辑"""
-        pass
+        return super().set_params(params)
 ```
 
-将该文件保存在 `./lib/taosanalytics/algo/ad/` 目录下,然后重启 taosanode 服务。在 TDengine 命令行接口 taos 中执行 `SHOW ANODES FULL` 就能够看到新加入的算法,然后应用就可以通过 SQL 语句调用该检测算法。
+将该文件保存在 `./lib/taosanalytics/algo/ad/` 目录下,然后重启 taosanode 服务。在 TDengine 命令行接口 taos 中执行 `SHOW ANODES FULL` 就能够看到新加入的算法,然后就可以通过 SQL 语句调用该算法。
 
 ```SQL
 --- 对 col 列进行异常检测,通过指定 algo 参数为 myad 来调用新添加的异常检测类
@@ -65,7 +65,7 @@ def test_myad(self):
     s = loader.get_service("myad")
 
     # 设置需要进行检测的输入数据
-    s.set_input_list(AnomalyDetectionTest.input_list)
+    s.set_input_list(AnomalyDetectionTest.input_list, None)
 
     r = s.execute()
 
diff --git a/docs/zh/08-operation/04-maintenance.md b/docs/zh/08-operation/04-maintenance.md
index 9ef165179d..429542485d 100644
--- a/docs/zh/08-operation/04-maintenance.md
+++ b/docs/zh/08-operation/04-maintenance.md
@@ -19,7 +19,7 @@ TDengine 面向多种写入场景,而很多写入场景下,TDengine 的存
 
 ```SQL
 COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY'];
 COMPACT [db_name.]VGROUPS IN (vgroup_id1, vgroup_id2, ...) [start with 'XXXX'] [end with 'YYYY'];
-SHOW COMPACTS [compact_id];
+SHOW COMPACT [compact_id];
 KILL COMPACT compact_id;
 ```
 
diff --git a/docs/zh/08-operation/05-monitor.md b/docs/zh/08-operation/05-monitor.md
index abbd54736b..897f813872 100644
--- a/docs/zh/08-operation/05-monitor.md
+++ b/docs/zh/08-operation/05-monitor.md
@@ -145,3 +145,47 @@ toasX 的配置文件(默认 /etc/taos/taosx.toml) 中与 monitor 相关的配
 
 #### 限制
 
 只有在以 server 模式运行 taosX 时,与监控相关的配置才生效。
+
+## explorer 集成监控面板
+
+explorer 支持集成已有的 grafana dashboard。
+
+### 配置 grafana
+
+编辑 grafana.ini,修改以下配置项。注意:配置 root_url 可能会改变现有的 grafana 使用习惯,但为了将 grafana 集成到 explorer 并通过 explorer 做服务代理,需要这样配置。
+
+``` toml
+[server]
+# If you use reverse proxy and sub path specify full url (with sub path)
+root_url = http://ip:3000/grafana
+# Serve Grafana from subpath specified in `root_url` setting. By default it is set to `false` for compatibility reasons.
+serve_from_sub_path = true
+
+[security]
+# set to true if you want to allow browsers to render Grafana in a <frame>,