diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 0f7a450920..6fb2a69847 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 3588b3d + GIT_TAG e7270c9 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/docs/en/12-taos-sql/10-function.md b/docs/en/12-taos-sql/10-function.md index f74d0dbe5c..ab1d2f900b 100644 --- a/docs/en/12-taos-sql/10-function.md +++ b/docs/en/12-taos-sql/10-function.md @@ -126,7 +126,7 @@ SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause] SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The rounded down value of a specific field +**Description**: The rounded down value of a specific field **More explanations**: The restrictions are same as those of the `CEIL` function. #### LOG @@ -173,7 +173,7 @@ SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause] SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The rounded value of a specific field. +**Description**: The rounded value of a specific field. **More explanations**: The restrictions are same as those of the `CEIL` function. @@ -434,7 +434,7 @@ SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause]; **More explanations**: - You can specify a time zone in the following format: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]。 For example, TO_ISO8601(1, "+00:00"). -- If the input is a UNIX timestamp, the precision of the returned value is determined by the digits of the input timestamp +- If the input is a UNIX timestamp, the precision of the returned value is determined by the digits of the input timestamp - If the input is a column of TIMESTAMP type, the precision of the returned value is same as the precision set for the current data base in use @@ -769,14 +769,14 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam **Explanations**: - bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin"。 -- bin_description: parameter to describe how to generate buckets,can be in the following JSON formats for each bin_type respectively: - - "user_input": "[1, 3, 5, 7]": +- bin_description: parameter to describe how to generate buckets,can be in the following JSON formats for each bin_type respectively: + - "user_input": "[1, 3, 5, 7]": User specified bin values. - + - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}" "start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated set of bins. The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf]. - + - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}" "start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated range of bins. The above "linear_bin" descriptor generates a set of bins: [-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]. @@ -862,9 +862,9 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] RA - `INTERP` is used to get the value that matches the specified time slice from a column. 
If no such value exists an interpolation value will be returned based on `FILL` parameter. - The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input. -- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1<=timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified. -- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified `EVERY` parameter. -- Interpolation is performed based on `FILL` parameter. +- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1<=timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified. +- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified `EVERY` parameter. +- Interpolation is performed based on `FILL` parameter. - `INTERP` can only be used to interpolate in single timeline. So it must be used with `partition by tbname` when it's used on a STable. ### LAST @@ -917,7 +917,7 @@ SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause]; **Return value type**:Same as the data type of the column being operated upon -**Applicable data types**: Numeric, Timestamp +**Applicable data types**: Numeric **Applicable table types**: standard tables and supertables @@ -932,7 +932,7 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause]; **Return value type**:Same as the data type of the column being operated upon -**Applicable data types**: Numeric, Timestamp +**Applicable data types**: Numeric **Applicable table types**: standard tables and supertables @@ -968,7 +968,7 @@ SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause] **Applicable table types**: standard tables and supertables -**More explanations**: +**More explanations**: This function cannot be used in expression calculation. - Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline @@ -1046,10 +1046,10 @@ SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause] **Applicable table types**: standard tables and supertables -**More explanations**: - +**More explanations**: + - Arithmetic operation can't be performed on the result of `csum` function -- Can only be used with aggregate functions This function can be used with supertables and standard tables. +- Can only be used with aggregate functions This function can be used with supertables and standard tables. - Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline @@ -1067,8 +1067,8 @@ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHER **Applicable table types**: standard tables and supertables -**More explanation**: - +**More explanation**: + - It can be used together with `PARTITION BY tbname` against a STable. - It can be used together with a selected column. 
For example: select \_rowts, DERIVATIVE() from。 @@ -1086,7 +1086,7 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER **Applicable table types**: standard tables and supertables -**More explanation**: +**More explanation**: - The number of result rows is the number of rows subtracted by one, no output for the first row - It can be used together with a selected column. For example: select \_rowts, DIFF() from。 @@ -1123,9 +1123,9 @@ SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause] **Applicable table types**: standard tables and supertables -**More explanations**: - -- Arithmetic operation can't be performed on the result of `MAVG`. +**More explanations**: + +- Arithmetic operation can't be performed on the result of `MAVG`. - Can only be used with data columns, can't be used with tags. - Can't be used with aggregate functions. - Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline diff --git a/docs/en/12-taos-sql/20-keywords.md b/docs/en/12-taos-sql/20-keywords.md index d7b9c73c4f..f7eb067c96 100644 --- a/docs/en/12-taos-sql/20-keywords.md +++ b/docs/en/12-taos-sql/20-keywords.md @@ -5,7 +5,9 @@ title: Reserved Keywords ## Keyword List -There are about 200 keywords reserved by TDengine, they can't be used as the name of database, STable or table with either upper case, lower case or mixed case. The following list shows all reserved keywords: +There are more than 200 keywords reserved by TDengine, they can't be used as the name of database, table, STable, subtable, column or tag with either upper case, lower case or mixed case. If you need to use these keywords, use the symbol `` ` `` to enclose the keywords, e.g. \`ADD\`. + +The following list shows all reserved keywords: ### A @@ -14,15 +16,20 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam - ACCOUNTS - ADD - AFTER +- AGGREGATE - ALL - ALTER +- ANALYZE - AND +- APPS - AS - ASC +- AT_ONCE - ATTACH ### B +- BALANCE - BEFORE - BEGIN - BETWEEN @@ -32,19 +39,27 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam - BITNOT - BITOR - BLOCKS +- BNODE +- BNODES - BOOL +- BUFFER +- BUFSIZE - BY ### C - CACHE -- CACHELAST +- CACHEMODEL +- CACHESIZE - CASCADE +- CAST - CHANGE +- CLIENT_VERSION - CLUSTER - COLON - COLUMN - COMMA +- COMMENT - COMP - COMPACT - CONCAT @@ -52,15 +67,18 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam - CONNECTION - CONNECTIONS - CONNS +- CONSUMER +- CONSUMERS +- CONTAINS - COPY +- COUNT - CREATE -- CTIME +- CURRENT_USER ### D - DATABASE - DATABASES -- DAYS - DBS - DEFERRED - DELETE @@ -69,18 +87,23 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam - DESCRIBE - DETACH - DISTINCT +- DISTRIBUTED - DIVIDE - DNODE - DNODES - DOT - DOUBLE - DROP +- DURATION ### E +- EACH +- ENABLE - END -- EQ +- EVERY - EXISTS +- EXPIRED - EXPLAIN ### F @@ -88,18 +111,20 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam - FAIL - FILE - FILL +- FIRST - FLOAT +- FLUSH - FOR - FROM -- FSYNC +- FUNCTION +- FUNCTIONS ### G -- GE - GLOB +- GRANT - GRANTS - GROUP -- GT ### H @@ -110,15 +135,18 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam - ID - IF - IGNORE -- IMMEDIA +- IMMEDIATE - IMPORT - IN -- INITIAL +- INDEX +- INDEXES +- INITIALLY +- INNER - INSERT - INSTEAD - INT - INTEGER -- INTERVA +- INTERVAL - INTO - IS - ISNULL @@ -126,6 
+154,7 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam ### J - JOIN +- JSON ### K @@ -135,46 +164,57 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam ### L -- LE +- LAST +- LAST_ROW +- LICENCES - LIKE - LIMIT - LINEAR - LOCAL -- LP -- LSHIFT -- LT ### M - MATCH +- MAX_DELAY - MAXROWS +- MERGE +- META - MINROWS - MINUS +- MNODE - MNODES - MODIFY - MODULES ### N -- NE +- NCHAR +- NEXT +- NMATCH - NONE - NOT - NOTNULL - NOW - NULL +- NULLS ### O - OF - OFFSET +- ON - OR - ORDER +- OUTPUTTYPE ### P -- PARTITION +- PAGES +- PAGESIZE +- PARTITIONS - PASS - PLUS +- PORT - PPS - PRECISION - PREV @@ -182,47 +222,63 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam ### Q +- QNODE +- QNODES - QTIME -- QUERIE +- QUERIES - QUERY -- QUORUM ### R - RAISE -- REM +- RANGE +- RATIO +- READ +- REDISTRIBUTE +- RENAME - REPLACE - REPLICA - RESET -- RESTRIC +- RESTRICT +- RETENTIONS +- REVOKE +- ROLLUP - ROW -- RP -- RSHIFT ### S +- SCHEMALESS - SCORES - SELECT - SEMI +- SERVER_STATUS +- SERVER_VERSION - SESSION - SET - SHOW -- SLASH +- SINGLE_STABLE - SLIDING - SLIMIT -- SMALLIN +- SMA +- SMALLINT +- SNODE +- SNODES - SOFFSET -- STable -- STableS +- SPLIT +- STABLE +- STABLES - STAR - STATE -- STATEMEN -- STATE_WI +- STATE_WINDOW +- STATEMENT - STORAGE - STREAM - STREAMS +- STRICT - STRING +- SUBSCRIPTIONS - SYNCDB +- SYSINFO ### T @@ -233,20 +289,24 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam - TBNAME - TIMES - TIMESTAMP +- TIMEZONE - TINYINT +- TO +- TODAY - TOPIC - TOPICS +- TRANSACTION +- TRANSACTIONS - TRIGGER +- TRIM - TSERIES - TTL ### U -- UMINUS - UNION - UNSIGNED - UPDATE -- UPLUS - USE - USER - USERS @@ -256,8 +316,11 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam - VALUE - VALUES +- VARCHAR - VARIABLE - VARIABLES +- VERBOSE +- VGROUP - VGROUPS - VIEW - VNODES @@ -265,14 +328,25 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam ### W - WAL +- WAL_FSYNC_PERIOD +- WAL_LEVEL +- WAL_RETENTION_PERIOD +- WAL_RETENTION_SIZE +- WAL_ROLL_PERIOD +- WAL_SEGMENT_SIZE +- WATERMARK - WHERE +- WINDOW_CLOSE +- WITH +- WRITE ### \_ - \_C0 -- \_QSTART -- \_QSTOP - \_QDURATION -- \_WSTART -- \_WSTOP +- \_QEND +- \_QSTART +- \_ROWTS - \_WDURATION +- \_WEND +- \_WSTART diff --git a/docs/en/13-operation/10-monitor.md b/docs/en/13-operation/10-monitor.md index a4679983f2..74a5564a2a 100644 --- a/docs/en/13-operation/10-monitor.md +++ b/docs/en/13-operation/10-monitor.md @@ -2,7 +2,7 @@ title: TDengine Monitoring --- -After TDengine is started, a database named `log` is created automatically to help with monitoring. Information that includes CPU, memory and disk usage, bandwidth, number of requests, disk I/O speed, slow queries, is written into the `log` database at a predefined interval. Additionally, some important system operations, like logon, create user, drop database, and alerts and warnings generated in TDengine are written into the `log` database too. A system operator can view the data in `log` database from TDengine CLI or from a web console. +After TDengine is started, it automatically writes monitoring data including CPU, memory and disk usage, bandwidth, number of requests, disk I/O speed, slow queries, into a designated database at a predefined interval through taosKeeper. 
Additionally, some important system operations, like logon, create user, and drop database, as well as alerts and warnings generated in TDengine, are written into the `log` database too. A system operator can view the data in the `log` database from the TDengine CLI or from a web console. The collection of the monitoring information is enabled by default, but can be disabled by parameter `monitor` in the configuration file. @@ -10,7 +10,7 @@ The collection of the monitoring information is enabled by default, but can be d TDinsight is a complete solution which uses the monitoring database `log` mentioned previously, and Grafana, to monitor a TDengine cluster. -From version 2.3.3.0, more monitoring data has been added in the `log` database. Please refer to [TDinsight Grafana Dashboard](https://grafana.com/grafana/dashboards/15167) to learn more details about using TDinsight to monitor TDengine. +Please refer to [TDinsight Grafana Dashboard](../../reference/tdinsight) to learn more details about using TDinsight to monitor TDengine. A script `TDinsight.sh` is provided to deploy TDinsight automatically. @@ -30,31 +30,14 @@ Prepare: 2. Grafana Alert Notification -There are two ways to setup Grafana alert notification. +You can use the command below to set up Grafana alert notification. -- An existing Grafana Notification Channel can be specified with parameter `-E`, the notifier uid of the channel can be obtained by `curl -u admin:admin localhost:3000/api/alert-notifications |jq` +An existing Grafana Notification Channel can be specified with the parameter `-E`; the notifier uid of the channel can be obtained by `curl -u admin:admin localhost:3000/api/alert-notifications |jq` ```bash sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E ``` -- The AliCloud SMS alert built in TDengine data source plugin can be enabled with parameter `-s`, the parameters of enabling this plugin are listed below: - - `-I`: AliCloud SMS Key ID - - `-K`: AliCloud SMS Key Secret - - `-S`: AliCloud SMS Signature - - `-C`: SMS notification template - - `-T`: Input parameters in JSON format for the SMS notification template, for example`{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}` - - `-B`: List of mobile numbers to be notified - - Below is an example of the full command using the AliCloud SMS alert. - - ```bash - sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -s \ - -I XXXXXXX -K XXXXXXXX -S taosdata -C SMS_1111111 -B 18900000000 \ - -T '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}' - ``` - Launch `TDinsight.sh` with the command above and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`. For more use cases and restrictions please refer to [TDinsight](/reference/tdinsight/).
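For convenience, the two steps documented above (looking up the notifier uid and passing it to `TDinsight.sh` with `-E`) can be combined into a single shell snippet. This is an illustrative sketch rather than part of the patch: the `jq -r '.[0].uid'` filter, the `-s` flag on curl, and the `admin:admin` credentials are assumptions based on the example commands above and may need adjusting for your environment.

```bash
# Minimal sketch (not part of the patch): wire an existing Grafana notification
# channel into the TDinsight deployment in one step. Assumes Grafana runs on
# localhost:3000 with admin/admin credentials, jq is installed, and the first
# channel returned by the API is the one you want (adjust .[0] if it is not).
NOTIFIER_UID=$(curl -s -u admin:admin localhost:3000/api/alert-notifications | jq -r '.[0].uid')

# Deploy TDinsight against the local taosAdapter and attach the notifier via -E.
sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E "$NOTIFIER_UID"
```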
diff --git a/docs/en/14-reference/07-tdinsight/assets/15146-tdengine-monitor-dashboard.json b/docs/en/14-reference/07-tdinsight/assets/15146-tdengine-monitor-dashboard.json index 54dc1062d6..f651983528 100644 --- a/docs/en/14-reference/07-tdinsight/assets/15146-tdengine-monitor-dashboard.json +++ b/docs/en/14-reference/07-tdinsight/assets/15146-tdengine-monitor-dashboard.json @@ -211,7 +211,7 @@ ], "timeFrom": null, "timeShift": null, - "title": "Leader MNode", + "title": "Master MNode", "transformations": [ { "id": "filterByValue", @@ -221,7 +221,7 @@ "config": { "id": "regex", "options": { - "value": "leader" + "value": "master" } }, "fieldName": "role" @@ -300,7 +300,7 @@ ], "timeFrom": null, "timeShift": null, - "title": "Leader MNode Create Time", + "title": "Master MNode Create Time", "transformations": [ { "id": "filterByValue", @@ -310,7 +310,7 @@ "config": { "id": "regex", "options": { - "value": "leader" + "value": "master" } }, "fieldName": "role" diff --git a/docs/en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp b/docs/en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp index a78e18028a..3bc0d960f1 100644 Binary files a/docs/en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp and b/docs/en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp differ diff --git a/docs/en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp b/docs/en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp index b152418d09..f5a602d3f9 100644 Binary files a/docs/en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp and b/docs/en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp differ diff --git a/docs/en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp b/docs/en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp index f58f48b7f1..f155fa42a0 100644 Binary files a/docs/en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp and b/docs/en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp differ diff --git a/docs/en/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp b/docs/en/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp index 00afcce013..dc0b85e262 100644 Binary files a/docs/en/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp and b/docs/en/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp differ diff --git a/docs/en/14-reference/07-tdinsight/assets/TDinsight-5-database.webp b/docs/en/14-reference/07-tdinsight/assets/TDinsight-5-database.webp index 567e5694f9..342c8cfc0a 100644 Binary files a/docs/en/14-reference/07-tdinsight/assets/TDinsight-5-database.webp and b/docs/en/14-reference/07-tdinsight/assets/TDinsight-5-database.webp differ diff --git a/docs/en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp b/docs/en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp index 8666193f59..942130d4fa 100644 Binary files a/docs/en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp and b/docs/en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp differ diff --git a/docs/en/14-reference/07-tdinsight/assets/howto-add-datasource.webp b/docs/en/14-reference/07-tdinsight/assets/howto-add-datasource.webp index 06d0ff6ed5..d7fc9e233a 100644 Binary files a/docs/en/14-reference/07-tdinsight/assets/howto-add-datasource.webp and b/docs/en/14-reference/07-tdinsight/assets/howto-add-datasource.webp differ diff --git a/docs/en/14-reference/07-tdinsight/assets/import_dashboard.webp 
b/docs/en/14-reference/07-tdinsight/assets/import_dashboard.webp index fb7958f1b9..ae2a1e8e9b 100644 Binary files a/docs/en/14-reference/07-tdinsight/assets/import_dashboard.webp and b/docs/en/14-reference/07-tdinsight/assets/import_dashboard.webp differ diff --git a/docs/en/14-reference/07-tdinsight/assets/import_dashboard_view.webp b/docs/en/14-reference/07-tdinsight/assets/import_dashboard_view.webp new file mode 100644 index 0000000000..1b10e41c75 Binary files /dev/null and b/docs/en/14-reference/07-tdinsight/assets/import_dashboard_view.webp differ diff --git a/docs/en/14-reference/07-tdinsight/assets/select_dashboard_db.webp b/docs/en/14-reference/07-tdinsight/assets/select_dashboard_db.webp new file mode 100644 index 0000000000..956132e37e Binary files /dev/null and b/docs/en/14-reference/07-tdinsight/assets/select_dashboard_db.webp differ diff --git a/docs/en/14-reference/07-tdinsight/assets/tdengine-grafana-7.x.json b/docs/en/14-reference/07-tdinsight/assets/tdengine-grafana-7.x.json index 1add8522a7..b4254c428b 100644 --- a/docs/en/14-reference/07-tdinsight/assets/tdengine-grafana-7.x.json +++ b/docs/en/14-reference/07-tdinsight/assets/tdengine-grafana-7.x.json @@ -153,7 +153,7 @@ ], "timeFrom": null, "timeShift": null, - "title": "Leader MNode", + "title": "Master MNode", "transformations": [ { "id": "filterByValue", @@ -163,7 +163,7 @@ "config": { "id": "regex", "options": { - "value": "leader" + "value": "master" } }, "fieldName": "role" @@ -246,7 +246,7 @@ ], "timeFrom": null, "timeShift": null, - "title": "Leader MNode Create Time", + "title": "Master MNode Create Time", "transformations": [ { "id": "filterByValue", @@ -256,7 +256,7 @@ "config": { "id": "regex", "options": { - "value": "leader" + "value": "master" } }, "fieldName": "role" diff --git a/docs/en/14-reference/07-tdinsight/index.md b/docs/en/14-reference/07-tdinsight/index.md index 2e56203525..d03c16a8bc 100644 --- a/docs/en/14-reference/07-tdinsight/index.md +++ b/docs/en/14-reference/07-tdinsight/index.md @@ -5,15 +5,23 @@ sidebar_label: TDinsight TDinsight is a solution for monitoring TDengine using the builtin native monitoring database and [Grafana]. -After TDengine starts, it will automatically create a monitoring database `log`. TDengine will automatically write many metrics in specific intervals into the `log` database. The metrics may include the server's CPU, memory, hard disk space, network bandwidth, number of requests, disk read/write speed, slow queries, other information like important system operations (user login, database creation, database deletion, etc.), and error alarms. With [Grafana] and [TDengine Data Source Plugin](https://github.com/taosdata/grafanaplugin/releases), TDinsight can visualize cluster status, node information, insertion and query requests, resource usage, vnode, dnode, and mnode status, exception alerts and many other metrics. This is very convenient for developers who want to monitor TDengine cluster status in real-time. This article will guide users to install the Grafana server, automatically install the TDengine data source plug-in, and deploy the TDinsight visualization panel using the `TDinsight.sh` installation script. +After TDengine starts, it automatically writes many metrics in specific intervals into a designated database. 
The metrics may include the server's CPU, memory, hard disk space, network bandwidth, number of requests, disk read/write speed, slow queries, other information like important system operations (user login, database creation, database deletion, etc.), and error alarms. With [Grafana] and [TDengine Data Source Plugin](https://github.com/taosdata/grafanaplugin/releases), TDinsight can visualize cluster status, node information, insertion and query requests, resource usage, vnode, dnode, and mnode status, exception alerts and many other metrics. This is very convenient for developers who want to monitor TDengine cluster status in real-time. This article will guide users to install the Grafana server, automatically install the TDengine data source plug-in, and deploy the TDinsight visualization panel using the `TDinsight.sh` installation script. ## System Requirements -To deploy TDinsight, a single-node TDengine server or a multi-node TDengine cluster and a [Grafana] server are required. This dashboard requires TDengine 2.3.3.0 and above, with the `log` database enabled (`monitor = 1`). +To deploy TDinsight, we need: +- a single-node TDengine server or a multi-node TDengine cluster and a [Grafana] server. This dashboard requires TDengine 3.0.1.0 and above, with the monitoring feature enabled. For detailed configuration, please refer to [TDengine monitoring configuration](../config/#monitoring-parameters). +- taosAdapter has been installed and is running, please refer to [taosAdapter](../taosadapter). +- taosKeeper has been installed and is running, please refer to [taosKeeper](../taoskeeper). + +Please record: +- The endpoint of the taosAdapter REST service, for example `http://tdengine.local:6041` +- Authentication of taosAdapter, e.g. user name and password +- The database name used by taosKeeper to store monitoring data ## Installing Grafana -We recommend using the latest [Grafana] version 7 or 8 here. You can install Grafana on any [supported operating system](https://grafana.com/docs/grafana/latest/installation/requirements/#supported-operating-systems) by following the [official Grafana documentation Instructions](https://grafana.com/docs/grafana/latest/installation/) to install [Grafana]. +We recommend using the latest [Grafana] version 8 or 9 here. You can install Grafana on any [supported operating system](https://grafana.com/docs/grafana/latest/installation/requirements/#supported-operating-systems) by following the [official Grafana documentation Instructions](https://grafana.com/docs/grafana/latest/installation/) to install [Grafana]. ### Installing Grafana on Debian or Ubuntu @@ -71,7 +79,7 @@ chmod +x TDinsight.sh ./TDinsight.sh ``` -This script will automatically download the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and [TDinsight dashboard](https://grafana.com/grafana/dashboards/15167) with configurable parameters for command-line options to the [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file to automate deployment and updates, etc. With the alert setting options provided by this script, you can also get built-in support for AliCloud SMS alert notifications.
+This script will automatically download the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and [TDinsight dashboard](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsightV3.json) with configurable parameters for command-line options to the [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file to automate deployment and updates, etc. With the alert setting options provided by this script, you can also get built-in support for AliCloud SMS alert notifications. Assume you use TDengine and Grafana's default services on the same host. Run `. /TDinsight.sh` and open the Grafana browser window to see the TDinsight dashboard. @@ -106,18 +114,6 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste -E, --external-notifier Apply external notifier uid to TDinsight dashboard. -Alibaba Cloud SMS as Notifier: --s, --sms-enabled To enable tdengine-datasource plugin builtin Alibaba Cloud SMS webhook. --N, --sms-notifier-name Provisioning notifier name.[default: TDinsight Builtin SMS] --U, --sms-notifier-uid Provisioning notifier uid, use lowercase notifier name by default. --D, --sms-notifier-is-default Set notifier as default. --I, --sms-access-key-id Alibaba Cloud SMS access key id --K, --sms-access-key-secret Alibaba Cloud SMS access key secret --S, --sms-sign-name Sign name --C, --sms-template-code Template code --T, --sms-template-param Template param, a escaped JSON string like '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}' --B, --sms-phone-numbers Comma-separated numbers list, eg "189xxxxxxxx,132xxxxxxxx" --L, --sms-listen-addr [default: 127.0.0.1:9100] ``` Most command-line options can take effect the same as environment variables. @@ -136,17 +132,6 @@ Most command-line options can take effect the same as environment variables. | -t | --tdinsight-title | TDINSIGHT_DASHBOARD_TITLE | TDinsight dashboard title. [Default: TDinsight] | -e | -tdinsight-title | -e | --tdinsight-editable | TDINSIGHT_DASHBOARD_EDITABLE | If the dashboard is configured to be editable. [Default: false] | -e | --external | -E | --external-notifier | EXTERNAL_NOTIFIER | Apply the external notifier uid to the TDinsight dashboard. | -s -| -s | --sms-enabled | SMS_ENABLED | Enable the tdengine-datasource plugin built into Alibaba Cloud SMS webhook. | -s -| -N | --sms-notifier-name | SMS_NOTIFIER_NAME | The name of the provisioning notifier. [Default: `TDinsight Builtin SMS`] | -U -| -U | --sms-notifier-uid | SMS_NOTIFIER_UID | "Notification Channel" `uid`, lowercase of the program name is used by default, other characters are replaced by "-". |-sms -| -D | --sms-notifier-is-default | SMS_NOTIFIER_IS_DEFAULT | Set built-in SMS notification to default value. |-sms-notifier-is-default -| -I | --sms-access-key-id | SMS_ACCESS_KEY_ID | Alibaba Cloud SMS access key id | -| -K | --sms-access-key-secret | SMS_ACCESS_KEY_SECRET | AliCloud SMS-access-secret-key | -| -S | --sms-sign-name | SMS_SIGN_NAME | Signature | -| -C | --sms-template-code | SMS_TEMPLATE_CODE | Template code | -| -T | --sms-template-param | SMS_TEMPLATE_PARAM | JSON template for template parameters | -| -B | --sms-phone-numbers | SMS_PHONE_NUMBERS | A comma-separated list of phone numbers, e.g. 
`"189xxxxxxxx,132xxxxxxxx"` | -| -L | --sms-listen-addr | SMS_LISTEN_ADDR | Built-in SMS webhook listener address, default is `127.0.0.1:9100` | Suppose you start a TDengine database on host `tdengine` with HTTP API port `6041`, user `root1`, and password `pass5ord`. Execute the script. @@ -166,24 +151,10 @@ Use the `uid` value obtained above as `-E` input. sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier ``` -If you want to use the [Alibaba Cloud SMS](https://www.aliyun.com/product/sms) service as a notification channel, you should enable it with the `-s` flag add the following parameters. - -- `-N`: Notification Channel name, default is `TDinsight Builtin SMS`. -- `-U`: Channel uid, default is lowercase of `name`, any other character is replaced with -, for the default `-N`, its uid is `tdinsight-builtin-sms`. -- `-I`: Alibaba Cloud SMS access key id. -- `-K`: Alibaba Cloud SMS access secret key. -- `-S`: Alibaba Cloud SMS signature. -- `-C`: Alibaba Cloud SMS template id. -- `-T`: Alibaba Cloud SMS template parameters, for JSON format template, example is as follows `'{"alarm_level":"%s", "time":"%s", "name":"%s", "content":"%s"}'`. There are four parameters: alarm level, time, name and alarm content. -- `-B`: a list of phone numbers, separated by a comma `,`. - If you want to monitor multiple TDengine clusters, you need to set up numerous TDinsight dashboards. Setting up non-default TDinsight requires some changes: the `-n` `-i` `-t` options need to be changed to non-default names, and `-N` and `-L` should also be changed if using the built-in SMS alerting feature. ```bash sudo . /TDengine.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -i tdinsight-env1 -t 'TDinsight Env1' -# If using built-in SMS notifications -sudo . /TDengine.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -i tdinsight-env1 -t 'TDinsight Env1' \ - -s -N 'Env1 SMS' -I xx -K xx -S xx -C SMS_XX -T '' -B 00000000000 -L 127.0.0.01:10611 ``` Please note that the configuration data source, notification channel, and dashboard are not changeable on the front end. You should update the configuration again via this script or manually change the configuration file in the `/etc/grafana/provisioning` directory (this is the default directory for Grafana, use the `-P` option to change it as needed). @@ -249,21 +220,23 @@ Save and test. It will report 'TDengine Data source is working' under normal cir ### Importing dashboards -Point to **+** / **Create** - **import** (or `/dashboard/import` url). +On the data source configuration page, click the **Dashboards** tab. ![TDengine Database TDinsight Import Dashboard and Configuration](./assets/import_dashboard.webp) -Type the dashboard ID `15167` in the **Import via grafana.com** location and **Load**. +Choose `TDengine for 3.x` and click `import`. -![TDengine Database TDinsight Import via grafana.com](./assets/import-dashboard-15167.webp) +After the import is complete, the `TDinsight for 3.x` dashboard is available on the `search dashboards by name` page. -Once the import is complete, the full page view of TDinsight is shown below. +![TDengine Database TDinsight Imported Dashboard View](./assets/import_dashboard_view.webp) -![TDengine Database TDinsight show](./assets/TDinsight-full.webp) +In the `TDinsight for 3.x` dashboard, choose the database used by taosKeeper to store monitoring data to see the monitoring results.
+ +![TDengine Database TDinsight Select Database](./assets/select_dashboard_db.webp) ## TDinsight dashboard details -The TDinsight dashboard is designed to provide the usage and status of TDengine-related resources [dnodes, mnodes, vnodes](../../taos-sql/node/) or databases. +The TDinsight dashboard is designed to provide the usage and status of TDengine-related resources, e.g. dnodes, mnodes, vnodes and databases. Details of the metrics are as follows. @@ -285,7 +258,6 @@ This section contains the current information and status of the cluster, the ale - **Measuring Points Used**: The number of measuring points used to enable the alert rule (no data available in the community version, healthy by default). - **Grants Expire Time**: the expiration time of the enterprise version of the enabled alert rule (no data available for the community version, healthy by default). - **Error Rate**: Aggregate error rate (average number of errors per second) for alert-enabled clusters. -- **Variables**: `show variables` table display. ### DNodes Status @@ -294,7 +266,6 @@ This section contains the current information and status of the cluster, the ale - **DNodes Status**: simple table view of `show dnodes`. - **DNodes Lifetime**: the time elapsed since the dnode was created. - **DNodes Number**: the number of DNodes changes. -- **Offline Reason**: if any dnode status is offline, the reason for offline is shown as a pie chart. ### MNode Overview @@ -309,7 +280,6 @@ This section contains the current information and status of the cluster, the ale 1. **Requests Rate(Inserts per Second)**: average number of inserts per second. 2. **Requests (Selects)**: number of query requests and change rate (count of second). -3. **Requests (HTTP)**: number of HTTP requests and request rate (count of second). ### Database @@ -319,9 +289,8 @@ Database usage, repeated for each value of the variable `$database` i.e. multipl 1. **STables**: number of super tables. 2. **Total Tables**: number of all tables. -3. **Sub Tables**: the number of all super table subtables. -4. **Tables**: graph of all normal table numbers over time. -5. **Tables Number Foreach VGroups**: The number of tables contained in each VGroups. +3. **Tables**: number of normal tables. +4. **Table number for each vgroup**: number of tables per vgroup. ### DNode Resource Usage @@ -356,12 +325,11 @@ Currently, only the number of logins per minute is reported. Support monitoring taosAdapter request statistics and status details. Includes. -1. **http_request**: contains the total number of requests, the number of failed requests, and the number of requests being processed -2. **top 3 request endpoint**: data of the top 3 requests by endpoint group -3. **Memory Used**: taosAdapter memory usage -4. **latency_quantile(ms)**: quantile of (1, 2, 5, 9, 99) stages -5. **top 3 failed request endpoint**: data of the top 3 failed requests by endpoint grouping -6. **CPU Used**: taosAdapter CPU usage +1. **http_request_inflight**: number of real-time (in-flight) requests. +2. **http_request_total**: number of total requests. +3. **http_request_fail**: number of failed requests. +4. **CPU Used**: CPU usage of taosAdapter. +5. **Memory Used**: Memory usage of taosAdapter.
## Upgrade @@ -403,13 +371,6 @@ services: TDENGINE_API: ${TDENGINE_API} TDENGINE_USER: ${TDENGINE_USER} TDENGINE_PASS: ${TDENGINE_PASS} - SMS_ACCESS_KEY_ID: ${SMS_ACCESS_KEY_ID} - SMS_ACCESS_KEY_SECRET: ${SMS_ACCESS_KEY_SECRET} - SMS_SIGN_NAME: ${SMS_SIGN_NAME} - SMS_TEMPLATE_CODE: ${SMS_TEMPLATE_CODE} - SMS_TEMPLATE_PARAM: '${SMS_TEMPLATE_PARAM}' - SMS_PHONE_NUMBERS: $SMS_PHONE_NUMBERS - SMS_LISTEN_ADDR: ${SMS_LISTEN_ADDR} ports: - 3000:3000 volumes: diff --git a/docs/zh/07-develop/07-tmq.mdx b/docs/zh/07-develop/07-tmq.mdx index 2f5c13d9b0..d9a35ab4eb 100644 --- a/docs/zh/07-develop/07-tmq.mdx +++ b/docs/zh/07-develop/07-tmq.mdx @@ -218,7 +218,7 @@ void Close() ```sql DROP DATABASE IF EXISTS tmqdb; CREATE DATABASE tmqdb; -CREATE TABLE tmqdb.stb (ts TIMESTAMP, c1 INT, c2 FLOAT, c3 VARCHAR(16) TAGS(t1 INT, t3 VARCHAR(16)); +CREATE TABLE tmqdb.stb (ts TIMESTAMP, c1 INT, c2 FLOAT, c3 VARCHAR(16)) TAGS(t1 INT, t3 VARCHAR(16)); CREATE TABLE tmqdb.ctb0 USING tmqdb.stb TAGS(0, "subtable0"); CREATE TABLE tmqdb.ctb1 USING tmqdb.stb TAGS(1, "subtable1"); INSERT INTO tmqdb.ctb0 VALUES(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00'); diff --git a/docs/zh/10-deployment/01-deploy.md b/docs/zh/10-deployment/01-deploy.md index 03b4ce30f9..eecb86ce41 100644 --- a/docs/zh/10-deployment/01-deploy.md +++ b/docs/zh/10-deployment/01-deploy.md @@ -168,7 +168,7 @@ Query OK, 8 row(s) in set (0.001154s) ## 删除数据节点 -先停止要删除的数据节点的 taosd 进程,然后启动 CLI 程序 taos,执行: +启动 CLI 程序 taos,执行: ```sql DROP DNODE "fqdn:port"; diff --git a/docs/zh/12-taos-sql/06-select.md b/docs/zh/12-taos-sql/06-select.md index b0a7d88efe..0d38cb0635 100644 --- a/docs/zh/12-taos-sql/06-select.md +++ b/docs/zh/12-taos-sql/06-select.md @@ -104,7 +104,7 @@ SELECT location, groupid, current FROM d1001 LIMIT 2; ### 结果去重 -`DISINTCT` 关键字可以对结果集中的一列或多列进行去重,去除的列既可以是标签列也可以是数据列。 +`DISTINCT` 关键字可以对结果集中的一列或多列进行去重,去除的列既可以是标签列也可以是数据列。 对标签列去重: diff --git a/docs/zh/12-taos-sql/10-function.md b/docs/zh/12-taos-sql/10-function.md index 9c5b7f771e..86e9aaa80f 100644 --- a/docs/zh/12-taos-sql/10-function.md +++ b/docs/zh/12-taos-sql/10-function.md @@ -127,7 +127,7 @@ SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause] SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**功能说明**:获得指定字段的向下取整数的结果。 +**功能说明**:获得指定字段的向下取整数的结果。 其他使用说明参见 CEIL 函数描述。 #### LOG @@ -174,7 +174,7 @@ SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause] SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**功能说明**:获得指定字段的四舍五入的结果。 +**功能说明**:获得指定字段的四舍五入的结果。 其他使用说明参见 CEIL 函数描述。 @@ -435,7 +435,7 @@ SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause]; **使用说明**: - timezone 参数允许输入的时区格式为: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]。例如,TO_ISO8601(1, "+00:00")。 -- 如果输入是表示 UNIX 时间戳的整形,返回格式精度由时间戳的位数决定; +- 如果输入是表示 UNIX 时间戳的整形,返回格式精度由时间戳的位数决定; - 如果输入是 TIMESTAMP 类型的列,返回格式的时间戳精度与当前 DATABASE 设置的时间精度一致。 @@ -770,14 +770,14 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam **详细说明**: - bin_type 用户指定的分桶类型, 有效输入类型为"user_input“, ”linear_bin", "log_bin"。 -- bin_description 描述如何生成分桶区间,针对三种桶类型,分别为以下描述格式(均为 JSON 格式字符串): - - "user_input": "[1, 3, 5, 7]" +- bin_description 描述如何生成分桶区间,针对三种桶类型,分别为以下描述格式(均为 JSON 格式字符串): + - "user_input": "[1, 3, 5, 7]" 用户指定 bin 的具体数值。 - + - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}" "start" 表示数据起始点,"width" 表示每次 bin 偏移量, "count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点和终点, 生成区间为[-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf]。 - + - "log_bin": "{"start":1.0, 
"factor": 2.0, "count": 5, "infinity": true}" "start" 表示数据起始点,"factor" 表示按指数递增的因子,"count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点和终点, 生成区间为[-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]。 @@ -918,7 +918,7 @@ SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause]; **返回数据类型**:同应用的字段。 -**适用数据类型**:数值类型,时间戳类型。 +**适用数据类型**:数值类型。 **适用于**:表和超级表。 @@ -933,7 +933,7 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause]; **返回数据类型**:同应用的字段。 -**适用数据类型**:数值类型,时间戳类型。 +**适用数据类型**:数值类型。 **适用于**:表和超级表。 @@ -969,7 +969,7 @@ SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause] **适用于**:表和超级表。 -**使用说明**: +**使用说明**: - 不能参与表达式计算;该函数可以应用在普通表和超级表上; - 使用在超级表上的时候,需要搭配 PARTITION by tbname 使用,将结果强制规约到单个时间线。 @@ -1047,10 +1047,10 @@ SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause] **适用于**:表和超级表。 -**使用说明**: - +**使用说明**: + - 不支持 +、-、*、/ 运算,如 csum(col1) + csum(col2)。 -- 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。 +- 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。 - 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。 @@ -1068,8 +1068,8 @@ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHER **适用于**:表和超级表。 -**使用说明**: - +**使用说明**: + - DERIVATIVE 函数可以在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)。 - 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE() from。 @@ -1087,7 +1087,7 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER **适用于**:表和超级表。 -**使用说明**: +**使用说明**: - 输出结果行数是范围内总行数减一,第一行没有结果输出。 - 可以与选择相关联的列一起使用。 例如: select \_rowts, DIFF() from。 @@ -1124,9 +1124,9 @@ SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause] **适用于**:表和超级表。 -**使用说明**: - -- 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1); +**使用说明**: + +- 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1); - 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用; - 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。 diff --git a/docs/zh/12-taos-sql/20-keywords.md b/docs/zh/12-taos-sql/20-keywords.md index ee36dc44f0..7530e803db 100644 --- a/docs/zh/12-taos-sql/20-keywords.md +++ b/docs/zh/12-taos-sql/20-keywords.md @@ -6,7 +6,8 @@ description: TDengine 保留关键字的详细列表 ## 保留关键字 -目前 TDengine 有将近 200 个内部保留关键字,这些关键字无论大小写如果需要用作库名、表名、STable 名、数据列名及标签列名等,需要使用符合``将关键字括起来使用,例如`ADD`。 +目前 TDengine 有 200 多个内部保留关键字,这些关键字如果需要用作库名、表名、超级表名、子表名、数据列名及标签列名等,无论大小写,需要使用符号 `` ` `` 将关键字括起来使用,例如 \`ADD\`。 + 关键字列表如下: ### A @@ -16,15 +17,20 @@ description: TDengine 保留关键字的详细列表 - ACCOUNTS - ADD - AFTER +- AGGREGATE - ALL - ALTER +- ANALYZE - AND +- APPS - AS - ASC +- AT_ONCE - ATTACH ### B +- BALANCE - BEFORE - BEGIN - BETWEEN @@ -34,19 +40,27 @@ description: TDengine 保留关键字的详细列表 - BITNOT - BITOR - BLOCKS +- BNODE +- BNODES - BOOL +- BUFFER +- BUFSIZE - BY ### C - CACHE -- CACHELAST +- CACHEMODEL +- CACHESIZE - CASCADE +- CAST - CHANGE +- CLIENT_VERSION - CLUSTER - COLON - COLUMN - COMMA +- COMMENT - COMP - COMPACT - CONCAT @@ -54,15 +68,18 @@ description: TDengine 保留关键字的详细列表 - CONNECTION - CONNECTIONS - CONNS +- CONSUMER +- CONSUMERS +- CONTAINS - COPY +- COUNT - CREATE -- CTIME +- CURRENT_USER ### D - DATABASE - DATABASES -- DAYS - DBS - DEFERRED - DELETE @@ -71,18 +88,23 @@ description: TDengine 保留关键字的详细列表 - DESCRIBE - DETACH - DISTINCT +- DISTRIBUTED - DIVIDE - DNODE - DNODES - DOT - DOUBLE - DROP +- DURATION ### E +- EACH +- ENABLE - END -- EQ +- EVERY - EXISTS +- EXPIRED - EXPLAIN ### F @@ -90,18 +112,20 @@ description: TDengine 保留关键字的详细列表 - FAIL - FILE - FILL +- FIRST - FLOAT +- FLUSH - FOR - FROM -- FSYNC +- FUNCTION +- FUNCTIONS ### G -- GE - GLOB +- 
GRANT - GRANTS - GROUP -- GT ### H @@ -112,15 +136,18 @@ description: TDengine 保留关键字的详细列表 - ID - IF - IGNORE -- IMMEDIA +- IMMEDIATE - IMPORT - IN -- INITIAL +- INDEX +- INDEXES +- INITIALLY +- INNER - INSERT - INSTEAD - INT - INTEGER -- INTERVA +- INTERVAL - INTO - IS - ISNULL @@ -128,6 +155,7 @@ description: TDengine 保留关键字的详细列表 ### J - JOIN +- JSON ### K @@ -137,46 +165,57 @@ description: TDengine 保留关键字的详细列表 ### L -- LE +- LAST +- LAST_ROW +- LICENCES - LIKE - LIMIT - LINEAR - LOCAL -- LP -- LSHIFT -- LT ### M - MATCH +- MAX_DELAY - MAXROWS +- MERGE +- META - MINROWS - MINUS +- MNODE - MNODES - MODIFY - MODULES ### N -- NE +- NCHAR +- NEXT +- NMATCH - NONE - NOT - NOTNULL - NOW - NULL +- NULLS ### O - OF - OFFSET +- ON - OR - ORDER +- OUTPUTTYPE ### P -- PARTITION +- PAGES +- PAGESIZE +- PARTITIONS - PASS - PLUS +- PORT - PPS - PRECISION - PREV @@ -184,47 +223,63 @@ description: TDengine 保留关键字的详细列表 ### Q +- QNODE +- QNODES - QTIME -- QUERIE +- QUERIES - QUERY -- QUORUM ### R - RAISE -- REM +- RANGE +- RATIO +- READ +- REDISTRIBUTE +- RENAME - REPLACE - REPLICA - RESET -- RESTRIC +- RESTRICT +- RETENTIONS +- REVOKE +- ROLLUP - ROW -- RP -- RSHIFT ### S +- SCHEMALESS - SCORES - SELECT - SEMI +- SERVER_STATUS +- SERVER_VERSION - SESSION - SET - SHOW -- SLASH +- SINGLE_STABLE - SLIDING - SLIMIT -- SMALLIN +- SMA +- SMALLINT +- SNODE +- SNODES - SOFFSET -- STable -- STableS +- SPLIT +- STABLE +- STABLES - STAR - STATE -- STATEMEN -- STATE_WI +- STATE_WINDOW +- STATEMENT - STORAGE - STREAM - STREAMS +- STRICT - STRING +- SUBSCRIPTIONS - SYNCDB +- SYSINFO ### T @@ -235,20 +290,24 @@ description: TDengine 保留关键字的详细列表 - TBNAME - TIMES - TIMESTAMP +- TIMEZONE - TINYINT +- TO +- TODAY - TOPIC - TOPICS +- TRANSACTION +- TRANSACTIONS - TRIGGER +- TRIM - TSERIES - TTL ### U -- UMINUS - UNION - UNSIGNED - UPDATE -- UPLUS - USE - USER - USERS @@ -258,8 +317,11 @@ description: TDengine 保留关键字的详细列表 - VALUE - VALUES +- VARCHAR - VARIABLE - VARIABLES +- VERBOSE +- VGROUP - VGROUPS - VIEW - VNODES @@ -267,14 +329,25 @@ description: TDengine 保留关键字的详细列表 ### W - WAL +- WAL_FSYNC_PERIOD +- WAL_LEVEL +- WAL_RETENTION_PERIOD +- WAL_RETENTION_SIZE +- WAL_ROLL_PERIOD +- WAL_SEGMENT_SIZE +- WATERMARK - WHERE +- WINDOW_CLOSE +- WITH +- WRITE ### \_ - \_C0 -- \_QSTART -- \_QSTOP - \_QDURATION -- \_WSTART -- \_WSTOP +- \_QEND +- \_QSTART +- \_ROWTS - \_WDURATION +- \_WEND +- \_WSTART diff --git a/docs/zh/14-reference/14-taosKeeper.md b/docs/zh/14-reference/14-taosKeeper.md index ae0a496f03..7780dc2fe9 100644 --- a/docs/zh/14-reference/14-taosKeeper.md +++ b/docs/zh/14-reference/14-taosKeeper.md @@ -79,7 +79,7 @@ password = "taosdata" # 需要被监控的 taosAdapter [taosAdapter] -address = ["127.0.0.1:6041","192.168.1.95:6041"] +address = ["127.0.0.1:6041"] [metrics] # 监控指标前缀 @@ -92,7 +92,7 @@ cluster = "production" database = "log" # 指定需要监控的普通表 -tables = ["normal_table"] +tables = [] ``` ### 获取监控指标 @@ -141,4 +141,4 @@ taos_cluster_info_dnodes_total{cluster_id="5981392874047724755"} 1 # HELP taos_cluster_info_first_ep # TYPE taos_cluster_info_first_ep gauge taos_cluster_info_first_ep{cluster_id="5981392874047724755",value="hlb:6030"} 1 -``` \ No newline at end of file +``` diff --git a/docs/zh/21-tdinternal/01-arch.md b/docs/zh/21-tdinternal/01-arch.md index 704524fd21..b128b9d438 100644 --- a/docs/zh/21-tdinternal/01-arch.md +++ b/docs/zh/21-tdinternal/01-arch.md @@ -26,7 +26,7 @@ TDengine 分布式架构的逻辑结构图如下: **管理节点(mnode):** 一个虚拟的逻辑单元,负责所有数据节点运行状态的监控和维护,以及节点之间的负载均衡(图中 M)。同时,管理节点也负责元数据(包括用户、数据库、超级表等)的存储和管理,因此也称为 Meta Node。TDengine 
集群中可配置多个(最多不超过 3 个)mnode,它们自动构建成为一个虚拟管理节点组(图中 M1,M2,M3)。mnode 支持多副本,采用 RAFT 一致性协议,保证系统的高可用与高可靠,任何数据更新操作只能在 Leader 上进行。mnode 集群的第一个节点在集群部署时自动完成,其他节点的创建与删除由用户通过 SQL 命令完成。每个 dnode 上至多有一个 mnode,由所属的数据节点的 EP 来唯一标识。每个 dnode 通过内部消息交互自动获取整个集群中所有 mnode 所在的 dnode 的 EP。 -**弹性计算节点(qnode):** 一个虚拟的逻辑单元,运行查询计算任务,也包括基于系统表来实现的 show 命令(图中 Q)。集群中可配置多个 qnode,在整个集群内部共享使用(图中 Q1,Q2,Q3)。qnode 不与具体的 DB 绑定,即一个 qnode 可以同时执行多个 DB 的查询任务。每个 dnode 上至多有一个 qnode,由所属的数据节点的 EP 来唯一标识。客户端通过与 mnode 交互,获取可用的 qnode 列表,当没有可用的 qnode 时,计算任务在 vnode 中执行。 +**计算节点(qnode):** 一个虚拟的逻辑单元,运行查询计算任务,也包括基于系统表来实现的 show 命令(图中 Q)。集群中可配置多个 qnode,在整个集群内部共享使用(图中 Q1,Q2,Q3)。qnode 不与具体的 DB 绑定,即一个 qnode 可以同时执行多个 DB 的查询任务。每个 dnode 上至多有一个 qnode,由所属的数据节点的 EP 来唯一标识。客户端通过与 mnode 交互,获取可用的 qnode 列表,当没有可用的 qnode 时,计算任务在 vnode 中执行。当一个查询执行时,依赖执行计划,调度器会安排一个或多个 qnode 来一起执行。qnode 能从 vnode 获取数据,也可以将自己的计算结果发给其他 qnode 做进一步的处理。通过引入独立的计算节点,TDengine 实现了存储和计算分离。 **流计算节点(snode):** 一个虚拟的逻辑单元,只运行流计算任务(图中 S)。集群中可配置多个 snode,在整个集群内部共享使用(图中 S1,S2,S3)。snode 不与具体的 stream 绑定,即一个 snode 可以同时执行多个 stream 的计算任务。每个 dnode 上至多有一个 snode,由所属的数据节点的 EP 来唯一标识。由 mnode 调度可用的 snode 完成流计算任务,当没有可用的 snode 时,流计算任务在 vnode 中执行。 diff --git a/include/common/tcommon.h b/include/common/tcommon.h index 37db574d98..ba4baa0130 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -45,8 +45,8 @@ enum { // clang-format on typedef struct { - TSKEY ts; uint64_t groupId; + TSKEY ts; } SWinKey; static inline int SWinKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2) { @@ -68,6 +68,37 @@ static inline int SWinKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, i return 0; } +typedef struct { + uint64_t groupId; + TSKEY ts; + int32_t exprIdx; +} STupleKey; + +static inline int STupleKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2) { + STupleKey* pTuple1 = (STupleKey*)pKey1; + STupleKey* pTuple2 = (STupleKey*)pKey2; + + if (pTuple1->groupId > pTuple2->groupId) { + return 1; + } else if (pTuple1->groupId < pTuple2->groupId) { + return -1; + } + + if (pTuple1->ts > pTuple2->ts) { + return 1; + } else if (pTuple1->ts < pTuple2->ts) { + return -1; + } + + if (pTuple1->exprIdx > pTuple2->exprIdx) { + return 1; + } else if (pTuple1->exprIdx < pTuple2->exprIdx) { + return -1; + } + + return 0; +} + enum { TMQ_MSG_TYPE__DUMMY = 0, TMQ_MSG_TYPE__POLL_RSP, diff --git a/include/common/tdataformat.h b/include/common/tdataformat.h index df16f4f0ab..5b49560175 100644 --- a/include/common/tdataformat.h +++ b/include/common/tdataformat.h @@ -36,8 +36,13 @@ typedef struct STSRow2 STSRow2; typedef struct STSRowBuilder STSRowBuilder; typedef struct STagVal STagVal; typedef struct STag STag; +typedef struct SColData SColData; -// bitmap +#define HAS_NONE ((uint8_t)0x1) +#define HAS_NULL ((uint8_t)0x2) +#define HAS_VALUE ((uint8_t)0x4) + +// bitmap ================================ const static uint8_t BIT2_MAP[4][4] = {{0b00000000, 0b00000001, 0b00000010, 0}, {0b00000000, 0b00000100, 0b00001000, 2}, {0b00000000, 0b00010000, 0b00100000, 4}, @@ -51,21 +56,21 @@ const static uint8_t BIT2_MAP[4][4] = {{0b00000000, 0b00000001, 0b00000010, 0}, #define SET_BIT2(p, i, v) ((p)[(i) >> 2] = (p)[(i) >> 2] & N1(BIT2_MAP[(i)&3][3]) | BIT2_MAP[(i)&3][(v)]) #define GET_BIT2(p, i) (((p)[(i) >> 2] >> BIT2_MAP[(i)&3][3]) & ((uint8_t)3)) -// STSchema +// STSchema ================================ int32_t tTSchemaCreate(int32_t sver, SSchema *pSchema, int32_t nCols, STSchema **ppTSchema); void tTSchemaDestroy(STSchema *pTSchema); -// SValue +// SValue ================================ int32_t 
tPutValue(uint8_t *p, SValue *pValue, int8_t type); int32_t tGetValue(uint8_t *p, SValue *pValue, int8_t type); int tValueCmprFn(const SValue *pValue1, const SValue *pValue2, int8_t type); -// SColVal +// SColVal ================================ #define COL_VAL_NONE(CID, TYPE) ((SColVal){.cid = (CID), .type = (TYPE), .isNone = 1}) #define COL_VAL_NULL(CID, TYPE) ((SColVal){.cid = (CID), .type = (TYPE), .isNull = 1}) #define COL_VAL_VALUE(CID, TYPE, V) ((SColVal){.cid = (CID), .type = (TYPE), .value = (V)}) -// STSRow2 +// STSRow2 ================================ #define TSROW_LEN(PROW, V) tGetI32v((uint8_t *)(PROW)->data, (V) ? &(V) : NULL) #define TSROW_SVER(PROW, V) tGetI32v((PROW)->data + TSROW_LEN(PROW, NULL), (V) ? &(V) : NULL) @@ -77,7 +82,7 @@ int32_t tTSRowToArray(STSRow2 *pRow, STSchema *pTSchema, SArray **ppArray); int32_t tPutTSRow(uint8_t *p, STSRow2 *pRow); int32_t tGetTSRow(uint8_t *p, STSRow2 **ppRow); -// STSRowBuilder +// STSRowBuilder ================================ #define tsRowBuilderInit() ((STSRowBuilder){0}) #define tsRowBuilderClear(B) \ do { \ @@ -86,7 +91,7 @@ int32_t tGetTSRow(uint8_t *p, STSRow2 **ppRow); } \ } while (0) -// STag +// STag ================================ int32_t tTagNew(SArray *pArray, int32_t version, int8_t isJson, STag **ppTag); void tTagFree(STag *pTag); bool tTagIsJson(const void *pTag); @@ -100,7 +105,16 @@ void tTagSetCid(const STag *pTag, int16_t iTag, int16_t cid); void debugPrintSTag(STag *pTag, const char *tag, int32_t ln); // TODO: remove int32_t parseJsontoTagData(const char *json, SArray *pTagVals, STag **ppTag, void *pMsgBuf); -// STRUCT ================= +// SColData ================================ +void tColDataDestroy(void *ph); +void tColDataInit(SColData *pColData, int16_t cid, int8_t type, int8_t smaOn); +void tColDataClear(SColData *pColData); +int32_t tColDataAppendValue(SColData *pColData, SColVal *pColVal); +void tColDataGetValue(SColData *pColData, int32_t iVal, SColVal *pColVal); +uint8_t tColDataGetBitValue(SColData *pColData, int32_t iVal); +int32_t tColDataCopy(SColData *pColDataSrc, SColData *pColDataDest); + +// STRUCT ================================ struct STColumn { col_id_t colId; int8_t type; @@ -166,6 +180,18 @@ struct SColVal { SValue value; }; +struct SColData { + int16_t cid; + int8_t type; + int8_t smaOn; + int32_t nVal; + uint8_t flag; + uint8_t *pBitMap; + int32_t *aOffset; + int32_t nData; + uint8_t *pData; +}; + #pragma pack(push, 1) struct STagVal { // char colName[TSDB_COL_NAME_LEN]; // only used for tmq_get_meta diff --git a/include/libs/function/function.h b/include/libs/function/function.h index 3f26eee86a..65ddc180d6 100644 --- a/include/libs/function/function.h +++ b/include/libs/function/function.h @@ -34,66 +34,69 @@ typedef struct SFuncExecEnv { int32_t calcMemSize; } SFuncExecEnv; -typedef bool (*FExecGetEnv)(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv); -typedef bool (*FExecInit)(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo* pResultCellInfo); +typedef bool (*FExecGetEnv)(struct SFunctionNode *pFunc, SFuncExecEnv *pEnv); +typedef bool (*FExecInit)(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo *pResultCellInfo); typedef int32_t (*FExecProcess)(struct SqlFunctionCtx *pCtx); -typedef int32_t (*FExecFinalize)(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock); +typedef int32_t (*FExecFinalize)(struct SqlFunctionCtx *pCtx, SSDataBlock *pBlock); typedef int32_t (*FScalarExecProcess)(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); typedef int32_t 
(*FExecCombine)(struct SqlFunctionCtx *pDestCtx, struct SqlFunctionCtx *pSourceCtx); typedef struct SScalarFuncExecFuncs { - FExecGetEnv getEnv; + FExecGetEnv getEnv; FScalarExecProcess process; } SScalarFuncExecFuncs; typedef struct SFuncExecFuncs { - FExecGetEnv getEnv; - FExecInit init; - FExecProcess process; + FExecGetEnv getEnv; + FExecInit init; + FExecProcess process; FExecFinalize finalize; - FExecCombine combine; + FExecCombine combine; } SFuncExecFuncs; -#define MAX_INTERVAL_TIME_WINDOW 1000000 // maximum allowed time windows in final results +#define MAX_INTERVAL_TIME_WINDOW 1000000 // maximum allowed time windows in final results #define TOP_BOTTOM_QUERY_LIMIT 100 #define FUNCTIONS_NAME_MAX_LENGTH 16 typedef struct SResultRowEntryInfo { - bool initialized:1; // output buffer has been initialized - bool complete:1; // query has completed - uint8_t isNullRes:6; // the result is null - uint16_t numOfRes; // num of output result in current buffer. NOT NULL RESULT + bool initialized : 1; // output buffer has been initialized + bool complete : 1; // query has completed + uint8_t isNullRes : 6; // the result is null + uint16_t numOfRes; // num of output result in current buffer. NOT NULL RESULT } SResultRowEntryInfo; // determine the real data need to calculated the result enum { - BLK_DATA_NOT_LOAD = 0x0, - BLK_DATA_SMA_LOAD = 0x1, + BLK_DATA_NOT_LOAD = 0x0, + BLK_DATA_SMA_LOAD = 0x1, BLK_DATA_DATA_LOAD = 0x3, - BLK_DATA_FILTEROUT = 0x4, // discard current data block since it is not qualified for filter + BLK_DATA_FILTEROUT = 0x4, // discard current data block since it is not qualified for filter }; enum { - MAIN_SCAN = 0x0u, - REVERSE_SCAN = 0x1u, // todo remove it - REPEAT_SCAN = 0x2u, //repeat scan belongs to the master scan - MERGE_STAGE = 0x20u, + MAIN_SCAN = 0x0u, + REVERSE_SCAN = 0x1u, // todo remove it + REPEAT_SCAN = 0x2u, // repeat scan belongs to the master scan + MERGE_STAGE = 0x20u, }; typedef struct SPoint1 { - int64_t key; - union{double val; char* ptr;}; + int64_t key; + union { + double val; + char *ptr; + }; } SPoint1; struct SqlFunctionCtx; struct SResultRowEntryInfo; -//for selectivity query, the corresponding tag value is assigned if the data is qualified +// for selectivity query, the corresponding tag value is assigned if the data is qualified typedef struct SSubsidiaryResInfo { - int16_t num; - int32_t rowLen; - char* buf; // serialize data buffer + int16_t num; + int32_t rowLen; + char *buf; // serialize data buffer struct SqlFunctionCtx **pCtx; } SSubsidiaryResInfo; @@ -106,69 +109,70 @@ typedef struct SResultDataInfo { } SResultDataInfo; #define GET_RES_INFO(ctx) ((ctx)->resultInfo) -#define GET_ROWCELL_INTERBUF(_c) ((void*) ((char*)(_c) + sizeof(SResultRowEntryInfo))) +#define GET_ROWCELL_INTERBUF(_c) ((void *)((char *)(_c) + sizeof(SResultRowEntryInfo))) typedef struct SInputColumnInfoData { - int32_t totalRows; // total rows in current columnar data - int32_t startRowIndex; // handle started row index - int32_t numOfRows; // the number of rows needs to be handled - int32_t numOfInputCols; // PTS is not included - bool colDataAggIsSet;// if agg is set or not - SColumnInfoData *pPTS; // primary timestamp column + int32_t totalRows; // total rows in current columnar data + int32_t startRowIndex; // handle started row index + int32_t numOfRows; // the number of rows needs to be handled + int32_t numOfInputCols; // PTS is not included + bool colDataAggIsSet; // if agg is set or not + SColumnInfoData *pPTS; // primary timestamp column SColumnInfoData **pData; 
SColumnDataAgg **pColumnDataAgg; - uint64_t uid; // table uid, used to set the tag value when building the final query result for selectivity functions. + uint64_t uid; // table uid, used to set the tag value when building the final query result for selectivity functions. } SInputColumnInfoData; typedef struct SSerializeDataHandle { - struct SDiskbasedBuf* pBuf; + struct SDiskbasedBuf *pBuf; int32_t currentPage; + void *pState; } SSerializeDataHandle; // sql function runtime context typedef struct SqlFunctionCtx { - SInputColumnInfoData input; - SResultDataInfo resDataInfo; - uint32_t order; // data block scanner order: asc|desc - uint8_t scanFlag; // record current running step, default: 0 - int16_t functionId; // function id - char *pOutput; // final result output buffer, point to sdata->data - int32_t numOfParams; - SFunctParam *param; // input parameter, e.g., top(k, 20), the number of results for top query is kept in param - SColumnInfoData *pTsOutput; // corresponding output buffer for timestamp of each result, e.g., top/bottom*/ - int32_t offset; - struct SResultRowEntryInfo *resultInfo; - SSubsidiaryResInfo subsidiaries; - SPoint1 start; - SPoint1 end; - SFuncExecFuncs fpSet; - SScalarFuncExecFuncs sfp; - struct SExprInfo *pExpr; - struct SSDataBlock *pSrcBlock; - struct SSDataBlock *pDstBlock; // used by indefinite rows function to set selectivity - SSerializeDataHandle saveHandle; - bool isStream; + SInputColumnInfoData input; + SResultDataInfo resDataInfo; + uint32_t order; // data block scanner order: asc|desc + uint8_t scanFlag; // record current running step, default: 0 + int16_t functionId; // function id + char *pOutput; // final result output buffer, point to sdata->data + int32_t numOfParams; + SFunctParam *param; // input parameter, e.g., top(k, 20), the number of results for top query is kept in param + SColumnInfoData *pTsOutput; // corresponding output buffer for timestamp of each result, e.g., top/bottom*/ + int32_t offset; + struct SResultRowEntryInfo *resultInfo; + SSubsidiaryResInfo subsidiaries; + SPoint1 start; + SPoint1 end; + SFuncExecFuncs fpSet; + SScalarFuncExecFuncs sfp; + struct SExprInfo *pExpr; + struct SSDataBlock *pSrcBlock; + struct SSDataBlock *pDstBlock; // used by indefinite rows function to set selectivity + SSerializeDataHandle saveHandle; + bool isStream; - char udfName[TSDB_FUNC_NAME_LEN]; + char udfName[TSDB_FUNC_NAME_LEN]; } SqlFunctionCtx; enum { - TEXPR_BINARYEXPR_NODE= 0x1, + TEXPR_BINARYEXPR_NODE = 0x1, TEXPR_UNARYEXPR_NODE = 0x2, }; typedef struct tExprNode { int32_t nodeType; union { - struct {// function node - char functionName[FUNCTIONS_NAME_MAX_LENGTH]; // todo refactor - int32_t functionId; - int32_t num; - struct SFunctionNode *pFunctNode; + struct { // function node + char functionName[FUNCTIONS_NAME_MAX_LENGTH]; // todo refactor + int32_t functionId; + int32_t num; + struct SFunctionNode *pFunctNode; } _function; struct { - struct SNode* pRootNode; + struct SNode *pRootNode; } _optrRoot; }; } tExprNode; @@ -182,17 +186,18 @@ struct SScalarParam { int32_t numOfRows; }; -void cleanupResultRowEntry(struct SResultRowEntryInfo* pCell); -int32_t getNumOfResult(SqlFunctionCtx* pCtx, int32_t num, SSDataBlock* pResBlock); -bool isRowEntryCompleted(struct SResultRowEntryInfo* pEntry); -bool isRowEntryInitialized(struct SResultRowEntryInfo* pEntry); +void cleanupResultRowEntry(struct SResultRowEntryInfo *pCell); +int32_t getNumOfResult(SqlFunctionCtx *pCtx, int32_t num, SSDataBlock *pResBlock); +bool isRowEntryCompleted(struct 
SResultRowEntryInfo *pEntry); +bool isRowEntryInitialized(struct SResultRowEntryInfo *pEntry); typedef struct SPoint { int64_t key; - void * val; + void *val; } SPoint; -int32_t taosGetLinearInterpolationVal(SPoint* point, int32_t outputType, SPoint* point1, SPoint* point2, int32_t inputType); +int32_t taosGetLinearInterpolationVal(SPoint *point, int32_t outputType, SPoint *point1, SPoint *point2, + int32_t inputType); /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // udf api diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 8aeb86102a..1e86a04775 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -151,6 +151,8 @@ typedef struct SVnodeModifyLogicNode { SArray* pDataBlocks; SVgDataBlocks* pVgDataBlocks; SNode* pAffectedRows; // SColumnNode + SNode* pStartTs; // SColumnNode + SNode* pEndTs; // SColumnNode uint64_t tableId; uint64_t stableId; int8_t tableType; // table type @@ -525,6 +527,8 @@ typedef struct SDataDeleterNode { char tsColName[TSDB_COL_NAME_LEN]; STimeWindow deleteTimeRange; SNode* pAffectedRows; + SNode* pStartTs; + SNode* pEndTs; } SDataDeleterNode; typedef struct SSubplan { diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h index 3a1eaf289e..e90c994e8f 100644 --- a/include/libs/nodes/querynodes.h +++ b/include/libs/nodes/querynodes.h @@ -315,6 +315,8 @@ typedef struct SDeleteStmt { SNode* pFromTable; // FROM clause SNode* pWhere; // WHERE clause SNode* pCountFunc; // count the number of rows affected + SNode* pFirstFunc; // the start timestamp when the data was actually deleted + SNode* pLastFunc; // the end timestamp when the data was actually deleted SNode* pTagCond; // pWhere divided into pTagCond and timeRange STimeWindow timeRange; uint8_t precision; diff --git a/include/libs/stream/streamState.h b/include/libs/stream/streamState.h new file mode 100644 index 0000000000..df19544396 --- /dev/null +++ b/include/libs/stream/streamState.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "tdatablock.h" +#include "tdbInt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef _STREAM_STATE_H_ +#define _STREAM_STATE_H_ + +typedef struct SStreamTask SStreamTask; + +// incremental state storage +typedef struct { + SStreamTask* pOwner; + TDB* db; + TTB* pStateDb; + TTB* pFuncStateDb; + TXN txn; +} SStreamState; + +SStreamState* streamStateOpen(char* path, SStreamTask* pTask); +void streamStateClose(SStreamState* pState); +int32_t streamStateBegin(SStreamState* pState); +int32_t streamStateCommit(SStreamState* pState); +int32_t streamStateAbort(SStreamState* pState); + +typedef struct { + TBC* pCur; +} SStreamStateCur; + +#if 1 +int32_t streamStateFuncPut(SStreamState* pState, const STupleKey* key, const void* value, int32_t vLen); +int32_t streamStateFuncGet(SStreamState* pState, const STupleKey* key, void** pVal, int32_t* pVLen); +int32_t streamStateFuncDel(SStreamState* pState, const STupleKey* key); + +int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen); +int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen); +int32_t streamStateDel(SStreamState* pState, const SWinKey* key); +int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen); +int32_t streamStateReleaseBuf(SStreamState* pState, const SWinKey* key, void* pVal); +void streamFreeVal(void* val); + +SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key); +SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key); +SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const SWinKey* key); +void streamStateFreeCur(SStreamStateCur* pCur); + +int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); + +int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur); +int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur); + +int32_t streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur); +int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur); + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* ifndef _STREAM_STATE_H_ */ diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index afd8de6b1c..554d66d621 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -16,6 +16,7 @@ #include "executor.h" #include "os.h" #include "query.h" +#include "streamState.h" #include "tdatablock.h" #include "tdbInt.h" #include "tmsg.h" @@ -263,14 +264,6 @@ typedef struct { SArray* checkpointVer; } SStreamRecoveringState; -// incremental state storage -typedef struct { - SStreamTask* pOwner; - TDB* db; - TTB* pStateDb; - TXN txn; -} SStreamState; - typedef struct SStreamTask { int64_t streamId; int32_t taskId; @@ -540,39 +533,6 @@ int32_t streamMetaCommit(SStreamMeta* pMeta); int32_t streamMetaRollBack(SStreamMeta* pMeta); int32_t streamLoadTasks(SStreamMeta* pMeta); -SStreamState* streamStateOpen(char* path, SStreamTask* pTask); -void streamStateClose(SStreamState* pState); -int32_t streamStateBegin(SStreamState* pState); -int32_t streamStateCommit(SStreamState* pState); -int32_t streamStateAbort(SStreamState* pState); - -typedef struct { - TBC* pCur; -} SStreamStateCur; - -#if 1 -int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen); -int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen); -int32_t 
streamStateDel(SStreamState* pState, const SWinKey* key); -int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen); -int32_t streamStateReleaseBuf(SStreamState* pState, const SWinKey* key, void* pVal); -void streamFreeVal(void* val); - -SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key); -SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key); -SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const SWinKey* key); -void streamStateFreeCur(SStreamStateCur* pCur); - -int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); - -int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur); -int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur); - -int32_t streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur); -int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur); - -#endif - #ifdef __cplusplus } #endif
diff --git a/include/libs/tfs/tfs.h b/include/libs/tfs/tfs.h index 1dc154ce48..6f71fd4cd0 100644 --- a/include/libs/tfs/tfs.h +++ b/include/libs/tfs/tfs.h
@@ -69,6 +69,14 @@ void tfsUpdateSize(STfs *pTfs); */ SDiskSize tfsGetSize(STfs *pTfs); +/** + * @brief Get level of multi-tier storage. + * + * @param pTfs + * @return int32_t + */ +int32_t tfsGetLevel(STfs *pTfs); + /** * @brief Allocate an existing available tier level from fs. *
diff --git a/include/util/taoserror.h b/include/util/taoserror.h index d16a599811..01d42d6950 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h
@@ -285,6 +285,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_MND_TOPIC_SUBSCRIBED TAOS_DEF_ERROR_CODE(0, 0x03EB) #define TSDB_CODE_MND_CGROUP_USED TAOS_DEF_ERROR_CODE(0, 0x03EC) #define TSDB_CODE_MND_TOPIC_MUST_BE_DELETED TAOS_DEF_ERROR_CODE(0, 0x03ED) +#define TSDB_CODE_MND_IN_REBALANCE TAOS_DEF_ERROR_CODE(0, 0x03EF) // mnode-stream #define TSDB_CODE_MND_STREAM_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x03F0)
@@ -577,6 +578,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_FUNC_FUNTION_PARA_TYPE TAOS_DEF_ERROR_CODE(0, 0x2802) #define TSDB_CODE_FUNC_FUNTION_PARA_VALUE TAOS_DEF_ERROR_CODE(0, 0x2803) #define TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION TAOS_DEF_ERROR_CODE(0, 0x2804) +#define TSDB_CODE_FUNC_DUP_TIMESTAMP TAOS_DEF_ERROR_CODE(0, 0x2805) //udf #define TSDB_CODE_UDF_STOPPING TAOS_DEF_ERROR_CODE(0, 0x2901)
diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index f2f72acafa..5088e9bdac 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh
@@ -840,14 +840,20 @@ function updateProduct() { echo echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${cfg_install_dir}/${configFile}" - echo -e "${GREEN_DARK}To configure Adapter (if has) ${NC}: edit ${cfg_install_dir}/${adapterName}.toml" + [ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To configure Taos Adapter ${NC}: edit ${configDir}/taosadapter.toml" if ((${service_mod} == 0)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}" + [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}" elif ((${service_mod} == 1)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}" + [ -f ${service_config_dir}/taosadapter.service ] && 
[ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}service taosadapter start${NC}" else - echo -e "${GREEN_DARK}To start Adapter (if has)${NC}: ${adapterName} &${NC}" echo -e "${GREEN_DARK}To start ${productName} ${NC}: ./${serverName}${NC}" + [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: taosadapter &${NC}" fi if [ ${openresty_work} = 'true' ]; then
@@ -926,14 +932,20 @@ function installProduct() { # Ask if to start the service echo echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${cfg_install_dir}/${configFile}" - echo -e "${GREEN_DARK}To configure ${adapterName} (if has) ${NC}: edit ${cfg_install_dir}/${adapterName}.toml" + [ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To configure Taos Adapter ${NC}: edit ${configDir}/taosadapter.toml" if ((${service_mod} == 0)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}" + [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}" elif ((${service_mod} == 1)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}" + [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}service taosadapter start${NC}" else - echo -e "${GREEN_DARK}To start Adapter (if has)${NC}: ${adapterName} &${NC}" echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}" + [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: taosadapter &${NC}" fi if [ ! 
-z "$firstEp" ]; then
diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index a6dceeeaad..d1e7a222cc 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh
@@ -609,14 +609,20 @@ function update_TDengine() { echo echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}" - echo -e "${GREEN_DARK}To configure Taos Adapter (if has) ${NC}: edit ${configDir}/taosadapter.toml" + [ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To configure Taos Adapter ${NC}: edit ${configDir}/taosadapter.toml" if ((${service_mod} == 0)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}" + [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}" elif ((${service_mod} == 1)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}" + [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}service taosadapter start${NC}" else - echo -e "${GREEN_DARK}To start Taos Adapter (if has)${NC}: taosadapter &${NC}" echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}" + [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: taosadapter &${NC}" fi echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName}${NC} in shell${NC}"
@@ -649,14 +655,20 @@ function install_TDengine() { echo -e "\033[44;32;1m${productName} is installed successfully!${NC}" echo echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}" - echo -e "${GREEN_DARK}To configure taosadapter (if has) ${NC}: edit ${configDir}/taosadapter.toml" + [ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To configure Taos Adapter ${NC}: edit ${configDir}/taosadapter.toml" if ((${service_mod} == 0)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}" + [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}" elif ((${service_mod} == 1)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}" + [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}service taosadapter start${NC}" else - echo -e "${GREEN_DARK}To start Taos Adapter (if has)${NC}: taosadapter &${NC}" echo -e "${GREEN_DARK}To start ${productName} ${NC}: ./${serverName}${NC}" + [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: taosadapter &${NC}" fi echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName}${NC} in shell${NC}"
diff --git a/source/client/src/clientHb.c b/source/client/src/clientHb.c index 84a827ed78..e484e3c59c 100644 --- a/source/client/src/clientHb.c +++ b/source/client/src/clientHb.c
@@ -414,6 +414,9 @@ int32_t hbGetQueryBasicInfo(SClientHbKey *connKey, SClientHbReq *req) { int32_t code = hbBuildQueryDesc(hbBasic, pTscObj); if (code) { 
releaseTscObj(connKey->tscRid); + if (hbBasic->queryDesc) { + taosArrayDestroyEx(hbBasic->queryDesc, tFreeClientHbQueryDesc); + } taosMemoryFree(hbBasic); return code; } diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 39b4b069a0..1073ea59ef 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -854,6 +854,7 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) { pRequest->metric.resultReady = taosGetTimestampUs(); if (pResult) { + destroyQueryExecRes(&pRequest->body.resInfo.execRes); memcpy(&pRequest->body.resInfo.execRes, pResult, sizeof(*pResult)); } @@ -1384,6 +1385,7 @@ int32_t doProcessMsgFromServer(void* param) { pSendInfo->fp(pSendInfo->param, &buf, pMsg->code); rpcFreeCont(pMsg->pCont); destroySendMsgInfo(pSendInfo); + taosMemoryFree(arg); return TSDB_CODE_SUCCESS; } diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 3086078080..73636e7372 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -870,11 +870,13 @@ static void fetchCallback(void *pResult, void *param, int32_t code) { if (code != TSDB_CODE_SUCCESS) { pRequest->code = code; + taosMemoryFreeClear(pResultInfo->pData); pRequest->body.fetchFp(pRequest->body.param, pRequest, 0); return; } if (pRequest->code != TSDB_CODE_SUCCESS) { + taosMemoryFreeClear(pResultInfo->pData); pRequest->body.fetchFp(pRequest->body.param, pRequest, 0); return; } diff --git a/source/common/src/tdataformat.c b/source/common/src/tdataformat.c index b40f449a05..15a369fe40 100644 --- a/source/common/src/tdataformat.c +++ b/source/common/src/tdataformat.c @@ -15,6 +15,7 @@ #define _DEFAULT_SOURCE #include "tdataformat.h" +#include "tRealloc.h" #include "tcoding.h" #include "tdatablock.h" #include "tlog.h" @@ -680,7 +681,7 @@ int32_t tGetTSRow(uint8_t *p, STSRow2 **ppRow) { return n; } -// STSchema +// STSchema ======================================== int32_t tTSchemaCreate(int32_t sver, SSchema *pSchema, int32_t ncols, STSchema **ppTSchema) { *ppTSchema = (STSchema *)taosMemoryMalloc(sizeof(STSchema) + sizeof(STColumn) * ncols); if (*ppTSchema == NULL) { @@ -720,9 +721,7 @@ void tTSchemaDestroy(STSchema *pTSchema) { if (pTSchema) taosMemoryFree(pTSchema); } -// STSRowBuilder - -// STag +// STag ======================================== static int tTagValCmprFn(const void *p1, const void *p2) { if (((STagVal *)p1)->cid < ((STagVal *)p2)->cid) { return -1; @@ -1172,4 +1171,495 @@ STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder) { return pSchema; } -#endif \ No newline at end of file +#endif + +// SColData ======================================== +void tColDataDestroy(void *ph) { + SColData *pColData = (SColData *)ph; + + tFree(pColData->pBitMap); + tFree((uint8_t *)pColData->aOffset); + tFree(pColData->pData); +} + +void tColDataInit(SColData *pColData, int16_t cid, int8_t type, int8_t smaOn) { + pColData->cid = cid; + pColData->type = type; + pColData->smaOn = smaOn; + tColDataClear(pColData); +} + +void tColDataClear(SColData *pColData) { + pColData->nVal = 0; + pColData->flag = 0; + pColData->nData = 0; +} + +static FORCE_INLINE int32_t tColDataPutValue(SColData *pColData, SColVal *pColVal) { + int32_t code = 0; + + if (IS_VAR_DATA_TYPE(pColData->type)) { + code = tRealloc((uint8_t **)(&pColData->aOffset), sizeof(int32_t) * (pColData->nVal + 1)); + if (code) goto _exit; + pColData->aOffset[pColData->nVal] = pColData->nData; + + if (pColVal->value.nData) { + code = tRealloc(&pColData->pData, 
pColData->nData + pColVal->value.nData); + if (code) goto _exit; + memcpy(pColData->pData + pColData->nData, pColVal->value.pData, pColVal->value.nData); + pColData->nData += pColVal->value.nData; + } + } else { + ASSERT(pColData->nData == tDataTypes[pColData->type].bytes * pColData->nVal); + code = tRealloc(&pColData->pData, pColData->nData + tDataTypes[pColData->type].bytes); + if (code) goto _exit; + pColData->nData += tPutValue(pColData->pData + pColData->nData, &pColVal->value, pColVal->type); + } + +_exit: + return code; +} +static FORCE_INLINE int32_t tColDataAppendValue0(SColData *pColData, SColVal *pColVal) { // 0 + int32_t code = 0; + + if (pColVal->isNone) { + pColData->flag = HAS_NONE; + } else if (pColVal->isNull) { + pColData->flag = HAS_NULL; + } else { + pColData->flag = HAS_VALUE; + code = tColDataPutValue(pColData, pColVal); + if (code) goto _exit; + } + pColData->nVal++; + +_exit: + return code; +} +static FORCE_INLINE int32_t tColDataAppendValue1(SColData *pColData, SColVal *pColVal) { // HAS_NONE + int32_t code = 0; + + if (!pColVal->isNone) { + int32_t nBit = BIT1_SIZE(pColData->nVal + 1); + + code = tRealloc(&pColData->pBitMap, nBit); + if (code) goto _exit; + + memset(pColData->pBitMap, 0, nBit); + SET_BIT1(pColData->pBitMap, pColData->nVal, 1); + + if (pColVal->isNull) { + pColData->flag |= HAS_NULL; + } else { + pColData->flag |= HAS_VALUE; + + if (pColData->nVal) { + if (IS_VAR_DATA_TYPE(pColData->type)) { + int32_t nOffset = sizeof(int32_t) * pColData->nVal; + code = tRealloc((uint8_t **)(&pColData->aOffset), nOffset); + if (code) goto _exit; + memset(pColData->aOffset, 0, nOffset); + } else { + pColData->nData = tDataTypes[pColData->type].bytes * pColData->nVal; + code = tRealloc(&pColData->pData, pColData->nData); + if (code) goto _exit; + memset(pColData->pData, 0, pColData->nData); + } + } + + code = tColDataPutValue(pColData, pColVal); + if (code) goto _exit; + } + } + pColData->nVal++; + +_exit: + return code; +} +static FORCE_INLINE int32_t tColDataAppendValue2(SColData *pColData, SColVal *pColVal) { // HAS_NULL + int32_t code = 0; + + if (!pColVal->isNull) { + int32_t nBit = BIT1_SIZE(pColData->nVal + 1); + code = tRealloc(&pColData->pBitMap, nBit); + if (code) goto _exit; + + if (pColVal->isNone) { + pColData->flag |= HAS_NONE; + + memset(pColData->pBitMap, 255, nBit); + SET_BIT1(pColData->pBitMap, pColData->nVal, 0); + } else { + pColData->flag |= HAS_VALUE; + + memset(pColData->pBitMap, 0, nBit); + SET_BIT1(pColData->pBitMap, pColData->nVal, 1); + + if (pColData->nVal) { + if (IS_VAR_DATA_TYPE(pColData->type)) { + int32_t nOffset = sizeof(int32_t) * pColData->nVal; + code = tRealloc((uint8_t **)(&pColData->aOffset), nOffset); + if (code) goto _exit; + memset(pColData->aOffset, 0, nOffset); + } else { + pColData->nData = tDataTypes[pColData->type].bytes * pColData->nVal; + code = tRealloc(&pColData->pData, pColData->nData); + if (code) goto _exit; + memset(pColData->pData, 0, pColData->nData); + } + } + + code = tColDataPutValue(pColData, pColVal); + if (code) goto _exit; + } + } + pColData->nVal++; + +_exit: + return code; +} +static FORCE_INLINE int32_t tColDataAppendValue3(SColData *pColData, SColVal *pColVal) { // HAS_NULL|HAS_NONE + int32_t code = 0; + + if (pColVal->isNone) { + code = tRealloc(&pColData->pBitMap, BIT1_SIZE(pColData->nVal + 1)); + if (code) goto _exit; + + SET_BIT1(pColData->pBitMap, pColData->nVal, 0); + } else if (pColVal->isNull) { + code = tRealloc(&pColData->pBitMap, BIT1_SIZE(pColData->nVal + 1)); + if (code) goto _exit; + + 
SET_BIT1(pColData->pBitMap, pColData->nVal, 1); + } else { + pColData->flag |= HAS_VALUE; + + uint8_t *pBitMap = NULL; + code = tRealloc(&pBitMap, BIT2_SIZE(pColData->nVal + 1)); + if (code) goto _exit; + + for (int32_t iVal = 0; iVal < pColData->nVal; iVal++) { + SET_BIT2(pBitMap, iVal, GET_BIT1(pColData->pBitMap, iVal)); + } + SET_BIT2(pBitMap, pColData->nVal, 2); + + tFree(pColData->pBitMap); + pColData->pBitMap = pBitMap; + + if (pColData->nVal) { + if (IS_VAR_DATA_TYPE(pColData->type)) { + int32_t nOffset = sizeof(int32_t) * pColData->nVal; + code = tRealloc((uint8_t **)(&pColData->aOffset), nOffset); + if (code) goto _exit; + memset(pColData->aOffset, 0, nOffset); + } else { + pColData->nData = tDataTypes[pColData->type].bytes * pColData->nVal; + code = tRealloc(&pColData->pData, pColData->nData); + if (code) goto _exit; + memset(pColData->pData, 0, pColData->nData); + } + } + + code = tColDataPutValue(pColData, pColVal); + if (code) goto _exit; + } + pColData->nVal++; + +_exit: + return code; +} +static FORCE_INLINE int32_t tColDataAppendValue4(SColData *pColData, SColVal *pColVal) { // HAS_VALUE + int32_t code = 0; + + if (pColVal->isNone || pColVal->isNull) { + if (pColVal->isNone) { + pColData->flag |= HAS_NONE; + } else { + pColData->flag |= HAS_NULL; + } + + int32_t nBit = BIT1_SIZE(pColData->nVal + 1); + code = tRealloc(&pColData->pBitMap, nBit); + if (code) goto _exit; + + memset(pColData->pBitMap, 255, nBit); + SET_BIT1(pColData->pBitMap, pColData->nVal, 0); + + code = tColDataPutValue(pColData, pColVal); + if (code) goto _exit; + } else { + code = tColDataPutValue(pColData, pColVal); + if (code) goto _exit; + } + pColData->nVal++; + +_exit: + return code; +} +static FORCE_INLINE int32_t tColDataAppendValue5(SColData *pColData, SColVal *pColVal) { // HAS_VALUE|HAS_NONE + int32_t code = 0; + + if (pColVal->isNull) { + pColData->flag |= HAS_NULL; + + uint8_t *pBitMap = NULL; + code = tRealloc(&pBitMap, BIT2_SIZE(pColData->nVal + 1)); + if (code) goto _exit; + + for (int32_t iVal = 0; iVal < pColData->nVal; iVal++) { + SET_BIT2(pBitMap, iVal, GET_BIT1(pColData->pBitMap, iVal) ? 2 : 0); + } + SET_BIT2(pBitMap, pColData->nVal, 1); + + tFree(pColData->pBitMap); + pColData->pBitMap = pBitMap; + } else { + code = tRealloc(&pColData->pBitMap, BIT1_SIZE(pColData->nVal + 1)); + if (code) goto _exit; + + if (pColVal->isNone) { + SET_BIT1(pColData->pBitMap, pColData->nVal, 0); + } else { + SET_BIT1(pColData->pBitMap, pColData->nVal, 1); + } + } + code = tColDataPutValue(pColData, pColVal); + if (code) goto _exit; + + pColData->nVal++; + +_exit: + return code; +} +static FORCE_INLINE int32_t tColDataAppendValue6(SColData *pColData, SColVal *pColVal) { // HAS_VALUE|HAS_NULL + int32_t code = 0; + + if (pColVal->isNone) { + pColData->flag |= HAS_NONE; + + uint8_t *pBitMap = NULL; + code = tRealloc(&pBitMap, BIT2_SIZE(pColData->nVal + 1)); + if (code) goto _exit; + + for (int32_t iVal = 0; iVal < pColData->nVal; iVal++) { + SET_BIT2(pBitMap, iVal, GET_BIT1(pColData->pBitMap, iVal) ? 
2 : 1); + } + SET_BIT2(pBitMap, pColData->nVal, 0); + + tFree(pColData->pBitMap); + pColData->pBitMap = pBitMap; + } else { + code = tRealloc(&pColData->pBitMap, BIT1_SIZE(pColData->nVal + 1)); + if (code) goto _exit; + + if (pColVal->isNull) { + SET_BIT1(pColData->pBitMap, pColData->nVal, 0); + } else { + SET_BIT1(pColData->pBitMap, pColData->nVal, 1); + } + } + code = tColDataPutValue(pColData, pColVal); + if (code) goto _exit; + + pColData->nVal++; + +_exit: + return code; +} +static FORCE_INLINE int32_t tColDataAppendValue7(SColData *pColData, + SColVal *pColVal) { // HAS_VALUE|HAS_NULL|HAS_NONE + int32_t code = 0; + + code = tRealloc(&pColData->pBitMap, BIT2_SIZE(pColData->nVal + 1)); + if (code) goto _exit; + + if (pColVal->isNone) { + SET_BIT2(pColData->pBitMap, pColData->nVal, 0); + } else if (pColVal->isNull) { + SET_BIT2(pColData->pBitMap, pColData->nVal, 1); + } else { + SET_BIT2(pColData->pBitMap, pColData->nVal, 2); + } + code = tColDataPutValue(pColData, pColVal); + if (code) goto _exit; + + pColData->nVal++; + +_exit: + return code; +} +static int32_t (*tColDataAppendValueImpl[])(SColData *pColData, SColVal *pColVal) = { + tColDataAppendValue0, // 0 + tColDataAppendValue1, // HAS_NONE + tColDataAppendValue2, // HAS_NULL + tColDataAppendValue3, // HAS_NULL|HAS_NONE + tColDataAppendValue4, // HAS_VALUE + tColDataAppendValue5, // HAS_VALUE|HAS_NONE + tColDataAppendValue6, // HAS_VALUE|HAS_NULL + tColDataAppendValue7 // HAS_VALUE|HAS_NULL|HAS_NONE +}; +int32_t tColDataAppendValue(SColData *pColData, SColVal *pColVal) { + ASSERT(pColData->cid == pColVal->cid && pColData->type == pColVal->type); + return tColDataAppendValueImpl[pColData->flag](pColData, pColVal); +} + +static FORCE_INLINE void tColDataGetValue1(SColData *pColData, int32_t iVal, SColVal *pColVal) { // HAS_NONE + *pColVal = COL_VAL_NONE(pColData->cid, pColData->type); +} +static FORCE_INLINE void tColDataGetValue2(SColData *pColData, int32_t iVal, SColVal *pColVal) { // HAS_NULL + *pColVal = COL_VAL_NULL(pColData->cid, pColData->type); +} +static FORCE_INLINE void tColDataGetValue3(SColData *pColData, int32_t iVal, SColVal *pColVal) { // HAS_NULL|HAS_NONE + switch (GET_BIT1(pColData->pBitMap, iVal)) { + case 0: + *pColVal = COL_VAL_NONE(pColData->cid, pColData->type); + break; + case 1: + *pColVal = COL_VAL_NULL(pColData->cid, pColData->type); + break; + default: + ASSERT(0); + } +} +static FORCE_INLINE void tColDataGetValue4(SColData *pColData, int32_t iVal, SColVal *pColVal) { // HAS_VALUE + SValue value; + if (IS_VAR_DATA_TYPE(pColData->type)) { + if (iVal + 1 < pColData->nVal) { + value.nData = pColData->aOffset[iVal + 1] - pColData->aOffset[iVal]; + } else { + value.nData = pColData->nData - pColData->aOffset[iVal]; + } + value.pData = pColData->pData + pColData->aOffset[iVal]; + } else { + tGetValue(pColData->pData + tDataTypes[pColData->type].bytes * iVal, &value, pColData->type); + } + *pColVal = COL_VAL_VALUE(pColData->cid, pColData->type, value); +} +static FORCE_INLINE void tColDataGetValue5(SColData *pColData, int32_t iVal, + SColVal *pColVal) { // HAS_VALUE|HAS_NONE + switch (GET_BIT1(pColData->pBitMap, iVal)) { + case 0: + *pColVal = COL_VAL_NONE(pColData->cid, pColData->type); + break; + case 1: + tColDataGetValue4(pColData, iVal, pColVal); + break; + default: + ASSERT(0); + } +} +static FORCE_INLINE void tColDataGetValue6(SColData *pColData, int32_t iVal, + SColVal *pColVal) { // HAS_VALUE|HAS_NULL + switch (GET_BIT1(pColData->pBitMap, iVal)) { + case 0: + *pColVal = COL_VAL_NULL(pColData->cid, 
pColData->type); + break; + case 1: + tColDataGetValue4(pColData, iVal, pColVal); + break; + default: + ASSERT(0); + } +} +static FORCE_INLINE void tColDataGetValue7(SColData *pColData, int32_t iVal, + SColVal *pColVal) { // HAS_VALUE|HAS_NULL|HAS_NONE + switch (GET_BIT2(pColData->pBitMap, iVal)) { + case 0: + *pColVal = COL_VAL_NONE(pColData->cid, pColData->type); + break; + case 1: + *pColVal = COL_VAL_NULL(pColData->cid, pColData->type); + break; + case 2: + tColDataGetValue4(pColData, iVal, pColVal); + break; + default: + ASSERT(0); + } +} +static void (*tColDataGetValueImpl[])(SColData *pColData, int32_t iVal, SColVal *pColVal) = { + NULL, // 0 + tColDataGetValue1, // HAS_NONE + tColDataGetValue2, // HAS_NULL + tColDataGetValue3, // HAS_NULL | HAS_NONE + tColDataGetValue4, // HAS_VALUE + tColDataGetValue5, // HAS_VALUE | HAS_NONE + tColDataGetValue6, // HAS_VALUE | HAS_NULL + tColDataGetValue7 // HAS_VALUE | HAS_NULL | HAS_NONE +}; +void tColDataGetValue(SColData *pColData, int32_t iVal, SColVal *pColVal) { + ASSERT(iVal >= 0 && iVal < pColData->nVal && pColData->flag); + tColDataGetValueImpl[pColData->flag](pColData, iVal, pColVal); +} + +uint8_t tColDataGetBitValue(SColData *pColData, int32_t iVal) { + uint8_t v; + switch (pColData->flag) { + case HAS_NONE: + v = 0; + break; + case HAS_NULL: + v = 1; + break; + case (HAS_NULL | HAS_NONE): + v = GET_BIT1(pColData->pBitMap, iVal); + break; + case HAS_VALUE: + v = 2; + break; + case (HAS_VALUE | HAS_NONE): + v = GET_BIT1(pColData->pBitMap, iVal); + if (v) v = 2; + break; + case (HAS_VALUE | HAS_NULL): + v = GET_BIT1(pColData->pBitMap, iVal) + 1; + break; + case (HAS_VALUE | HAS_NULL | HAS_NONE): + v = GET_BIT2(pColData->pBitMap, iVal); + break; + default: + ASSERT(0); + break; + } + return v; +} + +int32_t tColDataCopy(SColData *pColDataSrc, SColData *pColDataDest) { + int32_t code = 0; + int32_t size; + + ASSERT(pColDataSrc->nVal > 0); + ASSERT(pColDataDest->cid = pColDataSrc->cid); + ASSERT(pColDataDest->type = pColDataSrc->type); + + pColDataDest->smaOn = pColDataSrc->smaOn; + pColDataDest->nVal = pColDataSrc->nVal; + pColDataDest->flag = pColDataSrc->flag; + + // bitmap + if (pColDataSrc->flag != HAS_NONE && pColDataSrc->flag != HAS_NULL && pColDataSrc->flag != HAS_VALUE) { + size = BIT2_SIZE(pColDataSrc->nVal); + code = tRealloc(&pColDataDest->pBitMap, size); + if (code) goto _exit; + memcpy(pColDataDest->pBitMap, pColDataSrc->pBitMap, size); + } + + // offset + if (IS_VAR_DATA_TYPE(pColDataDest->type)) { + size = sizeof(int32_t) * pColDataSrc->nVal; + + code = tRealloc((uint8_t **)&pColDataDest->aOffset, size); + if (code) goto _exit; + + memcpy(pColDataDest->aOffset, pColDataSrc->aOffset, size); + } + + // value + pColDataDest->nData = pColDataSrc->nData; + code = tRealloc(&pColDataDest->pData, pColDataSrc->nData); + if (code) goto _exit; + memcpy(pColDataDest->pData, pColDataSrc->pData, pColDataDest->nData); + +_exit: + return code; +} \ No newline at end of file diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index c436e4ffd2..ddda8f8c9a 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -63,7 +63,7 @@ int32_t tsNumOfVnodeWriteThreads = 2; int32_t tsNumOfVnodeSyncThreads = 2; int32_t tsNumOfVnodeRsmaThreads = 2; int32_t tsNumOfQnodeQueryThreads = 4; -int32_t tsNumOfQnodeFetchThreads = 4; +int32_t tsNumOfQnodeFetchThreads = 1; int32_t tsNumOfSnodeSharedThreads = 2; int32_t tsNumOfSnodeUniqueThreads = 2; @@ -385,9 +385,9 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { 
tsNumOfQnodeQueryThreads = TMAX(tsNumOfQnodeQueryThreads, 4); if (cfgAddInt32(pCfg, "numOfQnodeQueryThreads", tsNumOfQnodeQueryThreads, 1, 1024, 0) != 0) return -1; - tsNumOfQnodeFetchThreads = tsNumOfCores / 2; - tsNumOfQnodeFetchThreads = TMAX(tsNumOfQnodeFetchThreads, 4); - if (cfgAddInt32(pCfg, "numOfQnodeFetchThreads", tsNumOfQnodeFetchThreads, 1, 1024, 0) != 0) return -1; +// tsNumOfQnodeFetchThreads = tsNumOfCores / 2; +// tsNumOfQnodeFetchThreads = TMAX(tsNumOfQnodeFetchThreads, 4); +// if (cfgAddInt32(pCfg, "numOfQnodeFetchThreads", tsNumOfQnodeFetchThreads, 1, 1024, 0) != 0) return -1; tsNumOfSnodeSharedThreads = tsNumOfCores / 4; tsNumOfSnodeSharedThreads = TRANGE(tsNumOfSnodeSharedThreads, 2, 4); @@ -527,6 +527,7 @@ static int32_t taosUpdateServerCfg(SConfig *pCfg) { pItem->stype = stype; } +/* pItem = cfgGetItem(tsCfg, "numOfQnodeFetchThreads"); if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { tsNumOfQnodeFetchThreads = numOfCores / 2; @@ -534,6 +535,7 @@ static int32_t taosUpdateServerCfg(SConfig *pCfg) { pItem->i32 = tsNumOfQnodeFetchThreads; pItem->stype = stype; } +*/ pItem = cfgGetItem(tsCfg, "numOfSnodeSharedThreads"); if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { @@ -691,7 +693,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsNumOfVnodeSyncThreads = cfgGetItem(pCfg, "numOfVnodeSyncThreads")->i32; tsNumOfVnodeRsmaThreads = cfgGetItem(pCfg, "numOfVnodeRsmaThreads")->i32; tsNumOfQnodeQueryThreads = cfgGetItem(pCfg, "numOfQnodeQueryThreads")->i32; - tsNumOfQnodeFetchThreads = cfgGetItem(pCfg, "numOfQnodeFetchThreads")->i32; +// tsNumOfQnodeFetchThreads = cfgGetItem(pCfg, "numOfQnodeFetchThreads")->i32; tsNumOfSnodeSharedThreads = cfgGetItem(pCfg, "numOfSnodeSharedThreads")->i32; tsNumOfSnodeUniqueThreads = cfgGetItem(pCfg, "numOfSnodeUniqueThreads")->i32; tsRpcQueueMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64; @@ -939,8 +941,10 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) { tsNumOfVnodeRsmaThreads = cfgGetItem(pCfg, "numOfVnodeRsmaThreads")->i32; } else if (strcasecmp("numOfQnodeQueryThreads", name) == 0) { tsNumOfQnodeQueryThreads = cfgGetItem(pCfg, "numOfQnodeQueryThreads")->i32; +/* } else if (strcasecmp("numOfQnodeFetchThreads", name) == 0) { tsNumOfQnodeFetchThreads = cfgGetItem(pCfg, "numOfQnodeFetchThreads")->i32; +*/ } else if (strcasecmp("numOfSnodeSharedThreads", name) == 0) { tsNumOfSnodeSharedThreads = cfgGetItem(pCfg, "numOfSnodeSharedThreads")->i32; } else if (strcasecmp("numOfSnodeUniqueThreads", name) == 0) { diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 1a41f80907..fffaf214ed 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -3347,7 +3347,13 @@ int32_t tDeserializeSSTbHbRsp(void *buf, int32_t bufLen, SSTbHbRsp *pRsp) { return 0; } -void tFreeSTableMetaRsp(void *pRsp) { taosMemoryFreeClear(((STableMetaRsp *)pRsp)->pSchemas); } +void tFreeSTableMetaRsp(void *pRsp) { + if (NULL == pRsp) { + return; + } + + taosMemoryFreeClear(((STableMetaRsp *)pRsp)->pSchemas); +} void tFreeSTableIndexRsp(void *info) { if (NULL == info) { @@ -5439,6 +5445,8 @@ void tFreeSSubmitRsp(SSubmitRsp *pRsp) { for (int32_t i = 0; i < pRsp->nBlocks; ++i) { SSubmitBlkRsp *sRsp = pRsp->pBlocks + i; taosMemoryFree(sRsp->tblFName); + tFreeSTableMetaRsp(sRsp->pMeta); + taosMemoryFree(sRsp->pMeta); } taosMemoryFree(pRsp->pBlocks); diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index 1452c5ae2f..8a968712e0 100644 --- 
a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -900,6 +900,7 @@ int32_t mndDropSubByTopic(SMnode *pMnode, STrans *pTrans, const char *topicName) // iter all vnode to delete handle if (taosHashGetSize(pSub->consumerHash) != 0) { sdbRelease(pSdb, pSub); + terrno = TSDB_CODE_MND_IN_REBALANCE; return -1; } int32_t sz = taosArrayGetSize(pSub->unassignedVgs); diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index eb072d013d..7b36966d6c 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ b/source/dnode/mnode/impl/src/mndTopic.c @@ -713,7 +713,6 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) { mndReleaseTopic(pMnode, pTopic); if (code != 0) { - terrno = code; mError("topic:%s, failed to drop since %s", dropReq.name, terrstr()); return -1; } diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h index 4264a3232b..817089f237 100644 --- a/source/dnode/vnode/src/inc/tsdb.h +++ b/source/dnode/vnode/src/inc/tsdb.h @@ -44,7 +44,6 @@ typedef struct SMapData SMapData; typedef struct SBlockIdx SBlockIdx; typedef struct SDataBlk SDataBlk; typedef struct SSttBlk SSttBlk; -typedef struct SColData SColData; typedef struct SDiskDataHdr SDiskDataHdr; typedef struct SBlockData SBlockData; typedef struct SDelFile SDelFile; @@ -71,10 +70,6 @@ typedef struct SLDataIter SLDataIter; #define TSDB_MAX_SUBBLOCKS 8 #define TSDB_FHDR_SIZE 512 -#define HAS_NONE ((int8_t)0x1) -#define HAS_NULL ((int8_t)0x2) -#define HAS_VALUE ((int8_t)0x4) - #define VERSION_MIN 0 #define VERSION_MAX INT64_MAX @@ -148,15 +143,6 @@ int32_t tPutBlockIdx(uint8_t *p, void *ph); int32_t tGetBlockIdx(uint8_t *p, void *ph); int32_t tCmprBlockIdx(void const *lhs, void const *rhs); int32_t tCmprBlockL(void const *lhs, void const *rhs); -// SColdata -void tColDataInit(SColData *pColData, int16_t cid, int8_t type, int8_t smaOn); -void tColDataReset(SColData *pColData); -void tColDataClear(void *ph); -int32_t tColDataAppendValue(SColData *pColData, SColVal *pColVal); -int32_t tColDataGetValue(SColData *pColData, int32_t iRow, SColVal *pColVal); -int32_t tColDataCopy(SColData *pColDataSrc, SColData *pColDataDest); -int32_t tPutColData(uint8_t *p, SColData *pColData); -int32_t tGetColData(uint8_t *p, SColData *pColData); // SBlockData #define tBlockDataFirstRow(PBLOCKDATA) tsdbRowFromBlockData(PBLOCKDATA, 0) #define tBlockDataLastRow(PBLOCKDATA) tsdbRowFromBlockData(PBLOCKDATA, (PBLOCKDATA)->nRow - 1) @@ -470,18 +456,6 @@ struct SSttBlk { SBlockInfo bInfo; }; -struct SColData { - int16_t cid; - int8_t type; - int8_t smaOn; - int32_t nVal; - uint8_t flag; - uint8_t *pBitMap; - int32_t *aOffset; - int32_t nData; - uint8_t *pData; -}; - // (SBlockData){.suid = 0, .uid = 0}: block data not initialized // (SBlockData){.suid = suid, .uid = uid}: block data for ONE child table int .data file // (SBlockData){.suid = suid, .uid = 0}: block data for N child tables int .last file diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index f9c2757c37..c8841e5e16 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -832,7 +832,7 @@ int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver) { tDecoderClear(pCoder); int32_t sz = taosArrayGetSize(pRes->uidList); - if (sz == 0) { + if (sz == 0 || pRes->affectedRows == 0) { taosArrayDestroy(pRes->uidList); return 0; } diff --git a/source/dnode/vnode/src/tsdb/tsdbFS.c b/source/dnode/vnode/src/tsdb/tsdbFS.c index 
31bdb784dd..10926ae6ad 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFS.c +++ b/source/dnode/vnode/src/tsdb/tsdbFS.c @@ -58,7 +58,7 @@ static int32_t tsdbGnrtCurrent(STsdb *pTsdb, STsdbFS *pFS, char *fname) { taosCalcChecksumAppend(0, pData, size); // create and write - pFD = taosOpenFile(fname, TD_FILE_WRITE | TD_FILE_CREATE); + pFD = taosOpenFile(fname, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC); if (pFD == NULL) { code = TAOS_SYSTEM_ERROR(errno); goto _err; diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 34737bb7fd..252fbabd62 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -903,7 +903,7 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn // null value exists, check one-by-one if (pData->flag != HAS_VALUE) { for (int32_t j = pDumpInfo->rowIndex; rowIndex < remain; j += step, rowIndex++) { - uint8_t v = GET_BIT2(pData->pBitMap, j); + uint8_t v = tColDataGetBitValue(pData, j); if (v == 0 || v == 1) { colDataSetNull_f(pColData->nullbitmap, rowIndex); } @@ -3924,6 +3924,5 @@ void tsdbUntakeReadSnap(STsdb* pTsdb, STsdbReadSnap* pSnap, const char* idStr) { tsdbFSUnref(pTsdb, &pSnap->fs); taosMemoryFree(pSnap); } - tsdbTrace("vgId:%d, untake read snapshot, %s", TD_VID(pTsdb->pVnode), idStr); -} \ No newline at end of file +} diff --git a/source/dnode/vnode/src/tsdb/tsdbRetention.c b/source/dnode/vnode/src/tsdb/tsdbRetention.c index d99bf2aa5c..eb4151079b 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRetention.c +++ b/source/dnode/vnode/src/tsdb/tsdbRetention.c @@ -16,9 +16,19 @@ #include "tsdb.h" static bool tsdbShouldDoRetention(STsdb *pTsdb, int64_t now) { + STsdbKeepCfg *keepCfg = &pTsdb->keepCfg; + + if ((keepCfg->keep0 == keepCfg->keep1) && (keepCfg->keep1 == keepCfg->keep2)) { + return false; + } + + if (tfsGetLevel(pTsdb->pVnode->pTfs) <= 1) { + return false; + } + for (int32_t iSet = 0; iSet < taosArrayGetSize(pTsdb->fs.aDFileSet); iSet++) { SDFileSet *pSet = (SDFileSet *)taosArrayGet(pTsdb->fs.aDFileSet, iSet); - int32_t expLevel = tsdbFidLevel(pSet->fid, &pTsdb->keepCfg, now); + int32_t expLevel = tsdbFidLevel(pSet->fid, keepCfg, now); SDiskID did; if (expLevel == pSet->diskId.level) continue; @@ -53,7 +63,7 @@ int32_t tsdbDoRetention(STsdb *pTsdb, int64_t now) { if (code) goto _err; for (int32_t iSet = 0; iSet < taosArrayGetSize(fs.aDFileSet); iSet++) { - SDFileSet *pSet = (SDFileSet *)taosArrayGet(pTsdb->fs.aDFileSet, iSet); + SDFileSet *pSet = (SDFileSet *)taosArrayGet(fs.aDFileSet, iSet); int32_t expLevel = tsdbFidLevel(pSet->fid, &pTsdb->keepCfg, now); SDiskID did; @@ -65,6 +75,7 @@ int32_t tsdbDoRetention(STsdb *pTsdb, int64_t now) { taosArrayRemove(fs.aDFileSet, iSet); iSet--; } else { + if (expLevel == 0) continue; if (tfsAllocDisk(pTsdb->pVnode->pTfs, expLevel, &did) < 0) { code = terrno; goto _exit; diff --git a/source/dnode/vnode/src/tsdb/tsdbUtil.c b/source/dnode/vnode/src/tsdb/tsdbUtil.c index caeca45e01..64c150484b 100644 --- a/source/dnode/vnode/src/tsdb/tsdbUtil.c +++ b/source/dnode/vnode/src/tsdb/tsdbUtil.c @@ -909,248 +909,6 @@ int32_t tsdbBuildDeleteSkyline(SArray *aDelData, int32_t sidx, int32_t eidx, SAr return code; } -// SColData ======================================== -void tColDataInit(SColData *pColData, int16_t cid, int8_t type, int8_t smaOn) { - pColData->cid = cid; - pColData->type = type; - pColData->smaOn = smaOn; - tColDataReset(pColData); -} - -void tColDataReset(SColData *pColData) { - pColData->nVal = 0; - 
pColData->flag = 0; - pColData->nData = 0; -} - -void tColDataClear(void *ph) { - SColData *pColData = (SColData *)ph; - - tFree(pColData->pBitMap); - tFree((uint8_t *)pColData->aOffset); - tFree(pColData->pData); -} - -int32_t tColDataAppendValue(SColData *pColData, SColVal *pColVal) { - int32_t code = 0; - int64_t size; - SValue value = {0}; - SValue *pValue = &value; - - ASSERT(pColVal->cid == pColData->cid); - ASSERT(pColVal->type == pColData->type); - - // realloc bitmap - size = BIT2_SIZE(pColData->nVal + 1); - code = tRealloc(&pColData->pBitMap, size); - if (code) goto _exit; - if ((pColData->nVal & 3) == 0) { - pColData->pBitMap[pColData->nVal >> 2] = 0; - } - - // put value - if (pColVal->isNone) { - pColData->flag |= HAS_NONE; - SET_BIT2(pColData->pBitMap, pColData->nVal, 0); - } else if (pColVal->isNull) { - pColData->flag |= HAS_NULL; - SET_BIT2(pColData->pBitMap, pColData->nVal, 1); - } else { - pColData->flag |= HAS_VALUE; - SET_BIT2(pColData->pBitMap, pColData->nVal, 2); - pValue = &pColVal->value; - } - - if (IS_VAR_DATA_TYPE(pColData->type)) { - // offset - code = tRealloc((uint8_t **)&pColData->aOffset, sizeof(int32_t) * (pColData->nVal + 1)); - if (code) goto _exit; - pColData->aOffset[pColData->nVal] = pColData->nData; - - // value - if ((!pColVal->isNone) && (!pColVal->isNull)) { - code = tRealloc(&pColData->pData, pColData->nData + pColVal->value.nData); - if (code) goto _exit; - memcpy(pColData->pData + pColData->nData, pColVal->value.pData, pColVal->value.nData); - pColData->nData += pColVal->value.nData; - } - } else { - code = tRealloc(&pColData->pData, pColData->nData + tPutValue(NULL, pValue, pColVal->type)); - if (code) goto _exit; - pColData->nData += tPutValue(pColData->pData + pColData->nData, pValue, pColVal->type); - } - - pColData->nVal++; - -_exit: - return code; -} - -int32_t tColDataCopy(SColData *pColDataSrc, SColData *pColDataDest) { - int32_t code = 0; - int32_t size; - - ASSERT(pColDataSrc->nVal > 0); - ASSERT(pColDataDest->cid = pColDataSrc->cid); - ASSERT(pColDataDest->type = pColDataSrc->type); - - pColDataDest->smaOn = pColDataSrc->smaOn; - pColDataDest->nVal = pColDataSrc->nVal; - pColDataDest->flag = pColDataSrc->flag; - - // bitmap - if (pColDataSrc->flag != HAS_NONE && pColDataSrc->flag != HAS_NULL && pColDataSrc->flag != HAS_VALUE) { - size = BIT2_SIZE(pColDataSrc->nVal); - code = tRealloc(&pColDataDest->pBitMap, size); - if (code) goto _exit; - memcpy(pColDataDest->pBitMap, pColDataSrc->pBitMap, size); - } - - // offset - if (IS_VAR_DATA_TYPE(pColDataDest->type)) { - size = sizeof(int32_t) * pColDataSrc->nVal; - - code = tRealloc((uint8_t **)&pColDataDest->aOffset, size); - if (code) goto _exit; - - memcpy(pColDataDest->aOffset, pColDataSrc->aOffset, size); - } - - // value - pColDataDest->nData = pColDataSrc->nData; - code = tRealloc(&pColDataDest->pData, pColDataSrc->nData); - if (code) goto _exit; - memcpy(pColDataDest->pData, pColDataSrc->pData, pColDataDest->nData); - -_exit: - return code; -} - -int32_t tColDataGetValue(SColData *pColData, int32_t iVal, SColVal *pColVal) { - int32_t code = 0; - - ASSERT(iVal < pColData->nVal); - ASSERT(pColData->flag); - - if (pColData->flag == HAS_NONE) { - *pColVal = COL_VAL_NONE(pColData->cid, pColData->type); - goto _exit; - } else if (pColData->flag == HAS_NULL) { - *pColVal = COL_VAL_NULL(pColData->cid, pColData->type); - goto _exit; - } else if (pColData->flag != HAS_VALUE) { - uint8_t v = GET_BIT2(pColData->pBitMap, iVal); - if (v == 0) { - *pColVal = COL_VAL_NONE(pColData->cid, 
pColData->type); - goto _exit; - } else if (v == 1) { - *pColVal = COL_VAL_NULL(pColData->cid, pColData->type); - goto _exit; - } - } - - // get value - SValue value; - if (IS_VAR_DATA_TYPE(pColData->type)) { - if (iVal + 1 < pColData->nVal) { - value.nData = pColData->aOffset[iVal + 1] - pColData->aOffset[iVal]; - } else { - value.nData = pColData->nData - pColData->aOffset[iVal]; - } - - value.pData = pColData->pData + pColData->aOffset[iVal]; - } else { - tGetValue(pColData->pData + tDataTypes[pColData->type].bytes * iVal, &value, pColData->type); - } - *pColVal = COL_VAL_VALUE(pColData->cid, pColData->type, value); - -_exit: - return code; -} - -int32_t tPutColData(uint8_t *p, SColData *pColData) { - int32_t n = 0; - - n += tPutI16v(p ? p + n : p, pColData->cid); - n += tPutI8(p ? p + n : p, pColData->type); - n += tPutI8(p ? p + n : p, pColData->smaOn); - n += tPutI32v(p ? p + n : p, pColData->nVal); - n += tPutU8(p ? p + n : p, pColData->flag); - - if (pColData->flag == HAS_NONE || pColData->flag == HAS_NULL) goto _exit; - if (pColData->flag != HAS_VALUE) { - // bitmap - - int32_t size = BIT2_SIZE(pColData->nVal); - if (p) { - memcpy(p + n, pColData->pBitMap, size); - } - n += size; - } - if (IS_VAR_DATA_TYPE(pColData->type)) { - // offset - - int32_t size = sizeof(int32_t) * pColData->nVal; - if (p) { - memcpy(p + n, pColData->aOffset, size); - } - n += size; - } - n += tPutI32v(p ? p + n : p, pColData->nData); - if (p) { - memcpy(p + n, pColData->pData, pColData->nData); - } - n += pColData->nData; - -_exit: - return n; -} - -int32_t tGetColData(uint8_t *p, SColData *pColData) { - int32_t n = 0; - - n += tGetI16v(p + n, &pColData->cid); - n += tGetI8(p + n, &pColData->type); - n += tGetI8(p + n, &pColData->smaOn); - n += tGetI32v(p + n, &pColData->nVal); - n += tGetU8(p + n, &pColData->flag); - - if (pColData->flag == HAS_NONE || pColData->flag == HAS_NULL) goto _exit; - if (pColData->flag != HAS_VALUE) { - // bitmap - - int32_t size = BIT2_SIZE(pColData->nVal); - pColData->pBitMap = p + n; - n += size; - } - if (IS_VAR_DATA_TYPE(pColData->type)) { - // offset - - int32_t size = sizeof(int32_t) * pColData->nVal; - pColData->aOffset = (int32_t *)(p + n); - n += size; - } - n += tGetI32v(p + n, &pColData->nData); - pColData->pData = p + n; - n += pColData->nData; - -_exit: - return n; -} - -static FORCE_INLINE int32_t tColDataCmprFn(const void *p1, const void *p2) { - SColData *pColData1 = (SColData *)p1; - SColData *pColData2 = (SColData *)p2; - - if (pColData1->cid < pColData2->cid) { - return -1; - } else if (pColData1->cid > pColData2->cid) { - return 1; - } - - return 0; -} - // SBlockData ====================================================== int32_t tBlockDataCreate(SBlockData *pBlockData) { int32_t code = 0; @@ -1182,7 +940,7 @@ void tBlockDataDestroy(SBlockData *pBlockData, int8_t deepClear) { tFree((uint8_t *)pBlockData->aVersion); tFree((uint8_t *)pBlockData->aTSKEY); taosArrayDestroy(pBlockData->aIdx); - taosArrayDestroyEx(pBlockData->aColData, deepClear ? tColDataClear : NULL); + taosArrayDestroyEx(pBlockData->aColData, deepClear ? 
tColDataDestroy : NULL); pBlockData->aUid = NULL; pBlockData->aVersion = NULL; pBlockData->aTSKEY = NULL; @@ -1251,7 +1009,7 @@ void tBlockDataClear(SBlockData *pBlockData) { pBlockData->nRow = 0; for (int32_t iColData = 0; iColData < taosArrayGetSize(pBlockData->aIdx); iColData++) { SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iColData); - tColDataReset(pColData); + tColDataClear(pColData); } } @@ -1501,7 +1259,7 @@ void tBlockDataGetColData(SBlockData *pBlockData, int16_t cid, SColData **ppColD while (lidx <= ridx) { int32_t midx = (lidx + ridx) / 2; SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, midx); - int32_t c = tColDataCmprFn(pColData, &(SColData){.cid = cid}); + int32_t c = (pColData->cid == cid) ? 0 : ((pColData->cid > cid) ? 1 : -1); if (c == 0) { *ppColData = pColData; @@ -1986,47 +1744,16 @@ int32_t tsdbCmprColData(SColData *pColData, int8_t cmprAlg, SBlockCol *pBlockCol int32_t size = 0; // bitmap if (pColData->flag != HAS_VALUE) { - uint8_t *pBitMap = pColData->pBitMap; - int32_t szBitMap = BIT2_SIZE(pColData->nVal); - - // BIT2 to BIT1 - if (pColData->flag != (HAS_VALUE | HAS_NULL | HAS_NONE)) { + int32_t szBitMap; + if (pColData->flag == (HAS_VALUE | HAS_NULL | HAS_NONE)) { + szBitMap = BIT2_SIZE(pColData->nVal); + } else { szBitMap = BIT1_SIZE(pColData->nVal); - pBitMap = taosMemoryCalloc(1, szBitMap); - if (pBitMap == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _exit; - } - - for (int32_t iVal = 0; iVal < pColData->nVal; iVal++) { - uint8_t v = GET_BIT2(pColData->pBitMap, iVal); - switch (pColData->flag) { - case (HAS_NULL | HAS_NONE): - SET_BIT1(pBitMap, iVal, v); - break; - case (HAS_VALUE | HAS_NONE): - if (v) { - SET_BIT1(pBitMap, iVal, 1); - } else { - SET_BIT1(pBitMap, iVal, 0); - } - break; - case (HAS_VALUE | HAS_NULL): - SET_BIT1(pBitMap, iVal, v - 1); - break; - default: - ASSERT(0); - } - } } - code = tsdbCmprData(pBitMap, szBitMap, TSDB_DATA_TYPE_TINYINT, cmprAlg, ppOut, nOut + size, &pBlockCol->szBitmap, - ppBuf); + code = tsdbCmprData(pColData->pBitMap, szBitMap, TSDB_DATA_TYPE_TINYINT, cmprAlg, ppOut, nOut + size, + &pBlockCol->szBitmap, ppBuf); if (code) goto _exit; - - if (pColData->flag != (HAS_VALUE | HAS_NULL | HAS_NONE)) { - taosMemoryFree(pBitMap); - } } size += pBlockCol->szBitmap; @@ -2064,46 +1791,15 @@ int32_t tsdbDecmprColData(uint8_t *pIn, SBlockCol *pBlockCol, int8_t cmprAlg, in uint8_t *p = pIn; // bitmap if (pBlockCol->szBitmap) { - if (pBlockCol->flag != (HAS_VALUE | HAS_NULL | HAS_NONE)) { - uint8_t *pBitMap = NULL; - code = tsdbDecmprData(p, pBlockCol->szBitmap, TSDB_DATA_TYPE_TINYINT, cmprAlg, &pBitMap, - BIT1_SIZE(pColData->nVal), ppBuf); - if (code) goto _exit; - - code = tRealloc(&pColData->pBitMap, BIT2_SIZE(pColData->nVal)); - if (code) { - tFree(pBitMap); - goto _exit; - } - - // BIT1 to BIT2 - for (int32_t iVal = 0; iVal < nVal; iVal++) { - uint8_t v = GET_BIT1(pBitMap, iVal); - switch (pBlockCol->flag) { - case (HAS_NULL | HAS_NONE): - SET_BIT2(pColData->pBitMap, iVal, v); - break; - case (HAS_VALUE | HAS_NONE): - if (v) { - SET_BIT2(pColData->pBitMap, iVal, 2); - } else { - SET_BIT2(pColData->pBitMap, iVal, 0); - } - break; - case (HAS_VALUE | HAS_NULL): - SET_BIT2(pColData->pBitMap, iVal, v + 1); - break; - default: - ASSERT(0); - } - } - - tFree(pBitMap); + int32_t szBitMap; + if (pColData->flag == (HAS_VALUE | HAS_NULL | HAS_NONE)) { + szBitMap = BIT2_SIZE(pColData->nVal); } else { - code = tsdbDecmprData(p, pBlockCol->szBitmap, TSDB_DATA_TYPE_TINYINT, cmprAlg, &pColData->pBitMap, - 
BIT2_SIZE(pColData->nVal), ppBuf); - if (code) goto _exit; + szBitMap = BIT1_SIZE(pColData->nVal); } + + code = tsdbDecmprData(p, pBlockCol->szBitmap, TSDB_DATA_TYPE_TINYINT, cmprAlg, &pColData->pBitMap, szBitMap, ppBuf); + if (code) goto _exit; } p += pBlockCol->szBitmap; diff --git a/source/libs/catalog/src/ctgUtil.c b/source/libs/catalog/src/ctgUtil.c index 296100ce6d..97b174de1c 100644 --- a/source/libs/catalog/src/ctgUtil.c +++ b/source/libs/catalog/src/ctgUtil.c @@ -99,7 +99,16 @@ char *ctgTaskTypeStr(CTG_TASK_TYPE type) { } void ctgFreeQNode(SCtgQNode *node) { - //TODO + if (NULL == node) { + return; + } + + if (node->op) { + taosMemoryFree(node->op->data); + taosMemoryFree(node->op); + } + + taosMemoryFree(node); } void ctgFreeSTableIndex(void *info) { diff --git a/source/libs/executor/src/dataDeleter.c b/source/libs/executor/src/dataDeleter.c index 40198615ea..55a1a1fdb9 100644 --- a/source/libs/executor/src/dataDeleter.c +++ b/source/libs/executor/src/dataDeleter.c @@ -79,25 +79,33 @@ static void toDataCacheEntry(SDataDeleterHandle* pHandle, const SInputData* pInp pEntry->dataLen = sizeof(SDeleterRes); ASSERT(1 == pEntry->numOfRows); - ASSERT(1 == pEntry->numOfCols); + ASSERT(3 == pEntry->numOfCols); pBuf->useSize = sizeof(SDataCacheEntry); SColumnInfoData* pColRes = (SColumnInfoData*)taosArrayGet(pInput->pData->pDataBlock, 0); + SColumnInfoData* pColSKey = (SColumnInfoData*)taosArrayGet(pInput->pData->pDataBlock, 1); + SColumnInfoData* pColEKey = (SColumnInfoData*)taosArrayGet(pInput->pData->pDataBlock, 2); SDeleterRes* pRes = (SDeleterRes*)pEntry->data; pRes->suid = pHandle->pParam->suid; pRes->uidList = pHandle->pParam->pUidList; - pRes->skey = pHandle->pDeleter->deleteTimeRange.skey; - pRes->ekey = pHandle->pDeleter->deleteTimeRange.ekey; strcpy(pRes->tableName, pHandle->pDeleter->tableFName); strcpy(pRes->tsColName, pHandle->pDeleter->tsColName); pRes->affectedRows = *(int64_t*)pColRes->pData; + if (pRes->affectedRows) { + pRes->skey = *(int64_t*)pColSKey->pData; + pRes->ekey = *(int64_t*)pColEKey->pData; + ASSERT(pRes->skey <= pRes->ekey); + } else { + pRes->skey = pHandle->pDeleter->deleteTimeRange.skey; + pRes->ekey = pHandle->pDeleter->deleteTimeRange.ekey; + } pBuf->useSize += pEntry->dataLen; - - atomic_add_fetch_64(&pHandle->cachedSize, pEntry->dataLen); - atomic_add_fetch_64(&gDataSinkStat.cachedSize, pEntry->dataLen); + + atomic_add_fetch_64(&pHandle->cachedSize, pEntry->dataLen); + atomic_add_fetch_64(&gDataSinkStat.cachedSize, pEntry->dataLen); } static bool allocBuf(SDataDeleterHandle* pDeleter, const SInputData* pInput, SDataDeleterBuf* pBuf) { @@ -172,7 +180,8 @@ static void getDataLength(SDataSinkHandle* pHandle, int64_t* pLen, bool* pQueryE SDataCacheEntry* pEntry = (SDataCacheEntry*)pDeleter->nextOutput.pData; *pLen = pEntry->dataLen; *pQueryEnd = pDeleter->queryEnd; - qDebug("got data len %" PRId64 ", row num %d in sink", *pLen, ((SDataCacheEntry*)(pDeleter->nextOutput.pData))->numOfRows); + qDebug("got data len %" PRId64 ", row num %d in sink", *pLen, + ((SDataCacheEntry*)(pDeleter->nextOutput.pData))->numOfRows); } static int32_t getDataBlock(SDataSinkHandle* pHandle, SOutputData* pOutput) { @@ -186,14 +195,14 @@ static int32_t getDataBlock(SDataSinkHandle* pHandle, SOutputData* pOutput) { return TSDB_CODE_SUCCESS; } SDataCacheEntry* pEntry = (SDataCacheEntry*)(pDeleter->nextOutput.pData); - memcpy(pOutput->pData, pEntry->data, pEntry->dataLen); + memcpy(pOutput->pData, pEntry->data, pEntry->dataLen); pDeleter->pParam->pUidList = NULL; pOutput->numOfRows 
= pEntry->numOfRows; pOutput->numOfCols = pEntry->numOfCols; pOutput->compressed = pEntry->compressed; - atomic_sub_fetch_64(&pDeleter->cachedSize, pEntry->dataLen); - atomic_sub_fetch_64(&gDataSinkStat.cachedSize, pEntry->dataLen); + atomic_sub_fetch_64(&pDeleter->cachedSize, pEntry->dataLen); + atomic_sub_fetch_64(&gDataSinkStat.cachedSize, pEntry->dataLen); taosMemoryFreeClear(pDeleter->nextOutput.pData); // todo persistent pOutput->bufStatus = updateStatus(pDeleter); @@ -202,7 +211,7 @@ static int32_t getDataBlock(SDataSinkHandle* pHandle, SOutputData* pOutput) { pOutput->useconds = pDeleter->useconds; pOutput->precision = pDeleter->pSchema->precision; taosThreadMutexUnlock(&pDeleter->mutex); - + return TSDB_CODE_SUCCESS; } @@ -211,7 +220,7 @@ static int32_t destroyDataSinker(SDataSinkHandle* pHandle) { atomic_sub_fetch_64(&gDataSinkStat.cachedSize, pDeleter->cachedSize); taosMemoryFreeClear(pDeleter->nextOutput.pData); taosArrayDestroy(pDeleter->pParam->pUidList); - taosMemoryFree(pDeleter->pParam); + taosMemoryFree(pDeleter->pParam); while (!taosQueueEmpty(pDeleter->pDataBlocks)) { SDataDeleterBuf* pBuf = NULL; taosReadQitem(pDeleter->pDataBlocks, (void**)&pBuf); @@ -230,14 +239,15 @@ static int32_t getCacheSize(struct SDataSinkHandle* pHandle, uint64_t* size) { return TSDB_CODE_SUCCESS; } -int32_t createDataDeleter(SDataSinkManager* pManager, const SDataSinkNode* pDataSink, DataSinkHandle* pHandle, void *pParam) { +int32_t createDataDeleter(SDataSinkManager* pManager, const SDataSinkNode* pDataSink, DataSinkHandle* pHandle, + void* pParam) { SDataDeleterHandle* deleter = taosMemoryCalloc(1, sizeof(SDataDeleterHandle)); if (NULL == deleter) { terrno = TSDB_CODE_QRY_OUT_OF_MEMORY; return TSDB_CODE_QRY_OUT_OF_MEMORY; } - SDataDeleterNode* pDeleterNode = (SDataDeleterNode *)pDataSink; + SDataDeleterNode* pDeleterNode = (SDataDeleterNode*)pDataSink; deleter->sink.fPut = putDataBlock; deleter->sink.fEndPut = endPut; deleter->sink.fGetLen = getDataLength; diff --git a/source/libs/function/CMakeLists.txt b/source/libs/function/CMakeLists.txt index ea401e56e5..dd048a047a 100644 --- a/source/libs/function/CMakeLists.txt +++ b/source/libs/function/CMakeLists.txt @@ -14,7 +14,7 @@ target_include_directories( target_link_libraries( function - PRIVATE os util common nodes scalar qcom transport + PRIVATE os util common nodes scalar qcom transport stream PUBLIC uv_a ) diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 5844784ea4..f3d3393ac3 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -311,22 +311,6 @@ static int32_t translateInOutStr(SFunctionNode* pFunc, char* pErrBuf, int32_t le return TSDB_CODE_SUCCESS; } -static int32_t translateMinMax(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; - if (!IS_TIMESTAMP_TYPE(paraType) && !IS_NUMERIC_TYPE(paraType) && !IS_NULL_TYPE(paraType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } else if (IS_NULL_TYPE(paraType)) { - paraType = TSDB_DATA_TYPE_BIGINT; - } - - pFunc->node.resType = (SDataType){.bytes = tDataTypes[paraType].bytes, .type = paraType}; - return TSDB_CODE_SUCCESS; -} - static int32_t translateTrimStr(SFunctionNode* pFunc, char* pErrBuf, int32_t len, bool isLtrim) { if (1 != 
LIST_LENGTH(pFunc->pParameterList)) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); @@ -2076,7 +2060,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "min", .type = FUNCTION_TYPE_MIN, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED | FUNC_MGT_SELECT_FUNC, - .translateFunc = translateMinMax, + .translateFunc = translateInOutNum, .dataRequiredFunc = statisDataRequired, .getEnvFunc = getMinmaxFuncEnv, .initFunc = minmaxFunctionSetup, @@ -2091,7 +2075,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "max", .type = FUNCTION_TYPE_MAX, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED | FUNC_MGT_SELECT_FUNC, - .translateFunc = translateMinMax, + .translateFunc = translateInOutNum, .dataRequiredFunc = statisDataRequired, .getEnvFunc = getMinmaxFuncEnv, .initFunc = minmaxFunctionSetup, diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 0d7fd1a6da..9b502eded7 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -18,6 +18,7 @@ #include "function.h" #include "query.h" #include "querynodes.h" +#include "streamState.h" #include "tcompare.h" #include "tdatablock.h" #include "tdigest.h" @@ -56,8 +57,13 @@ typedef struct SAvgRes { } SAvgRes; typedef struct STuplePos { - int32_t pageId; - int32_t offset; + union { + struct { + int32_t pageId; + int32_t offset; + }; + STupleKey streamTupleKey; + }; } STuplePos; typedef struct SMinmaxResInfo { @@ -1146,7 +1152,8 @@ bool getMinmaxFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) { return true; } -static STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock); +static STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, + const STupleKey* pKey); static int32_t updateTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos); static const char* loadTupleData(SqlFunctionCtx* pCtx, const STuplePos* pPos); @@ -1201,10 +1208,10 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { pBuf->v = *(int64_t*)tval; if (pCtx->subsidiaries.num > 0) { index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock, NULL); } } else { - if (IS_SIGNED_NUMERIC_TYPE(type) || IS_TIMESTAMP_TYPE(type)) { + if (IS_SIGNED_NUMERIC_TYPE(type)) { int64_t prev = 0; GET_TYPED_DATA(prev, int64_t, type, &pBuf->v); @@ -1213,7 +1220,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { *(int64_t*)&pBuf->v = val; if (pCtx->subsidiaries.num > 0) { index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock, NULL); } } } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { @@ -1225,7 +1232,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { *(uint64_t*)&pBuf->v = val; if (pCtx->subsidiaries.num > 0) { index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock, NULL); } } } else if (type == TSDB_DATA_TYPE_DOUBLE) { @@ -1237,7 +1244,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { *(double*)&pBuf->v = val; 
if (pCtx->subsidiaries.num > 0) { index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock, NULL); } } } else if (type == TSDB_DATA_TYPE_FLOAT) { @@ -1251,7 +1258,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (pCtx->subsidiaries.num > 0) { index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock, NULL); } } } @@ -1263,7 +1270,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { int32_t start = pInput->startRowIndex; int32_t numOfRows = pInput->numOfRows; - if (IS_SIGNED_NUMERIC_TYPE(type) || IS_TIMESTAMP_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) { + if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) { if (type == TSDB_DATA_TYPE_TINYINT || type == TSDB_DATA_TYPE_BOOL) { int8_t* pData = (int8_t*)pCol->pData; int8_t* val = (int8_t*)&pBuf->v; @@ -1276,7 +1283,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL); } pBuf->assign = true; } else { @@ -1307,7 +1314,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL); } pBuf->assign = true; } else { @@ -1338,7 +1345,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL); } pBuf->assign = true; } else { @@ -1357,7 +1364,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { numOfElems += 1; } - } else if (type == TSDB_DATA_TYPE_BIGINT || type == TSDB_DATA_TYPE_TIMESTAMP) { + } else if (type == TSDB_DATA_TYPE_BIGINT) { int64_t* pData = (int64_t*)pCol->pData; int64_t* val = (int64_t*)&pBuf->v; @@ -1369,7 +1376,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL); } pBuf->assign = true; } else { @@ -1402,7 +1409,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL); } pBuf->assign = true; } else { @@ -1433,7 +1440,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL); } pBuf->assign = true; } else { @@ -1464,7 +1471,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); + pBuf->tuplePos 
= saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL); } pBuf->assign = true; } else { @@ -1495,7 +1502,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL); } pBuf->assign = true; } else { @@ -1527,7 +1534,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL); } pBuf->assign = true; } else { @@ -1558,7 +1565,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL); } pBuf->assign = true; } else { @@ -1581,7 +1588,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { _min_max_over: if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pBuf->nullTupleSaved) { - pBuf->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock); + pBuf->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, NULL); pBuf->nullTupleSaved = true; } return numOfElems; @@ -2758,7 +2765,7 @@ static void firstlastSaveTupleData(const SSDataBlock* pSrcBlock, int32_t rowInde } if (!pInfo->hasResult) { - pInfo->pos = saveTupleData(pCtx, rowIndex, pSrcBlock); + pInfo->pos = saveTupleData(pCtx, rowIndex, pSrcBlock, NULL); } else { updateTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos); } @@ -3426,7 +3433,7 @@ int32_t topFunction(SqlFunctionCtx* pCtx) { } if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) { - pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock); + pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, NULL); pRes->nullTupleSaved = true; } return TSDB_CODE_SUCCESS; @@ -3454,7 +3461,7 @@ int32_t bottomFunction(SqlFunctionCtx* pCtx) { } if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) { - pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock); + pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, NULL); pRes->nullTupleSaved = true; } @@ -3506,7 +3513,7 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData // save the data of this tuple if (pCtx->subsidiaries.num > 0) { - pItem->tuplePos = saveTupleData(pCtx, rowIndex, pSrcBlock); + pItem->tuplePos = saveTupleData(pCtx, rowIndex, pSrcBlock, NULL); } #ifdef BUF_PAGE_DEBUG qDebug("page_saveTuple i:%d, item:%p,pageId:%d, offset:%d\n", pEntryInfo->numOfRes, pItem, pItem->tuplePos.pageId, @@ -3578,7 +3585,8 @@ void* serializeTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SSubsid return buf; } -static STuplePos doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length) { +static STuplePos doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length, + const STupleKey* pKey) { STuplePos p = {0}; if (pHandle->pBuf != NULL) { SFilePage* pPage = NULL; @@ -3604,12 +3612,16 @@ static STuplePos doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf releaseBufPage(pHandle->pBuf, pPage); } else { // other tuple save policy + if 
(streamStateFuncPut(pHandle->pState, pKey, pBuf, length) < 0) { + ASSERT(0); + } + p.streamTupleKey = *pKey; } return p; } -STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock) { +STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, const STupleKey* pKey) { if (pCtx->subsidiaries.rowLen == 0) { int32_t rowLen = 0; for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) { @@ -3622,7 +3634,7 @@ STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBloc } char* buf = serializeTupleData(pSrcBlock, rowIndex, &pCtx->subsidiaries, pCtx->subsidiaries.buf); - return doSaveTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen); + return doSaveTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen, pKey); } static int32_t doUpdateTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length, STuplePos* pPos) { @@ -3632,6 +3644,7 @@ static int32_t doUpdateTupleData(SSerializeDataHandle* pHandle, const void* pBuf setBufPageDirty(pPage, true); releaseBufPage(pHandle->pBuf, pPage); } else { + streamStateFuncPut(pHandle->pState, &pPos->streamTupleKey, pBuf, length); } return TSDB_CODE_SUCCESS; @@ -3650,7 +3663,10 @@ static char* doLoadTupleData(SSerializeDataHandle* pHandle, const STuplePos* pPo releaseBufPage(pHandle->pBuf, pPage); return p; } else { - return NULL; + void* value = NULL; + int32_t vLen; + streamStateFuncGet(pHandle->pState, &pPos->streamTupleKey, &value, &vLen); + return (char*)value; } } @@ -4764,7 +4780,7 @@ int32_t stateDurationFunction(SqlFunctionCtx* pCtx) { colDataAppendNULL(pOutput, i); // handle selectivity if (pCtx->subsidiaries.num > 0) { - appendSelectivityValue(pCtx, i, i); + appendSelectivityValue(pCtx, i, pCtx->offset + numOfElems - 1); } continue; } @@ -4781,11 +4797,11 @@ int32_t stateDurationFunction(SqlFunctionCtx* pCtx) { } else { pInfo->durationStart = 0; } - colDataAppend(pOutput, i, (char*)&output, false); + colDataAppend(pOutput, pCtx->offset + numOfElems - 1, (char*)&output, false); // handle selectivity if (pCtx->subsidiaries.num > 0) { - appendSelectivityValue(pCtx, i, i); + appendSelectivityValue(pCtx, i, pCtx->offset + numOfElems - 1); } } @@ -4981,7 +4997,7 @@ static void doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* da if (pInfo->numSampled < pInfo->samples) { sampleAssignResult(pInfo, data, pInfo->numSampled); if (pCtx->subsidiaries.num > 0) { - pInfo->tuplePos[pInfo->numSampled] = saveTupleData(pCtx, index, pCtx->pSrcBlock); + pInfo->tuplePos[pInfo->numSampled] = saveTupleData(pCtx, index, pCtx->pSrcBlock, NULL); } pInfo->numSampled++; } else { @@ -5012,7 +5028,7 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx) { } if (pInfo->numSampled == 0 && pCtx->subsidiaries.num > 0 && !pInfo->nullTupleSaved) { - pInfo->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock); + pInfo->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, NULL); pInfo->nullTupleSaved = true; } @@ -5398,8 +5414,8 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { int32_t i = pInput->startRowIndex; if (pCtx->start.key != INT64_MIN) { - ASSERT((pCtx->start.key < tsList[i] && pCtx->order == TSDB_ORDER_ASC) || - (pCtx->start.key > tsList[i] && pCtx->order == TSDB_ORDER_DESC)); + //ASSERT((pCtx->start.key < tsList[i] && pCtx->order == TSDB_ORDER_ASC) || + // (pCtx->start.key > tsList[i] && pCtx->order == TSDB_ORDER_DESC)); ASSERT(last->key == INT64_MIN); for (; i < pInput->numOfRows + pInput->startRowIndex; ++i) { @@ 
-5447,6 +5463,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { numOfElems++; INIT_INTP_POINT(st, tsList[i], val[i]); + if (pInfo->p.key == st.key) { + return TSDB_CODE_FUNC_DUP_TIMESTAMP; + } + pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -5462,6 +5482,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { numOfElems++; INIT_INTP_POINT(st, tsList[i], val[i]); + if (pInfo->p.key == st.key) { + return TSDB_CODE_FUNC_DUP_TIMESTAMP; + } + pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -5476,6 +5500,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { numOfElems++; INIT_INTP_POINT(st, tsList[i], val[i]); + if (pInfo->p.key == st.key) { + return TSDB_CODE_FUNC_DUP_TIMESTAMP; + } + pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -5490,6 +5518,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { numOfElems++; INIT_INTP_POINT(st, tsList[i], val[i]); + if (pInfo->p.key == st.key) { + return TSDB_CODE_FUNC_DUP_TIMESTAMP; + } + pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -5504,6 +5536,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { numOfElems++; INIT_INTP_POINT(st, tsList[i], val[i]); + if (pInfo->p.key == st.key) { + return TSDB_CODE_FUNC_DUP_TIMESTAMP; + } + pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -5518,6 +5554,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { numOfElems++; INIT_INTP_POINT(st, tsList[i], val[i]); + if (pInfo->p.key == st.key) { + return TSDB_CODE_FUNC_DUP_TIMESTAMP; + } + pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -5532,6 +5572,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { numOfElems++; INIT_INTP_POINT(st, tsList[i], val[i]); + if (pInfo->p.key == st.key) { + return TSDB_CODE_FUNC_DUP_TIMESTAMP; + } + pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -5546,6 +5590,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { numOfElems++; INIT_INTP_POINT(st, tsList[i], val[i]); + if (pInfo->p.key == st.key) { + return TSDB_CODE_FUNC_DUP_TIMESTAMP; + } + pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -5560,6 +5608,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { numOfElems++; INIT_INTP_POINT(st, tsList[i], val[i]); + if (pInfo->p.key == st.key) { + return TSDB_CODE_FUNC_DUP_TIMESTAMP; + } + pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -5574,6 +5626,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { numOfElems++; INIT_INTP_POINT(st, tsList[i], val[i]); + if (pInfo->p.key == st.key) { + return TSDB_CODE_FUNC_DUP_TIMESTAMP; + } + pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } diff --git a/source/libs/index/src/indexTfile.c b/source/libs/index/src/indexTfile.c index c3be0ea6f5..1fc631e9f3 100644 --- a/source/libs/index/src/indexTfile.c +++ b/source/libs/index/src/indexTfile.c @@ -323,10 +323,6 @@ static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTRslt* tr, while ((rt = stmStNextWith(st, NULL)) != NULL) { FstSlice* s = &rt->data; char* ch = (char*)fstSliceData(s, NULL); - // if (0 != strncmp(ch, tem->colName, tem->nColName)) { - // swsResultDestroy(rt); - // break; - //} TExeCond cond = cmpFn(ch, p, tem->colType); if (MATCH == cond) { diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index eb0b604d37..48e21e137f 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -399,6 +399,8 @@ static int32_t logicVnodeModifCopy(const SVnodeModifyLogicNode* pSrc, SVnodeModi COPY_SCALAR_FIELD(modifyType); 
COPY_SCALAR_FIELD(msgType); CLONE_NODE_FIELD(pAffectedRows); + CLONE_NODE_FIELD(pStartTs); + CLONE_NODE_FIELD(pEndTs); COPY_SCALAR_FIELD(tableId); COPY_SCALAR_FIELD(stableId); COPY_SCALAR_FIELD(tableType); diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index a41462e3fd..afd05eff1d 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -2431,6 +2431,8 @@ static const char* jkDeletePhysiPlanTsColName = "TsColName"; static const char* jkDeletePhysiPlanDeleteTimeRangeStartKey = "DeleteTimeRangeStartKey"; static const char* jkDeletePhysiPlanDeleteTimeRangeEndKey = "DeleteTimeRangeEndKey"; static const char* jkDeletePhysiPlanAffectedRows = "AffectedRows"; +static const char* jkDeletePhysiPlanStartTs = "StartTs"; +static const char* jkDeletePhysiPlanEndTs = "EndTs"; static int32_t physiDeleteNodeToJson(const void* pObj, SJson* pJson) { const SDataDeleterNode* pNode = (const SDataDeleterNode*)pObj; @@ -2457,6 +2459,12 @@ static int32_t physiDeleteNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddObject(pJson, jkDeletePhysiPlanAffectedRows, nodeToJson, pNode->pAffectedRows); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkDeletePhysiPlanStartTs, nodeToJson, pNode->pStartTs); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkDeletePhysiPlanEndTs, nodeToJson, pNode->pEndTs); + } return code; } @@ -2486,6 +2494,12 @@ static int32_t jsonToPhysiDeleteNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeObject(pJson, jkDeletePhysiPlanAffectedRows, &pNode->pAffectedRows); } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkDeletePhysiPlanStartTs, &pNode->pStartTs); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkDeletePhysiPlanEndTs, &pNode->pEndTs); + } return code; } diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index efe820fee2..202ee6717a 100644 --- a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -2665,7 +2665,9 @@ enum { PHY_DELETER_CODE_TABLE_FNAME, PHY_DELETER_CODE_TS_COL_NAME, PHY_DELETER_CODE_DELETE_TIME_RANGE, - PHY_DELETER_CODE_AFFECTED_ROWS + PHY_DELETER_CODE_AFFECTED_ROWS, + PHY_DELETER_CODE_START_TS, + PHY_DELETER_CODE_END_TS }; static int32_t physiDeleteNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { @@ -2690,6 +2692,12 @@ static int32_t physiDeleteNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { if (TSDB_CODE_SUCCESS == code) { code = tlvEncodeObj(pEncoder, PHY_DELETER_CODE_AFFECTED_ROWS, nodeToMsg, pNode->pAffectedRows); } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeObj(pEncoder, PHY_DELETER_CODE_START_TS, nodeToMsg, pNode->pStartTs); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeObj(pEncoder, PHY_DELETER_CODE_END_TS, nodeToMsg, pNode->pEndTs); + } return code; } @@ -2722,6 +2730,12 @@ static int32_t msgToPhysiDeleteNode(STlvDecoder* pDecoder, void* pObj) { case PHY_DELETER_CODE_AFFECTED_ROWS: code = msgToNodeFromTlv(pTlv, (void**)&pNode->pAffectedRows); break; + case PHY_DELETER_CODE_START_TS: + code = msgToNodeFromTlv(pTlv, (void**)&pNode->pStartTs); + break; + case PHY_DELETER_CODE_END_TS: + code = msgToNodeFromTlv(pTlv, (void**)&pNode->pEndTs); + break; default: break; } diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index 61b2ad954f..805ddb9e42 100644 --- 
a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -727,6 +727,8 @@ void nodesDestroyNode(SNode* pNode) { nodesDestroyNode(pStmt->pFromTable); nodesDestroyNode(pStmt->pWhere); nodesDestroyNode(pStmt->pCountFunc); + nodesDestroyNode(pStmt->pFirstFunc); + nodesDestroyNode(pStmt->pLastFunc); nodesDestroyNode(pStmt->pTagCond); break; } @@ -791,6 +793,8 @@ void nodesDestroyNode(SNode* pNode) { destroyVgDataBlockArray(pLogicNode->pDataBlocks); // pVgDataBlocks is weak reference nodesDestroyNode(pLogicNode->pAffectedRows); + nodesDestroyNode(pLogicNode->pStartTs); + nodesDestroyNode(pLogicNode->pEndTs); taosMemoryFreeClear(pLogicNode->pVgroupList); nodesDestroyList(pLogicNode->pInsertCols); break; @@ -997,6 +1001,8 @@ void nodesDestroyNode(SNode* pNode) { SDataDeleterNode* pSink = (SDataDeleterNode*)pNode; destroyDataSinkNode((SDataSinkNode*)pSink); nodesDestroyNode(pSink->pAffectedRows); + nodesDestroyNode(pSink->pStartTs); + nodesDestroyNode(pSink->pEndTs); break; } case QUERY_NODE_PHYSICAL_SUBPLAN: { diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index 511ae25810..6f11c653a4 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -1787,10 +1787,10 @@ SNode* createRevokeStmt(SAstCreateContext* pCxt, int64_t privileges, SToken* pDb return (SNode*)pStmt; } -SNode* createCountFuncForDelete(SAstCreateContext* pCxt) { +SNode* createFuncForDelete(SAstCreateContext* pCxt, const char* pFuncName) { SFunctionNode* pFunc = (SFunctionNode*)nodesMakeNode(QUERY_NODE_FUNCTION); CHECK_OUT_OF_MEM(pFunc); - strcpy(pFunc->functionName, "count"); + strcpy(pFunc->functionName, pFuncName); if (TSDB_CODE_SUCCESS != nodesListMakeStrictAppend(&pFunc->pParameterList, createPrimaryKeyCol(pCxt, NULL))) { nodesDestroyNode((SNode*)pFunc); CHECK_OUT_OF_MEM(NULL); @@ -1804,8 +1804,10 @@ SNode* createDeleteStmt(SAstCreateContext* pCxt, SNode* pTable, SNode* pWhere) { CHECK_OUT_OF_MEM(pStmt); pStmt->pFromTable = pTable; pStmt->pWhere = pWhere; - pStmt->pCountFunc = createCountFuncForDelete(pCxt); - if (NULL == pStmt->pCountFunc) { + pStmt->pCountFunc = createFuncForDelete(pCxt, "count"); + pStmt->pFirstFunc = createFuncForDelete(pCxt, "first"); + pStmt->pLastFunc = createFuncForDelete(pCxt, "last"); + if (NULL == pStmt->pCountFunc || NULL == pStmt->pFirstFunc || NULL == pStmt->pLastFunc) { nodesDestroyNode((SNode*)pStmt); CHECK_OUT_OF_MEM(NULL); } diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c index 06e19b98cd..1af61b0ca8 100644 --- a/source/libs/parser/src/parInsert.c +++ b/source/libs/parser/src/parInsert.c @@ -1423,9 +1423,7 @@ static int32_t parseDataFromFile(SInsertParseContext* pCxt, SToken filePath, STa } static void destroyInsertParseContextForTable(SInsertParseContext* pCxt) { - if (!pCxt->pComCxt->async) { - taosMemoryFreeClear(pCxt->pTableMeta); - } + taosMemoryFreeClear(pCxt->pTableMeta); destroyBoundColumnInfo(&pCxt->tags); tdDestroySVCreateTbReq(&pCxt->createTblReq); } diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index f1a59ebf51..ea9eefbb29 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -3347,10 +3347,16 @@ static int32_t translateDelete(STranslateContext* pCxt, SDeleteStmt* pDelete) { if (TSDB_CODE_SUCCESS == code) { code = translateDeleteWhere(pCxt, pDelete); } + pCxt->currClause = SQL_CLAUSE_SELECT; if (TSDB_CODE_SUCCESS == code) { - 
pCxt->currClause = SQL_CLAUSE_SELECT; code = translateExpr(pCxt, &pDelete->pCountFunc); } + if (TSDB_CODE_SUCCESS == code) { + code = translateExpr(pCxt, &pDelete->pFirstFunc); + } + if (TSDB_CODE_SUCCESS == code) { + code = translateExpr(pCxt, &pDelete->pLastFunc); + } return code; } @@ -5960,12 +5966,6 @@ typedef struct SVgroupCreateTableBatch { char dbName[TSDB_DB_NAME_LEN]; } SVgroupCreateTableBatch; -static void destroyCreateTbReq(SVCreateTbReq* pReq) { - taosMemoryFreeClear(pReq->name); - taosMemoryFreeClear(pReq->comment); - taosMemoryFreeClear(pReq->ntb.schemaRow.pSchema); -} - static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt* pStmt, const SVgroupInfo* pVgroupInfo, SVgroupCreateTableBatch* pBatch) { char dbFName[TSDB_DB_FNAME_LEN] = {0}; @@ -5980,7 +5980,7 @@ static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt* if (pStmt->pOptions->commentNull == false) { req.comment = strdup(pStmt->pOptions->comment); if (NULL == req.comment) { - destroyCreateTbReq(&req); + tdDestroySVCreateTbReq(&req); return TSDB_CODE_OUT_OF_MEMORY; } req.commentLen = strlen(pStmt->pOptions->comment); @@ -5991,7 +5991,7 @@ static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt* req.ntb.schemaRow.version = 1; req.ntb.schemaRow.pSchema = taosMemoryCalloc(req.ntb.schemaRow.nCols, sizeof(SSchema)); if (NULL == req.name || NULL == req.ntb.schemaRow.pSchema) { - destroyCreateTbReq(&req); + tdDestroySVCreateTbReq(&req); return TSDB_CODE_OUT_OF_MEMORY; } if (pStmt->ignoreExists) { @@ -6007,7 +6007,7 @@ static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt* strcpy(pBatch->dbName, pStmt->dbName); pBatch->req.pArray = taosArrayInit(1, sizeof(struct SVCreateTbReq)); if (NULL == pBatch->req.pArray) { - destroyCreateTbReq(&req); + tdDestroySVCreateTbReq(&req); return TSDB_CODE_OUT_OF_MEMORY; } taosArrayPush(pBatch->req.pArray, &req); @@ -6052,16 +6052,7 @@ static void destroyCreateTbReqBatch(void* data) { size_t size = taosArrayGetSize(pTbBatch->req.pArray); for (int32_t i = 0; i < size; ++i) { SVCreateTbReq* pTableReq = taosArrayGet(pTbBatch->req.pArray, i); - taosMemoryFreeClear(pTableReq->name); - taosMemoryFreeClear(pTableReq->comment); - - if (pTableReq->type == TSDB_NORMAL_TABLE) { - taosMemoryFreeClear(pTableReq->ntb.schemaRow.pSchema); - } else if (pTableReq->type == TSDB_CHILD_TABLE) { - taosMemoryFreeClear(pTableReq->ctb.pTag); - taosMemoryFreeClear(pTableReq->ctb.name); - taosArrayDestroy(pTableReq->ctb.tagName); - } + tdDestroySVCreateTbReq(pTableReq); } taosArrayDestroy(pTbBatch->req.pArray); @@ -6422,6 +6413,8 @@ static int32_t rewriteCreateSubTable(STranslateContext* pCxt, SCreateSubTableCla if (TSDB_CODE_SUCCESS == code) { addCreateTbReqIntoVgroup(pCxt->pParseCxt->acctId, pVgroupHashmap, pStmt, pTag, pSuperTableMeta->uid, pStmt->useTableName, &info, tagName, pSuperTableMeta->tableInfo.numOfTags); + } else { + taosMemoryFree(pTag); } taosArrayDestroy(tagName); diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c index 32513fd0b6..daab80667c 100644 --- a/source/libs/parser/src/parUtil.c +++ b/source/libs/parser/src/parUtil.c @@ -1124,7 +1124,7 @@ int32_t getTableMetaFromCacheForInsert(SArray* pTableMetaPos, SParseMetaCache* p int32_t reqIndex = *(int32_t*)taosArrayGet(pTableMetaPos, tableNo); SMetaRes* pRes = taosArrayGet(pMetaCache->pTableMetaData, reqIndex); if (TSDB_CODE_SUCCESS == pRes->code) { - *pMeta = pRes->pRes; + *pMeta = tableMetaDup(pRes->pRes); if (NULL == *pMeta) { 
return TSDB_CODE_OUT_OF_MEMORY; } diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index bf72f52105..fea2be55f9 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -1372,9 +1372,21 @@ static int32_t createDeleteAggLogicNode(SLogicPlanContext* pCxt, SDeleteStmt* pD } int32_t code = nodesListMakeStrictAppend(&pAgg->pAggFuncs, nodesCloneNode(pDelete->pCountFunc)); + if (TSDB_CODE_SUCCESS == code) { + code = nodesListStrictAppend(pAgg->pAggFuncs, nodesCloneNode(pDelete->pFirstFunc)); + } + if (TSDB_CODE_SUCCESS == code) { + code = nodesListStrictAppend(pAgg->pAggFuncs, nodesCloneNode(pDelete->pLastFunc)); + } if (TSDB_CODE_SUCCESS == code) { code = rewriteExpr(pAgg->pAggFuncs, &pDelete->pCountFunc); } + if (TSDB_CODE_SUCCESS == code) { + code = rewriteExpr(pAgg->pAggFuncs, &pDelete->pFirstFunc); + } + if (TSDB_CODE_SUCCESS == code) { + code = rewriteExpr(pAgg->pAggFuncs, &pDelete->pLastFunc); + } // set the output if (TSDB_CODE_SUCCESS == code) { code = createColumnByRewriteExprs(pAgg->pAggFuncs, &pAgg->node.pTargets); @@ -1405,7 +1417,9 @@ static int32_t createVnodeModifLogicNodeByDelete(SLogicPlanContext* pCxt, SDelet strcpy(pModify->tsColName, pRealTable->pMeta->schema->name); pModify->deleteTimeRange = pDelete->timeRange; pModify->pAffectedRows = nodesCloneNode(pDelete->pCountFunc); - if (NULL == pModify->pAffectedRows) { + pModify->pStartTs = nodesCloneNode(pDelete->pFirstFunc); + pModify->pEndTs = nodesCloneNode(pDelete->pLastFunc); + if (NULL == pModify->pAffectedRows || NULL == pModify->pStartTs || NULL == pModify->pEndTs) { nodesDestroyNode((SNode*)pModify); return TSDB_CODE_OUT_OF_MEMORY; } diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 0cbb833a4d..b0177e61ed 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -1323,9 +1323,9 @@ static int32_t createSortPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren static int32_t createPartitionPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SPartitionLogicNode* pPartLogicNode, SPhysiNode** pPhyNode) { - SPartitionPhysiNode* pPart = - (SPartitionPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pPartLogicNode, - pCxt->pPlanCxt->streamQuery ? QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION : QUERY_NODE_PHYSICAL_PLAN_PARTITION); + SPartitionPhysiNode* pPart = (SPartitionPhysiNode*)makePhysiNode( + pCxt, (SLogicNode*)pPartLogicNode, + pCxt->pPlanCxt->streamQuery ? 
QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION : QUERY_NODE_PHYSICAL_PLAN_PARTITION); if (NULL == pPart) { return TSDB_CODE_OUT_OF_MEMORY; } @@ -1670,6 +1670,12 @@ static int32_t createDataDeleter(SPhysiPlanContext* pCxt, SVnodeModifyLogicNode* int32_t code = setNodeSlotId(pCxt, pRoot->pOutputDataBlockDesc->dataBlockId, -1, pModify->pAffectedRows, &pDeleter->pAffectedRows); + if (TSDB_CODE_SUCCESS == code) { + code = setNodeSlotId(pCxt, pRoot->pOutputDataBlockDesc->dataBlockId, -1, pModify->pStartTs, &pDeleter->pStartTs); + } + if (TSDB_CODE_SUCCESS == code) { + code = setNodeSlotId(pCxt, pRoot->pOutputDataBlockDesc->dataBlockId, -1, pModify->pEndTs, &pDeleter->pEndTs); + } if (TSDB_CODE_SUCCESS == code) { pDeleter->sink.pInputDataBlockDesc = (SDataBlockDescNode*)nodesCloneNode((SNode*)pRoot->pOutputDataBlockDesc); if (NULL == pDeleter->sink.pInputDataBlockDesc) { diff --git a/source/libs/scheduler/inc/schInt.h b/source/libs/scheduler/inc/schInt.h index 957fd46ba5..7fea286732 100644 --- a/source/libs/scheduler/inc/schInt.h +++ b/source/libs/scheduler/inc/schInt.h @@ -283,8 +283,9 @@ typedef struct SSchJob { } SSchJob; typedef struct SSchTaskCtx { - int64_t jobRid; + int64_t jobRid; SSchTask *pTask; + bool asyncLaunch; } SSchTaskCtx; extern SSchedulerMgmt schMgmt; diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c index 5a64aaaebb..275fbb8016 100644 --- a/source/libs/scheduler/src/schRemote.c +++ b/source/libs/scheduler/src/schRemote.c @@ -396,7 +396,7 @@ int32_t schHandleCallback(void *param, SDataBuf *pMsg, int32_t rspCode) { tstrerror(rspCode)); SCH_ERR_JRET(schProcessOnCbBegin(&pJob, &pTask, pParam->queryId, pParam->refId, pParam->taskId)); - + code = schHandleResponseMsg(pJob, pTask, pParam->execId, pMsg, rspCode); pMsg->pData = NULL; diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c index 969c6fc8a6..8b3a2016b8 100644 --- a/source/libs/scheduler/src/schTask.c +++ b/source/libs/scheduler/src/schTask.c @@ -138,12 +138,6 @@ int32_t schUpdateTaskExecNode(SSchJob *pJob, SSchTask *pTask, void *handle, int3 return TSDB_CODE_SUCCESS; } - if ((execId != pTask->execId) || pTask->waitRetry) { // ignore it - SCH_TASK_DLOG("handle not updated since execId %d is already not current execId %d, waitRetry %d", execId, - pTask->execId, pTask->waitRetry); - return TSDB_CODE_SUCCESS; - } - SSchNodeInfo *nodeInfo = taosHashGet(pTask->execNodes, &execId, sizeof(execId)); if (NULL == nodeInfo) { // ignore it SCH_TASK_DLOG("handle not updated since execId %d already not exist, current execId %d, waitRetry %d", execId, @@ -162,11 +156,16 @@ int32_t schUpdateTaskHandle(SSchJob *pJob, SSchTask *pTask, bool dropExecNode, v if (dropExecNode) { SCH_RET(schDropTaskExecNode(pJob, pTask, handle, execId)); } - - SCH_SET_TASK_HANDLE(pTask, handle); - + schUpdateTaskExecNode(pJob, pTask, handle, execId); + if ((execId != pTask->execId) || pTask->waitRetry) { // ignore it + SCH_TASK_DLOG("handle not updated since execId %d is already not current execId %d, waitRetry %d", execId, pTask->execId, pTask->waitRetry); + SCH_ERR_RET(TSDB_CODE_SCH_IGNORE_ERROR); + } + + SCH_SET_TASK_HANDLE(pTask, handle); + return TSDB_CODE_SUCCESS; } @@ -352,7 +351,7 @@ int32_t schDoTaskRedirect(SSchJob *pJob, SSchTask *pTask, SDataBuf *pData, int32 pTask->waitRetry = true; schDropTaskOnExecNode(pJob, pTask); taosHashClear(pTask->execNodes); - SCH_ERR_JRET(schRemoveTaskFromExecList(pJob, pTask)); + schRemoveTaskFromExecList(pJob, pTask); schDeregisterTaskHb(pJob, pTask); 
atomic_sub_fetch_32(&pTask->level->taskLaunchedNum, 1); taosMemoryFreeClear(pTask->msg); @@ -430,12 +429,14 @@ int32_t schHandleRedirect(SSchJob *pJob, SSchTask *pTask, SDataBuf *pData, int32 code = schDoTaskRedirect(pJob, pTask, pData, rspCode); taosMemoryFree(pData->pData); + taosMemoryFree(pData->pEpSet); SCH_RET(code); _return: taosMemoryFree(pData->pData); + taosMemoryFree(pData->pEpSet); SCH_RET(schProcessOnTaskFailure(pJob, pTask, code)); } @@ -597,7 +598,7 @@ int32_t schTaskCheckSetRetry(SSchJob *pJob, SSchTask *pTask, int32_t errCode, bo int32_t schHandleTaskRetry(SSchJob *pJob, SSchTask *pTask) { atomic_sub_fetch_32(&pTask->level->taskLaunchedNum, 1); - SCH_ERR_RET(schRemoveTaskFromExecList(pJob, pTask)); + schRemoveTaskFromExecList(pJob, pTask); SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_INIT); if (SCH_TASK_NEED_FLOW_CTRL(pJob, pTask)) { @@ -744,8 +745,7 @@ _return: int32_t schRemoveTaskFromExecList(SSchJob *pJob, SSchTask *pTask) { int32_t code = taosHashRemove(pJob->execTasks, &pTask->taskId, sizeof(pTask->taskId)); if (code) { - SCH_TASK_ELOG("task failed to rm from execTask list, code:%x", code); - SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); + SCH_TASK_WLOG("task already not in execTask list, code:%x", code); } return TSDB_CODE_SUCCESS; @@ -834,6 +834,11 @@ int32_t schLaunchTaskImpl(void *param) { } SSchTask *pTask = pCtx->pTask; + + if (pCtx->asyncLaunch) { + SCH_LOCK_TASK(pTask); + } + int8_t status = 0; int32_t code = 0; @@ -880,8 +885,6 @@ int32_t schLaunchTaskImpl(void *param) { _return: - taosMemoryFree(param); - if (pJob->taskNum >= SCH_MIN_AYSNC_EXEC_NUM) { if (code) { code = schProcessOnTaskFailure(pJob, pTask, code); @@ -891,8 +894,14 @@ _return: } } + if (pCtx->asyncLaunch) { + SCH_UNLOCK_TASK(pTask); + } + schReleaseJob(pJob->refId); + taosMemoryFree(param); + SCH_RET(code); } @@ -906,6 +915,7 @@ int32_t schAsyncLaunchTaskImpl(SSchJob *pJob, SSchTask *pTask) { param->pTask = pTask; if (pJob->taskNum >= SCH_MIN_AYSNC_EXEC_NUM) { + param->asyncLaunch = true; taosAsyncExec(schLaunchTaskImpl, param, NULL); } else { SCH_ERR_RET(schLaunchTaskImpl(param)); diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c index 5efdbb4679..6cd5132bb9 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -35,6 +35,10 @@ SStreamState* streamStateOpen(char* path, SStreamTask* pTask) { goto _err; } + if (tdbTbOpen("func.state.db", sizeof(STupleKey), -1, STupleKeyCmpr, pState->db, &pState->pFuncStateDb) < 0) { + goto _err; + } + if (streamStateBegin(pState) < 0) { goto _err; } @@ -44,8 +48,9 @@ SStreamState* streamStateOpen(char* path, SStreamTask* pTask) { return pState; _err: - if (pState->pStateDb) tdbTbClose(pState->pStateDb); - if (pState->db) tdbClose(pState->db); + tdbTbClose(pState->pStateDb); + tdbTbClose(pState->pFuncStateDb); + tdbClose(pState->db); taosMemoryFree(pState); return NULL; } @@ -53,6 +58,7 @@ _err: void streamStateClose(SStreamState* pState) { tdbCommit(pState->db, &pState->txn); tdbTbClose(pState->pStateDb); + tdbTbClose(pState->pFuncStateDb); tdbClose(pState->db); taosMemoryFree(pState); @@ -101,6 +107,17 @@ int32_t streamStateAbort(SStreamState* pState) { return 0; } +int32_t streamStateFuncPut(SStreamState* pState, const STupleKey* key, const void* value, int32_t vLen) { + return tdbTbUpsert(pState->pFuncStateDb, key, sizeof(STupleKey), value, vLen, &pState->txn); +} +int32_t streamStateFuncGet(SStreamState* pState, const STupleKey* key, void** pVal, int32_t* pVLen) { + return 
tdbTbGet(pState->pFuncStateDb, key, sizeof(STupleKey), pVal, pVLen); +} + +int32_t streamStateFuncDel(SStreamState* pState, const STupleKey* key) { + return tdbTbDelete(pState->pFuncStateDb, key, sizeof(STupleKey), &pState->txn); +} + int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) { return tdbTbUpsert(pState->pStateDb, key, sizeof(SWinKey), value, vLen, &pState->txn); } diff --git a/source/libs/tdb/src/db/tdbBtree.c b/source/libs/tdb/src/db/tdbBtree.c index 1480920f90..c6ecd37680 100644 --- a/source/libs/tdb/src/db/tdbBtree.c +++ b/source/libs/tdb/src/db/tdbBtree.c @@ -489,7 +489,7 @@ static int tdbBtreeBalanceDeeper(SBTree *pBt, SPage *pRoot, SPage **ppChild, TXN } // Copy the root page content to the child page - tdbPageCopy(pRoot, pChild); + tdbPageCopy(pRoot, pChild, 0); // Reinitialize the root page zArg.flags = TDB_BTREE_ROOT; @@ -742,7 +742,7 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx for (int i = 0; i < nOlds; i++) { tdbPageCreate(pOlds[0]->pageSize, &pOldsCopy[i], tdbDefaultMalloc, NULL); tdbBtreeInitPage(pOldsCopy[i], &iarg, 0); - tdbPageCopy(pOlds[i], pOldsCopy[i]); + tdbPageCopy(pOlds[i], pOldsCopy[i], 0); } iNew = 0; nNewCells = 0; @@ -840,7 +840,7 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx i8 flags = TDB_BTREE_ROOT | TDB_BTREE_PAGE_IS_LEAF(pNews[0]); // copy content to the parent page tdbBtreeInitPage(pParent, &(SBtreeInitPageArg){.flags = flags, .pBt = pBt}, 0); - tdbPageCopy(pNews[0], pParent); + tdbPageCopy(pNews[0], pParent, 1); } for (int i = 0; i < 3; i++) { diff --git a/source/libs/tdb/src/db/tdbPage.c b/source/libs/tdb/src/db/tdbPage.c index a3f376b929..1e2eefabf4 100644 --- a/source/libs/tdb/src/db/tdbPage.c +++ b/source/libs/tdb/src/db/tdbPage.c @@ -229,7 +229,7 @@ int tdbPageDropCell(SPage *pPage, int idx, TXN *pTxn, SBTree *pBt) { return 0; } -void tdbPageCopy(SPage *pFromPage, SPage *pToPage) { +void tdbPageCopy(SPage *pFromPage, SPage *pToPage, int deepCopyOvfl) { int delta, nFree; pToPage->pFreeStart = pToPage->pPageHdr + (pFromPage->pFreeStart - pFromPage->pPageHdr); @@ -250,8 +250,15 @@ void tdbPageCopy(SPage *pFromPage, SPage *pToPage) { // Copy the overflow cells for (int iOvfl = 0; iOvfl < pFromPage->nOverflow; iOvfl++) { + SCell *pNewCell = pFromPage->apOvfl[iOvfl]; + if (deepCopyOvfl) { + int szCell = (*pFromPage->xCellSize)(pFromPage, pFromPage->apOvfl[iOvfl], 0, NULL, NULL); + pNewCell = (SCell *)tdbOsMalloc(szCell); + memcpy(pNewCell, pFromPage->apOvfl[iOvfl], szCell); + } + + pToPage->apOvfl[iOvfl] = pNewCell; pToPage->aiOvfl[iOvfl] = pFromPage->aiOvfl[iOvfl]; - pToPage->apOvfl[iOvfl] = pFromPage->apOvfl[iOvfl]; } pToPage->nOverflow = pFromPage->nOverflow; } diff --git a/source/libs/tdb/src/inc/tdbInt.h b/source/libs/tdb/src/inc/tdbInt.h index df6ba8b35f..29a9665c15 100644 --- a/source/libs/tdb/src/inc/tdbInt.h +++ b/source/libs/tdb/src/inc/tdbInt.h @@ -333,7 +333,7 @@ void tdbPageInit(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell int tdbPageInsertCell(SPage *pPage, int idx, SCell *pCell, int szCell, u8 asOvfl); int tdbPageDropCell(SPage *pPage, int idx, TXN *pTxn, SBTree *pBt); int tdbPageUpdateCell(SPage *pPage, int idx, SCell *pCell, int szCell, TXN *pTxn, SBTree *pBt); -void tdbPageCopy(SPage *pFromPage, SPage *pToPage); +void tdbPageCopy(SPage *pFromPage, SPage *pToPage, int copyOvflCells); int tdbPageCapacity(int pageSize, int amHdrSize); static inline SCell *tdbPageGetCell(SPage *pPage, int idx) 
{ diff --git a/source/libs/tfs/src/tfs.c b/source/libs/tfs/src/tfs.c index 62aec219df..4600e5e568 100644 --- a/source/libs/tfs/src/tfs.c +++ b/source/libs/tfs/src/tfs.c @@ -113,6 +113,8 @@ SDiskSize tfsGetSize(STfs *pTfs) { return size; } +int32_t tfsGetLevel(STfs *pTfs) { return pTfs->nlevel; } + int32_t tfsAllocDisk(STfs *pTfs, int32_t expLevel, SDiskID *pDiskId) { pDiskId->level = expLevel; pDiskId->id = -1; diff --git a/source/os/src/osSemaphore.c b/source/os/src/osSemaphore.c index 8cc6f0ef2e..5baba5af1e 100644 --- a/source/os/src/osSemaphore.c +++ b/source/os/src/osSemaphore.c @@ -400,6 +400,9 @@ int tsem_init(tsem_t *psem, int flags, unsigned int count) { } int tsem_destroy(tsem_t *psem) { + if (psem == NULL || *psem == NULL) return -1; + dispatch_release(*psem); + *psem = NULL; return 0; } @@ -421,13 +424,7 @@ int tsem_timewait(tsem_t *psem, int64_t nanosecs) { return 0; } -bool taosCheckPthreadValid(TdThread thread) { - int32_t ret = taosThreadKill(thread, 0); - if (ret == ESRCH) return false; - if (ret == EINVAL) return false; - // alive - return true; -} +bool taosCheckPthreadValid(TdThread thread) { return thread != 0; } int64_t taosGetSelfPthreadId() { TdThread thread = taosThreadSelf(); diff --git a/source/os/src/osSysinfo.c b/source/os/src/osSysinfo.c index 19e9568bbe..6867c1373b 100644 --- a/source/os/src/osSysinfo.c +++ b/source/os/src/osSysinfo.c @@ -344,30 +344,27 @@ int32_t taosGetCpuInfo(char *cpuModel, int32_t maxLen, float *numOfCores) { *numOfCores = si.dwNumberOfProcessors; return 0; #elif defined(_TD_DARWIN_64) - char *line = NULL; - size_t size = 0; + char buf[16]; int32_t done = 0; int32_t code = -1; - TdFilePtr pFile = taosOpenFile("/proc/cpuinfo", TD_FILE_READ | TD_FILE_STREAM); - if (pFile == NULL) return false; - - while (done != 3 && (size = taosGetLineFile(pFile, &line)) != -1) { - line[size - 1] = '\0'; - if (((done & 1) == 0) && strncmp(line, "model name", 10) == 0) { - const char *v = strchr(line, ':') + 2; - tstrncpy(cpuModel, v, maxLen); - code = 0; - done |= 1; - } else if (((done & 2) == 0) && strncmp(line, "cpu cores", 9) == 0) { - const char *v = strchr(line, ':') + 2; - *numOfCores = atof(v); - done |= 2; - } + TdCmdPtr pCmd = taosOpenCmd("sysctl -n machdep.cpu.brand_string"); + if (pCmd == NULL) return code; + if (taosGetsCmd(pCmd, maxLen, cpuModel) > 0) { + code = 0; + done |= 1; } + taosCloseCmd(&pCmd); - if (line != NULL) taosMemoryFree(line); - taosCloseFile(&pFile); + pCmd = taosOpenCmd("sysctl -n machdep.cpu.core_count"); + if (pCmd == NULL) return code; + memset(buf, 0, sizeof(buf)); + if (taosGetsCmd(pCmd, maxLen, cpuModel) > 0) { + code = 0; + done |= 2; + *numOfCores = atof(buf); + } + taosCloseCmd(&pCmd); return code; #else diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 044cdc86b4..50c42ff170 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -288,6 +288,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_CONSUMER_NOT_READY, "Consumer not ready") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOPIC_SUBSCRIBED, "Topic subscribed cannot be dropped") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOPIC_MUST_BE_DELETED, "Topic must be dropped first") TAOS_DEFINE_ERROR(TSDB_CODE_MND_CGROUP_USED, "Consumer group being used by some consumer") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_IN_REBALANCE, "Topic being rebalanced") // mnode-stream TAOS_DEFINE_ERROR(TSDB_CODE_MND_STREAM_ALREADY_EXIST, "Stream already exists") @@ -579,6 +580,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_FUNTION_PARA_NUM, "Invalid function par 
TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_FUNTION_PARA_TYPE, "Invalid function para type") TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_FUNTION_PARA_VALUE, "Invalid function para value") TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION, "Not buildin function") +TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_DUP_TIMESTAMP, "Duplicate timestamps not allowed in function") //udf TAOS_DEFINE_ERROR(TSDB_CODE_UDF_STOPPING, "udf is stopping") diff --git a/tests/system-test/0-others/taosShellNetChk.py b/tests/system-test/0-others/taosShellNetChk.py index dd44852d49..80f6a6bc30 100644 --- a/tests/system-test/0-others/taosShellNetChk.py +++ b/tests/system-test/0-others/taosShellNetChk.py @@ -231,7 +231,7 @@ class TDTestCase: if platform.system().lower() == 'windows': os.system('ps -a | grep taos | awk \'{print $2}\' | xargs kill -9') else: - os.system('pkill taos') + os.system('pkill -9 taos') def stop(self): tdSql.close() diff --git a/tests/system-test/1-insert/mutil_stage.py b/tests/system-test/1-insert/mutil_stage.py index 764da1f166..63317e8036 100644 --- a/tests/system-test/1-insert/mutil_stage.py +++ b/tests/system-test/1-insert/mutil_stage.py @@ -1,4 +1,5 @@ from datetime import datetime +from platform import platform import time from typing import List, Any, Tuple @@ -83,6 +84,8 @@ class TDTestCase: def del_old_datadir(self, filename): cmd = f"sed -i '/^dataDir/d' {filename}" + if platform.system().lower() == 'darwin': + cmd = f"sed -i '' '/^dataDir/d' {filename}" if os.system(cmd) != 0: tdLog.exit(cmd) diff --git a/tests/system-test/2-query/max.py b/tests/system-test/2-query/max.py index 169b1c2c38..5cc9a2d9e8 100644 --- a/tests/system-test/2-query/max.py +++ b/tests/system-test/2-query/max.py @@ -38,18 +38,7 @@ class TDTestCase: elif i>=9: tdSql.checkData(0, 0, np.max(floatData)) - tdSql.query(f"select max(now()) from {dbname}.stb_1") - tdSql.checkRows(1) - - tdSql.query(f"select last(ts) from {dbname}.stb_1") - lastTs = tdSql.getData(0, 0) - tdSql.query(f"select max(ts) from {dbname}.stb_1") - tdSql.checkData(0, 0, lastTs) - - tdSql.query(f"select last(ts) from {dbname}.stb") - lastTs = tdSql.getData(0, 0) - tdSql.query(f"select max(ts) from {dbname}.stb") - tdSql.checkData(0, 0, lastTs) + tdSql.error(f"select max(now()) from {dbname}.stb_1") tdSql.query(f"select max(col1) from {dbname}.stb_1 where col2<=5") tdSql.checkData(0,0,5) @@ -78,13 +67,7 @@ class TDTestCase: elif i>=9: tdSql.checkData(0, 0, np.max(floatData)) - tdSql.query(f"select max(now()) from {dbname}.ntb") - tdSql.checkRows(1) - - tdSql.query(f"select last(ts) from {dbname}.ntb") - lastTs = tdSql.getData(0, 0) - tdSql.query(f"select max(ts) from {dbname}.ntb") - tdSql.checkData(0, 0, lastTs) + tdSql.error(f"select max(now()) from {dbname}.ntb") tdSql.query(f"select max(col1) from {dbname}.ntb where col2<=5") tdSql.checkData(0,0,5) diff --git a/tests/system-test/2-query/max_partition.py b/tests/system-test/2-query/max_partition.py index 01c2677242..08bb7675ad 100644 --- a/tests/system-test/2-query/max_partition.py +++ b/tests/system-test/2-query/max_partition.py @@ -181,7 +181,7 @@ class TDTestCase: # bug need fix tdSql.checkData(0,1,None) - tdSql.query(f"select c1 , twa(c1) from {dbname}.stb partition by c1 order by c1") + tdSql.query(f"select c1 , twa(c1) from {dbname}.sub_stb_1 partition by c1 order by c1") tdSql.checkRows(11) tdSql.checkData(0,1,None) diff --git a/tests/system-test/2-query/min.py b/tests/system-test/2-query/min.py index 3d46b7b222..d97c4340f4 100644 --- a/tests/system-test/2-query/min.py +++ b/tests/system-test/2-query/min.py @@ -37,6 
+37,8 @@ class TDTestCase: floatData.append(i + 0.1) # max verifacation + tdSql.error(f"select min(now()) from {dbname}.stb_1") + tdSql.error(f"select min(ts) from {dbname}.stb_1") tdSql.error(f"select min(col7) from {dbname}.stb_1") tdSql.error(f"select min(col8) from {dbname}.stb_1") tdSql.error(f"select min(col9) from {dbname}.stb_1") @@ -67,20 +69,9 @@ class TDTestCase: tdSql.query(f"select min(col1) from {dbname}.stb_1 where col2>=5") tdSql.checkData(0,0,5) - tdSql.query(f"select min(now()) from {dbname}.stb_1") - tdSql.checkRows(1) - - tdSql.query(f"select first(ts) from {dbname}.stb_1") - firstTs = tdSql.getData(0, 0) - tdSql.query(f"select min(ts) from {dbname}.stb_1") - tdSql.checkData(0, 0, firstTs) - - tdSql.query(f"select first(ts) from {dbname}.stb_1") - firstTs = tdSql.getData(0, 0) - tdSql.query(f"select min(ts) from {dbname}.stb_1") - tdSql.checkData(0, 0, firstTs) - + tdSql.error(f"select min(now()) from {dbname}.stb_1") + tdSql.error(f"select min(ts) from {dbname}.stb_1") tdSql.error(f"select min(col7) from {dbname}.stb_1") tdSql.error(f"select min(col8) from {dbname}.stb_1") tdSql.error(f"select min(col9) from {dbname}.stb_1") @@ -111,19 +102,8 @@ class TDTestCase: tdSql.query(f"select min(col1) from {dbname}.stb where col2>=5") tdSql.checkData(0,0,5) - tdSql.query(f"select min(now()) from {dbname}.stb_1") - tdSql.checkRows(1) - - tdSql.query(f"select first(ts) from {dbname}.stb_1") - firstTs = tdSql.getData(0, 0) - tdSql.query(f"select min(ts) from {dbname}.stb_1") - tdSql.checkData(0, 0, firstTs) - - tdSql.query(f"select first(ts) from {dbname}.stb_1") - firstTs = tdSql.getData(0, 0) - tdSql.query(f"select min(ts) from {dbname}.stb_1") - tdSql.checkData(0, 0, firstTs) - + tdSql.error(f"select min(now()) from {dbname}.stb_1") + tdSql.error(f"select min(ts) from {dbname}.stb_1") tdSql.error(f"select min(col7) from {dbname}.ntb") tdSql.error(f"select min(col8) from {dbname}.ntb") tdSql.error(f"select min(col9) from {dbname}.ntb") @@ -154,19 +134,6 @@ class TDTestCase: tdSql.query(f"select min(col1) from {dbname}.ntb where col2>=5") tdSql.checkData(0,0,5) - tdSql.query(f"select min(now()) from {dbname}.stb_1") - tdSql.checkRows(1) - - tdSql.query(f"select first(ts) from {dbname}.stb_1") - firstTs = tdSql.getData(0, 0) - tdSql.query(f"select min(ts) from {dbname}.stb_1") - tdSql.checkData(0, 0, firstTs) - - tdSql.query(f"select first(ts) from {dbname}.stb_1") - firstTs = tdSql.getData(0, 0) - tdSql.query(f"select min(ts) from {dbname}.stb_1") - tdSql.checkData(0, 0, firstTs) - def stop(self): tdSql.close() diff --git a/tests/system-test/2-query/twa.py b/tests/system-test/2-query/twa.py index 62940477cf..4c163da485 100644 --- a/tests/system-test/2-query/twa.py +++ b/tests/system-test/2-query/twa.py @@ -124,7 +124,7 @@ class TDTestCase: tdSql.checkData(0,1,4.500000000) # mixup with other functions - tdSql.query(f"select twa(c1),twa(c2),max(c1),elapsed(ts) from {dbname}.stb1 ") + tdSql.query(f"select twa(c1),twa(c2),max(c1),elapsed(ts) from {dbname}.ct1 ") tdSql.checkData(0,0,1.000000000) tdSql.checkData(0,1,11111.000000000) tdSql.checkData(0,2,1)
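/*
 * Minimal standalone sketch (not TDengine code) of the time-weighted-average
 * idea behind twaFunction() and the duplicate-timestamp guard added above,
 * which now surfaces TSDB_CODE_FUNC_DUP_TIMESTAMP instead of asserting:
 * two consecutive points sharing a timestamp give a zero-width trapezoid and
 * an ill-defined series, so the function rejects them. All names and types
 * here are simplified stand-ins for illustration, not the real structs.
 */
#include <stdio.h>

typedef struct { long long ts; double val; } Point;

/* trapezoid area between two points, analogous in spirit to twa_get_area() */
static double area(Point a, Point b) {
  return (a.val + b.val) * (double)(b.ts - a.ts) / 2.0;
}

/* returns 0 on success, -1 when a duplicate timestamp is encountered */
static int twa(const Point *p, int n, double *out) {
  if (n < 1) return -1;
  if (n == 1) { *out = p[0].val; return 0; }
  double acc = 0.0;
  for (int i = 1; i < n; i++) {
    if (p[i].ts == p[i - 1].ts) return -1;   /* duplicate timestamps not allowed */
    acc += area(p[i - 1], p[i]);
  }
  *out = acc / (double)(p[n - 1].ts - p[0].ts);
  return 0;
}

int main(void) {
  Point rows[] = {{1000, 1.0}, {2000, 3.0}, {3000, 5.0}};
  double v;
  if (twa(rows, 3, &v) == 0) printf("twa = %f\n", v);   /* prints 3.000000 */
  return 0;
}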