Merge branch '3.0' into merge/mainto3.0_0105

This commit is contained in:
Shengliang Guan 2023-01-05 16:08:23 +08:00
commit 3016f35e9f
244 changed files with 21816 additions and 14213 deletions

View File

@ -173,7 +173,7 @@ def pre_test_build_mac() {
'''
sh '''
cd ${WK}/debug
cmake .. -DBUILD_TEST=true -DBUILD_HTTPS=false
cmake .. -DBUILD_TEST=true -DBUILD_HTTPS=false -DCMAKE_BUILD_TYPE=Release
make -j10
ctest -j10 || exit 7
'''

View File

@ -1,6 +1,7 @@
cmake_minimum_required(VERSION 3.0)
set(CMAKE_VERBOSE_MAKEFILE OFF)
set(TD_BUILD_TAOSA_INTERNAL FALSE)
#set output directory
SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/lib)

View File

@ -21,7 +21,7 @@ IF (TD_LINUX)
ELSEIF (TD_WINDOWS)
SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/tools/make_install.bat")
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} :needAdmin ${TD_SOURCE_DIR} ${PROJECT_BINARY_DIR} Windows ${TD_VER_NUMBER})")
INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} :needAdmin ${TD_SOURCE_DIR} ${PROJECT_BINARY_DIR} Windows ${TD_VER_NUMBER} ${TD_BUILD_TAOSA_INTERNAL})")
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/tools/make_install.sh")
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")

View File

@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG 4776778
GIT_TAG 69964a0
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

View File

@ -47,7 +47,6 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http
:::note
- In JSON protocol, strings will be converted to NCHAR type and numeric values will be converted to double type.
- Only data in array format is accepted and so an array must be used even if there is only one row.
- The child table name is generated automatically according to a rule that guarantees its uniqueness. Alternatively, you can configure `smlChildTableName` in taos.cfg to use the value of a specified tag as the table name, provided that tag value is globally unique. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, then when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` is created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
:::

View File

@ -27,10 +27,11 @@ database_option: {
| PRECISION {'ms' | 'us' | 'ns'}
| REPLICA value
| RETENTIONS ingestion_duration:keep_duration ...
| STRICT {'off' | 'on'}
| WAL_LEVEL {1 | 2}
| VGROUPS value
| SINGLE_STABLE {0 | 1}
| TABLE_PREFIX value
| TABLE_SUFFIX value
| WAL_RETENTION_PERIOD value
| WAL_ROLL_PERIOD value
| WAL_RETENTION_SIZE value
@ -61,9 +62,6 @@ database_option: {
- PRECISION: specifies the precision at which a database records timestamps. Enter ms for milliseconds, us for microseconds, or ns for nanoseconds. The default value is ms.
- REPLICA: specifies the number of replicas that are made of the database. Enter 1 or 3. The default value is 1. The value of the REPLICA parameter cannot exceed the number of dnodes in the cluster.
- RETENTIONS: specifies the retention period for data aggregated at various intervals. For example, RETENTIONS 15s:7d,1m:21d,15m:50d indicates that data aggregated every 15 seconds is retained for 7 days, data aggregated every 1 minute is retained for 21 days, and data aggregated every 15 minutes is retained for 50 days. You must enter three aggregation intervals and corresponding retention periods.
- STRICT: specifies whether strong data consistency is enabled. The default value is off.
- on: Strong consistency is enabled and implemented through the Raft consensus algorithm. In this mode, an operation is considered successful once it is confirmed by half of the nodes in the cluster.
- off: Strong consistency is disabled. In this mode, an operation is considered successful when it is initiated by the local node.
- WAL_LEVEL: specifies whether fsync is enabled. The default value is 1.
- 1: WAL is enabled but fsync is disabled.
- 2: WAL and fsync are both enabled.
@ -71,6 +69,8 @@ database_option: {
- SINGLE_STABLE: specifies whether the database can contain more than one supertable.
- 0: The database can contain multiple supertables.
- 1: The database can contain only one supertable.
- TABLE_PREFIX: the length of the prefix in the table name that is ignored when distributing tables to vnodes based on the table name.
- TABLE_SUFFIX: the length of the suffix in the table name that is ignored when distributing tables to vnodes based on the table name.
- WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. Enter a time in seconds. The default value for single-replica databases is 0, meaning each WAL file is deleted immediately after its contents are written to disk; -1 means WAL files are never deleted. The default value for multi-replica databases is 4 days.
- WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. Enter a size in KB. The default value for single-replica databases is 0, meaning each WAL file is deleted immediately after its contents are written to disk; -1 means WAL files are never deleted. The default value for multi-replica databases is -1.
- WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. The default value for single-replica databases is 0, meaning a new WAL file is created only after the previous WAL file was written to disk. The default value for multi-replica databases is 1 day.
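As a quick illustration of several of the options above, a database could be created as in the following sketch (the database name and the specific values are placeholders, not recommendations):
```sql
CREATE DATABASE power
  PRECISION 'ms'
  REPLICA 1
  WAL_LEVEL 2
  SINGLE_STABLE 0
  TABLE_PREFIX 2
  TABLE_SUFFIX 2
  WAL_RETENTION_PERIOD 3600;
```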

View File

@ -876,7 +876,8 @@ INTERP(expr)
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified by the `EVERY` parameter. The parameter `EVERY` must be an integer, with no quotes, with a time unit of b (nanosecond), u (microsecond), a (millisecond), s (second), m (minute), h (hour), d (day), or w (week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
- Interpolation is performed based on the `FILL` parameter.
- `INTERP` can only be used to interpolate within a single timeline, so it must be used with `partition by tbname` when applied to a supertable.
- Pseudo column `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points(support after version 3.0.1.4).
- Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points (supported in version 3.0.1.4 and later).
- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether a result row is an original record or a data point generated by the interpolation algorithm (supported in version 3.0.2.1 and later).
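For example, assuming the `meters` supertable used elsewhere in the documentation (with a `current` column), a query returning both pseudocolumns might look like the following sketch; the time range and interval are placeholders:
```sql
SELECT _irowts, _isfilled, INTERP(current)
FROM meters
PARTITION BY tbname
RANGE('2023-01-01 00:00:00', '2023-01-01 00:10:00')
EVERY(1m)
FILL(LINEAR);
```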
### LAST

View File

@ -108,7 +108,7 @@ SHOW STREAMS;
When you create a stream, you can use the TRIGGER parameter to specify triggering conditions for it.
For non-windowed processing, triggering occurs in real time. For windowed processing, there are three methods of triggering:
For non-windowed processing, triggering occurs in real time. For windowed processing, there are three methods of triggering, with AT_ONCE as the default:
1. AT_ONCE: triggers on write
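For instance, a stream using the default AT_ONCE trigger might be created as in the following sketch (the stream name, target supertable, and source table `meters` are placeholders):
```sql
CREATE STREAM power_stream TRIGGER AT_ONCE
  INTO power_stream_output
  AS SELECT _wstart, AVG(current) AS avg_current
     FROM meters
     PARTITION BY tbname
     INTERVAL(10s);
```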

View File

@ -17,6 +17,7 @@ The following list shows all reserved keywords:
- ADD
- AFTER
- AGGREGATE
- ALIVE
- ALL
- ALTER
- ANALYZE

View File

@ -178,6 +178,139 @@ SHOW TABLE DISTRIBUTED table_name;
Shows how table data is distributed.
Example: Below is an example of this command displaying the block distribution of table `d0` in detailed (vertical) format.
```sql
show table distributed d0\G;
```
<details>
<summary> Show Example </summary>
<pre><code>
*************************** 1.row ***************************
_block_dist: Total_Blocks=[5] Total_Size=[93.65 Kb] Average_size=[18.73 Kb] Compression_Ratio=[23.98 %]
Total_Blocks: Table `d0` contains 5 blocks in total
Total_Size: The total size of all the data blocks in table `d0` is 93.65 KB
Average_size: The average size of each block is 18.73 KB
Compression_Ratio: The data compression rate is 23.98%
*************************** 2.row ***************************
_block_dist: Total_Rows=[20000] Inmem_Rows=[0] MinRows=[3616] MaxRows=[4096] Average_Rows=[4000]
Total_Rows: Table `d0` contains 20,000 rows
Inmem_Rows: The number of rows still in memory (not yet committed to disk) is 0, i.e. there are no such rows
MinRows: The minimum number of rows in a block is 3,616
MaxRows: The maximum number of rows in a block is 4,096
Average_Rows: The average number of rows in a block is 4,000
*************************** 3.row ***************************
_block_dist: Total_Tables=[1] Total_Files=[2]
Total_Tables: The number of child tables, 1 in this example
Total_Files: The number of files storing the table's data, 2 in this example
*************************** 4.row ***************************
_block_dist: --------------------------------------------------------------------------------
*************************** 5.row ***************************
_block_dist: 0100 |
*************************** 6.row ***************************
_block_dist: 0299 |
*************************** 7.row ***************************
_block_dist: 0498 |
*************************** 8.row ***************************
_block_dist: 0697 |
*************************** 9.row ***************************
_block_dist: 0896 |
*************************** 10.row ***************************
_block_dist: 1095 |
*************************** 11.row ***************************
_block_dist: 1294 |
*************************** 12.row ***************************
_block_dist: 1493 |
*************************** 13.row ***************************
_block_dist: 1692 |
*************************** 14.row ***************************
_block_dist: 1891 |
*************************** 15.row ***************************
_block_dist: 2090 |
*************************** 16.row ***************************
_block_dist: 2289 |
*************************** 17.row ***************************
_block_dist: 2488 |
*************************** 18.row ***************************
_block_dist: 2687 |
*************************** 19.row ***************************
_block_dist: 2886 |
*************************** 20.row ***************************
_block_dist: 3085 |
*************************** 21.row ***************************
_block_dist: 3284 |
*************************** 22.row ***************************
_block_dist: 3483 ||||||||||||||||| 1 (20.00%)
*************************** 23.row ***************************
_block_dist: 3682 |
*************************** 24.row ***************************
_block_dist: 3881 ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| 4 (80.00%)
Query OK, 24 row(s) in set (0.002444s)
</code></pre>
</details>
The above shows the distribution of blocks according to the number of rows in each block. From this example we can see:
- `_block_dist: 3483 ||||||||||||||||| 1 (20.00%)` means there is one block whose row count is between 3,483 and 3,681.
- `_block_dist: 3881 ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| 4 (80.00%)` means there are 4 blocks whose row count is between 3,881 and 4,096. The number of blocks whose row count falls into any other range is zero.
## SHOW TAGS
```sql

View File

@ -878,8 +878,10 @@ The source code of the sample application is under `TDengine/examples/JDBC`:
| taos-jdbcdriver version | major changes |
| :---------------------: | :--------------------------------------------: |
| 3.0.3 | fix timestamp resolution error for REST connections on JDK 17+ |
| 3.0.1 - 3.0.2 | fix an issue where resultSet data was sometimes parsed incorrectly. 3.0.1 is compiled with JDK 11; you are advised to use 3.0.2 in JDK 8 environments |
| 3.0.0 | Support for TDengine 3.0 |
| 2.0.42 | fix wasNull interface return value in WebSocket connection |
| 2.0.41 | fix decode method of username and password in REST connection |
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters |
| 2.0.38 | JDBC REST connections add bulk pull function |

View File

@ -21,6 +21,7 @@ taosAdapter provides the following features.
- Seamless connection to collectd
- Seamless connection to StatsD
- Supports Prometheus remote_read and remote_write
- Get table's VGroup ID
## taosAdapter architecture diagram
@ -59,6 +60,7 @@ Usage of taosAdapter:
--collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045)
--collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root")
--collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10)
--collectd.ttl int collectd data ttl. Env "TAOS_ADAPTER_COLLECTD_TTL" (default 0, means no ttl)
-c, --config string config path default /etc/taos/taosadapter.toml
--cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true)
--cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials"
@ -100,6 +102,7 @@ Usage of taosAdapter:
--node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s)
--node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100])
--node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root")
--node_exporter.ttl int node_exporter data ttl. Env "TAOS_ADAPTER_NODE_EXPORTER_TTL"(default 0, means no ttl)
--opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true)
--opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1)
--opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb])
@ -110,6 +113,7 @@ Usage of taosAdapter:
--opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049])
--opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE"
--opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
--opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL"(default 0, means no ttl)
--pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT" (default 1h0m0s)
--pool.maxConnect int max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT" (default 4000)
--pool.maxIdle int max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE" (default 4000)
@ -131,6 +135,7 @@ Usage of taosAdapter:
--statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE"
--statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root")
--statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10)
--statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL" (default 0, means no ttl)
--taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE"
--version Print the version and exit
```
@ -174,6 +179,7 @@ See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/bl
node_export is an exporter for machine metrics. Please visit [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) for more information.
- Support for Prometheus remote_read and remote_write
remote_read and remote_write are interfaces for Prometheus data read and write from/to other data storage solution. Please visit [https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) for more information.
- Get table's VGroup ID. For more information about VGroup, please refer to [primary-logic-unit](/tdinternal/arch/#primary-logic-unit).
## Interfaces
@ -195,6 +201,7 @@ Support InfluxDB query parameters as follows.
- `precision` The time precision used by TDengine
- `u` TDengine user name
- `p` TDengine password
- `ttl` The time to live (TTL) of automatically created subtables. This value cannot be updated. TDengine uses the TTL value carried with the first record of a subtable when creating that subtable. For more information, please refer to [Create Table](/taos-sql/table/#create-table)
Note: InfluxDB token authorization is not supported at present. Only Basic authorization and query parameter validation are supported.
Example: curl --request POST http://127.0.0.1:6041/influxdb/v1/write?db=test --user "root:taosdata" --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"
@ -236,6 +243,10 @@ node_export is an exporter of hardware and OS metrics exposed by the \*NIX kerne
<Prometheus />
### Get table's VGroup ID
You can call `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>` to get table's VGroup ID. For more information about VGroup, please refer to [primary-logic-unit](/tdinternal/arch/#primary-logic-unit).
## Memory usage optimization methods
taosAdapter monitors its memory usage during operation and adjusts it according to two thresholds. Valid values are integers between 1 and 100, representing a percentage of the system's physical memory.

View File

@ -204,6 +204,12 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
- **-a/--replica <replicaNum\>** :
Specify the number of replicas when creating the database. The default value is 1.
- **-k/--keep-trying <NUMBER\>** :
Number of retry attempts if an insert fails. The default is not to retry. Available with v3.0.9 and later.
- **-z/--trying-interval <NUMBER\>** :
Specify the interval between retries, in milliseconds. The value must be a positive number and only takes effect when retrying is enabled with `-k`. Available with v3.0.9 and later.
- **-V/--version** :
Show version information only. Users should not use it with other parameters.
@ -231,6 +237,10 @@ The parameters listed in this section apply to all function modes.
`filetype` must be set to `insert` in the insertion scenario. See [General Configuration Parameters](#General Configuration Parameters)
- **keep_trying** : Number of retry attempts if an insert fails. The default is not to retry. Available with v3.0.9 and later.
- **trying_interval** : Specify the interval between retries, in milliseconds. The value must be a positive number and only takes effect when `keep_trying` is enabled. Available with v3.0.9 and later.
#### Database related configuration parameters
The parameters related to database creation are configured in `dbinfo` in the JSON configuration file, as follows. The other parameters correspond to those specified in the `CREATE DATABASE` statement; see [Database](../../taos-sql/database) for details.

View File

@ -19,7 +19,7 @@ Users should not use taosdump to back up raw data, environment settings, hardwar
There are two ways to install taosdump:
- Install the taosTools official installer. Please find taosTools from [All download links](https://www.tdengine.com/all-downloads) page and download and install it.
- Install the taosTools official installer. Please find taosTools from [Release History](https://docs.taosdata.com/releases/tools/) page and download and install it.
- Compile taos-tools separately and install it. Please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.

View File

@ -153,11 +153,11 @@ The parameters described in this document by the effect that they have on the sy
| Meaning | Execution policy for query statements |
| Unit | None |
| Default | 1 |
| Notes | 1: Run queries on vnodes and not on qnodes |
| Value Range | 1: Run queries on vnodes and not on qnodes
2: Run subtasks without scan operators on qnodes and subtasks with scan operators on vnodes.
3: Only run scan operators on vnodes; run all other operators on qnodes.
3: Only run scan operators on vnodes; run all other operators on qnodes. |
### querySmaOptimize
@ -173,6 +173,14 @@ The parameters described in this document by the effect that they have on the sy
1: Enable SMA indexing and perform queries from suitable statements on precomputation results.|
### countAlwaysReturnValue
| Attribute | Description |
| -------- | -------------------------------- |
| Applicable | Server only |
| Meaning | Whether count()/hyperloglog() returns a value when the input data is empty or NULL |
| Value Range | 0: Return an empty row; 1: Return 0 |
| Default | 1 |
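For example, with the default value of 1, a count over a time range that contains no data still returns one row whose value is 0, whereas with 0 the same query returns no rows. The table name and time filter in this sketch are placeholders:
```sql
SELECT COUNT(*) FROM meters WHERE ts > '2035-01-01 00:00:00';
```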
### maxNumOfDistinctRes
@ -307,6 +315,14 @@ The charset that takes effect is UTF-8.
| Meaning | All data files are stored in this directory |
| Default Value | /var/lib/taos |
### tempDir
| Attribute | Description |
| -------- | ------------------------------------------ |
| Applicable | Server only |
| Meaning | The directory where temporary files generated during system operation are stored |
| Default | /tmp |
### minimalTmpDirGB
| Attribute | Description |
@ -336,89 +352,6 @@ The charset that takes effect is UTF-8.
| Value Range | 0-4096 |
| Default Value | 2x the CPU cores |
## Time Parameters
### statusInterval
| Attribute | Description |
| -------- | --------------------------- |
| Applicable | Server Only |
| Meaning | the interval of dnode reporting status to mnode |
| Unit | second |
| Value Range | 1-10 |
| Default Value | 1 |
### shellActivityTimer
| Attribute | Description |
| -------- | --------------------------------- |
| Applicable | Server and Client |
| Meaning | The interval for TDengine CLI to send heartbeat to mnode |
| Unit | second |
| Value Range | 1-120 |
| Default Value | 3 |
## Performance Optimization Parameters
### numOfCommitThreads
| Attribute | Description |
| -------- | ---------------------- |
| Applicable | Server Only |
| Meaning | Maximum number of threads for committing data to disk |
| Default Value | |
## Compression Parameters
### compressMsgSize
| Attribute | Description |
| ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| Applicable | Server Only |
| Meaning | The message size threshold above which messages are compressed. Setting the value to 64330 bytes is recommended for good message compression. |
| Unit | bytes |
| Value Range | 0: compress all messages; >0: compress messages larger than this value; -1: no compression |
| Default Value | -1 |
### compressColData
| Attribute | Description |
| -------- | --------------------------------------------------------------------------------------- |
| Applicable | Server Only |
| Meaning | The threshold for size of column data to trigger compression for the query result |
| Unit | bytes |
| Value Range | 0: always compress; >0: only compress when the size of any column data exceeds the threshold; -1: no compression |
| Default Value | -1 |
| Note | Available from version 2.3.0.0 |
## Continuous Query Parameters
### minSlidingTime
| Attribute | Description |
| ------------- | -------------------------------------------------------- |
| Applicable | Server Only |
| Meaning | Minimum sliding time of time window |
| Unit | millisecond or microsecond , depending on time precision |
| Value Range | 10-1000000 |
| Default Value | 10 |
### minIntervalTime
| Attribute | Description |
| ------------- | --------------------------- |
| Applicable | Server Only |
| Meaning | Minimum size of time window |
| Unit | millisecond |
| Value Range | 1-1000000 |
| Default Value | 10 |
:::info
To prevent system resource from being exhausted by multiple concurrent streams, a random delay is applied on each stream automatically. `maxFirstStreamCompDelay` is the maximum delay time before a continuous query is started the first time. `streamCompDelayRatio` is the ratio for calculating delay time, with the size of the time window as base. `maxStreamCompDelay` is the maximum delay time. The actual delay time is a random time not bigger than `maxStreamCompDelay`. If a continuous query fails, `retryStreamComDelay` is the delay time before retrying it, also not bigger than `maxStreamCompDelay`.
:::
## Log Parameters
### logDir
@ -665,6 +598,18 @@ To prevent system resource from being exhausted by multiple concurrent streams,
| Value Range | 0: not consistent; 1: consistent. |
| Default | 1 |
## Compress Parameters
### compressMsgSize
| Attribute | Description |
| -------- | ----------------------------- |
| Applicable | Both Client and Server side |
| Meaning | Whether RPC message is compressed |
| Value Range | -1: no messages are compressed; 0: all messages are compressed; N (N>0): only messages larger than N bytes are compressed |
| Default | -1 |
## Other Parameters
### enableCoreFile
@ -686,172 +631,60 @@ To prevent system resource from being exhausted by multiple concurrent streams,
| Value Range | 0: disable UDF; 1: enabled UDF |
| Default Value | 1 |
## Parameter Comparison of TDengine 2.x and 3.0
| # | **Parameter** | **In 2.x** | **In 3.0** |
| --- | :-----------------: | --------------- | --------------- |
| 1 | firstEp | Yes | Yes |
| 2 | secondEp | Yes | Yes |
| 3 | fqdn | Yes | Yes |
| 4 | serverPort | Yes | Yes |
| 5 | maxShellConns | Yes | Yes |
| 6 | monitor | Yes | Yes |
| 7 | monitorFqdn | No | Yes |
| 8 | monitorPort | No | Yes |
| 9 | monitorInterval | Yes | Yes |
| 10 | monitorMaxLogs | No | Yes |
| 11 | monitorComp | No | Yes |
| 12 | telemetryReporting | Yes | Yes |
| 13 | telemetryInterval | No | Yes |
| 14 | telemetryServer | No | Yes |
| 15 | telemetryPort | No | Yes |
| 16 | queryPolicy | No | Yes |
| 17 | querySmaOptimize | No | Yes |
| 18 | queryRsmaTolerance | No | Yes |
| 19 | queryBufferSize | Yes | Yes |
| 20 | maxNumOfDistinctRes | Yes | Yes |
| 21 | minSlidingTime | Yes | Yes |
| 22 | minIntervalTime | Yes | Yes |
| 23 | countAlwaysReturnValue | Yes | Yes |
| 24 | dataDir | Yes | Yes |
| 25 | minimalDataDirGB | Yes | Yes |
| 26 | supportVnodes | No | Yes |
| 27 | tempDir | Yes | Yes |
| 28 | minimalTmpDirGB | Yes | Yes |
| 29 | compressMsgSize | Yes | Yes |
| 30 | compressColData | Yes | Yes |
| 31 | smlChildTableName | Yes | Yes |
| 32 | smlTagName | Yes | Yes |
| 33 | smlDataFormat | No | Yes |
| 34 | statusInterval | Yes | Yes |
| 35 | shellActivityTimer | Yes | Yes |
| 36 | transPullupInterval | No | Yes |
| 37 | mqRebalanceInterval | No | Yes |
| 38 | ttlUnit | No | Yes |
| 39 | ttlPushInterval | No | Yes |
| 40 | numOfTaskQueueThreads | No | Yes |
| 41 | numOfRpcThreads | No | Yes |
| 42 | numOfCommitThreads | Yes | Yes |
| 43 | numOfMnodeReadThreads | No | Yes |
| 44 | numOfVnodeQueryThreads | No | Yes |
| 45 | ratioOfVnodeStreamThreads | No | Yes |
| 46 | numOfVnodeFetchThreads | No | Yes |
| 47 | numOfVnodeRsmaThreads | No | Yes |
| 48 | numOfQnodeQueryThreads | No | Yes |
| 49 | numOfQnodeFetchThreads | No | Yes |
| 50 | numOfSnodeSharedThreads | No | Yes |
| 51 | numOfSnodeUniqueThreads | No | Yes |
| 52 | rpcQueueMemoryAllowed | No | Yes |
| 53 | logDir | Yes | Yes |
| 54 | minimalLogDirGB | Yes | Yes |
| 55 | numOfLogLines | Yes | Yes |
| 56 | asyncLog | Yes | Yes |
| 57 | logKeepDays | Yes | Yes |
| 60 | debugFlag | Yes | Yes |
| 61 | tmrDebugFlag | Yes | Yes |
| 62 | uDebugFlag | Yes | Yes |
| 63 | rpcDebugFlag | Yes | Yes |
| 64 | jniDebugFlag | Yes | Yes |
| 65 | qDebugFlag | Yes | Yes |
| 66 | cDebugFlag | Yes | Yes |
| 67 | dDebugFlag | Yes | Yes |
| 68 | vDebugFlag | Yes | Yes |
| 69 | mDebugFlag | Yes | Yes |
| 70 | wDebugFlag | Yes | Yes |
| 71 | sDebugFlag | Yes | Yes |
| 72 | tsdbDebugFlag | Yes | Yes |
| 73 | tqDebugFlag | No | Yes |
| 74 | fsDebugFlag | Yes | Yes |
| 75 | udfDebugFlag | No | Yes |
| 76 | smaDebugFlag | No | Yes |
| 77 | idxDebugFlag | No | Yes |
| 78 | tdbDebugFlag | No | Yes |
| 79 | metaDebugFlag | No | Yes |
| 80 | timezone | Yes | Yes |
| 81 | locale | Yes | Yes |
| 82 | charset | Yes | Yes |
| 83 | udf | Yes | Yes |
| 84 | enableCoreFile | Yes | Yes |
| 85 | arbitrator | Yes | No |
| 86 | numOfThreadsPerCore | Yes | No |
| 87 | numOfMnodes | Yes | No |
| 88 | vnodeBak | Yes | No |
| 89 | balance | Yes | No |
| 90 | balanceInterval | Yes | No |
| 91 | offlineThreshold | Yes | No |
| 92 | role | Yes | No |
| 93 | dnodeNopLoop | Yes | No |
| 94 | keepTimeOffset | Yes | No |
| 95 | rpcTimer | Yes | No |
| 96 | rpcMaxTime | Yes | No |
| 97 | rpcForceTcp | Yes | No |
| 98 | tcpConnTimeout | Yes | No |
| 99 | syncCheckInterval | Yes | No |
| 100 | maxTmrCtrl | Yes | No |
| 101 | monitorReplica | Yes | No |
| 102 | smlTagNullName | Yes | No |
| 103 | keepColumnName | Yes | No |
| 104 | ratioOfQueryCores | Yes | No |
| 105 | maxStreamCompDelay | Yes | No |
| 106 | maxFirstStreamCompDelay | Yes | No |
| 107 | retryStreamCompDelay | Yes | No |
| 108 | streamCompDelayRatio | Yes | No |
| 109 | maxVgroupsPerDb | Yes | No |
| 110 | maxTablesPerVnode | Yes | No |
| 111 | minTablesPerVnode | Yes | No |
| 112 | tableIncStepPerVnode | Yes | No |
| 113 | cache | Yes | No |
| 114 | blocks | Yes | No |
| 115 | days | Yes | No |
| 116 | keep | Yes | No |
| 117 | minRows | Yes | No |
| 118 | maxRows | Yes | No |
| 119 | quorum | Yes | No |
| 120 | comp | Yes | No |
| 121 | walLevel | Yes | No |
| 122 | fsync | Yes | No |
| 123 | replica | Yes | No |
| 124 | partitions | Yes | No |
| 125 | quorum | Yes | No |
| 126 | update | Yes | No |
| 127 | cachelast | Yes | No |
| 128 | maxSQLLength | Yes | No |
| 129 | maxWildCardsLength | Yes | No |
| 130 | maxRegexStringLen | Yes | No |
| 131 | maxNumOfOrderedRes | Yes | No |
| 132 | maxConnections | Yes | No |
| 133 | mnodeEqualVnodeNum | Yes | No |
| 134 | http | Yes | No |
| 135 | httpEnableRecordSql | Yes | No |
| 136 | httpMaxThreads | Yes | No |
| 137 | restfulRowLimit | Yes | No |
| 138 | httpDbNameMandatory | Yes | No |
| 139 | httpKeepAlive | Yes | No |
| 140 | enableRecordSql | Yes | No |
| 141 | maxBinaryDisplayWidth | Yes | No |
| 142 | stream | Yes | No |
| 143 | retrieveBlockingModel | Yes | No |
| 144 | tsdbMetaCompactRatio | Yes | No |
| 145 | defaultJSONStrType | Yes | No |
| 146 | walFlushSize | Yes | No |
| 147 | keepTimeOffset | Yes | No |
| 148 | flowctrl | Yes | No |
| 149 | slaveQuery | Yes | No |
| 150 | adjustMaster | Yes | No |
| 151 | topicBinaryLen | Yes | No |
| 152 | telegrafUseFieldNum | Yes | No |
| 153 | deadLockKillQuery | Yes | No |
| 154 | clientMerge | Yes | No |
| 155 | sdbDebugFlag | Yes | No |
| 156 | odbcDebugFlag | Yes | No |
| 157 | httpDebugFlag | Yes | No |
| 158 | monDebugFlag | Yes | No |
| 159 | cqDebugFlag | Yes | No |
| 160 | shortcutFlag | Yes | No |
| 161 | probeSeconds | Yes | No |
| 162 | probeKillSeconds | Yes | No |
| 163 | probeInterval | Yes | No |
| 164 | lossyColumns | Yes | No |
| 165 | fPrecision | Yes | No |
| 166 | dPrecision | Yes | No |
| 167 | maxRange | Yes | No |
| 168 | range | Yes | No |
## 3.0 Parameters
| # | **Parameter** | **Applicable to 2.x** | **Applicable to 3.0** | **Current behavior in 3.0** |
| --- | :---------------------: | --------------- | --------------- | ------------------------------------------------- |
| 1 | firstEp | Yes | Yes | |
| 2 | secondEp | Yes | Yes | |
| 3 | fqdn | Yes | Yes | |
| 4 | serverPort | Yes | Yes | |
| 5 | maxShellConns | Yes | Yes | |
| 6 | monitor | Yes | Yes | |
| 7 | monitorFqdn | No | Yes | |
| 8 | monitorPort | No | Yes | |
| 9 | monitorInterval | Yes | Yes | |
| 10 | queryPolicy | No | Yes | |
| 11 | querySmaOptimize | No | Yes | |
| 12 | maxNumOfDistinctRes | Yes | Yes | |
| 15 | countAlwaysReturnValue | Yes | Yes | |
| 16 | dataDir | Yes | Yes | |
| 17 | minimalDataDirGB | Yes | Yes | |
| 18 | supportVnodes | No | Yes | |
| 19 | tempDir | Yes | Yes | |
| 20 | minimalTmpDirGB | Yes | Yes | |
| 21 | smlChildTableName | Yes | Yes | |
| 22 | smlTagName | Yes | Yes | |
| 23 | smlDataFormat | No | Yes | |
| 24 | statusInterval | Yes | Yes | |
| 25 | logDir | Yes | Yes | |
| 26 | minimalLogDirGB | Yes | Yes | |
| 27 | numOfLogLines | Yes | Yes | |
| 28 | asyncLog | Yes | Yes | |
| 29 | logKeepDays | Yes | Yes | |
| 30 | debugFlag | Yes | Yes | |
| 31 | tmrDebugFlag | Yes | Yes | |
| 32 | uDebugFlag | Yes | Yes | |
| 33 | rpcDebugFlag | Yes | Yes | |
| 34 | jniDebugFlag | Yes | Yes | |
| 35 | qDebugFlag | Yes | Yes | |
| 36 | cDebugFlag | Yes | Yes | |
| 37 | dDebugFlag | Yes | Yes | |
| 38 | vDebugFlag | Yes | Yes | |
| 39 | mDebugFlag | Yes | Yes | |
| 40 | wDebugFlag | Yes | Yes | |
| 41 | sDebugFlag | Yes | Yes | |
| 42 | tsdbDebugFlag | Yes | Yes | |
| 43 | tqDebugFlag | No | Yes | |
| 44 | fsDebugFlag | Yes | Yes | |
| 45 | udfDebugFlag | No | Yes | |
| 46 | smaDebugFlag | No | Yes | |
| 47 | idxDebugFlag | No | Yes | |
| 48 | tdbDebugFlag | No | Yes | |
| 49 | metaDebugFlag | No | Yes | |
| 50 | timezone | Yes | Yes | |
| 51 | locale | Yes | Yes | |
| 52 | charset | Yes | Yes | |
| 53 | udf | Yes | Yes | |
| 54 | enableCoreFile | Yes | Yes | |

View File

@ -10,6 +10,18 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
import Release from "/components/ReleaseV3";
## 3.0.2.2
<Release type="tdengine" version="3.0.2.2" />
## 3.0.2.1
<Release type="tdengine" version="3.0.2.1" />
## 3.0.2.0
<Release type="tdengine" version="3.0.2.0" />
## 3.0.1.8
<Release type="tdengine" version="3.0.1.8" />

View File

@ -10,6 +10,18 @@ For other historical version installers, please visit [here](https://www.taosdat
import Release from "/components/ReleaseV3";
## 2.4.0
<Release type="tools" version="2.4.0" />
## 2.3.3
<Release type="tools" version="2.3.3" />
## 2.3.2
<Release type="tools" version="2.3.2" />
## 2.3.0
<Release type="tools" version="2.3.0" />

View File

@ -24,7 +24,7 @@ func main() {
if err != nil {
panic(err)
}
_, err = db.Exec("create topic if not exists example_tmq_topic with meta as DATABASE example_tmq")
_, err = db.Exec("create topic if not exists example_tmq_topic as DATABASE example_tmq")
if err != nil {
panic(err)
}
@ -84,20 +84,6 @@ func main() {
if err != nil {
panic(err)
}
for {
result, err := consumer.Poll(time.Second)
if err != nil {
panic(err)
}
if result.Type != common.TMQ_RES_TABLE_META {
panic("want message type 2 got " + strconv.Itoa(int(result.Type)))
}
data, _ := json.Marshal(result.Meta)
fmt.Println(string(data))
consumer.Commit(context.Background(), result.Message)
consumer.FreeMessage(result.Message)
break
}
_, err = db.Exec("insert into example_tmq.t1 values(now,1)")
if err != nil {
panic(err)

View File

@ -8,7 +8,7 @@ conn.execute("CREATE DATABASE test")
# change database. same as execute "USE db"
conn.select_db("test")
conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)")
affected_row: int = conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m 24.4)")
affected_row: int = conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m, 24.4)")
print("affected_row", affected_row)
# output:
# affected_row 3

View File

@ -47,7 +47,6 @@ OpenTSDB JSON 格式协议采用一个 JSON 字符串表示一行或多行数据
:::note
- 对于 JSON 格式协议TDengine 并不会自动把所有标签转成 NCHAR 类型,字符串将转为 NCHAR 类型,数值将同样转换为 DOUBLE 类型。
- TDengine 只接收 JSON **数组格式**的字符串,即使一行数据也需要转换成数组形式。
- 默认生成的子表名是根据规则生成的唯一 ID 值。用户也可以通过在 taos.cfg 里配置 smlChildTableName 参数来指定某个标签值作为子表名。该标签值应该具有全局唯一性。举例如下假设有个标签名为tname, 配置 smlChildTableName=tname, 插入数据为 `"tags": { "host": "web02","dc": "lga","tname":"cpu1"}` 则创建的子表名为 cpu1。注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set其他的行会忽略
:::

View File

@ -19,6 +19,7 @@ TDengine 提供了兼容 InfluxDB (v1) 和 OpenTSDB 行协议的 Schemaless API
- `precision` TDengine 使用的时间精度
- `u` TDengine 用户名
- `p` TDengine 密码
- `ttl` 自动创建的子表生命周期,以子表的第一条数据的 TTL 参数为准,不可更新。更多信息请参考[创建表文档](taos-sql/table/#创建表)的 TTL 参数
注意: 目前不支持 InfluxDB 的 token 验证方式,仅支持 Basic 验证和查询参数验证。

View File

@ -68,39 +68,38 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对
### 安装连接器
<Tabs defaultValue="maven">
<TabItem value="maven" label="使用 Maven 安装">
<TabItem value="maven" label="使用 Maven 安装">
目前 taos-jdbcdriver 已经发布到 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver)
仓库,且各大仓库都已同步。
目前 taos-jdbcdriver 已经发布到 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 仓库,且各大仓库都已同步。
- [sonatype](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver)
- [mvnrepository](https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver)
- [maven.aliyun](https://maven.aliyun.com/mvn/search)
- [sonatype](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver)
- [mvnrepository](https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver)
- [maven.aliyun](https://maven.aliyun.com/mvn/search)
Maven 项目中,在 pom.xml 中添加以下依赖:
Maven 项目中,在 pom.xml 中添加以下依赖:
```xml-dtd
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.0.0</version>
</dependency>
```
```xml-dtd
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.0.0</version>
</dependency>
```
</TabItem>
<TabItem value="source" label="使用源码编译安装">
</TabItem>
<TabItem value="source" label="使用源码编译安装">
可以通过下载 TDengine 的源码,自己编译最新版本的 Java connector
可以通过下载 TDengine 的源码,自己编译最新版本的 Java connector
```shell
git clone https://github.com/taosdata/taos-connector-jdbc.git
cd taos-connector-jdbc
mvn clean install -Dmaven.test.skip=true
```
```shell
git clone https://github.com/taosdata/taos-connector-jdbc.git
cd taos-connector-jdbc
mvn clean install -Dmaven.test.skip=true
```
编译后,在 target 目录下会产生 taos-jdbcdriver-3.0.*-dist.jar 的 jar 包,并自动将编译的 jar 文件放在本地的 Maven 仓库中。
编译后,在 target 目录下会产生 taos-jdbcdriver-3.0.*-dist.jar 的 jar 包,并自动将编译的 jar 文件放在本地的 Maven 仓库中。
</TabItem>
</TabItem>
</Tabs>
## 建立连接
@ -111,125 +110,117 @@ TDengine 的 JDBC URL 规范格式为:
对于建立连接,原生连接与 REST 连接有细微不同。
<Tabs defaultValue="rest">
<TabItem value="native" label="原生连接">
<TabItem value="native" label="原生连接">
```java
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
```java
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
以上示例,使用了 JDBC 原生连接的 TSDBDriver建立了到 hostname 为 taosdemo.com端口为 6030TDengine 的默认端口),数据库名为 test 的连接。这个 URL
中指定用户名user为 root密码password为 taosdata。
以上示例,使用了 JDBC 原生连接的 TSDBDriver建立了到 hostname 为 taosdemo.com端口为 6030TDengine 的默认端口),数据库名为 test 的连接。这个 URL
中指定用户名user为 root密码password为 taosdata。
**注意**:使用 JDBC 原生连接taos-jdbcdriver 需要依赖客户端驱动Linux 下是 libtaos.soWindows 下是 taos.dllmacOS 下是 libtaos.dylib
**注意**:使用 JDBC 原生连接taos-jdbcdriver 需要依赖客户端驱动Linux 下是 libtaos.soWindows 下是 taos.dllmacOS 下是 libtaos.dylib
url 中的配置参数如下:
url 中的配置参数如下:
- user登录 TDengine 用户名,默认值 'root'。
- password用户登录密码默认值 'taosdata'。
- cfgdir客户端配置文件目录路径Linux OS 上默认值 `/etc/taos`Windows OS 上默认值 `C:/TDengine/cfg`。
- charset客户端使用的字符集默认值为系统字符集。
- locale客户端语言环境默认值系统当前 locale。
- timezone客户端使用的时区默认值为系统当前时区。
- batchfetch: true在执行查询时批量拉取结果集false逐行拉取结果集。默认值为true。开启批量拉取同时获取一批数据在查询数据量较大时批量拉取可以有效的提升查询性能。
- batchErrorIgnoretrue在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败将继续执行下面的 SQL。false不再执行失败 SQL
后的任何语句。默认值为false。
- user登录 TDengine 用户名,默认值 'root'。
- password用户登录密码默认值 'taosdata'。
- cfgdir客户端配置文件目录路径Linux OS 上默认值 `/etc/taos`Windows OS 上默认值 `C:/TDengine/cfg`。
- charset客户端使用的字符集默认值为系统字符集。
- locale客户端语言环境默认值系统当前 locale。
- timezone客户端使用的时区默认值为系统当前时区。
- batchfetch: true在执行查询时批量拉取结果集false逐行拉取结果集。默认值为true。开启批量拉取同时获取一批数据在查询数据量较大时批量拉取可以有效的提升查询性能。
- batchErrorIgnoretrue在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败将继续执行下面的 SQL。false不再执行失败 SQL 后的任何语句。默认值为false。
JDBC 原生连接的使用请参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1955.html)。
JDBC 原生连接的使用请参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1955.html)。
**使用 TDengine 客户端驱动配置文件建立连接 **
**使用 TDengine 客户端驱动配置文件建立连接 **
当使用 JDBC 原生连接连接 TDengine 集群时,可以使用 TDengine 客户端驱动配置文件,在配置文件中指定集群的 firstEp、secondEp 等参数。如下所示:
当使用 JDBC 原生连接连接 TDengine 集群时,可以使用 TDengine 客户端驱动配置文件,在配置文件中指定集群的 firstEp、secondEp 等参数。如下所示:
1. 在 Java 应用中不指定 hostname 和 port
1. 在 Java 应用中不指定 hostname 和 port
```java
public Connection getConn() throws Exception{
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://:/test?user=root&password=taosdata";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
return conn;
}
```
```java
public Connection getConn() throws Exception{
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://:/test?user=root&password=taosdata";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
return conn;
}
```
2. 在配置文件中指定 firstEp 和 secondEp
2. 在配置文件中指定 firstEp 和 secondEp
```shell
# first fully qualified domain name (FQDN) for TDengine system
firstEp cluster_node1:6030
```shell
# first fully qualified domain name (FQDN) for TDengine system
firstEp cluster_node1:6030
# second fully qualified domain name (FQDN) for TDengine system, for cluster only
secondEp cluster_node2:6030
# second fully qualified domain name (FQDN) for TDengine system, for cluster only
secondEp cluster_node2:6030
# default system charset
# charset UTF-8
# default system charset
# charset UTF-8
# system locale
# locale en_US.UTF-8
```
# system locale
# locale en_US.UTF-8
```
以上示例jdbc 会使用客户端的配置文件,建立到 hostname 为 cluster_node1、端口为 6030、数据库名为 test 的连接。当集群中 firstEp 节点失效时JDBC 会尝试使用 secondEp
连接集群。
以上示例jdbc 会使用客户端的配置文件,建立到 hostname 为 cluster_node1、端口为 6030、数据库名为 test 的连接。当集群中 firstEp 节点失效时JDBC 会尝试使用 secondEp 连接集群。
TDengine 中,只要保证 firstEp 和 secondEp 中一个节点有效,就可以正常建立到集群的连接。
TDengine 中,只要保证 firstEp 和 secondEp 中一个节点有效,就可以正常建立到集群的连接。
> **注意**:这里的配置文件指的是调用 JDBC Connector 的应用程序所在机器上的配置文件Linux OS 上默认值 /etc/taos/taos.cfg Windows OS 上默认值
C://TDengine/cfg/taos.cfg。
> **注意**:这里的配置文件指的是调用 JDBC Connector 的应用程序所在机器上的配置文件Linux OS 上默认值 /etc/taos/taos.cfg Windows OS 上默认值 C://TDengine/cfg/taos.cfg。
</TabItem>
<TabItem value="rest" label="REST 连接">
</TabItem>
<TabItem value="rest" label="REST 连接">
```java
Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
```java
Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
以上示例,使用了 JDBC REST 连接的 RestfulDriver建立了到 hostname 为 taosdemo.com端口为 6041数据库名为 test 的连接。这个 URL 中指定用户名user
root密码password为 taosdata。
以上示例,使用了 JDBC REST 连接的 RestfulDriver建立了到 hostname 为 taosdemo.com端口为 6041数据库名为 test 的连接。这个 URL 中指定用户名user为 root密码password为 taosdata。
使用 JDBC REST 连接,不需要依赖客户端驱动。与 JDBC 原生连接相比,仅需要:
使用 JDBC REST 连接,不需要依赖客户端驱动。与 JDBC 原生连接相比,仅需要:
1. driverClass 指定为“com.taosdata.jdbc.rs.RestfulDriver”
2. jdbcUrl 以“jdbc:TAOS-RS://”开头;
3. 使用 6041 作为连接端口。
1. driverClass 指定为“com.taosdata.jdbc.rs.RestfulDriver”
2. jdbcUrl 以“jdbc:TAOS-RS://”开头;
3. 使用 6041 作为连接端口。
url 中的配置参数如下:
url 中的配置参数如下:
- user登录 TDengine 用户名,默认值 'root'。
- password用户登录密码默认值 'taosdata'。
- batchfetch: true在执行查询时批量拉取结果集false逐行拉取结果集。默认值为false。逐行拉取结果集使用 HTTP 方式进行数据传输。JDBC REST
连接支持批量拉取数据功能。taos-jdbcdriver 与 TDengine 之间通过 WebSocket 连接进行数据传输。相较于 HTTPWebSocket 可以使 JDBC REST 连接支持大数据量查询,并提升查询性能。
- charset: 当开启批量拉取数据时,指定解析字符串数据的字符集。
- batchErrorIgnoretrue在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败,继续执行下面的 SQL 了。false不再执行失败 SQL
后的任何语句。默认值为false。
- httpConnectTimeout: 连接超时时间,单位 ms 默认值为 5000。
- httpSocketTimeout: socket 超时时间,单位 ms默认值为 5000。仅在 batchfetch 设置为 false 时生效。
- messageWaitTimeout: 消息超时时间, 单位 ms 默认值为 3000。 仅在 batchfetch 设置为 true 时生效。
- useSSL: 连接中是否使用 SSL。
- user登录 TDengine 用户名,默认值 'root'。
- password用户登录密码默认值 'taosdata'。
- batchfetch: true在执行查询时批量拉取结果集false逐行拉取结果集。默认值为false。逐行拉取结果集使用 HTTP 方式进行数据传输。JDBC REST 连接支持批量拉取数据功能。taos-jdbcdriver 与 TDengine 之间通过 WebSocket 连接进行数据传输。相较于 HTTPWebSocket 可以使 JDBC REST 连接支持大数据量查询,并提升查询性能。
- charset: 当开启批量拉取数据时,指定解析字符串数据的字符集。
- batchErrorIgnoretrue在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败,继续执行下面的 SQL 了。false不再执行失败 SQL 后的任何语句。默认值为false。
- httpConnectTimeout: 连接超时时间,单位 ms 默认值为 5000。
- httpSocketTimeout: socket 超时时间,单位 ms默认值为 5000。仅在 batchfetch 设置为 false 时生效。
- messageWaitTimeout: 消息超时时间, 单位 ms 默认值为 3000。 仅在 batchfetch 设置为 true 时生效。
- useSSL: 连接中是否使用 SSL。
**注意**部分配置项比如locale、timezone在 REST 连接中不生效。
**注意**部分配置项比如locale、timezone在 REST 连接中不生效。
:::note
:::note
- 与原生连接方式不同REST 接口是无状态的。在使用 JDBC REST 连接时,需要在 SQL 中指定表、超级表的数据库名称。例如:
- 与原生连接方式不同REST 接口是无状态的。在使用 JDBC REST 连接时,需要在 SQL 中指定表、超级表的数据库名称。例如:
```sql
INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6);
```
```sql
INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6);
```
- 如果在 url 中指定了 dbname那么JDBC REST 连接会默认使用/rest/sql/dbname 作为 restful 请求的 url在 SQL 中不需要指定 dbname。例如url 为
jdbc:TAOS-RS://127.0.0.1:6041/test那么可以执行 sqlinsert into t1 using weather(ts, temperature)
tags('California.SanFrancisco') values(now, 24.6);
- 如果在 url 中指定了 dbname那么JDBC REST 连接会默认使用/rest/sql/dbname 作为 restful 请求的 url在 SQL 中不需要指定 dbname。例如url 为 jdbc:TAOS-RS://127.0.0.1:6041/test那么可以执行 sqlinsert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6);
:::
:::
</TabItem>
</TabItem>
</Tabs>
### 指定 URL 和 Properties 获取连接
@ -890,8 +881,10 @@ public static void main(String[] args) throws Exception {
| taos-jdbcdriver 版本 | 主要变化 |
| :------------------: | :----------------------------: |
| 3.0.3 | 修复 REST 连接在 jdk17+ 版本时间戳解析错误问题 |
| 3.0.1 - 3.0.2 | 修复一些情况下结果集数据解析错误的问题。3.0.1 在 JDK 11 环境编译JDK 8 环境下建议使用 3.0.2 版本 |
| 3.0.0 | 支持 TDengine 3.0 |
| 2.0.42 | 修复 WebSocket 连接中 wasNull 接口返回值 |
| 2.0.41 | 修正 REST 连接中用户名和密码转码方式 |
| 2.0.39 - 2.0.40 | 增加 REST 连接/请求 超时设置 |
| 2.0.38 | JDBC REST 连接增加批量拉取功能 |
@ -928,7 +921,7 @@ public static void main(String[] args) throws Exception {
**原因**taos-jdbcdriver 3.0.1 版本需要在 JDK 11+ 环境使用。
**解决方法** 更换 taos-jdbcdriver 3.0.2 版本。
**解决方法** 更换 taos-jdbcdriver 3.0.2+ 版本。
其它问题请参考 [FAQ](../../../train-faq/faq)

View File

@ -27,10 +27,11 @@ database_option: {
| PRECISION {'ms' | 'us' | 'ns'}
| REPLICA value
| RETENTIONS ingestion_duration:keep_duration ...
| STRICT {'off' | 'on'}
| WAL_LEVEL {1 | 2}
| VGROUPS value
| SINGLE_STABLE {0 | 1}
| TABLE_PREFIX value
| TABLE_SUFFIX value
| WAL_RETENTION_PERIOD value
| WAL_ROLL_PERIOD value
| WAL_RETENTION_SIZE value
@ -61,9 +62,6 @@ database_option: {
- PRECISION数据库的时间戳精度。ms 表示毫秒us 表示微秒ns 表示纳秒,默认 ms 毫秒。
- REPLICA表示数据库副本数取值为 1 或 3默认为 1。在集群中使用副本数必须小于或等于 DNODE 的数目。
- RETENTIONS表示数据的聚合周期和保存时长如 RETENTIONS 15s:7d,1m:21d,15m:50d 表示数据原始采集周期为 15 秒,原始数据保存 7 天;按 1 分钟聚合的数据保存 21 天;按 15 分钟聚合的数据保存 50 天。目前支持且只支持三级存储周期。
- STRICT表示数据同步的一致性要求默认为 off。
- on 表示强一致,即运行标准的 raft 协议,半数提交返回成功。
- off 表示弱一致,本地提交即返回成功。
- WAL_LEVELWAL 级别,默认为 1。
- 1写 WAL但不执行 fsync。
- 2写 WAL而且执行 fsync。
@ -71,6 +69,8 @@ database_option: {
- SINGLE_STABLE表示此数据库中是否只可以创建一个超级表用于超级表列非常多的情况。
- 0表示可以创建多张超级表。
- 1表示只可以创建一张超级表。
- TABLE_PREFIX:内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的前缀的长度。
- TABLE_SUFFIX:内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的后缀的长度。
- WAL_RETENTION_PERIODwal 文件的额外保留策略用于数据订阅。wal 的保存时长,单位为 s。单副本默认为 0即落盘后立即删除。-1 表示不删除。多副本默认为 4 天。
- WAL_RETENTION_SIZEwal 文件的额外保留策略用于数据订阅。wal 的保存的最大上限,单位为 KB。单副本默认为 0即落盘后立即删除。多副本默认为-1表示不删除。
- WAL_ROLL_PERIODwal 文件切换时长,单位为 s。当 wal 文件创建并写入后,经过该时间,会自动创建一个新的 wal 文件。单副本默认为 0即仅在落盘时创建新文件。多副本默认为 1 天。
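作为示意,可结合上述部分参数按如下方式建库(库名与各参数取值仅为示例,并非推荐配置):
```sql
CREATE DATABASE db1
  VGROUPS 4
  WAL_LEVEL 1
  TABLE_PREFIX 2
  TABLE_SUFFIX 2
  WAL_RETENTION_PERIOD 86400;
```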

View File

@ -139,10 +139,10 @@ alter_table_option: {
- ADD COLUMN添加列。
- DROP COLUMN删除列。
- MODIFY COLUMN修改列定义,如果数据列的类型是可变长类型,那么可以使用此指令修改其宽度,只能改大,不能改小。
- MODIFY COLUMN修改列的宽度,数据列的类型必须是 nchar 和 binary使用此指令可以修改其宽度,只能改大,不能改小。
- ADD TAG给超级表添加一个标签。
- DROP TAG删除超级表的一个标签。从超级表删除某个标签后该超级表下的所有子表也会自动删除该标签。
- MODIFY TAG修改超级表的一个标签的定义。如果标签的类型是可变长类型,那么可以使用此指令修改其宽度,只能改大,不能改小。
- MODIFY TAG修改超级表的一个标签的列宽度。标签的类型只能是 nchar 和 binary使用此指令可以修改其宽度,只能改大,不能改小。
- RENAME TAG修改超级表的一个标签的名称。从超级表修改某个标签名后该超级表下的所有子表也会自动更新该标签名。
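作为示意,假设超级表 meters 中已有 NCHAR 类型的普通列 location_desc 与 BINARY 类型的标签 location可按如下方式将其宽度改大表名、列名与宽度仅为示例
```sql
ALTER STABLE meters MODIFY COLUMN location_desc NCHAR(64);
ALTER STABLE meters MODIFY TAG location BINARY(128);
```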
### 增加列

View File

@ -880,6 +880,7 @@ INTERP(expr)
- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值。
- INTERP 只能在一个时间序列内进行插值,因此当作用于超级表时必须跟 partition by tbname 一起使用。
- INTERP 可以与伪列 _irowts 一起使用,返回插值点所对应的时间戳(3.0.1.4版本以后支持)。
- INTERP 可以与伪列 _isfilled 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(3.0.2.1版本以后支持)。
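作为示意,假设子表 d0 中有 current 列,可按如下方式同时返回上述两个伪列(表名、列名与时间范围仅为示例):
```sql
SELECT _irowts, _isfilled, INTERP(current)
FROM d0
RANGE('2023-01-01 00:00:00', '2023-01-01 01:00:00')
EVERY(30s)
FILL(PREV);
```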
### LAST

View File

@ -114,7 +114,7 @@ SELECT * from information_schema.`ins_streams`;
在创建流时,可以通过 TRIGGER 指令指定流式计算的触发模式。
对于非窗口计算,流式计算的触发是实时的;对于窗口计算,目前提供 3 种触发模式:
对于非窗口计算,流式计算的触发是实时的;对于窗口计算,目前提供 3 种触发模式,默认为 AT_ONCE
1. AT_ONCE写入立即触发

View File

@ -18,6 +18,7 @@ description: TDengine 保留关键字的详细列表
- ADD
- AFTER
- AGGREGATE
- ALIVE
- ALL
- ALTER
- ANALYZE

View File

@ -179,6 +179,81 @@ SHOW TABLE DISTRIBUTED table_name;
显示表的数据分布信息。
示例说明:
语句: show table distributed d0\G; 竖行显示表 d0 的 BLOCK 分布情况
<details>
<summary>显示示例</summary>
<pre><code>
*************************** 1.row ***************************
_block_dist: Total_Blocks=[5] Total_Size=[93.65 Kb] Average_size=[18.73 Kb] Compression_Ratio=[23.98 %]
Total_Blocks : 表d0 占用的 block 个数为 5 个
Total_Size: 表 d0 所有 block 在文件中占用的大小为 93.65 KB
Average_size: 平均每个 block 在文件中占用的空间大小为 18.73 KB
Compression_Ratio: 数据压缩率为 23.98%
*************************** 2.row ***************************
_block_dist: Total_Rows=[20000] Inmem_Rows=[0] MinRows=[3616] MaxRows=[4096] Average_Rows=[4000]
Total_Rows: 统计表 d0 的所有行数,为 20000 行
Inmem_Rows: 表示仍然还存放在内存中的行数,即没有落盘的行数,为 0 行,表示没有
MinRows: BLOCK 中最小的行数,为 3616 行
MaxRows: BLOCK 中最大的行数,为 4096 行
Average_Rows: BLOCK 中的平均行数,为 4000 行
*************************** 3.row ***************************
_block_dist: Total_Tables=[1] Total_Files=[2]
Total_Tables: 表示子表的个数,这里为 1
Total_Files: 表数据保存在几个文件中,这里保存在 2 个文件中
*************************** 5.row ***************************
_block_dist: 0100 |
*************************** 6.row ***************************
_block_dist: 0299 |
......
*************************** 22.row ***************************
_block_dist: 3483 ||||||||||||||||| 1 (20.00%)
*************************** 23.row ***************************
_block_dist: 3682 |
*************************** 24.row ***************************
_block_dist: 3881 ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| 4 (80.00%)
Query OK, 24 row(s) in set (0.002444s)
</code></pre>
</details>
上面是块中包含数据行数的块分布情况图,这里的 0100 0299 0498 … 表示的是每个块中包含的数据行数。上面的意思就是这个表的 5 个块中,行数在 3483 ~ 3681 的块有 1 个,占整个块数的 20%;行数在 3881 ~ 4096最大行数的块有 4 个,占整个块数的 80%;其它区域内分布的块数为 0。
## SHOW TAGS
```sql

View File

@ -94,6 +94,7 @@ description: "TDengine 3.0 版本的语法变更说明"
| 9 | SAMPLE | 增强 | 可以直接用于超级表了。没有PARTITION BY时超级表的数据会被合并成一条时间线。
| 10 | STATECOUNT | 增强 | 可以直接用于超级表了。没有PARTITION BY时超级表的数据会被合并成一条时间线。
| 11 | STATEDURATION | 增强 | 可以直接用于超级表了。没有PARTITION BY时超级表的数据会被合并成一条时间线。
| 12 | TIMETRUNCATE | 增强 | 增加 ignore_timezone 参数,可选是否使用,默认值为 1。
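作为用法示意,第三个参数即 ignore_timezone可省略省略时默认为 1时间值仅为示例
```sql
SELECT TIMETRUNCATE('2023-01-01 12:34:56', 1d, 0);
SELECT TIMETRUNCATE('2023-01-01 12:34:56', 1d, 1);
```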
## SCHEMALESS 变更

View File

@ -21,6 +21,7 @@ taosAdapter 提供以下功能:
- 无缝连接到 collectd
- 无缝连接到 StatsD
- 支持 Prometheus remote_read 和 remote_write
- 获取 table 所在的虚拟节点组VGroup的 VGroup ID
## taosAdapter 架构图
@ -59,6 +60,7 @@ Usage of taosAdapter:
--collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045)
--collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root")
--collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10)
--collectd.ttl int collectd data ttl. Env "TAOS_ADAPTER_COLLECTD_TTL" (default 0, means no ttl)
-c, --config string config path default /etc/taos/taosadapter.toml
--cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true)
--cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials"
@ -100,6 +102,7 @@ Usage of taosAdapter:
--node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s)
--node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100])
--node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root")
--node_exporter.ttl int node_exporter data ttl. Env "TAOS_ADAPTER_NODE_EXPORTER_TTL"(default 0, means no ttl)
--opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true)
--opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1)
--opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb])
@ -110,6 +113,7 @@ Usage of taosAdapter:
--opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049])
--opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE"
--opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
--opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL"(default 0, means no ttl)
--pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT" (default 1h0m0s)
--pool.maxConnect int max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT" (default 4000)
--pool.maxIdle int max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE" (default 4000)
@ -131,6 +135,7 @@ Usage of taosAdapter:
--statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE"
--statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root")
--statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10)
--statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL" (default 0, means no ttl)
--taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE"
--version Print the version and exit
```
@ -174,6 +179,7 @@ AllowWebSockets
node_export 是一个机器指标的导出器。请访问 [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) 了解更多信息。
- 支持 Prometheus remote_read 和 remote_write
remote_read 和 remote_write 是 Prometheus 数据读写分离的集群方案。请访问[https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) 了解更多信息。
- 获取 table 所在的虚拟节点组VGroup的 VGroup ID。关于虚拟节点组VGroup的更多信息请访问[整体架构文档](/tdinternal/arch/#主要逻辑单元) 。
## 接口
@ -195,6 +201,7 @@ AllowWebSockets
- `precision` TDengine 使用的时间精度
- `u` TDengine 用户名
- `p` TDengine 密码
- `ttl` 自动创建的子表生命周期,以子表的第一条数据的 TTL 参数为准,不可更新。更多信息请参考[创建表文档](taos-sql/table/#创建表)的 TTL 参数。
注意: 目前不支持 InfluxDB 的 token 验证方式,仅支持 Basic 验证和查询参数验证。
示例: curl --request POST http://127.0.0.1:6041/influxdb/v1/write?db=test --user "root:taosdata" --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"
@ -235,6 +242,10 @@ Prometheus 使用的由 \*NIX 内核暴露的硬件和操作系统指标的输
<Prometheus />
### 获取 table 的 VGroup ID
可以访问 http 接口 `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>` 获取 table 的 VGroup ID。关于虚拟节点组VGroup的更多信息请访问[整体架构文档](/tdinternal/arch/#主要逻辑单元) 。
## 内存使用优化方法
taosAdapter 将监测自身运行过程中内存使用率并通过两个阈值进行调节。有效值范围为 -1 到 100 的整数,单位为系统物理内存的百分比。
@ -277,7 +288,7 @@ http 返回内容:
## taosAdapter 监控指标
taosAdapter 采集 http 相关指标、cpu 百分比和内存百分比。
taosAdapter 采集 http 相关指标、CPU 百分比和内存百分比。
### http 接口
@ -289,13 +300,13 @@ http://<fqdn>:6041/metrics
### 写入 TDengine
taosAdapter 支持将 http 监控、cpu 百分比和内存百分比写入 TDengine。
taosAdapter 支持将 http 监控、CPU 百分比和内存百分比写入 TDengine。
有关配置参数
| **配置项** | **描述** | **默认值** |
|-------------------------|--------------------------------------------|----------|
| monitor.collectDuration | cpu 和内存采集间隔 | 3s |
| monitor.collectDuration | CPU 和内存采集间隔 | 3s |
| monitor.identity | 当前taosadapter 的标识符如果不设置将使用 'hostname:port' | |
| monitor.incgroup | 是否是 cgroup 中运行(容器中运行设置为 true) | false |
| monitor.writeToTD | 是否写入到 TDengine | false |

View File

@ -204,6 +204,10 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
- **-a/--replica <replicaNum\>** :
创建数据库时指定其副本数,默认值为 1 。
- **-k/--keep-trying <NUMBER\>** : 失败后进行重试的次数,默认不重试。需使用 v3.0.9 以上版本。
- **-z/--trying-interval <NUMBER\>** : 失败重试间隔时间,单位为毫秒,仅在 -k 指定重试后有效。需使用 v3.0.9 以上版本。
- **-V/--version** :
显示版本信息并退出。不能与其它参数混用。
@ -231,6 +235,10 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
插入场景下 `filetype` 必须设置为 `insert`,该参数及其它通用参数详见[通用配置参数](#通用配置参数)
- **keep_trying** : 失败后进行重试的次数,默认不重试。需使用 v3.0.9 以上版本。
- **trying_interval** : 失败重试间隔时间,单位为毫秒,仅在 keep_trying 指定重试后有效。需使用 v3.0.9 以上版本。
#### 数据库相关配置参数
创建数据库时的相关参数在 json 配置文件中的 `dbinfo` 中配置,个别具体参数如下。其余参数均与 TDengine 中 `create database` 时所指定的数据库参数相对应,详见[../../taos-sql/database]

View File

@ -22,7 +22,7 @@ taosdump 是一个逻辑备份工具,它不应被用于备份任何原始数
taosdump 有两种安装方式:
- 安装 taosTools 官方安装包, 请从[所有下载链接](https://www.taosdata.com/all-downloads)页面找到 taosTools 并下载安装。
- 安装 taosTools 官方安装包, 请从[发布历史页面](https://docs.taosdata.com/releases/tools/)找到 taosTools 并下载安装。
- 单独编译 taos-tools 并安装, 详情请参考 [taos-tools](https://github.com/taosdata/taos-tools) 仓库。

View File

@ -134,15 +134,6 @@ taos --dump-config
| 取值范围 | 1-200000 |
| 缺省值 | 30 |
### telemetryReporting
| 属性 | 说明 |
| -------- | ---------------------------------------- |
| 适用范围 | 仅服务端适用 |
| 含义 | 是否允许 TDengine 采集和上报基本使用信息 |
| 取值范围 | 0不允许 1允许 |
| 缺省值 | 1 |
## 查询相关
### queryPolicy
@ -191,6 +182,15 @@ taos --dump-config
| 取值范围 | 0 表示包含函数名,1 表示不包含函数名。 |
| 缺省值 | 0 |
### countAlwaysReturnValue
| 属性 | 说明 |
| -------- | -------------------------------- |
| 适用范围 | 仅服务端适用 |
| 含义 | count/hyperloglog 函数在数据为空或者 NULL 的情况下是否返回值 |
| 取值范围 | 0:返回空行;1:返回 0 |
| 缺省值 | 1 |
## 区域相关
### timezone
@ -306,12 +306,20 @@ charset 的有效值是 UTF-8。
| 含义 | 数据文件目录,所有的数据文件都将写入该目录 |
| 缺省值 | /var/lib/taos |
### tempDir
| 属性 | 说明 |
| -------- | ------------------------------------------ |
| 适用范围 | 仅服务端适用 |
| 含义 | 该参数指定系统运行过程中所有临时文件生成的目录 |
| 缺省值 | /tmp |
### minimalTmpDirGB
| 属性 | 说明 |
| -------- | ------------------------------------------------ |
| 适用范围 | 服务端和客户端均适用 |
| 含义 | 当日志文件夹的磁盘大小小于该值时,停止写临时文件 |
| 含义 | tempDir 所指定的临时文件目录所需要保留的最小空间 |
| 单位 | GB |
| 缺省值 | 1.0 |
@ -320,7 +328,7 @@ charset 的有效值是 UTF-8。
| 属性 | 说明 |
| -------- | ------------------------------------------------ |
| 适用范围 | 仅服务端适用 |
| 含义 | 当日志文件夹的磁盘大小小于该值时,停止写时序数据 |
| 含义 | dataDir 指定的时序数据存储目录所需要保留的最小空间 |
| 单位 | GB |
| 缺省值 | 2.0 |
@ -335,27 +343,7 @@ charset 的有效值是 UTF-8。
| 取值范围 | 0-4096 |
| 缺省值 | CPU 核数的 2 倍 |
## 时间相关
### statusInterval
| 属性 | 说明 |
| -------- | --------------------------- |
| 适用范围 | 仅服务端适用 |
| 含义 | dnode 向 mnode 报告状态间隔 |
| 单位 | 秒 |
| 取值范围 | 1-10 |
| 缺省值 | 1 |
### shellActivityTimer
| 属性 | 说明 |
| -------- | --------------------------------- |
| 适用范围 | 服务端和客户端均适用 |
| 含义 | shell 客户端向 mnode 发送心跳间隔 |
| 单位 | 秒 |
| 取值范围 | 1-120 |
| 缺省值 | 3 |
## 时间相关
## 性能调优
@ -367,28 +355,6 @@ charset 的有效值是 UTF-8。
| 含义 | 设置写入线程的最大数量 |
| 缺省值 | |
## 压缩相关
### compressMsgSize
| 属性 | 说明 |
| -------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
| 适用范围 | 仅服务端适用 |
| 含义 | 客户端与服务器之间进行消息通讯过程中,对通讯的消息进行压缩的阈值。如果要压缩消息,建议设置为 64330 字节,即大于 64330 字节的消息体才进行压缩。 |
| 单位 | bytes |
| 取值范围 | 0:对所有的消息均进行压缩;>0:超过该值的消息才进行压缩;-1:不压缩 |
| 缺省值 | -1 |
### compressColData
| 属性 | 说明 |
| -------- | --------------------------------------------------------------------------------------- |
| 适用范围 | 仅服务端适用 |
| 含义 | 客户端与服务器之间进行消息通讯过程中,对服务器端查询结果进行列压缩的阈值。 |
| 单位 | bytes |
| 取值范围 | 0:对所有查询结果均进行压缩;>0:查询结果中任意列大小超过该值的消息才进行压缩;-1:不压缩 |
| 缺省值 | -1 |
## 日志相关
### logDir
@ -613,7 +579,7 @@ charset 的有效值是 UTF-8。
| 属性 | 说明 |
| -------- | ------------------------- |
| 适用范围 | 仅客户端适用 |
| 含义 | schemaless 自定义的子表名 |
| 含义 | schemaless 自定义的子表名的 key |
| 类型 | 字符串 |
| 缺省值 | 无 |
@ -656,12 +622,18 @@ charset 的有效值是 UTF-8。
| 取值范围 | 0: 不启动1启动 |
| 缺省值 | 1 |
## 2.X 与 3.0 配置参数对比
## 压缩参数
:::note
对于 2.x 版本中适用但在 3.0 版本中废弃的参数,其当前行为会有特别说明
### compressMsgSize
:::
| 属性 | 说明 |
| -------- | ----------------------------- |
| 适用范围 | 服务端和客户端均适用 |
| 含义 | 是否对 RPC 消息进行压缩 |
| 取值范围 | -1: 所有消息都不压缩; 0: 所有消息都压缩; N (N>0): 只有大于 N 个字节的消息才压缩 |
| 缺省值 | -1 |
## 3.0 中有效的配置参数列表
| # | **参数** | **适用于 2.X ** | **适用于 3.0 ** | 3.0 版本的当前行为 |
| --- | :---------------------: | --------------- | --------------- | ------------------------------------------------- |
@ -674,159 +646,134 @@ charset 的有效值是 UTF-8。
| 7 | monitorFqdn | 否 | 是 | |
| 8 | monitorPort | 否 | 是 | |
| 9 | monitorInterval | 是 | 是 | |
| 10 | monitorMaxLogs | 否 | 是 | |
| 11 | monitorComp | 否 | 是 | |
| 12 | telemetryReporting | 是 | 是 | |
| 13 | telemetryInterval | 否 | 是 | |
| 14 | telemetryServer | 否 | 是 | |
| 15 | telemetryPort | 否 | 是 | |
| 16 | queryPolicy | 否 | 是 | |
| 17 | querySmaOptimize | 否 | 是 | |
| 18 | queryRsmaTolerance | 否 | 是 | |
| 19 | queryBufferSize | 是 | 是 | |
| 20 | maxNumOfDistinctRes | 是 | 是 | |
| 21 | minSlidingTime | 是 | 是 | |
| 22 | minIntervalTime | 是 | 是 | |
| 23 | countAlwaysReturnValue | 是 | 是 | |
| 24 | dataDir | 是 | 是 | |
| 25 | minimalDataDirGB | 是 | 是 | |
| 26 | supportVnodes | 否 | 是 | |
| 27 | tempDir | 是 | 是 | |
| 28 | minimalTmpDirGB | 是 | 是 | |
| 29 | compressMsgSize | 是 | 是 | |
| 30 | compressColData | 是 | 是 | |
| 31 | smlChildTableName | 是 | 是 | |
| 32 | smlTagName | 是 | 是 | |
| 33 | smlDataFormat | 否 | 是 | |
| 34 | statusInterval | 是 | 是 | |
| 35 | shellActivityTimer | 是 | 是 | |
| 36 | transPullupInterval | 否 | 是 | |
| 37 | mqRebalanceInterval | 否 | 是 | |
| 38 | ttlUnit | 否 | 是 | |
| 39 | ttlPushInterval | 否 | 是 | |
| 40 | numOfTaskQueueThreads | 否 | 是 | |
| 41 | numOfRpcThreads | 否 | 是 | |
| 42 | numOfCommitThreads | 是 | 是 | |
| 43 | numOfMnodeReadThreads | 否 | 是 | |
| 44 | numOfVnodeQueryThreads | 否 | 是 | |
| 45 | ratioOfVnodeStreamThreads | 否 | 是 | |
| 46 | numOfVnodeFetchThreads | 否 | 是 | |
| 47 | numOfVnodeRsmaThreads | 否 | 是 | |
| 48 | numOfQnodeQueryThreads | 否 | 是 | |
| 49 | numOfQnodeFetchThreads | 否 | 是 | |
| 50 | numOfSnodeSharedThreads | 否 | 是 | |
| 51 | numOfSnodeUniqueThreads | 否 | 是 | |
| 52 | rpcQueueMemoryAllowed | 否 | 是 | |
| 53 | logDir | 是 | 是 | |
| 54 | minimalLogDirGB | 是 | 是 | |
| 55 | numOfLogLines | 是 | 是 | |
| 56 | asyncLog | 是 | 是 | |
| 57 | logKeepDays | 是 | 是 | |
| 60 | debugFlag | 是 | 是 | |
| 61 | tmrDebugFlag | 是 | 是 | |
| 62 | uDebugFlag | 是 | 是 | |
| 63 | rpcDebugFlag | 是 | 是 | |
| 64 | jniDebugFlag | 是 | 是 | |
| 65 | qDebugFlag | 是 | 是 | |
| 66 | cDebugFlag | 是 | 是 | |
| 67 | dDebugFlag | 是 | 是 | |
| 68 | vDebugFlag | 是 | 是 | |
| 69 | mDebugFlag | 是 | 是 | |
| 70 | wDebugFlag | 是 | 是 | |
| 71 | sDebugFlag | 是 | 是 | |
| 72 | tsdbDebugFlag | 是 | 是 | |
| 73 | tqDebugFlag | 否 | 是 | |
| 74 | fsDebugFlag | 是 | 是 | |
| 75 | udfDebugFlag | 否 | 是 | |
| 76 | smaDebugFlag | 否 | 是 | |
| 77 | idxDebugFlag | 否 | 是 | |
| 78 | tdbDebugFlag | 否 | 是 | |
| 79 | metaDebugFlag | 否 | 是 | |
| 80 | timezone | 是 | 是 | |
| 81 | locale | 是 | 是 | |
| 82 | charset | 是 | 是 | |
| 83 | udf | 是 | 是 | |
| 84 | enableCoreFile | 是 | 是 | |
| 85 | arbitrator | 是 | 否 | 通过 RAFT 协议选主 |
| 86 | numOfThreadsPerCore | 是 | 否 | 有其它参数设置多种线程池的大小 |
| 87 | numOfMnodes | 是 | 否 | 通过 create mnode 命令动态创建 mnode |
| 88 | vnodeBak | 是 | 否 | 3.0 行为未知 |
| 89 | balance | 是 | 否 | 负载均衡功能由 split/merge vgroups 实现 |
| 90 | balanceInterval | 是 | 否 | 随着 balance 参数失效 |
| 91 | offlineThreshold | 是 | 否 | 3.0 行为未知 |
| 92 | role | 是 | 否 | 由 supportVnode 决定是否能够创建 |
| 93 | dnodeNopLoop | 是 | 否 | 2.6 文档中未找到此参数 |
| 94 | keepTimeOffset | 是 | 否 | 2.6 文档中未找到此参数 |
| 95 | rpcTimer | 是 | 否 | 3.0 行为未知 |
| 96 | rpcMaxTime | 是 | 否 | 3.0 行为未知 |
| 97 | rpcForceTcp | 是 | 否 | 默认为 TCP |
| 98 | tcpConnTimeout | 是 | 否 | 3.0 行为未知 |
| 99 | syncCheckInterval | 是 | 否 | 3.0 行为未知 |
| 100 | maxTmrCtrl | 是 | 否 | 3.0 行为未知 |
| 101 | monitorReplica | 是 | 否 | 由 RAFT 协议管理多副本 |
| 102 | smlTagNullName | 是 | 否 | 3.0 行为未知 |
| 103 | keepColumnName | 是 | 否 | 3.0 行为未知 |
| 104 | ratioOfQueryCores | 是 | 否 | 由 线程池 相关配置参数决定 |
| 105 | maxStreamCompDelay | 是 | 否 | 3.0 行为未知 |
| 106 | maxFirstStreamCompDelay | 是 | 否 | 3.0 行为未知 |
| 107 | retryStreamCompDelay | 是 | 否 | 3.0 行为未知 |
| 108 | streamCompDelayRatio | 是 | 否 | 3.0 行为未知 |
| 109 | maxVgroupsPerDb | 是 | 否 | 由 create db 的参数 vgroups 指定实际 vgroups 数量 |
| 110 | maxTablesPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
| 111 | minTablesPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
| 112 | tableIncStepPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
| 113 | cache | 是 | 否 | 由 buffer 代替 cache\*blocks |
| 114 | blocks | 是 | 否 | 由 buffer 代替 cache\*blocks |
| 115 | days | 是 | 否 | 由 create db 的参数 duration 取代 |
| 116 | keep | 是 | 否 | 由 create db 的参数 keep 取代 |
| 117 | minRows | 是 | 否 | 由 create db 的参数 minRows 取代 |
| 118 | maxRows | 是 | 否 | 由 create db 的参数 maxRows 取代 |
| 119 | quorum | 是 | 否 | 由 RAFT 协议决定 |
| 120 | comp | 是 | 否 | 由 create db 的参数 comp 取代 |
| 121 | walLevel | 是 | 否 | 由 create db 的参数 wal_level 取代 |
| 122 | fsync | 是 | 否 | 由 create db 的参数 wal_fsync_period 取代 |
| 123 | replica | 是 | 否 | 由 create db 的参数 replica 取代 |
| 124 | partitions | 是 | 否 | 3.0 行为未知 |
| 125 | update | 是 | 否 | 允许更新部分列 |
| 126 | cachelast | 是 | 否 | 由 create db 的参数 cacheModel 取代 |
| 127 | maxSQLLength | 是 | 否 | SQL 上限为 1MB无需参数控制 |
| 128 | maxWildCardsLength | 是 | 否 | 3.0 行为未知 |
| 129 | maxRegexStringLen | 是 | 否 | 3.0 行为未知 |
| 130 | maxNumOfOrderedRes | 是 | 否 | 3.0 行为未知 |
| 131 | maxConnections | 是 | 否 | 取决于系统配置和系统处理能力,详见后面的 Note |
| 132 | mnodeEqualVnodeNum | 是 | 否 | 3.0 行为未知 |
| 133 | http | 是 | 否 | http 服务由 taosAdapter 提供 |
| 134 | httpEnableRecordSql | 是 | 否 | taosd 不提供 http 服务 |
| 135 | httpMaxThreads | 是 | 否 | taosd 不提供 http 服务 |
| 136 | restfulRowLimit | 是 | 否 | taosd 不提供 http 服务 |
| 137 | httpDbNameMandatory | 是 | 否 | taosd 不提供 http 服务 |
| 138 | httpKeepAlive | 是 | 否 | taosd 不提供 http 服务 |
| 139 | enableRecordSql | 是 | 否 | 3.0 行为未知 |
| 140 | maxBinaryDisplayWidth | 是 | 否 | 3.0 行为未知 |
| 141 | stream | 是 | 否 | 默认启用连续查询 |
| 142 | retrieveBlockingModel | 是 | 否 | 3.0 行为未知 |
| 143 | tsdbMetaCompactRatio | 是 | 否 | 3.0 行为未知 |
| 144 | defaultJSONStrType | 是 | 否 | 3.0 行为未知 |
| 145 | walFlushSize | 是 | 否 | 3.0 行为未知 |
| 146 | keepTimeOffset | 是 | 否 | 3.0 行为未知 |
| 147 | flowctrl | 是 | 否 | 3.0 行为未知 |
| 148 | slaveQuery | 是 | 否 | 3.0 行为未知: slave vnode 是否能够处理查询? |
| 149 | adjustMaster | 是 | 否 | 3.0 行为未知 |
| 150 | topicBinaryLen | 是 | 否 | 3.0 行为未知 |
| 151 | telegrafUseFieldNum | 是 | 否 | 3.0 行为未知 |
| 152 | deadLockKillQuery | 是 | 否 | 3.0 行为未知 |
| 153 | clientMerge | 是 | 否 | 3.0 行为未知 |
| 154 | sdbDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
| 155 | odbcDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
| 156 | httpDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
| 157 | monDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
| 158 | cqDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
| 159 | shortcutFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
| 160 | probeSeconds | 是 | 否 | 3.0 行为未知 |
| 161 | probeKillSeconds | 是 | 否 | 3.0 行为未知 |
| 162 | probeInterval | 是 | 否 | 3.0 行为未知 |
| 163 | lossyColumns | 是 | 否 | 3.0 行为未知 |
| 164 | fPrecision | 是 | 否 | 3.0 行为未知 |
| 165 | dPrecision | 是 | 否 | 3.0 行为未知 |
| 166 | maxRange | 是 | 否 | 3.0 行为未知 |
| 167 | range | 是 | 否 | 3.0 行为未知 |
| 10 | queryPolicy | 否 | 是 | |
| 11 | querySmaOptimize | 否 | 是 | |
| 12 | maxNumOfDistinctRes | 是 | 是 | |
| 15 | countAlwaysReturnValue | 是 | 是 | |
| 16 | dataDir | 是 | 是 | |
| 17 | minimalDataDirGB | 是 | 是 | |
| 18 | supportVnodes | 否 | 是 | |
| 19 | tempDir | 是 | 是 | |
| 20 | minimalTmpDirGB | 是 | 是 | |
| 21 | smlChildTableName | 是 | 是 | |
| 22 | smlTagName | 是 | 是 | |
| 23 | smlDataFormat | 否 | 是 | |
| 24 | statusInterval | 是 | 是 | |
| 25 | logDir | 是 | 是 | |
| 26 | minimalLogDirGB | 是 | 是 | |
| 27 | numOfLogLines | 是 | 是 | |
| 28 | asyncLog | 是 | 是 | |
| 29 | logKeepDays | 是 | 是 | |
| 30 | debugFlag | 是 | 是 | |
| 31 | tmrDebugFlag | 是 | 是 | |
| 32 | uDebugFlag | 是 | 是 | |
| 33 | rpcDebugFlag | 是 | 是 | |
| 34 | jniDebugFlag | 是 | 是 | |
| 35 | qDebugFlag | 是 | 是 | |
| 36 | cDebugFlag | 是 | 是 | |
| 37 | dDebugFlag | 是 | 是 | |
| 38 | vDebugFlag | 是 | 是 | |
| 39 | mDebugFlag | 是 | 是 | |
| 40 | wDebugFlag | 是 | 是 | |
| 41 | sDebugFlag | 是 | 是 | |
| 42 | tsdbDebugFlag | 是 | 是 | |
| 43 | tqDebugFlag | 否 | 是 | |
| 44 | fsDebugFlag | 是 | 是 | |
| 45 | udfDebugFlag | 否 | 是 | |
| 46 | smaDebugFlag | 否 | 是 | |
| 47 | idxDebugFlag | 否 | 是 | |
| 48 | tdbDebugFlag | 否 | 是 | |
| 49 | metaDebugFlag | 否 | 是 | |
| 50 | timezone | 是 | 是 | |
| 51 | locale | 是 | 是 | |
| 52 | charset | 是 | 是 | |
| 53 | udf | 是 | 是 | |
| 54 | enableCoreFile | 是 | 是 | |
## 2.x->3.0 的废弃参数
| # | **参数** | **适用于 2.X ** | **适用于 3.0 ** | 3.0 版本的当前行为 |
| --- | :---------------------: | --------------- | --------------- | ------------------------------------------------- |
| 1 | arbitrator | 是 | 否 | 通过 RAFT 协议选主 |
| 2 | numOfThreadsPerCore | 是 | 否 | 有其它参数设置多种线程池的大小 |
| 3 | numOfMnodes | 是 | 否 | 通过 create mnode 命令动态创建 mnode |
| 4 | vnodeBak | 是 | 否 | 3.0 行为未知 |
| 5 | balance | 是 | 否 | 负载均衡功能由 split/merge vgroups 实现 |
| 6 | balanceInterval | 是 | 否 | 随着 balance 参数失效 |
| 7 | offlineThreshold | 是 | 否 | 3.0 行为未知 |
| 8 | role | 是 | 否 | 由 supportVnode 决定是否能够创建 |
| 9 | dnodeNopLoop | 是 | 否 | 2.6 文档中未找到此参数 |
| 10 | keepTimeOffset | 是 | 否 | 2.6 文档中未找到此参数 |
| 11 | rpcTimer | 是 | 否 | 3.0 行为未知 |
| 12 | rpcMaxTime | 是 | 否 | 3.0 行为未知 |
| 13 | rpcForceTcp | 是 | 否 | 默认为 TCP |
| 14 | tcpConnTimeout | 是 | 否 | 3.0 行为未知 |
| 15 | syncCheckInterval | 是 | 否 | 3.0 行为未知 |
| 16 | maxTmrCtrl | 是 | 否 | 3.0 行为未知 |
| 17 | monitorReplica | 是 | 否 | 由 RAFT 协议管理多副本 |
| 18 | smlTagNullName | 是 | 否 | 3.0 行为未知 |
| 19 | keepColumnName | 是 | 否 | 3.0 行为未知 |
| 20 | ratioOfQueryCores | 是 | 否 | 由 线程池 相关配置参数决定 |
| 21 | maxStreamCompDelay | 是 | 否 | 3.0 行为未知 |
| 22 | maxFirstStreamCompDelay | 是 | 否 | 3.0 行为未知 |
| 23 | retryStreamCompDelay | 是 | 否 | 3.0 行为未知 |
| 24 | streamCompDelayRatio | 是 | 否 | 3.0 行为未知 |
| 25 | maxVgroupsPerDb | 是 | 否 | 由 create db 的参数 vgroups 指定实际 vgroups 数量 |
| 26 | maxTablesPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
| 27 | minTablesPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
| 28 | tableIncStepPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
| 29 | cache | 是 | 否 | 由 buffer 代替 cache\*blocks |
| 30 | blocks | 是 | 否 | 由 buffer 代替 cache\*blocks |
| 31 | days | 是 | 否 | 由 create db 的参数 duration 取代 |
| 32 | keep | 是 | 否 | 由 create db 的参数 keep 取代 |
| 33 | minRows | 是 | 否 | 由 create db 的参数 minRows 取代 |
| 34 | maxRows | 是 | 否 | 由 create db 的参数 maxRows 取代 |
| 35 | quorum | 是 | 否 | 由 RAFT 协议决定 |
| 36 | comp | 是 | 否 | 由 create db 的参数 comp 取代 |
| 37 | walLevel | 是 | 否 | 由 create db 的参数 wal_level 取代 |
| 38 | fsync | 是 | 否 | 由 create db 的参数 wal_fsync_period 取代 |
| 39 | replica | 是 | 否 | 由 create db 的参数 replica 取代 |
| 40 | partitions | 是 | 否 | 3.0 行为未知 |
| 41 | update | 是 | 否 | 允许更新部分列 |
| 42 | cachelast | 是 | 否 | 由 create db 的参数 cacheModel 取代 |
| 43 | maxSQLLength | 是 | 否 | SQL 上限为 1MB无需参数控制 |
| 44 | maxWildCardsLength | 是 | 否 | 3.0 行为未知 |
| 45 | maxRegexStringLen | 是 | 否 | 3.0 行为未知 |
| 46 | maxNumOfOrderedRes | 是 | 否 | 3.0 行为未知 |
| 47 | maxConnections | 是 | 否 | 取决于系统配置和系统处理能力,详见后面的 Note |
| 48 | mnodeEqualVnodeNum | 是 | 否 | 3.0 行为未知 |
| 49 | http | 是 | 否 | http 服务由 taosAdapter 提供 |
| 50 | httpEnableRecordSql | 是 | 否 | taosd 不提供 http 服务 |
| 51 | httpMaxThreads | 是 | 否 | taosd 不提供 http 服务 |
| 52 | restfulRowLimit | 是 | 否 | taosd 不提供 http 服务 |
| 53 | httpDbNameMandatory | 是 | 否 | taosd 不提供 http 服务 |
| 54 | httpKeepAlive | 是 | 否 | taosd 不提供 http 服务 |
| 55 | enableRecordSql | 是 | 否 | 3.0 行为未知 |
| 56 | maxBinaryDisplayWidth | 是 | 否 | 3.0 行为未知 |
| 57 | stream | 是 | 否 | 默认启用连续查询 |
| 58 | retrieveBlockingModel | 是 | 否 | 3.0 行为未知 |
| 59 | tsdbMetaCompactRatio | 是 | 否 | 3.0 行为未知 |
| 60 | defaultJSONStrType | 是 | 否 | 3.0 行为未知 |
| 61 | walFlushSize | 是 | 否 | 3.0 行为未知 |
| 62 | keepTimeOffset | 是 | 否 | 3.0 行为未知 |
| 63 | flowctrl | 是 | 否 | 3.0 行为未知 |
| 64 | slaveQuery | 是 | 否 | 3.0 行为未知: slave vnode 是否能够处理查询? |
| 65 | adjustMaster | 是 | 否 | 3.0 行为未知 |
| 66 | topicBinaryLen | 是 | 否 | 3.0 行为未知 |
| 67 | telegrafUseFieldNum | 是 | 否 | 3.0 行为未知 |
| 68 | deadLockKillQuery | 是 | 否 | 3.0 行为未知 |
| 69 | clientMerge | 是 | 否 | 3.0 行为未知 |
| 70 | sdbDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
| 71 | odbcDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
| 72 | httpDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
| 73 | monDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
| 74 | cqDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
| 75 | shortcutFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
| 76 | probeSeconds | 是 | 否 | 3.0 行为未知 |
| 77 | probeKillSeconds | 是 | 否 | 3.0 行为未知 |
| 78 | probeInterval | 是 | 否 | 3.0 行为未知 |
| 79 | lossyColumns | 是 | 否 | 3.0 行为未知 |
| 80 | fPrecision | 是 | 否 | 3.0 行为未知 |
| 81 | dPrecision | 是 | 否 | 3.0 行为未知 |
| 82 | maxRange | 是 | 否 | 3.0 行为未知 |
| 83 | range | 是 | 否 | 3.0 行为未知 |

View File

@ -10,11 +10,22 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
import Release from "/components/ReleaseV3";
## 3.0.2.2
<Release type="tdengine" version="3.0.2.2" />
## 3.0.2.1
<Release type="tdengine" version="3.0.2.1" />
## 3.0.2.0
<Release type="tdengine" version="3.0.2.0" />
## 3.0.1.8
<Release type="tdengine" version="3.0.1.8" />
## 3.0.1.7
<Release type="tdengine" version="3.0.1.7" />

View File

@ -10,6 +10,18 @@ taosTools 各版本安装包下载链接如下:
import Release from "/components/ReleaseV3";
## 2.4.0
<Release type="tools" version="2.4.0" />
## 2.3.3
<Release type="tools" version="2.3.3" />
## 2.3.2
<Release type="tools" version="2.3.2" />
## 2.3.0
<Release type="tools" version="2.3.0" />

View File

@ -30,6 +30,11 @@ typedef int64_t tb_uid_t;
#define IS_TSWINDOW_SPECIFIED(win) (((win).skey != INT64_MIN) || ((win).ekey != INT64_MAX))
#define TSWINDOW_IS_EQUAL(t1, t2) (((t1).skey == (t2).skey) && ((t1).ekey == (t2).ekey))
// status codes for show cluster alive and show db.alive
#define SHOW_STATUS_NOT_AVAILABLE 0
#define SHOW_STATUS_AVAILABLE 1
#define SHOW_STATUS_HALF_AVAILABLE 2
typedef enum {
TSDB_SUPER_TABLE = 1, // super table
TSDB_CHILD_TABLE = 2, // table created from super table

View File

@ -195,7 +195,7 @@ typedef struct SDataBlockInfo {
uint32_t capacity;
SBlockID id;
int16_t hasVarCol;
int16_t dataLoad; // denote if the data is loaded or not
int16_t dataLoad; // denote if the data is loaded or not
// TODO: optimize and remove following
int64_t version; // used for stream, and need serialization
@ -204,8 +204,9 @@ typedef struct SDataBlockInfo {
STimeWindow calWin; // used for stream, do not serialize
TSKEY watermark; // used for stream
char parTbName[TSDB_TABLE_NAME_LEN]; // used for stream partition
STag* pTag; // used for stream partition
char parTbName[TSDB_TABLE_NAME_LEN]; // used for stream partition
int32_t tagLen;
void* pTag; // used for stream partition
} SDataBlockInfo;
typedef struct SSDataBlock {
@ -239,22 +240,22 @@ typedef struct SVarColAttr {
// pBlockAgg->numOfNull == info.rows, all data are null
// pBlockAgg->numOfNull == 0, no data are null.
typedef struct SColumnInfoData {
char* pData; // the corresponding block data in memory
char* pData; // the corresponding block data in memory
union {
char* nullbitmap; // bitmap, one bit for each item in the list
SVarColAttr varmeta;
};
SColumnInfo info; // column info
bool hasNull; // if current column data has null value.
SColumnInfo info; // column info
bool hasNull; // if current column data has null value.
} SColumnInfoData;
typedef struct SQueryTableDataCond {
uint64_t suid;
int32_t order; // desc|asc order to iterate the data block
int32_t order; // desc|asc order to iterate the data block
int32_t numOfCols;
SColumnInfo* colList;
  int32_t*     pSlotList;  // the column output destination slot, and it may be null
int32_t type; // data block load type:
  int32_t*     pSlotList;         // the column output destination slot, and it may be null
int32_t type; // data block load type:
STimeWindow twindows;
int64_t startVersion;
int64_t endVersion;
@ -291,6 +292,7 @@ typedef struct STableBlockDistInfo {
uint16_t numOfFiles;
uint32_t numOfTables;
uint32_t numOfBlocks;
uint32_t numOfVgroups;
uint64_t totalSize;
uint64_t totalRows;
int32_t maxRows;
@ -340,7 +342,7 @@ typedef struct SExprInfo {
typedef struct {
const char* key;
int32_t keyLen;
size_t keyLen;
uint8_t type;
union {
const char* value;
@ -349,7 +351,7 @@ typedef struct {
double d;
float f;
};
int32_t length;
size_t length;
} SSmlKv;
#define QUERY_ASC_FORWARD_STEP 1

View File

@ -265,7 +265,7 @@ void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag);
// for debug
char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf);
int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataBlocks, STSchema* pTSchema, int32_t vgId,
int32_t buildSubmitReqFromDataBlock(SSubmitReq2** pReq, const SSDataBlock* pDataBlocks, const STSchema* pTSchema, int64_t uid, int32_t vgId,
tb_uid_t suid);
char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId);

View File

@ -44,18 +44,38 @@ typedef struct SColData SColData;
#define HAS_VALUE ((uint8_t)0x4)
// bitmap ================================
const static uint8_t BIT2_MAP[4][4] = {{0b00000000, 0b00000001, 0b00000010, 0},
{0b00000000, 0b00000100, 0b00001000, 2},
{0b00000000, 0b00010000, 0b00100000, 4},
{0b00000000, 0b01000000, 0b10000000, 6}};
const static uint8_t BIT1_MAP[8] = {0b11111110, 0b11111101, 0b11111011, 0b11110111,
0b11101111, 0b11011111, 0b10111111, 0b01111111};
#define N1(n) ((((uint8_t)1) << (n)) - 1)
#define BIT1_SIZE(n) ((((n)-1) >> 3) + 1)
#define BIT2_SIZE(n) ((((n)-1) >> 2) + 1)
#define SET_BIT1(p, i, v) ((p)[(i) >> 3] = (p)[(i) >> 3] & N1((i)&7) | (((uint8_t)(v)) << ((i)&7)))
#define GET_BIT1(p, i) (((p)[(i) >> 3] >> ((i)&7)) & ((uint8_t)1))
#define SET_BIT2(p, i, v) ((p)[(i) >> 2] = (p)[(i) >> 2] & N1(BIT2_MAP[(i)&3][3]) | BIT2_MAP[(i)&3][(v)])
#define GET_BIT2(p, i) (((p)[(i) >> 2] >> BIT2_MAP[(i)&3][3]) & ((uint8_t)3))
const static uint8_t BIT2_MAP[4] = {0b11111100, 0b11110011, 0b11001111, 0b00111111};
#define ONE ((uint8_t)1)
#define THREE ((uint8_t)3)
#define DIV_8(i) ((i) >> 3)
#define MOD_8(i) ((i)&7)
#define DIV_4(i) ((i) >> 2)
#define MOD_4(i) ((i)&3)
#define MOD_4_TIME_2(i) (MOD_4(i) << 1)
#define BIT1_SIZE(n) (DIV_8((n)-1) + 1)
#define BIT2_SIZE(n) (DIV_4((n)-1) + 1)
#define SET_BIT1(p, i, v) ((p)[DIV_8(i)] = (p)[DIV_8(i)] & BIT1_MAP[MOD_8(i)] | ((v) << MOD_8(i)))
#define SET_BIT1_EX(p, i, v) \
do { \
if (MOD_8(i) == 0) { \
(p)[DIV_8(i)] = 0; \
} \
SET_BIT1(p, i, v); \
} while (0)
#define GET_BIT1(p, i) (((p)[DIV_8(i)] >> MOD_8(i)) & ONE)
#define SET_BIT2(p, i, v) ((p)[DIV_4(i)] = (p)[DIV_4(i)] & BIT2_MAP[MOD_4(i)] | ((v) << MOD_4_TIME_2(i)))
#define SET_BIT2_EX(p, i, v) \
do { \
if (MOD_4(i) == 0) { \
(p)[DIV_4(i)] = 0; \
} \
SET_BIT2(p, i, v); \
} while (0)
#define GET_BIT2(p, i) (((p)[DIV_4(i)] >> MOD_4_TIME_2(i)) & THREE)
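// --- usage sketch (editorial example, not part of this header) ---------------
// Packs 2-bit values and a 1-bit NULL bitmap with the macros above. SET_BIT2_EX
// zeroes each byte the first time slot 0 of that byte is written, so a buffer
// that is filled strictly in order does not need to be memset beforehand.
static void bitMacroUsageSketch(void) {
  uint8_t bm[BIT2_SIZE(4)];
  for (int32_t i = 0; i < 4; ++i) {
    SET_BIT2_EX(bm, i, (uint8_t)i);  // store 0,1,2,3 into the four 2-bit slots
  }
  // now GET_BIT2(bm, 0) == 0 and GET_BIT2(bm, 3) == 3

  uint8_t nulls[BIT1_SIZE(10)] = {0};
  SET_BIT1(nulls, 9, 1);  // mark row 9 in a one-bit-per-row bitmap
  // now GET_BIT1(nulls, 9) == 1 and GET_BIT1(nulls, 0) == 0
}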
// SBuffer ================================
struct SBuffer {
@ -70,9 +90,6 @@ int32_t tBufferInit(SBuffer *pBuffer, int64_t size);
int32_t tBufferPut(SBuffer *pBuffer, const void *pData, int64_t nData);
int32_t tBufferReserve(SBuffer *pBuffer, int64_t nData, void **ppData);
// STSchema ================================
void tDestroyTSchema(STSchema *pTSchema);
// SColVal ================================
#define CV_FLAG_VALUE ((int8_t)0x0)
#define CV_FLAG_NONE ((int8_t)0x1)
@ -87,8 +104,12 @@ void tDestroyTSchema(STSchema *pTSchema);
#define COL_VAL_IS_VALUE(CV) ((CV)->flag == CV_FLAG_VALUE)
// SRow ================================
int32_t tRowBuild(SArray *aColVal, STSchema *pTSchema, SBuffer *pBuffer);
int32_t tRowBuild(SArray *aColVal, const STSchema *pTSchema, SRow **ppRow);
void tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal);
void tRowDestroy(SRow *pRow);
void tRowSort(SArray *aRowP);
int32_t tRowMerge(SArray *aRowP, STSchema *pTSchema, int8_t flag);
int32_t tRowAppendToColData(SRow *pRow, STSchema *pTSchema, SColData *aColData, int32_t nColData);
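// Editorial note (inferred usage, not stated in this hunk): a typical writer
// builds one SRow per record from an SArray<SColVal> with tRowBuild(), collects
// the rows into an SArray<SRow*>, calls tRowSort() and tRowMerge() to order the
// array and merge rows with duplicate timestamps, and finally uses
// tRowAppendToColData() to convert the rows into per-column SColData.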
// SRowIter ================================
int32_t tRowIterOpen(SRow *pRow, STSchema *pTSchema, SRowIter **ppIter);
@ -110,15 +131,28 @@ void debugPrintSTag(STag *pTag, const char *tag, int32_t ln); // TODO: remov
int32_t parseJsontoTagData(const char *json, SArray *pTagVals, STag **ppTag, void *pMsgBuf);
// SColData ================================
typedef void *(*xMallocFn)(void *, int32_t);
void tColDataDestroy(void *ph);
void tColDataInit(SColData *pColData, int16_t cid, int8_t type, int8_t smaOn);
void tColDataClear(SColData *pColData);
void tColDataDeepClear(SColData *pColData);
int32_t tColDataAppendValue(SColData *pColData, SColVal *pColVal);
void tColDataGetValue(SColData *pColData, int32_t iVal, SColVal *pColVal);
uint8_t tColDataGetBitValue(const SColData *pColData, int32_t iVal);
int32_t tColDataCopy(SColData *pColDataSrc, SColData *pColDataDest);
int32_t tColDataCopy(SColData *pColDataFrom, SColData *pColData, xMallocFn xMalloc, void *arg);
extern void (*tColDataCalcSMA[])(SColData *pColData, int64_t *sum, int64_t *max, int64_t *min, int16_t *numOfNull);
// for stmt bind
int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind);
void tColDataSortMerge(SArray *colDataArr);
// for raw block
int32_t tColDataAddValueByDataBlock(SColData *pColData, int8_t type, int32_t bytes,
int32_t nRows, char* lengthOrbitmap, char *data);
// for encode/decode
int32_t tPutColData(uint8_t *pBuf, SColData *pColData);
int32_t tGetColData(uint8_t *pBuf, SColData *pColData);
// STRUCT ================================
struct STColumn {
col_id_t colId;
@ -225,23 +259,9 @@ struct STag {
memcpy(varDataVal(x), (str), (_size)); \
} while (0);
// ----------------- SCHEMA BUILDER DEFINITION
typedef struct {
int32_t tCols;
int32_t nCols;
schema_ver_t version;
uint16_t flen;
int32_t tlen;
STColumn *columns;
} STSchemaBuilder;
int32_t tdInitTSchemaBuilder(STSchemaBuilder *pBuilder, schema_ver_t version);
void tdDestroyTSchemaBuilder(STSchemaBuilder *pBuilder);
void tdResetTSchemaBuilder(STSchemaBuilder *pBuilder, schema_ver_t version);
int32_t tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int8_t flags, col_id_t colId, col_bytes_t bytes);
STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder);
// STSchema ================================
STSchema *tBuildTSchema(SSchema *aSchema, int32_t numOfCols, int32_t version);
void tDestroyTSchema(STSchema *pTSchema);
#endif

View File

@ -482,8 +482,6 @@ static FORCE_INLINE int32_t tDecodeSSchemaWrapperEx(SDecoder* pDecoder, SSchemaW
return 0;
}
STSchema* tdGetSTSChemaFromSSChema(SSchema* pSchema, int32_t nCols, int32_t sver);
typedef struct {
char name[TSDB_TABLE_FNAME_LEN];
int8_t igExists;
@ -1734,6 +1732,8 @@ typedef struct {
int32_t execId;
} STaskDropReq;
int32_t tSerializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
int32_t tDeserializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
int32_t tSerializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
int32_t tDeserializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
@ -1751,6 +1751,8 @@ typedef struct {
#define STREAM_FILL_HISTORY_ON 1
#define STREAM_FILL_HISTORY_OFF 0
#define STREAM_DEFAULT_FILL_HISTORY STREAM_FILL_HISTORY_OFF
#define STREAM_CREATE_STABLE_TRUE 1
#define STREAM_CREATE_STABLE_FALSE 0
typedef struct {
char name[TSDB_STREAM_FNAME_LEN];
@ -1768,6 +1770,8 @@ typedef struct {
SArray* pTags; // array of SField
// 3.0.20
int64_t checkpointFreq; // ms
// 3.0.2.3
int8_t createStb;
} SCMCreateStreamReq;
typedef struct {
@ -2081,10 +2085,15 @@ typedef struct SVCreateTbReq {
};
} SVCreateTbReq;
int tEncodeSVCreateTbReq(SEncoder* pCoder, const SVCreateTbReq* pReq);
int tDecodeSVCreateTbReq(SDecoder* pCoder, SVCreateTbReq* pReq);
int tEncodeSVCreateTbReq(SEncoder* pCoder, const SVCreateTbReq* pReq);
int tDecodeSVCreateTbReq(SDecoder* pCoder, SVCreateTbReq* pReq);
void tDestroySVCreateTbReq(SVCreateTbReq* pReq, int32_t flags);
static FORCE_INLINE void tdDestroySVCreateTbReq(SVCreateTbReq* req) {
if (NULL == req) {
return;
}
taosMemoryFreeClear(req->name);
taosMemoryFreeClear(req->comment);
if (req->type == TSDB_CHILD_TABLE) {
@ -3232,6 +3241,57 @@ int32_t tSerializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
int32_t tDeserializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
int32_t tSerializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
int32_t tDeserializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
int32_t tSerializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
int32_t tDeserializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
int32_t tSerializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
int32_t tDeserializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
#define SUBMIT_REQ_AUTO_CREATE_TABLE 0x1
#define SUBMIT_REQ_COLUMN_DATA_FORMAT 0x2
typedef struct {
int32_t flags;
SVCreateTbReq* pCreateTbReq;
int64_t suid;
int64_t uid;
int32_t sver;
union {
SArray* aRowP;
SArray* aCol;
};
} SSubmitTbData;
typedef struct {
SArray* aSubmitTbData; // SArray<SSubmitTbData>
} SSubmitReq2;
int32_t tEncodeSSubmitReq2(SEncoder* pCoder, const SSubmitReq2* pReq);
int32_t tDecodeSSubmitReq2(SDecoder* pCoder, SSubmitReq2* pReq);
void tDestroySSubmitTbData(SSubmitTbData* pTbData, int32_t flag);
void tDestroySSubmitReq2(SSubmitReq2* pReq, int32_t flag);
typedef struct {
int32_t affectedRows;
SArray* aCreateTbRsp; // SArray<SVCreateTbRsp>
} SSubmitRsp2;
int32_t tEncodeSSubmitRsp2(SEncoder* pCoder, const SSubmitRsp2* pRsp);
int32_t tDecodeSSubmitRsp2(SDecoder* pCoder, SSubmitRsp2* pRsp);
void tDestroySSubmitRsp2(SSubmitRsp2* pRsp, int32_t flag);
#define TSDB_MSG_FLG_ENCODE 0x1
#define TSDB_MSG_FLG_DECODE 0x2
typedef struct {
union {
struct {
void* msgStr;
int32_t msgLen;
int64_t ver;
};
void* pDataBlock;
};
} SPackedData;
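// --- usage sketch (editorial example, not part of this header) ---------------
// Assembles a one-table SSubmitReq2 (declared above) in row format. The flag
// semantics are inferred from their names: SUBMIT_REQ_COLUMN_DATA_FORMAT selects
// the aCol member of the union, otherwise aRowP (SArray<SRow*>) is used, and
// SUBMIT_REQ_AUTO_CREATE_TABLE means pCreateTbReq carries an auto-create
// request. suid/uid/sver and the row array are placeholders from the caller.
static void submitReq2UsageSketch(int64_t suid, int64_t uid, int32_t sver, SArray* aRowP) {
  SSubmitTbData tbData = {0};
  tbData.flags = 0;      // row format, no auto table creation
  tbData.suid = suid;
  tbData.uid = uid;
  tbData.sver = sver;
  tbData.aRowP = aRowP;  // rows built elsewhere, e.g. with tRowBuild()

  SSubmitReq2 req = {0};
  req.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData));
  taosArrayPush(req.aSubmitTbData, &tbData);
  // ... serialize with tEncodeSSubmitReq2() and send to the vnode ...
  tDestroySSubmitReq2(&req, TSDB_MSG_FLG_ENCODE);  // flag choice is illustrative
}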
#pragma pack(pop)

View File

@ -16,327 +16,331 @@
#ifndef _TD_COMMON_TOKEN_H_
#define _TD_COMMON_TOKEN_H_
#define TK_OR 1
#define TK_AND 2
#define TK_UNION 3
#define TK_ALL 4
#define TK_MINUS 5
#define TK_EXCEPT 6
#define TK_INTERSECT 7
#define TK_NK_BITAND 8
#define TK_NK_BITOR 9
#define TK_NK_LSHIFT 10
#define TK_NK_RSHIFT 11
#define TK_NK_PLUS 12
#define TK_NK_MINUS 13
#define TK_NK_STAR 14
#define TK_NK_SLASH 15
#define TK_NK_REM 16
#define TK_NK_CONCAT 17
#define TK_CREATE 18
#define TK_ACCOUNT 19
#define TK_NK_ID 20
#define TK_PASS 21
#define TK_NK_STRING 22
#define TK_ALTER 23
#define TK_PPS 24
#define TK_TSERIES 25
#define TK_STORAGE 26
#define TK_STREAMS 27
#define TK_QTIME 28
#define TK_DBS 29
#define TK_USERS 30
#define TK_CONNS 31
#define TK_STATE 32
#define TK_USER 33
#define TK_ENABLE 34
#define TK_NK_INTEGER 35
#define TK_SYSINFO 36
#define TK_DROP 37
#define TK_GRANT 38
#define TK_ON 39
#define TK_TO 40
#define TK_REVOKE 41
#define TK_FROM 42
#define TK_SUBSCRIBE 43
#define TK_NK_COMMA 44
#define TK_READ 45
#define TK_WRITE 46
#define TK_NK_DOT 47
#define TK_DNODE 48
#define TK_PORT 49
#define TK_DNODES 50
#define TK_NK_IPTOKEN 51
#define TK_FORCE 52
#define TK_LOCAL 53
#define TK_QNODE 54
#define TK_BNODE 55
#define TK_SNODE 56
#define TK_MNODE 57
#define TK_DATABASE 58
#define TK_USE 59
#define TK_FLUSH 60
#define TK_TRIM 61
#define TK_IF 62
#define TK_NOT 63
#define TK_EXISTS 64
#define TK_BUFFER 65
#define TK_CACHEMODEL 66
#define TK_CACHESIZE 67
#define TK_COMP 68
#define TK_DURATION 69
#define TK_NK_VARIABLE 70
#define TK_MAXROWS 71
#define TK_MINROWS 72
#define TK_KEEP 73
#define TK_PAGES 74
#define TK_PAGESIZE 75
#define TK_TSDB_PAGESIZE 76
#define TK_PRECISION 77
#define TK_REPLICA 78
#define TK_VGROUPS 79
#define TK_SINGLE_STABLE 80
#define TK_RETENTIONS 81
#define TK_SCHEMALESS 82
#define TK_WAL_LEVEL 83
#define TK_WAL_FSYNC_PERIOD 84
#define TK_WAL_RETENTION_PERIOD 85
#define TK_WAL_RETENTION_SIZE 86
#define TK_WAL_ROLL_PERIOD 87
#define TK_WAL_SEGMENT_SIZE 88
#define TK_STT_TRIGGER 89
#define TK_TABLE_PREFIX 90
#define TK_TABLE_SUFFIX 91
#define TK_NK_COLON 92
#define TK_MAX_SPEED 93
#define TK_TABLE 94
#define TK_NK_LP 95
#define TK_NK_RP 96
#define TK_STABLE 97
#define TK_ADD 98
#define TK_COLUMN 99
#define TK_MODIFY 100
#define TK_RENAME 101
#define TK_TAG 102
#define TK_SET 103
#define TK_NK_EQ 104
#define TK_USING 105
#define TK_TAGS 106
#define TK_COMMENT 107
#define TK_BOOL 108
#define TK_TINYINT 109
#define TK_SMALLINT 110
#define TK_INT 111
#define TK_INTEGER 112
#define TK_BIGINT 113
#define TK_FLOAT 114
#define TK_DOUBLE 115
#define TK_BINARY 116
#define TK_TIMESTAMP 117
#define TK_NCHAR 118
#define TK_UNSIGNED 119
#define TK_JSON 120
#define TK_VARCHAR 121
#define TK_MEDIUMBLOB 122
#define TK_BLOB 123
#define TK_VARBINARY 124
#define TK_DECIMAL 125
#define TK_MAX_DELAY 126
#define TK_WATERMARK 127
#define TK_ROLLUP 128
#define TK_TTL 129
#define TK_SMA 130
#define TK_DELETE_MARK 131
#define TK_FIRST 132
#define TK_LAST 133
#define TK_SHOW 134
#define TK_PRIVILEGES 135
#define TK_DATABASES 136
#define TK_TABLES 137
#define TK_STABLES 138
#define TK_MNODES 139
#define TK_QNODES 140
#define TK_FUNCTIONS 141
#define TK_INDEXES 142
#define TK_ACCOUNTS 143
#define TK_APPS 144
#define TK_CONNECTIONS 145
#define TK_LICENCES 146
#define TK_GRANTS 147
#define TK_QUERIES 148
#define TK_SCORES 149
#define TK_TOPICS 150
#define TK_VARIABLES 151
#define TK_CLUSTER 152
#define TK_BNODES 153
#define TK_SNODES 154
#define TK_TRANSACTIONS 155
#define TK_DISTRIBUTED 156
#define TK_CONSUMERS 157
#define TK_SUBSCRIPTIONS 158
#define TK_VNODES 159
#define TK_LIKE 160
#define TK_TBNAME 161
#define TK_QTAGS 162
#define TK_AS 163
#define TK_INDEX 164
#define TK_FUNCTION 165
#define TK_INTERVAL 166
#define TK_TOPIC 167
#define TK_WITH 168
#define TK_META 169
#define TK_CONSUMER 170
#define TK_GROUP 171
#define TK_DESC 172
#define TK_DESCRIBE 173
#define TK_RESET 174
#define TK_QUERY 175
#define TK_CACHE 176
#define TK_EXPLAIN 177
#define TK_ANALYZE 178
#define TK_VERBOSE 179
#define TK_NK_BOOL 180
#define TK_RATIO 181
#define TK_NK_FLOAT 182
#define TK_OUTPUTTYPE 183
#define TK_AGGREGATE 184
#define TK_BUFSIZE 185
#define TK_STREAM 186
#define TK_INTO 187
#define TK_TRIGGER 188
#define TK_AT_ONCE 189
#define TK_WINDOW_CLOSE 190
#define TK_IGNORE 191
#define TK_EXPIRED 192
#define TK_FILL_HISTORY 193
#define TK_SUBTABLE 194
#define TK_KILL 195
#define TK_CONNECTION 196
#define TK_TRANSACTION 197
#define TK_BALANCE 198
#define TK_VGROUP 199
#define TK_MERGE 200
#define TK_REDISTRIBUTE 201
#define TK_SPLIT 202
#define TK_DELETE 203
#define TK_INSERT 204
#define TK_NULL 205
#define TK_NK_QUESTION 206
#define TK_NK_ARROW 207
#define TK_ROWTS 208
#define TK_QSTART 209
#define TK_QEND 210
#define TK_QDURATION 211
#define TK_WSTART 212
#define TK_WEND 213
#define TK_WDURATION 214
#define TK_IROWTS 215
#define TK_CAST 216
#define TK_NOW 217
#define TK_TODAY 218
#define TK_TIMEZONE 219
#define TK_CLIENT_VERSION 220
#define TK_SERVER_VERSION 221
#define TK_SERVER_STATUS 222
#define TK_CURRENT_USER 223
#define TK_COUNT 224
#define TK_LAST_ROW 225
#define TK_CASE 226
#define TK_END 227
#define TK_WHEN 228
#define TK_THEN 229
#define TK_ELSE 230
#define TK_BETWEEN 231
#define TK_IS 232
#define TK_NK_LT 233
#define TK_NK_GT 234
#define TK_NK_LE 235
#define TK_NK_GE 236
#define TK_NK_NE 237
#define TK_MATCH 238
#define TK_NMATCH 239
#define TK_CONTAINS 240
#define TK_IN 241
#define TK_JOIN 242
#define TK_INNER 243
#define TK_SELECT 244
#define TK_DISTINCT 245
#define TK_WHERE 246
#define TK_PARTITION 247
#define TK_BY 248
#define TK_SESSION 249
#define TK_STATE_WINDOW 250
#define TK_SLIDING 251
#define TK_FILL 252
#define TK_VALUE 253
#define TK_NONE 254
#define TK_PREV 255
#define TK_LINEAR 256
#define TK_NEXT 257
#define TK_HAVING 258
#define TK_RANGE 259
#define TK_EVERY 260
#define TK_ORDER 261
#define TK_SLIMIT 262
#define TK_SOFFSET 263
#define TK_LIMIT 264
#define TK_OFFSET 265
#define TK_ASC 266
#define TK_NULLS 267
#define TK_ABORT 268
#define TK_AFTER 269
#define TK_ATTACH 270
#define TK_BEFORE 271
#define TK_BEGIN 272
#define TK_BITAND 273
#define TK_BITNOT 274
#define TK_BITOR 275
#define TK_BLOCKS 276
#define TK_CHANGE 277
#define TK_COMMA 278
#define TK_COMPACT 279
#define TK_CONCAT 280
#define TK_CONFLICT 281
#define TK_COPY 282
#define TK_DEFERRED 283
#define TK_DELIMITERS 284
#define TK_DETACH 285
#define TK_DIVIDE 286
#define TK_DOT 287
#define TK_EACH 288
#define TK_FAIL 289
#define TK_FILE 290
#define TK_FOR 291
#define TK_GLOB 292
#define TK_ID 293
#define TK_IMMEDIATE 294
#define TK_IMPORT 295
#define TK_INITIALLY 296
#define TK_INSTEAD 297
#define TK_ISNULL 298
#define TK_KEY 299
#define TK_MODULES 300
#define TK_NK_BITNOT 301
#define TK_NK_SEMI 302
#define TK_NOTNULL 303
#define TK_OF 304
#define TK_PLUS 305
#define TK_PRIVILEGE 306
#define TK_RAISE 307
#define TK_REPLACE 308
#define TK_RESTRICT 309
#define TK_ROW 310
#define TK_SEMI 311
#define TK_STAR 312
#define TK_STATEMENT 313
#define TK_STRICT 314
#define TK_STRING 315
#define TK_TIMES 316
#define TK_UPDATE 317
#define TK_VALUES 318
#define TK_VARIABLE 319
#define TK_VIEW 320
#define TK_WAL 321
#define TK_OR 1
#define TK_AND 2
#define TK_UNION 3
#define TK_ALL 4
#define TK_MINUS 5
#define TK_EXCEPT 6
#define TK_INTERSECT 7
#define TK_NK_BITAND 8
#define TK_NK_BITOR 9
#define TK_NK_LSHIFT 10
#define TK_NK_RSHIFT 11
#define TK_NK_PLUS 12
#define TK_NK_MINUS 13
#define TK_NK_STAR 14
#define TK_NK_SLASH 15
#define TK_NK_REM 16
#define TK_NK_CONCAT 17
#define TK_CREATE 18
#define TK_ACCOUNT 19
#define TK_NK_ID 20
#define TK_PASS 21
#define TK_NK_STRING 22
#define TK_ALTER 23
#define TK_PPS 24
#define TK_TSERIES 25
#define TK_STORAGE 26
#define TK_STREAMS 27
#define TK_QTIME 28
#define TK_DBS 29
#define TK_USERS 30
#define TK_CONNS 31
#define TK_STATE 32
#define TK_USER 33
#define TK_ENABLE 34
#define TK_NK_INTEGER 35
#define TK_SYSINFO 36
#define TK_DROP 37
#define TK_GRANT 38
#define TK_ON 39
#define TK_TO 40
#define TK_REVOKE 41
#define TK_FROM 42
#define TK_SUBSCRIBE 43
#define TK_NK_COMMA 44
#define TK_READ 45
#define TK_WRITE 46
#define TK_NK_DOT 47
#define TK_DNODE 48
#define TK_PORT 49
#define TK_DNODES 50
#define TK_NK_IPTOKEN 51
#define TK_FORCE 52
#define TK_LOCAL 53
#define TK_QNODE 54
#define TK_BNODE 55
#define TK_SNODE 56
#define TK_MNODE 57
#define TK_DATABASE 58
#define TK_USE 59
#define TK_FLUSH 60
#define TK_TRIM 61
#define TK_IF 62
#define TK_NOT 63
#define TK_EXISTS 64
#define TK_BUFFER 65
#define TK_CACHEMODEL 66
#define TK_CACHESIZE 67
#define TK_COMP 68
#define TK_DURATION 69
#define TK_NK_VARIABLE 70
#define TK_MAXROWS 71
#define TK_MINROWS 72
#define TK_KEEP 73
#define TK_PAGES 74
#define TK_PAGESIZE 75
#define TK_TSDB_PAGESIZE 76
#define TK_PRECISION 77
#define TK_REPLICA 78
#define TK_VGROUPS 79
#define TK_SINGLE_STABLE 80
#define TK_RETENTIONS 81
#define TK_SCHEMALESS 82
#define TK_WAL_LEVEL 83
#define TK_WAL_FSYNC_PERIOD 84
#define TK_WAL_RETENTION_PERIOD 85
#define TK_WAL_RETENTION_SIZE 86
#define TK_WAL_ROLL_PERIOD 87
#define TK_WAL_SEGMENT_SIZE 88
#define TK_STT_TRIGGER 89
#define TK_TABLE_PREFIX 90
#define TK_TABLE_SUFFIX 91
#define TK_NK_COLON 92
#define TK_MAX_SPEED 93
#define TK_TABLE 94
#define TK_NK_LP 95
#define TK_NK_RP 96
#define TK_STABLE 97
#define TK_ADD 98
#define TK_COLUMN 99
#define TK_MODIFY 100
#define TK_RENAME 101
#define TK_TAG 102
#define TK_SET 103
#define TK_NK_EQ 104
#define TK_USING 105
#define TK_TAGS 106
#define TK_COMMENT 107
#define TK_BOOL 108
#define TK_TINYINT 109
#define TK_SMALLINT 110
#define TK_INT 111
#define TK_INTEGER 112
#define TK_BIGINT 113
#define TK_FLOAT 114
#define TK_DOUBLE 115
#define TK_BINARY 116
#define TK_TIMESTAMP 117
#define TK_NCHAR 118
#define TK_UNSIGNED 119
#define TK_JSON 120
#define TK_VARCHAR 121
#define TK_MEDIUMBLOB 122
#define TK_BLOB 123
#define TK_VARBINARY 124
#define TK_DECIMAL 125
#define TK_MAX_DELAY 126
#define TK_WATERMARK 127
#define TK_ROLLUP 128
#define TK_TTL 129
#define TK_SMA 130
#define TK_DELETE_MARK 131
#define TK_FIRST 132
#define TK_LAST 133
#define TK_SHOW 134
#define TK_PRIVILEGES 135
#define TK_DATABASES 136
#define TK_TABLES 137
#define TK_STABLES 138
#define TK_MNODES 139
#define TK_QNODES 140
#define TK_FUNCTIONS 141
#define TK_INDEXES 142
#define TK_ACCOUNTS 143
#define TK_APPS 144
#define TK_CONNECTIONS 145
#define TK_LICENCES 146
#define TK_GRANTS 147
#define TK_QUERIES 148
#define TK_SCORES 149
#define TK_TOPICS 150
#define TK_VARIABLES 151
#define TK_CLUSTER 152
#define TK_BNODES 153
#define TK_SNODES 154
#define TK_TRANSACTIONS 155
#define TK_DISTRIBUTED 156
#define TK_CONSUMERS 157
#define TK_SUBSCRIPTIONS 158
#define TK_VNODES 159
#define TK_ALIVE 160
#define TK_LIKE 161
#define TK_TBNAME 162
#define TK_QTAGS 163
#define TK_AS 164
#define TK_INDEX 165
#define TK_FUNCTION 166
#define TK_INTERVAL 167
#define TK_COUNT 168
#define TK_LAST_ROW 169
#define TK_TOPIC 170
#define TK_WITH 171
#define TK_META 172
#define TK_CONSUMER 173
#define TK_GROUP 174
#define TK_DESC 175
#define TK_DESCRIBE 176
#define TK_RESET 177
#define TK_QUERY 178
#define TK_CACHE 179
#define TK_EXPLAIN 180
#define TK_ANALYZE 181
#define TK_VERBOSE 182
#define TK_NK_BOOL 183
#define TK_RATIO 184
#define TK_NK_FLOAT 185
#define TK_OUTPUTTYPE 186
#define TK_AGGREGATE 187
#define TK_BUFSIZE 188
#define TK_STREAM 189
#define TK_INTO 190
#define TK_TRIGGER 191
#define TK_AT_ONCE 192
#define TK_WINDOW_CLOSE 193
#define TK_IGNORE 194
#define TK_EXPIRED 195
#define TK_FILL_HISTORY 196
#define TK_SUBTABLE 197
#define TK_KILL 198
#define TK_CONNECTION 199
#define TK_TRANSACTION 200
#define TK_BALANCE 201
#define TK_VGROUP 202
#define TK_MERGE 203
#define TK_REDISTRIBUTE 204
#define TK_SPLIT 205
#define TK_DELETE 206
#define TK_INSERT 207
#define TK_NULL 208
#define TK_NK_QUESTION 209
#define TK_NK_ARROW 210
#define TK_ROWTS 211
#define TK_QSTART 212
#define TK_QEND 213
#define TK_QDURATION 214
#define TK_WSTART 215
#define TK_WEND 216
#define TK_WDURATION 217
#define TK_IROWTS 218
#define TK_ISFILLED 219
#define TK_CAST 220
#define TK_NOW 221
#define TK_TODAY 222
#define TK_TIMEZONE 223
#define TK_CLIENT_VERSION 224
#define TK_SERVER_VERSION 225
#define TK_SERVER_STATUS 226
#define TK_CURRENT_USER 227
#define TK_CASE 228
#define TK_END 229
#define TK_WHEN 230
#define TK_THEN 231
#define TK_ELSE 232
#define TK_BETWEEN 233
#define TK_IS 234
#define TK_NK_LT 235
#define TK_NK_GT 236
#define TK_NK_LE 237
#define TK_NK_GE 238
#define TK_NK_NE 239
#define TK_MATCH 240
#define TK_NMATCH 241
#define TK_CONTAINS 242
#define TK_IN 243
#define TK_JOIN 244
#define TK_INNER 245
#define TK_SELECT 246
#define TK_DISTINCT 247
#define TK_WHERE 248
#define TK_PARTITION 249
#define TK_BY 250
#define TK_SESSION 251
#define TK_STATE_WINDOW 252
#define TK_EVENT_WINDOW 253
#define TK_START 254
#define TK_SLIDING 255
#define TK_FILL 256
#define TK_VALUE 257
#define TK_NONE 258
#define TK_PREV 259
#define TK_LINEAR 260
#define TK_NEXT 261
#define TK_HAVING 262
#define TK_RANGE 263
#define TK_EVERY 264
#define TK_ORDER 265
#define TK_SLIMIT 266
#define TK_SOFFSET 267
#define TK_LIMIT 268
#define TK_OFFSET 269
#define TK_ASC 270
#define TK_NULLS 271
#define TK_ABORT 272
#define TK_AFTER 273
#define TK_ATTACH 274
#define TK_BEFORE 275
#define TK_BEGIN 276
#define TK_BITAND 277
#define TK_BITNOT 278
#define TK_BITOR 279
#define TK_BLOCKS 280
#define TK_CHANGE 281
#define TK_COMMA 282
#define TK_COMPACT 283
#define TK_CONCAT 284
#define TK_CONFLICT 285
#define TK_COPY 286
#define TK_DEFERRED 287
#define TK_DELIMITERS 288
#define TK_DETACH 289
#define TK_DIVIDE 290
#define TK_DOT 291
#define TK_EACH 292
#define TK_FAIL 293
#define TK_FILE 294
#define TK_FOR 295
#define TK_GLOB 296
#define TK_ID 297
#define TK_IMMEDIATE 298
#define TK_IMPORT 299
#define TK_INITIALLY 300
#define TK_INSTEAD 301
#define TK_ISNULL 302
#define TK_KEY 303
#define TK_MODULES 304
#define TK_NK_BITNOT 305
#define TK_NK_SEMI 306
#define TK_NOTNULL 307
#define TK_OF 308
#define TK_PLUS 309
#define TK_PRIVILEGE 310
#define TK_RAISE 311
#define TK_REPLACE 312
#define TK_RESTRICT 313
#define TK_ROW 314
#define TK_SEMI 315
#define TK_STAR 316
#define TK_STATEMENT 317
#define TK_STRICT 318
#define TK_STRING 319
#define TK_TIMES 320
#define TK_UPDATE 321
#define TK_VALUES 322
#define TK_VARIABLE 323
#define TK_VIEW 324
#define TK_WAL 325
#define TK_NK_SPACE 600
#define TK_NK_COMMENT 601

View File

@ -266,6 +266,7 @@ typedef struct {
#define IS_FLOAT_TYPE(_t) ((_t) == TSDB_DATA_TYPE_FLOAT || (_t) == TSDB_DATA_TYPE_DOUBLE)
#define IS_INTEGER_TYPE(_t) ((IS_SIGNED_NUMERIC_TYPE(_t)) || (IS_UNSIGNED_NUMERIC_TYPE(_t)))
#define IS_TIMESTAMP_TYPE(_t) ((_t) == TSDB_DATA_TYPE_TIMESTAMP)
#define IS_BOOLEAN_TYPE(_t) ((_t) == TSDB_DATA_TYPE_BOOL)
#define IS_NUMERIC_TYPE(_t) ((IS_SIGNED_NUMERIC_TYPE(_t)) || (IS_UNSIGNED_NUMERIC_TYPE(_t)) || (IS_FLOAT_TYPE(_t)))
#define IS_MATHABLE_TYPE(_t) \

View File

@ -22,7 +22,7 @@
typedef struct SExplainCtx SExplainCtx;
int32_t qExecCommand(bool sysInfoUser, SNode *pStmt, SRetrieveTableRsp **pRsp);
int32_t qExecCommand(int64_t* pConnId, bool sysInfoUser, SNode *pStmt, SRetrieveTableRsp **pRsp);
int32_t qExecStaticExplain(SQueryPlan *pDag, SRetrieveTableRsp **pRsp);
int32_t qExecExplainBegin(SQueryPlan *pDag, SExplainCtx **pCtx, int64_t startTs);

View File

@ -192,7 +192,9 @@ int32_t qStreamPrepareTsdbScan(qTaskInfo_t tinfo, uint64_t uid, int64_t ts);
int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subType);
int32_t qStreamScanMemData(qTaskInfo_t tinfo, const SSubmitReq* pReq);
// int32_t qStreamScanMemData(qTaskInfo_t tinfo, const SSubmitReq* pReq, int64_t ver);
//
int32_t qStreamSetScanMemData(qTaskInfo_t tinfo, SPackedData submit);
int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset);
@ -216,6 +218,7 @@ int32_t qStreamSourceRecoverStep2(qTaskInfo_t tinfo, int64_t ver);
int32_t qStreamRecoverFinish(qTaskInfo_t tinfo);
int32_t qStreamRestoreParam(qTaskInfo_t tinfo);
bool qStreamRecoverScanFinished(qTaskInfo_t tinfo);
void qStreamCloseTsdbReader(void* task);
#ifdef __cplusplus
}

View File

@ -120,6 +120,7 @@ typedef enum EFunctionType {
FUNCTION_TYPE_WEND,
FUNCTION_TYPE_WDURATION,
FUNCTION_TYPE_IROWTS,
FUNCTION_TYPE_ISFILLED,
FUNCTION_TYPE_TAGS,
// internal function

View File

@ -40,6 +40,7 @@ extern "C" {
#define SHOW_LOCAL_VARIABLES_RESULT_FIELD1_LEN (TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE)
#define SHOW_LOCAL_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE)
#define SHOW_ALIVE_RESULT_COLS 1
#define PRIVILEGE_TYPE_MASK(n) (1 << n)
#define PRIVILEGE_TYPE_ALL PRIVILEGE_TYPE_MASK(0)
@ -171,10 +172,10 @@ typedef struct SCreateSubTableClause {
STableOptions* pOptions;
} SCreateSubTableClause;
typedef struct SCreateMultiTableStmt {
typedef struct SCreateMultiTablesStmt {
ENodeType type;
SNodeList* pSubTables;
} SCreateMultiTableStmt;
} SCreateMultiTablesStmt;
typedef struct SDropTableClause {
ENodeType type;
@ -209,14 +210,14 @@ typedef struct SAlterTableStmt {
typedef struct SCreateUserStmt {
ENodeType type;
char useName[TSDB_USER_LEN];
char userName[TSDB_USER_LEN];
char password[TSDB_USET_PASSWORD_LEN];
int8_t sysinfo;
} SCreateUserStmt;
typedef struct SAlterUserStmt {
ENodeType type;
char useName[TSDB_USER_LEN];
char userName[TSDB_USER_LEN];
int8_t alterType;
char password[TSDB_USET_PASSWORD_LEN];
int8_t enable;
@ -225,7 +226,7 @@ typedef struct SAlterUserStmt {
typedef struct SDropUserStmt {
ENodeType type;
char useName[TSDB_USER_LEN];
char userName[TSDB_USER_LEN];
} SDropUserStmt;
typedef struct SCreateDnodeStmt {
@ -262,6 +263,11 @@ typedef struct SShowCreateDatabaseStmt {
void* pCfg; // SDbCfgInfo
} SShowCreateDatabaseStmt;
typedef struct SShowAliveStmt {
ENodeType type;
char dbName[TSDB_DB_NAME_LEN];
} SShowAliveStmt;
typedef struct SShowCreateTableStmt {
ENodeType type;
char dbName[TSDB_DB_NAME_LEN];
@ -295,7 +301,7 @@ typedef struct SShowTableTagsStmt {
SNodeList* pTags;
} SShowTableTagsStmt;
typedef enum EIndexType { INDEX_TYPE_SMA = 1, INDEX_TYPE_FULLTEXT } EIndexType;
typedef enum EIndexType { INDEX_TYPE_SMA = 1, INDEX_TYPE_FULLTEXT, INDEX_TYPE_NORMAL } EIndexType;
typedef struct SIndexOptions {
ENodeType type;
@ -401,6 +407,7 @@ typedef struct SCreateStreamStmt {
SNode* pQuery;
SNodeList* pTags;
SNode* pSubtable;
SNodeList* pCols;
} SCreateStreamStmt;
typedef struct SDropStreamStmt {

View File

@ -112,11 +112,12 @@ typedef enum ENodeType {
QUERY_NODE_COLUMN_REF,
QUERY_NODE_WHEN_THEN,
QUERY_NODE_CASE_WHEN,
QUERY_NODE_EVENT_WINDOW,
// Statement nodes are used in parser and planner module.
QUERY_NODE_SET_OPERATOR = 100,
QUERY_NODE_SELECT_STMT,
QUERY_NODE_VNODE_MODIF_STMT,
QUERY_NODE_VNODE_MODIFY_STMT,
QUERY_NODE_CREATE_DATABASE_STMT,
QUERY_NODE_DROP_DATABASE_STMT,
QUERY_NODE_ALTER_DATABASE_STMT,
@ -124,7 +125,7 @@ typedef enum ENodeType {
QUERY_NODE_TRIM_DATABASE_STMT,
QUERY_NODE_CREATE_TABLE_STMT,
QUERY_NODE_CREATE_SUBTABLE_CLAUSE,
QUERY_NODE_CREATE_MULTI_TABLE_STMT,
QUERY_NODE_CREATE_MULTI_TABLES_STMT,
QUERY_NODE_DROP_TABLE_CLAUSE,
QUERY_NODE_DROP_TABLE_STMT,
QUERY_NODE_DROP_SUPER_TABLE_STMT,
@ -207,6 +208,8 @@ typedef enum ENodeType {
QUERY_NODE_DELETE_STMT,
QUERY_NODE_INSERT_STMT,
QUERY_NODE_QUERY,
QUERY_NODE_SHOW_DB_ALIVE_STMT,
QUERY_NODE_SHOW_CLUSTER_ALIVE_STMT,
// logic plan node
QUERY_NODE_LOGIC_PLAN_SCAN = 1000,
@ -265,7 +268,9 @@ typedef enum ENodeType {
QUERY_NODE_PHYSICAL_PLAN_DELETE,
QUERY_NODE_PHYSICAL_SUBPLAN,
QUERY_NODE_PHYSICAL_PLAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN
QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN,
QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT,
QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT
} ENodeType;
/**

View File

@ -186,7 +186,12 @@ typedef struct SMergeLogicNode {
bool groupSort;
} SMergeLogicNode;
typedef enum EWindowType { WINDOW_TYPE_INTERVAL = 1, WINDOW_TYPE_SESSION, WINDOW_TYPE_STATE } EWindowType;
typedef enum EWindowType {
WINDOW_TYPE_INTERVAL = 1,
WINDOW_TYPE_SESSION,
WINDOW_TYPE_STATE,
WINDOW_TYPE_EVENT
} EWindowType;
typedef enum EWindowAlgorithm {
INTERVAL_ALGO_HASH = 1,
@ -213,6 +218,8 @@ typedef struct SWindowLogicNode {
SNode* pTspk;
SNode* pTsEnd;
SNode* pStateExpr;
SNode* pStartCond;
SNode* pEndCond;
int8_t triggerType;
int64_t watermark;
int64_t deleteMark;
@ -500,6 +507,14 @@ typedef struct SStateWinodwPhysiNode {
typedef SStateWinodwPhysiNode SStreamStateWinodwPhysiNode;
typedef struct SEventWinodwPhysiNode {
SWinodwPhysiNode window;
SNode* pStartCond;
SNode* pEndCond;
} SEventWinodwPhysiNode;
typedef SEventWinodwPhysiNode SStreamEventWinodwPhysiNode;
typedef struct SSortPhysiNode {
SPhysiNode node;
SNodeList* pExprs; // these are expression list of order_by_clause and parameter expression of aggregate function

View File

@ -223,6 +223,13 @@ typedef struct SIntervalWindowNode {
SNode* pFill;
} SIntervalWindowNode;
typedef struct SEventWindowNode {
ENodeType type; // QUERY_NODE_EVENT_WINDOW
SNode* pCol; // timestamp primary key
SNode* pStartCond;
SNode* pEndCond;
} SEventWindowNode;
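// Editorial note (illustrative, not from this hunk): this node carries the
// parse result of an event-window clause, e.g. EVENT_WINDOW(START WITH <cond>
// END WITH <cond>); the exact SQL syntax is an assumption here. pStartCond and
// pEndCond hold the opening and closing boolean expressions, and pCol the
// primary timestamp column used to bound the window.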
typedef enum EFillMode {
FILL_MODE_NONE = 1,
FILL_MODE_VALUE,
@ -354,34 +361,34 @@ typedef struct SVgDataBlocks {
void* pData; // SSubmitReq + SSubmitBlk + ...
} SVgDataBlocks;
typedef void (*FFreeDataBlockHash)(SHashObj*);
typedef void (*FFreeDataBlockArray)(SArray*);
typedef void (*FFreeTableBlockHash)(SHashObj*);
typedef void (*FFreeVgourpBlockArray)(SArray*);
typedef struct SVnodeModifOpStmt {
ENodeType nodeType;
ENodeType sqlNodeType;
SArray* pDataBlocks; // data block for each vgroup, SArray<SVgDataBlocks*>.
uint32_t insertType; // insert data from [file|sql statement| bound statement]
const char* pSql; // current sql statement position
int32_t totalRowsNum;
int32_t totalTbNum;
SName targetTableName;
SName usingTableName;
const char* pBoundCols;
struct STableMeta* pTableMeta;
SHashObj* pVgroupsHashObj;
SHashObj* pTableBlockHashObj;
SHashObj* pSubTableHashObj;
SHashObj* pTableNameHashObj;
SHashObj* pDbFNameHashObj;
SArray* pVgDataBlocks;
SVCreateTbReq createTblReq;
TdFilePtr fp;
FFreeDataBlockHash freeHashFunc;
FFreeDataBlockArray freeArrayFunc;
bool usingTableProcessing;
bool fileProcessing;
} SVnodeModifOpStmt;
typedef struct SVnodeModifyOpStmt {
ENodeType nodeType;
ENodeType sqlNodeType;
SArray* pDataBlocks; // data block for each vgroup, SArray<SVgDataBlocks*>.
uint32_t insertType; // insert data from [file|sql statement| bound statement]
const char* pSql; // current sql statement position
int32_t totalRowsNum;
int32_t totalTbNum;
SName targetTableName;
SName usingTableName;
const char* pBoundCols;
struct STableMeta* pTableMeta;
SHashObj* pVgroupsHashObj;
SHashObj* pTableBlockHashObj; // SHashObj<tuid, STableDataCxt*>
SHashObj* pSubTableHashObj;
SHashObj* pTableNameHashObj;
SHashObj* pDbFNameHashObj;
SArray* pVgDataBlocks; // SArray<SVgroupDataCxt*>
SVCreateTbReq* pCreateTblReq;
TdFilePtr fp;
FFreeTableBlockHash freeHashFunc;
FFreeVgourpBlockArray freeArrayFunc;
bool usingTableProcessing;
bool fileProcessing;
} SVnodeModifyOpStmt;
typedef struct SExplainOptions {
ENodeType type;

View File

@ -58,7 +58,6 @@ typedef struct SParseContext {
bool isSuperUser;
bool enableSysInfo;
bool async;
int8_t schemalessType;
const char* svrVer;
bool nodeOffline;
SArray* pTableMetaPos; // sql table pos => catalog data pos
@ -85,12 +84,12 @@ int32_t qSetSTableIdForRsma(SNode* pStmt, int64_t uid);
void qCleanupKeywordsTable();
int32_t qBuildStmtOutput(SQuery* pQuery, SHashObj* pVgHash, SHashObj* pBlockHash);
int32_t qResetStmtDataBlock(void* block, bool keepBuf);
int32_t qCloneStmtDataBlock(void** pDst, void* pSrc);
void qFreeStmtDataBlock(void* pDataBlock);
int32_t qRebuildStmtDataBlock(void** pDst, void* pSrc, uint64_t uid, int32_t vgId);
void qDestroyStmtDataBlock(void* pBlock);
STableMeta* qGetTableMetaInDataBlock(void* pDataBlock);
int32_t qResetStmtDataBlock(STableDataCxt* block, bool keepBuf);
int32_t qCloneStmtDataBlock(STableDataCxt** pDst, STableDataCxt* pSrc, bool reset);
int32_t qRebuildStmtDataBlock(STableDataCxt** pDst, STableDataCxt* pSrc, uint64_t uid, uint64_t suid, int32_t vgId, bool rebuildCreateTb);
void qDestroyStmtDataBlock(STableDataCxt* pBlock);
STableMeta* qGetTableMetaInDataBlock(STableDataCxt* pDataBlock);
int32_t qCloneCurrentTbData(STableDataCxt* pDataBlock, SSubmitTbData **pData);
int32_t qStmtBindParams(SQuery* pQuery, TAOS_MULTI_BIND* pParams, int32_t colIdx);
int32_t qStmtParseQuerySql(SParseContext* pCxt, SQuery* pQuery);
@ -105,11 +104,18 @@ void destroyBoundColumnInfo(void* pBoundInfo);
int32_t qCreateSName(SName* pName, const char* pTableName, int32_t acctId, char* dbName, char* msgBuf,
int32_t msgBufLen);
void* smlInitHandle(SQuery* pQuery);
void smlDestroyHandle(void* pHandle);
int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols, bool format, STableMeta* pTableMeta,
void qDestroyBoundColInfo(void* pInfo);
SQuery* smlInitHandle();
int32_t smlBuildRow(STableDataCxt* pTableCxt);
int32_t smlBuildCol(STableDataCxt* pTableCxt, SSchema* schema, void *kv, int32_t index);
STableDataCxt* smlInitTableDataCtx(SQuery* query, STableMeta* pTableMeta);
int32_t smlBindData(SQuery* handle, bool dataFormat, SArray* tags, SArray* colsSchema, SArray* cols, STableMeta* pTableMeta,
char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl, char* msgBuf, int16_t msgBufLen);
int32_t smlBuildOutput(void* handle, SHashObj* pVgHash);
int32_t smlBuildOutput(SQuery* handle, SHashObj* pVgHash);
int rawBlockBindData(SQuery *query, STableMeta* pTableMeta, void* data, SVCreateTbReq* pCreateTb, TAOS_FIELD *fields, int numFields);
int32_t rewriteToVnodeModifyOpStmt(SQuery* pQuery, SArray* pBufArray);
SArray* serializeVgroupsCreateTableBatch(SHashObj* pVgroupHashmap);

View File

@ -163,6 +163,23 @@ typedef struct STargetInfo {
int32_t vgId;
} STargetInfo;
typedef struct SBoundColInfo {
int16_t* pColIndex; // bound index => schema index
int32_t numOfCols;
int32_t numOfBound;
} SBoundColInfo;
typedef struct STableDataCxt {
STableMeta* pMeta;
STSchema* pSchema;
SBoundColInfo boundColsInfo;
SArray* pValues;
SSubmitTbData* pData;
TSKEY lastTs;
bool ordered;
bool duplicateTs;
} STableDataCxt;
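// Editorial note (inferred, not stated in this header): pColIndex maps bound
// position to schema position. For "INSERT INTO t(ts, c2) ..." against a schema
// (ts, c1, c2) this would give numOfCols = 3, numOfBound = 2, pColIndex = {0, 2};
// STableDataCxt then accumulates the bound values for one table together with
// its meta/schema and the SSubmitTbData being assembled.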
typedef int32_t (*__async_send_cb_fn_t)(void* param, SDataBuf* pMsg, int32_t code);
typedef int32_t (*__async_exec_fn_t)(void* param);
@ -238,6 +255,7 @@ int32_t dataConverToStr(char* str, int type, void* buf, int32_t bufSize, int32_t
char* parseTagDatatoJson(void* p);
int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst);
int32_t cloneDbVgInfo(SDBVgInfo* pSrc, SDBVgInfo** pDst);
int32_t cloneSVreateTbReq(SVCreateTbReq* pSrc, SVCreateTbReq** pDst);
void freeVgInfo(SDBVgInfo* vgInfo);
extern int32_t (*queryBuildMsg[TDMT_MAX])(void* input, char** msg, int32_t msgSize, int32_t* msgLen,

View File

@ -35,6 +35,7 @@ typedef struct STdbState {
TTB* pFillStateDb; // todo refactor
TTB* pSessionStateDb;
TTB* pParNameDb;
TTB* pParTagDb;
TXN* txn;
} STdbState;
@ -108,6 +109,9 @@ int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur);
int32_t streamStatePutParName(SStreamState* pState, int64_t groupId, const char* tbname);
int32_t streamStateGetParName(SStreamState* pState, int64_t groupId, void** pVal);
int32_t streamStatePutParTag(SStreamState* pState, int64_t groupId, const void* tag, int32_t tagLen);
int32_t streamStateGetParTag(SStreamState* pState, int64_t groupId, void** tagVal, int32_t* tagLen);
#if 0
char* streamStateSessionDump(SStreamState* pState);
#endif

View File

@ -103,6 +103,7 @@ typedef struct {
int8_t type;
} SStreamQueueItem;
#if 0
typedef struct {
int8_t type;
int64_t ver;
@ -116,6 +117,21 @@ typedef struct {
SArray* dataRefs; // SArray<int32_t*>
SArray* reqs; // SArray<SSubmitReq*>
} SStreamMergedSubmit;
#endif
typedef struct {
int8_t type;
int64_t ver;
int32_t* dataRef;
SPackedData submit;
} SStreamDataSubmit2;
typedef struct {
int8_t type;
int64_t ver;
SArray* dataRefs; // SArray<int32_t*>
SArray* submits; // SArray<SPackedSubmit>
} SStreamMergedSubmit2;
typedef struct {
int8_t type;
@ -219,11 +235,11 @@ static FORCE_INLINE void* streamQueueNextItem(SStreamQueue* queue) {
}
}
SStreamDataSubmit* streamDataSubmitNew(SSubmitReq* pReq);
SStreamDataSubmit2* streamDataSubmitNew(SPackedData submit);
void streamDataSubmitRefDec(SStreamDataSubmit* pDataSubmit);
void streamDataSubmitRefDec(SStreamDataSubmit2* pDataSubmit);
SStreamDataSubmit* streamSubmitRefClone(SStreamDataSubmit* pSubmit);
SStreamDataSubmit2* streamSubmitRefClone(SStreamDataSubmit2* pSubmit);
typedef struct {
char* qmsg;
@ -355,14 +371,15 @@ void tFreeSStreamTask(SStreamTask* pTask);
static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem* pItem) {
if (pItem->type == STREAM_INPUT__DATA_SUBMIT) {
SStreamDataSubmit* pSubmitClone = streamSubmitRefClone((SStreamDataSubmit*)pItem);
SStreamDataSubmit2* pSubmitClone = streamSubmitRefClone((SStreamDataSubmit2*)pItem);
if (pSubmitClone == NULL) {
qDebug("task %d %p submit enqueue failed since out of memory", pTask->taskId, pTask);
terrno = TSDB_CODE_OUT_OF_MEMORY;
atomic_store_8(&pTask->inputStatus, TASK_INPUT_STATUS__FAILED);
return -1;
}
qDebug("task %d %p submit enqueue %p %p %p", pTask->taskId, pTask, pItem, pSubmitClone, pSubmitClone->data);
qDebug("task %d %p submit enqueue %p %p %p %d %" PRId64, pTask->taskId, pTask, pItem, pSubmitClone,
pSubmitClone->submit.msgStr, pSubmitClone->submit.msgLen, pSubmitClone->submit.ver);
taosWriteQitem(pTask->inputQueue->queue, pSubmitClone);
// qStreamInput(pTask->exec.executor, pSubmitClone);
} else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE ||

View File

@ -116,6 +116,7 @@ extern "C" {
#include "osTimer.h"
#include "osTimezone.h"
#include "taoserror.h"
#include "tlog.h"
#ifdef __cplusplus
}

View File

@ -52,11 +52,13 @@ _exit:
return code;
}
static FORCE_INLINE void tFree(uint8_t *pBuf) {
if (pBuf) {
taosMemoryFree(pBuf - sizeof(int64_t));
}
}
#define tFree(BUF) \
do { \
if (BUF) { \
taosMemoryFree((uint8_t *)(BUF) - sizeof(int64_t)); \
(BUF) = NULL; \
} \
} while (0)
#ifdef __cplusplus
}

View File

@ -158,6 +158,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TSC_QUERY_KILLED TAOS_DEF_ERROR_CODE(0, 0X022D)
#define TSDB_CODE_TSC_NO_EXEC_NODE TAOS_DEF_ERROR_CODE(0, 0X022E)
#define TSDB_CODE_TSC_NOT_STABLE_ERROR TAOS_DEF_ERROR_CODE(0, 0X022F)
#define TSDB_CODE_TSC_STMT_CACHE_ERROR TAOS_DEF_ERROR_CODE(0, 0X0230)
// mnode-common
// #define TSDB_CODE_MND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0300) // 2.x
@ -714,7 +715,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_RSMA_INVALID_ENV TAOS_DEF_ERROR_CODE(0, 0x3150)
#define TSDB_CODE_RSMA_INVALID_STAT TAOS_DEF_ERROR_CODE(0, 0x3151)
#define TSDB_CODE_RSMA_QTASKINFO_CREATE TAOS_DEF_ERROR_CODE(0, 0x3152)
// #define TSDB_CODE_RSMA_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x3153)
#define TSDB_CODE_RSMA_FS_COMMIT TAOS_DEF_ERROR_CODE(0, 0x3153)
#define TSDB_CODE_RSMA_REMOVE_EXISTS TAOS_DEF_ERROR_CODE(0, 0x3154)
#define TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP TAOS_DEF_ERROR_CODE(0, 0x3155)
#define TSDB_CODE_RSMA_EMPTY_INFO TAOS_DEF_ERROR_CODE(0, 0x3156)
@ -722,6 +723,9 @@ int32_t* taosGetErrno();
#define TSDB_CODE_RSMA_REGEX_MATCH TAOS_DEF_ERROR_CODE(0, 0x3158)
#define TSDB_CODE_RSMA_STREAM_STATE_OPEN TAOS_DEF_ERROR_CODE(0, 0x3159)
#define TSDB_CODE_RSMA_STREAM_STATE_COMMIT TAOS_DEF_ERROR_CODE(0, 0x3160)
#define TSDB_CODE_RSMA_FS_REF TAOS_DEF_ERROR_CODE(0, 0x3161)
#define TSDB_CODE_RSMA_FS_SYNC TAOS_DEF_ERROR_CODE(0, 0x3162)
#define TSDB_CODE_RSMA_FS_UPDATE TAOS_DEF_ERROR_CODE(0, 0x3163)
//index
#define TSDB_CODE_INDEX_REBUILDING TAOS_DEF_ERROR_CODE(0, 0x3200)

View File

@ -22,19 +22,6 @@
extern "C" {
#endif
#if 0
#define TARRAY(TYPE) \
struct { \
int32_t tarray_size_; \
int32_t tarray_neles_; \
struct TYPE* td_array_data_; \
}
#define TARRAY_SIZE(ARRAY) (ARRAY)->tarray_size_
#define TARRAY_NELES(ARRAY) (ARRAY)->tarray_neles_
#define TARRAY_ELE_AT(ARRAY, IDX) ((ARRAY)->td_array_data_ + idx)
#endif
#define TARRAY_MIN_SIZE 8
#define TARRAY_GET_ELEM(array, index) ((void*)((char*)((array)->pData) + (index) * (array)->elemSize))
#define TARRAY_ELEM_IDX(array, ele) (POINTER_DISTANCE(ele, (array)->pData) / (array)->elemSize)
@ -46,6 +33,9 @@ typedef struct SArray {
void* pData;
} SArray;
#define TARRAY_SIZE(array) ((array)->size)
#define TARRAY_DATA(array) ((array)->pData)
/**
*
* @param size
@ -194,6 +184,13 @@ void taosArrayPopTailBatch(SArray* pArray, size_t cnt);
*/
void taosArrayRemove(SArray* pArray, size_t index);
/**
* remove batch entry from the given index
* @param pArray
* @param index
*/
void taosArrayRemoveBatch(SArray* pArray, size_t index, size_t num, FDelete fp);
/**
* copy the whole array from source to destination
* @param pDst

View File

@ -190,10 +190,11 @@ typedef enum ELogicConditionType {
#define TSDB_MIN_COLUMNS 2 // PRIMARY COLUMN(timestamp) + other columns
#define TSDB_NODE_NAME_LEN 64
#define TSDB_TABLE_NAME_LEN 193 // it is a null-terminated string
#define TSDB_TOPIC_NAME_LEN 193 // it is a null-terminated string
#define TSDB_CGROUP_LEN 193 // it is a null-terminated string
#define TSDB_STREAM_NAME_LEN 193 // it is a null-terminated string
#define TSDB_TABLE_NAME_LEN 193 // it is a null-terminated string
#define TSDB_TOPIC_NAME_LEN 193 // it is a null-terminated string
#define TSDB_CGROUP_LEN 193 // it is a null-terminated string
#define TSDB_USER_CGROUP_LEN (TSDB_USER_LEN + TSDB_CGROUP_LEN) // it is a null-terminated string
#define TSDB_STREAM_NAME_LEN 193 // it is a null-terminated string
#define TSDB_DB_NAME_LEN 65
#define TSDB_DB_FNAME_LEN (TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN + TSDB_NAME_DELIMITER_LEN)

View File

@ -116,6 +116,7 @@ static int32_t tEncodeI64v(SEncoder* pCoder, int64_t val);
static int32_t tEncodeFloat(SEncoder* pCoder, float val);
static int32_t tEncodeDouble(SEncoder* pCoder, double val);
static int32_t tEncodeBinary(SEncoder* pCoder, const uint8_t* val, uint32_t len);
static int32_t tEncodeBinaryEx(SEncoder* pCoder, const uint8_t* val, uint32_t len);
static int32_t tEncodeCStrWithLen(SEncoder* pCoder, const char* val, uint32_t len);
static int32_t tEncodeCStr(SEncoder* pCoder, const char* val);

View File

@ -14,6 +14,7 @@ set binary_dir=%3
set binary_dir=%binary_dir:/=\\%
set osType=%4
set verNumber=%5
set Enterprise=%6
set target_dir=C:\\TDengine
if not exist %target_dir% (
@ -57,7 +58,33 @@ if exist %binary_dir%\\build\\lib\\taosws.dll (
if exist %binary_dir%\\build\\bin\\taosdump.exe (
copy %binary_dir%\\build\\bin\\taosdump.exe %target_dir% > nul
)
if %Enterprise% == TRUE (
if exist %binary_dir%\\build\\bin\\taosx.exe (
copy %binary_dir%\\build\\bin\\taosx.exe %target_dir% > nul
)
if exist %binary_dir%\\build\\bin\\tmq_sim.exe (
copy %binary_dir%\\build\\bin\\tmq_sim.exe %target_dir% > nul
)
if exist %binary_dir%\\build\\bin\\tsim.exe (
copy %binary_dir%\\build\\bin\\tsim.exe %target_dir% > nul
)
if exist %binary_dir%\\build\\bin\\tmq_taosx_ci.exe (
copy %binary_dir%\\build\\bin\\tmq_taosx_ci.exe %target_dir% > nul
)
if exist %binary_dir%\\build\\bin\\tmq_demo.exe (
copy %binary_dir%\\build\\bin\\tmq_demo.exe %target_dir% > nul
)
if exist %binary_dir%\\build\\bin\\dumper.exe (
copy %binary_dir%\\build\\bin\\dumper.exe %target_dir% > nul
)
if exist %binary_dir%\\build\\bin\\runUdf.exe (
copy %binary_dir%\\build\\bin\\runUdf.exe %target_dir% > nul
)
if exist %binary_dir%\\build\\bin\\create_table.exe (
copy %binary_dir%\\build\\bin\\create_table.exe %target_dir% > nul
)
)
copy %binary_dir%\\build\\bin\\taosd.exe %target_dir% > nul
copy %binary_dir%\\build\\bin\\udfd.exe %target_dir% > nul

View File

@ -149,7 +149,6 @@ typedef struct STscObj {
int32_t numOfReqs; // number of sqlObj bound to this connection
SAppInstInfo* pAppInfo;
SHashObj* pRequests;
int8_t schemalessType; // todo remove it, this attribute should be move to request
} STscObj;
typedef struct SResultColumn {

View File

@ -30,7 +30,7 @@ extern "C" {
#define tscDebug(...) do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLog("TSC ", DEBUG_DEBUG, cDebugFlag, __VA_ARGS__); }} while(0)
#define tscTrace(...) do { if (cDebugFlag & DEBUG_TRACE) { taosPrintLog("TSC ", DEBUG_TRACE, cDebugFlag, __VA_ARGS__); }} while(0)
#define tscDebugL(...) do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLongString("TSC ", DEBUG_DEBUG, cDebugFlag, __VA_ARGS__); }} while(0)
//#define tscPerf(...) do { if (cDebugFlag & DEBUG_INFO) { taosPrintLog("TSC ", DEBUG_INFO, cDebugFlag, __VA_ARGS__); }} while(0)
#define tscPerf(...) do { if (cDebugFlag & DEBUG_INFO) { taosPrintLog("TSC ", 0, cDebugFlag, __VA_ARGS__); }} while(0)
// clang-format on
#ifdef __cplusplus

View File

@ -0,0 +1,244 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_CLIENTSML_H
#define TDENGINE_CLIENTSML_H
#ifdef __cplusplus
extern "C" {
#endif
#include "catalog.h"
#include "clientInt.h"
#include "osThread.h"
#include "query.h"
#include "taos.h"
#include "taoserror.h"
#include "tcommon.h"
#include "tdef.h"
#include "tglobal.h"
#include "tlog.h"
#include "tmsg.h"
#include "tname.h"
#include "ttime.h"
#include "ttypes.h"
#include "cJSON.h"
#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
# define expect(expr,value) (__builtin_expect ((expr),(value)) )
#else
# define expect(expr,value) (expr)
#endif
#ifndef likely
#define likely(expr) expect((expr) != 0, 1)
#endif
#ifndef unlikely
#define unlikely(expr) expect((expr) != 0, 0)
#endif
#define SPACE ' '
#define COMMA ','
#define EQUAL '='
#define QUOTE '"'
#define SLASH '\\'
#define JUMP_SPACE(sql, sqlEnd) \
while (sql < sqlEnd) { \
if (unlikely(*sql == SPACE)) \
sql++; \
else \
break; \
}
#define IS_INVALID_COL_LEN(len) ((len) <= 0 || (len) >= TSDB_COL_NAME_LEN)
#define IS_INVALID_TABLE_LEN(len) ((len) <= 0 || (len) >= TSDB_TABLE_NAME_LEN)
#define TS "_ts"
#define TS_LEN 3
#define VALUE "_value"
#define VALUE_LEN 6
#define OTD_JSON_FIELDS_NUM 4
#define MAX_RETRY_TIMES 5
typedef TSDB_SML_PROTOCOL_TYPE SMLProtocolType;
typedef enum {
SCHEMA_ACTION_NULL,
SCHEMA_ACTION_CREATE_STABLE,
SCHEMA_ACTION_ADD_COLUMN,
SCHEMA_ACTION_ADD_TAG,
SCHEMA_ACTION_CHANGE_COLUMN_SIZE,
SCHEMA_ACTION_CHANGE_TAG_SIZE,
} ESchemaAction;
typedef struct {
const void *key;
int32_t keyLen;
void *value;
bool used;
}Node;
typedef struct NodeList{
Node data;
struct NodeList* next;
}NodeList;
typedef struct {
char *measure;
char *tags;
char *cols;
char *timestamp;
int32_t measureLen;
int32_t measureTagsLen;
int32_t tagsLen;
int32_t colsLen;
int32_t timestampLen;
SArray *colArray;
} SSmlLineInfo;
typedef struct {
const char *sTableName; // super table name
int32_t sTableNameLen;
char childTableName[TSDB_TABLE_NAME_LEN];
uint64_t uid;
void *key; // for openTsdb
SArray *tags;
// elements are SHashObj<cols key string, SSmlKv*> for find by key quickly
SArray *cols;
STableDataCxt *tableDataCtx;
} SSmlTableInfo;
typedef struct {
SArray *tags; // save the origin order to create table
SHashObj *tagHash; // elements are <key, index in tags>
SArray *cols;
SHashObj *colHash;
STableMeta *tableMeta;
} SSmlSTableMeta;
typedef struct {
int32_t len;
char *buf;
} SSmlMsgBuf;
typedef struct {
int32_t code;
int32_t lineNum;
int32_t numOfSTables;
int32_t numOfCTables;
int32_t numOfCreateSTables;
int32_t numOfAlterColSTables;
int32_t numOfAlterTagSTables;
int64_t parseTime;
int64_t schemaTime;
int64_t insertBindTime;
int64_t insertRpcTime;
int64_t endTime;
} SSmlCostInfo;
typedef struct {
int64_t id;
SMLProtocolType protocol;
int8_t precision;
bool reRun;
bool dataFormat; // true means that the name and order of keys in each line are the same(only for influx protocol)
bool isRawLine;
int32_t ttl;
NodeList *childTables;
NodeList *superTables;
SHashObj *pVgHash;
STscObj *taos;
SCatalog *pCatalog;
SRequestObj *pRequest;
SQuery *pQuery;
SSmlCostInfo cost;
int32_t lineNum;
SSmlMsgBuf msgBuf;
cJSON *root; // for parse json
int8_t offset[OTD_JSON_FIELDS_NUM];
SSmlLineInfo *lines; // element is SSmlLineInfo
bool parseJsonByLib;
SArray *tagJsonArray;
//
SArray *preLineTagKV;
SArray *maxTagKVs;
SArray *preLineColKV;
SSmlLineInfo preLine;
STableMeta *currSTableMeta;
STableDataCxt *currTableDataCtx;
bool needModifySchema;
} SSmlHandle;
#define IS_SAME_CHILD_TABLE (elements->measureTagsLen == info->preLine.measureTagsLen \
&& memcmp(elements->measure, info->preLine.measure, elements->measureTagsLen) == 0)
#define IS_SAME_SUPER_TABLE (elements->measureLen == info->preLine.measureLen \
&& memcmp(elements->measure, info->preLine.measure, elements->measureLen) == 0)
#define IS_SAME_KEY (maxKV->keyLen == kv.keyLen && memcmp(maxKV->key, kv.key, kv.keyLen) == 0)
extern int64_t smlFactorNS[3];
extern int64_t smlFactorS[3];
typedef int32_t (*_equal_fn_sml)(const void *, const void *);
SSmlHandle *smlBuildSmlInfo(TAOS *taos);
void smlDestroyInfo(SSmlHandle *info);
int smlJsonParseObjFirst(char **start, SSmlLineInfo *element, int8_t *offset);
int smlJsonParseObj(char **start, SSmlLineInfo *element, int8_t *offset);
//SArray *smlJsonParseTags(char *start, char *end);
bool smlParseNumberOld(SSmlKv *kvVal, SSmlMsgBuf *msg);
void* nodeListGet(NodeList* list, const void *key, int32_t len, _equal_fn_sml fn);
int nodeListSet(NodeList** list, const void *key, int32_t len, void* value, _equal_fn_sml fn);
int nodeListSize(NodeList* list);
bool smlDoubleToInt64OverFlow(double num);
int32_t smlBuildInvalidDataMsg(SSmlMsgBuf *pBuf, const char *msg1, const char *msg2);
bool smlParseNumber(SSmlKv *kvVal, SSmlMsgBuf *msg);
int64_t smlGetTimeValue(const char *value, int32_t len, uint8_t fromPrecision, uint8_t toPrecision);
int8_t smlGetTsTypeByLen(int32_t len);
SSmlTableInfo* smlBuildTableInfo(int numRows, const char* measure, int32_t measureLen);
SSmlSTableMeta* smlBuildSTableMeta(bool isDataFormat);
int32_t smlSetCTableName(SSmlTableInfo *oneTable);
STableMeta* smlGetMeta(SSmlHandle *info, const void* measure, int32_t measureLen);
int32_t is_same_child_table_telnet(const void *a, const void *b);
int64_t smlParseOpenTsdbTime(SSmlHandle *info, const char *data, int32_t len);
int32_t smlClearForRerun(SSmlHandle *info);
int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg);
uint8_t smlGetTimestampLen(int64_t num);
void clearColValArray(SArray* pCols);
int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements);
int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements);
int32_t smlParseJSON(SSmlHandle *info, char *payload);
#ifdef __cplusplus
}
#endif
#endif // TDENGINE_CLIENTSML_H

View File

@ -21,8 +21,6 @@ extern "C" {
#endif
#include "catalog.h"
typedef void STableDataBlocks;
typedef enum {
STMT_TYPE_INSERT = 1,
STMT_TYPE_MULTI_INSERT,
@ -43,8 +41,8 @@ typedef enum {
} STMT_STATUS;
typedef struct SStmtTableCache {
STableDataBlocks *pDataBlock;
void *boundTags;
STableDataCxt *pDataCtx;
void *boundTags;
} SStmtTableCache;
typedef struct SStmtQueryResInfo {
@ -71,10 +69,11 @@ typedef struct SStmtBindInfo {
} SStmtBindInfo;
typedef struct SStmtExecInfo {
int32_t affectedRows;
SRequestObj *pRequest;
SHashObj *pBlockHash;
bool autoCreateTbl;
int32_t affectedRows;
SRequestObj *pRequest;
SHashObj *pBlockHash;
STableDataCxt *pCurrBlock;
SSubmitTbData *pCurrTbData;
} SStmtExecInfo;
typedef struct SStmtSQLInfo {

View File

@ -76,13 +76,19 @@ static void deregisterRequest(SRequestObj *pRequest) {
"current:%d, app current:%d",
pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000.0, num, currentInst);
if (QUERY_NODE_VNODE_MODIF_STMT == pRequest->stmtType) {
// tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
// "us, exec:%" PRId64 "us",
// duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
// pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd -
// pRequest->metric.ctgEnd, pRequest->metric.execEnd - pRequest->metric.semanticEnd);
atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration);
tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
"us, exec:%" PRId64 "us, stmtType:%d",
duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
pRequest->metric.execEnd - pRequest->metric.semanticEnd, pRequest->stmtType);
if (QUERY_NODE_VNODE_MODIFY_STMT == pRequest->stmtType) {
// tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
// "us, exec:%" PRId64 "us",
// duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
// pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd -
// pRequest->metric.ctgEnd, pRequest->metric.execEnd - pRequest->metric.semanticEnd);
// atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration);
} else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) {
// tscPerf("select duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
// "us, planner:%" PRId64 "us, exec:%" PRId64 "us, reqId:0x%" PRIx64,
@ -264,7 +270,6 @@ void *createTscObj(const char *user, const char *auth, const char *db, int32_t c
taosThreadMutexInit(&pObj->mutex, NULL);
pObj->id = taosAddRef(clientConnRefPool, pObj);
pObj->schemalessType = 1;
atomic_add_fetch_64(&pObj->pAppInfo->numOfConns, 1);

View File

@ -239,7 +239,6 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC
.pTransporter = pTscObj->pAppInfo->pTransporter,
.pStmtCb = pStmtCb,
.pUser = pTscObj->user,
.schemalessType = pTscObj->schemalessType,
.isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER)),
.enableSysInfo = pTscObj->sysInfo,
.svrVer = pTscObj->sVer,
@ -273,7 +272,7 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC
int32_t execLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
SRetrieveTableRsp* pRsp = NULL;
int32_t code = qExecCommand(pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp);
int32_t code = qExecCommand(&pRequest->pTscObj->id, pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp);
if (TSDB_CODE_SUCCESS == code && NULL != pRsp) {
code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, true);
}
@ -311,7 +310,7 @@ void asyncExecLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
return;
}
int32_t code = qExecCommand(pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp);
int32_t code = qExecCommand(&pRequest->pTscObj->id ,pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp);
if (TSDB_CODE_SUCCESS == code && NULL != pRsp) {
code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, true);
}
@ -741,47 +740,21 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList
}
int32_t handleSubmitExecRes(SRequestObj* pRequest, void* res, SCatalog* pCatalog, SEpSet* epset) {
int32_t code = 0;
SArray* pArray = NULL;
SSubmitRsp* pRsp = (SSubmitRsp*)res;
if (pRsp->nBlocks <= 0) {
taosMemoryFreeClear(pRsp->pBlocks);
SArray* pArray = NULL;
SSubmitRsp2* pRsp = (SSubmitRsp2*)res;
if (NULL == pRsp->aCreateTbRsp) {
return TSDB_CODE_SUCCESS;
}
pArray = taosArrayInit(pRsp->nBlocks, sizeof(STbSVersion));
if (NULL == pArray) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return TSDB_CODE_OUT_OF_MEMORY;
int32_t tbNum = taosArrayGetSize(pRsp->aCreateTbRsp);
for (int32_t i = 0; i < tbNum; ++i) {
SVCreateTbRsp* pTbRsp = (SVCreateTbRsp*)taosArrayGet(pRsp->aCreateTbRsp, i);
if (pTbRsp->pMeta) {
handleCreateTbExecRes(pTbRsp->pMeta, pCatalog);
}
}
for (int32_t i = 0; i < pRsp->nBlocks; ++i) {
SSubmitBlkRsp* blk = pRsp->pBlocks + i;
if (blk->pMeta) {
handleCreateTbExecRes(blk->pMeta, pCatalog);
tFreeSTableMetaRsp(blk->pMeta);
taosMemoryFreeClear(blk->pMeta);
}
if (NULL == blk->tblFName || 0 == blk->tblFName[0]) {
continue;
}
STbSVersion tbSver = {.tbFName = blk->tblFName, .sver = blk->sver};
taosArrayPush(pArray, &tbSver);
}
SRequestConnInfo conn = {.pTrans = pRequest->pTscObj->pAppInfo->pTransporter,
.requestId = pRequest->requestId,
.requestObjRefId = pRequest->self,
.mgmtEps = *epset};
code = catalogChkTbMetaVersion(pCatalog, &conn, pArray);
_return:
taosArrayDestroy(pArray);
return code;
return TSDB_CODE_SUCCESS;
}
int32_t handleQueryExecRes(SRequestObj* pRequest, void* res, SCatalog* pCatalog, SEpSet* epset) {
@ -882,7 +855,7 @@ int32_t handleQueryExecRsp(SRequestObj* pRequest) {
}
static bool incompletaFileParsing(SNode* pStmt) {
return QUERY_NODE_VNODE_MODIF_STMT != nodeType(pStmt) ? false : ((SVnodeModifOpStmt*)pStmt)->fileProcessing;
return QUERY_NODE_VNODE_MODIFY_STMT != nodeType(pStmt) ? false : ((SVnodeModifyOpStmt*)pStmt)->fileProcessing;
}
// todo refacto the error code mgmt
@ -961,7 +934,7 @@ SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQue
if (pQuery->pRoot && !pRequest->inRetry) {
STscObj* pTscObj = pRequest->pTscObj;
SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary;
if (QUERY_NODE_VNODE_MODIF_STMT == pQuery->pRoot->type) {
if (QUERY_NODE_VNODE_MODIFY_STMT == pQuery->pRoot->type) {
atomic_add_fetch_64((int64_t*)&pActivity->numOfInsertsReq, 1);
} else if (QUERY_NODE_SELECT_STMT == pQuery->pRoot->type) {
atomic_add_fetch_64((int64_t*)&pActivity->numOfQueryReq, 1);
@ -1066,7 +1039,7 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat
}
if (TSDB_CODE_SUCCESS == code && !pRequest->validateOnly) {
SArray* pNodeList = NULL;
if (QUERY_NODE_VNODE_MODIF_STMT != nodeType(pQuery->pRoot)) {
if (QUERY_NODE_VNODE_MODIFY_STMT != nodeType(pQuery->pRoot)) {
buildAsyncExecNodeList(pRequest, &pNodeList, pMnodeList, pResultMeta);
}

View File

@ -802,7 +802,7 @@ static void doAsyncQueryFromParse(SMetaData *pResultMeta, void *param, int32_t c
tstrerror(code));
if (code == TSDB_CODE_SUCCESS) {
//pWrapper->pCatalogReq->forceUpdate = false;
// pWrapper->pCatalogReq->forceUpdate = false;
code = qContinueParseSql(pWrapper->pParseCtx, pWrapper->pCatalogReq, pResultMeta, pQuery);
}
@ -831,8 +831,8 @@ void continueInsertFromCsv(SSqlCallbackWrapper *pWrapper, SRequestObj *pRequest)
tstrerror(code), pWrapper->pRequest->requestId);
destorySqlCallbackWrapper(pWrapper);
terrno = code;
pWrapper->pRequest->code = code;
pWrapper->pRequest->body.queryFp(pWrapper->pRequest->body.param, pWrapper->pRequest, code);
pRequest->code = code;
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
}
}
@ -866,7 +866,6 @@ int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt) {
.pTransporter = pTscObj->pAppInfo->pTransporter,
.pStmtCb = NULL,
.pUser = pTscObj->user,
.schemalessType = pTscObj->schemalessType,
.isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER)),
.enableSysInfo = pTscObj->sysInfo,
.async = true,
@ -1380,8 +1379,7 @@ int taos_stmt_get_col_fields(TAOS_STMT *stmt, int *fieldNum, TAOS_FIELD_E **fiel
}
// let stmt to reclaim TAOS_FIELD_E that was allocated by `taos_stmt_get_tag_fields`/`taos_stmt_get_col_fields`
void taos_stmt_reclaim_fields(TAOS_STMT *stmt, TAOS_FIELD_E *fields)
{
void taos_stmt_reclaim_fields(TAOS_STMT *stmt, TAOS_FIELD_E *fields) {
(void)stmt;
if (!fields) return;
taosMemoryFree(fields);

View File

@ -1240,21 +1240,12 @@ static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) {
return code;
}
typedef struct {
SVgroupInfo vg;
void* data;
} VgData;
static void destroyVgHash(void* data) {
VgData* vgData = (VgData*)data;
taosMemoryFreeClear(vgData->data);
}
int taos_write_raw_block_with_fields(TAOS* taos, int rows, char* pData, const char* tbname, TAOS_FIELD *fields, int numFields){
int taos_write_raw_block_with_fields(TAOS* taos, int rows, char* pData, const char* tbname, TAOS_FIELD* fields,
int numFields) {
int32_t code = TSDB_CODE_SUCCESS;
STableMeta* pTableMeta = NULL;
SQuery* pQuery = NULL;
SSubmitReq* subReq = NULL;
SHashObj* pVgHash = NULL;
SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0);
if (!pRequest) {
@ -1299,156 +1290,34 @@ int taos_write_raw_block_with_fields(TAOS* taos, int rows, char* pData, const ch
uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbname);
goto end;
}
uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
uint64_t uid = pTableMeta->uid;
int32_t numOfCols = pTableMeta->tableInfo.numOfColumns;
uint16_t fLen = 0;
int32_t rowSize = 0;
int16_t nVar = 0;
for (int i = 0; i < pTableMeta->tableInfo.numOfColumns; i++) {
SSchema* schema = pTableMeta->schema + i;
fLen += TYPE_BYTES[schema->type];
rowSize += schema->bytes;
if (IS_VAR_DATA_TYPE(schema->type)) {
nVar++;
}
}
fLen -= sizeof(TSKEY);
int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
(int32_t)TD_BITMAP_BYTES(numOfCols - 1);
int32_t schemaLen = 0;
int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
int32_t totalLen = sizeof(SSubmitReq) + submitLen;
subReq = taosMemoryCalloc(1, totalLen);
SSubmitBlk* blk = POINTER_SHIFT(subReq, sizeof(SSubmitReq));
void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk));
STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
SRowBuilder rb = {0};
tdSRowInit(&rb, pTableMeta->sversion);
tdSRowSetTpInfo(&rb, numOfCols, fLen);
int32_t dataLen = 0;
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column length |
char* pStart = pData + getVersion1BlockMetaSize(pData, numFields);
int32_t* colLength = (int32_t*)pStart;
pStart += sizeof(int32_t) * numFields;
SResultColumn* pCol = taosMemoryCalloc(numFields, sizeof(SResultColumn));
for (int32_t i = 0; i < numFields; ++i) {
if (IS_VAR_DATA_TYPE(fields[i].type)) {
pCol[i].offset = (int32_t*)pStart;
pStart += rows * sizeof(int32_t);
} else {
pCol[i].nullbitmap = pStart;
pStart += BitmapLen(rows);
}
pCol[i].pData = pStart;
pStart += colLength[i];
}
SHashObj* schemaHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
for (int i = 0; i < numFields; i++) {
TAOS_FIELD* schema = &fields[i];
taosHashPut(schemaHash, schema->name, strlen(schema->name), &i, sizeof(int32_t));
}
for (int32_t j = 0; j < rows; j++) {
tdSRowResetBuf(&rb, rowData);
int32_t offset = 0;
for (int32_t k = 0; k < numOfCols; k++) {
const SSchema* pColumn = &pTableMeta->schema[k];
int32_t* index = taosHashGet(schemaHash, pColumn->name, strlen(pColumn->name));
if (!index) { // add none
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NONE, NULL, false, offset, k);
}else{
if (IS_VAR_DATA_TYPE(pColumn->type)) {
if (pCol[*index].offset[j] != -1) {
char* data = pCol[*index].pData + pCol[*index].offset[j];
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
} else {
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
}
} else {
if (!colDataIsNull_f(pCol[*index].nullbitmap, j)) {
char* data = pCol[*index].pData + pColumn->bytes * j;
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
} else {
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
}
}
}
if (pColumn->colId != PRIMARYKEY_TIMESTAMP_COL_ID) {
offset += TYPE_BYTES[pColumn->type];
}
}
tdSRowEnd(&rb);
int32_t rowLen = TD_ROW_LEN(rowData);
rowData = POINTER_SHIFT(rowData, rowLen);
dataLen += rowLen;
}
taosHashCleanup(schemaHash);
taosMemoryFree(pCol);
blk->uid = htobe64(uid);
blk->suid = htobe64(suid);
blk->sversion = htonl(pTableMeta->sversion);
blk->schemaLen = htonl(schemaLen);
blk->numOfRows = htonl(rows);
blk->dataLen = htonl(dataLen);
subReq->length = sizeof(SSubmitReq) + sizeof(SSubmitBlk) + schemaLen + dataLen;
subReq->numOfBlocks = 1;
pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
if (NULL == pQuery) {
uError("create SQuery error");
pQuery = smlInitHandle();
if (pQuery == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
pQuery->haveResultSet = false;
pQuery->msgType = TDMT_VND_SUBMIT;
pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
if (NULL == pQuery->pRoot) {
uError("create pQuery->pRoot error");
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
SVnodeModifOpStmt* nodeStmt = (SVnodeModifOpStmt*)(pQuery->pRoot);
nodeStmt->pDataBlocks = taosArrayInit(1, POINTER_BYTES);
pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
taosHashPut(pVgHash, (const char*)&vgData.vgId, sizeof(vgData.vgId), (char*)&vgData, sizeof(vgData));
SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
if (NULL == dst) {
code = TSDB_CODE_OUT_OF_MEMORY;
code = rawBlockBindData(pQuery, pTableMeta, pData, NULL, fields, numFields);
if (code != TSDB_CODE_SUCCESS) {
uError("WriteRaw:rawBlockBindData failed");
goto end;
}
dst->vg = vgData;
dst->numOfTables = subReq->numOfBlocks;
dst->size = subReq->length;
dst->pData = (char*)subReq;
subReq->header.vgId = htonl(dst->vg.vgId);
subReq->version = htonl(1);
subReq->header.contLen = htonl(subReq->length);
subReq->length = htonl(subReq->length);
subReq->numOfBlocks = htonl(subReq->numOfBlocks);
subReq = NULL; // no need free
taosArrayPush(nodeStmt->pDataBlocks, &dst);
code = smlBuildOutput(pQuery, pVgHash);
if (code != TSDB_CODE_SUCCESS) {
uError("smlBuildOutput failed");
return code;
}
launchQueryImpl(pRequest, pQuery, true, NULL);
code = pRequest->code;
end:
end:
taosMemoryFreeClear(pTableMeta);
qDestroyQuery(pQuery);
taosMemoryFree(subReq);
destroyRequest(pRequest);
taosHashCleanup(pVgHash);
return code;
}
@ -1456,7 +1325,7 @@ int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname)
int32_t code = TSDB_CODE_SUCCESS;
STableMeta* pTableMeta = NULL;
SQuery* pQuery = NULL;
SSubmitReq* subReq = NULL;
SHashObj* pVgHash = NULL;
SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0);
if (!pRequest) {
@ -1501,137 +1370,25 @@ int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname)
uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbname);
goto end;
}
uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
uint64_t uid = pTableMeta->uid;
int32_t numOfCols = pTableMeta->tableInfo.numOfColumns;
uint16_t fLen = 0;
int32_t rowSize = 0;
int16_t nVar = 0;
for (int i = 0; i < numOfCols; i++) {
SSchema* schema = pTableMeta->schema + i;
fLen += TYPE_BYTES[schema->type];
rowSize += schema->bytes;
if (IS_VAR_DATA_TYPE(schema->type)) {
nVar++;
}
}
fLen -= sizeof(TSKEY);
int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
(int32_t)TD_BITMAP_BYTES(numOfCols - 1);
int32_t schemaLen = 0;
int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
int32_t totalLen = sizeof(SSubmitReq) + submitLen;
subReq = taosMemoryCalloc(1, totalLen);
SSubmitBlk* blk = POINTER_SHIFT(subReq, sizeof(SSubmitReq));
void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk));
STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
SRowBuilder rb = {0};
tdSRowInit(&rb, pTableMeta->sversion);
tdSRowSetTpInfo(&rb, numOfCols, fLen);
int32_t dataLen = 0;
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column length |
char* pStart = pData + getVersion1BlockMetaSize(pData, numOfCols);
int32_t* colLength = (int32_t*)pStart;
pStart += sizeof(int32_t) * numOfCols;
SResultColumn* pCol = taosMemoryCalloc(numOfCols, sizeof(SResultColumn));
for (int32_t i = 0; i < numOfCols; ++i) {
if (IS_VAR_DATA_TYPE(pTableMeta->schema[i].type)) {
pCol[i].offset = (int32_t*)pStart;
pStart += rows * sizeof(int32_t);
} else {
pCol[i].nullbitmap = pStart;
pStart += BitmapLen(rows);
}
pCol[i].pData = pStart;
pStart += colLength[i];
}
for (int32_t j = 0; j < rows; j++) {
tdSRowResetBuf(&rb, rowData);
int32_t offset = 0;
for (int32_t k = 0; k < numOfCols; k++) {
const SSchema* pColumn = &pTableMeta->schema[k];
if (IS_VAR_DATA_TYPE(pColumn->type)) {
if (pCol[k].offset[j] != -1) {
char* data = pCol[k].pData + pCol[k].offset[j];
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
} else {
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
}
} else {
if (!colDataIsNull_f(pCol[k].nullbitmap, j)) {
char* data = pCol[k].pData + pColumn->bytes * j;
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
} else {
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
}
}
if (pColumn->colId != PRIMARYKEY_TIMESTAMP_COL_ID) {
offset += TYPE_BYTES[pColumn->type];
}
}
tdSRowEnd(&rb);
int32_t rowLen = TD_ROW_LEN(rowData);
rowData = POINTER_SHIFT(rowData, rowLen);
dataLen += rowLen;
}
taosMemoryFree(pCol);
blk->uid = htobe64(uid);
blk->suid = htobe64(suid);
blk->sversion = htonl(pTableMeta->sversion);
blk->schemaLen = htonl(schemaLen);
blk->numOfRows = htonl(rows);
blk->dataLen = htonl(dataLen);
subReq->length = sizeof(SSubmitReq) + sizeof(SSubmitBlk) + schemaLen + dataLen;
subReq->numOfBlocks = 1;
pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
if (NULL == pQuery) {
uError("create SQuery error");
code = TSDB_CODE_OUT_OF_MEMORY;
taosMemoryFree(subReq);
goto end;
}
pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
pQuery->haveResultSet = false;
pQuery->msgType = TDMT_VND_SUBMIT;
pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
if (NULL == pQuery->pRoot) {
uError("create pQuery->pRoot error");
pQuery = smlInitHandle();
if (pQuery == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
SVnodeModifOpStmt* nodeStmt = (SVnodeModifOpStmt*)(pQuery->pRoot);
nodeStmt->pDataBlocks = taosArrayInit(1, POINTER_BYTES);
pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
taosHashPut(pVgHash, (const char*)&vgData.vgId, sizeof(vgData.vgId), (char*)&vgData, sizeof(vgData));
SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
if (NULL == dst) {
code = TSDB_CODE_OUT_OF_MEMORY;
code = rawBlockBindData(pQuery, pTableMeta, pData, NULL, NULL, 0);
if (code != TSDB_CODE_SUCCESS) {
uError("WriteRaw:rawBlockBindData failed");
goto end;
}
dst->vg = vgData;
dst->numOfTables = subReq->numOfBlocks;
dst->size = subReq->length;
dst->pData = (char*)subReq;
subReq->header.vgId = htonl(dst->vg.vgId);
subReq->version = htonl(1);
subReq->header.contLen = htonl(subReq->length);
subReq->length = htonl(subReq->length);
subReq->numOfBlocks = htonl(subReq->numOfBlocks);
subReq = NULL; // no need free
taosArrayPush(nodeStmt->pDataBlocks, &dst);
code = smlBuildOutput(pQuery, pVgHash);
if (code != TSDB_CODE_SUCCESS) {
uError("smlBuildOutput failed");
return code;
}
launchQueryImpl(pRequest, pQuery, true, NULL);
code = pRequest->code;
@ -1639,7 +1396,8 @@ int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname)
end:
taosMemoryFreeClear(pTableMeta);
qDestroyQuery(pQuery);
taosMemoryFree(subReq);
destroyRequest(pRequest);
taosHashCleanup(pVgHash);
return code;
}
@ -1676,8 +1434,6 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
goto end;
}
pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
taosHashSetFreeFp(pVgHash, destroyVgHash);
struct SCatalog* pCatalog = NULL;
code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
if (code != TSDB_CODE_SUCCESS) {
@ -1691,6 +1447,12 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
conn.requestObjRefId = pRequest->self;
conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
pQuery = smlInitHandle();
if (pQuery == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
uDebug("raw data block num:%d\n", rspObj.rsp.blockNum);
while (++rspObj.resIter < rspObj.rsp.blockNum) {
SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
@ -1698,14 +1460,6 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
uError("WriteRaw:no schema, iter:%d", rspObj.resIter);
goto end;
}
SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.rsp.blockSchema, rspObj.resIter);
setResSchemaInfo(&rspObj.resInfo, pSW->pSchema, pSW->nCols);
code = setQueryResultFromRsp(&rspObj.resInfo, pRetrieve, false, false);
if (code != TSDB_CODE_SUCCESS) {
uError("WriteRaw: setQueryResultFromRsp error");
goto end;
}
const char* tbName = (const char*)taosArrayGetP(rspObj.rsp.blockTbName, rspObj.resIter);
if (!tbName) {
@ -1719,13 +1473,6 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
strcpy(pName.dbname, pRequest->pDb);
strcpy(pName.tname, tbName);
VgData vgData = {0};
code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &(vgData.vg));
if (code != TSDB_CODE_SUCCESS) {
uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbName);
goto end;
}
code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
uError("WriteRaw:catalogGetTableMeta table not exist. table name: %s", tbName);
@ -1737,164 +1484,29 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
goto end;
}
uint16_t fLen = 0;
int32_t rowSize = 0;
int16_t nVar = 0;
for (int i = 0; i < pTableMeta->tableInfo.numOfColumns; i++) {
SSchema* schema = &pTableMeta->schema[i];
fLen += TYPE_BYTES[schema->type];
rowSize += schema->bytes;
if (IS_VAR_DATA_TYPE(schema->type)) {
nVar++;
}
}
fLen -= sizeof(TSKEY);
int32_t rows = rspObj.resInfo.numOfRows;
int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
(int32_t)TD_BITMAP_BYTES(pTableMeta->tableInfo.numOfColumns - 1);
int32_t schemaLen = 0;
int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
SSubmitReq* subReq = NULL;
SSubmitBlk* blk = NULL;
void* hData = taosHashGet(pVgHash, &vgData.vg.vgId, sizeof(vgData.vg.vgId));
if (hData) {
vgData = *(VgData*)hData;
int32_t totalLen = ((SSubmitReq*)(vgData.data))->length + submitLen;
void* tmp = taosMemoryRealloc(vgData.data, totalLen);
if (tmp == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
vgData.data = tmp;
((VgData*)hData)->data = tmp;
subReq = (SSubmitReq*)(vgData.data);
blk = POINTER_SHIFT(vgData.data, subReq->length);
} else {
int32_t totalLen = sizeof(SSubmitReq) + submitLen;
void* tmp = taosMemoryCalloc(1, totalLen);
if (tmp == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
vgData.data = tmp;
taosHashPut(pVgHash, (const char*)&vgData.vg.vgId, sizeof(vgData.vg.vgId), (char*)&vgData, sizeof(vgData));
subReq = (SSubmitReq*)(vgData.data);
subReq->length = sizeof(SSubmitReq);
subReq->numOfBlocks = 0;
blk = POINTER_SHIFT(vgData.data, sizeof(SSubmitReq));
}
// pSW->pSchema should be same as pTableMeta->schema
// ASSERT(pSW->nCols == pTableMeta->tableInfo.numOfColumns);
uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
uint64_t uid = pTableMeta->uid;
int16_t sver = pTableMeta->sversion;
void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk));
STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
SRowBuilder rb = {0};
tdSRowInit(&rb, sver);
tdSRowSetTpInfo(&rb, pTableMeta->tableInfo.numOfColumns, fLen);
int32_t totalLen = 0;
SHashObj* schemaHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
for (int i = 0; i < pSW->nCols; i++) {
SSchema* schema = &pSW->pSchema[i];
taosHashPut(schemaHash, schema->name, strlen(schema->name), &i, sizeof(int32_t));
}
for (int32_t j = 0; j < rows; j++) {
tdSRowResetBuf(&rb, rowData);
doSetOneRowPtr(&rspObj.resInfo);
rspObj.resInfo.current += 1;
int32_t offset = 0;
for (int32_t k = 0; k < pTableMeta->tableInfo.numOfColumns; k++) {
const SSchema* pColumn = &pTableMeta->schema[k];
int32_t* index = taosHashGet(schemaHash, pColumn->name, strlen(pColumn->name));
if (!index) {
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NONE, NULL, false, offset, k);
} else {
char* colData = rspObj.resInfo.row[*index];
if (!colData) {
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
} else {
if (IS_VAR_DATA_TYPE(pColumn->type)) {
colData -= VARSTR_HEADER_SIZE;
}
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, colData, true, offset, k);
}
}
if (pColumn->colId != PRIMARYKEY_TIMESTAMP_COL_ID) {
offset += TYPE_BYTES[pColumn->type];
}
}
tdSRowEnd(&rb);
int32_t rowLen = TD_ROW_LEN(rowData);
rowData = POINTER_SHIFT(rowData, rowLen);
totalLen += rowLen;
}
taosHashCleanup(schemaHash);
blk->uid = htobe64(uid);
blk->suid = htobe64(suid);
blk->sversion = htonl(sver);
blk->schemaLen = htonl(schemaLen);
blk->numOfRows = htonl(rows);
blk->dataLen = htonl(totalLen);
subReq->length += sizeof(SSubmitBlk) + schemaLen + totalLen;
subReq->numOfBlocks++;
taosMemoryFreeClear(pTableMeta);
rspObj.resInfo.pRspMsg = NULL;
doFreeReqResultInfo(&rspObj.resInfo);
}
pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
if (NULL == pQuery) {
uError("create SQuery error");
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
pQuery->haveResultSet = false;
pQuery->msgType = TDMT_VND_SUBMIT;
pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
if (NULL == pQuery->pRoot) {
uError("create pQuery->pRoot error");
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
SVnodeModifOpStmt* nodeStmt = (SVnodeModifOpStmt*)(pQuery->pRoot);
int32_t numOfVg = taosHashGetSize(pVgHash);
nodeStmt->pDataBlocks = taosArrayInit(numOfVg, POINTER_BYTES);
VgData* vData = (VgData*)taosHashIterate(pVgHash, NULL);
while (vData) {
SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
if (NULL == dst) {
code = TSDB_CODE_OUT_OF_MEMORY;
SVgroupInfo vg;
code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vg);
if (code != TSDB_CODE_SUCCESS) {
uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbName);
goto end;
}
dst->vg = vData->vg;
SSubmitReq* subReq = (SSubmitReq*)(vData->data);
dst->numOfTables = subReq->numOfBlocks;
dst->size = subReq->length;
dst->pData = (char*)subReq;
vData->data = NULL; // no need free
subReq->header.vgId = htonl(dst->vg.vgId);
subReq->version = htonl(1);
subReq->header.contLen = htonl(subReq->length);
subReq->length = htonl(subReq->length);
subReq->numOfBlocks = htonl(subReq->numOfBlocks);
taosArrayPush(nodeStmt->pDataBlocks, &dst);
vData = (VgData*)taosHashIterate(pVgHash, vData);
void* hData = taosHashGet(pVgHash, &vg.vgId, sizeof(vg.vgId));
if (hData == NULL) {
taosHashPut(pVgHash, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg));
}
code = rawBlockBindData(pQuery, pTableMeta, pRetrieve->data, NULL, NULL, 0);
if (code != TSDB_CODE_SUCCESS) {
uError("WriteRaw:rawBlockBindData failed");
goto end;
}
}
code = smlBuildOutput(pQuery, pVgHash);
if (code != TSDB_CODE_SUCCESS) {
uError("smlBuildOutput failed");
return code;
}
launchQueryImpl(pRequest, pQuery, true, NULL);
@ -1902,8 +1514,6 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
end:
tDeleteSMqDataRsp(&rspObj.rsp);
rspObj.resInfo.pRspMsg = NULL;
doFreeReqResultInfo(&rspObj.resInfo);
tDecoderClear(&decoder);
qDestroyQuery(pQuery);
destroyRequest(pRequest);
@ -1912,7 +1522,6 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
return code;
}
static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) {
int32_t code = TSDB_CODE_SUCCESS;
SHashObj* pVgHash = NULL;
@ -1946,8 +1555,6 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
goto end;
}
pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
taosHashSetFreeFp(pVgHash, destroyVgHash);
struct SCatalog* pCatalog = NULL;
code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
if (code != TSDB_CODE_SUCCESS) {
@ -1961,6 +1568,13 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
conn.requestObjRefId = pRequest->self;
conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
pQuery = smlInitHandle();
if (pQuery == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
uDebug("raw data block num:%d\n", rspObj.rsp.blockNum);
while (++rspObj.resIter < rspObj.rsp.blockNum) {
SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
@ -1968,14 +1582,6 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
uError("WriteRaw:no schema, iter:%d", rspObj.resIter);
goto end;
}
SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.rsp.blockSchema, rspObj.resIter);
setResSchemaInfo(&rspObj.resInfo, pSW->pSchema, pSW->nCols);
code = setQueryResultFromRsp(&rspObj.resInfo, pRetrieve, false, false);
if (code != TSDB_CODE_SUCCESS) {
uError("WriteRaw: setQueryResultFromRsp error");
goto end;
}
const char* tbName = (const char*)taosArrayGetP(rspObj.rsp.blockTbName, rspObj.resIter);
if (!tbName) {
@ -1989,44 +1595,28 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
strcpy(pName.dbname, pRequest->pDb);
strcpy(pName.tname, tbName);
VgData vgData = {0};
code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &(vgData.vg));
if (code != TSDB_CODE_SUCCESS) {
uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbName);
goto end;
}
// find schema data info
int32_t schemaLen = 0;
void* schemaData = NULL;
SVCreateTbReq pCreateReq = {0};
for (int j = 0; j < rspObj.rsp.createTableNum; j++) {
void** dataTmp = taosArrayGet(rspObj.rsp.createTableReq, j);
int32_t* lenTmp = taosArrayGet(rspObj.rsp.createTableLen, j);
SDecoder decoderTmp = {0};
SVCreateTbReq pCreateReq = {0};
SDecoder decoderTmp = {0};
tDecoderInit(&decoderTmp, *dataTmp, *lenTmp);
memset(&pCreateReq, 0, sizeof(SVCreateTbReq));
if (tDecodeSVCreateTbReq(&decoderTmp, &pCreateReq) < 0) {
tDecoderClear(&decoderTmp);
taosMemoryFreeClear(pCreateReq.comment);
taosArrayDestroy(pCreateReq.ctb.tagName);
goto end;
}
ASSERT(pCreateReq.type == TSDB_CHILD_TABLE);
if (strcmp(tbName, pCreateReq.name) == 0) {
schemaLen = *lenTmp;
schemaData = *dataTmp;
strcpy(pName.tname, pCreateReq.ctb.stbName);
tDecoderClear(&decoderTmp);
taosMemoryFreeClear(pCreateReq.comment);
taosArrayDestroy(pCreateReq.ctb.tagName);
break;
}
tDecoderClear(&decoderTmp);
taosMemoryFreeClear(pCreateReq.comment);
taosArrayDestroy(pCreateReq.ctb.tagName);
}
code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
@ -2040,176 +1630,30 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
goto end;
}
uint16_t fLen = 0;
int32_t rowSize = 0;
int16_t nVar = 0;
for (int i = 0; i < pTableMeta->tableInfo.numOfColumns; i++) {
SSchema* schema = &pTableMeta->schema[i];
fLen += TYPE_BYTES[schema->type];
rowSize += schema->bytes;
if (IS_VAR_DATA_TYPE(schema->type)) {
nVar++;
}
}
fLen -= sizeof(TSKEY);
int32_t rows = rspObj.resInfo.numOfRows;
int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
(int32_t)TD_BITMAP_BYTES(pTableMeta->tableInfo.numOfColumns - 1);
int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
SSubmitReq* subReq = NULL;
SSubmitBlk* blk = NULL;
void* hData = taosHashGet(pVgHash, &vgData.vg.vgId, sizeof(vgData.vg.vgId));
if (hData) {
vgData = *(VgData*)hData;
int32_t totalLen = ((SSubmitReq*)(vgData.data))->length + submitLen;
void* tmp = taosMemoryRealloc(vgData.data, totalLen);
if (tmp == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
vgData.data = tmp;
((VgData*)hData)->data = tmp;
subReq = (SSubmitReq*)(vgData.data);
blk = POINTER_SHIFT(vgData.data, subReq->length);
} else {
int32_t totalLen = sizeof(SSubmitReq) + submitLen;
void* tmp = taosMemoryCalloc(1, totalLen);
if (tmp == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
vgData.data = tmp;
taosHashPut(pVgHash, (const char*)&vgData.vg.vgId, sizeof(vgData.vg.vgId), (char*)&vgData, sizeof(vgData));
subReq = (SSubmitReq*)(vgData.data);
subReq->length = sizeof(SSubmitReq);
subReq->numOfBlocks = 0;
blk = POINTER_SHIFT(vgData.data, sizeof(SSubmitReq));
}
// pSW->pSchema should be same as pTableMeta->schema
// ASSERT(pSW->nCols == pTableMeta->tableInfo.numOfColumns);
uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
uint64_t uid = pTableMeta->uid;
int16_t sver = pTableMeta->sversion;
void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk));
if (schemaData) {
memcpy(blkSchema, schemaData, schemaLen);
}
STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
SRowBuilder rb = {0};
tdSRowInit(&rb, sver);
tdSRowSetTpInfo(&rb, pTableMeta->tableInfo.numOfColumns, fLen);
int32_t totalLen = 0;
SHashObj* schemaHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
for (int i = 0; i < pSW->nCols; i++) {
SSchema* schema = &pSW->pSchema[i];
taosHashPut(schemaHash, schema->name, strlen(schema->name), &i, sizeof(int32_t));
}
for (int32_t j = 0; j < rows; j++) {
tdSRowResetBuf(&rb, rowData);
doSetOneRowPtr(&rspObj.resInfo);
rspObj.resInfo.current += 1;
int32_t offset = 0;
for (int32_t k = 0; k < pTableMeta->tableInfo.numOfColumns; k++) {
const SSchema* pColumn = &pTableMeta->schema[k];
int32_t* index = taosHashGet(schemaHash, pColumn->name, strlen(pColumn->name));
if (!index) {
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NONE, NULL, false, offset, k);
} else {
char* colData = rspObj.resInfo.row[*index];
if (!colData) {
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
} else {
if (IS_VAR_DATA_TYPE(pColumn->type)) {
colData -= VARSTR_HEADER_SIZE;
}
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, colData, true, offset, k);
}
}
if (pColumn->colId != PRIMARYKEY_TIMESTAMP_COL_ID) {
offset += TYPE_BYTES[pColumn->type];
}
}
tdSRowEnd(&rb);
int32_t rowLen = TD_ROW_LEN(rowData);
rowData = POINTER_SHIFT(rowData, rowLen);
totalLen += rowLen;
}
taosHashCleanup(schemaHash);
blk->uid = htobe64(uid);
blk->suid = htobe64(suid);
blk->sversion = htonl(sver);
blk->schemaLen = htonl(schemaLen);
blk->numOfRows = htonl(rows);
blk->dataLen = htonl(totalLen);
subReq->length += sizeof(SSubmitBlk) + schemaLen + totalLen;
subReq->numOfBlocks++;
taosMemoryFreeClear(pTableMeta);
rspObj.resInfo.pRspMsg = NULL;
doFreeReqResultInfo(&rspObj.resInfo);
}
pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
if (NULL == pQuery) {
uError("create SQuery error");
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
pQuery->haveResultSet = false;
pQuery->msgType = TDMT_VND_SUBMIT;
pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
if (NULL == pQuery->pRoot) {
uError("create pQuery->pRoot error");
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
SVnodeModifOpStmt* nodeStmt = (SVnodeModifOpStmt*)(pQuery->pRoot);
int32_t numOfVg = taosHashGetSize(pVgHash);
nodeStmt->pDataBlocks = taosArrayInit(numOfVg, POINTER_BYTES);
VgData* vData = (VgData*)taosHashIterate(pVgHash, NULL);
while (vData) {
SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
if (NULL == dst) {
code = TSDB_CODE_OUT_OF_MEMORY;
SVgroupInfo vg;
code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vg);
if (code != TSDB_CODE_SUCCESS) {
uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbName);
goto end;
}
void* hData = taosHashGet(pVgHash, &vg.vgId, sizeof(vg.vgId));
if (hData == NULL) {
taosHashPut(pVgHash, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg));
}
code = rawBlockBindData(pQuery, pTableMeta, pRetrieve->data, &pCreateReq, NULL, 0);
if (code != TSDB_CODE_SUCCESS) {
uError("WriteRaw:rawBlockBindData failed");
goto end;
}
dst->vg = vData->vg;
SSubmitReq* subReq = (SSubmitReq*)(vData->data);
dst->numOfTables = subReq->numOfBlocks;
dst->size = subReq->length;
dst->pData = (char*)subReq;
vData->data = NULL; // no need free
subReq->header.vgId = htonl(dst->vg.vgId);
subReq->version = htonl(1);
subReq->header.contLen = htonl(subReq->length);
subReq->length = htonl(subReq->length);
subReq->numOfBlocks = htonl(subReq->numOfBlocks);
taosArrayPush(nodeStmt->pDataBlocks, &dst);
vData = (VgData*)taosHashIterate(pVgHash, vData);
}
launchQueryImpl(pRequest, pQuery, true, NULL);
code = pRequest->code;
end:
end:
tDeleteSTaosxRsp(&rspObj.rsp);
rspObj.resInfo.pRspMsg = NULL;
doFreeReqResultInfo(&rspObj.resInfo);
tDecoderClear(&decoder);
qDestroyQuery(pQuery);
destroyRequest(pRequest);

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,657 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "clientSml.h"
// comma ,
//#define IS_SLASH_COMMA(sql) (*(sql) == COMMA && *((sql)-1) == SLASH)
#define IS_COMMA(sql) (*(sql) == COMMA && *((sql)-1) != SLASH)
// space
//#define IS_SLASH_SPACE(sql) (*(sql) == SPACE && *((sql)-1) == SLASH)
#define IS_SPACE(sql) (*(sql) == SPACE && *((sql)-1) != SLASH)
// equal =
//#define IS_SLASH_EQUAL(sql) (*(sql) == EQUAL && *((sql)-1) == SLASH)
#define IS_EQUAL(sql) (*(sql) == EQUAL && *((sql)-1) != SLASH)
// quote "
//#define IS_SLASH_QUOTE(sql) (*(sql) == QUOTE && *((sql)-1) == SLASH)
#define IS_QUOTE(sql) (*(sql) == QUOTE && *((sql)-1) != SLASH)
// SLASH
//#define IS_SLASH_SLASH(sql) (*(sql) == SLASH && *((sql)-1) == SLASH)
#define IS_SLASH_LETTER(sql) \
(*((sql)-1) == SLASH && (*(sql) == COMMA || *(sql) == SPACE || *(sql) == EQUAL || *(sql) == QUOTE || *(sql) == SLASH)) \
// (IS_SLASH_COMMA(sql) || IS_SLASH_SPACE(sql) || IS_SLASH_EQUAL(sql) || IS_SLASH_QUOTE(sql) || IS_SLASH_SLASH(sql))
#define MOVE_FORWARD_ONE(sql, len) (memmove((void *)((sql)-1), (sql), len))
#define PROCESS_SLASH(key, keyLen) \
for (int i = 1; i < keyLen; ++i) { \
if (IS_SLASH_LETTER(key + i)) { \
MOVE_FORWARD_ONE(key + i, keyLen - i); \
i--; \
keyLen--; \
} \
}
#define BINARY_ADD_LEN 2 // "binary" 2 means " "
#define NCHAR_ADD_LEN 3 // L"nchar" 3 means L" "
uint8_t smlPrecisionConvert[7] = {TSDB_TIME_PRECISION_NANO, TSDB_TIME_PRECISION_HOURS, TSDB_TIME_PRECISION_MINUTES,
TSDB_TIME_PRECISION_SECONDS, TSDB_TIME_PRECISION_MILLI, TSDB_TIME_PRECISION_MICRO,
TSDB_TIME_PRECISION_NANO};
static int64_t smlParseInfluxTime(SSmlHandle *info, const char *data, int32_t len) {
uint8_t toPrecision = info->currSTableMeta ? info->currSTableMeta->tableInfo.precision : TSDB_TIME_PRECISION_NANO;
if(unlikely(len == 0 || (len == 1 && data[0] == '0'))){
return taosGetTimestampNs()/smlFactorNS[toPrecision];
}
uint8_t fromPrecision = smlPrecisionConvert[info->precision];
int64_t ts = smlGetTimeValue(data, len, fromPrecision, toPrecision);
if (unlikely(ts == -1)) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp", data);
return -1;
}
return ts;
}
int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) {
if (pVal->value[0] == '"'){ // binary
if (pVal->length >= 2 && pVal->value[pVal->length - 1] == '"') {
pVal->type = TSDB_DATA_TYPE_BINARY;
pVal->length -= BINARY_ADD_LEN;
if (pVal->length > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) {
return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
}
pVal->value += (BINARY_ADD_LEN - 1);
return TSDB_CODE_SUCCESS;
}
return TSDB_CODE_TSC_INVALID_VALUE;
}
if(pVal->value[0] == 'l' || pVal->value[0] == 'L'){ // nchar
if (pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"' && pVal->length >= 3){
pVal->type = TSDB_DATA_TYPE_NCHAR;
pVal->length -= NCHAR_ADD_LEN;
if (pVal->length > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
}
pVal->value += (NCHAR_ADD_LEN - 1);
return TSDB_CODE_SUCCESS;
}
return TSDB_CODE_TSC_INVALID_VALUE;
}
if (pVal->value[0] == 't' || pVal->value[0] == 'T'){
if(pVal->length == 1 || (pVal->length == 4 && (pVal->value[1] == 'r' || pVal->value[1] == 'R')
&& (pVal->value[2] == 'u' || pVal->value[2] == 'U')
&& (pVal->value[3] == 'e' || pVal->value[3] == 'E'))){
pVal->i = TSDB_TRUE;
pVal->type = TSDB_DATA_TYPE_BOOL;
pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
return TSDB_CODE_SUCCESS;
}
return TSDB_CODE_TSC_INVALID_VALUE;
}
if (pVal->value[0] == 'f' || pVal->value[0] == 'F'){
if(pVal->length == 1 || (pVal->length == 5 && (pVal->value[1] == 'a' || pVal->value[1] == 'A')
&& (pVal->value[2] == 'l' || pVal->value[2] == 'L')
&& (pVal->value[3] == 's' || pVal->value[3] == 'S')
&& (pVal->value[4] == 'e' || pVal->value[4] == 'E'))){
pVal->i = TSDB_FALSE;
pVal->type = TSDB_DATA_TYPE_BOOL;
pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
return TSDB_CODE_SUCCESS;
}
return TSDB_CODE_TSC_INVALID_VALUE;
}
// number
if (smlParseNumber(pVal, msg)) {
pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
return TSDB_CODE_SUCCESS;
}
return TSDB_CODE_TSC_INVALID_VALUE;
}
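// Worked examples for smlParseValue (illustrative, not in the original source):
//   "abc"  -> TSDB_DATA_TYPE_BINARY, value advanced past the opening quote, length 5 - BINARY_ADD_LEN = 3
//   L"abc" -> TSDB_DATA_TYPE_NCHAR,  value advanced past L", length 6 - NCHAR_ADD_LEN = 3
//   t / true / F / false (any case) -> TSDB_DATA_TYPE_BOOL
//   anything else is handed to smlParseNumber, which picks the numeric type or rejects the value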
static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd,
SSmlLineInfo* currElement, bool isSameMeasure, bool isSameCTable){
if(isSameCTable){
return TSDB_CODE_SUCCESS;
}
int cnt = 0;
SArray *preLineKV = info->preLineTagKV;
SArray *maxKVs = info->maxTagKVs;
bool isSuperKVInit = true;
SArray *superKV = NULL;
if(info->dataFormat){
if(unlikely(!isSameMeasure)){
SSmlSTableMeta *sMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, currElement->measure, currElement->measureLen, NULL);
if(unlikely(sMeta == NULL)){
STableMeta * pTableMeta = smlGetMeta(info, currElement->measure, currElement->measureLen);
if(pTableMeta == NULL){
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
sMeta = smlBuildSTableMeta(info->dataFormat);
sMeta->tableMeta = pTableMeta;
nodeListSet(&info->superTables, currElement->measure, currElement->measureLen, sMeta, NULL);
}
info->currSTableMeta = sMeta->tableMeta;
superKV = sMeta->tags;
if(unlikely(taosArrayGetSize(superKV) == 0)){
isSuperKVInit = false;
}
taosArraySetSize(maxKVs, 0);
}
}else{
taosArraySetSize(maxKVs, 0);
}
taosArraySetSize(preLineKV, 0);
while (*sql < sqlEnd) {
if (unlikely(IS_SPACE(*sql))) {
break;
}
bool hasSlash = false;
// parse key
const char *key = *sql;
size_t keyLen = 0;
while (*sql < sqlEnd) {
if (unlikely(IS_COMMA(*sql))) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql);
return TSDB_CODE_SML_INVALID_DATA;
}
if (unlikely(IS_EQUAL(*sql))) {
keyLen = *sql - key;
(*sql)++;
break;
}
if(!hasSlash){
hasSlash = (*(*sql) == SLASH);
}
(*sql)++;
}
if(unlikely(hasSlash)) {
PROCESS_SLASH(key, keyLen)
}
if (unlikely(IS_INVALID_COL_LEN(keyLen))) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid key or key is too long than 64", key);
return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH;
}
// parse value
const char *value = *sql;
size_t valueLen = 0;
hasSlash = false;
while (*sql < sqlEnd) {
// parse value
if (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql))) {
break;
}else if (unlikely(IS_EQUAL(*sql))) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql);
return TSDB_CODE_SML_INVALID_DATA;
}
if(!hasSlash){
hasSlash = (*(*sql) == SLASH);
}
(*sql)++;
}
valueLen = *sql - value;
if (unlikely(valueLen == 0)) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid value", value);
return TSDB_CODE_SML_INVALID_DATA;
}
if(unlikely(hasSlash)) {
PROCESS_SLASH(value, valueLen)
}
if (unlikely(valueLen > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)) {
return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
}
SSmlKv kv = {.key = key, .keyLen = keyLen, .type = TSDB_DATA_TYPE_NCHAR, .value = value, .length = valueLen};
if(info->dataFormat){
if(unlikely(cnt + 1 > info->currSTableMeta->tableInfo.numOfTags)){
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
if(isSameMeasure){
if(unlikely(cnt >= taosArrayGetSize(maxKVs))) {
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
SSmlKv *maxKV = (SSmlKv *)taosArrayGet(maxKVs, cnt);
if(unlikely(kv.length > maxKV->length)){
maxKV->length = kv.length;
SSmlSTableMeta *tableMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, currElement->measure, currElement->measureLen, NULL);
ASSERT(tableMeta != NULL);
SSmlKv *oldKV = (SSmlKv *)taosArrayGet(tableMeta->tags, cnt);
oldKV->length = kv.length;
info->needModifySchema = true;
}
if(unlikely(!IS_SAME_KEY)){
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
}else{
if(isSuperKVInit){
if(unlikely(cnt >= taosArrayGetSize(superKV))) {
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
SSmlKv *maxKV = (SSmlKv *)taosArrayGet(superKV, cnt);
if(unlikely(kv.length > maxKV->length)) {
maxKV->length = kv.length;
}else{
kv.length = maxKV->length;
}
info->needModifySchema = true;
if(unlikely(!IS_SAME_KEY)){
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
}else{
taosArrayPush(superKV, &kv);
}
taosArrayPush(maxKVs, &kv);
}
}else{
taosArrayPush(maxKVs, &kv);
}
taosArrayPush(preLineKV, &kv);
cnt++;
if(IS_SPACE(*sql)){
break;
}
(*sql)++;
}
void* oneTable = nodeListGet(info->childTables, currElement->measure, currElement->measureTagsLen, NULL);
if ((oneTable != NULL)) {
return TSDB_CODE_SUCCESS;
}
SSmlTableInfo *tinfo = smlBuildTableInfo(1, currElement->measure, currElement->measureLen);
if (unlikely(!tinfo)) {
return TSDB_CODE_OUT_OF_MEMORY;
}
tinfo->tags = taosArrayDup(preLineKV, NULL);
smlSetCTableName(tinfo);
if(info->dataFormat) {
info->currSTableMeta->uid = tinfo->uid;
tinfo->tableDataCtx = smlInitTableDataCtx(info->pQuery, info->currSTableMeta);
if(tinfo->tableDataCtx == NULL){
smlBuildInvalidDataMsg(&info->msgBuf, "smlInitTableDataCtx error", NULL);
return TSDB_CODE_SML_INVALID_DATA;
}
}
nodeListSet(&info->childTables, currElement->measure, currElement->measureTagsLen, tinfo, NULL);
return TSDB_CODE_SUCCESS;
}
static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd,
SSmlLineInfo* currElement, bool isSameMeasure, bool isSameCTable){
int cnt = 0;
SArray *preLineKV = info->preLineColKV;
bool isSuperKVInit = true;
SArray *superKV = NULL;
if(info->dataFormat){
if(unlikely(!isSameCTable)){
SSmlTableInfo *oneTable = (SSmlTableInfo *)nodeListGet(info->childTables, currElement->measure, currElement->measureTagsLen, NULL);
if (unlikely(oneTable == NULL)) {
smlBuildInvalidDataMsg(&info->msgBuf, "child table should inside", currElement->measure);
return TSDB_CODE_SML_INVALID_DATA;
}
info->currTableDataCtx = oneTable->tableDataCtx;
}
if(unlikely(!isSameMeasure)){
SSmlSTableMeta *sMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, currElement->measure, currElement->measureLen, NULL);
if(unlikely(sMeta == NULL)){
STableMeta * pTableMeta = smlGetMeta(info, currElement->measure, currElement->measureLen);
if(pTableMeta == NULL){
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
sMeta = smlBuildSTableMeta(info->dataFormat);
sMeta->tableMeta = pTableMeta;
nodeListSet(&info->superTables, currElement->measure, currElement->measureLen, sMeta, NULL);
}
info->currSTableMeta = sMeta->tableMeta;
superKV = sMeta->cols;
if(unlikely(taosArrayGetSize(superKV) == 0)){
isSuperKVInit = false;
}
taosArraySetSize(preLineKV, 0);
}
}
while (*sql < sqlEnd) {
if (unlikely(IS_SPACE(*sql))) {
break;
}
bool hasSlash = false;
// parse key
const char *key = *sql;
size_t keyLen = 0;
while (*sql < sqlEnd) {
if (unlikely(IS_COMMA(*sql))) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql);
return TSDB_CODE_SML_INVALID_DATA;
}
if (unlikely(IS_EQUAL(*sql))) {
keyLen = *sql - key;
(*sql)++;
break;
}
if(!hasSlash){
hasSlash = (*(*sql) == SLASH);
}
(*sql)++;
}
if(unlikely(hasSlash)) {
PROCESS_SLASH(key, keyLen)
}
if (unlikely(IS_INVALID_COL_LEN(keyLen))) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid key or key is too long than 64", key);
return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH;
}
// parse value
const char *value = *sql;
size_t valueLen = 0;
hasSlash = false;
bool isInQuote = false;
while (*sql < sqlEnd) {
// parse value
if (unlikely(IS_QUOTE(*sql))) {
isInQuote = !isInQuote;
(*sql)++;
continue;
}
if (!isInQuote){
if (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql))) {
break;
} else if (unlikely(IS_EQUAL(*sql))) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql);
return TSDB_CODE_SML_INVALID_DATA;
}
}
if(!hasSlash){
hasSlash = (*(*sql) == SLASH);
}
(*sql)++;
}
valueLen = *sql - value;
if (unlikely(isInQuote)) {
smlBuildInvalidDataMsg(&info->msgBuf, "only one quote", value);
return TSDB_CODE_SML_INVALID_DATA;
}
if (unlikely(valueLen == 0)) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid value", value);
return TSDB_CODE_SML_INVALID_DATA;
}
if(unlikely(hasSlash)) {
PROCESS_SLASH(value, valueLen)
}
SSmlKv kv = {.key = key, .keyLen = keyLen, .value = value, .length = valueLen};
int32_t ret = smlParseValue(&kv, &info->msgBuf);
if (ret != TSDB_CODE_SUCCESS) {
smlBuildInvalidDataMsg(&info->msgBuf, "smlParseValue error", value);
return ret;
}
if(info->dataFormat){
// cnt is 0-based and the ts column is counted separately, so compare cnt + 2 with numOfColumns
if(unlikely(cnt + 2 > info->currSTableMeta->tableInfo.numOfColumns)){
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
// bind data
ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kv, cnt + 1);
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
uError("smlBuildCol error, retry");
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
if(isSameMeasure){
if(cnt >= taosArrayGetSize(preLineKV)) {
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
SSmlKv *maxKV = (SSmlKv *)taosArrayGet(preLineKV, cnt);
if(kv.type != maxKV->type){
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
if(unlikely(IS_VAR_DATA_TYPE(kv.type) && kv.length > maxKV->length)){
maxKV->length = kv.length;
SSmlSTableMeta *tableMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, currElement->measure, currElement->measureLen, NULL);
ASSERT(tableMeta != NULL);
SSmlKv *oldKV = (SSmlKv *)taosArrayGet(tableMeta->cols, cnt);
oldKV->length = kv.length;
info->needModifySchema = true;
}
if(unlikely(!IS_SAME_KEY)){
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
}else{
if(isSuperKVInit){
if(unlikely(cnt >= taosArrayGetSize(superKV))) {
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
SSmlKv *maxKV = (SSmlKv *)taosArrayGet(superKV, cnt);
if(unlikely(kv.type != maxKV->type)){
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
if(IS_VAR_DATA_TYPE(kv.type)){
if(kv.length > maxKV->length) {
maxKV->length = kv.length;
}else{
kv.length = maxKV->length;
}
info->needModifySchema = true;
}
if(unlikely(!IS_SAME_KEY)){
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
}else{
taosArrayPush(superKV, &kv);
}
taosArrayPush(preLineKV, &kv);
}
}else{
if(currElement->colArray == NULL){
currElement->colArray = taosArrayInit(16, sizeof(SSmlKv));
taosArraySetSize(currElement->colArray, 1);
}
taosArrayPush(currElement->colArray, &kv); //reserve for timestamp
}
cnt++;
if(IS_SPACE(*sql)){
break;
}
(*sql)++;
}
return TSDB_CODE_SUCCESS;
}
int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements) {
if (!sql) return TSDB_CODE_SML_INVALID_DATA;
JUMP_SPACE(sql, sqlEnd)
if (unlikely(*sql == COMMA)) return TSDB_CODE_SML_INVALID_DATA;
elements->measure = sql;
// parse measure
while (sql < sqlEnd) {
if (unlikely((sql != elements->measure) && IS_SLASH_LETTER(sql))) {
MOVE_FORWARD_ONE(sql, sqlEnd - sql);
sqlEnd--;
continue;
}
if (unlikely(IS_COMMA(sql))) {
break;
}
if (unlikely(IS_SPACE(sql))) {
break;
}
sql++;
}
elements->measureLen = sql - elements->measure;
if (unlikely(IS_INVALID_TABLE_LEN(elements->measureLen))) {
smlBuildInvalidDataMsg(&info->msgBuf, "measure is empty or too large than 192", NULL);
return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
}
// to get measureTagsLen before
const char* tmp = sql;
while (tmp < sqlEnd){
if (unlikely(IS_SPACE(tmp))) {
break;
}
tmp++;
}
elements->measureTagsLen = tmp - elements->measure;
bool isSameCTable = false;
bool isSameMeasure = false;
if(IS_SAME_CHILD_TABLE){
isSameCTable = true;
isSameMeasure = true;
}else if(info->dataFormat) {
isSameMeasure = IS_SAME_SUPER_TABLE;
}
// parse tag
if (*sql == COMMA) sql++;
elements->tags = sql;
int ret = smlParseTagKv(info, &sql, sqlEnd, elements, isSameMeasure, isSameCTable);
if(unlikely(ret != TSDB_CODE_SUCCESS)){
return ret;
}
if(unlikely(info->reRun)){
return TSDB_CODE_SUCCESS;
}
sql = elements->measure + elements->measureTagsLen;
elements->tagsLen = sql - elements->tags;
// parse cols
JUMP_SPACE(sql, sqlEnd)
elements->cols = sql;
ret = smlParseColKv(info, &sql, sqlEnd, elements, isSameMeasure, isSameCTable);
if(unlikely(ret != TSDB_CODE_SUCCESS)){
return ret;
}
if(unlikely(info->reRun)){
return TSDB_CODE_SUCCESS;
}
elements->colsLen = sql - elements->cols;
if (unlikely(elements->colsLen == 0)) {
smlBuildInvalidDataMsg(&info->msgBuf, "cols is empty", NULL);
return TSDB_CODE_SML_INVALID_DATA;
}
// parse timestamp
JUMP_SPACE(sql, sqlEnd)
elements->timestamp = sql;
while (sql < sqlEnd) {
if (unlikely(isspace(*sql))) {
break;
}
sql++;
}
elements->timestampLen = sql - elements->timestamp;
int64_t ts = smlParseInfluxTime(info, elements->timestamp, elements->timestampLen);
if (unlikely(ts <= 0)) {
uError("SML:0x%" PRIx64 " smlParseTS error:%" PRId64, info->id, ts);
return TSDB_CODE_INVALID_TIMESTAMP;
}
// add ts to the column list
SSmlKv kv = { .key = TS, .keyLen = TS_LEN, .type = TSDB_DATA_TYPE_TIMESTAMP, .i = ts, .length = (size_t)tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes};
if(info->dataFormat){
smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kv, 0);
smlBuildRow(info->currTableDataCtx);
clearColValArray(info->currTableDataCtx->pValues);
}else{
taosArraySet(elements->colArray, 0, &kv);
}
info->preLine = *elements;
return ret;
}
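// Illustrative walk-through (assumed sample line, not in the original source):
//   meters,location=California.SanFrancisco,groupid=2 current=10.3,voltage=219 1626006833639000000
// measure = "meters", tags = "location=California.SanFrancisco,groupid=2",
// cols = "current=10.3,voltage=219", timestamp = "1626006833639000000";
// measureTagsLen spans the measure plus the tag set and is used as the child-table cache key.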

View File

@ -0,0 +1,342 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "clientSml.h"
int32_t is_same_child_table_telnet(const void *a, const void *b){
SSmlLineInfo *t1 = (SSmlLineInfo *)a;
SSmlLineInfo *t2 = (SSmlLineInfo *)b;
// uError("is_same_child_table_telnet len:%d,%d %s,%s @@@ len:%d,%d %s,%s", t1->measureLen, t2->measureLen,
// t1->measure, t2->measure, t1->tagsLen, t2->tagsLen, t1->tags, t2->tags);
if(t1 == NULL || t2 == NULL || t1->measure == NULL || t2->measure == NULL
|| t1->tags == NULL || t2->tags == NULL)
return 1;
return (((t1->measureLen == t2->measureLen) && memcmp(t1->measure, t2->measure, t1->measureLen) == 0)
&& ((t1->tagsLen == t2->tagsLen) && memcmp(t1->tags, t2->tags, t1->tagsLen) == 0)) ? 0 : 1;
}
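// Note (illustrative): this comparator returns 0 only when both the measure and the raw tag
// string match byte-for-byte; it is passed to nodeListGet/nodeListSet below so telnet lines
// that share measure + tags resolve to the same child table entry.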
int64_t smlParseOpenTsdbTime(SSmlHandle *info, const char *data, int32_t len) {
uint8_t toPrecision = info->currSTableMeta ? info->currSTableMeta->tableInfo.precision : TSDB_TIME_PRECISION_NANO;
if (unlikely(!data)) {
smlBuildInvalidDataMsg(&info->msgBuf, "timestamp can not be null", NULL);
return -1;
}
if (unlikely(len == 1 && data[0] == '0')) {
return taosGetTimestampNs()/smlFactorNS[toPrecision];
}
int8_t fromPrecision = smlGetTsTypeByLen(len);
if (unlikely(fromPrecision == -1)) {
smlBuildInvalidDataMsg(&info->msgBuf,
"timestamp precision can only be seconds(10 digits) or milli seconds(13 digits)", data);
return -1;
}
int64_t ts = smlGetTimeValue(data, len, fromPrecision, toPrecision);
if (unlikely(ts == -1)) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp", data);
return -1;
}
return ts;
}
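// Example (illustrative): a 10-digit timestamp such as 1626006833 is taken as seconds and a
// 13-digit one such as 1626006833639 as milliseconds, per smlGetTsTypeByLen; other lengths are
// rejected with the error message above.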
static void smlParseTelnetElement(char **sql, char *sqlEnd, char **data, int32_t *len) {
while (*sql < sqlEnd) {
if (unlikely((**sql != SPACE && !(*data)))) {
*data = *sql;
} else if (unlikely(**sql == SPACE && *data)) {
*len = *sql - *data;
break;
}
(*sql)++;
}
}
static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SSmlLineInfo *elements, SSmlMsgBuf *msg) {
if(is_same_child_table_telnet(elements, &info->preLine) == 0){
return TSDB_CODE_SUCCESS;
}
bool isSameMeasure = IS_SAME_SUPER_TABLE;
int cnt = 0;
SArray *preLineKV = info->preLineTagKV;
SArray *maxKVs = info->maxTagKVs;
bool isSuperKVInit = true;
SArray *superKV = NULL;
if(info->dataFormat){
if(!isSameMeasure){
SSmlSTableMeta *sMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, elements->measure, elements->measureLen, NULL);
if(unlikely(sMeta == NULL)){
STableMeta * pTableMeta = smlGetMeta(info, elements->measure, elements->measureLen);
if(pTableMeta == NULL){
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
sMeta = smlBuildSTableMeta(info->dataFormat);
sMeta->tableMeta = pTableMeta;
nodeListSet(&info->superTables, elements->measure, elements->measureLen, sMeta, NULL);
}
info->currSTableMeta = sMeta->tableMeta;
superKV = sMeta->tags;
if(unlikely(taosArrayGetSize(superKV) == 0)){
isSuperKVInit = false;
}
taosArraySetSize(maxKVs, 0);
}
}else{
taosArraySetSize(maxKVs, 0);
}
taosArraySetSize(preLineKV, 0);
const char *sql = data;
while (sql < sqlEnd) {
JUMP_SPACE(sql, sqlEnd)
if (unlikely(*sql == '\0')) break;
const char *key = sql;
size_t keyLen = 0;
// parse key
while (sql < sqlEnd) {
if (unlikely(*sql == SPACE)) {
smlBuildInvalidDataMsg(msg, "invalid data", sql);
return TSDB_CODE_SML_INVALID_DATA;
}
if (unlikely(*sql == EQUAL)) {
keyLen = sql - key;
sql++;
break;
}
sql++;
}
if (unlikely(IS_INVALID_COL_LEN(keyLen))) {
smlBuildInvalidDataMsg(msg, "invalid key or key is too long than 64", key);
return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH;
}
// if (smlCheckDuplicateKey(key, keyLen, dumplicateKey)) {
// smlBuildInvalidDataMsg(msg, "dumplicate key", key);
// return TSDB_CODE_TSC_DUP_NAMES;
// }
// parse value
const char *value = sql;
size_t valueLen = 0;
while (sql < sqlEnd) {
// parse value
if (unlikely(*sql == SPACE)) {
break;
}
if (unlikely(*sql == EQUAL)) {
smlBuildInvalidDataMsg(msg, "invalid data", sql);
return TSDB_CODE_SML_INVALID_DATA;
}
sql++;
}
valueLen = sql - value;
if (unlikely(valueLen == 0)) {
smlBuildInvalidDataMsg(msg, "invalid value", value);
return TSDB_CODE_TSC_INVALID_VALUE;
}
if (unlikely(valueLen > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)) {
return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
}
SSmlKv kv = {.key = key, .keyLen = keyLen, .type = TSDB_DATA_TYPE_NCHAR, .value = value, .length = valueLen};
if(info->dataFormat){
if(unlikely(cnt + 1 > info->currSTableMeta->tableInfo.numOfTags)){
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
if(isSameMeasure){
if(unlikely(cnt >= taosArrayGetSize(maxKVs))) {
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
SSmlKv *maxKV = (SSmlKv *)taosArrayGet(maxKVs, cnt);
if(unlikely(kv.length > maxKV->length)){
maxKV->length = kv.length;
SSmlSTableMeta *tableMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, elements->measure, elements->measureLen, NULL);
ASSERT(tableMeta != NULL);
SSmlKv *oldKV = (SSmlKv *)taosArrayGet(tableMeta->tags, cnt);
oldKV->length = kv.length;
info->needModifySchema = true;
}
if(unlikely(!IS_SAME_KEY)){
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
}else{
if(isSuperKVInit){
if(unlikely(cnt >= taosArrayGetSize(superKV))) {
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
SSmlKv *maxKV = (SSmlKv *)taosArrayGet(superKV, cnt);
if(unlikely(kv.length > maxKV->length)) {
maxKV->length = kv.length;
}else{
kv.length = maxKV->length;
}
info->needModifySchema = true;
if(unlikely(!IS_SAME_KEY)){
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
}else{
taosArrayPush(superKV, &kv);
}
taosArrayPush(maxKVs, &kv);
}
}else{
taosArrayPush(maxKVs, &kv);
}
taosArrayPush(preLineKV, &kv);
cnt++;
}
SSmlTableInfo *tinfo = (SSmlTableInfo *)nodeListGet(info->childTables, elements, POINTER_BYTES, is_same_child_table_telnet);
if (unlikely(tinfo == NULL)) {
tinfo = smlBuildTableInfo(1, elements->measure, elements->measureLen);
if (!tinfo) {
return TSDB_CODE_OUT_OF_MEMORY;
}
tinfo->tags = taosArrayDup(preLineKV, NULL);
smlSetCTableName(tinfo);
if (info->dataFormat) {
info->currSTableMeta->uid = tinfo->uid;
tinfo->tableDataCtx = smlInitTableDataCtx(info->pQuery, info->currSTableMeta);
if (tinfo->tableDataCtx == NULL) {
smlBuildInvalidDataMsg(&info->msgBuf, "smlInitTableDataCtx error", NULL);
return TSDB_CODE_SML_INVALID_DATA;
}
}
SSmlLineInfo *key = (SSmlLineInfo *)taosMemoryMalloc(sizeof(SSmlLineInfo));
*key = *elements;
tinfo->key = key;
nodeListSet(&info->childTables, key, POINTER_BYTES, tinfo, is_same_child_table_telnet);
}
if (info->dataFormat) info->currTableDataCtx = tinfo->tableDataCtx;
return TSDB_CODE_SUCCESS;
}
// format: <metric> <timestamp> <value> <tagk_1>=<tagv_1>[ <tagk_n>=<tagv_n>]
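// example line (illustrative, not in the original source):
//   meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3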
int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements) {
if (!sql) return TSDB_CODE_SML_INVALID_DATA;
// parse metric
smlParseTelnetElement(&sql, sqlEnd, &elements->measure, &elements->measureLen);
if (unlikely((!(elements->measure) || IS_INVALID_TABLE_LEN(elements->measureLen)))) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", sql);
return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
}
// parse timestamp
smlParseTelnetElement(&sql, sqlEnd, &elements->timestamp, &elements->timestampLen);
if (unlikely(!elements->timestamp || elements->timestampLen == 0)) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp", sql);
return TSDB_CODE_SML_INVALID_DATA;
}
bool needConverTime = false; // the timestamp is parsed before the tags (and thus before the table meta is known), so it may need converting later
if(info->dataFormat && info->currSTableMeta == NULL){
needConverTime = true;
}
int64_t ts = smlParseOpenTsdbTime(info, elements->timestamp, elements->timestampLen);
if (unlikely(ts < 0)) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp", sql);
return TSDB_CODE_INVALID_TIMESTAMP;
}
SSmlKv kvTs = { .key = TS, .keyLen = TS_LEN, .type = TSDB_DATA_TYPE_TIMESTAMP, .i = ts, .length = (size_t)tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes};
// parse value
smlParseTelnetElement(&sql, sqlEnd, &elements->cols, &elements->colsLen);
if (unlikely(!elements->cols || elements->colsLen == 0)) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid value", sql);
return TSDB_CODE_TSC_INVALID_VALUE;
}
SSmlKv kv = {.key = VALUE, .keyLen = VALUE_LEN, .value = elements->cols, .length = (size_t)elements->colsLen};
if (smlParseValue(&kv, &info->msgBuf) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_VALUE;
}
JUMP_SPACE(sql, sqlEnd)
elements->tags = sql;
elements->tagsLen = sqlEnd - sql;
if (unlikely(!elements->tags || elements->tagsLen == 0)) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid value", sql);
return TSDB_CODE_TSC_INVALID_VALUE;
}
int ret = smlParseTelnetTags(info, sql, sqlEnd, elements, &info->msgBuf);
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
return ret;
}
if(unlikely(info->reRun)){
return TSDB_CODE_SUCCESS;
}
if(info->dataFormat){
if(needConverTime) {
kvTs.i = convertTimePrecision(kvTs.i, TSDB_TIME_PRECISION_NANO, info->currSTableMeta->tableInfo.precision);
}
ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kvTs, 0);
if(ret == TSDB_CODE_SUCCESS){
ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kv, 1);
}
if(ret == TSDB_CODE_SUCCESS){
ret = smlBuildRow(info->currTableDataCtx);
}
clearColValArray(info->currTableDataCtx->pValues);
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL);
return ret;
}
}else{
if(elements->colArray == NULL){
elements->colArray = taosArrayInit(16, sizeof(SSmlKv));
}
taosArrayPush(elements->colArray, &kvTs);
taosArrayPush(elements->colArray, &kv);
}
info->preLine = *elements;
return TSDB_CODE_SUCCESS;
}

View File

@ -152,9 +152,10 @@ int32_t stmtRestoreQueryFields(STscStmt* pStmt) {
return TSDB_CODE_SUCCESS;
}
int32_t stmtUpdateBindInfo(TAOS_STMT* stmt, STableMeta* pTableMeta, void* tags, SName* tbName, const char* sTableName, bool autoCreateTbl) {
int32_t stmtUpdateBindInfo(TAOS_STMT* stmt, STableMeta* pTableMeta, void* tags, SName* tbName, const char* sTableName,
bool autoCreateTbl) {
STscStmt* pStmt = (STscStmt*)stmt;
char tbFName[TSDB_TABLE_FNAME_LEN];
char tbFName[TSDB_TABLE_FNAME_LEN];
tNameExtractFullName(tbName, tbFName);
memcpy(&pStmt->bInfo.sname, tbName, sizeof(*tbName));
@ -171,12 +172,11 @@ int32_t stmtUpdateBindInfo(TAOS_STMT* stmt, STableMeta* pTableMeta, void* tags,
return TSDB_CODE_SUCCESS;
}
int32_t stmtUpdateExecInfo(TAOS_STMT* stmt, SHashObj* pVgHash, SHashObj* pBlockHash, bool autoCreateTbl) {
int32_t stmtUpdateExecInfo(TAOS_STMT* stmt, SHashObj* pVgHash, SHashObj* pBlockHash) {
STscStmt* pStmt = (STscStmt*)stmt;
pStmt->sql.pVgHash = pVgHash;
pStmt->exec.pBlockHash = pBlockHash;
pStmt->exec.autoCreateTbl = autoCreateTbl;
return TSDB_CODE_SUCCESS;
}
@ -186,7 +186,7 @@ int32_t stmtUpdateInfo(TAOS_STMT* stmt, STableMeta* pTableMeta, void* tags, SNam
STscStmt* pStmt = (STscStmt*)stmt;
STMT_ERR_RET(stmtUpdateBindInfo(stmt, pTableMeta, tags, tbName, sTableName, autoCreateTbl));
STMT_ERR_RET(stmtUpdateExecInfo(stmt, pVgHash, pBlockHash, autoCreateTbl));
STMT_ERR_RET(stmtUpdateExecInfo(stmt, pVgHash, pBlockHash));
pStmt->sql.autoCreateTbl = autoCreateTbl;
@ -214,16 +214,16 @@ int32_t stmtCacheBlock(STscStmt* pStmt) {
return TSDB_CODE_SUCCESS;
}
STableDataBlocks** pSrc = taosHashGet(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName));
STableDataCxt** pSrc = taosHashGet(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName));
if (!pSrc) {
return TSDB_CODE_OUT_OF_MEMORY;
}
STableDataBlocks* pDst = NULL;
STableDataCxt* pDst = NULL;
STMT_ERR_RET(qCloneStmtDataBlock(&pDst, *pSrc));
STMT_ERR_RET(qCloneStmtDataBlock(&pDst, *pSrc, true));
SStmtTableCache cache = {
.pDataBlock = pDst,
.pDataCtx = pDst,
.boundTags = pStmt->bInfo.boundTags,
};
@ -241,6 +241,8 @@ int32_t stmtCacheBlock(STscStmt* pStmt) {
}
int32_t stmtParseSql(STscStmt* pStmt) {
pStmt->exec.pCurrBlock = NULL;
SStmtCallback stmtCb = {
.pStmt = pStmt,
.getTbNameFn = stmtGetTbName,
@ -273,7 +275,7 @@ int32_t stmtCleanBindInfo(STscStmt* pStmt) {
pStmt->bInfo.tbName[0] = 0;
pStmt->bInfo.tbFName[0] = 0;
if (!pStmt->bInfo.tagsCached) {
destroyBoundColumnInfo(pStmt->bInfo.boundTags);
qDestroyBoundColInfo(pStmt->bInfo.boundTags);
taosMemoryFreeClear(pStmt->bInfo.boundTags);
}
memset(pStmt->bInfo.stbFName, 0, TSDB_TABLE_FNAME_LEN);
@ -289,29 +291,25 @@ int32_t stmtCleanExecInfo(STscStmt* pStmt, bool keepTable, bool deepClean) {
size_t keyLen = 0;
void* pIter = taosHashIterate(pStmt->exec.pBlockHash, NULL);
while (pIter) {
STableDataBlocks* pBlocks = *(STableDataBlocks**)pIter;
char* key = taosHashGetKey(pIter, &keyLen);
STableMeta* pMeta = qGetTableMetaInDataBlock(pBlocks);
STableDataCxt* pBlocks = *(STableDataCxt**)pIter;
char* key = taosHashGetKey(pIter, &keyLen);
STableMeta* pMeta = qGetTableMetaInDataBlock(pBlocks);
if (keepTable && (strlen(pStmt->bInfo.tbFName) == keyLen) && strncmp(pStmt->bInfo.tbFName, key, keyLen) == 0) {
STMT_ERR_RET(qResetStmtDataBlock(pBlocks, true));
if (keepTable && pBlocks == pStmt->exec.pCurrBlock) {
ASSERT(NULL == pBlocks->pData);
TSWAP(pBlocks->pData, pStmt->exec.pCurrTbData);
STMT_ERR_RET(qResetStmtDataBlock(pBlocks, false));
pIter = taosHashIterate(pStmt->exec.pBlockHash, pIter);
continue;
}
if (STMT_TYPE_MULTI_INSERT == pStmt->sql.type) {
qFreeStmtDataBlock(pBlocks);
} else {
qDestroyStmtDataBlock(pBlocks);
}
qDestroyStmtDataBlock(pBlocks);
taosHashRemove(pStmt->exec.pBlockHash, key, keyLen);
pIter = taosHashIterate(pStmt->exec.pBlockHash, pIter);
}
pStmt->exec.autoCreateTbl = false;
if (keepTable) {
return TSDB_CODE_SUCCESS;
}
@ -319,6 +317,9 @@ int32_t stmtCleanExecInfo(STscStmt* pStmt, bool keepTable, bool deepClean) {
taosHashCleanup(pStmt->exec.pBlockHash);
pStmt->exec.pBlockHash = NULL;
tDestroySSubmitTbData(pStmt->exec.pCurrTbData, TSDB_MSG_FLG_ENCODE);
taosMemoryFreeClear(pStmt->exec.pCurrTbData);
STMT_ERR_RET(stmtCleanBindInfo(pStmt));
return TSDB_CODE_SUCCESS;
@ -337,8 +338,8 @@ int32_t stmtCleanSQLInfo(STscStmt* pStmt) {
while (pIter) {
SStmtTableCache* pCache = (SStmtTableCache*)pIter;
qDestroyStmtDataBlock(pCache->pDataBlock);
destroyBoundColumnInfo(pCache->boundTags);
qDestroyStmtDataBlock(pCache->pDataCtx);
qDestroyBoundColInfo(pCache->boundTags);
taosMemoryFreeClear(pCache->boundTags);
pIter = taosHashIterate(pStmt->sql.pTableCache, pIter);
@ -354,7 +355,8 @@ int32_t stmtCleanSQLInfo(STscStmt* pStmt) {
return TSDB_CODE_SUCCESS;
}
int32_t stmtRebuildDataBlock(STscStmt* pStmt, STableDataBlocks* pDataBlock, STableDataBlocks** newBlock, uint64_t uid) {
int32_t stmtRebuildDataBlock(STscStmt* pStmt, STableDataCxt* pDataBlock, STableDataCxt** newBlock, uint64_t uid,
uint64_t suid) {
SEpSet ep = getEpSet_s(&pStmt->taos->pAppInfo->mgmtEp);
SVgroupInfo vgInfo = {0};
SRequestConnInfo conn = {.pTrans = pStmt->taos->pAppInfo->pTransporter,
@ -366,7 +368,9 @@ int32_t stmtRebuildDataBlock(STscStmt* pStmt, STableDataBlocks* pDataBlock, STab
STMT_ERR_RET(
taosHashPut(pStmt->sql.pVgHash, (const char*)&vgInfo.vgId, sizeof(vgInfo.vgId), (char*)&vgInfo, sizeof(vgInfo)));
STMT_ERR_RET(qRebuildStmtDataBlock(newBlock, pDataBlock, uid, vgInfo.vgId));
STMT_ERR_RET(qRebuildStmtDataBlock(newBlock, pDataBlock, uid, suid, vgInfo.vgId, pStmt->sql.autoCreateTbl));
STMT_DLOG("tableDataCxt rebuilt, uid:%" PRId64 ", vgId:%d", uid, vgInfo.vgId);
return TSDB_CODE_SUCCESS;
}
@ -375,12 +379,13 @@ int32_t stmtGetFromCache(STscStmt* pStmt) {
pStmt->bInfo.needParse = true;
pStmt->bInfo.inExecCache = false;
STableDataBlocks* pBlockInExec =
taosHashGet(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName));
if (pBlockInExec) {
STableDataCxt** pCxtInExec = taosHashGet(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName));
if (pCxtInExec) {
pStmt->bInfo.needParse = false;
pStmt->bInfo.inExecCache = true;
pStmt->exec.pCurrBlock = *pCxtInExec;
if (pStmt->sql.autoCreateTbl) {
tscDebug("reuse stmt block for tb %s in execBlock", pStmt->bInfo.tbFName);
return TSDB_CODE_SUCCESS;
@ -407,18 +412,18 @@ int32_t stmtGetFromCache(STscStmt* pStmt) {
SStmtTableCache* pCache = taosHashGet(pStmt->sql.pTableCache, &pStmt->bInfo.tbSuid, sizeof(pStmt->bInfo.tbSuid));
if (pCache) {
pStmt->bInfo.needParse = false;
pStmt->exec.autoCreateTbl = true;
pStmt->bInfo.tbUid = 0;
STableDataBlocks* pNewBlock = NULL;
STMT_ERR_RET(stmtRebuildDataBlock(pStmt, pCache->pDataBlock, &pNewBlock, 0));
STableDataCxt* pNewBlock = NULL;
STMT_ERR_RET(stmtRebuildDataBlock(pStmt, pCache->pDataCtx, &pNewBlock, 0, pStmt->bInfo.tbSuid));
if (taosHashPut(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName), &pNewBlock,
POINTER_BYTES)) {
STMT_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
pStmt->exec.pCurrBlock = pNewBlock;
tscDebug("reuse stmt block for tb %s in sqlBlock, suid:0x%" PRIx64, pStmt->bInfo.tbFName, pStmt->bInfo.tbSuid);
return TSDB_CODE_SUCCESS;
@ -489,14 +494,16 @@ int32_t stmtGetFromCache(STscStmt* pStmt) {
pStmt->bInfo.boundTags = pCache->boundTags;
pStmt->bInfo.tagsCached = true;
STableDataBlocks* pNewBlock = NULL;
STMT_ERR_RET(stmtRebuildDataBlock(pStmt, pCache->pDataBlock, &pNewBlock, uid));
STableDataCxt* pNewBlock = NULL;
STMT_ERR_RET(stmtRebuildDataBlock(pStmt, pCache->pDataCtx, &pNewBlock, uid, suid));
if (taosHashPut(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName), &pNewBlock,
POINTER_BYTES)) {
STMT_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
pStmt->exec.pCurrBlock = pNewBlock;
tscDebug("tb %s in sqlBlock list, set to current", pStmt->bInfo.tbFName);
return TSDB_CODE_SUCCESS;
@ -614,8 +621,8 @@ int stmtSetTbTags(TAOS_STMT* stmt, TAOS_MULTI_BIND* tags) {
return TSDB_CODE_SUCCESS;
}
STableDataBlocks** pDataBlock =
(STableDataBlocks**)taosHashGet(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName));
STableDataCxt** pDataBlock =
(STableDataCxt**)taosHashGet(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName));
if (NULL == pDataBlock) {
tscError("table %s not found in exec blockHash", pStmt->bInfo.tbFName);
STMT_ERR_RET(TSDB_CODE_APP_ERROR);
@ -626,8 +633,6 @@ int stmtSetTbTags(TAOS_STMT* stmt, TAOS_MULTI_BIND* tags) {
pStmt->bInfo.sname.tname, tags, pStmt->exec.pRequest->msgBuf,
pStmt->exec.pRequest->msgBufLen));
pStmt->exec.autoCreateTbl = true;
return TSDB_CODE_SUCCESS;
}
@ -637,8 +642,8 @@ int stmtFetchTagFields(STscStmt* pStmt, int32_t* fieldNum, TAOS_FIELD_E** fields
STMT_ERR_RET(TSDB_CODE_TSC_STMT_API_ERROR);
}
STableDataBlocks** pDataBlock =
(STableDataBlocks**)taosHashGet(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName));
STableDataCxt** pDataBlock =
(STableDataCxt**)taosHashGet(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName));
if (NULL == pDataBlock) {
tscError("table %s not found in exec blockHash", pStmt->bInfo.tbFName);
STMT_ERR_RET(TSDB_CODE_APP_ERROR);
@ -655,8 +660,8 @@ int stmtFetchColFields(STscStmt* pStmt, int32_t* fieldNum, TAOS_FIELD_E** fields
STMT_ERR_RET(TSDB_CODE_TSC_STMT_API_ERROR);
}
STableDataBlocks** pDataBlock =
(STableDataBlocks**)taosHashGet(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName));
STableDataCxt** pDataBlock =
(STableDataCxt**)taosHashGet(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName));
if (NULL == pDataBlock) {
tscError("table %s not found in exec blockHash", pStmt->bInfo.tbFName);
STMT_ERR_RET(TSDB_CODE_APP_ERROR);
@ -729,11 +734,18 @@ int stmtBindBatch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int32_t colIdx) {
return TSDB_CODE_SUCCESS;
}
STableDataBlocks** pDataBlock =
(STableDataBlocks**)taosHashGet(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName));
if (NULL == pDataBlock) {
tscError("table %s not found in exec blockHash", pStmt->bInfo.tbFName);
STMT_ERR_RET(TSDB_CODE_APP_ERROR);
STableDataCxt** pDataBlock = NULL;
if (pStmt->exec.pCurrBlock) {
pDataBlock = &pStmt->exec.pCurrBlock;
} else {
pDataBlock =
(STableDataCxt**)taosHashGet(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName));
if (NULL == pDataBlock) {
tscError("table %s not found in exec blockHash", pStmt->bInfo.tbFName);
STMT_ERR_RET(TSDB_CODE_TSC_STMT_CACHE_ERROR);
}
pStmt->exec.pCurrBlock = *pDataBlock;
}
if (colIdx < 0) {
@ -779,10 +791,10 @@ int stmtUpdateTableUid(STscStmt* pStmt, SSubmitRsp* pRsp) {
int32_t code = 0;
int32_t finalCode = 0;
size_t keyLen = 0;
STableDataBlocks** pIter = taosHashIterate(pStmt->exec.pBlockHash, NULL);
void* pIter = taosHashIterate(pStmt->exec.pBlockHash, NULL);
while (pIter) {
STableDataBlocks* pBlock = *pIter;
char* key = taosHashGetKey(pIter, &keyLen);
STableDataCxt* pBlock = *(STableDataCxt**)pIter;
char* key = taosHashGetKey(pIter, &keyLen);
STableMeta* pMeta = qGetTableMetaInDataBlock(pBlock);
if (pMeta->uid) {
@ -848,7 +860,7 @@ int stmtUpdateTableUid(STscStmt* pStmt, SSubmitRsp* pRsp) {
pMeta->uid = pTableMeta->uid;
pStmt->bInfo.tbUid = pTableMeta->uid;
taosMemoryFree(pTableMeta);
taosMemoryFree(pTableMeta);
}
pIter = taosHashIterate(pStmt->exec.pBlockHash, pIter);
@ -861,7 +873,6 @@ int stmtExec(TAOS_STMT* stmt) {
STscStmt* pStmt = (STscStmt*)stmt;
int32_t code = 0;
SSubmitRsp* pRsp = NULL;
bool autoCreateTbl = pStmt->exec.autoCreateTbl;
STMT_DLOG_E("start to exec");
@ -870,8 +881,13 @@ int stmtExec(TAOS_STMT* stmt) {
if (STMT_TYPE_QUERY == pStmt->sql.type) {
launchQueryImpl(pStmt->exec.pRequest, pStmt->sql.pQuery, true, NULL);
} else {
tDestroySSubmitTbData(pStmt->exec.pCurrTbData, TSDB_MSG_FLG_ENCODE);
taosMemoryFreeClear(pStmt->exec.pCurrTbData);
STMT_ERR_RET(qCloneCurrentTbData(pStmt->exec.pCurrBlock, &pStmt->exec.pCurrTbData));
STMT_ERR_RET(qBuildStmtOutput(pStmt->sql.pQuery, pStmt->sql.pVgHash, pStmt->exec.pBlockHash));
launchQueryImpl(pStmt->exec.pRequest, pStmt->sql.pQuery, true, (autoCreateTbl ? (void**)&pRsp : NULL));
launchQueryImpl(pStmt->exec.pRequest, pStmt->sql.pQuery, true, NULL);
}
if (pStmt->exec.pRequest->code && NEED_CLIENT_HANDLE_ERROR(pStmt->exec.pRequest->code)) {
@ -894,15 +910,6 @@ _return:
stmtCleanExecInfo(pStmt, (code ? false : true), false);
if (TSDB_CODE_SUCCESS == code && autoCreateTbl) {
if (NULL == pRsp) {
tscError("no submit resp got for auto create table");
code = TSDB_CODE_APP_ERROR;
} else {
code = stmtUpdateTableUid(pStmt, pRsp);
}
}
tFreeSSubmitRsp(pRsp);
++pStmt->sql.runTimes;

View File

@ -530,7 +530,6 @@ static int32_t tmqSendCommitReq(tmq_t* tmq, SMqClientVg* pVg, SMqClientTopic* pT
int32_t tmqCommitMsgImpl(tmq_t* tmq, const TAOS_RES* msg, int8_t async, tmq_commit_cb* userCb, void* userParam) {
char* topic;
int32_t vgId;
ASSERT(msg != NULL);
if (TD_RES_TMQ(msg)) {
SMqRspObj* pRspObj = (SMqRspObj*)msg;
topic = pRspObj->topic;
@ -809,8 +808,6 @@ int32_t tmqHandleAllDelayedTask(tmq_t* tmq) {
taosTmrReset(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, pRefId, tmqMgmt.timer, &tmq->commitTimer);
} else if (*pTaskType == TMQ_DELAYED_TASK__REPORT) {
} else {
ASSERT(0);
}
taosFreeQitem(pTaskType);
}
@ -955,10 +952,6 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
const char* user = conf->user == NULL ? TSDB_DEFAULT_USER : conf->user;
const char* pass = conf->pass == NULL ? TSDB_DEFAULT_PASS : conf->pass;
ASSERT(user);
ASSERT(pass);
ASSERT(conf->groupId[0]);
pTmq->clientTopics = taosArrayInit(0, sizeof(SMqClientTopic));
pTmq->mqueue = taosOpenQueue();
pTmq->qall = taosAllocateQall();
@ -1249,8 +1242,6 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
tDecodeSTaosxRsp(&decoder, &pRspWrapper->taosxRsp);
tDecoderClear(&decoder);
memcpy(&pRspWrapper->taosxRsp, pMsg->pData, sizeof(SMqRspHead));
} else {
ASSERT(0);
}
taosMemoryFree(pMsg->pData);

File diff suppressed because it is too large

View File

@ -19,7 +19,7 @@
#include "tlog.h"
#include "tname.h"
#define MALLOC_ALIGN_BYTES 256
#define MALLOC_ALIGN_BYTES 32
int32_t colDataGetLength(const SColumnInfoData* pColumnInfoData, int32_t numOfRows) {
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
@ -37,7 +37,8 @@ int32_t colDataGetFullLength(const SColumnInfoData* pColumnInfoData, int32_t num
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
return pColumnInfoData->varmeta.length + sizeof(int32_t) * numOfRows;
} else {
return ((pColumnInfoData->info.type == TSDB_DATA_TYPE_NULL) ? 0 : pColumnInfoData->info.bytes * numOfRows) + BitmapLen(numOfRows);
return ((pColumnInfoData->info.type == TSDB_DATA_TYPE_NULL) ? 0 : pColumnInfoData->info.bytes * numOfRows) +
BitmapLen(numOfRows);
}
}
@ -277,7 +278,7 @@ int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int
pColumnInfoData->varmeta.allocLen = len + oldLen;
}
if (pColumnInfoData->pData && pSource->pData) { // TD-20382
if (pColumnInfoData->pData && pSource->pData) { // TD-20382
memcpy(pColumnInfoData->pData + oldLen, pSource->pData, len);
}
pColumnInfoData->varmeta.length = len + oldLen;
@ -357,7 +358,7 @@ size_t blockDataGetNumOfRows(const SSDataBlock* pBlock) { return pBlock->info.ro
int32_t blockDataUpdateTsWindow(SSDataBlock* pDataBlock, int32_t tsColumnIndex) {
if (pDataBlock->info.rows > 0) {
// ASSERT(pDataBlock->info.dataLoad == 1);
// ASSERT(pDataBlock->info.dataLoad == 1);
}
if (pDataBlock == NULL || pDataBlock->info.rows <= 0 || pDataBlock->info.dataLoad == 0) {
@ -1162,7 +1163,8 @@ void blockDataEmpty(SSDataBlock* pDataBlock) {
}
// todo temporarily disable it
static int32_t doEnsureCapacity(SColumnInfoData* pColumn, const SDataBlockInfo* pBlockInfo, uint32_t numOfRows, bool clearPayload) {
static int32_t doEnsureCapacity(SColumnInfoData* pColumn, const SDataBlockInfo* pBlockInfo, uint32_t numOfRows,
bool clearPayload) {
if (numOfRows <= 0 || numOfRows <= pBlockInfo->capacity) {
return TSDB_CODE_SUCCESS;
}
@ -1223,7 +1225,7 @@ static int32_t doEnsureCapacity(SColumnInfoData* pColumn, const SDataBlockInfo*
return TSDB_CODE_SUCCESS;
}
void colInfoDataCleanup(SColumnInfoData* pColumn, uint32_t numOfRows) {
void colInfoDataCleanup(SColumnInfoData* pColumn, uint32_t numOfRows) {
pColumn->hasNull = false;
if (IS_VAR_DATA_TYPE(pColumn->info.type)) {
@ -1867,7 +1869,7 @@ void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag) {
char pBuf[128] = {0};
int32_t sz = taosArrayGetSize(dataBlocks);
for (int32_t i = 0; i < sz; i++) {
SSDataBlock* pDataBlock = taosArrayGetP(dataBlocks, i);
SSDataBlock* pDataBlock = taosArrayGet(dataBlocks, i);
size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock);
int32_t rows = pDataBlock->info.rows;
@ -1879,21 +1881,37 @@ void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag) {
for (int32_t k = 0; k < numOfCols; k++) {
SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k);
void* var = POINTER_SHIFT(pColInfoData->pData, j * pColInfoData->info.bytes);
if (k == 0) {
printf("cols:%d |", (int32_t)numOfCols);
}
if (colDataIsNull(pColInfoData, rows, j, NULL)) {
printf(" %15s |", "NULL");
continue;
}
switch (pColInfoData->info.type) {
case TSDB_DATA_TYPE_TIMESTAMP:
formatTimestamp(pBuf, *(uint64_t*)var, TSDB_TIME_PRECISION_MILLI);
printf(" %25s |", pBuf);
break;
case TSDB_DATA_TYPE_BOOL:
printf(" %15d |", *(int32_t*)var);
printf(" %15" PRIi8 " |", *(int8_t*)var);
break;
case TSDB_DATA_TYPE_TINYINT:
printf(" %15" PRIi8 " |", *(int8_t*)var);
break;
case TSDB_DATA_TYPE_SMALLINT:
printf(" %15" PRIi16 " |", *(int16_t*)var);
break;
case TSDB_DATA_TYPE_INT:
printf(" %15d |", *(int32_t*)var);
break;
case TSDB_DATA_TYPE_UTINYINT:
printf(" %15" PRIu8 " |", *(uint8_t*)var);
break;
case TSDB_DATA_TYPE_USMALLINT:
printf(" %15" PRIu16 " |", *(uint16_t*)var);
break;
case TSDB_DATA_TYPE_UINT:
printf(" %15u |", *(uint32_t*)var);
break;
@ -1945,7 +1963,8 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf)
"===stream===%s|block type %d|child id %d|group id:%" PRIu64 "|uid:%" PRId64
"|rows:%d|version:%" PRIu64 "|cal start:%" PRIu64 "|cal end:%" PRIu64 "\n",
flag, (int32_t)pDataBlock->info.type, pDataBlock->info.childId, pDataBlock->info.id.groupId,
pDataBlock->info.id.uid, pDataBlock->info.rows, pDataBlock->info.version, pDataBlock->info.calWin.skey, pDataBlock->info.calWin.ekey);
pDataBlock->info.id.uid, pDataBlock->info.rows, pDataBlock->info.version,
pDataBlock->info.calWin.skey, pDataBlock->info.calWin.ekey);
if (len >= size - 1) return dumpBuf;
for (int32_t j = 0; j < rows; j++) {
@ -2047,6 +2066,7 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf)
* @param suid
*
*/
#if 0
int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataBlock, STSchema* pTSchema, int32_t vgId,
tb_uid_t suid) {
int32_t bufSize = sizeof(SSubmitReq);
@ -2210,35 +2230,189 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataB
return TSDB_CODE_SUCCESS;
}
#endif
int32_t buildSubmitReqFromDataBlock(SSubmitReq2** ppReq, const SSDataBlock* pDataBlock, const STSchema* pTSchema,
int64_t uid, int32_t vgId, tb_uid_t suid) {
SSubmitReq2* pReq = *ppReq;
SArray* pVals = NULL;
int32_t numOfBlks = 0;
int32_t sz = 1;
terrno = TSDB_CODE_SUCCESS;
if (NULL == pReq) {
if (!(pReq = taosMemoryMalloc(sizeof(SSubmitReq2)))) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _end;
}
if (!(pReq->aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData)))) {
goto _end;
}
}
for (int32_t i = 0; i < sz; ++i) {
int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock);
int32_t rows = pDataBlock->info.rows;
if (colNum <= 1) { // skip blocks that only contain the timestamp column
continue;
}
// the rsma result should have the same number of columns as the schema
ASSERT(colNum == pTSchema->numOfCols);
SSubmitTbData tbData = {0};
if (!(tbData.aRowP = taosArrayInit(rows, sizeof(SRow*)))) {
goto _end;
}
tbData.suid = suid;
tbData.uid = uid;
tbData.sver = pTSchema->version;
if (!pVals && !(pVals = taosArrayInit(colNum, sizeof(SColVal)))) {
taosArrayDestroy(tbData.aRowP);
goto _end;
}
for (int32_t j = 0; j < rows; ++j) { // iterate by row
taosArrayClear(pVals);
bool isStartKey = false;
int32_t offset = 0;
for (int32_t k = 0; k < colNum; ++k) { // iterate by column
SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k);
const STColumn* pCol = &pTSchema->columns[k];
void* var = POINTER_SHIFT(pColInfoData->pData, j * pColInfoData->info.bytes);
switch (pColInfoData->info.type) {
case TSDB_DATA_TYPE_TIMESTAMP:
ASSERT(pColInfoData->info.type == pCol->type);
if (!isStartKey) {
isStartKey = true;
ASSERT(PRIMARYKEY_TIMESTAMP_COL_ID == pCol->colId);
SColVal cv = COL_VAL_VALUE(pCol->colId, pCol->type, (SValue){.val = *(TSKEY*)var});
taosArrayPush(pVals, &cv);
} else if (colDataIsNull_s(pColInfoData, j)) {
SColVal cv = COL_VAL_NULL(pCol->colId, pCol->type);
taosArrayPush(pVals, &cv);
} else {
SColVal cv = COL_VAL_VALUE(pCol->colId, pCol->type, (SValue){.val = *(int64_t*)var});
taosArrayPush(pVals, &cv);
}
break;
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_VARCHAR: { // TSDB_DATA_TYPE_BINARY
ASSERT(pColInfoData->info.type == pCol->type);
if (colDataIsNull_s(pColInfoData, j)) {
SColVal cv = COL_VAL_NULL(pCol->colId, pCol->type);
taosArrayPush(pVals, &cv);
} else {
void* data = colDataGetVarData(pColInfoData, j);
SValue sv = (SValue){.nData = varDataLen(data), .pData = varDataVal(data)}; // address copy, no value
SColVal cv = COL_VAL_VALUE(pCol->colId, pCol->type, sv);
taosArrayPush(pVals, &cv);
}
break;
}
case TSDB_DATA_TYPE_VARBINARY:
case TSDB_DATA_TYPE_DECIMAL:
case TSDB_DATA_TYPE_BLOB:
case TSDB_DATA_TYPE_JSON:
case TSDB_DATA_TYPE_MEDIUMBLOB:
uError("the column type %" PRIi16 " is defined but not implemented yet", pColInfoData->info.type);
ASSERT(0);
break;
default:
if (pColInfoData->info.type < TSDB_DATA_TYPE_MAX && pColInfoData->info.type > TSDB_DATA_TYPE_NULL) {
if (colDataIsNull_s(pColInfoData, j)) {
SColVal cv = COL_VAL_NULL(pCol->colId, pCol->type); // should use pCol->type
taosArrayPush(pVals, &cv);
} else {
SValue sv;
if (pCol->type == pColInfoData->info.type) {
memcpy(&sv.val, var, tDataTypes[pCol->type].bytes);
} else {
/**
* 1. sum/avg results are converted to int64_t/uint64_t/double during aggregation
* 2. the conversion below may overflow or lose precision; the application should choose an appropriate data type
*/
char tv[8] = {0};
if (pColInfoData->info.type == TSDB_DATA_TYPE_FLOAT) {
float v = 0;
GET_TYPED_DATA(v, float, pColInfoData->info.type, var);
SET_TYPED_DATA(&tv, pCol->type, v);
} else if (pColInfoData->info.type == TSDB_DATA_TYPE_DOUBLE) {
double v = 0;
GET_TYPED_DATA(v, double, pColInfoData->info.type, var);
SET_TYPED_DATA(&tv, pCol->type, v);
} else if (IS_SIGNED_NUMERIC_TYPE(pColInfoData->info.type)) {
int64_t v = 0;
GET_TYPED_DATA(v, int64_t, pColInfoData->info.type, var);
SET_TYPED_DATA(&tv, pCol->type, v);
} else {
uint64_t v = 0;
GET_TYPED_DATA(v, uint64_t, pColInfoData->info.type, var);
SET_TYPED_DATA(&tv, pCol->type, v);
}
memcpy(&sv.val, tv, tDataTypes[pCol->type].bytes);
}
SColVal cv = COL_VAL_VALUE(pCol->colId, pColInfoData->info.type, sv);
taosArrayPush(pVals, &cv);
}
} else {
uError("the column type %" PRIi16 " is undefined\n", pColInfoData->info.type);
ASSERT(0);
}
break;
}
}
SRow* pRow = NULL;
if ((terrno = tRowBuild(pVals, pTSchema, &pRow)) < 0) {
tDestroySSubmitTbData(&tbData, TSDB_MSG_FLG_ENCODE);
goto _end;
}
ASSERT(pRow);
taosArrayPush(tbData.aRowP, &pRow);
}
taosArrayPush(pReq->aSubmitTbData, &tbData);
}
_end:
taosArrayDestroy(pVals);
if (terrno != 0) {
*ppReq = NULL;
if (pReq) tDestroySSubmitReq2(pReq, TSDB_MSG_FLG_ENCODE);
return TSDB_CODE_FAILED;
}
*ppReq = pReq;
return TSDB_CODE_SUCCESS;
}
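// Usage sketch (illustrative; the caller names are assumptions, not taken from this file):
// result writers such as the stream/rsma sinks pass one SSDataBlock plus the destination
// table's STSchema, uid, suid and vgId, then serialize the returned SSubmitReq2 with
// tEncodeSSubmitReq2 before dispatching it to the target vnode.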
char* buildCtbNameByGroupId(const char* stbFullName, uint64_t groupId) {
if (stbFullName[0] == 0) {
return NULL;
}
SArray* tags = taosArrayInit(0, sizeof(void*));
SArray* tags = taosArrayInit(0, sizeof(SSmlKv));
if (tags == NULL) {
return NULL;
}
SSmlKv* pTag = taosMemoryCalloc(1, sizeof(SSmlKv));
if (pTag == NULL) {
taosArrayDestroy(tags);
return NULL;
}
void* cname = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN + 1);
if (cname == NULL) {
taosArrayDestroy(tags);
taosMemoryFree(pTag);
return NULL;
}
pTag->key = "group_id";
pTag->keyLen = strlen(pTag->key);
pTag->type = TSDB_DATA_TYPE_UBIGINT;
pTag->u = groupId;
pTag->length = sizeof(uint64_t);
SSmlKv pTag = {.key = "group_id",
.keyLen = sizeof("group_id") - 1,
.type = TSDB_DATA_TYPE_UBIGINT,
.u = groupId,
.length = sizeof(uint64_t)};
taosArrayPush(tags, &pTag);
RandTableName rname = {
@ -2250,7 +2424,6 @@ char* buildCtbNameByGroupId(const char* stbFullName, uint64_t groupId) {
buildChildTableName(&rname);
taosMemoryFree(pTag);
taosArrayDestroy(tags);
if ((rname.ctbShortName && rname.ctbShortName[0]) == 0) {

File diff suppressed because it is too large

View File

@ -5424,6 +5424,7 @@ int32_t tSerializeSCMCreateStreamReq(void *buf, int32_t bufLen, const SCMCreateS
if (tEncodeI32(&encoder, pField->bytes) < 0) return -1;
if (tEncodeCStr(&encoder, pField->name) < 0) return -1;
}
if (tEncodeI8(&encoder, pReq->createStb) < 0) return -1;
tEndEncode(&encoder);
@ -5484,6 +5485,7 @@ int32_t tDeserializeSCMCreateStreamReq(void *buf, int32_t bufLen, SCMCreateStrea
}
}
}
if (tDecodeI8(&decoder, &pReq->createStb) < 0) return -1;
tEndDecode(&decoder);
@ -5627,30 +5629,6 @@ int tDecodeSVCreateStbReq(SDecoder *pCoder, SVCreateStbReq *pReq) {
return 0;
}
STSchema *tdGetSTSChemaFromSSChema(SSchema *pSchema, int32_t nCols, int32_t sver) {
STSchemaBuilder schemaBuilder = {0};
if (tdInitTSchemaBuilder(&schemaBuilder, sver) < 0) {
return NULL;
}
for (int i = 0; i < nCols; i++) {
SSchema *schema = pSchema + i;
if (tdAddColToSchema(&schemaBuilder, schema->type, schema->flags, schema->colId, schema->bytes) < 0) {
tdDestroyTSchemaBuilder(&schemaBuilder);
return NULL;
}
}
STSchema *pNSchema = tdGetSchemaFromBuilder(&schemaBuilder);
if (pNSchema == NULL) {
tdDestroyTSchemaBuilder(&schemaBuilder);
return NULL;
}
tdDestroyTSchemaBuilder(&schemaBuilder);
return pNSchema;
}
int tEncodeSVCreateTbReq(SEncoder *pCoder, const SVCreateTbReq *pReq) {
if (tStartEncode(pCoder) < 0) return -1;
@ -5728,6 +5706,25 @@ int tDecodeSVCreateTbReq(SDecoder *pCoder, SVCreateTbReq *pReq) {
return 0;
}
void tDestroySVCreateTbReq(SVCreateTbReq *pReq, int32_t flags) {
if (pReq == NULL) return;
if (flags & TSDB_MSG_FLG_ENCODE) {
// TODO
} else if (flags & TSDB_MSG_FLG_DECODE) {
if (pReq->comment) {
taosMemoryFree(pReq->comment);
pReq->comment = NULL;
}
if (pReq->type == TSDB_CHILD_TABLE) {
if (pReq->ctb.tagName) taosArrayDestroy(pReq->ctb.tagName);
} else if (pReq->type == TSDB_NORMAL_TABLE) {
if (pReq->ntb.schemaRow.pSchema) taosMemoryFree(pReq->ntb.schemaRow.pSchema);
}
}
}
int tEncodeSVCreateTbBatchReq(SEncoder *pCoder, const SVCreateTbBatchReq *pReq) {
int32_t nReq = taosArrayGetSize(pReq->pArray);
@ -6719,3 +6716,328 @@ int32_t tDecodeSBatchDeleteReq(SDecoder *pDecoder, SBatchDeleteReq *pReq) {
}
return 0;
}
static int32_t tEncodeSSubmitTbData(SEncoder *pCoder, const SSubmitTbData *pSubmitTbData) {
if (tStartEncode(pCoder) < 0) return -1;
if (tEncodeI32v(pCoder, pSubmitTbData->flags) < 0) return -1;
// auto create table
if (pSubmitTbData->flags & SUBMIT_REQ_AUTO_CREATE_TABLE) {
ASSERT(pSubmitTbData->pCreateTbReq);
if (tEncodeSVCreateTbReq(pCoder, pSubmitTbData->pCreateTbReq) < 0) return -1;
}
// submit data
if (tEncodeI64(pCoder, pSubmitTbData->suid) < 0) return -1;
if (tEncodeI64(pCoder, pSubmitTbData->uid) < 0) return -1;
if (tEncodeI32v(pCoder, pSubmitTbData->sver) < 0) return -1;
if (pSubmitTbData->flags & SUBMIT_REQ_COLUMN_DATA_FORMAT) {
uint64_t nColData = TARRAY_SIZE(pSubmitTbData->aCol);
SColData *aColData = (SColData *)TARRAY_DATA(pSubmitTbData->aCol);
if (tEncodeU64v(pCoder, nColData) < 0) return -1;
for (uint64_t i = 0; i < nColData; i++) {
pCoder->pos += tPutColData(pCoder->data ? pCoder->data + pCoder->pos : NULL, &aColData[i]);
}
} else {
if (tEncodeU64v(pCoder, TARRAY_SIZE(pSubmitTbData->aRowP)) < 0) return -1;
SRow **rows = (SRow **)TARRAY_DATA(pSubmitTbData->aRowP);
for (int32_t iRow = 0; iRow < TARRAY_SIZE(pSubmitTbData->aRowP); ++iRow) {
if (pCoder->data) memcpy(pCoder->data + pCoder->pos, rows[iRow], rows[iRow]->len);
pCoder->pos += rows[iRow]->len;
}
}
tEndEncode(pCoder);
return 0;
}
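// Wire layout sketch (derived from the encoder above): flags | [SVCreateTbReq when
// SUBMIT_REQ_AUTO_CREATE_TABLE is set] | suid | uid | sver | then either nColData + SColData
// blobs (SUBMIT_REQ_COLUMN_DATA_FORMAT) or nRow + raw SRow blobs.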
static int32_t tDecodeSSubmitTbData(SDecoder *pCoder, SSubmitTbData *pSubmitTbData) {
int32_t code = 0;
if (tStartDecode(pCoder) < 0) {
code = TSDB_CODE_INVALID_MSG;
goto _exit;
}
if (tDecodeI32v(pCoder, &pSubmitTbData->flags) < 0) return -1;
if (pSubmitTbData->flags & SUBMIT_REQ_AUTO_CREATE_TABLE) {
pSubmitTbData->pCreateTbReq = taosMemoryCalloc(1, sizeof(SVCreateTbReq));
if (pSubmitTbData->pCreateTbReq == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
if (tDecodeSVCreateTbReq(pCoder, pSubmitTbData->pCreateTbReq) < 0) {
code = TSDB_CODE_INVALID_MSG;
goto _exit;
}
}
// submit data
if (tDecodeI64(pCoder, &pSubmitTbData->suid) < 0) {
code = TSDB_CODE_INVALID_MSG;
goto _exit;
}
if (tDecodeI64(pCoder, &pSubmitTbData->uid) < 0) {
code = TSDB_CODE_INVALID_MSG;
goto _exit;
}
if (tDecodeI32v(pCoder, &pSubmitTbData->sver) < 0) {
code = TSDB_CODE_INVALID_MSG;
goto _exit;
}
if (pSubmitTbData->flags & SUBMIT_REQ_COLUMN_DATA_FORMAT) {
uint64_t nColData;
if (tDecodeU64v(pCoder, &nColData) < 0) {
code = TSDB_CODE_INVALID_MSG;
goto _exit;
}
pSubmitTbData->aCol = taosArrayInit(nColData, sizeof(SColData));
if (pSubmitTbData->aCol == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
for (int32_t i = 0; i < nColData; ++i) {
pCoder->pos += tGetColData(pCoder->data + pCoder->pos, taosArrayReserve(pSubmitTbData->aCol, 1));
}
} else {
uint64_t nRow;
if (tDecodeU64v(pCoder, &nRow) < 0) {
code = TSDB_CODE_INVALID_MSG;
goto _exit;
}
pSubmitTbData->aRowP = taosArrayInit(nRow, sizeof(SRow *));
if (pSubmitTbData->aRowP == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
for (int32_t iRow = 0; iRow < nRow; ++iRow) {
SRow **ppRow = taosArrayReserve(pSubmitTbData->aRowP, 1);
*ppRow = (SRow *)(pCoder->data + pCoder->pos);
pCoder->pos += (*ppRow)->len;
}
}
tEndDecode(pCoder);
_exit:
if (code) {
// TODO: clear
}
return 0;
}
int32_t tEncodeSSubmitReq2(SEncoder *pCoder, const SSubmitReq2 *pReq) {
if (tStartEncode(pCoder) < 0) return -1;
if (tEncodeU64v(pCoder, taosArrayGetSize(pReq->aSubmitTbData)) < 0) return -1;
for (uint64_t i = 0; i < taosArrayGetSize(pReq->aSubmitTbData); i++) {
if (tEncodeSSubmitTbData(pCoder, taosArrayGet(pReq->aSubmitTbData, i)) < 0) return -1;
}
tEndEncode(pCoder);
return 0;
}
int32_t tDecodeSSubmitReq2(SDecoder *pCoder, SSubmitReq2 *pReq) {
int32_t code = 0;
memset(pReq, 0, sizeof(*pReq));
// decode
if (tStartDecode(pCoder) < 0) {
code = TSDB_CODE_INVALID_MSG;
goto _exit;
}
uint64_t nSubmitTbData;
if (tDecodeU64v(pCoder, &nSubmitTbData) < 0) {
code = TSDB_CODE_INVALID_MSG;
goto _exit;
}
pReq->aSubmitTbData = taosArrayInit(nSubmitTbData, sizeof(SSubmitTbData));
if (pReq->aSubmitTbData == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
for (uint64_t i = 0; i < nSubmitTbData; i++) {
if (tDecodeSSubmitTbData(pCoder, taosArrayReserve(pReq->aSubmitTbData, 1)) < 0) {
code = TSDB_CODE_INVALID_MSG;
goto _exit;
}
}
tEndDecode(pCoder);
_exit:
if (code) {
if (pReq->aSubmitTbData) {
// todo
taosArrayDestroy(pReq->aSubmitTbData);
pReq->aSubmitTbData = NULL;
}
}
return code;
}
void tDestroySSubmitTbData(SSubmitTbData *pTbData, int32_t flag) {
if (NULL == pTbData) {
return;
}
if (flag == TSDB_MSG_FLG_ENCODE) {
if (pTbData->pCreateTbReq) {
tdDestroySVCreateTbReq(pTbData->pCreateTbReq);
taosMemoryFree(pTbData->pCreateTbReq);
}
if (pTbData->flags & SUBMIT_REQ_COLUMN_DATA_FORMAT) {
int32_t nColData = TARRAY_SIZE(pTbData->aCol);
SColData *aColData = (SColData *)TARRAY_DATA(pTbData->aCol);
for (int32_t i = 0; i < nColData; ++i) {
tColDataDestroy(&aColData[i]);
}
taosArrayDestroy(pTbData->aCol);
} else {
int32_t nRow = TARRAY_SIZE(pTbData->aRowP);
SRow **rows = (SRow **)TARRAY_DATA(pTbData->aRowP);
for (int32_t i = 0; i < nRow; ++i) {
tRowDestroy(rows[i]);
}
taosArrayDestroy(pTbData->aRowP);
}
} else if (flag == TSDB_MSG_FLG_DECODE) {
if (pTbData->pCreateTbReq) {
tDestroySVCreateTbReq(pTbData->pCreateTbReq, TSDB_MSG_FLG_DECODE);
taosMemoryFree(pTbData->pCreateTbReq);
}
if (pTbData->flags & SUBMIT_REQ_COLUMN_DATA_FORMAT) {
taosArrayDestroy(pTbData->aCol);
} else {
taosArrayDestroy(pTbData->aRowP);
}
}
}
void tDestroySSubmitReq2(SSubmitReq2 *pReq, int32_t flag) {
if (pReq->aSubmitTbData == NULL) return;
int32_t nSubmitTbData = TARRAY_SIZE(pReq->aSubmitTbData);
SSubmitTbData *aSubmitTbData = (SSubmitTbData *)TARRAY_DATA(pReq->aSubmitTbData);
for (int32_t i = 0; i < nSubmitTbData; i++) {
tDestroySSubmitTbData(&aSubmitTbData[i], flag);
}
taosArrayDestroy(pReq->aSubmitTbData);
}
int32_t tEncodeSSubmitRsp2(SEncoder *pCoder, const SSubmitRsp2 *pRsp) {
if (tStartEncode(pCoder) < 0) return -1;
if (tEncodeI32v(pCoder, pRsp->affectedRows) < 0) return -1;
if (tEncodeU64v(pCoder, taosArrayGetSize(pRsp->aCreateTbRsp)) < 0) return -1;
for (int32_t i = 0; i < taosArrayGetSize(pRsp->aCreateTbRsp); ++i) {
if (tEncodeSVCreateTbRsp(pCoder, taosArrayGet(pRsp->aCreateTbRsp, i)) < 0) return -1;
}
tEndEncode(pCoder);
return 0;
}
int32_t tDecodeSSubmitRsp2(SDecoder *pCoder, SSubmitRsp2 *pRsp) {
int32_t code = 0;
memset(pRsp, 0, sizeof(SSubmitRsp2));
// decode
if (tStartDecode(pCoder) < 0) {
code = TSDB_CODE_INVALID_MSG;
goto _exit;
}
if (tDecodeI32v(pCoder, &pRsp->affectedRows) < 0) {
code = TSDB_CODE_INVALID_MSG;
goto _exit;
}
uint64_t nCreateTbRsp;
if (tDecodeU64v(pCoder, &nCreateTbRsp) < 0) {
code = TSDB_CODE_INVALID_MSG;
goto _exit;
}
if (nCreateTbRsp) {
pRsp->aCreateTbRsp = taosArrayInit(nCreateTbRsp, sizeof(SVCreateTbRsp));
if (pRsp->aCreateTbRsp == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
for (int32_t i = 0; i < nCreateTbRsp; ++i) {
SVCreateTbRsp *pCreateTbRsp = taosArrayReserve(pRsp->aCreateTbRsp, 1);
if (tDecodeSVCreateTbRsp(pCoder, pCreateTbRsp) < 0) {
code = TSDB_CODE_INVALID_MSG;
goto _exit;
}
}
}
tEndDecode(pCoder);
_exit:
if (code) {
if (pRsp->aCreateTbRsp) {
taosArrayDestroyEx(pRsp->aCreateTbRsp, NULL /* todo */);
}
}
return code;
}
void tDestroySSubmitRsp2(SSubmitRsp2 *pRsp, int32_t flag) {
if (NULL == pRsp) {
return;
}
if (flag & TSDB_MSG_FLG_ENCODE) {
if (pRsp->aCreateTbRsp) {
int32_t nCreateTbRsp = TARRAY_SIZE(pRsp->aCreateTbRsp);
SVCreateTbRsp *aCreateTbRsp = TARRAY_DATA(pRsp->aCreateTbRsp);
for (int32_t i = 0; i < nCreateTbRsp; ++i) {
if (aCreateTbRsp[i].pMeta) {
taosMemoryFree(aCreateTbRsp[i].pMeta);
}
}
taosArrayDestroy(pRsp->aCreateTbRsp);
}
} else if (flag & TSDB_MSG_FLG_DECODE) {
if (pRsp->aCreateTbRsp) {
int32_t nCreateTbRsp = TARRAY_SIZE(pRsp->aCreateTbRsp);
SVCreateTbRsp *aCreateTbRsp = TARRAY_DATA(pRsp->aCreateTbRsp);
for (int32_t i = 0; i < nCreateTbRsp; ++i) {
if (aCreateTbRsp[i].pMeta) {
taosMemoryFree(aCreateTbRsp[i].pMeta);
}
}
taosArrayDestroy(pRsp->aCreateTbRsp);
}
}
}
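// --- Minimal round-trip sketch (illustration only, not part of this change set). ---
// It assumes the common two-pass pattern of encoding into a NULL buffer to size the
// payload before encoding for real; SEncoder.pos as the encoded length and
// tEncoderClear() are assumptions here, everything else is taken from the
// tEncodeSSubmitReq2/tDecodeSSubmitReq2/tDestroySSubmitReq2 functions above.
static int32_t submitReq2RoundTrip(const SSubmitReq2 *pReq) {
  SEncoder sizer = {0};
  tEncoderInit(&sizer, NULL, 0);
  if (tEncodeSSubmitReq2(&sizer, pReq) < 0) {
    tEncoderClear(&sizer);
    return -1;
  }
  int32_t len = sizer.pos;  // assumed: pos tracks the bytes that would be written
  tEncoderClear(&sizer);

  uint8_t *buf = taosMemoryMalloc(len);
  if (buf == NULL) return TSDB_CODE_OUT_OF_MEMORY;

  SEncoder encoder = {0};
  tEncoderInit(&encoder, buf, len);
  int32_t code = tEncodeSSubmitReq2(&encoder, pReq);
  tEncoderClear(&encoder);

  if (code == 0) {
    // decode back; with TSDB_MSG_FLG_DECODE only the containers are freed, because
    // decoded rows point into the decoder buffer (see tDecodeSSubmitTbData above)
    SSubmitReq2 decoded = {0};
    SDecoder decoder = {0};
    tDecoderInit(&decoder, buf, len);
    code = tDecodeSSubmitReq2(&decoder, &decoded);
    tDecoderClear(&decoder);
    tDestroySSubmitReq2(&decoded, TSDB_MSG_FLG_DECODE);
  }

  taosMemoryFree(buf);
  return code;
}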

View File

@ -90,10 +90,8 @@ int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, in
SName* toName(int32_t acctId, const char* pDbName, const char* pTableName, SName* pName) {
pName->type = TSDB_TABLE_NAME_T;
pName->acctId = acctId;
memset(pName->dbname, 0, TSDB_DB_NAME_LEN);
strncpy(pName->dbname, pDbName, TSDB_DB_NAME_LEN - 1);
memset(pName->tname, 0, TSDB_TABLE_NAME_LEN);
strncpy(pName->tname, pTableName, TSDB_TABLE_NAME_LEN - 1);
snprintf(pName->dbname, sizeof(pName->dbname), "%s", pDbName);
snprintf(pName->tname, sizeof(pName->tname), "%s", pTableName);
return pName;
}
@ -284,8 +282,8 @@ int32_t tNameFromString(SName* dst, const char* str, uint32_t type) {
}
static int compareKv(const void* p1, const void* p2) {
SSmlKv* kv1 = *(SSmlKv**)p1;
SSmlKv* kv2 = *(SSmlKv**)p2;
SSmlKv* kv1 = (SSmlKv*)p1;
SSmlKv* kv2 = (SSmlKv*)p2;
int32_t kvLen1 = kv1->keyLen;
int32_t kvLen2 = kv2->keyLen;
int32_t res = strncasecmp(kv1->key, kv2->key, TMIN(kvLen1, kvLen2));
@ -302,11 +300,11 @@ static int compareKv(const void* p1, const void* p2) {
void buildChildTableName(RandTableName* rName) {
SStringBuilder sb = {0};
taosStringBuilderAppendStringLen(&sb, rName->stbFullName, rName->stbFullNameLen);
if(sb.buf == NULL) return;
if (sb.buf == NULL) return;
taosArraySort(rName->tags, compareKv);
for (int j = 0; j < taosArrayGetSize(rName->tags); ++j) {
taosStringBuilderAppendChar(&sb, ',');
SSmlKv* tagKv = taosArrayGetP(rName->tags, j);
SSmlKv* tagKv = taosArrayGet(rName->tags, j);
taosStringBuilderAppendStringLen(&sb, tagKv->key, tagKv->keyLen);
taosStringBuilderAppendChar(&sb, '=');
if (IS_VAR_DATA_TYPE(tagKv->type)) {

View File

@ -117,7 +117,7 @@ STSchema *genSTSchema(int16_t nCols) {
}
STSchema *pResult = NULL;
pResult = tdGetSTSChemaFromSSChema(pSchema, nCols, 1);
pResult = tBuildTSchema(pSchema, nCols, 1);
taosMemoryFree(pSchema);
return pResult;

View File

@ -133,7 +133,10 @@ static int32_t mndProcessConsumerRecoverMsg(SRpcMsg *pMsg) {
SMnode *pMnode = pMsg->info.node;
SMqConsumerRecoverMsg *pRecoverMsg = pMsg->pCont;
SMqConsumerObj *pConsumer = mndAcquireConsumer(pMnode, pRecoverMsg->consumerId);
ASSERT(pConsumer);
if (pConsumer == NULL) {
mError("cannot find consumer %" PRId64 " when processing consumer recover msg", pRecoverMsg->consumerId);
return -1;
}
mInfo("receive consumer recover msg, consumer id %" PRId64 ", status %s", pRecoverMsg->consumerId,
mndConsumerStatusName(pConsumer->status));
@ -381,8 +384,6 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
return -1;
}
ASSERT(strcmp(req.cgroup, pConsumer->cgroup) == 0);
atomic_store_32(&pConsumer->hbStatus, 0);
// 1. check consumer status
@ -428,9 +429,8 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
for (int32_t i = 0; i < numOfTopics; i++) {
char *topic = taosArrayGetP(pConsumer->currentTopics, i);
SMqSubscribeObj *pSub = mndAcquireSubscribe(pMnode, pConsumer->cgroup, topic);
// txn guarantees pSub is created
ASSERT(pSub);
taosRLockLatch(&pSub->lock);
SMqSubTopicEp topicEp = {0};
@ -438,7 +438,6 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
// 2.1 fetch topic schema
SMqTopicObj *pTopic = mndAcquireTopic(pMnode, topic);
ASSERT(pTopic);
taosRLockLatch(&pTopic->lock);
tstrncpy(topicEp.db, pTopic->db, TSDB_DB_FNAME_LEN);
topicEp.schema.nCols = pTopic->schema.nCols;
@ -559,6 +558,10 @@ static int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
goto SUBSCRIBE_OVER;
}
if (mndCheckTopicPrivilege(pMnode, pMsg->info.conn.user, MND_OPER_SUBSCRIBE, pTopic) != 0) {
goto SUBSCRIBE_OVER;
}
mndReleaseTopic(pMnode, pTopic);
}
@ -775,8 +778,8 @@ static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer,
taosWLockLatch(&pOldConsumer->lock);
if (pNewConsumer->updateType == CONSUMER_UPDATE__MODIFY) {
ASSERT(taosArrayGetSize(pOldConsumer->rebNewTopics) == 0);
ASSERT(taosArrayGetSize(pOldConsumer->rebRemovedTopics) == 0);
/*A(taosArrayGetSize(pOldConsumer->rebNewTopics) == 0);*/
/*A(taosArrayGetSize(pOldConsumer->rebRemovedTopics) == 0);*/
if (taosArrayGetSize(pNewConsumer->rebNewTopics) == 0 && taosArrayGetSize(pNewConsumer->rebRemovedTopics) == 0) {
pOldConsumer->status = MQ_CONSUMER_STATUS__READY;
@ -798,8 +801,8 @@ static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer,
pOldConsumer->status = MQ_CONSUMER_STATUS__MODIFY;
}
} else if (pNewConsumer->updateType == CONSUMER_UPDATE__LOST) {
ASSERT(taosArrayGetSize(pOldConsumer->rebNewTopics) == 0);
ASSERT(taosArrayGetSize(pOldConsumer->rebRemovedTopics) == 0);
/*A(taosArrayGetSize(pOldConsumer->rebNewTopics) == 0);*/
/*A(taosArrayGetSize(pOldConsumer->rebRemovedTopics) == 0);*/
int32_t sz = taosArrayGetSize(pOldConsumer->currentTopics);
/*pOldConsumer->rebRemovedTopics = taosArrayInit(sz, sizeof(void *));*/
@ -812,8 +815,8 @@ static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer,
pOldConsumer->status = MQ_CONSUMER_STATUS__LOST;
} else if (pNewConsumer->updateType == CONSUMER_UPDATE__RECOVER) {
ASSERT(taosArrayGetSize(pOldConsumer->currentTopics) == 0);
ASSERT(taosArrayGetSize(pOldConsumer->rebNewTopics) == 0);
/*A(taosArrayGetSize(pOldConsumer->currentTopics) == 0);*/
/*A(taosArrayGetSize(pOldConsumer->rebNewTopics) == 0);*/
int32_t sz = taosArrayGetSize(pOldConsumer->assignedTopics);
for (int32_t i = 0; i < sz; i++) {
@ -830,15 +833,15 @@ static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer,
pOldConsumer->rebalanceTime = pNewConsumer->upTime;
} else if (pNewConsumer->updateType == CONSUMER_UPDATE__ADD) {
ASSERT(taosArrayGetSize(pNewConsumer->rebNewTopics) == 1);
ASSERT(taosArrayGetSize(pNewConsumer->rebRemovedTopics) == 0);
/*A(taosArrayGetSize(pNewConsumer->rebNewTopics) == 1);*/
/*A(taosArrayGetSize(pNewConsumer->rebRemovedTopics) == 0);*/
char *addedTopic = strdup(taosArrayGetP(pNewConsumer->rebNewTopics, 0));
// not exist in current topic
#if 1
#if 0
for (int32_t i = 0; i < taosArrayGetSize(pOldConsumer->currentTopics); i++) {
char *topic = taosArrayGetP(pOldConsumer->currentTopics, i);
ASSERT(strcmp(topic, addedTopic) != 0);
A(strcmp(topic, addedTopic) != 0);
}
#endif
@ -879,15 +882,15 @@ static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer,
atomic_add_fetch_32(&pOldConsumer->epoch, 1);
} else if (pNewConsumer->updateType == CONSUMER_UPDATE__REMOVE) {
ASSERT(taosArrayGetSize(pNewConsumer->rebNewTopics) == 0);
ASSERT(taosArrayGetSize(pNewConsumer->rebRemovedTopics) == 1);
/*A(taosArrayGetSize(pNewConsumer->rebNewTopics) == 0);*/
/*A(taosArrayGetSize(pNewConsumer->rebRemovedTopics) == 1);*/
char *removedTopic = taosArrayGetP(pNewConsumer->rebRemovedTopics, 0);
// not exist in new topic
#if 1
#if 0
for (int32_t i = 0; i < taosArrayGetSize(pOldConsumer->rebNewTopics); i++) {
char *topic = taosArrayGetP(pOldConsumer->rebNewTopics, i);
ASSERT(strcmp(topic, removedTopic) != 0);
A(strcmp(topic, removedTopic) != 0);
}
#endif
@ -913,7 +916,7 @@ static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer,
}
}
// must find the topic
ASSERT(i < sz);
/*A(i < sz);*/
// set status
if (taosArrayGetSize(pOldConsumer->rebNewTopics) == 0 && taosArrayGetSize(pOldConsumer->rebRemovedTopics) == 0) {

View File

@ -115,13 +115,11 @@ int32_t mndAddDispatcherToInnerTask(SMnode* pMnode, SStreamObj* pStream, SStream
if (pStream->fixedSinkVgId == 0) {
SDbObj* pDb = mndAcquireDb(pMnode, pStream->targetDb);
ASSERT(pDb);
if (pDb->cfg.numOfVgroups > 1) {
isShuffle = true;
pTask->outputType = TASK_OUTPUT__SHUFFLE_DISPATCH;
pTask->dispatchMsgType = TDMT_STREAM_TASK_DISPATCH;
if (mndExtractDbInfo(pMnode, pDb, &pTask->shuffleDispatcher.dbInfo, NULL) < 0) {
ASSERT(0);
return -1;
}
}
@ -140,9 +138,7 @@ int32_t mndAddDispatcherToInnerTask(SMnode* pMnode, SStreamObj* pStream, SStream
for (int32_t j = 0; j < sinkLvSize; j++) {
SStreamTask* pLastLevelTask = taosArrayGetP(sinkLv, j);
if (pLastLevelTask->nodeId == pVgInfo->vgId) {
ASSERT(pVgInfo->vgId > 0);
pVgInfo->taskId = pLastLevelTask->taskId;
ASSERT(pVgInfo->taskId != 0);
break;
}
}
@ -152,7 +148,6 @@ int32_t mndAddDispatcherToInnerTask(SMnode* pMnode, SStreamObj* pStream, SStream
pTask->dispatchMsgType = TDMT_STREAM_TASK_DISPATCH;
SArray* pArray = taosArrayGetP(pStream->tasks, 0);
// one sink only
ASSERT(taosArrayGetSize(pArray) == 1);
SStreamTask* lastLevelTask = taosArrayGetP(pArray, 0);
pTask->fixedEpDispatcher.taskId = lastLevelTask->taskId;
pTask->fixedEpDispatcher.nodeId = lastLevelTask->nodeId;
@ -170,7 +165,6 @@ int32_t mndAssignTaskToVg(SMnode* pMnode, SStreamTask* pTask, SSubplan* plan, co
plan->execNode.epSet = pTask->epSet;
if (qSubPlanToString(plan, &pTask->exec.qmsg, &msgLen) < 0) {
ASSERT(0);
terrno = TSDB_CODE_QRY_INVALID_INPUT;
return -1;
}
@ -195,7 +189,6 @@ int32_t mndAssignTaskToSnode(SMnode* pMnode, SStreamTask* pTask, SSubplan* plan,
plan->execNode.epSet = pTask->epSet;
if (qSubPlanToString(plan, &pTask->exec.qmsg, &msgLen) < 0) {
ASSERT(0);
terrno = TSDB_CODE_QRY_INVALID_INPUT;
return -1;
}
@ -222,8 +215,6 @@ int32_t mndAddShuffleSinkTasksToStream(SMnode* pMnode, SStreamObj* pStream) {
void* pIter = NULL;
SArray* tasks = taosArrayGetP(pStream->tasks, 0);
ASSERT(taosArrayGetSize(pStream->tasks) == 1);
while (1) {
SVgObj* pVgroup = NULL;
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void**)&pVgroup);
@ -257,7 +248,10 @@ int32_t mndAddShuffleSinkTasksToStream(SMnode* pMnode, SStreamObj* pStream) {
pTask->tbSink.stbUid = pStream->targetStbUid;
memcpy(pTask->tbSink.stbFullName, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN);
pTask->tbSink.pSchemaWrapper = tCloneSSchemaWrapper(&pStream->outputSchema);
ASSERT(pTask->tbSink.pSchemaWrapper);
if (pTask->tbSink.pSchemaWrapper == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
}
sdbRelease(pSdb, pVgroup);
}
@ -265,7 +259,6 @@ int32_t mndAddShuffleSinkTasksToStream(SMnode* pMnode, SStreamObj* pStream) {
}
int32_t mndAddFixedSinkTaskToStream(SMnode* pMnode, SStreamObj* pStream) {
ASSERT(pStream->fixedSinkVgId != 0);
SArray* tasks = taosArrayGetP(pStream->tasks, 0);
SStreamTask* pTask = tNewSStreamTask(pStream->uid);
if (pTask == NULL) {
@ -275,8 +268,6 @@ int32_t mndAddFixedSinkTaskToStream(SMnode* pMnode, SStreamObj* pStream) {
pTask->fillHistory = pStream->fillHistory;
mndAddTaskToTaskSet(tasks, pTask);
ASSERT(pStream->fixedSinkVg.vgId == pStream->fixedSinkVgId);
pTask->nodeId = pStream->fixedSinkVgId;
#if 0
SVgObj* pVgroup = mndAcquireVgroup(pMnode, pStream->fixedSinkVgId);
@ -311,13 +302,16 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
return -1;
}
int32_t planTotLevel = LIST_LENGTH(pPlan->pSubplans);
ASSERT(planTotLevel <= 2);
pStream->tasks = taosArrayInit(planTotLevel, sizeof(void*));
bool hasExtraSink = false;
bool externalTargetDB = strcmp(pStream->sourceDb, pStream->targetDb) != 0;
SDbObj* pDbObj = mndAcquireDb(pMnode, pStream->targetDb);
ASSERT(pDbObj != NULL);
if (pDbObj == NULL) {
terrno = TSDB_CODE_QRY_INVALID_INPUT;
return -1;
}
bool multiTarget = pDbObj->cfg.numOfVgroups > 1;
sdbRelease(pSdb, pDbObj);
@ -351,7 +345,10 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
SNodeListNode* inner = (SNodeListNode*)nodesListGetNode(pPlan->pSubplans, 0);
SSubplan* plan = (SSubplan*)nodesListGetNode(inner->pNodeList, 0);
ASSERT(plan->subplanType == SUBPLAN_TYPE_MERGE);
if (plan->subplanType != SUBPLAN_TYPE_MERGE) {
terrno = TSDB_CODE_QRY_INVALID_INPUT;
return -1;
}
pInnerTask = tNewSStreamTask(pStream->uid);
if (pInnerTask == NULL) {
@ -409,7 +406,10 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
SNodeListNode* inner = (SNodeListNode*)nodesListGetNode(pPlan->pSubplans, 1);
SSubplan* plan = (SSubplan*)nodesListGetNode(inner->pNodeList, 0);
ASSERT(plan->subplanType == SUBPLAN_TYPE_SCAN);
if (plan->subplanType != SUBPLAN_TYPE_SCAN) {
terrno = TSDB_CODE_QRY_INVALID_INPUT;
return -1;
}
void* pIter = NULL;
while (1) {
@ -471,9 +471,15 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
taosArrayPush(pStream->tasks, &taskOneLevel);
SNodeListNode* inner = (SNodeListNode*)nodesListGetNode(pPlan->pSubplans, 0);
ASSERT(LIST_LENGTH(inner->pNodeList) == 1);
if (LIST_LENGTH(inner->pNodeList) != 1) {
terrno = TSDB_CODE_QRY_INVALID_INPUT;
return -1;
}
SSubplan* plan = (SSubplan*)nodesListGetNode(inner->pNodeList, 0);
ASSERT(plan->subplanType == SUBPLAN_TYPE_SCAN);
if (plan->subplanType != SUBPLAN_TYPE_SCAN) {
terrno = TSDB_CODE_QRY_INVALID_INPUT;
return -1;
}
void* pIter = NULL;
while (1) {
@ -550,9 +556,6 @@ int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscrib
plan = (SSubplan*)nodesListGetNode(inner->pNodeList, 0);
}
ASSERT(pSub->unassignedVgs);
ASSERT(taosHashGetSize(pSub->consumerHash) == 0);
void* pIter = NULL;
while (1) {
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void**)&pVgroup);
@ -590,10 +593,6 @@ int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscrib
sdbRelease(pSdb, pVgroup);
}
ASSERT(pSub->unassignedVgs->size > 0);
ASSERT(taosHashGetSize(pSub->consumerHash) == 0);
qDestroyQueryPlan(pPlan);
return 0;

View File

@ -326,13 +326,11 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj,
// deserialize ast
if (nodesStringToNode(pObj->ast, &pAst) < 0) {
/*ASSERT(0);*/
goto FAIL;
}
// extract output schema from ast
if (qExtractResultSchema(pAst, (int32_t *)&pObj->outputSchema.nCols, &pObj->outputSchema.pSchema) != 0) {
/*ASSERT(0);*/
goto FAIL;
}
@ -347,13 +345,11 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj,
// using ast and param to build physical plan
if (qCreateQueryPlan(&cxt, &pPlan, NULL) < 0) {
/*ASSERT(0);*/
goto FAIL;
}
// save physcial plan
if (nodesNodeToString((SNode *)pPlan, false, &pObj->physicalPlan, NULL) != 0) {
/*ASSERT(0);*/
goto FAIL;
}
@ -361,7 +357,7 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj,
if (pCreate->numOfTags) {
pObj->tagSchema.pSchema = taosMemoryCalloc(pCreate->numOfTags, sizeof(SSchema));
}
ASSERT(pCreate->numOfTags == taosArrayGetSize(pCreate->pTags));
/*A(pCreate->numOfTags == taosArrayGetSize(pCreate->pTags));*/
for (int32_t i = 0; i < pCreate->numOfTags; i++) {
SField *pField = taosArrayGet(pCreate->pTags, i);
pObj->tagSchema.pSchema[i].colId = pObj->outputSchema.nCols + i + 1;
@ -378,9 +374,6 @@ FAIL:
}
int32_t mndPersistTaskDeployReq(STrans *pTrans, const SStreamTask *pTask) {
if (pTask->taskLevel == TASK_LEVEL__AGG) {
ASSERT(taosArrayGetSize(pTask->childEpInfo) != 0);
}
SEncoder encoder;
tEncoderInit(&encoder, NULL, 0);
tEncodeSStreamTask(&encoder, pTask);
@ -545,8 +538,6 @@ _OVER:
}
static int32_t mndPersistTaskDropReq(STrans *pTrans, SStreamTask *pTask) {
ASSERT(pTask->nodeId != 0);
// vnode
/*if (pTask->nodeId > 0) {*/
SVDropStreamTaskReq *pReq = taosMemoryCalloc(1, sizeof(SVDropStreamTaskReq));
@ -646,7 +637,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
// create stb for stream
if (mndCreateStbForStream(pMnode, pTrans, &streamObj, pReq->info.conn.user) < 0) {
if (createStreamReq.createStb == STREAM_CREATE_STABLE_TRUE && mndCreateStbForStream(pMnode, pTrans, &streamObj, pReq->info.conn.user) < 0) {
mError("trans:%d, failed to create stb for stream %s since %s", pTrans->id, createStreamReq.name, terrstr());
mndTransDrop(pTrans);
goto _OVER;
@ -808,10 +799,9 @@ static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq) {
int32_t sz = taosArrayGetSize(pLevel);
for (int32_t j = 0; j < sz; j++) {
SStreamTask *pTask = taosArrayGetP(pLevel, j);
ASSERT(pTask->nodeId > 0);
/*A(pTask->nodeId > 0);*/
SVgObj *pVgObj = mndAcquireVgroup(pMnode, pTask->nodeId);
if (pVgObj == NULL) {
ASSERT(0);
taosRUnLockLatch(&pStream->lock);
mndReleaseStream(pMnode, pStream);
mndTransDrop(pTrans);
@ -871,7 +861,6 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
SMDropStreamReq dropReq = {0};
if (tDeserializeSMDropStreamReq(pReq->pCont, pReq->contLen, &dropReq) < 0) {
ASSERT(0);
terrno = TSDB_CODE_INVALID_MSG;
return -1;
}

View File

@ -699,14 +699,9 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) {
sdbRelease(pSdb, pConsumer);
}
#if 0
if (pTopic->refConsumerCnt != 0) {
mndReleaseTopic(pMnode, pTopic);
terrno = TSDB_CODE_MND_TOPIC_SUBSCRIBED;
mError("topic:%s, failed to drop since %s", dropReq.name, terrstr());
if (mndCheckDbPrivilegeByName(pMnode, pReq->info.conn.user, MND_OPER_READ_DB, pTopic->db) != 0) {
return -1;
}
#endif
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq, "drop-topic");
if (pTrans == NULL) {

View File

@ -177,6 +177,7 @@ void tsdbReaderClose(STsdbReader *pReader);
bool tsdbNextDataBlock(STsdbReader *pReader);
void tsdbRetrieveDataBlockInfo(const STsdbReader *pReader, int32_t *rows, uint64_t *uid, STimeWindow *pWindow);
int32_t tsdbRetrieveDatablockSMA(STsdbReader *pReader, SSDataBlock *pDataBlock, bool *allHave);
void tsdbReleaseDataBlock(STsdbReader *pReader);
SSDataBlock *tsdbRetrieveDataBlock(STsdbReader *pTsdbReadHandle, SArray *pColumnIdList);
int32_t tsdbReaderReset(STsdbReader *pReader, SQueryTableDataCond *pCond);
int32_t tsdbGetFileBlocksDistInfo(STsdbReader *pReader, STableBlockDistInfo *pTableBlockInfo);
@ -223,11 +224,19 @@ typedef struct SSnapContext {
} SSnapContext;
typedef struct STqReader {
int64_t ver;
const SSubmitReq *pMsg;
SSubmitBlk *pBlock;
SSubmitMsgIter msgIter;
SSubmitBlkIter blkIter;
// const SSubmitReq *pMsg;
// SSubmitBlk *pBlock;
// SSubmitMsgIter msgIter;
// SSubmitBlkIter blkIter;
int64_t ver;
SPackedData msg2;
int8_t setMsg;
SSubmitReq2 submit;
int32_t nextBlk;
int64_t lastBlkUid;
SWalReader *pWalReader;
@ -252,11 +261,14 @@ int32_t tqReaderRemoveTbUidList(STqReader *pReader, const SArray *tbUidList);
int32_t tqSeekVer(STqReader *pReader, int64_t ver);
int32_t tqNextBlock(STqReader *pReader, SFetchRet *ret);
int32_t tqReaderSetDataMsg(STqReader *pReader, const SSubmitReq *pMsg, int64_t ver);
bool tqNextDataBlock(STqReader *pReader);
bool tqNextDataBlockFilterOut(STqReader *pReader, SHashObj *filterOutUids);
int32_t tqRetrieveDataBlock(SSDataBlock *pBlock, STqReader *pReader);
int32_t tqRetrieveTaosxBlock(STqReader *pReader, SArray *blocks, SArray *schemas);
int32_t tqReaderSetSubmitReq2(STqReader *pReader, void *msgStr, int32_t msgLen, int64_t ver);
// int32_t tqReaderSetDataMsg(STqReader *pReader, const SSubmitReq *pMsg, int64_t ver);
bool tqNextDataBlock2(STqReader *pReader);
bool tqNextDataBlockFilterOut2(STqReader *pReader, SHashObj *filterOutUids);
int32_t tqRetrieveDataBlock2(SSDataBlock *pBlock, STqReader *pReader);
int32_t tqRetrieveTaosxBlock2(STqReader *pReader, SArray *blocks, SArray *schemas);
// int32_t tqRetrieveDataBlock(SSDataBlock *pBlock, STqReader *pReader);
// int32_t tqRetrieveTaosxBlock(STqReader *pReader, SArray *blocks, SArray *schemas);
int32_t vnodeEnqueueStreamMsg(SVnode *pVnode, SRpcMsg *pMsg);

View File

@ -44,7 +44,6 @@ typedef struct SRSmaInfoItem SRSmaInfoItem;
typedef struct SRSmaFS SRSmaFS;
typedef struct SQTaskFile SQTaskFile;
typedef struct SQTaskFReader SQTaskFReader;
typedef struct SQTaskFWriter SQTaskFWriter;
struct SSmaEnv {
SRWLatch lock;
@ -85,22 +84,20 @@ struct STSmaStat {
struct SQTaskFile {
volatile int32_t nRef;
int32_t padding;
int8_t level;
int64_t suid;
int64_t version;
int64_t size;
int64_t mtime;
};
struct SQTaskFReader {
SSma *pSma;
int8_t level;
int64_t suid;
int64_t version;
TdFilePtr pReadH;
};
struct SQTaskFWriter {
SSma *pSma;
int64_t version;
TdFilePtr pWriteH;
char *fname;
};
struct SRSmaFS {
SArray *aQTaskInf; // array of SQTaskFile
@ -214,85 +211,40 @@ static FORCE_INLINE void tdUnRefSmaStat(SSma *pSma, SSmaStat *pStat) {
// rsma
void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree);
int32_t tdRSmaFSOpen(SSma *pSma, int64_t version);
int32_t tdRSmaFSOpen(SSma *pSma, int64_t version, int8_t rollback);
void tdRSmaFSClose(SRSmaFS *fs);
int32_t tdRSmaFSRef(SSma *pSma, SRSmaStat *pStat, int64_t version);
void tdRSmaFSUnRef(SSma *pSma, SRSmaStat *pStat, int64_t version);
int64_t tdRSmaFSMaxVer(SSma *pSma, SRSmaStat *pStat);
int32_t tdRSmaFSUpsertQTaskFile(SRSmaFS *pFS, SQTaskFile *qTaskFile);
int32_t tdRSmaRestore(SSma *pSma, int8_t type, int64_t committedVer);
int32_t tdRSmaFSPrepareCommit(SSma *pSma, SRSmaFS *pFSNew);
int32_t tdRSmaFSCommit(SSma *pSma);
int32_t tdRSmaFSFinishCommit(SSma *pSma);
int32_t tdRSmaFSCopy(SSma *pSma, SRSmaFS *pFS);
int32_t tdRSmaFSTakeSnapshot(SSma *pSma, SRSmaFS *pFS);
int32_t tdRSmaFSRef(SSma *pSma, SRSmaFS *pFS);
void tdRSmaFSUnRef(SSma *pSma, SRSmaFS *pFS);
int32_t tdRSmaFSUpsertQTaskFile(SSma *pSma, SRSmaFS *pFS, SQTaskFile *qTaskFile, int32_t nSize);
int32_t tdRSmaFSRollback(SSma *pSma);
int32_t tdRSmaRestore(SSma *pSma, int8_t type, int64_t committedVer, int8_t rollback);
int32_t tdRSmaProcessCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, const char *tbName);
int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type);
int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash);
int32_t tdRSmaProcessRestoreImpl(SSma *pSma, int8_t type, int64_t qtaskFileVer);
void tdRSmaQTaskInfoGetFileName(int32_t vgId, int64_t version, char *outputName);
void tdRSmaQTaskInfoGetFullName(int32_t vgId, int64_t version, const char *path, char *outputName);
int32_t tdRSmaProcessRestoreImpl(SSma *pSma, int8_t type, int64_t qtaskFileVer, int8_t rollback);
void tdRSmaQTaskInfoGetFileName(int32_t vgId, int64_t suid, int8_t level, int64_t version, char *outputName);
void tdRSmaQTaskInfoGetFullName(int32_t vgId, int64_t suid, int8_t level, int64_t version, const char *path,
char *outputName);
void tdRSmaQTaskInfoGetFullPath(int32_t vgId, int8_t level, const char *path, char *outputName);
void tdRSmaQTaskInfoGetFullPathEx(int32_t vgId, tb_uid_t suid, int8_t level, const char *path, char *outputName);
static FORCE_INLINE void tdRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo) {
int32_t ref = T_REF_INC(pRSmaInfo);
smaDebug("vgId:%d, ref rsma info:%p, val:%d", SMA_VID(pSma), pRSmaInfo, ref);
smaTrace("vgId:%d, ref rsma info:%p, val:%d", SMA_VID(pSma), pRSmaInfo, ref);
}
static FORCE_INLINE void tdUnRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo) {
int32_t ref = T_REF_DEC(pRSmaInfo);
smaDebug("vgId:%d, unref rsma info:%p, val:%d", SMA_VID(pSma), pRSmaInfo, ref);
smaTrace("vgId:%d, unref rsma info:%p, val:%d", SMA_VID(pSma), pRSmaInfo, ref);
}
// smaFileUtil ================
#define TD_FILE_HEAD_SIZE 512
typedef struct STFInfo STFInfo;
typedef struct STFile STFile;
struct STFInfo {
// common fields
uint32_t magic;
uint32_t ftype;
uint32_t fver;
int64_t fsize;
};
enum {
TD_FTYPE_RSMA_QTASKINFO = 0,
};
#if 0
struct STFile {
uint8_t state;
STFInfo info;
char *fname;
TdFilePtr pFile;
};
#define TD_TFILE_PFILE(tf) ((tf)->pFile)
#define TD_TFILE_OPENED(tf) (TD_TFILE_PFILE(tf) != NULL)
#define TD_TFILE_FULL_NAME(tf) ((tf)->fname)
#define TD_TFILE_OPENED(tf) (TD_TFILE_PFILE(tf) != NULL)
#define TD_TFILE_CLOSED(tf) (!TD_TFILE_OPENED(tf))
#define TD_TFILE_SET_CLOSED(f) (TD_TFILE_PFILE(f) = NULL)
#define TD_TFILE_SET_STATE(tf, s) ((tf)->state = (s))
int32_t tdInitTFile(STFile *pTFile, const char *dname, const char *fname);
int32_t tdCreateTFile(STFile *pTFile, bool updateHeader, int8_t fType);
int32_t tdOpenTFile(STFile *pTFile, int flags);
int64_t tdReadTFile(STFile *pTFile, void *buf, int64_t nbyte);
int64_t tdSeekTFile(STFile *pTFile, int64_t offset, int whence);
int64_t tdWriteTFile(STFile *pTFile, void *buf, int64_t nbyte);
int64_t tdAppendTFile(STFile *pTFile, void *buf, int64_t nbyte, int64_t *offset);
int64_t tdGetTFileSize(STFile *pTFile, int64_t *size);
int32_t tdRemoveTFile(STFile *pTFile);
int32_t tdLoadTFileHeader(STFile *pTFile, STFInfo *pInfo);
int32_t tdUpdateTFileHeader(STFile *pTFile);
void tdUpdateTFileMagic(STFile *pTFile, void *pCksm);
void tdCloseTFile(STFile *pTFile);
void tdDestroyTFile(STFile *pTFile);
#endif
void tdGetVndFileName(int32_t vgId, const char *pdname, const char *dname, const char *fname, int64_t version,
char *outputName);
void tdGetVndDirName(int32_t vgId, const char *pdname, const char *dname, bool endWithSep, char *outputName);
void tdRSmaGetFileName(int32_t vgId, const char *pdname, const char *dname, const char *fname, int64_t suid,
int8_t level, int64_t version, char *outputName);
void tdRSmaGetDirName(int32_t vgId, const char *pdname, const char *dname, bool endWithSep, char *outputName);
#ifdef __cplusplus
}

View File

@ -154,7 +154,8 @@ int32_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffs
int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** pHeadWithCkSum);
// tqExec
int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SSubmitReq* pReq, STaosxRsp* pRsp);
int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, STaosxRsp* pRsp);
// int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SSubmitReq* pReq, STaosxRsp* pRsp);
int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t numOfCols, int8_t precision);
int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqDataRsp* pRsp);
int32_t tqPushDataRsp(STQ* pTq, STqPushEntry* pPushEntry);
@ -181,8 +182,9 @@ int32_t tqOffsetDelete(STqOffsetStore* pStore, const char* subscribeKey)
int32_t tqOffsetCommitFile(STqOffsetStore* pStore);
// tqSink
// void tqSinkToTableMerge(SStreamTask* pTask, void* vnode, int64_t ver, void* data);
void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* data);
int32_t tqBuildDeleteReq(SVnode* pVnode, const char* stbFullName, const SSDataBlock* pDataBlock,
SBatchDeleteReq* deleteReq);
void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void* data);
// tqOffset
char* tqOffsetBuildFName(const char* path, int32_t fVer);

View File

@ -65,11 +65,15 @@ typedef struct SSmaInfo SSmaInfo;
typedef struct SBlockCol SBlockCol;
typedef struct SVersionRange SVersionRange;
typedef struct SLDataIter SLDataIter;
typedef struct SQueryNode SQueryNode;
typedef struct SDiskCol SDiskCol;
typedef struct SDiskData SDiskData;
typedef struct SDiskDataBuilder SDiskDataBuilder;
typedef struct SBlkInfo SBlkInfo;
#define TSDBROW_ROW_FMT ((int8_t)0x0)
#define TSDBROW_COL_FMT ((int8_t)0x1)
#define TSDB_FILE_DLMT ((uint32_t)0xF00AFA0F)
#define TSDB_MAX_SUBBLOCKS 8
#define TSDB_FHDR_SIZE 512
@ -102,26 +106,29 @@ static FORCE_INLINE int64_t tsdbLogicToFileSize(int64_t lSize, int32_t szPage) {
// tsdbUtil.c ==============================================================================================
// TSDBROW
#define TSDBROW_TS(ROW) (((ROW)->type == 0) ? (ROW)->pTSRow->ts : (ROW)->pBlockData->aTSKEY[(ROW)->iRow])
#define TSDBROW_VERSION(ROW) (((ROW)->type == 0) ? (ROW)->version : (ROW)->pBlockData->aVersion[(ROW)->iRow])
#define TSDBROW_SVERSION(ROW) TD_ROW_SVER((ROW)->pTSRow)
#define TSDBROW_KEY(ROW) ((TSDBKEY){.version = TSDBROW_VERSION(ROW), .ts = TSDBROW_TS(ROW)})
#define tsdbRowFromTSRow(VERSION, TSROW) ((TSDBROW){.type = 0, .version = (VERSION), .pTSRow = (TSROW)})
#define tsdbRowFromBlockData(BLOCKDATA, IROW) ((TSDBROW){.type = 1, .pBlockData = (BLOCKDATA), .iRow = (IROW)})
void tsdbRowGetColVal(TSDBROW *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal);
// int32_t tPutTSDBRow(uint8_t *p, TSDBROW *pRow);
#define TSDBROW_TS(ROW) (((ROW)->type == TSDBROW_ROW_FMT) ? (ROW)->pTSRow->ts : (ROW)->pBlockData->aTSKEY[(ROW)->iRow])
#define TSDBROW_VERSION(ROW) \
(((ROW)->type == TSDBROW_ROW_FMT) ? (ROW)->version : (ROW)->pBlockData->aVersion[(ROW)->iRow])
#define TSDBROW_SVERSION(ROW) ((ROW)->type == TSDBROW_ROW_FMT ? (ROW)->pTSRow->sver : -1)
#define TSDBROW_KEY(ROW) ((TSDBKEY){.version = TSDBROW_VERSION(ROW), .ts = TSDBROW_TS(ROW)})
#define tsdbRowFromTSRow(VERSION, TSROW) ((TSDBROW){.type = TSDBROW_ROW_FMT, .version = (VERSION), .pTSRow = (TSROW)})
#define tsdbRowFromBlockData(BLOCKDATA, IROW) \
((TSDBROW){.type = TSDBROW_COL_FMT, .pBlockData = (BLOCKDATA), .iRow = (IROW)})
void tsdbRowGetColVal(TSDBROW *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal);
int32_t tsdbRowCmprFn(const void *p1, const void *p2);
// STSDBRowIter
void tsdbRowIterInit(STSDBRowIter *pIter, TSDBROW *pRow, STSchema *pTSchema);
int32_t tsdbRowIterOpen(STSDBRowIter *pIter, TSDBROW *pRow, STSchema *pTSchema);
void tsdbRowClose(STSDBRowIter *pIter);
SColVal *tsdbRowIterNext(STSDBRowIter *pIter);
// SRowMerger
int32_t tRowMergerInit2(SRowMerger *pMerger, STSchema *pResTSchema, TSDBROW *pRow, STSchema *pTSchema);
int32_t tRowMergerAdd(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema);
int32_t tsdbRowMergerInit2(SRowMerger *pMerger, STSchema *pResTSchema, TSDBROW *pRow, STSchema *pTSchema);
int32_t tsdbRowMergerAdd(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema);
int32_t tRowMergerInit(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema);
void tRowMergerClear(SRowMerger *pMerger);
int32_t tRowMerge(SRowMerger *pMerger, TSDBROW *pRow);
int32_t tRowMergerGetRow(SRowMerger *pMerger, STSRow **ppRow);
int32_t tsdbRowMergerInit(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema);
void tsdbRowMergerClear(SRowMerger *pMerger);
int32_t tsdbRowMerge(SRowMerger *pMerger, TSDBROW *pRow);
int32_t tsdbRowMergerGetRow(SRowMerger *pMerger, SRow **ppRow);
// TABLEID
int32_t tTABLEIDCmprFn(const void *p1, const void *p2);
// TSDBKEY
@ -146,24 +153,22 @@ int32_t tGetBlockIdx(uint8_t *p, void *ph);
int32_t tCmprBlockIdx(void const *lhs, void const *rhs);
int32_t tCmprBlockL(void const *lhs, void const *rhs);
// SBlockData
#define tBlockDataFirstRow(PBLOCKDATA) tsdbRowFromBlockData(PBLOCKDATA, 0)
#define tBlockDataLastRow(PBLOCKDATA) tsdbRowFromBlockData(PBLOCKDATA, (PBLOCKDATA)->nRow - 1)
#define tBlockDataFirstKey(PBLOCKDATA) TSDBROW_KEY(&tBlockDataFirstRow(PBLOCKDATA))
#define tBlockDataLastKey(PBLOCKDATA) TSDBROW_KEY(&tBlockDataLastRow(PBLOCKDATA))
#define tBlockDataFirstRow(PBLOCKDATA) tsdbRowFromBlockData(PBLOCKDATA, 0)
#define tBlockDataLastRow(PBLOCKDATA) tsdbRowFromBlockData(PBLOCKDATA, (PBLOCKDATA)->nRow - 1)
#define tBlockDataFirstKey(PBLOCKDATA) TSDBROW_KEY(&tBlockDataFirstRow(PBLOCKDATA))
#define tBlockDataLastKey(PBLOCKDATA) TSDBROW_KEY(&tBlockDataLastRow(PBLOCKDATA))
#define tBlockDataGetColDataByIdx(PBLOCKDATA, IDX) (&(PBLOCKDATA)->aColData[IDX])
int32_t tBlockDataCreate(SBlockData *pBlockData);
void tBlockDataDestroy(SBlockData *pBlockData, int8_t deepClear);
int32_t tBlockDataInit(SBlockData *pBlockData, TABLEID *pId, STSchema *pTSchema, int16_t *aCid, int32_t nCid);
void tBlockDataReset(SBlockData *pBlockData);
int32_t tBlockDataAppendRow(SBlockData *pBlockData, TSDBROW *pRow, STSchema *pTSchema, int64_t uid);
void tBlockDataClear(SBlockData *pBlockData);
SColData *tBlockDataGetColDataByIdx(SBlockData *pBlockData, int32_t idx);
void tBlockDataGetColData(SBlockData *pBlockData, int16_t cid, SColData **ppColData);
int32_t tBlockDataMerge(SBlockData *pBlockData1, SBlockData *pBlockData2, SBlockData *pBlockData);
int32_t tBlockDataAddColData(SBlockData *pBlockData, SColData **ppColData);
int32_t tCmprBlockData(SBlockData *pBlockData, int8_t cmprAlg, uint8_t **ppOut, int32_t *szOut, uint8_t *aBuf[],
int32_t aBufN[]);
int32_t tDecmprBlockData(uint8_t *pIn, int32_t szIn, SBlockData *pBlockData, uint8_t *aBuf[]);
int32_t tBlockDataCreate(SBlockData *pBlockData);
void tBlockDataDestroy(SBlockData *pBlockData);
int32_t tBlockDataInit(SBlockData *pBlockData, TABLEID *pId, STSchema *pTSchema, int16_t *aCid, int32_t nCid);
void tBlockDataReset(SBlockData *pBlockData);
int32_t tBlockDataAppendRow(SBlockData *pBlockData, TSDBROW *pRow, STSchema *pTSchema, int64_t uid);
void tBlockDataClear(SBlockData *pBlockData);
void tBlockDataGetColData(SBlockData *pBlockData, int16_t cid, SColData **ppColData);
int32_t tCmprBlockData(SBlockData *pBlockData, int8_t cmprAlg, uint8_t **ppOut, int32_t *szOut, uint8_t *aBuf[],
int32_t aBufN[]);
int32_t tDecmprBlockData(uint8_t *pIn, int32_t szIn, SBlockData *pBlockData, uint8_t *aBuf[]);
// SDiskDataHdr
int32_t tPutDiskDataHdr(uint8_t *p, const SDiskDataHdr *pHdr);
int32_t tGetDiskDataHdr(uint8_t *p, void *ph);
@ -204,11 +209,13 @@ int32_t tsdbDecmprColData(uint8_t *pIn, SBlockCol *pBlockCol, int8_t cmprAlg, in
uint8_t **ppBuf);
// tsdbMemTable ==============================================================================================
// SMemTable
typedef int32_t (*_tsdb_reseek_func_t)(void *pQHandle);
int32_t tsdbMemTableCreate(STsdb *pTsdb, SMemTable **ppMemTable);
void tsdbMemTableDestroy(SMemTable *pMemTable);
STbData *tsdbGetTbDataFromMemTable(SMemTable *pMemTable, tb_uid_t suid, tb_uid_t uid);
void tsdbRefMemTable(SMemTable *pMemTable);
void tsdbUnrefMemTable(SMemTable *pMemTable);
int32_t tsdbRefMemTable(SMemTable *pMemTable, void *pQHandle, _tsdb_reseek_func_t reseek, SQueryNode **ppNode);
int32_t tsdbUnrefMemTable(SMemTable *pMemTable, SQueryNode *pNode);
SArray *tsdbMemTableGetTbDataArray(SMemTable *pMemTable);
// STbDataIter
int32_t tsdbTbDataIterCreate(STbData *pTbData, TSDBKEY *pFrom, int8_t backward, STbDataIter **ppIter);
@ -286,8 +293,8 @@ int32_t tsdbDelFReaderClose(SDelFReader **ppReader);
int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData);
int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx);
// tsdbRead.c ==============================================================================================
int32_t tsdbTakeReadSnap(STsdb *pTsdb, STsdbReadSnap **ppSnap, const char *id);
void tsdbUntakeReadSnap(STsdb *pTsdb, STsdbReadSnap *pSnap, const char *id);
int32_t tsdbTakeReadSnap(STsdbReader *pReader, _tsdb_reseek_func_t reseek, STsdbReadSnap **ppSnap);
void tsdbUntakeReadSnap(STsdbReader *pReader, STsdbReadSnap *pSnap);
// tsdbMerge.c ==============================================================================================
int32_t tsdbMerge(STsdb *pTsdb);
@ -335,10 +342,13 @@ struct SVersionRange {
typedef struct SMemSkipListNode SMemSkipListNode;
struct SMemSkipListNode {
int8_t level;
int8_t flag; // TSDBROW_ROW_FMT for row format, TSDBROW_COL_FMT for col format
int32_t iRow;
int64_t version;
STSRow *pTSRow;
void *pData;
SMemSkipListNode *forwards[0];
};
typedef struct SMemSkipList {
int64_t size;
uint32_t seed;
@ -359,11 +369,20 @@ struct STbData {
STbData *next;
};
struct SQueryNode {
SQueryNode *pNext;
SQueryNode **ppNext;
void *pQHandle;
_tsdb_reseek_func_t reseek;
};
struct SMemTable {
SRWLatch latch;
STsdb *pTsdb;
SVBufPool *pPool;
volatile int32_t nRef;
int64_t minVer;
int64_t maxVer;
TSKEY minKey;
TSKEY maxKey;
int64_t nRow;
@ -373,14 +392,15 @@ struct SMemTable {
int32_t nBucket;
STbData **aBucket;
};
SQueryNode qList;
};
struct TSDBROW {
int8_t type; // 0 for row from tsRow, 1 for row from block data
int8_t type; // TSDBROW_ROW_FMT for row from tsRow, TSDBROW_COL_FMT for row from block data
union {
struct {
int64_t version;
STSRow *pTSRow;
SRow *pTSRow;
};
struct {
SBlockData *pBlockData;
@ -466,14 +486,14 @@ struct SSttBlk {
// (SBlockData){.suid = suid, .uid = 0}: block data for N child tables in .last file
// (SBlockData){.suid = 0, .uid = uid}: block data for 1 normal table in .last/.data file
struct SBlockData {
int64_t suid; // 0 means normal table block data, otherwise child table block data
int64_t uid; // 0 means block data in .last file, otherwise in .data file
int32_t nRow; // number of rows
int64_t *aUid; // uids of each row, only exist in block data in .last file (uid == 0)
int64_t *aVersion; // versions of each row
TSKEY *aTSKEY; // timestamp of each row
int32_t nColData;
SArray *aColData; // SArray<SColData>
int64_t suid; // 0 means normal table block data, otherwise child table block data
int64_t uid; // 0 means block data in .last file, otherwise in .data file
int32_t nRow; // number of rows
int64_t *aUid; // uids of each row, only exist in block data in .last file (uid == 0)
int64_t *aVersion; // versions of each row
TSKEY *aTSKEY; // timestamp of each row
int32_t nColData;
SColData *aColData;
};
struct TABLEID {
@ -565,10 +585,14 @@ struct SDFileSet {
};
struct STSDBRowIter {
TSDBROW *pRow;
STSchema *pTSchema;
SColVal colVal;
int32_t i;
TSDBROW *pRow;
union {
SRowIter *pIter;
struct {
int32_t iColData;
SColVal cv;
};
};
};
struct SRowMerger {
STSchema *pTSchema;
@ -594,9 +618,11 @@ struct SDelFWriter {
};
struct STsdbReadSnap {
SMemTable *pMem;
SMemTable *pIMem;
STsdbFS fs;
SMemTable *pMem;
SQueryNode *pNode;
SMemTable *pIMem;
SQueryNode *pINode;
STsdbFS fs;
};
struct SDataFWriter {
@ -715,6 +741,9 @@ void *destroyLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo);
// tsdbCache ==============================================================================================
typedef struct SCacheRowsReader {
STsdb *pTsdb;
SVersionRange verRange;
TdThreadMutex readerMutex;
SVnode *pVnode;
STSchema *pSchema;
uint64_t uid;
@ -739,8 +768,8 @@ typedef struct {
int32_t tsdbOpenCache(STsdb *pTsdb);
void tsdbCloseCache(STsdb *pTsdb);
int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, STSRow *row, STsdb *pTsdb);
int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, STSRow *row, bool dup);
int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, TSDBROW *row, STsdb *pTsdb);
int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, TSDBROW *row, bool dup);
int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, SCacheRowsReader *pr, LRUHandle **h);
int32_t tsdbCacheGetLastrowH(SLRUCache *pCache, tb_uid_t uid, SCacheRowsReader *pr, LRUHandle **h);
int32_t tsdbCacheRelease(SLRUCache *pCache, LRUHandle *h);
@ -752,6 +781,8 @@ int32_t tsdbCacheDelete(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey);
void tsdbCacheSetCapacity(SVnode *pVnode, size_t capacity);
size_t tsdbCacheGetCapacity(SVnode *pVnode);
// int32_t tsdbCacheLastArray2Row(SArray *pLastArray, STSRow **ppRow, STSchema *pSchema);
// ========== inline functions ==========
static FORCE_INLINE int32_t tsdbKeyCmprFn(const void *p1, const void *p2) {
TSDBKEY *pKey1 = (TSDBKEY *)p1;
@ -793,8 +824,13 @@ static FORCE_INLINE TSDBROW *tsdbTbDataIterGet(STbDataIter *pIter) {
}
pIter->pRow = &pIter->row;
pIter->pRow->version = pIter->pNode->version;
pIter->pRow->pTSRow = pIter->pNode->pTSRow;
if (pIter->pNode->flag == TSDBROW_ROW_FMT) {
pIter->row = tsdbRowFromTSRow(pIter->pNode->version, pIter->pNode->pData);
} else if (pIter->pNode->flag == TSDBROW_COL_FMT) {
pIter->row = tsdbRowFromBlockData(pIter->pNode->pData, pIter->pNode->iRow);
} else {
ASSERT(0);
}
return pIter->pRow;
}
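// --- Usage sketch of the iterator path above (illustrative only). ---
// tsdbTbDataIterNext()/tsdbTbDataIterDestroy() are assumed companion calls that do
// not appear in this excerpt; tsdbTbDataIterCreate(), tsdbTbDataIterGet() and
// TSDBROW_KEY come from the declarations above. The getter now rebuilds the TSDBROW
// from the per-node flag, so TSDBROW_KEY works for both row and column formats.
static void scanMemRows(STbData *pTbData) {
  STbDataIter *pIter = NULL;
  if (tsdbTbDataIterCreate(pTbData, NULL, 0 /* forward */, &pIter) != 0) return;
  for (TSDBROW *pRow = tsdbTbDataIterGet(pIter); pRow != NULL;
       pRow = tsdbTbDataIterNext(pIter) ? tsdbTbDataIterGet(pIter) : NULL) {
    TSDBKEY key = TSDBROW_KEY(pRow);
    (void)key;  // key.ts / key.version available here
  }
  tsdbTbDataIterDestroy(pIter);  // assumed cleanup counterpart of ...IterCreate()
}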

View File

@ -87,9 +87,11 @@ typedef struct SCommitInfo SCommitInfo;
#define VNODE_RSMA1_DIR "rsma1"
#define VNODE_RSMA2_DIR "rsma2"
#define VND_INFO_FNAME "vnode.json"
#define VNODE_BUF_POOL_SEG 1 // TODO: change parameter here for sync/async commit
#define VND_INFO_FNAME "vnode.json"
// vnd.h
void* vnodeBufPoolMalloc(SVBufPool* pPool, int size);
void* vnodeBufPoolMallocAligned(SVBufPool* pPool, int size);
void vnodeBufPoolFree(SVBufPool* pPool, void* p);
@ -162,10 +164,9 @@ int32_t tsdbCommit(STsdb* pTsdb, SCommitInfo* pInfo);
int32_t tsdbFinishCommit(STsdb* pTsdb);
int32_t tsdbRollbackCommit(STsdb* pTsdb);
int32_t tsdbDoRetention(STsdb* pTsdb, int64_t now);
int tsdbScanAndConvertSubmitMsg(STsdb* pTsdb, SSubmitReq* pMsg);
int tsdbInsertData(STsdb* pTsdb, int64_t version, SSubmitReq* pMsg, SSubmitRsp* pRsp);
int32_t tsdbInsertTableData(STsdb* pTsdb, int64_t version, SSubmitMsgIter* pMsgIter, SSubmitBlk* pBlock,
SSubmitBlkRsp* pRsp);
int tsdbScanAndConvertSubmitMsg(STsdb* pTsdb, SSubmitReq2* pMsg);
int tsdbInsertData(STsdb* pTsdb, int64_t version, SSubmitReq2* pMsg, SSubmitRsp2* pRsp);
int32_t tsdbInsertTableData(STsdb* pTsdb, int64_t version, SSubmitTbData* pSubmitTbData, int32_t* affectedRows);
int32_t tsdbDeleteTableData(STsdb* pTsdb, int64_t version, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKEY eKey);
int32_t tsdbSetKeepCfg(STsdb* pTsdb, STsdbCfg* pCfg);
@ -190,7 +191,7 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t version, char* msg, int32_t msg
int32_t tqProcessTaskDropReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
int32_t tqProcessStreamTaskCheckReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessStreamTaskCheckRsp(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
int32_t tqProcessSubmitReq(STQ* pTq, SSubmitReq* data, int64_t ver);
int32_t tqProcessSubmitReq(STQ* pTq, SPackedData submit);
int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver);
int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec);
@ -203,19 +204,12 @@ int32_t tqProcessTaskRecoverFinishReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRecoverFinishRsp(STQ* pTq, SRpcMsg* pMsg);
int32_t tqCheckLogInWal(STQ* pTq, int64_t version);
SSubmitReq* tqBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchema* pSchema,
SSchemaWrapper* pTagSchemaWrapper, bool createTb, int64_t suid, const char* stbFullName,
SBatchDeleteReq* pDeleteReq);
// sma
int32_t smaInit();
void smaCleanUp();
int32_t smaOpen(SVnode* pVnode, int8_t rollback);
int32_t smaClose(SSma* pSma);
int32_t smaBegin(SSma* pSma);
int32_t smaSyncPreCommit(SSma* pSma);
int32_t smaSyncCommit(SSma* pSma);
int32_t smaSyncPostCommit(SSma* pSma);
int32_t smaPrepareAsyncCommit(SSma* pSma);
int32_t smaCommit(SSma* pSma, SCommitInfo* pInfo);
int32_t smaFinishCommit(SSma* pSma);
@ -226,11 +220,10 @@ int32_t tdProcessTSmaCreate(SSma* pSma, int64_t version, const char* msg);
int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg);
int32_t tdProcessRSmaCreate(SSma* pSma, SVCreateStbReq* pReq);
int32_t tdProcessRSmaSubmit(SSma* pSma, void* pMsg, int32_t inputType);
int32_t tdProcessRSmaSubmit(SSma* pSma, int64_t version, void* pReq, void* pMsg, int32_t len, int32_t inputType);
int32_t tdProcessRSmaDrop(SSma* pSma, SVDropStbReq* pReq);
int32_t tdFetchTbUidList(SSma* pSma, STbUidStore** ppStore, tb_uid_t suid, tb_uid_t uid);
int32_t tdUpdateTbUidList(SSma* pSma, STbUidStore* pUidStore, bool isAdd);
void tdUidStoreDestory(STbUidStore* pStore);
void* tdUidStoreFree(STbUidStore* pStore);
// SMetaSnapReader ========================================
@ -277,6 +270,7 @@ int32_t rsmaSnapRead(SRSmaSnapReader* pReader, uint8_t** ppData);
// SRSmaSnapWriter ========================================
int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRSmaSnapWriter** ppWriter);
int32_t rsmaSnapWrite(SRSmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
int32_t rsmaSnapWriterPrepareClose(SRSmaSnapWriter* pWriter);
int32_t rsmaSnapWriterClose(SRSmaSnapWriter** ppWriter, int8_t rollback);
typedef struct {
@ -420,6 +414,7 @@ enum {
struct SSnapDataHdr {
int8_t type;
int8_t flag;
int64_t index;
int64_t size;
uint8_t data[];

View File

@ -55,8 +55,8 @@ struct SMetaCache {
// query cache
struct STagFilterResCache {
TdThreadMutex lock;
SHashObj* pTableEntry;
SLRUCache* pUidResCache;
SHashObj* pTableEntry;
SLRUCache* pUidResCache;
} sTagFilterResCache;
};
@ -210,7 +210,7 @@ _exit:
int32_t metaCacheUpsert(SMeta* pMeta, SMetaInfo* pInfo) {
int32_t code = 0;
// ASSERT(metaIsWLocked(pMeta));
// meta must be write-locked before calling this function.
// search
SMetaCache* pCache = pMeta->pCache;
@ -221,7 +221,10 @@ int32_t metaCacheUpsert(SMeta* pMeta, SMetaInfo* pInfo) {
}
if (*ppEntry) { // update
ASSERT(pInfo->suid == (*ppEntry)->info.suid);
if (pInfo->suid != (*ppEntry)->info.suid) {
metaError("meta/cache: suid should be same as the one in cache.");
return TSDB_CODE_FAILED;
}
if (pInfo->version > (*ppEntry)->info.version) {
(*ppEntry)->info.version = pInfo->version;
(*ppEntry)->info.skmVer = pInfo->skmVer;
@ -340,7 +343,7 @@ _exit:
int32_t metaStatsCacheUpsert(SMeta* pMeta, SMetaStbStats* pInfo) {
int32_t code = 0;
// ASSERT(metaIsWLocked(pMeta));
// meta must be write-locked before calling this function.
// search
SMetaCache* pCache = pMeta->pCache;
@ -449,7 +452,11 @@ int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pK
// do some book mark work after acquiring the filter result from cache
STagFilterResEntry** pEntry = taosHashGet(pTableMap, &suid, sizeof(uint64_t));
ASSERT(pEntry != NULL);
if (NULL == pEntry) {
metaError("meta/cache: pEntry should not be NULL.");
return TSDB_CODE_FAILED;
}
*acquireRes = 1;
const char* p = taosLRUCacheValue(pCache, pHandle);
@ -494,7 +501,7 @@ int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pK
taosMemoryFree(*p1);
}
atomic_store_32(&(*pEntry)->qTimes, 0); // reset the query times
atomic_store_32(&(*pEntry)->qTimes, 0); // reset the query times
taosArrayDestroy(pInvalidRes);
taosThreadMutexUnlock(pLock);
@ -550,16 +557,19 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int
buf[0] = suid;
memcpy(&buf[1], pKey, keyLen);
ASSERT(sizeof(uint64_t) + keyLen == 24);
if (sizeof(uint64_t) + keyLen != 24) {
metaError("meta/cache: incorrect keyLen:%" PRId32 " length.", keyLen);
return TSDB_CODE_FAILED;
}
// add to cache.
taosLRUCacheInsert(pCache, buf, sizeof(uint64_t) + keyLen, pPayload, payloadLen, freePayload, NULL,
TAOS_LRU_PRIORITY_LOW);
int32_t ret = taosLRUCacheInsert(pCache, buf, sizeof(uint64_t) + keyLen, pPayload, payloadLen, freePayload, NULL,
TAOS_LRU_PRIORITY_LOW);
taosThreadMutexUnlock(pLock);
metaDebug("vgId:%d, suid:%" PRIu64 " list cache added into cache, total:%d, tables:%d", TD_VID(pMeta->pVnode), suid,
(int32_t)taosLRUCacheGetUsage(pCache), taosHashGetSize(pTableEntry));
metaDebug("vgId:%d, suid:%" PRIu64 " list cache added into cache, total:%d, tables:%d, ret:%d", TD_VID(pMeta->pVnode),
suid, (int32_t)taosLRUCacheGetUsage(pCache), taosHashGetSize(pTableEntry), ret);
return TSDB_CODE_SUCCESS;
}

View File

@ -51,7 +51,9 @@ int metaEncodeEntry(SEncoder *pCoder, const SMetaEntry *pME) {
} else if (pME->type == TSDB_TSMA_TABLE) {
if (tEncodeTSma(pCoder, pME->smaEntry.tsma) < 0) return -1;
} else {
ASSERT(0);
metaError("meta/entry: invalide table type: %" PRId8 " encode failed.", pME->type);
return -1;
}
tEndEncode(pCoder);

View File

@ -358,7 +358,10 @@ static int tagIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kL
return -1;
}
ASSERT(pTagIdxKey1->type == pTagIdxKey2->type);
if (pTagIdxKey1->type != pTagIdxKey2->type) {
metaError("meta/open: incorrect tag idx type.");
return TSDB_CODE_FAILED;
}
// check NULL, NULL is always the smallest
if (pTagIdxKey1->isNull && !pTagIdxKey2->isNull) {

View File

@ -19,7 +19,7 @@ void metaReaderInit(SMetaReader *pReader, SMeta *pMeta, int32_t flags) {
memset(pReader, 0, sizeof(*pReader));
pReader->flags = flags;
pReader->pMeta = pMeta;
if (!(flags & META_READER_NOLOCK)) {
if (pReader->pMeta && !(flags & META_READER_NOLOCK)) {
metaRLock(pMeta);
}
}
@ -152,7 +152,7 @@ bool metaIsTableExist(SMeta *pMeta, tb_uid_t uid) {
}
int metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid) {
SMeta *pMeta = pReader->pMeta;
SMeta *pMeta = pReader->pMeta;
int64_t version1;
// query uid.idx
@ -239,7 +239,6 @@ int metaGetTableSzNameByUid(void *meta, uint64_t uid, char *tbName) {
return 0;
}
int metaGetTableUidByName(void *meta, char *tbName, uint64_t *uid) {
int code = 0;
SMetaReader mr = {0};
@ -612,23 +611,14 @@ tb_uid_t metaStbCursorNext(SMStbCursor *pStbCur) {
}
STSchema *metaGetTbTSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, int lock) {
// SMetaReader mr = {0};
STSchema *pTSchema = NULL;
SSchemaWrapper *pSW = NULL;
STSchemaBuilder sb = {0};
SSchema *pSchema;
SSchema *pSchema = NULL;
pSW = metaGetTableSchema(pMeta, uid, sver, lock);
if (!pSW) return NULL;
tdInitTSchemaBuilder(&sb, pSW->version);
for (int i = 0; i < pSW->nCols; i++) {
pSchema = pSW->pSchema + i;
tdAddColToSchema(&sb, pSchema->type, pSchema->flags, pSchema->colId, pSchema->bytes);
}
pTSchema = tdGetSchemaFromBuilder(&sb);
tdDestroyTSchemaBuilder(&sb);
pTSchema = tBuildTSchema(pSW->pSchema, pSW->nCols, pSW->version);
taosMemoryFree(pSW->pSchema);
taosMemoryFree(pSW);
@ -662,7 +652,13 @@ int32_t metaGetTbTSchemaEx(SMeta *pMeta, tb_uid_t suid, tb_uid_t uid, int32_t sv
goto _exit;
}
ASSERT(c);
if (c == 0) {
metaULock(pMeta);
tdbTbcClose(pSkmDbC);
code = TSDB_CODE_FAILED;
metaError("meta/query: incorrect c: %" PRId32 ".", c);
goto _exit;
}
if (c < 0) {
tdbTbcMoveToPrev(pSkmDbC);
@ -686,7 +682,11 @@ int32_t metaGetTbTSchemaEx(SMeta *pMeta, tb_uid_t suid, tb_uid_t uid, int32_t sv
}
}
ASSERT(sver > 0);
if (sver <= 0) {
metaError("meta/query: incorrect sver: %" PRId32 ".", sver);
code = TSDB_CODE_FAILED;
goto _exit;
}
skmDbKey.uid = suid ? suid : uid;
skmDbKey.sver = sver;
@ -709,21 +709,11 @@ int32_t metaGetTbTSchemaEx(SMeta *pMeta, tb_uid_t suid, tb_uid_t uid, int32_t sv
tdbFree(pData);
// convert
STSchemaBuilder sb = {0};
tdInitTSchemaBuilder(&sb, pSchemaWrapper->version);
for (int i = 0; i < pSchemaWrapper->nCols; i++) {
SSchema *pSchema = pSchemaWrapper->pSchema + i;
tdAddColToSchema(&sb, pSchema->type, pSchema->flags, pSchema->colId, pSchema->bytes);
}
STSchema *pTSchema = tdGetSchemaFromBuilder(&sb);
STSchema *pTSchema = tBuildTSchema(pSchemaWrapper->pSchema, pSchemaWrapper->nCols, pSchemaWrapper->version);
if (pTSchema == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
}
tdDestroyTSchemaBuilder(&sb);
*ppTSchema = pTSchema;
taosMemoryFree(pSchemaWrapper->pSchema);
@ -756,9 +746,7 @@ int64_t metaGetTimeSeriesNum(SMeta *pMeta) {
return pMeta->pVnode->config.vndStats.numOfTimeSeries + pMeta->pVnode->config.vndStats.numOfNTimeSeries;
}
int64_t metaGetNtbNum(SMeta *pMeta) {
return pMeta->pVnode->config.vndStats.numOfNTables;
}
int64_t metaGetNtbNum(SMeta *pMeta) { return pMeta->pVnode->config.vndStats.numOfNTables; }
typedef struct {
SMeta *pMeta;

View File

@ -100,7 +100,10 @@ int32_t metaSnapRead(SMetaSnapReader* pReader, uint8_t** ppData) {
break;
}
ASSERT(pData && nData);
if (!pData || !nData) {
metaError("meta/snap: invalide nData: %" PRId32 " meta snap read failed.", nData);
goto _exit;
}
*ppData = taosMemoryMalloc(sizeof(SSnapDataHdr) + nData);
if (*ppData == NULL) {
@ -358,7 +361,11 @@ int32_t buildSnapContext(SMeta* pMeta, int64_t snapVersion, int64_t suid, int8_t
for (int i = 0; i < taosArrayGetSize(ctx->idList); i++) {
int64_t* uid = taosArrayGet(ctx->idList, i);
SIdInfo* idData = (SIdInfo*)taosHashGet(ctx->idVersion, uid, sizeof(int64_t));
ASSERT(idData);
if (!idData) {
metaError("meta/snap: null idData");
return TSDB_CODE_FAILED;
}
idData->index = i;
metaDebug("tmqsnap init idVersion uid:%" PRIi64 " version:%" PRIi64 " index:%d", *uid, idData->version,
idData->index);
@ -475,7 +482,10 @@ int32_t getMetafromSnapShot(SSnapContext* ctx, void** pBuf, int32_t* contLen, in
int64_t* uidTmp = taosArrayGet(ctx->idList, ctx->index);
ctx->index++;
SIdInfo* idInfo = (SIdInfo*)taosHashGet(ctx->idVersion, uidTmp, sizeof(tb_uid_t));
ASSERT(idInfo);
if (!idInfo) {
metaError("meta/snap: null idInfo");
return TSDB_CODE_FAILED;
}
*uid = *uidTmp;
ret = MoveToPosition(ctx, idInfo->version, *uidTmp);
@ -509,7 +519,11 @@ int32_t getMetafromSnapShot(SSnapContext* ctx, void** pBuf, int32_t* contLen, in
(ctx->subType == TOPIC_SUB_TYPE__TABLE && me.type == TSDB_CHILD_TABLE && me.ctbEntry.suid == ctx->suid)) {
STableInfoForChildTable* data =
(STableInfoForChildTable*)taosHashGet(ctx->suidInfo, &me.ctbEntry.suid, sizeof(tb_uid_t));
ASSERT(data);
if (!data) {
metaError("meta/snap: null data");
return TSDB_CODE_FAILED;
}
SVCreateTbReq req = {0};
req.type = TSDB_CHILD_TABLE;
@ -530,7 +544,8 @@ int32_t getMetafromSnapShot(SSnapContext* ctx, void** pBuf, int32_t* contLen, in
} else {
SArray* pTagVals = NULL;
if (tTagToValArray((const STag*)p, &pTagVals) != 0) {
ASSERT(0);
metaError("meta/snap: tag to val array failed.");
return TSDB_CODE_FAILED;
}
int16_t nCols = taosArrayGetSize(pTagVals);
for (int j = 0; j < nCols; ++j) {
@ -574,7 +589,8 @@ int32_t getMetafromSnapShot(SSnapContext* ctx, void** pBuf, int32_t* contLen, in
ret = buildNormalChildTableInfo(&req, pBuf, contLen);
*type = TDMT_VND_CREATE_TABLE;
} else {
ASSERT(0);
metaError("meta/snap: invalid topic sub type: %" PRId8 " get meta from snap failed.", ctx->subType);
ret = -1;
}
tDecoderClear(&dc);
@ -595,7 +611,10 @@ SMetaTableInfo getUidfromSnapShot(SSnapContext* ctx) {
int64_t* uidTmp = taosArrayGet(ctx->idList, ctx->index);
ctx->index++;
SIdInfo* idInfo = (SIdInfo*)taosHashGet(ctx->idVersion, uidTmp, sizeof(tb_uid_t));
ASSERT(idInfo);
if (!idInfo) {
metaError("meta/snap: null idInfo");
return result;
}
int32_t ret = MoveToPosition(ctx, idInfo->version, *uidTmp);
if (ret != 0) {

View File

@ -46,7 +46,7 @@ static void metaGetEntryInfo(const SMetaEntry *pEntry, SMetaInfo *pInfo) {
pInfo->suid = 0;
pInfo->skmVer = pEntry->ntbEntry.schemaRow.version;
} else {
ASSERT(0);
metaError("meta/table: invalide table type: %" PRId8 " get entry info failed.", pEntry->type);
}
}
@ -342,10 +342,22 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
tdbTbcOpen(pMeta->pTbDb, &pTbDbc, NULL);
ret = tdbTbcMoveTo(pTbDbc, &((STbDbKey){.uid = pReq->suid, .version = oversion}), sizeof(STbDbKey), &c);
ASSERT(ret == 0 && c == 0);
if (!(ret == 0 && c == 0)) {
tdbTbcClose(pUidIdxc);
tdbTbcClose(pTbDbc);
terrno = TSDB_CODE_TDB_STB_NOT_EXIST;
metaError("meta/table: invalide ret: %" PRId32 " or c: %" PRId32 "alter stb failed.", ret, c);
return -1;
}
ret = tdbTbcGet(pTbDbc, NULL, NULL, &pData, &nData);
ASSERT(ret == 0);
if (ret < 0) {
tdbTbcClose(pTbDbc);
terrno = TSDB_CODE_TDB_STB_NOT_EXIST;
return -1;
}
oStbEntry.pBuf = taosMemoryMalloc(nData);
memcpy(oStbEntry.pBuf, pData, nData);
@ -558,7 +570,8 @@ static void metaBuildTtlIdxKey(STtlIdxKey *ttlKey, const SMetaEntry *pME) {
ctime = pME->ntbEntry.ctime;
ttlDays = pME->ntbEntry.ttlDays;
} else {
ASSERT(0);
metaError("meta/table: invalide table type: %" PRId8 " build ttl idx key failed.", pME->type);
return;
}
if (ttlDays <= 0) return;
@ -773,7 +786,11 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
tdbTbcOpen(pMeta->pUidIdx, &pUidIdxc, NULL);
tdbTbcMoveTo(pUidIdxc, &uid, sizeof(uid), &c);
ASSERT(c == 0);
if (c != 0) {
tdbTbcClose(pUidIdxc);
metaError("meta/table: invalide c: %" PRId32 " alt tb column failed.", c);
return -1;
}
tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData);
oversion = ((SUidIdxVal *)pData)[0].version;
@ -783,7 +800,13 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
tdbTbcOpen(pMeta->pTbDb, &pTbDbc, NULL);
tdbTbcMoveTo(pTbDbc, &((STbDbKey){.uid = uid, .version = oversion}), sizeof(STbDbKey), &c);
ASSERT(c == 0);
if (c != 0) {
tdbTbcClose(pUidIdxc);
tdbTbcClose(pTbDbc);
metaError("meta/table: invalide c: %" PRId32 " alt tb column failed.", c);
return -1;
}
tdbTbcGet(pTbDbc, NULL, NULL, &pData, &nData);
// get table entry
@ -792,7 +815,13 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
memcpy(entry.pBuf, pData, nData);
tDecoderInit(&dc, entry.pBuf, nData);
ret = metaDecodeEntry(&dc, &entry);
ASSERT(ret == 0);
if (ret != 0) {
tdbTbcClose(pUidIdxc);
tdbTbcClose(pTbDbc);
tDecoderClear(&dc);
metaError("meta/table: invalide ret: %" PRId32 " alt tb column failed.", ret);
return -1;
}
if (entry.type != TSDB_NORMAL_TABLE) {
terrno = TSDB_CODE_VND_INVALID_TABLE_ACTION;
@ -812,7 +841,11 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
if (iCol >= pSchema->nCols) break;
pColumn = &pSchema->pSchema[iCol];
ASSERT(pAlterTbReq->colName);
if (NULL == pAlterTbReq->colName) {
metaError("meta/table: null pAlterTbReq->colName");
return -1;
}
if (strcmp(pColumn->name, pAlterTbReq->colName) == 0) break;
iCol++;
}
@ -964,7 +997,12 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
tdbTbcOpen(pMeta->pUidIdx, &pUidIdxc, NULL);
tdbTbcMoveTo(pUidIdxc, &uid, sizeof(uid), &c);
ASSERT(c == 0);
if (c != 0) {
tdbTbcClose(pUidIdxc);
terrno = TSDB_CODE_TDB_TABLE_NOT_EXIST;
metaError("meta/table: invalide c: %" PRId32 " update tb tag val failed.", c);
return -1;
}
tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData);
oversion = ((SUidIdxVal *)pData)[0].version;
@ -977,7 +1015,14 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
/* get ctbEntry */
tdbTbcOpen(pMeta->pTbDb, &pTbDbc, NULL);
tdbTbcMoveTo(pTbDbc, &((STbDbKey){.uid = uid, .version = oversion}), sizeof(STbDbKey), &c);
ASSERT(c == 0);
if (c != 0) {
tdbTbcClose(pUidIdxc);
tdbTbcClose(pTbDbc);
terrno = TSDB_CODE_TDB_TABLE_NOT_EXIST;
metaError("meta/table: invalide c: %" PRId32 " update tb tag val failed.", c);
return -1;
}
tdbTbcGet(pTbDbc, NULL, NULL, &pData, &nData);
ctbEntry.pBuf = taosMemoryMalloc(nData);
@ -1075,7 +1120,11 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
metaUpdateTagIdx(pMeta, &ctbEntry);
}
ASSERT(ctbEntry.ctbEntry.pTags);
if (NULL == ctbEntry.ctbEntry.pTags) {
metaError("meta/table: null tags, update tag val failed.");
goto _err;
}
SCtbIdxKey ctbIdxKey = {.suid = ctbEntry.ctbEntry.suid, .uid = uid};
tdbTbUpsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), ctbEntry.ctbEntry.pTags,
((STag *)(ctbEntry.ctbEntry.pTags))->len, pMeta->txn);
@ -1130,7 +1179,11 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p
tdbTbcOpen(pMeta->pUidIdx, &pUidIdxc, NULL);
tdbTbcMoveTo(pUidIdxc, &uid, sizeof(uid), &c);
ASSERT(c == 0);
if (c != 0) {
tdbTbcClose(pUidIdxc);
metaError("meta/table: invalide c: %" PRId32 " update tb options failed.", c);
return -1;
}
tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData);
oversion = ((SUidIdxVal *)pData)[0].version;
@ -1140,7 +1193,13 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p
tdbTbcOpen(pMeta->pTbDb, &pTbDbc, NULL);
tdbTbcMoveTo(pTbDbc, &((STbDbKey){.uid = uid, .version = oversion}), sizeof(STbDbKey), &c);
ASSERT(c == 0);
if (c != 0) {
tdbTbcClose(pUidIdxc);
tdbTbcClose(pTbDbc);
metaError("meta/table: invalide c: %" PRId32 " update tb options failed.", c);
return -1;
}
tdbTbcGet(pTbDbc, NULL, NULL, &pData, &nData);
// get table entry
@ -1149,7 +1208,11 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p
memcpy(entry.pBuf, pData, nData);
tDecoderInit(&dc, entry.pBuf, nData);
ret = metaDecodeEntry(&dc, &entry);
ASSERT(ret == 0);
if (ret != 0) {
tDecoderClear(&dc);
metaError("meta/table: invalide ret: %" PRId32 " alt tb options failed.", ret);
return -1;
}
entry.version = version;
metaWLock(pMeta);
@ -1408,7 +1471,8 @@ static int metaSaveToSkmDb(SMeta *pMeta, const SMetaEntry *pME) {
} else if (pME->type == TSDB_NORMAL_TABLE) {
pSW = &pME->ntbEntry.schemaRow;
} else {
ASSERT(0);
metaError("meta/table: invalide table type: %" PRId8 " save skm db failed.", pME->type);
return TSDB_CODE_FAILED;
}
skmDbKey.uid = pME->uid;

View File

@ -17,42 +17,11 @@
extern SSmaMgmt smaMgmt;
#if 0
static int32_t tdProcessRSmaSyncPreCommitImpl(SSma *pSma);
static int32_t tdProcessRSmaSyncCommitImpl(SSma *pSma);
static int32_t tdProcessRSmaSyncPostCommitImpl(SSma *pSma);
#endif
static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma);
static int32_t tdProcessRSmaAsyncCommitImpl(SSma *pSma, SCommitInfo *pInfo);
static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma);
static int32_t tdUpdateQTaskInfoFiles(SSma *pSma, SRSmaStat *pRSmaStat);
#if 0
/**
* @brief Only applicable to Rollup SMA
*
* @param pSma
* @return int32_t
*/
int32_t smaSyncPreCommit(SSma *pSma) { return tdProcessRSmaSyncPreCommitImpl(pSma); }
/**
* @brief Only applicable to Rollup SMA
*
* @param pSma
* @return int32_t
*/
int32_t smaSyncCommit(SSma *pSma) { return tdProcessRSmaSyncCommitImpl(pSma); }
/**
* @brief Only applicable to Rollup SMA
*
* @param pSma
* @return int32_t
*/
int32_t smaSyncPostCommit(SSma *pSma) { return tdProcessRSmaSyncPostCommitImpl(pSma); }
#endif
/**
* @brief async commit, only applicable to Rollup SMA
*
@ -128,167 +97,25 @@ _exit:
int32_t smaFinishCommit(SSma *pSma) {
int32_t code = 0;
int32_t lino = 0;
SVnode *pVnode = pSma->pVnode;
code = tdRSmaFSFinishCommit(pSma);
TSDB_CHECK_CODE(code, lino, _exit);
if (VND_RSMA1(pVnode) && (code = tsdbFinishCommit(VND_RSMA1(pVnode))) < 0) {
smaError("vgId:%d, failed to finish commit tsdb rsma1 since %s", TD_VID(pVnode), tstrerror(code));
goto _exit;
TSDB_CHECK_CODE(code, lino, _exit);
}
if (VND_RSMA2(pVnode) && (code = tsdbFinishCommit(VND_RSMA2(pVnode))) < 0) {
smaError("vgId:%d, failed to finish commit tsdb rsma2 since %s", TD_VID(pVnode), tstrerror(code));
goto _exit;
TSDB_CHECK_CODE(code, lino, _exit);
}
_exit:
terrno = code;
if (code) {
smaError("vgId:%d, %s failed at line %d since %s", TD_VID(pVnode), __func__, lino, tstrerror(code));
}
return code;
}
#if 0
/**
* @brief pre-commit for rollup sma (sync commit).
* 1) set trigger stat of rsma timer TASK_TRIGGER_STAT_PAUSED.
* 2) wait for all triggered fetch tasks to finish
* 3) perform persist task for qTaskInfo
*
* @param pSma
* @return int32_t
*/
static int32_t tdProcessRSmaSyncPreCommitImpl(SSma *pSma) {
SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma);
if (!pSmaEnv) {
return TSDB_CODE_SUCCESS;
}
SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv);
SRSmaStat *pRSmaStat = SMA_STAT_RSMA(pStat);
// step 1: set rsma stat paused
atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED);
// step 2: wait for all triggered fetch tasks to finish
int32_t nLoops = 0;
while (1) {
if (T_REF_VAL_GET(pStat) == 0) {
smaDebug("vgId:%d, rsma fetch tasks are all finished", SMA_VID(pSma));
break;
} else {
smaDebug("vgId:%d, rsma fetch tasks are not all finished yet", SMA_VID(pSma));
}
++nLoops;
if (nLoops > 1000) {
sched_yield();
nLoops = 0;
}
}
// step 3: perform persist task for qTaskInfo
pRSmaStat->commitAppliedVer = pSma->pVnode->state.applied;
tdRSmaPersistExecImpl(pRSmaStat, RSMA_INFO_HASH(pRSmaStat));
smaDebug("vgId:%d, rsma pre commit success", SMA_VID(pSma));
return TSDB_CODE_SUCCESS;
}
/**
* @brief commit for rollup sma
*
* @param pSma
* @return int32_t
*/
static int32_t tdProcessRSmaSyncCommitImpl(SSma *pSma) {
#if 0
SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma);
if (!pSmaEnv) {
return TSDB_CODE_SUCCESS;
}
#endif
return TSDB_CODE_SUCCESS;
}
#endif
// SQTaskFile ======================================================
/**
* @brief Most of the time, only the latest committed qtaskinfo file is kept in aTaskFile. Sometimes there may be
* multiple qtaskinfo files to support snapshot replication.
*
* @param pSma
* @param pStat
* @return int32_t
*/
static int32_t tdUpdateQTaskInfoFiles(SSma *pSma, SRSmaStat *pStat) {
SVnode *pVnode = pSma->pVnode;
SRSmaFS *pFS = RSMA_FS(pStat);
int64_t committed = pStat->commitAppliedVer;
int64_t fsMaxVer = -1;
char qTaskInfoFullName[TSDB_FILENAME_LEN];
taosWLockLatch(RSMA_FS_LOCK(pStat));
for (int32_t i = 0; i < taosArrayGetSize(pFS->aQTaskInf);) {
SQTaskFile *pTaskF = taosArrayGet(pFS->aQTaskInf, i);
int32_t oldVal = atomic_fetch_sub_32(&pTaskF->nRef, 1);
if ((oldVal <= 1) && (pTaskF->version < committed)) {
tdRSmaQTaskInfoGetFullName(TD_VID(pVnode), pTaskF->version, tfsGetPrimaryPath(pVnode->pTfs), qTaskInfoFullName);
if (taosRemoveFile(qTaskInfoFullName) < 0) {
smaWarn("vgId:%d, cleanup qinf, committed %" PRIi64 ", failed to remove %s since %s", TD_VID(pVnode), committed,
qTaskInfoFullName, tstrerror(TAOS_SYSTEM_ERROR(errno)));
} else {
smaDebug("vgId:%d, cleanup qinf, committed %" PRIi64 ", success to remove %s", TD_VID(pVnode), committed,
qTaskInfoFullName);
}
taosArrayRemove(pFS->aQTaskInf, i);
continue;
}
++i;
}
if (taosArrayGetSize(pFS->aQTaskInf) > 0) {
fsMaxVer = ((SQTaskFile *)taosArrayGetLast(pFS->aQTaskInf))->version;
}
if (fsMaxVer < committed) {
tdRSmaQTaskInfoGetFullName(TD_VID(pVnode), committed, tfsGetPrimaryPath(pVnode->pTfs), qTaskInfoFullName);
if (taosCheckExistFile(qTaskInfoFullName)) {
SQTaskFile qFile = {.nRef = 1, .padding = 0, .version = committed, .size = 0};
if (!taosArrayPush(pFS->aQTaskInf, &qFile)) {
taosWUnLockLatch(RSMA_FS_LOCK(pStat));
terrno = TSDB_CODE_OUT_OF_MEMORY;
return TSDB_CODE_FAILED;
}
}
} else {
smaDebug("vgId:%d, update qinf, no need as committed %" PRIi64 " not larger than fsMaxVer %" PRIi64, TD_VID(pVnode),
committed, fsMaxVer);
}
taosWUnLockLatch(RSMA_FS_LOCK(pStat));
return TSDB_CODE_SUCCESS;
}
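// Editor's note (summary of the function above): tdUpdateQTaskInfoFiles drops the reference on
// each tracked qtaskinfo file, removes files whose last reference is gone and whose version is
// older than the committed one, and records the freshly committed qtaskinfo file in aQTaskInf
// when it exists on disk and is newer than the current fsMaxVer.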
#if 0
/**
* @brief post-commit for rollup sma
* 1) clean up the outdated qtaskinfo files
*
* @param pSma
* @return int32_t
*/
static int32_t tdProcessRSmaSyncPostCommitImpl(SSma *pSma) {
SVnode *pVnode = pSma->pVnode;
if (!VND_IS_RSMA(pVnode)) {
return TSDB_CODE_SUCCESS;
}
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma);
tdUpdateQTaskInfoFiles(pSma, pRSmaStat);
return TSDB_CODE_SUCCESS;
}
#endif
/**
* @brief Rsma async commit implementation (only performs some necessary lightweight tasks)
* 1) set rsma stat TASK_TRIGGER_STAT_PAUSED
@ -298,9 +125,12 @@ static int32_t tdProcessRSmaSyncPostCommitImpl(SSma *pSma) {
* @return int32_t
*/
static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
int32_t code = 0;
int32_t lino = 0;
SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
if (!pEnv) {
return TSDB_CODE_SUCCESS;
return code;
}
SSmaStat *pStat = SMA_ENV_STAT(pEnv);
@ -317,7 +147,11 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
}
}
pRSmaStat->commitAppliedVer = pSma->pVnode->state.applied;
ASSERT(pRSmaStat->commitAppliedVer > 0);
if (ASSERTS(pRSmaStat->commitAppliedVer >= 0, "commit applied version %" PRIi64 " < 0",
pRSmaStat->commitAppliedVer)) {
code = TSDB_CODE_APP_ERROR;
TSDB_CHECK_CODE(code, lino, _exit);
}
// step 2: wait for all triggered fetch tasks to finish
nLoops = 0;
@ -351,9 +185,9 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
}
}
smaInfo("vgId:%d, rsma commit, all items are consumed, TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId());
if (tdRSmaPersistExecImpl(pRSmaStat, RSMA_INFO_HASH(pRSmaStat)) < 0) {
return TSDB_CODE_FAILED;
}
code = tdRSmaPersistExecImpl(pRSmaStat, RSMA_INFO_HASH(pRSmaStat));
TSDB_CHECK_CODE(code, lino, _exit);
smaInfo("vgId:%d, rsma commit, operator state committed, TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId());
#if 0 // consuming task of qTaskInfo clone
@ -361,8 +195,6 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
// lock
taosWLockLatch(SMA_ENV_LOCK(pEnv));
ASSERT(RSMA_INFO_HASH(pRSmaStat));
void *pIter = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), NULL);
while (pIter) {
@ -380,10 +212,20 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
// all rsma results are written completely
STsdb *pTsdb = NULL;
if ((pTsdb = VND_RSMA1(pSma->pVnode))) tsdbPrepareCommit(pTsdb);
if ((pTsdb = VND_RSMA2(pSma->pVnode))) tsdbPrepareCommit(pTsdb);
if ((pTsdb = VND_RSMA1(pSma->pVnode))) {
code = tsdbPrepareCommit(pTsdb);
TSDB_CHECK_CODE(code, lino, _exit);
}
if ((pTsdb = VND_RSMA2(pSma->pVnode))) {
code = tsdbPrepareCommit(pTsdb);
TSDB_CHECK_CODE(code, lino, _exit);
}
return TSDB_CODE_SUCCESS;
_exit:
if (code) {
smaError("vgId:%d, %s failed at line %d since %s", SMA_VID(pSma), __func__, lino, tstrerror(code));
}
return code;
}
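// Editor's note (summary, not part of this commit): the rollup SMA commit is split into three
// async phases. PreCommit (above) pauses the rsma triggers, waits for in-flight fetch tasks and
// queued items to drain, records commitAppliedVer, persists the qTaskInfo operator state and
// prepares the rsma1/rsma2 tsdb commits; Commit flushes the rsma FS change and commits
// rsma1/rsma2; PostCommit clears the rsma commit state (see the implementations below).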
/**
@ -394,26 +236,22 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
*/
static int32_t tdProcessRSmaAsyncCommitImpl(SSma *pSma, SCommitInfo *pInfo) {
int32_t code = 0;
int32_t lino = 0;
SVnode *pVnode = pSma->pVnode;
#if 0
SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pSmaEnv);
// perform persist task for qTaskInfo operator
if (tdRSmaPersistExecImpl(pRSmaStat, RSMA_INFO_HASH(pRSmaStat)) < 0) {
return TSDB_CODE_FAILED;
}
#endif
code = tdRSmaFSCommit(pSma);
TSDB_CHECK_CODE(code, lino, _exit);
code = tsdbCommit(VND_RSMA1(pVnode), pInfo);
TSDB_CHECK_CODE(code, lino, _exit);
code = tsdbCommit(VND_RSMA2(pVnode), pInfo);
TSDB_CHECK_CODE(code, lino, _exit);
if ((code = tsdbCommit(VND_RSMA1(pVnode), pInfo)) < 0) {
smaError("vgId:%d, failed to commit tsdb rsma1 since %s", TD_VID(pVnode), tstrerror(code));
goto _exit;
}
if ((code = tsdbCommit(VND_RSMA2(pVnode), pInfo)) < 0) {
smaError("vgId:%d, failed to commit tsdb rsma2 since %s", TD_VID(pVnode), tstrerror(code));
goto _exit;
}
_exit:
terrno = code;
if (code) {
smaError("vgId:%d, %s failed at line %d since %s", TD_VID(pVnode), __func__, lino, tstrerror(code));
}
return code;
}
@ -473,8 +311,6 @@ static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma) {
taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
}
tdUpdateQTaskInfoFiles(pSma, pRSmaStat);
atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 0);
return TSDB_CODE_SUCCESS;

View File

@ -131,7 +131,7 @@ static int32_t tdNewSmaEnv(SSma *pSma, int8_t smaType, SSmaEnv **ppEnv) {
(smaType == TSDB_SMA_TYPE_TIME_RANGE) ? atomic_store_ptr(&SMA_TSMA_ENV(pSma), *ppEnv)
: atomic_store_ptr(&SMA_RSMA_ENV(pSma), *ppEnv);
if (tdInitSmaStat(&SMA_ENV_STAT(pEnv), smaType, pSma) != TSDB_CODE_SUCCESS) {
if ((terrno = tdInitSmaStat(&SMA_ENV_STAT(pEnv), smaType, pSma)) != TSDB_CODE_SUCCESS) {
tdFreeSmaEnv(pEnv);
*ppEnv = NULL;
(smaType == TSDB_SMA_TYPE_TIME_RANGE) ? atomic_store_ptr(&SMA_TSMA_ENV(pSma), NULL)
@ -193,10 +193,16 @@ static void tRSmaInfoHashFreeNode(void *data) {
}
static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pSma) {
ASSERT(pSmaStat != NULL);
int32_t code = 0;
int32_t lino = 0;
if (ASSERTS(pSmaStat != NULL, "pSmaStat is NULL")) {
terrno = TSDB_CODE_RSMA_INVALID_ENV;
TSDB_CHECK_CODE(code, lino, _exit);
}
if (*pSmaStat) { // no lock
return TSDB_CODE_SUCCESS;
return code; // success, return directly
}
/**
@ -207,8 +213,8 @@ static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pS
if (!(*pSmaStat)) {
*pSmaStat = (SSmaStat *)taosMemoryCalloc(1, sizeof(SSmaStat) + sizeof(TdThread) * tsNumOfVnodeRsmaThreads);
if (!(*pSmaStat)) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return TSDB_CODE_FAILED;
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
if (smaType == TSDB_SMA_TYPE_ROLLUP) {
@ -224,7 +230,8 @@ static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pS
if (refId < 0) {
smaError("vgId:%d, taosAddRef refId:%" PRIi64 " to rsetId rsetId:%d max:%d failed since:%s", SMA_VID(pSma),
refId, smaMgmt.rsetId, SMA_MGMT_REF_NUM, tstrerror(terrno));
return TSDB_CODE_FAILED;
code = terrno;
TSDB_CHECK_CODE(code, lino, _exit);
} else {
smaDebug("vgId:%d, taosAddRef refId:%" PRIi64 " to rsetId rsetId:%d max:%d succeed", SMA_VID(pSma), refId,
smaMgmt.rsetId, SMA_MGMT_REF_NUM);
@ -235,25 +242,30 @@ static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pS
RSMA_INFO_HASH(pRSmaStat) = taosHashInit(
RSMA_TASK_INFO_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
if (!RSMA_INFO_HASH(pRSmaStat)) {
return TSDB_CODE_FAILED;
code = terrno;
TSDB_CHECK_CODE(code, lino, _exit);
}
taosHashSetFreeFp(RSMA_INFO_HASH(pRSmaStat), tRSmaInfoHashFreeNode);
if (tdRsmaStartExecutor(pSma) < 0) {
return TSDB_CODE_FAILED;
code = terrno;
TSDB_CHECK_CODE(code, lino, _exit);
}
if (!(RSMA_FS(pRSmaStat)->aQTaskInf = taosArrayInit(1, sizeof(SQTaskFile)))) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return TSDB_CODE_FAILED;
}
taosInitRWLatch(RSMA_FS_LOCK(pRSmaStat));
} else if (smaType == TSDB_SMA_TYPE_TIME_RANGE) {
// TODO
} else {
ASSERT(0);
ASSERTS(0, "unknown smaType:%" PRIi8, smaType);
code = TSDB_CODE_APP_ERROR;
TSDB_CHECK_CODE(code, lino, _exit);
}
}
return TSDB_CODE_SUCCESS;
_exit:
if (code) {
smaError("vgId:%d, %s failed at line %d since %s", SMA_VID(pSma), __func__, lino, tstrerror(code));
}
return code;
}
static void tdDestroyTSmaStat(STSmaStat *pStat) {
@ -342,7 +354,10 @@ static int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType) {
smaDebug("vgId:%d, remove refId:%" PRIi64 " from rsmaRef:%" PRIi32 " succeed", vid, refId, smaMgmt.rsetId);
}
} else {
ASSERT(0);
ASSERTS(0, "unknown smaType:%" PRIi8, smaType);
terrno = TSDB_CODE_APP_ERROR;
smaError("%s failed at line %d since %s", __func__, __LINE__, terrstr());
return -1;
}
}
return 0;
@ -351,7 +366,7 @@ static int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType) {
int32_t tdLockSma(SSma *pSma) {
int code = taosThreadMutexLock(&pSma->mutex);
if (code != 0) {
smaError("vgId:%d, failed to lock td since %s", SMA_VID(pSma), strerror(errno));
smaError("vgId:%d, failed to lock since %s", SMA_VID(pSma), strerror(errno));
terrno = TAOS_SYSTEM_ERROR(code);
return -1;
}
@ -360,12 +375,17 @@ int32_t tdLockSma(SSma *pSma) {
}
int32_t tdUnLockSma(SSma *pSma) {
ASSERT(SMA_LOCKED(pSma));
if (ASSERTS(SMA_LOCKED(pSma), "pSma %p is not locked:%d", pSma, pSma->locked)) {
terrno = TSDB_CODE_APP_ERROR;
smaError("vgId:%d, failed to unlock since %s", SMA_VID(pSma), tstrerror(terrno));
return -1;
}
pSma->locked = false;
int code = taosThreadMutexUnlock(&pSma->mutex);
if (code != 0) {
smaError("vgId:%d, failed to unlock td since %s", SMA_VID(pSma), strerror(errno));
terrno = TAOS_SYSTEM_ERROR(code);
smaError("vgId:%d, failed to unlock since %s", SMA_VID(pSma), strerror(errno));
return -1;
}
return 0;

View File

@ -17,250 +17,636 @@
// =================================================================================================
static int32_t tdFetchQTaskInfoFiles(SSma *pSma, int64_t version, SArray **output);
// static int32_t tdFetchQTaskInfoFiles(SSma *pSma, int64_t version, SArray **output);
static int32_t tdQTaskInfCmprFn1(const void *p1, const void *p2);
static int32_t tdQTaskInfCmprFn2(const void *p1, const void *p2);
/**
* @brief Open RSma FS from qTaskInfo files
*
* @param pSma
* @param version
* @return int32_t
*/
int32_t tdRSmaFSOpen(SSma *pSma, int64_t version) {
SVnode *pVnode = pSma->pVnode;
int64_t commitID = pVnode->state.commitID;
SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
SRSmaStat *pStat = NULL;
SArray *output = NULL;
terrno = TSDB_CODE_SUCCESS;
static FORCE_INLINE int32_t tPutQTaskF(uint8_t *p, SQTaskFile *pFile) {
int32_t n = 0;
if (!pEnv) {
return TSDB_CODE_SUCCESS;
}
n += tPutI8(p ? p + n : p, pFile->level);
n += tPutI64v(p ? p + n : p, pFile->size);
n += tPutI64v(p ? p + n : p, pFile->suid);
n += tPutI64v(p ? p + n : p, pFile->version);
n += tPutI64v(p ? p + n : p, pFile->mtime);
if (tdFetchQTaskInfoFiles(pSma, version, &output) < 0) {
goto _end;
}
pStat = (SRSmaStat *)SMA_ENV_STAT(pEnv);
for (int32_t i = 0; i < taosArrayGetSize(output); ++i) {
int32_t vid = 0;
int64_t version = -1;
sscanf((const char *)taosArrayGetP(output, i), "v%dqinf.v%" PRIi64, &vid, &version);
SQTaskFile qTaskFile = {.version = version, .nRef = 1};
if ((terrno = tdRSmaFSUpsertQTaskFile(RSMA_FS(pStat), &qTaskFile)) < 0) {
goto _end;
}
smaInfo("vgId:%d, open fs, version:%" PRIi64 ", ref:%d", TD_VID(pVnode), qTaskFile.version, qTaskFile.nRef);
}
_end:
for (int32_t i = 0; i < taosArrayGetSize(output); ++i) {
void *ptr = taosArrayGetP(output, i);
taosMemoryFreeClear(ptr);
}
taosArrayDestroy(output);
if (terrno != TSDB_CODE_SUCCESS) {
smaError("vgId:%d, open rsma fs failed since %s", TD_VID(pVnode), terrstr());
return TSDB_CODE_FAILED;
}
return TSDB_CODE_SUCCESS;
return n;
}
void tdRSmaFSClose(SRSmaFS *fs) { taosArrayDestroy(fs->aQTaskInf); }
static int32_t tdRSmaFSToBinary(uint8_t *p, SRSmaFS *pFS) {
int32_t n = 0;
uint32_t size = taosArrayGetSize(pFS->aQTaskInf);
static int32_t tdQTaskInfCmprFn1(const void *p1, const void *p2) {
if (*(int64_t *)p1 < ((SQTaskFile *)p2)->version) {
return -1;
} else if (*(int64_t *)p1 > ((SQTaskFile *)p2)->version) {
return 1;
// version
n += tPutI8(p ? p + n : p, 0);
// SArray<SQTaskFile>
n += tPutU32v(p ? p + n : p, size);
for (uint32_t i = 0; i < size; ++i) {
n += tPutQTaskF(p ? p + n : p, taosArrayGet(pFS->aQTaskInf, i));
}
return 0;
return n;
}
int32_t tdRSmaFSRef(SSma *pSma, SRSmaStat *pStat, int64_t version) {
SArray *aQTaskInf = RSMA_FS(pStat)->aQTaskInf;
SQTaskFile *pTaskF = NULL;
int32_t oldVal = 0;
int32_t tdRSmaGetQTaskF(uint8_t *p, SQTaskFile *pFile) {
int32_t n = 0;
taosRLockLatch(RSMA_FS_LOCK(pStat));
if ((pTaskF = taosArraySearch(aQTaskInf, &version, tdQTaskInfCmprFn1, TD_EQ))) {
oldVal = atomic_fetch_add_32(&pTaskF->nRef, 1);
ASSERT(oldVal > 0);
}
taosRUnLockLatch(RSMA_FS_LOCK(pStat));
return oldVal;
n += tGetI8(p + n, &pFile->level);
n += tGetI64v(p + n, &pFile->size);
n += tGetI64v(p + n, &pFile->suid);
n += tGetI64v(p + n, &pFile->version);
n += tGetI64v(p + n, &pFile->mtime);
return n;
}
int64_t tdRSmaFSMaxVer(SSma *pSma, SRSmaStat *pStat) {
SArray *aQTaskInf = RSMA_FS(pStat)->aQTaskInf;
int64_t version = -1;
taosRLockLatch(RSMA_FS_LOCK(pStat));
if (taosArrayGetSize(aQTaskInf) > 0) {
version = ((SQTaskFile *)taosArrayGetLast(aQTaskInf))->version;
}
taosRUnLockLatch(RSMA_FS_LOCK(pStat));
return version;
}
void tdRSmaFSUnRef(SSma *pSma, SRSmaStat *pStat, int64_t version) {
SVnode *pVnode = pSma->pVnode;
SArray *aQTaskInf = RSMA_FS(pStat)->aQTaskInf;
char qTaskFullName[TSDB_FILENAME_LEN];
SQTaskFile *pTaskF = NULL;
int32_t idx = -1;
taosWLockLatch(RSMA_FS_LOCK(pStat));
if ((idx = taosArraySearchIdx(aQTaskInf, &version, tdQTaskInfCmprFn1, TD_EQ)) >= 0) {
ASSERT(idx < taosArrayGetSize(aQTaskInf));
pTaskF = taosArrayGet(aQTaskInf, idx);
if (atomic_sub_fetch_32(&pTaskF->nRef, 1) <= 0) {
tdRSmaQTaskInfoGetFullName(TD_VID(pVnode), pTaskF->version, tfsGetPrimaryPath(pVnode->pTfs), qTaskFullName);
if (taosRemoveFile(qTaskFullName) < 0) {
smaWarn("vgId:%d, failed to remove %s since %s", TD_VID(pVnode), qTaskFullName,
tstrerror(TAOS_SYSTEM_ERROR(errno)));
} else {
smaDebug("vgId:%d, success to remove %s", TD_VID(pVnode), qTaskFullName);
}
taosArrayRemove(aQTaskInf, idx);
}
}
taosWUnLockLatch(RSMA_FS_LOCK(pStat));
}
/**
* @brief Fetch qtaskinfo files whose version is less than or equal to the given version
*
* @param pSma
* @param version
* @param output
* @return int32_t
*/
static int32_t tdFetchQTaskInfoFiles(SSma *pSma, int64_t version, SArray **output) {
SVnode *pVnode = pSma->pVnode;
TdDirPtr pDir = NULL;
TdDirEntryPtr pDirEntry = NULL;
char dir[TSDB_FILENAME_LEN];
const char *pattern = "v[0-9]+qinf\\.v([0-9]+)?$";
regex_t regex;
int code = 0;
terrno = TSDB_CODE_SUCCESS;
tdGetVndDirName(TD_VID(pVnode), tfsGetPrimaryPath(pVnode->pTfs), VNODE_RSMA_DIR, true, dir);
if (!taosCheckExistFile(dir)) {
smaDebug("vgId:%d, fetch qtask files, no need as dir %s not exist", TD_VID(pVnode), dir);
return TSDB_CODE_SUCCESS;
}
// Resource allocation and init
if ((code = regcomp(&regex, pattern, REG_EXTENDED)) != 0) {
terrno = TSDB_CODE_RSMA_REGEX_MATCH;
char errbuf[128];
regerror(code, &regex, errbuf, sizeof(errbuf));
smaWarn("vgId:%d, fetch qtask files, regcomp for %s failed since %s", TD_VID(pVnode), dir, errbuf);
return TSDB_CODE_FAILED;
}
if (!(pDir = taosOpenDir(dir))) {
regfree(&regex);
terrno = TAOS_SYSTEM_ERROR(errno);
smaError("vgId:%d, fetch qtask files, open dir %s failed since %s", TD_VID(pVnode), dir, terrstr());
return TSDB_CODE_FAILED;
}
int32_t dirLen = strlen(dir);
char *dirEnd = POINTER_SHIFT(dir, dirLen);
regmatch_t regMatch[2];
while ((pDirEntry = taosReadDir(pDir))) {
char *entryName = taosGetDirEntryName(pDirEntry);
if (!entryName) {
continue;
}
code = regexec(&regex, entryName, 2, regMatch, 0);
if (code == 0) {
// match
smaInfo("vgId:%d, fetch qtask files, max ver:%" PRIi64 ", %s found", TD_VID(pVnode), version, entryName);
int64_t ver = -1;
sscanf((const char *)POINTER_SHIFT(entryName, regMatch[1].rm_so), "%" PRIi64, &ver);
if ((ver <= version) && (ver > -1)) {
if (!(*output)) {
if (!(*output = taosArrayInit(1, POINTER_BYTES))) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _end;
}
}
char *entryDup = strdup(entryName);
if (!entryDup) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _end;
}
if (!taosArrayPush(*output, &entryDup)) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _end;
}
} else {
}
} else if (code == REG_NOMATCH) {
// not match
smaTrace("vgId:%d, fetch qtask files, not match %s", TD_VID(pVnode), entryName);
continue;
} else {
// has other error
char errbuf[128];
regerror(code, &regex, errbuf, sizeof(errbuf));
smaWarn("vgId:%d, fetch qtask files, regexec failed since %s", TD_VID(pVnode), errbuf);
terrno = TSDB_CODE_RSMA_REGEX_MATCH;
goto _end;
}
}
_end:
taosCloseDir(&pDir);
regfree(&regex);
return terrno == 0 ? TSDB_CODE_SUCCESS : TSDB_CODE_FAILED;
}
static int32_t tdQTaskFileCmprFn2(const void *p1, const void *p2) {
if (((SQTaskFile *)p1)->version < ((SQTaskFile *)p2)->version) {
return -1;
} else if (((SQTaskFile *)p1)->version > ((SQTaskFile *)p2)->version) {
return 1;
}
return 0;
}
int32_t tdRSmaFSUpsertQTaskFile(SRSmaFS *pFS, SQTaskFile *qTaskFile) {
static int32_t tsdbBinaryToFS(uint8_t *pData, int64_t nData, SRSmaFS *pFS) {
int32_t code = 0;
int32_t idx = taosArraySearchIdx(pFS->aQTaskInf, qTaskFile, tdQTaskFileCmprFn2, TD_GE);
int32_t n = 0;
int8_t version = 0;
if (idx < 0) {
idx = taosArrayGetSize(pFS->aQTaskInf);
} else {
SQTaskFile *pTaskF = (SQTaskFile *)taosArrayGet(pFS->aQTaskInf, idx);
int32_t c = tdQTaskFileCmprFn2(pTaskF, qTaskFile);
if (c == 0) {
pTaskF->nRef = qTaskFile->nRef;
pTaskF->version = qTaskFile->version;
pTaskF->size = qTaskFile->size;
// version
n += tGetI8(pData + n, &version);
// SArray<SQTaskFile>
taosArrayClear(pFS->aQTaskInf);
uint32_t size = 0;
n += tGetU32v(pData + n, &size);
for (uint32_t i = 0; i < size; ++i) {
SQTaskFile qTaskF = {0};
int32_t nt = tdRSmaGetQTaskF(pData + n, &qTaskF);
if (nt < 0) {
code = TSDB_CODE_FILE_CORRUPTED;
goto _exit;
}
n += nt;
if (taosArrayPush(pFS->aQTaskInf, &qTaskF) == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
}
if (taosArrayInsert(pFS->aQTaskInf, idx, qTaskFile) == NULL) {
if (ASSERTS(n + sizeof(TSCKSUM) == nData, "n:%d + sizeof(TSCKSUM):%d != nData:%d", n, (int32_t)sizeof(TSCKSUM),
nData)) {
code = TSDB_CODE_FILE_CORRUPTED;
goto _exit;
}
_exit:
return code;
}
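/*
 * Editor's note (layout summary): the rsma FS manifest saved by tdRSmaSaveFSToFile below is
 *   [int8 version(=0)][u32v nFiles] nFiles x { [int8 level][i64v size][i64v suid][i64v version][i64v mtime] } [TSCKSUM]
 * i.e. tdRSmaFSToBinary followed by a checksum appended via taosCalcChecksumAppend;
 * tsdbBinaryToFS above is its exact inverse.
 */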
static int32_t tdRSmaSaveFSToFile(SRSmaFS *pFS, const char *fname) {
int32_t code = 0;
int32_t lino = 0;
// encode to binary
int32_t size = tdRSmaFSToBinary(NULL, pFS) + sizeof(TSCKSUM);
uint8_t *pData = taosMemoryMalloc(size);
if (pData == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
tdRSmaFSToBinary(pData, pFS);
taosCalcChecksumAppend(0, pData, size);
// save to file
TdFilePtr pFD = taosCreateFile(fname, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC);
if (pFD == NULL) {
code = TAOS_SYSTEM_ERROR(errno);
TSDB_CHECK_CODE(code, lino, _exit);
}
int64_t n = taosWriteFile(pFD, pData, size);
if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
taosCloseFile(&pFD);
TSDB_CHECK_CODE(code, lino, _exit);
}
if (taosFsyncFile(pFD) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
taosCloseFile(&pFD);
TSDB_CHECK_CODE(code, lino, _exit);
}
taosCloseFile(&pFD);
_exit:
if (pData) taosMemoryFree(pData);
if (code) {
smaError("%s failed at line %d since %s, fname:%s", __func__, lino, tstrerror(code), fname);
}
return code;
}
static int32_t tdRSmaFSCreate(SRSmaFS *pFS, int32_t size) {
int32_t code = 0;
pFS->aQTaskInf = taosArrayInit(size, sizeof(SQTaskFile));
if (pFS->aQTaskInf == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
_exit:
return code;
}
}
static void tdRSmaGetCurrentFName(SSma *pSma, char *current, char *current_t) {
SVnode *pVnode = pSma->pVnode;
if (pVnode->pTfs) {
if (current) {
snprintf(current, TSDB_FILENAME_LEN - 1, "%s%svnode%svnode%d%srsma%sPRESENT", tfsGetPrimaryPath(pVnode->pTfs),
TD_DIRSEP, TD_DIRSEP, TD_VID(pVnode), TD_DIRSEP, TD_DIRSEP);
}
if (current_t) {
snprintf(current_t, TSDB_FILENAME_LEN - 1, "%s%svnode%svnode%d%srsma%sPRESENT.t", tfsGetPrimaryPath(pVnode->pTfs),
TD_DIRSEP, TD_DIRSEP, TD_VID(pVnode), TD_DIRSEP, TD_DIRSEP);
}
} else {
#if 0
if (current) {
snprintf(current, TSDB_FILENAME_LEN - 1, "%s%sPRESENT", pTsdb->path, TD_DIRSEP);
}
if (current_t) {
snprintf(current_t, TSDB_FILENAME_LEN - 1, "%s%sPRESENT.t", pTsdb->path, TD_DIRSEP);
}
#endif
}
}
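// Editor's note (inferred from the snprintf patterns above): with a TFS primary path the manifest
// lives at <primary>/vnode/vnode<vgId>/rsma/PRESENT and its uncommitted twin at .../PRESENT.t;
// the non-TFS branch is currently disabled (#if 0).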
static int32_t tdRSmaLoadFSFromFile(const char *fname, SRSmaFS *pFS) {
int32_t code = 0;
int32_t lino = 0;
uint8_t *pData = NULL;
// load binary
TdFilePtr pFD = taosOpenFile(fname, TD_FILE_READ);
if (pFD == NULL) {
code = TAOS_SYSTEM_ERROR(errno);
TSDB_CHECK_CODE(code, lino, _exit);
}
int64_t size;
if (taosFStatFile(pFD, &size, NULL) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
taosCloseFile(&pFD);
TSDB_CHECK_CODE(code, lino, _exit);
}
pData = taosMemoryMalloc(size);
if (pData == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
taosCloseFile(&pFD);
TSDB_CHECK_CODE(code, lino, _exit);
}
if (taosReadFile(pFD, pData, size) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
taosCloseFile(&pFD);
TSDB_CHECK_CODE(code, lino, _exit);
}
if (!taosCheckChecksumWhole(pData, size)) {
code = TSDB_CODE_FILE_CORRUPTED;
taosCloseFile(&pFD);
TSDB_CHECK_CODE(code, lino, _exit);
}
taosCloseFile(&pFD);
// decode binary
code = tsdbBinaryToFS(pData, size, pFS);
TSDB_CHECK_CODE(code, lino, _exit);
_exit:
if (pData) taosMemoryFree(pData);
if (code) {
smaError("%s failed at line %d since %s, fname:%s", __func__, lino, tstrerror(code), fname);
}
return code;
}
static int32_t tdQTaskInfCmprFn1(const void *p1, const void *p2) {
const SQTaskFile *q1 = (const SQTaskFile *)p1;
const SQTaskFile *q2 = (const SQTaskFile *)p2;
if (q1->suid < q2->suid) {
return -1;
} else if (q1->suid > q2->suid) {
return 1;
}
if (q1->level < q2->level) {
return -1;
} else if (q1->level > q2->level) {
return 1;
}
if (q1->version < q2->version) {
return -2;
} else if (q1->version > q2->version) {
return 1;
}
return 0;
}
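// Editor's note: entries are ordered by (suid, level, version); the dedicated -2 return for an
// older version of the same (suid, level) lets tdRSmaFSApplyChange below tell "previous
// generation of this qtask file" (unref and possibly remove) apart from "different file" (stop).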
static int32_t tdRSmaFSApplyChange(SSma *pSma, SRSmaFS *pFSNew) {
int32_t code = 0;
int32_t lino = 0;
int32_t nRef = 0;
SVnode *pVnode = pSma->pVnode;
SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
SRSmaStat *pStat = (SRSmaStat *)SMA_ENV_STAT(pEnv);
SRSmaFS *pFSOld = RSMA_FS(pStat);
int64_t version = pStat->commitAppliedVer;
char fname[TSDB_FILENAME_LEN] = {0};
// SQTaskFile
int32_t nNew = taosArrayGetSize(pFSNew->aQTaskInf);
int32_t iNew = 0;
while (iNew < nNew) {
SQTaskFile *pQTaskFNew = TARRAY_GET_ELEM(pFSNew->aQTaskInf, iNew++);
int32_t idx = taosArraySearchIdx(pFSOld->aQTaskInf, pQTaskFNew, tdQTaskInfCmprFn1, TD_GE);
if (idx < 0) {
idx = taosArrayGetSize(pFSOld->aQTaskInf);
pQTaskFNew->nRef = 1;
} else {
SQTaskFile *pTaskF = TARRAY_GET_ELEM(pFSOld->aQTaskInf, idx);
int32_t c1 = tdQTaskInfCmprFn1(pQTaskFNew, pTaskF);
if (c1 == 0) {
// utilize the item in pFSOld->aQTaskInf, instead of pFSNew
continue;
} else if (c1 < 0) {
// NOTHING TODO
} else {
code = TSDB_CODE_RSMA_FS_UPDATE;
TSDB_CHECK_CODE(code, lino, _exit);
}
}
if (taosArrayInsert(pFSOld->aQTaskInf, idx, pQTaskFNew) == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
// remove previous version
while (--idx >= 0) {
SQTaskFile *preTaskF = TARRAY_GET_ELEM(pFSOld->aQTaskInf, idx);
int32_t c2 = tdQTaskInfCmprFn1(preTaskF, pQTaskFNew);
if (c2 == 0) {
code = TSDB_CODE_RSMA_FS_UPDATE;
TSDB_CHECK_CODE(code, lino, _exit);
} else if (c2 != -2) {
break;
}
nRef = atomic_sub_fetch_32(&preTaskF->nRef, 1);
if (nRef <= 0) {
tdRSmaQTaskInfoGetFullName(TD_VID(pVnode), preTaskF->suid, preTaskF->level, preTaskF->version,
tfsGetPrimaryPath(pVnode->pTfs), fname);
(void)taosRemoveFile(fname);
taosArrayRemove(pFSOld->aQTaskInf, idx);
}
}
}
_exit:
if (code) {
smaError("vgId:%d, %s failed at line %d since %s", TD_VID(pVnode), __func__, lino, tstrerror(code));
}
return code;
}
static int32_t tdRSmaFSScanAndTryFix(SSma *pSma) {
int32_t code = 0;
#if 0
int32_t lino = 0;
SVnode *pVnode = pSma->pVnode;
SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
SRSmaStat *pStat = (SRSmaStat *)SMA_ENV_STAT(pEnv);
SRSmaFS *pFS = RSMA_FS(pStat);
char fname[TSDB_FILENAME_LEN] = {0};
char fnameVer[TSDB_FILENAME_LEN] = {0};
// SArray<SQTaskFile>
int32_t size = taosArrayGetSize(pFS->aQTaskInf);
for (int32_t i = 0; i < size; ++i) {
SQTaskFile *pTaskF = (SQTaskFile *)taosArrayGet(pFS->aQTaskInf, i);
// main.tdb =========
tdRSmaQTaskInfoGetFullName(TD_VID(pVnode), pTaskF->suid, pTaskF->level, pTaskF->version,
tfsGetPrimaryPath(pVnode->pTfs), fnameVer);
tdRSmaQTaskInfoGetFullName(TD_VID(pVnode), pTaskF->suid, pTaskF->level, -1, tfsGetPrimaryPath(pVnode->pTfs), fname);
if (taosCheckExistFile(fnameVer)) {
if (taosRenameFile(fnameVer, fname) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
TSDB_CHECK_CODE(code, lino, _exit);
}
smaDebug("vgId:%d, %s:%d succeed to to rename %s to %s", TD_VID(pVnode), __func__, lino, fnameVer, fname);
} else if (taosCheckExistFile(fname)) {
if (taosRemoveFile(fname) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
TSDB_CHECK_CODE(code, lino, _exit);
}
smaDebug("vgId:%d, %s:%d succeed to to remove %s", TD_VID(pVnode), __func__, lino, fname);
}
}
{
// remove those invalid files (todo)
// main.tdb-journal.5 // TDB should handle its own cleanup after kill -9
}
_exit:
if (code) {
smaError("vgId:%d, %s failed at line %d since %s", TD_VID(pVnode), __func__, lino, tstrerror(code));
}
#endif
return code;
}
// EXPOSED APIS ====================================================================================
int32_t tdRSmaFSOpen(SSma *pSma, int64_t version, int8_t rollback) {
int32_t code = 0;
int32_t lino = 0;
SVnode *pVnode = pSma->pVnode;
SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
SRSmaStat *pStat = (SRSmaStat *)SMA_ENV_STAT(pEnv);
// open handle
code = tdRSmaFSCreate(RSMA_FS(pStat), 0);
TSDB_CHECK_CODE(code, lino, _exit);
// open impl
char current[TSDB_FILENAME_LEN] = {0};
char current_t[TSDB_FILENAME_LEN] = {0};
tdRSmaGetCurrentFName(pSma, current, current_t);
if (taosCheckExistFile(current)) {
code = tdRSmaLoadFSFromFile(current, RSMA_FS(pStat));
TSDB_CHECK_CODE(code, lino, _exit);
if (taosCheckExistFile(current_t)) {
if (rollback) {
code = tdRSmaFSRollback(pSma);
TSDB_CHECK_CODE(code, lino, _exit);
} else {
code = tdRSmaFSCommit(pSma);
TSDB_CHECK_CODE(code, lino, _exit);
}
}
} else {
// 1st time open with empty current/qTaskInfoFile
code = tdRSmaSaveFSToFile(RSMA_FS(pStat), current);
TSDB_CHECK_CODE(code, lino, _exit);
}
// scan and try to fix (remove main.db/main.db.xxx and use the one with version)
code = tdRSmaFSScanAndTryFix(pSma);
TSDB_CHECK_CODE(code, lino, _exit);
_exit:
if (code) {
smaError("vgId:%d, %s failed at line %d since %s", TD_VID(pVnode), __func__, lino, tstrerror(code));
}
return code;
}
void tdRSmaFSClose(SRSmaFS *pFS) { pFS->aQTaskInf = taosArrayDestroy(pFS->aQTaskInf); }
int32_t tdRSmaFSPrepareCommit(SSma *pSma, SRSmaFS *pFSNew) {
int32_t code = 0;
int32_t lino = 0;
char tfname[TSDB_FILENAME_LEN];
tdRSmaGetCurrentFName(pSma, NULL, tfname);
// generate PRESENT.t
code = tdRSmaSaveFSToFile(pFSNew, tfname);
TSDB_CHECK_CODE(code, lino, _exit);
_exit:
if (code) {
smaError("vgId:%d, %s failed at line %d since %s", TD_VID(pSma->pVnode), __func__, lino, tstrerror(code));
}
return code;
}
int32_t tdRSmaFSCommit(SSma *pSma) {
int32_t code = 0;
int32_t lino = 0;
SRSmaFS fs = {0};
char current[TSDB_FILENAME_LEN] = {0};
char current_t[TSDB_FILENAME_LEN] = {0};
tdRSmaGetCurrentFName(pSma, current, current_t);
if (!taosCheckExistFile(current_t)) {
goto _exit;
}
// rename the file
if (taosRenameFile(current_t, current) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
TSDB_CHECK_CODE(code, lino, _exit);
}
// load the new FS
code = tdRSmaFSCreate(&fs, 1);
TSDB_CHECK_CODE(code, lino, _exit);
code = tdRSmaLoadFSFromFile(current, &fs);
TSDB_CHECK_CODE(code, lino, _exit);
// apply file change
code = tdRSmaFSApplyChange(pSma, &fs);
TSDB_CHECK_CODE(code, lino, _exit);
_exit:
tdRSmaFSClose(&fs);
if (code) {
smaError("vgId:%d, %s failed at line %d since %s", SMA_VID(pSma), __func__, lino, tstrerror(code));
}
return code;
}
int32_t tdRSmaFSFinishCommit(SSma *pSma) {
int32_t code = 0;
int32_t lino = 0;
SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma);
SRSmaStat *pStat = (SRSmaStat *)SMA_ENV_STAT(pSmaEnv);
taosWLockLatch(RSMA_FS_LOCK(pStat));
code = tdRSmaFSCommit(pSma);
TSDB_CHECK_CODE(code, lino, _exit);
_exit:
taosWUnLockLatch(RSMA_FS_LOCK(pStat));
if (code) {
smaError("vgId:%d, %s failed at line %d since %s", SMA_VID(pSma), __func__, lino, tstrerror(code));
} else {
smaInfo("vgId:%d, rsmaFS finish commit", SMA_VID(pSma));
}
return code;
}
int32_t tdRSmaFSRollback(SSma *pSma) {
int32_t code = 0;
int32_t lino = 0;
char current_t[TSDB_FILENAME_LEN] = {0};
tdRSmaGetCurrentFName(pSma, NULL, current_t);
(void)taosRemoveFile(current_t);
_exit:
if (code) {
smaError("vgId:%d, %s failed at line %d since %s", SMA_VID(pSma), __func__, lino, tstrerror(errno));
}
return code;
}
int32_t tdRSmaFSUpsertQTaskFile(SSma *pSma, SRSmaFS *pFS, SQTaskFile *qTaskFile, int32_t nSize) {
int32_t code = 0;
for (int32_t i = 0; i < nSize; ++i) {
SQTaskFile *qTaskF = qTaskFile + i;
int32_t idx = taosArraySearchIdx(pFS->aQTaskInf, qTaskF, tdQTaskInfCmprFn1, TD_GE);
if (idx < 0) {
idx = taosArrayGetSize(pFS->aQTaskInf);
} else {
SQTaskFile *pTaskF = (SQTaskFile *)taosArrayGet(pFS->aQTaskInf, idx);
int32_t c = tdQTaskInfCmprFn1(pTaskF, qTaskF);
if (c == 0) {
if (pTaskF->size != qTaskF->size) {
code = TSDB_CODE_RSMA_FS_UPDATE;
smaError("vgId:%d, %s failed at line %d since %s, level:%" PRIi8 ", suid:%" PRIi64 ", version:%" PRIi64
", size:%" PRIi64 " != %" PRIi64,
SMA_VID(pSma), __func__, __LINE__, tstrerror(code), pTaskF->level, pTaskF->suid, pTaskF->version,
pTaskF->size, qTaskF->size);
goto _exit;
}
continue;
}
}
if (!taosArrayInsert(pFS->aQTaskInf, idx, qTaskF)) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
}
_exit:
return code;
}
int32_t tdRSmaFSRef(SSma *pSma, SRSmaFS *pFS) {
int32_t code = 0;
int32_t lino = 0;
int32_t nRef = 0;
SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
SRSmaStat *pStat = (SRSmaStat *)SMA_ENV_STAT(pEnv);
SRSmaFS *qFS = RSMA_FS(pStat);
int32_t size = taosArrayGetSize(qFS->aQTaskInf);
pFS->aQTaskInf = taosArrayInit(size, sizeof(SQTaskFile));
if (pFS->aQTaskInf == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
for (int32_t i = 0; i < size; ++i) {
SQTaskFile *qTaskF = (SQTaskFile *)taosArrayGet(qFS->aQTaskInf, i);
nRef = atomic_fetch_add_32(&qTaskF->nRef, 1);
if (nRef <= 0) {
code = TSDB_CODE_RSMA_FS_REF;
TSDB_CHECK_CODE(code, lino, _exit);
}
}
taosArraySetSize(pFS->aQTaskInf, size);
memcpy(pFS->aQTaskInf->pData, qFS->aQTaskInf->pData, size * sizeof(SQTaskFile));
_exit:
if (code) {
smaError("vgId:%d, %s failed at line %d since %s, nRef %d", TD_VID(pSma->pVnode), __func__, lino, tstrerror(code),
nRef);
}
return code;
}
void tdRSmaFSUnRef(SSma *pSma, SRSmaFS *pFS) {
int32_t nRef = 0;
char fname[TSDB_FILENAME_LEN];
SVnode *pVnode = pSma->pVnode;
SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
SRSmaStat *pStat = (SRSmaStat *)SMA_ENV_STAT(pEnv);
int32_t size = taosArrayGetSize(pFS->aQTaskInf);
for (int32_t i = 0; i < size; ++i) {
SQTaskFile *pTaskF = (SQTaskFile *)taosArrayGet(pFS->aQTaskInf, i);
nRef = atomic_sub_fetch_32(&pTaskF->nRef, 1);
if (nRef == 0) {
tdRSmaQTaskInfoGetFullName(TD_VID(pVnode), pTaskF->suid, pTaskF->level, pTaskF->version,
tfsGetPrimaryPath(pVnode->pTfs), fname);
if (taosRemoveFile(fname) < 0) {
smaWarn("vgId:%d, failed to remove %s since %s", TD_VID(pVnode), fname, tstrerror(TAOS_SYSTEM_ERROR(errno)));
} else {
smaDebug("vgId:%d, success to remove %s", TD_VID(pVnode), fname);
}
} else if (nRef < 0) {
smaWarn("vgId:%d, abnormal unref %s since %s", TD_VID(pVnode), fname, tstrerror(TSDB_CODE_RSMA_FS_REF));
}
}
taosArrayDestroy(pFS->aQTaskInf);
}
int32_t tdRSmaFSTakeSnapshot(SSma *pSma, SRSmaFS *pFS) {
int32_t code = 0;
int32_t lino = 0;
SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
SRSmaStat *pStat = (SRSmaStat *)SMA_ENV_STAT(pEnv);
taosRLockLatch(RSMA_FS_LOCK(pStat));
code = tdRSmaFSRef(pSma, pFS);
TSDB_CHECK_CODE(code, lino, _exit);
_exit:
taosRUnLockLatch(RSMA_FS_LOCK(pStat));
if (code) {
smaError("vgId:%d, %s failed at line %d since %s", TD_VID(pSma->pVnode), __func__, lino, tstrerror(code));
}
return code;
}
int32_t tdRSmaFSCopy(SSma *pSma, SRSmaFS *pFS) {
int32_t code = 0;
int32_t lino = 0;
SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
SRSmaStat *pStat = (SRSmaStat *)SMA_ENV_STAT(pEnv);
SRSmaFS *qFS = RSMA_FS(pStat);
int32_t size = taosArrayGetSize(qFS->aQTaskInf);
code = tdRSmaFSCreate(pFS, size);
TSDB_CHECK_CODE(code, lino, _exit);
taosArraySetSize(pFS->aQTaskInf, size);
memcpy(pFS->aQTaskInf->pData, qFS->aQTaskInf->pData, size * sizeof(SQTaskFile));
_exit:
if (code) {
smaError("vgId:%d, %s failed at line %d since %s", TD_VID(pSma->pVnode), __func__, lino, tstrerror(code));
}
return code;
}
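/*
 * Editor's sketch (not part of this commit): a minimal illustration of how the exposed rsma FS
 * APIs above could be combined to publish a manifest update, assuming the caller has already
 * built the new SQTaskFile entries. The helper name and the bare-bones error handling are
 * hypothetical and for illustration only.
 */
static int32_t tdRSmaFSUpdateSketch(SSma *pSma, SQTaskFile *aNewF, int32_t nNew) {
  SRSmaFS fs = {0};
  int32_t code = tdRSmaFSCopy(pSma, &fs);                                 // snapshot the current manifest
  if (code == 0) code = tdRSmaFSUpsertQTaskFile(pSma, &fs, aNewF, nNew);  // merge the new entries
  if (code == 0) code = tdRSmaFSPrepareCommit(pSma, &fs);                 // write PRESENT.t
  if (code == 0) {
    code = tdRSmaFSFinishCommit(pSma);  // rename PRESENT.t over PRESENT and apply the change
  } else {
    (void)tdRSmaFSRollback(pSma);       // drop PRESENT.t and keep the old manifest
  }
  tdRSmaFSClose(&fs);
  return code;
}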

View File

@ -34,13 +34,16 @@ static int32_t rsmaRestore(SSma *pSma);
SRetention *r = (SRetention *)VND_RETENTIONS(v) + l; \
if (!RETENTION_VALID(r)) { \
if (l == 0) { \
goto _err; \
code = TSDB_CODE_INVALID_PARA; \
TSDB_CHECK_CODE(code, lino, _exit); \
} \
break; \
} \
smaSetKeepCfg(v, &keepCfg, pCfg, TSDB_TYPE_RSMA_L##l); \
code = smaSetKeepCfg(v, &keepCfg, pCfg, TSDB_TYPE_RSMA_L##l); \
TSDB_CHECK_CODE(code, lino, _exit); \
if (tsdbOpen(v, &SMA_RSMA_TSDB##l(pSma), VNODE_RSMA##l##_DIR, &keepCfg, rollback) < 0) { \
goto _err; \
code = terrno; \
TSDB_CHECK_CODE(code, lino, _exit); \
} \
} while (0)
@ -68,12 +71,10 @@ static int32_t smaEvalDays(SVnode *pVnode, SRetention *r, int8_t level, int8_t p
days = keepDuration;
}
if (level == TSDB_RETENTION_L0) {
goto end;
if (level < TSDB_RETENTION_L1 || level > TSDB_RETENTION_L2) {
goto _exit;
}
ASSERT(level >= TSDB_RETENTION_L1 && level <= TSDB_RETENTION_L2);
freqDuration = convertTimeFromPrecisionToUnit((r + level)->freq, precision, TIME_UNIT_MINUTE);
keepDuration = convertTimeFromPrecisionToUnit((r + level)->keep, precision, TIME_UNIT_MINUTE);
@ -91,16 +92,18 @@ static int32_t smaEvalDays(SVnode *pVnode, SRetention *r, int8_t level, int8_t p
if (days < freqDuration) {
days = freqDuration;
}
end:
_exit:
smaInfo("vgId:%d, evaluated duration for level %d is %d, raw val:%d", TD_VID(pVnode), level + 1, days, duration);
return days;
}
int smaSetKeepCfg(SVnode *pVnode, STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type) {
terrno = 0;
pKeepCfg->precision = pCfg->precision;
switch (type) {
case TSDB_TYPE_TSMA:
ASSERT(0);
ASSERTS(0, "undefined smaType:%d", (int32_t)type);
terrno = TSDB_CODE_APP_ERROR;
break;
case TSDB_TYPE_RSMA_L0:
SMA_SET_KEEP_CFG(pVnode, 0);
@ -112,21 +115,22 @@ int smaSetKeepCfg(SVnode *pVnode, STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int ty
SMA_SET_KEEP_CFG(pVnode, 2);
break;
default:
ASSERT(0);
ASSERTS(0, "unknown smaType:%d", (int32_t)type);
terrno = TSDB_CODE_APP_ERROR;
break;
}
return 0;
return terrno;
}
int32_t smaOpen(SVnode *pVnode, int8_t rollback) {
int32_t code = 0;
int32_t lino = 0;
STsdbCfg *pCfg = &pVnode->config.tsdbCfg;
ASSERT(!pVnode->pSma);
SSma *pSma = taosMemoryCalloc(1, sizeof(SSma));
if (!pSma) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
pVnode->pSma = pSma;
@ -137,7 +141,7 @@ int32_t smaOpen(SVnode *pVnode, int8_t rollback) {
if (VND_IS_RSMA(pVnode)) {
STsdbKeepCfg keepCfg = {0};
for (int i = 0; i < TSDB_RETENTION_MAX; ++i) {
for (int32_t i = 0; i < TSDB_RETENTION_MAX; ++i) {
if (i == TSDB_RETENTION_L0) {
SMA_OPEN_RSMA_IMPL(pVnode, 0);
} else if (i == TSDB_RETENTION_L1) {
@ -145,19 +149,24 @@ int32_t smaOpen(SVnode *pVnode, int8_t rollback) {
} else if (i == TSDB_RETENTION_L2) {
SMA_OPEN_RSMA_IMPL(pVnode, 2);
} else {
ASSERT(0);
code = TSDB_CODE_APP_ERROR;
smaError("vgId:%d, sma open failed since %s, level:%d", TD_VID(pVnode), tstrerror(code), i);
TSDB_CHECK_CODE(code, lino, _exit);
}
}
// restore the rsma
if (tdRSmaRestore(pSma, RSMA_RESTORE_REBOOT, pVnode->state.committed) < 0) {
goto _err;
if (tdRSmaRestore(pSma, RSMA_RESTORE_REBOOT, pVnode->state.committed, rollback) < 0) {
code = terrno;
TSDB_CHECK_CODE(code, lino, _exit);
}
}
return 0;
_err:
return -1;
_exit:
if (code) {
smaError("vgId:%d, %s failed at line %d since %s", TD_VID(pVnode), __func__, lino, tstrerror(code));
}
return code;
}
int32_t smaClose(SSma *pSma) {
@ -181,8 +190,11 @@ int32_t smaClose(SSma *pSma) {
* @param committedVer
* @return int32_t
*/
int32_t tdRSmaRestore(SSma *pSma, int8_t type, int64_t committedVer) {
ASSERT(VND_IS_RSMA(pSma->pVnode));
int32_t tdRSmaRestore(SSma *pSma, int8_t type, int64_t committedVer, int8_t rollback) {
if (!VND_IS_RSMA(pSma->pVnode)) {
terrno = TSDB_CODE_RSMA_INVALID_ENV;
return TSDB_CODE_FAILED;
}
return tdRSmaProcessRestoreImpl(pSma, type, committedVer);
return tdRSmaProcessRestoreImpl(pSma, type, committedVer, rollback);
}

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff