merge 3.0

commit 9d6d05c405
@@ -60,7 +60,7 @@ sudo apt-get install -y gcc cmake build-essential git libssl-dev

为了在 Ubuntu/Debian 系统上编译 [taos-tools](https://github.com/taosdata/taos-tools) 需要安装如下软件:

```bash
sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev pkg-config
sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g pkg-config
```

### CentOS 7.9
@@ -85,7 +85,7 @@ sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel

```
sudo yum install -y zlib-devel xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libstdc++-static openssl-devel
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
```

#### CentOS 8/Rocky Linux

@@ -94,7 +94,7 @@ sudo yum install -y zlib-devel xz-devel snappy-devel jansson jansson-devel pkgco
sudo yum install -y epel-release
sudo yum install -y dnf-plugins-core
sudo yum config-manager --set-enabled powertools
sudo yum install -y zlib-devel xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libstdc++-static openssl-devel
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
```

注意:由于 snappy 缺乏 pkg-config 支持(参考 [链接](https://github.com/google/snappy/pull/86)),会导致 cmake 提示无法发现 libsnappy,实际上工作正常。
@@ -62,7 +62,7 @@ sudo apt-get install -y gcc cmake build-essential git libssl-dev

To build the [taosTools](https://github.com/taosdata/taos-tools) on Ubuntu/Debian, the following packages need to be installed.

```bash
sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev pkg-config
sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g pkg-config
```

### CentOS 7.9
@@ -85,7 +85,7 @@ sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel

#### CentOS 7.9

```
sudo yum install -y zlib-devel xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libstdc++-static openssl-devel
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
```

#### CentOS 8/Rocky Linux

@@ -94,7 +94,7 @@ sudo yum install -y zlib-devel xz-devel snappy-devel jansson jansson-devel pkgco
sudo yum install -y epel-release
sudo yum install -y dnf-plugins-core
sudo yum config-manager --set-enabled powertools
sudo yum install -y zlib-devel xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libstdc++-static openssl-devel
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
```

Note: Since snappy lacks pkg-config support (see this [link](https://github.com/google/snappy/pull/86)), cmake reports that libsnappy cannot be found, but the build still works correctly.
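With the dependencies above in place, the following is a minimal sketch of an out-of-source build of taos-tools; the clone URL comes from the text above, while the `build` directory name and the install step are assumptions rather than part of this change.

```bash
# Illustrative build steps after installing the packages listed above.
git clone https://github.com/taosdata/taos-tools.git
cd taos-tools
mkdir -p build && cd build
cmake ..            # cmake may warn that libsnappy was not found; see the note above
make
sudo make install   # optional: install the built tools system-wide
```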
@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.0.1.8")
SET(TD_VER_NUMBER "3.0.2.0")
ENDIF ()

IF (DEFINED VERCOMPATIBLE)
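The `VERNUMBER` check above lets a packager override the default `TD_VER_NUMBER` at configure time; `VERCOMPATIBLE` works the same way. A hedged example of such an invocation follows, where the build directory layout is an assumption.

```bash
# Illustrative only: configure the tree with an explicit version number
# instead of the default set in version.cmake.
cmake .. -DVERNUMBER=3.0.2.0
```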
@@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG 0dfad5b
GIT_TAG 566540d
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG e62c5ea
GIT_TAG 4a4027c
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
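The two ExternalProject entries above pin taosadapter and taos-tools to specific commits. A hedged way to confirm what actually got checked out after a build, using the SOURCE_DIR paths shown above relative to the source root:

```bash
# Illustrative only: print the checked-out commits of the pinned components.
git -C tools/taosadapter rev-parse --short HEAD   # expected to match 566540d
git -C tools/taos-tools rev-parse --short HEAD    # expected to match 4a4027c
```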
@@ -878,8 +878,10 @@ The source code of the sample application is under `TDengine/examples/JDBC`:

| taos-jdbcdriver version | major changes |
| :---------------------: | :--------------------------------------------: |
| 3.0.3 | fix timestamp resolution error for REST connections on JDK 17+ |
| 3.0.1 - 3.0.2 | fix occasional incorrect parsing of resultSet data. 3.0.1 is compiled with JDK 11; use 3.0.2 in JDK 8 environments |
| 3.0.0 | Support for TDengine 3.0 |
| 2.0.42 | fix wasNull interface return value in WebSocket connection |
| 2.0.41 | fix decode method of username and password in REST connection |
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters |
| 2.0.38 | JDBC REST connections add bulk pull function |
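As a quick, hedged way to confirm that the 3.0.3 artifact listed above resolves from Maven Central, the coordinates below follow the `com.taosdata.jdbc:taos-jdbcdriver` artifact referenced elsewhere in this change.

```bash
# Illustrative only: resolve the driver version from the changelog above.
mvn dependency:get -Dartifact=com.taosdata.jdbc:taos-jdbcdriver:3.0.3
```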
@@ -59,6 +59,7 @@ Usage of taosAdapter:
--collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045)
--collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root")
--collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10)
--collectd.ttl int collectd data ttl. Env "TAOS_ADAPTER_COLLECTD_TTL" (default 0, means no ttl)
-c, --config string config path default /etc/taos/taosadapter.toml
--cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true)
--cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials"
@@ -100,6 +101,7 @@ Usage of taosAdapter:
--node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s)
--node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100])
--node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root")
--node_exporter.ttl int node_exporter data ttl. Env "TAOS_ADAPTER_NODE_EXPORTER_TTL"(default 0, means no ttl)
--opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true)
--opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1)
--opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb])
@@ -110,6 +112,7 @@ Usage of taosAdapter:
--opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049])
--opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE"
--opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
--opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL"(default 0, means no ttl)
--pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT" (default 1h0m0s)
--pool.maxConnect int max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT" (default 4000)
--pool.maxIdle int max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE" (default 4000)
@@ -131,6 +134,7 @@ Usage of taosAdapter:
--statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE"
--statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root")
--statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10)
--statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL" (default 0, means no ttl)
--taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE"
--version Print the version and exit
```
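For readers wiring up the new `*.ttl` options above, here is a hedged sketch of two equivalent ways to set a TTL for collectd data at startup; the value 3650 and the config path are illustrative, and the unit follows whatever the usage text above specifies.

```bash
# Illustrative only: set the collectd data TTL via the environment variable
# or via the command-line flag shown in the usage text above.
TAOS_ADAPTER_COLLECTD_TTL=3650 taosadapter -c /etc/taos/taosadapter.toml
# or equivalently:
taosadapter --collectd.ttl 3650 -c /etc/taos/taosadapter.toml
```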
@@ -195,6 +199,7 @@ Support InfluxDB query parameters as follows.
- `precision` The time precision used by TDengine
- `u` TDengine user name
- `p` TDengine password
- `ttl` The time to live of automatically created sub-tables. This value cannot be updated; TDengine uses the ttl value carried by the first record of a sub-table when creating that sub-table. For more information, please refer to [Create Table](/taos-sql/table/#create-table)

Note: InfluxDB token authorization is not supported at present. Only Basic authorization and query parameter validation are supported.
Example: curl --request POST http://127.0.0.1:6041/influxdb/v1/write?db=test --user "root:taosdata" --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"
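To make the new `ttl` parameter concrete, the sketch below repeats the write from the example above but adds a per-sub-table TTL; the database name and TTL value are illustrative only.

```bash
# Illustrative only: same write as the example above, with the new ttl
# query parameter applied to the auto-created sub-table.
curl --request POST \
  "http://127.0.0.1:6041/influxdb/v1/write?db=test&ttl=3650" \
  --user "root:taosdata" \
  --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"
```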
@@ -153,11 +153,11 @@ The parameters described in this document by the effect that they have on the sy
| Meaning | Execution policy for query statements |
| Unit | None |
| Default | 1 |
| Notes | 1: Run queries on vnodes and not on qnodes |
| Value Range | 1: Run queries on vnodes and not on qnodes

2: Run subtasks without scan operators on qnodes and subtasks with scan operators on vnodes.

3: Only run scan operators on vnodes; run all other operators on qnodes.
3: Only run scan operators on vnodes; run all other operators on qnodes. |

### querySmaOptimize
@@ -173,6 +173,14 @@ The parameters described in this document by the effect that they have on the sy

1: Enable SMA indexing and perform queries from suitable statements on precomputation results.|

### countAlwaysReturnValue

| Attribute | Description |
| -------- | -------------------------------- |
| Applicable | Server only |
| Meaning | Whether count()/hyperloglog() returns a value when the result data is NULL |
| Value Range | 0: Return an empty row; 1: Return 0 |
| Default | 1 |

### maxNumOfDistinctRes
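As a concrete illustration of `countAlwaysReturnValue`, the sketch below runs a count over a time range that matches no rows; per the table above, with the default value 1 the query should return a single row containing 0, and with 0 an empty result. The database and table names are hypothetical.

```bash
# Hypothetical table test.meters; the filter deliberately matches no rows.
taos -s "SELECT COUNT(*) FROM test.meters WHERE ts > NOW + 1d;"
# countAlwaysReturnValue = 1  ->  one row with value 0
# countAlwaysReturnValue = 0  ->  empty result set
```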
@@ -307,6 +315,14 @@ The charset that takes effect is UTF-8.
| Meaning | All data files are stored in this directory |
| Default Value | /var/lib/taos |

### tempDir

| Attribute | Description |
| -------- | ------------------------------------------ |
| Applicable | Server only |
| Meaning | The directory where all temporary files generated during system operation are placed |
| Default | /tmp |

### minimalTmpDirGB

| Attribute | Description |
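For the query and directory parameters introduced above, the fragment below is a hypothetical `taos.cfg` excerpt; the values are examples only, not recommendations, and the file path follows the default mentioned elsewhere in these docs.

```shell
# Hypothetical /etc/taos/taos.cfg fragment illustrating the parameters above.
queryPolicy            1        # run queries on vnodes only
countAlwaysReturnValue 1        # count()/hyperloglog() return 0 instead of an empty row
tempDir                /tmp     # where temporary files are written
minimalTmpDirGB        1.0      # keep at least this much free space in tempDir
```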
@@ -336,89 +352,6 @@ The charset that takes effect is UTF-8.
| Value Range | 0-4096 |
| Default Value | 2x the CPU cores |

## Time Parameters

### statusInterval

| Attribute | Description |
| -------- | --------------------------- |
| Applicable | Server Only |
| Meaning | the interval of dnode reporting status to mnode |
| Unit | second |
| Value Range | 1-10 |
| Default Value | 1 |

### shellActivityTimer

| Attribute | Description |
| -------- | --------------------------------- |
| Applicable | Server and Client |
| Meaning | The interval for TDengine CLI to send heartbeat to mnode |
| Unit | second |
| Value Range | 1-120 |
| Default Value | 3 |
## Performance Optimization Parameters

### numOfCommitThreads

| Attribute | Description |
| -------- | ---------------------- |
| Applicable | Server Only |
| Meaning | Maximum number of threads for committing to disk |
| Default Value | |

## Compression Parameters

### compressMsgSize

| Attribute | Description |
| ------------- | ----------------------------------------------------------------------------------------------------- |
| Applicable | Server Only |
| Meaning | The threshold for message size above which messages are compressed. Set the value to 64330 bytes for good message compression. |
| Unit | bytes |
| Value Range | 0: compress all messages; >0: compress only messages larger than this value; -1: do not compress |
| Default Value | -1 |

### compressColData

| Attribute | Description |
| -------- | --------------------------------------------------------------------------------------- |
| Applicable | Server Only |
| Meaning | The threshold for the size of column data that triggers compression of the query result |
| Unit | bytes |
| Value Range | 0: always compress; >0: compress only when the size of any column data exceeds the threshold; -1: do not compress |
| Default Value | -1 |
| Note | available from version 2.3.0.0 |

## Continuous Query Parameters
### minSlidingTime

| Attribute | Description |
| ------------- | -------------------------------------------------------- |
| Applicable | Server Only |
| Meaning | Minimum sliding time of time window |
| Unit | millisecond or microsecond, depending on time precision |
| Value Range | 10-1000000 |
| Default Value | 10 |

### minIntervalTime

| Attribute | Description |
| ------------- | --------------------------- |
| Applicable | Server Only |
| Meaning | Minimum size of time window |
| Unit | millisecond |
| Value Range | 1-1000000 |
| Default Value | 10 |

:::info
To prevent system resources from being exhausted by multiple concurrent streams, a random delay is applied to each stream automatically. `maxFirstStreamCompDelay` is the maximum delay time before a continuous query is started the first time. `streamCompDelayRatio` is the ratio for calculating the delay time, with the size of the time window as the base. `maxStreamCompDelay` is the maximum delay time. The actual delay time is a random time no longer than `maxStreamCompDelay`. If a continuous query fails, `retryStreamCompDelay` is the delay before retrying it, also no longer than `maxStreamCompDelay`.

:::

## Log Parameters

### logDir
@@ -686,172 +619,60 @@ To prevent system resource from being exhausted by multiple concurrent streams,
| Value Range | 0: disable UDF; 1: enable UDF |
| Default Value | 1 |

## Parameter Comparison of TDengine 2.x and 3.0

| # | **Parameter** | **In 2.x** | **In 3.0** |
| --- | :-----------------: | --------------- | --------------- |
| 1 | firstEp | Yes | Yes |
|
||||
| 2 | secondEp | Yes | Yes |
|
||||
| 3 | fqdn | Yes | Yes |
|
||||
| 4 | serverPort | Yes | Yes |
|
||||
| 5 | maxShellConns | Yes | Yes |
|
||||
| 6 | monitor | Yes | Yes |
|
||||
| 7 | monitorFqdn | No | Yes |
|
||||
| 8 | monitorPort | No | Yes |
|
||||
| 9 | monitorInterval | Yes | Yes |
|
||||
| 10 | monitorMaxLogs | No | Yes |
|
||||
| 11 | monitorComp | No | Yes |
|
||||
| 12 | telemetryReporting | Yes | Yes |
|
||||
| 13 | telemetryInterval | No | Yes |
|
||||
| 14 | telemetryServer | No | Yes |
|
||||
| 15 | telemetryPort | No | Yes |
|
||||
| 16 | queryPolicy | No | Yes |
|
||||
| 17 | querySmaOptimize | No | Yes |
|
||||
| 18 | queryRsmaTolerance | No | Yes |
|
||||
| 19 | queryBufferSize | Yes | Yes |
|
||||
| 20 | maxNumOfDistinctRes | Yes | Yes |
|
||||
| 21 | minSlidingTime | Yes | Yes |
|
||||
| 22 | minIntervalTime | Yes | Yes |
|
||||
| 23 | countAlwaysReturnValue | Yes | Yes |
|
||||
| 24 | dataDir | Yes | Yes |
|
||||
| 25 | minimalDataDirGB | Yes | Yes |
|
||||
| 26 | supportVnodes | No | Yes |
|
||||
| 27 | tempDir | Yes | Yes |
|
||||
| 28 | minimalTmpDirGB | Yes | Yes |
|
||||
| 29 | compressMsgSize | Yes | Yes |
|
||||
| 30 | compressColData | Yes | Yes |
|
||||
| 31 | smlChildTableName | Yes | Yes |
|
||||
| 32 | smlTagName | Yes | Yes |
|
||||
| 33 | smlDataFormat | No | Yes |
|
||||
| 34 | statusInterval | Yes | Yes |
|
||||
| 35 | shellActivityTimer | Yes | Yes |
|
||||
| 36 | transPullupInterval | No | Yes |
|
||||
| 37 | mqRebalanceInterval | No | Yes |
|
||||
| 38 | ttlUnit | No | Yes |
|
||||
| 39 | ttlPushInterval | No | Yes |
|
||||
| 40 | numOfTaskQueueThreads | No | Yes |
|
||||
| 41 | numOfRpcThreads | No | Yes |
|
||||
| 42 | numOfCommitThreads | Yes | Yes |
|
||||
| 43 | numOfMnodeReadThreads | No | Yes |
|
||||
| 44 | numOfVnodeQueryThreads | No | Yes |
|
||||
| 45 | numOfVnodeStreamThreads | No | Yes |
|
||||
| 46 | numOfVnodeFetchThreads | No | Yes |
|
||||
| 47 | numOfVnodeRsmaThreads | No | Yes |
|
||||
| 48 | numOfQnodeQueryThreads | No | Yes |
|
||||
| 49 | numOfQnodeFetchThreads | No | Yes |
|
||||
| 50 | numOfSnodeSharedThreads | No | Yes |
|
||||
| 51 | numOfSnodeUniqueThreads | No | Yes |
|
||||
| 52 | rpcQueueMemoryAllowed | No | Yes |
|
||||
| 53 | logDir | Yes | Yes |
|
||||
| 54 | minimalLogDirGB | Yes | Yes |
|
||||
| 55 | numOfLogLines | Yes | Yes |
|
||||
| 56 | asyncLog | Yes | Yes |
|
||||
| 57 | logKeepDays | Yes | Yes |
|
||||
| 60 | debugFlag | Yes | Yes |
|
||||
| 61 | tmrDebugFlag | Yes | Yes |
|
||||
| 62 | uDebugFlag | Yes | Yes |
|
||||
| 63 | rpcDebugFlag | Yes | Yes |
|
||||
| 64 | jniDebugFlag | Yes | Yes |
|
||||
| 65 | qDebugFlag | Yes | Yes |
|
||||
| 66 | cDebugFlag | Yes | Yes |
|
||||
| 67 | dDebugFlag | Yes | Yes |
|
||||
| 68 | vDebugFlag | Yes | Yes |
|
||||
| 69 | mDebugFlag | Yes | Yes |
|
||||
| 70 | wDebugFlag | Yes | Yes |
|
||||
| 71 | sDebugFlag | Yes | Yes |
|
||||
| 72 | tsdbDebugFlag | Yes | Yes |
|
||||
| 73 | tqDebugFlag | No | Yes |
|
||||
| 74 | fsDebugFlag | Yes | Yes |
|
||||
| 75 | udfDebugFlag | No | Yes |
|
||||
| 76 | smaDebugFlag | No | Yes |
|
||||
| 77 | idxDebugFlag | No | Yes |
|
||||
| 78 | tdbDebugFlag | No | Yes |
|
||||
| 79 | metaDebugFlag | No | Yes |
|
||||
| 80 | timezone | Yes | Yes |
|
||||
| 81 | locale | Yes | Yes |
|
||||
| 82 | charset | Yes | Yes |
|
||||
| 83 | udf | Yes | Yes |
|
||||
| 84 | enableCoreFile | Yes | Yes |
|
||||
| 85 | arbitrator | Yes | No |
|
||||
| 86 | numOfThreadsPerCore | Yes | No |
|
||||
| 87 | numOfMnodes | Yes | No |
|
||||
| 88 | vnodeBak | Yes | No |
|
||||
| 89 | balance | Yes | No |
|
||||
| 90 | balanceInterval | Yes | No |
|
||||
| 91 | offlineThreshold | Yes | No |
|
||||
| 92 | role | Yes | No |
|
||||
| 93 | dnodeNopLoop | Yes | No |
|
||||
| 94 | keepTimeOffset | Yes | No |
|
||||
| 95 | rpcTimer | Yes | No |
|
||||
| 96 | rpcMaxTime | Yes | No |
|
||||
| 97 | rpcForceTcp | Yes | No |
|
||||
| 98 | tcpConnTimeout | Yes | No |
|
||||
| 99 | syncCheckInterval | Yes | No |
|
||||
| 100 | maxTmrCtrl | Yes | No |
|
||||
| 101 | monitorReplica | Yes | No |
|
||||
| 102 | smlTagNullName | Yes | No |
|
||||
| 103 | keepColumnName | Yes | No |
|
||||
| 104 | ratioOfQueryCores | Yes | No |
|
||||
| 105 | maxStreamCompDelay | Yes | No |
|
||||
| 106 | maxFirstStreamCompDelay | Yes | No |
|
||||
| 107 | retryStreamCompDelay | Yes | No |
|
||||
| 108 | streamCompDelayRatio | Yes | No |
|
||||
| 109 | maxVgroupsPerDb | Yes | No |
|
||||
| 110 | maxTablesPerVnode | Yes | No |
|
||||
| 111 | minTablesPerVnode | Yes | No |
|
||||
| 112 | tableIncStepPerVnode | Yes | No |
|
||||
| 113 | cache | Yes | No |
|
||||
| 114 | blocks | Yes | No |
|
||||
| 115 | days | Yes | No |
|
||||
| 116 | keep | Yes | No |
|
||||
| 117 | minRows | Yes | No |
|
||||
| 118 | maxRows | Yes | No |
|
||||
| 119 | quorum | Yes | No |
|
||||
| 120 | comp | Yes | No |
|
||||
| 121 | walLevel | Yes | No |
|
||||
| 122 | fsync | Yes | No |
|
||||
| 123 | replica | Yes | No |
|
||||
| 124 | partitions | Yes | No |
|
||||
| 125 | quorum | Yes | No |
|
||||
| 126 | update | Yes | No |
|
||||
| 127 | cachelast | Yes | No |
|
||||
| 128 | maxSQLLength | Yes | No |
|
||||
| 129 | maxWildCardsLength | Yes | No |
|
||||
| 130 | maxRegexStringLen | Yes | No |
|
||||
| 131 | maxNumOfOrderedRes | Yes | No |
|
||||
| 132 | maxConnections | Yes | No |
|
||||
| 133 | mnodeEqualVnodeNum | Yes | No |
|
||||
| 134 | http | Yes | No |
|
||||
| 135 | httpEnableRecordSql | Yes | No |
|
||||
| 136 | httpMaxThreads | Yes | No |
|
||||
| 137 | restfulRowLimit | Yes | No |
|
||||
| 138 | httpDbNameMandatory | Yes | No |
|
||||
| 139 | httpKeepAlive | Yes | No |
|
||||
| 140 | enableRecordSql | Yes | No |
|
||||
| 141 | maxBinaryDisplayWidth | Yes | No |
|
||||
| 142 | stream | Yes | No |
|
||||
| 143 | retrieveBlockingModel | Yes | No |
|
||||
| 144 | tsdbMetaCompactRatio | Yes | No |
|
||||
| 145 | defaultJSONStrType | Yes | No |
|
||||
| 146 | walFlushSize | Yes | No |
|
||||
| 147 | keepTimeOffset | Yes | No |
|
||||
| 148 | flowctrl | Yes | No |
|
||||
| 149 | slaveQuery | Yes | No |
|
||||
| 150 | adjustMaster | Yes | No |
|
||||
| 151 | topicBinaryLen | Yes | No |
|
||||
| 152 | telegrafUseFieldNum | Yes | No |
|
||||
| 153 | deadLockKillQuery | Yes | No |
|
||||
| 154 | clientMerge | Yes | No |
|
||||
| 155 | sdbDebugFlag | Yes | No |
|
||||
| 156 | odbcDebugFlag | Yes | No |
|
||||
| 157 | httpDebugFlag | Yes | No |
|
||||
| 158 | monDebugFlag | Yes | No |
|
||||
| 159 | cqDebugFlag | Yes | No |
|
||||
| 160 | shortcutFlag | Yes | No |
|
||||
| 161 | probeSeconds | Yes | No |
|
||||
| 162 | probeKillSeconds | Yes | No |
|
||||
| 163 | probeInterval | Yes | No |
|
||||
| 164 | lossyColumns | Yes | No |
|
||||
| 165 | fPrecision | Yes | No |
|
||||
| 166 | dPrecision | Yes | No |
|
||||
| 167 | maxRange | Yes | No |
|
||||
| 168 | range | Yes | No |
|
||||
|
||||
## 3.0 Parameters

| # | **Parameter** | **Applicable to 2.x** | **Applicable to 3.0** | Current behavior in 3.0 |
| --- | :---------------------: | --------------- | --------------- | ------------------------------------------------- |
| 1 | firstEp | Yes | Yes | |
|
||||
| 2 | secondEp | Yes | Yes | |
|
||||
| 3 | fqdn | Yes | Yes | |
|
||||
| 4 | serverPort | Yes | Yes | |
|
||||
| 5 | maxShellConns | Yes | Yes | |
|
||||
| 6 | monitor | Yes | Yes | |
|
||||
| 7 | monitorFqdn | No | Yes | |
|
||||
| 8 | monitorPort | No | Yes | |
|
||||
| 9 | monitorInterval | Yes | Yes | |
|
||||
| 10 | queryPolicy | No | Yes | |
|
||||
| 11 | querySmaOptimize | No | Yes | |
|
||||
| 12 | maxNumOfDistinctRes | Yes | Yes | |
|
||||
| 15 | countAlwaysReturnValue | Yes | Yes | |
|
||||
| 16 | dataDir | Yes | Yes | |
|
||||
| 17 | minimalDataDirGB | Yes | Yes | |
|
||||
| 18 | supportVnodes | No | Yes | |
|
||||
| 19 | tempDir | Yes | Yes | |
|
||||
| 20 | minimalTmpDirGB | Yes | Yes | |
|
||||
| 21 | smlChildTableName | Yes | Yes | |
|
||||
| 22 | smlTagName | Yes | Yes | |
|
||||
| 23 | smlDataFormat | No | Yes | |
|
||||
| 24 | statusInterval | Yes | Yes | |
|
||||
| 25 | logDir | Yes | Yes | |
|
||||
| 26 | minimalLogDirGB | Yes | Yes | |
|
||||
| 27 | numOfLogLines | Yes | Yes | |
|
||||
| 28 | asyncLog | Yes | Yes | |
|
||||
| 29 | logKeepDays | Yes | Yes | |
|
||||
| 30 | debugFlag | Yes | Yes | |
|
||||
| 31 | tmrDebugFlag | Yes | Yes | |
|
||||
| 32 | uDebugFlag | Yes | Yes | |
|
||||
| 33 | rpcDebugFlag | Yes | Yes | |
|
||||
| 34 | jniDebugFlag | Yes | Yes | |
|
||||
| 35 | qDebugFlag | Yes | Yes | |
|
||||
| 36 | cDebugFlag | Yes | Yes | |
|
||||
| 37 | dDebugFlag | Yes | Yes | |
|
||||
| 38 | vDebugFlag | Yes | Yes | |
|
||||
| 39 | mDebugFlag | Yes | Yes | |
|
||||
| 40 | wDebugFlag | Yes | Yes | |
|
||||
| 41 | sDebugFlag | Yes | Yes | |
|
||||
| 42 | tsdbDebugFlag | Yes | Yes | |
|
||||
| 43 | tqDebugFlag | No | Yes | |
|
||||
| 44 | fsDebugFlag | Yes | Yes | |
|
||||
| 45 | udfDebugFlag | No | Yes | |
|
||||
| 46 | smaDebugFlag | No | Yes | |
|
||||
| 47 | idxDebugFlag | No | Yes | |
|
||||
| 48 | tdbDebugFlag | No | Yes | |
|
||||
| 49 | metaDebugFlag | No | Yes | |
|
||||
| 50 | timezone | Yes | Yes | |
|
||||
| 51 | locale | Yes | Yes | |
|
||||
| 52 | charset | Yes | Yes | |
|
||||
| 53 | udf | Yes | Yes | |
|
||||
| 54 | enableCoreFile | Yes | Yes | |
|
||||
|
|
|
@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w

import Release from "/components/ReleaseV3";

## 3.0.2.0

<Release type="tdengine" version="3.0.2.0" />

## 3.0.1.8

<Release type="tdengine" version="3.0.1.8" />
@@ -10,6 +10,10 @@ For other historical version installers, please visit [here](https://www.taosdat

import Release from "/components/ReleaseV3";

## 2.3.2

<Release type="tools" version="2.3.2" />

## 2.3.0

<Release type="tools" version="2.3.0" />
@ -68,39 +68,38 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对
|
|||
### 安装连接器
|
||||
|
||||
<Tabs defaultValue="maven">
|
||||
<TabItem value="maven" label="使用 Maven 安装">
|
||||
<TabItem value="maven" label="使用 Maven 安装">
|
||||
|
||||
目前 taos-jdbcdriver 已经发布到 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver)
|
||||
仓库,且各大仓库都已同步。
|
||||
目前 taos-jdbcdriver 已经发布到 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 仓库,且各大仓库都已同步。
|
||||
|
||||
- [sonatype](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver)
|
||||
- [mvnrepository](https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver)
|
||||
- [maven.aliyun](https://maven.aliyun.com/mvn/search)
|
||||
- [sonatype](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver)
|
||||
- [mvnrepository](https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver)
|
||||
- [maven.aliyun](https://maven.aliyun.com/mvn/search)
|
||||
|
||||
Maven 项目中,在 pom.xml 中添加以下依赖:
|
||||
Maven 项目中,在 pom.xml 中添加以下依赖:
|
||||
|
||||
```xml-dtd
|
||||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.0.0</version>
|
||||
</dependency>
|
||||
```
|
||||
```xml-dtd
|
||||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.0.0</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="source" label="使用源码编译安装">
|
||||
</TabItem>
|
||||
<TabItem value="source" label="使用源码编译安装">
|
||||
|
||||
可以通过下载 TDengine 的源码,自己编译最新版本的 Java connector
|
||||
可以通过下载 TDengine 的源码,自己编译最新版本的 Java connector
|
||||
|
||||
```shell
|
||||
git clone https://github.com/taosdata/taos-connector-jdbc.git
|
||||
cd taos-connector-jdbc
|
||||
mvn clean install -Dmaven.test.skip=true
|
||||
```
|
||||
```shell
|
||||
git clone https://github.com/taosdata/taos-connector-jdbc.git
|
||||
cd taos-connector-jdbc
|
||||
mvn clean install -Dmaven.test.skip=true
|
||||
```
|
||||
|
||||
编译后,在 target 目录下会产生 taos-jdbcdriver-3.0.*-dist.jar 的 jar 包,并自动将编译的 jar 文件放在本地的 Maven 仓库中。
|
||||
编译后,在 target 目录下会产生 taos-jdbcdriver-3.0.*-dist.jar 的 jar 包,并自动将编译的 jar 文件放在本地的 Maven 仓库中。
|
||||
|
||||
</TabItem>
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
## 建立连接
|
||||
|
@ -111,125 +110,117 @@ TDengine 的 JDBC URL 规范格式为:
|
|||
对于建立连接,原生连接与 REST 连接有细微不同。
|
||||
|
||||
<Tabs defaultValue="rest">
|
||||
<TabItem value="native" label="原生连接">
|
||||
<TabItem value="native" label="原生连接">
|
||||
|
||||
```java
|
||||
Class.forName("com.taosdata.jdbc.TSDBDriver");
|
||||
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
|
||||
Connection conn = DriverManager.getConnection(jdbcUrl);
|
||||
```
|
||||
```java
|
||||
Class.forName("com.taosdata.jdbc.TSDBDriver");
|
||||
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
|
||||
Connection conn = DriverManager.getConnection(jdbcUrl);
|
||||
```
|
||||
|
||||
以上示例,使用了 JDBC 原生连接的 TSDBDriver,建立了到 hostname 为 taosdemo.com,端口为 6030(TDengine 的默认端口),数据库名为 test 的连接。这个 URL
|
||||
中指定用户名(user)为 root,密码(password)为 taosdata。
|
||||
以上示例,使用了 JDBC 原生连接的 TSDBDriver,建立了到 hostname 为 taosdemo.com,端口为 6030(TDengine 的默认端口),数据库名为 test 的连接。这个 URL
|
||||
中指定用户名(user)为 root,密码(password)为 taosdata。
|
||||
|
||||
**注意**:使用 JDBC 原生连接,taos-jdbcdriver 需要依赖客户端驱动(Linux 下是 libtaos.so;Windows 下是 taos.dll;macOS 下是 libtaos.dylib)。
|
||||
**注意**:使用 JDBC 原生连接,taos-jdbcdriver 需要依赖客户端驱动(Linux 下是 libtaos.so;Windows 下是 taos.dll;macOS 下是 libtaos.dylib)。
|
||||
|
||||
url 中的配置参数如下:
|
||||
url 中的配置参数如下:
|
||||
|
||||
- user:登录 TDengine 用户名,默认值 'root'。
|
||||
- password:用户登录密码,默认值 'taosdata'。
|
||||
- cfgdir:客户端配置文件目录路径,Linux OS 上默认值 `/etc/taos`,Windows OS 上默认值 `C:/TDengine/cfg`。
|
||||
- charset:客户端使用的字符集,默认值为系统字符集。
|
||||
- locale:客户端语言环境,默认值系统当前 locale。
|
||||
- timezone:客户端使用的时区,默认值为系统当前时区。
|
||||
- batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:true。开启批量拉取同时获取一批数据在查询数据量较大时批量拉取可以有效的提升查询性能。
|
||||
- batchErrorIgnore:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败将继续执行下面的 SQL。false:不再执行失败 SQL
|
||||
后的任何语句。默认值为:false。
|
||||
- user:登录 TDengine 用户名,默认值 'root'。
|
||||
- password:用户登录密码,默认值 'taosdata'。
|
||||
- cfgdir:客户端配置文件目录路径,Linux OS 上默认值 `/etc/taos`,Windows OS 上默认值 `C:/TDengine/cfg`。
|
||||
- charset:客户端使用的字符集,默认值为系统字符集。
|
||||
- locale:客户端语言环境,默认值系统当前 locale。
|
||||
- timezone:客户端使用的时区,默认值为系统当前时区。
|
||||
- batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:true。开启批量拉取同时获取一批数据在查询数据量较大时批量拉取可以有效的提升查询性能。
|
||||
- batchErrorIgnore:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败将继续执行下面的 SQL。false:不再执行失败 SQL 后的任何语句。默认值为:false。
|
||||
|
||||
JDBC 原生连接的使用请参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1955.html)。
|
||||
JDBC 原生连接的使用请参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1955.html)。
|
||||
|
||||
**使用 TDengine 客户端驱动配置文件建立连接 **
|
||||
**使用 TDengine 客户端驱动配置文件建立连接 **
|
||||
|
||||
当使用 JDBC 原生连接连接 TDengine 集群时,可以使用 TDengine 客户端驱动配置文件,在配置文件中指定集群的 firstEp、secondEp 等参数。如下所示:
|
||||
当使用 JDBC 原生连接连接 TDengine 集群时,可以使用 TDengine 客户端驱动配置文件,在配置文件中指定集群的 firstEp、secondEp 等参数。如下所示:
|
||||
|
||||
1. 在 Java 应用中不指定 hostname 和 port
|
||||
1. 在 Java 应用中不指定 hostname 和 port
|
||||
|
||||
```java
|
||||
public Connection getConn() throws Exception{
|
||||
Class.forName("com.taosdata.jdbc.TSDBDriver");
|
||||
String jdbcUrl = "jdbc:TAOS://:/test?user=root&password=taosdata";
|
||||
Properties connProps = new Properties();
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
|
||||
Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
|
||||
return conn;
|
||||
}
|
||||
```
|
||||
```java
|
||||
public Connection getConn() throws Exception{
|
||||
Class.forName("com.taosdata.jdbc.TSDBDriver");
|
||||
String jdbcUrl = "jdbc:TAOS://:/test?user=root&password=taosdata";
|
||||
Properties connProps = new Properties();
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
|
||||
Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
|
||||
return conn;
|
||||
}
|
||||
```
|
||||
|
||||
2. 在配置文件中指定 firstEp 和 secondEp
|
||||
2. 在配置文件中指定 firstEp 和 secondEp
|
||||
|
||||
```shell
|
||||
# first fully qualified domain name (FQDN) for TDengine system
|
||||
firstEp cluster_node1:6030
|
||||
```shell
|
||||
# first fully qualified domain name (FQDN) for TDengine system
|
||||
firstEp cluster_node1:6030
|
||||
|
||||
# second fully qualified domain name (FQDN) for TDengine system, for cluster only
|
||||
secondEp cluster_node2:6030
|
||||
# second fully qualified domain name (FQDN) for TDengine system, for cluster only
|
||||
secondEp cluster_node2:6030
|
||||
|
||||
# default system charset
|
||||
# charset UTF-8
|
||||
# default system charset
|
||||
# charset UTF-8
|
||||
|
||||
# system locale
|
||||
# locale en_US.UTF-8
|
||||
```
|
||||
# system locale
|
||||
# locale en_US.UTF-8
|
||||
```
|
||||
|
||||
以上示例,jdbc 会使用客户端的配置文件,建立到 hostname 为 cluster_node1、端口为 6030、数据库名为 test 的连接。当集群中 firstEp 节点失效时,JDBC 会尝试使用 secondEp
|
||||
连接集群。
|
||||
以上示例,jdbc 会使用客户端的配置文件,建立到 hostname 为 cluster_node1、端口为 6030、数据库名为 test 的连接。当集群中 firstEp 节点失效时,JDBC 会尝试使用 secondEp 连接集群。
|
||||
|
||||
TDengine 中,只要保证 firstEp 和 secondEp 中一个节点有效,就可以正常建立到集群的连接。
|
||||
TDengine 中,只要保证 firstEp 和 secondEp 中一个节点有效,就可以正常建立到集群的连接。
|
||||
|
||||
> **注意**:这里的配置文件指的是调用 JDBC Connector 的应用程序所在机器上的配置文件,Linux OS 上默认值 /etc/taos/taos.cfg ,Windows OS 上默认值
|
||||
C://TDengine/cfg/taos.cfg。
|
||||
> **注意**:这里的配置文件指的是调用 JDBC Connector 的应用程序所在机器上的配置文件,Linux OS 上默认值 /etc/taos/taos.cfg ,Windows OS 上默认值 C://TDengine/cfg/taos.cfg。
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="rest" label="REST 连接">
|
||||
</TabItem>
|
||||
<TabItem value="rest" label="REST 连接">
|
||||
|
||||
```java
|
||||
Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
|
||||
String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
|
||||
Connection conn = DriverManager.getConnection(jdbcUrl);
|
||||
```
|
||||
```java
|
||||
Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
|
||||
String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
|
||||
Connection conn = DriverManager.getConnection(jdbcUrl);
|
||||
```
|
||||
|
||||
以上示例,使用了 JDBC REST 连接的 RestfulDriver,建立了到 hostname 为 taosdemo.com,端口为 6041,数据库名为 test 的连接。这个 URL 中指定用户名(user)为
|
||||
root,密码(password)为 taosdata。
|
||||
以上示例,使用了 JDBC REST 连接的 RestfulDriver,建立了到 hostname 为 taosdemo.com,端口为 6041,数据库名为 test 的连接。这个 URL 中指定用户名(user)为 root,密码(password)为 taosdata。
|
||||
|
||||
使用 JDBC REST 连接,不需要依赖客户端驱动。与 JDBC 原生连接相比,仅需要:
|
||||
使用 JDBC REST 连接,不需要依赖客户端驱动。与 JDBC 原生连接相比,仅需要:
|
||||
|
||||
1. driverClass 指定为“com.taosdata.jdbc.rs.RestfulDriver”;
|
||||
2. jdbcUrl 以“jdbc:TAOS-RS://”开头;
|
||||
3. 使用 6041 作为连接端口。
|
||||
1. driverClass 指定为“com.taosdata.jdbc.rs.RestfulDriver”;
|
||||
2. jdbcUrl 以“jdbc:TAOS-RS://”开头;
|
||||
3. 使用 6041 作为连接端口。
|
||||
|
||||
url 中的配置参数如下:
|
||||
url 中的配置参数如下:
|
||||
|
||||
- user:登录 TDengine 用户名,默认值 'root'。
|
||||
- password:用户登录密码,默认值 'taosdata'。
|
||||
- batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。逐行拉取结果集使用 HTTP 方式进行数据传输。JDBC REST
|
||||
连接支持批量拉取数据功能。taos-jdbcdriver 与 TDengine 之间通过 WebSocket 连接进行数据传输。相较于 HTTP,WebSocket 可以使 JDBC REST 连接支持大数据量查询,并提升查询性能。
|
||||
- charset: 当开启批量拉取数据时,指定解析字符串数据的字符集。
|
||||
- batchErrorIgnore:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败,继续执行下面的 SQL 了。false:不再执行失败 SQL
|
||||
后的任何语句。默认值为:false。
|
||||
- httpConnectTimeout: 连接超时时间,单位 ms, 默认值为 5000。
|
||||
- httpSocketTimeout: socket 超时时间,单位 ms,默认值为 5000。仅在 batchfetch 设置为 false 时生效。
|
||||
- messageWaitTimeout: 消息超时时间, 单位 ms, 默认值为 3000。 仅在 batchfetch 设置为 true 时生效。
|
||||
- useSSL: 连接中是否使用 SSL。
|
||||
- user:登录 TDengine 用户名,默认值 'root'。
|
||||
- password:用户登录密码,默认值 'taosdata'。
|
||||
- batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。逐行拉取结果集使用 HTTP 方式进行数据传输。JDBC REST 连接支持批量拉取数据功能。taos-jdbcdriver 与 TDengine 之间通过 WebSocket 连接进行数据传输。相较于 HTTP,WebSocket 可以使 JDBC REST 连接支持大数据量查询,并提升查询性能。
|
||||
- charset: 当开启批量拉取数据时,指定解析字符串数据的字符集。
|
||||
- batchErrorIgnore:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败,继续执行下面的 SQL 了。false:不再执行失败 SQL 后的任何语句。默认值为:false。
|
||||
- httpConnectTimeout: 连接超时时间,单位 ms, 默认值为 5000。
|
||||
- httpSocketTimeout: socket 超时时间,单位 ms,默认值为 5000。仅在 batchfetch 设置为 false 时生效。
|
||||
- messageWaitTimeout: 消息超时时间, 单位 ms, 默认值为 3000。 仅在 batchfetch 设置为 true 时生效。
|
||||
- useSSL: 连接中是否使用 SSL。
|
||||
|
||||
**注意**:部分配置项(比如:locale、timezone)在 REST 连接中不生效。
|
||||
**注意**:部分配置项(比如:locale、timezone)在 REST 连接中不生效。
|
||||
|
||||
:::note
|
||||
:::note
|
||||
|
||||
- 与原生连接方式不同,REST 接口是无状态的。在使用 JDBC REST 连接时,需要在 SQL 中指定表、超级表的数据库名称。例如:
|
||||
- 与原生连接方式不同,REST 接口是无状态的。在使用 JDBC REST 连接时,需要在 SQL 中指定表、超级表的数据库名称。例如:
|
||||
|
||||
```sql
|
||||
INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6);
|
||||
```
|
||||
```sql
|
||||
INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6);
|
||||
```
|
||||
|
||||
- 如果在 url 中指定了 dbname,那么,JDBC REST 连接会默认使用/rest/sql/dbname 作为 restful 请求的 url,在 SQL 中不需要指定 dbname。例如:url 为
|
||||
jdbc:TAOS-RS://127.0.0.1:6041/test,那么,可以执行 sql:insert into t1 using weather(ts, temperature)
|
||||
tags('California.SanFrancisco') values(now, 24.6);
|
||||
- 如果在 url 中指定了 dbname,那么,JDBC REST 连接会默认使用/rest/sql/dbname 作为 restful 请求的 url,在 SQL 中不需要指定 dbname。例如:url 为 jdbc:TAOS-RS://127.0.0.1:6041/test,那么,可以执行 sql:insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6);
|
||||
|
||||
:::
|
||||
:::
|
||||
|
||||
</TabItem>
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
### 指定 URL 和 Properties 获取连接
|
||||
|
@@ -890,8 +881,10 @@ public static void main(String[] args) throws Exception {

| taos-jdbcdriver 版本 | 主要变化 |
| :------------------: | :----------------------------: |
| 3.0.3 | 修复 REST 连接在 jdk17+ 版本时间戳解析错误问题 |
| 3.0.1 - 3.0.2 | 修复一些情况下结果集数据解析错误的问题。3.0.1 在 JDK 11 环境编译,JDK 8 环境下建议使用 3.0.2 版本 |
| 3.0.0 | 支持 TDengine 3.0 |
| 2.0.42 | 修正 WebSocket 连接中 wasNull 接口返回值 |
| 2.0.41 | 修正 REST 连接中用户名和密码转码方式 |
| 2.0.39 - 2.0.40 | 增加 REST 连接/请求 超时设置 |
| 2.0.38 | JDBC REST 连接增加批量拉取功能 |
@@ -928,7 +921,7 @@ public static void main(String[] args) throws Exception {

**原因**:taos-jdbcdriver 3.0.1 版本需要在 JDK 11+ 环境使用。

**解决方法**: 更换 taos-jdbcdriver 3.0.2 版本。
**解决方法**: 更换 taos-jdbcdriver 3.0.2+ 版本。

其它问题请参考 [FAQ](../../../train-faq/faq)
@@ -59,6 +59,7 @@ Usage of taosAdapter:
--collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045)
--collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root")
--collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10)
--collectd.ttl int collectd data ttl. Env "TAOS_ADAPTER_COLLECTD_TTL" (default 0, means no ttl)
-c, --config string config path default /etc/taos/taosadapter.toml
--cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true)
--cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials"
@@ -100,6 +101,7 @@ Usage of taosAdapter:
--node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s)
--node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100])
--node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root")
--node_exporter.ttl int node_exporter data ttl. Env "TAOS_ADAPTER_NODE_EXPORTER_TTL"(default 0, means no ttl)
--opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true)
--opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1)
--opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb])
@@ -110,6 +112,7 @@ Usage of taosAdapter:
--opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049])
--opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE"
--opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
--opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL"(default 0, means no ttl)
--pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT" (default 1h0m0s)
--pool.maxConnect int max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT" (default 4000)
--pool.maxIdle int max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE" (default 4000)
@@ -131,6 +134,7 @@ Usage of taosAdapter:
--statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE"
--statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root")
--statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10)
--statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL" (default 0, means no ttl)
--taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE"
--version Print the version and exit
```
@@ -195,6 +199,7 @@ AllowWebSockets
- `precision` TDengine 使用的时间精度
- `u` TDengine 用户名
- `p` TDengine 密码
- `ttl` 自动创建的子表生命周期,以子表的第一条数据的 TTL 参数为准,不可更新。更多信息请参考[创建表文档](taos-sql/table/#创建表)的 TTL 参数。

注意: 目前不支持 InfluxDB 的 token 验证方式,仅支持 Basic 验证和查询参数验证。
示例: curl --request POST http://127.0.0.1:6041/influxdb/v1/write?db=test --user "root:taosdata" --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"
@@ -134,15 +134,6 @@ taos --dump-config
| 取值范围 | 1-200000 |
| 缺省值 | 30 |

### telemetryReporting

| 属性 | 说明 |
| -------- | ---------------------------------------- |
| 适用范围 | 仅服务端适用 |
| 含义 | 是否允许 TDengine 采集和上报基本使用信息 |
| 取值范围 | 0:不允许 1:允许 |
| 缺省值 | 1 |

## 查询相关

### queryPolicy
@@ -191,6 +182,15 @@ taos --dump-config
| 取值范围 | 0 表示包含函数名,1 表示不包含函数名。 |
| 缺省值 | 0 |

### countAlwaysReturnValue

| 属性 | 说明 |
| -------- | -------------------------------- |
| 适用范围 | 仅服务端适用 |
| 含义 | count/hyperloglog函数在数据为空或者NULL的情况下是否返回值 |
| 取值范围 | 0:返回空行,1:返回 0 |
| 缺省值 | 1 |

## 区域相关

### timezone
@@ -306,12 +306,20 @@ charset 的有效值是 UTF-8。
| 含义 | 数据文件目录,所有的数据文件都将写入该目录 |
| 缺省值 | /var/lib/taos |

### tempDir

| 属性 | 说明 |
| -------- | ------------------------------------------ |
| 适用范围 | 仅服务端适用 |
| 含义 | 该参数指定所有系统运行过程中的临时文件生成的目录 |
| 缺省值 | /tmp |

### minimalTmpDirGB

| 属性 | 说明 |
| -------- | ------------------------------------------------ |
| 适用范围 | 服务端和客户端均适用 |
| 含义 | 当日志文件夹的磁盘大小小于该值时,停止写临时文件 |
| 含义 | tempDir 所指定的临时文件目录所需要保留的最小空间 |
| 单位 | GB |
| 缺省值 | 1.0 |
@@ -320,7 +328,7 @@ charset 的有效值是 UTF-8。
| 属性 | 说明 |
| -------- | ------------------------------------------------ |
| 适用范围 | 仅服务端适用 |
| 含义 | 当日志文件夹的磁盘大小小于该值时,停止写时序数据 |
| 含义 | dataDir 指定的时序数据存储目录所需要保留的最小空间 |
| 单位 | GB |
| 缺省值 | 2.0 |
@@ -335,27 +343,7 @@ charset 的有效值是 UTF-8。
| 取值范围 | 0-4096 |
| 缺省值 | CPU 核数的 2 倍 |

## 时间相关

### statusInterval

| 属性 | 说明 |
| -------- | --------------------------- |
| 适用范围 | 仅服务端适用 |
| 含义 | dnode 向 mnode 报告状态间隔 |
| 单位 | 秒 |
| 取值范围 | 1-10 |
| 缺省值 | 1 |

### shellActivityTimer

| 属性 | 说明 |
| -------- | --------------------------------- |
| 适用范围 | 服务端和客户端均适用 |
| 含义 | shell 客户端向 mnode 发送心跳间隔 |
| 单位 | 秒 |
| 取值范围 | 1-120 |
| 缺省值 | 3 |

## 时间相关

## 性能调优
@@ -367,28 +355,6 @@ charset 的有效值是 UTF-8。
| 含义 | 设置写入线程的最大数量 |
| 缺省值 | |

## 压缩相关

### compressMsgSize

| 属性 | 说明 |
| -------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
| 适用范围 | 仅服务端适用 |
| 含义 | 客户端与服务器之间进行消息通讯过程中,对通讯的消息进行压缩的阈值。如果要压缩消息,建议设置为 64330 字节,即大于 64330 字节的消息体才进行压缩。 |
| 单位 | bytes |
| 取值范围 | `0` 表示对所有的消息均进行压缩 >0: 超过该值的消息才进行压缩 -1: 不压缩 |
| 缺省值 | -1 |

### compressColData

| 属性 | 说明 |
| -------- | --------------------------------------------------------------------------------------- |
| 适用范围 | 仅服务端适用 |
| 含义 | 客户端与服务器之间进行消息通讯过程中,对服务器端查询结果进行列压缩的阈值。 |
| 单位 | bytes |
| 取值范围 | 0: 对所有查询结果均进行压缩 >0: 查询结果中任意列大小超过该值的消息才进行压缩 -1: 不压缩 |
| 缺省值 | -1 |

## 日志相关

### logDir
@@ -613,7 +579,7 @@ charset 的有效值是 UTF-8。
| 属性 | 说明 |
| -------- | ------------------------- |
| 适用范围 | 仅客户端适用 |
| 含义 | schemaless 自定义的子表名 |
| 含义 | schemaless 自定义的子表名的 key |
| 类型 | 字符串 |
| 缺省值 | 无 |
@@ -656,12 +622,7 @@ charset 的有效值是 UTF-8。
| 取值范围 | 0: 不启动;1:启动 |
| 缺省值 | 1 |

## 2.X 与 3.0 配置参数对比

:::note
对于 2.x 版本中适用但在 3.0 版本中废弃的参数,其当前行为会有特别说明

:::

## 3.0 中有效的配置参数列表

| # | **参数** | **适用于 2.X** | **适用于 3.0** | 3.0 版本的当前行为 |
| --- | :---------------------: | --------------- | --------------- | ------------------------------------------------- |
@ -674,159 +635,134 @@ charset 的有效值是 UTF-8。
|
|||
| 7 | monitorFqdn | 否 | 是 | |
|
||||
| 8 | monitorPort | 否 | 是 | |
|
||||
| 9 | monitorInterval | 是 | 是 | |
|
||||
| 10 | monitorMaxLogs | 否 | 是 | |
|
||||
| 11 | monitorComp | 否 | 是 | |
|
||||
| 12 | telemetryReporting | 是 | 是 | |
|
||||
| 13 | telemetryInterval | 否 | 是 | |
|
||||
| 14 | telemetryServer | 否 | 是 | |
|
||||
| 15 | telemetryPort | 否 | 是 | |
|
||||
| 16 | queryPolicy | 否 | 是 | |
|
||||
| 17 | querySmaOptimize | 否 | 是 | |
|
||||
| 18 | queryRsmaTolerance | 否 | 是 | |
|
||||
| 19 | queryBufferSize | 是 | 是 | |
|
||||
| 20 | maxNumOfDistinctRes | 是 | 是 | |
|
||||
| 21 | minSlidingTime | 是 | 是 | |
|
||||
| 22 | minIntervalTime | 是 | 是 | |
|
||||
| 23 | countAlwaysReturnValue | 是 | 是 | |
|
||||
| 24 | dataDir | 是 | 是 | |
|
||||
| 25 | minimalDataDirGB | 是 | 是 | |
|
||||
| 26 | supportVnodes | 否 | 是 | |
|
||||
| 27 | tempDir | 是 | 是 | |
|
||||
| 28 | minimalTmpDirGB | 是 | 是 | |
|
||||
| 29 | compressMsgSize | 是 | 是 | |
|
||||
| 30 | compressColData | 是 | 是 | |
|
||||
| 31 | smlChildTableName | 是 | 是 | |
|
||||
| 32 | smlTagName | 是 | 是 | |
|
||||
| 33 | smlDataFormat | 否 | 是 | |
|
||||
| 34 | statusInterval | 是 | 是 | |
|
||||
| 35 | shellActivityTimer | 是 | 是 | |
|
||||
| 36 | transPullupInterval | 否 | 是 | |
|
||||
| 37 | mqRebalanceInterval | 否 | 是 | |
|
||||
| 38 | ttlUnit | 否 | 是 | |
|
||||
| 39 | ttlPushInterval | 否 | 是 | |
|
||||
| 40 | numOfTaskQueueThreads | 否 | 是 | |
|
||||
| 41 | numOfRpcThreads | 否 | 是 | |
|
||||
| 42 | numOfCommitThreads | 是 | 是 | |
|
||||
| 43 | numOfMnodeReadThreads | 否 | 是 | |
|
||||
| 44 | numOfVnodeQueryThreads | 否 | 是 | |
|
||||
| 45 | numOfVnodeStreamThreads | 否 | 是 | |
|
||||
| 46 | numOfVnodeFetchThreads | 否 | 是 | |
|
||||
| 47 | numOfVnodeRsmaThreads | 否 | 是 | |
|
||||
| 48 | numOfQnodeQueryThreads | 否 | 是 | |
|
||||
| 49 | numOfQnodeFetchThreads | 否 | 是 | |
|
||||
| 50 | numOfSnodeSharedThreads | 否 | 是 | |
|
||||
| 51 | numOfSnodeUniqueThreads | 否 | 是 | |
|
||||
| 52 | rpcQueueMemoryAllowed | 否 | 是 | |
|
||||
| 53 | logDir | 是 | 是 | |
|
||||
| 54 | minimalLogDirGB | 是 | 是 | |
|
||||
| 55 | numOfLogLines | 是 | 是 | |
|
||||
| 56 | asyncLog | 是 | 是 | |
|
||||
| 57 | logKeepDays | 是 | 是 | |
|
||||
| 60 | debugFlag | 是 | 是 | |
|
||||
| 61 | tmrDebugFlag | 是 | 是 | |
|
||||
| 62 | uDebugFlag | 是 | 是 | |
|
||||
| 63 | rpcDebugFlag | 是 | 是 | |
|
||||
| 64 | jniDebugFlag | 是 | 是 | |
|
||||
| 65 | qDebugFlag | 是 | 是 | |
|
||||
| 66 | cDebugFlag | 是 | 是 | |
|
||||
| 67 | dDebugFlag | 是 | 是 | |
|
||||
| 68 | vDebugFlag | 是 | 是 | |
|
||||
| 69 | mDebugFlag | 是 | 是 | |
|
||||
| 70 | wDebugFlag | 是 | 是 | |
|
||||
| 71 | sDebugFlag | 是 | 是 | |
|
||||
| 72 | tsdbDebugFlag | 是 | 是 | |
|
||||
| 73 | tqDebugFlag | 否 | 是 | |
|
||||
| 74 | fsDebugFlag | 是 | 是 | |
|
||||
| 75 | udfDebugFlag | 否 | 是 | |
|
||||
| 76 | smaDebugFlag | 否 | 是 | |
|
||||
| 77 | idxDebugFlag | 否 | 是 | |
|
||||
| 78 | tdbDebugFlag | 否 | 是 | |
|
||||
| 79 | metaDebugFlag | 否 | 是 | |
|
||||
| 80 | timezone | 是 | 是 | |
|
||||
| 81 | locale | 是 | 是 | |
|
||||
| 82 | charset | 是 | 是 | |
|
||||
| 83 | udf | 是 | 是 | |
|
||||
| 84 | enableCoreFile | 是 | 是 | |
|
||||
| 85 | arbitrator | 是 | 否 | 通过 RAFT 协议选主 |
|
||||
| 86 | numOfThreadsPerCore | 是 | 否 | 有其它参数设置多种线程池的大小 |
|
||||
| 87 | numOfMnodes | 是 | 否 | 通过 create mnode 命令动态创建 mnode |
|
||||
| 88 | vnodeBak | 是 | 否 | 3.0 行为未知 |
|
||||
| 89 | balance | 是 | 否 | 负载均衡功能由 split/merge vgroups 实现 |
|
||||
| 90 | balanceInterval | 是 | 否 | 随着 balance 参数失效 |
|
||||
| 91 | offlineThreshold | 是 | 否 | 3.0 行为未知 |
|
||||
| 92 | role | 是 | 否 | 由 supportVnode 决定是否能够创建 |
|
||||
| 93 | dnodeNopLoop | 是 | 否 | 2.6 文档中未找到此参数 |
|
||||
| 94 | keepTimeOffset | 是 | 否 | 2.6 文档中未找到此参数 |
|
||||
| 95 | rpcTimer | 是 | 否 | 3.0 行为未知 |
|
||||
| 96 | rpcMaxTime | 是 | 否 | 3.0 行为未知 |
|
||||
| 97 | rpcForceTcp | 是 | 否 | 默认为 TCP |
|
||||
| 98 | tcpConnTimeout | 是 | 否 | 3.0 行为未知 |
|
||||
| 99 | syncCheckInterval | 是 | 否 | 3.0 行为未知 |
|
||||
| 100 | maxTmrCtrl | 是 | 否 | 3.0 行为未知 |
|
||||
| 101 | monitorReplica | 是 | 否 | 由 RAFT 协议管理多副本 |
|
||||
| 102 | smlTagNullName | 是 | 否 | 3.0 行为未知 |
|
||||
| 103 | keepColumnName | 是 | 否 | 3.0 行为未知 |
|
||||
| 104 | ratioOfQueryCores | 是 | 否 | 由 线程池 相关配置参数决定 |
|
||||
| 105 | maxStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
||||
| 106 | maxFirstStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
||||
| 107 | retryStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
||||
| 108 | streamCompDelayRatio | 是 | 否 | 3.0 行为未知 |
|
||||
| 109 | maxVgroupsPerDb | 是 | 否 | 由 create db 的参数 vgroups 指定实际 vgroups 数量 |
|
||||
| 110 | maxTablesPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
|
||||
| 111 | minTablesPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
|
||||
| 112 | tableIncStepPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
|
||||
| 113 | cache | 是 | 否 | 由 buffer 代替 cache\*blocks |
|
||||
| 114 | blocks | 是 | 否 | 由 buffer 代替 cache\*blocks |
|
||||
| 115 | days | 是 | 否 | 由 create db 的参数 duration 取代 |
|
||||
| 116 | keep | 是 | 否 | 由 create db 的参数 keep 取代 |
|
||||
| 117 | minRows | 是 | 否 | 由 create db 的参数 minRows 取代 |
|
||||
| 118 | maxRows | 是 | 否 | 由 create db 的参数 maxRows 取代 |
|
||||
| 119 | quorum | 是 | 否 | 由 RAFT 协议决定 |
|
||||
| 120 | comp | 是 | 否 | 由 create db 的参数 comp 取代 |
|
||||
| 121 | walLevel | 是 | 否 | 由 create db 的参数 wal_level 取代 |
|
||||
| 122 | fsync | 是 | 否 | 由 create db 的参数 wal_fsync_period 取代 |
|
||||
| 123 | replica | 是 | 否 | 由 create db 的参数 replica 取代 |
|
||||
| 124 | partitions | 是 | 否 | 3.0 行为未知 |
|
||||
| 125 | update | 是 | 否 | 允许更新部分列 |
|
||||
| 126 | cachelast | 是 | 否 | 由 create db 的参数 cacheModel 取代 |
|
||||
| 127 | maxSQLLength | 是 | 否 | SQL 上限为 1MB,无需参数控制 |
|
||||
| 128 | maxWildCardsLength | 是 | 否 | 3.0 行为未知 |
|
||||
| 129 | maxRegexStringLen | 是 | 否 | 3.0 行为未知 |
|
||||
| 130 | maxNumOfOrderedRes | 是 | 否 | 3.0 行为未知 |
|
||||
| 131 | maxConnections | 是 | 否 | 取决于系统配置和系统处理能力,详见后面的 Note |
|
||||
| 132 | mnodeEqualVnodeNum | 是 | 否 | 3.0 行为未知 |
|
||||
| 133 | http | 是 | 否 | http 服务由 taosAdapter 提供 |
|
||||
| 134 | httpEnableRecordSql | 是 | 否 | taosd 不提供 http 服务 |
|
||||
| 135 | httpMaxThreads | 是 | 否 | taosd 不提供 http 服务 |
|
||||
| 136 | restfulRowLimit | 是 | 否 | taosd 不提供 http 服务 |
|
||||
| 137 | httpDbNameMandatory | 是 | 否 | taosd 不提供 http 服务 |
|
||||
| 138 | httpKeepAlive | 是 | 否 | taosd 不提供 http 服务 |
|
||||
| 139 | enableRecordSql | 是 | 否 | 3.0 行为未知 |
|
||||
| 140 | maxBinaryDisplayWidth | 是 | 否 | 3.0 行为未知 |
|
||||
| 141 | stream | 是 | 否 | 默认启用连续查询 |
|
||||
| 142 | retrieveBlockingModel | 是 | 否 | 3.0 行为未知 |
|
||||
| 143 | tsdbMetaCompactRatio | 是 | 否 | 3.0 行为未知 |
|
||||
| 144 | defaultJSONStrType | 是 | 否 | 3.0 行为未知 |
|
||||
| 145 | walFlushSize | 是 | 否 | 3.0 行为未知 |
|
||||
| 146 | keepTimeOffset | 是 | 否 | 3.0 行为未知 |
|
||||
| 147 | flowctrl | 是 | 否 | 3.0 行为未知 |
|
||||
| 148 | slaveQuery | 是 | 否 | 3.0 行为未知: slave vnode 是否能够处理查询? |
|
||||
| 149 | adjustMaster | 是 | 否 | 3.0 行为未知 |
|
||||
| 150 | topicBinaryLen | 是 | 否 | 3.0 行为未知 |
|
||||
| 151 | telegrafUseFieldNum | 是 | 否 | 3.0 行为未知 |
|
||||
| 152 | deadLockKillQuery | 是 | 否 | 3.0 行为未知 |
|
||||
| 153 | clientMerge | 是 | 否 | 3.0 行为未知 |
|
||||
| 154 | sdbDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||
| 155 | odbcDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||
| 156 | httpDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||
| 157 | monDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||
| 158 | cqDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||
| 159 | shortcutFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||
| 160 | probeSeconds | 是 | 否 | 3.0 行为未知 |
|
||||
| 161 | probeKillSeconds | 是 | 否 | 3.0 行为未知 |
|
||||
| 162 | probeInterval | 是 | 否 | 3.0 行为未知 |
|
||||
| 163 | lossyColumns | 是 | 否 | 3.0 行为未知 |
|
||||
| 164 | fPrecision | 是 | 否 | 3.0 行为未知 |
|
||||
| 165 | dPrecision | 是 | 否 | 3.0 行为未知 |
|
||||
| 166 | maxRange | 是 | 否 | 3.0 行为未知 |
|
||||
| 167 | range | 是 | 否 | 3.0 行为未知 |
|
||||
| 10 | queryPolicy | 否 | 是 | |
|
||||
| 11 | querySmaOptimize | 否 | 是 | |
|
||||
| 12 | maxNumOfDistinctRes | 是 | 是 | |
|
||||
| 15 | countAlwaysReturnValue | 是 | 是 | |
|
||||
| 16 | dataDir | 是 | 是 | |
|
||||
| 17 | minimalDataDirGB | 是 | 是 | |
|
||||
| 18 | supportVnodes | 否 | 是 | |
|
||||
| 19 | tempDir | 是 | 是 | |
|
||||
| 20 | minimalTmpDirGB | 是 | 是 | |
|
||||
| 21 | smlChildTableName | 是 | 是 | |
|
||||
| 22 | smlTagName | 是 | 是 | |
|
||||
| 23 | smlDataFormat | 否 | 是 | |
|
||||
| 24 | statusInterval | 是 | 是 | |
|
||||
| 25 | logDir | 是 | 是 | |
|
||||
| 26 | minimalLogDirGB | 是 | 是 | |
|
||||
| 27 | numOfLogLines | 是 | 是 | |
|
||||
| 28 | asyncLog | 是 | 是 | |
|
||||
| 29 | logKeepDays | 是 | 是 | |
|
||||
| 30 | debugFlag | 是 | 是 | |
|
||||
| 31 | tmrDebugFlag | 是 | 是 | |
|
||||
| 32 | uDebugFlag | 是 | 是 | |
|
||||
| 33 | rpcDebugFlag | 是 | 是 | |
|
||||
| 34 | jniDebugFlag | 是 | 是 | |
|
||||
| 35 | qDebugFlag | 是 | 是 | |
|
||||
| 36 | cDebugFlag | 是 | 是 | |
|
||||
| 37 | dDebugFlag | 是 | 是 | |
|
||||
| 38 | vDebugFlag | 是 | 是 | |
|
||||
| 39 | mDebugFlag | 是 | 是 | |
|
||||
| 40 | wDebugFlag | 是 | 是 | |
|
||||
| 41 | sDebugFlag | 是 | 是 | |
|
||||
| 42 | tsdbDebugFlag | 是 | 是 | |
|
||||
| 43 | tqDebugFlag | 否 | 是 | |
|
||||
| 44 | fsDebugFlag | 是 | 是 | |
|
||||
| 45 | udfDebugFlag | 否 | 是 | |
|
||||
| 46 | smaDebugFlag | 否 | 是 | |
|
||||
| 47 | idxDebugFlag | 否 | 是 | |
|
||||
| 48 | tdbDebugFlag | 否 | 是 | |
|
||||
| 49 | metaDebugFlag | 否 | 是 | |
|
||||
| 50 | timezone | 是 | 是 | |
|
||||
| 51 | locale | 是 | 是 | |
|
||||
| 52 | charset | 是 | 是 | |
|
||||
| 53 | udf | 是 | 是 | |
|
||||
| 54 | enableCoreFile | 是 | 是 | |
|
||||
|
||||
## Parameters deprecated from 2.x to 3.0

| # | **Parameter** | **Applies to 2.x** | **Applies to 3.0** | Current behavior in 3.0 |
| --- | :---------------------: | --------------- | --------------- | ------------------------------------------------- |
| 1 | arbitrator | Yes | No | The leader is elected via the RAFT protocol |
| 2 | numOfThreadsPerCore | Yes | No | Other parameters set the sizes of the various thread pools |
| 3 | numOfMnodes | Yes | No | mnodes are created dynamically with the create mnode command |
| 4 | vnodeBak | Yes | No | Behavior unknown in 3.0 |
| 5 | balance | Yes | No | Load balancing is implemented by split/merge vgroups |
| 6 | balanceInterval | Yes | No | Obsolete along with the balance parameter |
| 7 | offlineThreshold | Yes | No | Behavior unknown in 3.0 |
| 8 | role | Yes | No | Whether vnodes can be created is determined by supportVnode |
| 9 | dnodeNopLoop | Yes | No | Parameter not found in the 2.6 documentation |
| 10 | keepTimeOffset | Yes | No | Parameter not found in the 2.6 documentation |
| 11 | rpcTimer | Yes | No | Behavior unknown in 3.0 |
| 12 | rpcMaxTime | Yes | No | Behavior unknown in 3.0 |
| 13 | rpcForceTcp | Yes | No | TCP is the default |
| 14 | tcpConnTimeout | Yes | No | Behavior unknown in 3.0 |
| 15 | syncCheckInterval | Yes | No | Behavior unknown in 3.0 |
| 16 | maxTmrCtrl | Yes | No | Behavior unknown in 3.0 |
| 17 | monitorReplica | Yes | No | Replicas are managed by the RAFT protocol |
| 18 | smlTagNullName | Yes | No | Behavior unknown in 3.0 |
| 19 | keepColumnName | Yes | No | Behavior unknown in 3.0 |
| 20 | ratioOfQueryCores | Yes | No | Determined by the thread-pool configuration parameters |
| 21 | maxStreamCompDelay | Yes | No | Behavior unknown in 3.0 |
| 22 | maxFirstStreamCompDelay | Yes | No | Behavior unknown in 3.0 |
| 23 | retryStreamCompDelay | Yes | No | Behavior unknown in 3.0 |
| 24 | streamCompDelayRatio | Yes | No | Behavior unknown in 3.0 |
| 25 | maxVgroupsPerDb | Yes | No | The actual number of vgroups is specified by the vgroups parameter of create db |
| 26 | maxTablesPerVnode | Yes | No | All tables in a DB are distributed approximately evenly across its vgroups |
| 27 | minTablesPerVnode | Yes | No | All tables in a DB are distributed approximately evenly across its vgroups |
| 28 | tableIncStepPerVnode | Yes | No | All tables in a DB are distributed approximately evenly across its vgroups |
| 29 | cache | Yes | No | buffer replaces cache\*blocks |
| 30 | blocks | Yes | No | buffer replaces cache\*blocks |
| 31 | days | Yes | No | Replaced by the duration parameter of create db |
| 32 | keep | Yes | No | Replaced by the keep parameter of create db |
| 33 | minRows | Yes | No | Replaced by the minRows parameter of create db |
| 34 | maxRows | Yes | No | Replaced by the maxRows parameter of create db |
| 35 | quorum | Yes | No | Determined by the RAFT protocol |
| 36 | comp | Yes | No | Replaced by the comp parameter of create db |
| 37 | walLevel | Yes | No | Replaced by the wal_level parameter of create db |
| 38 | fsync | Yes | No | Replaced by the wal_fsync_period parameter of create db |
| 39 | replica | Yes | No | Replaced by the replica parameter of create db |
| 40 | partitions | Yes | No | Behavior unknown in 3.0 |
| 41 | update | Yes | No | Updating individual columns is allowed |
| 42 | cachelast | Yes | No | Replaced by the cacheModel parameter of create db |
| 43 | maxSQLLength | Yes | No | The SQL length limit is 1 MB; no parameter is needed |
| 44 | maxWildCardsLength | Yes | No | Behavior unknown in 3.0 |
| 45 | maxRegexStringLen | Yes | No | Behavior unknown in 3.0 |
| 46 | maxNumOfOrderedRes | Yes | No | Behavior unknown in 3.0 |
| 47 | maxConnections | Yes | No | Depends on system configuration and processing capacity; see the Note below |
| 48 | mnodeEqualVnodeNum | Yes | No | Behavior unknown in 3.0 |
| 49 | http | Yes | No | The HTTP service is provided by taosAdapter |
| 50 | httpEnableRecordSql | Yes | No | taosd no longer provides an HTTP service |
| 51 | httpMaxThreads | Yes | No | taosd no longer provides an HTTP service |
| 52 | restfulRowLimit | Yes | No | taosd no longer provides an HTTP service |
| 53 | httpDbNameMandatory | Yes | No | taosd no longer provides an HTTP service |
| 54 | httpKeepAlive | Yes | No | taosd no longer provides an HTTP service |
| 55 | enableRecordSql | Yes | No | Behavior unknown in 3.0 |
| 56 | maxBinaryDisplayWidth | Yes | No | Behavior unknown in 3.0 |
| 57 | stream | Yes | No | Continuous queries are enabled by default |
| 58 | retrieveBlockingModel | Yes | No | Behavior unknown in 3.0 |
| 59 | tsdbMetaCompactRatio | Yes | No | Behavior unknown in 3.0 |
| 60 | defaultJSONStrType | Yes | No | Behavior unknown in 3.0 |
| 61 | walFlushSize | Yes | No | Behavior unknown in 3.0 |
| 62 | keepTimeOffset | Yes | No | Behavior unknown in 3.0 |
| 63 | flowctrl | Yes | No | Behavior unknown in 3.0 |
| 64 | slaveQuery | Yes | No | Behavior unknown in 3.0: can slave vnodes serve queries? |
| 65 | adjustMaster | Yes | No | Behavior unknown in 3.0 |
| 66 | topicBinaryLen | Yes | No | Behavior unknown in 3.0 |
| 67 | telegrafUseFieldNum | Yes | No | Behavior unknown in 3.0 |
| 68 | deadLockKillQuery | Yes | No | Behavior unknown in 3.0 |
| 69 | clientMerge | Yes | No | Behavior unknown in 3.0 |
| 70 | sdbDebugFlag | Yes | No | See the DebugFlag family of parameters in 3.0 |
| 71 | odbcDebugFlag | Yes | No | See the DebugFlag family of parameters in 3.0 |
| 72 | httpDebugFlag | Yes | No | See the DebugFlag family of parameters in 3.0 |
| 73 | monDebugFlag | Yes | No | See the DebugFlag family of parameters in 3.0 |
| 74 | cqDebugFlag | Yes | No | See the DebugFlag family of parameters in 3.0 |
| 75 | shortcutFlag | Yes | No | See the DebugFlag family of parameters in 3.0 |
| 76 | probeSeconds | Yes | No | Behavior unknown in 3.0 |
| 77 | probeKillSeconds | Yes | No | Behavior unknown in 3.0 |
| 78 | probeInterval | Yes | No | Behavior unknown in 3.0 |
| 79 | lossyColumns | Yes | No | Behavior unknown in 3.0 |
| 80 | fPrecision | Yes | No | Behavior unknown in 3.0 |
| 81 | dPrecision | Yes | No | Behavior unknown in 3.0 |
| 82 | maxRange | Yes | No | Behavior unknown in 3.0 |
| 83 | range | Yes | No | Behavior unknown in 3.0 |
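To make the table above concrete: most of the deprecated storage parameters have moved into per-database options of create db, while mnode management and vgroup balancing are now explicit commands. The SQL below is only a sketch of that mapping; the database name, dnode id, vgroup id, and values are made-up examples, and option spellings such as SPLIT VGROUP should be checked against the 3.0 SQL reference rather than read out of this commit.

```sql
-- Sketch: old global settings expressed as per-database options (values are examples only).
CREATE DATABASE demo
  VGROUPS 4               -- replaces maxVgroupsPerDb
  BUFFER 96               -- replaces cache * blocks
  DURATION 10d            -- replaces days
  KEEP 3650d              -- replaces keep
  WAL_LEVEL 1             -- replaces walLevel
  WAL_FSYNC_PERIOD 3000   -- replaces fsync
  REPLICA 1               -- replaces replica
  CACHEMODEL 'last_row';  -- replaces cachelast

CREATE MNODE ON DNODE 2;  -- replaces numOfMnodes: mnodes are created explicitly
SPLIT VGROUP 3;           -- replaces balance: load is rebalanced with explicit vgroup commands
```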
@@ -10,11 +10,14 @@ For all TDengine 2.x installation packages, please visit [here](https://www.taosdata.com/all-do

import Release from "/components/ReleaseV3";

## 3.0.2.0

<Release type="tdengine" version="3.0.2.0" />

## 3.0.1.8

<Release type="tdengine" version="3.0.1.8" />

## 3.0.1.7

<Release type="tdengine" version="3.0.1.7" />

@@ -10,6 +10,10 @@ Download links for all taosTools versions are listed below:

import Release from "/components/ReleaseV3";

## 2.3.2

<Release type="tools" version="2.3.2" />

## 2.3.0

<Release type="tools" version="2.3.0" />
@@ -149,7 +149,7 @@ DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char
DLL_EXPORT TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port);
DLL_EXPORT void taos_close(TAOS *taos);

const char *taos_data_type(int type);
DLL_EXPORT const char *taos_data_type(int type);

DLL_EXPORT TAOS_STMT *taos_stmt_init(TAOS *taos);
DLL_EXPORT TAOS_STMT *taos_stmt_init_with_reqid(TAOS *taos, int64_t reqid);
@@ -70,6 +70,11 @@ static inline bool vnodeIsMsgBlock(tmsg_t type) {
  return (type == TDMT_VND_CREATE_TABLE) || (type == TDMT_VND_ALTER_TABLE) || (type == TDMT_VND_DROP_TABLE) ||
         (type == TDMT_VND_UPDATE_TAG_VAL);
}

static inline bool syncUtilUserCommit(tmsg_t msgType) {
  return msgType != TDMT_SYNC_NOOP && msgType != TDMT_SYNC_LEADER_TRANSFER;
}

/* ------------------------ OTHER DEFINITIONS ------------------------ */
// IE type
#define TSDB_IE_TYPE_SEC 1
@@ -211,6 +211,8 @@ int32_t catalogGetCachedSTableMeta(SCatalog* pCtg, const SName* pTableName, STab

int32_t catalogGetCachedTableHashVgroup(SCatalog* pCtg, const SName* pTableName, SVgroupInfo* pVgroup, bool* exists);

int32_t catalogGetCachedTableVgMeta(SCatalog* pCtg, const SName* pTableName, SVgroupInfo* pVgroup, STableMeta** pTableMeta);

/**
 * Force refresh DB's local cached vgroup info.
 * @param pCtg (input, got with catalogGetHandle)
@@ -235,7 +235,6 @@ typedef enum ENodeType {
  QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN,
  QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN,
  QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN,
  QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN,
  QUERY_NODE_PHYSICAL_PLAN_PROJECT,
  QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN,
  QUERY_NODE_PHYSICAL_PLAN_HASH_AGG,
@@ -267,6 +266,7 @@ typedef enum ENodeType {
  QUERY_NODE_PHYSICAL_PLAN_DELETE,
  QUERY_NODE_PHYSICAL_SUBPLAN,
  QUERY_NODE_PHYSICAL_PLAN,
  QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN,
  QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT
} ENodeType;
@@ -47,6 +47,7 @@ extern "C" {

#define SYNC_HEARTBEAT_SLOW_MS 1500
#define SYNC_HEARTBEAT_REPLY_SLOW_MS 1500
#define SYNC_SNAP_RESEND_MS 1000 * 60

#define SYNC_MAX_BATCH_SIZE 1
#define SYNC_INDEX_BEGIN 0
@ -33,16 +33,16 @@ extern "C" {
|
|||
#define wTrace(...) { if (wDebugFlag & DEBUG_TRACE) { taosPrintLog("WAL ", DEBUG_TRACE, wDebugFlag, __VA_ARGS__); }}
|
||||
// clang-format on
|
||||
|
||||
#define WAL_PROTO_VER 0
|
||||
#define WAL_NOSUFFIX_LEN 20
|
||||
#define WAL_SUFFIX_AT (WAL_NOSUFFIX_LEN + 1)
|
||||
#define WAL_LOG_SUFFIX "log"
|
||||
#define WAL_INDEX_SUFFIX "idx"
|
||||
#define WAL_REFRESH_MS 1000
|
||||
#define WAL_PATH_LEN (TSDB_FILENAME_LEN + 12)
|
||||
#define WAL_FILE_LEN (WAL_PATH_LEN + 32)
|
||||
#define WAL_MAGIC 0xFAFBFCFDF4F3F2F1ULL
|
||||
#define WAL_SCAN_BUF_SIZE (1024 * 1024 * 3)
|
||||
#define WAL_PROTO_VER 0
|
||||
#define WAL_NOSUFFIX_LEN 20
|
||||
#define WAL_SUFFIX_AT (WAL_NOSUFFIX_LEN + 1)
|
||||
#define WAL_LOG_SUFFIX "log"
|
||||
#define WAL_INDEX_SUFFIX "idx"
|
||||
#define WAL_REFRESH_MS 1000
|
||||
#define WAL_PATH_LEN (TSDB_FILENAME_LEN + 12)
|
||||
#define WAL_FILE_LEN (WAL_PATH_LEN + 32)
|
||||
#define WAL_MAGIC 0xFAFBFCFDF4F3F2F1ULL
|
||||
#define WAL_SCAN_BUF_SIZE (1024 * 1024 * 3)
|
||||
|
||||
typedef enum {
|
||||
TAOS_WAL_WRITE = 1,
|
||||
|
|
|
@ -27,6 +27,7 @@ extern "C" {
|
|||
|
||||
#if !defined(WINDOWS)
|
||||
#include <dirent.h>
|
||||
#include <execinfo.h>
|
||||
#include <libgen.h>
|
||||
#include <sched.h>
|
||||
#include <unistd.h>
|
||||
|
|
|
@ -120,12 +120,6 @@ void syslog(int unused, const char *format, ...);
|
|||
#define POINTER_SHIFT(p, b) ((void *)((char *)(p) + (b)))
|
||||
#define POINTER_DISTANCE(p1, p2) ((char *)(p1) - (char *)(p2))
|
||||
|
||||
#ifndef NDEBUG
|
||||
#define ASSERT(x) assert(x)
|
||||
#else
|
||||
#define ASSERT(x)
|
||||
#endif
|
||||
|
||||
#ifndef UNUSED
|
||||
#define UNUSED(x) ((void)(x))
|
||||
#endif
|
||||
|
|
|
@ -62,6 +62,7 @@ int32_t taosRealPath(char *dirname, char *realPath, int32_t maxlen);
|
|||
bool taosIsDir(const char *dirname);
|
||||
char *taosDirName(char *dirname);
|
||||
char *taosDirEntryBaseName(char *dirname);
|
||||
void taosGetCwd(char *buf, int32_t len);
|
||||
|
||||
TdDirPtr taosOpenDir(const char *dirname);
|
||||
TdDirEntryPtr taosReadDir(TdDirPtr pDir);
|
||||
|
|
|
@ -62,7 +62,7 @@ typedef int32_t TdUcs4;
|
|||
int32_t taosUcs4len(TdUcs4 *ucs4);
|
||||
int64_t taosStr2int64(const char *str);
|
||||
|
||||
void taosConvInit(void);
|
||||
int32_t taosConvInit(void);
|
||||
void taosConvDestroy();
|
||||
int32_t taosUcs4ToMbs(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs);
|
||||
bool taosMbsToUcs4(const char *mbs, size_t mbs_len, TdUcs4 *ucs4, int32_t ucs4_max_len, int32_t *len);
|
||||
|
|
|
@ -46,6 +46,29 @@ void taosSetTerminalMode();
|
|||
int32_t taosGetOldTerminalMode();
|
||||
void taosResetTerminalMode();
|
||||
|
||||
#if !defined(WINDOWS)
|
||||
#define taosPrintTrace(flags, level, dflag) \
|
||||
{ \
|
||||
void* array[100]; \
|
||||
int32_t size = backtrace(array, 100); \
|
||||
char** strings = backtrace_symbols(array, size); \
|
||||
if (strings != NULL) { \
|
||||
taosPrintLog(flags, level, dflag, "obtained %d stack frames", size); \
|
||||
for (int32_t i = 0; i < size; i++) { \
|
||||
taosPrintLog(flags, level, dflag, "frame:%d, %s", i, strings[i]); \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
taosMemoryFree(strings); \
|
||||
}
|
||||
#else
|
||||
#define taosPrintTrace(flags, level, dflag) \
|
||||
{ \
|
||||
taosPrintLog(flags, level, dflag, \
|
||||
"backtrace not implemented on windows, so detailed stack information cannot be printed"); \
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
#define _TD_UTIL_CODING_H_
|
||||
|
||||
#include "os.h"
|
||||
#include "tlog.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
|
|
|
@ -307,8 +307,9 @@ typedef enum ELogicConditionType {
|
|||
#define TSDB_MIN_DURATION_PER_FILE 60 // unit minute
|
||||
#define TSDB_MAX_DURATION_PER_FILE (3650 * 1440)
|
||||
#define TSDB_DEFAULT_DURATION_PER_FILE (10 * 1440)
|
||||
#define TSDB_MIN_KEEP (1 * 1440) // data in db to be reserved. unit minute
|
||||
#define TSDB_MAX_KEEP (365000 * 1440) // data in db to be reserved.
|
||||
#define TSDB_MIN_KEEP (1 * 1440) // data in db to be reserved. unit minute
|
||||
#define TSDB_MAX_KEEP (365000 * 1440) // data in db to be reserved.
|
||||
#define TSDB_MAX_KEEP_NS (365 * 292 * 1440) // data in db to be reserved.
|
||||
#define TSDB_DEFAULT_KEEP (3650 * 1440) // ten years
|
||||
#define TSDB_MIN_MINROWS_FBLOCK 10
|
||||
#define TSDB_MAX_MINROWS_FBLOCK 1000
|
||||
|
|
|
@ -38,6 +38,7 @@ typedef void (*LogFp)(int64_t ts, ELogLevel level, const char *content);
|
|||
|
||||
extern bool tsLogEmbedded;
|
||||
extern bool tsAsyncLog;
|
||||
extern bool tsAssert;
|
||||
extern int32_t tsNumOfLogLines;
|
||||
extern int32_t tsLogKeepDays;
|
||||
extern LogFp tsLogFp;
|
||||
|
@ -82,6 +83,10 @@ void taosPrintLongString(const char *flags, ELogLevel level, int32_t dflag, cons
|
|||
#endif
|
||||
;
|
||||
|
||||
bool taosAssert(bool condition, const char *file, int32_t line, const char *format, ...);
|
||||
#define ASSERTS(condition, ...) taosAssert(condition, __FILE__, __LINE__, __VA_ARGS__)
|
||||
#define ASSERT(condition) ASSERTS(condition, "assert info not provided")
|
||||
|
||||
// clang-format off
|
||||
#define uFatal(...) { if (uDebugFlag & DEBUG_FATAL) { taosPrintLog("UTL FATAL", DEBUG_FATAL, tsLogEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }}
|
||||
#define uError(...) { if (uDebugFlag & DEBUG_ERROR) { taosPrintLog("UTL ERROR ", DEBUG_ERROR, tsLogEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }}
|
||||
|
|
|
@ -65,6 +65,7 @@ typedef struct STaosQnode {
|
|||
STaosQnode *next;
|
||||
STaosQueue *queue;
|
||||
int64_t timestamp;
|
||||
int64_t dataSize;
|
||||
int32_t size;
|
||||
int8_t itype;
|
||||
int8_t reserved[3];
|
||||
|
@ -103,7 +104,7 @@ typedef struct STaosQall {
|
|||
STaosQueue *taosOpenQueue();
|
||||
void taosCloseQueue(STaosQueue *queue);
|
||||
void taosSetQueueFp(STaosQueue *queue, FItem itemFp, FItems itemsFp);
|
||||
void *taosAllocateQitem(int32_t size, EQItype itype);
|
||||
void *taosAllocateQitem(int32_t size, EQItype itype, int64_t dataSize);
|
||||
void taosFreeQitem(void *pItem);
|
||||
void taosWriteQitem(STaosQueue *queue, void *pItem);
|
||||
int32_t taosReadQitem(STaosQueue *queue, void **ppItem);
|
||||
|
|
|
@ -187,7 +187,7 @@ if [[ $productName == "TDengine" ]]; then
|
|||
git clone --depth 1 https://github.com/taosdata/taos-connector-dotnet ${install_dir}/connector/dotnet
|
||||
rm -rf ${install_dir}/connector/dotnet/.git ||:
|
||||
# cp -r ${connector_dir}/nodejs ${install_dir}/connector
|
||||
git clone --depth 1 https://github.com/taosdata/libtaos-rs ${install_dir}/connector/rust
|
||||
git clone --depth 1 https://github.com/taosdata/taos-connector-rust ${install_dir}/connector/rust
|
||||
rm -rf ${install_dir}/connector/rust/.git ||:
|
||||
fi
|
||||
fi
|
||||
|
|
|
@ -318,7 +318,7 @@ if [ "$verMode" == "cluster" ] || [ "$verMode" == "cloud" ]; then
|
|||
git clone --depth 1 https://github.com/taosdata/taos-connector-dotnet ${install_dir}/connector/dotnet
|
||||
rm -rf ${install_dir}/connector/dotnet/.git ||:
|
||||
|
||||
git clone --depth 1 https://github.com/taosdata/libtaos-rs ${install_dir}/connector/rust
|
||||
git clone --depth 1 https://github.com/taosdata/taos-connector-rust ${install_dir}/connector/rust
|
||||
rm -rf ${install_dir}/connector/rust/.git ||:
|
||||
|
||||
# cp -r ${connector_dir}/python ${install_dir}/connector
|
||||
|
|
|
@ -407,7 +407,9 @@ void taos_init_imp(void) {
|
|||
|
||||
initQueryModuleMsgHandle();
|
||||
|
||||
taosConvInit();
|
||||
if (taosConvInit() != 0) {
|
||||
ASSERTS(0, "failed to init conv");
|
||||
}
|
||||
|
||||
rpcInit();
|
||||
|
||||
|
|
|
@ -190,8 +190,9 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param,
|
|||
(*pRequest)->body.param = param;
|
||||
|
||||
STscObj* pTscObj = (*pRequest)->pTscObj;
|
||||
if (taosHashPut(pTscObj->pRequests, &(*pRequest)->self, sizeof((*pRequest)->self), &(*pRequest)->self,
|
||||
sizeof((*pRequest)->self))) {
|
||||
int32_t err = taosHashPut(pTscObj->pRequests, &(*pRequest)->self, sizeof((*pRequest)->self), &(*pRequest)->self,
|
||||
sizeof((*pRequest)->self));
|
||||
if (err) {
|
||||
tscError("%" PRId64 " failed to add to request container, reqId:0x%" PRIx64 ", conn:%" PRId64 ", %s",
|
||||
(*pRequest)->self, (*pRequest)->requestId, pTscObj->id, sql);
|
||||
|
||||
|
|
|
@ -21,12 +21,12 @@
|
|||
#include "os.h"
|
||||
#include "query.h"
|
||||
#include "scheduler.h"
|
||||
#include "tdatablock.h"
|
||||
#include "tglobal.h"
|
||||
#include "tmsg.h"
|
||||
#include "tref.h"
|
||||
#include "trpc.h"
|
||||
#include "version.h"
|
||||
#include "tdatablock.h"
|
||||
|
||||
#define TSC_VAR_NOT_RELEASE 1
|
||||
#define TSC_VAR_RELEASED 0
|
||||
|
@ -178,16 +178,18 @@ void taos_free_result(TAOS_RES *res) {
|
|||
return;
|
||||
}
|
||||
|
||||
tscDebug("taos free res %p", res);
|
||||
|
||||
if (TD_RES_QUERY(res)) {
|
||||
SRequestObj *pRequest = (SRequestObj *)res;
|
||||
tscDebug("0x%" PRIx64 " taos_free_result start to free query", pRequest->requestId);
|
||||
destroyRequest(pRequest);
|
||||
} else if (TD_RES_TMQ_METADATA(res)) {
|
||||
SMqTaosxRspObj *pRsp = (SMqTaosxRspObj *)res;
|
||||
if (pRsp->rsp.blockData) taosArrayDestroyP(pRsp->rsp.blockData, taosMemoryFree);
|
||||
if (pRsp->rsp.blockDataLen) taosArrayDestroy(pRsp->rsp.blockDataLen);
|
||||
if (pRsp->rsp.withTbName) taosArrayDestroyP(pRsp->rsp.blockTbName, taosMemoryFree);
|
||||
if (pRsp->rsp.withSchema) taosArrayDestroyP(pRsp->rsp.blockSchema, (FDelete)tDeleteSSchemaWrapper);
|
||||
taosArrayDestroyP(pRsp->rsp.blockData, taosMemoryFree);
|
||||
taosArrayDestroy(pRsp->rsp.blockDataLen);
|
||||
taosArrayDestroyP(pRsp->rsp.blockTbName, taosMemoryFree);
|
||||
taosArrayDestroyP(pRsp->rsp.blockSchema, (FDelete)tDeleteSSchemaWrapper);
|
||||
// taosx
|
||||
taosArrayDestroy(pRsp->rsp.createTableLen);
|
||||
taosArrayDestroyP(pRsp->rsp.createTableReq, taosMemoryFree);
|
||||
|
@ -197,10 +199,10 @@ void taos_free_result(TAOS_RES *res) {
|
|||
taosMemoryFree(pRsp);
|
||||
} else if (TD_RES_TMQ(res)) {
|
||||
SMqRspObj *pRsp = (SMqRspObj *)res;
|
||||
if (pRsp->rsp.blockData) taosArrayDestroyP(pRsp->rsp.blockData, taosMemoryFree);
|
||||
if (pRsp->rsp.blockDataLen) taosArrayDestroy(pRsp->rsp.blockDataLen);
|
||||
if (pRsp->rsp.withTbName) taosArrayDestroyP(pRsp->rsp.blockTbName, taosMemoryFree);
|
||||
if (pRsp->rsp.withSchema) taosArrayDestroyP(pRsp->rsp.blockSchema, (FDelete)tDeleteSSchemaWrapper);
|
||||
taosArrayDestroyP(pRsp->rsp.blockData, taosMemoryFree);
|
||||
taosArrayDestroy(pRsp->rsp.blockDataLen);
|
||||
taosArrayDestroyP(pRsp->rsp.blockTbName, taosMemoryFree);
|
||||
taosArrayDestroyP(pRsp->rsp.blockSchema, (FDelete)tDeleteSSchemaWrapper);
|
||||
pRsp->resInfo.pRspMsg = NULL;
|
||||
doFreeReqResultInfo(&pRsp->resInfo);
|
||||
taosMemoryFree(pRsp);
|
||||
|
@ -796,9 +798,11 @@ static void doAsyncQueryFromParse(SMetaData *pResultMeta, void *param, int32_t c
|
|||
SQuery *pQuery = pRequest->pQuery;
|
||||
|
||||
pRequest->metric.ctgEnd = taosGetTimestampUs();
|
||||
qDebug("0x%" PRIx64 " start to continue parse, reqId:0x%" PRIx64, pRequest->self, pRequest->requestId);
|
||||
qDebug("0x%" PRIx64 " start to continue parse, reqId:0x%" PRIx64 ", code:%s", pRequest->self, pRequest->requestId,
|
||||
tstrerror(code));
|
||||
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
//pWrapper->pCatalogReq->forceUpdate = false;
|
||||
code = qContinueParseSql(pWrapper->pParseCtx, pWrapper->pCatalogReq, pResultMeta, pQuery);
|
||||
}
|
||||
|
||||
|
@ -879,6 +883,11 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
|
|||
|
||||
if (pRequest->retry++ > REQUEST_TOTAL_EXEC_TIMES) {
|
||||
code = pRequest->prevCode;
|
||||
terrno = code;
|
||||
pRequest->code = code;
|
||||
tscDebug("call sync query cb with code: %s", tstrerror(code));
|
||||
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
|
||||
return;
|
||||
}
|
||||
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
|
@ -929,6 +938,17 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
|
|||
tscError("0x%" PRIx64 " error happens, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code, tstrerror(code),
|
||||
pRequest->requestId);
|
||||
destorySqlCallbackWrapper(pWrapper);
|
||||
qDestroyQuery(pRequest->pQuery);
|
||||
pRequest->pQuery = NULL;
|
||||
|
||||
if (NEED_CLIENT_HANDLE_ERROR(code)) {
|
||||
tscDebug("0x%" PRIx64 " client retry to handle the error, code:%d - %s, tryCount:%d, reqId:0x%" PRIx64,
|
||||
pRequest->self, code, tstrerror(code), pRequest->retry, pRequest->requestId);
|
||||
pRequest->prevCode = code;
|
||||
doAsyncQuery(pRequest, true);
|
||||
return;
|
||||
}
|
||||
|
||||
terrno = code;
|
||||
pRequest->code = code;
|
||||
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
|
||||
|
|
|
@ -691,7 +691,7 @@ void tmqAssignAskEpTask(void* param, void* tmrId) {
|
|||
int64_t refId = *(int64_t*)param;
|
||||
tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId);
|
||||
if (tmq != NULL) {
|
||||
int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
|
||||
int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM, 0);
|
||||
*pTaskType = TMQ_DELAYED_TASK__ASK_EP;
|
||||
taosWriteQitem(tmq->delayedTask, pTaskType);
|
||||
tsem_post(&tmq->rspSem);
|
||||
|
@ -703,7 +703,7 @@ void tmqAssignDelayedCommitTask(void* param, void* tmrId) {
|
|||
int64_t refId = *(int64_t*)param;
|
||||
tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId);
|
||||
if (tmq != NULL) {
|
||||
int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
|
||||
int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM, 0);
|
||||
*pTaskType = TMQ_DELAYED_TASK__COMMIT;
|
||||
taosWriteQitem(tmq->delayedTask, pTaskType);
|
||||
tsem_post(&tmq->rspSem);
|
||||
|
@ -715,7 +715,7 @@ void tmqAssignDelayedReportTask(void* param, void* tmrId) {
|
|||
int64_t refId = *(int64_t*)param;
|
||||
tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId);
|
||||
if (tmq != NULL) {
|
||||
int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
|
||||
int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM, 0);
|
||||
*pTaskType = TMQ_DELAYED_TASK__REPORT;
|
||||
taosWriteQitem(tmq->delayedTask, pTaskType);
|
||||
tsem_post(&tmq->rspSem);
|
||||
|
@ -814,24 +814,55 @@ int32_t tmqHandleAllDelayedTask(tmq_t* tmq) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void tmqFreeRspWrapper(SMqRspWrapper* rspWrapper) {
|
||||
if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__END_RSP) {
|
||||
// do nothing
|
||||
} else if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__EP_RSP) {
|
||||
SMqAskEpRspWrapper* pEpRspWrapper = (SMqAskEpRspWrapper*)rspWrapper;
|
||||
tDeleteSMqAskEpRsp(&pEpRspWrapper->msg);
|
||||
} else if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__POLL_RSP) {
|
||||
SMqPollRspWrapper* pRsp = (SMqPollRspWrapper*)rspWrapper;
|
||||
taosArrayDestroyP(pRsp->dataRsp.blockData, taosMemoryFree);
|
||||
taosArrayDestroy(pRsp->dataRsp.blockDataLen);
|
||||
taosArrayDestroyP(pRsp->dataRsp.blockTbName, taosMemoryFree);
|
||||
taosArrayDestroyP(pRsp->dataRsp.blockSchema, (FDelete)tDeleteSSchemaWrapper);
|
||||
} else if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__POLL_META_RSP) {
|
||||
SMqPollRspWrapper* pRsp = (SMqPollRspWrapper*)rspWrapper;
|
||||
taosMemoryFree(pRsp->metaRsp.metaRsp);
|
||||
} else if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__TAOSX_RSP) {
|
||||
SMqPollRspWrapper* pRsp = (SMqPollRspWrapper*)rspWrapper;
|
||||
taosArrayDestroyP(pRsp->taosxRsp.blockData, taosMemoryFree);
|
||||
taosArrayDestroy(pRsp->taosxRsp.blockDataLen);
|
||||
taosArrayDestroyP(pRsp->taosxRsp.blockTbName, taosMemoryFree);
|
||||
taosArrayDestroyP(pRsp->taosxRsp.blockSchema, (FDelete)tDeleteSSchemaWrapper);
|
||||
// taosx
|
||||
taosArrayDestroy(pRsp->taosxRsp.createTableLen);
|
||||
taosArrayDestroyP(pRsp->taosxRsp.createTableReq, taosMemoryFree);
|
||||
}
|
||||
}
|
||||
|
||||
void tmqClearUnhandleMsg(tmq_t* tmq) {
|
||||
SMqRspWrapper* msg = NULL;
|
||||
SMqRspWrapper* rspWrapper = NULL;
|
||||
while (1) {
|
||||
taosGetQitem(tmq->qall, (void**)&msg);
|
||||
if (msg)
|
||||
taosFreeQitem(msg);
|
||||
else
|
||||
taosGetQitem(tmq->qall, (void**)&rspWrapper);
|
||||
if (rspWrapper) {
|
||||
tmqFreeRspWrapper(rspWrapper);
|
||||
taosFreeQitem(rspWrapper);
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
msg = NULL;
|
||||
rspWrapper = NULL;
|
||||
taosReadAllQitems(tmq->mqueue, tmq->qall);
|
||||
while (1) {
|
||||
taosGetQitem(tmq->qall, (void**)&msg);
|
||||
if (msg)
|
||||
taosFreeQitem(msg);
|
||||
else
|
||||
taosGetQitem(tmq->qall, (void**)&rspWrapper);
|
||||
if (rspWrapper) {
|
||||
tmqFreeRspWrapper(rspWrapper);
|
||||
taosFreeQitem(rspWrapper);
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -875,6 +906,7 @@ void tmqFreeImpl(void* handle) {
|
|||
tmq_t* tmq = (tmq_t*)handle;
|
||||
|
||||
// TODO stop timer
|
||||
tmqClearUnhandleMsg(tmq);
|
||||
if (tmq->mqueue) taosCloseQueue(tmq->mqueue);
|
||||
if (tmq->delayedTask) taosCloseQueue(tmq->delayedTask);
|
||||
if (tmq->qall) taosFreeQall(tmq->qall);
|
||||
|
@ -884,8 +916,7 @@ void tmqFreeImpl(void* handle) {
|
|||
int32_t sz = taosArrayGetSize(tmq->clientTopics);
|
||||
for (int32_t i = 0; i < sz; i++) {
|
||||
SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
|
||||
if (pTopic->schema.nCols) taosMemoryFreeClear(pTopic->schema.pSchema);
|
||||
int32_t vgSz = taosArrayGetSize(pTopic->vgs);
|
||||
taosMemoryFreeClear(pTopic->schema.pSchema);
|
||||
taosArrayDestroy(pTopic->vgs);
|
||||
}
|
||||
taosArrayDestroy(tmq->clientTopics);
|
||||
|
@ -1140,7 +1171,7 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
|
|||
goto CREATE_MSG_FAIL;
|
||||
}
|
||||
if (code == TSDB_CODE_TQ_NO_COMMITTED_OFFSET) {
|
||||
SMqPollRspWrapper* pRspWrapper = taosAllocateQitem(sizeof(SMqPollRspWrapper), DEF_QITEM);
|
||||
SMqPollRspWrapper* pRspWrapper = taosAllocateQitem(sizeof(SMqPollRspWrapper), DEF_QITEM, 0);
|
||||
if (pRspWrapper == NULL) {
|
||||
tscWarn("msg discard from vgId:%d, epoch %d since out of memory", vgId, epoch);
|
||||
goto CREATE_MSG_FAIL;
|
||||
|
@ -1173,7 +1204,7 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
|
|||
// handle meta rsp
|
||||
int8_t rspType = ((SMqRspHead*)pMsg->pData)->mqMsgType;
|
||||
|
||||
SMqPollRspWrapper* pRspWrapper = taosAllocateQitem(sizeof(SMqPollRspWrapper), DEF_QITEM);
|
||||
SMqPollRspWrapper* pRspWrapper = taosAllocateQitem(sizeof(SMqPollRspWrapper), DEF_QITEM, 0);
|
||||
if (pRspWrapper == NULL) {
|
||||
taosMemoryFree(pMsg->pData);
|
||||
taosMemoryFree(pMsg->pEpSet);
|
||||
|
@ -1215,6 +1246,8 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
|
|||
taosMemoryFree(pMsg->pData);
|
||||
taosMemoryFree(pMsg->pEpSet);
|
||||
|
||||
tscDebug("consumer:%" PRId64 ", put poll res into mqueue %p", tmq->consumerId, pRspWrapper);
|
||||
|
||||
taosWriteQitem(tmq->mqueue, pRspWrapper);
|
||||
tsem_post(&tmq->rspSem);
|
||||
|
||||
|
@ -1304,7 +1337,6 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) {
|
|||
for (int32_t i = 0; i < sz; i++) {
|
||||
SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
|
||||
if (pTopic->schema.nCols) taosMemoryFreeClear(pTopic->schema.pSchema);
|
||||
int32_t vgSz = taosArrayGetSize(pTopic->vgs);
|
||||
taosArrayDestroy(pTopic->vgs);
|
||||
}
|
||||
taosArrayDestroy(tmq->clientTopics);
|
||||
|
@ -1362,7 +1394,7 @@ int32_t tmqAskEpCb(void* param, SDataBuf* pMsg, int32_t code) {
|
|||
tmqUpdateEp(tmq, head->epoch, &rsp);
|
||||
tDeleteSMqAskEpRsp(&rsp);
|
||||
} else {
|
||||
SMqAskEpRspWrapper* pWrapper = taosAllocateQitem(sizeof(SMqAskEpRspWrapper), DEF_QITEM);
|
||||
SMqAskEpRspWrapper* pWrapper = taosAllocateQitem(sizeof(SMqAskEpRspWrapper), DEF_QITEM, 0);
|
||||
if (pWrapper == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
code = -1;
|
||||
|
@ -1410,7 +1442,7 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) {
|
|||
return -1;
|
||||
}
|
||||
void* pReq = taosMemoryCalloc(1, tlen);
|
||||
if (tlen < 0) {
|
||||
if (pReq == NULL) {
|
||||
tscError("failed to malloc askEpReq msg, size:%d", tlen);
|
||||
return -1;
|
||||
}
|
||||
|
@ -1643,6 +1675,7 @@ int32_t tmqHandleNoPollRsp(tmq_t* tmq, SMqRspWrapper* rspWrapper, bool* pReset)
|
|||
tDeleteSMqAskEpRsp(rspMsg);
|
||||
*pReset = true;
|
||||
} else {
|
||||
tmqFreeRspWrapper(rspWrapper);
|
||||
*pReset = false;
|
||||
}
|
||||
} else {
|
||||
|
@ -1665,6 +1698,8 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
|
|||
}
|
||||
}
|
||||
|
||||
tscDebug("consumer:%" PRId64 " handle rsp %p", tmq->consumerId, rspWrapper);
|
||||
|
||||
if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__END_RSP) {
|
||||
taosFreeQitem(rspWrapper);
|
||||
terrno = TSDB_CODE_TQ_NO_COMMITTED_OFFSET;
|
||||
|
@ -1692,6 +1727,7 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
|
|||
} else {
|
||||
tscDebug("msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
|
||||
pollRspWrapper->dataRsp.head.epoch, consumerEpoch);
|
||||
tmqFreeRspWrapper(rspWrapper);
|
||||
taosFreeQitem(pollRspWrapper);
|
||||
}
|
||||
} else if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__POLL_META_RSP) {
|
||||
|
@ -1710,6 +1746,7 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
|
|||
} else {
|
||||
tscDebug("msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
|
||||
pollRspWrapper->metaRsp.head.epoch, consumerEpoch);
|
||||
tmqFreeRspWrapper(rspWrapper);
|
||||
taosFreeQitem(pollRspWrapper);
|
||||
}
|
||||
} else if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__TAOSX_RSP) {
|
||||
|
@ -1738,8 +1775,9 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
|
|||
taosFreeQitem(pollRspWrapper);
|
||||
return pRsp;
|
||||
} else {
|
||||
tscDebug("msg discard since epoch mismatch: msg epoch %d, consumer epoch %d\n",
|
||||
tscDebug("msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
|
||||
pollRspWrapper->taosxRsp.head.epoch, consumerEpoch);
|
||||
tmqFreeRspWrapper(rspWrapper);
|
||||
taosFreeQitem(pollRspWrapper);
|
||||
}
|
||||
} else {
|
||||
|
@ -1791,7 +1829,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
|
|||
while (1) {
|
||||
tmqHandleAllDelayedTask(tmq);
|
||||
if (tmqPollImpl(tmq, timeout) < 0) {
|
||||
tscDebug("return since poll err");
|
||||
tscDebug("consumer:%" PRId64 " return since poll err", tmq->consumerId);
|
||||
/*return NULL;*/
|
||||
}
|
||||
|
||||
|
|
|
@ -1946,9 +1946,9 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf)
|
|||
int32_t len = 0;
|
||||
len += snprintf(dumpBuf + len, size - len,
|
||||
"===stream===%s|block type %d|child id %d|group id:%" PRIu64 "|uid:%" PRId64
|
||||
"|rows:%d|version:%" PRIu64 "\n",
|
||||
"|rows:%d|version:%" PRIu64 "|cal start:%" PRIu64 "|cal end:%" PRIu64 "\n",
|
||||
flag, (int32_t)pDataBlock->info.type, pDataBlock->info.childId, pDataBlock->info.id.groupId,
|
||||
pDataBlock->info.id.uid, pDataBlock->info.rows, pDataBlock->info.version);
|
||||
pDataBlock->info.id.uid, pDataBlock->info.rows, pDataBlock->info.version, pDataBlock->info.calWin.skey, pDataBlock->info.calWin.ekey);
|
||||
if (len >= size - 1) return dumpBuf;
|
||||
|
||||
for (int32_t j = 0; j < rows; j++) {
|
||||
|
|
|
@ -333,6 +333,7 @@ static int32_t taosAddSystemCfg(SConfig *pCfg) {
|
|||
if (cfgAddTimezone(pCfg, "timezone", tsTimezoneStr) != 0) return -1;
|
||||
if (cfgAddLocale(pCfg, "locale", tsLocale) != 0) return -1;
|
||||
if (cfgAddCharset(pCfg, "charset", tsCharset) != 0) return -1;
|
||||
if (cfgAddBool(pCfg, "assert", 1, 1) != 0) return -1;
|
||||
if (cfgAddBool(pCfg, "enableCoreFile", 1, 1) != 0) return -1;
|
||||
if (cfgAddFloat(pCfg, "numOfCores", tsNumOfCores, 1, 100000, 1) != 0) return -1;
|
||||
|
||||
|
@ -407,7 +408,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
|
|||
|
||||
tsNumOfQnodeQueryThreads = tsNumOfCores * 2;
|
||||
tsNumOfQnodeQueryThreads = TMAX(tsNumOfQnodeQueryThreads, 4);
|
||||
if (cfgAddInt32(pCfg, "numOfQnodeQueryThreads", tsNumOfQnodeQueryThreads, 1, 1024, 0) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "numOfQnodeQueryThreads", tsNumOfQnodeQueryThreads, 4, 1024, 0) != 0) return -1;
|
||||
|
||||
// tsNumOfQnodeFetchThreads = tsNumOfCores / 2;
|
||||
// tsNumOfQnodeFetchThreads = TMAX(tsNumOfQnodeFetchThreads, 4);
|
||||
|
@ -693,6 +694,8 @@ static void taosSetSystemCfg(SConfig *pCfg) {
|
|||
bool enableCore = cfgGetItem(pCfg, "enableCoreFile")->bval;
|
||||
taosSetCoreDump(enableCore);
|
||||
|
||||
tsAssert = cfgGetItem(pCfg, "assert")->bval;
|
||||
|
||||
// todo
|
||||
tsVersion = 30000000;
|
||||
}
|
||||
|
@ -788,6 +791,8 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
|
|||
case 'a': {
|
||||
if (strcasecmp("asyncLog", name) == 0) {
|
||||
tsAsyncLog = cfgGetItem(pCfg, "asyncLog")->bval;
|
||||
} else if (strcasecmp("assert", name) == 0) {
|
||||
tsAssert = cfgGetItem(pCfg, "assert")->bval;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -28,6 +28,8 @@
|
|||
#undef TD_MSG_SEG_CODE_
|
||||
#include "tmsgdef.h"
|
||||
|
||||
#include "tlog.h"
|
||||
|
||||
int32_t tInitSubmitMsgIter(const SSubmitReq *pMsg, SSubmitMsgIter *pIter) {
|
||||
if (pMsg == NULL) {
|
||||
terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP;
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
|
||||
#define _DEFAULT_SOURCE
|
||||
#include "trow.h"
|
||||
#include "tlog.h"
|
||||
|
||||
const uint8_t tdVTypeByte[2][3] = {{
|
||||
// 2 bits
|
||||
|
|
|
@ -23,6 +23,8 @@
|
|||
#define _DEFAULT_SOURCE
|
||||
#include "ttime.h"
|
||||
|
||||
#include "tlog.h"
|
||||
|
||||
/*
|
||||
* mktime64 - Converts date to seconds.
|
||||
* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
#include "dmMgmt.h"
|
||||
#include "mnode.h"
|
||||
#include "tconfig.h"
|
||||
#include "tglobal.h"
|
||||
|
||||
// clang-format off
|
||||
#define DM_APOLLO_URL "The apollo string to use when configuring the server, such as: -a 'jsonFile:./tests/cfg.json', cfg.json text can be '{\"fqdn\":\"td1\"}'."
|
||||
|
@ -45,9 +46,30 @@ static struct {
|
|||
SArray *pArgs; // SConfigPair
|
||||
} global = {0};
|
||||
|
||||
static void dmStopDnode(int signum, void *info, void *ctx) { dmStop(); }
|
||||
static void dmSetDebugFlag(int32_t signum, void *sigInfo, void *context) { taosSetAllDebugFlag(143, true); }
|
||||
static void dmSetAssert(int32_t signum, void *sigInfo, void *context) { tsAssert = 1; }
|
||||
|
||||
static void dmStopDnode(int signum, void *sigInfo, void *context) {
|
||||
// taosIgnSignal(SIGUSR1);
|
||||
// taosIgnSignal(SIGUSR2);
|
||||
taosIgnSignal(SIGTERM);
|
||||
taosIgnSignal(SIGHUP);
|
||||
taosIgnSignal(SIGINT);
|
||||
taosIgnSignal(SIGABRT);
|
||||
taosIgnSignal(SIGBREAK);
|
||||
|
||||
dInfo("shut down signal is %d", signum);
|
||||
#ifndef WINDOWS
|
||||
dInfo("sender PID:%d cmdline:%s", ((siginfo_t *)sigInfo)->si_pid,
|
||||
taosGetCmdlineByPID(((siginfo_t *)sigInfo)->si_pid));
|
||||
#endif
|
||||
|
||||
dmStop();
|
||||
}
|
||||
|
||||
static void dmSetSignalHandle() {
|
||||
taosSetSignal(SIGUSR1, dmSetDebugFlag);
|
||||
taosSetSignal(SIGUSR2, dmSetAssert);
|
||||
taosSetSignal(SIGTERM, dmStopDnode);
|
||||
taosSetSignal(SIGHUP, dmStopDnode);
|
||||
taosSetSignal(SIGINT, dmStopDnode);
|
||||
|
@ -105,6 +127,19 @@ static int32_t dmParseArgs(int32_t argc, char const *argv[]) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void dmPrintArgs(int32_t argc, char const *argv[]) {
|
||||
char path[1024] = {0};
|
||||
taosGetCwd(path, sizeof(path));
|
||||
|
||||
char args[1024] = {0};
|
||||
int32_t arglen = snprintf(args, sizeof(args), "%s", argv[0]);
|
||||
for (int32_t i = 1; i < argc; ++i) {
|
||||
arglen = arglen + snprintf(args + arglen, sizeof(args) - arglen, " %s", argv[i]);
|
||||
}
|
||||
|
||||
dInfo("startup path:%s args:%s", path, args);
|
||||
}
|
||||
|
||||
static void dmGenerateGrant() { mndGenerateMachineCode(); }
|
||||
|
||||
static void dmPrintVersion() {
|
||||
|
@ -194,6 +229,8 @@ int mainWindows(int argc, char **argv) {
|
|||
return -1;
|
||||
}
|
||||
|
||||
dmPrintArgs(argc, argv);
|
||||
|
||||
if (taosInitCfg(configDir, global.envCmd, global.envFile, global.apolloUrl, global.pArgs, 0) != 0) {
|
||||
dError("failed to start since read config error");
|
||||
taosCloseLog();
|
||||
|
@ -201,7 +238,12 @@ int mainWindows(int argc, char **argv) {
|
|||
return -1;
|
||||
}
|
||||
|
||||
taosConvInit();
|
||||
if (taosConvInit() != 0) {
|
||||
dError("failed to init conv");
|
||||
taosCloseLog();
|
||||
taosCleanupArgs();
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (global.dumpConfig) {
|
||||
dmDumpCfg();
|
||||
|
|
|
@ -103,7 +103,12 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) {
|
|||
tSerializeSStatusReq(pHead, contLen, &req);
|
||||
tFreeSStatusReq(&req);
|
||||
|
||||
SRpcMsg rpcMsg = {.pCont = pHead, .contLen = contLen, .msgType = TDMT_MND_STATUS, .info.ahandle = (void *)0x9527};
|
||||
SRpcMsg rpcMsg = {.pCont = pHead,
|
||||
.contLen = contLen,
|
||||
.msgType = TDMT_MND_STATUS,
|
||||
.info.ahandle = (void *)0x9527,
|
||||
.info.refId = 0,
|
||||
.info.noResp = 0};
|
||||
SRpcMsg rpcRsp = {0};
|
||||
|
||||
dTrace("send status req to mnode, dnodeVer:%" PRId64 " statusSeq:%d", req.dnodeVer, req.statusSeq);
|
||||
|
@ -150,7 +155,8 @@ static void dmGetServerRunStatus(SDnodeMgmt *pMgmt, SServerStatusRsp *pStatus) {
|
|||
SServerStatusRsp statusRsp = {0};
|
||||
SMonMloadInfo minfo = {0};
|
||||
(*pMgmt->getMnodeLoadsFp)(&minfo);
|
||||
if (minfo.isMnode && (minfo.load.syncState == TAOS_SYNC_STATE_ERROR || minfo.load.syncState == TAOS_SYNC_STATE_OFFLINE)) {
|
||||
if (minfo.isMnode &&
|
||||
(minfo.load.syncState == TAOS_SYNC_STATE_ERROR || minfo.load.syncState == TAOS_SYNC_STATE_OFFLINE)) {
|
||||
pStatus->statusCode = TSDB_SRV_STATUS_SERVICE_DEGRADED;
|
||||
snprintf(pStatus->details, sizeof(pStatus->details), "mnode sync state is %s", syncStr(minfo.load.syncState));
|
||||
return;
|
||||
|
|
|
@ -21,6 +21,8 @@ static void *dmStatusThreadFp(void *param) {
|
|||
int64_t lastTime = taosGetTimestampMs();
|
||||
setThreadName("dnode-status");
|
||||
|
||||
const static int16_t TRIM_FREQ = 30;
|
||||
int32_t trimCount = 0;
|
||||
while (1) {
|
||||
taosMsleep(200);
|
||||
if (pMgmt->pData->dropped || pMgmt->pData->stopped) break;
|
||||
|
@ -28,9 +30,13 @@ static void *dmStatusThreadFp(void *param) {
|
|||
int64_t curTime = taosGetTimestampMs();
|
||||
float interval = (curTime - lastTime) / 1000.0f;
|
||||
if (interval >= tsStatusInterval) {
|
||||
taosMemoryTrim(0);
|
||||
dmSendStatusReq(pMgmt);
|
||||
lastTime = curTime;
|
||||
|
||||
trimCount = (trimCount + 1) % TRIM_FREQ;
|
||||
if (trimCount == 0) {
|
||||
taosMemoryTrim(0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -159,12 +159,12 @@ int32_t mmPutMsgToQueue(SMnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
|
|||
}
|
||||
|
||||
if (pWorker == NULL) return -1;
|
||||
SRpcMsg *pMsg = taosAllocateQitem(sizeof(SRpcMsg), RPC_QITEM);
|
||||
SRpcMsg *pMsg = taosAllocateQitem(sizeof(SRpcMsg), RPC_QITEM, pRpc->contLen);
|
||||
if (pMsg == NULL) return -1;
|
||||
memcpy(pMsg, pRpc, sizeof(SRpcMsg));
|
||||
pRpc->pCont = NULL;
|
||||
|
||||
dTrace("msg:%p, is created and will put into %s queue, type:%s", pMsg, pWorker->name, TMSG_INFO(pRpc->msgType));
|
||||
dTrace("msg:%p, is created and will put into %s queue, type:%s len:%d", pMsg, pWorker->name, TMSG_INFO(pRpc->msgType), pRpc->contLen);
|
||||
int32_t code = mmPutMsgToWorker(pMgmt, pWorker, pMsg);
|
||||
if (code != 0) {
|
||||
dTrace("msg:%p, is freed", pMsg);
|
||||
|
|
|
@ -58,19 +58,19 @@ int32_t qmPutNodeMsgToFetchQueue(SQnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
|||
}
|
||||
|
||||
int32_t qmPutRpcMsgToQueue(SQnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
|
||||
SRpcMsg *pMsg = taosAllocateQitem(sizeof(SRpcMsg), RPC_QITEM);
|
||||
SRpcMsg *pMsg = taosAllocateQitem(sizeof(SRpcMsg), RPC_QITEM, pRpc->contLen);
|
||||
if (pMsg == NULL) return -1;
|
||||
memcpy(pMsg, pRpc, sizeof(SRpcMsg));
|
||||
pRpc->pCont = NULL;
|
||||
|
||||
switch (qtype) {
|
||||
case QUERY_QUEUE:
|
||||
dTrace("msg:%p, is created and will put into qnode-query queue", pMsg);
|
||||
dTrace("msg:%p, is created and will put into qnode-query queue, len:%d", pMsg, pRpc->contLen);
|
||||
taosWriteQitem(pMgmt->queryWorker.queue, pMsg);
|
||||
return 0;
|
||||
case READ_QUEUE:
|
||||
case FETCH_QUEUE:
|
||||
dTrace("msg:%p, is created and will put into qnode-fetch queue", pMsg);
|
||||
dTrace("msg:%p, is created and will put into qnode-fetch queue, len:%d", pMsg, pRpc->contLen);
|
||||
taosWriteQitem(pMgmt->fetchWorker.queue, pMsg);
|
||||
return 0;
|
||||
default:
|
||||
|
|
|
@ -130,7 +130,7 @@ void smStopWorker(SSnodeMgmt *pMgmt) {
|
|||
}
|
||||
|
||||
int32_t smPutMsgToQueue(SSnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
|
||||
SRpcMsg *pMsg = taosAllocateQitem(sizeof(SRpcMsg), RPC_QITEM);
|
||||
SRpcMsg *pMsg = taosAllocateQitem(sizeof(SRpcMsg), RPC_QITEM, pRpc->contLen);
|
||||
if (pMsg == NULL) {
|
||||
rpcFreeCont(pRpc->pCont);
|
||||
pRpc->pCont = NULL;
|
||||
|
@ -139,8 +139,8 @@ int32_t smPutMsgToQueue(SSnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
|
|||
|
||||
SSnode *pSnode = pMgmt->pSnode;
|
||||
if (pSnode == NULL) {
|
||||
dError("snode: msg:%p failed to put into vnode queue since %s, type:%s qtype:%d", pMsg, terrstr(),
|
||||
TMSG_INFO(pMsg->msgType), qtype);
|
||||
dError("msg:%p failed to put into snode queue since %s, type:%s qtype:%d len:%d", pMsg, terrstr(),
|
||||
TMSG_INFO(pMsg->msgType), qtype, pRpc->contLen);
|
||||
taosFreeQitem(pMsg);
|
||||
rpcFreeCont(pRpc->pCont);
|
||||
pRpc->pCont = NULL;
|
||||
|
@ -161,7 +161,8 @@ int32_t smPutMsgToQueue(SSnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
|
|||
smPutNodeMsgToWriteQueue(pMgmt, pMsg);
|
||||
break;
|
||||
default:
|
||||
ASSERT(0);
|
||||
ASSERTS(0, "msg:%p failed to put into snode queue since %s, type:%s qtype:%d", pMsg, terrstr(),
|
||||
TMSG_INFO(pMsg->msgType), qtype);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -233,7 +233,7 @@ int32_t vmPutMsgToMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
|||
}
|
||||
|
||||
int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
|
||||
SRpcMsg *pMsg = taosAllocateQitem(sizeof(SRpcMsg), RPC_QITEM);
|
||||
SRpcMsg *pMsg = taosAllocateQitem(sizeof(SRpcMsg), RPC_QITEM, pRpc->contLen);
|
||||
if (pMsg == NULL) {
|
||||
rpcFreeCont(pRpc->pCont);
|
||||
pRpc->pCont = NULL;
|
||||
|
@ -241,7 +241,7 @@ int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
|
|||
}
|
||||
|
||||
SMsgHead *pHead = pRpc->pCont;
|
||||
dTrace("vgId:%d, msg:%p is created, type:%s", pHead->vgId, pMsg, TMSG_INFO(pRpc->msgType));
|
||||
dTrace("vgId:%d, msg:%p is created, type:%s len:%d", pHead->vgId, pMsg, TMSG_INFO(pRpc->msgType), pRpc->contLen);
|
||||
|
||||
pHead->contLen = htonl(pHead->contLen);
|
||||
pHead->vgId = htonl(pHead->vgId);
|
||||
|
|
|
@ -50,7 +50,7 @@ int32_t dmProcessNodeMsg(SMgmtWrapper *pWrapper, SRpcMsg *pMsg) {
|
|||
|
||||
static bool dmFailFastFp(tmsg_t msgType) {
|
||||
// add more msg type later
|
||||
return msgType == TDMT_SYNC_HEARTBEAT;
|
||||
return msgType == TDMT_SYNC_HEARTBEAT || msgType == TDMT_SYNC_APPEND_ENTRIES;
|
||||
}
|
||||
|
||||
static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) {
|
||||
|
@ -141,11 +141,11 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) {
|
|||
}
|
||||
|
||||
pRpc->info.wrapper = pWrapper;
|
||||
pMsg = taosAllocateQitem(sizeof(SRpcMsg), RPC_QITEM);
|
||||
pMsg = taosAllocateQitem(sizeof(SRpcMsg), RPC_QITEM, pRpc->contLen);
|
||||
if (pMsg == NULL) goto _OVER;
|
||||
|
||||
memcpy(pMsg, pRpc, sizeof(SRpcMsg));
|
||||
dGTrace("msg:%p, is created, type:%s handle:%p", pMsg, TMSG_INFO(pRpc->msgType), pMsg->info.handle);
|
||||
dGTrace("msg:%p, is created, type:%s handle:%p len:%d", pMsg, TMSG_INFO(pRpc->msgType), pMsg->info.handle, pRpc->contLen);
|
||||
|
||||
code = dmProcessNodeMsg(pWrapper, pMsg);
|
||||
|
||||
|
@ -301,6 +301,7 @@ int32_t dmInitServer(SDnode *pDnode) {
|
|||
rpcInit.connType = TAOS_CONN_SERVER;
|
||||
rpcInit.idleTime = tsShellActivityTimer * 1000;
|
||||
rpcInit.parent = pDnode;
|
||||
rpcInit.compressSize = tsCompressMsgSize;
|
||||
|
||||
pTrans->serverRpc = rpcOpen(&rpcInit);
|
||||
if (pTrans->serverRpc == NULL) {
|
||||
|
|
|
@ -554,7 +554,16 @@ static int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
|
|||
goto SUBSCRIBE_OVER;
|
||||
}
|
||||
|
||||
// check topic only
|
||||
#if 0
|
||||
if (mndCheckDbPrivilegeByName(pMnode, pMsg->info.conn.user, MND_OPER_READ_DB, pTopic->db) != 0) {
|
||||
mndReleaseTopic(pMnode, pTopic);
|
||||
goto SUBSCRIBE_OVER;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (mndCheckTopicPrivilege(pMnode, pMsg->info.conn.user, MND_OPER_SUBSCRIBE, pTopic) != 0) {
|
||||
mndReleaseTopic(pMnode, pTopic);
|
||||
goto SUBSCRIBE_OVER;
|
||||
}
|
||||
|
||||
|
|
|
@ -825,7 +825,13 @@ static int32_t mndProcessAlterDbReq(SRpcMsg *pReq) {
|
|||
dbObj.cfgVersion++;
|
||||
dbObj.updateTime = taosGetTimestampMs();
|
||||
code = mndAlterDb(pMnode, pReq, pDb, &dbObj);
|
||||
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
|
||||
|
||||
if (dbObj.cfg.replications != pDb->cfg.replications) {
|
||||
// return quickly, operation executed asynchronously
|
||||
mInfo("db:%s, alter db replica from %d to %d", pDb->name, pDb->cfg.replications, dbObj.cfg.replications);
|
||||
} else {
|
||||
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
|
||||
}
|
||||
|
||||
_OVER:
|
||||
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
|
||||
|
|
|
@ -769,7 +769,7 @@ static int32_t mndRetrieveQueries(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
|
|||
}
|
||||
|
||||
int32_t numOfQueries = taosArrayGetSize(pConn->pQueries);
|
||||
for (int32_t i = 0; i < numOfQueries; ++i) {
|
||||
for (int32_t i = 0; i < numOfQueries && numOfRows < rows; ++i) {
|
||||
SQueryDesc *pQuery = taosArrayGet(pConn->pQueries, i);
|
||||
cols = 0;
|
||||
|
||||
|
|
|
@ -119,7 +119,13 @@ int32_t mndProcessWriteMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsmCbMeta
|
|||
}
|
||||
|
||||
int32_t mndSyncCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsmCbMeta *pMeta) {
|
||||
int32_t code = mndProcessWriteMsg(pFsm, pMsg, pMeta);
|
||||
int32_t code = 0;
|
||||
if (!syncUtilUserCommit(pMsg->msgType)) {
|
||||
goto _out;
|
||||
}
|
||||
code = mndProcessWriteMsg(pFsm, pMsg, pMeta);
|
||||
|
||||
_out:
|
||||
rpcFreeCont(pMsg->pCont);
|
||||
pMsg->pCont = NULL;
|
||||
return code;
|
||||
|
@ -143,9 +149,13 @@ void mndRestoreFinish(const SSyncFSM *pFsm) {
|
|||
SMnode *pMnode = pFsm->data;
|
||||
|
||||
if (!pMnode->deploy) {
|
||||
mInfo("vgId:1, sync restore finished, and will handle outstanding transactions");
|
||||
mndTransPullup(pMnode);
|
||||
mndSetRestored(pMnode, true);
|
||||
if (!pMnode->restored) {
|
||||
mInfo("vgId:1, sync restore finished, and will handle outstanding transactions");
|
||||
mndTransPullup(pMnode);
|
||||
mndSetRestored(pMnode, true);
|
||||
} else {
|
||||
mInfo("vgId:1, sync restore finished, repeat call");
|
||||
}
|
||||
} else {
|
||||
mInfo("vgId:1, sync restore finished");
|
||||
}
|
||||
|
|
|
@ -507,7 +507,7 @@ static int32_t mndGetAvailableDnode(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup
|
|||
for (int32_t v = 0; v < pVgroup->replica; ++v) {
|
||||
SVnodeGid *pVgid = &pVgroup->vnodeGid[v];
|
||||
SDnodeObj *pDnode = taosArrayGet(pArray, v);
|
||||
if (pDnode == NULL || pDnode->numOfVnodes > pDnode->numOfSupportVnodes) {
|
||||
if (pDnode == NULL || pDnode->numOfVnodes >= pDnode->numOfSupportVnodes) {
|
||||
terrno = TSDB_CODE_MND_NO_ENOUGH_DNODES;
|
||||
return -1;
|
||||
}
|
||||
|
@ -891,7 +891,7 @@ static int32_t mndAddVnodeToVgroup(SMnode *pMnode, STrans *pTrans, SVgObj *pVgro
|
|||
}
|
||||
if (used) continue;
|
||||
|
||||
if (pDnode == NULL || pDnode->numOfVnodes > pDnode->numOfSupportVnodes) {
|
||||
if (pDnode == NULL || pDnode->numOfVnodes >= pDnode->numOfSupportVnodes) {
|
||||
terrno = TSDB_CODE_MND_NO_ENOUGH_DNODES;
|
||||
return -1;
|
||||
}
|
||||
|
|
|
@ -174,7 +174,7 @@ int32_t tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, void *pTableL
|
|||
void tsdbReaderClose(STsdbReader *pReader);
|
||||
bool tsdbNextDataBlock(STsdbReader *pReader);
|
||||
void tsdbRetrieveDataBlockInfo(const STsdbReader *pReader, int32_t *rows, uint64_t *uid, STimeWindow *pWindow);
|
||||
int32_t tsdbRetrieveDatablockSMA(STsdbReader *pReader, SColumnDataAgg ***pBlockSMA, bool *allHave);
|
||||
int32_t tsdbRetrieveDatablockSMA(STsdbReader *pReader, SSDataBlock* pDataBlock, bool *allHave);
|
||||
SSDataBlock *tsdbRetrieveDataBlock(STsdbReader *pTsdbReadHandle, SArray *pColumnIdList);
|
||||
int32_t tsdbReaderReset(STsdbReader *pReader, SQueryTableDataCond *pCond);
|
||||
int32_t tsdbGetFileBlocksDistInfo(STsdbReader *pReader, STableBlockDistInfo *pTableBlockInfo);
|
||||
|
|
|
@ -32,9 +32,9 @@ typedef struct SMetaStbStatsEntry {
|
|||
} SMetaStbStatsEntry;
|
||||
|
||||
typedef struct STagFilterResEntry {
|
||||
uint64_t suid; // uid for super table
|
||||
SList list; // the linked list of md5 digest, extracted from the serialized tag query condition
|
||||
uint32_t qTimes;// queried times for current super table
|
||||
uint64_t suid; // uid for super table
|
||||
SList list; // the linked list of md5 digest, extracted from the serialized tag query condition
|
||||
uint32_t qTimes; // queried times for current super table
|
||||
} STagFilterResEntry;
|
||||
|
||||
struct SMetaCache {
|
||||
|
@ -126,13 +126,14 @@ int32_t metaCacheOpen(SMeta* pMeta) {
|
|||
goto _err2;
|
||||
}
|
||||
|
||||
pCache->sTagFilterResCache.pUidResCache = taosLRUCacheInit(5*1024*1024, -1, 0.5);
|
||||
pCache->sTagFilterResCache.pUidResCache = taosLRUCacheInit(5 * 1024 * 1024, -1, 0.5);
|
||||
if (pCache->sTagFilterResCache.pUidResCache == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _err2;
|
||||
}
|
||||
|
||||
pCache->sTagFilterResCache.pTableEntry = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), false, HASH_NO_LOCK);
|
||||
pCache->sTagFilterResCache.pTableEntry =
|
||||
taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), false, HASH_NO_LOCK);
|
||||
if (pCache->sTagFilterResCache.pTableEntry == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _err2;
|
||||
|
@ -419,7 +420,8 @@ int32_t metaStatsCacheGet(SMeta* pMeta, int64_t uid, SMetaStbStats* pInfo) {
|
|||
return code;
|
||||
}
|
||||
|
||||
int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray* pList1, bool* acquireRes) {
|
||||
int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray* pList1,
|
||||
bool* acquireRes) {
|
||||
uint64_t* pBuf = pMeta->pCache->sTagFilterResCache.keyBuf;
|
||||
|
||||
// generate the composed key for LRU cache
|
||||
|
@ -428,8 +430,8 @@ int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pK
|
|||
pBuf[0] = suid;
|
||||
memcpy(&pBuf[1], pKey, keyLen);
|
||||
|
||||
int32_t len = keyLen + sizeof(uint64_t);
|
||||
LRUHandle *pHandle = taosLRUCacheLookup(pCache, pBuf, len);
|
||||
int32_t len = keyLen + sizeof(uint64_t);
|
||||
LRUHandle* pHandle = taosLRUCacheLookup(pCache, pBuf, len);
|
||||
if (pHandle == NULL) {
|
||||
*acquireRes = 0;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -439,7 +441,7 @@ int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pK
|
|||
*acquireRes = 1;
|
||||
|
||||
const char* p = taosLRUCacheValue(pMeta->pCache->sTagFilterResCache.pUidResCache, pHandle);
|
||||
int32_t size = *(int32_t*) p;
|
||||
int32_t size = *(int32_t*)p;
|
||||
taosArrayAddBatch(pList1, p + sizeof(int32_t), size);
|
||||
|
||||
(*pEntry)->qTimes += 1;
|
||||
|
@ -454,7 +456,7 @@ int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pK
|
|||
|
||||
SListNode* pNode = NULL;
|
||||
while ((pNode = tdListNext(&iter)) != NULL) {
|
||||
memcpy(pBuf + sizeof(suid), pNode->data, keyLen);
|
||||
memcpy(&pBuf[1], pNode->data, keyLen);
|
||||
|
||||
// check whether it is existed in LRU cache, and remove it from linked list if not.
|
||||
LRUHandle* pRes = taosLRUCacheLookup(pCache, pBuf, len);
|
||||
|
@ -467,12 +469,15 @@ int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pK
|
|||
|
||||
// remove the keys, of which query uid lists have been replaced already.
|
||||
size_t s = taosArrayGetSize(pList);
|
||||
for(int32_t i = 0; i < s; ++i) {
|
||||
for (int32_t i = 0; i < s; ++i) {
|
||||
SListNode** p1 = taosArrayGet(pList, i);
|
||||
tdListPopNode(&(*pEntry)->list, *p1);
|
||||
taosMemoryFree(*p1);
|
||||
}
|
||||
|
||||
(*pEntry)->qTimes = 0; // reset the query times
|
||||
(*pEntry)->qTimes = 0; // reset the query times
|
||||
|
||||
taosArrayDestroy(pList);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -487,7 +492,8 @@ static void freePayload(const void* key, size_t keyLen, void* value) {
|
|||
}
|
||||
|
||||
// check both the payload size and selectivity ratio
|
||||
int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload, int32_t payloadLen, double selectivityRatio) {
|
||||
int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload,
|
||||
int32_t payloadLen, double selectivityRatio) {
|
||||
if (selectivityRatio > tsSelectivityRatio) {
|
||||
metaDebug("vgId:%d, suid:%" PRIu64
|
||||
" failed to add to uid list cache, due to selectivity ratio %.2f less than threshold %.2f",
|
||||
|
@ -525,9 +531,10 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int
|
|||
ASSERT(sizeof(uint64_t) + keyLen == 24);
|
||||
|
||||
// add to cache.
|
||||
taosLRUCacheInsert(pCache, pBuf, sizeof(uint64_t) + keyLen, pPayload, payloadLen, freePayload, NULL, TAOS_LRU_PRIORITY_LOW);
|
||||
metaDebug("vgId:%d, suid:%"PRIu64" list cache added into cache, total:%d, tables:%d", TD_VID(pMeta->pVnode),
|
||||
suid, (int32_t) taosLRUCacheGetUsage(pCache), taosHashGetSize(pTableEntry));
|
||||
taosLRUCacheInsert(pCache, pBuf, sizeof(uint64_t) + keyLen, pPayload, payloadLen, freePayload, NULL,
|
||||
TAOS_LRU_PRIORITY_LOW);
|
||||
metaDebug("vgId:%d, suid:%" PRIu64 " list cache added into cache, total:%d, tables:%d", TD_VID(pMeta->pVnode), suid,
|
||||
(int32_t)taosLRUCacheGetUsage(pCache), taosHashGetSize(pTableEntry));
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -539,7 +546,7 @@ int32_t metaUidCacheClear(SMeta* pMeta, uint64_t suid) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t keyLen = sizeof(uint64_t) * 3;
|
||||
int32_t keyLen = sizeof(uint64_t) * 3;
|
||||
uint64_t p[3] = {0};
|
||||
p[0] = suid;
|
||||
|
||||
|
|
|
@ -549,8 +549,8 @@ int metaTtlDropTable(SMeta *pMeta, int64_t ttl, SArray *tbUids) {
|
|||
}
|
||||
|
||||
static void metaBuildTtlIdxKey(STtlIdxKey *ttlKey, const SMetaEntry *pME) {
|
||||
int64_t ttlDays;
|
||||
int64_t ctime;
|
||||
int64_t ttlDays = 0;
|
||||
int64_t ctime = 0;
|
||||
if (pME->type == TSDB_CHILD_TABLE) {
|
||||
ctime = pME->ctbEntry.ctime;
|
||||
ttlDays = pME->ctbEntry.ttlDays;
|
||||
|
@ -1353,6 +1353,10 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) {
|
|||
goto end;
|
||||
}
|
||||
|
||||
if (stbEntry.stbEntry.schemaTag.pSchema == NULL) {
|
||||
goto end;
|
||||
}
|
||||
|
||||
pTagColumn = &stbEntry.stbEntry.schemaTag.pSchema[0];
|
||||
|
||||
STagVal tagVal = {.cid = pTagColumn->colId};
|
||||
|
|
|
@ -763,7 +763,7 @@ static int32_t tdExecuteRSmaImplAsync(SSma *pSma, const void *pMsg, int32_t inpu
|
|||
tb_uid_t suid) {
|
||||
const SSubmitReq *pReq = (const SSubmitReq *)pMsg;
|
||||
|
||||
void *qItem = taosAllocateQitem(pReq->header.contLen, DEF_QITEM);
|
||||
void *qItem = taosAllocateQitem(pReq->header.contLen, DEF_QITEM, 0);
|
||||
if (!qItem) {
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
|
|
@ -725,9 +725,15 @@ int32_t tqProcessDeleteSubReq(STQ* pTq, int64_t version, char* msg, int32_t msgL
|
|||
}
|
||||
taosWUnLockLatch(&pTq->pushLock);
|
||||
|
||||
code = taosHashRemove(pTq->pHandle, pReq->subKey, strlen(pReq->subKey));
|
||||
if (code != 0) {
|
||||
tqError("cannot process tq delete req %s, since no such handle", pReq->subKey);
|
||||
STqHandle* pHandle = taosHashGet(pTq->pHandle, pReq->subKey, strlen(pReq->subKey));
|
||||
if (pHandle) {
|
||||
if (pHandle->pRef) {
|
||||
walCloseRef(pTq->pVnode->pWal, pHandle->pRef->refId);
|
||||
}
|
||||
code = taosHashRemove(pTq->pHandle, pReq->subKey, strlen(pReq->subKey));
|
||||
if (code != 0) {
|
||||
tqError("cannot process tq delete req %s, since no such handle", pReq->subKey);
|
||||
}
|
||||
}
|
||||
|
||||
code = tqOffsetDelete(pTq->pOffsetStore, pReq->subKey);
|
||||
|
@ -736,7 +742,7 @@ int32_t tqProcessDeleteSubReq(STQ* pTq, int64_t version, char* msg, int32_t msgL
|
|||
}
|
||||
|
||||
if (tqMetaDeleteHandle(pTq, pReq->subKey) < 0) {
|
||||
ASSERT(0);
|
||||
tqError("cannot process tq delete req %s, since no such offset in tdb", pReq->subKey);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -1265,7 +1271,7 @@ int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver) {
|
|||
qDebug("delete req enqueue stream task: %d, ver: %" PRId64, pTask->taskId, ver);
|
||||
|
||||
if (!failed) {
|
||||
SStreamRefDataBlock* pRefBlock = taosAllocateQitem(sizeof(SStreamRefDataBlock), DEF_QITEM);
|
||||
SStreamRefDataBlock* pRefBlock = taosAllocateQitem(sizeof(SStreamRefDataBlock), DEF_QITEM, 0);
|
||||
pRefBlock->type = STREAM_INPUT__REF_DATA_BLOCK;
|
||||
pRefBlock->pBlock = pDelBlock;
|
||||
pRefBlock->dataRef = pRef;
|
||||
|
@ -1297,7 +1303,7 @@ int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver) {
|
|||
}
|
||||
|
||||
#if 0
|
||||
SStreamDataBlock* pStreamBlock = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
|
||||
SStreamDataBlock* pStreamBlock = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, 0);
|
||||
pStreamBlock->type = STREAM_INPUT__DATA_BLOCK;
|
||||
pStreamBlock->blocks = taosArrayInit(0, sizeof(SSDataBlock));
|
||||
SSDataBlock block = {0};
|
||||
|
|
|
@@ -952,12 +952,17 @@ static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTs
    SArray *pDelIdxArray = taosArrayInit(32, sizeof(SDelIdx));

    code = tsdbReadDelIdx(pDelFReader, pDelIdxArray);
    if (code) goto _err;
    if (code) {
      taosArrayDestroy(pDelIdxArray);
      tsdbDelFReaderClose(&pDelFReader);
      goto _err;
    }

    SDelIdx *delIdx = taosArraySearch(pDelIdxArray, &(SDelIdx){.suid = suid, .uid = uid}, tCmprDelIdx, TD_EQ);

    code = getTableDelSkyline(pMem, pIMem, pDelFReader, delIdx, pIter->pSkyline);
    if (code) {
      taosArrayDestroy(pDelIdxArray);
      tsdbDelFReaderClose(&pDelFReader);
      goto _err;
    }
@@ -53,6 +53,7 @@ typedef struct {
  // --------------
  TSKEY   nextKey;  // reset by each table commit
  int32_t commitFid;
  int32_t expLevel;
  TSKEY   minKey;
  TSKEY   maxKey;
  // commit file data
@@ -503,6 +504,7 @@ static int32_t tsdbCommitFileDataStart(SCommitter *pCommitter) {

  // memory
  pCommitter->commitFid = tsdbKeyFid(pCommitter->nextKey, pCommitter->minutes, pCommitter->precision);
  pCommitter->expLevel = tsdbFidLevel(pCommitter->commitFid, &pCommitter->pTsdb->keepCfg, taosGetTimestampSec());
  tsdbFidKeyRange(pCommitter->commitFid, pCommitter->minutes, pCommitter->precision, &pCommitter->minKey,
                  &pCommitter->maxKey);
#if 0

@@ -556,7 +558,10 @@ static int32_t tsdbCommitFileDataStart(SCommitter *pCommitter) {
    }
  } else {
    SDiskID did = {0};
    tfsAllocDisk(pTsdb->pVnode->pTfs, 0, &did);
    if (tfsAllocDisk(pTsdb->pVnode->pTfs, pCommitter->expLevel, &did) < 0) {
      code = terrno;
      TSDB_CHECK_CODE(code, lino, _exit);
    }
    tfsMkdirRecurAt(pTsdb->pVnode->pTfs, pTsdb->path, did);
    wSet.diskId = did;
    wSet.nSttF = 1;
@@ -962,6 +962,7 @@ int32_t tsdbFSUpsertFSet(STsdbFS *pFS, SDFileSet *pSet) {
        }
      }

      pDFileSet->diskId = pSet->diskId;
      goto _exit;
    }
  }
@@ -121,7 +121,7 @@ static SBlockData *loadLastBlock(SLDataIter *pIter, const char *idStr) {
    return &pInfo->blockData[1];
  }

  if (pIter->pSttBlk == NULL) {
  if (pIter->pSttBlk == NULL || pInfo->pSchema == NULL) {
    return NULL;
  }
@@ -2790,7 +2790,8 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
  while (1) {
    // load the last data block of current table
    STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)pStatus->pTableIter;
    bool hasVal = initLastBlockReader(pLastBlockReader, pScanInfo, pReader);

    bool hasVal = initLastBlockReader(pLastBlockReader, pScanInfo, pReader);
    if (!hasVal) {
      bool hasNexTable = moveToNextTable(pOrderedCheckInfo, pStatus);
      if (!hasNexTable) {
@@ -3129,7 +3130,7 @@ bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32
        return false;
      } else if (pKey->ts == last->ts) {
        TSDBKEY* prev = taosArrayGet(pDelList, num - 2);
        return (prev->version >= pKey->version);
        return (prev->version >= pKey->version && prev->version <= pVerRange->maxVer && prev->version >= pVerRange->minVer);
      }
    } else {
      TSDBKEY* pCurrent = taosArrayGet(pDelList, *index);
@@ -3823,6 +3824,8 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL
  }

  // NOTE: the endVersion in pCond is the data version not schema version, so pCond->endVersion is not correct here.
  // no valid error code set in metaGetTbTSchema, so let's set the error code here.
  // we should proceed in case of tmq processing.
  if (pCond->suid != 0) {
    pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, pReader->suid, -1, 1);
    if (pReader->pSchema == NULL) {

@@ -3840,7 +3843,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL
    updateBlockSMAInfo(pReader->pSchema, &pReader->suppInfo);
  }

  STsdbReader* p = (pReader->innerReader[0] != NULL)? pReader->innerReader[0]:pReader;
  STsdbReader* p = (pReader->innerReader[0] != NULL) ? pReader->innerReader[0] : pReader;
  pReader->status.pTableMap = createDataBlockScanInfo(p, &pReader->blockInfoBuf, pTableList, numOfTables);
  if (pReader->status.pTableMap == NULL) {
    tsdbReaderClose(p);
@@ -3888,10 +3891,11 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL
  tsdbDebug("%p total numOfTable:%d in this query %s", pReader, numOfTables, pReader->idStr);
  return code;

_err:
_err:
  tsdbError("failed to create data reader, code:%s %s", tstrerror(code), idstr);
  tsdbReaderClose(pReader);
  return code;
}
}

void tsdbReaderClose(STsdbReader* pReader) {
  if (pReader == NULL) {
@@ -4112,8 +4116,9 @@ static void doFillNullColSMA(SBlockLoadSuppInfo* pSup, int32_t numOfRows, int32_
    }
  }

int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SColumnDataAgg ***pBlockSMA, bool* allHave) {
int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SSDataBlock* pDataBlock, bool* allHave) {
  int32_t code = 0;
  SColumnDataAgg ***pBlockSMA = &pDataBlock->pBlockAgg;
  *allHave = false;

  if (pReader->type == TIMEWINDOW_RANGE_EXTERNAL) {
@@ -4161,6 +4166,12 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SColumnDataAgg ***pBlockS
  int32_t i = 0, j = 0;
  size_t  size = taosArrayGetSize(pSup->pColAgg);

  // ensure capacity
  if(pDataBlock->pDataBlock) {
    size_t colsNum = taosArrayGetSize(pDataBlock->pDataBlock);
    taosArrayEnsureCap(pSup->pColAgg, colsNum);
  }

  SSDataBlock* pResBlock = pReader->pResBlock;
  if (pResBlock->pBlockAgg == NULL) {
    size_t num = taosArrayGetSize(pResBlock->pDataBlock);
@@ -155,7 +155,7 @@ static int32_t tsdbSnapReadOpenFile(STsdbSnapReader* pReader) {

        if (rowVer >= pReader->sver && rowVer <= pReader->ever) {
          pIter->rInfo.suid = pIter->bData.suid;
          pIter->rInfo.uid = pIter->bData.uid;
          pIter->rInfo.uid = pIter->bData.uid ? pIter->bData.uid : pIter->bData.aUid[pIter->iRow];
          pIter->rInfo.row = tsdbRowFromBlockData(&pIter->bData, pIter->iRow);
          goto _add_iter;
        }
@@ -179,16 +179,14 @@ _err:
  return code;
}

static SRowInfo* tsdbSnapGetRow(STsdbSnapReader* pReader) { return pReader->pIter ? &pReader->pIter->rInfo : NULL; }

static int32_t tsdbSnapNextRow(STsdbSnapReader* pReader) {
  int32_t code = 0;

  if (pReader->pIter) {
    SFDataIter* pIter = pReader->pIter;

  SFDataIter* pIter = NULL;
  while (true) {
  _find_row:
    pIter = pReader->pIter;
    for (pIter->iRow++; pIter->iRow < pIter->bData.nRow; pIter->iRow++) {
      int64_t rowVer = pIter->bData.aVersion[pIter->iRow];

@@ -224,6 +222,7 @@ static int32_t tsdbSnapNextRow(STsdbSnapReader* pReader) {
      }

      pReader->pIter = NULL;
      break;
    } else if (pIter->type == SNAP_STT_FILE_ITER) {
      for (pIter->iSttBlk++; pIter->iSttBlk < taosArrayGetSize(pIter->aSttBlk); pIter->iSttBlk++) {
        SSttBlk* pSttBlk = (SSttBlk*)taosArrayGet(pIter->aSttBlk, pIter->iSttBlk);

@@ -238,6 +237,7 @@ static int32_t tsdbSnapNextRow(STsdbSnapReader* pReader) {
      }

      pReader->pIter = NULL;
      break;
    } else {
      ASSERT(0);
    }
@@ -269,6 +269,20 @@ _err:
  return code;
}

static SRowInfo* tsdbSnapGetRow(STsdbSnapReader* pReader) {
  if (pReader->pIter) {
    return &pReader->pIter->rInfo;
  } else {
    tsdbSnapNextRow(pReader);

    if (pReader->pIter) {
      return &pReader->pIter->rInfo;
    } else {
      return NULL;
    }
  }
}

static int32_t tsdbSnapCmprData(STsdbSnapReader* pReader, uint8_t** ppData) {
  int32_t code = 0;
@@ -1356,7 +1370,7 @@ _exit:
      taosMemoryFree(pWriter);
    }
  } else {
    tsdbDebug("vgId:%d, tsdb snapshot writer open for %s succeed", TD_VID(pTsdb->pVnode), pTsdb->path);
    tsdbInfo("vgId:%d %s done", TD_VID(pTsdb->pVnode), __func__);
    *ppWriter = pWriter;
  }
  return code;

@@ -1421,7 +1435,7 @@ int32_t tsdbSnapWriterClose(STsdbSnapWriter** ppWriter, int8_t rollback) {
  for (int32_t iBuf = 0; iBuf < sizeof(pWriter->aBuf) / sizeof(uint8_t*); iBuf++) {
    tFree(pWriter->aBuf[iBuf]);
  }
  tsdbInfo("vgId:%d, vnode snapshot tsdb writer close for %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path);
  tsdbInfo("vgId:%d %s done", TD_VID(pWriter->pTsdb->pVnode), __func__);
  taosMemoryFree(pWriter);
  *ppWriter = NULL;
  return code;
@@ -190,9 +190,13 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp
            version);

  ASSERT(pVnode->state.applyTerm <= pMsg->info.conn.applyTerm);
  ASSERT(pVnode->state.applied + 1 == version);

  pVnode->state.applied = version;
  pVnode->state.applyTerm = pMsg->info.conn.applyTerm;

  if (!syncUtilUserCommit(pMsg->msgType)) goto _exit;

  // skip header
  pReq = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
  len = pMsg->contLen - sizeof(SMsgHead);
@@ -547,6 +547,14 @@ typedef struct SCtgOperation {
#define ctgDebug(param, ...) qDebug("CTG:%p " param, pCtg, __VA_ARGS__)
#define ctgTrace(param, ...) qTrace("CTG:%p " param, pCtg, __VA_ARGS__)

#define ctgTaskFatal(param, ...) qFatal("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskError(param, ...) qError("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskWarn(param, ...) qWarn("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskInfo(param, ...) qInfo("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskDebug(param, ...) qDebug("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskTrace(param, ...) qTrace("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)

#define CTG_LOCK_DEBUG(...)     \
  do {                          \
    if (gCTGDebug.lockEnable) { \

@@ -790,6 +798,9 @@ SName* ctgGetFetchName(SArray* pNames, SCtgFetch* pFetch);
int32_t ctgdGetOneHandle(SCatalog **pHandle);
int ctgVgInfoComp(const void* lp, const void* rp);
int32_t ctgMakeVgArray(SDBVgInfo* dbInfo);
int32_t ctgAcquireVgMetaFromCache(SCatalog *pCtg, const char *dbFName, const char *tbName, SCtgDBCache **pDb, SCtgTbCache **pTb);
int32_t ctgCopyTbMeta(SCatalog *pCtg, SCtgTbMetaCtx *ctx, SCtgDBCache **pDb, SCtgTbCache **pTb, STableMeta **pTableMeta, char* dbFName);
void ctgReleaseVgMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, SCtgTbCache *pCache);

extern SCatalogMgmt gCtgMgmt;
extern SCtgDebug gCTGDebug;
@@ -551,6 +551,35 @@ _return:
  CTG_RET(code);
}

int32_t ctgGetCachedTbVgMeta(SCatalog* pCtg, const SName* pTableName, SVgroupInfo* pVgroup, STableMeta** pTableMeta) {
  int32_t code = 0;
  char db[TSDB_DB_FNAME_LEN] = {0};
  tNameGetFullDbName(pTableName, db);
  SCtgDBCache *dbCache = NULL;
  SCtgTbCache *tbCache = NULL;

  CTG_ERR_RET(ctgAcquireVgMetaFromCache(pCtg, db, pTableName->tname, &dbCache, &tbCache));

  if (NULL == dbCache || NULL == tbCache) {
    *pTableMeta = NULL;
    return TSDB_CODE_SUCCESS;
  }

  CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pTableName, pVgroup));

  SCtgTbMetaCtx ctx = {0};
  ctx.pName = (SName*)pTableName;
  ctx.flag = CTG_FLAG_UNKNOWN_STB;
  CTG_ERR_JRET(ctgCopyTbMeta(pCtg, &ctx, &dbCache, &tbCache, pTableMeta, db));

_return:

  ctgReleaseVgMetaToCache(pCtg, dbCache, tbCache);

  CTG_RET(code);
}

int32_t ctgRemoveTbMeta(SCatalog* pCtg, SName* pTableName) {
  int32_t code = 0;

@@ -1118,6 +1147,13 @@ int32_t catalogGetCachedTableHashVgroup(SCatalog* pCtg, const SName* pTableName,
  CTG_API_LEAVE(ctgGetTbHashVgroup(pCtg, NULL, pTableName, pVgroup, exists));
}

int32_t catalogGetCachedTableVgMeta(SCatalog* pCtg, const SName* pTableName, SVgroupInfo* pVgroup, STableMeta** pTableMeta) {
  CTG_API_ENTER();

  CTG_API_LEAVE(ctgGetCachedTbVgMeta(pCtg, pTableName, pVgroup, pTableMeta));
}

#if 0
int32_t catalogGetAllMeta(SCatalog* pCtg, SRequestConnInfo* pConn, const SCatalogReq* pReq, SMetaData* pRsp) {
  CTG_API_ENTER();
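`catalogGetCachedTableVgMeta` is a new cache-only entry point: it resolves the table's vgroup and table meta from the catalog cache and reports a miss by returning success with `*pTableMeta` left NULL. A hypothetical caller sketch is below; variable names are illustrative, and the pattern follows how the parser hunks later in this commit consume the API.

```c
// Sketch only: cache-first lookup with explicit miss handling (pCtg and pTabName are
// assumed to be an initialized SCatalog* and SName* from the caller's context).
SVgroupInfo vg = {0};
STableMeta* pTableMeta = NULL;

int32_t code = catalogGetCachedTableVgMeta(pCtg, pTabName, &vg, &pTableMeta);
if (TSDB_CODE_SUCCESS == code) {
  if (NULL == pTableMeta) {
    // cache miss: fall back to the asynchronous catalog request path
  } else {
    // cache hit: vg and pTableMeta are both usable; the caller owns pTableMeta
  }
}
```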
@ -1094,6 +1094,9 @@ _return:
|
|||
ctgReleaseVgInfoToCache(pCtg, dbCache);
|
||||
}
|
||||
|
||||
if (code) {
|
||||
ctgTaskError("Get table %d.%s.%s meta failed with error %s", pName->acctId, pName->dbname, pName->tname, tstrerror(code));
|
||||
}
|
||||
if (pTask->res || code) {
|
||||
ctgHandleTaskEnd(pTask, code);
|
||||
}
|
||||
|
@ -1124,7 +1127,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
|
|||
SVgroupInfo vgInfo = {0};
|
||||
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pOut->dbVgroup, pName, &vgInfo));
|
||||
|
||||
ctgDebug("will refresh tbmeta, not supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag);
|
||||
ctgTaskDebug("will refresh tbmeta, not supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag);
|
||||
|
||||
*vgId = vgInfo.vgId;
|
||||
CTG_ERR_JRET(ctgGetTbMetaFromVnode(pCtg, pConn, pName, &vgInfo, NULL, tReq));
|
||||
|
@ -1144,7 +1147,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
|
|||
SVgroupInfo vgInfo = {0};
|
||||
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pName, &vgInfo));
|
||||
|
||||
ctgDebug("will refresh tbmeta, supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag);
|
||||
ctgTaskDebug("will refresh tbmeta, supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag);
|
||||
|
||||
*vgId = vgInfo.vgId;
|
||||
CTG_ERR_JRET(ctgGetTbMetaFromVnode(pCtg, pConn, pName, &vgInfo, NULL, tReq));
|
||||
|
@ -1162,7 +1165,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
ctgError("no tbmeta got, tbName:%s", tNameGetTableName(pName));
|
||||
ctgTaskError("no tbmeta got, tbName:%s", tNameGetTableName(pName));
|
||||
ctgRemoveTbMetaFromCache(pCtg, pName, false);
|
||||
|
||||
CTG_ERR_JRET(CTG_ERR_CODE_TABLE_NOT_EXIST);
|
||||
|
@ -1180,7 +1183,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
|
|||
STableMetaOutput* pOut = (STableMetaOutput*)pMsgCtx->out;
|
||||
|
||||
if (CTG_IS_META_NULL(pOut->metaType)) {
|
||||
ctgError("no tbmeta got, tbNmae:%s", tNameGetTableName(pName));
|
||||
ctgTaskError("no tbmeta got, tbNmae:%s", tNameGetTableName(pName));
|
||||
ctgRemoveTbMetaFromCache(pCtg, pName, false);
|
||||
CTG_ERR_JRET(CTG_ERR_CODE_TABLE_NOT_EXIST);
|
||||
}
|
||||
|
@ -1190,7 +1193,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
|
|||
}
|
||||
|
||||
if (CTG_IS_META_TABLE(pOut->metaType) && TSDB_SUPER_TABLE == pOut->tbMeta->tableType) {
|
||||
ctgDebug("will continue to refresh tbmeta since got stb, tbName:%s", tNameGetTableName(pName));
|
||||
ctgTaskDebug("will continue to refresh tbmeta since got stb, tbName:%s", tNameGetTableName(pName));
|
||||
|
||||
taosMemoryFreeClear(pOut->tbMeta);
|
||||
|
||||
|
@ -1207,11 +1210,11 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
|
|||
STableMeta* stbMeta = NULL;
|
||||
(void)ctgReadTbMetaFromCache(pCtg, &stbCtx, &stbMeta);
|
||||
if (stbMeta && stbMeta->sversion >= pOut->tbMeta->sversion) {
|
||||
ctgDebug("use cached stb meta, tbName:%s", tNameGetTableName(pName));
|
||||
ctgTaskDebug("use cached stb meta, tbName:%s", tNameGetTableName(pName));
|
||||
exist = 1;
|
||||
taosMemoryFreeClear(stbMeta);
|
||||
} else {
|
||||
ctgDebug("need to get/update stb meta, tbName:%s", tNameGetTableName(pName));
|
||||
ctgTaskDebug("need to get/update stb meta, tbName:%s", tNameGetTableName(pName));
|
||||
taosMemoryFreeClear(pOut->tbMeta);
|
||||
taosMemoryFreeClear(stbMeta);
|
||||
}
|
||||
|
@ -1225,7 +1228,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
|
|||
break;
|
||||
}
|
||||
default:
|
||||
ctgError("invalid reqType %d", reqType);
|
||||
ctgTaskError("invalid reqType %d", reqType);
|
||||
CTG_ERR_JRET(TSDB_CODE_INVALID_MSG);
|
||||
}
|
||||
|
||||
|
@ -1280,6 +1283,7 @@ _return:
|
|||
TSWAP(pTask->res, ctx->pResList);
|
||||
taskDone = true;
|
||||
}
|
||||
ctgTaskError("Get table %d.%s.%s meta failed with error %s", pName->acctId, pName->dbname, pName->tname, tstrerror(code));
|
||||
}
|
||||
|
||||
if (pTask->res && taskDone) {
|
||||
|
|
|
@ -130,7 +130,7 @@ void ctgReleaseVgInfoToCache(SCatalog *pCtg, SCtgDBCache *dbCache) {
|
|||
}
|
||||
|
||||
void ctgReleaseTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, SCtgTbCache *pCache) {
|
||||
if (pCache) {
|
||||
if (pCache && dbCache) {
|
||||
CTG_UNLOCK(CTG_READ, &pCache->metaLock);
|
||||
taosHashRelease(dbCache->tbCache, pCache);
|
||||
}
|
||||
|
@ -151,6 +151,18 @@ void ctgReleaseTbIndexToCache(SCatalog *pCtg, SCtgDBCache *dbCache, SCtgTbCache
|
|||
}
|
||||
}
|
||||
|
||||
void ctgReleaseVgMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, SCtgTbCache *pCache) {
|
||||
if (pCache && dbCache) {
|
||||
CTG_UNLOCK(CTG_READ, &pCache->metaLock);
|
||||
taosHashRelease(dbCache->tbCache, pCache);
|
||||
}
|
||||
|
||||
if (dbCache) {
|
||||
ctgRUnlockVgInfo(dbCache);
|
||||
ctgReleaseDBCache(pCtg, dbCache);
|
||||
}
|
||||
}
|
||||
|
||||
int32_t ctgAcquireVgInfoFromCache(SCatalog *pCtg, const char *dbFName, SCtgDBCache **pCache) {
|
||||
SCtgDBCache *dbCache = NULL;
|
||||
ctgAcquireDBCache(pCtg, dbFName, &dbCache);
|
||||
|
@ -226,6 +238,75 @@ _return:
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t ctgAcquireVgMetaFromCache(SCatalog *pCtg, const char *dbFName, const char *tbName, SCtgDBCache **pDb, SCtgTbCache **pTb) {
|
||||
SCtgDBCache *dbCache = NULL;
|
||||
SCtgTbCache *tbCache = NULL;
|
||||
bool vgInCache = false;
|
||||
|
||||
ctgAcquireDBCache(pCtg, dbFName, &dbCache);
|
||||
if (NULL == dbCache) {
|
||||
ctgDebug("db %s not in cache", dbFName);
|
||||
CTG_CACHE_STAT_INC(numOfVgMiss, 1);
|
||||
goto _return;
|
||||
}
|
||||
|
||||
ctgRLockVgInfo(pCtg, dbCache, &vgInCache);
|
||||
if (!vgInCache) {
|
||||
ctgDebug("vgInfo of db %s not in cache", dbFName);
|
||||
CTG_CACHE_STAT_INC(numOfVgMiss, 1);
|
||||
goto _return;
|
||||
}
|
||||
|
||||
*pDb = dbCache;
|
||||
|
||||
CTG_CACHE_STAT_INC(numOfVgHit, 1);
|
||||
|
||||
ctgDebug("Got db vgInfo from cache, dbFName:%s", dbFName);
|
||||
|
||||
tbCache = taosHashAcquire(dbCache->tbCache, tbName, strlen(tbName));
|
||||
if (NULL == tbCache) {
|
||||
ctgDebug("tb %s not in cache, dbFName:%s", tbName, dbFName);
|
||||
CTG_CACHE_STAT_INC(numOfMetaMiss, 1);
|
||||
goto _return;
|
||||
}
|
||||
|
||||
CTG_LOCK(CTG_READ, &tbCache->metaLock);
|
||||
if (NULL == tbCache->pMeta) {
|
||||
ctgDebug("tb %s meta not in cache, dbFName:%s", tbName, dbFName);
|
||||
CTG_CACHE_STAT_INC(numOfMetaMiss, 1);
|
||||
goto _return;
|
||||
}
|
||||
|
||||
*pTb = tbCache;
|
||||
|
||||
ctgDebug("tb %s meta got in cache, dbFName:%s", tbName, dbFName);
|
||||
|
||||
CTG_CACHE_STAT_INC(numOfMetaHit, 1);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
||||
_return:
|
||||
|
||||
if (tbCache) {
|
||||
CTG_UNLOCK(CTG_READ, &tbCache->metaLock);
|
||||
taosHashRelease(dbCache->tbCache, tbCache);
|
||||
}
|
||||
|
||||
if (vgInCache) {
|
||||
ctgRUnlockVgInfo(dbCache);
|
||||
}
|
||||
|
||||
if (dbCache) {
|
||||
ctgReleaseDBCache(pCtg, dbCache);
|
||||
}
|
||||
|
||||
*pDb = NULL;
|
||||
*pTb = NULL;
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
int32_t ctgAcquireStbMetaFromCache(SCatalog *pCtg, char *dbFName, uint64_t suid, SCtgDBCache **pDb, SCtgTbCache **pTb) {
|
||||
SCtgDBCache *dbCache = NULL;
|
||||
|
@ -378,6 +459,78 @@ int32_t ctgTbMetaExistInCache(SCatalog *pCtg, char *dbFName, char *tbName, int32
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t ctgCopyTbMeta(SCatalog *pCtg, SCtgTbMetaCtx *ctx, SCtgDBCache **pDb, SCtgTbCache **pTb, STableMeta **pTableMeta, char* dbFName) {
|
||||
SCtgDBCache *dbCache = *pDb;
|
||||
SCtgTbCache *tbCache = *pTb;
|
||||
STableMeta *tbMeta = tbCache->pMeta;
|
||||
ctx->tbInfo.inCache = true;
|
||||
ctx->tbInfo.dbId = dbCache->dbId;
|
||||
ctx->tbInfo.suid = tbMeta->suid;
|
||||
ctx->tbInfo.tbType = tbMeta->tableType;
|
||||
|
||||
if (tbMeta->tableType != TSDB_CHILD_TABLE) {
|
||||
int32_t metaSize = CTG_META_SIZE(tbMeta);
|
||||
*pTableMeta = taosMemoryCalloc(1, metaSize);
|
||||
if (NULL == *pTableMeta) {
|
||||
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
memcpy(*pTableMeta, tbMeta, metaSize);
|
||||
|
||||
ctgDebug("Got tb %s meta from cache, type:%d, dbFName:%s", ctx->pName->tname, tbMeta->tableType, dbFName);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
// PROCESS FOR CHILD TABLE
|
||||
|
||||
int32_t metaSize = sizeof(SCTableMeta);
|
||||
*pTableMeta = taosMemoryCalloc(1, metaSize);
|
||||
if (NULL == *pTableMeta) {
|
||||
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
memcpy(*pTableMeta, tbMeta, metaSize);
|
||||
|
||||
//ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
|
||||
|
||||
if (tbCache) {
|
||||
CTG_UNLOCK(CTG_READ, &tbCache->metaLock);
|
||||
taosHashRelease(dbCache->tbCache, tbCache);
|
||||
*pTb = NULL;
|
||||
}
|
||||
|
||||
ctgDebug("Got ctb %s meta from cache, will continue to get its stb meta, type:%d, dbFName:%s", ctx->pName->tname,
|
||||
ctx->tbInfo.tbType, dbFName);
|
||||
|
||||
ctgAcquireStbMetaFromCache(dbCache, pCtg, dbFName, ctx->tbInfo.suid, &tbCache);
|
||||
if (NULL == tbCache) {
|
||||
taosMemoryFreeClear(*pTableMeta);
|
||||
*pDb = NULL;
|
||||
ctgDebug("stb 0x%" PRIx64 " meta not in cache", ctx->tbInfo.suid);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
*pTb = tbCache;
|
||||
|
||||
STableMeta *stbMeta = tbCache->pMeta;
|
||||
if (stbMeta->suid != ctx->tbInfo.suid) {
|
||||
ctgError("stb suid 0x%" PRIx64 " in stbCache mis-match, expected suid 0x%" PRIx64, stbMeta->suid, ctx->tbInfo.suid);
|
||||
taosMemoryFreeClear(*pTableMeta);
|
||||
CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR);
|
||||
}
|
||||
|
||||
metaSize = CTG_META_SIZE(stbMeta);
|
||||
*pTableMeta = taosMemoryRealloc(*pTableMeta, metaSize);
|
||||
if (NULL == *pTableMeta) {
|
||||
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
memcpy(&(*pTableMeta)->sversion, &stbMeta->sversion, metaSize - sizeof(SCTableMeta));
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
int32_t ctgReadTbMetaFromCache(SCatalog *pCtg, SCtgTbMetaCtx *ctx, STableMeta **pTableMeta) {
|
||||
int32_t code = 0;
|
||||
SCtgDBCache *dbCache = NULL;
|
||||
|
@ -397,70 +550,7 @@ int32_t ctgReadTbMetaFromCache(SCatalog *pCtg, SCtgTbMetaCtx *ctx, STableMeta **
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
STableMeta *tbMeta = tbCache->pMeta;
|
||||
ctx->tbInfo.inCache = true;
|
||||
ctx->tbInfo.dbId = dbCache->dbId;
|
||||
ctx->tbInfo.suid = tbMeta->suid;
|
||||
ctx->tbInfo.tbType = tbMeta->tableType;
|
||||
|
||||
if (tbMeta->tableType != TSDB_CHILD_TABLE) {
|
||||
int32_t metaSize = CTG_META_SIZE(tbMeta);
|
||||
*pTableMeta = taosMemoryCalloc(1, metaSize);
|
||||
if (NULL == *pTableMeta) {
|
||||
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
|
||||
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
memcpy(*pTableMeta, tbMeta, metaSize);
|
||||
|
||||
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
|
||||
ctgDebug("Got tb %s meta from cache, type:%d, dbFName:%s", ctx->pName->tname, tbMeta->tableType, dbFName);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
// PROCESS FOR CHILD TABLE
|
||||
|
||||
int32_t metaSize = sizeof(SCTableMeta);
|
||||
*pTableMeta = taosMemoryCalloc(1, metaSize);
|
||||
if (NULL == *pTableMeta) {
|
||||
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
memcpy(*pTableMeta, tbMeta, metaSize);
|
||||
|
||||
//ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
|
||||
|
||||
if (tbCache) {
|
||||
CTG_UNLOCK(CTG_READ, &tbCache->metaLock);
|
||||
taosHashRelease(dbCache->tbCache, tbCache);
|
||||
}
|
||||
|
||||
ctgDebug("Got ctb %s meta from cache, will continue to get its stb meta, type:%d, dbFName:%s", ctx->pName->tname,
|
||||
ctx->tbInfo.tbType, dbFName);
|
||||
|
||||
ctgAcquireStbMetaFromCache(dbCache, pCtg, dbFName, ctx->tbInfo.suid, &tbCache);
|
||||
if (NULL == tbCache) {
|
||||
//ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
|
||||
taosMemoryFreeClear(*pTableMeta);
|
||||
ctgDebug("stb 0x%" PRIx64 " meta not in cache", ctx->tbInfo.suid);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
STableMeta *stbMeta = tbCache->pMeta;
|
||||
if (stbMeta->suid != ctx->tbInfo.suid) {
|
||||
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
|
||||
ctgError("stb suid 0x%" PRIx64 " in stbCache mis-match, expected suid 0x%" PRIx64, stbMeta->suid, ctx->tbInfo.suid);
|
||||
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
|
||||
}
|
||||
|
||||
metaSize = CTG_META_SIZE(stbMeta);
|
||||
*pTableMeta = taosMemoryRealloc(*pTableMeta, metaSize);
|
||||
if (NULL == *pTableMeta) {
|
||||
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
|
||||
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
memcpy(&(*pTableMeta)->sversion, &stbMeta->sversion, metaSize - sizeof(SCTableMeta));
|
||||
CTG_ERR_JRET(ctgCopyTbMeta(pCtg, ctx, &dbCache, &tbCache, pTableMeta, dbFName));
|
||||
|
||||
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
|
||||
|
||||
|
|
|
@ -133,7 +133,7 @@ static int32_t getStatus(SDataDeleterHandle* pDeleter) {
|
|||
|
||||
static int32_t putDataBlock(SDataSinkHandle* pHandle, const SInputData* pInput, bool* pContinue) {
|
||||
SDataDeleterHandle* pDeleter = (SDataDeleterHandle*)pHandle;
|
||||
SDataDeleterBuf* pBuf = taosAllocateQitem(sizeof(SDataDeleterBuf), DEF_QITEM);
|
||||
SDataDeleterBuf* pBuf = taosAllocateQitem(sizeof(SDataDeleterBuf), DEF_QITEM, 0);
|
||||
if (NULL == pBuf) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
|
|
@ -126,7 +126,7 @@ static int32_t getStatus(SDataDispatchHandle* pDispatcher) {
|
|||
|
||||
static int32_t putDataBlock(SDataSinkHandle* pHandle, const SInputData* pInput, bool* pContinue) {
|
||||
SDataDispatchHandle* pDispatcher = (SDataDispatchHandle*)pHandle;
|
||||
SDataDispatchBuf* pBuf = taosAllocateQitem(sizeof(SDataDispatchBuf), DEF_QITEM);
|
||||
SDataDispatchBuf* pBuf = taosAllocateQitem(sizeof(SDataDispatchBuf), DEF_QITEM, 0);
|
||||
if (NULL == pBuf) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
|
|
@ -314,7 +314,8 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
|
|||
}
|
||||
|
||||
int32_t rowIndex = j - num;
|
||||
applyAggFunctionOnPartialTuples(pTaskInfo, pCtx, NULL, rowIndex, num, pBlock->info.rows, pOperator->exprSupp.numOfExprs);
|
||||
applyAggFunctionOnPartialTuples(pTaskInfo, pCtx, NULL, rowIndex, num, pBlock->info.rows,
|
||||
pOperator->exprSupp.numOfExprs);
|
||||
|
||||
// assign the group keys or user input constant values if required
|
||||
doAssignGroupKeys(pCtx, pOperator->exprSupp.numOfExprs, pBlock->info.rows, rowIndex);
|
||||
|
@ -331,7 +332,8 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
|
|||
}
|
||||
|
||||
int32_t rowIndex = pBlock->info.rows - num;
|
||||
applyAggFunctionOnPartialTuples(pTaskInfo, pCtx, NULL, rowIndex, num, pBlock->info.rows, pOperator->exprSupp.numOfExprs);
|
||||
applyAggFunctionOnPartialTuples(pTaskInfo, pCtx, NULL, rowIndex, num, pBlock->info.rows,
|
||||
pOperator->exprSupp.numOfExprs);
|
||||
doAssignGroupKeys(pCtx, pOperator->exprSupp.numOfExprs, pBlock->info.rows, rowIndex);
|
||||
}
|
||||
}
|
||||
|
@ -469,8 +471,8 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode*
|
|||
initResultRowInfo(&pInfo->binfo.resultRowInfo);
|
||||
setOperatorInfo(pOperator, "GroupbyAggOperator", 0, true, OP_NOT_OPENED, pInfo, pTaskInfo);
|
||||
|
||||
pOperator->fpSet =
|
||||
createOperatorFpSet(optrDummyOpenFn, hashGroupbyAggregate, NULL, destroyGroupOperatorInfo, optrDefaultBufFn, NULL);
|
||||
pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, hashGroupbyAggregate, NULL, destroyGroupOperatorInfo,
|
||||
optrDefaultBufFn, NULL);
|
||||
code = appendDownstream(pOperator, &downstream, 1);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _error;
|
||||
|
@ -776,6 +778,12 @@ static void destroyPartitionOperatorInfo(void* param) {
|
|||
|
||||
taosArrayDestroy(pInfo->pGroupColVals);
|
||||
taosMemoryFree(pInfo->keyBuf);
|
||||
|
||||
int32_t size = taosArrayGetSize(pInfo->sortedGroupArray);
|
||||
for (int32_t i = 0; i < size; i++) {
|
||||
SDataGroupInfo* pGp = taosArrayGet(pInfo->sortedGroupArray, i);
|
||||
taosArrayDestroy(pGp->pPageList);
|
||||
}
|
||||
taosArrayDestroy(pInfo->sortedGroupArray);
|
||||
|
||||
void* pGroupIter = taosHashIterate(pInfo->pGroupSet, NULL);
|
||||
|
@ -850,7 +858,8 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
|
|||
pOperator->exprSupp.numOfExprs = numOfCols;
|
||||
pOperator->exprSupp.pExprInfo = pExprInfo;
|
||||
|
||||
pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, hashPartition, NULL, destroyPartitionOperatorInfo, optrDefaultBufFn, NULL);
|
||||
pOperator->fpSet =
|
||||
createOperatorFpSet(optrDummyOpenFn, hashPartition, NULL, destroyPartitionOperatorInfo, optrDefaultBufFn, NULL);
|
||||
|
||||
code = appendDownstream(pOperator, &downstream, 1);
|
||||
return pOperator;
|
||||
|
@ -1141,8 +1150,8 @@ SOperatorInfo* createStreamPartitionOperatorInfo(SOperatorInfo* downstream, SStr
|
|||
pInfo, pTaskInfo);
|
||||
pOperator->exprSupp.numOfExprs = numOfCols;
|
||||
pOperator->exprSupp.pExprInfo = pExprInfo;
|
||||
pOperator->fpSet =
|
||||
createOperatorFpSet(optrDummyOpenFn, doStreamHashPartition, NULL, destroyStreamPartitionOperatorInfo, optrDefaultBufFn, NULL);
|
||||
pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doStreamHashPartition, NULL,
|
||||
destroyStreamPartitionOperatorInfo, optrDefaultBufFn, NULL);
|
||||
|
||||
initParDownStream(downstream, &pInfo->partitionSup, &pInfo->scalarSup);
|
||||
code = appendDownstream(pOperator, &downstream, 1);
|
||||
|
|
|
@ -224,7 +224,7 @@ static bool doFilterByBlockSMA(SFilterInfo* pFilterInfo, SColumnDataAgg** pColsA
|
|||
|
||||
static bool doLoadBlockSMA(STableScanBase* pTableScanInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo) {
|
||||
bool allColumnsHaveAgg = true;
|
||||
int32_t code = tsdbRetrieveDatablockSMA(pTableScanInfo->dataReader, &pBlock->pBlockAgg, &allColumnsHaveAgg);
|
||||
int32_t code = tsdbRetrieveDatablockSMA(pTableScanInfo->dataReader, pBlock, &allColumnsHaveAgg);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
T_LONG_JMP(pTaskInfo->env, code);
|
||||
}
|
||||
|
|
|
@ -49,10 +49,10 @@ static void doKeepPrevRows(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlock
|
|||
if (!colDataIsNull_s(pColInfoData, rowIndex)) {
|
||||
pkey->isNull = false;
|
||||
char* val = colDataGetData(pColInfoData, rowIndex);
|
||||
if (!IS_VAR_DATA_TYPE(pkey->type)) {
|
||||
memcpy(pkey->pData, val, pkey->bytes);
|
||||
} else {
|
||||
if (IS_VAR_DATA_TYPE(pkey->type)) {
|
||||
memcpy(pkey->pData, val, varDataLen(val));
|
||||
} else {
|
||||
memcpy(pkey->pData, val, pkey->bytes);
|
||||
}
|
||||
} else {
|
||||
pkey->isNull = true;
|
||||
|
@ -95,14 +95,29 @@ static void doKeepLinearInfo(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlo
|
|||
// TODO: optimize to ignore null values for linear interpolation.
|
||||
if (!pLinearInfo->isStartSet) {
|
||||
if (!colDataIsNull_s(pColInfoData, rowIndex)) {
|
||||
ASSERT(IS_MATHABLE_TYPE(pColInfoData->info.type));
|
||||
|
||||
pLinearInfo->start.key = *(int64_t*)colDataGetData(pTsCol, rowIndex);
|
||||
memcpy(pLinearInfo->start.val, colDataGetData(pColInfoData, rowIndex), pLinearInfo->bytes);
|
||||
char* p = colDataGetData(pColInfoData, rowIndex);
|
||||
if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) {
|
||||
ASSERT(varDataTLen(p) <= pColInfoData->info.bytes);
|
||||
memcpy(pLinearInfo->start.val, p, varDataTLen(p));
|
||||
} else {
|
||||
memcpy(pLinearInfo->start.val, p, pLinearInfo->bytes);
|
||||
}
|
||||
}
|
||||
pLinearInfo->isStartSet = true;
|
||||
} else if (!pLinearInfo->isEndSet) {
|
||||
if (!colDataIsNull_s(pColInfoData, rowIndex)) {
|
||||
pLinearInfo->end.key = *(int64_t*)colDataGetData(pTsCol, rowIndex);
|
||||
memcpy(pLinearInfo->end.val, colDataGetData(pColInfoData, rowIndex), pLinearInfo->bytes);
|
||||
|
||||
char* p = colDataGetData(pColInfoData, rowIndex);
|
||||
if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) {
|
||||
ASSERT(varDataTLen(p) <= pColInfoData->info.bytes);
|
||||
memcpy(pLinearInfo->end.val, p, varDataTLen(p));
|
||||
} else {
|
||||
memcpy(pLinearInfo->end.val, p, pLinearInfo->bytes);
|
||||
}
|
||||
}
|
||||
pLinearInfo->isEndSet = true;
|
||||
} else {
|
||||
|
@ -111,7 +126,15 @@ static void doKeepLinearInfo(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlo
|
|||
|
||||
if (!colDataIsNull_s(pColInfoData, rowIndex)) {
|
||||
pLinearInfo->end.key = *(int64_t*)colDataGetData(pTsCol, rowIndex);
|
||||
memcpy(pLinearInfo->end.val, colDataGetData(pColInfoData, rowIndex), pLinearInfo->bytes);
|
||||
|
||||
char* p = colDataGetData(pColInfoData, rowIndex);
|
||||
if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) {
|
||||
ASSERT(varDataTLen(p) <= pColInfoData->info.bytes);
|
||||
memcpy(pLinearInfo->end.val, p, varDataTLen(p));
|
||||
} else {
|
||||
memcpy(pLinearInfo->end.val, p, pLinearInfo->bytes);
|
||||
}
|
||||
|
||||
} else {
|
||||
pLinearInfo->end.key = INT64_MIN;
|
||||
}
|
||||
|
|
|
@ -23,6 +23,7 @@
|
|||
#include "ttime.h"
|
||||
|
||||
#define IS_FINAL_OP(op) ((op)->isFinal)
|
||||
#define DEAULT_DELETE_MARK (1000LL * 60LL * 60LL * 24LL * 365LL * 10LL);
|
||||
|
||||
typedef struct SSessionAggOperatorInfo {
|
||||
SOptrBasicInfo binfo;
|
||||
|
@ -56,6 +57,7 @@ typedef enum SResultTsInterpType {
|
|||
typedef struct SPullWindowInfo {
|
||||
STimeWindow window;
|
||||
uint64_t groupId;
|
||||
STimeWindow calWin;
|
||||
} SPullWindowInfo;
|
||||
|
||||
typedef struct SOpenWindowInfo {
|
||||
|
@ -793,17 +795,18 @@ int32_t comparePullWinKey(void* pKey, void* data, int32_t index) {
|
|||
SArray* res = (SArray*)data;
|
||||
SPullWindowInfo* pos = taosArrayGet(res, index);
|
||||
SPullWindowInfo* pData = (SPullWindowInfo*)pKey;
|
||||
if (pData->window.skey == pos->window.skey) {
|
||||
if (pData->groupId > pos->groupId) {
|
||||
return 1;
|
||||
} else if (pData->groupId < pos->groupId) {
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
} else if (pData->window.skey > pos->window.skey) {
|
||||
if (pData->groupId > pos->groupId) {
|
||||
return 1;
|
||||
} else if (pData->groupId < pos->groupId) {
|
||||
return -1;
|
||||
}
|
||||
return -1;
|
||||
|
||||
if (pData->window.skey > pos->window.ekey) {
|
||||
return 1;
|
||||
} else if (pData->window.ekey < pos->window.skey) {
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int32_t savePullWindow(SPullWindowInfo* pPullInfo, SArray* pPullWins) {
|
||||
|
@ -812,10 +815,16 @@ static int32_t savePullWindow(SPullWindowInfo* pPullInfo, SArray* pPullWins) {
|
|||
if (index == -1) {
|
||||
index = 0;
|
||||
} else {
|
||||
if (comparePullWinKey(pPullInfo, pPullWins, index) > 0) {
|
||||
index++;
|
||||
} else {
|
||||
int32_t code = comparePullWinKey(pPullInfo, pPullWins, index);
|
||||
if (code == 0) {
|
||||
SPullWindowInfo* pos = taosArrayGet(pPullWins ,index);
|
||||
pos->window.skey = TMIN(pos->window.skey, pPullInfo->window.skey);
|
||||
pos->window.ekey = TMAX(pos->window.ekey, pPullInfo->window.ekey);
|
||||
pos->calWin.skey = TMIN(pos->calWin.skey, pPullInfo->calWin.skey);
|
||||
pos->calWin.ekey = TMAX(pos->calWin.ekey, pPullInfo->calWin.ekey);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else if (code > 0 ){
|
||||
index++;
|
||||
}
|
||||
}
|
||||
if (taosArrayInsert(pPullWins, index, pPullInfo) == NULL) {
|
||||
|
@ -863,19 +872,20 @@ static void removeResults(SArray* pWins, SHashObj* pUpdatedMap) {
|
|||
|
||||
int32_t compareWinRes(void* pKey, void* data, int32_t index) {
|
||||
SArray* res = (SArray*)data;
|
||||
SWinKey* pos = taosArrayGet(res, index);
|
||||
SResKeyPos* pData = (SResKeyPos*)pKey;
|
||||
if (*(int64_t*)pData->key == pos->ts) {
|
||||
if (pData->groupId > pos->groupId) {
|
||||
return 1;
|
||||
} else if (pData->groupId < pos->groupId) {
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
} else if (*(int64_t*)pData->key > pos->ts) {
|
||||
SWinKey* pDataPos = taosArrayGet(res, index);
|
||||
SResKeyPos* pRKey = (SResKeyPos*)pKey;
|
||||
if (pRKey->groupId > pDataPos->groupId) {
|
||||
return 1;
|
||||
} else if (pRKey->groupId < pDataPos->groupId) {
|
||||
return -1;
|
||||
}
|
||||
return -1;
|
||||
|
||||
if (*(int64_t*)pRKey->key > pDataPos->ts) {
|
||||
return 1;
|
||||
} else if (*(int64_t*)pRKey->key < pDataPos->ts){
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void removeDeleteResults(SHashObj* pUpdatedMap, SArray* pDelWins) {
|
||||
|
@ -1400,19 +1410,21 @@ static int32_t getAllIntervalWindow(SSHashObj* pHashMap, SHashObj* resWins) {
|
|||
|
||||
int32_t compareWinKey(void* pKey, void* data, int32_t index) {
|
||||
SArray* res = (SArray*)data;
|
||||
SWinKey* pos = taosArrayGet(res, index);
|
||||
SWinKey* pData = (SWinKey*)pKey;
|
||||
if (pData->ts == pos->ts) {
|
||||
if (pData->groupId > pos->groupId) {
|
||||
return 1;
|
||||
} else if (pData->groupId < pos->groupId) {
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
} else if (pData->ts > pos->ts) {
|
||||
SWinKey* pDataPos = taosArrayGet(res, index);
|
||||
SWinKey* pWKey = (SWinKey*)pKey;
|
||||
|
||||
if (pWKey->groupId > pDataPos->groupId) {
|
||||
return 1;
|
||||
} else if (pWKey->groupId < pDataPos->groupId) {
|
||||
return -1;
|
||||
}
|
||||
return -1;
|
||||
|
||||
if (pWKey->ts > pDataPos->ts) {
|
||||
return 1;
|
||||
} else if (pWKey->ts < pDataPos->ts) {
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int32_t closeStreamIntervalWindow(SSHashObj* pHashMap, STimeWindowAggSupp* pTwSup, SInterval* pInterval,
|
||||
|
@ -2252,8 +2264,8 @@ static void doBuildPullDataBlock(SArray* array, int32_t* pIndex, SSDataBlock* pB
|
|||
colDataAppend(pStartTs, pBlock->info.rows, (const char*)&pWin->window.skey, false);
|
||||
colDataAppend(pEndTs, pBlock->info.rows, (const char*)&pWin->window.ekey, false);
|
||||
colDataAppend(pGroupId, pBlock->info.rows, (const char*)&pWin->groupId, false);
|
||||
colDataAppend(pCalStartTs, pBlock->info.rows, (const char*)&pWin->window.skey, false);
|
||||
colDataAppend(pCalEndTs, pBlock->info.rows, (const char*)&pWin->window.ekey, false);
|
||||
colDataAppend(pCalStartTs, pBlock->info.rows, (const char*)&pWin->calWin.skey, false);
|
||||
colDataAppend(pCalEndTs, pBlock->info.rows, (const char*)&pWin->calWin.ekey, false);
|
||||
pBlock->info.rows++;
|
||||
}
|
||||
if ((*pIndex) == size) {
|
||||
|
@ -2263,27 +2275,33 @@ static void doBuildPullDataBlock(SArray* array, int32_t* pIndex, SSDataBlock* pB
|
|||
blockDataUpdateTsWindow(pBlock, 0);
|
||||
}
|
||||
|
||||
void processPullOver(SSDataBlock* pBlock, SHashObj* pMap) {
|
||||
void processPullOver(SSDataBlock* pBlock, SHashObj* pMap, SInterval* pInterval) {
|
||||
SColumnInfoData* pStartCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
|
||||
TSKEY* tsData = (TSKEY*)pStartCol->pData;
|
||||
SColumnInfoData* pEndCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
|
||||
TSKEY* tsEndData = (TSKEY*)pEndCol->pData;
|
||||
SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
|
||||
uint64_t* groupIdData = (uint64_t*)pGroupCol->pData;
|
||||
int32_t chId = getChildIndex(pBlock);
|
||||
for (int32_t i = 0; i < pBlock->info.rows; i++) {
|
||||
SWinKey winRes = {.ts = tsData[i], .groupId = groupIdData[i]};
|
||||
void* chIds = taosHashGet(pMap, &winRes, sizeof(SWinKey));
|
||||
if (chIds) {
|
||||
SArray* chArray = *(SArray**)chIds;
|
||||
int32_t index = taosArraySearchIdx(chArray, &chId, compareInt32Val, TD_EQ);
|
||||
if (index != -1) {
|
||||
qDebug("===stream===window %" PRId64 " delete child id %d", winRes.ts, chId);
|
||||
taosArrayRemove(chArray, index);
|
||||
if (taosArrayGetSize(chArray) == 0) {
|
||||
// pull data is over
|
||||
taosArrayDestroy(chArray);
|
||||
taosHashRemove(pMap, &winRes, sizeof(SWinKey));
|
||||
TSKEY winTs = tsData[i];
|
||||
while (winTs < tsEndData[i]) {
|
||||
SWinKey winRes = {.ts = winTs, .groupId = groupIdData[i]};
|
||||
void* chIds = taosHashGet(pMap, &winRes, sizeof(SWinKey));
|
||||
if (chIds) {
|
||||
SArray* chArray = *(SArray**)chIds;
|
||||
int32_t index = taosArraySearchIdx(chArray, &chId, compareInt32Val, TD_EQ);
|
||||
if (index != -1) {
|
||||
qDebug("===stream===window %" PRId64 " delete child id %d", winRes.ts, chId);
|
||||
taosArrayRemove(chArray, index);
|
||||
if (taosArrayGetSize(chArray) == 0) {
|
||||
// pull data is over
|
||||
taosArrayDestroy(chArray);
|
||||
taosHashRemove(pMap, &winRes, sizeof(SWinKey));
|
||||
}
|
||||
}
|
||||
}
|
||||
winTs = taosTimeAdd(winTs, pInterval->sliding, pInterval->slidingUnit, pInterval->precision);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2296,12 +2314,13 @@ static void addRetriveWindow(SArray* wins, SStreamIntervalOperatorInfo* pInfo) {
|
|||
if (needDeleteWindowBuf(&nextWin, &pInfo->twAggSup) && !pInfo->ignoreExpiredData) {
|
||||
void* chIds = taosHashGet(pInfo->pPullDataMap, winKey, sizeof(SWinKey));
|
||||
if (!chIds) {
|
||||
SPullWindowInfo pull = {.window = nextWin, .groupId = winKey->groupId};
|
||||
SPullWindowInfo pull = {.window = nextWin, .groupId = winKey->groupId, .calWin.skey = nextWin.skey, .calWin.ekey = nextWin.skey};
|
||||
// add pull data request
|
||||
savePullWindow(&pull, pInfo->pPullWins);
|
||||
int32_t size1 = taosArrayGetSize(pInfo->pChildren);
|
||||
addPullWindow(pInfo->pPullDataMap, winKey, size1);
|
||||
qDebug("===stream===prepare retrive for delete %" PRId64 ", size:%d", winKey->ts, size1);
|
||||
if (savePullWindow(&pull, pInfo->pPullWins) == TSDB_CODE_SUCCESS) {
|
||||
int32_t size1 = taosArrayGetSize(pInfo->pChildren);
|
||||
addPullWindow(pInfo->pPullDataMap, winKey, size1);
|
||||
qDebug("===stream===prepare retrive for delete %" PRId64 ", size:%d", winKey->ts, size1);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2371,12 +2390,13 @@ static void doStreamIntervalAggImpl(SOperatorInfo* pOperatorInfo, SSDataBlock* p
|
|||
};
|
||||
void* chIds = taosHashGet(pInfo->pPullDataMap, &winRes, sizeof(SWinKey));
|
||||
if (isDeletedStreamWindow(&nextWin, groupId, pInfo->pState, &pInfo->twAggSup) && !chIds) {
|
||||
SPullWindowInfo pull = {.window = nextWin, .groupId = groupId};
|
||||
SPullWindowInfo pull = {.window = nextWin, .groupId = groupId, .calWin.skey = nextWin.skey, .calWin.ekey = nextWin.skey};
|
||||
// add pull data request
|
||||
savePullWindow(&pull, pInfo->pPullWins);
|
||||
int32_t size = taosArrayGetSize(pInfo->pChildren);
|
||||
addPullWindow(pInfo->pPullDataMap, &winRes, size);
|
||||
qDebug("===stream===prepare retrive %" PRId64 ", size:%d", winRes.ts, size);
|
||||
if (savePullWindow(&pull, pInfo->pPullWins) == TSDB_CODE_SUCCESS) {
|
||||
int32_t size = taosArrayGetSize(pInfo->pChildren);
|
||||
addPullWindow(pInfo->pPullDataMap, &winRes, size);
|
||||
qDebug("===stream===prepare retrive %" PRId64 ", size:%d", winRes.ts, size);
|
||||
}
|
||||
} else {
|
||||
int32_t index = -1;
|
||||
SArray* chArray = NULL;
|
||||
|
@ -2557,7 +2577,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
|
|||
}
|
||||
continue;
|
||||
} else if (pBlock->info.type == STREAM_PULL_OVER && IS_FINAL_OP(pInfo)) {
|
||||
processPullOver(pBlock, pInfo->pPullDataMap);
|
||||
processPullOver(pBlock, pInfo->pPullDataMap, &pInfo->interval);
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -2635,6 +2655,15 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
int64_t getDeleteMark(SIntervalPhysiNode* pIntervalPhyNode) {
|
||||
if (pIntervalPhyNode->window.deleteMark <= 0) {
|
||||
return DEAULT_DELETE_MARK;
|
||||
}
|
||||
int64_t deleteMark = TMAX(pIntervalPhyNode->window.deleteMark,pIntervalPhyNode->window.watermark);
|
||||
deleteMark = TMAX(deleteMark, pIntervalPhyNode->interval);
|
||||
return deleteMark;
|
||||
}
|
||||
|
||||
SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
|
||||
SExecTaskInfo* pTaskInfo, int32_t numOfChild) {
|
||||
SIntervalPhysiNode* pIntervalPhyNode = (SIntervalPhysiNode*)pPhyNode;
|
||||
|
@ -2656,9 +2685,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
|
|||
.calTrigger = pIntervalPhyNode->window.triggerType,
|
||||
.maxTs = INT64_MIN,
|
||||
.minTs = INT64_MAX,
|
||||
// for test 315360000000
|
||||
.deleteMark = 1000LL * 60LL * 60LL * 24LL * 365LL * 10LL,
|
||||
// .deleteMark = INT64_MAX,
|
||||
.deleteMark = getDeleteMark(pIntervalPhyNode),
|
||||
.deleteMarkSaved = 0,
|
||||
.calTriggerSaved = 0,
|
||||
};
|
||||
|
@ -4802,7 +4829,7 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys
|
|||
.calTrigger = pIntervalPhyNode->window.triggerType,
|
||||
.maxTs = INT64_MIN,
|
||||
.minTs = INT64_MAX,
|
||||
.deleteMark = INT64_MAX,
|
||||
.deleteMark = getDeleteMark(pIntervalPhyNode),
|
||||
};
|
||||
|
||||
ASSERT(twAggSupp.calTrigger != STREAM_TRIGGER_MAX_DELAY);
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
#include "taoserror.h"
|
||||
#include "tdef.h"
|
||||
#include "tpagedbuf.h"
|
||||
#include "tlog.h"
|
||||
|
||||
#define LHASH_CAP_RATIO 0.85
|
||||
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
#include "tpagedbuf.h"
|
||||
#include "tpercentile.h"
|
||||
#include "ttypes.h"
|
||||
#include "tlog.h"
|
||||
|
||||
#define DEFAULT_NUM_OF_SLOT 1024
|
||||
|
||||
|
@@ -367,11 +368,13 @@ int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) {
      pSlot->info.data = NULL;
    }

    SArray *pPageIdList = (SArray *)taosHashGet(pBucket->groupPagesMap, &groupId, sizeof(groupId));
    if (pPageIdList == NULL) {
      SArray *pList = taosArrayInit(4, sizeof(int32_t));
      taosHashPut(pBucket->groupPagesMap, &groupId, sizeof(groupId), &pList, POINTER_BYTES);
      pPageIdList = pList;
    SArray *pPageIdList;
    void *p = taosHashGet(pBucket->groupPagesMap, &groupId, sizeof(groupId));
    if (p == NULL) {
      pPageIdList = taosArrayInit(4, sizeof(int32_t));
      taosHashPut(pBucket->groupPagesMap, &groupId, sizeof(groupId), &pPageIdList, POINTER_BYTES);
    } else {
      pPageIdList = *(SArray **)p;
    }

    pSlot->info.data = getNewBufPage(pBucket->pBuffer, &pageId);
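The rewritten lookup above stores the `SArray*` page list in the hash by value, so a hit must be dereferenced with `*(SArray **)p` rather than cast directly. A standalone sketch of that get-or-create idiom, using the same hash and array helpers shown in the hunk (the helper name and key type are hypothetical):

```c
// Hypothetical helper illustrating the get-or-create idiom from the hunk above.
static SArray* getOrCreatePageIdList(SHashObj* pMap, int32_t groupId) {
  SArray* pPageIdList = NULL;
  void*   p = taosHashGet(pMap, &groupId, sizeof(groupId));
  if (p == NULL) {
    pPageIdList = taosArrayInit(4, sizeof(int32_t));                            // new page-id list for this group
    taosHashPut(pMap, &groupId, sizeof(groupId), &pPageIdList, POINTER_BYTES);  // store the pointer by value
  } else {
    pPageIdList = *(SArray**)p;  // the hash returns a pointer to the stored SArray*
  }
  return pPageIdList;
}
```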
@@ -88,11 +88,13 @@ static int32_t udfSpawnUdfd(SUdfdData *pData) {
  }
#ifdef WINDOWS
  if (strlen(path) == 0) {
    strcat(path, "udfd.exe");
  } else {
    strcat(path, "\\udfd.exe");
    strcat(path, "C:\\TDengine");
  }
  strcat(path, "\\udfd.exe");
#else
  if (strlen(path) == 0) {
    strcat(path, "/usr/bin");
  }
  strcat(path, "/udfd");
#endif
  char *argsUdfd[] = {path, "-c", configDir, NULL};
@ -44,6 +44,7 @@ typedef struct SInsertParseContext {
|
|||
SParsedDataColInfo tags; // for stmt
|
||||
bool missCache;
|
||||
bool usingDuplicateTable;
|
||||
bool forceUpdate;
|
||||
} SInsertParseContext;
|
||||
|
||||
typedef int32_t (*_row_append_fn_t)(SMsgBuf* pMsgBuf, const void* value, int32_t len, void* param);
|
||||
|
@ -562,7 +563,8 @@ static int32_t parseTagValue(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt
|
|||
|
||||
static void buildCreateTbReq(SVnodeModifOpStmt* pStmt, STag* pTag, SArray* pTagName) {
|
||||
insBuildCreateTbReq(&pStmt->createTblReq, pStmt->targetTableName.tname, pTag, pStmt->pTableMeta->suid,
|
||||
pStmt->usingTableName.tname, pTagName, pStmt->pTableMeta->tableInfo.numOfTags, TSDB_DEFAULT_TABLE_TTL);
|
||||
pStmt->usingTableName.tname, pTagName, pStmt->pTableMeta->tableInfo.numOfTags,
|
||||
TSDB_DEFAULT_TABLE_TTL);
|
||||
}
|
||||
|
||||
static int32_t checkAndTrimValue(SToken* pToken, char* tmpTokenBuf, SMsgBuf* pMsgBuf) {
|
||||
|
@ -828,14 +830,69 @@ static int32_t getTableVgroup(SParseContext* pCxt, SVnodeModifOpStmt* pStmt, boo
|
|||
return code;
|
||||
}
|
||||
|
||||
static int32_t getTableMetaAndVgroupImpl(SParseContext* pCxt, SVnodeModifOpStmt* pStmt, bool* pMissCache) {
|
||||
SVgroupInfo vg;
|
||||
int32_t code = catalogGetCachedTableVgMeta(pCxt->pCatalog, &pStmt->targetTableName, &vg, &pStmt->pTableMeta);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
if (NULL != pStmt->pTableMeta) {
|
||||
code = taosHashPut(pStmt->pVgroupsHashObj, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg));
|
||||
}
|
||||
*pMissCache = (NULL == pStmt->pTableMeta);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t getTableMetaAndVgroup(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt, bool* pMissCache) {
|
||||
SParseContext* pComCxt = pCxt->pComCxt;
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
if (pComCxt->async) {
|
||||
code = getTableMetaAndVgroupImpl(pComCxt, pStmt, pMissCache);
|
||||
} else {
|
||||
code = getTableMeta(pCxt, &pStmt->targetTableName, false, &pStmt->pTableMeta, pMissCache);
|
||||
if (TSDB_CODE_SUCCESS == code && !pCxt->missCache) {
|
||||
code = getTableVgroup(pCxt->pComCxt, pStmt, false, &pCxt->missCache);
|
||||
}
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t collectUseTable(const SName* pName, SHashObj* pTable) {
|
||||
char fullName[TSDB_TABLE_FNAME_LEN];
|
||||
tNameExtractFullName(pName, fullName);
|
||||
return taosHashPut(pTable, fullName, strlen(fullName), pName, sizeof(SName));
|
||||
}
|
||||
|
||||
static int32_t collectUseDatabase(const SName* pName, SHashObj* pDbs) {
|
||||
char dbFName[TSDB_DB_FNAME_LEN] = {0};
|
||||
tNameGetFullDbName(pName, dbFName);
|
||||
return taosHashPut(pDbs, dbFName, strlen(dbFName), dbFName, sizeof(dbFName));
|
||||
}
|
||||
|
||||
static int32_t getTargetTableSchema(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt) {
|
||||
if (pCxt->forceUpdate) {
|
||||
pCxt->missCache = true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t code = checkAuth(pCxt->pComCxt, &pStmt->targetTableName, &pCxt->missCache);
|
||||
#if 0
|
||||
if (TSDB_CODE_SUCCESS == code && !pCxt->missCache) {
|
||||
code = getTableMeta(pCxt, &pStmt->targetTableName, false, &pStmt->pTableMeta, &pCxt->missCache);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code && !pCxt->missCache) {
|
||||
code = getTableVgroup(pCxt->pComCxt, pStmt, false, &pCxt->missCache);
|
||||
}
|
||||
#else
|
||||
if (TSDB_CODE_SUCCESS == code && !pCxt->missCache) {
|
||||
code = getTableMetaAndVgroup(pCxt, pStmt, &pCxt->missCache);
|
||||
}
|
||||
#endif
|
||||
if (TSDB_CODE_SUCCESS == code && !pCxt->pComCxt->async) {
|
||||
code = collectUseDatabase(&pStmt->targetTableName, pStmt->pDbFNameHashObj);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = collectUseTable(&pStmt->targetTableName, pStmt->pTableNameHashObj);
|
||||
}
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -844,6 +901,11 @@ static int32_t preParseUsingTableName(SInsertParseContext* pCxt, SVnodeModifOpSt
|
|||
}
|
||||
|
||||
static int32_t getUsingTableSchema(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt) {
|
||||
if (pCxt->forceUpdate) {
|
||||
pCxt->missCache = true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t code = checkAuth(pCxt->pComCxt, &pStmt->targetTableName, &pCxt->missCache);
|
||||
if (TSDB_CODE_SUCCESS == code && !pCxt->missCache) {
|
||||
code = getTableMeta(pCxt, &pStmt->usingTableName, true, &pStmt->pTableMeta, &pCxt->missCache);
|
||||
|
@ -851,6 +913,12 @@ static int32_t getUsingTableSchema(SInsertParseContext* pCxt, SVnodeModifOpStmt*
|
|||
if (TSDB_CODE_SUCCESS == code && !pCxt->missCache) {
|
||||
code = getTableVgroup(pCxt->pComCxt, pStmt, true, &pCxt->missCache);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code && !pCxt->pComCxt->async) {
|
||||
code = collectUseDatabase(&pStmt->usingTableName, pStmt->pDbFNameHashObj);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = collectUseTable(&pStmt->usingTableName, pStmt->pTableNameHashObj);
|
||||
}
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -923,10 +991,9 @@ static int32_t getTableDataBlocks(SInsertParseContext* pCxt, SVnodeModifOpStmt*
|
|||
pStmt->pTableMeta->uid = 0;
|
||||
}
|
||||
|
||||
        return insGetDataBlockFromList(pStmt->pTableBlockHashObj, &uid, sizeof(pStmt->pTableMeta->uid),
                                       TSDB_DEFAULT_PAYLOAD_SIZE, sizeof(SSubmitBlk),
                                       getTableInfo(pStmt->pTableMeta).rowSize, pStmt->pTableMeta, pDataBuf, NULL,
                                       &pStmt->createTblReq);
    return insGetDataBlockFromList(
        pStmt->pTableBlockHashObj, &uid, sizeof(pStmt->pTableMeta->uid), TSDB_DEFAULT_PAYLOAD_SIZE, sizeof(SSubmitBlk),
        getTableInfo(pStmt->pTableMeta).rowSize, pStmt->pTableMeta, pDataBuf, NULL, &pStmt->createTblReq);
  }

  char tbFName[TSDB_TABLE_FNAME_LEN];
  tNameExtractFullName(&pStmt->targetTableName, tbFName);

@@ -1529,8 +1596,9 @@ static int32_t setStmtInfo(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt)
  memcpy(tags, &pCxt->tags, sizeof(pCxt->tags));

  SStmtCallback* pStmtCb = pCxt->pComCxt->pStmtCb;
  int32_t code = (*pStmtCb->setInfoFn)(pStmtCb->pStmt, pStmt->pTableMeta, tags, &pStmt->targetTableName, pStmt->usingTableProcessing,
                                       pStmt->pVgroupsHashObj, pStmt->pTableBlockHashObj, pStmt->usingTableName.tname);
  int32_t code = (*pStmtCb->setInfoFn)(pStmtCb->pStmt, pStmt->pTableMeta, tags, &pStmt->targetTableName,
                                       pStmt->usingTableProcessing, pStmt->pVgroupsHashObj, pStmt->pTableBlockHashObj,
                                       pStmt->usingTableName.tname);

  memset(&pCxt->tags, 0, sizeof(pCxt->tags));
  pStmt->pVgroupsHashObj = NULL;

@@ -1765,16 +1833,25 @@ static int32_t initInsertQuery(SInsertParseContext* pCxt, SCatalogReq* pCatalogR

static int32_t setRefreshMate(SQuery* pQuery) {
  SVnodeModifOpStmt* pStmt = (SVnodeModifOpStmt*)pQuery->pRoot;

  SName* pTable = taosHashIterate(pStmt->pTableNameHashObj, NULL);
  while (NULL != pTable) {
    taosArrayPush(pQuery->pTableList, pTable);
    pTable = taosHashIterate(pStmt->pTableNameHashObj, pTable);
  if (taosHashGetSize(pStmt->pTableNameHashObj) > 0) {
    taosArrayDestroy(pQuery->pTableList);
    pQuery->pTableList = taosArrayInit(taosHashGetSize(pStmt->pTableNameHashObj), sizeof(SName));
    SName* pTable = taosHashIterate(pStmt->pTableNameHashObj, NULL);
    while (NULL != pTable) {
      taosArrayPush(pQuery->pTableList, pTable);
      pTable = taosHashIterate(pStmt->pTableNameHashObj, pTable);
    }
  }

  char* pDb = taosHashIterate(pStmt->pDbFNameHashObj, NULL);
  while (NULL != pDb) {
    taosArrayPush(pQuery->pDbList, pDb);
    pDb = taosHashIterate(pStmt->pDbFNameHashObj, pDb);
  if (taosHashGetSize(pStmt->pDbFNameHashObj) > 0) {
    taosArrayDestroy(pQuery->pDbList);
    pQuery->pDbList = taosArrayInit(taosHashGetSize(pStmt->pDbFNameHashObj), TSDB_DB_FNAME_LEN);
    char* pDb = taosHashIterate(pStmt->pDbFNameHashObj, NULL);
    while (NULL != pDb) {
      taosArrayPush(pQuery->pDbList, pDb);
      pDb = taosHashIterate(pStmt->pDbFNameHashObj, pDb);
    }
  }

  return TSDB_CODE_SUCCESS;

@@ -1888,28 +1965,28 @@ static int32_t buildInsertCatalogReq(SInsertParseContext* pCxt, SVnodeModifOpStm
}

static int32_t setNextStageInfo(SInsertParseContext* pCxt, SQuery* pQuery, SCatalogReq* pCatalogReq) {
  SVnodeModifOpStmt* pStmt = (SVnodeModifOpStmt*)pQuery->pRoot;
  if (pCxt->missCache) {
    parserDebug("0x%" PRIx64 " %d rows have been inserted before cache miss", pCxt->pComCxt->requestId,
                ((SVnodeModifOpStmt*)pQuery->pRoot)->totalRowsNum);
    parserDebug("0x%" PRIx64 " %d rows of %d tables have been inserted before cache miss", pCxt->pComCxt->requestId,
                pStmt->totalRowsNum, pStmt->totalTbNum);

    pQuery->execStage = QUERY_EXEC_STAGE_PARSE;
    return buildInsertCatalogReq(pCxt, (SVnodeModifOpStmt*)pQuery->pRoot, pCatalogReq);
    return buildInsertCatalogReq(pCxt, pStmt, pCatalogReq);
  }

  parserDebug("0x%" PRIx64 " %d rows have been inserted", pCxt->pComCxt->requestId,
              ((SVnodeModifOpStmt*)pQuery->pRoot)->totalRowsNum);
  parserDebug("0x%" PRIx64 " %d rows of %d tables have been inserted", pCxt->pComCxt->requestId, pStmt->totalRowsNum,
              pStmt->totalTbNum);

  pQuery->execStage = QUERY_EXEC_STAGE_SCHEDULE;
  return TSDB_CODE_SUCCESS;
}

int32_t parseInsertSql(SParseContext* pCxt, SQuery** pQuery, SCatalogReq* pCatalogReq, const SMetaData* pMetaData) {
  SInsertParseContext context = {
      .pComCxt = pCxt,
      .msg = {.buf = pCxt->pMsg, .len = pCxt->msgLen},
      .missCache = false,
      .usingDuplicateTable = false,
  };
  SInsertParseContext context = {.pComCxt = pCxt,
                                 .msg = {.buf = pCxt->pMsg, .len = pCxt->msgLen},
                                 .missCache = false,
                                 .usingDuplicateTable = false,
                                 .forceUpdate = (NULL != pCatalogReq ? pCatalogReq->forceUpdate : false)};

  int32_t code = initInsertQuery(&context, pCatalogReq, pMetaData, pQuery);
  if (TSDB_CODE_SUCCESS == code) {
@@ -3890,12 +3890,17 @@ static int32_t checkDbKeepOption(STranslateContext* pCxt, SDatabaseOptions* pOpt
    pOptions->keep[2] = getBigintFromValueNode((SValueNode*)nodesListGetNode(pOptions->pKeep, 2));
  }

  int64_t tsdbMaxKeep = TSDB_MAX_KEEP;
  if (pOptions->precision == TSDB_TIME_PRECISION_NANO) {
    tsdbMaxKeep = TSDB_MAX_KEEP_NS;
  }

  if (pOptions->keep[0] < TSDB_MIN_KEEP || pOptions->keep[1] < TSDB_MIN_KEEP || pOptions->keep[2] < TSDB_MIN_KEEP ||
      pOptions->keep[0] > TSDB_MAX_KEEP || pOptions->keep[1] > TSDB_MAX_KEEP || pOptions->keep[2] > TSDB_MAX_KEEP) {
      pOptions->keep[0] > tsdbMaxKeep || pOptions->keep[1] > tsdbMaxKeep || pOptions->keep[2] > tsdbMaxKeep) {
    return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DB_OPTION,
                                   "Invalid option keep: %" PRId64 ", %" PRId64 ", %" PRId64 " valid range: [%dm, %dm]",
                                   pOptions->keep[0], pOptions->keep[1], pOptions->keep[2], TSDB_MIN_KEEP,
                                   TSDB_MAX_KEEP);
                                   tsdbMaxKeep);
  }

  if (!((pOptions->keep[0] <= pOptions->keep[1]) && (pOptions->keep[1] <= pOptions->keep[2]))) {

@@ -4047,7 +4052,10 @@ static int32_t checkDatabaseOptions(STranslateContext* pCxt, const char* pDbName
                               TSDB_MAX_MINROWS_FBLOCK);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = checkDbKeepOption(pCxt, pOptions);
    code = checkDbPrecisionOption(pCxt, pOptions);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = checkDbKeepOption(pCxt, pOptions);  // use precision
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = checkDbRangeOption(pCxt, "pages", pOptions->pages, TSDB_MIN_PAGES_PER_VNODE, TSDB_MAX_PAGES_PER_VNODE);

@@ -4060,9 +4068,6 @@ static int32_t checkDatabaseOptions(STranslateContext* pCxt, const char* pDbName
    code = checkDbRangeOption(pCxt, "tsdbPagesize", pOptions->tsdbPageSize, TSDB_MIN_TSDB_PAGESIZE,
                              TSDB_MAX_TSDB_PAGESIZE);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = checkDbPrecisionOption(pCxt, pOptions);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = checkDbEnumOption(pCxt, "replications", pOptions->replica, TSDB_MIN_DB_REPLICA, TSDB_MAX_DB_REPLICA);
  }
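The keep-option hunk above makes the upper bound of the `keep` values depend on the database precision, and the option-check order now runs the precision check first so the keep check can use it. Below is a minimal standalone sketch of that precision-dependent validation; the limit values and the `DEMO_*` names are illustrative placeholders, not the constants defined in TDengine headers.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative placeholders; TDengine defines its own TSDB_MIN_KEEP /
 * TSDB_MAX_KEEP / TSDB_MAX_KEEP_NS values. */
#define DEMO_MIN_KEEP    1LL
#define DEMO_MAX_KEEP    (365000LL * 1440) /* minutes */
#define DEMO_MAX_KEEP_NS (365LL * 1440)    /* tighter bound for nanosecond precision */

enum { DEMO_PRECISION_MS, DEMO_PRECISION_US, DEMO_PRECISION_NS };

/* Returns true when all three keep values fall inside the precision-dependent range
 * and are non-decreasing, mirroring the follow-up check in the hunk. */
static bool demoCheckKeep(const int64_t keep[3], int precision) {
  int64_t maxKeep = (precision == DEMO_PRECISION_NS) ? DEMO_MAX_KEEP_NS : DEMO_MAX_KEEP;
  for (int i = 0; i < 3; ++i) {
    if (keep[i] < DEMO_MIN_KEEP || keep[i] > maxKeep) return false;
  }
  return keep[0] <= keep[1] && keep[1] <= keep[2];
}

int main(void) {
  int64_t keep[3] = {3650 * 1440, 3650 * 1440, 3650 * 1440};
  printf("ms precision ok: %d\n", demoCheckKeep(keep, DEMO_PRECISION_MS));
  printf("ns precision ok: %d\n", demoCheckKeep(keep, DEMO_PRECISION_NS));
  return 0;
}
```

The same keep values that pass under millisecond precision are rejected under nanosecond precision, which is the behavior the reordered checks enable.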
@@ -248,6 +248,13 @@ int32_t __catalogGetCachedTableHashVgroup(SCatalog* pCtg, const SName* pTableNam
  return code;
}

int32_t __catalogGetCachedTableVgMeta(SCatalog* pCtg, const SName* pTableName, SVgroupInfo* pVgroup, STableMeta** pTableMeta) {
  int32_t code = g_mockCatalogService->catalogGetTableMeta(pTableName, pTableMeta, true);
  if (code) return code;
  code = g_mockCatalogService->catalogGetTableHashVgroup(pTableName, pVgroup, true);
  return code;
}

int32_t __catalogGetTableDistVgInfo(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName,
                                    SArray** pVgList) {
  return g_mockCatalogService->catalogGetTableDistVgInfo(pTableName, pVgList);

@@ -316,6 +323,7 @@ void initMetaDataEnv() {
  stub.set(catalogGetCachedSTableMeta, __catalogGetCachedTableMeta);
  stub.set(catalogGetTableHashVgroup, __catalogGetTableHashVgroup);
  stub.set(catalogGetCachedTableHashVgroup, __catalogGetCachedTableHashVgroup);
  stub.set(catalogGetCachedTableVgMeta, __catalogGetCachedTableVgMeta);
  stub.set(catalogGetTableDistVgInfo, __catalogGetTableDistVgInfo);
  stub.set(catalogGetDBVgVersion, __catalogGetDBVgVersion);
  stub.set(catalogGetDBVgList, __catalogGetDBVgList);

@@ -31,7 +31,7 @@ extern "C" {

#define QW_DEFAULT_SCHEDULER_NUMBER 100
#define QW_DEFAULT_TASK_NUMBER      10000
#define QW_DEFAULT_SCH_TASK_NUMBER  10000
#define QW_DEFAULT_SCH_TASK_NUMBER  3000
#define QW_DEFAULT_SHORT_RUN_TIMES  2
#define QW_DEFAULT_HEARTBEAT_MSEC   5000
#define QW_SCH_TIMEOUT_MSEC         180000

@@ -247,7 +247,7 @@ typedef struct SQWorkerMgmt {

#define QW_ERR_RET(c)                 \
  do {                                \
    int32_t _code = (c);              \
    if (_code != TSDB_CODE_SUCCESS) { \
      terrno = _code;                 \
      return _code;                   \

@@ -255,7 +255,7 @@ typedef struct SQWorkerMgmt {
  } while (0)
#define QW_RET(c)                     \
  do {                                \
    int32_t _code = (c);              \
    if (_code != TSDB_CODE_SUCCESS) { \
      terrno = _code;                 \
    }                                 \

@@ -263,7 +263,7 @@ typedef struct SQWorkerMgmt {
  } while (0)
#define QW_ERR_JRET(c)               \
  do {                               \
    code = (c);                      \
    if (code != TSDB_CODE_SUCCESS) { \
      terrno = code;                 \
      goto _return;                  \
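The qworker header hunks above only lower QW_DEFAULT_SCH_TASK_NUMBER and re-align the error macros, but the macro pattern itself is worth a note: wrapping the body in `do { ... } while (0)` lets an error-return macro behave like a single statement after `if`/`else`. A minimal sketch of the same idiom follows; the names and error codes are illustrative stand-ins, not the TDengine definitions.

```c
#include <stdio.h>

/* Illustrative stand-ins for terrno / TSDB_CODE_SUCCESS. */
static int demo_errno = 0;
#define DEMO_SUCCESS 0

/* Evaluate c once; on failure record the code and return it from the caller. */
#define DEMO_ERR_RET(c)          \
  do {                           \
    int _code = (c);             \
    if (_code != DEMO_SUCCESS) { \
      demo_errno = _code;        \
      return _code;              \
    }                            \
  } while (0)

static int mayFail(int code) { return code; }

static int doWork(void) {
  DEMO_ERR_RET(mayFail(0));  /* success, execution continues */
  DEMO_ERR_RET(mayFail(42)); /* records 42 and returns early */
  return DEMO_SUCCESS;
}

int main(void) {
  printf("doWork -> %d, demo_errno = %d\n", doWork(), demo_errno);
  return 0;
}
```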
@@ -463,6 +463,8 @@ void qwDestroyImpl(void *pMgmt) {
  int8_t  nodeType = mgmt->nodeType;
  int32_t nodeId = mgmt->nodeId;

  int32_t taskCount = 0;
  int32_t schStatusCount = 0;
  qDebug("start to destroy qworker, type:%d, id:%d, handle:%p", nodeType, nodeId, mgmt);

  taosTmrStop(mgmt->hbTimer);

@@ -472,6 +474,7 @@ void qwDestroyImpl(void *pMgmt) {
  uint64_t qId, tId;
  int32_t  eId;
  void    *pIter = taosHashIterate(mgmt->ctxHash, NULL);

  while (pIter) {
    SQWTaskCtx *ctx = (SQWTaskCtx *)pIter;
    void       *key = taosHashGetKey(pIter, NULL);

@@ -480,6 +483,7 @@ void qwDestroyImpl(void *pMgmt) {
    qwFreeTaskCtx(ctx);
    QW_TASK_DLOG_E("task ctx freed");
    pIter = taosHashIterate(mgmt->ctxHash, pIter);
    taskCount++;
  }
  taosHashCleanup(mgmt->ctxHash);

@@ -487,7 +491,9 @@ void qwDestroyImpl(void *pMgmt) {
  while (pIter) {
    SQWSchStatus *sch = (SQWSchStatus *)pIter;
    qwDestroySchStatus(sch);

    pIter = taosHashIterate(mgmt->schHash, pIter);
    schStatusCount++;
  }
  taosHashCleanup(mgmt->schHash);

@@ -499,7 +505,8 @@ void qwDestroyImpl(void *pMgmt) {

  qwCloseRef();

  qDebug("qworker destroyed, type:%d, id:%d, handle:%p", nodeType, nodeId, mgmt);
  qDebug("qworker destroyed, type:%d, id:%d, handle:%p, taskCount:%d, schStatusCount: %d", nodeType, nodeId, mgmt,
         taskCount, schStatusCount);
}

int32_t qwOpenRef(void) {

@@ -8,9 +8,9 @@
#include "qwMsg.h"
#include "tcommon.h"
#include "tdatablock.h"
#include "tglobal.h"
#include "tmsg.h"
#include "tname.h"
#include "tglobal.h"

SQWorkerMgmt gQwMgmt = {
    .lock = 0,

@@ -275,7 +275,7 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen,
      QW_ERR_RET(code);
    }

    QW_TASK_DLOG("no more data in sink and query end, fetched blocks %d rows %"PRId64, pOutput->numOfBlocks,
    QW_TASK_DLOG("no more data in sink and query end, fetched blocks %d rows %" PRId64, pOutput->numOfBlocks,
                 pOutput->numOfRows);

    qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_SUCC);

@@ -327,12 +327,14 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen,
    }

    if (0 == ctx->level) {
      QW_TASK_DLOG("task fetched blocks %d rows %"PRId64", level %d", pOutput->numOfBlocks, pOutput->numOfRows, ctx->level);
      QW_TASK_DLOG("task fetched blocks %d rows %" PRId64 ", level %d", pOutput->numOfBlocks, pOutput->numOfRows,
                   ctx->level);
      break;
    }

    if (pOutput->numOfRows >= QW_MIN_RES_ROWS) {
      QW_TASK_DLOG("task fetched blocks %d rows %" PRId64 " reaches the min rows", pOutput->numOfBlocks, pOutput->numOfRows);
      QW_TASK_DLOG("task fetched blocks %d rows %" PRId64 " reaches the min rows", pOutput->numOfBlocks,
                   pOutput->numOfRows);
      break;
    }
  }

@@ -650,8 +652,8 @@ _return:
  code = qwHandlePostPhaseEvents(QW_FPARAMS(), QW_PHASE_POST_QUERY, &input, NULL);

  if (QUERY_RSP_POLICY_QUICK == tsQueryRspPolicy && ctx != NULL && QW_EVENT_RECEIVED(ctx, QW_EVENT_FETCH)) {
    void       *rsp = NULL;
    int32_t     dataLen = 0;
    SOutputData sOutput = {0};
    if (qwGetQueryResFromSink(QW_FPARAMS(), ctx, &dataLen, &rsp, &sOutput)) {
      return TSDB_CODE_SUCCESS;

@@ -671,8 +673,8 @@ _return:
      qwBuildAndSendFetchRsp(ctx->fetchType, &qwMsg->connInfo, rsp, dataLen, code);
      rsp = NULL;

      QW_TASK_DLOG("fetch rsp send, handle:%p, code:%x - %s, dataLen:%d", qwMsg->connInfo.handle, code,
                   tstrerror(code), dataLen);
      QW_TASK_DLOG("fetch rsp send, handle:%p, code:%x - %s, dataLen:%d", qwMsg->connInfo.handle, code, tstrerror(code),
                   dataLen);
    }
  }

@@ -748,7 +750,8 @@ int32_t qwProcessCQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
    }

    QW_LOCK(QW_WRITE, &ctx->lock);
    if ((queryStop && (0 == atomic_load_8((int8_t *)&ctx->queryContinue))) || code || 0 == atomic_load_8((int8_t *)&ctx->queryContinue)) {
    if ((queryStop && (0 == atomic_load_8((int8_t *)&ctx->queryContinue))) || code ||
        0 == atomic_load_8((int8_t *)&ctx->queryContinue)) {
      // Note: query is not running anymore
      QW_SET_PHASE(ctx, 0);
      QW_UNLOCK(QW_WRITE, &ctx->lock);
@@ -377,7 +377,7 @@ extern SSchedulerMgmt schMgmt;
#define SCH_IS_EXPLAIN_JOB(_job) (EXPLAIN_MODE_ANALYZE == (_job)->attr.explainMode)
#define SCH_NETWORK_ERR(_code)   ((_code) == TSDB_CODE_RPC_BROKEN_LINK || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL)
#define SCH_MERGE_TASK_NETWORK_ERR(_task, _code, _len) \
  (SCH_NETWORK_ERR(_code) && (((_len) > 0) || (!SCH_IS_DATA_BIND_TASK(_task))))
  (SCH_NETWORK_ERR(_code) && (((_len) > 0) || (!SCH_IS_DATA_BIND_TASK(_task)) || (_task)->redirectCtx.inRedirect))
#define SCH_REDIRECT_MSGTYPE(_msgType) \
  ((_msgType) == TDMT_SCH_LINK_BROKEN || (_msgType) == TDMT_SCH_QUERY || (_msgType) == TDMT_SCH_MERGE_QUERY || \
   (_msgType) == TDMT_SCH_FETCH || (_msgType) == TDMT_SCH_MERGE_FETCH)

@@ -156,6 +156,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa
    SCH_RET(schHandleRedirect(pJob, pTask, (SDataBuf *)pMsg, rspCode));
  }

  pTask->redirectCtx.inRedirect = false;

  switch (msgType) {
    case TDMT_VND_COMMIT_RSP: {
      SCH_ERR_JRET(rspCode);

@@ -362,17 +362,12 @@ int32_t schChkUpdateRedirectCtx(SSchJob *pJob, SSchTask *pTask, SEpSet *pEpSet,
  }

  pCtx->totalTimes++;
  pCtx->roundTimes++;

  if (SCH_IS_DATA_BIND_TASK(pTask) && pEpSet) {
    pCtx->roundTotal = pEpSet->numOfEps;
    pCtx->roundTimes = 0;

    pTask->delayExecMs = 0;

    goto _return;
  }

  pCtx->roundTimes++;

  if (pCtx->roundTimes >= pCtx->roundTotal) {
    int64_t nowTs = taosGetTimestampMs();
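The schChkUpdateRedirectCtx hunk above adjusts how redirect attempts are counted against the current round of endpoints when a fresh endpoint set arrives. As a loose illustration of that kind of bookkeeping (not the scheduler's actual logic or types), the sketch below resets the round on a new endpoint set and signals a back-off only once a full round has been exhausted; every name and field here is illustrative.

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative retry context: one "round" is a full pass over the endpoint set. */
typedef struct {
  int totalTimes; /* all redirect attempts so far */
  int roundTimes; /* attempts within the current round */
  int roundTotal; /* endpoints available in the current round */
} DemoRedirectCtx;

/* Returns true when a full round is exhausted and the caller should back off.
 * newEpCount > 0 means a fresh endpoint set was delivered with the redirect. */
static bool demoOnRedirect(DemoRedirectCtx *ctx, int newEpCount) {
  ctx->totalTimes++;

  if (newEpCount > 0) {
    /* A new endpoint set starts a fresh round and clears any pending delay. */
    ctx->roundTotal = newEpCount;
    ctx->roundTimes = 0;
    return false;
  }

  ctx->roundTimes++;
  return ctx->roundTimes >= ctx->roundTotal;
}

int main(void) {
  DemoRedirectCtx ctx = {0, 0, 3};
  for (int i = 0; i < 4; ++i) {
    bool backoff = demoOnRedirect(&ctx, i == 2 ? 2 : 0);
    printf("attempt %d: total=%d round=%d/%d backoff=%d\n", i + 1, ctx.totalTimes,
           ctx.roundTimes, ctx.roundTotal, backoff);
  }
  return 0;
}
```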
@@ -56,7 +56,7 @@ void streamSchedByTimer(void* param, void* tmrId) {
  }

  if (atomic_load_8(&pTask->triggerStatus) == TASK_TRIGGER_STATUS__ACTIVE) {
    SStreamTrigger* trigger = taosAllocateQitem(sizeof(SStreamTrigger), DEF_QITEM);
    SStreamTrigger* trigger = taosAllocateQitem(sizeof(SStreamTrigger), DEF_QITEM, 0);
    if (trigger == NULL) return;
    trigger->type = STREAM_INPUT__GET_RES;
    trigger->pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock));

@@ -112,7 +112,7 @@ int32_t streamSchedExec(SStreamTask* pTask) {
}

int32_t streamTaskEnqueue(SStreamTask* pTask, const SStreamDispatchReq* pReq, SRpcMsg* pRsp) {
  SStreamDataBlock* pData = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
  SStreamDataBlock* pData = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, 0);
  int8_t            status;

  // enqueue

@@ -150,7 +150,7 @@ int32_t streamTaskEnqueue(SStreamTask* pTask, const SStreamDispatchReq* pReq, SR
}

int32_t streamTaskEnqueueRetrieve(SStreamTask* pTask, SStreamRetrieveReq* pReq, SRpcMsg* pRsp) {
  SStreamDataBlock* pData = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
  SStreamDataBlock* pData = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, 0);
  int8_t            status = TASK_INPUT_STATUS__NORMAL;

  // enqueue

@@ -67,7 +67,7 @@ int32_t streamRetrieveReqToData(const SStreamRetrieveReq* pReq, SStreamDataBlock
}

SStreamDataSubmit* streamDataSubmitNew(SSubmitReq* pReq) {
  SStreamDataSubmit* pDataSubmit = (SStreamDataSubmit*)taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM);
  SStreamDataSubmit* pDataSubmit = (SStreamDataSubmit*)taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM, 0);
  if (pDataSubmit == NULL) return NULL;
  pDataSubmit->dataRef = (int32_t*)taosMemoryMalloc(sizeof(int32_t));
  if (pDataSubmit->dataRef == NULL) goto FAIL;

@@ -81,7 +81,7 @@ FAIL:
}

SStreamMergedSubmit* streamMergedSubmitNew() {
  SStreamMergedSubmit* pMerged = (SStreamMergedSubmit*)taosAllocateQitem(sizeof(SStreamMergedSubmit), DEF_QITEM);
  SStreamMergedSubmit* pMerged = (SStreamMergedSubmit*)taosAllocateQitem(sizeof(SStreamMergedSubmit), DEF_QITEM, 0);
  if (pMerged == NULL) return NULL;
  pMerged->reqs = taosArrayInit(0, sizeof(void*));
  pMerged->dataRefs = taosArrayInit(0, sizeof(void*));

@@ -107,7 +107,7 @@ static FORCE_INLINE void streamDataSubmitRefInc(SStreamDataSubmit* pDataSubmit)
}

SStreamDataSubmit* streamSubmitRefClone(SStreamDataSubmit* pSubmit) {
  SStreamDataSubmit* pSubmitClone = taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM);
  SStreamDataSubmit* pSubmitClone = taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM, 0);
  if (pSubmitClone == NULL) {
    return NULL;
  }

@@ -54,6 +54,7 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, const void* data, SArray*
      /*ASSERT(false);*/
      qError("unexpected stream execution, stream %" PRId64 " task: %d, since %s", pTask->streamId, pTask->taskId,
             terrstr());
      continue;
    }
    if (output == NULL) {
      if (pItem->type == STREAM_INPUT__DATA_RETRIEVE) {

@@ -126,7 +127,7 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) {
      taosArrayDestroy(pRes);
      break;
    }
    SStreamDataBlock* qRes = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
    SStreamDataBlock* qRes = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, 0);
    if (qRes == NULL) {
      taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
      terrno = TSDB_CODE_OUT_OF_MEMORY;

@@ -234,7 +235,7 @@ int32_t streamExecForAll(SStreamTask* pTask) {
    qDebug("stream task %d exec end", pTask->taskId);

    if (taosArrayGetSize(pRes) != 0) {
      SStreamDataBlock* qRes = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
      SStreamDataBlock* qRes = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, 0);
      if (qRes == NULL) {
        taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
        streamFreeQitem(input);
@@ -44,6 +44,7 @@ typedef struct SSyncSnapshotSender {
  SyncTerm term;
  int64_t  startTime;
  int64_t  endTime;
  int64_t  lastSendTime;
  bool     finish;

  // init when create

@@ -79,7 +79,6 @@ char* syncUtilPrintBin2(char* ptr, uint32_t len);
void syncUtilMsgHtoN(void* msg);
void syncUtilMsgNtoH(void* msg);
bool syncUtilUserPreCommit(tmsg_t msgType);
bool syncUtilUserCommit(tmsg_t msgType);
bool syncUtilUserRollback(tmsg_t msgType);

void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNode* pNode, const char* format, ...);

@@ -84,7 +84,7 @@ void syncOneReplicaAdvance(SSyncNode* pSyncNode) {
}

void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) {
  ASSERT(false && "deprecated");
  ASSERTS(false, "deprecated");
  if (pSyncNode == NULL) {
    sError("pSyncNode is NULL");
    return;

@@ -151,7 +151,7 @@ int32_t syncReconfig(int64_t rid, SSyncCfg* pNewCfg) {
    }

    syncNodeStartHeartbeatTimer(pSyncNode);
    syncNodeReplicate(pSyncNode);
    //syncNodeReplicate(pSyncNode);
  }

  syncNodeRelease(pSyncNode);

@@ -791,9 +791,9 @@ static int32_t syncHbTimerStop(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer) {
}

int32_t syncNodeLogStoreRestoreOnNeed(SSyncNode* pNode) {
  ASSERT(pNode->pLogStore != NULL && "log store not created");
  ASSERT(pNode->pFsm != NULL && "pFsm not registered");
  ASSERT(pNode->pFsm->FpGetSnapshotInfo != NULL && "FpGetSnapshotInfo not registered");
  ASSERTS(pNode->pLogStore != NULL, "log store not created");
  ASSERTS(pNode->pFsm != NULL, "pFsm not registered");
  ASSERTS(pNode->pFsm->FpGetSnapshotInfo != NULL, "FpGetSnapshotInfo not registered");
  SSnapshot snapshot;
  if (pNode->pFsm->FpGetSnapshotInfo(pNode->pFsm, &snapshot) < 0) {
    sError("vgId:%d, failed to get snapshot info since %s", pNode->vgId, terrstr());

@@ -1144,8 +1144,8 @@ void syncNodeMaybeUpdateCommitBySnapshot(SSyncNode* pSyncNode) {
}

int32_t syncNodeRestore(SSyncNode* pSyncNode) {
  ASSERT(pSyncNode->pLogStore != NULL && "log store not created");
  ASSERT(pSyncNode->pLogBuf != NULL && "ring log buffer not created");
  ASSERTS(pSyncNode->pLogStore != NULL, "log store not created");
  ASSERTS(pSyncNode->pLogBuf != NULL, "ring log buffer not created");

  SyncIndex lastVer = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore);
  SyncIndex commitIndex = pSyncNode->pLogStore->syncLogCommitIndex(pSyncNode->pLogStore);

@@ -1839,7 +1839,8 @@ void syncNodeBecomeLeader(SSyncNode* pSyncNode, const char* debugStr) {
#endif

  // close receiver
  if (snapshotReceiverIsStart(pSyncNode->pNewNodeReceiver)) {
  if (pSyncNode != NULL && pSyncNode->pNewNodeReceiver != NULL &&
      snapshotReceiverIsStart(pSyncNode->pNewNodeReceiver)) {
    snapshotReceiverForceStop(pSyncNode->pNewNodeReceiver);
  }

@@ -2663,7 +2664,7 @@ int32_t syncNodeOnClientRequest(SSyncNode* ths, SRpcMsg* pMsg, SyncIndex* pRetIn

  int32_t code = syncNodeAppend(ths, pEntry);
  if (code < 0 && ths->vgId != 1 && vnodeIsMsgBlock(pEntry->originalRpcType)) {
    ASSERT(false && "failed to append blocking msg");
    ASSERTS(false, "failed to append blocking msg");
  }
  return code;
}
@@ -50,7 +50,7 @@ int32_t syncLogBufferAppend(SSyncLogBuffer* pBuf, SSyncNode* pNode, SSyncRaftEnt

  // initial log buffer with at least one item, e.g. commitIndex
  SSyncRaftEntry* pMatch = pBuf->entries[(index - 1 + pBuf->size) % pBuf->size].pItem;
  ASSERT(pMatch != NULL && "no matched log entry");
  ASSERTS(pMatch != NULL, "no matched log entry");
  ASSERT(pMatch->index + 1 == index);

  SSyncLogBufEntry tmp = {.pItem = pEntry, .prevLogIndex = pMatch->index, .prevLogTerm = pMatch->term};

@@ -86,14 +86,14 @@ SyncTerm syncLogReplMgrGetPrevLogTerm(SSyncLogReplMgr* pMgr, SSyncNode* pNode, S

  if (prevIndex >= pBuf->startIndex) {
    pEntry = pBuf->entries[(prevIndex + pBuf->size) % pBuf->size].pItem;
    ASSERT(pEntry != NULL && "no log entry found");
    ASSERTS(pEntry != NULL, "no log entry found");
    prevLogTerm = pEntry->term;
    return prevLogTerm;
  }

  if (pMgr && pMgr->startIndex <= prevIndex && prevIndex < pMgr->endIndex) {
    int64_t timeMs = pMgr->states[(prevIndex + pMgr->size) % pMgr->size].timeMs;
    ASSERT(timeMs != 0 && "no log entry found");
    ASSERTS(timeMs != 0, "no log entry found");
    prevLogTerm = pMgr->states[(prevIndex + pMgr->size) % pMgr->size].term;
    ASSERT(prevIndex == 0 || prevLogTerm != 0);
    return prevLogTerm;

@@ -141,9 +141,9 @@ int32_t syncLogValidateAlignmentOfCommit(SSyncNode* pNode, SyncIndex commitIndex
}

int32_t syncLogBufferInitWithoutLock(SSyncLogBuffer* pBuf, SSyncNode* pNode) {
  ASSERT(pNode->pLogStore != NULL && "log store not created");
  ASSERT(pNode->pFsm != NULL && "pFsm not registered");
  ASSERT(pNode->pFsm->FpGetSnapshotInfo != NULL && "FpGetSnapshotInfo not registered");
  ASSERTS(pNode->pLogStore != NULL, "log store not created");
  ASSERTS(pNode->pFsm != NULL, "pFsm not registered");
  ASSERTS(pNode->pFsm->FpGetSnapshotInfo != NULL, "FpGetSnapshotInfo not registered");

  SSnapshot snapshot;
  if (pNode->pFsm->FpGetSnapshotInfo(pNode->pFsm, &snapshot) < 0) {

@@ -437,7 +437,7 @@ _out:
}

int32_t syncLogFsmExecute(SSyncNode* pNode, SSyncFSM* pFsm, ESyncState role, SyncTerm term, SSyncRaftEntry* pEntry) {
  ASSERT(pFsm->FpCommitCb != NULL && "No commit cb registered for the FSM");
  ASSERTS(pFsm->FpCommitCb != NULL, "No commit cb registered for the FSM");

  if ((pNode->replicaNum == 1) && pNode->restoreFinish && pNode->vgId != 1) {
    return 0;

@@ -513,13 +513,8 @@ int32_t syncLogBufferCommit(SSyncLogBuffer* pBuf, SSyncNode* pNode, int64_t comm
    if (!syncUtilUserCommit(pEntry->originalRpcType)) {
      sInfo("vgId:%d, commit sync barrier. index: %" PRId64 ", term:%" PRId64 ", type: %s", vgId, pEntry->index,
            pEntry->term, TMSG_INFO(pEntry->originalRpcType));
      pBuf->commitIndex = index;
      if (!inBuf) {
        syncEntryDestroy(pEntry);
        pEntry = NULL;
      }
      continue;
    }

    if (syncLogFsmExecute(pNode, pFsm, role, term, pEntry) != 0) {
      sError("vgId:%d, failed to execute sync log entry. index:%" PRId64 ", term:%" PRId64
             ", role: %d, current term: %" PRId64,

@@ -905,7 +900,7 @@ int32_t syncNodeLogReplMgrInit(SSyncNode* pNode) {
    ASSERT(pNode->logReplMgrs[i] == NULL);
    pNode->logReplMgrs[i] = syncLogReplMgrCreate();
    pNode->logReplMgrs[i]->peerId = i;
    ASSERT(pNode->logReplMgrs[i] != NULL && "Out of memory.");
    ASSERTS(pNode->logReplMgrs[i] != NULL, "Out of memory.");
  }
  return 0;
}
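Several sync hunks above replace `ASSERT(cond && "msg")` with `ASSERTS(cond, "msg")`, passing the message as an explicit argument instead of smuggling it through `&&`. The sketch below shows one plausible way such a two-argument assert macro can be defined; this is an assumed definition for illustration only, not the one in TDengine's headers.

```c
#include <stdio.h>
#include <stdlib.h>

/* Assumed definition for illustration: report file/line and an explicit message,
 * then abort, rather than relying on `cond && "msg"` to carry the text. */
#define DEMO_ASSERTS(cond, msg)                                                    \
  do {                                                                             \
    if (!(cond)) {                                                                 \
      fprintf(stderr, "assert failed at %s:%d: %s\n", __FILE__, __LINE__, (msg));  \
      abort();                                                                     \
    }                                                                              \
  } while (0)

int main(void) {
  int   dummy = 0;
  void *pLogStore = &dummy;
  DEMO_ASSERTS(pLogStore != NULL, "log store not created"); /* passes */
  printf("all asserts passed\n");
  return 0;
}
```

One practical difference: with the explicit message argument the condition is no longer forced through a string literal, so a release build that compiles the assert away cannot silently change the expression being evaluated.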
@@ -103,6 +103,7 @@ int32_t snapshotSenderStart(SSyncSnapshotSender *pSender) {
  pSender->sendingMS = 0;
  pSender->term = pSender->pSyncNode->pRaftStore->currentTerm;
  pSender->startTime = taosGetTimestampMs();
  pSender->lastSendTime = pSender->startTime;
  pSender->finish = false;

  // build begin msg

@@ -201,6 +202,8 @@ int32_t snapshotSend(SSyncSnapshotSender *pSender) {
  syncNodeSendMsgById(&pMsg->destId, pSender->pSyncNode, &rpcMsg);
  syncLogSendSyncSnapshotSend(pSender->pSyncNode, pMsg, "");

  pSender->lastSendTime = taosGetTimestampMs();

  // event log
  if (pSender->seq == SYNC_SNAPSHOT_SEQ_END) {
    sSTrace(pSender, "snapshot sender finish");

@@ -213,33 +216,36 @@ int32_t snapshotSend(SSyncSnapshotSender *pSender) {
// send snapshot data from cache
int32_t snapshotReSend(SSyncSnapshotSender *pSender) {
  // send current block data

  // build msg
  SRpcMsg rpcMsg = {0};
  (void)syncBuildSnapshotSend(&rpcMsg, pSender->blockLen, pSender->pSyncNode->vgId);

  SyncSnapshotSend *pMsg = rpcMsg.pCont;
  pMsg->srcId = pSender->pSyncNode->myRaftId;
  pMsg->destId = (pSender->pSyncNode->replicasId)[pSender->replicaIndex];
  pMsg->term = pSender->pSyncNode->pRaftStore->currentTerm;
  pMsg->beginIndex = pSender->snapshotParam.start;
  pMsg->lastIndex = pSender->snapshot.lastApplyIndex;
  pMsg->lastTerm = pSender->snapshot.lastApplyTerm;
  pMsg->lastConfigIndex = pSender->snapshot.lastConfigIndex;
  pMsg->lastConfig = pSender->lastConfig;
  pMsg->seq = pSender->seq;

  if (pSender->pCurrentBlock != NULL && pSender->blockLen > 0) {
    // build msg
    SRpcMsg rpcMsg = {0};
    (void)syncBuildSnapshotSend(&rpcMsg, pSender->blockLen, pSender->pSyncNode->vgId);

    SyncSnapshotSend *pMsg = rpcMsg.pCont;
    pMsg->srcId = pSender->pSyncNode->myRaftId;
    pMsg->destId = (pSender->pSyncNode->replicasId)[pSender->replicaIndex];
    pMsg->term = pSender->pSyncNode->pRaftStore->currentTerm;
    pMsg->beginIndex = pSender->snapshotParam.start;
    pMsg->lastIndex = pSender->snapshot.lastApplyIndex;
    pMsg->lastTerm = pSender->snapshot.lastApplyTerm;
    pMsg->lastConfigIndex = pSender->snapshot.lastConfigIndex;
    pMsg->lastConfig = pSender->lastConfig;
    pMsg->seq = pSender->seq;

    // pMsg->privateTerm = pSender->privateTerm;
    memcpy(pMsg->data, pSender->pCurrentBlock, pSender->blockLen);

    // send msg
    syncNodeSendMsgById(&pMsg->destId, pSender->pSyncNode, &rpcMsg);
    syncLogSendSyncSnapshotSend(pSender->pSyncNode, pMsg, "");

    // event log
    sSTrace(pSender, "snapshot sender resend");
  }

  // send msg
  syncNodeSendMsgById(&pMsg->destId, pSender->pSyncNode, &rpcMsg);
  syncLogSendSyncSnapshotSend(pSender->pSyncNode, pMsg, "");

  pSender->lastSendTime = taosGetTimestampMs();

  // event log
  sSTrace(pSender, "snapshot sender resend");

  return 0;
}

@@ -339,6 +345,8 @@ bool snapshotReceiverIsStart(SSyncSnapshotReceiver *pReceiver) { return pReceive
void snapshotReceiverForceStop(SSyncSnapshotReceiver *pReceiver) {
  // force close, abandon incomplete data
  if (pReceiver->pWriter != NULL) {
    // event log
    sRTrace(pReceiver, "snapshot receiver force stop");
    int32_t ret = pReceiver->pSyncNode->pFsm->FpSnapshotStopWrite(pReceiver->pSyncNode->pFsm, pReceiver->pWriter, false,
                                                                  &(pReceiver->snapshot));
    ASSERT(ret == 0);

@@ -348,7 +356,7 @@ void snapshotReceiverForceStop(SSyncSnapshotReceiver *pReceiver) {
  pReceiver->start = false;

  // event log
  sRTrace(pReceiver, "snapshot receiver force stop");
  // sRTrace(pReceiver, "snapshot receiver force stop");
}

int32_t snapshotReceiverStartWriter(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pBeginMsg) {

@@ -669,6 +677,7 @@ static int32_t syncNodeOnSnapshotEnd(SSyncNode *pSyncNode, SyncSnapshotSend *pMs
    sNTrace(pSyncNode, "snapshot receiver finish waitting for true time, now:%" PRId64 ", stime:%" PRId64, timeNow,
            pMsg->startTime);
    taosMsleep(10);
    timeNow = taosGetTimestampMs();
  }

  int32_t code = snapshotReceiverFinish(pReceiver, pMsg);

@@ -20,6 +20,7 @@
#include "syncRaftLog.h"
#include "syncReplication.h"
#include "syncRespMgr.h"
#include "syncSnapshot.h"
#include "syncUtil.h"

static void syncNodeCleanConfigIndex(SSyncNode* ths) {

@@ -70,6 +71,20 @@ static int32_t syncNodeTimerRoutine(SSyncNode* ths) {
  }

  int64_t timeNow = taosGetTimestampMs();

  for (int i = 0; i < ths->peersNum; ++i) {
    SSyncSnapshotSender* pSender = syncNodeGetSnapshotSender(ths, &(ths->peersId[i]));
    if (pSender != NULL) {
      if (ths->isStart && ths->state == TAOS_SYNC_STATE_LEADER && pSender->start &&
          timeNow - pSender->lastSendTime > SYNC_SNAP_RESEND_MS) {
        snapshotReSend(pSender);
      } else {
        sTrace("vgId:%d, do not resend: nstart%d, now:%" PRId64 ", lstsend:%" PRId64 ", diff:%" PRId64, ths->vgId,
               ths->isStart, timeNow, pSender->lastSendTime, timeNow - pSender->lastSendTime);
      }
    }
  }

  if (atomic_load_64(&ths->snapshottingIndex) != SYNC_INDEX_INVALID) {
    // end timeout wal snapshot
    if (timeNow - ths->snapshottingTime > SYNC_DEL_WAL_MS &&
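The syncNodeTimerRoutine hunk above adds a periodic check that calls snapshotReSend when an active snapshot sender has been idle longer than SYNC_SNAP_RESEND_MS, which is why the sender now tracks lastSendTime. A minimal sketch of that idle-timeout resend pattern follows; the threshold, clock source, and sender type here are illustrative stand-ins rather than the sync module's actual definitions.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define DEMO_RESEND_MS 5000 /* illustrative stand-in for SYNC_SNAP_RESEND_MS */

typedef struct {
  bool    active;       /* sender has an in-flight snapshot */
  int64_t lastSendTime; /* wall-clock time of the last block sent, in ms */
} DemoSnapshotSender;

static int64_t demoNowMs(void) {
  struct timespec ts;
  clock_gettime(CLOCK_REALTIME, &ts);
  return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Called from a periodic timer: resend only when the sender is active and idle too long. */
static bool demoMaybeResend(DemoSnapshotSender *pSender, int64_t nowMs) {
  if (!pSender->active || nowMs - pSender->lastSendTime <= DEMO_RESEND_MS) {
    return false;
  }
  pSender->lastSendTime = nowMs; /* stamp the resend so the next tick does not fire again */
  return true;
}

int main(void) {
  DemoSnapshotSender sender = {.active = true, .lastSendTime = demoNowMs() - 6000};
  printf("resend needed: %d\n", demoMaybeResend(&sender, demoNowMs()));
  printf("resend needed again right away: %d\n", demoMaybeResend(&sender, demoNowMs()));
  return 0;
}
```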
@@ -160,8 +160,6 @@ void syncUtilMsgNtoH(void* msg) {

bool syncUtilUserPreCommit(tmsg_t msgType) { return msgType != TDMT_SYNC_NOOP && msgType != TDMT_SYNC_LEADER_TRANSFER; }

bool syncUtilUserCommit(tmsg_t msgType) { return msgType != TDMT_SYNC_NOOP && msgType != TDMT_SYNC_LEADER_TRANSFER; }

bool syncUtilUserRollback(tmsg_t msgType) { return msgType != TDMT_SYNC_NOOP && msgType != TDMT_SYNC_LEADER_TRANSFER; }

void syncCfg2SimpleStr(const SSyncCfg* pCfg, char* buf, int32_t bufLen) {

@@ -568,7 +566,7 @@ void syncLogSendSyncSnapshotSend(SSyncNode* pSyncNode, const SyncSnapshotSend* p
  syncUtilU642Addr(pMsg->destId.addr, host, sizeof(host), &port);

  sNTrace(pSyncNode,
          "send sync-snapshot-send from %s:%d {term:%" PRId64 ", begin:%" PRId64 ", end:%" PRId64 ", lterm:%" PRId64
          "send sync-snapshot-send to %s:%d {term:%" PRId64 ", begin:%" PRId64 ", end:%" PRId64 ", lterm:%" PRId64
          ", stime:%" PRId64 ", seq:%d}, %s",
          host, port, pMsg->term, pMsg->beginIndex, pMsg->lastIndex, pMsg->lastTerm, pMsg->startTime, pMsg->seq, s);
}

@@ -595,7 +593,7 @@ void syncLogSendSyncSnapshotRsp(SSyncNode* pSyncNode, const SyncSnapshotRsp* pMs
  syncUtilU642Addr(pMsg->destId.addr, host, sizeof(host), &port);

  sNTrace(pSyncNode,
          "send sync-snapshot-rsp from %s:%d {term:%" PRId64 ", begin:%" PRId64 ", lst:%" PRId64 ", lterm:%" PRId64
          "send sync-snapshot-rsp to %s:%d {term:%" PRId64 ", begin:%" PRId64 ", lst:%" PRId64 ", lterm:%" PRId64
          ", stime:%" PRId64 ", ack:%d}, %s",
          host, port, pMsg->term, pMsg->snapBeginIndex, pMsg->lastIndex, pMsg->lastTerm, pMsg->startTime, pMsg->ack, s);
}

@@ -19,7 +19,7 @@ SyncAppendEntriesReply *createMsg() {
  pMsg->success = true;
  pMsg->matchIndex = 77;
  pMsg->term = 33;
  pMsg->privateTerm = 44;
  // pMsg->privateTerm = 44;
  pMsg->startTime = taosGetTimestampMs();
  return pMsg;
}
Some files were not shown because too many files have changed in this diff.