Merge branch '3.0' into fix/TD-23623

Minglei Jin 2023-04-20 16:32:43 +08:00 committed by GitHub
commit 1e51b2d781
141 changed files with 4013 additions and 2274 deletions

View File

@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.0.3.2")
SET(TD_VER_NUMBER "3.0.4.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)

View File

@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG cb1e89c
GIT_TAG e02ddb2
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

compile_flags.txt (new file, 9 lines)
View File

@ -0,0 +1,9 @@
-DLINUX
-DWEBSOCKET
-I/usr/include
-Iinclude
-Iinclude/os
-Iinclude/common
-Iinclude/util
-Iinclude/libs/transport
-Itools/shell/inc

View File

@ -459,12 +459,17 @@ TO_JSON(str_literal)
#### TO_UNIXTIMESTAMP
```sql
TO_UNIXTIMESTAMP(expr)
TO_UNIXTIMESTAMP(expr [, return_timestamp])
return_timestamp: {
0
| 1
}
```
**Description**: The UNIX timestamp converted from a date/time format string
**Return value type**: BIGINT
**Return value type**: BIGINT, TIMESTAMP
**Applicable column types**: VARCHAR and NCHAR
@ -476,6 +481,7 @@ TO_UNIXTIMESTAMP(expr)
- The input string must be compatible with the ISO8601/RFC3339 standard; NULL is returned if the string cannot be converted
- The precision of the returned timestamp is the same as the precision of the current database in use
- return_timestamp indicates whether the return value is of TIMESTAMP type. If this parameter is set to 1, the function returns TIMESTAMP; if set to 0, it returns BIGINT. If the parameter is omitted, the default return value type is BIGINT.
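As a quick illustration (a hedged sketch, not part of the reference: the literal timestamp string is arbitrary, and a reachable TDengine instance with default connection settings is assumed), the function can be exercised through the Python connector used elsewhere in these docs:
```python
import taos

# Hedged sketch: call TO_UNIXTIMESTAMP with and without return_timestamp.
conn = taos.connect()

# Default form: returns BIGINT
for row in conn.query("SELECT TO_UNIXTIMESTAMP('2023-04-20T16:32:43+08:00')"):
    print(row)

# return_timestamp = 1: returns TIMESTAMP
for row in conn.query("SELECT TO_UNIXTIMESTAMP('2023-04-20T16:32:43+08:00', 1)"):
    print(row)

conn.close()
```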
### Time and Date Functions

View File

@ -69,7 +69,7 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE
| 1 | consumer_id | BIGINT | Consumer ID |
| 2 | consumer_group | BINARY(192) | Consumer group |
| 3 | client_id | BINARY(192) | Client ID (user-defined) |
| 4 | status | BINARY(20) | Consumer status |
| 4 | status | BINARY(20) | Consumer status. Possible values: ready (the consumer is in a normal state), lost (the connection between the consumer and the mnode is broken), rebalance (redistribution of the vgroups that belong to the consumer is in progress), unknown (the consumer is in an invalid state) |
| 5 | topics | BINARY(204) | Subscribed topic. Returns one row for each topic. |
| 6 | up_time | TIMESTAMP | Time of first connection to TDengine Server |
| 7 | subscribe_time | TIMESTAMP | Time of first subscription |
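For reference, the rows above can be inspected from the Python connector. The sketch below is hypothetical: the table name `performance_schema.perf_consumers` is an assumption not shown in this excerpt and may differ in your TDengine version.
```python
import taos

# Hypothetical sketch: list consumer status; the table name perf_consumers is an assumption.
conn = taos.connect()
for row in conn.query("SELECT consumer_id, consumer_group, status FROM performance_schema.perf_consumers"):
    print(row)  # e.g. (consumer_id, consumer_group, 'ready')
conn.close()
```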

View File

@ -27,7 +27,7 @@ The following data types can be used in the schema for standard tables.
| - | :------- | :-------- | :------- |
| 1 | ALTER ACCOUNT | Deprecated| This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported."
| 2 | ALTER ALL DNODES | Added | Modifies the configuration of all dnodes.
| 3 | ALTER DATABASE | Modified | Deprecated<ul><li>QUORUM: Specified the required number of confirmations. STRICT is now used to specify strong or weak consistency. The STRICT parameter cannot be modified. </li><li>BLOCKS: Specified the memory blocks used by each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>UPDATE: Specified whether update operations were supported. All databases now support updating data in certain columns. </li><li>CACHELAST: Specified how to cache the newest row of data. CACHEMODEL now replaces CACHELAST. </li><li>COMP: Cannot be modified. <br/>Added</li><li>CACHEMODEL: Specifies whether to cache the latest subtable data. </li><li>CACHESIZE: Specifies the size of the cache for the newest subtable data. </li><li>WAL_FSYNC_PERIOD: Replaces the FSYNC parameter. </li><li>WAL_LEVEL: Replaces the WAL parameter. </li><li>WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. </li><li>WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. <br/>Modified</li><li>REPLICA: Cannot be modified. </li><li>KEEP: Now supports units. </li></ul>
| 3 | ALTER DATABASE | Modified | Deprecated<ul><li>QUORUM: Specified the required number of confirmations. TDengine 3.0 provides strict consistency by default and does not allow changing to weak consistency. </li><li>BLOCKS: Specified the memory blocks used by each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>UPDATE: Specified whether update operations were supported. All databases now support updating data in certain columns. </li><li>CACHELAST: Specified how to cache the newest row of data. CACHEMODEL now replaces CACHELAST. </li><li>COMP: Cannot be modified. <br/>Added</li><li>CACHEMODEL: Specifies whether to cache the latest subtable data. </li><li>CACHESIZE: Specifies the size of the cache for the newest subtable data. </li><li>WAL_FSYNC_PERIOD: Replaces the FSYNC parameter. </li><li>WAL_LEVEL: Replaces the WAL parameter. </li><li>WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. </li><li>WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. <br/>Modified</li><li>REPLICA: Cannot be modified. </li><li>KEEP: Now supports units. </li></ul>
| 4 | ALTER STABLE | Modified | Deprecated<ul><li>CHANGE TAG: Modified the name of a tag. Replaced by RENAME TAG. <br/>Added</li><li>RENAME TAG: Replaces CHANGE TAG. </li><li>COMMENT: Specifies comments for a supertable. </li></ul>
| 5 | ALTER TABLE | Modified | Deprecated<ul><li>CHANGE TAG: Modified the name of a tag. Replaced by RENAME TAG. <br/>Added</li><li>RENAME TAG: Replaces CHANGE TAG. </li><li>COMMENT: Specifies comments for a standard table. </li><li>TTL: Specifies the time-to-live for a standard table. </li></ul>
| 6 | ALTER USER | Modified | Deprecated<ul><li>PRIVILEGE: Specified user permissions. Replaced by GRANT and REVOKE. <br/>Added</li><li>ENABLE: Enables or disables a user. </li><li>SYSINFO: Specifies whether a user can query system information. </li></ul>
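To make the renamed parameters concrete, the hedged sketch below issues a few 3.0-style statements through the Python connector; the database name `mydb`, the supertable `meters`, and the chosen values are placeholders, not recommendations.
```python
import taos

# Illustrative sketch of 3.0-style statements using parameters named in the table above.
# "mydb", "meters", and the values are placeholders.
conn = taos.connect()
conn.execute("ALTER DATABASE mydb CACHEMODEL 'last_row'")        # replaces CACHELAST
conn.execute("ALTER DATABASE mydb WAL_FSYNC_PERIOD 3000")        # replaces FSYNC
conn.execute("ALTER DATABASE mydb KEEP 365d")                    # KEEP now accepts units
conn.execute("ALTER STABLE mydb.meters RENAME TAG location loc") # replaces CHANGE TAG
conn.close()
```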

View File

@ -459,6 +459,56 @@ For a more detailed description of the `sql()` method, please refer to [RestClie
</TabItem>
</Tabs>
### Schemaless Insert
The connector supports schemaless insert; see the examples below and the condensed sketch after them.
<Tabs defaultValue="list">
<TabItem value="list" label="List Insert">
Simple insert
```python
{{#include docs/examples/python/schemaless_insert.py}}
```
Insert with ttl argument
```python
{{#include docs/examples/python/schemaless_insert_ttl.py}}
```
Insert with req_id argument
```python
{{#include docs/examples/python/schemaless_insert_req_id.py}}
```
</TabItem>
<TabItem value="raw" label="Raw Insert">
Simple insert
```python
{{#include docs/examples/python/schemaless_insert_raw.py}}
```
Insert with ttl argument
```python
{{#include docs/examples/python/schemaless_insert_raw_ttl.py}}
```
Insert with req_id argument
```python
{{#include docs/examples/python/schemaless_insert_raw_req_id.py}}
```
</TabItem>
</Tabs>
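For orientation, a minimal list-style insert condensed from the example files above looks like this (the database name and line-protocol payload are taken from those examples):
```python
import taos

# Minimal list-style schemaless insert, condensed from the example files above.
conn = taos.connect()
conn.execute("CREATE DATABASE IF NOT EXISTS pytest_line PRECISION 'us'")
conn.select_db("pytest_line")
lines = ['st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000']
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED)
conn.close()
```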
### Other sample programs
| Example program links | Example program content |

View File

@ -1 +1 @@
label: "connector"
label: "Connector"

View File

@ -54,94 +54,91 @@ Command-line arguments take precedence over environment variables over configura
```shell
Usage of taosAdapter:
--collectd.db string collectd db name. Env "TAOS_ADAPTER_COLLECTD_DB" (default "collectd")
--collectd.enable enable collectd. Env "TAOS_ADAPTER_COLLECTD_ENABLE" (default true)
--collectd.password string collectd password. Env "TAOS_ADAPTER_COLLECTD_PASSWORD" (default "taosdata")
--collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045)
--collectd.ttl int collectd data ttl. Env "TAOS_ADAPTER_COLLECTD_TTL"
--collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root")
--collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10)
-c, --config string config path default /etc/taos/taosadapter.toml
--cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true)
--cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials"
--cors.allowHeaders stringArray cors allow HEADERS. Env "TAOS_ADAPTER_ALLOW_HEADERS"
--cors.allowOrigins stringArray cors allow origins. Env "TAOS_ADAPTER_ALLOW_ORIGINS"
--cors.allowWebSockets cors allow WebSockets. Env "TAOS_ADAPTER_CORS_ALLOW_WebSockets"
--cors.exposeHeaders stringArray cors expose headers. Env "TAOS_ADAPTER_Expose_Headers"
--debug enable debug mode. Env "TAOS_ADAPTER_DEBUG" (default true)
--help Print this help message and exit
--httpCodeServerError Use a non-200 http status code when taosd returns an error. Env "TAOS_ADAPTER_HTTP_CODE_SERVER_ERROR"
--influxdb.enable enable influxdb. Env "TAOS_ADAPTER_INFLUXDB_ENABLE" (default true)
--log.enableRecordHttpSql whether to record http sql. Env "TAOS_ADAPTER_LOG_ENABLE_RECORD_HTTP_SQL"
--log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos")
--log.rotationCount uint log rotation count. Env "TAOS_ADAPTER_LOG_ROTATION_COUNT" (default 30)
--log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_ROTATION_SIZE" (default "1GB")
--log.rotationTime duration log rotation time. Env "TAOS_ADAPTER_LOG_ROTATION_TIME" (default 24h0m0s)
--log.sqlRotationCount uint record sql log rotation count. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_COUNT" (default 2)
--log.sqlRotationSize string record sql log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_SIZE" (default "1GB")
--log.sqlRotationTime duration record sql log rotation time. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_TIME" (default 24h0m0s)
--logLevel string log level (panic fatal error warn warning info debug trace). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info")
--monitor.collectDuration duration Set monitor duration. Env "TAOS_ADAPTER_MONITOR_COLLECT_DURATION" (default 3s)
--monitor.disable Whether to disable monitoring. Env "TAOS_ADAPTER_MONITOR_DISABLE"
--monitor.disableCollectClientIP Whether to disable collecting clientIP. Env "TAOS_ADAPTER_MONITOR_DISABLE_COLLECT_CLIENT_IP"
--monitor.identity string The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_ADAPTER_MONITOR_IDENTITY"
--monitor.incgroup Whether running in cgroup. Env "TAOS_ADAPTER_MONITOR_INCGROUP"
--monitor.password string TDengine password. Env "TAOS_ADAPTER_MONITOR_PASSWORD" (default "taosdata")
--monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_ADAPTER_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80)
--monitor.pauseQueryMemoryThreshold float Memory percentage threshold for pause query. Env "TAOS_ADAPTER_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70)
--monitor.user string TDengine user. Env "TAOS_ADAPTER_MONITOR_USER" (default "root")
--monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_ADAPTER_MONITOR_WRITE_INTERVAL" (default 30s)
--monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_ADAPTER_MONITOR_WRITE_TO_TD"
--node_exporter.caCertFile string node_exporter ca cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CA_CERT_FILE"
--node_exporter.certFile string node_exporter cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CERT_FILE"
--node_exporter.db string node_exporter db name. Env "TAOS_ADAPTER_NODE_EXPORTER_DB" (default "node_exporter")
--node_exporter.enable enable node_exporter. Env "TAOS_ADAPTER_NODE_EXPORTER_ENABLE"
--node_exporter.gatherDuration duration node_exporter gather duration. Env "TAOS_ADAPTER_NODE_EXPORTER_GATHER_DURATION" (default 5s)
--node_exporter.httpBearerTokenString string node_exporter http bearer token. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_BEARER_TOKEN_STRING"
--node_exporter.httpPassword string node_exporter http password. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_PASSWORD"
--node_exporter.httpUsername string node_exporter http username. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_USERNAME"
--node_exporter.insecureSkipVerify node_exporter skip ssl check. Env "TAOS_ADAPTER_NODE_EXPORTER_INSECURE_SKIP_VERIFY" (default true)
--node_exporter.keyFile string node_exporter cert key file path. Env "TAOS_ADAPTER_NODE_EXPORTER_KEY_FILE"
--node_exporter.password string node_exporter password. Env "TAOS_ADAPTER_NODE_EXPORTER_PASSWORD" (default "taosdata")
--node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s)
--node_exporter.ttl int node_exporter data ttl. Env "TAOS_ADAPTER_NODE_EXPORTER_TTL"
--node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100])
--node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root")
--opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true)
--opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1)
--opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb])
--opentsdb_telnet.enable enable opentsdb telnet,warning: without auth info(default false). Env "TAOS_ADAPTER_OPENTSDB_TELNET_ENABLE"
--opentsdb_telnet.flushInterval duration opentsdb_telnet flush interval (0s means not valid) . Env "TAOS_ADAPTER_OPENTSDB_TELNET_FLUSH_INTERVAL"
--opentsdb_telnet.maxTCPConnections int max tcp connections. Env "TAOS_ADAPTER_OPENTSDB_TELNET_MAX_TCP_CONNECTIONS" (default 250)
--opentsdb_telnet.password string opentsdb_telnet password. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PASSWORD" (default "taosdata")
--opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049])
--opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE"
--opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL"
--opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
--pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT"
--pool.maxConnect int max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT"
--pool.maxIdle int max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE"
-P, --port int http port. Env "TAOS_ADAPTER_PORT" (default 6041)
--prometheus.enable enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true)
--restfulRowLimit int restful returns the maximum number of rows (-1 means no limit). Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1)
--statsd.allowPendingMessages int statsd allow pending messages. Env "TAOS_ADAPTER_STATSD_ALLOW_PENDING_MESSAGES" (default 50000)
--statsd.db string statsd db name. Env "TAOS_ADAPTER_STATSD_DB" (default "statsd")
--statsd.deleteCounters statsd delete counter cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_COUNTERS" (default true)
--statsd.deleteGauges statsd delete gauge cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_GAUGES" (default true)
--statsd.deleteSets statsd delete set cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_SETS" (default true)
--statsd.deleteTimings statsd delete timing cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_TIMINGS" (default true)
--statsd.enable enable statsd. Env "TAOS_ADAPTER_STATSD_ENABLE" (default true)
--statsd.gatherInterval duration statsd gather interval. Env "TAOS_ADAPTER_STATSD_GATHER_INTERVAL" (default 5s)
--statsd.maxTCPConnections int statsd max tcp connections. Env "TAOS_ADAPTER_STATSD_MAX_TCP_CONNECTIONS" (default 250)
--statsd.password string statsd password. Env "TAOS_ADAPTER_STATSD_PASSWORD" (default "taosdata")
--statsd.port int statsd server port. Env "TAOS_ADAPTER_STATSD_PORT" (default 6044)
--statsd.protocol string statsd protocol [tcp or udp]. Env "TAOS_ADAPTER_STATSD_PROTOCOL" (default "udp")
--statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE"
--statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL"
--statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root")
--statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10)
--taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE"
--version Print the version and exit
--collectd.db string collectd db name. Env "TAOS_ADAPTER_COLLECTD_DB" (default "collectd")
--collectd.enable enable collectd. Env "TAOS_ADAPTER_COLLECTD_ENABLE" (default true)
--collectd.password string collectd password. Env "TAOS_ADAPTER_COLLECTD_PASSWORD" (default "taosdata")
--collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045)
--collectd.ttl int collectd data ttl. Env "TAOS_ADAPTER_COLLECTD_TTL"
--collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root")
--collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10)
-c, --config string config path default /etc/taos/taosadapter.toml
--cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true)
--cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials"
--cors.allowHeaders stringArray cors allow HEADERS. Env "TAOS_ADAPTER_ALLOW_HEADERS"
--cors.allowOrigins stringArray cors allow origins. Env "TAOS_ADAPTER_ALLOW_ORIGINS"
--cors.allowWebSockets cors allow WebSockets. Env "TAOS_ADAPTER_CORS_ALLOW_WebSockets"
--cors.exposeHeaders stringArray cors expose headers. Env "TAOS_ADAPTER_Expose_Headers"
--debug enable debug mode. Env "TAOS_ADAPTER_DEBUG" (default true)
--help Print this help message and exit
--httpCodeServerError Use a non-200 http status code when server returns an error. Env "TAOS_ADAPTER_HTTP_CODE_SERVER_ERROR"
--influxdb.enable enable influxdb. Env "TAOS_ADAPTER_INFLUXDB_ENABLE" (default true)
--log.enableRecordHttpSql whether to record http sql. Env "TAOS_ADAPTER_LOG_ENABLE_RECORD_HTTP_SQL"
--log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos")
--log.rotationCount uint log rotation count. Env "TAOS_ADAPTER_LOG_ROTATION_COUNT" (default 30)
--log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_ROTATION_SIZE" (default "1GB")
--log.rotationTime duration log rotation time. Env "TAOS_ADAPTER_LOG_ROTATION_TIME" (default 24h0m0s)
--log.sqlRotationCount uint record sql log rotation count. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_COUNT" (default 2)
--log.sqlRotationSize string record sql log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_SIZE" (default "1GB")
--log.sqlRotationTime duration record sql log rotation time. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_TIME" (default 24h0m0s)
--logLevel string log level (panic fatal error warn warning info debug trace). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info")
--monitor.collectDuration duration Set monitor duration. Env "TAOS_ADAPTER_MONITOR_COLLECT_DURATION" (default 3s)
--monitor.disable Whether to disable monitoring. Env "TAOS_ADAPTER_MONITOR_DISABLE"
--monitor.disableCollectClientIP Whether to disable collecting clientIP. Env "TAOS_ADAPTER_MONITOR_DISABLE_COLLECT_CLIENT_IP"
--monitor.identity string The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_ADAPTER_MONITOR_IDENTITY"
--monitor.incgroup Whether running in cgroup. Env "TAOS_ADAPTER_MONITOR_INCGROUP"
--monitor.password string TDengine password. Env "TAOS_ADAPTER_MONITOR_PASSWORD" (default "taosdata")
--monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_ADAPTER_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80)
--monitor.pauseQueryMemoryThreshold float Memory percentage threshold for pause query. Env "TAOS_ADAPTER_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70)
--monitor.user string TDengine user. Env "TAOS_ADAPTER_MONITOR_USER" (default "root")
--monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_ADAPTER_MONITOR_WRITE_INTERVAL" (default 30s)
--monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_ADAPTER_MONITOR_WRITE_TO_TD"
--node_exporter.caCertFile string node_exporter ca cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CA_CERT_FILE"
--node_exporter.certFile string node_exporter cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CERT_FILE"
--node_exporter.db string node_exporter db name. Env "TAOS_ADAPTER_NODE_EXPORTER_DB" (default "node_exporter")
--node_exporter.enable enable node_exporter. Env "TAOS_ADAPTER_NODE_EXPORTER_ENABLE"
--node_exporter.gatherDuration duration node_exporter gather duration. Env "TAOS_ADAPTER_NODE_EXPORTER_GATHER_DURATION" (default 5s)
--node_exporter.httpBearerTokenString string node_exporter http bearer token. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_BEARER_TOKEN_STRING"
--node_exporter.httpPassword string node_exporter http password. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_PASSWORD"
--node_exporter.httpUsername string node_exporter http username. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_USERNAME"
--node_exporter.insecureSkipVerify node_exporter skip ssl check. Env "TAOS_ADAPTER_NODE_EXPORTER_INSECURE_SKIP_VERIFY" (default true)
--node_exporter.keyFile string node_exporter cert key file path. Env "TAOS_ADAPTER_NODE_EXPORTER_KEY_FILE"
--node_exporter.password string node_exporter password. Env "TAOS_ADAPTER_NODE_EXPORTER_PASSWORD" (default "taosdata")
--node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s)
--node_exporter.ttl int node_exporter data ttl. Env "TAOS_ADAPTER_NODE_EXPORTER_TTL"
--node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100])
--node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root")
--opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true)
--opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1)
--opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb])
--opentsdb_telnet.enable enable opentsdb telnet,warning: without auth info(default false). Env "TAOS_ADAPTER_OPENTSDB_TELNET_ENABLE"
--opentsdb_telnet.flushInterval duration opentsdb_telnet flush interval (0s means not valid) . Env "TAOS_ADAPTER_OPENTSDB_TELNET_FLUSH_INTERVAL"
--opentsdb_telnet.maxTCPConnections int max tcp connections. Env "TAOS_ADAPTER_OPENTSDB_TELNET_MAX_TCP_CONNECTIONS" (default 250)
--opentsdb_telnet.password string opentsdb_telnet password. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PASSWORD" (default "taosdata")
--opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049])
--opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE"
--opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL"
--opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
--pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT"
--pool.maxConnect int max connections to server. Env "TAOS_ADAPTER_POOL_MAX_CONNECT"
--pool.maxIdle int max idle connections to server. Env "TAOS_ADAPTER_POOL_MAX_IDLE"
-P, --port int http port. Env "TAOS_ADAPTER_PORT" (default 6041)
--prometheus.enable enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true)
--restfulRowLimit int restful returns the maximum number of rows (-1 means no limit). Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1)
--smlAutoCreateDB Whether to automatically create db when writing with schemaless. Env "TAOS_ADAPTER_SML_AUTO_CREATE_DB"
--statsd.allowPendingMessages int statsd allow pending messages. Env "TAOS_ADAPTER_STATSD_ALLOW_PENDING_MESSAGES" (default 50000)
--statsd.db string statsd db name. Env "TAOS_ADAPTER_STATSD_DB" (default "statsd")
--statsd.deleteCounters statsd delete counter cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_COUNTERS" (default true)
--statsd.deleteGauges statsd delete gauge cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_GAUGES" (default true)
--statsd.deleteSets statsd delete set cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_SETS" (default true)
--statsd.deleteTimings statsd delete timing cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_TIMINGS" (default true)
--statsd.enable enable statsd. Env "TAOS_ADAPTER_STATSD_ENABLE" (default true)
--statsd.gatherInterval duration statsd gather interval. Env "TAOS_ADAPTER_STATSD_GATHER_INTERVAL" (default 5s)
--statsd.maxTCPConnections int statsd max tcp connections. Env "TAOS_ADAPTER_STATSD_MAX_TCP_CONNECTIONS" (default 250)
--statsd.password string statsd password. Env "TAOS_ADAPTER_STATSD_PASSWORD" (default "taosdata")
--statsd.port int statsd server port. Env "TAOS_ADAPTER_STATSD_PORT" (default 6044)
--statsd.protocol string statsd protocol [tcp or udp]. Env "TAOS_ADAPTER_STATSD_PROTOCOL" (default "udp")
--statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE"
--statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL"
--statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root")
--statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10)
--taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE"
--tmq.releaseIntervalMultiplierForAutocommit int When set to autocommit, the interval for message release is a multiple of the autocommit interval, with a default value of 2 and a minimum value of 1 and a maximum value of 10. Env "TAOS_ADAPTER_TMQ_RELEASE_INTERVAL_MULTIPLIER_FOR_AUTOCOMMIT" (default 2)
--version Print the version and exit
```
Note:
@ -332,6 +329,10 @@ This parameter controls the number of results returned by the following interfac
taosAdapter uses the parameter `httpCodeServerError` to set whether to return a non-200 HTTP status code when the C interface returns an error. When set to true, different HTTP status codes are returned according to the error code returned by C. For details, see the HTTP Response Code chapter of the [RESTful API](https://docs.tdengine.com/reference/rest-api/).
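As a hedged illustration of the effect (the endpoint, port, and credentials are the commonly documented REST defaults and may differ in your deployment), an intentionally failing statement returns a non-200 status code only when `httpCodeServerError` is enabled:
```python
import requests

# Hypothetical check of httpCodeServerError: send an invalid statement to the REST
# endpoint and inspect the HTTP status code. URL and credentials are assumed defaults.
resp = requests.post(
    "http://localhost:6041/rest/sql",
    data="select * from no_such_db.no_such_table",
    auth=("root", "taosdata"),
)
print(resp.status_code)  # 200 when httpCodeServerError is off; 4xx/5xx when it is on
print(resp.json())
```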
## Configure whether schemaless writes automatically create DBs
Starting from version 3.0.4.0, taosAdapter provides the parameter `smlAutoCreateDB` to control whether to automatically create a database when writing with the schemaless protocol. The default value is false, which means the database is not created automatically; the user must create the database manually before performing schemaless writes.
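A hypothetical way to observe the behavior is a schemaless write through taosAdapter's InfluxDB-compatible endpoint; the URL `/influxdb/v1/write`, the `db` parameter, and the credentials below are assumptions based on that compatibility layer rather than details confirmed in this section.
```python
import requests

# Hypothetical sketch: with smlAutoCreateDB enabled, a line-protocol write to a
# non-existent database should create it automatically. Endpoint, port, and
# credentials are assumptions and may differ in your deployment.
resp = requests.post(
    "http://localhost:6041/influxdb/v1/write",
    params={"db": "sml_autocreated_db"},
    data='st,t1=3i64 c1=3i64 1626006833639000000',
    auth=("root", "taosdata"),
)
print(resp.status_code)
```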
## Troubleshooting
You can check the taosAdapter running status with the `systemctl status taosadapter` command.

View File

@ -628,6 +628,16 @@ The charset that takes effect is UTF-8.
| Default Value | 1 |
| Note | The core file is generated in the root directory if `systemctl start taosd`/`launchctl start com.tdengine.taosd` is used to start taosd, or in the working directory if `taosd` is started directly from the Linux/macOS shell. |
### enableScience
| Attribute | Description |
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
| Applicable | taos CLI client only |
| Meaning | Whether to display float and double values in scientific notation |
| Value Range | 0: false, 1: true |
| Default Value | 0 |
### udf
| Attribute | Description |

View File

@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
import Release from "/components/ReleaseV3";
## 3.0.4.0
<Release type="tdengine" version="3.0.4.0" />
## 3.0.3.2
<Release type="tdengine" version="3.0.3.2" />

View File

@ -10,6 +10,10 @@ For other historical version installers, please visit [here](https://www.taosdat
import Release from "/components/ReleaseV3";
## 2.4.12
<Release type="tools" version="2.4.12" />
## 2.4.11
<Release type="tools" version="2.4.11" />

View File

@ -0,0 +1,21 @@
import taos
conn = taos.connect()
dbname = "pytest_line"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s precision 'us'" % dbname)
conn.select_db(dbname)
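# InfluxDB line protocol records: measurement,tag_set field_set timestamp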
lines = [
'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000',
]
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED)
print("inserted")
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED)
result = conn.query("show tables")
for row in result:
print(row)
conn.execute("drop database if exists %s" % dbname)

View File

@ -0,0 +1,74 @@
import taos
from taos import utils
from taos import TaosConnection
from taos.cinterface import *
from taos.error import OperationalError, SchemalessError
conn = taos.connect()
dbname = "taos_schemaless_insert"
try:
conn.execute("drop database if exists %s" % dbname)
if taos.IS_V3:
conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname)
else:
conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname)
conn.select_db(dbname)
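# Raw mode takes a single string of newline-separated line-protocol records.
# In the calls below, the positional arguments are protocol (1 = line protocol) and precision (0 = not configured).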
lines = '''st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000
st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000
stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000'''
res = conn.schemaless_insert_raw(lines, 1, 0)
print("affected rows: ", res)
assert (res == 3)
lines = '''stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000'''
res = conn.schemaless_insert_raw(lines, 1, 0)
print("affected rows: ", res)
assert (res == 1)
result = conn.query("select * from st")
dict2 = result.fetch_all_into_dict()
print(dict2)
print(result.row_count)
all = result.rows_iter()
for row in all:
print(row)
result.close()
assert (result.row_count == 2)
# error test
lines = ''',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000'''
try:
res = conn.schemaless_insert_raw(lines, 1, 0)
print(res)
# assert(False)
except SchemalessError as err:
print('**** error: ', err)
# assert (err.msg == 'Invalid data format')
result = conn.query("select * from st")
print(result.row_count)
all = result.rows_iter()
for row in all:
print(row)
result.close()
conn.execute("drop database if exists %s" % dbname)
conn.close()
except InterfaceError as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
except SchemalessError as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
except Exception as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
raise err

View File

@ -0,0 +1,76 @@
import taos
from taos import utils
from taos import TaosConnection
from taos.cinterface import *
from taos.error import OperationalError, SchemalessError
conn = taos.connect()
dbname = "taos_schemaless_insert"
try:
conn.execute("drop database if exists %s" % dbname)
if taos.IS_V3:
conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname)
else:
conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname)
conn.select_db(dbname)
lines = '''st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000
st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000
stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000'''
ttl = 1000
req_id = utils.gen_req_id()
res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl, req_id=req_id)
print("affected rows: ", res)
assert (res == 3)
lines = '''stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000'''
ttl = 1000
req_id = utils.gen_req_id()
res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl, req_id=req_id)
print("affected rows: ", res)
assert (res == 1)
result = conn.query("select * from st")
dict2 = result.fetch_all_into_dict()
print(dict2)
print(result.row_count)
all = result.rows_iter()
for row in all:
print(row)
result.close()
assert (result.row_count == 2)
# error test
lines = ''',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000'''
try:
ttl = 1000
req_id = utils.gen_req_id()
res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl, req_id=req_id)
print(res)
# assert(False)
except SchemalessError as err:
print('**** error: ', err)
# assert (err.msg == 'Invalid data format')
result = conn.query("select * from st")
print(result.row_count)
all = result.rows_iter()
for row in all:
print(row)
result.close()
conn.execute("drop database if exists %s" % dbname)
conn.close()
except InterfaceError as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
except Exception as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
raise err

View File

@ -0,0 +1,73 @@
import taos
from taos import utils
from taos import TaosConnection
from taos.cinterface import *
from taos.error import OperationalError, SchemalessError
conn = taos.connect()
dbname = "taos_schemaless_insert"
try:
conn.execute("drop database if exists %s" % dbname)
if taos.IS_V3:
conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname)
else:
conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname)
conn.select_db(dbname)
lines = '''st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000
st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000
stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000'''
ttl = 1000
res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl)
print("affected rows: ", res)
assert (res == 3)
lines = '''stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000'''
ttl = 1000
res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl)
print("affected rows: ", res)
assert (res == 1)
result = conn.query("select * from st")
dict2 = result.fetch_all_into_dict()
print(dict2)
print(result.row_count)
all = result.rows_iter()
for row in all:
print(row)
result.close()
assert (result.row_count == 2)
# error test
lines = ''',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000'''
try:
ttl = 1000
res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl)
print(res)
# assert(False)
except SchemalessError as err:
print('**** error: ', err)
# assert (err.msg == 'Invalid data format')
result = conn.query("select * from st")
print(result.row_count)
all = result.rows_iter()
for row in all:
print(row)
result.close()
conn.execute("drop database if exists %s" % dbname)
conn.close()
except InterfaceError as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
except Exception as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
raise err

View File

@ -0,0 +1,22 @@
import taos
from taos import SmlProtocol, SmlPrecision
conn = taos.connect()
dbname = "pytest_line"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s precision 'us'" % dbname)
conn.select_db(dbname)
lines = [
'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000',
]
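# req_id tags the request so it can be traced end to end; use a distinct value per request.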
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED, req_id=1)
print("inserted")
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED, req_id=2)
result = conn.query("show tables")
for row in result:
print(row)
conn.execute("drop database if exists %s" % dbname)

View File

@ -0,0 +1,22 @@
import taos
from taos import SmlProtocol, SmlPrecision
conn = taos.connect()
dbname = "pytest_line"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s precision 'us'" % dbname)
conn.select_db(dbname)
lines = [
'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000',
]
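# The optional ttl argument sets a time-to-live for the tables auto-created by this write.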
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED, ttl=1000)
print("inserted")
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED, ttl=1000)
result = conn.query("show tables")
for row in result:
print(row)
conn.execute("drop database if exists %s" % dbname)

View File

@ -484,6 +484,56 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
</TabItem>
</Tabs>
### Schemaless Insert
The connector supports schemaless insert.
<Tabs defaultValue="list">
<TabItem value="list" label="List Insert">
Simple insert
```python
{{#include docs/examples/python/schemaless_insert.py}}
```
Insert with the ttl argument
```python
{{#include docs/examples/python/schemaless_insert_ttl.py}}
```
Insert with the req_id argument
```python
{{#include docs/examples/python/schemaless_insert_req_id.py}}
```
</TabItem>
<TabItem value="raw" label="Raw 写入">
简单写入
```python
{{#include docs/examples/python/schemaless_insert_raw.py}}
```
Insert with the ttl argument
```python
{{#include docs/examples/python/schemaless_insert_raw_ttl.py}}
```
Insert with the req_id argument
```python
{{#include docs/examples/python/schemaless_insert_raw_req_id.py}}
```
</TabItem>
</Tabs>
### Other sample programs
| Example program links | Example program content |

View File

@ -459,12 +459,17 @@ TO_JSON(str_literal)
#### TO_UNIXTIMESTAMP
```sql
TO_UNIXTIMESTAMP(expr)
TO_UNIXTIMESTAMP(expr [, return_timestamp])
return_timestamp: {
0
| 1
}
```
**Description**: Converts a date/time format string into a UNIX timestamp.
**Return value type**: BIGINT.
**Return value type**: BIGINT, TIMESTAMP.
**Applicable column types**: VARCHAR and NCHAR.
@ -476,6 +481,7 @@ TO_UNIXTIMESTAMP(expr)
- The input date/time string must conform to the ISO8601/RFC3339 standard; string formats that cannot be converted return NULL.
- The precision of the returned timestamp is the same as the time precision configured for the current DATABASE.
- return_timestamp specifies whether the function returns a timestamp type: when set to 1 it returns TIMESTAMP, when set to 0 it returns BIGINT; if not specified, BIGINT is returned by default.
### Time and Date Functions

View File

@ -69,7 +69,7 @@ TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其
| 1 | consumer_id | BIGINT | Unique ID of the consumer |
| 2 | consumer_group | BINARY(192) | Consumer group |
| 3 | client_id | BINARY(192) | User-defined string, shown as the client_id specified when the consumer was created |
| 4 | status | BINARY(20) | Current consumer status |
| 4 | status | BINARY(20) | Current consumer status. Possible values: ready (normal and available), lost (the connection has been lost), rebalancing (the vgroups that belong to the consumer are being reassigned), unknown (unknown state) |
| 5 | topics | BINARY(204) | Subscribed topic. If multiple topics are subscribed, one row is shown per topic |
| 6 | up_time | TIMESTAMP | Time of the first connection to taosd |
| 7 | subscribe_time | TIMESTAMP | Time of the most recent subscription |

View File

@ -27,7 +27,7 @@ description: "TDengine 3.0 版本的语法变更说明"
| - | :------- | :-------- | :------- |
| 1 | ALTER ACCOUNT | Deprecated | An Enterprise Edition feature in 2.x; no longer supported in 3.0. The syntax is temporarily kept and executing it returns the error "This statement is no longer supported".
| 2 | ALTER ALL DNODES | Added | Modifies the parameters of all dnodes.
| 3 | ALTER DATABASE | Modified | Deprecated<ul><li>QUORUM: the number of replica confirmations required for a write. In 3.0, STRICT specifies strong or weak consistency; STRICT cannot be modified in 3.0.0. </li><li>BLOCKS: the number of memory blocks used by each vnode. In 3.0, BUFFER specifies the size of the write cache pool of each vnode. </li><li>UPDATE: the supported update mode. In 3.0, all databases support partial-column updates. </li><li>CACHELAST: the mode for caching the newest row of data. Replaced by CACHEMODEL in 3.0. </li><li>COMP: cannot be modified in 3.0. </li><br/>Added<li>CACHEMODEL: whether to cache the most recent subtable data in memory. </li><li>CACHESIZE: the memory size for caching the most recent subtable data. </li><li>WAL_FSYNC_PERIOD: replaces the former FSYNC parameter. </li><li>WAL_LEVEL: replaces the former WAL parameter. </li><li>WAL_RETENTION_PERIOD: added in 3.0.4.0; extra retention policy for WAL files, used for data subscription. </li><li>WAL_RETENTION_SIZE: added in 3.0.4.0; extra retention policy for WAL files, used for data subscription. <br/>Modified</li><li>REPLICA: cannot be modified in 3.0.0. </li><li>KEEP: 3.0 adds support for values with units. </li></ul>
| 3 | ALTER DATABASE | Modified | Deprecated<ul><li>QUORUM: the number of replica confirmations required for a write. TDengine 3.0 provides strict consistency by default and does not support changing it to weak consistency. </li><li>BLOCKS: the number of memory blocks used by each vnode. In 3.0, BUFFER specifies the size of the write cache pool of each vnode. </li><li>UPDATE: the supported update mode. In 3.0, all databases support partial-column updates. </li><li>CACHELAST: the mode for caching the newest row of data. Replaced by CACHEMODEL in 3.0. </li><li>COMP: cannot be modified in 3.0. </li><br/>Added<li>CACHEMODEL: whether to cache the most recent subtable data in memory. </li><li>CACHESIZE: the memory size for caching the most recent subtable data. </li><li>WAL_FSYNC_PERIOD: replaces the former FSYNC parameter. </li><li>WAL_LEVEL: replaces the former WAL parameter. </li><li>WAL_RETENTION_PERIOD: added in 3.0.4.0; extra retention policy for WAL files, used for data subscription. </li><li>WAL_RETENTION_SIZE: added in 3.0.4.0; extra retention policy for WAL files, used for data subscription. <br/>Modified</li><li>REPLICA: cannot be modified in 3.0.0. </li><li>KEEP: 3.0 adds support for values with units. </li></ul>
| 4 | ALTER STABLE | Modified | Deprecated<ul><li>CHANGE TAG: renamed a tag column. Replaced by RENAME TAG in 3.0. <br/>Added</li><li>RENAME TAG: replaces the former CHANGE TAG clause. </li><li>COMMENT: modifies the comment of a supertable. </li></ul>
| 5 | ALTER TABLE | Modified | Deprecated<ul><li>CHANGE TAG: renamed a tag column. Replaced by RENAME TAG in 3.0. <br/>Added</li><li>RENAME TAG: replaces the former CHANGE TAG clause. </li><li>COMMENT: modifies the comment of a table. </li><li>TTL: modifies the time-to-live of a table. </li></ul>
| 6 | ALTER USER | Modified | Deprecated<ul><li>PRIVILEGE: modified user permissions. In 3.0, GRANT and REVOKE are used to grant and revoke permissions. <br/>Added</li><li>ENABLE: enables or disables the user. </li><li>SYSINFO: sets whether the user can view system information. </li></ul>

View File

@ -54,94 +54,91 @@ taosAdapter 支持通过命令行参数、环境变量和配置文件来进行
```shell
Usage of taosAdapter:
--collectd.db string collectd db name. Env "TAOS_ADAPTER_COLLECTD_DB" (default "collectd")
--collectd.enable enable collectd. Env "TAOS_ADAPTER_COLLECTD_ENABLE" (default true)
--collectd.password string collectd password. Env "TAOS_ADAPTER_COLLECTD_PASSWORD" (default "taosdata")
--collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045)
--collectd.ttl int collectd data ttl. Env "TAOS_ADAPTER_COLLECTD_TTL"
--collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root")
--collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10)
-c, --config string config path default /etc/taos/taosadapter.toml
--cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true)
--cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials"
--cors.allowHeaders stringArray cors allow HEADERS. Env "TAOS_ADAPTER_ALLOW_HEADERS"
--cors.allowOrigins stringArray cors allow origins. Env "TAOS_ADAPTER_ALLOW_ORIGINS"
--cors.allowWebSockets cors allow WebSockets. Env "TAOS_ADAPTER_CORS_ALLOW_WebSockets"
--cors.exposeHeaders stringArray cors expose headers. Env "TAOS_ADAPTER_Expose_Headers"
--debug enable debug mode. Env "TAOS_ADAPTER_DEBUG" (default true)
--help Print this help message and exit
--httpCodeServerError Use a non-200 http status code when taosd returns an error. Env "TAOS_ADAPTER_HTTP_CODE_SERVER_ERROR"
--influxdb.enable enable influxdb. Env "TAOS_ADAPTER_INFLUXDB_ENABLE" (default true)
--log.enableRecordHttpSql whether to record http sql. Env "TAOS_ADAPTER_LOG_ENABLE_RECORD_HTTP_SQL"
--log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos")
--log.rotationCount uint log rotation count. Env "TAOS_ADAPTER_LOG_ROTATION_COUNT" (default 30)
--log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_ROTATION_SIZE" (default "1GB")
--log.rotationTime duration log rotation time. Env "TAOS_ADAPTER_LOG_ROTATION_TIME" (default 24h0m0s)
--log.sqlRotationCount uint record sql log rotation count. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_COUNT" (default 2)
--log.sqlRotationSize string record sql log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_SIZE" (default "1GB")
--log.sqlRotationTime duration record sql log rotation time. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_TIME" (default 24h0m0s)
--logLevel string log level (panic fatal error warn warning info debug trace). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info")
--monitor.collectDuration duration Set monitor duration. Env "TAOS_ADAPTER_MONITOR_COLLECT_DURATION" (default 3s)
--monitor.disable Whether to disable monitoring. Env "TAOS_ADAPTER_MONITOR_DISABLE"
--monitor.disableCollectClientIP Whether to disable collecting clientIP. Env "TAOS_ADAPTER_MONITOR_DISABLE_COLLECT_CLIENT_IP"
--monitor.identity string The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_ADAPTER_MONITOR_IDENTITY"
--monitor.incgroup Whether running in cgroup. Env "TAOS_ADAPTER_MONITOR_INCGROUP"
--monitor.password string TDengine password. Env "TAOS_ADAPTER_MONITOR_PASSWORD" (default "taosdata")
--monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_ADAPTER_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80)
--monitor.pauseQueryMemoryThreshold float Memory percentage threshold for pause query. Env "TAOS_ADAPTER_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70)
--monitor.user string TDengine user. Env "TAOS_ADAPTER_MONITOR_USER" (default "root")
--monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_ADAPTER_MONITOR_WRITE_INTERVAL" (default 30s)
--monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_ADAPTER_MONITOR_WRITE_TO_TD"
--node_exporter.caCertFile string node_exporter ca cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CA_CERT_FILE"
--node_exporter.certFile string node_exporter cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CERT_FILE"
--node_exporter.db string node_exporter db name. Env "TAOS_ADAPTER_NODE_EXPORTER_DB" (default "node_exporter")
--node_exporter.enable enable node_exporter. Env "TAOS_ADAPTER_NODE_EXPORTER_ENABLE"
--node_exporter.gatherDuration duration node_exporter gather duration. Env "TAOS_ADAPTER_NODE_EXPORTER_GATHER_DURATION" (default 5s)
--node_exporter.httpBearerTokenString string node_exporter http bearer token. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_BEARER_TOKEN_STRING"
--node_exporter.httpPassword string node_exporter http password. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_PASSWORD"
--node_exporter.httpUsername string node_exporter http username. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_USERNAME"
--node_exporter.insecureSkipVerify node_exporter skip ssl check. Env "TAOS_ADAPTER_NODE_EXPORTER_INSECURE_SKIP_VERIFY" (default true)
--node_exporter.keyFile string node_exporter cert key file path. Env "TAOS_ADAPTER_NODE_EXPORTER_KEY_FILE"
--node_exporter.password string node_exporter password. Env "TAOS_ADAPTER_NODE_EXPORTER_PASSWORD" (default "taosdata")
--node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s)
--node_exporter.ttl int node_exporter data ttl. Env "TAOS_ADAPTER_NODE_EXPORTER_TTL"
--node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100])
--node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root")
--opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true)
--opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1)
--opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb])
--opentsdb_telnet.enable enable opentsdb telnet,warning: without auth info(default false). Env "TAOS_ADAPTER_OPENTSDB_TELNET_ENABLE"
--opentsdb_telnet.flushInterval duration opentsdb_telnet flush interval (0s means not valid) . Env "TAOS_ADAPTER_OPENTSDB_TELNET_FLUSH_INTERVAL"
--opentsdb_telnet.maxTCPConnections int max tcp connections. Env "TAOS_ADAPTER_OPENTSDB_TELNET_MAX_TCP_CONNECTIONS" (default 250)
--opentsdb_telnet.password string opentsdb_telnet password. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PASSWORD" (default "taosdata")
--opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049])
--opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE"
--opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL"
--opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
--pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT"
--pool.maxConnect int max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT"
--pool.maxIdle int max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE"
-P, --port int http port. Env "TAOS_ADAPTER_PORT" (default 6041)
--prometheus.enable enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true)
--restfulRowLimit int restful returns the maximum number of rows (-1 means no limit). Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1)
--statsd.allowPendingMessages int statsd allow pending messages. Env "TAOS_ADAPTER_STATSD_ALLOW_PENDING_MESSAGES" (default 50000)
--statsd.db string statsd db name. Env "TAOS_ADAPTER_STATSD_DB" (default "statsd")
--statsd.deleteCounters statsd delete counter cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_COUNTERS" (default true)
--statsd.deleteGauges statsd delete gauge cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_GAUGES" (default true)
--statsd.deleteSets statsd delete set cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_SETS" (default true)
--statsd.deleteTimings statsd delete timing cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_TIMINGS" (default true)
--statsd.enable enable statsd. Env "TAOS_ADAPTER_STATSD_ENABLE" (default true)
--statsd.gatherInterval duration statsd gather interval. Env "TAOS_ADAPTER_STATSD_GATHER_INTERVAL" (default 5s)
--statsd.maxTCPConnections int statsd max tcp connections. Env "TAOS_ADAPTER_STATSD_MAX_TCP_CONNECTIONS" (default 250)
--statsd.password string statsd password. Env "TAOS_ADAPTER_STATSD_PASSWORD" (default "taosdata")
--statsd.port int statsd server port. Env "TAOS_ADAPTER_STATSD_PORT" (default 6044)
--statsd.protocol string statsd protocol [tcp or udp]. Env "TAOS_ADAPTER_STATSD_PROTOCOL" (default "udp")
--statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE"
--statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL"
--statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root")
--statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10)
--taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE"
--version Print the version and exit
--collectd.db string collectd db name. Env "TAOS_ADAPTER_COLLECTD_DB" (default "collectd")
--collectd.enable enable collectd. Env "TAOS_ADAPTER_COLLECTD_ENABLE" (default true)
--collectd.password string collectd password. Env "TAOS_ADAPTER_COLLECTD_PASSWORD" (default "taosdata")
--collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045)
--collectd.ttl int collectd data ttl. Env "TAOS_ADAPTER_COLLECTD_TTL"
--collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root")
--collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10)
-c, --config string config path default /etc/taos/taosadapter.toml
--cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true)
--cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials"
--cors.allowHeaders stringArray cors allow HEADERS. Env "TAOS_ADAPTER_ALLOW_HEADERS"
--cors.allowOrigins stringArray cors allow origins. Env "TAOS_ADAPTER_ALLOW_ORIGINS"
--cors.allowWebSockets cors allow WebSockets. Env "TAOS_ADAPTER_CORS_ALLOW_WebSockets"
--cors.exposeHeaders stringArray cors expose headers. Env "TAOS_ADAPTER_Expose_Headers"
--debug enable debug mode. Env "TAOS_ADAPTER_DEBUG" (default true)
--help Print this help message and exit
--httpCodeServerError Use a non-200 http status code when server returns an error. Env "TAOS_ADAPTER_HTTP_CODE_SERVER_ERROR"
--influxdb.enable enable influxdb. Env "TAOS_ADAPTER_INFLUXDB_ENABLE" (default true)
--log.enableRecordHttpSql whether to record http sql. Env "TAOS_ADAPTER_LOG_ENABLE_RECORD_HTTP_SQL"
--log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos")
--log.rotationCount uint log rotation count. Env "TAOS_ADAPTER_LOG_ROTATION_COUNT" (default 30)
--log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_ROTATION_SIZE" (default "1GB")
--log.rotationTime duration log rotation time. Env "TAOS_ADAPTER_LOG_ROTATION_TIME" (default 24h0m0s)
--log.sqlRotationCount uint record sql log rotation count. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_COUNT" (default 2)
--log.sqlRotationSize string record sql log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_SIZE" (default "1GB")
--log.sqlRotationTime duration record sql log rotation time. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_TIME" (default 24h0m0s)
--logLevel string log level (panic fatal error warn warning info debug trace). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info")
--monitor.collectDuration duration Set monitor duration. Env "TAOS_ADAPTER_MONITOR_COLLECT_DURATION" (default 3s)
--monitor.disable Whether to disable monitoring. Env "TAOS_ADAPTER_MONITOR_DISABLE"
--monitor.disableCollectClientIP Whether to disable collecting clientIP. Env "TAOS_ADAPTER_MONITOR_DISABLE_COLLECT_CLIENT_IP"
--monitor.identity string The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_ADAPTER_MONITOR_IDENTITY"
--monitor.incgroup Whether running in cgroup. Env "TAOS_ADAPTER_MONITOR_INCGROUP"
--monitor.password string TDengine password. Env "TAOS_ADAPTER_MONITOR_PASSWORD" (default "taosdata")
--monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_ADAPTER_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80)
--monitor.pauseQueryMemoryThreshold float Memory percentage threshold for pause query. Env "TAOS_ADAPTER_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70)
--monitor.user string TDengine user. Env "TAOS_ADAPTER_MONITOR_USER" (default "root")
--monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_ADAPTER_MONITOR_WRITE_INTERVAL" (default 30s)
--monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_ADAPTER_MONITOR_WRITE_TO_TD"
--node_exporter.caCertFile string node_exporter ca cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CA_CERT_FILE"
--node_exporter.certFile string node_exporter cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CERT_FILE"
--node_exporter.db string node_exporter db name. Env "TAOS_ADAPTER_NODE_EXPORTER_DB" (default "node_exporter")
--node_exporter.enable enable node_exporter. Env "TAOS_ADAPTER_NODE_EXPORTER_ENABLE"
--node_exporter.gatherDuration duration node_exporter gather duration. Env "TAOS_ADAPTER_NODE_EXPORTER_GATHER_DURATION" (default 5s)
--node_exporter.httpBearerTokenString string node_exporter http bearer token. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_BEARER_TOKEN_STRING"
--node_exporter.httpPassword string node_exporter http password. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_PASSWORD"
--node_exporter.httpUsername string node_exporter http username. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_USERNAME"
--node_exporter.insecureSkipVerify node_exporter skip ssl check. Env "TAOS_ADAPTER_NODE_EXPORTER_INSECURE_SKIP_VERIFY" (default true)
--node_exporter.keyFile string node_exporter cert key file path. Env "TAOS_ADAPTER_NODE_EXPORTER_KEY_FILE"
--node_exporter.password string node_exporter password. Env "TAOS_ADAPTER_NODE_EXPORTER_PASSWORD" (default "taosdata")
--node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s)
--node_exporter.ttl int node_exporter data ttl. Env "TAOS_ADAPTER_NODE_EXPORTER_TTL"
--node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100])
--node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root")
--opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true)
--opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1)
--opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb])
--opentsdb_telnet.enable enable opentsdb telnet,warning: without auth info(default false). Env "TAOS_ADAPTER_OPENTSDB_TELNET_ENABLE"
--opentsdb_telnet.flushInterval duration opentsdb_telnet flush interval (0s means not valid) . Env "TAOS_ADAPTER_OPENTSDB_TELNET_FLUSH_INTERVAL"
--opentsdb_telnet.maxTCPConnections int max tcp connections. Env "TAOS_ADAPTER_OPENTSDB_TELNET_MAX_TCP_CONNECTIONS" (default 250)
--opentsdb_telnet.password string opentsdb_telnet password. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PASSWORD" (default "taosdata")
--opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049])
--opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE"
--opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL"
--opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
--pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT"
--pool.maxConnect int max connections to server. Env "TAOS_ADAPTER_POOL_MAX_CONNECT"
--pool.maxIdle int max idle connections to server. Env "TAOS_ADAPTER_POOL_MAX_IDLE"
-P, --port int http port. Env "TAOS_ADAPTER_PORT" (default 6041)
--prometheus.enable enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true)
--restfulRowLimit int restful returns the maximum number of rows (-1 means no limit). Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1)
--smlAutoCreateDB Whether to automatically create db when writing with schemaless. Env "TAOS_ADAPTER_SML_AUTO_CREATE_DB"
--statsd.allowPendingMessages int statsd allow pending messages. Env "TAOS_ADAPTER_STATSD_ALLOW_PENDING_MESSAGES" (default 50000)
--statsd.db string statsd db name. Env "TAOS_ADAPTER_STATSD_DB" (default "statsd")
--statsd.deleteCounters statsd delete counter cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_COUNTERS" (default true)
--statsd.deleteGauges statsd delete gauge cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_GAUGES" (default true)
--statsd.deleteSets statsd delete set cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_SETS" (default true)
--statsd.deleteTimings statsd delete timing cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_TIMINGS" (default true)
--statsd.enable enable statsd. Env "TAOS_ADAPTER_STATSD_ENABLE" (default true)
--statsd.gatherInterval duration statsd gather interval. Env "TAOS_ADAPTER_STATSD_GATHER_INTERVAL" (default 5s)
--statsd.maxTCPConnections int statsd max tcp connections. Env "TAOS_ADAPTER_STATSD_MAX_TCP_CONNECTIONS" (default 250)
--statsd.password string statsd password. Env "TAOS_ADAPTER_STATSD_PASSWORD" (default "taosdata")
--statsd.port int statsd server port. Env "TAOS_ADAPTER_STATSD_PORT" (default 6044)
--statsd.protocol string statsd protocol [tcp or udp]. Env "TAOS_ADAPTER_STATSD_PROTOCOL" (default "udp")
--statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE"
--statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL"
--statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root")
--statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10)
--taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE"
--tmq.releaseIntervalMultiplierForAutocommit int When set to autocommit, the interval for message release is a multiple of the autocommit interval, with a default value of 2 and a minimum value of 1 and a maximum value of 10. Env "TAOS_ADAPTER_TMQ_RELEASE_INTERVAL_MULTIPLIER_FOR_AUTOCOMMIT" (default 2)
--version Print the version and exit
```
Note:
@ -331,6 +328,10 @@ taosAdapter uses the `restfulRowLimit` parameter to control the number of returned rows; -1
taosAdapter uses the `httpCodeServerError` parameter to control whether a non-200 HTTP status code is returned when the C interface reports an error. When this parameter is set to true, taosAdapter returns different HTTP status codes according to the error code returned by C. For details, see [HTTP response codes](../../connector/rest-api/#http-响应码).
## Configure whether schemaless writes automatically create a database
Starting with version 3.0.4.0, taosAdapter provides the `smlAutoCreateDB` parameter to control whether a database is created automatically when data is written through a schemaless protocol. The default value is false, meaning that the database is not created automatically and must be created manually before schemaless writes can proceed.
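With `smlAutoCreateDB` left at its default of false, the target database has to exist before the first schemaless write arrives. A minimal sketch (the database name `sml_db` is only an example):

```sql
-- create the target database up front; subsequent schemaless writes
-- then create the supertables and child tables inside it on demand
CREATE DATABASE IF NOT EXISTS sml_db;
```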
## Troubleshooting
You can check the running status of taosAdapter with the command `systemctl status taosadapter`.

View File

@ -626,6 +626,15 @@ The valid value of charset is UTF-8.
| Default | 1 |
| Note | The directory in which the core file is generated depends on how taosd is started: 1. When started with systemctl start taosd, the core file is generated under the root directory. <br/> 2. When started manually, it is generated in the directory from which taosd is executed. |
### enableScience
| Attribute | Description |
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
| Applicable | Client only (TDengine CLI) |
| Meaning | Whether to display floating-point numbers in scientific notation |
| Value Range | 0: no, 1: yes |
| Default | 0 |
### udf
| Attribute | Description |

View File

@ -38,3 +38,303 @@ chmod +x TDinsight.sh
Run the script and restart the Grafana service, then open the dashboard at `http://localhost:3000/d/tdinsight`.
For more use cases and limitations, refer to the [TDinsight](/reference/tdinsight/) documentation.
## The log Database
The data shown on the TDinsight dashboard comes from the log database (the default database for storing monitoring data; this can be changed in the taoskeeper configuration file, see the [taoskeeper documentation](/reference/taosKeeper)). After taoskeeper starts, it automatically creates the log database and writes the collected monitoring data into it.
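A quick way to confirm that monitoring data is being collected is to list the supertables taoskeeper has created, a sketch assuming the default database name `log`:

```sql
-- list the monitoring supertables created by taoskeeper
SHOW log.STABLES;
```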
### cluster\_info Table
The `cluster_info` table records cluster information.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|first\_ep|VARCHAR||first ep of the cluster|
|first\_ep\_dnode\_id|INT||dnode id of the cluster's first ep|
|version|VARCHAR||tdengine version, e.g. 3.0.4.0|
|master\_uptime|FLOAT||uptime of the current master node, in days|
|monitor_interval|INT||monitor interval, in seconds|
|dbs\_total|INT||total number of databases|
|tbs\_total|BIGINT||total number of tables in the current cluster|
|stbs\_total|INT||total number of stables in the current cluster|
|dnodes\_total|INT||total number of dnodes in the current cluster|
|dnodes\_alive|INT||number of alive dnodes in the current cluster|
|mnodes\_total|INT||total number of mnodes in the current cluster|
|mnodes\_alive|INT||number of alive mnodes in the current cluster|
|vgroups\_total|INT||total number of vgroups in the current cluster|
|vgroups\_alive|INT||number of alive vgroups in the current cluster|
|vnodes\_total|INT||total number of vnodes in the current cluster|
|vnodes\_alive|INT||number of alive vnodes in the current cluster|
|connections\_total|INT||total number of connections in the current cluster|
|topics\_total|INT||total number of topics in the current cluster|
|streams\_total|INT||total number of streams in the current cluster|
|protocol|INT||protocol version, currently 1|
|cluster\_id|NCHAR|TAG|cluster id|
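For example, the most recent health snapshot of the cluster can be pulled with a query like the following, a sketch over the columns listed above:

```sql
-- latest counts of alive vs. total dnodes, mnodes and vgroups
SELECT ts, dnodes_alive, dnodes_total, mnodes_alive, mnodes_total,
       vgroups_alive, vgroups_total
  FROM log.cluster_info
 ORDER BY ts DESC
 LIMIT 1;
```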
### d\_info Table
The `d_info` table records dnode status information.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|status|VARCHAR||dnode status|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### m\_info Table
The `m_info` table records mnode role information.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|role|VARCHAR||mnode role: leader or follower|
|mnode\_id|INT|TAG|master node id|
|mnode\_ep|NCHAR|TAG|master node endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### dnodes\_info Table
The `dnodes_info` table records per-dnode metrics.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|uptime|FLOAT||dnode uptime|
|cpu\_engine|FLOAT||taosd cpu usage, read from `/proc/<taosd_pid>/stat`|
|cpu\_system|FLOAT||server cpu usage, read from `/proc/stat`|
|cpu\_cores|FLOAT||number of server cpu cores|
|mem\_engine|INT||taosd memory usage, read from `/proc/<taosd_pid>/status`|
|mem\_system|INT||server memory usage|
|mem\_total|INT||total server memory, in KB|
|disk\_engine|INT|||
|disk\_used|BIGINT||used space of the disk on which the data dir is mounted, in bytes|
|disk\_total|BIGINT||total capacity of the disk on which the data dir is mounted, in bytes|
|net\_in|FLOAT||network throughput: received bytes read from `/proc/net/dev`, in bytes per second|
|net\_out|FLOAT||network throughput: transmit bytes read from `/proc/net/dev`, in bytes per second|
|io\_read|FLOAT||io throughput: rate derived from the rchar value read from `/proc/<taosd_pid>/io` and the previous reading, in bytes per second|
|io\_write|FLOAT||io throughput: rate derived from the wchar value read from `/proc/<taosd_pid>/io` and the previous reading, in bytes per second|
|io\_read\_disk|FLOAT||disk io throughput: read_bytes read from `/proc/<taosd_pid>/io`, in bytes per second|
|io\_write\_disk|FLOAT||disk io throughput: write_bytes read from `/proc/<taosd_pid>/io`, in bytes per second|
|req\_select|INT||number of query requests during the monitoring interval|
|req\_select\_rate|FLOAT||query request rate during the interval = `req_select / monitorInterval`|
|req\_insert|INT||number of individual records contained in insert requests during the interval|
|req\_insert\_success|INT||number of individual records contained in successfully processed insert requests during the interval|
|req\_insert\_rate|FLOAT||per-record insert rate during the interval = `req_insert / monitorInterval`|
|req\_insert\_batch|INT||number of insert requests during the interval|
|req\_insert\_batch\_success|INT||number of successful batch insert requests during the interval|
|req\_insert\_batch\_rate|FLOAT||insert request rate during the interval = `req_insert_batch / monitorInterval`|
|errors|INT||number of failed insert requests during the interval|
|vnodes\_num|INT||number of vnodes on the dnode|
|masters|INT||number of master nodes on the dnode|
|has\_mnode|INT||whether the dnode contains an mnode|
|has\_qnode|INT||whether the dnode contains a qnode|
|has\_snode|INT||whether the dnode contains an snode|
|has\_bnode|INT||whether the dnode contains a bnode|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
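This table is a natural source for per-dnode resource charts. A minimal sketch that averages CPU and memory usage per minute over the last hour (the endpoint value `localhost:6030` is only an example):

```sql
-- per-minute average CPU and memory usage of one dnode over the last hour
SELECT _wstart, AVG(cpu_engine) AS cpu_engine, AVG(mem_engine) AS mem_engine
  FROM log.dnodes_info
 WHERE dnode_ep = 'localhost:6030' AND ts > NOW - 1h
 INTERVAL(1m);
```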
### data\_dir Table
The `data_dir` table records information about the data directory.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||data directory, usually `/var/lib/taos`|
|level|INT||multi-tier storage level: 0, 1 or 2|
|avail|BIGINT||available space of the data directory|
|used|BIGINT||used space of the data directory|
|total|BIGINT||total space of the data directory|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
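For example, the current fill level of the data directory can be computed directly from the `used` and `total` columns, a sketch:

```sql
-- latest usage percentage of the data directory
SELECT ts, name, used * 100.0 / total AS used_pct
  FROM log.data_dir
 ORDER BY ts DESC
 LIMIT 1;
```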
### log\_dir Table
The `log_dir` table records information about the log directory.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||log directory name, usually `/var/log/taos/`|
|avail|BIGINT||available space of the log directory|
|used|BIGINT||used space of the log directory|
|total|BIGINT||total space of the log directory|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### temp\_dir Table
The `temp_dir` table records information about the temp directory.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||temp directory name, usually `/tmp/`|
|avail|BIGINT||available space of the temp directory|
|used|BIGINT||used space of the temp directory|
|total|BIGINT||total space of the temp directory|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### vgroups\_info Table
The `vgroups_info` table records virtual node group information.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|vgroup\_id|INT||vgroup id|
|database\_name|VARCHAR||name of the database the vgroup belongs to|
|tables\_num|BIGINT||number of tables in the vgroup|
|status|VARCHAR||vgroup status|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### vnodes\_role Table
The `vnodes_role` table records virtual node role information.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|vnode\_role|VARCHAR||vnode role: leader or follower|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### logs Table
The `logs` table records log entries.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|level|VARCHAR||log level|
|content|NCHAR||log content, no longer than 1024 bytes|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
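A sketch for pulling recent error-level entries from this table (it assumes error entries carry the level value `error`):

```sql
-- error-level log entries from the last 24 hours
SELECT ts, level, content
  FROM log.logs
 WHERE level = 'error' AND ts > NOW - 1d;
```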
### log\_summary Table
The `log_summary` table records log summary statistics.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|error|INT||total number of error-level logs|
|info|INT||total number of info-level logs|
|debug|INT||total number of debug-level logs|
|trace|INT||total number of trace-level logs|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### grants\_info Table
The `grants_info` table records license information.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|expire\_time|BIGINT||license expiration time; meaningful for the enterprise edition, the maximum bigint value for the community edition|
|timeseries\_used|BIGINT||number of time series in use|
|timeseries\_total|BIGINT||total number of licensed time series; the maximum bigint value for the open-source edition|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### keeper\_monitor Table
The `keeper_monitor` table records taoskeeper monitoring data.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|cpu|FLOAT||cpu usage|
|mem|FLOAT||memory usage|
|identify|NCHAR|TAG||
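To watch taoskeeper's own footprint over time, the rows can be windowed and split per `identify` tag, a sketch:

```sql
-- 5-minute average CPU and memory usage of each taoskeeper instance, last 6 hours
SELECT _wstart, AVG(cpu) AS cpu, AVG(mem) AS mem
  FROM log.keeper_monitor
 WHERE ts > NOW - 6h
 PARTITION BY identify
 INTERVAL(5m);
```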
### taosadapter\_restful\_http\_request\_total Table
The `taosadapter_restful_http_request_total` table records taosAdapter REST request information. It is created through schemaless writes, and its timestamp field is named `_ts`.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||metric value|
|client\_ip|NCHAR|TAG|client ip|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|request\_method|NCHAR|TAG|request method|
|request\_uri|NCHAR|TAG|request uri|
|status\_code|NCHAR|TAG|status code|
### taosadapter\_restful\_http\_request\_fail Table
The `taosadapter_restful_http_request_fail` table records failed taosAdapter REST requests. It is created through schemaless writes, and its timestamp field is named `_ts`.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||metric value|
|client\_ip|NCHAR|TAG|client ip|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|request\_method|NCHAR|TAG|request method|
|request\_uri|NCHAR|TAG|request uri|
|status\_code|NCHAR|TAG|status code|
### taosadapter\_restful\_http\_request\_in\_flight Table
The `taosadapter_restful_http_request_in_flight` table records taosAdapter REST requests currently in flight. It is created through schemaless writes, and its timestamp field is named `_ts`.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||metric value|
|endpoint|NCHAR|TAG|taosadapter endpoint|
### taosadapter\_restful\_http\_request\_summary\_milliseconds Table
The `taosadapter_restful_http_request_summary_milliseconds` table records summary statistics of taosAdapter REST requests. It is created through schemaless writes, and its timestamp field is named `_ts`.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|count|DOUBLE|||
|sum|DOUBLE|||
|0.5|DOUBLE|||
|0.9|DOUBLE|||
|0.99|DOUBLE|||
|0.1|DOUBLE|||
|0.2|DOUBLE|||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|request\_method|NCHAR|TAG|request method|
|request\_uri|NCHAR|TAG|request uri|
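The quantile columns make latency monitoring straightforward; for instance, the 99th-percentile request latency per URI over the last hour (a sketch; the backquotes are needed because the column name starts with a digit):

```sql
-- p99 REST request latency (milliseconds) per request URI, last hour
SELECT _ts, request_uri, `0.99` AS p99_ms
  FROM log.taosadapter_restful_http_request_summary_milliseconds
 WHERE _ts > NOW - 1h;
```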
### taosadapter\_system\_mem\_percent Table
The `taosadapter_system_mem_percent` table records taosAdapter memory usage. It is created through schemaless writes, and its timestamp field is named `_ts`.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||metric value|
|endpoint|NCHAR|TAG|taosadapter endpoint|
### taosadapter\_system\_cpu\_percent Table
The `taosadapter_system_cpu_percent` table records taosAdapter cpu usage. It is created through schemaless writes, and its timestamp field is named `_ts`.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||metric value|
|endpoint|NCHAR|TAG|taosadapter endpoint|

View File

@ -10,6 +10,10 @@ For installation packages of all TDengine 2.x versions, please visit [here](https://www.taosdata.com/all-do
import Release from "/components/ReleaseV3";
## 3.0.4.0
<Release type="tdengine" version="3.0.4.0" />
## 3.0.3.2
<Release type="tdengine" version="3.0.3.2" />

View File

@ -10,6 +10,10 @@ Download links for all versions of taosTools are listed below:
import Release from "/components/ReleaseV3";
## 2.4.12
<Release type="tools" version="2.4.12" />
## 2.4.11
<Release type="tools" version="2.4.11" />

View File

@ -112,6 +112,7 @@ extern int32_t tsQueryNodeChunkSize;
extern bool tsQueryUseNodeAllocator;
extern bool tsKeepColumnName;
extern bool tsEnableQueryHb;
extern bool tsEnableScience;
extern int32_t tsRedirectPeriod;
extern int32_t tsRedirectFactor;
extern int32_t tsRedirectMaxPeriod;

View File

@ -26,6 +26,7 @@ extern "C" {
typedef void* qTaskInfo_t;
typedef void* DataSinkHandle;
struct SRpcMsg;
struct SSubplan;
@ -91,6 +92,9 @@ void qSetTaskId(qTaskInfo_t tinfo, uint64_t taskId, uint64_t queryId);
int32_t qSetStreamOpOpen(qTaskInfo_t tinfo);
// todo refactor
void qGetCheckpointVersion(qTaskInfo_t tinfo, int64_t* dataVer, int64_t* ckId);
/**
* Set multiple input data blocks for the stream scan.
* @param tinfo
@ -119,7 +123,7 @@ int32_t qSetSMAInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks,
* @param isAdd
* @return
*/
int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bool isAdd);
int32_t qUpdateTableListForStreamScanner(qTaskInfo_t tinfo, const SArray* tableIdList, bool isAdd);
/**
* Create the exec task object according to task json
@ -163,6 +167,7 @@ void qCleanExecTaskBlockBuf(qTaskInfo_t tinfo);
* @return
*/
int32_t qAsyncKillTask(qTaskInfo_t tinfo, int32_t rspCode);
int32_t qKillTask(qTaskInfo_t tinfo, int32_t rspCode);
bool qTaskIsExecuting(qTaskInfo_t qinfo);
@ -182,21 +187,11 @@ int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len);
int32_t qDeserializeTaskStatus(qTaskInfo_t tinfo, const char* pInput, int32_t len);
STimeWindow getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t key);
/**
* return the scan info, in the form of tuple of two items, including table uid and current timestamp
* @param tinfo
* @param uid
* @param ts
* @return
*/
int32_t qGetStreamScanStatus(qTaskInfo_t tinfo, uint64_t* uid, int64_t* ts);
int32_t qStreamPrepareTsdbScan(qTaskInfo_t tinfo, uint64_t uid, int64_t ts);
SArray* qGetQueriedTableListInfo(qTaskInfo_t tinfo);
int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subType);
// int32_t qStreamScanMemData(qTaskInfo_t tinfo, const SSubmitReq* pReq, int64_t ver);
//
int32_t qStreamSetScanMemData(qTaskInfo_t tinfo, SPackedData submit);
void qStreamSetOpen(qTaskInfo_t tinfo);

View File

@ -189,7 +189,7 @@ bool fmIsScalarFunc(int32_t funcId);
bool fmIsVectorFunc(int32_t funcId);
bool fmIsIndefiniteRowsFunc(int32_t funcId);
bool fmIsStringFunc(int32_t funcId);
bool fmIsDatetimeFunc(int32_t funcId);
bool fmIsDateTimeFunc(int32_t funcId);
bool fmIsSelectFunc(int32_t funcId);
bool fmIsTimelineFunc(int32_t funcId);
bool fmIsTimeorderFunc(int32_t funcId);

View File

@ -165,6 +165,8 @@ static FORCE_INLINE int32_t udfColEnsureCapacity(SUdfColumn *pColumn, int32_t ne
if (tmp == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
uint32_t extend = BitmapLen(allocCapacity) - BitmapLen(data->rowsAlloc);
memset(tmp + BitmapLen(data->rowsAlloc), 0, extend);
data->fixLenCol.nullBitmap = tmp;
data->fixLenCol.nullBitmapLen = BitmapLen(allocCapacity);
int32_t oldLen = BitmapLen(existedRows);

View File

@ -194,6 +194,7 @@ typedef struct SRequestConnInfo {
typedef void (*__freeFunc)(void* param);
// todo add creator/destroyer function
typedef struct SMsgSendInfo {
__async_send_cb_fn_t fp; // async callback function
STargetInfo target; // for update epset

View File

@ -42,6 +42,7 @@ typedef struct STdbState {
typedef struct {
STdbState* pTdbState;
int32_t number;
int64_t checkPointId;
} SStreamState;
SStreamState* streamStateOpen(char* path, struct SStreamTask* pTask, bool specPath, int32_t szPage, int32_t pages);

View File

@ -13,8 +13,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "executor.h"
#include "os.h"
#include "executor.h"
#include "query.h"
#include "streamState.h"
#include "tdatablock.h"
@ -50,6 +50,7 @@ enum {
TASK_STATUS__RECOVER_PREPARE,
TASK_STATUS__RECOVER1,
TASK_STATUS__RECOVER2,
TASK_STATUS__RESTORE, // only available for source task to replay WAL from the checkpoint
};
enum {
@ -103,21 +104,8 @@ typedef struct {
int8_t type;
} SStreamQueueItem;
#if 0
typedef struct {
int8_t type;
int64_t ver;
int32_t* dataRef;
SSubmitReq* data;
} SStreamDataSubmit;
typedef struct {
int8_t type;
int64_t ver;
SArray* dataRefs; // SArray<int32_t*>
SArray* reqs; // SArray<SSubmitReq*>
} SStreamMergedSubmit;
#endif
typedef void FTbSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data);
typedef int32_t FTaskExpand(void* ahandle, SStreamTask* pTask, int64_t ver);
typedef struct {
int8_t type;
@ -219,21 +207,20 @@ static FORCE_INLINE void streamQueueProcessFail(SStreamQueue* queue) {
}
static FORCE_INLINE void* streamQueueCurItem(SStreamQueue* queue) {
//
return queue->qItem;
}
void* streamQueueNextItem(SStreamQueue* queue);
SStreamDataSubmit2* streamDataSubmitNew(SPackedData submit);
SStreamDataSubmit2* streamDataSubmitNew(SPackedData submit, int32_t type);
void streamDataSubmitDestroy(SStreamDataSubmit2* pDataSubmit);
SStreamDataSubmit2* streamSubmitBlockClone(SStreamDataSubmit2* pSubmit);
typedef struct {
char* qmsg;
// followings are not applicable to encoder and decoder
void* executor;
char* qmsg;
void* pExecutor; // not applicable to encoder and decoder
struct SWalReader* pWalReader; // not applicable to encoder and decoder
} STaskExec;
typedef struct {
@ -248,16 +235,13 @@ typedef struct {
SUseDbRsp dbInfo;
} STaskDispatcherShuffle;
typedef void FTbSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data);
typedef struct {
int64_t stbUid;
char stbFullName[TSDB_TABLE_FNAME_LEN];
SSchemaWrapper* pSchemaWrapper;
// not applicable to encoder and decoder
void* vnode;
FTbSink* tbSinkFunc;
STSchema* pTSchema;
void* vnode; // not available to encoder and decoder
FTbSink* tbSinkFunc;
STSchema* pTSchema;
} STaskSinkTb;
typedef void FSmaSink(void* vnode, int64_t smaId, const SArray* data);
@ -280,24 +264,34 @@ typedef struct {
SEpSet epSet;
} SStreamChildEpInfo;
struct SStreamTask {
int64_t streamId;
int32_t taskId;
int32_t totalLevel;
int8_t taskLevel;
int8_t outputType;
int16_t dispatchMsgType;
typedef struct SStreamId {
int64_t streamId;
int32_t taskId;
const char* idStr;
} SStreamId;
typedef struct SCheckpointInfo {
int64_t id;
int64_t version; // offset in WAL
} SCheckpointInfo;
typedef struct SStreamStatus {
int8_t taskStatus;
int8_t schedStatus;
} SStreamStatus;
// node info
int32_t selfChildId;
int32_t nodeId;
SEpSet epSet;
int64_t recoverSnapVer;
int64_t startVer;
struct SStreamTask {
SStreamId id;
int32_t totalLevel;
int8_t taskLevel;
int8_t outputType;
int16_t dispatchMsgType;
SStreamStatus status;
int32_t selfChildId;
int32_t nodeId;
SEpSet epSet;
SCheckpointInfo chkInfo;
STaskExec exec;
// fill history
int8_t fillHistory;
@ -307,9 +301,6 @@ struct SStreamTask {
int32_t nextCheckId;
SArray* checkpointInfo; // SArray<SStreamCheckpointInfo>
// exec
STaskExec exec;
// output
union {
STaskDispatcherFixedEp fixedEpDispatcher;
@ -319,44 +310,54 @@ struct SStreamTask {
STaskSinkFetch fetchSink;
};
int8_t inputStatus;
int8_t outputStatus;
// STaosQueue* inputQueue1;
// STaosQall* inputQall;
int8_t inputStatus;
int8_t outputStatus;
SStreamQueue* inputQueue;
SStreamQueue* outputQueue;
// trigger
int8_t triggerStatus;
int64_t triggerParam;
void* timer;
int8_t triggerStatus;
int64_t triggerParam;
void* timer;
SMsgCb* pMsgCb; // msg handle
SStreamState* pState; // state backend
// msg handle
SMsgCb* pMsgCb;
// state backend
SStreamState* pState;
// do not serialize
int32_t recoverTryingDownstream;
int32_t recoverWaitingUpstream;
int64_t checkReqId;
SArray* checkReqIds; // shuffle
int32_t refCnt;
int64_t checkpointingId;
int32_t checkpointAlignCnt;
// the followings attributes don't be serialized
int32_t recoverTryingDownstream;
int32_t recoverWaitingUpstream;
int64_t checkReqId;
SArray* checkReqIds; // shuffle
int32_t refCnt;
int64_t checkpointingId;
int32_t checkpointAlignCnt;
struct SStreamMeta* pMeta;
};
// meta
typedef struct SStreamMeta {
char* path;
TDB* db;
TTB* pTaskDb;
TTB* pCheckpointDb;
SHashObj* pTasks;
void* ahandle;
TXN* txn;
FTaskExpand* expandFunc;
int32_t vgId;
SRWLatch lock;
int8_t walScan;
bool quit;
} SStreamMeta;
int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo);
int32_t tDecodeStreamEpInfo(SDecoder* pDecoder, SStreamChildEpInfo* pInfo);
SStreamTask* tNewSStreamTask(int64_t streamId);
int32_t tEncodeSStreamTask(SEncoder* pEncoder, const SStreamTask* pTask);
int32_t tDecodeSStreamTask(SDecoder* pDecoder, SStreamTask* pTask);
void tFreeSStreamTask(SStreamTask* pTask);
int32_t tAppendDataForStream(SStreamTask* pTask, SStreamQueueItem* pItem);
SStreamTask* tNewStreamTask(int64_t streamId);
int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask);
int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask);
void tFreeStreamTask(SStreamTask* pTask);
int32_t tAppendDataToInputQueue(SStreamTask* pTask, SStreamQueueItem* pItem);
bool tInputQueueIsFull(const SStreamTask* pTask);
static FORCE_INLINE void streamTaskInputFail(SStreamTask* pTask) {
atomic_store_8(&pTask->inputStatus, TASK_INPUT_STATUS__FAILED);
@ -564,40 +565,22 @@ int32_t streamAggRecoverPrepare(SStreamTask* pTask);
// int32_t streamAggChildrenRecoverFinish(SStreamTask* pTask);
int32_t streamProcessRecoverFinishReq(SStreamTask* pTask, int32_t childId);
// expand and deploy
typedef int32_t FTaskExpand(void* ahandle, SStreamTask* pTask, int64_t ver);
// meta
typedef struct SStreamMeta {
char* path;
TDB* db;
TTB* pTaskDb;
TTB* pCheckpointDb;
SHashObj* pTasks;
SHashObj* pRecoverStatus;
void* ahandle;
TXN* txn;
FTaskExpand* expandFunc;
int32_t vgId;
SRWLatch lock;
} SStreamMeta;
SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId);
void streamMetaClose(SStreamMeta* streamMeta);
int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask);
int32_t streamMetaAddTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask);
int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t startVer, char* msg, int32_t msgLen);
// SStreamTask* streamMetaGetTask(SStreamMeta* pMeta, int32_t taskId);
int32_t streamMetaAddDeployedTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask);
int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t checkpointVer, char* msg, int32_t msgLen);
int32_t streamMetaGetNumOfTasks(const SStreamMeta* pMeta);
SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int32_t taskId);
void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
void streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId);
int32_t streamMetaBegin(SStreamMeta* pMeta);
int32_t streamMetaCommit(SStreamMeta* pMeta);
int32_t streamMetaRollBack(SStreamMeta* pMeta);
int32_t streamLoadTasks(SStreamMeta* pMeta, int64_t ver);
int32_t streamMetaBegin(SStreamMeta* pMeta);
int32_t streamMetaCommit(SStreamMeta* pMeta);
int32_t streamMetaRollBack(SStreamMeta* pMeta);
int32_t streamLoadTasks(SStreamMeta* pMeta, int64_t ver);
// checkpoint
int32_t streamProcessCheckpointSourceReq(SStreamMeta* pMeta, SStreamTask* pTask, SStreamCheckpointSourceReq* pReq);

View File

@ -138,7 +138,8 @@ typedef struct {
int8_t enableRef;
} SWalFilterCond;
typedef struct {
// todo hide this struct
typedef struct SWalReader {
SWal *pWal;
int64_t readerId;
TdFilePtr pLogFile;
@ -196,6 +197,7 @@ void walReadReset(SWalReader *pReader);
int32_t walReadVer(SWalReader *pRead, int64_t ver);
int32_t walReadSeekVer(SWalReader *pRead, int64_t ver);
int32_t walNextValidMsg(SWalReader *pRead);
int64_t walReaderGetCurrentVer(const SWalReader* pReader);
// only for tq usage
void walSetReaderCapacity(SWalReader *pRead, int32_t capacity);

View File

@ -1,7 +1,7 @@
#!/usr/bin/expect
set packageName [lindex $argv 0]
set packageSuffix [lindex $argv 1]
set timeout 3
set timeout 30
if { ${packageSuffix} == "deb" } {
spawn dpkg -i ${packageName}
} elseif { ${packageSuffix} == "rpm"} {

View File

@ -12,7 +12,7 @@ ENV TINI_VERSION v0.19.0
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${cpuType} /tini
ENV DEBIAN_FRONTEND=noninteractive
WORKDIR /root/
RUN tar -zxf ${pkgFile} && cd /root/${dirName}/ && /bin/bash install.sh -e no && cd /root && rm /root/${pkgFile} && rm -rf /root/${dirName} && apt-get update && apt-get install -y locales tzdata netcat && locale-gen en_US.UTF-8 && apt-get clean && rm -rf /var/lib/apt/lists/ && chmod +x /tini
RUN tar -zxf ${pkgFile} && cd /root/${dirName}/ && /bin/bash install.sh -e no && cd /root && rm /root/${pkgFile} && rm -rf /root/${dirName} && apt-get update && apt-get install -y locales tzdata netcat curl gdb vim tmux less net-tools valgrind && locale-gen en_US.UTF-8 && apt-get clean && rm -rf /var/lib/apt/lists/ && chmod +x /tini
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \
LC_CTYPE=en_US.UTF-8 \

View File

@ -246,7 +246,7 @@ if [ ! -f debRpmAutoInstall.sh ];then
echo '#!/usr/bin/expect ' > debRpmAutoInstall.sh
echo 'set packageName [lindex $argv 0]' >> debRpmAutoInstall.sh
echo 'set packageSuffix [lindex $argv 1]' >> debRpmAutoInstall.sh
echo 'set timeout 3 ' >> debRpmAutoInstall.sh
echo 'set timeout 30 ' >> debRpmAutoInstall.sh
echo 'if { ${packageSuffix} == "deb" } {' >> debRpmAutoInstall.sh
echo ' spawn dpkg -i ${packageName} ' >> debRpmAutoInstall.sh
echo '} elseif { ${packageSuffix} == "rpm"} {' >> debRpmAutoInstall.sh

View File

@ -575,11 +575,12 @@ function install_config() {
function install_share_etc() {
[ ! -d ${script_dir}/share/etc ] && return
for c in `ls ${script_dir}/share/etc/`; do
if [ -e /etc/$c ]; then
out=/etc/$c.new.`date +%F`
if [ -e /etc/${clientName2}/$c ]; then
out=/etc/${clientName2}/$c.new.`date +%F`
${csudo}cp -f ${script_dir}/share/etc/$c $out ||:
else
${csudo}cp -f ${script_dir}/share/etc/$c /etc/$c ||:
${csudo}mkdir -p /etc/${clientName2} >/dev/null 2>/dev/null ||:
${csudo}cp -f ${script_dir}/share/etc/$c /etc/${clientName2}/$c ||:
fi
done

View File

@ -1,9 +1,9 @@
TDengine is an open-source, cloud-native time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. With its built-in caching, stream processing, and data subscription capabilities, TDengine offers a simplified solution for time-series data processing.
To configure TDengine : edit /etc/taos/taos.cfg
To start service : launchctl start com.tdengine.taosd
To start Taos Adapter : launchctl start com.tdengine.taosadapter
To access TDengine : use taos in shell
• To configure TDengine, edit /etc/taos/taos.cfg
• To start service, run launchctl start com.tdengine.taosd
• To start Taos Adapter, run launchctl start com.tdengine.taosadapter
• To access TDengine from your local machine, run taos
If you're experiencing problems installing TDengine, check the file /var/log/taos/tdengine_install.log to help troubleshoot the installation.

View File

@ -1,9 +1,9 @@
TDengine is an open-source, cloud-native time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. With its built-in caching, stream processing, and data subscription capabilities, TDengine offers a simplified solution for time-series data processing.
Once it's installed, please take the steps below:
1: open a terminal/shell in Mac
2: if connecting to Cloud Service, follow the instructions on your cloud service account and configure the environment variable
3: if connecting to another TDengine Service, you can also view help information via "taos --help"
4: execute command taos
After the installation process is complete, perform the following steps to start using TDengine:
1: Open Terminal on your Mac.
2: To connect to a TDengine server using the default settings and credentials, run the taos command.
3: To connect to a TDengine server using custom settings or credentials, run taos --help for more information.
4: To connect to TDengine Cloud, follow the instructions on the Tools - TDengine CLI page in your TDengine Cloud account.
If you're experiencing problems installing TDengine, check the file /var/log/taos/tdengine_install.log to help troubleshoot the installation.
If any issues occur during installation, check the /var/log/taos/tdengine_install.log file to troubleshoot.

View File

@ -150,7 +150,7 @@ fi
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/${serverName}.deb
mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/${serverName}.rpm
mkdir -p ${install_dir}/share && cp -rf ${build_dir}/share/{etc,srv} ${install_dir}/share ||:
# mkdir -p ${install_dir}/share && cp -rf ${build_dir}/share/{etc,srv} ${install_dir}/share ||:
if [ $adapterName != "taosadapter" ]; then
mv ${install_dir}/cfg/${clientName2}adapter.toml ${install_dir}/cfg/$adapterName.toml
@ -322,6 +322,7 @@ if [[ $dbName == "taos" ]]; then
mkdir -p ${install_dir}/share/
cp -Rfap ${web_dir}/admin ${install_dir}/share/
cp ${web_dir}/png/taos.png ${install_dir}/share/admin/images/taos.png
cp -rf ${build_dir}/share/{etc,srv} ${install_dir}/share ||:
else
echo "directory not found for enterprise release: ${web_dir}/admin"
fi

View File

@ -36,14 +36,6 @@ extern "C" {
#include "tconfig.h"
#define CHECK_CODE_GOTO(expr, label) \
do { \
code = expr; \
if (TSDB_CODE_SUCCESS != code) { \
goto label; \
} \
} while (0)
#define ERROR_MSG_BUF_DEFAULT_SIZE 512
#define HEARTBEAT_INTERVAL 1500 // ms
@ -286,28 +278,7 @@ static FORCE_INLINE SReqResultInfo* tmqGetCurResInfo(TAOS_RES* res) {
return (SReqResultInfo*)&msg->resInfo;
}
static FORCE_INLINE SReqResultInfo* tmqGetNextResInfo(TAOS_RES* res, bool convertUcs4) {
SMqRspObj* pRspObj = (SMqRspObj*)res;
pRspObj->resIter++;
if (pRspObj->resIter < pRspObj->rsp.blockNum) {
SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(pRspObj->rsp.blockData, pRspObj->resIter);
if (pRspObj->rsp.withSchema) {
SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(pRspObj->rsp.blockSchema, pRspObj->resIter);
setResSchemaInfo(&pRspObj->resInfo, pSW->pSchema, pSW->nCols);
taosMemoryFreeClear(pRspObj->resInfo.row);
taosMemoryFreeClear(pRspObj->resInfo.pCol);
taosMemoryFreeClear(pRspObj->resInfo.length);
taosMemoryFreeClear(pRspObj->resInfo.convertBuf);
taosMemoryFreeClear(pRspObj->resInfo.convertJson);
}
setQueryResultFromRsp(&pRspObj->resInfo, pRetrieve, convertUcs4, false);
return &pRspObj->resInfo;
}
return NULL;
}
SReqResultInfo* tmqGetNextResInfo(TAOS_RES* res, bool convertUcs4);
static FORCE_INLINE SReqResultInfo* tscGetCurResInfo(TAOS_RES* res) {
if (TD_RES_QUERY(res)) return &(((SRequestObj*)res)->body.resInfo);
@ -320,7 +291,6 @@ extern int32_t clientConnRefPool;
extern int32_t timestampDeltaLimit;
extern int64_t lastClusterId;
__async_send_cb_fn_t getMsgRspHandle(int32_t msgType);
SMsgSendInfo* buildMsgInfoImpl(SRequestObj* pReqObj);
@ -373,7 +343,6 @@ void taos_close_internal(void* taos);
// global, called by mgmt
int hbMgrInit();
void hbMgrCleanUp();
int hbHandleRsp(SClientHbBatchRsp* hbRsp);
// cluster level
SAppHbMgr* appHbMgrInit(SAppInstInfo* pAppInstInfo, char* key);
@ -386,9 +355,6 @@ void stopAllRequests(SHashObj* pRequests);
int hbRegisterConn(SAppHbMgr* pAppHbMgr, int64_t tscRefId, int64_t clusterId, int8_t connType);
void hbDeregisterConn(SAppHbMgr* pAppHbMgr, SClientHbKey connKey);
// --- mq
void hbMgrInitMqHbRspHandle();
typedef struct SSqlCallbackWrapper {
SParseContext* pParseCtx;
SCatalogReq* pCatalogReq;

View File

@ -1039,8 +1039,7 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat
.sysInfo = pRequest->pTscObj->sysInfo,
.allocatorId = pRequest->allocatorRefId};
SAppInstInfo* pAppInfo = getAppInfo(pRequest);
SQueryPlan* pDag = NULL;
SQueryPlan* pDag = NULL;
int64_t st = taosGetTimestampUs();
int32_t code = qCreateQueryPlan(&cxt, &pDag, pMnodeList);
@ -1052,7 +1051,6 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat
}
pRequest->metric.execStart = taosGetTimestampUs();
pRequest->metric.planCostUs = pRequest->metric.execStart - st;
if (TSDB_CODE_SUCCESS == code && !pRequest->validateOnly) {

View File

@ -1259,8 +1259,9 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_schemalessInse
int code = taos_errno(tres);
if (code != TSDB_CODE_SUCCESS) {
jniError("jobj:%p, conn:%p, code: 0x%x, msg:%s", jobj, taos, code, taos_errstr(tres));
jobject jobject = createSchemalessResp(env, 0, code, taos_errstr(tres));
taos_free_result(tres);
return createSchemalessResp(env, 0, code, taos_errstr(tres));
return jobject;
}
taos_free_result(tres);
@ -1286,8 +1287,9 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_schemalessInse
int code = taos_errno(tres);
if (code != TSDB_CODE_SUCCESS) {
jniError("jobj:%p, conn:%p, code: 0x%x, msg:%s", jobj, taos, code, taos_errstr(tres));
jobject jobject = createSchemalessResp(env, 0, code, taos_errstr(tres));
taos_free_result(tres);
return createSchemalessResp(env, 0, code, taos_errstr(tres));
return jobject;
}
taos_free_result(tres);
@ -1315,8 +1317,9 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_schemalessInse
int code = taos_errno(tres);
if (code != TSDB_CODE_SUCCESS) {
jniError("jobj:%p, conn:%p, code: 0x%x, msg:%s", jobj, taos, code, taos_errstr(tres));
jobject jobject = createSchemalessResp(env, 0, code, taos_errstr(tres));
taos_free_result(tres);
return createSchemalessResp(env, 0, code, taos_errstr(tres));
return jobject;
}
taos_free_result(tres);
@ -1343,8 +1346,9 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_schemalessInse
int code = taos_errno(tres);
if (code != TSDB_CODE_SUCCESS) {
jniError("jobj:%p, conn:%p, code: 0x%x, msg:%s", jobj, taos, code, taos_errstr(tres));
jobject jobject = createSchemalessResp(env, 0, code, taos_errstr(tres));
taos_free_result(tres);
return createSchemalessResp(env, 0, code, taos_errstr(tres));
return jobject;
}
taos_free_result(tres);

View File

@ -210,6 +210,11 @@ typedef struct {
tmq_t* pTmq;
} SMqCommitCbParam;
typedef struct SSyncCommitInfo {
tsem_t sem;
int32_t code;
} SSyncCommitInfo;
static int32_t doAskEp(tmq_t* tmq);
static int32_t makeTopicVgroupKey(char* dst, const char* topicName, int32_t vg);
static int32_t tmqCommitDone(SMqCommitCbParamSet* pParamSet);
@ -521,11 +526,7 @@ static int32_t doSendCommitMsg(tmq_t* tmq, SMqClientVg* pVg, const char* pTopicN
return TSDB_CODE_OUT_OF_MEMORY;
}
pMsgSendInfo->msgInfo = (SDataBuf){
.pData = buf,
.len = sizeof(SMsgHead) + len,
.handle = NULL,
};
pMsgSendInfo->msgInfo = (SDataBuf) { .pData = buf, .len = sizeof(SMsgHead) + len, .handle = NULL };
pMsgSendInfo->requestId = generateRequestId();
pMsgSendInfo->requestObjRefId = 0;
@ -786,11 +787,7 @@ void tmqSendHbReq(void* param, void* tmrId) {
goto OVER;
}
sendInfo->msgInfo = (SDataBuf){
.pData = pReq,
.len = tlen,
.handle = NULL,
};
sendInfo->msgInfo = (SDataBuf){ .pData = pReq, .len = tlen, .handle = NULL };
sendInfo->requestId = generateRequestId();
sendInfo->requestObjRefId = 0;
@ -2126,13 +2123,8 @@ void tmq_commit_async(tmq_t* tmq, const TAOS_RES* pRes, tmq_commit_cb* cb, void*
}
}
typedef struct SSyncCommitInfo {
tsem_t sem;
int32_t code;
} SSyncCommitInfo;
static void commitCallBackFn(tmq_t* pTmq, int32_t code, void* param) {
SSyncCommitInfo* pInfo = (SSyncCommitInfo*)param;
static void commitCallBackFn(tmq_t *pTmq, int32_t code, void* param) {
SSyncCommitInfo* pInfo = (SSyncCommitInfo*) param;
pInfo->code = code;
tsem_post(&pInfo->sem);
}
@ -2309,3 +2301,26 @@ void commitRspCountDown(SMqCommitCbParamSet* pParamSet, int64_t consumerId, cons
waitingRspNum);
}
}
SReqResultInfo* tmqGetNextResInfo(TAOS_RES* res, bool convertUcs4) {
SMqRspObj* pRspObj = (SMqRspObj*)res;
pRspObj->resIter++;
if (pRspObj->resIter < pRspObj->rsp.blockNum) {
SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(pRspObj->rsp.blockData, pRspObj->resIter);
if (pRspObj->rsp.withSchema) {
SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(pRspObj->rsp.blockSchema, pRspObj->resIter);
setResSchemaInfo(&pRspObj->resInfo, pSW->pSchema, pSW->nCols);
taosMemoryFreeClear(pRspObj->resInfo.row);
taosMemoryFreeClear(pRspObj->resInfo.pCol);
taosMemoryFreeClear(pRspObj->resInfo.length);
taosMemoryFreeClear(pRspObj->resInfo.convertBuf);
taosMemoryFreeClear(pRspObj->resInfo.convertJson);
}
setQueryResultFromRsp(&pRspObj->resInfo, pRetrieve, convertUcs4, false);
return &pRspObj->resInfo;
}
return NULL;
}

View File

@ -221,6 +221,7 @@ static void doBitmapMerge(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, c
}
uint8_t* p = (uint8_t*)pSource->nullbitmap;
pColumnInfoData->nullbitmap[BitmapLen(numOfRow1) - 1] &= (0B11111111 << shiftBits); // clear remind bits
pColumnInfoData->nullbitmap[BitmapLen(numOfRow1) - 1] |= (p[0] >> remindBits); // copy remind bits
if (BitmapLen(numOfRow1) == BitmapLen(total)) {
@ -232,6 +233,7 @@ static void doBitmapMerge(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, c
uint8_t* start = (uint8_t*)&pColumnInfoData->nullbitmap[BitmapLen(numOfRow1)];
int32_t overCount = BitmapLen(total) - BitmapLen(numOfRow1);
memset(start, 0, overCount);
while (i < len) { // size limit of pSource->nullbitmap
if (i >= 1) {
start[i - 1] |= (p[i] >> remindBits); // copy remind bits
@ -309,9 +311,11 @@ int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int
pColumnInfoData->pData = tmp;
if (BitmapLen(numOfRow1) < BitmapLen(finalNumOfRows)) {
char* btmp = taosMemoryRealloc(pColumnInfoData->nullbitmap, BitmapLen(finalNumOfRows));
if (btmp == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
uint32_t extend = BitmapLen(finalNumOfRows) - BitmapLen(numOfRow1);
memset(btmp + BitmapLen(numOfRow1), 0, extend);
pColumnInfoData->nullbitmap = btmp;
}

View File

@ -105,6 +105,7 @@ int32_t tsQueryPolicy = 1;
int32_t tsQueryRspPolicy = 0;
int64_t tsQueryMaxConcurrentTables = 200; // unit is TSDB_TABLE_NUM_UNIT
bool tsEnableQueryHb = false;
bool tsEnableScience = false; // if true, taos CLI shows float and double values in scientific notation
int32_t tsQuerySmaOptimize = 0;
int32_t tsQueryRsmaTolerance = 1000; // the tolerance time (ms) to judge from which level to query rsma data.
bool tsQueryPlannerTrace = false;
@ -117,6 +118,7 @@ int32_t tsRedirectMaxPeriod = 1000;
int32_t tsMaxRetryWaitTime = 10000;
bool tsUseAdapter = false;
/*
* denote if the server needs to compress response message at the application layer to client, including query rsp,
* metricmeta rsp, and multi-meter query rsp message body. The client compress the submit message to server.
@ -328,6 +330,7 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "compressColData", tsCompressColData, -1, 100000000, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "queryPolicy", tsQueryPolicy, 1, 4, 1) != 0) return -1;
if (cfgAddBool(pCfg, "enableQueryHb", tsEnableQueryHb, false) != 0) return -1;
if (cfgAddBool(pCfg, "enableScience", tsEnableScience, false) != 0) return -1;
if (cfgAddInt32(pCfg, "querySmaOptimize", tsQuerySmaOptimize, 0, 1, 1) != 0) return -1;
if (cfgAddBool(pCfg, "queryPlannerTrace", tsQueryPlannerTrace, true) != 0) return -1;
if (cfgAddInt32(pCfg, "queryNodeChunkSize", tsQueryNodeChunkSize, 1024, 128 * 1024, true) != 0) return -1;
@ -730,6 +733,7 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
tsNumOfTaskQueueThreads = cfgGetItem(pCfg, "numOfTaskQueueThreads")->i32;
tsQueryPolicy = cfgGetItem(pCfg, "queryPolicy")->i32;
tsEnableQueryHb = cfgGetItem(pCfg, "enableQueryHb")->bval;
tsEnableScience = cfgGetItem(pCfg, "enableScience")->bval;
tsQuerySmaOptimize = cfgGetItem(pCfg, "querySmaOptimize")->i32;
tsQueryPlannerTrace = cfgGetItem(pCfg, "queryPlannerTrace")->bval;
tsQueryNodeChunkSize = cfgGetItem(pCfg, "queryNodeChunkSize")->i32;

View File

@ -55,6 +55,7 @@ int32_t smOpen(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) {
smClose(pMgmt);
return -1;
}
tmsgReportStartup("snode-impl", "initialized");
if (smStartWorker(pMgmt) != 0) {

View File

@ -15,6 +15,7 @@
#define _DEFAULT_SOURCE
#include "vmInt.h"
#include "vnd.h"
SVnodeObj *vmAcquireVnode(SVnodeMgmt *pMgmt, int32_t vgId) {
SVnodeObj *pVnode = NULL;
@ -78,6 +79,11 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) {
void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal) {
char path[TSDB_FILENAME_LEN] = {0};
bool atExit = true;
if (vnodeIsLeader(pVnode->pImpl)) {
vnodeProposeCommitOnNeed(pVnode->pImpl, atExit);
}
taosThreadRwlockWrlock(&pMgmt->lock);
taosHashRemove(pMgmt->hash, &pVnode->vgId, sizeof(int32_t));

View File

@ -163,8 +163,8 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, pHead->vgId);
if (pVnode == NULL) {
dGError("vgId:%d, msg:%p failed to put into vnode queue since %s, type:%s qtype:%d contLen:%d", pHead->vgId, pMsg,
terrstr(), TMSG_INFO(pMsg->msgType), qtype, pHead->contLen);
dGWarn("vgId:%d, msg:%p failed to put into vnode queue since %s, type:%s qtype:%d contLen:%d", pHead->vgId, pMsg,
terrstr(), TMSG_INFO(pMsg->msgType), qtype, pHead->contLen);
terrno = (terrno != 0) ? terrno : -1;
return terrno;
}

View File

@ -5,6 +5,7 @@ ENDIF ()
IF (TD_ENTERPRISE)
LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/privilege/src/privilege.c)
LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/mnode/src/mndDb.c)
LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/mnode/src/mndVgroup.c)
ENDIF ()
add_library(mnode STATIC ${MNODE_SRC})

View File

@ -67,7 +67,7 @@ int32_t mndGetClusterName(SMnode *pMnode, char *clusterName, int32_t len) {
return 0;
}
static SClusterObj *mndAcquireCluster(SMnode *pMnode) {
static SClusterObj *mndAcquireCluster(SMnode *pMnode, void **ppIter) {
SSdb *pSdb = pMnode->pSdb;
void *pIter = NULL;
@ -76,23 +76,27 @@ static SClusterObj *mndAcquireCluster(SMnode *pMnode) {
pIter = sdbFetch(pSdb, SDB_CLUSTER, pIter, (void **)&pCluster);
if (pIter == NULL) break;
*ppIter = pIter;
return pCluster;
}
return NULL;
}
static void mndReleaseCluster(SMnode *pMnode, SClusterObj *pCluster) {
static void mndReleaseCluster(SMnode *pMnode, SClusterObj *pCluster, void *pIter) {
SSdb *pSdb = pMnode->pSdb;
sdbCancelFetch(pSdb, pIter);
sdbRelease(pSdb, pCluster);
}
int64_t mndGetClusterId(SMnode *pMnode) {
int64_t clusterId = 0;
SClusterObj *pCluster = mndAcquireCluster(pMnode);
void *pIter = NULL;
SClusterObj *pCluster = mndAcquireCluster(pMnode, &pIter);
if (pCluster != NULL) {
clusterId = pCluster->id;
mndReleaseCluster(pMnode, pCluster);
mndReleaseCluster(pMnode, pCluster, pIter);
}
return clusterId;
@ -100,10 +104,11 @@ int64_t mndGetClusterId(SMnode *pMnode) {
int64_t mndGetClusterCreateTime(SMnode *pMnode) {
int64_t createTime = 0;
SClusterObj *pCluster = mndAcquireCluster(pMnode);
void *pIter = NULL;
SClusterObj *pCluster = mndAcquireCluster(pMnode, &pIter);
if (pCluster != NULL) {
createTime = pCluster->createdTime;
mndReleaseCluster(pMnode, pCluster);
mndReleaseCluster(pMnode, pCluster, pIter);
}
return createTime;
@ -121,10 +126,11 @@ static int32_t mndGetClusterUpTimeImp(SClusterObj *pCluster) {
float mndGetClusterUpTime(SMnode *pMnode) {
int64_t upTime = 0;
SClusterObj *pCluster = mndAcquireCluster(pMnode);
void *pIter = NULL;
SClusterObj *pCluster = mndAcquireCluster(pMnode, &pIter);
if (pCluster != NULL) {
upTime = mndGetClusterUpTimeImp(pCluster);
mndReleaseCluster(pMnode, pCluster);
mndReleaseCluster(pMnode, pCluster, pIter);
}
return upTime / 86400.0f;
@ -321,11 +327,12 @@ static void mndCancelGetNextCluster(SMnode *pMnode, void *pIter) {
static int32_t mndProcessUptimeTimer(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
SClusterObj clusterObj = {0};
SClusterObj *pCluster = mndAcquireCluster(pMnode);
void *pIter = NULL;
SClusterObj *pCluster = mndAcquireCluster(pMnode, &pIter);
if (pCluster != NULL) {
memcpy(&clusterObj, pCluster, sizeof(SClusterObj));
clusterObj.upTime += tsUptimeInterval;
mndReleaseCluster(pMnode, pCluster);
mndReleaseCluster(pMnode, pCluster, pIter);
}
if (clusterObj.id <= 0) {

View File

@ -70,7 +70,7 @@ int32_t tEncodeSStreamObj(SEncoder *pEncoder, const SStreamObj *pObj) {
if (tEncodeI32(pEncoder, innerSz) < 0) return -1;
for (int32_t j = 0; j < innerSz; j++) {
SStreamTask *pTask = taosArrayGetP(pArray, j);
if (tEncodeSStreamTask(pEncoder, pTask) < 0) return -1;
if (tEncodeStreamTask(pEncoder, pTask) < 0) return -1;
}
}
@ -130,7 +130,7 @@ int32_t tDecodeSStreamObj(SDecoder *pDecoder, SStreamObj *pObj, int32_t sver) {
taosArrayDestroy(pArray);
return -1;
}
if (tDecodeSStreamTask(pDecoder, pTask) < 0) {
if (tDecodeStreamTask(pDecoder, pTask) < 0) {
taosMemoryFree(pTask);
taosArrayDestroy(pArray);
return -1;
@ -158,7 +158,10 @@ void tFreeStreamObj(SStreamObj *pStream) {
taosMemoryFree(pStream->sql);
taosMemoryFree(pStream->ast);
taosMemoryFree(pStream->physicalPlan);
if (pStream->outputSchema.nCols) taosMemoryFree(pStream->outputSchema.pSchema);
if (pStream->outputSchema.nCols) {
taosMemoryFree(pStream->outputSchema.pSchema);
}
int32_t sz = taosArrayGetSize(pStream->tasks);
for (int32_t i = 0; i < sz; i++) {
@ -166,11 +169,14 @@ void tFreeStreamObj(SStreamObj *pStream) {
int32_t taskSz = taosArrayGetSize(pLevel);
for (int32_t j = 0; j < taskSz; j++) {
SStreamTask *pTask = taosArrayGetP(pLevel, j);
tFreeSStreamTask(pTask);
tFreeStreamTask(pTask);
}
taosArrayDestroy(pLevel);
}
taosArrayDestroy(pStream->tasks);
// tagSchema.pSchema
if (pStream->tagSchema.nCols > 0) {
taosMemoryFree(pStream->tagSchema.pSchema);

View File

@ -138,7 +138,7 @@ int32_t mndAddDispatcherToInnerTask(SMnode* pMnode, SStreamObj* pStream, SStream
for (int32_t j = 0; j < sinkLvSize; j++) {
SStreamTask* pLastLevelTask = taosArrayGetP(sinkLv, j);
if (pLastLevelTask->nodeId == pVgInfo->vgId) {
pVgInfo->taskId = pLastLevelTask->taskId;
pVgInfo->taskId = pLastLevelTask->id.taskId;
break;
}
}
@ -149,7 +149,7 @@ int32_t mndAddDispatcherToInnerTask(SMnode* pMnode, SStreamObj* pStream, SStream
SArray* pArray = taosArrayGetP(pStream->tasks, 0);
// one sink only
SStreamTask* lastLevelTask = taosArrayGetP(pArray, 0);
pTask->fixedEpDispatcher.taskId = lastLevelTask->taskId;
pTask->fixedEpDispatcher.taskId = lastLevelTask->id.taskId;
pTask->fixedEpDispatcher.nodeId = lastLevelTask->nodeId;
pTask->fixedEpDispatcher.epSet = lastLevelTask->epSet;
}
@ -224,7 +224,7 @@ int32_t mndAddShuffleSinkTasksToStream(SMnode* pMnode, SStreamObj* pStream) {
continue;
}
SStreamTask* pTask = tNewSStreamTask(pStream->uid);
SStreamTask* pTask = tNewStreamTask(pStream->uid);
if (pTask == NULL) {
sdbRelease(pSdb, pVgroup);
terrno = TSDB_CODE_OUT_OF_MEMORY;
@ -260,7 +260,7 @@ int32_t mndAddShuffleSinkTasksToStream(SMnode* pMnode, SStreamObj* pStream) {
int32_t mndAddFixedSinkTaskToStream(SMnode* pMnode, SStreamObj* pStream) {
SArray* tasks = taosArrayGetP(pStream->tasks, 0);
SStreamTask* pTask = tNewSStreamTask(pStream->uid);
SStreamTask* pTask = tNewStreamTask(pStream->uid);
if (pTask == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
@ -350,12 +350,13 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
return -1;
}
pInnerTask = tNewSStreamTask(pStream->uid);
pInnerTask = tNewStreamTask(pStream->uid);
if (pInnerTask == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
qDestroyQueryPlan(pPlan);
return -1;
}
pInnerTask->fillHistory = pStream->fillHistory;
mndAddTaskToTaskSet(taskInnerLevel, pInnerTask);
@ -421,7 +422,7 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
continue;
}
SStreamTask* pTask = tNewSStreamTask(pStream->uid);
SStreamTask* pTask = tNewStreamTask(pStream->uid);
if (pTask == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
sdbRelease(pSdb, pVgroup);
@ -440,7 +441,7 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
pTask->dispatchMsgType = TDMT_STREAM_TASK_DISPATCH;
pTask->outputType = TASK_OUTPUT__FIXED_DISPATCH;
pTask->fixedEpDispatcher.taskId = pInnerTask->taskId;
pTask->fixedEpDispatcher.taskId = pInnerTask->id.taskId;
pTask->fixedEpDispatcher.nodeId = pInnerTask->nodeId;
pTask->fixedEpDispatcher.epSet = pInnerTask->epSet;
@ -460,7 +461,7 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
pEpInfo->childId = pTask->selfChildId;
pEpInfo->epSet = pTask->epSet;
pEpInfo->nodeId = pTask->nodeId;
pEpInfo->taskId = pTask->taskId;
pEpInfo->taskId = pTask->id.taskId;
taosArrayPush(pInnerTask->childEpInfo, &pEpInfo);
sdbRelease(pSdb, pVgroup);
}
@ -491,7 +492,7 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
continue;
}
SStreamTask* pTask = tNewSStreamTask(pStream->uid);
SStreamTask* pTask = tNewStreamTask(pStream->uid);
if (pTask == NULL) {
sdbRelease(pSdb, pVgroup);
qDestroyQueryPlan(pPlan);

View File

@ -35,12 +35,12 @@
static int32_t mndStreamActionInsert(SSdb *pSdb, SStreamObj *pStream);
static int32_t mndStreamActionDelete(SSdb *pSdb, SStreamObj *pStream);
static int32_t mndStreamActionUpdate(SSdb *pSdb, SStreamObj *pStream, SStreamObj *pNewStream);
static int32_t mndStreamActionUpdate(SSdb *pSdb, SStreamObj *pOldStream, SStreamObj *pNewStream);
static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq);
static int32_t mndProcessDropStreamReq(SRpcMsg *pReq);
static int32_t mndProcessStreamCheckpointTmr(SRpcMsg *pReq);
// static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq);
/*static int32_t mndProcessRecoverStreamReq(SRpcMsg *pReq);*/
static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq);
static int32_t mndProcessRecoverStreamReq(SRpcMsg *pReq);
static int32_t mndProcessStreamMetaReq(SRpcMsg *pReq);
static int32_t mndGetStreamMeta(SRpcMsg *pReq, SShowObj *pShow, STableMetaRsp *pMeta);
static int32_t mndRetrieveStream(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows);
@ -418,7 +418,7 @@ FAIL:
int32_t mndPersistTaskDeployReq(STrans *pTrans, const SStreamTask *pTask) {
SEncoder encoder;
tEncoderInit(&encoder, NULL, 0);
tEncodeSStreamTask(&encoder, pTask);
tEncodeStreamTask(&encoder, pTask);
int32_t size = encoder.pos;
int32_t tlen = sizeof(SMsgHead) + size;
tEncoderClear(&encoder);
@ -430,7 +430,7 @@ int32_t mndPersistTaskDeployReq(STrans *pTrans, const SStreamTask *pTask) {
((SMsgHead *)buf)->vgId = htonl(pTask->nodeId);
void *abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
tEncoderInit(&encoder, abuf, size);
tEncodeSStreamTask(&encoder, pTask);
tEncodeStreamTask(&encoder, pTask);
tEncoderClear(&encoder);
STransAction action = {0};
@ -601,7 +601,7 @@ static int32_t mndPersistTaskDropReq(STrans *pTrans, SStreamTask *pTask) {
return -1;
}
pReq->head.vgId = htonl(pTask->nodeId);
pReq->taskId = pTask->taskId;
pReq->taskId = pTask->id.taskId;
STransAction action = {0};
memcpy(&action.epSet, &pTask->epSet, sizeof(SEpSet));
action.pCont = pReq;
@ -1209,7 +1209,7 @@ static int32_t mndRetrieveStreamTask(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
// task id
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pTask->taskId, false);
colDataSetVal(pColInfo, numOfRows, (const char *)&pTask->id.taskId, false);
// node type
char nodeType[20 + VARSTR_HEADER_SIZE] = {0};

View File

@ -671,7 +671,7 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) {
// possibly no vg is changed
// when each topic is re-balanced, issue an trans to save the results in sdb.
if (mndPersistRebResult(pMnode, pMsg, &rebOutput) < 0) {
mError("mq re-balance persist output error, possibly vnode splitted or dropped");
mError("mq re-balance persist output error, possibly vnode splitted or dropped,msg:%s", terrstr());
}
taosArrayDestroy(rebOutput.newConsumers);

View File

@ -1168,15 +1168,13 @@ static void mndLoopHash(SHashObj *hash, char *priType, SSDataBlock *pBlock, int3
sprintf(sql, "error");
}
// char *obj = taosMemoryMalloc(sqlLen + VARSTR_HEADER_SIZE + 1);
char obj[TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(obj, sql, pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, *numOfRows, (const char *)obj, false);
// taosMemoryFree(obj);
} else {
char condition[20] = {0};
char condition[TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(condition, "", pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, *numOfRows, (const char *)condition, false);
@ -1257,12 +1255,12 @@ static int32_t mndRetrievePrivileges(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)objName, false);
char tableName[20] = {0};
char tableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(tableName, "", pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)tableName, false);
char condition[20] = {0};
char condition[TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(condition, "", pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)condition, false);
@ -1292,12 +1290,12 @@ static int32_t mndRetrievePrivileges(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)objName, false);
char tableName[20] = {0};
char tableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(tableName, "", pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)tableName, false);
char condition[20] = {0};
char condition[TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(condition, "", pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)condition, false);
@ -1329,12 +1327,12 @@ static int32_t mndRetrievePrivileges(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)topicName, false);
char tableName[20] = {0};
char tableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(tableName, "", pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)tableName, false);
char condition[20] = {0};
char condition[TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(condition, "", pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)condition, false);

View File

@ -1891,57 +1891,18 @@ int32_t mndAddVgroupBalanceToTrans(SMnode *pMnode, SVgObj *pVgroup, STrans *pTra
return 0;
}
extern int32_t mndProcessVgroupBalanceLeaderMsgImp(SRpcMsg *pReq);
int32_t mndProcessVgroupBalanceLeaderMsg(SRpcMsg *pReq) {
int32_t code = -1;
SBalanceVgroupLeaderReq req = {0};
if (tDeserializeSBalanceVgroupLeaderReq(pReq->pCont, pReq->contLen, &req) != 0) {
terrno = TSDB_CODE_INVALID_MSG;
return code;
}
SMnode *pMnode = pReq->info.node;
SSdb *pSdb = pMnode->pSdb;
int32_t total = sdbGetSize(pSdb, SDB_VGROUP);
if(total <= 0) {
terrno = TSDB_CODE_TSC_INVALID_OPERATION;
return code;
}
STrans *pTrans = NULL;
pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pReq, "bal-vg-leader");
if (pTrans == NULL) goto _OVER;
mndTransSetSerial(pTrans);
mInfo("trans:%d, used to balance vgroup leader", pTrans->id);
void *pIter = NULL;
int32_t count = 0;
while (1) {
SVgObj *pVgroup = NULL;
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);
if (pIter == NULL) break;
if(mndAddVgroupBalanceToTrans(pMnode, pVgroup, pTrans) == 0){
count++;
}
sdbRelease(pSdb, pVgroup);
}
if(count == 0) {
terrno = TSDB_CODE_TSC_INVALID_OPERATION;
goto _OVER;
}
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
code = 0;
_OVER:
mndTransDrop(pTrans);
return code;
return mndProcessVgroupBalanceLeaderMsgImp(pReq);
}
#ifndef TD_ENTERPRISE
int32_t mndProcessVgroupBalanceLeaderMsgImp(SRpcMsg *pReq) {
return 0;
}
#endif
static int32_t mndCheckDnodeMemory(SMnode *pMnode, SDbObj *pOldDb, SDbObj *pNewDb, SVgObj *pOldVgroup,
SVgObj *pNewVgroup, SArray *pArray) {
for (int32_t i = 0; i < (int32_t)taosArrayGetSize(pArray); ++i) {

View File

@ -32,6 +32,7 @@ void sndEnqueueStreamDispatch(SSnode *pSnode, SRpcMsg *pMsg) {
tDecoderClear(&decoder);
goto FAIL;
}
tDecoderClear(&decoder);
int32_t taskId = req.taskId;
@ -65,7 +66,7 @@ int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t ver) {
ASSERT(taosArrayGetSize(pTask->childEpInfo) != 0);
pTask->refCnt = 1;
pTask->schedStatus = TASK_SCHED_STATUS__INACTIVE;
pTask->status.schedStatus = TASK_SCHED_STATUS__INACTIVE;
pTask->inputQueue = streamQueueOpen();
pTask->outputQueue = streamQueueOpen();
@ -76,21 +77,19 @@ int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t ver) {
pTask->inputStatus = TASK_INPUT_STATUS__NORMAL;
pTask->outputStatus = TASK_OUTPUT_STATUS__NORMAL;
pTask->pMsgCb = &pSnode->msgCb;
pTask->startVer = ver;
pTask->chkInfo.version = ver;
pTask->pMeta = pSnode->pMeta;
pTask->pState = streamStateOpen(pSnode->path, pTask, false, -1, -1);
if (pTask->pState == NULL) {
return -1;
}
SReadHandle mgHandle = {
.vnode = NULL,
.numOfVgroups = (int32_t)taosArrayGetSize(pTask->childEpInfo),
.pStateBackend = pTask->pState,
};
int32_t numOfChildEp = taosArrayGetSize(pTask->childEpInfo);
SReadHandle mgHandle = { .vnode = NULL, .numOfVgroups = numOfChildEp, .pStateBackend = pTask->pState };
pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &mgHandle, 0);
ASSERT(pTask->exec.executor);
pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &mgHandle, 0);
ASSERT(pTask->exec.pExecutor);
streamSetupTrigger(pTask);
return 0;
@ -140,9 +139,10 @@ int32_t sndProcessTaskDeployReq(SSnode *pSnode, char *msg, int32_t msgLen) {
if (pTask == NULL) {
return -1;
}
SDecoder decoder;
tDecoderInit(&decoder, (uint8_t *)msg, msgLen);
code = tDecodeSStreamTask(&decoder, pTask);
code = tDecodeStreamTask(&decoder, pTask);
if (code < 0) {
tDecoderClear(&decoder);
taosMemoryFree(pTask);
@ -153,7 +153,7 @@ int32_t sndProcessTaskDeployReq(SSnode *pSnode, char *msg, int32_t msgLen) {
ASSERT(pTask->taskLevel == TASK_LEVEL__AGG);
// 2.save task
code = streamMetaAddTask(pSnode->pMeta, -1, pTask);
code = streamMetaAddDeployedTask(pSnode->pMeta, -1, pTask);
if (code < 0) {
return -1;
}

View File

@ -57,6 +57,7 @@ target_sources(
# tq
"src/tq/tq.c"
"src/tq/tqUtil.c"
"src/tq/tqScan.c"
"src/tq/tqMeta.c"
"src/tq/tqRead.c"
@ -64,6 +65,7 @@ target_sources(
"src/tq/tqPush.c"
"src/tq/tqSink.c"
"src/tq/tqCommit.c"
"src/tq/tqRestore.c"
"src/tq/tqSnapshot.c"
"src/tq/tqOffsetSnapshot.c"
)

View File

@ -92,7 +92,7 @@ int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg);
int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo);
void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs);
void vnodeApplyWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs);
void vnodeProposeCommitOnNeed(SVnode *pVnode);
void vnodeProposeCommitOnNeed(SVnode *pVnode, bool atExit);
// meta
typedef struct SMeta SMeta; // todo: remove
@ -257,15 +257,16 @@ void tqCloseReader(STqReader *);
void tqReaderSetColIdList(STqReader *pReader, SArray *pColIdList);
int32_t tqReaderSetTbUidList(STqReader *pReader, const SArray *tbUidList);
int32_t tqReaderAddTbUidList(STqReader *pReader, const SArray *tbUidList);
int32_t tqReaderAddTbUidList(STqReader *pReader, const SArray *pTableUidList);
int32_t tqReaderRemoveTbUidList(STqReader *pReader, const SArray *tbUidList);
int32_t tqSeekVer(STqReader *pReader, int64_t ver, const char *id);
void tqNextBlock(STqReader *pReader, SFetchRet *ret);
int32_t extractSubmitMsgFromWal(SWalReader *pReader, SPackedData *pPackedData);
int32_t tqReaderSetSubmitReq2(STqReader *pReader, void *msgStr, int32_t msgLen, int64_t ver);
int32_t tqReaderSetSubmitMsg(STqReader *pReader, void *msgStr, int32_t msgLen, int64_t ver);
// int32_t tqReaderSetDataMsg(STqReader *pReader, const SSubmitReq *pMsg, int64_t ver);
bool tqNextDataBlock2(STqReader *pReader);
bool tqNextDataBlock(STqReader *pReader);
bool tqNextDataBlockFilterOut2(STqReader *pReader, SHashObj *filterOutUids);
int32_t tqRetrieveDataBlock2(SSDataBlock *pBlock, STqReader *pReader, SSubmitTbData **pSubmitTbDataRet);
int32_t tqRetrieveTaosxBlock2(STqReader *pReader, SArray *blocks, SArray *schemas, SSubmitTbData **pSubmitTbDataRet);

View File

@ -80,7 +80,7 @@ typedef struct {
typedef struct {
int8_t subType;
STqReader* pExecReader;
STqReader* pTqReader;
qTaskInfo_t task;
union {
STqExecCol execCol;
@ -128,6 +128,10 @@ typedef struct {
tmr_h timer;
} STqMgmt;
typedef struct {
int32_t size;
} STqOffsetHead;
static STqMgmt tqMgmt = {0};
int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle);
@ -154,10 +158,6 @@ int32_t tqMetaSaveCheckInfo(STQ* pTq, const char* key, const void* value, int32_
int32_t tqMetaDeleteCheckInfo(STQ* pTq, const char* key);
int32_t tqMetaRestoreCheckInfo(STQ* pTq);
typedef struct {
int32_t size;
} STqOffsetHead;
STqOffsetStore* tqOffsetOpen(STQ* pTq);
void tqOffsetClose(STqOffsetStore*);
STqOffset* tqOffsetRead(STqOffsetStore* pStore, const char* subscribeKey);
@ -176,6 +176,18 @@ int32_t tqOffsetRestoreFromFile(STqOffsetStore* pStore, const char* fname);
// tqStream
int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver);
int32_t tqStreamTasksScanWal(STQ* pTq);
// tq util
void createStreamTaskOffsetKey(char* dst, uint64_t streamId, uint32_t taskId);
int32_t tqAddInputBlockNLaunchTask(SStreamTask* pTask, SStreamQueueItem* pQueueItem, int64_t ver);
int32_t launchTaskForWalBlock(SStreamTask* pTask, SFetchRet* pRet, STqOffset* pOffset);
int32_t tqExtractDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg);
void doSaveTaskOffset(STqOffsetStore* pOffsetStore, const char* pKey, int64_t ver);
void saveOffsetForAllTasks(STQ* pTq, int64_t ver);
void initOffsetForAllRestoreTasks(STQ* pTq);
int32_t transferToWalReadTask(SStreamMeta* pStreamMeta, SArray* pTaskList);
#ifdef __cplusplus
}

View File

@ -96,7 +96,7 @@ int32_t vnodeGetBatchMeta(SVnode* pVnode, SRpcMsg* pMsg);
// vnodeCommit.c
int32_t vnodeBegin(SVnode* pVnode);
int32_t vnodeShouldCommit(SVnode* pVnode);
int32_t vnodeShouldCommit(SVnode* pVnode, bool atExit);
void vnodeUpdCommitSched(SVnode* pVnode);
void vnodeRollback(SVnode* pVnode);
int32_t vnodeSaveInfo(const char* dir, const SVnodeInfo* pCfg);
@ -115,7 +115,6 @@ void vnodeSyncClose(SVnode* pVnode);
void vnodeRedirectRpcMsg(SVnode* pVnode, SRpcMsg* pMsg, int32_t code);
bool vnodeIsLeader(SVnode* pVnode);
bool vnodeIsRoleLeader(SVnode* pVnode);
int vnodeShouldCommit(SVnode* pVnode);
#ifdef __cplusplus
}

View File

@ -193,9 +193,10 @@ void tqCleanUp();
STQ* tqOpen(const char* path, SVnode* pVnode);
void tqClose(STQ*);
int tqPushMsg(STQ*, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver);
int tqRegisterPushEntry(STQ* pTq, void* pHandle, const SMqPollReq* pRequest, SRpcMsg* pRpcMsg, SMqDataRsp* pDataRsp,
int tqRegisterPushHandle(STQ* pTq, void* pHandle, const SMqPollReq* pRequest, SRpcMsg* pRpcMsg, SMqDataRsp* pDataRsp,
int32_t type);
int tqUnregisterPushEntry(STQ* pTq, const char* pKey, int32_t keyLen, uint64_t consumerId, bool rspConsumer);
int tqUnregisterPushHandle(STQ* pTq, const char* pKey, int32_t keyLen, uint64_t consumerId, bool rspConsumer);
int tqStartStreamTasks(STQ* pTq); // restore all stream tasks after vnode launching completed.
int tqCommit(STQ*);
int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd);

View File

@ -531,10 +531,11 @@ static void freePayload(const void* key, size_t keyLen, void* value) {
return;
}
SHashObj* pHashObj = (SHashObj*)p[0];
SHashObj* pHashObj = (SHashObj*)p[0];
STagFilterResEntry** pEntry = taosHashGet(pHashObj, &p[1], sizeof(uint64_t));
{
if (pEntry != NULL && (*pEntry) != NULL) {
int64_t st = taosGetTimestampUs();
SListIter iter = {0};
@ -547,9 +548,9 @@ static void freePayload(const void* key, size_t keyLen, void* value) {
void* tmp = tdListPopNode(&((*pEntry)->list), pNode);
taosMemoryFree(tmp);
int64_t et = taosGetTimestampUs();
metaInfo("clear items in cache, remain cached item:%d, elapsed time:%.2fms", listNEles(&((*pEntry)->list)),
(et - st) / 1000.0);
double el = (taosGetTimestampUs() - st) / 1000.0;
metaInfo("clear items in meta-cache, remain cached item:%d, elapsed time:%.2fms", listNEles(&((*pEntry)->list)),
el);
break;
}
}

View File

@ -168,7 +168,7 @@ static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids,
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
if (pRSmaInfo->taskInfo[i]) {
if ((terrno = qUpdateQualifiedTableId(pRSmaInfo->taskInfo[i], tbUids, isAdd)) < 0) {
if ((terrno = qUpdateTableListForStreamScanner(pRSmaInfo->taskInfo[i], tbUids, isAdd)) < 0) {
tdReleaseRSmaInfo(pSma, pRSmaInfo);
smaError("vgId:%d, update tbUidList failed for uid:%" PRIi64 " level %d since %s", SMA_VID(pSma), *suid, i,
terrstr());

View File

@ -18,6 +18,7 @@
// 0: not init
// 1: already inited
// 2: wait to be inited or cleaned up
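// sentinel task id: a run request carrying this id asks the stream thread to scan the wal for all stream tasks in this vnode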
#define WAL_READ_TASKS_ID (-1)
int32_t tqInit() {
int8_t old;
@ -61,12 +62,12 @@ static void destroyTqHandle(void* data) {
if (pData->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
taosMemoryFreeClear(pData->execHandle.execCol.qmsg);
} else if (pData->execHandle.subType == TOPIC_SUB_TYPE__DB) {
tqCloseReader(pData->execHandle.pExecReader);
tqCloseReader(pData->execHandle.pTqReader);
walCloseReader(pData->pWalReader);
taosHashCleanup(pData->execHandle.execDb.pFilterOutTbUid);
} else if (pData->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
walCloseReader(pData->pWalReader);
tqCloseReader(pData->execHandle.pExecReader);
tqCloseReader(pData->execHandle.pTqReader);
}
}
@ -82,12 +83,18 @@ static void tqPushEntryFree(void* data) {
taosMemoryFree(p);
}
static bool tqOffsetLessOrEqual(const STqOffset* pLeft, const STqOffset* pRight) {
return pLeft->val.type == TMQ_OFFSET__LOG && pRight->val.type == TMQ_OFFSET__LOG &&
pLeft->val.version <= pRight->val.version;
}
STQ* tqOpen(const char* path, SVnode* pVnode) {
STQ* pTq = taosMemoryCalloc(1, sizeof(STQ));
if (pTq == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
pTq->path = taosStrdup(path);
pTq->pVnode = pVnode;
pTq->walLogLastVer = pVnode->pWal->vers.lastVer;
@ -138,44 +145,6 @@ void tqClose(STQ* pTq) {
taosMemoryFree(pTq);
}
int32_t tqSendMetaPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqMetaRsp* pRsp) {
int32_t len = 0;
int32_t code = 0;
tEncodeSize(tEncodeSMqMetaRsp, pRsp, len, code);
if (code < 0) {
return -1;
}
int32_t tlen = sizeof(SMqRspHead) + len;
void* buf = rpcMallocCont(tlen);
if (buf == NULL) {
return -1;
}
((SMqRspHead*)buf)->mqMsgType = TMQ_MSG_TYPE__POLL_META_RSP;
((SMqRspHead*)buf)->epoch = pReq->epoch;
((SMqRspHead*)buf)->consumerId = pReq->consumerId;
void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
SEncoder encoder = {0};
tEncoderInit(&encoder, abuf, len);
tEncodeSMqMetaRsp(&encoder, pRsp);
tEncoderClear(&encoder);
SRpcMsg resp = {
.info = pMsg->info,
.pCont = buf,
.contLen = tlen,
.code = 0,
};
tmsgSendRsp(&resp);
tqDebug("vgId:%d, from consumer:0x%" PRIx64 " (epoch %d) send rsp, res msg type %d, offset type:%d",
TD_VID(pTq->pVnode), pReq->consumerId, pReq->epoch, pRsp->resMsgType, pRsp->rspOffset.type);
return 0;
}
static int32_t doSendDataRsp(const SRpcHandleInfo* pRpcHandleInfo, const SMqDataRsp* pRsp, int32_t epoch,
int64_t consumerId, int32_t type) {
int32_t len = 0;
@ -253,11 +222,6 @@ int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, con
return 0;
}
static FORCE_INLINE bool tqOffsetLessOrEqual(const STqOffset* pLeft, const STqOffset* pRight) {
return pLeft->val.type == TMQ_OFFSET__LOG && pRight->val.type == TMQ_OFFSET__LOG &&
pLeft->val.version <= pRight->val.version;
}
int32_t tqProcessOffsetCommitReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) {
STqOffset offset = {0};
int32_t vgId = TD_VID(pTq->pVnode);
@ -330,318 +294,6 @@ int32_t tqCheckColModifiable(STQ* pTq, int64_t tbUid, int32_t colId) {
return 0;
}
static int32_t tqInitDataRsp(SMqDataRsp* pRsp, const SMqPollReq* pReq, int8_t subType) {
pRsp->reqOffset = pReq->reqOffset;
pRsp->blockData = taosArrayInit(0, sizeof(void*));
pRsp->blockDataLen = taosArrayInit(0, sizeof(int32_t));
if (pRsp->blockData == NULL || pRsp->blockDataLen == NULL) {
return -1;
}
pRsp->withTbName = 0;
pRsp->withSchema = false;
return 0;
}
static int32_t tqInitTaosxRsp(STaosxRsp* pRsp, const SMqPollReq* pReq) {
pRsp->reqOffset = pReq->reqOffset;
pRsp->withTbName = 1;
pRsp->withSchema = 1;
pRsp->blockData = taosArrayInit(0, sizeof(void*));
pRsp->blockDataLen = taosArrayInit(0, sizeof(int32_t));
pRsp->blockTbName = taosArrayInit(0, sizeof(void*));
pRsp->blockSchema = taosArrayInit(0, sizeof(void*));
if (pRsp->blockData == NULL || pRsp->blockDataLen == NULL || pRsp->blockTbName == NULL || pRsp->blockSchema == NULL) {
return -1;
}
return 0;
}
static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest,
SRpcMsg* pMsg, bool* pBlockReturned) {
uint64_t consumerId = pRequest->consumerId;
STqOffsetVal reqOffset = pRequest->reqOffset;
STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, pRequest->subKey);
int32_t vgId = TD_VID(pTq->pVnode);
*pBlockReturned = false;
// In this vnode, data has been polled by consumer for this topic, so let's continue from the last offset value.
if (pOffset != NULL) {
*pOffsetVal = pOffset->val;
char formatBuf[80];
tFormatOffset(formatBuf, 80, pOffsetVal);
tqDebug("tmq poll: consumer:0x%" PRIx64
", subkey %s, vgId:%d, existed offset found, offset reset to %s and continue. reqId:0x%" PRIx64,
consumerId, pHandle->subKey, vgId, formatBuf, pRequest->reqId);
return 0;
} else {
// no poll occurs in this vnode for this topic, let's seek to the right offset value.
if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEAST) {
if (pRequest->useSnapshot) {
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey:%s, vgId:%d, (earliest) set offset to be snapshot",
consumerId, pHandle->subKey, vgId);
if (pHandle->fetchMeta) {
tqOffsetResetToMeta(pOffsetVal, 0);
} else {
tqOffsetResetToData(pOffsetVal, 0, 0);
}
} else {
pHandle->pRef = walRefFirstVer(pTq->pVnode->pWal, pHandle->pRef);
if (pHandle->pRef == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
// offset set to previous version when init
tqOffsetResetToLog(pOffsetVal, pHandle->pRef->refVer - 1);
}
} else if (reqOffset.type == TMQ_OFFSET__RESET_LATEST) {
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
SMqDataRsp dataRsp = {0};
tqInitDataRsp(&dataRsp, pRequest, pHandle->execHandle.subType);
tqOffsetResetToLog(&dataRsp.rspOffset, walGetLastVer(pTq->pVnode->pWal));
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, (latest) offset reset to %" PRId64, consumerId,
pHandle->subKey, vgId, dataRsp.rspOffset.version);
int32_t code = tqSendDataRsp(pTq, pMsg, pRequest, &dataRsp, TMQ_MSG_TYPE__POLL_RSP);
tDeleteSMqDataRsp(&dataRsp);
*pBlockReturned = true;
return code;
} else {
STaosxRsp taosxRsp = {0};
tqInitTaosxRsp(&taosxRsp, pRequest);
tqOffsetResetToLog(&taosxRsp.rspOffset, walGetLastVer(pTq->pVnode->pWal));
int32_t code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP);
tDeleteSTaosxRsp(&taosxRsp);
*pBlockReturned = true;
return code;
}
} else if (reqOffset.type == TMQ_OFFSET__RESET_NONE) {
tqError("tmq poll: subkey:%s, no offset committed for consumer:0x%" PRIx64
" in vg %d, subkey %s, reset none failed",
pHandle->subKey, consumerId, vgId, pRequest->subKey);
terrno = TSDB_CODE_TQ_NO_COMMITTED_OFFSET;
return -1;
}
}
return 0;
}
#define IS_OFFSET_RESET_TYPE(_t) ((_t) < 0)
static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest,
SRpcMsg* pMsg, STqOffsetVal* pOffset) {
uint64_t consumerId = pRequest->consumerId;
int32_t vgId = TD_VID(pTq->pVnode);
SMqDataRsp dataRsp = {0};
tqInitDataRsp(&dataRsp, pRequest, pHandle->execHandle.subType);
// lock
taosWLockLatch(&pTq->lock);
qSetTaskId(pHandle->execHandle.task, consumerId, pRequest->reqId);
int code = tqScanData(pTq, pHandle, &dataRsp, pOffset);
if (code != 0) {
goto end;
}
// till now, all data has been transferred to the consumer; new data needs to be pushed to the client once it arrives.
if (dataRsp.blockNum == 0 && dataRsp.reqOffset.type == TMQ_OFFSET__LOG &&
dataRsp.reqOffset.version == dataRsp.rspOffset.version && pHandle->consumerId == pRequest->consumerId) {
code = tqRegisterPushEntry(pTq, pHandle, pRequest, pMsg, &dataRsp, TMQ_MSG_TYPE__POLL_RSP);
taosWUnLockLatch(&pTq->lock);
return code;
}
code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&dataRsp, TMQ_MSG_TYPE__POLL_RSP);
// NOTE: this pHandle->consumerId may have been changed already.
end : {
char buf[80] = {0};
tFormatOffset(buf, 80, &dataRsp.rspOffset);
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, rsp block:%d, rsp offset type:%s, reqId:0x%" PRIx64
" code:%d",
consumerId, pHandle->subKey, vgId, dataRsp.blockNum, buf, pRequest->reqId, code);
taosWUnLockLatch(&pTq->lock);
tDeleteSMqDataRsp(&dataRsp);
}
return code;
}
static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest,
SRpcMsg* pMsg, STqOffsetVal* offset) {
int code = 0;
int32_t vgId = TD_VID(pTq->pVnode);
SWalCkHead* pCkHead = NULL;
SMqMetaRsp metaRsp = {0};
STaosxRsp taosxRsp = {0};
tqInitTaosxRsp(&taosxRsp, pRequest);
if (offset->type != TMQ_OFFSET__LOG) {
if (tqScanTaosx(pTq, pHandle, &taosxRsp, &metaRsp, offset) < 0) {
return -1;
}
if (metaRsp.metaRspLen > 0) {
code = tqSendMetaPollRsp(pTq, pMsg, pRequest, &metaRsp);
tqDebug("tmq poll: consumer:0x%" PRIx64 " subkey:%s vgId:%d, send meta offset type:%d,uid:%" PRId64
",ts:%" PRId64,
pRequest->consumerId, pHandle->subKey, vgId, metaRsp.rspOffset.type, metaRsp.rspOffset.uid,
metaRsp.rspOffset.ts);
taosMemoryFree(metaRsp.metaRsp);
tDeleteSTaosxRsp(&taosxRsp);
return code;
}
tqDebug("taosx poll: consumer:0x%" PRIx64 " subkey:%s vgId:%d, send data blockNum:%d, offset type:%d,uid:%" PRId64
",ts:%" PRId64,
pRequest->consumerId, pHandle->subKey, vgId, taosxRsp.blockNum, taosxRsp.rspOffset.type,
taosxRsp.rspOffset.uid, taosxRsp.rspOffset.ts);
if (taosxRsp.blockNum > 0) {
code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP);
tDeleteSTaosxRsp(&taosxRsp);
return code;
} else {
*offset = taosxRsp.rspOffset;
}
}
if (offset->type == TMQ_OFFSET__LOG) {
int64_t fetchVer = offset->version + 1;
pCkHead = taosMemoryMalloc(sizeof(SWalCkHead) + 2048);
if (pCkHead == NULL) {
tDeleteSTaosxRsp(&taosxRsp);
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
walSetReaderCapacity(pHandle->pWalReader, 2048);
int totalRows = 0;
while (1) {
int32_t savedEpoch = atomic_load_32(&pHandle->epoch);
if (savedEpoch > pRequest->epoch) {
tqWarn("tmq poll: consumer:0x%" PRIx64 " (epoch %d), subkey:%s vgId:%d offset %" PRId64
", found new consumer epoch %d, discard req epoch %d",
pRequest->consumerId, pRequest->epoch, pHandle->subKey, vgId, fetchVer, savedEpoch, pRequest->epoch);
break;
}
if (tqFetchLog(pTq, pHandle, &fetchVer, &pCkHead, pRequest->reqId) < 0) {
tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);
code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP);
tDeleteSTaosxRsp(&taosxRsp);
taosMemoryFreeClear(pCkHead);
return code;
}
SWalCont* pHead = &pCkHead->head;
tqDebug("tmq poll: consumer:0x%" PRIx64 " (epoch %d) iter log, vgId:%d offset %" PRId64 " msgType %d",
pRequest->consumerId, pRequest->epoch, vgId, fetchVer, pHead->msgType);
// process meta
if (pHead->msgType != TDMT_VND_SUBMIT) {
if (totalRows > 0) {
tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer - 1);
code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP);
tDeleteSTaosxRsp(&taosxRsp);
taosMemoryFreeClear(pCkHead);
return code;
}
tqDebug("fetch meta msg, ver:%" PRId64 ", type:%s", pHead->version, TMSG_INFO(pHead->msgType));
tqOffsetResetToLog(&metaRsp.rspOffset, fetchVer);
metaRsp.resMsgType = pHead->msgType;
metaRsp.metaRspLen = pHead->bodyLen;
metaRsp.metaRsp = pHead->body;
if (tqSendMetaPollRsp(pTq, pMsg, pRequest, &metaRsp) < 0) {
code = -1;
taosMemoryFreeClear(pCkHead);
tDeleteSTaosxRsp(&taosxRsp);
return code;
}
code = 0;
taosMemoryFreeClear(pCkHead);
tDeleteSTaosxRsp(&taosxRsp);
return code;
}
// process data
SPackedData submit = {
.msgStr = POINTER_SHIFT(pHead->body, sizeof(SSubmitReq2Msg)),
.msgLen = pHead->bodyLen - sizeof(SSubmitReq2Msg),
.ver = pHead->version,
};
if (tqTaosxScanLog(pTq, pHandle, submit, &taosxRsp, &totalRows) < 0) {
tqError("tmq poll: tqTaosxScanLog error %" PRId64 ", in vgId:%d, subkey %s", pRequest->consumerId, vgId,
pRequest->subKey);
taosMemoryFreeClear(pCkHead);
tDeleteSTaosxRsp(&taosxRsp);
return -1;
}
if (totalRows >= 4096 || taosxRsp.createTableNum > 0) {
tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);
code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP);
tDeleteSTaosxRsp(&taosxRsp);
taosMemoryFreeClear(pCkHead);
return code;
} else {
fetchVer++;
}
}
}
tDeleteSTaosxRsp(&taosxRsp);
taosMemoryFreeClear(pCkHead);
return 0;
}
static int32_t doPollDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg) {
int32_t code = -1;
STqOffsetVal offset = {0};
STqOffsetVal reqOffset = pRequest->reqOffset;
// 1. reset the offset if needed
if (IS_OFFSET_RESET_TYPE(reqOffset.type)) {
// handle the reset offset cases, according to the consumer's choice.
bool blockReturned = false;
code = extractResetOffsetVal(&offset, pTq, pHandle, pRequest, pMsg, &blockReturned);
if (code != 0) {
return code;
}
// empty block returned, quit
if (blockReturned) {
return 0;
}
} else { // use the consumer specified offset
// the offset value may not increase monotonically??
offset = reqOffset;
}
// this is a normal subscribe requirement
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
return extractDataAndRspForNormalSubscribe(pTq, pHandle, pRequest, pMsg, &offset);
}
// todo handle the case where re-balance occurs.
// for taosx
return extractDataAndRspForDbStbSubscribe(pTq, pHandle, pRequest, pMsg, &offset);
}
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
SMqPollReq req = {0};
if (tDeserializeSMqPollReq(pMsg->pCont, pMsg->contLen, &req) < 0) {
@ -689,7 +341,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
tqDebug("tmq poll: consumer:0x%" PRIx64 " (epoch %d), subkey %s, recv poll req vgId:%d, req:%s, reqId:0x%" PRIx64,
consumerId, req.epoch, pHandle->subKey, vgId, buf, req.reqId);
return doPollDataForMq(pTq, pHandle, &req, pMsg);
return tqExtractDataForMq(pTq, pHandle, &req, pMsg);
}
int32_t tqProcessDeleteSubReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) {
@ -815,10 +467,10 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
&pHandle->execHandle.numOfCols, req.newConsumerId);
void* scanner = NULL;
qExtractStreamScanner(pHandle->execHandle.task, &scanner);
pHandle->execHandle.pExecReader = qExtractReaderFromStreamScanner(scanner);
pHandle->execHandle.pTqReader = qExtractReaderFromStreamScanner(scanner);
} else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) {
pHandle->pWalReader = walOpenReader(pVnode->pWal, NULL);
pHandle->execHandle.pExecReader = tqOpenReader(pVnode);
pHandle->execHandle.pTqReader = tqOpenReader(pVnode);
pHandle->execHandle.execDb.pFilterOutTbUid =
taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
@ -837,8 +489,8 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i);
tqDebug("vgId:%d, idx %d, uid:%" PRId64, vgId, i, tbUid);
}
pHandle->execHandle.pExecReader = tqOpenReader(pVnode);
tqReaderSetTbUidList(pHandle->execHandle.pExecReader, tbUidList);
pHandle->execHandle.pTqReader = tqOpenReader(pVnode);
tqReaderSetTbUidList(pHandle->execHandle.pTqReader, tbUidList);
taosArrayDestroy(tbUidList);
buildSnapContext(handle.meta, handle.version, req.suid, pHandle->execHandle.subType, pHandle->fetchMeta,
@ -874,7 +526,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
atomic_store_32(&pHandle->epoch, -1);
// remove if it has been register in the push manager, and return one empty block to consumer
tqUnregisterPushEntry(pTq, req.subKey, (int32_t)strlen(req.subKey), pHandle->consumerId, true);
tqUnregisterPushHandle(pTq, req.subKey, (int32_t)strlen(req.subKey), pHandle->consumerId, true);
atomic_store_64(&pHandle->consumerId, req.newConsumerId);
atomic_add_fetch_32(&pHandle->epoch, 1);
@ -896,16 +548,14 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
}
int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
#if 0
if (pTask->taskLevel == TASK_LEVEL__AGG) {
A(taosArrayGetSize(pTask->childEpInfo) != 0);
}
#endif
// todo extract method
char buf[128] = {0};
sprintf(buf, "0x%"PRIx64"-%d", pTask->id.streamId, pTask->id.taskId);
int32_t vgId = TD_VID(pTq->pVnode);
pTask->id.idStr = taosStrdup(buf);
pTask->refCnt = 1;
pTask->schedStatus = TASK_SCHED_STATUS__INACTIVE;
pTask->status.schedStatus = TASK_SCHED_STATUS__INACTIVE;
pTask->inputQueue = streamQueueOpen();
pTask->outputQueue = streamQueueOpen();
@ -916,11 +566,14 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
pTask->inputStatus = TASK_INPUT_STATUS__NORMAL;
pTask->outputStatus = TASK_OUTPUT_STATUS__NORMAL;
pTask->pMsgCb = &pTq->pVnode->msgCb;
pTask->startVer = ver;
pTask->pMeta = pTq->pStreamMeta;
pTask->chkInfo.version = ver;
// expand executor
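// a fill-history task starts in wait-downstream state; all other tasks start in restore state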
if (pTask->fillHistory) {
pTask->taskStatus = TASK_STATUS__WAIT_DOWNSTREAM;
pTask->status.taskStatus = TASK_STATUS__WAIT_DOWNSTREAM;
} else {
pTask->status.taskStatus = TASK_STATUS__RESTORE;
}
if (pTask->taskLevel == TASK_LEVEL__SOURCE) {
@ -930,14 +583,10 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
}
SReadHandle handle = {
.meta = pTq->pVnode->pMeta,
.vnode = pTq->pVnode,
.initTqReader = 1,
.pStateBackend = pTask->pState,
};
.meta = pTq->pVnode->pMeta, .vnode = pTq->pVnode, .initTqReader = 1, .pStateBackend = pTask->pState};
pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle, vgId);
if (pTask->exec.executor == NULL) {
pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle, vgId);
if (pTask->exec.pExecutor == NULL) {
return -1;
}
@ -946,14 +595,12 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
if (pTask->pState == NULL) {
return -1;
}
SReadHandle mgHandle = {
.vnode = NULL,
.numOfVgroups = (int32_t)taosArrayGetSize(pTask->childEpInfo),
.pStateBackend = pTask->pState,
};
pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &mgHandle, vgId);
if (pTask->exec.executor == NULL) {
int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTask->childEpInfo);
SReadHandle mgHandle = { .vnode = NULL, .numOfVgroups = numOfVgroups, .pStateBackend = pTask->pState};
pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &mgHandle, vgId);
if (pTask->exec.pExecutor == NULL) {
return -1;
}
}
@ -974,16 +621,20 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
ver1 = info.skmVer;
}
pTask->tbSink.pTSchema =
tBuildTSchema(pTask->tbSink.pSchemaWrapper->pSchema, pTask->tbSink.pSchemaWrapper->nCols, ver1);
if (pTask->tbSink.pTSchema == NULL) {
SSchemaWrapper* pschemaWrapper = pTask->tbSink.pSchemaWrapper;
pTask->tbSink.pTSchema = tBuildTSchema(pschemaWrapper->pSchema, pschemaWrapper->nCols, ver1);
if(pTask->tbSink.pTSchema == NULL) {
return -1;
}
}
if (pTask->taskLevel == TASK_LEVEL__SOURCE) {
pTask->exec.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
}
streamSetupTrigger(pTask);
tqInfo("expand stream task on vg %d, task id %d, child id %d, level %d", vgId, pTask->taskId, pTask->selfChildId,
pTask->taskLevel);
tqInfo("vgId:%d expand stream task, s-task:%s, ver:%" PRId64 " child id:%d, level:%d", vgId, pTask->id.idStr,
pTask->chkInfo.version, pTask->selfChildId, pTask->taskLevel);
return 0;
}
@ -1006,18 +657,24 @@ int32_t tqProcessStreamTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) {
.upstreamNodeId = req.upstreamNodeId,
.upstreamTaskId = req.upstreamTaskId,
};
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
if (pTask && atomic_load_8(&pTask->taskStatus) == TASK_STATUS__NORMAL) {
rsp.status = 1;
if (pTask) {
rsp.status = (atomic_load_8(&pTask->status.taskStatus) == TASK_STATUS__NORMAL) ? 1 : 0;
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
tqDebug("tq recv task check req(reqId:0x%" PRIx64
") %d at node %d task status:%d, check req from task %d at node %d, rsp status %d",
rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, pTask->status.taskStatus, rsp.upstreamTaskId,
rsp.upstreamNodeId, rsp.status);
} else {
rsp.status = 0;
tqDebug("tq recv task check(taskId:%d not built yet) req(reqId:0x%" PRIx64
") %d at node %d, check req from task %d at node %d, rsp status %d",
taskId, rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, rsp.upstreamTaskId, rsp.upstreamNodeId,
rsp.status);
}
if (pTask) streamMetaReleaseTask(pTq->pStreamMeta, pTask);
tqDebug("tq recv task check req(reqId:0x%" PRIx64 ") %d at node %d check req from task %d at node %d, status %d",
rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
SEncoder encoder;
int32_t code;
int32_t len;
@ -1035,13 +692,7 @@ int32_t tqProcessStreamTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) {
tEncodeSStreamTaskCheckRsp(&encoder, &rsp);
tEncoderClear(&encoder);
SRpcMsg rspMsg = {
.code = 0,
.pCont = buf,
.contLen = sizeof(SMsgHead) + len,
.info = pMsg->info,
};
SRpcMsg rspMsg = { .code = 0, .pCont = buf, .contLen = sizeof(SMsgHead) + len, .info = pMsg->info };
tmsgSendRsp(&rspMsg);
return 0;
}
@ -1057,8 +708,8 @@ int32_t tqProcessStreamTaskCheckRsp(STQ* pTq, int64_t sversion, char* msg, int32
tDecoderClear(&decoder);
return -1;
}
tDecoderClear(&decoder);
tDecoderClear(&decoder);
tqDebug("tq recv task check rsp(reqId:0x%" PRIx64 ") %d at node %d check req from task %d at node %d, status %d",
rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
@ -1090,17 +741,20 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t sversion, char* msg, int32_t ms
SDecoder decoder;
tDecoderInit(&decoder, (uint8_t*)msg, msgLen);
code = tDecodeSStreamTask(&decoder, pTask);
code = tDecodeStreamTask(&decoder, pTask);
if (code < 0) {
tDecoderClear(&decoder);
taosMemoryFree(pTask);
return -1;
}
tDecoderClear(&decoder);
// 2.save task
code = streamMetaAddTask(pTq->pStreamMeta, sversion, pTask);
code = streamMetaAddDeployedTask(pTq->pStreamMeta, sversion, pTask);
if (code < 0) {
tqError("vgId:%d failed to add s-task:%s, total:%d", TD_VID(pTq->pVnode), pTask->id.idStr,
streamMetaGetNumOfTasks(pTq->pStreamMeta));
return -1;
}
@ -1109,6 +763,8 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t sversion, char* msg, int32_t ms
streamTaskCheckDownstream(pTask, sversion);
}
tqDebug("vgId:%d s-task:%s is deployed and add meta from mnd, status:%d, total:%d", TD_VID(pTq->pVnode),
pTask->id.idStr, pTask->status.taskStatus, streamMetaGetNumOfTasks(pTq->pStreamMeta));
return 0;
}
@ -1124,7 +780,7 @@ int32_t tqProcessTaskRecover1Req(STQ* pTq, SRpcMsg* pMsg) {
}
// check param
int64_t fillVer1 = pTask->startVer;
int64_t fillVer1 = pTask->chkInfo.version;
if (fillVer1 <= 0) {
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return -1;
@ -1133,7 +789,7 @@ int32_t tqProcessTaskRecover1Req(STQ* pTq, SRpcMsg* pMsg) {
// do recovery step 1
streamSourceRecoverScanStep1(pTask);
if (atomic_load_8(&pTask->taskStatus) == TASK_STATUS__DROPPING) {
if (atomic_load_8(&pTask->status.taskStatus) == TASK_STATUS__DROPPING) {
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return 0;
}
@ -1148,7 +804,7 @@ int32_t tqProcessTaskRecover1Req(STQ* pTq, SRpcMsg* pMsg) {
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
if (atomic_load_8(&pTask->taskStatus) == TASK_STATUS__DROPPING) {
if (atomic_load_8(&pTask->status.taskStatus) == TASK_STATUS__DROPPING) {
return 0;
}
@ -1190,7 +846,7 @@ int32_t tqProcessTaskRecover2Req(STQ* pTq, int64_t sversion, char* msg, int32_t
return -1;
}
if (atomic_load_8(&pTask->taskStatus) == TASK_STATUS__DROPPING) {
if (atomic_load_8(&pTask->status.taskStatus) == TASK_STATUS__DROPPING) {
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return 0;
}
@ -1309,7 +965,7 @@ int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver) {
SStreamTask* pTask = *(SStreamTask**)pIter;
if (pTask->taskLevel != TASK_LEVEL__SOURCE) continue;
qDebug("delete req enqueue stream task: %d, ver: %" PRId64, pTask->taskId, ver);
qDebug("delete req enqueue stream task: %d, ver: %" PRId64, pTask->id.taskId, ver);
if (!failed) {
SStreamRefDataBlock* pRefBlock = taosAllocateQitem(sizeof(SStreamRefDataBlock), DEF_QITEM, 0);
@ -1318,8 +974,8 @@ int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver) {
pRefBlock->dataRef = pRef;
atomic_add_fetch_32(pRefBlock->dataRef, 1);
if (tAppendDataForStream(pTask, (SStreamQueueItem*)pRefBlock) < 0) {
qError("stream task input del failed, task id %d", pTask->taskId);
if (tAppendDataToInputQueue(pTask, (SStreamQueueItem*)pRefBlock) < 0) {
qError("stream task input del failed, task id %d", pTask->id.taskId);
atomic_sub_fetch_32(pRef, 1);
taosFreeQitem(pRefBlock);
@ -1327,7 +983,7 @@ int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver) {
}
if (streamSchedExec(pTask) < 0) {
qError("stream task launch failed, task id %d", pTask->taskId);
qError("stream task launch failed, task id %d", pTask->id.taskId);
continue;
}
@ -1353,13 +1009,13 @@ int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver) {
taosArrayPush(pStreamBlock->blocks, &block);
if (!failed) {
if (tAppendDataForStream(pTask, (SStreamQueueItem*)pStreamBlock) < 0) {
qError("stream task input del failed, task id %d", pTask->taskId);
if (tAppendDataToInputQueue(pTask, (SStreamQueueItem*)pStreamBlock) < 0) {
qError("stream task input del failed, task id %d", pTask->id.taskId);
continue;
}
if (streamSchedExec(pTask) < 0) {
qError("stream task launch failed, task id %d", pTask->taskId);
qError("stream task launch failed, task id %d", pTask->id.taskId);
continue;
}
} else {
@ -1372,17 +1028,32 @@ int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver) {
return 0;
}
int32_t tqProcessSubmitReq(STQ* pTq, SPackedData submit) {
void* pIter = NULL;
bool succ = true;
static int32_t addSubmitBlockNLaunchTask(STqOffsetStore* pOffsetStore, SStreamTask* pTask, SStreamDataSubmit2* pSubmit,
const char* key, int64_t ver) {
doSaveTaskOffset(pOffsetStore, key, ver);
int32_t code = tqAddInputBlockNLaunchTask(pTask, (SStreamQueueItem*)pSubmit, ver);
SStreamDataSubmit2* pSubmit = streamDataSubmitNew(submit);
// remove the saved offset if all steps completed successfully.
if (code == TSDB_CODE_SUCCESS) {
tqOffsetDelete(pOffsetStore, key);
}
return code;
}
int32_t tqProcessSubmitReq(STQ* pTq, SPackedData submit) {
#if 0
void* pIter = NULL;
SStreamDataSubmit2* pSubmit = streamDataSubmitNew(submit, STREAM_INPUT__DATA_SUBMIT);
if (pSubmit == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
tqError("failed to create data submit for stream since out of memory");
succ = false;
saveOffsetForAllTasks(pTq, submit.ver);
return -1;
}
SArray* pInputQueueFullTasks = taosArrayInit(4, POINTER_BYTES);
while (1) {
pIter = taosHashIterate(pTq->pStreamMeta->pTasks, pIter);
if (pIter == NULL) {
@ -1394,47 +1065,75 @@ int32_t tqProcessSubmitReq(STQ* pTq, SPackedData submit) {
continue;
}
if (pTask->taskStatus == TASK_STATUS__RECOVER_PREPARE || pTask->taskStatus == TASK_STATUS__WAIT_DOWNSTREAM) {
tqDebug("stream task:%d skip push data, not ready for processing, status %d", pTask->taskId, pTask->taskStatus);
if (pTask->status.taskStatus == TASK_STATUS__RECOVER_PREPARE || pTask->status.taskStatus == TASK_STATUS__WAIT_DOWNSTREAM) {
tqDebug("stream task:%d skip push data, not ready for processing, status %d", pTask->id.taskId,
pTask->status.taskStatus);
continue;
}
tqDebug("data submit enqueue stream task:%d, ver: %" PRId64, pTask->taskId, submit.ver);
if (succ) {
int32_t code = tAppendDataForStream(pTask, (SStreamQueueItem*)pSubmit);
if (code < 0) {
// let's handle the back pressure
// check if offset value exists
char key[128] = {0};
createStreamTaskOffsetKey(key, pTask->id.streamId, pTask->id.taskId);
tqError("stream task:%d failed to put into queue for, too many", pTask->taskId);
continue;
if (tInputQueueIsFull(pTask)) {
STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, key);
int64_t ver = submit.ver;
if (pOffset == NULL) {
doSaveTaskOffset(pTq->pOffsetStore, key, submit.ver);
} else {
ver = pOffset->val.version;
}
if (streamSchedExec(pTask) < 0) {
tqError("stream task:%d launch failed, code:%s", pTask->taskId, tstrerror(terrno));
continue;
}
} else {
streamTaskInputFail(pTask);
tqDebug("s-task:%s input queue is full, discard submit block, ver:%" PRId64, pTask->id.idStr, ver);
taosArrayPush(pInputQueueFullTasks, &pTask);
continue;
}
// check if offset value exists
STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, key);
ASSERT(pOffset == NULL);
addSubmitBlockNLaunchTask(pTq->pOffsetStore, pTask, pSubmit, key, submit.ver);
}
if (pSubmit != NULL) {
streamDataSubmitDestroy(pSubmit);
taosFreeQitem(pSubmit);
}
streamDataSubmitDestroy(pSubmit);
taosFreeQitem(pSubmit);
#endif
return succ ? 0 : -1;
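// the direct push path above is disabled (#if 0); instead, trigger a wal scan so stream tasks consume the submit data from the wal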
tqStartStreamTasks(pTq);
return 0;
}
int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) {
SStreamTaskRunReq* pReq = pMsg->pCont;
int32_t taskId = pReq->taskId;
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
if (pTask) {
streamProcessRunReq(pTask);
int32_t taskId = pReq->taskId;
int32_t vgId = TD_VID(pTq->pVnode);
if (taskId == WAL_READ_TASKS_ID) { // all tasks extract submit data from the wal
tqStreamTasksScanWal(pTq);
return 0;
}
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
if (pTask != NULL) {
if (pTask->status.taskStatus == TASK_STATUS__NORMAL) {
tqDebug("vgId:%d s-task:%s start to process run req", vgId, pTask->id.idStr);
streamProcessRunReq(pTask);
} else if (pTask->status.taskStatus == TASK_STATUS__RESTORE) {
tqDebug("vgId:%d s-task:%s start to process block from wal, last chk point:%" PRId64, vgId,
pTask->id.idStr, pTask->chkInfo.version);
streamProcessRunReq(pTask);
} else {
tqDebug("vgId:%d s-task:%s ignore run req since not in ready state", vgId, pTask->id.idStr);
}
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
tqStartStreamTasks(pTq);
return 0;
} else {
tqError("vgId:%d failed to found s-task, taskId:%d", vgId, taskId);
return -1;
}
}
@ -1447,14 +1146,10 @@ int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec) {
SDecoder decoder;
tDecoderInit(&decoder, (uint8_t*)msgBody, msgLen);
tDecodeStreamDispatchReq(&decoder, &req);
int32_t taskId = req.taskId;
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.taskId);
if (pTask) {
SRpcMsg rsp = {
.info = pMsg->info,
.code = 0,
};
SRpcMsg rsp = { .info = pMsg->info, .code = 0 };
streamProcessDispatchReq(pTask, &req, &rsp, exec);
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return 0;
@ -1467,7 +1162,7 @@ int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) {
SStreamDispatchRsp* pRsp = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
int32_t taskId = ntohl(pRsp->upstreamTaskId);
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
tqDebug("recv dispatch rsp, code: %x", pMsg->code);
tqDebug("recv dispatch rsp, code:%x", pMsg->code);
if (pTask) {
streamProcessDispatchRsp(pTask, pRsp, pMsg->code);
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
@ -1495,10 +1190,7 @@ int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg) {
int32_t taskId = req.dstTaskId;
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
if (pTask) {
SRpcMsg rsp = {
.info = pMsg->info,
.code = 0,
};
SRpcMsg rsp = { .info = pMsg->info, .code = 0 };
streamProcessRetrieveReq(pTask, &req, &rsp);
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
tDeleteStreamRetrieveReq(&req);
@ -1534,10 +1226,7 @@ int32_t vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) {
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
if (pTask) {
SRpcMsg rsp = {
.info = pMsg->info,
.code = 0,
};
SRpcMsg rsp = { .info = pMsg->info, .code = 0 };
streamProcessDispatchReq(pTask, &req, &rsp, false);
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
rpcFreeCont(pMsg->pCont);
@ -1554,10 +1243,7 @@ FAIL:
SMsgHead* pRspHead = rpcMallocCont(sizeof(SMsgHead) + sizeof(SStreamDispatchRsp));
if (pRspHead == NULL) {
SRpcMsg rsp = {
.code = TSDB_CODE_OUT_OF_MEMORY,
.info = pMsg->info,
};
SRpcMsg rsp = { .code = TSDB_CODE_OUT_OF_MEMORY, .info = pMsg->info };
tqDebug("send dispatch error rsp, code: %x", code);
tmsgSendRsp(&rsp);
rpcFreeCont(pMsg->pCont);
@ -1575,11 +1261,7 @@ FAIL:
pRsp->inputStatus = TASK_OUTPUT_STATUS__NORMAL;
SRpcMsg rsp = {
.code = code,
.info = pMsg->info,
.contLen = sizeof(SMsgHead) + sizeof(SStreamDispatchRsp),
.pCont = pRspHead,
};
.code = code, .info = pMsg->info, .contLen = sizeof(SMsgHead) + sizeof(SStreamDispatchRsp), .pCont = pRspHead};
tqDebug("send dispatch error rsp, code: %x", code);
tmsgSendRsp(&rsp);
rpcFreeCont(pMsg->pCont);
@ -1588,3 +1270,40 @@ FAIL:
}
int32_t tqCheckLogInWal(STQ* pTq, int64_t sversion) { return sversion <= pTq->walLogLastVer; }
int32_t tqStartStreamTasks(STQ* pTq) {
int32_t vgId = TD_VID(pTq->pVnode);
SStreamMeta* pMeta = pTq->pStreamMeta;
taosWLockLatch(&pMeta->lock);
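// walScan counts pending scan requests; if a scan is already running, only record one more round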
pMeta->walScan += 1;
if (pMeta->walScan > 1) {
tqDebug("vgId:%d wal read task has been launched, remain scan times:%d", vgId, pMeta->walScan);
taosWUnLockLatch(&pTq->pStreamMeta->lock);
return 0;
}
SStreamTaskRunReq* pRunReq = rpcMallocCont(sizeof(SStreamTaskRunReq));
if (pRunReq == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
tqError("vgId:%d failed restore stream tasks, code:%s", vgId, terrstr(terrno));
taosWUnLockLatch(&pTq->pStreamMeta->lock);
return -1;
}
int32_t numOfTasks = taosHashGetSize(pTq->pStreamMeta->pTasks);
tqInfo("vgId:%d start wal scan stream tasks, tasks:%d", vgId, numOfTasks);
initOffsetForAllRestoreTasks(pTq);
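// queue a run request carrying WAL_READ_TASKS_ID so a stream thread performs the wal scan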
pRunReq->head.vgId = vgId;
pRunReq->streamId = 0;
pRunReq->taskId = WAL_READ_TASKS_ID;
SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)};
tmsgPutToQueue(&pTq->pVnode->msgCb, STREAM_QUEUE, &msg);
taosWUnLockLatch(&pTq->pStreamMeta->lock);
return 0;
}

View File

@ -16,10 +16,13 @@
#include "tq.h"
int tqCommit(STQ* pTq) {
#if 0
// the stream meta commit is not aligned with the vnode commit
if (streamMetaCommit(pTq->pStreamMeta) < 0) {
tqError("vgId:%d, failed to commit stream meta since %s", TD_VID(pTq->pVnode), terrstr());
return -1;
}
#endif
return tqOffsetCommitFile(pTq->pOffsetStore);
}

View File

@ -320,15 +320,15 @@ int32_t tqMetaRestoreHandle(STQ* pTq) {
code = -1;
goto end;
}
handle.execHandle.pExecReader = qExtractReaderFromStreamScanner(scanner);
if (handle.execHandle.pExecReader == NULL) {
handle.execHandle.pTqReader = qExtractReaderFromStreamScanner(scanner);
if (handle.execHandle.pTqReader == NULL) {
tqError("cannot extract exec reader for %s", handle.subKey);
code = -1;
goto end;
}
} else if (handle.execHandle.subType == TOPIC_SUB_TYPE__DB) {
handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
handle.execHandle.pExecReader = tqOpenReader(pTq->pVnode);
handle.execHandle.pTqReader = tqOpenReader(pTq->pVnode);
buildSnapContext(reader.meta, reader.version, 0, handle.execHandle.subType, handle.fetchMeta,
(SSnapContext**)(&reader.sContext));
@ -343,8 +343,8 @@ int32_t tqMetaRestoreHandle(STQ* pTq) {
int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i);
tqDebug("vgId:%d, idx %d, uid:%" PRId64, vgId, i, tbUid);
}
handle.execHandle.pExecReader = tqOpenReader(pTq->pVnode);
tqReaderSetTbUidList(handle.execHandle.pExecReader, tbUidList);
handle.execHandle.pTqReader = tqOpenReader(pTq->pVnode);
tqReaderSetTbUidList(handle.execHandle.pTqReader, tbUidList);
taosArrayDestroy(tbUidList);
buildSnapContext(reader.meta, reader.version, handle.execHandle.execTb.suid, handle.execHandle.subType,

View File

@ -128,31 +128,35 @@ int32_t tqOffsetDelete(STqOffsetStore* pStore, const char* subscribeKey) {
}
int32_t tqOffsetCommitFile(STqOffsetStore* pStore) {
if (!pStore->needCommit) return 0;
if (!pStore->needCommit) {
return 0;
}
// TODO file name should be with a newer version
char* fname = tqOffsetBuildFName(pStore->pTq->path, 0);
TdFilePtr pFile = taosOpenFile(fname, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND);
if (pFile == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
int32_t err = terrno;
const char* errStr = tstrerror(err);
int32_t sysErr = errno;
const char* sysErrStr = strerror(errno);
tqError("vgId:%d, cannot open file %s when commit offset since %s", pStore->pTq->pVnode->config.vgId, fname,
sysErrStr);
const char* err = strerror(errno);
tqError("vgId:%d, failed to open offset file %s, since %s", TD_VID(pStore->pTq->pVnode), fname, err);
taosMemoryFree(fname);
return -1;
}
taosMemoryFree(fname);
void* pIter = NULL;
while (1) {
pIter = taosHashIterate(pStore->pHash, pIter);
if (pIter == NULL) break;
if (pIter == NULL) {
break;
}
STqOffset* pOffset = (STqOffset*)pIter;
int32_t bodyLen;
int32_t code;
tEncodeSize(tEncodeSTqOffset, pOffset, bodyLen, code);
if (code < 0) {
taosHashCancelIterate(pStore->pHash, pIter);
return -1;
@ -166,6 +170,7 @@ int32_t tqOffsetCommitFile(STqOffsetStore* pStore) {
SEncoder encoder;
tEncoderInit(&encoder, abuf, bodyLen);
tEncodeSTqOffset(&encoder, pOffset);
// write file
int64_t writeLen;
if ((writeLen = taosWriteFile(pFile, buf, totLen)) != totLen) {
@ -174,8 +179,10 @@ int32_t tqOffsetCommitFile(STqOffsetStore* pStore) {
taosMemoryFree(buf);
return -1;
}
taosMemoryFree(buf);
}
// close and rename file
taosCloseFile(&pFile);
pStore->needCommit = 0;

View File

@ -323,15 +323,22 @@ int32_t tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t v
taosWUnLockLatch(&pTq->lock);
}
// push data for stream processing
if (!tsDisableStream && vnodeIsRoleLeader(pTq->pVnode)) {
tqDebug("handle submit, restore:%d, size:%d", pTq->pVnode->restored, (int)taosHashGetSize(pTq->pStreamMeta->pTasks));
// push data for stream processing:
// 1. the vnode has already been restored.
// 2. the vnode should be the leader.
// 3. the stream is not suspended yet.
if (!tsDisableStream && vnodeIsRoleLeader(pTq->pVnode) && pTq->pVnode->restored) {
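// nothing to do if this vnode hosts no stream tasks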
if (taosHashGetSize(pTq->pStreamMeta->pTasks) == 0) {
return 0;
}
if (msgType == TDMT_VND_SUBMIT) {
#if 0
void* data = taosMemoryMalloc(len);
if (data == NULL) {
// todo: for all streams in this vnode, keep this offset in the offset file, wait for a moment, and then retry
terrno = TSDB_CODE_OUT_OF_MEMORY;
tqError("vgId:%d, failed to copy submit data for stream processing, since out of memory", vgId);
return -1;
@ -340,7 +347,10 @@ int32_t tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t v
memcpy(data, pReq, len);
SPackedData submit = {.msgStr = data, .msgLen = len, .ver = ver};
tqDebug("tq copy write msg %p %d %" PRId64 " from %p", data, len, ver, pReq);
tqDebug("vgId:%d tq copy submit msg:%p len:%d ver:%" PRId64 " from %p for stream", vgId, data, len, ver, pReq);
tqProcessSubmitReq(pTq, submit);
#endif
SPackedData submit = {0};
tqProcessSubmitReq(pTq, submit);
}
@ -352,7 +362,7 @@ int32_t tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t v
return 0;
}
int32_t tqRegisterPushEntry(STQ* pTq, void* pHandle, const SMqPollReq* pRequest, SRpcMsg* pRpcMsg, SMqDataRsp* pDataRsp,
int32_t tqRegisterPushHandle(STQ* pTq, void* pHandle, const SMqPollReq* pRequest, SRpcMsg* pRpcMsg, SMqDataRsp* pDataRsp,
int32_t type) {
uint64_t consumerId = pRequest->consumerId;
int32_t vgId = TD_VID(pTq->pVnode);
@ -389,7 +399,7 @@ int32_t tqRegisterPushEntry(STQ* pTq, void* pHandle, const SMqPollReq* pRequest,
return 0;
}
int32_t tqUnregisterPushEntry(STQ* pTq, const char* pKey, int32_t keyLen, uint64_t consumerId, bool rspConsumer) {
int32_t tqUnregisterPushHandle(STQ* pTq, const char* pKey, int32_t keyLen, uint64_t consumerId, bool rspConsumer) {
int32_t vgId = TD_VID(pTq->pVnode);
STqPushEntry** pEntry = taosHashGet(pTq->pPushMgr, pKey, keyLen);

View File

@ -113,7 +113,7 @@ bool isValValidForTable(STqHandle* pHandle, SWalCont* pHead) {
}
SMetaReader mr = {0};
metaReaderInit(&mr, pHandle->execHandle.pExecReader->pVnodeMeta, 0);
metaReaderInit(&mr, pHandle->execHandle.pTqReader->pVnodeMeta, 0);
if (metaGetTableEntryByName(&mr, req.tbName) < 0) {
metaReaderClear(&mr);
@ -262,8 +262,6 @@ STqReader* tqOpenReader(SVnode* pVnode) {
}
pReader->pVnodeMeta = pVnode->pMeta;
/*pReader->pMsg = NULL;*/
// pReader->ver = -1;
pReader->pColIdList = NULL;
pReader->cachedSchemaVer = 0;
pReader->cachedSchemaSuid = 0;
@ -298,7 +296,29 @@ int32_t tqSeekVer(STqReader* pReader, int64_t ver, const char* id) {
if (walReadSeekVer(pReader->pWalReader, ver) < 0) {
return -1;
}
tqDebug("tmq poll: wal reader seek to ver success ver:%"PRId64" %s", ver, id);
tqDebug("wal reader seek to ver:%"PRId64" %s", ver, id);
return 0;
}
int32_t extractSubmitMsgFromWal(SWalReader* pReader, SPackedData* pPackedData) {
if (walNextValidMsg(pReader) < 0) {
return -1;
}
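// drop the SSubmitReq2Msg header and copy out the remaining submit body, tagged with its wal version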
void* pBody = POINTER_SHIFT(pReader->pHead->head.body, sizeof(SSubmitReq2Msg));
int32_t len = pReader->pHead->head.bodyLen - sizeof(SSubmitReq2Msg);
int64_t ver = pReader->pHead->head.version;
void* data = taosMemoryMalloc(len);
if (data == NULL) {
// todo: for all streams in this vnode, keep this offset in the offset file, wait for a moment, and then retry
terrno = TSDB_CODE_OUT_OF_MEMORY;
tqError("vgId:%d, failed to copy submit data for stream processing, since out of memory", 0);
return -1;
}
memcpy(data, pBody, len);
*pPackedData = (SPackedData){.ver = ver, .msgLen = len, .msgStr = data};
return 0;
}
@ -309,26 +329,28 @@ void tqNextBlock(STqReader* pReader, SFetchRet* ret) {
ret->fetchType = FETCH_TYPE__NONE;
return;
}
void* body = POINTER_SHIFT(pReader->pWalReader->pHead->head.body, sizeof(SSubmitReq2Msg));
void* pBody = POINTER_SHIFT(pReader->pWalReader->pHead->head.body, sizeof(SSubmitReq2Msg));
int32_t bodyLen = pReader->pWalReader->pHead->head.bodyLen - sizeof(SSubmitReq2Msg);
int64_t ver = pReader->pWalReader->pHead->head.version;
tqReaderSetSubmitReq2(pReader, body, bodyLen, ver);
tqReaderSetSubmitMsg(pReader, pBody, bodyLen, ver);
}
while (tqNextDataBlock2(pReader)) {
while (tqNextDataBlock(pReader)) {
memset(&ret->data, 0, sizeof(SSDataBlock));
int32_t code = tqRetrieveDataBlock2(&ret->data, pReader, NULL);
if (code != 0 || ret->data.info.rows == 0) {
continue;
}
ret->fetchType = FETCH_TYPE__DATA;
return;
}
}
}
int32_t tqReaderSetSubmitReq2(STqReader* pReader, void* msgStr, int32_t msgLen, int64_t ver) {
int32_t tqReaderSetSubmitMsg(STqReader* pReader, void* msgStr, int32_t msgLen, int64_t ver) {
pReader->msg2.msgStr = msgStr;
pReader->msg2.msgLen = msgLen;
pReader->msg2.ver = ver;
@ -345,7 +367,7 @@ int32_t tqReaderSetSubmitReq2(STqReader* pReader, void* msgStr, int32_t msgLen,
return 0;
}
bool tqNextDataBlock2(STqReader* pReader) {
bool tqNextDataBlock(STqReader* pReader) {
if (pReader->msg2.msgStr == NULL) {
return false;
}
@ -354,13 +376,20 @@ bool tqNextDataBlock2(STqReader* pReader) {
while (pReader->nextBlk < blockSz) {
tqDebug("tq reader next data block %p, %d %" PRId64 " %d", pReader->msg2.msgStr, pReader->msg2.msgLen,
pReader->msg2.ver, pReader->nextBlk);
SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk);
if (pReader->tbIdHash == NULL) return true;
if (pReader->tbIdHash == NULL) {
return true;
}
void* ret = taosHashGet(pReader->tbIdHash, &pSubmitTbData->uid, sizeof(int64_t));
if (ret != NULL) {
tqDebug("tq reader block found, ver:%"PRId64", uid:%"PRId64, pReader->msg2.ver, pSubmitTbData->uid);
return true;
} else {
tqDebug("tq reader discard block, uid:%"PRId64", continue", pSubmitTbData->uid);
}
pReader->nextBlk++;
}
@ -427,7 +456,10 @@ int32_t tqRetrieveDataBlock2(SSDataBlock* pBlock, STqReader* pReader, SSubmitTbD
SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk);
pReader->nextBlk++;
if (pSubmitTbDataRet) *pSubmitTbDataRet = pSubmitTbData;
if (pSubmitTbDataRet) {
*pSubmitTbDataRet = pSubmitTbData;
}
int32_t sversion = pSubmitTbData->sver;
int64_t suid = pSubmitTbData->suid;
int64_t uid = pSubmitTbData->uid;
@ -900,7 +932,7 @@ int tqReaderSetTbUidList(STqReader* pReader, const SArray* tbUidList) {
return 0;
}
int tqReaderAddTbUidList(STqReader* pReader, const SArray* tbUidList) {
int tqReaderAddTbUidList(STqReader* pReader, const SArray* pTableUidList) {
if (pReader->tbIdHash == NULL) {
pReader->tbIdHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
if (pReader->tbIdHash == NULL) {
@ -909,8 +941,9 @@ int tqReaderAddTbUidList(STqReader* pReader, const SArray* tbUidList) {
}
}
for (int i = 0; i < taosArrayGetSize(tbUidList); i++) {
int64_t* pKey = (int64_t*)taosArrayGet(tbUidList, i);
int32_t numOfTables = taosArrayGetSize(pTableUidList);
for (int i = 0; i < numOfTables; i++) {
int64_t* pKey = (int64_t*)taosArrayGet(pTableUidList, i);
taosHashPut(pReader->tbIdHash, pKey, sizeof(int64_t), NULL, 0);
}
@ -926,30 +959,34 @@ int tqReaderRemoveTbUidList(STqReader* pReader, const SArray* tbUidList) {
return 0;
}
// todo update the table list in wal reader
int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
void* pIter = NULL;
void* pIter = NULL;
int32_t vgId = TD_VID(pTq->pVnode);
// update the table list for each consumer handle
while (1) {
pIter = taosHashIterate(pTq->pHandle, pIter);
if (pIter == NULL) {
break;
}
STqHandle* pExec = (STqHandle*)pIter;
if (pExec->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
int32_t code = qUpdateQualifiedTableId(pExec->execHandle.task, tbUidList, isAdd);
STqHandle* pTqHandle = (STqHandle*)pIter;
if (pTqHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
int32_t code = qUpdateTableListForStreamScanner(pTqHandle->execHandle.task, tbUidList, isAdd);
if (code != 0) {
tqError("update qualified table error for %s", pExec->subKey);
tqError("update qualified table error for %s", pTqHandle->subKey);
continue;
}
} else if (pExec->execHandle.subType == TOPIC_SUB_TYPE__DB) {
} else if (pTqHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) {
if (!isAdd) {
int32_t sz = taosArrayGetSize(tbUidList);
for (int32_t i = 0; i < sz; i++) {
int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i);
taosHashPut(pExec->execHandle.execDb.pFilterOutTbUid, &tbUid, sizeof(int64_t), NULL, 0);
taosHashPut(pTqHandle->execHandle.execDb.pFilterOutTbUid, &tbUid, sizeof(int64_t), NULL, 0);
}
}
} else if (pExec->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
} else if (pTqHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
if (isAdd) {
SArray* qa = taosArrayInit(4, sizeof(tb_uid_t));
SMetaReader mr = {0};
@ -964,35 +1001,43 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
}
tDecoderClear(&mr.coder);
if (mr.me.type != TSDB_CHILD_TABLE || mr.me.ctbEntry.suid != pExec->execHandle.execTb.suid) {
if (mr.me.type != TSDB_CHILD_TABLE || mr.me.ctbEntry.suid != pTqHandle->execHandle.execTb.suid) {
tqDebug("table uid %" PRId64 " does not add to tq handle", *id);
continue;
}
tqDebug("table uid %" PRId64 " add to tq handle", *id);
taosArrayPush(qa, id);
}
metaReaderClear(&mr);
if (taosArrayGetSize(qa) > 0) {
tqReaderAddTbUidList(pExec->execHandle.pExecReader, qa);
tqReaderAddTbUidList(pTqHandle->execHandle.pTqReader, qa);
}
taosArrayDestroy(qa);
} else {
tqReaderRemoveTbUidList(pExec->execHandle.pExecReader, tbUidList);
tqReaderRemoveTbUidList(pTqHandle->execHandle.pTqReader, tbUidList);
}
}
}
// update the table list handle for each stream scanner/wal reader
while (1) {
pIter = taosHashIterate(pTq->pStreamMeta->pTasks, pIter);
if (pIter == NULL) break;
if (pIter == NULL) {
break;
}
SStreamTask* pTask = *(SStreamTask**)pIter;
if (pTask->taskLevel == TASK_LEVEL__SOURCE) {
int32_t code = qUpdateQualifiedTableId(pTask->exec.executor, tbUidList, isAdd);
int32_t code = qUpdateTableListForStreamScanner(pTask->exec.pExecutor, tbUidList, isAdd);
if (code != 0) {
tqError("update qualified table error for stream task %d", pTask->taskId);
tqError("vgId:%d, s-task:%s update qualified table error for stream task", vgId, pTask->id.idStr);
continue;
}
}
}
return 0;
}

View File

@ -0,0 +1,167 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "tq.h"
static int32_t streamTaskReplayWal(SStreamMeta* pStreamMeta, STqOffsetStore* pOffsetStore, bool* pScanIdle);
static int32_t transferToNormalTask(SStreamMeta* pStreamMeta, SArray* pTaskList);
// this function should be executed by stream threads.
// there is a case where the WAL grows faster than the restore procedure can catch up, so the restore
// procedure may never stop.
int tqStreamTasksScanWal(STQ* pTq) {
int32_t vgId = TD_VID(pTq->pVnode);
SStreamMeta* pMeta = pTq->pStreamMeta;
int64_t st = taosGetTimestampMs();
while (1) {
tqInfo("vgId:%d continue check if data in wal are available", vgId);
// check all restore tasks
bool allFull = true;
streamTaskReplayWal(pTq->pStreamMeta, pTq->pOffsetStore, &allFull);
int32_t times = 0;
if (allFull) {
taosWLockLatch(&pMeta->lock);
pMeta->walScan -= 1;
times = pMeta->walScan;
if (pMeta->walScan <= 0) {
taosWUnLockLatch(&pMeta->lock);
break;
}
taosWUnLockLatch(&pMeta->lock);
tqInfo("vgId:%d scan wal for stream tasks for %d times", vgId, times);
}
}
double el = (taosGetTimestampMs() - st) / 1000.0;
tqInfo("vgId:%d scan wal for stream tasks completed, elapsed time:%.2f sec", vgId, el);
// restore wal scan flag
// atomic_store_8(&pTq->pStreamMeta->walScan, 0);
return 0;
}
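
The retry accounting above can be reduced to a small drain-until-idle loop: walScan counts outstanding scan requests, and the worker only exits once a full pass found nothing new and no requests remain. Below is a minimal standalone sketch with illustrative names only (no real SStreamMeta, latch, or task iteration):

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for pMeta->walScan: the number of scan requests still pending. */
static int pendingScanReqs = 2;

static bool scanAllTasksOnce(void) {
  /* pretend this pass found no new WAL data for any task */
  return true; /* true == idle */
}

int main(void) {
  while (1) {
    bool idle = scanAllTasksOnce();
    if (!idle) {
      continue;              /* new data was found, scan again immediately */
    }
    if (--pendingScanReqs <= 0) {
      break;                 /* no outstanding requests left, stop the worker */
    }
  }
  printf("wal scan worker finished\n");
  return 0;
}
```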
//int32_t transferToNormalTask(SStreamMeta* pStreamMeta, SArray* pTaskList) {
// int32_t numOfTask = taosArrayGetSize(pTaskList);
// if (numOfTask <= 0) {
// return TSDB_CODE_SUCCESS;
// }
//
// // todo: add lock
// for (int32_t i = 0; i < numOfTask; ++i) {
// SStreamTask* pTask = taosArrayGetP(pTaskList, i);
// tqDebug("vgId:%d transfer s-task:%s state restore -> ready, checkpoint:%" PRId64 " checkpoint id:%" PRId64,
// pStreamMeta->vgId, pTask->id.idStr, pTask->chkInfo.version, pTask->chkInfo.id);
// taosHashRemove(pStreamMeta->pWalReadTasks, &pTask->id.taskId, sizeof(pTask->id.taskId));
//
// // NOTE: do not change the following order
// atomic_store_8(&pTask->status.taskStatus, TASK_STATUS__NORMAL);
// taosHashPut(pStreamMeta->pTasks, &pTask->id.taskId, sizeof(pTask->id.taskId), &pTask, POINTER_BYTES);
// }
//
// return TSDB_CODE_SUCCESS;
//}
int32_t streamTaskReplayWal(SStreamMeta* pStreamMeta, STqOffsetStore* pOffsetStore, bool* pScanIdle) {
void* pIter = NULL;
int32_t vgId = pStreamMeta->vgId;
*pScanIdle = true;
bool allWalChecked = true;
tqDebug("vgId:%d start to check wal to extract new submit block", vgId);
while (1) {
pIter = taosHashIterate(pStreamMeta->pTasks, pIter);
if (pIter == NULL) {
break;
}
SStreamTask* pTask = *(SStreamTask**)pIter;
if (pTask->taskLevel != TASK_LEVEL__SOURCE) {
continue;
}
if (pTask->status.taskStatus == TASK_STATUS__RECOVER_PREPARE ||
pTask->status.taskStatus == TASK_STATUS__WAIT_DOWNSTREAM) {
tqDebug("s-task:%s skip push data, not ready for processing, status %d", pTask->id.idStr,
pTask->status.taskStatus);
continue;
}
// build the offset-store key for this stream task
char key[128] = {0};
createStreamTaskOffsetKey(key, pTask->id.streamId, pTask->id.taskId);
if (tInputQueueIsFull(pTask)) {
tqDebug("vgId:%d s-task:%s input queue is full, do nothing", vgId, pTask->id.idStr);
continue;
}
*pScanIdle = false;
// check if offset value exists
STqOffset* pOffset = tqOffsetRead(pOffsetStore, key);
ASSERT(pOffset != NULL);
// seek the stored version and extract data from WAL
int32_t code = walReadSeekVer(pTask->exec.pWalReader, pOffset->val.version);
if (code != TSDB_CODE_SUCCESS) { // no data in wal for this task, try the next one
continue;
}
// append the data for the stream
tqDebug("vgId:%d wal reader seek to ver:%" PRId64 " %s", vgId, pOffset->val.version, pTask->id.idStr);
SPackedData packData = {0};
code = extractSubmitMsgFromWal(pTask->exec.pWalReader, &packData);
if (code != TSDB_CODE_SUCCESS) { // failed, continue
continue;
}
SStreamDataSubmit2* p = streamDataSubmitNew(packData, STREAM_INPUT__DATA_SUBMIT);
if (p == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
tqError("%s failed to create data submit for stream since out of memory", pTask->id.idStr);
continue;
}
allWalChecked = false;
tqDebug("s-task:%s submit data extracted from WAL", pTask->id.idStr);
code = tqAddInputBlockNLaunchTask(pTask, (SStreamQueueItem*)p, packData.ver);
if (code == TSDB_CODE_SUCCESS) {
pOffset->val.version = walReaderGetCurrentVer(pTask->exec.pWalReader);
tqDebug("s-task:%s set the ver:%" PRId64 " from WALReader after extract block from WAL", pTask->id.idStr,
pOffset->val.version);
} else {
// do nothing
}
streamDataSubmitDestroy(p);
taosFreeQitem(p);
}
if (allWalChecked) {
*pScanIdle = true;
}
return 0;
}

View File

@ -38,7 +38,7 @@ int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t
}
static int32_t tqAddBlockSchemaToRsp(const STqExecHandle* pExec, STaosxRsp* pRsp) {
SSchemaWrapper* pSW = tCloneSSchemaWrapper(pExec->pExecReader->pSchemaWrapper);
SSchemaWrapper* pSW = tCloneSSchemaWrapper(pExec->pTqReader->pSchemaWrapper);
if (pSW == NULL) {
return -1;
}
@ -137,7 +137,7 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, STaosxRsp* pRsp, SMqMeta
if (pDataBlock != NULL && pDataBlock->info.rows > 0) {
if (pRsp->withTbName) {
if (pOffset->type == TMQ_OFFSET__LOG) {
int64_t uid = pExec->pExecReader->lastBlkUid;
int64_t uid = pExec->pTqReader->lastBlkUid;
if (tqAddTbNameToRsp(pTq, uid, pRsp, 1) < 0) {
continue;
}
@ -203,9 +203,9 @@ int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, STaosxR
SArray* pSchemas = taosArrayInit(0, sizeof(void*));
if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
STqReader* pReader = pExec->pExecReader;
tqReaderSetSubmitReq2(pReader, submit.msgStr, submit.msgLen, submit.ver);
while (tqNextDataBlock2(pReader)) {
STqReader* pReader = pExec->pTqReader;
tqReaderSetSubmitMsg(pReader, submit.msgStr, submit.msgLen, submit.ver);
while (tqNextDataBlock(pReader)) {
taosArrayClear(pBlocks);
taosArrayClear(pSchemas);
SSubmitTbData* pSubmitTbDataRet = NULL;
@ -213,7 +213,7 @@ int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, STaosxR
if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue;
}
if (pRsp->withTbName) {
int64_t uid = pExec->pExecReader->lastBlkUid;
int64_t uid = pExec->pTqReader->lastBlkUid;
if (tqAddTbNameToRsp(pTq, uid, pRsp, taosArrayGetSize(pBlocks)) < 0) {
taosArrayDestroyEx(pBlocks, (FDelete)blockDataFreeRes);
taosArrayDestroyP(pSchemas, (FDelete)tDeleteSSchemaWrapper);
@ -262,8 +262,8 @@ int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, STaosxR
}
}
} else if (pExec->subType == TOPIC_SUB_TYPE__DB) {
STqReader* pReader = pExec->pExecReader;
tqReaderSetSubmitReq2(pReader, submit.msgStr, submit.msgLen, submit.ver);
STqReader* pReader = pExec->pTqReader;
tqReaderSetSubmitMsg(pReader, submit.msgStr, submit.msgLen, submit.ver);
while (tqNextDataBlockFilterOut2(pReader, pExec->execDb.pFilterOutTbUid)) {
taosArrayClear(pBlocks);
taosArrayClear(pSchemas);
@ -272,7 +272,7 @@ int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, STaosxR
if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue;
}
if (pRsp->withTbName) {
int64_t uid = pExec->pExecReader->lastBlkUid;
int64_t uid = pExec->pTqReader->lastBlkUid;
if (tqAddTbNameToRsp(pTq, uid, pRsp, taosArrayGetSize(pBlocks)) < 0) {
taosArrayDestroyEx(pBlocks, (FDelete)blockDataFreeRes);
taosArrayDestroyP(pSchemas, (FDelete)tDeleteSSchemaWrapper);

View File

@ -87,7 +87,7 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d
return;
}
tqDebug("vgId:%d, task %d write into table, block num: %d", TD_VID(pVnode), pTask->taskId, blockSz);
tqDebug("vgId:%d, s-task:%s write into table, block num: %d", TD_VID(pVnode), pTask->id.idStr, blockSz);
for (int32_t i = 0; i < blockSz; i++) {
bool createTb = true;
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
@ -382,7 +382,7 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void*
int32_t blockSz = taosArrayGetSize(pBlocks);
tqDebug("vgId:%d, task %d write into table, block num: %d", TD_VID(pVnode), pTask->taskId, blockSz);
tqDebug("vgId:%d, s-task:%s write results blocks:%d into table", TD_VID(pVnode), pTask->id.idStr, blockSz);
void* pBuf = NULL;
SArray* tagArray = NULL;
@ -475,11 +475,9 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void*
}
for (int32_t tagId = UD_TAG_COLUMN_INDEX, step = 1; tagId < size; tagId++, step++) {
SColumnInfoData* pTagData = taosArrayGet(pDataBlock->pDataBlock, tagId);
STagVal tagVal = {
.cid = pTSchema->numOfCols + step,
.type = pTagData->info.type,
};
void* pData = colDataGetData(pTagData, rowId);
STagVal tagVal = {.cid = pTSchema->numOfCols + step, .type = pTagData->info.type};
void* pData = colDataGetData(pTagData, rowId);
if (colDataIsNull_s(pTagData, rowId)) {
continue;
} else if (IS_VAR_DATA_TYPE(pTagData->info.type)) {

View File

@ -0,0 +1,462 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "tq.h"
#define IS_OFFSET_RESET_TYPE(_t) ((_t) < 0)
static int32_t tqSendMetaPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqMetaRsp* pRsp);
// stream_task:stream_id:task_id
void createStreamTaskOffsetKey(char* dst, uint64_t streamId, uint32_t taskId) {
int32_t n = 12;
char* p = dst;
memcpy(p, "stream_task:", n);
p += n;
int32_t inc = tintToHex(streamId, p);
p += inc;
*(p++) = ':';
tintToHex(taskId, p);
}
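
For reference, a minimal standalone sketch of the key layout produced above, assuming tintToHex writes bare hex digits with no 0x prefix (the helper below is a hypothetical stand-in, not the real implementation):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for createStreamTaskOffsetKey(): the offset-store key
 * is "stream_task:<streamId hex>:<taskId hex>", assuming tintToHex() emits
 * bare hex digits. */
static void buildOffsetKeySketch(char *dst, size_t cap, uint64_t streamId, uint32_t taskId) {
  snprintf(dst, cap, "stream_task:%" PRIx64 ":%" PRIx32, streamId, taskId);
}

int main(void) {
  char key[128] = {0};
  buildOffsetKeySketch(key, sizeof(key), 0x1234ULL, 0x56U);
  printf("%s\n", key); /* prints: stream_task:1234:56 */
  return 0;
}
```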
int32_t tqAddInputBlockNLaunchTask(SStreamTask* pTask, SStreamQueueItem* pQueueItem, int64_t ver) {
int32_t code = tAppendDataToInputQueue(pTask, pQueueItem);
if (code < 0) {
tqError("s-task:%s failed to put into queue, too many, next start ver:%" PRId64, pTask->id.idStr, ver);
return -1;
}
if (streamSchedExec(pTask) < 0) {
tqError("stream task:%d failed to be launched, code:%s", pTask->id.taskId, tstrerror(terrno));
return -1;
}
return TSDB_CODE_SUCCESS;
}
void initOffsetForAllRestoreTasks(STQ* pTq) {
void* pIter = NULL;
while (1) {
pIter = taosHashIterate(pTq->pStreamMeta->pTasks, pIter);
if (pIter == NULL) {
break;
}
SStreamTask* pTask = *(SStreamTask**)pIter;
if (pTask->taskLevel != TASK_LEVEL__SOURCE) {
continue;
}
if (pTask->status.taskStatus == TASK_STATUS__RECOVER_PREPARE || pTask->status.taskStatus == TASK_STATUS__WAIT_DOWNSTREAM) {
tqDebug("s-task:%s skip push data, since not ready, status %d", pTask->id.idStr, pTask->status.taskStatus);
continue;
}
char key[128] = {0};
createStreamTaskOffsetKey(key, pTask->id.streamId, pTask->id.taskId);
STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, key);
if (pOffset == NULL) {
doSaveTaskOffset(pTq->pOffsetStore, key, pTask->chkInfo.version);
}
}
}
void saveOffsetForAllTasks(STQ* pTq, int64_t ver) {
void* pIter = NULL;
while (1) {
pIter = taosHashIterate(pTq->pStreamMeta->pTasks, pIter);
if (pIter == NULL) {
break;
}
SStreamTask* pTask = *(SStreamTask**)pIter;
if (pTask->taskLevel != TASK_LEVEL__SOURCE) {
continue;
}
if (pTask->status.taskStatus == TASK_STATUS__RECOVER_PREPARE || pTask->status.taskStatus == TASK_STATUS__WAIT_DOWNSTREAM) {
tqDebug("s-task:%s skip push data, not ready for processing, status %d", pTask->id.idStr,
pTask->status.taskStatus);
continue;
}
char key[128] = {0};
createStreamTaskOffsetKey(key, pTask->id.streamId, pTask->id.taskId);
STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, key);
if (pOffset == NULL) {
doSaveTaskOffset(pTq->pOffsetStore, key, ver);
}
}
}
void doSaveTaskOffset(STqOffsetStore* pOffsetStore, const char* pKey, int64_t ver) {
STqOffset offset = {0};
tqOffsetResetToLog(&offset.val, ver);
tstrncpy(offset.subKey, pKey, tListLen(offset.subKey));
// keep the offset info in the offset store
tqOffsetWrite(pOffsetStore, &offset);
}
static int32_t tqInitDataRsp(SMqDataRsp* pRsp, const SMqPollReq* pReq, int8_t subType) {
pRsp->reqOffset = pReq->reqOffset;
pRsp->blockData = taosArrayInit(0, sizeof(void*));
pRsp->blockDataLen = taosArrayInit(0, sizeof(int32_t));
if (pRsp->blockData == NULL || pRsp->blockDataLen == NULL) {
return -1;
}
pRsp->withTbName = 0;
pRsp->withSchema = false;
return 0;
}
static int32_t tqInitTaosxRsp(STaosxRsp* pRsp, const SMqPollReq* pReq) {
pRsp->reqOffset = pReq->reqOffset;
pRsp->withTbName = 1;
pRsp->withSchema = 1;
pRsp->blockData = taosArrayInit(0, sizeof(void*));
pRsp->blockDataLen = taosArrayInit(0, sizeof(int32_t));
pRsp->blockTbName = taosArrayInit(0, sizeof(void*));
pRsp->blockSchema = taosArrayInit(0, sizeof(void*));
if (pRsp->blockData == NULL || pRsp->blockDataLen == NULL || pRsp->blockTbName == NULL || pRsp->blockSchema == NULL) {
return -1;
}
return 0;
}
static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest,
SRpcMsg* pMsg, bool* pBlockReturned) {
uint64_t consumerId = pRequest->consumerId;
STqOffsetVal reqOffset = pRequest->reqOffset;
STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, pRequest->subKey);
int32_t vgId = TD_VID(pTq->pVnode);
*pBlockReturned = false;
// In this vnode, data has been polled by consumer for this topic, so let's continue from the last offset value.
if (pOffset != NULL) {
*pOffsetVal = pOffset->val;
char formatBuf[80];
tFormatOffset(formatBuf, 80, pOffsetVal);
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, existed offset found, offset reset to %s and continue. reqId:0x%"PRIx64,
consumerId, pHandle->subKey, vgId, formatBuf, pRequest->reqId);
return 0;
} else {
// no poll occurs in this vnode for this topic, let's seek to the right offset value.
if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEAST) {
if (pRequest->useSnapshot) {
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey:%s, vgId:%d, (earliest) set offset to be snapshot",
consumerId, pHandle->subKey, vgId);
if (pHandle->fetchMeta) {
tqOffsetResetToMeta(pOffsetVal, 0);
} else {
tqOffsetResetToData(pOffsetVal, 0, 0);
}
} else {
pHandle->pRef = walRefFirstVer(pTq->pVnode->pWal, pHandle->pRef);
if (pHandle->pRef == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
// on init, set the offset to the version preceding the first WAL record
tqOffsetResetToLog(pOffsetVal, pHandle->pRef->refVer - 1);
}
} else if (reqOffset.type == TMQ_OFFSET__RESET_LATEST) {
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
SMqDataRsp dataRsp = {0};
tqInitDataRsp(&dataRsp, pRequest, pHandle->execHandle.subType);
tqOffsetResetToLog(&dataRsp.rspOffset, walGetLastVer(pTq->pVnode->pWal));
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, (latest) offset reset to %" PRId64, consumerId,
pHandle->subKey, vgId, dataRsp.rspOffset.version);
int32_t code = tqSendDataRsp(pTq, pMsg, pRequest, &dataRsp, TMQ_MSG_TYPE__POLL_RSP);
tDeleteSMqDataRsp(&dataRsp);
*pBlockReturned = true;
return code;
} else {
STaosxRsp taosxRsp = {0};
tqInitTaosxRsp(&taosxRsp, pRequest);
tqOffsetResetToLog(&taosxRsp.rspOffset, walGetLastVer(pTq->pVnode->pWal));
int32_t code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP);
tDeleteSTaosxRsp(&taosxRsp);
*pBlockReturned = true;
return code;
}
} else if (reqOffset.type == TMQ_OFFSET__RESET_NONE) {
tqError("tmq poll: subkey:%s, no offset committed for consumer:0x%" PRIx64 " in vg %d, subkey %s, reset none failed",
pHandle->subKey, consumerId, vgId, pRequest->subKey);
terrno = TSDB_CODE_TQ_NO_COMMITTED_OFFSET;
return -1;
}
}
return 0;
}
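
The function above boils down to a small decision table: resume from a stored offset if one exists, otherwise branch on the requested reset type. A minimal standalone sketch of that decision (enum values and strings are illustrative stand-ins for the real TMQ_OFFSET__RESET_* constants, not the actual control flow):

```c
#include <stdbool.h>
#include <stdio.h>

typedef enum { RESET_EARLIEST = -3, RESET_LATEST = -2, RESET_NONE = -1 } ResetType;

static const char *resolveStartOffset(bool hasStoredOffset, ResetType reset) {
  if (hasStoredOffset) return "resume from the stored per-subkey offset";
  switch (reset) {
    case RESET_EARLIEST: return "start from snapshot / first WAL version";
    case RESET_LATEST:   return "reply with an empty block at the last WAL version";
    case RESET_NONE:     return "fail: no committed offset for this consumer";
  }
  return "unreachable";
}

int main(void) {
  printf("%s\n", resolveStartOffset(false, RESET_LATEST));
  return 0;
}
```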
static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest,
SRpcMsg* pMsg, STqOffsetVal* pOffset) {
uint64_t consumerId = pRequest->consumerId;
int32_t vgId = TD_VID(pTq->pVnode);
SMqDataRsp dataRsp = {0};
tqInitDataRsp(&dataRsp, pRequest, pHandle->execHandle.subType);
// lock
taosWLockLatch(&pTq->lock);
qSetTaskId(pHandle->execHandle.task, consumerId, pRequest->reqId);
int code = tqScanData(pTq, pHandle, &dataRsp, pOffset);
if (code != 0) {
goto end;
}
// all data so far has been transferred to the consumer; newly arrived data needs to be pushed to the client.
if (dataRsp.blockNum == 0 && dataRsp.reqOffset.type == TMQ_OFFSET__LOG &&
dataRsp.reqOffset.version == dataRsp.rspOffset.version && pHandle->consumerId == pRequest->consumerId) {
code = tqRegisterPushHandle(pTq, pHandle, pRequest, pMsg, &dataRsp, TMQ_MSG_TYPE__POLL_RSP);
taosWUnLockLatch(&pTq->lock);
return code;
}
code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&dataRsp, TMQ_MSG_TYPE__POLL_RSP);
// NOTE: this pHandle->consumerId may have been changed already.
end:
{
char buf[80] = {0};
tFormatOffset(buf, 80, &dataRsp.rspOffset);
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, rsp block:%d, rsp offset type:%s, reqId:0x%" PRIx64 " code:%d",
consumerId, pHandle->subKey, vgId, dataRsp.blockNum, buf, pRequest->reqId, code);
taosWUnLockLatch(&pTq->lock);
tDeleteSMqDataRsp(&dataRsp);
}
return code;
}
static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg, STqOffsetVal *offset) {
int code = 0;
int32_t vgId = TD_VID(pTq->pVnode);
SWalCkHead* pCkHead = NULL;
SMqMetaRsp metaRsp = {0};
STaosxRsp taosxRsp = {0};
tqInitTaosxRsp(&taosxRsp, pRequest);
if (offset->type != TMQ_OFFSET__LOG) {
if (tqScanTaosx(pTq, pHandle, &taosxRsp, &metaRsp, offset) < 0) {
return -1;
}
if (metaRsp.metaRspLen > 0) {
code = tqSendMetaPollRsp(pTq, pMsg, pRequest, &metaRsp);
tqDebug("tmq poll: consumer:0x%" PRIx64 " subkey:%s vgId:%d, send meta offset type:%d,uid:%" PRId64 ",ts:%" PRId64,
pRequest->consumerId, pHandle->subKey, vgId, metaRsp.rspOffset.type, metaRsp.rspOffset.uid, metaRsp.rspOffset.ts);
taosMemoryFree(metaRsp.metaRsp);
tDeleteSTaosxRsp(&taosxRsp);
return code;
}
tqDebug("taosx poll: consumer:0x%" PRIx64 " subkey:%s vgId:%d, send data blockNum:%d, offset type:%d,uid:%" PRId64
",ts:%" PRId64,pRequest->consumerId, pHandle->subKey, vgId, taosxRsp.blockNum, taosxRsp.rspOffset.type, taosxRsp.rspOffset.uid,taosxRsp.rspOffset.ts);
if (taosxRsp.blockNum > 0) {
code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP);
tDeleteSTaosxRsp(&taosxRsp);
return code;
} else {
*offset = taosxRsp.rspOffset;
}
}
if (offset->type == TMQ_OFFSET__LOG) {
int64_t fetchVer = offset->version + 1;
pCkHead = taosMemoryMalloc(sizeof(SWalCkHead) + 2048);
if (pCkHead == NULL) {
tDeleteSTaosxRsp(&taosxRsp);
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
walSetReaderCapacity(pHandle->pWalReader, 2048);
int totalRows = 0;
while (1) {
int32_t savedEpoch = atomic_load_32(&pHandle->epoch);
if (savedEpoch > pRequest->epoch) {
tqWarn("tmq poll: consumer:0x%" PRIx64 " (epoch %d), subkey:%s vgId:%d offset %" PRId64
", found new consumer epoch %d, discard req epoch %d", pRequest->consumerId, pRequest->epoch, pHandle->subKey, vgId, fetchVer, savedEpoch, pRequest->epoch);
break;
}
if (tqFetchLog(pTq, pHandle, &fetchVer, &pCkHead, pRequest->reqId) < 0) {
tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);
code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP);
tDeleteSTaosxRsp(&taosxRsp);
taosMemoryFreeClear(pCkHead);
return code;
}
SWalCont* pHead = &pCkHead->head;
tqDebug("tmq poll: consumer:0x%" PRIx64 " (epoch %d) iter log, vgId:%d offset %" PRId64 " msgType %d", pRequest->consumerId,
pRequest->epoch, vgId, fetchVer, pHead->msgType);
// process meta
if (pHead->msgType != TDMT_VND_SUBMIT) {
if(totalRows > 0) {
tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer - 1);
code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP);
tDeleteSTaosxRsp(&taosxRsp);
taosMemoryFreeClear(pCkHead);
return code;
}
tqDebug("fetch meta msg, ver:%" PRId64 ", type:%s", pHead->version, TMSG_INFO(pHead->msgType));
tqOffsetResetToLog(&metaRsp.rspOffset, fetchVer);
metaRsp.resMsgType = pHead->msgType;
metaRsp.metaRspLen = pHead->bodyLen;
metaRsp.metaRsp = pHead->body;
if (tqSendMetaPollRsp(pTq, pMsg, pRequest, &metaRsp) < 0) {
code = -1;
taosMemoryFreeClear(pCkHead);
tDeleteSTaosxRsp(&taosxRsp);
return code;
}
code = 0;
taosMemoryFreeClear(pCkHead);
tDeleteSTaosxRsp(&taosxRsp);
return code;
}
// process data
SPackedData submit = {
.msgStr = POINTER_SHIFT(pHead->body, sizeof(SSubmitReq2Msg)),
.msgLen = pHead->bodyLen - sizeof(SSubmitReq2Msg),
.ver = pHead->version,
};
if (tqTaosxScanLog(pTq, pHandle, submit, &taosxRsp, &totalRows) < 0) {
tqError("tmq poll: tqTaosxScanLog error %" PRId64 ", in vgId:%d, subkey %s", pRequest->consumerId, vgId,
pRequest->subKey);
taosMemoryFreeClear(pCkHead);
tDeleteSTaosxRsp(&taosxRsp);
return -1;
}
if (totalRows >= 4096 || taosxRsp.createTableNum > 0) {
tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);
code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP);
tDeleteSTaosxRsp(&taosxRsp);
taosMemoryFreeClear(pCkHead);
return code;
} else {
fetchVer++;
}
}
}
tDeleteSTaosxRsp(&taosxRsp);
taosMemoryFreeClear(pCkHead);
return 0;
}
int32_t tqExtractDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg) {
int32_t code = -1;
STqOffsetVal offset = {0};
STqOffsetVal reqOffset = pRequest->reqOffset;
// 1. reset the offset if needed
if (IS_OFFSET_RESET_TYPE(reqOffset.type)) {
// handle the reset offset cases, according to the consumer's choice.
bool blockReturned = false;
code = extractResetOffsetVal(&offset, pTq, pHandle, pRequest, pMsg, &blockReturned);
if (code != 0) {
return code;
}
// empty block returned, quit
if (blockReturned) {
return 0;
}
} else { // use the consumer specified offset
// the offset value may not be monotonically increasing??
offset = reqOffset;
}
// this is a normal subscribe request
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
return extractDataAndRspForNormalSubscribe(pTq, pHandle, pRequest, pMsg, &offset);
}
// todo handle the case where re-balance occurs.
// for taosx
return extractDataAndRspForDbStbSubscribe(pTq, pHandle, pRequest, pMsg, &offset);
}
int32_t tqSendMetaPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqMetaRsp* pRsp) {
int32_t len = 0;
int32_t code = 0;
tEncodeSize(tEncodeSMqMetaRsp, pRsp, len, code);
if (code < 0) {
return -1;
}
int32_t tlen = sizeof(SMqRspHead) + len;
void* buf = rpcMallocCont(tlen);
if (buf == NULL) {
return -1;
}
((SMqRspHead*)buf)->mqMsgType = TMQ_MSG_TYPE__POLL_META_RSP;
((SMqRspHead*)buf)->epoch = pReq->epoch;
((SMqRspHead*)buf)->consumerId = pReq->consumerId;
void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
SEncoder encoder = {0};
tEncoderInit(&encoder, abuf, len);
tEncodeSMqMetaRsp(&encoder, pRsp);
tEncoderClear(&encoder);
SRpcMsg resp = {
.info = pMsg->info,
.pCont = buf,
.contLen = tlen,
.code = 0,
};
tmsgSendRsp(&resp);
tqDebug("vgId:%d, from consumer:0x%" PRIx64 " (epoch %d) send rsp, res msg type %d, offset type:%d",
TD_VID(pTq->pVnode), pReq->consumerId, pReq->epoch, pRsp->resMsgType, pRsp->rspOffset.type);
return 0;
}
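
The response above is packed as a fixed header followed directly by the encoded payload in one contiguous allocation. A minimal standalone sketch of that layout, with a stand-in struct and values instead of the real SMqRspHead and tEncodeSMqMetaRsp output:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative only: mirrors the "head + encoded body" packing. */
typedef struct {
  int32_t  mqMsgType;
  int32_t  epoch;
  uint64_t consumerId;
} RspHeadSketch;

int main(void) {
  const char encodedBody[] = "encoded-meta-rsp";          /* stands in for the encoder output */
  size_t len  = sizeof(encodedBody);
  size_t tlen = sizeof(RspHeadSketch) + len;

  char *buf = malloc(tlen);
  if (buf == NULL) return 1;

  ((RspHeadSketch *)buf)->mqMsgType  = 7;                 /* e.g. a POLL_META_RSP-style type id */
  ((RspHeadSketch *)buf)->epoch      = 1;
  ((RspHeadSketch *)buf)->consumerId = 0x42;
  memcpy(buf + sizeof(RspHeadSketch), encodedBody, len);  /* body lives right after the head */

  printf("total rsp length: %zu\n", tlen);
  free(buf);
  return 0;
}
```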

View File

@ -35,7 +35,11 @@ _err:
static void tsdbCloseBICache(STsdb *pTsdb) {
SLRUCache *pCache = pTsdb->biCache;
if (pCache) {
int32_t elems = taosLRUCacheGetElems(pCache);
tsdbTrace("vgId:%d, elems: %d", TD_VID(pTsdb->pVnode), elems);
taosLRUCacheEraseUnrefEntries(pCache);
elems = taosLRUCacheGetElems(pCache);
tsdbTrace("vgId:%d, elems: %d", TD_VID(pTsdb->pVnode), elems);
taosLRUCacheCleanup(pCache);
@ -1112,7 +1116,12 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie
* &state->blockIdx);
*/
state->pBlockIdx = taosArraySearch(state->aBlockIdx, state->pBlockIdxExp, tCmprBlockIdx, TD_EQ);
if (!state->pBlockIdx) { /*
if (!state->pBlockIdx) {
tsdbBICacheRelease(state->pTsdb->biCache, state->aBlockIdxHandle);
state->aBlockIdxHandle = NULL;
state->aBlockIdx = NULL;
/*
tsdbDataFReaderClose(state->pDataFReader);
*state->pDataFReader = NULL;
resetLastBlockLoadInfo(state->pLoadInfo);*/
@ -1761,11 +1770,14 @@ static int32_t mergeLastRow(tb_uid_t uid, STsdb *pTsdb, bool *dup, SArray **ppCo
hasRow = true;
code = updateTSchema(TSDBROW_SVERSION(pRow), pr, uid);
if (TSDB_CODE_SUCCESS != code) {
goto _err;
int32_t sversion = TSDBROW_SVERSION(pRow);
if (sversion != -1) {
code = updateTSchema(sversion, pr, uid);
if (TSDB_CODE_SUCCESS != code) {
goto _err;
}
pTSchema = pr->pCurrSchema;
}
pTSchema = pr->pCurrSchema;
int16_t nCol = pTSchema->numOfCols;
TSKEY rowTs = TSDBROW_TS(pRow);
@ -1915,11 +1927,14 @@ static int32_t mergeLast(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SCach
hasRow = true;
code = updateTSchema(TSDBROW_SVERSION(pRow), pr, uid);
if (TSDB_CODE_SUCCESS != code) {
goto _err;
int32_t sversion = TSDBROW_SVERSION(pRow);
if (sversion != -1) {
code = updateTSchema(sversion, pr, uid);
if (TSDB_CODE_SUCCESS != code) {
goto _err;
}
pTSchema = pr->pCurrSchema;
}
pTSchema = pr->pCurrSchema;
int16_t nCol = pTSchema->numOfCols;
TSKEY rowTs = TSDBROW_TS(pRow);
@ -2223,6 +2238,7 @@ int32_t tsdbCacheGetBlockIdx(SLRUCache *pCache, SDataFReader *pFileReader, LRUHa
taosThreadMutexUnlock(&pTsdb->biMutex);
}
tsdbTrace("bi cache:%p, ref", pCache);
*handle = h;
return code;
@ -2232,6 +2248,7 @@ int32_t tsdbBICacheRelease(SLRUCache *pCache, LRUHandle *h) {
int32_t code = 0;
taosLRUCacheRelease(pCache, h, false);
tsdbTrace("bi cache:%p, release", pCache);
return code;
}

View File

@ -367,15 +367,6 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
p->ts = pCol->ts;
p->colVal = pCol->colVal;
singleTableLastTs = pCol->ts;
// only set value for last row query
if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_LAST_ROW)) {
if (taosArrayGetSize(pTableUidList) == 0) {
taosArrayPush(pTableUidList, &pKeyInfo->uid);
} else {
taosArraySet(pTableUidList, 0, &pKeyInfo->uid);
}
}
}
} else {
SLastCol* p = taosArrayGet(pLastCols, slotId);
@ -422,6 +413,12 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
}
}
if (taosArrayGetSize(pTableUidList) == 0) {
taosArrayPush(pTableUidList, &pKeyInfo->uid);
} else {
taosArraySet(pTableUidList, 0, &pKeyInfo->uid);
}
tsdbCacheRelease(lruCache, h);
}

View File

@ -831,6 +831,7 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFReader* pFileReader,
// this block belongs to a table that is not queried.
STableBlockScanInfo* pScanInfo = getTableBlockScanInfo(pReader->status.pTableMap, pBlockIdx->uid, pReader->idStr);
if (pScanInfo == NULL) {
tsdbBICacheRelease(pFileReader->pTsdb->biCache, handle);
return terrno;
}
@ -2088,7 +2089,7 @@ static int32_t doMergeFileBlockAndLastBlock(SLastBlockReader* pLastBlockReader,
pBlockScanInfo->lastKey = tsLastBlock;
return TSDB_CODE_SUCCESS;
} else {
int32_t code = tsdbRowMergerInit(&merge, NULL, &fRow, pReader->pSchema);
code = tsdbRowMergerInit(&merge, NULL, &fRow, pReader->pSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@ -2112,7 +2113,7 @@ static int32_t doMergeFileBlockAndLastBlock(SLastBlockReader* pLastBlockReader,
}
}
} else { // not merge block data
int32_t code = tsdbRowMergerInit(&merge, NULL, &fRow, pReader->pSchema);
code = tsdbRowMergerInit(&merge, NULL, &fRow, pReader->pSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@ -2320,7 +2321,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
tsdbRowMergerAdd(&merge, pRow, pSchema);
} else {
STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
// STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
code = tsdbRowMergerInit(&merge, NULL, pRow, pSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
@ -2352,7 +2353,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
tsdbRowMergerAdd(&merge, piRow, piSchema);
} else {
init = true;
STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(piRow), pReader, pBlockScanInfo->uid);
// STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(piRow), pReader, pBlockScanInfo->uid);
code = tsdbRowMergerInit(&merge, pSchema, piRow, piSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
@ -2575,7 +2576,7 @@ int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBloc
SRow* pTSRow = NULL;
SRowMerger merge = {0};
int32_t code = tsdbRowMergerInit(&merge, NULL, &fRow, pReader->pSchema);
code = tsdbRowMergerInit(&merge, NULL, &fRow, pReader->pSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@ -3242,8 +3243,8 @@ static int32_t readRowsCountFromFiles(STsdbReader* pReader) {
int32_t code = TSDB_CODE_SUCCESS;
while (1) {
bool hasNext = false;
int32_t code = filesetIteratorNext(&pReader->status.fileIter, pReader, &hasNext);
bool hasNext = false;
code = filesetIteratorNext(&pReader->status.fileIter, pReader, &hasNext);
if (code) {
return code;
}
@ -3515,8 +3516,8 @@ SVersionRange getQueryVerRange(SVnode* pVnode, SQueryTableDataCond* pCond, int8_
int64_t startVer = (pCond->startVersion == -1) ? 0 : pCond->startVersion;
int64_t endVer = 0;
if (pCond->endVersion ==
-1) { // user not specified end version, set current maximum version of vnode as the endVersion
if (pCond->endVersion == -1) {
// user not specified end version, set current maximum version of vnode as the endVersion
endVer = pVnode->state.applied;
} else {
endVer = (pCond->endVersion > pVnode->state.applied) ? pVnode->state.applied : pCond->endVersion;

View File

@ -1259,6 +1259,11 @@ void tBlockDataReset(SBlockData *pBlockData) {
pBlockData->suid = 0;
pBlockData->uid = 0;
pBlockData->nRow = 0;
for (int32_t i = 0; i < pBlockData->nColData; i++) {
tColDataDestroy(&pBlockData->aColData[i]);
}
pBlockData->nColData = 0;
taosMemoryFreeClear(pBlockData->aColData);
}
void tBlockDataClear(SBlockData *pBlockData) {

View File

@ -149,7 +149,7 @@ void vnodeUpdCommitSched(SVnode *pVnode) {
pVnode->commitSched.maxWaitMs = tsVndCommitMaxIntervalMs + (randNum % tsVndCommitMaxIntervalMs);
}
int vnodeShouldCommit(SVnode *pVnode) {
int vnodeShouldCommit(SVnode *pVnode, bool atExit) {
SVCommitSched *pSched = &pVnode->commitSched;
int64_t nowMs = taosGetMonoTimestampMs();
bool diskAvail = osDataSpaceAvailable();
@ -158,7 +158,8 @@ int vnodeShouldCommit(SVnode *pVnode) {
taosThreadMutexLock(&pVnode->mutex);
if (pVnode->inUse && diskAvail) {
needCommit =
((pVnode->inUse->size > pVnode->inUse->node.size) && (pSched->commitMs + SYNC_VND_COMMIT_MIN_MS < nowMs));
((pVnode->inUse->size > pVnode->inUse->node.size) && (pSched->commitMs + SYNC_VND_COMMIT_MIN_MS < nowMs)) ||
((pVnode->inUse->size > 0) && atExit);
}
taosThreadMutexUnlock(&pVnode->mutex);
return needCommit;

View File

@ -400,7 +400,7 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp
}
break;
case TDMT_STREAM_TASK_DEPLOY: {
if (tqProcessTaskDeployReq(pVnode->pTq, version, pReq, len) < 0) {
if (pVnode->restored && tqProcessTaskDeployReq(pVnode->pTq, version, pReq, len) < 0) {
goto _err;
}
} break;
@ -447,13 +447,11 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp
walApplyVer(pVnode->pWal, version);
/*vInfo("vgId:%d, push msg begin", pVnode->config.vgId);*/
if (tqPushMsg(pVnode->pTq, pMsg->pCont, pMsg->contLen, pMsg->msgType, version) < 0) {
/*vInfo("vgId:%d, push msg end", pVnode->config.vgId);*/
vError("vgId:%d, failed to push msg to TQ since %s", TD_VID(pVnode), tstrerror(terrno));
return -1;
}
/*vInfo("vgId:%d, push msg end", pVnode->config.vgId);*/
// commit if need
if (needCommit) {
@ -541,13 +539,10 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
return vnodeGetBatchMeta(pVnode, pMsg);
case TDMT_VND_TMQ_CONSUME:
return tqProcessPollReq(pVnode->pTq, pMsg);
case TDMT_STREAM_TASK_RUN:
return tqProcessTaskRunReq(pVnode->pTq, pMsg);
#if 1
case TDMT_STREAM_TASK_DISPATCH:
return tqProcessTaskDispatchReq(pVnode->pTq, pMsg, true);
#endif
case TDMT_STREAM_TASK_CHECK:
return tqProcessStreamTaskCheckReq(pVnode->pTq, pMsg);
case TDMT_STREAM_TASK_DISPATCH_RSP:

View File

@ -129,8 +129,8 @@ static int32_t inline vnodeProposeMsg(SVnode *pVnode, SRpcMsg *pMsg, bool isWeak
return code;
}
void vnodeProposeCommitOnNeed(SVnode *pVnode) {
if (!vnodeShouldCommit(pVnode)) {
void vnodeProposeCommitOnNeed(SVnode *pVnode, bool atExit) {
if (!vnodeShouldCommit(pVnode, atExit)) {
return;
}
@ -145,18 +145,20 @@ void vnodeProposeCommitOnNeed(SVnode *pVnode) {
rpcMsg.pCont = pHead;
rpcMsg.info.noResp = 1;
vInfo("vgId:%d, propose vnode commit", pVnode->config.vgId);
bool isWeak = false;
if (vnodeProposeMsg(pVnode, &rpcMsg, isWeak) < 0) {
vTrace("vgId:%d, failed to propose vnode commit since %s", pVnode->config.vgId, terrstr());
goto _out;
if (!atExit) {
if (vnodeProposeMsg(pVnode, &rpcMsg, isWeak) < 0) {
vTrace("vgId:%d, failed to propose vnode commit since %s", pVnode->config.vgId, terrstr());
}
rpcFreeCont(rpcMsg.pCont);
rpcMsg.pCont = NULL;
} else {
tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &rpcMsg);
}
vInfo("vgId:%d, proposed vnode commit", pVnode->config.vgId);
_out:
vnodeUpdCommitSched(pVnode);
rpcFreeCont(rpcMsg.pCont);
rpcMsg.pCont = NULL;
}
#if BATCH_ENABLE
@ -236,7 +238,8 @@ void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs)
continue;
}
vnodeProposeCommitOnNeed(pVnode);
bool atExit = false;
vnodeProposeCommitOnNeed(pVnode, atExit);
code = vnodePreProcessWriteMsg(pVnode, pMsg);
if (code != 0) {
@ -288,7 +291,8 @@ void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs)
continue;
}
vnodeProposeCommitOnNeed(pVnode);
bool atExit = false;
vnodeProposeCommitOnNeed(pVnode, atExit);
code = vnodePreProcessWriteMsg(pVnode, pMsg);
if (code != 0) {
@ -549,6 +553,9 @@ static void vnodeRestoreFinish(const SSyncFSM *pFsm, const SyncIndex commitIdx)
pVnode->restored = true;
vInfo("vgId:%d, sync restore finished", pVnode->config.vgId);
// start to restore all stream tasks
tqStartStreamTasks(pVnode->pTq);
}
static void vnodeBecomeFollower(const SSyncFSM *pFsm) {

View File

@ -127,14 +127,9 @@ enum {
};
typedef struct {
// TODO remove prepareStatus
// STqOffsetVal prepareStatus; // for tmq
STqOffsetVal currentOffset; // for tmq
SMqMetaRsp metaRsp; // for tmq fetching meta
// int8_t returned;
int64_t snapshotVer;
// const SSubmitReq* pReq;
STqOffsetVal currentOffset; // for tmq
SMqMetaRsp metaRsp; // for tmq fetching meta
int64_t snapshotVer;
SPackedData submit;
SSchemaWrapper* schema;
char tbName[TSDB_TABLE_NAME_LEN];
@ -144,6 +139,8 @@ typedef struct {
int64_t fillHistoryVer1;
int64_t fillHistoryVer2;
SStreamState* pState;
int64_t dataVersion;
int64_t checkPointId;
} SStreamTaskInfo;
typedef struct {
@ -191,7 +188,6 @@ enum {
OP_OPENED = 0x1,
OP_RES_TO_RETURN = 0x5,
OP_EXEC_DONE = 0x9,
// OP_EXEC_RECV = 0x11,
};
typedef struct SOperatorFpSet {
@ -560,6 +556,7 @@ typedef struct SStreamIntervalOperatorInfo {
uint64_t numOfDatapack;
SArray* pUpdated;
SSHashObj* pUpdatedMap;
int64_t dataVersion;
} SStreamIntervalOperatorInfo;
typedef struct SDataGroupInfo {
@ -609,6 +606,7 @@ typedef struct SStreamSessionAggOperatorInfo {
bool ignoreExpiredDataSaved;
SArray* pUpdated;
SSHashObj* pStUpdated;
int64_t dataVersion;
} SStreamSessionAggOperatorInfo;
typedef struct SStreamStateAggOperatorInfo {
@ -627,6 +625,7 @@ typedef struct SStreamStateAggOperatorInfo {
bool ignoreExpiredDataSaved;
SArray* pUpdated;
SSHashObj* pSeUpdated;
int64_t dataVersion;
} SStreamStateAggOperatorInfo;
typedef struct SStreamPartitionOperatorInfo {
@ -827,7 +826,7 @@ void setTaskKilled(SExecTaskInfo* pTaskInfo, int32_t rspCode);
void doDestroyTask(SExecTaskInfo* pTaskInfo);
void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status);
char* buildTaskId(uint64_t taskId, uint64_t queryId);
void buildTaskId(uint64_t taskId, uint64_t queryId, char* dst);
SArray* getTableListInfo(const SExecTaskInfo* pTaskInfo);

View File

@ -264,8 +264,6 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
pInfo->pRes->info.id.groupId = pKeyInfo->groupId;
if (taosArrayGetSize(pInfo->pUidList) > 0) {
ASSERT((pInfo->retrieveType & CACHESCAN_RETRIEVE_LAST_ROW) == CACHESCAN_RETRIEVE_LAST_ROW);
pInfo->pRes->info.id.uid = *(tb_uid_t*)taosArrayGet(pInfo->pUidList, 0);
code = addTagPseudoColumnData(&pInfo->readHandle, pSup->pExprInfo, pSup->numOfExprs, pInfo->pRes,
pInfo->pRes->info.rows, GET_TASKID(pTaskInfo), NULL);

View File

@ -127,7 +127,7 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
pOperator->status = OP_NOT_OPENED;
SStreamScanInfo* pInfo = pOperator->info;
qDebug("task stream set total blocks:%d %s", (int32_t)numOfBlocks, id);
qDebug("s-task set source blocks:%d %s", (int32_t)numOfBlocks, id);
ASSERT(pInfo->validBlockIndex == 0 && taosArrayGetSize(pInfo->pBlockLists) == 0);
if (type == STREAM_INPUT__MERGED_SUBMIT) {
@ -173,9 +173,7 @@ void doSetTaskId(SOperatorInfo* pOperator) {
void qSetTaskId(qTaskInfo_t tinfo, uint64_t taskId, uint64_t queryId) {
SExecTaskInfo* pTaskInfo = tinfo;
pTaskInfo->id.queryId = queryId;
taosMemoryFreeClear(pTaskInfo->id.str);
pTaskInfo->id.str = buildTaskId(taskId, queryId);
buildTaskId(taskId, queryId, pTaskInfo->id.str);
// set the idstr for tsdbReader
doSetTaskId(pTaskInfo->pRoot);
@ -198,6 +196,13 @@ int32_t qSetStreamOpOpen(qTaskInfo_t tinfo) {
return code;
}
void qGetCheckpointVersion(qTaskInfo_t tinfo, int64_t* dataVer, int64_t* ckId) {
SExecTaskInfo* pTaskInfo = tinfo;
*dataVer = pTaskInfo->streamInfo.dataVersion;
*ckId = pTaskInfo->streamInfo.checkPointId;
}
int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type) {
if (tinfo == NULL) {
return TSDB_CODE_APP_ERROR;
@ -363,27 +368,23 @@ static SArray* filterUnqualifiedTables(const SStreamScanInfo* pScanInfo, const S
return qa;
}
int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bool isAdd) {
int32_t qUpdateTableListForStreamScanner(qTaskInfo_t tinfo, const SArray* tableIdList, bool isAdd) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
const char* id = GET_TASKID(pTaskInfo);
int32_t code = 0;
if (isAdd) {
qDebug("add %d tables id into query list, %s", (int32_t)taosArrayGetSize(tableIdList), pTaskInfo->id.str);
qDebug("add %d tables id into query list, %s", (int32_t)taosArrayGetSize(tableIdList), id);
}
// traverse to the stream scanner node to add this table id
SOperatorInfo* pInfo = pTaskInfo->pRoot;
while (pInfo->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
pInfo = pInfo->pDownstream[0];
}
int32_t code = 0;
SOperatorInfo* pInfo = extractOperatorInTree(pTaskInfo->pRoot, QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN, id);
SStreamScanInfo* pScanInfo = pInfo->info;
if (isAdd) { // add new table id
SArray* qa = filterUnqualifiedTables(pScanInfo, tableIdList, GET_TASKID(pTaskInfo));
int32_t numOfQualifiedTables = taosArrayGetSize(qa);
qDebug(" %d qualified child tables added into stream scanner", numOfQualifiedTables);
qDebug("%d qualified child tables added into stream scanner, %s", numOfQualifiedTables, id);
code = tqReaderAddTbUidList(pScanInfo->tqReader, qa);
if (code != TSDB_CODE_SUCCESS) {
taosArrayDestroy(qa);
@ -424,19 +425,6 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo
}
}
#if 0
bool exists = false;
for (int32_t k = 0; k < taosArrayGetSize(pListInfo->pTableList); ++k) {
STableKeyInfo* pKeyInfo = taosArrayGet(pListInfo->pTableList, k);
if (pKeyInfo->uid == keyInfo.uid) {
qWarn("ignore duplicated query table uid:%" PRIu64 " added, %s", pKeyInfo->uid, pTaskInfo->id.str);
exists = true;
}
}
if (!exists) {
#endif
tableListAddTableInfo(pTableListInfo, keyInfo.uid, keyInfo.groupId);
}
@ -447,7 +435,7 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo
taosArrayDestroy(qa);
} else { // remove the table id in current list
qDebug(" %d remove child tables from the stream scanner", (int32_t)taosArrayGetSize(tableIdList));
qDebug("%d remove child tables from the stream scanner, %s", (int32_t)taosArrayGetSize(tableIdList), id);
taosWLockLatch(&pTaskInfo->lock);
code = tqReaderRemoveTbUidList(pScanInfo->tqReader, tableIdList);
taosWUnLockLatch(&pTaskInfo->lock);
@ -1273,3 +1261,22 @@ void qProcessRspMsg(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
rpcFreeCont(pMsg->pCont);
destroySendMsgInfo(pSendInfo);
}
SArray* qGetQueriedTableListInfo(qTaskInfo_t tinfo) {
SExecTaskInfo* pTaskInfo = tinfo;
SArray* plist = getTableListInfo(pTaskInfo);
// only extract tables from the first element
STableListInfo* pTableListInfo = taosArrayGetP(plist, 0);
SArray* pUidList = taosArrayInit(10, sizeof(uint64_t));
int32_t numOfTables = tableListGetSize(pTableListInfo);
for(int32_t i = 0; i < numOfTables; ++i) {
STableKeyInfo* pKeyInfo = tableListGetInfo(pTableListInfo, i);
taosArrayPush(pUidList, &pKeyInfo->uid);
}
taosArrayDestroy(plist);
return pUidList;
}

View File

@ -1151,8 +1151,8 @@ void cleanupExprSupp(SExprSupp* pSupp) {
void cleanupBasicInfo(SOptrBasicInfo* pInfo) { pInfo->pRes = blockDataDestroy(pInfo->pRes); }
char* buildTaskId(uint64_t taskId, uint64_t queryId) {
char* p = taosMemoryMalloc(64);
void buildTaskId(uint64_t taskId, uint64_t queryId, char* dst) {
char* p = dst;
int32_t offset = 6;
memcpy(p, "TID:0x", offset);
@ -1163,7 +1163,6 @@ char* buildTaskId(uint64_t taskId, uint64_t queryId) {
offset += tintToHex(queryId, &p[offset]);
p[offset] = 0;
return p;
}
SExecTaskInfo* doCreateExecTaskInfo(uint64_t queryId, uint64_t taskId, int32_t vgId, EOPTR_EXEC_MODEL model,
@ -1185,7 +1184,9 @@ SExecTaskInfo* doCreateExecTaskInfo(uint64_t queryId, uint64_t taskId, int32_t v
taosInitRWLatch(&pTaskInfo->lock);
pTaskInfo->id.vgId = vgId;
pTaskInfo->id.queryId = queryId;
pTaskInfo->id.str = buildTaskId(taskId, queryId);
pTaskInfo->id.str = taosMemoryMalloc(64);
buildTaskId(taskId, queryId, pTaskInfo->id.str);
return pTaskInfo;
}
@ -2008,7 +2009,11 @@ void qStreamCloseTsdbReader(void* task) {
}
static void extractTableList(SArray* pList, const SOperatorInfo* pOperator) {
if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
SStreamScanInfo* pScanInfo = pOperator->info;
STableScanInfo* pTableScanInfo = pScanInfo->pTableScanOp->info;
taosArrayPush(pList, &pTableScanInfo->base.pTableListInfo);
} else if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
STableScanInfo* pScanInfo = pOperator->info;
taosArrayPush(pList, &pScanInfo->base.pTableListInfo);
} else {

View File

@ -33,7 +33,7 @@
int32_t scanDebug = 0;
#define MULTI_READER_MAX_TABLE_NUM 5000
#define MULTI_READER_MAX_TABLE_NUM 5000
#define SET_REVERSE_SCAN_FLAG(_info) ((_info)->scanFlag = REVERSE_SCAN)
#define SWITCH_ORDER(n) (((n) = ((n) == TSDB_ORDER_ASC) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC))
@ -330,7 +330,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca
qDebug("%s data block skipped, brange:%" PRId64 "-%" PRId64 ", rows:%" PRId64 ", uid:%" PRIu64,
GET_TASKID(pTaskInfo), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows,
pBlockInfo->id.uid);
doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, 1);
doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, pBlock->info.rows);
pCost->skipBlocks += 1;
tsdbReleaseDataBlock(pTableScanInfo->dataReader);
return TSDB_CODE_SUCCESS;
@ -341,7 +341,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca
if (success) { // failed to load the block sma data, data block statistics does not exist, load data block instead
qDebug("%s data block SMA loaded, brange:%" PRId64 "-%" PRId64 ", rows:%" PRId64, GET_TASKID(pTaskInfo),
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, 1);
doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, pBlock->info.rows);
tsdbReleaseDataBlock(pTableScanInfo->dataReader);
return TSDB_CODE_SUCCESS;
} else {
@ -708,9 +708,9 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
// todo refactor
/*pTableScanInfo->lastStatus.uid = pBlock->info.id.uid;*/
/*pTableScanInfo->lastStatus.ts = pBlock->info.window.ekey;*/
// pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_DATA;
// pTaskInfo->streamInfo.lastStatus.uid = pBlock->info.id.uid;
// pTaskInfo->streamInfo.lastStatus.ts = pBlock->info.window.ekey;
// pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_DATA;
// pTaskInfo->streamInfo.lastStatus.uid = pBlock->info.id.uid;
// pTaskInfo->streamInfo.lastStatus.ts = pBlock->info.window.ekey;
return pBlock;
}
@ -900,7 +900,7 @@ static void destroyTableScanOperatorInfo(void* param) {
SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* readHandle,
STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo) {
int32_t code = 0;
int32_t code = 0;
STableScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STableScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
@ -912,7 +912,8 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
SDataBlockDescNode* pDescNode = pScanNode->node.pOutputDataBlockDesc;
int32_t numOfCols = 0;
code = extractColMatchInfo(pScanNode->pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID, &pInfo->base.matchInfo);
code =
extractColMatchInfo(pScanNode->pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID, &pInfo->base.matchInfo);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@ -1636,7 +1637,7 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
if (pTaskInfo->streamInfo.submit.msgStr != NULL) {
if (pInfo->tqReader->msg2.msgStr == NULL) {
SPackedData submit = pTaskInfo->streamInfo.submit;
if (tqReaderSetSubmitReq2(pInfo->tqReader, submit.msgStr, submit.msgLen, submit.ver) < 0) {
if (tqReaderSetSubmitMsg(pInfo->tqReader, submit.msgStr, submit.msgLen, submit.ver) < 0) {
qError("submit msg messed up when initing stream submit block %p", submit.msgStr);
return NULL;
}
@ -1645,7 +1646,7 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
blockDataCleanup(pInfo->pRes);
SDataBlockInfo* pBlockInfo = &pInfo->pRes->info;
while (tqNextDataBlock2(pInfo->tqReader)) {
while (tqNextDataBlock(pInfo->tqReader)) {
SSDataBlock block = {0};
int32_t code = tqRetrieveDataBlock2(&block, pInfo->tqReader, NULL);
@ -1668,8 +1669,9 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
if (pTaskInfo->streamInfo.currentOffset.type == TMQ_OFFSET__SNAPSHOT_DATA) {
SSDataBlock* pResult = doTableScan(pInfo->pTableScanOp);
if (pResult && pResult->info.rows > 0) {
qDebug("queue scan tsdb return %"PRId64" rows min:%" PRId64 " max:%" PRId64 " wal curVersion:%" PRId64, pResult->info.rows,
pResult->info.window.skey, pResult->info.window.ekey, pInfo->tqReader->pWalReader->curVersion);
qDebug("queue scan tsdb return %" PRId64 " rows min:%" PRId64 " max:%" PRId64 " wal curVersion:%" PRId64,
pResult->info.rows, pResult->info.window.skey, pResult->info.window.ekey,
pInfo->tqReader->pWalReader->curVersion);
tqOffsetResetToData(&pTaskInfo->streamInfo.currentOffset, pResult->info.id.uid, pResult->info.window.ekey);
return pResult;
}
@ -1687,17 +1689,21 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
while (1) {
SFetchRet ret = {0};
tqNextBlock(pInfo->tqReader, &ret);
tqOffsetResetToLog(&pTaskInfo->streamInfo.currentOffset, pInfo->tqReader->pWalReader->curVersion - 1); //curVersion move to next, so currentOffset = curVersion - 1
tqOffsetResetToLog(
&pTaskInfo->streamInfo.currentOffset,
pInfo->tqReader->pWalReader->curVersion - 1); // curVersion move to next, so currentOffset = curVersion - 1
if (ret.fetchType == FETCH_TYPE__DATA) {
qDebug("doQueueScan get data from log %"PRId64" rows, version:%" PRId64, ret.data.info.rows, pTaskInfo->streamInfo.currentOffset.version);
qDebug("doQueueScan get data from log %" PRId64 " rows, version:%" PRId64, ret.data.info.rows,
pTaskInfo->streamInfo.currentOffset.version);
blockDataCleanup(pInfo->pRes);
setBlockIntoRes(pInfo, &ret.data, true);
if (pInfo->pRes->info.rows > 0) {
qDebug("doQueueScan get data from log %"PRId64" rows, return, version:%" PRId64, pInfo->pRes->info.rows, pTaskInfo->streamInfo.currentOffset.version);
qDebug("doQueueScan get data from log %" PRId64 " rows, return, version:%" PRId64, pInfo->pRes->info.rows,
pTaskInfo->streamInfo.currentOffset.version);
return pInfo->pRes;
}
}else if(ret.fetchType == FETCH_TYPE__NONE){
} else if (ret.fetchType == FETCH_TYPE__NONE) {
qDebug("doQueueScan get none from log, return, version:%" PRId64, pTaskInfo->streamInfo.currentOffset.version);
return NULL;
}
@ -1806,7 +1812,6 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
/*resetTableScanInfo(pTSInfo, pWin);*/
tsdbReaderClose(pTSInfo->base.dataReader);
qDebug("4");
pTSInfo->base.dataReader = NULL;
pInfo->pTableScanOp->status = OP_OPENED;
@ -1889,7 +1894,6 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
pTaskInfo->streamInfo.recoverStep = STREAM_RECOVER_STEP__NONE;
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
tsdbReaderClose(pTSInfo->base.dataReader);
qDebug("5");
pTSInfo->base.dataReader = NULL;
@ -1916,6 +1920,7 @@ FETCH_NEXT_BLOCK:
if (pBlock->info.parTbName[0]) {
streamStatePutParName(pTaskInfo->streamInfo.pState, pBlock->info.id.groupId, pBlock->info.parTbName);
}
// TODO move into scan
pBlock->info.calWin.skey = INT64_MIN;
pBlock->info.calWin.ekey = INT64_MAX;
@ -2058,7 +2063,7 @@ FETCH_NEXT_BLOCK:
int32_t current = pInfo->validBlockIndex++;
SPackedData* pSubmit = taosArrayGet(pInfo->pBlockLists, current);
if (tqReaderSetSubmitReq2(pInfo->tqReader, pSubmit->msgStr, pSubmit->msgLen, pSubmit->ver) < 0) {
if (tqReaderSetSubmitMsg(pInfo->tqReader, pSubmit->msgStr, pSubmit->msgLen, pSubmit->ver) < 0) {
qError("submit msg messed up when initing stream submit block %p, current %d, total %d", pSubmit, current,
totBlockNum);
continue;
@ -2067,7 +2072,7 @@ FETCH_NEXT_BLOCK:
blockDataCleanup(pInfo->pRes);
while (tqNextDataBlock2(pInfo->tqReader)) {
while (tqNextDataBlock(pInfo->tqReader)) {
SSDataBlock block = {0};
int32_t code = tqRetrieveDataBlock2(&block, pInfo->tqReader, NULL);
@ -2176,7 +2181,7 @@ static SSDataBlock* doRawScan(SOperatorInfo* pOperator) {
}
SMetaTableInfo mtInfo = getUidfromSnapShot(pInfo->sContext);
STqOffsetVal offset = {0};
STqOffsetVal offset = {0};
if (mtInfo.uid == 0) { // read snapshot done, change to get data from wal
qDebug("tmqsnap read snapshot done, change to get data from wal");
tqOffsetResetToLog(&offset, pInfo->sContext->snapVersion);

View File

@ -2333,9 +2333,15 @@ static int32_t getNextQualifiedFinalWindow(SInterval* pInterval, STimeWindow* pN
return startPos;
}
static void setStreamDataVersion(SExecTaskInfo* pTaskInfo, int64_t version, int64_t ckId) {
pTaskInfo->streamInfo.dataVersion = version;
pTaskInfo->streamInfo.checkPointId = ckId;
}
static void doStreamIntervalAggImpl(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBlock, uint64_t groupId,
SSHashObj* pUpdatedMap) {
SStreamIntervalOperatorInfo* pInfo = (SStreamIntervalOperatorInfo*)pOperatorInfo->info;
pInfo->dataVersion = TMAX(pInfo->dataVersion, pSDataBlock->info.version);
SResultRowInfo* pResultRowInfo = &(pInfo->binfo.resultRowInfo);
SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
@ -2432,10 +2438,8 @@ static void doStreamIntervalAggImpl(SOperatorInfo* pOperatorInfo, SSDataBlock* p
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true);
applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows,
pSDataBlock->info.rows, numOfOutput);
SWinKey key = {
.ts = nextWin.skey,
.groupId = groupId,
};
SWinKey key = { .ts = nextWin.skey, .groupId = groupId };
saveOutputBuf(pInfo->pState, &key, pResult, pInfo->aggSup.resultRowSize);
releaseOutputBuf(pInfo->pState, &key, pResult);
if (pInfo->delKey.ts > key.ts) {
@ -2503,6 +2507,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
clearFunctionContext(&pOperator->exprSupp);
// semi interval operator clear disk buffer
clearStreamIntervalOperator(pInfo);
setStreamDataVersion(pTaskInfo, pInfo->dataVersion, pInfo->pState->checkPointId);
qDebug("===stream===clear semi operator");
} else {
deleteIntervalDiscBuf(pInfo->pState, pInfo->pPullDataMap, pInfo->twAggSup.maxTs - pInfo->twAggSup.deleteMark,
@ -2776,6 +2781,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
pInfo->numOfDatapack = 0;
pInfo->pUpdated = NULL;
pInfo->pUpdatedMap = NULL;
pInfo->dataVersion = 0;
pOperator->operatorType = pPhyNode->type;
pOperator->blocking = true;
@ -3126,6 +3132,8 @@ static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSData
int32_t rows = pSDataBlock->info.rows;
int32_t winRows = 0;
pInfo->dataVersion = TMAX(pInfo->dataVersion, pSDataBlock->info.version);
SColumnInfoData* pStartTsCol = taosArrayGet(pSDataBlock->pDataBlock, pInfo->primaryTsIndex);
TSKEY* startTsCols = (int64_t*)pStartTsCol->pData;
SColumnInfoData* pEndTsCol = NULL;
@ -3589,6 +3597,7 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh
pInfo->ignoreExpiredDataSaved = false;
pInfo->pUpdated = NULL;
pInfo->pStUpdated = NULL;
pInfo->dataVersion = 0;
setOperatorInfo(pOperator, "StreamSessionWindowAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION, true,
OP_NOT_OPENED, pInfo, pTaskInfo);
@ -3899,6 +3908,9 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl
TSKEY* tsCols = NULL;
SResultRow* pResult = NULL;
int32_t winRows = 0;
pInfo->dataVersion = TMAX(pInfo->dataVersion, pSDataBlock->info.version);
if (pSDataBlock->pDataBlock != NULL) {
SColumnInfoData* pColDataInfo = taosArrayGet(pSDataBlock->pDataBlock, pInfo->primaryTsIndex);
tsCols = (int64_t*)pColDataInfo->pData;
@ -4115,6 +4127,7 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys
pInfo->ignoreExpiredDataSaved = false;
pInfo->pUpdated = NULL;
pInfo->pSeUpdated = NULL;
pInfo->dataVersion = 0;
setOperatorInfo(pOperator, "StreamStateAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE, true, OP_NOT_OPENED,
pInfo, pTaskInfo);
@ -4750,6 +4763,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
&pInfo->delKey);
setOperatorCompleted(pOperator);
streamStateCommit(pTaskInfo->streamInfo.pState);
setStreamDataVersion(pTaskInfo, pInfo->dataVersion, pInfo->pState->checkPointId);
return NULL;
}
@ -4771,6 +4785,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
pInfo->numOfDatapack = 0;
break;
}
pInfo->numOfDatapack++;
printDataBlock(pBlock, "single interval recv");

View File

@ -1933,14 +1933,35 @@ static int32_t translateToIso8601(SFunctionNode* pFunc, char* pErrBuf, int32_t l
}
static int32_t translateToUnixtimestamp(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
if (1 != LIST_LENGTH(pFunc->pParameterList)) {
int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
int16_t resType = TSDB_DATA_TYPE_BIGINT;
if (1 != numOfParams && 2 != numOfParams) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
if (!IS_STR_DATA_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type)) {
uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
if (!IS_STR_DATA_TYPE(para1Type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
if (2 == numOfParams) {
uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
if (!IS_INTEGER_TYPE(para2Type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
SValueNode* pValue = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1);
if (pValue->datum.i == 1) {
resType = TSDB_DATA_TYPE_TIMESTAMP;
} else if (pValue->datum.i == 0) {
resType = TSDB_DATA_TYPE_BIGINT;
} else {
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
"TO_UNIXTIMESTAMP function second parameter should be 0/1");
}
}
// add database precision as param
uint8_t dbPrec = pFunc->node.resType.precision;
int32_t code = addDbPrecisonParam(&pFunc->pParameterList, dbPrec);
@ -1948,7 +1969,7 @@ static int32_t translateToUnixtimestamp(SFunctionNode* pFunc, char* pErrBuf, int
return code;
}
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT};
pFunc->node.resType = (SDataType){.bytes = tDataTypes[resType].bytes, .type = resType};
return TSDB_CODE_SUCCESS;
}
@ -3279,7 +3300,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "_irowts",
.type = FUNCTION_TYPE_IROWTS,
.classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_INTERP_PC_FUNC|FUNC_MGT_KEEP_ORDER_FUNC,
.classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_INTERP_PC_FUNC | FUNC_MGT_KEEP_ORDER_FUNC,
.translateFunc = translateTimePseudoColumn,
.getEnvFunc = getTimePseudoFuncEnv,
.initFunc = NULL,
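
The translateToUnixtimestamp change above lets TO_UNIXTIMESTAMP take an optional second 0/1 flag that selects the result type (BIGINT by default, TIMESTAMP when the flag is 1). A condensed, hypothetical sketch of that decision; the names are illustrative, not the library's API:

```c
/* Hypothetical condensation of the result-type selection shown above:
 * one string parameter plus an optional 0/1 flag choosing BIGINT or TIMESTAMP. */
#include <stdbool.h>
#include <stdint.h>

enum { TYPE_BIGINT = 0, TYPE_TIMESTAMP = 1 };

/* returns -1 on invalid input, otherwise the chosen result type */
static int pickUnixtimestampResultType(int numOfParams, bool firstParamIsString,
                                       int64_t secondParamValue) {
  if (numOfParams != 1 && numOfParams != 2) return -1;  /* wrong arity */
  if (!firstParamIsString) return -1;                   /* must convert a string */
  if (numOfParams == 1) return TYPE_BIGINT;             /* default behavior */
  if (secondParamValue == 0) return TYPE_BIGINT;
  if (secondParamValue == 1) return TYPE_TIMESTAMP;
  return -1;                                            /* flag must be 0 or 1 */
}
```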

View File

@ -3353,7 +3353,7 @@ int32_t spreadFunction(SqlFunctionCtx* pCtx) {
goto _spread_over;
}
double tmin = 0.0, tmax = 0.0;
if (IS_SIGNED_NUMERIC_TYPE(type)) {
if (IS_SIGNED_NUMERIC_TYPE(type) || IS_TIMESTAMP_TYPE(type)) {
tmin = (double)GET_INT64_VAL(&pAgg->min);
tmax = (double)GET_INT64_VAL(&pAgg->max);
} else if (IS_FLOAT_TYPE(type)) {
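
The SPREAD change above routes TIMESTAMP columns through the signed 64-bit path when reading pre-aggregated min/max values. A small hedged sketch of that dispatch, with simplified stand-in types:

```c
/* Sketch (stand-in types) of the min/max extraction dispatch: timestamps are
 * stored as signed 64-bit values, so they can share the signed-integer path. */
#include <stdbool.h>
#include <stdint.h>

typedef struct { int64_t min; int64_t max; double dmin; double dmax; } Agg;

static void readRange(const Agg *agg, bool isSignedOrTimestamp,
                      double *tmin, double *tmax) {
  if (isSignedOrTimestamp) {   /* signed integers and TIMESTAMP */
    *tmin = (double)agg->min;
    *tmax = (double)agg->max;
  } else {                     /* floating point types */
    *tmin = agg->dmin;
    *tmax = agg->dmax;
  }
}
```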

View File

@ -174,6 +174,8 @@ bool fmIsSelectFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC
bool fmIsTimelineFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_TIMELINE_FUNC); }
bool fmIsDateTimeFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_DATETIME_FUNC); }
bool fmIsPseudoColumnFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_PSEUDO_COLUMN_FUNC); }
bool fmIsScanPseudoColumnFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_SCAN_PC_FUNC); }
@ -184,6 +186,7 @@ bool fmIsWindowClauseFunc(int32_t funcId) { return fmIsAggFunc(funcId) || fmIsWi
bool fmIsIndefiniteRowsFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_INDEFINITE_ROWS_FUNC); }
bool fmIsSpecialDataRequiredFunc(int32_t funcId) {
return isSpecificClassifyFunc(funcId, FUNC_MGT_SPECIAL_DATA_REQUIRED);
}
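
The helpers above all reduce to the same test: look up a function id's classification bitmask and check one bit. A hedged sketch of that shape, with illustrative bit values and a stubbed lookup:

```c
/* Hypothetical sketch of the classification-bit test shared by the helpers
 * above: each function id maps to a bitmask of traits, and a helper checks
 * whether one particular bit is set. */
#include <stdbool.h>
#include <stdint.h>

#define FUNC_MGT_PSEUDO_COLUMN (1ULL << 3)   /* illustrative bit positions */
#define FUNC_MGT_DATETIME      (1ULL << 9)

/* hypothetical lookup from a function id to its classification bitmask */
static uint64_t lookupClassification(int32_t funcId) {
  (void)funcId;
  return FUNC_MGT_PSEUDO_COLUMN | FUNC_MGT_DATETIME;  /* stub for illustration */
}

static bool hasClassification(int32_t funcId, uint64_t mask) {
  return (lookupClassification(funcId) & mask) != 0;
}
```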

View File

@ -343,7 +343,7 @@ typedef struct SUdfcFuncStub {
char udfName[TSDB_FUNC_NAME_LEN + 1];
UdfcFuncHandle handle;
int32_t refCount;
int64_t lastRefTime;
int64_t createTime;
} SUdfcFuncStub;
typedef struct SUdfcProxy {
@ -363,6 +363,7 @@ typedef struct SUdfcProxy {
uv_mutex_t udfStubsMutex;
SArray *udfStubs; // SUdfcFuncStub
SArray *expiredUdfStubs; //SUdfcFuncStub
uv_mutex_t udfcUvMutex;
int8_t initialized;
@ -959,7 +960,7 @@ int32_t udfcOpen();
int32_t udfcClose();
int32_t acquireUdfFuncHandle(char *udfName, UdfcFuncHandle *pHandle);
void releaseUdfFuncHandle(char *udfName);
void releaseUdfFuncHandle(char *udfName, UdfcFuncHandle handle);
int32_t cleanUpUdfs();
bool udfAggGetEnv(struct SFunctionNode *pFunc, SFuncExecEnv *pEnv);
@ -967,6 +968,8 @@ bool udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo *pRes
int32_t udfAggProcess(struct SqlFunctionCtx *pCtx);
int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock *pBlock);
void cleanupNotExpiredUdfs();
void cleanupExpiredUdfs();
int compareUdfcFuncSub(const void *elem1, const void *elem2) {
SUdfcFuncStub *stub1 = (SUdfcFuncStub *)elem1;
SUdfcFuncStub *stub2 = (SUdfcFuncStub *)elem2;
@ -982,16 +985,24 @@ int32_t acquireUdfFuncHandle(char *udfName, UdfcFuncHandle *pHandle) {
if (stubIndex != -1) {
SUdfcFuncStub *foundStub = taosArrayGet(gUdfcProxy.udfStubs, stubIndex);
UdfcFuncHandle handle = foundStub->handle;
if (handle != NULL && ((SUdfcUvSession *)handle)->udfUvPipe != NULL) {
*pHandle = foundStub->handle;
++foundStub->refCount;
foundStub->lastRefTime = taosGetTimestampUs();
uv_mutex_unlock(&gUdfcProxy.udfStubsMutex);
return 0;
int64_t currUs = taosGetTimestampUs();
bool expired = (currUs - foundStub->createTime) >= 10 * 1000 * 1000;
if (!expired) {
if (handle != NULL && ((SUdfcUvSession *)handle)->udfUvPipe != NULL) {
*pHandle = foundStub->handle;
++foundStub->refCount;
uv_mutex_unlock(&gUdfcProxy.udfStubsMutex);
return 0;
} else {
fnInfo("udf invalid handle for %s, refCount: %d, create time: %" PRId64 ". remove it from cache", udfName,
foundStub->refCount, foundStub->createTime);
taosArrayRemove(gUdfcProxy.udfStubs, stubIndex);
}
} else {
fnInfo("invalid handle for %s, refCount: %d, last ref time: %" PRId64 ". remove it from cache", udfName,
foundStub->refCount, foundStub->lastRefTime);
fnInfo("udf handle expired for %s, will setup udf. move it to expired list", udfName);
taosArrayRemove(gUdfcProxy.udfStubs, stubIndex);
taosArrayPush(gUdfcProxy.expiredUdfStubs, foundStub);
taosArraySort(gUdfcProxy.expiredUdfStubs, compareUdfcFuncSub);
}
}
*pHandle = NULL;
@ -1001,7 +1012,7 @@ int32_t acquireUdfFuncHandle(char *udfName, UdfcFuncHandle *pHandle) {
strncpy(stub.udfName, udfName, TSDB_FUNC_NAME_LEN);
stub.handle = *pHandle;
++stub.refCount;
stub.lastRefTime = taosGetTimestampUs();
stub.createTime = taosGetTimestampUs();
taosArrayPush(gUdfcProxy.udfStubs, &stub);
taosArraySort(gUdfcProxy.udfStubs, compareUdfcFuncSub);
} else {
@ -1012,21 +1023,75 @@ int32_t acquireUdfFuncHandle(char *udfName, UdfcFuncHandle *pHandle) {
return code;
}
void releaseUdfFuncHandle(char *udfName) {
void releaseUdfFuncHandle(char *udfName, UdfcFuncHandle handle) {
uv_mutex_lock(&gUdfcProxy.udfStubsMutex);
SUdfcFuncStub key = {0};
strncpy(key.udfName, udfName, TSDB_FUNC_NAME_LEN);
SUdfcFuncStub *foundStub = taosArraySearch(gUdfcProxy.udfStubs, &key, compareUdfcFuncSub, TD_EQ);
if (!foundStub) {
SUdfcFuncStub *expiredStub = taosArraySearch(gUdfcProxy.expiredUdfStubs, &key, compareUdfcFuncSub, TD_EQ);
if (!foundStub && !expiredStub) {
uv_mutex_unlock(&gUdfcProxy.udfStubsMutex);
return;
}
if (foundStub->refCount > 0) {
if (foundStub != NULL && foundStub->handle == handle && foundStub->refCount > 0) {
--foundStub->refCount;
}
if (expiredStub != NULL && expiredStub->handle == handle && expiredStub->refCount > 0) {
--expiredStub->refCount;
}
uv_mutex_unlock(&gUdfcProxy.udfStubsMutex);
}
void cleanupExpiredUdfs() {
int32_t i = 0;
SArray *expiredUdfStubs = taosArrayInit(16, sizeof(SUdfcFuncStub));
while (i < taosArrayGetSize(gUdfcProxy.expiredUdfStubs)) {
SUdfcFuncStub *stub = taosArrayGet(gUdfcProxy.expiredUdfStubs, i);
if (stub->refCount == 0) {
fnInfo("tear down udf. expired. udf name: %s, handle: %p, ref count: %d", stub->udfName, stub->handle, stub->refCount);
doTeardownUdf(stub->handle);
} else {
fnInfo("udf still in use. expired. udf name: %s, ref count: %d, create time: %" PRId64 ", handle: %p", stub->udfName,
stub->refCount, stub->createTime, stub->handle);
UdfcFuncHandle handle = stub->handle;
if (handle != NULL && ((SUdfcUvSession *)handle)->udfUvPipe != NULL) {
taosArrayPush(expiredUdfStubs, stub);
} else {
fnInfo("udf invalid handle for %s, expired. refCount: %d, create time: %" PRId64 ". remove it from cache",
stub->udfName, stub->refCount, stub->createTime);
}
}
++i;
}
taosArrayDestroy(gUdfcProxy.expiredUdfStubs);
gUdfcProxy.expiredUdfStubs = expiredUdfStubs;
}
void cleanupNotExpiredUdfs() {
SArray *udfStubs = taosArrayInit(16, sizeof(SUdfcFuncStub));
int32_t i = 0;
while (i < taosArrayGetSize(gUdfcProxy.udfStubs)) {
SUdfcFuncStub *stub = taosArrayGet(gUdfcProxy.udfStubs, i);
if (stub->refCount == 0) {
fnInfo("tear down udf. udf name: %s, handle: %p, ref count: %d", stub->udfName, stub->handle, stub->refCount);
doTeardownUdf(stub->handle);
} else {
fnInfo("udf still in use. udf name: %s, ref count: %d, create time: %" PRId64 ", handle: %p", stub->udfName,
stub->refCount, stub->createTime, stub->handle);
UdfcFuncHandle handle = stub->handle;
if (handle != NULL && ((SUdfcUvSession *)handle)->udfUvPipe != NULL) {
taosArrayPush(udfStubs, stub);
} else {
fnInfo("udf invalid handle for %s, refCount: %d, create time: %" PRId64 ". remove it from cache",
stub->udfName, stub->refCount, stub->createTime);
}
}
++i;
}
taosArrayDestroy(gUdfcProxy.udfStubs);
gUdfcProxy.udfStubs = udfStubs;
}
int32_t cleanUpUdfs() {
int8_t initialized = atomic_load_8(&gUdfcProxy.initialized);
if (!initialized) {
@ -1034,32 +1099,15 @@ int32_t cleanUpUdfs() {
}
uv_mutex_lock(&gUdfcProxy.udfStubsMutex);
if (gUdfcProxy.udfStubs == NULL || taosArrayGetSize(gUdfcProxy.udfStubs) == 0) {
if ((gUdfcProxy.udfStubs == NULL || taosArrayGetSize(gUdfcProxy.udfStubs) == 0) &&
(gUdfcProxy.expiredUdfStubs == NULL || taosArrayGetSize(gUdfcProxy.expiredUdfStubs) == 0)) {
uv_mutex_unlock(&gUdfcProxy.udfStubsMutex);
return TSDB_CODE_SUCCESS;
}
SArray *udfStubs = taosArrayInit(16, sizeof(SUdfcFuncStub));
int32_t i = 0;
while (i < taosArrayGetSize(gUdfcProxy.udfStubs)) {
SUdfcFuncStub *stub = taosArrayGet(gUdfcProxy.udfStubs, i);
if (stub->refCount == 0) {
fnInfo("tear down udf. udf name: %s, handle: %p, ref count: %d", stub->udfName, stub->handle, stub->refCount);
doTeardownUdf(stub->handle);
} else {
fnInfo("udf still in use. udf name: %s, ref count: %d, last ref time: %" PRId64 ", handle: %p", stub->udfName,
stub->refCount, stub->lastRefTime, stub->handle);
UdfcFuncHandle handle = stub->handle;
if (handle != NULL && ((SUdfcUvSession *)handle)->udfUvPipe != NULL) {
taosArrayPush(udfStubs, stub);
} else {
fnInfo("udf invalid handle for %s, refCount: %d, last ref time: %" PRId64 ". remove it from cache",
stub->udfName, stub->refCount, stub->lastRefTime);
}
}
++i;
}
taosArrayDestroy(gUdfcProxy.udfStubs);
gUdfcProxy.udfStubs = udfStubs;
cleanupNotExpiredUdfs();
cleanupExpiredUdfs();
uv_mutex_unlock(&gUdfcProxy.udfStubsMutex);
return 0;
}
@ -1075,7 +1123,7 @@ int32_t callUdfScalarFunc(char *udfName, SScalarParam *input, int32_t numOfCols,
code = doCallUdfScalarFunc(handle, input, numOfCols, output);
if (code != TSDB_CODE_SUCCESS) {
fnError("udfc scalar function execution failure");
releaseUdfFuncHandle(udfName);
releaseUdfFuncHandle(udfName, handle);
return code;
}
@ -1089,7 +1137,7 @@ int32_t callUdfScalarFunc(char *udfName, SScalarParam *input, int32_t numOfCols,
code = TSDB_CODE_UDF_INVALID_OUTPUT_TYPE;
}
}
releaseUdfFuncHandle(udfName);
releaseUdfFuncHandle(udfName, handle);
return code;
}
@ -1122,7 +1170,7 @@ bool udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo *pResult
SUdfInterBuf buf = {0};
if ((udfCode = doCallUdfAggInit(handle, &buf)) != 0) {
fnError("udfAggInit error. step doCallUdfAggInit. udf code: %d", udfCode);
releaseUdfFuncHandle(pCtx->udfName);
releaseUdfFuncHandle(pCtx->udfName, handle);
return false;
}
if (buf.bufLen <= session->bufSize) {
@ -1131,10 +1179,10 @@ bool udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo *pResult
udfRes->interResNum = buf.numOfResult;
} else {
fnError("udfc inter buf size %d is greater than function bufSize %d", buf.bufLen, session->bufSize);
releaseUdfFuncHandle(pCtx->udfName);
releaseUdfFuncHandle(pCtx->udfName, handle);
return false;
}
releaseUdfFuncHandle(pCtx->udfName);
releaseUdfFuncHandle(pCtx->udfName, handle);
freeUdfInterBuf(&buf);
return true;
}
@ -1191,7 +1239,7 @@ int32_t udfAggProcess(struct SqlFunctionCtx *pCtx) {
taosArrayDestroy(pTempBlock->pDataBlock);
taosMemoryFree(pTempBlock);
releaseUdfFuncHandle(pCtx->udfName);
releaseUdfFuncHandle(pCtx->udfName, handle);
freeUdfInterBuf(&newState);
return udfCode;
}
@ -1236,7 +1284,7 @@ int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock *pBlock) {
freeUdfInterBuf(&resultBuf);
int32_t numOfResults = functionFinalizeWithResultBuf(pCtx, pBlock, udfRes->finalResBuf);
releaseUdfFuncHandle(pCtx->udfName);
releaseUdfFuncHandle(pCtx->udfName, handle);
return udfCallCode == 0 ? numOfResults : udfCallCode;
}
@ -1663,6 +1711,7 @@ int32_t udfcOpen() {
uv_barrier_wait(&proxy->initBarrier);
uv_mutex_init(&proxy->udfStubsMutex);
proxy->udfStubs = taosArrayInit(8, sizeof(SUdfcFuncStub));
proxy->expiredUdfStubs = taosArrayInit(8, sizeof(SUdfcFuncStub));
uv_mutex_init(&proxy->udfcUvMutex);
fnInfo("udfc initialized") return 0;
}
@ -1679,6 +1728,7 @@ int32_t udfcClose() {
uv_thread_join(&udfc->loopThread);
uv_mutex_destroy(&udfc->taskQueueMutex);
uv_barrier_destroy(&udfc->initBarrier);
taosArrayDestroy(udfc->expiredUdfStubs);
taosArrayDestroy(udfc->udfStubs);
uv_mutex_destroy(&udfc->udfStubsMutex);
uv_mutex_destroy(&udfc->udfcUvMutex);
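
The UDF client changes above replace lastRefTime with a createTime, give each cached handle a fixed 10-second lifetime, park expired stubs on a separate list, and tear a stub down only once its reference count reaches zero. A simplified sketch of the two decisions involved, under those assumptions and with stand-in types:

```c
/* Simplified sketch of the handle-cache lifecycle above: a cached handle is
 * reused only within a fixed TTL of its creation; once expired it is moved
 * to a separate list and torn down when no caller still references it. */
#include <stdbool.h>
#include <stdint.h>

#define HANDLE_TTL_US (10 * 1000 * 1000)  /* 10 seconds, as in the code above */

typedef struct {
  void   *handle;        /* connection/session handle for the UDF */
  int32_t refCount;      /* callers currently holding the handle */
  int64_t createTimeUs;  /* when the handle was set up */
} Stub;

/* decide whether a cached stub may still be handed out at time nowUs */
static bool stubReusable(const Stub *s, int64_t nowUs) {
  return s->handle != NULL && (nowUs - s->createTimeUs) < HANDLE_TTL_US;
}

/* decide whether an expired stub can be torn down */
static bool stubTearDownReady(const Stub *s) {
  return s->refCount == 0;
}
```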

View File

@ -591,7 +591,7 @@ SUdf *udfdNewUdf(const char *udfName) {
SUdf *udfdGetOrCreateUdf(const char *udfName) {
uv_mutex_lock(&global.udfsMutex);
SUdf **pUdfHash = taosHashGet(global.udfsHash, udfName, strlen(udfName));
int64_t currTime = taosGetTimestampSec();
int64_t currTime = taosGetTimestampMs();
bool expired = false;
if (pUdfHash) {
expired = currTime - (*pUdfHash)->lastFetchTime > 10 * 1000; // 10s
@ -688,6 +688,8 @@ void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
output.colMeta.type = udf->outputType;
output.colMeta.precision = 0;
output.colMeta.scale = 0;
udfColEnsureCapacity(&output, call->block.info.rows);
SUdfDataBlock input = {0};
convertDataBlockToUdfDataBlock(&call->block, &input);
code = udf->scriptPlugin->udfScalarProcFunc(&input, &output, udf->scriptUdfCtx);
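
The udfd hunks above switch the fetch timestamp to milliseconds, so the existing `10 * 1000` threshold now really expresses a 10-second expiry window, and they grow the output column to the input row count before the scalar UDF runs. A hedged sketch of the unit-consistent expiry check (names are illustrative):

```c
/* Sketch of the unit fix above: the last-fetch timestamp is now taken in
 * milliseconds, so comparing against 10 * 1000 means a 10-second window. */
#include <stdbool.h>
#include <stdint.h>

static bool udfEntryExpired(int64_t lastFetchMs, int64_t nowMs) {
  return (nowMs - lastFetchMs) > 10 * 1000;   /* both sides in milliseconds */
}
```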

View File

@ -24,7 +24,7 @@ DLL_EXPORT int32_t udf1(SUdfDataBlock *block, SUdfColumn *resultCol) {
}
}
if (j == block->numOfCols) {
int32_t luckyNum = 88;
int32_t luckyNum = 1;
udfColDataSet(resultCol, i, (char *)&luckyNum, false);
}
}

View File

@ -136,6 +136,7 @@ static bool logicConditionNodeEqual(const SLogicConditionNode* a, const SLogicCo
static bool functionNodeEqual(const SFunctionNode* a, const SFunctionNode* b) {
COMPARE_SCALAR_FIELD(funcId);
COMPARE_STRING_FIELD(functionName);
COMPARE_NODE_LIST_FIELD(pParameterList);
return true;
}
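
Adding COMPARE_NODE_LIST_FIELD(pParameterList) above means two calls to the same function compare equal only when their argument lists also match, so, for example, f(c1) and f(c2) are no longer treated as the same expression. A hedged sketch of the resulting comparison shape, with simplified stand-in types rather than the nodes API:

```c
/* Hypothetical sketch of the equality shape after the fix: same function id,
 * same name, and element-wise equal parameter lists. */
#include <stdbool.h>
#include <string.h>

typedef struct Node { int id; } Node;   /* stand-in for an expression node */

static bool nodeEqual(const Node *a, const Node *b) { return a->id == b->id; }

static bool funcNodeEqual(int funcIdA, int funcIdB,
                          const char *nameA, const char *nameB,
                          const Node *const *paramsA, int nA,
                          const Node *const *paramsB, int nB) {
  if (funcIdA != funcIdB) return false;
  if (strcmp(nameA, nameB) != 0) return false;
  if (nA != nB) return false;                       /* list lengths must match */
  for (int i = 0; i < nA; ++i) {
    if (!nodeEqual(paramsA[i], paramsB[i])) return false;
  }
  return true;
}
```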

View File

@ -1047,8 +1047,8 @@ sliding_opt(A) ::= SLIDING NK_LP duration_literal(B) NK_RP.
fill_opt(A) ::= . { A = NULL; }
fill_opt(A) ::= FILL NK_LP fill_mode(B) NK_RP. { A = createFillNode(pCxt, B, NULL); }
fill_opt(A) ::= FILL NK_LP VALUE NK_COMMA literal_list(B) NK_RP. { A = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, B)); }
fill_opt(A) ::= FILL NK_LP VALUE_F NK_COMMA literal_list(B) NK_RP. { A = createFillNode(pCxt, FILL_MODE_VALUE_F, createNodeListNode(pCxt, B)); }
fill_opt(A) ::= FILL NK_LP VALUE NK_COMMA expression_list(B) NK_RP. { A = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, B)); }
fill_opt(A) ::= FILL NK_LP VALUE_F NK_COMMA expression_list(B) NK_RP. { A = createFillNode(pCxt, FILL_MODE_VALUE_F, createNodeListNode(pCxt, B)); }
%type fill_mode { EFillMode }
%destructor fill_mode { }

View File

@ -645,6 +645,10 @@ static bool isSelectStmt(SNode* pCurrStmt) {
return NULL != pCurrStmt && QUERY_NODE_SELECT_STMT == nodeType(pCurrStmt);
}
static bool isDeleteStmt(SNode* pCurrStmt) {
return NULL != pCurrStmt && QUERY_NODE_DELETE_STMT == nodeType(pCurrStmt);
}
static bool isSetOperator(SNode* pCurrStmt) {
return NULL != pCurrStmt && QUERY_NODE_SET_OPERATOR == nodeType(pCurrStmt);
}
@ -669,6 +673,9 @@ static uint8_t getPrecisionFromCurrStmt(SNode* pCurrStmt, uint8_t defaultVal) {
if (NULL != pCurrStmt && QUERY_NODE_CREATE_STREAM_STMT == nodeType(pCurrStmt)) {
return getPrecisionFromCurrStmt(((SCreateStreamStmt*)pCurrStmt)->pQuery, defaultVal);
}
if (isDeleteStmt(pCurrStmt)) {
return ((SDeleteStmt*)pCurrStmt)->precision;
}
return defaultVal;
}
@ -688,6 +695,10 @@ static bool isWindowPseudoColumnFunc(const SNode* pNode) {
return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsWindowPseudoColumnFunc(((SFunctionNode*)pNode)->funcId));
}
static bool isInterpPseudoColumnFunc(const SNode* pNode) {
return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsInterpPseudoColumnFunc(((SFunctionNode*)pNode)->funcId));
}
static bool isTimelineFunc(const SNode* pNode) {
return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsTimelineFunc(((SFunctionNode*)pNode)->funcId));
}
@ -1235,6 +1246,10 @@ static int32_t calcTypeBytes(SDataType dt) {
}
static EDealRes translateValue(STranslateContext* pCxt, SValueNode* pVal) {
if (pVal->translate) {
return TSDB_CODE_SUCCESS;
}
SDataType dt = pVal->node.resType;
dt.bytes = calcTypeBytes(dt);
return translateValueImpl(pCxt, pVal, dt, false);
@ -1295,7 +1310,8 @@ static EDealRes translateOperator(STranslateContext* pCxt, SOperatorNode* pOp) {
}
static EDealRes haveVectorFunction(SNode* pNode, void* pContext) {
if (isAggFunc(pNode) || isIndefiniteRowsFunc(pNode) || isWindowPseudoColumnFunc(pNode)) {
if (isAggFunc(pNode) || isIndefiniteRowsFunc(pNode) || isWindowPseudoColumnFunc(pNode) ||
isInterpPseudoColumnFunc(pNode)) {
*((bool*)pContext) = true;
return DEAL_RES_END;
}
@ -1522,6 +1538,21 @@ static int32_t translateInterpFunc(STranslateContext* pCxt, SFunctionNode* pFunc
return TSDB_CODE_SUCCESS;
}
static int32_t translateInterpPseudoColumnFunc(STranslateContext* pCxt, SFunctionNode* pFunc) {
if (!fmIsInterpPseudoColumnFunc(pFunc->funcId)) {
return TSDB_CODE_SUCCESS;
}
if (!isSelectStmt(pCxt->pCurrStmt)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC,
"%s must be used in select statements", pFunc->functionName);
}
if (pCxt->currClause == SQL_CLAUSE_WHERE) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_INTERP_CLAUSE,
"%s is not allowed in where clause", pFunc->functionName);
}
return TSDB_CODE_SUCCESS;
}
static int32_t translateTimelineFunc(STranslateContext* pCxt, SFunctionNode* pFunc) {
if (!fmIsTimelineFunc(pFunc->funcId)) {
return TSDB_CODE_SUCCESS;
@ -1539,6 +1570,21 @@ static int32_t translateTimelineFunc(STranslateContext* pCxt, SFunctionNode* pFu
return TSDB_CODE_SUCCESS;
}
static int32_t translateDateTimeFunc(STranslateContext* pCxt, SFunctionNode* pFunc) {
if (!fmIsDateTimeFunc(pFunc->funcId)) {
return TSDB_CODE_SUCCESS;
}
if (!isSelectStmt(pCxt->pCurrStmt)) {
return TSDB_CODE_SUCCESS;
}
SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt;
pFunc->node.resType.precision = pSelect->precision;
return TSDB_CODE_SUCCESS;
}
static bool hasFillClause(SNode* pCurrStmt) {
if (!isSelectStmt(pCurrStmt)) {
return false;
@ -1678,6 +1724,7 @@ static void setFuncClassification(SNode* pCurrStmt, SFunctionNode* pFunc) {
SSelectStmt* pSelect = (SSelectStmt*)pCurrStmt;
pSelect->hasAggFuncs = pSelect->hasAggFuncs ? true : fmIsAggFunc(pFunc->funcId);
pSelect->hasRepeatScanFuncs = pSelect->hasRepeatScanFuncs ? true : fmIsRepeatScanFunc(pFunc->funcId);
if (fmIsIndefiniteRowsFunc(pFunc->funcId)) {
pSelect->hasIndefiniteRowsFunc = true;
pSelect->returnRows = fmGetFuncReturnRows(pFunc);
@ -1692,7 +1739,8 @@ static void setFuncClassification(SNode* pCurrStmt, SFunctionNode* pFunc) {
pSelect->hasUniqueFunc = pSelect->hasUniqueFunc ? true : (FUNCTION_TYPE_UNIQUE == pFunc->funcType);
pSelect->hasTailFunc = pSelect->hasTailFunc ? true : (FUNCTION_TYPE_TAIL == pFunc->funcType);
pSelect->hasInterpFunc = pSelect->hasInterpFunc ? true : (FUNCTION_TYPE_INTERP == pFunc->funcType);
pSelect->hasInterpPseudoColFunc = pSelect->hasInterpPseudoColFunc ? true : fmIsInterpPseudoColumnFunc(pFunc->funcId);
pSelect->hasInterpPseudoColFunc =
pSelect->hasInterpPseudoColFunc ? true : fmIsInterpPseudoColumnFunc(pFunc->funcId);
pSelect->hasLastRowFunc = pSelect->hasLastRowFunc ? true : (FUNCTION_TYPE_LAST_ROW == pFunc->funcType);
pSelect->hasLastFunc = pSelect->hasLastFunc ? true : (FUNCTION_TYPE_LAST == pFunc->funcType);
pSelect->hasTimeLineFunc = pSelect->hasTimeLineFunc ? true : fmIsTimelineFunc(pFunc->funcId);
@ -1819,9 +1867,15 @@ static int32_t translateNormalFunction(STranslateContext* pCxt, SFunctionNode* p
if (TSDB_CODE_SUCCESS == code) {
code = translateInterpFunc(pCxt, pFunc);
}
if (TSDB_CODE_SUCCESS == code) {
code = translateInterpPseudoColumnFunc(pCxt, pFunc);
}
if (TSDB_CODE_SUCCESS == code) {
code = translateTimelineFunc(pCxt, pFunc);
}
if (TSDB_CODE_SUCCESS == code) {
code = translateDateTimeFunc(pCxt, pFunc);
}
if (TSDB_CODE_SUCCESS == code) {
code = translateBlockDistFunc(pCxt, pFunc);
}
@ -2877,6 +2931,11 @@ static int32_t convertFillValue(STranslateContext* pCxt, SDataType dt, SNodeList
if (TSDB_CODE_SUCCESS == code) {
code = scalarCalculateConstants(pCaseFunc, &pCell->pNode);
}
if (TSDB_CODE_SUCCESS == code && QUERY_NODE_VALUE != nodeType(pCell->pNode)) {
code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value is just a constant");
} else if (TSDB_CODE_SUCCESS != code) {
code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled data type mismatch");
}
return code;
}
@ -2893,9 +2952,9 @@ static int32_t checkFillValues(STranslateContext* pCxt, SFillNode* pFill, SNodeL
if (fillNo >= LIST_LENGTH(pFillValues->pNodeList)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled values number mismatch");
}
if (TSDB_CODE_SUCCESS !=
convertFillValue(pCxt, ((SExprNode*)pProject)->resType, pFillValues->pNodeList, fillNo)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled data type mismatch");
int32_t code = convertFillValue(pCxt, ((SExprNode*)pProject)->resType, pFillValues->pNodeList, fillNo);
if (TSDB_CODE_SUCCESS != code) {
return code;
}
++fillNo;
}
@ -2976,12 +3035,13 @@ static int32_t translateSelectList(STranslateContext* pCxt, SSelectStmt* pSelect
}
static int32_t translateHaving(STranslateContext* pCxt, SSelectStmt* pSelect) {
if (NULL == pSelect->pGroupByList && NULL != pSelect->pHaving) {
if (NULL == pSelect->pGroupByList && NULL == pSelect->pPartitionByList && NULL == pSelect->pWindow &&
NULL != pSelect->pHaving) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_GROUPBY_LACK_EXPRESSION);
}
pCxt->currClause = SQL_CLAUSE_HAVING;
int32_t code = translateExpr(pCxt, &pSelect->pHaving);
if (TSDB_CODE_SUCCESS == code) {
if (TSDB_CODE_SUCCESS == code && (NULL != pSelect->pGroupByList || NULL != pSelect->pWindow)) {
code = checkExprForGroupBy(pCxt, &pSelect->pHaving);
}
return code;
@ -3372,7 +3432,8 @@ static int32_t translateInterp(STranslateContext* pCxt, SSelectStmt* pSelect) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_INTERP_CLAUSE);
}
if (pSelect->hasInterpPseudoColFunc) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, "Has Interp pseudo column(s) but missing interp function");
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC,
"Has Interp pseudo column(s) but missing interp function");
}
return TSDB_CODE_SUCCESS;
}
@ -3741,6 +3802,7 @@ static int32_t translateDelete(STranslateContext* pCxt, SDeleteStmt* pDelete) {
pCxt->pCurrStmt = (SNode*)pDelete;
int32_t code = translateFrom(pCxt, pDelete->pFromTable);
if (TSDB_CODE_SUCCESS == code) {
pDelete->precision = ((STableNode*)pDelete->pFromTable)->precision;
code = translateDeleteWhere(pCxt, pDelete);
}
pCxt->currClause = SQL_CLAUSE_SELECT;
@ -5143,26 +5205,32 @@ static int32_t checkAlterSuperTableBySchema(STranslateContext* pCxt, SAlterTable
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG);
}
if (getNumOfTags(pTableMeta) == 1 && pStmt->alterType == TSDB_ALTER_TABLE_DROP_TAG) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE, "the only tag cannot be dropped");
}
int32_t tagsLen = 0;
for (int32_t i = 0; i < pTableMeta->tableInfo.numOfTags; ++i) {
tagsLen += pTagsSchema[i].bytes;
}
if (TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES == pStmt->alterType ||
TSDB_ALTER_TABLE_UPDATE_TAG_BYTES == pStmt->alterType) {
TSDB_ALTER_TABLE_UPDATE_TAG_BYTES == pStmt->alterType || TSDB_ALTER_TABLE_DROP_COLUMN == pStmt->alterType ||
TSDB_ALTER_TABLE_DROP_TAG == pStmt->alterType) {
if (TSDB_SUPER_TABLE != pTableMeta->tableType) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE, "Table is not super table");
}
const SSchema* pSchema = getColSchema(pTableMeta, pStmt->colName);
if (NULL == pSchema) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMN, pStmt->colName);
} else if (!IS_VAR_DATA_TYPE(pSchema->type) || pSchema->type != pStmt->dataType.type ||
pSchema->bytes >= calcTypeBytes(pStmt->dataType)) {
return generateSyntaxErrMsg(
&pCxt->msgBuf,
(TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES == pStmt->alterType || TSDB_ALTER_TABLE_DROP_COLUMN == pStmt->alterType)
? TSDB_CODE_PAR_INVALID_COLUMN
: TSDB_CODE_PAR_INVALID_TAG_NAME,
pStmt->colName);
}
if ((TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES == pStmt->alterType ||
TSDB_ALTER_TABLE_UPDATE_TAG_BYTES == pStmt->alterType) &&
(!IS_VAR_DATA_TYPE(pSchema->type) || pSchema->type != pStmt->dataType.type ||
pSchema->bytes >= calcTypeBytes(pStmt->dataType))) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_MODIFY_COL);
}
@ -5207,6 +5275,10 @@ static int32_t checkAlterSuperTableBySchema(STranslateContext* pCxt, SAlterTable
}
}
if (getNumOfTags(pTableMeta) == 1 && pStmt->alterType == TSDB_ALTER_TABLE_DROP_TAG) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE, "the only tag cannot be dropped");
}
return TSDB_CODE_SUCCESS;
}
@ -5679,6 +5751,14 @@ static int32_t translateDropCGroup(STranslateContext* pCxt, SDropCGroupStmt* pSt
static int32_t translateAlterLocal(STranslateContext* pCxt, SAlterLocalStmt* pStmt) {
// The statement is executed directly on the client without constructing a message.
if ('\0' != pStmt->value[0]) {
return TSDB_CODE_SUCCESS;
}
char* p = strchr(pStmt->config, ' ');
if (NULL != p) {
*p = 0;
strcpy(pStmt->value, p + 1);
}
return TSDB_CODE_SUCCESS;
}
@ -8257,10 +8337,6 @@ static void destoryAlterTbReq(SVAlterTbReq* pReq) {
static int32_t rewriteAlterTableImpl(STranslateContext* pCxt, SAlterTableStmt* pStmt, STableMeta* pTableMeta,
SQuery* pQuery) {
if (getNumOfTags(pTableMeta) == 1 && pStmt->alterType == TSDB_ALTER_TABLE_DROP_TAG) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE, "the only tag cannot be dropped");
}
if (TSDB_SUPER_TABLE == pTableMeta->tableType) {
return TSDB_CODE_SUCCESS;
} else if (TSDB_CHILD_TABLE != pTableMeta->tableType && TSDB_NORMAL_TABLE != pTableMeta->tableType) {
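
The parser changes above cover several cases: DELETE statements now carry the source table's timestamp precision, HAVING is accepted with PARTITION BY or a window even without GROUP BY, fill values must reduce to constants, and the "only tag cannot be dropped" check moves into the super-table path. One small, self-contained addition is the ALTER LOCAL handling, which splits a combined "option value" string at the first space when no explicit value was parsed. A hedged sketch of that split, with hypothetical names and bounds checking added for illustration:

```c
/* Sketch of the ALTER LOCAL handling added above: split a combined
 * "option value" string at the first space unless a value is already set. */
#include <string.h>

static void splitConfigValue(char *config, char *value, size_t valueCap) {
  if (value[0] != '\0') return;         /* value already provided */
  char *p = strchr(config, ' ');
  if (p != NULL) {
    *p = '\0';                          /* terminate the option name */
    strncpy(value, p + 1, valueCap - 1);
    value[valueCap - 1] = '\0';
  }
}
```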

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff