Merge remote-tracking branch 'origin/main' into enh/TD-32951
|
@ -99,6 +99,7 @@ tests/examples/JDBC/JDBCDemo/.classpath
|
||||||
tests/examples/JDBC/JDBCDemo/.project
|
tests/examples/JDBC/JDBCDemo/.project
|
||||||
tests/examples/JDBC/JDBCDemo/.settings/
|
tests/examples/JDBC/JDBCDemo/.settings/
|
||||||
source/libs/parser/inc/sql.*
|
source/libs/parser/inc/sql.*
|
||||||
|
source/os/src/timezone/
|
||||||
tests/script/tmqResult.txt
|
tests/script/tmqResult.txt
|
||||||
tests/system-test/case_to_run.txt
|
tests/system-test/case_to_run.txt
|
||||||
tests/develop-test/case_to_run.txt
|
tests/develop-test/case_to_run.txt
|
||||||
|
@ -170,4 +171,4 @@ tzdir.h
|
||||||
tzfile.h
|
tzfile.h
|
||||||
coverage.info
|
coverage.info
|
||||||
taos
|
taos
|
||||||
taosd
|
taosd
|
||||||
|
|
|
@ -402,6 +402,7 @@ pipeline {
|
||||||
WKDIR = '/var/lib/jenkins/workspace'
|
WKDIR = '/var/lib/jenkins/workspace'
|
||||||
WK = '/var/lib/jenkins/workspace/TDinternal'
|
WK = '/var/lib/jenkins/workspace/TDinternal'
|
||||||
WKC = '/var/lib/jenkins/workspace/TDinternal/community'
|
WKC = '/var/lib/jenkins/workspace/TDinternal/community'
|
||||||
|
WKPY = '/var/lib/jenkins/workspace/taos-connector-python'
|
||||||
DOC_WKC = '/root/doc_ci_work'
|
DOC_WKC = '/root/doc_ci_work'
|
||||||
td_repo = 'TDengine'
|
td_repo = 'TDengine'
|
||||||
zh_doc_repo = 'docs.taosdata.com'
|
zh_doc_repo = 'docs.taosdata.com'
|
||||||
|
|
|
@ -97,10 +97,13 @@ ELSE()
|
||||||
SET(TD_TAOS_TOOLS TRUE)
|
SET(TD_TAOS_TOOLS TRUE)
|
||||||
ENDIF()
|
ENDIF()
|
||||||
|
|
||||||
|
SET(TAOS_LIB taos)
|
||||||
|
SET(TAOS_LIB_STATIC taos_static)
|
||||||
|
|
||||||
IF(${TD_WINDOWS})
|
IF(${TD_WINDOWS})
|
||||||
SET(TAOS_LIB taos_static)
|
SET(TAOS_LIB_PLATFORM_SPEC taos_static)
|
||||||
ELSE()
|
ELSE()
|
||||||
SET(TAOS_LIB taos)
|
SET(TAOS_LIB_PLATFORM_SPEC taos)
|
||||||
ENDIF()
|
ENDIF()
|
||||||
|
|
||||||
# build TSZ by default
|
# build TSZ by default
|
||||||
|
@ -128,7 +131,7 @@ IF(TD_WINDOWS)
|
||||||
SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")
|
SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")
|
||||||
ENDIF()
|
ENDIF()
|
||||||
|
|
||||||
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO")
|
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO /FORCE:MULTIPLE")
|
||||||
|
|
||||||
# IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
|
# IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
|
||||||
# SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
|
# SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
# libuv
|
# libuv
|
||||||
ExternalProject_Add(libuv
|
ExternalProject_Add(libuv
|
||||||
GIT_REPOSITORY https://github.com/libuv/libuv.git
|
GIT_REPOSITORY https://github.com/libuv/libuv.git
|
||||||
GIT_TAG v1.48.0
|
GIT_TAG v1.49.2
|
||||||
SOURCE_DIR "${TD_CONTRIB_DIR}/libuv"
|
SOURCE_DIR "${TD_CONTRIB_DIR}/libuv"
|
||||||
BINARY_DIR "${TD_CONTRIB_DIR}/libuv"
|
BINARY_DIR "${TD_CONTRIB_DIR}/libuv"
|
||||||
CONFIGURE_COMMAND ""
|
CONFIGURE_COMMAND ""
|
||||||
|
|
|
@ -0,0 +1,15 @@
|
||||||
|
# timezone
|
||||||
|
ExternalProject_Add(tz
|
||||||
|
GIT_REPOSITORY https://github.com/eggert/tz.git
|
||||||
|
GIT_TAG main
|
||||||
|
SOURCE_DIR "${TD_CONTRIB_DIR}/tz"
|
||||||
|
BINARY_DIR ""
|
||||||
|
CONFIGURE_COMMAND ""
|
||||||
|
#BUILD_COMMAND ""
|
||||||
|
INSTALL_COMMAND ""
|
||||||
|
TEST_COMMAND ""
|
||||||
|
GIT_SHALLOW true
|
||||||
|
GIT_PROGRESS true
|
||||||
|
BUILD_COMMAND ""
|
||||||
|
)
|
||||||
|
|
|
@ -106,6 +106,10 @@ cat("${TD_SUPPORT_DIR}/zlib_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
# cJson
|
# cJson
|
||||||
cat("${TD_SUPPORT_DIR}/cjson_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
cat("${TD_SUPPORT_DIR}/cjson_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
|
|
||||||
|
if(NOT ${TD_WINDOWS})
|
||||||
|
cat("${TD_SUPPORT_DIR}/tz_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
|
endif(NOT ${TD_WINDOWS})
|
||||||
|
|
||||||
# xz
|
# xz
|
||||||
# cat("${TD_SUPPORT_DIR}/xz_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
# cat("${TD_SUPPORT_DIR}/xz_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
|
|
||||||
|
@ -651,6 +655,35 @@ if(${TD_LINUX} AND ${BUILD_WITH_S3})
|
||||||
add_subdirectory(azure-cmake EXCLUDE_FROM_ALL)
|
add_subdirectory(azure-cmake EXCLUDE_FROM_ALL)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
IF(TD_LINUX)
|
||||||
|
SET(TZ_OUTPUT_PATH /usr/share/zoneinfo)
|
||||||
|
ELSEIF(TD_DARWIN)
|
||||||
|
SET(TZ_OUTPUT_PATH /var/db/timezone/zoneinfo)
|
||||||
|
ENDIF()
|
||||||
|
|
||||||
|
|
||||||
|
if(NOT ${TD_WINDOWS})
|
||||||
|
MESSAGE(STATUS "timezone file path: " ${TZ_OUTPUT_PATH})
|
||||||
|
|
||||||
|
execute_process(
|
||||||
|
COMMAND make TZDIR=${TZ_OUTPUT_PATH}/ clean tzdir.h
|
||||||
|
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/tz"
|
||||||
|
)
|
||||||
|
|
||||||
|
set(TZ_SRC_DIR "${TD_SOURCE_DIR}/source/os/src/timezone")
|
||||||
|
file(REMOVE_RECURSE ${TZ_SRC_DIR})
|
||||||
|
file(MAKE_DIRECTORY ${TZ_SRC_DIR})
|
||||||
|
file(COPY ${TD_CONTRIB_DIR}/tz/private.h ${TD_CONTRIB_DIR}/tz/tzdir.h ${TD_CONTRIB_DIR}/tz/tzfile.h
|
||||||
|
${TD_CONTRIB_DIR}/tz/localtime.c ${TD_CONTRIB_DIR}/tz/strftime.c
|
||||||
|
DESTINATION ${TZ_SRC_DIR})
|
||||||
|
endif(NOT ${TD_WINDOWS})
|
||||||
|
|
||||||
|
#if(NOT ${TD_WINDOWS})
|
||||||
|
# execute_process(
|
||||||
|
# COMMAND make CFLAGS+=-fPIC CFLAGS+=-g TZDIR=${TZ_OUTPUT_PATH} clean libtz.a
|
||||||
|
# WORKING_DIRECTORY "${TD_CONTRIB_DIR}/tz"
|
||||||
|
# )
|
||||||
|
#endif(NOT ${TD_WINDOWS})
|
||||||
# ================================================================================================
|
# ================================================================================================
|
||||||
# Build test
|
# Build test
|
||||||
# ================================================================================================
|
# ================================================================================================
|
||||||
|
|
|
@ -28,6 +28,9 @@ if(${BUILD_WITH_TRAFT})
|
||||||
# add_subdirectory(traft)
|
# add_subdirectory(traft)
|
||||||
endif(${BUILD_WITH_TRAFT})
|
endif(${BUILD_WITH_TRAFT})
|
||||||
|
|
||||||
add_subdirectory(azure)
|
if(${BUILD_S3})
|
||||||
|
add_subdirectory(azure)
|
||||||
|
endif()
|
||||||
|
|
||||||
add_subdirectory(tdev)
|
add_subdirectory(tdev)
|
||||||
add_subdirectory(lz4)
|
add_subdirectory(lz4)
|
||||||
|
|
|
@ -12,7 +12,7 @@ TDengine is configured by default with only one root user, who has the highest p
|
||||||
Only the root user can perform the operation of creating users, with the syntax as follows.
|
Only the root user can perform the operation of creating users, with the syntax as follows.
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
create user user_name pass'password' [sysinfo {1|0}]
|
create user user_name pass'password' [sysinfo {1|0}] [createdb {1|0}]
|
||||||
```
|
```
|
||||||
|
|
||||||
The parameters are explained as follows.
|
The parameters are explained as follows.
|
||||||
|
@ -20,11 +20,12 @@ The parameters are explained as follows.
|
||||||
- user_name: Up to 23 B long.
|
- user_name: Up to 23 B long.
|
||||||
- password: The password must be between 8 and 16 characters long and include at least three types of characters from the following: uppercase letters, lowercase letters, numbers, and special characters. Special characters include `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`.
|
- password: The password must be between 8 and 16 characters long and include at least three types of characters from the following: uppercase letters, lowercase letters, numbers, and special characters. Special characters include `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`.
|
||||||
- sysinfo: Whether the user can view system information. 1 means they can view it, 0 means they cannot. System information includes server configuration information, various node information such as dnode, query node (qnode), etc., as well as storage-related information, etc. The default is to view system information.
|
- sysinfo: Whether the user can view system information. 1 means they can view it, 0 means they cannot. System information includes server configuration information, various node information such as dnode, query node (qnode), etc., as well as storage-related information, etc. The default is to view system information.
|
||||||
|
- createdb: Whether the user can create databases. 1 means they can create databases, 0 means they cannot. The default value is 0. // Supported starting from TDengine Enterprise version 3.3.2.0
|
||||||
|
|
||||||
The following SQL can create a user named test with the password 123456 who can view system information.
|
The following SQL can create a user named test with the password abc123!@# who can view system information.
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
create user test pass '123456' sysinfo 1
|
create user test pass 'abc123!@#' sysinfo 1
|
||||||
```
|
```
|
||||||
|
|
||||||
### Viewing Users
|
### Viewing Users
|
||||||
|
@ -51,6 +52,7 @@ alter_user_clause: {
|
||||||
pass 'literal'
|
pass 'literal'
|
||||||
| enable value
|
| enable value
|
||||||
| sysinfo value
|
| sysinfo value
|
||||||
|
| createdb value
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -59,6 +61,7 @@ The parameters are explained as follows.
|
||||||
- pass: Modify the user's password.
|
- pass: Modify the user's password.
|
||||||
- enable: Whether to enable the user. 1 means to enable this user, 0 means to disable this user.
|
- enable: Whether to enable the user. 1 means to enable this user, 0 means to disable this user.
|
||||||
- sysinfo: Whether the user can view system information. 1 means they can view system information, 0 means they cannot.
|
- sysinfo: Whether the user can view system information. 1 means they can view system information, 0 means they cannot.
|
||||||
|
- createdb: Whether the user can create databases. 1 means they can create databases, 0 means they cannot. // Supported starting from TDengine Enterprise version 3.3.2.0
|
||||||
|
|
||||||
The following SQL disables the user test.
|
The following SQL disables the user test.
|
||||||
|
|
||||||
|
|
|
@ -28,68 +28,70 @@ After modifying configuration file parameters, you need to restart the *taosd* s
|
||||||
|
|
||||||
### Connection Related
|
### Connection Related
|
||||||
|
|
||||||
|Parameter Name |Supported Version |Description|
|
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||||
|-----------------------|-------------------------|------------|
|
|-----------------------|-------------------------|--------------------|------------|
|
||||||
|firstEp | |Endpoint of the first dnode in the cluster that taosd actively connects to at startup, default value localhost:6030|
|
|firstEp | |Not supported |Endpoint of the first dnode in the cluster that taosd actively connects to at startup, default value localhost:6030|
|
||||||
|secondEp | |Endpoint of the second dnode in the cluster that taosd tries to connect to if the firstEp is unreachable, no default value|
|
|secondEp | |Not supported |Endpoint of the second dnode in the cluster that taosd tries to connect to if the firstEp is unreachable, no default value|
|
||||||
|fqdn | |The service address that taosd listens on, default is the first hostname configured on the server|
|
|fqdn | |Not supported |The service address that taosd listens on, default is the first hostname configured on the server|
|
||||||
|serverPort | |The port that taosd listens on, default value 6030|
|
|serverPort | |Not supported |The port that taosd listens on, default value 6030|
|
||||||
|compressMsgSize | |Whether to compress RPC messages; -1: do not compress any messages; 0: compress all messages; N (N>0): only compress messages larger than N bytes; default value -1|
|
|compressMsgSize | |Supported, effective after restart|Whether to compress RPC messages; -1: do not compress any messages; 0: compress all messages; N (N>0): only compress messages larger than N bytes; default value -1|
|
||||||
|shellActivityTimer | |Duration in seconds for the client to send heartbeat to mnode, range 1-120, default value 3 |
|
|shellActivityTimer | |Supported, effective immediately |Duration in seconds for the client to send heartbeat to mnode, range 1-120, default value 3 |
|
||||||
|numOfRpcSessions | |Maximum number of connections supported by RPC, range 100-100000, default value 30000|
|
|numOfRpcSessions | |Supported, effective after restart|Maximum number of connections supported by RPC, range 100-100000, default value 30000|
|
||||||
|numOfRpcThreads | |Number of threads for receiving and sending RPC data, range 1-50, default value is half of the CPU cores|
|
|numOfRpcThreads | |Supported, effective after restart|Number of threads for receiving and sending RPC data, range 1-50, default value is half of the CPU cores|
|
||||||
|numOfTaskQueueThreads | |Number of threads for client to process RPC messages, range 4-16, default value is half of the CPU cores|
|
|numOfTaskQueueThreads | |Supported, effective after restart|Number of threads for client to process RPC messages, range 4-16, default value is half of the CPU cores|
|
||||||
|rpcQueueMemoryAllowed | |Maximum memory allowed for received RPC messages in dnode, in bytes, range 104857600-INT64_MAX, default value is 1/10 of server memory |
|
|rpcQueueMemoryAllowed | |Supported, effective immediately |Maximum memory allowed for received RPC messages in dnode, in bytes, range 104857600-INT64_MAX, default value is 1/10 of server memory |
|
||||||
|resolveFQDNRetryTime | Cancelled after 3.x |Number of retries when FQDN resolution fails|
|
|resolveFQDNRetryTime | Cancelled after 3.x |Not supported |Number of retries when FQDN resolution fails|
|
||||||
|timeToGetAvailableConn | Cancelled after 3.3.4.x |Maximum waiting time to get an available connection, range 10-50000000, in milliseconds, default value 500000|
|
|timeToGetAvailableConn | Cancelled after 3.3.4.x |Maximum waiting time to get an available connection, range 10-50000000, in milliseconds, default value 500000|
|
||||||
|maxShellConns | Cancelled after 3.x |Maximum number of connections allowed|
|
|maxShellConns | Cancelled after 3.x |Supported, effective after restart|Maximum number of connections allowed|
|
||||||
|maxRetryWaitTime | |Maximum timeout for reconnection, default value is 10s|
|
|maxRetryWaitTime | |Supported, effective after restart|Maximum timeout for reconnection, default value is 10s|
|
||||||
|shareConnLimit |Added in 3.3.4.0 |Number of requests a connection can share, range 1-512, default value 10|
|
|shareConnLimit |Added in 3.3.4.0 |Supported, effective after restart|Number of requests a connection can share, range 1-512, default value 10|
|
||||||
|readTimeout |Added in 3.3.4.0 |Minimum timeout for a single request, range 64-604800, in seconds, default value 900|
|
|readTimeout |Added in 3.3.4.0 |Supported, effective after restart|Minimum timeout for a single request, range 64-604800, in seconds, default value 900|
|
||||||
|
|
||||||
### Monitoring Related
|
### Monitoring Related
|
||||||
|
|
||||||
|Parameter Name|Supported Version|Description|
|
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||||
|-----------------------|----------|-|
|
|-----------------------|-------------------------|--------------------|------------|
|
||||||
|monitor | |Whether to collect and report monitoring data, 0: off; 1: on; default value 0|
|
|monitor | |Supported, effective immediately |Whether to collect and report monitoring data, 0: off; 1: on; default value 0|
|
||||||
|monitorFqdn | |The FQDN of the server where the taosKeeper service is located, default value none|
|
|monitorFqdn | |Supported, effective after restart|The FQDN of the server where the taosKeeper service is located, default value none|
|
||||||
|monitorPort | |The port number listened to by the taosKeeper service, default value 6043|
|
|monitorPort | |Supported, effective after restart|The port number listened to by the taosKeeper service, default value 6043|
|
||||||
|monitorInterval | |The time interval for recording system parameters (CPU/memory) in the monitoring database, in seconds, range 1-200000, default value 30|
|
|monitorInterval | |Supported, effective immediately |The time interval for recording system parameters (CPU/memory) in the monitoring database, in seconds, range 1-200000, default value 30|
|
||||||
|monitorMaxLogs | |Number of cached logs pending report|
|
|monitorMaxLogs | |Supported, effective immediately |Number of cached logs pending report|
|
||||||
|monitorComp | |Whether to use compression when reporting monitoring logs|
|
|monitorComp | |Supported, effective after restart|Whether to use compression when reporting monitoring logs|
|
||||||
|monitorLogProtocol | |Whether to print monitoring logs|
|
|monitorLogProtocol | |Supported, effective immediately |Whether to print monitoring logs|
|
||||||
|monitorForceV2 | |Whether to use V2 protocol for reporting|
|
|monitorForceV2 | |Supported, effective immediately |Whether to use V2 protocol for reporting|
|
||||||
|telemetryReporting | |Whether to upload telemetry, 0: do not upload, 1: upload, default value 1|
|
|telemetryReporting | |Supported, effective immediately |Whether to upload telemetry, 0: do not upload, 1: upload, default value 1|
|
||||||
|telemetryServer | |Telemetry server address|
|
|telemetryServer | |Not supported |Telemetry server address|
|
||||||
|telemetryPort | |Telemetry server port number|
|
|telemetryPort | |Not supported |Telemetry server port number|
|
||||||
|telemetryInterval | |Telemetry upload interval, in seconds, default 86400|
|
|telemetryInterval | |Supported, effective immediately |Telemetry upload interval, in seconds, default 86400|
|
||||||
|crashReporting | |Whether to upload crash information; 0: do not upload, 1: upload; default value 1|
|
|crashReporting | |Supported, effective immediately |Whether to upload crash information; 0: do not upload, 1: upload; default value 1|
|
||||||
|
|
||||||
### Query Related
|
### Query Related
|
||||||
|
|
||||||
|Parameter Name|Supported Version|Description|
|
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||||
|------------------------|----------|-|
|
|-----------------------|-------------------------|--------------------|------------|
|
||||||
|countAlwaysReturnValue | |Whether count/hyperloglog functions return a value when input data is empty or NULL; 0: return empty row, 1: return; default value 1; When this parameter is set to 1, if the query contains an INTERVAL clause or the query uses TSMA, and the corresponding group or window has empty or NULL data, the corresponding group or window will not return a query result; Note that this parameter should be consistent between client and server|
|
|countAlwaysReturnValue | |Supported, effective immediately |Whether count/hyperloglog functions return a value when input data is empty or NULL; 0: return empty row, 1: return; default value 1; When this parameter is set to 1, if the query contains an INTERVAL clause or the query uses TSMA, and the corresponding group or window has empty or NULL data, the corresponding group or window will not return a query result; Note that this parameter should be consistent between client and server|
|
||||||
|tagFilterCache | |Whether to cache tag filter results|
|
|tagFilterCache | |Not supported |Whether to cache tag filter results|
|
||||||
|maxNumOfDistinctRes | |Maximum number of distinct results allowed to return, default value 100,000, maximum allowed value 100 million|
|
|queryBufferSize | |Supported, effective after restart|Not effective yet|
|
||||||
|queryBufferSize | |Not effective yet|
|
|queryRspPolicy | |Supported, effective immediately |Query response strategy|
|
||||||
|queryRspPolicy | |Query response strategy|
|
|queryUseMemoryPool | |Not supported |Whether query will use memory pool to manage memory, default value: 1 (on); 0: off, 1: on|
|
||||||
|filterScalarMode | |Force scalar filter mode, 0: off; 1: on, default value 0|
|
|minReservedMemorySize | |Not supported |The minimum reserved system available memory size, all memory except reserved can be used for queries, unit: MB, default reserved size is 20% of system physical memory, value range 1024-1000000000|
|
||||||
|queryPlannerTrace | |Internal parameter, whether the query plan outputs detailed logs|
|
|singleQueryMaxMemorySize| |Not supported |The memory limit that a single query can use on a single node (dnode), exceeding this limit will return an error, unit: MB, default value: 0 (no limit), value range 0-1000000000|
|
||||||
|queryNodeChunkSize | |Internal parameter, chunk size of the query plan|
|
|filterScalarMode | |Not supported |Force scalar filter mode, 0: off; 1: on, default value 0|
|
||||||
|queryUseNodeAllocator | |Internal parameter, allocation method of the query plan|
|
|queryPlannerTrace | |Supported, effective immediately |Internal parameter, whether the query plan outputs detailed logs|
|
||||||
|queryMaxConcurrentTables| |Internal parameter, concurrency number of the query plan|
|
|queryNodeChunkSize | |Supported, effective immediately |Internal parameter, chunk size of the query plan|
|
||||||
|queryRsmaTolerance | |Internal parameter, tolerance time for determining which level of rsma data to query, in milliseconds|
|
|queryUseNodeAllocator | |Supported, effective immediately |Internal parameter, allocation method of the query plan|
|
||||||
|enableQueryHb | |Internal parameter, whether to send query heartbeat messages|
|
|queryMaxConcurrentTables| |Not supported |Internal parameter, concurrency number of the query plan|
|
||||||
|pqSortMemThreshold | |Internal parameter, memory threshold for sorting|
|
|queryRsmaTolerance | |Not supported |Internal parameter, tolerance time for determining which level of rsma data to query, in milliseconds|
|
||||||
|
|enableQueryHb | |Supported, effective immediately |Internal parameter, whether to send query heartbeat messages|
|
||||||
|
|pqSortMemThreshold | |Not supported |Internal parameter, memory threshold for sorting|
|
||||||
|
|
||||||
### Region Related
|
### Region Related
|
||||||
|
|
||||||
|Parameter Name|Supported Version|Description|
|
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||||
|-----------------|----------|-|
|
|-----------------------|-------------------------|--------------------|------------|
|
||||||
|timezone | |Time zone; defaults to dynamically obtaining the current time zone setting from the system|
|
|timezone | |Not supported |Time zone; defaults to dynamically obtaining the current time zone setting from the system|
|
||||||
|locale | |System locale information and encoding format, defaults to obtaining from the system|
|
|locale | |Not supported |System locale information and encoding format, defaults to obtaining from the system|
|
||||||
|charset | |Character set encoding, defaults to obtaining from the system|
|
|charset | |Not supported |Character set encoding, defaults to obtaining from the system|
|
||||||
|
|
||||||
:::info
|
:::info
|
||||||
|
|
||||||
|
@ -167,152 +169,153 @@ The effective value of charset is UTF-8.
|
||||||
|
|
||||||
### Storage Related
|
### Storage Related
|
||||||
|
|
||||||
|Parameter Name|Supported Version|Description|
|
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||||
|--------------------|----------|-|
|
|-----------------------|-------------------------|--------------------|------------|
|
||||||
|dataDir | |Directory for data files, all data files are written to this directory, default value /var/lib/taos|
|
|dataDir | |Not supported |Directory for data files, all data files are written to this directory, default value /var/lib/taos|
|
||||||
|tempDir | |Specifies the directory for generating temporary files during system operation, default value /tmp|
|
|tempDir | |Not supported |Specifies the directory for generating temporary files during system operation, default value /tmp|
|
||||||
|minimalDataDirGB | |Minimum space to be reserved in the time-series data storage directory specified by dataDir, in GB, default value 2|
|
|minimalDataDirGB | |Not supported |Minimum space to be reserved in the time-series data storage directory specified by dataDir, in GB, default value 2|
|
||||||
|minimalTmpDirGB | |Minimum space to be reserved in the temporary file directory specified by tempDir, in GB, default value 1|
|
|minimalTmpDirGB | |Not supported |Minimum space to be reserved in the temporary file directory specified by tempDir, in GB, default value 1|
|
||||||
|minDiskFreeSize |After 3.1.1.0|When the available space on a disk is less than or equal to this threshold, the disk will no longer be selected for generating new data files, unit is bytes, range 52428800-1073741824, default value 52428800; Enterprise parameter|
|
|minDiskFreeSize |After 3.1.1.0|Supported, effective immediately |When the available space on a disk is less than or equal to this threshold, the disk will no longer be selected for generating new data files, unit is bytes, range 52428800-1073741824, default value 52428800; Enterprise parameter|
|
||||||
|s3MigrateIntervalSec|After 3.3.4.3|Trigger cycle for automatic upload of local data files to S3, in seconds. Minimum: 600; Maximum: 100000. Default value 3600; Enterprise parameter|
|
|s3MigrateIntervalSec|After 3.3.4.3|Supported, effective immediately |Trigger cycle for automatic upload of local data files to S3, in seconds. Minimum: 600; Maximum: 100000. Default value 3600; Enterprise parameter|
|
||||||
|s3MigrateEnabled |After 3.3.4.3|Whether to automatically perform S3 migration, default value is 0, which means auto S3 migration is off, can be set to 1; Enterprise parameter|
|
|s3MigrateEnabled |After 3.3.4.3|Supported, effective immediately |Whether to automatically perform S3 migration, default value is 0, which means auto S3 migration is off, can be set to 1; Enterprise parameter|
|
||||||
|s3Accesskey |After 3.3.4.3|Colon-separated user SecretId:SecretKey, for example AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E; Enterprise parameter|
|
|s3Accesskey |After 3.3.4.3|Supported, effective after restart|Colon-separated user SecretId:SecretKey, for example AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E; Enterprise parameter|
|
||||||
|s3Endpoint |After 3.3.4.3|COS service domain name in the user's region, supports http and https, the region of the bucket must match the endpoint, otherwise it cannot be accessed; Enterprise parameter|
|
|s3Endpoint |After 3.3.4.3|Supported, effective after restart|COS service domain name in the user's region, supports http and https, the region of the bucket must match the endpoint, otherwise it cannot be accessed; Enterprise parameter|
|
||||||
|s3BucketName |After 3.3.4.3|Bucket name, followed by a hyphen and the AppId of the user registered COS service, where AppId is unique to COS, not present in AWS and Alibaba Cloud, needs to be part of the bucket name, separated by a hyphen; parameter values are string type, but do not need quotes; for example test0711-1309024725; Enterprise parameter|
|
|s3BucketName |After 3.3.4.3|Supported, effective after restart|Bucket name, followed by a hyphen and the AppId of the user registered COS service, where AppId is unique to COS, not present in AWS and Alibaba Cloud, needs to be part of the bucket name, separated by a hyphen; parameter values are string type, but do not need quotes; for example test0711-1309024725; Enterprise parameter|
|
||||||
|s3PageCacheSize |After 3.3.4.3|Number of S3 page cache pages, range 4-1048576, unit is pages, default value 4096; Enterprise parameter|
|
|s3PageCacheSize |After 3.3.4.3|Supported, effective after restart|Number of S3 page cache pages, range 4-1048576, unit is pages, default value 4096; Enterprise parameter|
|
||||||
|s3UploadDelaySec |After 3.3.4.3|How long a data file remains unchanged before being uploaded to S3, range 1-2592000 (30 days), in seconds, default value 60; Enterprise parameter|
|
|s3UploadDelaySec |After 3.3.4.3|Supported, effective immediately |How long a data file remains unchanged before being uploaded to S3, range 1-2592000 (30 days), in seconds, default value 60; Enterprise parameter|
|
||||||
|cacheLazyLoadThreshold| |Internal parameter, cache loading strategy|
|
|cacheLazyLoadThreshold| |Supported, effective immediately |Internal parameter, cache loading strategy|
|
||||||
|
|
||||||
### Cluster Related
|
### Cluster Related
|
||||||
|
|
||||||
|Parameter Name|Supported Version|Description|
|
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||||
|--------------------------|----------|-|
|
|-----------------------|-------------------------|--------------------|------------|
|
||||||
|supportVnodes | |Maximum number of vnodes supported by a dnode, range 0-4096, default value is twice the number of CPU cores + 5|
|
|supportVnodes | |Supported, effective immediately |Maximum number of vnodes supported by a dnode, range 0-4096, default value is twice the number of CPU cores + 5|
|
||||||
|numOfCommitThreads | |Maximum number of commit threads, range 0-1024, default value 4|
|
|numOfCommitThreads | |Supported, effective after restart|Maximum number of commit threads, range 0-1024, default value 4|
|
||||||
|numOfMnodeReadThreads | |Number of Read threads for mnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|
|numOfMnodeReadThreads | |Supported, effective after restart|Number of Read threads for mnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|
||||||
|numOfVnodeQueryThreads | |Number of Query threads for vnode, range 0-1024, default value is twice the number of CPU cores (not exceeding 16)|
|
|numOfVnodeQueryThreads | |Supported, effective after restart|Number of Query threads for vnode, range 0-1024, default value is twice the number of CPU cores (not exceeding 16)|
|
||||||
|numOfVnodeFetchThreads | |Number of Fetch threads for vnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|
|numOfVnodeFetchThreads | |Supported, effective after restart|Number of Fetch threads for vnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|
||||||
|numOfVnodeRsmaThreads | |Number of Rsma threads for vnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|
|numOfVnodeRsmaThreads | |Supported, effective after restart|Number of Rsma threads for vnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|
||||||
|numOfQnodeQueryThreads | |Number of Query threads for qnode, range 0-1024, default value is twice the number of CPU cores (not exceeding 16)|
|
|numOfQnodeQueryThreads | |Supported, effective after restart|Number of Query threads for qnode, range 0-1024, default value is twice the number of CPU cores (not exceeding 16)|
|
||||||
|numOfSnodeSharedThreads | |Number of shared threads for snode, range 0-1024, default value is one quarter of the CPU cores (not less than 2, not exceeding 4)|
|
|numOfSnodeSharedThreads | |Supported, effective after restart|Number of shared threads for snode, range 0-1024, default value is one quarter of the CPU cores (not less than 2, not exceeding 4)|
|
||||||
|numOfSnodeUniqueThreads | |Number of exclusive threads for snode, range 0-1024, default value is one quarter of the CPU cores (not less than 2, not exceeding 4)|
|
|numOfSnodeUniqueThreads | |Supported, effective after restart|Number of exclusive threads for snode, range 0-1024, default value is one quarter of the CPU cores (not less than 2, not exceeding 4)|
|
||||||
|ratioOfVnodeStreamThreads | |Ratio of stream computing using vnode threads, range 0.01-4, default value 4|
|
|ratioOfVnodeStreamThreads | |Supported, effective after restart|Ratio of stream computing using vnode threads, range 0.01-4, default value 4|
|
||||||
|ttlUnit | |Unit for ttl parameter, range 1-31572500, in seconds, default value 86400|
|
|ttlUnit | |Not supported |Unit for ttl parameter, range 1-31572500, in seconds, default value 86400|
|
||||||
|ttlPushInterval | |Frequency of ttl timeout checks, range 1-100000, in seconds, default value 10|
|
|ttlPushInterval | |Supported, effective immediately |Frequency of ttl timeout checks, range 1-100000, in seconds, default value 10|
|
||||||
|ttlChangeOnWrite | |Whether ttl expiration time changes with table modification; 0: no change, 1: change; default value 0|
|
|ttlChangeOnWrite | |Supported, effective immediately |Whether ttl expiration time changes with table modification; 0: no change, 1: change; default value 0|
|
||||||
|ttlBatchDropNum | |Number of subtables deleted in a batch for ttl, minimum value 0, default value 10000|
|
|ttlBatchDropNum | |Supported, effective immediately |Number of subtables deleted in a batch for ttl, minimum value 0, default value 10000|
|
||||||
|retentionSpeedLimitMB | |Speed limit for data migration across different levels of disks, range 0-1024, in MB, default value 0, which means no limit|
|
|retentionSpeedLimitMB | |Supported, effective immediately |Speed limit for data migration across different levels of disks, range 0-1024, in MB, default value 0, which means no limit|
|
||||||
|maxTsmaNum | |Maximum number of TSMAs that can be created in the cluster; range 0-3; default value 3|
|
|maxTsmaNum | |Supported, effective immediately |Maximum number of TSMAs that can be created in the cluster; range 0-3; default value 3|
|
||||||
|tmqMaxTopicNum | |Maximum number of topics that can be established for subscription; range 1-10000; default value 20|
|
|tmqMaxTopicNum | |Supported, effective immediately |Maximum number of topics that can be established for subscription; range 1-10000; default value 20|
|
||||||
|tmqRowSize | |Maximum number of records in a subscription data block, range 1-1000000, default value 4096|
|
|tmqRowSize | |Supported, effective immediately |Maximum number of records in a subscription data block, range 1-1000000, default value 4096|
|
||||||
|audit | |Audit feature switch; Enterprise parameter|
|
|audit | |Supported, effective immediately |Audit feature switch; Enterprise parameter|
|
||||||
|auditInterval | |Time interval for reporting audit data; Enterprise parameter|
|
|auditInterval | |Supported, effective immediately |Time interval for reporting audit data; Enterprise parameter|
|
||||||
|auditCreateTable | |Whether to enable audit feature for creating subtables; Enterprise parameter|
|
|auditCreateTable | |Supported, effective immediately |Whether to enable audit feature for creating subtables; Enterprise parameter|
|
||||||
|encryptAlgorithm | |Data encryption algorithm; Enterprise parameter|
|
|encryptAlgorithm | |Not supported |Data encryption algorithm; Enterprise parameter|
|
||||||
|encryptScope | |Encryption scope; Enterprise parameter|
|
|encryptScope | |Not supported |Encryption scope; Enterprise parameter|
|
||||||
|enableWhiteList | |Switch for whitelist feature; Enterprise parameter|
|
|enableWhiteList | |Supported, effective immediately |Switch for whitelist feature; Enterprise parameter|
|
||||||
|syncLogBufferMemoryAllowed| |Maximum memory allowed for sync log cache messages for a dnode, in bytes, range 104857600-INT64_MAX, default value is 1/10 of server memory, effective from versions 3.1.3.2/3.3.2.13|
|
|syncLogBufferMemoryAllowed| |Supported, effective immediately |Maximum memory allowed for sync log cache messages for a dnode, in bytes, range 104857600-INT64_MAX, default value is 1/10 of server memory, effective from versions 3.1.3.2/3.3.2.13|
|
||||||
|syncElectInterval | |Internal parameter, for debugging synchronization module|
|
|syncElectInterval | |Not supported |Internal parameter, for debugging synchronization module|
|
||||||
|syncHeartbeatInterval | |Internal parameter, for debugging synchronization module|
|
|syncHeartbeatInterval | |Not supported |Internal parameter, for debugging synchronization module|
|
||||||
|syncHeartbeatTimeout | |Internal parameter, for debugging synchronization module|
|
|syncHeartbeatTimeout | |Not supported |Internal parameter, for debugging synchronization module|
|
||||||
|syncSnapReplMaxWaitN | |Internal parameter, for debugging synchronization module|
|
|syncSnapReplMaxWaitN | |Supported, effective immediately |Internal parameter, for debugging synchronization module|
|
||||||
|syncSnapReplMaxWaitN | |Internal parameter, for debugging synchronization module|
|
|arbHeartBeatIntervalSec | |Supported, effective immediately |Internal parameter, for debugging synchronization module|
|
||||||
|arbHeartBeatIntervalSec | |Internal parameter, for debugging synchronization module|
|
|arbCheckSyncIntervalSec | |Supported, effective immediately |Internal parameter, for debugging synchronization module|
|
||||||
|arbCheckSyncIntervalSec | |Internal parameter, for debugging synchronization module|
|
|arbSetAssignedTimeoutSec | |Supported, effective immediately |Internal parameter, for debugging synchronization module|
|
||||||
|arbSetAssignedTimeoutSec | |Internal parameter, for debugging synchronization module|
|
|mndSdbWriteDelta | |Supported, effective immediately |Internal parameter, for debugging mnode module|
|
||||||
|mndSdbWriteDelta | |Internal parameter, for debugging mnode module|
|
|mndLogRetention | |Supported, effective immediately |Internal parameter, for debugging mnode module|
|
||||||
|mndLogRetention | |Internal parameter, for debugging mnode module|
|
|skipGrant | |Not supported |Internal parameter, for authorization checks|
|
||||||
|skipGrant | |Internal parameter, for authorization checks|
|
|trimVDbIntervalSec | |Supported, effective immediately |Internal parameter, for deleting expired data|
|
||||||
|trimVDbIntervalSec | |Internal parameter, for deleting expired data|
|
|ttlFlushThreshold | |Supported, effective immediately |Internal parameter, frequency of ttl timer|
|
||||||
|ttlFlushThreshold | |Internal parameter, frequency of ttl timer|
|
|compactPullupInterval | |Supported, effective immediately |Internal parameter, frequency of data reorganization timer|
|
||||||
|compactPullupInterval | |Internal parameter, frequency of data reorganization timer|
|
|walFsyncDataSizeLimit | |Supported, effective immediately |Internal parameter, threshold for WAL to perform FSYNC|
|
||||||
|walFsyncDataSizeLimit | |Internal parameter, threshold for WAL to perform FSYNC|
|
|transPullupInterval | |Supported, effective immediately |Internal parameter, retry interval for mnode to execute transactions|
|
||||||
|transPullupInterval | |Internal parameter, retry interval for mnode to execute transactions|
|
|mqRebalanceInterval | |Supported, effective immediately |Internal parameter, interval for consumer rebalancing|
|
||||||
|mqRebalanceInterval | |Internal parameter, interval for consumer rebalancing|
|
|uptimeInterval | |Supported, effective immediately |Internal parameter, for recording system uptime|
|
||||||
|uptimeInterval | |Internal parameter, for recording system uptime|
|
|timeseriesThreshold | |Supported, effective immediately |Internal parameter, for usage statistics|
|
||||||
|timeseriesThreshold | |Internal parameter, for usage statistics|
|
|udf | |Supported, effective after restart|Whether to start UDF service; 0: do not start, 1: start; default value 0 |
|
||||||
|udf | |Whether to start UDF service; 0: do not start, 1: start; default value 0 |
|
|udfdResFuncs | |Supported, effective after restart|Internal parameter, for setting UDF result sets|
|
||||||
|udfdResFuncs | |Internal parameter, for setting UDF result sets|
|
|udfdLdLibPath | |Supported, effective after restart|Internal parameter, indicates the library path for loading UDF|
|
||||||
|udfdLdLibPath | |Internal parameter, indicates the library path for loading UDF|
|
|
||||||
|
|
||||||
### Stream Computing Parameters
|
### Stream Computing Parameters
|
||||||
|
|
||||||
| Parameter Name | Supported Version | Description |
|
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||||
|-----------------------|----------|-|
|
|-----------------------|-------------------------|--------------------|------------|
|
||||||
| disableStream | | Switch to enable or disable stream computing |
|
| disableStream | |Supported, effective immediately | Switch to enable or disable stream computing |
|
||||||
| streamBufferSize | | Controls the size of the window state cache in memory, default value is 128MB |
|
| streamBufferSize | |Supported, effective immediately | Controls the size of the window state cache in memory, default value is 128MB |
|
||||||
| streamAggCnt | | Internal parameter, number of concurrent aggregation computations |
|
| streamAggCnt | |Not supported | Internal parameter, number of concurrent aggregation computations |
|
||||||
| checkpointInterval | | Internal parameter, checkpoint synchronization interval |
|
| checkpointInterval | |Supported, effective after restart| Internal parameter, checkpoint synchronization interval |
|
||||||
| concurrentCheckpoint | | Internal parameter, whether to check checkpoints concurrently |
|
| concurrentCheckpoint | |Supported, effective immediately | Internal parameter, whether to check checkpoints concurrently |
|
||||||
| maxStreamBackendCache | | Internal parameter, maximum cache used by stream computing |
|
| maxStreamBackendCache | |Supported, effective immediately | Internal parameter, maximum cache used by stream computing |
|
||||||
| streamSinkDataRate | | Internal parameter, used to control the write speed of stream computing results |
|
| streamSinkDataRate | |Supported, effective after restart| Internal parameter, used to control the write speed of stream computing results |
|
||||||
|
|
||||||
### Log Related
|
### Log Related
|
||||||
|
|
||||||
| Parameter Name | Supported Version | Description |
|
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||||
|----------------|----------|-|
|
|-----------------------|-------------------------|--------------------|------------|
|
||||||
| logDir | | Log file directory, operational logs will be written to this directory, default value /var/log/taos |
|
| logDir | |Not supported | Log file directory, operational logs will be written to this directory, default value /var/log/taos |
|
||||||
| minimalLogDirGB | | Stops writing logs when the available space on the disk where the log folder is located is less than this value, unit GB, default value 1 |
|
| minimalLogDirGB | |Not supported | Stops writing logs when the available space on the disk where the log folder is located is less than this value, unit GB, default value 1 |
|
||||||
| numOfLogLines | | Maximum number of lines allowed in a single log file, default value 10,000,000 |
|
| numOfLogLines | |Supported, effective immediately | Maximum number of lines allowed in a single log file, default value 10,000,000 |
|
||||||
| asyncLog | | Log writing mode, 0: synchronous, 1: asynchronous, default value 1 |
|
| asyncLog | |Supported, effective immediately | Log writing mode, 0: synchronous, 1: asynchronous, default value 1 |
|
||||||
| logKeepDays | | Maximum retention time for log files, unit: days, default value 0, which means unlimited retention, log files will not be renamed, nor will new log files be rolled out, but the content of the log files may continue to roll depending on the log file size setting; when set to a value greater than 0, when the log file size reaches the set limit, it will be renamed to taosdlog.yyy, where yyy is the timestamp of the last modification of the log file, and a new log file will be rolled out |
|
| logKeepDays | |Supported, effective immediately | Maximum retention time for log files, unit: days, default value 0, which means unlimited retention, log files will not be renamed, nor will new log files be rolled out, but the content of the log files may continue to roll depending on the log file size setting; when set to a value greater than 0, when the log file size reaches the set limit, it will be renamed to taosdlog.yyy, where yyy is the timestamp of the last modification of the log file, and a new log file will be rolled out |
|
||||||
| slowLogThreshold| 3.3.3.0 onwards | Slow query threshold, queries taking longer than or equal to this threshold are considered slow, unit seconds, default value 3 |
|
| slowLogThreshold| 3.3.3.0 onwards |Supported, effective immediately | Slow query threshold, queries taking longer than or equal to this threshold are considered slow, unit seconds, default value 3 |
|
||||||
| slowLogMaxLen | 3.3.3.0 onwards | Maximum length of slow query logs, range 1-16384, default value 4096 |
|
| slowLogMaxLen | 3.3.3.0 onwards |Supported, effective immediately | Maximum length of slow query logs, range 1-16384, default value 4096 |
|
||||||
| slowLogScope | 3.3.3.0 onwards | Type of slow query records, range ALL/QUERY/INSERT/OTHERS/NONE, default value QUERY |
|
| slowLogScope | 3.3.3.0 onwards |Supported, effective immediately | Type of slow query records, range ALL/QUERY/INSERT/OTHERS/NONE, default value QUERY |
|
||||||
| slowLogExceptDb | 3.3.3.0 onwards | Specifies the database that does not report slow queries, only supports configuring one database |
|
| slowLogExceptDb | 3.3.3.0 onwards |Supported, effective immediately | Specifies the database that does not report slow queries, only supports configuring one database |
|
||||||
| debugFlag | | Log switch for running logs, 131 (outputs error and warning logs), 135 (outputs error, warning, and debug logs), 143 (outputs error, warning, debug, and trace logs); default value 131 or 135 (depending on the module) |
|
| debugFlag | |Supported, effective immediately | Log switch for running logs, 131 (outputs error and warning logs), 135 (outputs error, warning, and debug logs), 143 (outputs error, warning, debug, and trace logs); default value 131 or 135 (depending on the module) |
|
||||||
| tmrDebugFlag | | Log switch for the timer module, range as above |
|
| tmrDebugFlag | |Supported, effective immediately | Log switch for the timer module, range as above |
|
||||||
| uDebugFlag | | Log switch for the utility module, range as above |
|
| uDebugFlag | |Supported, effective immediately | Log switch for the utility module, range as above |
|
||||||
| rpcDebugFlag | | Log switch for the rpc module, range as above |
|
| rpcDebugFlag | |Supported, effective immediately | Log switch for the rpc module, range as above |
|
||||||
| qDebugFlag | | Log switch for the query module, range as above |
|
| qDebugFlag | |Supported, effective immediately | Log switch for the query module, range as above |
|
||||||
| dDebugFlag | | Log switch for the dnode module, range as above |
|
| dDebugFlag | |Supported, effective immediately | Log switch for the dnode module, range as above |
|
||||||
| vDebugFlag | | Log switch for the vnode module, range as above |
|
| vDebugFlag | |Supported, effective immediately | Log switch for the vnode module, range as above |
|
||||||
| mDebugFlag | | Log switch for the mnode module, range as above |
|
| mDebugFlag | |Supported, effective immediately | Log switch for the mnode module, range as above |
|
||||||
| azDebugFlag | 3.3.4.3 onwards | Log switch for the S3 module, range as above |
|
| azDebugFlag | 3.3.4.3 onwards |Supported, effective immediately | Log switch for the S3 module, range as above |
|
||||||
| sDebugFlag | | Log switch for the sync module, range as above |
|
| sDebugFlag | |Supported, effective immediately | Log switch for the sync module, range as above |
|
||||||
| tsdbDebugFlag | | Log switch for the tsdb module, range as above |
|
| tsdbDebugFlag | |Supported, effective immediately | Log switch for the tsdb module, range as above |
|
||||||
| tqDebugFlag | | Log switch for the tq module, range as above |
|
| tqDebugFlag | |Supported, effective immediately | Log switch for the tq module, range as above |
|
||||||
| fsDebugFlag | | Log switch for the fs module, range as above |
|
| fsDebugFlag | |Supported, effective immediately | Log switch for the fs module, range as above |
|
||||||
| udfDebugFlag | | Log switch for the udf module, range as above |
|
| udfDebugFlag | |Supported, effective immediately | Log switch for the udf module, range as above |
|
||||||
| smaDebugFlag | | Log switch for the sma module, range as above |
|
| smaDebugFlag | |Supported, effective immediately | Log switch for the sma module, range as above |
|
||||||
| idxDebugFlag | | Log switch for the index module, range as above |
|
| idxDebugFlag | |Supported, effective immediately | Log switch for the index module, range as above |
|
||||||
| tdbDebugFlag | | Log switch for the tdb module, range as above |
|
| tdbDebugFlag | |Supported, effective immediately | Log switch for the tdb module, range as above |
|
||||||
| metaDebugFlag | | Log switch for the meta module, range as above |
|
| metaDebugFlag | |Supported, effective immediately | Log switch for the meta module, range as above |
|
||||||
| stDebugFlag | | Log switch for the stream module, range as above |
|
| stDebugFlag | |Supported, effective immediately | Log switch for the stream module, range as above |
|
||||||
| sndDebugFlag | | Log switch for the snode module, range as above |
|
| sndDebugFlag | |Supported, effective immediately | Log switch for the snode module, range as above |
|
||||||
|
|
||||||
### Debugging Related
|
### Debugging Related
|
||||||
|
|
||||||
| Parameter Name | Supported Version | Description |
|
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||||
|----------------------|-------------------|-------------|
|
|-----------------------|-------------------------|--------------------|------------|
|
||||||
| enableCoreFile | | Whether to generate a core file when crashing, 0: do not generate, 1: generate; default value is 1 |
|
| enableCoreFile | |Supported, effective immediately | Whether to generate a core file when crashing, 0: do not generate, 1: generate; default value is 1 |
|
||||||
| configDir | | Directory where the configuration files are located |
|
| configDir | |Not supported | Directory where the configuration files are located |
|
||||||
| scriptDir | | Directory for internal test tool scripts |
|
|forceReadConfig | |Not supported |Force the use of parameters from the configuration file, default value: 0|
|
||||||
| assert | | Assertion control switch, default value is 0 |
|
| scriptDir | |Not supported | Directory for internal test tool scripts |
|
||||||
| randErrorChance | | Internal parameter, used for random failure testing |
|
| assert | |Not supported | Assertion control switch, default value is 0 |
|
||||||
| randErrorDivisor | | Internal parameter, used for random failure testing |
|
| randErrorChance | |Supported, effective immediately | Internal parameter, used for random failure testing |
|
||||||
| randErrorScope | | Internal parameter, used for random failure testing |
|
| randErrorDivisor | |Supported, effective immediately | Internal parameter, used for random failure testing |
|
||||||
| safetyCheckLevel | | Internal parameter, used for random failure testing |
|
| randErrorScope | |Supported, effective immediately | Internal parameter, used for random failure testing |
|
||||||
| experimental | | Internal parameter, used for some experimental features |
|
| safetyCheckLevel | |Supported, effective immediately | Internal parameter, used for random failure testing |
|
||||||
| simdEnable | After 3.3.4.3 | Internal parameter, used for testing SIMD acceleration |
|
| experimental | |Supported, effective immediately | Internal parameter, used for some experimental features |
|
||||||
| AVX512Enable | After 3.3.4.3 | Internal parameter, used for testing AVX512 acceleration |
|
| simdEnable | After 3.3.4.3 |Not supported | Internal parameter, used for testing SIMD acceleration |
|
||||||
| rsyncPort | | Internal parameter, used for debugging stream computing |
|
| AVX512Enable | After 3.3.4.3 |Not supported | Internal parameter, used for testing AVX512 acceleration |
|
||||||
| snodeAddress | | Internal parameter, used for debugging stream computing |
|
| rsyncPort | |Not supported | Internal parameter, used for debugging stream computing |
|
||||||
| checkpointBackupDir | | Internal parameter, used for restoring snode data |
|
| snodeAddress | |Supported, effective immediately | Internal parameter, used for debugging stream computing |
|
||||||
| enableAuditDelete | | Internal parameter, used for testing audit functions |
|
| checkpointBackupDir | |Supported, effective immediately | Internal parameter, used for restoring snode data |
|
||||||
| slowLogThresholdTest | | Internal parameter, used for testing slow logs |
|
| enableAuditDelete | |Not supported | Internal parameter, used for testing audit functions |
|
||||||
|
| slowLogThresholdTest | |Not supported | Internal parameter, used for testing slow logs |
|
||||||
|
| bypassFlag |After 3.3.4.5 |Supported, effective immediately | Internal parameter, used for short-circuit testing|
|
||||||
|
|
||||||
### Compression Parameters
|
### Compression Parameters
|
||||||
|
|
||||||
| Parameter Name | Supported Version | Description |
|
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||||
|----------------|-------------------|-------------|
|
|-----------------------|-------------------------|--------------------|------------|
|
||||||
| fPrecision | | Sets the compression precision for float type floating numbers, range 0.1 ~ 0.00000001, default value 0.00000001, floating numbers smaller than this value will have their mantissa truncated |
|
| fPrecision | |Supported, effective immediately | Sets the compression precision for float type floating numbers, range 0.1 ~ 0.00000001, default value 0.00000001, floating numbers smaller than this value will have their mantissa truncated |
|
||||||
| dPrecision | | Sets the compression precision for double type floating numbers, range 0.1 ~ 0.0000000000000001, default value 0.0000000000000001, floating numbers smaller than this value will have their mantissa truncated |
|
| dPrecision | |Supported, effective immediately | Sets the compression precision for double type floating numbers, range 0.1 ~ 0.0000000000000001, default value 0.0000000000000001, floating numbers smaller than this value will have their mantissa truncated |
|
||||||
| lossyColumn | Before 3.3.0.0 | Enables TSZ lossy compression for float and/or double types; range float/double/none; default value none, indicating lossless compression is off |
|
| lossyColumn | Before 3.3.0.0 |Not supported | Enables TSZ lossy compression for float and/or double types; range float/double/none; default value none, indicating lossless compression is off |
|
||||||
| ifAdtFse | | When TSZ lossy compression is enabled, use the FSE algorithm instead of the HUFFMAN algorithm, FSE algorithm is faster in compression but slightly slower in decompression, choose this for faster compression speed; 0: off, 1: on; default value is 0 |
|
| ifAdtFse | |Supported, effective after restart| When TSZ lossy compression is enabled, use the FSE algorithm instead of the HUFFMAN algorithm, FSE algorithm is faster in compression but slightly slower in decompression, choose this for faster compression speed; 0: off, 1: on; default value is 0 |
|
||||||
| maxRange | | Internal parameter, used for setting lossy compression |
|
| maxRange | |Supported, effective after restart| Internal parameter, used for setting lossy compression |
|
||||||
| curRange | | Internal parameter, used for setting lossy compression |
|
| curRange | |Supported, effective after restart| Internal parameter, used for setting lossy compression |
|
||||||
| compressor | | Internal parameter, used for setting lossy compression |
|
| compressor | |Supported, effective after restart| Internal parameter, used for setting lossy compression |
|
||||||
|
|
||||||
**Additional Notes**
|
**Additional Notes**
|
||||||
|
|
||||||
|
|
|
@ -10,107 +10,109 @@ The TDengine client driver provides all the APIs needed for application programm
|
||||||
|
|
||||||
### Connection Related
|
### Connection Related
|
||||||
|
|
||||||
|Parameter Name|Supported Version|Description|
|
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||||
|----------------------|----------|-------------|
|
|----------------------|----------|--------------------|-------------|
|
||||||
|firstEp | |At startup, the endpoint of the first dnode in the cluster to actively connect to, default value: hostname:6030, if the server's hostname cannot be obtained, it is assigned to localhost|
|
|firstEp | |Supported, effective immediately |At startup, the endpoint of the first dnode in the cluster to actively connect to, default value: hostname:6030, if the server's hostname cannot be obtained, it is assigned to localhost|
|
||||||
|secondEp | |At startup, if the firstEp cannot be connected, try to connect to the endpoint of the second dnode in the cluster, no default value|
|
|secondEp | |Supported, effective immediately |At startup, if the firstEp cannot be connected, try to connect to the endpoint of the second dnode in the cluster, no default value|
|
||||||
|compressMsgSize | |Whether to compress RPC messages; -1: no messages are compressed; 0: all messages are compressed; N (N>0): only messages larger than N bytes are compressed; default value -1|
|
|compressMsgSize | |Supported, effective immediately |Whether to compress RPC messages; -1: no messages are compressed; 0: all messages are compressed; N (N>0): only messages larger than N bytes are compressed; default value -1|
|
||||||
|shellActivityTimer | |The duration in seconds for the client to send heartbeats to mnode, range 1-120, default value 3|
|
|shellActivityTimer | |Not supported |The duration in seconds for the client to send heartbeats to mnode, range 1-120, default value 3|
|
||||||
|numOfRpcSessions | |Maximum number of connections supported by RPC, range 100-100000, default value 30000|
|
|numOfRpcSessions | |Supported, effective immediately |Maximum number of connections supported by RPC, range 100-100000, default value 30000|
|
||||||
|numOfRpcThreads | |Number of threads for RPC to send and receive data, range 1-50, default value is half of the CPU cores|
|
|numOfRpcThreads | |Not supported |Number of threads for RPC to send and receive data, range 1-50, default value is half of the CPU cores|
|
||||||
|numOfTaskQueueThreads | |Number of threads for the client to handle RPC messages, range 4-16, default value is half of the CPU cores|
|
|numOfTaskQueueThreads | |Not supported |Number of threads for the client to handle RPC messages, range 4-16, default value is half of the CPU cores|
|
||||||
|timeToGetAvailableConn| Cancelled after 3.3.4.* |The longest waiting time to get an available connection, range 10-50000000, in milliseconds, default value 500000|
|
|timeToGetAvailableConn| Cancelled after 3.3.4.* |Not supported |The longest waiting time to get an available connection, range 10-50000000, in milliseconds, default value 500000|
|
||||||
|useAdapter | |Internal parameter, whether to use taosadapter, affects CSV file import|
|
|useAdapter | |Supported, effective immediately |Internal parameter, whether to use taosadapter, affects CSV file import|
|
||||||
|shareConnLimit |Added in 3.3.4.0|Internal parameter, the number of queries a link can share, range 1-256, default value 10|
|
|shareConnLimit |Added in 3.3.4.0|Not supported |Internal parameter, the number of queries a link can share, range 1-256, default value 10|
|
||||||
|readTimeout |Added in 3.3.4.0|Internal parameter, minimum timeout, range 64-604800, in seconds, default value 900|
|
|readTimeout |Added in 3.3.4.0|Not supported |Internal parameter, minimum timeout, range 64-604800, in seconds, default value 900|
|
||||||
|
|
||||||
### Query Related
|
### Query Related
|
||||||
|
|
||||||
|Parameter Name|Supported Version|Description|
|
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||||
|---------------------------------|---------|-|
|
|----------------------|----------|--------------------|-------------|
|
||||||
|countAlwaysReturnValue | |Whether the count/hyperloglog function returns a value when the input data is empty or NULL; 0: returns an empty row, 1: returns; default value 1; when this parameter is set to 1, if the query contains an INTERVAL clause or the query uses TSMA, and the corresponding group or window has empty or NULL data, the corresponding group or window will not return a query result; note that this parameter should be consistent between client and server|
|
|countAlwaysReturnValue | |Supported, effective immediately |Whether the count/hyperloglog function returns a value when the input data is empty or NULL; 0: returns an empty row, 1: returns; default value 1; when this parameter is set to 1, if the query contains an INTERVAL clause or the query uses TSMA, and the corresponding group or window has empty or NULL data, the corresponding group or window will not return a query result; note that this parameter should be consistent between client and server|
|
||||||
|keepColumnName | |Automatically sets the alias to the column name (excluding the function name) when querying with Last, First, LastRow functions without specifying an alias, thus the order by clause will automatically refer to the column corresponding to the function; 1: automatically sets the alias to the column name (excluding the function name), 0: does not automatically set an alias; default value: 0|
|
|keepColumnName | |Supported, effective immediately |Automatically sets the alias to the column name (excluding the function name) when querying with Last, First, LastRow functions without specifying an alias, thus the order by clause will automatically refer to the column corresponding to the function; 1: automatically sets the alias to the column name (excluding the function name), 0: does not automatically set an alias; default value: 0|
|
||||||
|multiResultFunctionStarReturnTags|After 3.3.3.0|When querying a supertable, whether last(\*)/last_row(\*)/first(\*) returns tag columns; when querying basic tables, subtables, it is not affected by this parameter; 0: does not return tag columns, 1: returns tag columns; default value: 0; when this parameter is set to 0, last(\*)/last_row(\*)/first(\*) only returns the ordinary columns of the supertable; when set to 1, it returns both the ordinary columns and tag columns of the supertable|
|
|multiResultFunctionStarReturnTags|After 3.3.3.0|Supported, effective immediately |When querying a supertable, whether last(\*)/last_row(\*)/first(\*) returns tag columns; when querying basic tables, subtables, it is not affected by this parameter; 0: does not return tag columns, 1: returns tag columns; default value: 0; when this parameter is set to 0, last(\*)/last_row(\*)/first(\*) only returns the ordinary columns of the supertable; when set to 1, it returns both the ordinary columns and tag columns of the supertable|
|
||||||
|metaCacheMaxSize | |Specifies the maximum size of metadata cache for a single client, in MB; default value -1, meaning unlimited|
|
|metaCacheMaxSize | |Supported, effective immediately |Specifies the maximum size of metadata cache for a single client, in MB; default value -1, meaning unlimited|
|
||||||
|maxTsmaCalcDelay | |The allowable delay for tsma calculation by the client during query, range 600s - 86400s, i.e., 10 minutes - 1 day; default value: 600 seconds|
|
|maxTsmaCalcDelay | |Supported, effective immediately |The allowable delay for tsma calculation by the client during query, range 600s - 86400s, i.e., 10 minutes - 1 day; default value: 600 seconds|
|
||||||
|tsmaDataDeleteMark | |The retention time for intermediate results of historical data calculated by TSMA, in milliseconds; range >= 3600000, i.e., at least 1h; default value: 86400000, i.e., 1d |
|
|tsmaDataDeleteMark | |Supported, effective immediately |The retention time for intermediate results of historical data calculated by TSMA, in milliseconds; range >= 3600000, i.e., at least 1h; default value: 86400000, i.e., 1d |
|
||||||
|queryPolicy | |Execution strategy for query statements, 1: only use vnode, do not use qnode; 2: subtasks without scan operators are executed on qnode, subtasks with scan operators are executed on vnode; 3: vnode only runs scan operators, all other operators are executed on qnode; default value: 1|
|
|queryPolicy | |Supported, effective immediately |Execution strategy for query statements, 1: only use vnode, do not use qnode; 2: subtasks without scan operators are executed on qnode, subtasks with scan operators are executed on vnode; 3: vnode only runs scan operators, all other operators are executed on qnode; default value: 1|
|
||||||
|queryTableNotExistAsEmpty | |Whether to return an empty result set when the queried table does not exist; false: returns an error; true: returns an empty result set; default value false|
|
|queryTableNotExistAsEmpty | |Supported, effective immediately |Whether to return an empty result set when the queried table does not exist; false: returns an error; true: returns an empty result set; default value false|
|
||||||
|querySmaOptimize | |Optimization strategy for sma index, 0: do not use sma index, always query from original data; 1: use sma index, directly query from pre-calculated results for eligible statements; default value: 0|
|
|querySmaOptimize | |Supported, effective immediately |Optimization strategy for sma index, 0: do not use sma index, always query from original data; 1: use sma index, directly query from pre-calculated results for eligible statements; default value: 0|
|
||||||
|queryPlannerTrace | |Internal parameter, whether the query plan outputs detailed logs|
|
|queryPlannerTrace | |Supported, effective immediately |Internal parameter, whether the query plan outputs detailed logs|
|
||||||
|queryNodeChunkSize | |Internal parameter, chunk size of the query plan|
|
|queryNodeChunkSize | |Supported, effective immediately |Internal parameter, chunk size of the query plan|
|
||||||
|queryUseNodeAllocator | |Internal parameter, allocation method of the query plan|
|
|queryUseNodeAllocator | |Supported, effective immediately |Internal parameter, allocation method of the query plan|
|
||||||
|queryMaxConcurrentTables | |Internal parameter, concurrency number of the query plan|
|
|queryMaxConcurrentTables | |Not supported |Internal parameter, concurrency number of the query plan|
|
||||||
|enableQueryHb | |Internal parameter, whether to send query heartbeat messages|
|
|enableQueryHb | |Supported, effective immediately |Internal parameter, whether to send query heartbeat messages|
|
||||||
|minSlidingTime | |Internal parameter, minimum allowable value for sliding|
|
|minSlidingTime | |Supported, effective immediately |Internal parameter, minimum allowable value for sliding|
|
||||||
|minIntervalTime | |Internal parameter, minimum allowable value for interval|
|
|minIntervalTime | |Supported, effective immediately |Internal parameter, minimum allowable value for interval|
|
||||||
|
|
||||||
### Writing Related
|
### Writing Related
|
||||||
|
|
||||||
| Parameter Name | Supported Version | Description |
|
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||||
|---------------------------------|-------------------|-------------|
|
|----------------------|----------|--------------------|-------------|
|
||||||
| smlChildTableName | | Key for custom child table name in schemaless, no default value |
|
| smlChildTableName | |Supported, effective immediately | Key for custom child table name in schemaless, no default value |
|
||||||
| smlAutoChildTableNameDelimiter | | Delimiter between schemaless tags, concatenated as the child table name, no default value |
|
| smlAutoChildTableNameDelimiter | |Supported, effective immediately | Delimiter between schemaless tags, concatenated as the child table name, no default value |
|
||||||
| smlTagName | | Default tag name when schemaless tag is empty, default value "_tag_null" |
|
| smlTagName | |Supported, effective immediately | Default tag name when schemaless tag is empty, default value "_tag_null" |
|
||||||
| smlTsDefaultName | | Configuration for setting the time column name in schemaless auto table creation, default value "_ts" |
|
| smlTsDefaultName | |Supported, effective immediately | Configuration for setting the time column name in schemaless auto table creation, default value "_ts" |
|
||||||
| smlDot2Underline | | Converts dots in supertable names to underscores in schemaless |
|
| smlDot2Underline | |Supported, effective immediately | Converts dots in supertable names to underscores in schemaless |
|
||||||
| maxInsertBatchRows | | Internal parameter, maximum number of rows per batch insert |
|
| maxInsertBatchRows | |Supported, effective immediately | Internal parameter, maximum number of rows per batch insert |
|
||||||
|
|
||||||
### Region Related
|
### Region Related
|
||||||
|
|
||||||
| Parameter Name | Supported Version | Description |
|
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||||
|----------------|-------------------|-------------|
|
|----------------------|----------|--------------------|-------------|
|
||||||
| timezone | | Time zone; defaults to dynamically obtaining the current system time zone setting |
|
| timezone | |Supported, effective immediately | Time zone; defaults to dynamically obtaining the current system time zone setting |
|
||||||
| locale | | System locale and encoding format, defaults to system settings |
|
| locale | |Supported, effective immediately | System locale and encoding format, defaults to system settings |
|
||||||
| charset | | Character set encoding, defaults to system settings |
|
| charset | |Supported, effective immediately | Character set encoding, defaults to system settings |
|
||||||
|
|
||||||
### Storage Related
|
### Storage Related
|
||||||
|
|
||||||
| Parameter Name | Supported Version | Description |
|
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||||
|-----------------|-------------------|-------------|
|
|----------------------|----------|--------------------|-------------|
|
||||||
| tempDir | | Specifies the directory for generating temporary files during operation, default on Linux platform is /tmp |
|
| tempDir | |Supported, effective immediately | Specifies the directory for generating temporary files during operation, default on Linux platform is /tmp |
|
||||||
| minimalTmpDirGB | | Minimum space required to be reserved in the directory specified by tempDir, in GB, default value: 1 |
|
| minimalTmpDirGB | |Supported, effective immediately | Minimum space required to be reserved in the directory specified by tempDir, in GB, default value: 1 |
|
||||||
|
|
||||||
### Log Related
|
### Log Related
|
||||||
|
|
||||||
| Parameter Name | Supported Version | Description |
|
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||||
|------------------|-------------------|-------------|
|
|----------------------|----------|--------------------|-------------|
|
||||||
| logDir | | Log file directory, operational logs will be written to this directory, default value: /var/log/taos |
|
| logDir | |Not supported | Log file directory, operational logs will be written to this directory, default value: /var/log/taos |
|
||||||
| minimalLogDirGB | | Stops writing logs when the disk space available in the log directory is less than this value, in GB, default value: 1 |
|
| minimalLogDirGB | |Supported, effective immediately | Stops writing logs when the disk space available in the log directory is less than this value, in GB, default value: 1 |
|
||||||
| numOfLogLines | | Maximum number of lines allowed in a single log file, default value: 10,000,000 |
|
| numOfLogLines | |Supported, effective immediately | Maximum number of lines allowed in a single log file, default value: 10,000,000 |
|
||||||
| asyncLog | | Log writing mode, 0: synchronous, 1: asynchronous, default value: 1 |
|
| asyncLog | |Supported, effective immediately | Log writing mode, 0: synchronous, 1: asynchronous, default value: 1 |
|
||||||
| logKeepDays | | Maximum retention time for log files, in days, default value: 0, meaning unlimited retention. Log files will not be renamed, nor will new log files be rolled out, but the content of the log files may continue to roll depending on the log file size setting; when set to a value greater than 0, the log file will be renamed to taoslogx.yyy, where yyy is the timestamp of the last modification of the log file, and a new log file will be rolled out |
|
| logKeepDays | |Supported, effective immediately | Maximum retention time for log files, in days, default value: 0, meaning unlimited retention. Log files will not be renamed, nor will new log files be rolled out, but the content of the log files may continue to roll depending on the log file size setting; when set to a value greater than 0, the log file will be renamed to taoslogx.yyy, where yyy is the timestamp of the last modification of the log file, and a new log file will be rolled out |
|
||||||
| debugFlag | | Log switch for running logs, 131 (output error and warning logs), 135 (output error, warning, and debug logs), 143 (output error, warning, debug, and trace logs); default value 131 or 135 (depending on the module) |
|
| debugFlag | |Supported, effective immediately | Log switch for running logs, 131 (output error and warning logs), 135 (output error, warning, and debug logs), 143 (output error, warning, debug, and trace logs); default value 131 or 135 (depending on the module) |
|
||||||
| tmrDebugFlag | | Log switch for the timer module, value range as above |
|
| tmrDebugFlag | |Supported, effective immediately | Log switch for the timer module, value range as above |
|
||||||
| uDebugFlag | | Log switch for the utility module, value range as above |
|
| uDebugFlag | |Supported, effective immediately | Log switch for the utility module, value range as above |
|
||||||
| rpcDebugFlag | | Log switch for the rpc module, value range as above |
|
| rpcDebugFlag | |Supported, effective immediately | Log switch for the rpc module, value range as above |
|
||||||
| jniDebugFlag | | Log switch for the jni module, value range as above |
|
| jniDebugFlag | |Supported, effective immediately | Log switch for the jni module, value range as above |
|
||||||
| qDebugFlag | | Log switch for the query module, value range as above |
|
| qDebugFlag | |Supported, effective immediately | Log switch for the query module, value range as above |
|
||||||
| cDebugFlag | | Log switch for the client module, value range as above |
|
| cDebugFlag | |Supported, effective immediately | Log switch for the client module, value range as above |
|
||||||
| simDebugFlag | | Internal parameter, log switch for the test tool, value range as above |
|
| simDebugFlag | |Supported, effective immediately | Internal parameter, log switch for the test tool, value range as above |
|
||||||
| tqClientDebugFlag| After 3.3.4.3 | Log switch for the client module, value range as above |
|
| tqClientDebugFlag| After 3.3.4.3 |Supported, effective immediately | Log switch for the client module, value range as above |
|
||||||
|
|
||||||
### Debugging Related
|
### Debugging Related
|
||||||
|
|
||||||
| Parameter Name | Supported Version | Description |
|
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||||
|------------------|-------------------|-------------|
|
|----------------------|----------|--------------------|-------------|
|
||||||
| crashReporting | | Whether to upload crash to telemetry, 0: do not upload, 1: upload; default value: 1 |
|
| crashReporting | |Supported, effective immediately | Whether to upload crash to telemetry, 0: do not upload, 1: upload; default value: 1 |
|
||||||
| enableCoreFile | | Whether to generate a core file when crashing, 0: do not generate, 1: generate; default value: 1 |
|
| enableCoreFile | |Supported, effective immediately | Whether to generate a core file when crashing, 0: do not generate, 1: generate; default value: 1 |
|
||||||
| assert | | Assertion control switch, default value: 0 |
|
| assert | |Not supported | Assertion control switch, default value: 0 |
|
||||||
| configDir | | Directory for configuration files |
|
| configDir | |Not supported | Directory for configuration files |
|
||||||
| scriptDir | | Internal parameter, directory for test cases |
|
| scriptDir | |Not supported | Internal parameter, directory for test cases |
|
||||||
| randErrorChance | After 3.3.3.0 | Internal parameter, used for random failure testing |
|
| randErrorChance | After 3.3.3.0 |Not supported | Internal parameter, used for random failure testing |
|
||||||
| randErrorDivisor | After 3.3.3.0 | Internal parameter, used for random failure testing |
|
| randErrorDivisor | After 3.3.3.0 |Not supported | Internal parameter, used for random failure testing |
|
||||||
| randErrorScope | After 3.3.3.0 | Internal parameter, used for random failure testing |
|
| randErrorScope | After 3.3.3.0 |Not supported | Internal parameter, used for random failure testing |
|
||||||
| safetyCheckLevel | After 3.3.3.0 | Internal parameter, used for random failure testing |
|
| safetyCheckLevel | After 3.3.3.0 |Not supported | Internal parameter, used for random failure testing |
|
||||||
| simdEnable | After 3.3.4.3 | Internal parameter, used for testing SIMD acceleration |
|
| simdEnable | After 3.3.4.3 |Not supported | Internal parameter, used for testing SIMD acceleration |
|
||||||
| AVX512Enable | After 3.3.4.3 | Internal parameter, used for testing AVX512 acceleration |
|
| AVX512Enable | After 3.3.4.3 |Not supported | Internal parameter, used for testing AVX512 acceleration |
|
||||||
|
| bypassFlag |After 3.3.4.5 |Supported, effective immediately | Internal parameter, used for short-circuit testing|
|
||||||
|
|
||||||
|
|
||||||
### SHELL Related
|
### SHELL Related
|
||||||
|
|
||||||
|Parameter Name|Supported Version|Description|
|
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||||
|-----------------|----------|-|
|
|----------------------|----------|--------------------|-------------|
|
||||||
|enableScience | |Whether to enable scientific notation for displaying floating numbers; 0: do not enable, 1: enable; default value: 1|
|
|enableScience | |Not supported |Whether to enable scientific notation for displaying floating numbers; 0: do not enable, 1: enable; default value: 1|
|
||||||
|
|
||||||
## API
|
## API
|
||||||
|
|
||||||
|
|
|
@ -215,4 +215,19 @@ Automatically adjusts the distribution of vnodes in all vgroups of the cluster,
|
||||||
SHOW db_name.ALIVE;
|
SHOW db_name.ALIVE;
|
||||||
```
|
```
|
||||||
|
|
||||||
Query the availability status of the database db_name, return values 0: unavailable, 1: fully available, 2: partially available (i.e., some nodes of the VNODEs included in the database are available, some are not)
|
Query the availability status of the database db_name, with return values of 0 (unavailable), 1 (fully available), or 2 (partially available, indicating that some VNODEs in the database are available while others are not).
|
||||||
|
|
||||||
|
## View DB Disk Usage
|
||||||
|
|
||||||
|
```sql
|
||||||
|
select * from INFORMATION_SCHEMA.INS_DISK_USAGE where db_name = 'db_name'
|
||||||
|
```
|
||||||
|
|
||||||
|
View the disk usage of each module in the DB.
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SHOW db_name.disk_info;
|
||||||
|
```
|
||||||
|
View the compression ratio and disk usage of the database db_name
|
||||||
|
|
||||||
|
This command is essentially equivalent to `select sum(data1 + data2 + data3)/sum(raw_data), sum(data1 + data2 + data3) from information_schema.ins_disk_usage where db_name="dbname"`
|
||||||
|
|
|
@ -130,11 +130,25 @@ The forward sliding time of SLIDING cannot exceed the time range of one window.
|
||||||
SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m);
|
SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m);
|
||||||
```
|
```
|
||||||
|
|
||||||
|
The INTERVAL clause allows the use of the AUTO keyword to specify the window offset. If the WHERE condition provides a clear applicable start time limit, the required offset will be automatically calculated, dividing the time window from that point; otherwise, it defaults to an offset of 0. Here are some simple examples:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- With a start time limit, divide the time window from '2018-10-03 14:38:05'
|
||||||
|
SELECT COUNT(*) FROM meters WHERE _rowts >= '2018-10-03 14:38:05' INTERVAL (1m, AUTO);
|
||||||
|
|
||||||
|
-- Without a start time limit, defaults to an offset of 0
|
||||||
|
SELECT COUNT(*) FROM meters WHERE _rowts < '2018-10-03 15:00:00' INTERVAL (1m, AUTO);
|
||||||
|
|
||||||
|
-- Unclear start time limit, defaults to an offset of 0
|
||||||
|
SELECT COUNT(*) FROM meters WHERE _rowts - voltage > 1000000;
|
||||||
|
```
|
||||||
|
|
||||||
When using time windows, note:
|
When using time windows, note:
|
||||||
|
|
||||||
- The window width of the aggregation period is specified by the keyword INTERVAL, with the shortest interval being 10 milliseconds (10a); it also supports an offset (the offset must be less than the interval), which is the offset of the time window division compared to "UTC moment 0". The SLIDING statement is used to specify the forward increment of the aggregation period, i.e., the duration of each window slide forward.
|
- The window width of the aggregation period is specified by the keyword INTERVAL, with the shortest interval being 10 milliseconds (10a); it also supports an offset (the offset must be less than the interval), which is the offset of the time window division compared to "UTC moment 0". The SLIDING statement is used to specify the forward increment of the aggregation period, i.e., the duration of each window slide forward.
|
||||||
- When using the INTERVAL statement, unless in very special cases, it is required to configure the timezone parameter in the taos.cfg configuration files of both the client and server to the same value to avoid frequent cross-time zone conversions by time processing functions, which can cause severe performance impacts.
|
- When using the INTERVAL statement, unless in very special cases, it is required to configure the timezone parameter in the taos.cfg configuration files of both the client and server to the same value to avoid frequent cross-time zone conversions by time processing functions, which can cause severe performance impacts.
|
||||||
- The returned results have a strictly monotonically increasing time-series.
|
- The returned results have a strictly monotonically increasing time-series.
|
||||||
|
- When using AUTO as the window offset, if the window width unit is d (day), n (month), w (week), y (year), such as: INTERVAL(1d, AUTO), INTERVAL(3w, AUTO), the TSMA optimization cannot take effect. If TSMA is manually created on the target table, the statement will report an error and exit; in this case, you can explicitly specify the Hint SKIP_TSMA or not use AUTO as the window offset.
|
||||||
|
|
||||||
### State Window
|
### State Window
|
||||||
|
|
||||||
|
|
|
@ -41,38 +41,28 @@ If there is a single replica on the node and the node is offline, to forcibly de
|
||||||
ALTER DNODE dnode_id dnode_option
|
ALTER DNODE dnode_id dnode_option
|
||||||
|
|
||||||
ALTER ALL DNODES dnode_option
|
ALTER ALL DNODES dnode_option
|
||||||
|
|
||||||
dnode_option: {
|
|
||||||
'resetLog'
|
|
||||||
| 'balance' 'value'
|
|
||||||
| 'monitor' 'value'
|
|
||||||
| 'debugFlag' 'value'
|
|
||||||
| 'monDebugFlag' 'value'
|
|
||||||
| 'vDebugFlag' 'value'
|
|
||||||
| 'mDebugFlag' 'value'
|
|
||||||
| 'cDebugFlag' 'value'
|
|
||||||
| 'httpDebugFlag' 'value'
|
|
||||||
| 'qDebugflag' 'value'
|
|
||||||
| 'sdbDebugFlag' 'value'
|
|
||||||
| 'uDebugFlag' 'value'
|
|
||||||
| 'tsdbDebugFlag' 'value'
|
|
||||||
| 'sDebugflag' 'value'
|
|
||||||
| 'rpcDebugFlag' 'value'
|
|
||||||
| 'dDebugFlag' 'value'
|
|
||||||
| 'mqttDebugFlag' 'value'
|
|
||||||
| 'wDebugFlag' 'value'
|
|
||||||
| 'tmrDebugFlag' 'value'
|
|
||||||
| 'cqDebugFlag' 'value'
|
|
||||||
}
|
|
||||||
```
|
```
|
||||||
|
|
||||||
The modifiable configuration items in the syntax above are configured in the same way as in the dnode configuration file, the difference being that modifications are dynamic, take immediate effect, and do not require restarting the dnode.
|
For configuration parameters that support dynamic modification, you can use the ALTER DNODE or ALTER ALL DNODES syntax to modify the values of configuration parameters in a dnode. Starting from version 3.3.4.0, the modified configuration parameters will be automatically persisted and will remain effective even after the database service is restarted.
|
||||||
|
|
||||||
`value` is the value of the parameter, which needs to be in string format. For example, to change the log output level of dnode 1 to debug:
|
To check whether a configuration parameter supports dynamic modification, please refer to the following page: [taosd Reference](../01-components/01-taosd.md)
|
||||||
|
|
||||||
|
The value is the parameter's value and needs to be in string format. For example, to change the log output level of dnode 1 to debug:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
ALTER DNODE 1 'debugFlag' '143';
|
ALTER DNODE 1 'debugFlag' '143';
|
||||||
```
|
```
|
||||||
|
### Additional Notes:
|
||||||
|
Configuration parameters in a dnode are divided into global configuration parameters and local configuration parameters. You can check the category field in SHOW VARIABLES or SHOW DNODE dnode_id VARIABLE to determine whether a configuration parameter is a global configuration parameter or a local configuration parameter:
|
||||||
|
|
||||||
|
Local configuration parameters: You can use ALTER DNODE or ALTER ALL DNODES to update the local configuration parameters of a specific dnode or all dnodes.
|
||||||
|
Global configuration parameters: Global configuration parameters require consistency across all dnodes, so you can only use ALTER ALL DNODES to update the global configuration parameters of all dnodes.
|
||||||
|
There are three cases for whether a configuration parameter can be dynamically modified:
|
||||||
|
|
||||||
|
Supports dynamic modification, effective immediately
|
||||||
|
Supports dynamic modification, effective after restart
|
||||||
|
Does not support dynamic modification
|
||||||
|
For configuration parameters that take effect after a restart, you can see the modified values through SHOW VARIABLES or SHOW DNODE dnode_id VARIABLE, but you need to restart the database service to make them effective.
|
||||||
|
|
||||||
## Add Management Node
|
## Add Management Node
|
||||||
|
|
||||||
|
@ -136,18 +126,12 @@ If the client is also considered as part of the cluster in a broader sense, the
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
ALTER LOCAL local_option
|
ALTER LOCAL local_option
|
||||||
|
|
||||||
local_option: {
|
|
||||||
'resetLog'
|
|
||||||
| 'rpcDebugFlag' 'value'
|
|
||||||
| 'tmrDebugFlag' 'value'
|
|
||||||
| 'cDebugFlag' 'value'
|
|
||||||
| 'uDebugFlag' 'value'
|
|
||||||
| 'debugFlag' 'value'
|
|
||||||
}
|
|
||||||
```
|
```
|
||||||
|
|
||||||
The parameters in the syntax above are used in the same way as in the configuration file for the client, but do not require a restart of the client, and the changes take effect immediately.
|
You can use the above syntax to modify the client's configuration parameters, and there is no need to restart the client. The changes take effect immediately.
|
||||||
|
|
||||||
|
To check whether a configuration parameter supports dynamic modification, please refer to the following page: [taosc Reference](../01-components/02-taosc.md)
|
||||||
|
|
||||||
|
|
||||||
## View Client Configuration
|
## View Client Configuration
|
||||||
|
|
||||||
|
|
|
@ -317,10 +317,43 @@ Configuration parameters for each dnode in the system. Users with SYSINFO attrib
|
||||||
|
|
||||||
Note: Users with SYSINFO property set to 0 cannot view this table.
|
Note: Users with SYSINFO property set to 0 cannot view this table.
|
||||||
|
|
||||||
| # | **Column Name** | **Data Type** | **Description** |
|
| # | **Column Name** | **Data Type** | **Description** |
|
||||||
| ---- | :-------------: | -------------- | ----------------------------------- |
|
|:-----|:----------------|:---------------|:-------------------------------------|
|
||||||
| 1 | user_name | VARCHAR(24) | Username |
|
| 1 | user_name | VARCHAR(24) | Username |
|
||||||
| 2 | privilege | VARCHAR(10) | Privilege description |
|
| 2 | privilege | VARCHAR(10) | Permission description |
|
||||||
| 3 | db_name | VARCHAR(65) | Database name |
|
| 3 | db_name | VARCHAR(65) | Database name |
|
||||||
| 4 | table_name | VARCHAR(193) | Table name |
|
| 4 | table_name | VARCHAR(193) | Table name |
|
||||||
| 5 | condition | VARCHAR(49152) | Subtable privilege filter condition |
|
| 5 | condition | VARCHAR(49152) | Subtable permission filter condition |
|
||||||
|
|
||||||
|
|
||||||
|
## INS_DISK_USAGE
|
||||||
|
|
||||||
|
| # | **Column Name** | **Data type** | **Description**|
|
||||||
|
|:----|:-----------|:-----------|:--------------------|
|
||||||
|
| 1 | db_name | VARCHAR(32) | Database name |
|
||||||
|
| 2 | vgroup_id | INT | vgroup ID |
|
||||||
|
| 3 | wal | BIGINT | WAL file size, in KB |
|
||||||
|
| 4 | data1 | BIGINT | Data file size on primary storage, in KB |
|
||||||
|
| 5 | data2 | BIGINT | Data file size on secondary storage, in KB |
|
||||||
|
| 6 | data3 | BIGINT | Data file size on tertiary storage, in KB |
|
||||||
|
| 7 | cache_rdb | BIGINT | Size of last/last_row files, in KB |
|
||||||
|
| 8 | table_meta | BIGINT | Size of meta files, in KB |
|
||||||
|
| 9 | s3 | BIGINT | Size occupied on S3, in KB |
|
||||||
|
| 10 | raw_data | BIGINT | Estimated size of raw data, in KB |
|
||||||
|
|
||||||
|
note:
|
||||||
|
|
||||||
|
## INS_FILESETS
|
||||||
|
|
||||||
|
Provides information about file sets.
|
||||||
|
|
||||||
|
| # | **Column** | **Data Type** | **Description** |
|
||||||
|
| --- | :-----------: | ------------- | ---------------------------------------------------- |
|
||||||
|
| 1 | db_name | VARCHAR(65) | Database name |
|
||||||
|
| 2 | vgroup_id | INT | Vgroup ID |
|
||||||
|
| 3 | fileset_id | INT | File set ID |
|
||||||
|
| 4 | start_time | TIMESTAMP | Start time of the time range covered by the file set |
|
||||||
|
| 5 | end_time | TIMESTAMP | End time of the time range covered by the file set |
|
||||||
|
| 6 | total_size | BIGINT | Total size of the file set |
|
||||||
|
| 7 | last_compact | TIMESTAMP | Time of the last compaction |
|
||||||
|
| 8 | shold_compact | bool | Whether the file set should be compacted |
|
||||||
|
|
|
@ -8,7 +8,7 @@ User and permission management is a feature of TDengine Enterprise Edition. This
|
||||||
## Create User
|
## Create User
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
CREATE USER user_name PASS 'password' [SYSINFO {1|0}];
|
CREATE USER user_name PASS 'password' [SYSINFO {1|0}] [CREATEDB {1|0}];
|
||||||
```
|
```
|
||||||
|
|
||||||
The username can be up to 23 bytes long.
|
The username can be up to 23 bytes long.
|
||||||
|
@ -17,6 +17,8 @@ The password must be between 8 and 16 characters long and include at least three
|
||||||
|
|
||||||
`SYSINFO` indicates whether the user can view system information. `1` means they can view, `0` means they have no permission to view. System information includes service configuration, dnode, vnode, storage, etc. The default value is `1`.
|
`SYSINFO` indicates whether the user can view system information. `1` means they can view, `0` means they have no permission to view. System information includes service configuration, dnode, vnode, storage, etc. The default value is `1`.
|
||||||
|
|
||||||
|
`CREATEDB` indicates whether the user can create databases. `1` means they can create databases, `0` means they have no permission to create databases. The default value is `0`. (Supported starting from TDengine Enterprise version 3.3.2.0.)
|
||||||
|
|
||||||
In the example below, we create a user with the password `abc123!@#` who can view system information.
|
In the example below, we create a user with the password `abc123!@#` who can view system information.
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
|
@ -76,7 +78,7 @@ alter_user_clause: {
|
||||||
- PASS: Change the password, followed by the new password
|
- PASS: Change the password, followed by the new password
|
||||||
- ENABLE: Enable or disable the user, `1` means enable, `0` means disable
|
- ENABLE: Enable or disable the user, `1` means enable, `0` means disable
|
||||||
- SYSINFO: Allow or prohibit viewing system information, `1` means allow, `0` means prohibit
|
- SYSINFO: Allow or prohibit viewing system information, `1` means allow, `0` means prohibit
|
||||||
- CREATEDB: Allow or prohibit creating databases, `1` means allow, `0` means prohibit
|
- CREATEDB: Allow or prohibit creating databases, `1` means allow, `0` means prohibit. (Supported starting from TDengine Enterprise version 3.3.2.0.)
|
||||||
|
|
||||||
The following example disables the user named `test`:
|
The following example disables the user named `test`:
|
||||||
|
|
||||||
|
|
|
@ -688,6 +688,27 @@ The basic API is used to establish database connections and provide a runtime en
|
||||||
- `arg`: [Input] Setting item value.
|
- `arg`: [Input] Setting item value.
|
||||||
- **Return Value**: `0`: Success, `-1`: Failure.
|
- **Return Value**: `0`: Success, `-1`: Failure.
|
||||||
|
|
||||||
|
- `int taos_options_connection(TAOS *taos, TSDB_OPTION_CONNECTION option, const void *arg, ...)`
|
||||||
|
- **description**: Set each connection option on the client side. Currently, it supports character set setting (`TSDB_OPTION_CONNECTION_CHARSET`), time zone setting (`TSDB_OPTION_CONNECTION_TIMEZONE`), user IP setting (`TSDB_OPTION_CONNECTION_USER_IP`), and user APP setting (`TSDB_OPTION_CONNECTION_USER_APP`).
|
||||||
|
- **input**:
|
||||||
|
- `taos`: returned by taos_connect.
|
||||||
|
- `option`: option name.
|
||||||
|
- `arg`: option value.
|
||||||
|
- **return**:
|
||||||
|
- `0`: success.
|
||||||
|
- `others`: fail.
|
||||||
|
- **notice**:
|
||||||
|
- The character set and time zone default to the current settings of the operating system; Windows does not support connection-level time zone settings.
|
||||||
|
- When arg is NULL, it means resetting the option.
|
||||||
|
- This interface is only valid for the current connection and will not affect other connections.
|
||||||
|
- If the same parameter is called multiple times, the latter shall prevail and can be used as a modification method.
|
||||||
|
- The option of TSDB_OPTION_CONNECTION_CLEAR is used to reset all connection options.
|
||||||
|
- After a reset, the time zone and character set fall back to the operating system settings, and the user IP and user app are reset to empty.
|
||||||
|
- The values of the connection options are all of string type, and the maximum length of the user app parameter is 23 characters; longer values are truncated. An error is reported when other parameters are illegal.
|
||||||
|
- If the time zone value cannot be used to find a time zone file and cannot be interpreted as a direct specification, UTC is used, following the same time zone rules as the operating system. Please refer to the `tzset` function description for details. You can view the current time zone of the connection with the SQL statement `SELECT timezone()`.
|
||||||
|
- Time zones and character sets only work on the client side and do not affect related behaviors on the server side.
|
||||||
|
- The time zone files are the operating system's time zone files and can be updated manually. If an error occurs when setting the time zone, please check whether the time zone file or path (macOS: /var/db/timezone/zoneinfo, Linux: /usr/share/zoneinfo) is correct.
|
||||||
|
|
||||||
- `char *taos_get_client_info()`
|
- `char *taos_get_client_info()`
|
||||||
- **Interface Description**: Gets client version information.
|
- **Interface Description**: Gets client version information.
|
||||||
- **Return Value**: Returns client version information.
|
- **Return Value**: Returns client version information.
|
||||||
|
|
|
@ -15,10 +15,20 @@ This document details the server error codes that may be encountered when using
|
||||||
| 0x80000015 | Unable to resolve FQDN | Invalid fqdn set | Check fqdn settings |
|
| 0x80000015 | Unable to resolve FQDN | Invalid fqdn set | Check fqdn settings |
|
||||||
| 0x80000017 | Port already in use | The port is already occupied by some service, and the newly started service still tries to bind to that port | 1. Change the server port of the new service 2. Kill the service that previously occupied the port |
|
| 0x80000017 | Port already in use | The port is already occupied by some service, and the newly started service still tries to bind to that port | 1. Change the server port of the new service 2. Kill the service that previously occupied the port |
|
||||||
| 0x80000018 | Conn is broken | Due to network jitter or request time being too long (over 900 seconds), the system actively disconnects | 1. Set the system's maximum timeout duration 2. Check request duration |
|
| 0x80000018 | Conn is broken | Due to network jitter or request time being too long (over 900 seconds), the system actively disconnects | 1. Set the system's maximum timeout duration 2. Check request duration |
|
||||||
| 0x80000019 | Conn read timeout | Not enabled | |
|
| 0x80000019 | Conn read timeout | 1. The request processing time is too long 2. The server is overwhelmed 3. The server is deadlocked | 1. Explicitly configure the readTimeout parameter 2. Analyze the stack on taos |
|
||||||
| 0x80000020 | some vnode/qnode/mnode(s) out of service | After multiple retries, still unable to connect to the cluster, possibly all nodes have crashed, or the surviving nodes are not Leader nodes | 1. Check the status of taosd, analyze the reasons for taosd crash 2. Analyze why the surviving taosd cannot elect a Leader |
|
| 0x80000020 | some vnode/qnode/mnode(s) out of service | After multiple retries, still unable to connect to the cluster, possibly all nodes have crashed, or the surviving nodes are not Leader nodes | 1. Check the status of taosd, analyze the reasons for taosd crash 2. Analyze why the surviving taosd cannot elect a Leader |
|
||||||
| 0x80000021 | some vnode/qnode/mnode(s) conn is broken | After multiple retries, still unable to connect to the cluster, possibly due to network issues, request time too long, server deadlock, etc. | 1. Check network 2. Request execution time |
|
| 0x80000021 | some vnode/qnode/mnode(s) conn is broken | After multiple retries, still unable to connect to the cluster, possibly due to network issues, request time too long, server deadlock, etc. | 1. Check network 2. Request execution time |
|
||||||
| 0x80000022 | rpc open too many session | 1. High concurrency causing the number of occupied connections to reach the limit 2. Server BUG, causing connections not to be released | 1. Adjust configuration parameter numOfRpcSessions 2. Adjust configuration parameter timeToGetAvailableConn 3. Analyze reasons for server not releasing connections |
|
| 0x80000022 | rpc open too many session | 1. High concurrency causing the number of occupied connections to reach the limit 2. Server BUG, causing connections not to be released | 1. Adjust configuration parameter numOfRpcSessions 2. Adjust configuration parameter timeToGetAvailableConn 3. Analyze reasons for server not releasing connections |
|
||||||
|
| 0x80000023 | RPC network error | 1. Network issues, possibly intermittent 2. Server crash | 1. Check the network 2. Check if the server has restarted |
|
||||||
|
| 0x80000024 | RPC network bus | 1. When pulling data between clusters, no available connections are obtained, or the number of connections has reached the limit | 1. Check if the concurrency is too high 2. Check if there are any anomalies in the cluster nodes, such as deadlocks |
|
||||||
|
| 0x80000025 | HTTP-report already quit | 1. Issues with HTTP reporting | Internal issue, can be ignored |
|
||||||
|
| 0x80000026 | RPC module already quit | 1. The client instance has already exited, but still uses the instance for queries | Check the business code to see if there is a mistake in usage |
|
||||||
|
| 0x80000027 | RPC async module already quit | 1. Engine error, can be ignored, this error code will not be returned to the user side | If returned to the user side, the engine side needs to investigate the issue |
|
||||||
|
| 0x80000028 | RPC async in process | 1. Engine error, can be ignored, this error code will not be returned to the user side | If returned to the user side, the engine side needs to investigate the issue |
|
||||||
|
| 0x80000029 | RPC no state | 1. Engine error, can be ignored, this error code will not be returned to the user side | If returned to the user side, the engine side needs to investigate the issue |
|
||||||
|
| 0x8000002A | RPC state already dropped | 1. Engine error, can be ignored, this error code will not be returned to the user side | If returned to the user side, the engine side needs to investigate the issue |
|
||||||
|
| 0x8000002B | RPC msg exceed limit | 1. Single RPC message exceeds the limit, this error code will not be returned to the user side | If returned to the user side, the engine side needs to investigate the issue |
|
||||||
|
|
||||||
|
|
||||||
## common
|
## common
|
||||||
|
|
||||||
|
@ -62,6 +72,8 @@ This document details the server error codes that may be encountered when using
|
||||||
| 0x80000133 | Invalid operation | Invalid or unsupported operation | 1. Modify to confirm the current operation is legal and supported, check parameter validity 2. If the problem persists, preserve the scene and logs, report issue on github |
|
| 0x80000133 | Invalid operation | Invalid or unsupported operation | 1. Modify to confirm the current operation is legal and supported, check parameter validity 2. If the problem persists, preserve the scene and logs, report issue on github |
|
||||||
| 0x80000134 | Invalid value | Invalid value | Preserve the scene and logs, report issue on github |
|
| 0x80000134 | Invalid value | Invalid value | Preserve the scene and logs, report issue on github |
|
||||||
| 0x80000135 | Invalid fqdn | Invalid FQDN | Check if the configured or input FQDN value is correct |
|
| 0x80000135 | Invalid fqdn | Invalid FQDN | Check if the configured or input FQDN value is correct |
|
||||||
|
| 0x8000013C | Invalid disk id | Invalid disk id | Check users whether the mounted disk is invalid or use the parameter diskIDCheckEnabled to skip the disk check. |
|
||||||
|
|
||||||
|
|
||||||
## tsc
|
## tsc
|
||||||
|
|
||||||
|
@ -241,6 +253,7 @@ This document details the server error codes that may be encountered when using
|
||||||
| 0x80000529 | Vnode is stopped | Vnode is closed | Report issue |
|
| 0x80000529 | Vnode is stopped | Vnode is closed | Report issue |
|
||||||
| 0x80000530 | Duplicate write request | Duplicate write request, internal error | Report issue |
|
| 0x80000530 | Duplicate write request | Duplicate write request, internal error | Report issue |
|
||||||
| 0x80000531 | Vnode query is busy | Query is busy | Report issue |
|
| 0x80000531 | Vnode query is busy | Query is busy | Report issue |
|
||||||
|
| 0x80000540 | Vnode already exist but Dbid not match | Internal error | Report issue |
|
||||||
|
|
||||||
## tsdb
|
## tsdb
|
||||||
|
|
||||||
|
@ -273,6 +286,9 @@ This document details the server error codes that may be encountered when using
|
||||||
| 0x80000729 | Task message error | Query message error | Preserve the scene and logs, report issue on GitHub |
|
| 0x80000729 | Task message error | Query message error | Preserve the scene and logs, report issue on GitHub |
|
||||||
| 0x8000072B | Task status error | Subquery status error | Preserve the scene and logs, report issue on GitHub |
|
| 0x8000072B | Task status error | Subquery status error | Preserve the scene and logs, report issue on GitHub |
|
||||||
| 0x8000072F | Job not exist | Query JOB no longer exists | Preserve the scene and logs, report issue on GitHub |
|
| 0x8000072F | Job not exist | Query JOB no longer exists | Preserve the scene and logs, report issue on GitHub |
|
||||||
|
| 0x80000739 | Query memory upper limit is reached | Single query memory upper limit is reached | Modify memory upper limit size or optimize SQL |
|
||||||
|
| 0x8000073A | Query memory exhausted | Query memory in dnode is exhausted | Limit concurrent queries or add more physical memory |
|
||||||
|
| 0x8000073B | Timeout for long time no fetch | Query without fetch for a long time | Correct application to fetch data asap |
|
||||||
|
|
||||||
## grant
|
## grant
|
||||||
|
|
||||||
|
@ -493,6 +509,7 @@ This document details the server error codes that may be encountered when using
|
||||||
| 0x80003103 | Invalid tsma state | The vgroup of the stream computing result is inconsistent with the vgroup that created the TSMA index | Check error logs, contact development for handling |
|
| 0x80003103 | Invalid tsma state | The vgroup of the stream computing result is inconsistent with the vgroup that created the TSMA index | Check error logs, contact development for handling |
|
||||||
| 0x80003104 | Invalid tsma pointer | Processing the results issued by stream computing, the message body is a null pointer. | Check error logs, contact development for handling |
|
| 0x80003104 | Invalid tsma pointer | Processing the results issued by stream computing, the message body is a null pointer. | Check error logs, contact development for handling |
|
||||||
| 0x80003105 | Invalid tsma parameters | Processing the results issued by stream computing, the result count is 0. | Check error logs, contact development for handling |
|
| 0x80003105 | Invalid tsma parameters | Processing the results issued by stream computing, the result count is 0. | Check error logs, contact development for handling |
|
||||||
|
| 0x80003113 | Tsma optimization cannot be applied with INTERVAL AUTO offset. | Tsma optimization cannot be enabled with INTERVAL AUTO OFFSET under the current query conditions. | Use SKIP_TSMA Hint or specify a manual INTERVAL OFFSET. |
|
||||||
| 0x80003150 | Invalid rsma env | Rsma execution environment is abnormal. | Check error logs, contact development for handling |
|
| 0x80003150 | Invalid rsma env | Rsma execution environment is abnormal. | Check error logs, contact development for handling |
|
||||||
| 0x80003151 | Invalid rsma state | Rsma execution state is abnormal. | Check error logs, contact development for handling |
|
| 0x80003151 | Invalid rsma state | Rsma execution state is abnormal. | Check error logs, contact development for handling |
|
||||||
| 0x80003152 | Rsma qtaskinfo creation error | Creating stream computing environment failed. | Check error logs, contact development for handling |
|
| 0x80003152 | Rsma qtaskinfo creation error | Creating stream computing environment failed. | Check error logs, contact development for handling |
|
||||||
|
|
|
@ -22,19 +22,19 @@
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.springframework</groupId>
|
<groupId>org.springframework</groupId>
|
||||||
<artifactId>spring-context</artifactId>
|
<artifactId>spring-context</artifactId>
|
||||||
<version>5.2.8.RELEASE</version>
|
<version>5.3.39</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.springframework</groupId>
|
<groupId>org.springframework</groupId>
|
||||||
<artifactId>spring-jdbc</artifactId>
|
<artifactId>spring-jdbc</artifactId>
|
||||||
<version>5.1.9.RELEASE</version>
|
<version>5.3.39</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.springframework</groupId>
|
<groupId>org.springframework</groupId>
|
||||||
<artifactId>spring-test</artifactId>
|
<artifactId>spring-test</artifactId>
|
||||||
<version>5.1.9.RELEASE</version>
|
<version>5.3.39</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
|
@ -47,7 +47,7 @@
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>com.taosdata.jdbc</groupId>
|
<groupId>com.taosdata.jdbc</groupId>
|
||||||
<artifactId>taos-jdbcdriver</artifactId>
|
<artifactId>taos-jdbcdriver</artifactId>
|
||||||
<version>3.0.0</version>
|
<version>3.4.0</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
</dependencies>
|
</dependencies>
|
||||||
|
|
|
@ -5,7 +5,7 @@
|
||||||
<parent>
|
<parent>
|
||||||
<groupId>org.springframework.boot</groupId>
|
<groupId>org.springframework.boot</groupId>
|
||||||
<artifactId>spring-boot-starter-parent</artifactId>
|
<artifactId>spring-boot-starter-parent</artifactId>
|
||||||
<version>2.2.1.RELEASE</version>
|
<version>2.6.15</version>
|
||||||
<relativePath/> <!-- lookup parent from repository -->
|
<relativePath/> <!-- lookup parent from repository -->
|
||||||
</parent>
|
</parent>
|
||||||
<groupId>com.taosdata.example</groupId>
|
<groupId>com.taosdata.example</groupId>
|
||||||
|
@ -65,6 +65,8 @@
|
||||||
<artifactId>spring-boot-starter-aop</artifactId>
|
<artifactId>spring-boot-starter-aop</artifactId>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>com.taosdata.jdbc</groupId>
|
<groupId>com.taosdata.jdbc</groupId>
|
||||||
<artifactId>taos-jdbcdriver</artifactId>
|
<artifactId>taos-jdbcdriver</artifactId>
|
||||||
|
|
|
@ -3,9 +3,10 @@ package com.taosdata.example.springbootdemo;
|
||||||
import org.mybatis.spring.annotation.MapperScan;
|
import org.mybatis.spring.annotation.MapperScan;
|
||||||
import org.springframework.boot.SpringApplication;
|
import org.springframework.boot.SpringApplication;
|
||||||
import org.springframework.boot.autoconfigure.SpringBootApplication;
|
import org.springframework.boot.autoconfigure.SpringBootApplication;
|
||||||
|
import org.springframework.boot.autoconfigure.data.jdbc.JdbcRepositoriesAutoConfiguration;
|
||||||
|
|
||||||
@MapperScan(basePackages = {"com.taosdata.example.springbootdemo"})
|
@MapperScan(basePackages = {"com.taosdata.example.springbootdemo"})
|
||||||
@SpringBootApplication
|
@SpringBootApplication(exclude = {JdbcRepositoriesAutoConfiguration.class})
|
||||||
public class SpringbootdemoApplication {
|
public class SpringbootdemoApplication {
|
||||||
|
|
||||||
public static void main(String[] args) {
|
public static void main(String[] args) {
|
||||||
|
|
|
@ -15,6 +15,8 @@ spring.datasource.druid.max-wait=30000
|
||||||
spring.datasource.druid.validation-query=select SERVER_VERSION();
|
spring.datasource.druid.validation-query=select SERVER_VERSION();
|
||||||
spring.aop.auto=true
|
spring.aop.auto=true
|
||||||
spring.aop.proxy-target-class=true
|
spring.aop.proxy-target-class=true
|
||||||
|
|
||||||
|
spring.jooq.sql-dialect=
|
||||||
#mybatis
|
#mybatis
|
||||||
mybatis.mapper-locations=classpath:mapper/*.xml
|
mybatis.mapper-locations=classpath:mapper/*.xml
|
||||||
logging.level.com.taosdata.jdbc.springbootdemo.dao=debug
|
logging.level.com.taosdata.jdbc.springbootdemo.dao=debug
|
||||||
|
|
|
@ -10,7 +10,7 @@
|
||||||
<description>Demo project for TDengine</description>
|
<description>Demo project for TDengine</description>
|
||||||
|
|
||||||
<properties>
|
<properties>
|
||||||
<spring.version>5.3.27</spring.version>
|
<spring.version>5.3.39</spring.version>
|
||||||
</properties>
|
</properties>
|
||||||
|
|
||||||
<dependencies>
|
<dependencies>
|
||||||
|
@ -130,6 +130,7 @@
|
||||||
<plugin>
|
<plugin>
|
||||||
<groupId>org.apache.maven.plugins</groupId>
|
<groupId>org.apache.maven.plugins</groupId>
|
||||||
<artifactId>maven-compiler-plugin</artifactId>
|
<artifactId>maven-compiler-plugin</artifactId>
|
||||||
|
<version>3.13.0</version>
|
||||||
<configuration>
|
<configuration>
|
||||||
<source>8</source>
|
<source>8</source>
|
||||||
<target>8</target>
|
<target>8</target>
|
||||||
|
|
|
@ -37,7 +37,7 @@ public class QueryService {
|
||||||
stmt.execute("use " + dbName);
|
stmt.execute("use " + dbName);
|
||||||
ResultSet rs = stmt.executeQuery("show stables");
|
ResultSet rs = stmt.executeQuery("show stables");
|
||||||
while (rs.next()) {
|
while (rs.next()) {
|
||||||
String name = rs.getString("name");
|
String name = rs.getString("stable_name");
|
||||||
sqls.add("select count(*) from " + dbName + "." + name);
|
sqls.add("select count(*) from " + dbName + "." + name);
|
||||||
sqls.add("select first(*) from " + dbName + "." + name);
|
sqls.add("select first(*) from " + dbName + "." + name);
|
||||||
sqls.add("select last(*) from " + dbName + "." + name);
|
sqls.add("select last(*) from " + dbName + "." + name);
|
||||||
|
|
|
@ -1,10 +1,14 @@
|
||||||
package com.taosdata.taosdemo.service;
|
package com.taosdata.taosdemo.service;
|
||||||
|
|
||||||
|
import com.zaxxer.hikari.HikariConfig;
|
||||||
|
import com.zaxxer.hikari.HikariDataSource;
|
||||||
|
import org.junit.BeforeClass;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
import org.springframework.beans.factory.annotation.Autowired;
|
import org.springframework.beans.factory.annotation.Autowired;
|
||||||
|
|
||||||
public class DatabaseServiceTest {
|
public class DatabaseServiceTest {
|
||||||
private DatabaseService service;
|
|
||||||
|
private static DatabaseService service;
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testCreateDatabase1() {
|
public void testCreateDatabase1() {
|
||||||
|
@ -20,4 +24,16 @@ public class DatabaseServiceTest {
|
||||||
public void useDatabase() {
|
public void useDatabase() {
|
||||||
service.useDatabase("test");
|
service.useDatabase("test");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@BeforeClass
|
||||||
|
public static void beforeClass() throws ClassNotFoundException {
|
||||||
|
Class.forName("com.taosdata.jdbc.TSDBDriver");
|
||||||
|
HikariConfig config = new HikariConfig();
|
||||||
|
config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8");
|
||||||
|
config.setUsername("root");
|
||||||
|
config.setPassword("taosdata");
|
||||||
|
HikariDataSource dataSource = new HikariDataSource(config);
|
||||||
|
service = new DatabaseService(dataSource);
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
|
@ -15,7 +15,7 @@ public class QueryServiceTest {
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void generateSuperTableQueries() {
|
public void generateSuperTableQueries() {
|
||||||
String[] sqls = queryService.generateSuperTableQueries("restful_test");
|
String[] sqls = queryService.generateSuperTableQueries("test");
|
||||||
for (String sql : sqls) {
|
for (String sql : sqls) {
|
||||||
System.out.println(sql);
|
System.out.println(sql);
|
||||||
}
|
}
|
||||||
|
@ -23,8 +23,8 @@ public class QueryServiceTest {
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void querySuperTable() {
|
public void querySuperTable() {
|
||||||
String[] sqls = queryService.generateSuperTableQueries("restful_test");
|
String[] sqls = queryService.generateSuperTableQueries("test");
|
||||||
queryService.querySuperTable(sqls, 1000, 10, 10);
|
queryService.querySuperTable(sqls, 100, 3, 3);
|
||||||
}
|
}
|
||||||
|
|
||||||
@BeforeClass
|
@BeforeClass
|
||||||
|
|
|
@ -3,6 +3,9 @@ package com.taosdata.taosdemo.service;
|
||||||
import com.taosdata.taosdemo.domain.FieldMeta;
|
import com.taosdata.taosdemo.domain.FieldMeta;
|
||||||
import com.taosdata.taosdemo.domain.SuperTableMeta;
|
import com.taosdata.taosdemo.domain.SuperTableMeta;
|
||||||
import com.taosdata.taosdemo.domain.TagMeta;
|
import com.taosdata.taosdemo.domain.TagMeta;
|
||||||
|
import com.zaxxer.hikari.HikariConfig;
|
||||||
|
import com.zaxxer.hikari.HikariDataSource;
|
||||||
|
import org.junit.BeforeClass;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
|
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
|
@ -10,7 +13,7 @@ import java.util.List;
|
||||||
|
|
||||||
public class SuperTableServiceTest {
|
public class SuperTableServiceTest {
|
||||||
|
|
||||||
private SuperTableService service;
|
private static SuperTableService service;
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testCreate() {
|
public void testCreate() {
|
||||||
|
@ -29,4 +32,15 @@ public class SuperTableServiceTest {
|
||||||
service.create(superTableMeta);
|
service.create(superTableMeta);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@BeforeClass
|
||||||
|
public static void beforeClass() throws ClassNotFoundException {
|
||||||
|
Class.forName("com.taosdata.jdbc.TSDBDriver");
|
||||||
|
HikariConfig config = new HikariConfig();
|
||||||
|
config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8");
|
||||||
|
config.setUsername("root");
|
||||||
|
config.setPassword("taosdata");
|
||||||
|
HikariDataSource dataSource = new HikariDataSource(config);
|
||||||
|
service = new SuperTableService(dataSource);
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
|
@ -1,4 +1,4 @@
|
||||||
package com.taosdata.example;
|
package com.taos.example;
|
||||||
|
|
||||||
import com.alibaba.druid.pool.DruidDataSource;
|
import com.alibaba.druid.pool.DruidDataSource;
|
||||||
|
|
||||||
|
@ -8,11 +8,11 @@ import java.sql.Statement;
|
||||||
public class DruidDemo {
|
public class DruidDemo {
|
||||||
// ANCHOR: connection_pool
|
// ANCHOR: connection_pool
|
||||||
public static void main(String[] args) throws Exception {
|
public static void main(String[] args) throws Exception {
|
||||||
String url = "jdbc:TAOS://127.0.0.1:6030/log";
|
String url = "jdbc:TAOS-WS://127.0.0.1:6041/log";
|
||||||
|
|
||||||
DruidDataSource dataSource = new DruidDataSource();
|
DruidDataSource dataSource = new DruidDataSource();
|
||||||
// jdbc properties
|
// jdbc properties
|
||||||
dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
|
dataSource.setDriverClassName("com.taosdata.jdbc.ws.WebSocketDriver");
|
||||||
dataSource.setUrl(url);
|
dataSource.setUrl(url);
|
||||||
dataSource.setUsername("root");
|
dataSource.setUsername("root");
|
||||||
dataSource.setPassword("taosdata");
|
dataSource.setPassword("taosdata");
|
||||||
|
|
|
@ -144,8 +144,9 @@ public class GeometryDemo {
|
||||||
|
|
||||||
private void executeQuery(String sql) {
|
private void executeQuery(String sql) {
|
||||||
long start = System.currentTimeMillis();
|
long start = System.currentTimeMillis();
|
||||||
try (Statement statement = connection.createStatement()) {
|
try (Statement statement = connection.createStatement();
|
||||||
ResultSet resultSet = statement.executeQuery(sql);
|
ResultSet resultSet = statement.executeQuery(sql)) {
|
||||||
|
|
||||||
long end = System.currentTimeMillis();
|
long end = System.currentTimeMillis();
|
||||||
printSql(sql, true, (end - start));
|
printSql(sql, true, (end - start));
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
package com.taosdata.example;
|
package com.taos.example;
|
||||||
|
|
||||||
import com.zaxxer.hikari.HikariConfig;
|
import com.zaxxer.hikari.HikariConfig;
|
||||||
import com.zaxxer.hikari.HikariDataSource;
|
import com.zaxxer.hikari.HikariDataSource;
|
||||||
|
@ -11,7 +11,7 @@ public class HikariDemo {
|
||||||
public static void main(String[] args) throws Exception {
|
public static void main(String[] args) throws Exception {
|
||||||
HikariConfig config = new HikariConfig();
|
HikariConfig config = new HikariConfig();
|
||||||
// jdbc properties
|
// jdbc properties
|
||||||
config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
|
config.setJdbcUrl("jdbc:TAOS-WS://127.0.0.1:6041/log");
|
||||||
config.setUsername("root");
|
config.setUsername("root");
|
||||||
config.setPassword("taosdata");
|
config.setPassword("taosdata");
|
||||||
// connection pool configurations
|
// connection pool configurations
|
||||||
|
|
|
@ -39,6 +39,7 @@ public class TelnetLineProtocolExample {
|
||||||
createDatabase(conn);
|
createDatabase(conn);
|
||||||
SchemalessWriter writer = new SchemalessWriter(conn);
|
SchemalessWriter writer = new SchemalessWriter(conn);
|
||||||
writer.write(lines, SchemalessProtocolType.TELNET, SchemalessTimestampType.NOT_CONFIGURED);
|
writer.write(lines, SchemalessProtocolType.TELNET, SchemalessTimestampType.NOT_CONFIGURED);
|
||||||
|
writer.close();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -95,8 +95,8 @@ async function all_type_example() {
|
||||||
tagParams.setBoolean([true]);
|
tagParams.setBoolean([true]);
|
||||||
tagParams.setVarchar(["hello"]);
|
tagParams.setVarchar(["hello"]);
|
||||||
tagParams.setNchar(["stmt"]);
|
tagParams.setNchar(["stmt"]);
|
||||||
tagParams.setGeometry([geometryData]);
|
|
||||||
tagParams.setVarBinary([vbData]);
|
tagParams.setVarBinary([vbData]);
|
||||||
|
tagParams.setGeometry([geometryData]);
|
||||||
await stmt.setTags(tagParams);
|
await stmt.setTags(tagParams);
|
||||||
|
|
||||||
|
|
||||||
|
@ -108,8 +108,8 @@ async function all_type_example() {
|
||||||
bindParams.setBoolean([true]);
|
bindParams.setBoolean([true]);
|
||||||
bindParams.setVarchar(["hello"]);
|
bindParams.setVarchar(["hello"]);
|
||||||
bindParams.setNchar(["stmt"]);
|
bindParams.setNchar(["stmt"]);
|
||||||
bindParams.setGeometry([geometryData]);
|
|
||||||
bindParams.setVarBinary([vbData]);
|
bindParams.setVarBinary([vbData]);
|
||||||
|
bindParams.setGeometry([geometryData]);
|
||||||
|
|
||||||
await stmt.bind(bindParams);
|
await stmt.bind(bindParams);
|
||||||
await stmt.batch();
|
await stmt.batch();
|
||||||
|
|
|
@ -40,7 +40,6 @@ async function prepare() {
|
||||||
let conf = new taos.WSConfig('ws://localhost:6041');
|
let conf = new taos.WSConfig('ws://localhost:6041');
|
||||||
conf.setUser('root');
|
conf.setUser('root');
|
||||||
conf.setPwd('taosdata');
|
conf.setPwd('taosdata');
|
||||||
conf.setDb('power');
|
|
||||||
const createDB = `CREATE DATABASE IF NOT EXISTS ${db}`;
|
const createDB = `CREATE DATABASE IF NOT EXISTS ${db}`;
|
||||||
const createStable = `CREATE STABLE IF NOT EXISTS ${db}.${stable} (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);`;
|
const createStable = `CREATE STABLE IF NOT EXISTS ${db}.${stable} (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);`;
|
||||||
|
|
||||||
|
|
|
@ -34,10 +34,10 @@ async function createConsumer() {
|
||||||
}
|
}
|
||||||
|
|
||||||
async function prepare() {
|
async function prepare() {
|
||||||
let conf = new taos.WSConfig('ws://192.168.1.98:6041');
|
let conf = new taos.WSConfig('ws://localhost:6041');
|
||||||
conf.setUser('root');
|
conf.setUser('root');
|
||||||
conf.setPwd('taosdata');
|
conf.setPwd('taosdata');
|
||||||
conf.setDb('power');
|
|
||||||
const createDB = `CREATE DATABASE IF NOT EXISTS ${db}`;
|
const createDB = `CREATE DATABASE IF NOT EXISTS ${db}`;
|
||||||
const createStable = `CREATE STABLE IF NOT EXISTS ${db}.${stable} (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);`;
|
const createStable = `CREATE STABLE IF NOT EXISTS ${db}.${stable} (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);`;
|
||||||
|
|
||||||
|
|
|
@ -303,13 +303,12 @@ Query OK, 5 row(s) in set (0.016812s)
|
||||||
|
|
||||||
#### FILL 子句
|
#### FILL 子句
|
||||||
|
|
||||||
FILL 子句,用于指定某一窗口区间数据缺失的情况下的填充模式。填充模式包括以下几种:
|
|
||||||
1. 不进行填充:NONE(默认填充模式)。
|
1. 不进行填充:NONE(默认填充模式)。
|
||||||
2. VALUE 填充:固定值填充,此时需要指定填充的数值。例如:FILL(VALUE, 1.23)。这里需要注意,最终填充的值受由相应列的类型决定,如 FILL(VALUE, 1.23),相应列为 INT 类型,则填充值为 1。
|
2. VALUE 填充:固定值填充,此时需要指定填充的数值。例如:FILL(VALUE, 1.23)。这里需要注意,最终填充的值受由相应列的类型决定,如 FILL(VALUE, 1.23),相应列为 INT 类型,则填充值为 1, 若查询列表中有多列需要 FILL, 则需要给每一个 FILL 列指定 VALUE, 如 `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`, 注意, SELECT 表达式中只有包含普通列时才需要指定 FILL VALUE, 如 `_wstart`, `_wstart+1a`, `now`, `1+1` 以及使用 partition by 时的 partition key (如 tbname)都不需要指定 VALUE, 如 `timediff(last(ts), _wstart)` 则需要指定VALUE。
|
||||||
3. PREV 填充:使用前一个非 NULL 值填充数据。例如:FILL(PREV)。
|
3. PREV 填充:使用前一个非 NULL 值填充数据。例如:FILL(PREV)。
|
||||||
4. NULL 填充:使用 NULL 填充数据。例如:FILL(NULL)。
|
4. NULL 填充:使用 NULL 填充数据。例如:FILL(NULL)。
|
||||||
5. LINEAR 填充:根据前后距离最近的非 NULL 值做线性插值填充。例如:FILL(LINEAR)。
|
5. LINEAR 填充:根据前后距离最近的非 NULL 值做线性插值填充。例如:FILL(LINEAR)。
|
||||||
6. NEXT 填充:使用下一个非 NULL 值填充数据。例如:FILL(NEXT)。
|
6. NEXT 填充:使用下一个非 NULL 值填充数据。例如:FILL(NEXT)。
|
||||||
|
|
||||||
以上填充模式中,除了 NONE 模式默认不填充值之外,其他模式在查询的整个时间范围内如果没有数据 FILL 子句将被忽略,即不产生填充数据,查询结果为空。这种行为在部分模式(PREV、NEXT、LINEAR)下具有合理性,因为在这些模式下没有数据意味着无法产生填充数值。
|
以上填充模式中,除了 NONE 模式默认不填充值之外,其他模式在查询的整个时间范围内如果没有数据 FILL 子句将被忽略,即不产生填充数据,查询结果为空。这种行为在部分模式(PREV、NEXT、LINEAR)下具有合理性,因为在这些模式下没有数据意味着无法产生填充数值。
|
||||||
|
|
||||||
|
|
|
@ -150,7 +150,7 @@ CSV 文件中的每个 Row 配置一个 OPC 数据点位。Row 的规则如下
|
||||||
|
|
||||||
#### 5.2. 选择数据点位
|
#### 5.2. 选择数据点位
|
||||||
|
|
||||||
可以通过配置 **根节点ID**、**命名空间**、**正则匹配** 等条件,对点位进行筛选。
|
可以通过配置 **根节点ID**、**命名空间**、**节点ID**、**节点名称** 等条件,对点位进行筛选。
|
||||||
|
|
||||||
通过配置 **超级表名**、**表名称**,指定数据要写入的超级表、子表。
|
通过配置 **超级表名**、**表名称**,指定数据要写入的超级表、子表。
|
||||||
|
|
||||||
|
|
|
@ -126,7 +126,7 @@ CSV 文件中的每个 Row 配置一个 OPC 数据点位。Row 的规则如下
|
||||||
|
|
||||||
#### 4.2. 选择数据点位
|
#### 4.2. 选择数据点位
|
||||||
|
|
||||||
可以通过配置 **根节点ID** 和 **正则匹配** 作为过滤条件,对点位进行筛选。
|
可以通过配置 **根节点ID**、**节点ID**、**节点名称** 作为过滤条件,对点位进行筛选。
|
||||||
|
|
||||||
通过配置 **超级表名**、**表名称**,指定数据要写入的超级表、子表。
|
通过配置 **超级表名**、**表名称**,指定数据要写入的超级表、子表。
|
||||||
|
|
||||||
|
|
|
@ -63,7 +63,11 @@ TDengine 可以通过 MQTT 连接器从 MQTT 代理订阅数据并将其写入 T
|
||||||
|
|
||||||
在 **Clean Session** 中,选择是否清除会话。默认值为 true。
|
在 **Clean Session** 中,选择是否清除会话。默认值为 true。
|
||||||
|
|
||||||
在 **订阅主题及 QoS 配置** 中填写要消费的 Topic 名称。使用如下格式设置: `topic1::0,topic2::1`。
|
在 **订阅主题及 QoS 配置** 中填写要消费的 Topic 名称和 QoS。使用如下格式设置: `{topic_name}::{qos}`(如:`my_topic::0`)。MQTT 协议 5.0 支持共享订阅,可以通过多个客户端订阅同一个 Topic 实现负载均衡,使用如下格式: `$share/{group_name}/{topic_name}::{qos}`,其中,`$share` 是固定前缀,表示启用共享订阅,`group_name` 是分组名称,类似 kafka 的消费者组。
|
||||||
|
|
||||||
|
在 **数据压缩** 中,配置消息体压缩算法,taosX 在接收到消息后,使用对应的压缩算法对消息体进行解压缩获取原始数据。可选项 none(不压缩), gzip, snappy, lz4 和 zstd,默认为 none。
|
||||||
|
|
||||||
|
在 **字符编码** 中,配置消息体编码格式,taosX 在接收到消息后,使用对应的编码格式对消息体进行解码获取原始数据。可选项 UTF_8, GBK, GB18030, BIG5,默认为 UTF_8
|
||||||
|
|
||||||
点击 **检查连通性** 按钮,检查数据源是否可用。
|
点击 **检查连通性** 按钮,检查数据源是否可用。
|
||||||
|
|
||||||
|
@ -146,7 +150,13 @@ json 数据支持 JSONObject 或者 JSONArray,使用 json 解析器可以解
|
||||||
|
|
||||||
### 7. 高级选项
|
### 7. 高级选项
|
||||||
|
|
||||||
在 **日志级别** 下拉列表中选择日志级别。有五个选项:`TRACE`、`DEBUG`、`INFO`、`WARN`、`ERROR`。 默认值为 INFO。
|
在 **消息等待队列大小** 中填写接收 MQTT 消息的缓存队列大小,当队列满时,新到达的数据会直接丢弃。可设置为 0,即不缓存。
|
||||||
|
|
||||||
|
在 **处理批次上限** 中填写可以同时进行数据处理流程的批次数量,当到达此上限后,不再从消息缓存队列中获取消息,会导致缓存队列的消息积压,最小值为 1。
|
||||||
|
|
||||||
|
在 **批次大小** 中填写每次发送给数据处理流程的消息数量,和 **批次延时** 配合使用,当读取的 MQTT 消息数量达到批次大小时,就算 **批次延时** 没有到达也立即向数据处理流程发送数据,最小值为 1。
|
||||||
|
|
||||||
|
在 **批次延时** 中填写每次生成批次消息的超时时间(单位:毫秒),从每批次接收到的第一个消息开始算起,和 **批次大小** 配合使用,当读取消息到达超时时间时,就算 **批次大小** 不满足数量也立即向数据处理流程发送数据,最小值为 1。
|
||||||
|
|
||||||
当 **保存原始数据时**,以下2个参数配置生效。
|
当 **保存原始数据时**,以下2个参数配置生效。
|
||||||
|
|
||||||
|
|
|
@ -113,6 +113,8 @@ kcat <topic> \
|
||||||
|
|
||||||
在 **获取数据的最大时长** 中设置获取消息时等待数据不足的最长时间(以毫秒为单位),默认值为 100ms。
|
在 **获取数据的最大时长** 中设置获取消息时等待数据不足的最长时间(以毫秒为单位),默认值为 100ms。
|
||||||
|
|
||||||
|
在 **字符编码** 中,配置消息体编码格式,taosX 在接收到消息后,使用对应的编码格式对消息体进行解码获取原始数据。可选项 UTF_8, GBK, GB18030, BIG5,默认为 UTF_8
|
||||||
|
|
||||||
点击 **连通性检查** 按钮,检查数据源是否可用。
|
点击 **连通性检查** 按钮,检查数据源是否可用。
|
||||||
|
|
||||||

|

|
||||||
|
|
|
@ -2,86 +2,123 @@
|
||||||
title: "CSV"
|
title: "CSV"
|
||||||
sidebar_label: "CSV"
|
sidebar_label: "CSV"
|
||||||
---
|
---
|
||||||
本节讲述如何通过 Explorer 界面创建数据迁移任务, 从 CSV 迁移数据到当前 TDengine 集群。
|
本节讲述如何通过 Explorer 界面创建数据迁移任务,从 CSV 迁移数据到当前 TDengine 集群。
|
||||||
|
|
||||||
## 功能概述
|
## 功能概述
|
||||||
导入一个或多个 CSV 文件数据到 TDengine。
|
导入一个或多个 CSV 文件数据到 TDengine。
|
||||||
|
|
||||||
## 创建任务
|
## 创建任务
|
||||||
### 1. 新增数据源
|
### 1. 新增数据源
|
||||||
在数据写入页面中,点击 **+新增数据源** 按钮,进入新增数据源页面。
|
在数据写入任务列表页面中,点击 **+新建任务** 按钮,进入新建任务页面。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### 2. 配置基本信息
|
### 2. 配置基本信息
|
||||||
在 **名称** 中输入任务名称,如:“test_csv”;
|
在 **名称** 中输入任务名称,如:“test_csv”。
|
||||||
|
|
||||||
在 **类型** 下拉列表中选择 **CSV**。
|
在 **类型** 下拉列表中选择 **CSV**。
|
||||||
|
|
||||||
在 **目标数据库** 下拉列表中选择一个目标数据库,也可以先点击右侧的 **+创建数据库** 按钮
|
在 **目标数据库** 下拉列表中选择一个目标数据库,也可以先点击右侧的 **+创建数据库** 按钮。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### 3. 配置 CSV 选项
|
### 3. 配置 CSV 选项
|
||||||
在 **包含表头** 区域点击开启或关闭,如果包含表头,则第一行将被视为列信息。
|
在 **包含表头** 区域点击开启或关闭,如果包含表头,则 CSV 文件内容第一行将被视为列信息。
|
||||||
|
|
||||||
在 **忽略前 N 行** 区域填写 N,表示忽略 CSV 文件的前 N 行。
|
在 **忽略前 N 行** 区域填写数字 N,表示忽略 CSV 文件的前 N 行。
|
||||||
|
|
||||||
在 **字段分隔符** 区域进行选择,CSV 字段之间的分隔符,默认是 “,” 。
|
在 **字段分隔符** 区域选择 CSV 字段分隔符,用于分隔行内容为多个字段,默认是 `,`。
|
||||||
|
|
||||||
在 **字段引用符** 区域进行选择,当 CSV 字段中包含分隔符或换行符时,用于包围字段内容,以确保整个字段被正确识别,默认是 "“"。
|
在 **字段引用符** 区域选择 CSV 字段引用符,当 CSV 字段中包含分隔符或换行符时,用于包围字段内容,以确保整个字段被正确识别,默认是 `"`。
|
||||||
|
|
||||||
在 **注释前缀符** 区域进行选择,当 CSV 文件中某行以此处指定的字符开头,则忽略该行默认是 “#”。
|
在 **注释前缀符** 区域选择 CSV 行注释前缀符,当 CSV 文件中某行以此处指定的字符开头,则忽略该行,默认是 `#`。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### 4. 配置解析 CSV 文件
|
### 4. 配置解析 CSV 文件
|
||||||
在本地上传 CSV 文件,例如:test-json.csv,之后会使用这条示例 csv 文件来配置提取和过滤条件。
|
|
||||||
|
|
||||||
#### 4.1 解析
|
#### 4.1 配置数据源
|
||||||
|
|
||||||
点击 **选取文件** 后,选择 test-json.csv,点击 **解析** 预览识别的列。
|
包含“上传 CSV 文件”与“监听文件目录”两种方式,“上传 CSV 文件”是指将本地文件通过浏览器上传到 taosx 所在服务器作为数据源,“监听文件目录”是指配置一个 taosx 所在服务器的绝对路径作为数据源,以下将分别进行介绍:
|
||||||
|
|
||||||
|
##### 4.1.1 上传 CSV 文件
|
||||||
|
|
||||||
|
在“上传 CSV 文件”标签页中:
|
||||||
|
|
||||||
|
点击 **选取文件** 按钮,选取一个或多个本地文件,上传到服务器作为数据源。
|
||||||
|
|
||||||
|
在 **保留已处理文件** 区域点击开启或关闭,如果开启,则文件被处理完成后仍会保留在服务器中,如果关闭,则将被删除。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
**预览解析结果**
|
##### 4.1.2 监听文件目录
|
||||||
|
|
||||||
|
在“监听文件目录”标签页中:
|
||||||
|
|
||||||
|
在 **文件监听目录** 中输入一个 taosx 所在服务器的绝对路径,路径中包含的文件及子目录文件将作为数据源。
|
||||||
|
|
||||||
|
在 **匹配模式** 中输入一个正则表达式,用于筛选过滤目录中的文件。
|
||||||
|
|
||||||
|
在 **监听新文件** 区域点击开启或关闭,如果开启,则任务永不停止,且持续处理目录中新增的文件,如果关闭,则不处理新增文件,且初始文件处理结束后任务变为完成状态。
|
||||||
|
|
||||||
|
在 **监听间隔** 中输入一个数字,用于配置监听新文件的时间间隔。
|
||||||
|
|
||||||
|
在 **文件处理顺序** 区域选择“正序”或“倒序”,用于指定文件列表的处理先后顺序,“正序”将按照文件名的字典序正序处理,“倒序”将按照文件名的字典序倒序处理,与此同时,程序总是保持先处理文件后处理同级子目录的顺序。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
#### 4.2 字段拆分
|
#### 4.2 解析
|
||||||
|
|
||||||
在 **从列中提取或拆分** 中填写从消息体中提取或拆分的字段,例如:将 message 字段拆分成 `text_0` 和 `text_1` 这2个字段,选择 split 提取器,seperator 填写 -, number 填写2。
|
上传文件或配置监听目录后,点击解析按钮,页面将获取文件中的示例数据,同时得到识别的列与示例数据解析结果:
|
||||||
点击 **删除**,可以删除当前提取规则。
|
|
||||||
点击 **新增**,可以添加更多提取规则。
|
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
点击 **放大镜图标** 可预览提取或拆分结果。
|
#### 4.2 从列中提取或拆分
|
||||||
|
|
||||||
|
在 **从列中提取或拆分** 中填写从消息体中提取或拆分规则,例如:将 `desc` 字段拆分为 `desc_0` 与 `desc_1` 两个字段,可以选择 split 规则,separator 填写 `,`,number 填写 2 即可。
|
||||||
|
|
||||||
|
点击 **删除** 可以删除当前提取规则。
|
||||||
|
|
||||||
|
点击 **预览** 可以预览提取或拆分结果。
|
||||||
|
|
||||||
|
点击 **新增提取/拆分** 可以添加更多提取规则。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
<!-- 在 **过滤** 中,填写过滤条件,例如:填写 `id != 1`,则只有 id 不为 1 的数据才会被写入 TDengine。
|
#### 4.3 过滤
|
||||||
点击 **删除**,可以删除当前过滤规则。
|
|
||||||
|
在 **过滤** 中填写过滤条件,例如:填写 `id != "1"`,则只有 id 不为 1 的数据才会被处理。
|
||||||
|
|
||||||
|
点击 **删除** 可以删除当前过滤规则。
|
||||||
|
|
||||||
|
点击 **预览** 可以预览过滤结果。
|
||||||
|
|
||||||
|
点击 **新增过滤** 可以添加更多过滤规则。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
点击 **放大镜图标** 可查看预览过滤结果。
|
#### 4.4 映射
|
||||||
|
|
||||||
 -->
|
|
||||||
|
|
||||||
#### 4.3 表映射
|
|
||||||
|
|
||||||
在 **目标超级表** 的下拉列表中选择一个目标超级表,也可以先点击右侧的 **创建超级表** 按钮
|
在 **目标超级表** 的下拉列表中选择一个目标超级表,也可以先点击右侧的 **创建超级表** 按钮
|
||||||
|
|
||||||
在 **映射** 中,填写目标超级表中的子表名称,例如:`t_${groupid}`。
|
在映射规则中,填写目标超级表中的子表名称,例如:`csv_meter_${id}`,同时配置映射到超级表的列。
|
||||||
|
|
||||||

|
点击 **预览** 可以预览映射的结果。
|
||||||
|
|
||||||
点击 **预览**,可以预览映射的结果。
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
### 5. 创建完成
|
### 5. 创建完成
|
||||||
|
|
||||||
点击 **提交** 按钮,完成创建 CSV 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。
|
点击 **提交** 按钮,完成创建 CSV 到 TDengine 的数据同步任务,回到数据写入任务列表页面,可查看任务执行情况,也可以进行任务的“启动/停止”操作与“查看/编辑/删除/复制”操作。
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
### 6. 查看运行指标
|
||||||
|
|
||||||
|
点击 **查看** 按钮,查看任务的运行指标,同时也可以查看任务中所有文件的处理情况。
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|

|
|
@ -17,7 +17,7 @@ TDengine 可以高效地从 AVEVA Historian 读取数据并将其写入 TDengine
|
||||||
|
|
||||||
在数据写入页面中,点击 **+新增数据源** 按钮,进入新增数据源页面。
|
在数据写入页面中,点击 **+新增数据源** 按钮,进入新增数据源页面。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### 2. 配置基本信息
|
### 2. 配置基本信息
|
||||||
|
|
||||||
|
@ -25,11 +25,11 @@ TDengine 可以高效地从 AVEVA Historian 读取数据并将其写入 TDengine
|
||||||
|
|
||||||
在 **类型** 下拉列表中选择 **AVEVA Historian**。
|
在 **类型** 下拉列表中选择 **AVEVA Historian**。
|
||||||
|
|
||||||
**代理** 是非必填项,如有需要,可以在下拉框中选择指定的代理,也可以先点击右侧的 **+创建新的代理** 按钮
|
**代理** 是非必填项,如有需要,可以在下拉框中选择指定的代理,也可以先点击右侧的 **+创建新的代理** 按钮
|
||||||
|
|
||||||
在 **目标数据库** 下拉列表中选择一个目标数据库,也可以先点击右侧的 **+创建数据库** 按钮
|
在 **目标数据库** 下拉列表中选择一个目标数据库,也可以先点击右侧的 **+创建数据库** 按钮
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### 3. 配置连接信息
|
### 3. 配置连接信息
|
||||||
|
|
||||||
|
@ -39,7 +39,7 @@ TDengine 可以高效地从 AVEVA Historian 读取数据并将其写入 TDengine
|
||||||
|
|
||||||
点击 **连通性检查** 按钮,检查数据源是否可用。
|
点击 **连通性检查** 按钮,检查数据源是否可用。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### 4. 配置采集信息
|
### 4. 配置采集信息
|
||||||
|
|
||||||
|
@ -61,7 +61,7 @@ TDengine 可以高效地从 AVEVA Historian 读取数据并将其写入 TDengine
|
||||||
|
|
||||||
在 **查询的时间窗口** 中,填写一个时间间隔,数据迁移任务将按照这个时间间隔划分时间窗口。
|
在 **查询的时间窗口** 中,填写一个时间间隔,数据迁移任务将按照这个时间间隔划分时间窗口。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
#### 4.2. 同步 History 表的数据
|
#### 4.2. 同步 History 表的数据
|
||||||
|
|
||||||
|
@ -83,7 +83,7 @@ TDengine 可以高效地从 AVEVA Historian 读取数据并将其写入 TDengine
|
||||||
|
|
||||||
在 **乱序时间上限** 中,填写一个时间间隔,实时数据同步过程中,超过这个时间才入库的数据可能会丢失。
|
在 **乱序时间上限** 中,填写一个时间间隔,实时数据同步过程中,超过这个时间才入库的数据可能会丢失。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
#### 4.3. 同步 Live 表的数据
|
#### 4.3. 同步 Live 表的数据
|
||||||
|
|
||||||
|
@ -97,7 +97,7 @@ TDengine 可以高效地从 AVEVA Historian 读取数据并将其写入 TDengine
|
||||||
|
|
||||||
在 **实时同步的时间间隔** 中,填写一个时间间隔,实时数据部分将按照这个时间间隔轮询数据。
|
在 **实时同步的时间间隔** 中,填写一个时间间隔,实时数据部分将按照这个时间间隔轮询数据。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### 5. 配置数据映射
|
### 5. 配置数据映射
|
||||||
|
|
||||||
|
@ -105,7 +105,8 @@ TDengine 可以高效地从 AVEVA Historian 读取数据并将其写入 TDengine
|
||||||
|
|
||||||
点击 **从服务器检索** 按钮,从 AVEVA Historian 服务器获取示例数据。
|
点击 **从服务器检索** 按钮,从 AVEVA Historian 服务器获取示例数据。
|
||||||
|
|
||||||
在 **从列中提取或拆分** 中填写从消息体中提取或拆分的字段,例如:将 vValue 字段拆分成 `vValue_0` 和 `vValue_1` 这 2 个字段,选择 split 提取器,seperator 填写分割符 `,`, number 填写 2。
|
在 **从列中提取或拆分** 中填写从消息体中提取或拆分的字段,例如:将 vValue 字段拆分成 `vValue_0` 和 `vValue_1` 这 2 个字段,选择
|
||||||
|
split 提取器,seperator 填写分割符 `,`, number 填写 2。
|
||||||
|
|
||||||
在 **过滤** 中,填写过滤条件,例如:填写`Value > 0`,则只有 Value 大于 0 的数据才会被写入 TDengine。
|
在 **过滤** 中,填写过滤条件,例如:填写`Value > 0`,则只有 Value 大于 0 的数据才会被写入 TDengine。
|
||||||
|
|
||||||
|
@ -113,7 +114,7 @@ TDengine 可以高效地从 AVEVA Historian 读取数据并将其写入 TDengine
|
||||||
|
|
||||||
点击 **预览**,可以查看映射的结果。
|
点击 **预览**,可以查看映射的结果。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### 6. 配置高级选项
|
### 6. 配置高级选项
|
||||||
|
|
||||||
|
@ -131,7 +132,7 @@ TDengine 可以高效地从 AVEVA Historian 读取数据并将其写入 TDengine
|
||||||
|
|
||||||
在 **原始数据存储目录** 中设置原始数据保存路径。
|
在 **原始数据存储目录** 中设置原始数据保存路径。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### 7. 创建完成
|
### 7. 创建完成
|
||||||
|
|
Before Width: | Height: | Size: 36 KiB After Width: | Height: | Size: 23 KiB |
Before Width: | Height: | Size: 18 KiB After Width: | Height: | Size: 8.9 KiB |
Before Width: | Height: | Size: 79 KiB After Width: | Height: | Size: 17 KiB |
Before Width: | Height: | Size: 57 KiB After Width: | Height: | Size: 12 KiB |
Before Width: | Height: | Size: 29 KiB After Width: | Height: | Size: 17 KiB |
Before Width: | Height: | Size: 22 KiB After Width: | Height: | Size: 34 KiB |
Before Width: | Height: | Size: 31 KiB After Width: | Height: | Size: 43 KiB |
After Width: | Height: | Size: 26 KiB |
After Width: | Height: | Size: 80 KiB |
Before Width: | Height: | Size: 42 KiB After Width: | Height: | Size: 41 KiB |
Before Width: | Height: | Size: 31 KiB After Width: | Height: | Size: 177 KiB |
Before Width: | Height: | Size: 50 KiB After Width: | Height: | Size: 192 KiB |
Before Width: | Height: | Size: 363 KiB After Width: | Height: | Size: 71 KiB |
Before Width: | Height: | Size: 118 KiB |
Before Width: | Height: | Size: 48 KiB After Width: | Height: | Size: 51 KiB |
Before Width: | Height: | Size: 104 KiB After Width: | Height: | Size: 53 KiB |
Before Width: | Height: | Size: 40 KiB After Width: | Height: | Size: 62 KiB |
Before Width: | Height: | Size: 36 KiB After Width: | Height: | Size: 36 KiB |
Before Width: | Height: | Size: 71 KiB After Width: | Height: | Size: 71 KiB |
Before Width: | Height: | Size: 92 KiB After Width: | Height: | Size: 92 KiB |
Before Width: | Height: | Size: 120 KiB After Width: | Height: | Size: 120 KiB |
Before Width: | Height: | Size: 142 KiB After Width: | Height: | Size: 142 KiB |
Before Width: | Height: | Size: 74 KiB After Width: | Height: | Size: 74 KiB |
Before Width: | Height: | Size: 247 KiB After Width: | Height: | Size: 247 KiB |
Before Width: | Height: | Size: 124 KiB After Width: | Height: | Size: 124 KiB |
Before Width: | Height: | Size: 46 KiB After Width: | Height: | Size: 51 KiB |
Before Width: | Height: | Size: 62 KiB After Width: | Height: | Size: 58 KiB |
|
@ -18,6 +18,7 @@ TDengine 面向多种写入场景,而很多写入场景下,TDengine 的存
|
||||||
|
|
||||||
```SQL
|
```SQL
|
||||||
COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY'];
|
COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY'];
|
||||||
|
COMPACT [db_name.]VGROUPS IN (vgroup_id1, vgroup_id2, ...) [start with 'XXXX'] [end with 'YYYY'];
|
||||||
SHOW COMPACTS [compact_id];
|
SHOW COMPACTS [compact_id];
|
||||||
KILL COMPACT compact_id;
|
KILL COMPACT compact_id;
|
||||||
```
|
```
|
||||||
|
@ -25,6 +26,7 @@ KILL COMPACT compact_id;
|
||||||
### 效果
|
### 效果
|
||||||
|
|
||||||
- 扫描并压缩指定的 DB 中所有 VGROUP 中 VNODE 的所有数据文件
|
- 扫描并压缩指定的 DB 中所有 VGROUP 中 VNODE 的所有数据文件
|
||||||
|
- 扫描并压缩 DB 中指定的 VGROUP 列表中 VNODE 的所有数据文件, 若 db_name 为空,则默认为当前数据库
|
||||||
- COMPCAT 会删除被删除数据以及被删除的表的数据
|
- COMPCAT 会删除被删除数据以及被删除的表的数据
|
||||||
- COMPACT 会合并多个 STT 文件
|
- COMPACT 会合并多个 STT 文件
|
||||||
- 可通过 start with 关键字指定 COMPACT 数据的起始时间
|
- 可通过 start with 关键字指定 COMPACT 数据的起始时间
|
||||||
|
|
|
@ -12,13 +12,14 @@ TDengine 默认仅配置了一个 root 用户,该用户拥有最高权限。TD
|
||||||
|
|
||||||
创建用户的操作只能由 root 用户进行,语法如下。
|
创建用户的操作只能由 root 用户进行,语法如下。
|
||||||
```sql
|
```sql
|
||||||
create user user_name pass'password' [sysinfo {1|0}]
|
create user user_name pass'password' [sysinfo {1|0}] [createdb {1|0}]
|
||||||
```
|
```
|
||||||
|
|
||||||
相关参数说明如下。
|
相关参数说明如下。
|
||||||
- user_name:用户名最长不超过 23 个字节。
|
- user_name:用户名最长不超过 23 个字节。
|
||||||
- password:密码长度必须为 8 到 16 位,并且至少包含大写字母、小写字母、数字、特殊字符中的三类。特殊字符包括 `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`。
|
- password:密码长度必须为 8 到 16 位,并且至少包含大写字母、小写字母、数字、特殊字符中的三类。特殊字符包括 `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`。
|
||||||
- sysinfo :用户是否可以查看系统信息。1 表示可以查看,0 表示不可以查看。系统信息包括服务端配置信息、服务端各种节点信息,如 dnode、查询节点(qnode)等,以及与存储相关的信息等。默认为可以查看系统信息。
|
- sysinfo :用户是否可以查看系统信息。1 表示可以查看,0 表示不可以查看。系统信息包括服务端配置信息、服务端各种节点信息,如 dnode、查询节点(qnode)等,以及与存储相关的信息等。默认为可以查看系统信息。
|
||||||
|
- createdb:用户是否可以创建数据库。1 表示可以创建,0 表示不可以创建。缺省值为 0。// 从 TDengine 企业版 3.3.2.0 开始支持
|
||||||
|
|
||||||
如下 SQL 可以创建密码为 abc123!@# 且可以查看系统信息的用户 test。
|
如下 SQL 可以创建密码为 abc123!@# 且可以查看系统信息的用户 test。
|
||||||
|
|
||||||
|
@ -47,6 +48,7 @@ alter_user_clause: {
|
||||||
pass 'literal'
|
pass 'literal'
|
||||||
| enable value
|
| enable value
|
||||||
| sysinfo value
|
| sysinfo value
|
||||||
|
| createdb value
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -54,6 +56,7 @@ alter_user_clause: {
|
||||||
- pass:修改用户密码。
|
- pass:修改用户密码。
|
||||||
- enable:是否启用用户。1 表示启用此用户,0 表示禁用此用户。
|
- enable:是否启用用户。1 表示启用此用户,0 表示禁用此用户。
|
||||||
- sysinfo :用户是否可查看系统信息。1 表示可以查看系统信息,0 表示不可以查看系统信息
|
- sysinfo :用户是否可查看系统信息。1 表示可以查看系统信息,0 表示不可以查看系统信息
|
||||||
|
- createdb:用户是否可创建数据库。1 表示可以创建数据库,0 表示不可以创建数据库。// 从 TDengine 企业版 3.3.2.0 开始支持
|
||||||
|
|
||||||
如下 SQL 禁用 test 用户。
|
如下 SQL 禁用 test 用户。
|
||||||
```sql
|
```sql
|
||||||
|
|
|
@ -0,0 +1,14 @@
|
||||||
|
---
|
||||||
|
sidebar_label: 分析调试
|
||||||
|
title: 分析调试
|
||||||
|
toc_max_heading_level: 4
|
||||||
|
---
|
||||||
|
为了更好的分析调试 TDengine ,推荐开发者在操作系统中安装以下分析调试工具:
|
||||||
|
## gdb
|
||||||
|
GDB(GNU Debugger)是一个功能强大的命令行调试器,广泛用于调试 C、C++ 和其他编程语言的程序。
|
||||||
|
## valgrind
|
||||||
|
valgrind 是一个用于内存调试、内存泄漏检测和性能分析的工具框架。Valgrind 提供了一组工具,帮助开发者检测和修复程序中的内存错误、线程错误和性能问题。
|
||||||
|
## bpftrace
|
||||||
|
bpftrace 是一个高级的动态跟踪工具,基于 eBPF(Extended Berkeley Packet Filter)技术,用于在 Linux 系统上进行性能分析和故障排除。
|
||||||
|
## perf
|
||||||
|
perf 是一个强大的 Linux 性能分析工具。它提供了对系统和应用程序的详细性能分析,帮助开发者和系统管理员识别和解决性能瓶颈。
|
|
@ -26,65 +26,67 @@ taosd 命令行参数如下
|
||||||
:::
|
:::
|
||||||
|
|
||||||
### 连接相关
|
### 连接相关
|
||||||
|参数名称|支持版本|参数含义|
|
|参数名称|支持版本|动态修改|参数含义|
|
||||||
|-----------------------|-------------------------|------------|
|
|-----------------------|-------------------------|-------------------------|------------|
|
||||||
|firstEp | |taosd 启动时,主动连接的集群中首个 dnode 的 end point,默认值 localhost:6030|
|
|firstEp | |不支持动态修改 |taosd 启动时,主动连接的集群中首个 dnode 的 end point,默认值 localhost:6030|
|
||||||
|secondEp | |taosd 启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint,无默认值|
|
|secondEp | |不支持动态修改 |taosd 启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint,无默认值|
|
||||||
|fqdn | |taosd 监听的服务地址,默认为所在服务器上配置的第一个 hostname|
|
|fqdn | |不支持动态修改 |taosd 监听的服务地址,默认为所在服务器上配置的第一个 hostname|
|
||||||
|serverPort | |taosd 监听的端口,默认值 6030|
|
|serverPort | |不支持动态修改 |taosd 监听的端口,默认值 6030|
|
||||||
|compressMsgSize | |是否对 RPC 消息进行压缩;-1:所有消息都不压缩;0:所有消息都压缩;N (N>0):只有大于 N 个字节的消息才压缩;默认值 -1|
|
|compressMsgSize | |支持动态修改 重启生效 |是否对 RPC 消息进行压缩;-1:所有消息都不压缩;0:所有消息都压缩;N (N>0):只有大于 N 个字节的消息才压缩;默认值 -1|
|
||||||
|shellActivityTimer | |客户端向 mnode 发送心跳的时长,单位为秒,取值范围 1-120,默认值 3 |
|
|shellActivityTimer | |支持动态修改 立即生效 |客户端向 mnode 发送心跳的时长,单位为秒,取值范围 1-120,默认值 3 |
|
||||||
|numOfRpcSessions | |RPC 支持的最大连接数,取值范围 100-100000,默认值 30000|
|
|numOfRpcSessions | |支持动态修改 重启生效 |RPC 支持的最大连接数,取值范围 100-100000,默认值 30000|
|
||||||
|numOfRpcThreads | |RPC 收发数据线程数目,取值范围1-50,默认值为 CPU 核数的一半|
|
|numOfRpcThreads | |支持动态修改 重启生效 |RPC 收发数据线程数目,取值范围1-50,默认值为 CPU 核数的一半|
|
||||||
|numOfTaskQueueThreads | |客户端处理 RPC 消息的线程数取值, 范围4-16,默认值为 CPU 核数的一半|
|
|numOfTaskQueueThreads | |支持动态修改 重启生效 |客户端处理 RPC 消息的线程数取值, 范围4-16,默认值为 CPU 核数的一半|
|
||||||
|rpcQueueMemoryAllowed | |dnode允许的已经收到的RPC消息占用的内存最大值,单位 bytes,取值范围 104857600-INT64_MAX,默认值为服务器内存的 1/10 |
|
|rpcQueueMemoryAllowed | |支持动态修改 立即生效 |dnode允许的已经收到的RPC消息占用的内存最大值,单位 bytes,取值范围 104857600-INT64_MAX,默认值为服务器内存的 1/10 |
|
||||||
|resolveFQDNRetryTime | 3.x 之后取消 |FQDN 解析失败时的重试次数|
|
|resolveFQDNRetryTime | 3.x 之后取消 |不支持动态修改 |FQDN 解析失败时的重试次数|
|
||||||
|timeToGetAvailableConn | 3.3.4.x之后取消 |获得可用连接的最长等待时间,取值范围 10-50000000,单位为毫秒,默认值 500000|
|
|timeToGetAvailableConn | 3.3.4.x之后取消 |支持动态修改 重启生效 |获得可用连接的最长等待时间,取值范围 10-50000000,单位为毫秒,默认值 500000|
|
||||||
|maxShellConns | 3.x 后取消 |允许创建的最大链接数|
|
|maxShellConns | 3.x 后取消 |支持动态修改 重启生效 |允许创建的最大链接数|
|
||||||
|maxRetryWaitTime | |重连最大超时时间, 默认值是 10s|
|
|maxRetryWaitTime | |支持动态修改 重启生效 |重连最大超时时间, 默认值是 10s|
|
||||||
|shareConnLimit |3.3.4.0 新增 |一个链接可以共享的请求的数目,取值范围 1-512,默认值 10|
|
|shareConnLimit |3.3.4.0 新增 |支持动态修改 重启生效 |一个链接可以共享的请求的数目,取值范围 1-512,默认值 10|
|
||||||
|readTimeout |3.3.4.0 新增 |单个请求最小超时时间,取值范围 64-604800,单位为秒,默认值 900|
|
|readTimeout |3.3.4.0 新增 |支持动态修改 重启生效 |单个请求最小超时时间,取值范围 64-604800,单位为秒,默认值 900|
|
||||||
|
|
||||||
### 监控相关
|
### 监控相关
|
||||||
|参数名称|支持版本|参数含义|
|
|参数名称|支持版本|动态修改|参数含义|
|
||||||
|-----------------------|----------|-|
|
|-----------------------|----------|-------------------------|-|
|
||||||
|monitor | |是否收集监控数据并上报,0:关闭;1:打开;默认值 0|
|
|monitor | |支持动态修改 立即生效 |是否收集监控数据并上报,0:关闭;1:打开;默认值 0|
|
||||||
|monitorFqdn | |taosKeeper 服务所在服务器的 FQDN,默认值 无|
|
|monitorFqdn | |支持动态修改 重启生效 |taosKeeper 服务所在服务器的 FQDN,默认值 无|
|
||||||
|monitorPort | |taosKeeper 服务所监听的端口号,默认值 6043|
|
|monitorPort | |支持动态修改 重启生效 |taosKeeper 服务所监听的端口号,默认值 6043|
|
||||||
|monitorInterval | |监控数据库记录系统参数(CPU/内存)的时间间隔,单位是秒,取值范围 1-200000 ,默认值 30|
|
|monitorInterval | |支持动态修改 立即生效 |监控数据库记录系统参数(CPU/内存)的时间间隔,单位是秒,取值范围 1-200000 ,默认值 30|
|
||||||
|monitorMaxLogs | |缓存的待上报日志条数|
|
|monitorMaxLogs | |支持动态修改 立即生效 |缓存的待上报日志条数|
|
||||||
|monitorComp | |是否采用压缩方式上报监控日志时|
|
|monitorComp | |支持动态修改 重启生效 |是否采用压缩方式上报监控日志时|
|
||||||
|monitorLogProtocol | |是否打印监控日志|
|
|monitorLogProtocol | |支持动态修改 立即生效 |是否打印监控日志|
|
||||||
|monitorForceV2 | |是否使用 V2 版本协议上报|
|
|monitorForceV2 | |支持动态修改 立即生效 |是否使用 V2 版本协议上报|
|
||||||
|telemetryReporting | |是否上传 telemetry,0:不上传,1:上传,默认值 1|
|
|telemetryReporting | |支持动态修改 立即生效 |是否上传 telemetry,0:不上传,1:上传,默认值 1|
|
||||||
|telemetryServer | |telemetry 服务器地址|
|
|telemetryServer | |不支持动态修改 |telemetry 服务器地址|
|
||||||
|telemetryPort | |telemetry 服务器端口编号|
|
|telemetryPort | |不支持动态修改 |telemetry 服务器端口编号|
|
||||||
|telemetryInterval | |telemetry 上传时间间隔,单位为秒,默认 86400|
|
|telemetryInterval | |支持动态修改 立即生效 |telemetry 上传时间间隔,单位为秒,默认 86400|
|
||||||
|crashReporting | |是否上传 crash 信息;0:不上传,1:上传;默认值 1|
|
|crashReporting | |支持动态修改 立即生效 |是否上传 crash 信息;0:不上传,1:上传;默认值 1|
|
||||||
|
|
||||||
### 查询相关
|
### 查询相关
|
||||||
|参数名称|支持版本|参数含义|
|
|参数名称|支持版本|动态修改|参数含义|
|
||||||
|------------------------|----------|-|
|
|------------------------|----------|-------------------------|-|
|
||||||
|countAlwaysReturnValue | |count/hyperloglog 函数在输入数据为空或者 NULL 的情况下是否返回值;0:返回空行,1:返回;默认值 1;该参数设置为 1 时,如果查询中含有 INTERVAL 子句或者该查询使用了 TSMA 时,且相应的组或窗口内数据为空或者 NULL,对应的组或窗口将不返回查询结果;注意此参数客户端和服务端值应保持一致|
|
|countAlwaysReturnValue | |支持动态修改 立即生效 |count/hyperloglog 函数在输入数据为空或者 NULL 的情况下是否返回值;0:返回空行,1:返回;默认值 1;该参数设置为 1 时,如果查询中含有 INTERVAL 子句或者该查询使用了 TSMA 时,且相应的组或窗口内数据为空或者 NULL,对应的组或窗口将不返回查询结果;注意此参数客户端和服务端值应保持一致|
|
||||||
|tagFilterCache | |是否缓存标签过滤结果|
|
|tagFilterCache | |不支持动态修改 |是否缓存标签过滤结果|
|
||||||
|maxNumOfDistinctRes | |允许返回的 distinct 结果最大行数,默认值 10 万,最大允许值 1 亿|
|
|queryBufferSize | |支持动态修改 重启生效 |暂不生效|
|
||||||
|queryBufferSize | |暂不生效|
|
|queryRspPolicy | |支持动态修改 立即生效 |查询响应策略|
|
||||||
|queryRspPolicy | |查询响应策略|
|
|queryUseMemoryPool | |不支持动态修改 |查询是否使用内存池管理内存,默认值:1(打开); 0: 关闭,1: 打开|
|
||||||
|filterScalarMode | |强制使用标量过滤模式,0:关闭;1:开启,默认值 0|
|
|minReservedMemorySize | |不支持动态修改 |最小预留的系统可用内存数量,除预留外的内存都可以被用于查询,单位:MB,默认预留大小为系统物理内存的 20%,取值范围 1024 - 1000000000|
|
||||||
|queryPlannerTrace | |内部参数,查询计划是否输出详细日志|
|
|singleQueryMaxMemorySize| |不支持动态修改 |单个查询在单个节点(dnode)上可以使用的内存上限,超过该上限将返回错误,单位:MB,默认值:0(无上限),取值范围 0 - 1000000000|
|
||||||
|queryNodeChunkSize | |内部参数,查询计划的块大小|
|
|filterScalarMode | |不支持动态修改 |强制使用标量过滤模式,0:关闭;1:开启,默认值 0|
|
||||||
|queryUseNodeAllocator | |内部参数,查询计划的分配方法|
|
|queryPlannerTrace | |支持动态修改 立即生效 |内部参数,查询计划是否输出详细日志|
|
||||||
|queryMaxConcurrentTables| |内部参数,查询计划的并发数目|
|
|queryNodeChunkSize | |支持动态修改 立即生效 |内部参数,查询计划的块大小|
|
||||||
|queryRsmaTolerance | |内部参数,用于判定查询哪一级 rsma 数据时的容忍时间,单位为毫秒|
|
|queryUseNodeAllocator | |支持动态修改 立即生效 |内部参数,查询计划的分配方法|
|
||||||
|enableQueryHb | |内部参数,是否发送查询心跳消息|
|
|queryMaxConcurrentTables| |不支持动态修改 |内部参数,查询计划的并发数目|
|
||||||
|pqSortMemThreshold | |内部参数,排序使用的内存阈值|
|
|queryRsmaTolerance | |不支持动态修改 |内部参数,用于判定查询哪一级 rsma 数据时的容忍时间,单位为毫秒|
|
||||||
|
|enableQueryHb | |支持动态修改 立即生效 |内部参数,是否发送查询心跳消息|
|
||||||
|
|pqSortMemThreshold | |不支持动态修改 |内部参数,排序使用的内存阈值|
|
||||||
|
|
||||||
### 区域相关
|
### 区域相关
|
||||||
|参数名称|支持版本|参数含义|
|
|参数名称|支持版本|动态修改|参数含义|
|
||||||
|-----------------|----------|-|
|
|-----------------|----------|-------------------------|-|
|
||||||
|timezone | |时区;缺省从系统中动态获取当前的时区设置|
|
|timezone | |不支持动态修改 |时区;缺省从系统中动态获取当前的时区设置|
|
||||||
|locale | |系统区位信息及编码格式,缺省从系统中获取|
|
|locale | |不支持动态修改 |系统区位信息及编码格式,缺省从系统中获取|
|
||||||
|charset | |字符集编码,缺省从系统中获取|
|
|charset | |不支持动态修改 |字符集编码,缺省从系统中获取|
|
||||||
|
|
||||||
:::info
|
:::info
|
||||||
1. 为应对多时区的数据写入和查询问题,TDengine 采用 Unix 时间戳(Unix Timestamp)来记录和存储时间戳。Unix 时间戳的特点决定了任一时刻不论在任何时区,产生的时间戳均一致。需要注意的是,Unix 时间戳是在客户端完成转换和记录。为了确保客户端其他形式的时间转换为正确的 Unix 时间戳,需要设置正确的时区。
|
1. 为应对多时区的数据写入和查询问题,TDengine 采用 Unix 时间戳(Unix Timestamp)来记录和存储时间戳。Unix 时间戳的特点决定了任一时刻不论在任何时区,产生的时间戳均一致。需要注意的是,Unix 时间戳是在客户端完成转换和记录。为了确保客户端其他形式的时间转换为正确的 Unix 时间戳,需要设置正确的时区。
|
||||||
|
@ -162,158 +164,161 @@ charset 的有效值是 UTF-8。
|
||||||
:::
|
:::
|
||||||
|
|
||||||
### 存储相关
|
### 存储相关
|
||||||
|参数名称|支持版本|参数含义|
|
|参数名称|支持版本|动态修改|参数含义|
|
||||||
|--------------------|----------|-|
|
|--------------------|----------|-------------------------|-|
|
||||||
|dataDir | |数据文件目录,所有的数据文件都将写入该目录,默认值 /var/lib/taos|
|
|dataDir | |不支持动态修改 |数据文件目录,所有的数据文件都将写入该目录,默认值 /var/lib/taos|
|
||||||
|tempDir | |指定所有系统运行过程中的临时文件生成的目录,默认值 /tmp|
|
|diskIDCheckEnabled | |不支持动态修改 |在 3.3.4.3 后,在重启 dnode 时增加了检查 dataDir 所在磁盘 id 是否发生改变,0:进行检查,1:不进行检查;默认值:1|
|
||||||
|minimalDataDirGB | |dataDir 指定的时序数据存储目录所需要保留的最小空间,单位 GB,默认值 2|
|
|tempDir | |不支持动态修改 |指定所有系统运行过程中的临时文件生成的目录,默认值 /tmp|
|
||||||
|minimalTmpDirGB | |tempDir 所指定的临时文件目录所需要保留的最小空间,单位 GB,默认值 1|
|
|minimalDataDirGB | |不支持动态修改 |dataDir 指定的时序数据存储目录所需要保留的最小空间,单位 GB,默认值 2|
|
||||||
|minDiskFreeSize |3.1.1.0 后|当某块磁盘上的可用空间小于等于这个阈值时,该磁盘将不再被选择用于生成新的数据文件,单位为字节,取值范围 52428800-1073741824,默认值为 52428800;企业版参数|
|
|minimalTmpDirGB | |不支持动态修改 |tempDir 所指定的临时文件目录所需要保留的最小空间,单位 GB,默认值 1|
|
||||||
|s3MigrateIntervalSec|3.3.4.3 后|本地数据文件自动上传 S3 的触发周期,单位为秒。最小值:600;最大值:100000。默认值 3600;企业版参数|
|
|minDiskFreeSize |3.1.1.0 后|支持动态修改 立即生效 |当某块磁盘上的可用空间小于等于这个阈值时,该磁盘将不再被选择用于生成新的数据文件,单位为字节,取值范围 52428800-1073741824,默认值为 52428800;企业版参数|
|
||||||
|s3MigrateEnabled |3.3.4.3 后|是否自动进行 S3 迁移,默认值为 0,表示关闭自动 S3 迁移,可配置为 1;企业版参数|
|
|s3MigrateIntervalSec|3.3.4.3 后|支持动态修改 立即生效 |本地数据文件自动上传 S3 的触发周期,单位为秒。最小值:600;最大值:100000。默认值 3600;企业版参数|
|
||||||
|s3Accesskey |3.3.4.3 后|冒号分隔的用户 SecretId:SecretKey,例如 AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E;企业版参数|
|
|s3MigrateEnabled |3.3.4.3 后|支持动态修改 立即生效 |是否自动进行 S3 迁移,默认值为 0,表示关闭自动 S3 迁移,可配置为 1;企业版参数|
|
||||||
|s3Endpoint |3.3.4.3 后|用户所在地域的 COS 服务域名,支持 http 和 https,bucket 的区域需要与 endpoint 保持一致,否则无法访问;企业版参数|
|
|s3Accesskey |3.3.4.3 后|支持动态修改 重启生效 |冒号分隔的用户 SecretId:SecretKey,例如 AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E;企业版参数|
|
||||||
|s3BucketName |3.3.4.3 后|存储桶名称,减号后面是用户注册 COS 服务的 AppId,其中 AppId 是 COS 特有,AWS 和阿里云都没有,配置时需要作为 bucket name 的一部分,使用减号分隔;参数值均为字符串类型,但不需要引号;例如 test0711-1309024725;企业版参数|
|
|s3Endpoint |3.3.4.3 后|支持动态修改 重启生效 |用户所在地域的 COS 服务域名,支持 http 和 https,bucket 的区域需要与 endpoint 保持一致,否则无法访问;企业版参数|
|
||||||
|s3PageCacheSize |3.3.4.3 后|S3 page cache 缓存页数目,取值范围 4-1048576,单位为页,默认值 4096;企业版参数|
|
|s3BucketName |3.3.4.3 后|支持动态修改 重启生效 |存储桶名称,减号后面是用户注册 COS 服务的 AppId,其中 AppId 是 COS 特有,AWS 和阿里云都没有,配置时需要作为 bucket name 的一部分,使用减号分隔;参数值均为字符串类型,但不需要引号;例如 test0711-1309024725;企业版参数|
|
||||||
|s3UploadDelaySec |3.3.4.3 后|data 文件持续多长时间不再变动后上传至 S3,取值范围 1-2592000 (30天),单位为秒,默认值 60;企业版参数|
|
|s3PageCacheSize |3.3.4.3 后|支持动态修改 重启生效 |S3 page cache 缓存页数目,取值范围 4-1048576,单位为页,默认值 4096;企业版参数|
|
||||||
|cacheLazyLoadThreshold| |内部参数,缓存的装载策略|
|
|s3UploadDelaySec |3.3.4.3 后|支持动态修改 立即生效 |data 文件持续多长时间不再变动后上传至 S3,取值范围 1-2592000 (30天),单位为秒,默认值 60;企业版参数|
|
||||||
|
|cacheLazyLoadThreshold| |支持动态修改 立即生效 |内部参数,缓存的装载策略|
|
||||||
|
|
||||||
### 集群相关
|
### 集群相关
|
||||||
|参数名称|支持版本|参数含义|
|
|参数名称|支持版本|动态修改|参数含义|
|
||||||
|--------------------------|----------|-|
|
|--------------------------|----------|-------------------------|-|
|
||||||
|supportVnodes | |dnode 支持的最大 vnode 数目,取值范围 0-4096,默认值 CPU 核数的 2 倍 + 5|
|
|supportVnodes | |支持动态修改 立即生效 |dnode 支持的最大 vnode 数目,取值范围 0-4096,默认值 CPU 核数的 2 倍 + 5|
|
||||||
|numOfCommitThreads | |落盘线程的最大数量,取值范围 0-1024,默认值为 4|
|
|numOfCommitThreads | |支持动态修改 重启生效 |落盘线程的最大数量,取值范围 0-1024,默认值为 4|
|
||||||
|numOfMnodeReadThreads | |mnode 的 Read 线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不超过 4)|
|
|numOfMnodeReadThreads | |支持动态修改 重启生效 |mnode 的 Read 线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不超过 4)|
|
||||||
|numOfVnodeQueryThreads | |vnode 的 Query 线程数目,取值范围 0-1024,默认值为 CPU 核数的两倍(不超过 16)|
|
|numOfVnodeQueryThreads | |支持动态修改 重启生效 |vnode 的 Query 线程数目,取值范围 0-1024,默认值为 CPU 核数的两倍(不超过 16)|
|
||||||
|numOfVnodeFetchThreads | |vnode 的 Fetch 线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不超过 4)|
|
|numOfVnodeFetchThreads | |支持动态修改 重启生效 |vnode 的 Fetch 线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不超过 4)|
|
||||||
|numOfVnodeRsmaThreads | |vnode 的 Rsma 线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不超过 4)|
|
|numOfVnodeRsmaThreads | |支持动态修改 重启生效 |vnode 的 Rsma 线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不超过 4)|
|
||||||
|numOfQnodeQueryThreads | |qnode 的 Query 线程数目,取值范围 0-1024,默认值为 CPU 核数的两倍(不超过 16)|
|
|numOfQnodeQueryThreads | |支持动态修改 重启生效 |qnode 的 Query 线程数目,取值范围 0-1024,默认值为 CPU 核数的两倍(不超过 16)|
|
||||||
|numOfSnodeSharedThreads | |snode 的共享线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不小于 2,不超过 4)|
|
|numOfSnodeSharedThreads | |支持动态修改 重启生效 |snode 的共享线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不小于 2,不超过 4)|
|
||||||
|numOfSnodeUniqueThreads | |snode 的独占线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不小于 2,不超过 4)|
|
|numOfSnodeUniqueThreads | |支持动态修改 重启生效 |snode 的独占线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不小于 2,不超过 4)|
|
||||||
|ratioOfVnodeStreamThreads | |流计算使用 vnode 线程的比例,取值范围 0.01-4,默认值 4|
|
|ratioOfVnodeStreamThreads | |支持动态修改 重启生效 |流计算使用 vnode 线程的比例,取值范围 0.01-4,默认值 4|
|
||||||
|ttlUnit | |ttl 参数的单位,取值范围 1-31572500,单位为秒,默认值 86400|
|
|ttlUnit | |不支持动态修改 |ttl 参数的单位,取值范围 1-31572500,单位为秒,默认值 86400|
|
||||||
|ttlPushInterval | |ttl 检测超时频率,取值范围 1-100000,单位为秒,默认值 10|
|
|ttlPushInterval | |支持动态修改 立即生效 |ttl 检测超时频率,取值范围 1-100000,单位为秒,默认值 10|
|
||||||
|ttlChangeOnWrite | |ttl 到期时间是否伴随表的修改操作改变;0:不改变,1:改变;默认值为 0|
|
|ttlChangeOnWrite | |支持动态修改 立即生效 |ttl 到期时间是否伴随表的修改操作改变;0:不改变,1:改变;默认值为 0|
|
||||||
|ttlBatchDropNum | |ttl 一批删除子表的数目,最小值为 0,默认值 10000|
|
|ttlBatchDropNum | |支持动态修改 立即生效 |ttl 一批删除子表的数目,最小值为 0,默认值 10000|
|
||||||
|retentionSpeedLimitMB | |数据在不同级别硬盘上迁移时的速度限制,取值范围 0-1024,单位 MB,默认值 0,表示不限制|
|
|retentionSpeedLimitMB | |支持动态修改 立即生效 |数据在不同级别硬盘上迁移时的速度限制,取值范围 0-1024,单位 MB,默认值 0,表示不限制|
|
||||||
|maxTsmaNum | |集群内可创建的TSMA个数;取值范围 0-3;默认值 3|
|
|maxTsmaNum | |支持动态修改 立即生效 |集群内可创建的TSMA个数;取值范围 0-3;默认值 3|
|
||||||
|tmqMaxTopicNum | |订阅最多可建立的 topic 数量;取值范围 1-10000;默认值为 20|
|
|tmqMaxTopicNum | |支持动态修改 立即生效 |订阅最多可建立的 topic 数量;取值范围 1-10000;默认值为 20|
|
||||||
|tmqRowSize | |订阅数据块的最大记录条数,取值范围 1-1000000,默认值 4096|
|
|tmqRowSize | |支持动态修改 立即生效 |订阅数据块的最大记录条数,取值范围 1-1000000,默认值 4096|
|
||||||
|audit | |审计功能开关;企业版参数|
|
|audit | |支持动态修改 立即生效 |审计功能开关;企业版参数|
|
||||||
|auditInterval | |审计数据上报的时间间隔;企业版参数|
|
|auditInterval | |支持动态修改 立即生效 |审计数据上报的时间间隔;企业版参数|
|
||||||
|auditCreateTable | |是否针对创建子表开启申计功能;企业版参数|
|
|auditCreateTable          |          |支持动态修改 立即生效    |是否针对创建子表开启审计功能;企业版参数|
|
||||||
|encryptAlgorithm | |数据加密算法;企业版参数|
|
|encryptAlgorithm | |不支持动态修改 |数据加密算法;企业版参数|
|
||||||
|encryptScope | |加密范围;企业版参数|
|
|encryptScope | |不支持动态修改 |加密范围;企业版参数|
|
||||||
|enableWhiteList | |白名单功能开关;企业版参数|
|
|enableWhiteList | |支持动态修改 立即生效 |白名单功能开关;企业版参数|
|
||||||
|syncLogBufferMemoryAllowed| |一个 dnode 允许的 sync 日志缓存消息占用的内存最大值,单位 bytes,取值范围 104857600-INT64_MAX,默认值 服务器内存的 1/10,3.1.3.2/3.3.2.13 版本开始生效 |
|
|syncLogBufferMemoryAllowed| |支持动态修改 立即生效 |一个 dnode 允许的 sync 日志缓存消息占用的内存最大值,单位 bytes,取值范围 104857600-INT64_MAX,默认值 服务器内存的 1/10,3.1.3.2/3.3.2.13 版本开始生效 |
|
||||||
|syncElectInterval | |内部参数,用于同步模块调试|
|
|syncElectInterval | |不支持动态修改 |内部参数,用于同步模块调试|
|
||||||
|syncHeartbeatInterval | |内部参数,用于同步模块调试|
|
|syncHeartbeatInterval | |不支持动态修改 |内部参数,用于同步模块调试|
|
||||||
|syncHeartbeatTimeout | |内部参数,用于同步模块调试|
|
|syncHeartbeatTimeout | |不支持动态修改 |内部参数,用于同步模块调试|
|
||||||
|syncSnapReplMaxWaitN | |内部参数,用于同步模块调试|
|
|syncSnapReplMaxWaitN | |支持动态修改 立即生效 |内部参数,用于同步模块调试|
|
||||||
|syncSnapReplMaxWaitN | |内部参数,用于同步模块调试|
|
|arbHeartBeatIntervalSec | |支持动态修改 立即生效 |内部参数,用于同步模块调试|
|
||||||
|arbHeartBeatIntervalSec | |内部参数,用于同步模块调试|
|
|arbCheckSyncIntervalSec | |支持动态修改 立即生效 |内部参数,用于同步模块调试|
|
||||||
|arbCheckSyncIntervalSec | |内部参数,用于同步模块调试|
|
|arbSetAssignedTimeoutSec | |支持动态修改 立即生效 |内部参数,用于同步模块调试|
|
||||||
|arbSetAssignedTimeoutSec | |内部参数,用于同步模块调试|
|
|mndSdbWriteDelta | |支持动态修改 立即生效 |内部参数,用于 mnode 模块调试|
|
||||||
|mndSdbWriteDelta | |内部参数,用于 mnode 模块调试|
|
|mndLogRetention | |支持动态修改 立即生效 |内部参数,用于 mnode 模块调试|
|
||||||
|mndLogRetention | |内部参数,用于 mnode 模块调试|
|
|skipGrant | |不支持动态修改 |内部参数,用于授权检查|
|
||||||
|skipGrant | |内部参数,用于授权检查|
|
|trimVDbIntervalSec | |支持动态修改 立即生效 |内部参数,用于删除过期数据|
|
||||||
|trimVDbIntervalSec | |内部参数,用于删除过期数据|
|
|ttlFlushThreshold | |支持动态修改 立即生效 |内部参数,ttl 定时器的频率|
|
||||||
|ttlFlushThreshold | |内部参数,ttl 定时器的频率|
|
|compactPullupInterval | |支持动态修改 立即生效 |内部参数,数据重整定时器的频率|
|
||||||
|compactPullupInterval | |内部参数,数据重整定时器的频率|
|
|walFsyncDataSizeLimit | |支持动态修改 立即生效 |内部参数,WAL 进行 FSYNC 的阈值|
|
||||||
|walFsyncDataSizeLimit | |内部参数,WAL 进行 FSYNC 的阈值|
|
|transPullupInterval | |支持动态修改 立即生效 |内部参数,mnode 执行事务的重试间隔|
|
||||||
|transPullupInterval | |内部参数,mnode 执行事务的重试间隔|
|
|mqRebalanceInterval | |支持动态修改 立即生效 |内部参数,消费者再平衡的时间间隔|
|
||||||
|mqRebalanceInterval | |内部参数,消费者再平衡的时间间隔|
|
|uptimeInterval | |支持动态修改 立即生效 |内部参数,用于记录系统启动时间|
|
||||||
|uptimeInterval | |内部参数,用于记录系统启动时间|
|
|timeseriesThreshold | |支持动态修改 立即生效 |内部参数,用于统计用量|
|
||||||
|timeseriesThreshold | |内部参数,用于统计用量|
|
|udf | |支持动态修改 重启生效 |是否启动 UDF 服务;0:不启动,1:启动;默认值为 0 |
|
||||||
|udf | |是否启动 UDF 服务;0:不启动,1:启动;默认值为 0 |
|
|udfdResFuncs | |支持动态修改 重启生效 |内部参数,用于 UDF 结果集设置|
|
||||||
|udfdResFuncs | |内部参数,用于 UDF 结果集设置|
|
|udfdLdLibPath | |支持动态修改 重启生效 |内部参数,表示 UDF 装载的库路径|
|
||||||
|udfdLdLibPath | |内部参数,表示 UDF 装载的库路径|
|
|
||||||
|
|
||||||
|
|
||||||
### 流计算参数
|
### 流计算参数
|
||||||
|参数名称|支持版本|参数含义|
|
|参数名称|支持版本|动态修改|参数含义|
|
||||||
|-----------------------|----------|-|
|
|-----------------------|-------------------------|----------|-|
|
||||||
|disableStream | |流计算的启动开关|
|
|disableStream | |支持动态修改 重启生效 |流计算的启动开关|
|
||||||
|streamBufferSize | |控制内存中窗口状态缓存的大小,默认值为 128MB|
|
|streamBufferSize | |支持动态修改 重启生效 |控制内存中窗口状态缓存的大小,默认值为 128MB|
|
||||||
|streamAggCnt | |内部参数,并发进行聚合计算的数目|
|
|streamAggCnt | |不支持动态修改 |内部参数,并发进行聚合计算的数目|
|
||||||
|checkpointInterval | |内部参数,checkponit 同步间隔|
|
|checkpointInterval     |          |支持动态修改 重启生效 |内部参数,checkpoint 同步间隔|
|
||||||
|concurrentCheckpoint | |内部参数,是否并发检查 checkpoint|
|
|concurrentCheckpoint | |支持动态修改 立即生效 |内部参数,是否并发检查 checkpoint|
|
||||||
|maxStreamBackendCache | |内部参数,流计算使用的最大缓存|
|
|maxStreamBackendCache | |支持动态修改 立即生效 |内部参数,流计算使用的最大缓存|
|
||||||
|streamSinkDataRate | |内部参数,用于控制流计算结果的写入速度|
|
|streamSinkDataRate | |支持动态修改 重启生效 |内部参数,用于控制流计算结果的写入速度|
|
||||||
|
|
||||||
### 日志相关
|
### 日志相关
|
||||||
|参数名称|支持版本|参数含义|
|
|参数名称|支持版本|动态修改|参数含义|
|
||||||
|----------------|----------|-|
|
|----------------|-------------------------|----------|-|
|
||||||
|logDir | |日志文件目录,运行日志将写入该目录,默认值 /var/log/taos|
|
|logDir | |不支持动态修改 |日志文件目录,运行日志将写入该目录,默认值 /var/log/taos|
|
||||||
|minimalLogDirGB | |日志文件夹所在磁盘可用空间大小小于该值时,停止写日志,单位 GB,默认值 1|
|
|minimalLogDirGB | |不支持动态修改 |日志文件夹所在磁盘可用空间大小小于该值时,停止写日志,单位 GB,默认值 1|
|
||||||
|numOfLogLines | |单个日志文件允许的最大行数,默认值 10,000,000|
|
|numOfLogLines | |支持动态修改 立即生效 |单个日志文件允许的最大行数,默认值 10,000,000|
|
||||||
|asyncLog | |日志写入模式,0:同步,1:异步,默认值 1|
|
|asyncLog | |支持动态修改 立即生效 |日志写入模式,0:同步,1:异步,默认值 1|
|
||||||
|logKeepDays | |日志文件的最长保存时间,单位:天,默认值 0,意味着无限保存,日志文件不会被重命名,也不会有新的日志文件滚动产生,但日志文件的内容有可能会不断滚动,取决于日志文件大小的设置;当设置为大于 0 的值时,当日志文件大小达到设置的上限时会被重命名为 taosdlog.yyy,其中 yyy 为日志文件最后修改的时间戳,并滚动产生新的日志文件|
|
|logKeepDays | |支持动态修改 立即生效 |日志文件的最长保存时间,单位:天,默认值 0,意味着无限保存,日志文件不会被重命名,也不会有新的日志文件滚动产生,但日志文件的内容有可能会不断滚动,取决于日志文件大小的设置;当设置为大于 0 的值时,当日志文件大小达到设置的上限时会被重命名为 taosdlog.yyy,其中 yyy 为日志文件最后修改的时间戳,并滚动产生新的日志文件|
|
||||||
|slowLogThreshold|3.3.3.0 后|慢查询门限值,大于等于门限值认为是慢查询,单位秒,默认值 3 |
|
|slowLogThreshold|3.3.3.0 后|支持动态修改 立即生效 |慢查询门限值,大于等于门限值认为是慢查询,单位秒,默认值 3 |
|
||||||
|slowLogMaxLen |3.3.3.0 后|慢查询日志最大长度,取值范围 1-16384,默认值 4096|
|
|slowLogMaxLen |3.3.3.0 后|支持动态修改 立即生效 |慢查询日志最大长度,取值范围 1-16384,默认值 4096|
|
||||||
|slowLogScope |3.3.3.0 后|慢查询记录类型,取值范围 ALL/QUERY/INSERT/OTHERS/NONE,默认值 QUERY|
|
|slowLogScope |3.3.3.0 后|支持动态修改 立即生效 |慢查询记录类型,取值范围 ALL/QUERY/INSERT/OTHERS/NONE,默认值 QUERY|
|
||||||
|slowLogExceptDb |3.3.3.0 后|指定的数据库不上报慢查询,仅支持配置换一个数据库|
|
|slowLogExceptDb |3.3.3.0 后|支持动态修改 立即生效 |指定的数据库不上报慢查询,仅支持配置一个数据库|
|
||||||
|debugFlag | |运行日志开关,131(输出错误和警告日志),135(输出错误、警告和调试日志),143(输出错误、警告、调试和跟踪日志);默认值 131 或 135 (取决于不同模块)|
|
|debugFlag | |支持动态修改 立即生效 |运行日志开关,131(输出错误和警告日志),135(输出错误、警告和调试日志),143(输出错误、警告、调试和跟踪日志);默认值 131 或 135 (取决于不同模块)|
|
||||||
|tmrDebugFlag | |定时器模块的日志开关,取值范围同上|
|
|tmrDebugFlag | |支持动态修改 立即生效 |定时器模块的日志开关,取值范围同上|
|
||||||
|uDebugFlag | |共用功能模块的日志开关,取值范围同上|
|
|uDebugFlag | |支持动态修改 立即生效 |共用功能模块的日志开关,取值范围同上|
|
||||||
|rpcDebugFlag | |rpc 模块的日志开关,取值范围同上|
|
|rpcDebugFlag | |支持动态修改 立即生效 |rpc 模块的日志开关,取值范围同上|
|
||||||
|qDebugFlag | |query 模块的日志开关,取值范围同上|
|
|qDebugFlag | |支持动态修改 立即生效 |query 模块的日志开关,取值范围同上|
|
||||||
|dDebugFlag | |dnode 模块的日志开关,取值范围同上|
|
|dDebugFlag | |支持动态修改 立即生效 |dnode 模块的日志开关,取值范围同上|
|
||||||
|vDebugFlag | |vnode 模块的日志开关,取值范围同上|
|
|vDebugFlag | |支持动态修改 立即生效 |vnode 模块的日志开关,取值范围同上|
|
||||||
|mDebugFlag | |mnode 模块的日志开关,取值范围同上|
|
|mDebugFlag | |支持动态修改 立即生效 |mnode 模块的日志开关,取值范围同上|
|
||||||
|azDebugFlag |3.3.4.3 后|S3 模块的日志开关,取值范围同上|
|
|azDebugFlag |3.3.4.3 后|支持动态修改 立即生效 |S3 模块的日志开关,取值范围同上|
|
||||||
|sDebugFlag | |sync 模块的日志开关,取值范围同上|
|
|sDebugFlag | |支持动态修改 立即生效 |sync 模块的日志开关,取值范围同上|
|
||||||
|tsdbDebugFlag | |tsdb 模块的日志开关,取值范围同上|
|
|tsdbDebugFlag | |支持动态修改 立即生效 |tsdb 模块的日志开关,取值范围同上|
|
||||||
|tqDebugFlag | |tq 模块的日志开关,取值范围同上|
|
|tqDebugFlag | |支持动态修改 立即生效 |tq 模块的日志开关,取值范围同上|
|
||||||
|fsDebugFlag | |fs 模块的日志开关,取值范围同上|
|
|fsDebugFlag | |支持动态修改 立即生效 |fs 模块的日志开关,取值范围同上|
|
||||||
|udfDebugFlag | |udf 模块的日志开关,取值范围同上|
|
|udfDebugFlag | |支持动态修改 立即生效 |udf 模块的日志开关,取值范围同上|
|
||||||
|smaDebugFlag | |sma 模块的日志开关,取值范围同上|
|
|smaDebugFlag | |支持动态修改 立即生效 |sma 模块的日志开关,取值范围同上|
|
||||||
|idxDebugFlag | |index 模块的日志开关,取值范围同上|
|
|idxDebugFlag | |支持动态修改 立即生效 |index 模块的日志开关,取值范围同上|
|
||||||
|tdbDebugFlag | |tdb 模块的日志开关,取值范围同上|
|
|tdbDebugFlag | |支持动态修改 立即生效 |tdb 模块的日志开关,取值范围同上|
|
||||||
|metaDebugFlag | |meta 模块的日志开关,取值范围同上|
|
|metaDebugFlag | |支持动态修改 立即生效 |meta 模块的日志开关,取值范围同上|
|
||||||
|stDebugFlag | |stream 模块的日志开关,取值范围同上|
|
|stDebugFlag | |支持动态修改 立即生效 |stream 模块的日志开关,取值范围同上|
|
||||||
|sndDebugFlag | |snode 模块的日志开关,取值范围同上|
|
|sndDebugFlag | |支持动态修改 立即生效 |snode 模块的日志开关,取值范围同上|
|
||||||
|
|
||||||
### 调试相关
|
### 调试相关
|
||||||
|参数名称|支持版本|参数含义|
|
|参数名称|支持版本|动态修改|参数含义|
|
||||||
|--------------------|----------|-|
|
|--------------------|-------------------------|----------|-|
|
||||||
|enableCoreFile | |crash 时是否生成 core 文件,0:不生成,1:生成;默认值 1|
|
|enableCoreFile | |支持动态修改 立即生效 |crash 时是否生成 core 文件,0:不生成,1:生成;默认值 1|
|
||||||
|configDir | |配置文件所在目录|
|
|configDir | |不支持动态修改 |配置文件所在目录|
|
||||||
|scriptDir | |内部测试工具的脚本目录|
|
|forceReadConfig | |不支持动态修改 |强制使用配置文件中的参数,0:使用持久化的配置参数,1:使用配置文件中的配置参数;默认值 0|
|
||||||
|assert | |断言控制开关,默认值 0|
|
|scriptDir | |不支持动态修改 |内部测试工具的脚本目录|
|
||||||
|randErrorChance | |内部参数,用于随机失败测试|
|
|assert | |不支持动态修改 |断言控制开关,默认值 0|
|
||||||
|randErrorDivisor | |内部参数,用于随机失败测试|
|
|randErrorChance | |支持动态修改 立即生效 |内部参数,用于随机失败测试|
|
||||||
|randErrorScope | |内部参数,用于随机失败测试|
|
|randErrorDivisor | |支持动态修改 立即生效 |内部参数,用于随机失败测试|
|
||||||
|safetyCheckLevel | |内部参数,用于随机失败测试|
|
|randErrorScope | |支持动态修改 立即生效 |内部参数,用于随机失败测试|
|
||||||
|experimental | |内部参数,用于一些实验特性|
|
|safetyCheckLevel | |支持动态修改 立即生效 |内部参数,用于随机失败测试|
|
||||||
|simdEnable |3.3.4.3 后|内部参数,用于测试 SIMD 加速|
|
|experimental | |支持动态修改 立即生效 |内部参数,用于一些实验特性|
|
||||||
|AVX512Enable |3.3.4.3 后|内部参数,用于测试 AVX512 加速|
|
|simdEnable |3.3.4.3 后|不支持动态修改 |内部参数,用于测试 SIMD 加速|
|
||||||
|rsyncPort | |内部参数,用于调试流计算|
|
|AVX512Enable |3.3.4.3 后|不支持动态修改 |内部参数,用于测试 AVX512 加速|
|
||||||
|snodeAddress | |内部参数,用于调试流计算|
|
|rsyncPort | |不支持动态修改 |内部参数,用于调试流计算|
|
||||||
|checkpointBackupDir | |内部参数,用于恢复 snode 数据|
|
|snodeAddress | |支持动态修改 重启生效 |内部参数,用于调试流计算|
|
||||||
|enableAuditDelete | |内部参数,用于测试审计功能|
|
|checkpointBackupDir | |支持动态修改 重启生效 |内部参数,用于恢复 snode 数据|
|
||||||
|slowLogThresholdTest| |内部参数,用于测试慢日志|
|
|enableAuditDelete | |不支持动态修改 |内部参数,用于测试审计功能|
|
||||||
|
|slowLogThresholdTest| |不支持动态修改 |内部参数,用于测试慢日志|
|
||||||
|
|bypassFlag |3.3.4.5 后|支持动态修改 立即生效 |内部参数,用于短路测试,0:正常写入,1:写入消息在 taos 客户端发送 RPC 消息前返回,2:写入消息在 taosd 服务端收到 RPC 消息后返回,4:写入消息在 taosd 服务端写入内存缓存前返回,8:写入消息在 taosd 服务端数据落盘前返回;默认值 0|
|
||||||
|
|
||||||
### 压缩参数
|
### 压缩参数
|
||||||
|参数名称|支持版本|参数含义|
|
|参数名称|支持版本|动态修改|参数含义|
|
||||||
|------------|----------|-|
|
|------------|----------|-------------------------|-|
|
||||||
|fPrecision | |设置 float 类型浮点数压缩精度 ,取值范围 0.1 ~ 0.00000001 ,默认值 0.00000001 , 小于此值的浮点数尾数部分将被截断|
|
|fPrecision | |支持动态修改 立即生效 |设置 float 类型浮点数压缩精度 ,取值范围 0.1 ~ 0.00000001 ,默认值 0.00000001 , 小于此值的浮点数尾数部分将被截断|
|
||||||
|dPrecision | |设置 double 类型浮点数压缩精度 , 取值范围 0.1 ~ 0.0000000000000001 , 默认值 0.0000000000000001 , 小于此值的浮点数尾数部分将被截取|
|
|dPrecision | |支持动态修改 立即生效 |设置 double 类型浮点数压缩精度 , 取值范围 0.1 ~ 0.0000000000000001 , 默认值 0.0000000000000001 , 小于此值的浮点数尾数部分将被截取|
|
||||||
|lossyColumn |3.3.0.0 前|对 float 和/或 double 类型启用 TSZ 有损压缩;取值范围 float/double/none;默认值 none,表示关闭无损压缩|
|
|lossyColumn |3.3.0.0 前|不支持动态修改 |对 float 和/或 double 类型启用 TSZ 有损压缩;取值范围 float/double/none;默认值 none,表示关闭有损压缩|
|
||||||
|ifAdtFse | |在启用 TSZ 有损压缩时,使用 FSE 算法替换 HUFFMAN 算法,FSE 算法压缩速度更快,但解压稍慢,追求压缩速度可选用此算法;0:关闭,1:打开;默认值为 0|
|
|ifAdtFse | |支持动态修改 重启生效 |在启用 TSZ 有损压缩时,使用 FSE 算法替换 HUFFMAN 算法,FSE 算法压缩速度更快,但解压稍慢,追求压缩速度可选用此算法;0:关闭,1:打开;默认值为 0|
|
||||||
|maxRange | |内部参数,用于有损压缩设置|
|
|maxRange | |支持动态修改 重启生效 |内部参数,用于有损压缩设置|
|
||||||
|curRange | |内部参数,用于有损压缩设置|
|
|curRange | |支持动态修改 重启生效 |内部参数,用于有损压缩设置|
|
||||||
|compressor | |内部参数,用于有损压缩设置|
|
|compressor | |支持动态修改 重启生效 |内部参数,用于有损压缩设置|
|
||||||
|
|
||||||
**补充说明**
|
**补充说明**
|
||||||
1. 在 3.2.0.0 ~ 3.3.0.0(不包含)版本生效,启用该参数后不能回退到升级前的版本
|
1. 在 3.4.0.0 之后,所有配置参数都将被持久化到本地存储,重启数据库服务后,将默认使用持久化的配置参数列表;如果您希望继续使用 config 文件中配置的参数,需设置 forceReadConfig 为 1。
|
||||||
2. TSZ 压缩算法是通过数据预测技术完成的压缩,所以更适合有规律变化的数据
|
2. 在 3.2.0.0 ~ 3.3.0.0(不包含)版本生效,启用该参数后不能回退到升级前的版本
|
||||||
3. TSZ 压缩时间会更长一些,如果您的服务器 CPU 空闲多,存储空间小的情况下适合选用
|
3. TSZ 压缩算法是通过数据预测技术完成的压缩,所以更适合有规律变化的数据
|
||||||
4. 示例:对 float 和 double 类型都启用有损压缩
|
4. TSZ 压缩时间会更长一些,如果您的服务器 CPU 空闲多,存储空间小的情况下适合选用
|
||||||
|
5. 示例:对 float 和 double 类型都启用有损压缩
|
||||||
```shell
|
```shell
|
||||||
lossyColumns float|double
|
lossyColumns float|double
|
||||||
```
|
```
|
||||||
5. 配置需重启服务生效,重启如果在 taosd 日志中看到以下内容,表明配置已生效:
|
6. 配置需重启服务生效,重启如果在 taosd 日志中看到以下内容,表明配置已生效:
|
||||||
```sql
|
```sql
|
||||||
02/22 10:49:27.607990 00002933 UTL lossyColumns float|double
|
02/22 10:49:27.607990 00002933 UTL lossyColumns float|double
|
||||||
```
|
```
|
||||||
|
|
|
@ -9,100 +9,102 @@ TDengine 客户端驱动提供了应用编程所需要的全部 API,并且在
|
||||||
## 配置参数
|
## 配置参数
|
||||||
|
|
||||||
### 连接相关
|
### 连接相关
|
||||||
|参数名称|支持版本|参数含义|
|
|参数名称|支持版本|动态修改|参数含义|
|
||||||
|----------------------|----------|-------------|
|
|----------------------|----------|-------------------------|-------------|
|
||||||
|firstEp | |启动时,主动连接的集群中首个 dnode 的 endpoint,缺省值:hostname:6030,若无法获取该服务器的 hostname,则赋值为 localhost|
|
|firstEp | |支持动态修改 立即生效 |启动时,主动连接的集群中首个 dnode 的 endpoint,缺省值:hostname:6030,若无法获取该服务器的 hostname,则赋值为 localhost|
|
||||||
|secondEp | |启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint,没有缺省值|
|
|secondEp | |支持动态修改 立即生效 |启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint,没有缺省值|
|
||||||
|compressMsgSize | |是否对 RPC 消息进行压缩;-1:所有消息都不压缩;0:所有消息都压缩;N (N>0):只有大于 N 个字节的消息才压缩;缺省值 -1|
|
|serverPort | |支持动态修改 立即生效 |taosd 监听的端口,默认值 6030|
|
||||||
|shellActivityTimer | |客户端向 mnode 发送心跳的时长,单位为秒,取值范围 1-120,默认值 3|
|
|compressMsgSize | |支持动态修改 立即生效 |是否对 RPC 消息进行压缩;-1:所有消息都不压缩;0:所有消息都压缩;N (N>0):只有大于 N 个字节的消息才压缩;缺省值 -1|
|
||||||
|numOfRpcSessions | |RPC 支持的最大连接数,取值范围 100-100000,缺省值 30000|
|
|shellActivityTimer | |不支持动态修改 |客户端向 mnode 发送心跳的时长,单位为秒,取值范围 1-120,默认值 3|
|
||||||
|numOfRpcThreads | |RPC 收发数据线程数目,取值范围1-50,默认值为 CPU 核数的一半|
|
|numOfRpcSessions | |支持动态修改 立即生效 |RPC 支持的最大连接数,取值范围 100-100000,缺省值 30000|
|
||||||
|numOfTaskQueueThreads | |客户端处理 RPC消息的线程数, 范围4-16,默认值为 CPU 核数的一半|
|
|numOfRpcThreads | |不支持动态修改 |RPC 收发数据线程数目,取值范围1-50,默认值为 CPU 核数的一半|
|
||||||
|timeToGetAvailableConn| 3.3.4.*之后取消 |获得可用连接的最长等待时间,取值范围 10-50000000,单位为毫秒,缺省值 500000|
|
|numOfTaskQueueThreads | |不支持动态修改 |客户端处理 RPC消息的线程数, 范围4-16,默认值为 CPU 核数的一半|
|
||||||
|useAdapter | |内部参数,是否使用 taosadapter,影响 CSV 文件导入|
|
|timeToGetAvailableConn| 3.3.4.*之后取消 |不支持动态修改 |获得可用连接的最长等待时间,取值范围 10-50000000,单位为毫秒,缺省值 500000|
|
||||||
|shareConnLimit |3.3.4.0 新增|内部参数,一个链接可以共享的查询数目,取值范围 1-256,默认值 10|
|
|useAdapter | |支持动态修改 立即生效 |内部参数,是否使用 taosadapter,影响 CSV 文件导入|
|
||||||
|readTimeout |3.3.4.0 新增|内部参数,最小超时时间,取值范围 64-604800,单位为秒,默认值 900|
|
|shareConnLimit |3.3.4.0 新增|不支持动态修改 |内部参数,一个链接可以共享的查询数目,取值范围 1-256,默认值 10|
|
||||||
|
|readTimeout |3.3.4.0 新增|不支持动态修改 |内部参数,最小超时时间,取值范围 64-604800,单位为秒,默认值 900|
|
||||||
|
|
||||||
### 查询相关
|
### 查询相关
|
||||||
|参数名称|支持版本|参数含义|
|
|参数名称|支持版本|动态修改|参数含义|
|
||||||
|---------------------------------|---------|-|
|
|----------------------|----------|-------------------------|-------------|
|
||||||
|countAlwaysReturnValue | |count/hyperloglog 函数在输入数据为空或者 NULL 的情况下是否返回值;0:返回空行,1:返回;默认值 1;该参数设置为 1 时,如果查询中含有 INTERVAL 子句或者该查询使用了 TSMA 时,且相应的组或窗口内数据为空或者 NULL,对应的组或窗口将不返回查询结果;注意此参数客户端和服务端值应保持一致|
|
|countAlwaysReturnValue | |支持动态修改 立即生效 |count/hyperloglog 函数在输入数据为空或者 NULL 的情况下是否返回值;0:返回空行,1:返回;默认值 1;该参数设置为 1 时,如果查询中含有 INTERVAL 子句或者该查询使用了 TSMA 时,且相应的组或窗口内数据为空或者 NULL,对应的组或窗口将不返回查询结果;注意此参数客户端和服务端值应保持一致|
|
||||||
|keepColumnName | |Last、First、LastRow 函数查询且未指定别名时,自动设置别名为列名(不含函数名),因此 order by 子句如果引用了该列名将自动引用该列对应的函数;1:表示自动设置别名为列名(不包含函数名),0:表示不自动设置别名;缺省值:0|
|
|keepColumnName | |支持动态修改 立即生效 |Last、First、LastRow 函数查询且未指定别名时,自动设置别名为列名(不含函数名),因此 order by 子句如果引用了该列名将自动引用该列对应的函数;1:表示自动设置别名为列名(不包含函数名),0:表示不自动设置别名;缺省值:0|
|
||||||
|multiResultFunctionStarReturnTags|3.3.3.0 后|查询超级表时,last(\*)/last_row(\*)/first(\*) 是否返回标签列;查询普通表、子表时,不受该参数影响;0:不返回标签列,1:返回标签列;缺省值:0;该参数设置为 0 时,last(\*)/last_row(\*)/first(\*) 只返回超级表的普通列;为 1 时,返回超级表的普通列和标签列|
|
|multiResultFunctionStarReturnTags|3.3.3.0 后|支持动态修改 立即生效 |查询超级表时,last(\*)/last_row(\*)/first(\*) 是否返回标签列;查询普通表、子表时,不受该参数影响;0:不返回标签列,1:返回标签列;缺省值:0;该参数设置为 0 时,last(\*)/last_row(\*)/first(\*) 只返回超级表的普通列;为 1 时,返回超级表的普通列和标签列|
|
||||||
|metaCacheMaxSize | |指定单个客户端元数据缓存大小的最大值,单位 MB;缺省值 -1,表示无限制|
|
|metaCacheMaxSize | |支持动态修改 立即生效 |指定单个客户端元数据缓存大小的最大值,单位 MB;缺省值 -1,表示无限制|
|
||||||
|maxTsmaCalcDelay | |查询时客户端可允许的 tsma 计算延迟,若 tsma 的计算延迟大于配置值,则该 TSMA 将不会被使用;取值范围 600s - 86400s,即 10 分钟 - 1 小时;缺省值:600 秒|
|
|maxTsmaCalcDelay | |支持动态修改 立即生效 |查询时客户端可允许的 tsma 计算延迟,若 tsma 的计算延迟大于配置值,则该 TSMA 将不会被使用;取值范围 600s - 86400s,即 10 分钟 - 1 小时;缺省值:600 秒|
|
||||||
|tsmaDataDeleteMark | |TSMA 计算的历史数据中间结果保存时间,单位为毫秒;取值范围 >= 3600000,即大于等于1h;缺省值:86400000,即 1d |
|
|tsmaDataDeleteMark | |支持动态修改 立即生效 |TSMA 计算的历史数据中间结果保存时间,单位为毫秒;取值范围 >= 3600000,即大于等于1h;缺省值:86400000,即 1d |
|
||||||
|queryPolicy | |查询语句的执行策略,1:只使用 vnode,不使用 qnode;2:没有扫描算子的子任务在 qnode 执行,带扫描算子的子任务在 vnode 执行;3:vnode 只运行扫描算子,其余算子均在 qnode 执行;缺省值:1|
|
|queryPolicy | |支持动态修改 立即生效 |查询语句的执行策略,1:只使用 vnode,不使用 qnode;2:没有扫描算子的子任务在 qnode 执行,带扫描算子的子任务在 vnode 执行;3:vnode 只运行扫描算子,其余算子均在 qnode 执行;缺省值:1|
|
||||||
|queryTableNotExistAsEmpty | |查询表不存在时是否返回空结果集;false:返回错误;true:返回空结果集;缺省值 false|
|
|queryTableNotExistAsEmpty | |支持动态修改 立即生效 |查询表不存在时是否返回空结果集;false:返回错误;true:返回空结果集;缺省值 false|
|
||||||
|querySmaOptimize | |sma index 的优化策略,0:表示不使用 sma index,永远从原始数据进行查询;1:表示使用 sma index,对符合的语句,直接从预计算的结果进行查询;缺省值:0|
|
|querySmaOptimize | |支持动态修改 立即生效 |sma index 的优化策略,0:表示不使用 sma index,永远从原始数据进行查询;1:表示使用 sma index,对符合的语句,直接从预计算的结果进行查询;缺省值:0|
|
||||||
|queryPlannerTrace | |内部参数,查询计划是否输出详细日志|
|
|queryPlannerTrace | |支持动态修改 立即生效 |内部参数,查询计划是否输出详细日志|
|
||||||
|queryNodeChunkSize | |内部参数,查询计划的块大小|
|
|queryNodeChunkSize | |支持动态修改 立即生效 |内部参数,查询计划的块大小|
|
||||||
|queryUseNodeAllocator | |内部参数,查询计划的分配方法|
|
|queryUseNodeAllocator | |支持动态修改 立即生效 |内部参数,查询计划的分配方法|
|
||||||
|queryMaxConcurrentTables | |内部参数,查询计划的并发数目|
|
|queryMaxConcurrentTables | |不支持动态修改 |内部参数,查询计划的并发数目|
|
||||||
|enableQueryHb | |内部参数,是否发送查询心跳消息|
|
|enableQueryHb | |支持动态修改 立即生效 |内部参数,是否发送查询心跳消息|
|
||||||
|minSlidingTime | |内部参数,sliding 的最小允许值|
|
|minSlidingTime | |支持动态修改 立即生效 |内部参数,sliding 的最小允许值|
|
||||||
|minIntervalTime | |内部参数,interval 的最小允许值|
|
|minIntervalTime | |支持动态修改 立即生效 |内部参数,interval 的最小允许值|
|
||||||
|
|
||||||
### 写入相关
|
### 写入相关
|
||||||
|参数名称|支持版本|参数含义|
|
|参数名称|支持版本|动态修改|参数含义|
|
||||||
|------------------------------|----------|-|
|
|----------------------|----------|-------------------------|-------------|
|
||||||
|smlChildTableName | |schemaless 自定义的子表名的 key,无缺省值|
|
|smlChildTableName | |支持动态修改 立即生效 |schemaless 自定义的子表名的 key,无缺省值|
|
||||||
|smlAutoChildTableNameDelimiter| |schemaless tag 之间的连接符,连起来作为子表名,无缺省值|
|
|smlAutoChildTableNameDelimiter| |支持动态修改 立即生效 |schemaless tag 之间的连接符,连起来作为子表名,无缺省值|
|
||||||
|smlTagName | |schemaless tag 为空时默认的 tag 名字,缺省值 "_tag_null"|
|
|smlTagName | |支持动态修改 立即生效 |schemaless tag 为空时默认的 tag 名字,缺省值 "_tag_null"|
|
||||||
|smlTsDefaultName | |schemaless 自动建表的时间列名字通过该配置设置,缺省值 "_ts"|
|
|smlTsDefaultName | |支持动态修改 立即生效 |schemaless 自动建表的时间列名字通过该配置设置,缺省值 "_ts"|
|
||||||
|smlDot2Underline | |schemaless 把超级表名中的 dot 转成下划线|
|
|smlDot2Underline | |支持动态修改 立即生效 |schemaless 把超级表名中的 dot 转成下划线|
|
||||||
|maxInsertBatchRows | |内部参数,一批写入的最大条数|
|
|maxInsertBatchRows | |支持动态修改 立即生效 |内部参数,一批写入的最大条数|
|
||||||
|
|
||||||
### 区域相关
|
### 区域相关
|
||||||
|参数名称|支持版本|参数含义|
|
|参数名称|支持版本|动态修改|参数含义|
|
||||||
|-----------------|----------|-|
|
|----------------------|----------|-------------------------|-------------|
|
||||||
|timezone | |时区;缺省从系统中动态获取当前的时区设置|
|
|timezone | |支持动态修改 立即生效 |时区;缺省从系统中动态获取当前的时区设置|
|
||||||
|locale | |系统区位信息及编码格式,缺省从系统中获取|
|
|locale | |支持动态修改 立即生效 |系统区位信息及编码格式,缺省从系统中获取|
|
||||||
|charset | |字符集编码,缺省从系统中获取|
|
|charset | |支持动态修改 立即生效 |字符集编码,缺省从系统中获取|
|
||||||
|
|
||||||
### 存储相关
|
### 存储相关
|
||||||
|参数名称|支持版本|参数含义|
|
|参数名称|支持版本|动态修改|参数含义|
|
||||||
|-----------------|----------|-|
|
|----------------------|----------|-------------------------|-------------|
|
||||||
|tempDir | |指定所有运行过程中的临时文件生成的目录,Linux 平台默认值为 /tmp|
|
|tempDir | |支持动态修改 立即生效 |指定所有运行过程中的临时文件生成的目录,Linux 平台默认值为 /tmp|
|
||||||
|minimalTmpDirGB | |tempDir 所指定的临时文件目录所需要保留的最小空间,单位 GB,缺省值:1|
|
|minimalTmpDirGB | |支持动态修改 立即生效 |tempDir 所指定的临时文件目录所需要保留的最小空间,单位 GB,缺省值:1|
|
||||||
|
|
||||||
### 日志相关
|
### 日志相关
|
||||||
|参数名称|支持版本|参数含义|
|
|参数名称|支持版本|动态修改|参数含义|
|
||||||
|-----------------|----------|-|
|
|----------------------|----------|-------------------------|-------------|
|
||||||
|logDir | |日志文件目录,运行日志将写入该目录,缺省值:/var/log/taos|
|
|logDir | |不支持动态修改 |日志文件目录,运行日志将写入该目录,缺省值:/var/log/taos|
|
||||||
|minimalLogDirGB | |日志文件夹所在磁盘可用空间大小小于该值时,停止写日志,单位 GB,缺省值:1|
|
|minimalLogDirGB | |支持动态修改 立即生效 |日志文件夹所在磁盘可用空间大小小于该值时,停止写日志,单位 GB,缺省值:1|
|
||||||
|numOfLogLines | |单个日志文件允许的最大行数,缺省值:10,000,000|
|
|numOfLogLines | |支持动态修改 立即生效 |单个日志文件允许的最大行数,缺省值:10,000,000|
|
||||||
|asyncLog | |日志写入模式,0:同步,1:异步,缺省值:1|
|
|asyncLog | |支持动态修改 立即生效 |日志写入模式,0:同步,1:异步,缺省值:1|
|
||||||
|logKeepDays | |日志文件的最长保存时间,单位:天,缺省值:0,意味着无限保存,日志文件不会被重命名,也不会有新的日志文件滚动产生,但日志文件的内容有可能会不断滚动,取决于日志文件大小的设置;当设置为大于 0 的值时,当日志文件大小达到设置的上限时会被重命名为 taoslogx.yyy,其中 yyy 为日志文件最后修改的时间戳,并滚动产生新的日志文件|
|
|logKeepDays | |支持动态修改 立即生效 |日志文件的最长保存时间,单位:天,缺省值:0,意味着无限保存,日志文件不会被重命名,也不会有新的日志文件滚动产生,但日志文件的内容有可能会不断滚动,取决于日志文件大小的设置;当设置为大于 0 的值时,当日志文件大小达到设置的上限时会被重命名为 taoslogx.yyy,其中 yyy 为日志文件最后修改的时间戳,并滚动产生新的日志文件|
|
||||||
|debugFlag | |运行日志开关,131(输出错误和警告日志),135(输出错误、警告和调试日志),143(输出错误、警告、调试和跟踪日志);默认值 131 或 135 (取决于不同模块)|
|
|debugFlag | |支持动态修改 立即生效 |运行日志开关,131(输出错误和警告日志),135(输出错误、警告和调试日志),143(输出错误、警告、调试和跟踪日志);默认值 131 或 135 (取决于不同模块)|
|
||||||
|tmrDebugFlag | |定时器模块的日志开关,取值范围同上|
|
|tmrDebugFlag | |支持动态修改 立即生效 |定时器模块的日志开关,取值范围同上|
|
||||||
|uDebugFlag | |共用功能模块的日志开关,取值范围同上|
|
|uDebugFlag | |支持动态修改 立即生效 |共用功能模块的日志开关,取值范围同上|
|
||||||
|rpcDebugFlag | |rpc 模块的日志开关,取值范围同上|
|
|rpcDebugFlag | |支持动态修改 立即生效 |rpc 模块的日志开关,取值范围同上|
|
||||||
|jniDebugFlag | |jni 模块的日志开关,取值范围同上|
|
|jniDebugFlag | |支持动态修改 立即生效 |jni 模块的日志开关,取值范围同上|
|
||||||
|qDebugFlag | |query 模块的日志开关,取值范围同上|
|
|qDebugFlag | |支持动态修改 立即生效 |query 模块的日志开关,取值范围同上|
|
||||||
|cDebugFlag | |客户端模块的日志开关,取值范围同上|
|
|cDebugFlag | |支持动态修改 立即生效 |客户端模块的日志开关,取值范围同上|
|
||||||
|simDebugFlag | |内部参数,测试工具的日志开关,取值范围同上|
|
|simDebugFlag | |支持动态修改 立即生效 |内部参数,测试工具的日志开关,取值范围同上|
|
||||||
|tqClientDebugFlag|3.3.4.3 后|客户端模块的日志开关,取值范围同上|
|
|tqClientDebugFlag|3.3.4.3 后|支持动态修改 立即生效 |客户端模块的日志开关,取值范围同上|
|
||||||
|
|
||||||
### 调试相关
|
### 调试相关
|
||||||
|参数名称|支持版本|参数含义|
|
|参数名称|支持版本|动态修改|参数含义|
|
||||||
|-----------------|-----------|-|
|
|----------------------|----------|-------------------------|-------------|
|
||||||
|crashReporting | |是否上传 crash 到 telemetry,0:不上传,1:上传;缺省值:1|
|
|crashReporting | |支持动态修改 立即生效 |是否上传 crash 到 telemetry,0:不上传,1:上传;缺省值:1|
|
||||||
|enableCoreFile | |crash 时是否生成 core 文件,0:不生成,1:生成;缺省值:1|
|
|enableCoreFile | |支持动态修改 立即生效 |crash 时是否生成 core 文件,0:不生成,1:生成;缺省值:1|
|
||||||
|assert | |断言控制开关,缺省值:0|
|
|assert | |不支持动态修改 |断言控制开关,缺省值:0|
|
||||||
|configDir | |配置文件所在目录|
|
|configDir | |不支持动态修改 |配置文件所在目录|
|
||||||
|scriptDir | |内部参数,测试用例的目录|
|
|scriptDir | |不支持动态修改 |内部参数,测试用例的目录|
|
||||||
|randErrorChance |3.3.3.0 后|内部参数,用于随机失败测试|
|
|randErrorChance |3.3.3.0 后|不支持动态修改 |内部参数,用于随机失败测试|
|
||||||
|randErrorDivisor |3.3.3.0 后|内部参数,用于随机失败测试|
|
|randErrorDivisor |3.3.3.0 后|不支持动态修改 |内部参数,用于随机失败测试|
|
||||||
|randErrorScope |3.3.3.0 后|内部参数,用于随机失败测试|
|
|randErrorScope |3.3.3.0 后|不支持动态修改 |内部参数,用于随机失败测试|
|
||||||
|safetyCheckLevel |3.3.3.0 后|内部参数,用于随机失败测试|
|
|safetyCheckLevel |3.3.3.0 后|不支持动态修改 |内部参数,用于随机失败测试|
|
||||||
|simdEnable |3.3.4.3 后|内部参数,用于测试 SIMD 加速|
|
|simdEnable |3.3.4.3 后|不支持动态修改 |内部参数,用于测试 SIMD 加速|
|
||||||
|AVX512Enable |3.3.4.3 后|内部参数,用于测试 AVX512 加速|
|
|AVX512Enable |3.3.4.3 后|不支持动态修改 |内部参数,用于测试 AVX512 加速|
|
||||||
|
|bypassFlag |3.3.4.5 后|支持动态修改 立即生效 |内部参数,用于短路测试,0:正常写入,1:写入消息在 taos 客户端发送 RPC 消息前返回,2:写入消息在 taosd 服务端收到 RPC 消息后返回,4:写入消息在 taosd 服务端写入内存缓存前返回,8:写入消息在 taosd 服务端数据落盘前返回;缺省值:0|
|
||||||
|
|
||||||
### SHELL 相关
|
### SHELL 相关
|
||||||
|参数名称|支持版本|参数含义|
|
|参数名称|支持版本|动态修改|参数含义|
|
||||||
|-----------------|----------|-|
|
|----------------------|----------|-------------------------|-------------|
|
||||||
|enableScience | |是否开启科学计数法显示浮点数;0:不开始,1:开启;缺省值:1|
|
|enableScience         |          |不支持动态修改           |是否开启科学计数法显示浮点数;0:不开启,1:开启;缺省值:1|
|
||||||
|
|
||||||
## API
|
## API
|
||||||
|
|
||||||
|
|
|
@ -64,7 +64,8 @@ database_option: {
|
||||||
- DURATION:数据文件存储数据的时间跨度。可以使用加单位的表示形式,如 DURATION 100h、DURATION 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。不加时间单位时默认单位为天,如 DURATION 50 表示 50 天。
|
- DURATION:数据文件存储数据的时间跨度。可以使用加单位的表示形式,如 DURATION 100h、DURATION 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。不加时间单位时默认单位为天,如 DURATION 50 表示 50 天。
|
||||||
- MAXROWS:文件块中记录的最大条数,默认为 4096 条。
|
- MAXROWS:文件块中记录的最大条数,默认为 4096 条。
|
||||||
- MINROWS:文件块中记录的最小条数,默认为 100 条。
|
- MINROWS:文件块中记录的最小条数,默认为 100 条。
|
||||||
- KEEP:表示数据文件保存的天数,缺省值为 3650,取值范围 [1, 365000],且必须大于或等于3倍的 DURATION 参数值。数据库会自动删除保存时间超过 KEEP 值的数据从而释放存储空间。KEEP 可以使用加单位的表示形式,如 KEEP 100h、KEEP 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。也可以不写单位,如 KEEP 50,此时默认单位为天。企业版支持[多级存储](../../operation/planning/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8)功能, 因此, 可以设置多个保存时间(多个以英文逗号分隔,最多 3 个,满足 keep 0 \<= keep 1 \<= keep 2,如 KEEP 100h,100d,3650d); 社区版不支持多级存储功能(即使配置了多个保存时间, 也不会生效, KEEP 会取最大的保存时间)。
|
- KEEP:表示数据文件保存的天数,缺省值为 3650,取值范围 [1, 365000],且必须大于或等于 3 倍的 DURATION 参数值。数据库会自动删除保存时间超过 KEEP 值的数据从而释放存储空间。KEEP 可以使用加单位的表示形式,如 KEEP 100h、KEEP 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。也可以不写单位,如 KEEP 50,此时默认单位为天。企业版支持[多级存储](../../operation/planning/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8)功能, 因此, 可以设置多个保存时间(多个以英文逗号分隔,最多 3 个,满足 keep 0 \<= keep 1 \<= keep 2,如 KEEP 100h,100d,3650d); 社区版不支持多级存储功能(即使配置了多个保存时间, 也不会生效, KEEP 会取最大的保存时间)。了解更多,请点击 [关于主键时间戳](https://docs.taosdata.com/reference/taos-sql/insert/)
|
||||||
|
|
||||||
- KEEP_TIME_OFFSET:自 3.2.0.0 版本生效。删除或迁移保存时间超过 KEEP 值的数据的延迟执行时间,默认值为 0 (小时)。在数据文件保存时间超过 KEEP 后,删除或迁移操作不会立即执行,而会额外等待本参数指定的时间间隔,以实现与业务高峰期错开的目的。
|
- KEEP_TIME_OFFSET:自 3.2.0.0 版本生效。删除或迁移保存时间超过 KEEP 值的数据的延迟执行时间,默认值为 0 (小时)。在数据文件保存时间超过 KEEP 后,删除或迁移操作不会立即执行,而会额外等待本参数指定的时间间隔,以实现与业务高峰期错开的目的。
|
||||||
- STT_TRIGGER:表示落盘文件触发文件合并的个数。开源版本固定为 1,企业版本可设置范围为 1 到 16。对于少表高频写入场景,此参数建议使用默认配置;而对于多表低频写入场景,此参数建议配置较大的值。
|
- STT_TRIGGER:表示落盘文件触发文件合并的个数。开源版本固定为 1,企业版本可设置范围为 1 到 16。对于少表高频写入场景,此参数建议使用默认配置;而对于多表低频写入场景,此参数建议配置较大的值。
|
||||||
- SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表,用于超级表列非常多的情况。
|
- SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表,用于超级表列非常多的情况。
|
||||||
|
@ -80,6 +81,7 @@ database_option: {
|
||||||
- WAL_FSYNC_PERIOD:当 WAL_LEVEL 参数设置为 2 时,用于设置落盘的周期。默认为 3000,单位毫秒。最小为 0,表示每次写入立即落盘;最大为 180000,即三分钟。
|
- WAL_FSYNC_PERIOD:当 WAL_LEVEL 参数设置为 2 时,用于设置落盘的周期。默认为 3000,单位毫秒。最小为 0,表示每次写入立即落盘;最大为 180000,即三分钟。
|
||||||
- WAL_RETENTION_PERIOD: 为了数据订阅消费,需要 WAL 日志文件额外保留的最大时长策略。WAL 日志清理,不受订阅客户端消费状态影响。单位为 s。默认为 3600,表示在 WAL 保留最近 3600 秒的数据,请根据数据订阅的需要修改这个参数为适当值。
|
- WAL_RETENTION_PERIOD: 为了数据订阅消费,需要 WAL 日志文件额外保留的最大时长策略。WAL 日志清理,不受订阅客户端消费状态影响。单位为 s。默认为 3600,表示在 WAL 保留最近 3600 秒的数据,请根据数据订阅的需要修改这个参数为适当值。
|
||||||
- WAL_RETENTION_SIZE:为了数据订阅消费,需要 WAL 日志文件额外保留的最大累计大小策略。单位为 KB。默认为 0,表示累计大小无上限。
|
- WAL_RETENTION_SIZE:为了数据订阅消费,需要 WAL 日志文件额外保留的最大累计大小策略。单位为 KB。默认为 0,表示累计大小无上限。
|
||||||
|
|
||||||
### 创建数据库示例
|
### 创建数据库示例
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
|
@ -90,7 +92,7 @@ create database if not exists db vgroups 10 buffer 10
|
||||||
|
|
||||||
### 使用数据库
|
### 使用数据库
|
||||||
|
|
||||||
```
|
```sql
|
||||||
USE db_name;
|
USE db_name;
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -98,7 +100,7 @@ USE db_name;
|
||||||
|
|
||||||
## 删除数据库
|
## 删除数据库
|
||||||
|
|
||||||
```
|
```sql
|
||||||
DROP DATABASE [IF EXISTS] db_name
|
DROP DATABASE [IF EXISTS] db_name
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -128,7 +130,7 @@ alter_database_option: {
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
### 修改 CACHESIZE
|
### 修改 CACHESIZE
|
||||||
|
|
||||||
修改数据库参数的命令使用简单,难的是如何确定是否需要修改以及如何修改。本小节描述如何判断数据库的 cachesize 是否够用。
|
修改数据库参数的命令使用简单,难的是如何确定是否需要修改以及如何修改。本小节描述如何判断数据库的 cachesize 是否够用。
|
||||||
|
|
||||||
|
@ -157,13 +159,13 @@ alter_database_option: {
|
||||||
|
|
||||||
### 查看系统中的所有数据库
|
### 查看系统中的所有数据库
|
||||||
|
|
||||||
```
|
```sql
|
||||||
SHOW DATABASES;
|
SHOW DATABASES;
|
||||||
```
|
```
|
||||||
|
|
||||||
### 显示一个数据库的创建语句
|
### 显示一个数据库的创建语句
|
||||||
|
|
||||||
```
|
```sql
|
||||||
SHOW CREATE DATABASE db_name \G;
|
SHOW CREATE DATABASE db_name \G;
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -191,23 +193,23 @@ TRIM DATABASE db_name;
|
||||||
FLUSH DATABASE db_name;
|
FLUSH DATABASE db_name;
|
||||||
```
|
```
|
||||||
|
|
||||||
落盘内存中的数据。在关闭节点之前,执行这条命令可以避免重启后的数据回放,加速启动过程。
|
落盘内存中的数据。在关闭节点之前,执行这条命令可以避免重启后的预写数据日志回放,加速启动过程。
|
||||||
|
|
||||||
## 调整VGROUP中VNODE的分布
|
## 调整 VGROUP 中 VNODE 的分布
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
REDISTRIBUTE VGROUP vgroup_no DNODE dnode_id1 [DNODE dnode_id2] [DNODE dnode_id3]
|
REDISTRIBUTE VGROUP vgroup_no DNODE dnode_id1 [DNODE dnode_id2] [DNODE dnode_id3]
|
||||||
```
|
```
|
||||||
|
|
||||||
按照给定的dnode列表,调整vgroup中的vnode分布。因为副本数目最大为3,所以最多输入3个dnode。
|
按照给定的 dnode 列表,调整 vgroup 中的 vnode 分布。因为副本数目最大为 3,所以最多输入 3 个 dnode。
|
||||||
|
|
||||||
## 自动调整VGROUP中VNODE的分布
|
## 自动调整 VGROUP 中 LEADER 的分布
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
BALANCE VGROUP
|
BALANCE VGROUP LEADER
|
||||||
```
|
```
|
||||||
|
|
||||||
自动调整集群所有vgroup中的vnode分布,相当于在vnode级别对集群进行数据的负载均衡操作。
|
触发集群所有 vgroup 中的 leader 重新选主,对集群各节点进行负载再均衡操作。
|
||||||
|
|
||||||
## 查看数据库工作状态
|
## 查看数据库工作状态
|
||||||
|
|
||||||
|
@ -216,3 +218,18 @@ SHOW db_name.ALIVE;
|
||||||
```
|
```
|
||||||
|
|
||||||
查询数据库 db_name 的可用状态,返回值 0:不可用 1:完全可用 2:部分可用(即数据库包含的 VNODE 部分节点可用,部分节点不可用)
|
查询数据库 db_name 的可用状态,返回值 0:不可用 1:完全可用 2:部分可用(即数据库包含的 VNODE 部分节点可用,部分节点不可用)
|
||||||
|
|
||||||
|
## 查看DB 的磁盘空间占用
|
||||||
|
|
||||||
|
```sql
|
||||||
|
select * from INFORMATION_SCHEMA.INS_DISK_USAGE where db_name = 'db_name'
|
||||||
|
```
|
||||||
|
查看DB各个模块所占用磁盘的大小
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SHOW db_name.disk_info;
|
||||||
|
```
|
||||||
|
查看数据库 db_name 的数据压缩压缩率和数据在磁盘上所占用的大小
|
||||||
|
|
||||||
|
该命令本质上等同于 `select sum(data1 + data2 + data3)/sum(raw_data), sum(data1 + data2 + data3) from information_schema.ins_disk_usage where db_name="dbname"`
|
||||||
|
|
||||||
|
|
|
@ -5,9 +5,11 @@ description: 写入数据的详细语法
|
||||||
---
|
---
|
||||||
|
|
||||||
## 写入语法
|
## 写入语法
|
||||||
|
|
||||||
写入记录支持两种语法, 正常语法和超级表语法. 正常语法下, 紧跟INSERT INTO后名的表名是子表名或者普通表名. 超级表语法下, 紧跟INSERT INTO后名的表名是超级表名
|
写入记录支持两种语法, 正常语法和超级表语法. 正常语法下, 紧跟INSERT INTO后名的表名是子表名或者普通表名. 超级表语法下, 紧跟INSERT INTO后名的表名是超级表名
|
||||||
|
|
||||||
### 正常语法
|
### 正常语法
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
INSERT INTO
|
INSERT INTO
|
||||||
tb_name
|
tb_name
|
||||||
|
@ -22,7 +24,9 @@ INSERT INTO
|
||||||
|
|
||||||
INSERT INTO tb_name [(field1_name, ...)] subquery
|
INSERT INTO tb_name [(field1_name, ...)] subquery
|
||||||
```
|
```
|
||||||
|
|
||||||
### 超级表语法
|
### 超级表语法
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
INSERT INTO
|
INSERT INTO
|
||||||
stb1_name [(field1_name, ...)]
|
stb1_name [(field1_name, ...)]
|
||||||
|
@ -32,16 +36,18 @@ INSERT INTO
|
||||||
...];
|
...];
|
||||||
```
|
```
|
||||||
|
|
||||||
**关于时间戳**
|
#### 关于主键时间戳
|
||||||
|
|
||||||
1. TDengine 要求插入的数据必须要有时间戳,插入数据的时间戳要注意以下几点:
|
TDengine 要求插入的数据必须要有时间戳,插入数据的时间戳要注意以下几点:
|
||||||
|
|
||||||
2. 时间戳不同的格式语法会有不同的精度影响。字符串格式的时间戳写法不受所在 DATABASE 的时间精度设置影响;而长整形格式的时间戳写法会受到所在 DATABASE 的时间精度设置影响。例如,时间戳"2021-07-13 16:16:48"的 UNIX 秒数为 1626164208。则其在毫秒精度下需要写作 1626164208000,在微秒精度设置下就需要写为 1626164208000000,纳秒精度设置下需要写为 1626164208000000000。
|
1. 时间戳不同的格式语法会有不同的精度影响。字符串格式的时间戳写法不受所在 DATABASE 的时间精度设置影响;而长整形格式的时间戳写法会受到所在 DATABASE 的时间精度设置影响。例如,时间戳"2021-07-13 16:16:48"的 UNIX 秒数为 1626164208。则其在毫秒精度下需要写作 1626164208000,在微秒精度设置下就需要写为 1626164208000000,纳秒精度设置下需要写为 1626164208000000000。
|
||||||
|
|
||||||
3. 一次插入多行数据时,不要把首列的时间戳的值都写 NOW。否则会导致语句中的多条记录使用相同的时间戳,于是就可能出现相互覆盖以致这些数据行无法全部被正确保存。其原因在于,NOW 函数在执行中会被解析为所在 SQL 语句的客户端执行时间,出现在同一语句中的多个 NOW 标记也就会被替换为完全相同的时间戳取值。
|
2. 一次插入多行数据时,不要把首列的时间戳的值都写 NOW。否则会导致语句中的多条记录使用相同的时间戳,于是就可能出现相互覆盖以致这些数据行无法全部被正确保存。其原因在于,NOW 函数在执行中会被解析为所在 SQL 语句的客户端执行时间,出现在同一语句中的多个 NOW 标记也就会被替换为完全相同的时间戳取值。
|
||||||
允许插入的最老记录的时间戳,是相对于当前服务器时间,减去配置的 KEEP 值(数据保留的天数, 可以在创建数据库时指定,缺省值是 3650 天)。允许插入的最新记录的时间戳,取决于数据库的 PRECISION 值(时间戳精度, 可以在创建数据库时指定, ms 表示毫秒,us 表示微秒,ns 表示纳秒,默认毫秒):如果是毫秒或微秒, 取值为 1970 年 1 月 1 日 00:00:00.000 UTC 加上 1000 年, 即 2970 年 1 月 1 日 00:00:00.000 UTC; 如果是纳秒, 取值为 1970 年 1 月 1 日 00:00:00.000000000 UTC 加上 292 年, 即 2262 年 1 月 1 日 00:00:00.000000000 UTC。
|
|
||||||
|
|
||||||
**语法说明**
|
3. 允许插入的最大时间戳为当前时间加上 100 年, 比如当前时间为`2024-11-11 12:00:00`,则允许插入的最大时间戳为`2124-11-11 12:00:00`。允许插入的最小时间戳取决于数据库的 KEEP 设置。企业版支持三级存储,可以设置多个 KEEP 时间,如下图所示,如果数据库的 KEEP 配置为`100h,100d,3650d`,则允许的最小时间戳为当前时间减去 3650 天。那么时间戳在`[Now - 100h, Now + 100y)`内的会保存在一级存储,时间戳在`[Now - 100d, Now - 100h)`内的会保存在二级存储,时间戳在`[Now - 3650d, Now - 100d)`内的会保存在三级存储。社区版不支持多级存储功能,只能配置一个 KEEP 值,如果配置多个,则取其最大者。如果时间戳不在有效时间范围内,TDengine 将返回错误“Timestamp out of range"。
|
||||||
|

|
||||||
|
|
||||||
|
#### 语法说明
|
||||||
|
|
||||||
1. 可以指定要插入值的列,对于未指定的列数据库将自动填充为 NULL。
|
1. 可以指定要插入值的列,对于未指定的列数据库将自动填充为 NULL。
|
||||||
|
|
||||||
|
@ -56,22 +62,24 @@ INSERT INTO
|
||||||
```sql
|
```sql
|
||||||
INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a');
|
INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a');
|
||||||
```
|
```
|
||||||
|
|
||||||
6. 对于向多个子表插入数据的情况,依然会有部分数据写入失败,部分数据写入成功的情况。这是因为多个子表可能分布在不同的 VNODE 上,客户端将 INSERT 语句完整解析后,将数据发往各个涉及的 VNODE 上,每个 VNODE 独立进行写入操作。如果某个 VNODE 因为某些原因(比如网络问题或磁盘故障)导致写入失败,并不会影响其他 VNODE 节点的写入。
|
6. 对于向多个子表插入数据的情况,依然会有部分数据写入失败,部分数据写入成功的情况。这是因为多个子表可能分布在不同的 VNODE 上,客户端将 INSERT 语句完整解析后,将数据发往各个涉及的 VNODE 上,每个 VNODE 独立进行写入操作。如果某个 VNODE 因为某些原因(比如网络问题或磁盘故障)导致写入失败,并不会影响其他 VNODE 节点的写入。
|
||||||
7. 主键列值必须指定且不能为 NULL。
|
7. 主键列值必须指定且不能为 NULL。
|
||||||
|
|
||||||
**正常语法说明**
|
#### 正常语法说明
|
||||||
|
|
||||||
1. USING 子句是自动建表语法。如果用户在写数据时并不确定某个表是否存在,此时可以在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表。自动建表时,要求必须以超级表为模板,并写明数据表的 TAGS 取值。可以只是指定部分 TAGS 列的取值,未被指定的 TAGS 列将置为 NULL。
|
1. USING 子句是自动建表语法。如果用户在写数据时并不确定某个表是否存在,此时可以在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表。自动建表时,要求必须以超级表为模板,并写明数据表的 TAGS 取值。可以只是指定部分 TAGS 列的取值,未被指定的 TAGS 列将置为 NULL。
|
||||||
|
|
||||||
2. 可以使用 `INSERT ... subquery` 语句将 TDengine 中的数据插入到指定表中。subquery 可以是任意的查询语句。此语法只能用于子表和普通表,且不支持自动建表。
|
2. 可以使用 `INSERT ... subquery` 语句将 TDengine 中的数据插入到指定表中。subquery 可以是任意的查询语句。此语法只能用于子表和普通表,且不支持自动建表。
|
||||||
|
|
||||||
**超级表语法说明**
|
#### 超级表语法说明
|
||||||
|
|
||||||
1. 在 field_name 列表中必须指定 tbname 列,否则报错. tbname列是子表名, 类型是字符串. 其中字符不用转义, 不能包含点‘.‘
|
1. 在 field_name 列表中必须指定 tbname 列,否则报错. tbname列是子表名, 类型是字符串. 其中字符不用转义, 不能包含点‘.‘
|
||||||
|
|
||||||
2. 在 field_name 列表中支持标签列,当子表已经存在时,指定标签值并不会触发标签值的修改;当子表不存在时会使用所指定的标签值建立子表. 如果没有指定任何标签列,则把所有标签列的值设置为NULL
|
2. 在 field_name 列表中支持标签列,当子表已经存在时,指定标签值并不会触发标签值的修改;当子表不存在时会使用所指定的标签值建立子表. 如果没有指定任何标签列,则把所有标签列的值设置为NULL
|
||||||
|
|
||||||
3. 不支持参数绑定写入
|
3. 不支持参数绑定写入
|
||||||
|
|
||||||
## 插入一条记录
|
## 插入一条记录
|
||||||
|
|
||||||
指定已经创建好的数据子表的表名,并通过 VALUES 关键字提供一行或多行数据,即可向数据库写入这些数据。例如,执行如下语句可以写入一行记录:
|
指定已经创建好的数据子表的表名,并通过 VALUES 关键字提供一行或多行数据,即可向数据库写入这些数据。例如,执行如下语句可以写入一行记录:
|
||||||
|
@ -154,15 +162,18 @@ INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/c
|
||||||
INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv'
|
INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv'
|
||||||
d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv';
|
d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv';
|
||||||
```
|
```
|
||||||
|
|
||||||
## 向超级表插入数据并自动创建子表
|
## 向超级表插入数据并自动创建子表
|
||||||
|
|
||||||
自动建表, 表名通过 tbname 列指定
|
自动建表, 表名通过 tbname 列指定
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase)
|
INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase)
|
||||||
VALUES ('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 219, 0.32)
|
VALUES ('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 219, 0.32)
|
||||||
('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:35.779', 10.15, 217, 0.33)
|
('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:35.779', 10.15, 217, 0.33)
|
||||||
('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 217, 0.33)
|
('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 217, 0.33)
|
||||||
```
|
```
|
||||||
|
|
||||||
## 通过 CSV 文件向超级表插入数据并自动创建子表
|
## 通过 CSV 文件向超级表插入数据并自动创建子表
|
||||||
|
|
||||||
根据 csv 文件内容,为 超级表创建子表,并填充相应 column 与 tag
|
根据 csv 文件内容,为 超级表创建子表,并填充相应 column 与 tag
|
||||||
|
|
|
@ -120,11 +120,25 @@ SLIDING 的向前滑动的时间不能超过一个窗口的时间范围。以下
|
||||||
SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m);
|
SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m);
|
||||||
```
|
```
|
||||||
|
|
||||||
|
INTERVAL 子句允许使用 AUTO 关键字来指定窗口偏移量,此时如果 WHERE 条件给定了明确可应用的起始时间限制,则会自动计算所需偏移量,使得从该时间点切分时间窗口;否则不生效,即:仍以 0 作为偏移量。以下是简单示例说明:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- 有起始时间限制,从 '2018-10-03 14:38:05' 切分时间窗口
|
||||||
|
SELECT COUNT(*) FROM meters WHERE _rowts >= '2018-10-03 14:38:05' INTERVAL (1m, AUTO);
|
||||||
|
|
||||||
|
-- 无起始时间限制,不生效,仍以 0 为偏移量
|
||||||
|
SELECT COUNT(*) FROM meters WHERE _rowts < '2018-10-03 15:00:00' INTERVAL (1m, AUTO);
|
||||||
|
|
||||||
|
-- 起始时间限制不明确,不生效,仍以 0 为偏移量
|
||||||
|
SELECT COUNT(*) FROM meters WHERE _rowts - voltage > 1000000;
|
||||||
|
```
|
||||||
|
|
||||||
使用时间窗口需要注意:
|
使用时间窗口需要注意:
|
||||||
|
|
||||||
- 聚合时间段的窗口宽度由关键词 INTERVAL 指定,最短时间间隔 10 毫秒(10a);并且支持偏移 offset(偏移必须小于间隔),也即时间窗口划分与“UTC 时刻 0”相比的偏移量。SLIDING 语句用于指定聚合时间段的前向增量,也即每次窗口向前滑动的时长。
|
- 聚合时间段的窗口宽度由关键词 INTERVAL 指定,最短时间间隔 10 毫秒(10a);并且支持偏移 offset(偏移必须小于间隔),也即时间窗口划分与“UTC 时刻 0”相比的偏移量。SLIDING 语句用于指定聚合时间段的前向增量,也即每次窗口向前滑动的时长。
|
||||||
- 使用 INTERVAL 语句时,除非极特殊的情况,都要求把客户端和服务端的 taos.cfg 配置文件中的 timezone 参数配置为相同的取值,以避免时间处理函数频繁进行跨时区转换而导致的严重性能影响。
|
- 使用 INTERVAL 语句时,除非极特殊的情况,都要求把客户端和服务端的 taos.cfg 配置文件中的 timezone 参数配置为相同的取值,以避免时间处理函数频繁进行跨时区转换而导致的严重性能影响。
|
||||||
- 返回的结果中时间序列严格单调递增。
|
- 返回的结果中时间序列严格单调递增。
|
||||||
|
- 使用 AUTO 作为窗口偏移量时,如果窗口宽度的单位是 d (天), n (月), w (周), y (年),比如: INTERVAL(1d, AUTO), INTERVAL(3w, AUTO),此时 TSMA 优化无法生效。如果目标表上手动创建了TSMA,语句会报错退出;这种情况下,可以显式指定 Hint SKIP_TSMA 或者不使用 AUTO 作为窗口偏移量。
|
||||||
|
|
||||||
### 状态窗口
|
### 状态窗口
|
||||||
|
|
||||||
|
|
|
@ -42,32 +42,11 @@ DROP DNODE dnode_id [force] [unsafe]
|
||||||
ALTER DNODE dnode_id dnode_option
|
ALTER DNODE dnode_id dnode_option
|
||||||
|
|
||||||
ALTER ALL DNODES dnode_option
|
ALTER ALL DNODES dnode_option
|
||||||
|
|
||||||
dnode_option: {
|
|
||||||
'resetLog'
|
|
||||||
| 'balance' 'value'
|
|
||||||
| 'monitor' 'value'
|
|
||||||
| 'debugFlag' 'value'
|
|
||||||
| 'monDebugFlag' 'value'
|
|
||||||
| 'vDebugFlag' 'value'
|
|
||||||
| 'mDebugFlag' 'value'
|
|
||||||
| 'cDebugFlag' 'value'
|
|
||||||
| 'httpDebugFlag' 'value'
|
|
||||||
| 'qDebugflag' 'value'
|
|
||||||
| 'sdbDebugFlag' 'value'
|
|
||||||
| 'uDebugFlag' 'value'
|
|
||||||
| 'tsdbDebugFlag' 'value'
|
|
||||||
| 'sDebugflag' 'value'
|
|
||||||
| 'rpcDebugFlag' 'value'
|
|
||||||
| 'dDebugFlag' 'value'
|
|
||||||
| 'mqttDebugFlag' 'value'
|
|
||||||
| 'wDebugFlag' 'value'
|
|
||||||
| 'tmrDebugFlag' 'value'
|
|
||||||
| 'cqDebugFlag' 'value'
|
|
||||||
}
|
|
||||||
```
|
```
|
||||||
|
|
||||||
上面语法中的这些可修改配置项其配置方式与 dnode 配置文件中的配置方式相同,区别是修改是动态的立即生效,且不需要重启 dnode。
|
对于支持动态修改的配置参数,您可以使用 ALTER DNODE 或 ALTER ALL DNODES 语法修改 dnode 中配置参数的值,自 3.3.4.0 后,修改的配置参数将自动持久化,即便数据库服务重启后仍然生效。
|
||||||
|
|
||||||
|
对于一个配置参数是否支持动态修改,请您参考以下页面:[taosd 参考手册](../01-components/01-taosd.md)
|
||||||
|
|
||||||
value 是参数的值,需要是字符格式。如修改 dnode 1 的日志输出级别为 debug:
|
value 是参数的值,需要是字符格式。如修改 dnode 1 的日志输出级别为 debug:
|
||||||
|
|
||||||
|
@ -75,6 +54,18 @@ value 是参数的值,需要是字符格式。如修改 dnode 1 的日志输
|
||||||
ALTER DNODE 1 'debugFlag' '143';
|
ALTER DNODE 1 'debugFlag' '143';
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### 补充说明:
|
||||||
|
配置参数在 dnode 中被分为全局配置参数与局部配置参数,您可以查看 SHOW VARIABLES 或 SHOW DNODE dnode_id VARIABLE 中的 category 字段来确认配置参数属于全局配置参数还是局部配置参数:
|
||||||
|
1. 局部配置参数:您可以使用 ALTER DNODE 或 ALTER ALL DNODES 来更新某一个 dnode 或全部 dnodes 的局部配置参数。
|
||||||
|
2. 全局配置参数:全局配置参数要求各个 dnode 保持一致,所以您只可以使用 ALTER ALL DNODES 来更新全部 dnodes 的全局配置参数。
|
||||||
|
|
||||||
|
配置参数是否可以动态修改,有以下三种情况:
|
||||||
|
1. 支持动态修改 立即生效
|
||||||
|
2. 支持动态修改 重启生效
|
||||||
|
3. 不支持动态修改
|
||||||
|
|
||||||
|
对于重启后生效的配置参数,您可以通过 SHOW VARIABLES 或 SHOW DNODE dnode_id VARIABLE 看到修改后的值,但是需要重启数据库服务才使其生效。
|
||||||
|
|
||||||
## 添加管理节点
|
## 添加管理节点
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
|
@ -137,18 +128,12 @@ SHOW CLUSTER ALIVE;
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
ALTER LOCAL local_option
|
ALTER LOCAL local_option
|
||||||
|
|
||||||
local_option: {
|
|
||||||
'resetLog'
|
|
||||||
| 'rpcDebugFlag' 'value'
|
|
||||||
| 'tmrDebugFlag' 'value'
|
|
||||||
| 'cDebugFlag' 'value'
|
|
||||||
| 'uDebugFlag' 'value'
|
|
||||||
| 'debugFlag' 'value'
|
|
||||||
}
|
|
||||||
```
|
```
|
||||||
|
|
||||||
上面语法中的参数与在配置文件中配置客户端的用法相同,但不需要重启客户端,修改后立即生效。
|
您可以使用以上语法更该客户端的配置参数,并且不需要重启客户端,修改后立即生效。
|
||||||
|
|
||||||
|
对于一个配置参数是否支持动态修改,请您参考以下页面:[taosc 参考手册](../01-components/02-taosc.md)
|
||||||
|
|
||||||
|
|
||||||
## 查看客户端配置
|
## 查看客户端配置
|
||||||
|
|
||||||
|
|
|
@ -303,26 +303,57 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
||||||
|
|
||||||
## INS_STREAMS
|
## INS_STREAMS
|
||||||
|
|
||||||
| # | **列名** | **数据类型** | **说明** |
|
| # | **列名** | **数据类型** | **说明** |
|
||||||
| --- | :----------: | ------------ | -------------------------------------------------------------------------------------------------------------------- |
|
|:----|:-----------|:------------|:--------|
|
||||||
| 1 | stream_name | VARCHAR(64) | 流计算名称 |
|
| 1 | stream_name | VARCHAR(64) | 流计算名称 |
|
||||||
| 2 | create_time | TIMESTAMP | 创建时间 |
|
| 2 | create_time | TIMESTAMP | 创建时间 |
|
||||||
| 3 | sql | VARCHAR(1024) | 创建流计算时提供的 SQL 语句 |
|
| 3 | sql | VARCHAR(1024) | 创建流计算时提供的 SQL 语句 |
|
||||||
| 4 | status | VARCHAR(20) | 流当前状态 |
|
| 4 | status | VARCHAR(20) | 流当前状态 |
|
||||||
| 5 | source_db | VARCHAR(64) | 源数据库 |
|
| 5 | source_db | VARCHAR(64) | 源数据库 |
|
||||||
| 6 | target_db | VARCHAR(64) | 目的数据库 |
|
| 6 | target_db | VARCHAR(64) | 目的数据库 |
|
||||||
| 7 | target_table | VARCHAR(192) | 流计算写入的目标表 |
|
| 7 | target_table | VARCHAR(192) | 流计算写入的目标表 |
|
||||||
| 8 | watermark | BIGINT | watermark,详见 SQL 手册流式计算。需要注意,`watermark` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
| 8 | watermark | BIGINT | watermark,详见 SQL 手册流式计算。需要注意,`watermark` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||||
| 9 | trigger | INT | 计算结果推送模式,详见 SQL 手册流式计算。需要注意,`trigger` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
| 9 | trigger | INT | 计算结果推送模式,详见 SQL 手册流式计算。需要注意,`trigger` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||||
|
|
||||||
## INS_USER_PRIVILEGES
|
## INS_USER_PRIVILEGES
|
||||||
|
|
||||||
注:SYSINFO 属性为 0 的用户不能查看此表。
|
注:SYSINFO 属性为 0 的用户不能查看此表。
|
||||||
|
|
||||||
| # | **列名** | **数据类型** | **说明** |
|
| # | **列名** | **数据类型** | **说明** |
|
||||||
| --- | :----------: | ------------ | -------------------------------------------------------------------------------------------------------------------- |
|
|:----|:-----------|:------------|:--------|
|
||||||
| 1 | user_name | VARCHAR(24) | 用户名
|
| 1 | user_name | VARCHAR(24) | 用户名
|
||||||
| 2 | privilege | VARCHAR(10) | 权限描述
|
| 2 | privilege | VARCHAR(10) | 权限描述
|
||||||
| 3 | db_name | VARCHAR(65) | 数据库名称
|
| 3 | db_name | VARCHAR(65) | 数据库名称
|
||||||
| 4 | table_name | VARCHAR(193) | 表名称
|
| 4 | table_name | VARCHAR(193) | 表名称
|
||||||
| 5 | condition | VARCHAR(49152) | 子表权限过滤条件
|
| 5 | condition | VARCHAR(49152) | 子表权限过滤条件
|
||||||
|
|
||||||
|
## INS_DISK_USAGE
|
||||||
|
|
||||||
|
| # | **列名** | **数据类型** | **说明** |
|
||||||
|
|:----|:-----------|:------------|:--------|
|
||||||
|
| 1 | db_name | VARCHAR(32) | 数据库名称 |
|
||||||
|
| 2 | vgroup_id | INT | vgroup 的 ID |
|
||||||
|
| 3 | wal | BIGINT | wal 文件大小, 单位为 K |
|
||||||
|
| 4 | data1 | BIGINT | 一级存储上数据文件的大小,单位为KB |
|
||||||
|
| 5 | data2 | BIGINT | 二级存储上数据文件的大小,单位为 KB |
|
||||||
|
| 6 | data3 | BIGINT | 三级存储上数据文件的大小, 单位为KB |
|
||||||
|
| 7 | cache_rdb | BIGINT | last/last_row 文件的大小,单位为KB |
|
||||||
|
| 8 | table_meta | BIGINT | meta 文件的大小, 单位为KB |
|
||||||
|
| 9 | s3 | BIGINT | s3 上占用的大小, 单位为KB |
|
||||||
|
| 10 | raw_data | BIGINT | 预估的原始数据的大小, 单位为KB |
|
||||||
|
|
||||||
|
|
||||||
|
## INS_FILESETS
|
||||||
|
|
||||||
|
提供当前数据存储的文件组的相关信息。
|
||||||
|
|
||||||
|
| # | **列名** | **数据类型** | **说明** |
|
||||||
|
| --- | :-----------: | ------------ | --------------------------------------- |
|
||||||
|
| 1 | db_name | VARCHAR(65) | 数据库名 |
|
||||||
|
| 2 | vgroup_id | INT | vgroup id |
|
||||||
|
| 3 | fileset_id | INT | 文件组 id |
|
||||||
|
| 4 | start_time | TIMESTAMP | 文件组的覆盖数据的开始时间 |
|
||||||
|
| 5 | end_time | TIMESTAMP | 文件组的覆盖数据的结束时间 |
|
||||||
|
| 6 | total_size | BIGINT | 文件组的总大小 |
|
||||||
|
| 7 | last_compact | TIMESTAMP | 最后一次压缩的时间 |
|
||||||
|
| 8 | shold_compact | bool | 是否需要压缩,true:需要,false:不需要 |
|
||||||
|
|
|
@ -9,7 +9,7 @@ description: 本节讲述基本的用户管理功能
|
||||||
## 创建用户
|
## 创建用户
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
CREATE USER user_name PASS 'password' [SYSINFO {1|0}];
|
CREATE USER user_name PASS 'password' [SYSINFO {1|0}] [CREATEDB {1|0}];
|
||||||
```
|
```
|
||||||
|
|
||||||
用户名最长不超过 23 个字节。
|
用户名最长不超过 23 个字节。
|
||||||
|
@ -18,6 +18,8 @@ CREATE USER user_name PASS 'password' [SYSINFO {1|0}];
|
||||||
|
|
||||||
`SYSINFO` 表示该用户是否能够查看系统信息。`1` 表示可以查看,`0` 表示无权查看。系统信息包括服务配置、dnode、vnode、存储等信息。缺省值为 `1`。
|
`SYSINFO` 表示该用户是否能够查看系统信息。`1` 表示可以查看,`0` 表示无权查看。系统信息包括服务配置、dnode、vnode、存储等信息。缺省值为 `1`。
|
||||||
|
|
||||||
|
`CREATEDB` 表示该用户是否能够创建数据库。`1` 表示可以创建,`0` 表示无权创建。缺省值为 `0`。// 从 TDengine 企业版 3.3.2.0 开始支持
|
||||||
|
|
||||||
在下面的示例中,我们创建一个密码为 `abc123!@#` 且可以查看系统信息的用户。
|
在下面的示例中,我们创建一个密码为 `abc123!@#` 且可以查看系统信息的用户。
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
|
@ -77,7 +79,7 @@ alter_user_clause: {
|
||||||
- PASS: 修改密码,后跟新密码
|
- PASS: 修改密码,后跟新密码
|
||||||
- ENABLE: 启用或禁用该用户,`1` 表示启用,`0` 表示禁用
|
- ENABLE: 启用或禁用该用户,`1` 表示启用,`0` 表示禁用
|
||||||
- SYSINFO: 允许或禁止查看系统信息,`1` 表示允许,`0` 表示禁止
|
- SYSINFO: 允许或禁止查看系统信息,`1` 表示允许,`0` 表示禁止
|
||||||
- CREATEDB: 允许或禁止创建数据库,`1` 表示允许,`0` 表示禁止
|
- CREATEDB: 允许或禁止创建数据库,`1` 表示允许,`0` 表示禁止。// 从 TDengine 企业版 3.3.2.0 开始支持
|
||||||
|
|
||||||
下面的示例禁用了名为 `test` 的用户:
|
下面的示例禁用了名为 `test` 的用户:
|
||||||
|
|
||||||
|
|
After Width: | Height: | Size: 113 KiB |
|
@ -680,12 +680,31 @@ TDengine 客户端驱动的版本号与 TDengine 服务端的版本号是一一
|
||||||
- **接口说明**:清理运行环境,应用退出前应调用。
|
- **接口说明**:清理运行环境,应用退出前应调用。
|
||||||
|
|
||||||
- `int taos_options(TSDB_OPTION option, const void * arg, ...)`
|
- `int taos_options(TSDB_OPTION option, const void * arg, ...)`
|
||||||
- **接口说明**:设置客户端选项,目前支持区域设置(`TSDB_OPTION_LOCALE`)、字符集设置(`TSDB_OPTION_CHARSET`)、时区设置(`TSDB_OPTION_TIMEZONE`)、配置文件路径设置(`TSDB_OPTION_CONFIGDIR`)。区域设置、字符集、时区默认为操作系统当前设置。
|
- **接口说明**:设置客户端选项,支持区域设置(`TSDB_OPTION_LOCALE`)、字符集设置(`TSDB_OPTION_CHARSET`)、时区设置(`TSDB_OPTION_TIMEZONE`)、配置文件路径设置(`TSDB_OPTION_CONFIGDIR`)。区域设置、字符集、时区默认为操作系统当前设置。
|
||||||
- **参数说明**:
|
- **参数说明**:
|
||||||
- `option`:[入参] 设置项类型。
|
- `option`:[入参] 设置项类型。
|
||||||
- `arg`:[入参] 设置项值。
|
- `arg`:[入参] 设置项值。
|
||||||
- **返回值**:`0`:成功,`-1`:失败。
|
- **返回值**:`0`:成功,`-1`:失败。
|
||||||
|
|
||||||
|
- `int taos_options_connection(TAOS *taos, TSDB_OPTION_CONNECTION option, const void *arg, ...)`
|
||||||
|
- **接口说明**:设置客户端连接选项,目前支持字符集设置(`TSDB_OPTION_CONNECTION_CHARSET`)、时区设置(`TSDB_OPTION_CONNECTION_TIMEZONE`)、用户 IP 设置(`TSDB_OPTION_CONNECTION_USER_IP`)、用户 APP 设置(`TSDB_OPTION_CONNECTION_USER_APP`)。
|
||||||
|
- **参数说明**:
|
||||||
|
- `taos`: [入参] taos_connect 返回的连接句柄。
|
||||||
|
- `option`:[入参] 设置项类型。
|
||||||
|
- `arg`:[入参] 设置项值。
|
||||||
|
- **返回值**:`0`:成功,`非0`:失败。
|
||||||
|
- **说明**:
|
||||||
|
- 字符集、时区默认为操作系统当前设置,windows 不支持连接级别的时区设置。
|
||||||
|
- arg 为 NULL 时表示重置该选项。
|
||||||
|
- 该接口只对当前连接有效,不会影响其他连接。
|
||||||
|
- 同样参数多次调用该接口,以后面的为准,可以作为修改的方法。
|
||||||
|
- TSDB_OPTION_CONNECTION_CLEAR 选项用于重置所有连接选项。
|
||||||
|
- 时区和字符集重置后,使用系统的设置,user ip 和 user app 重置后为空。
|
||||||
|
- 连接选项的值都是 string 类型,user app 参数值最大长度为 23,超过该长度会被截断;其他参数非法时报错。
|
||||||
|
- 时区配置找不到时区文件或者不能按照规范解释时,默认为 UTC,和操作系统时区规则相同,详见 tzset 函数说明。可通过 select timezone() 查看当前连接的时区。
|
||||||
|
- 时区和字符集只在 client 侧起作用,对于在服务端的相关行为不起作用。
|
||||||
|
- 时区文件使用操作系统时区文件,可以自行更新操作系统时区文件。如果设置时区报错,请检查是否有时区文件或路径(mac:/var/db/timezone/zoneinfo, linux:/usr/share/zoneinfo)是否正确。
|
||||||
|
|
||||||
- `char *taos_get_client_info()`
|
- `char *taos_get_client_info()`
|
||||||
- **接口说明**:获取客户端版本信息。
|
- **接口说明**:获取客户端版本信息。
|
||||||
- **返回值**:返回客户端版本信息。
|
- **返回值**:返回客户端版本信息。
|
||||||
|
|
|
@ -12,16 +12,25 @@ description: TDengine 服务端的错误码列表和详细说明
|
||||||
## rpc
|
## rpc
|
||||||
|
|
||||||
| 错误码 | 错误描述 | 可能的出错场景或者可能的原因 | 建议用户采取的措施 |
|
| 错误码 | 错误描述 | 可能的出错场景或者可能的原因 | 建议用户采取的措施 |
|
||||||
| ---------- | -----------------------------| --- | --- |
|
| ---------- | -----------------------------| --------- | ------- |
|
||||||
| 0x8000000B | Unable to establish connection | 1.网络不通 2.多次重试、依然不能执行请求 | 1.检查网络 2.分析日志,具体原因比较复杂 |
|
| 0x8000000B | Unable to establish connection | 1.网络不通 2.多次重试、依然不能执行请求 | 1.检查网络 2.分析日志,具体原因比较复杂 |
|
||||||
| 0x80000013 | Client and server's time is not synchronized | 1.客户端和服务端不在同一个时区 2.客户端和服务端在同一个时区,但是两者的时间不同步,相差超过 900 秒 | 1.调整到同一个时区 2.校准客户端和服务端的时间|
|
| 0x80000013 | Client and server's time is not synchronized | 1.客户端和服务端不在同一个时区 2.客户端和服务端在同一个时区,但是两者的时间不同步,相差超过 900 秒 | 1.调整到同一个时区 2.校准客户端和服务端的时间|
|
||||||
| 0x80000015 | Unable to resolve FQDN | 设置了无效的 fqdn | 检查fqdn 的设置 |
|
| 0x80000015 | Unable to resolve FQDN | 设置了无效的 fqdn | 检查fqdn 的设置 |
|
||||||
| 0x80000017 | Port already in use | 端口已经被某个服务占用的情况下,新启的服务依然尝试绑定该端口 | 1.改动新服务的服务端口 2.杀死之前占用端口的服务 |
|
| 0x80000017 | Port already in use | 端口已经被某个服务占用的情况下,新启的服务依然尝试绑定该端口 | 1.改动新服务的服务端口 2.杀死之前占用端口的服务 |
|
||||||
| 0x80000018 | Conn is broken | 由于网络抖动或者请求时间过长(超过 900 秒),导致系统主动摘掉连接 | 1.设置系统的最大超时时长 2.检查请求时长 |
|
| 0x80000018 | Conn is broken | 由于网络抖动或者请求时间过长(超过 900 秒),导致系统主动摘掉连接 | 1.设置系统的最大超时时长 2.检查请求时长 |
|
||||||
| 0x80000019 | Conn read timeout | 未启用 | |
|
| 0x80000019 | Conn read timeout | 1.请求是否处理时间过长 2. 服务端处理不过来 3. 服务端已经死锁| 1. 显式配置readTimeout参数,2. 分析taosd上堆栈 |
|
||||||
| 0x80000020 | some vnode/qnode/mnode(s) out of service | 多次重试之后,仍然无法连接到集群,可能是所有的节点都宕机了,或者存活的节点不是 Leader 节点 | 1.查看 taosd 的状态、分析 taosd 宕机的原因 2.分析存活的 taosd 为什么无法选取 Leader |
|
| 0x80000020 | some vnode/qnode/mnode(s) out of service | 多次重试之后,仍然无法连接到集群,可能是所有的节点都宕机了,或者存活的节点不是 Leader 节点 | 1.查看 taosd 的状态、分析 taosd 宕机的原因 2.分析存活的 taosd 为什么无法选取 Leader |
|
||||||
| 0x80000021 | some vnode/qnode/mnode(s) conn is broken | 多次重试之后,仍然无法连接到集群,可能是网络异常、请求时间太长、服务端死锁等问题 | 1.检查网络 2.请求的执行时间 |
|
| 0x80000021 | some vnode/qnode/mnode(s) conn is broken | 多次重试之后,仍然无法连接到集群,可能是网络异常、请求时间太长、服务端死锁等问题 | 1.检查网络 2.请求的执行时间 |
|
||||||
| 0x80000022 | rpc open too many session | 1.并发太高导致占用链接已经到达上限 2.服务端的 BUG,导致连接一直不释放 | 1.调整配置参数 numOfRpcSessions 2.调整配置参数 timeToGetAvailableConn 3.分析服务端不释放的连接的原因 |
|
| 0x80000022 | rpc open too many session | 1.并发太高导致占用链接已经到达上限 2.服务端的 BUG,导致连接一直不释放 | 1.调整配置参数 numOfRpcSessions 2.调整配置参数 timeToGetAvailableConn 3.分析服务端不释放的连接的原因 |
|
||||||
|
| 0x80000023 | rpc network error | 1. 网络问题,可能是闪断,2. 服务端crash | 1. 检查网络 2. 检查服务端是否重启|
|
||||||
|
| 0x80000024 |rpc network bus | 1.集群间互相拉数据的时候,没有拿到可用链接,或者链接数目已经到上限 | 1.是否并发太高 2. 检查集群各个节点是否有异常,是否出现了死锁等情况|
|
||||||
|
| 0x80000025 | http-report already quit | 1. http上报出现的问题| 内部问题,可以忽略|
|
||||||
|
| 0x80000026 | rpc module already quit | 1.客户端实例已经退出,依然用该实例做查询 | 检查业务代码,是否用错|
|
||||||
|
| 0x80000027 | rpc async module already quit | 1. 引擎错误, 可以忽略, 该错误码不会返回到用户侧| 如果返回到用户侧, 需要引擎侧追查问题|
|
||||||
|
| 0x80000028 | rpc async in proces | 1. 引擎错误, 可以忽略, 该错误码不会返回到用户侧 | 如果返回到用户侧, 需要引擎侧追查问题|
|
||||||
|
| 0x80000029 | rpc no state | 1. 引擎错误, 可以忽略, 该错误码不会返回到用户侧 | 如果返回到用户侧, 需要引擎侧追查问题 |
|
||||||
|
| 0x8000002A | rpc state already dropped | 1. 引擎错误, 可以忽略, 该错误码不会返回到用户侧 | 如果返回到用户侧, 需要引擎侧追查问题|
|
||||||
|
| 0x8000002B | rpc msg exceed limit | 1. 单个rpc 消息超过上限,该错误码不会返回到用户侧 | 如果返回到用户侧, 需要引擎侧追查问题|
|
||||||
|
|
||||||
|
|
||||||
## common
|
## common
|
||||||
|
@ -66,6 +75,7 @@ description: TDengine 服务端的错误码列表和详细说明
|
||||||
| 0x80000133 | Invalid operation | 无效的或不支持的操作 | 1. 修改确认当前操作为合法有效支持的操作,检查参数有效性 2. 如果问题还未解决,保留现场和日志,github上报issue |
|
| 0x80000133 | Invalid operation | 无效的或不支持的操作 | 1. 修改确认当前操作为合法有效支持的操作,检查参数有效性 2. 如果问题还未解决,保留现场和日志,github上报issue |
|
||||||
| 0x80000134 | Invalid value | 无效值 | 保留现场和日志,github上报issue |
|
| 0x80000134 | Invalid value | 无效值 | 保留现场和日志,github上报issue |
|
||||||
| 0x80000135 | Invalid fqdn | 无效FQDN | 检查配置或输入的FQDN值是否正确 |
|
| 0x80000135 | Invalid fqdn | 无效FQDN | 检查配置或输入的FQDN值是否正确 |
|
||||||
|
| 0x8000013C | Invalid disk id | 不合法的disk id | 建议用户检查挂载磁盘是否失效或者使用参数 diskIDCheckEnabled 来跳过磁盘检查 |
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
@ -252,6 +262,7 @@ description: TDengine 服务端的错误码列表和详细说明
|
||||||
| 0x80000529 | Vnode is stopped | Vnode 已经关闭 | 上报问题 |
|
| 0x80000529 | Vnode is stopped | Vnode 已经关闭 | 上报问题 |
|
||||||
| 0x80000530 | Duplicate write request | 重复写入请求,内部错误 | 上报问题 |
|
| 0x80000530 | Duplicate write request | 重复写入请求,内部错误 | 上报问题 |
|
||||||
| 0x80000531 | Vnode query is busy | 查询忙碌 | 上报问题 |
|
| 0x80000531 | Vnode query is busy | 查询忙碌 | 上报问题 |
|
||||||
|
| 0x80000540 | Vnode already exist but Dbid not match | 内部错误 | 上报问题 |
|
||||||
|
|
||||||
|
|
||||||
## tsdb
|
## tsdb
|
||||||
|
@ -285,6 +296,9 @@ description: TDengine 服务端的错误码列表和详细说明
|
||||||
| 0x80000729 | Task message error | 查询消息错误 | 保留现场和日志,github上报issue |
|
| 0x80000729 | Task message error | 查询消息错误 | 保留现场和日志,github上报issue |
|
||||||
| 0x8000072B | Task status error | 子查询状态错误 | 保留现场和日志,github上报issue |
|
| 0x8000072B | Task status error | 子查询状态错误 | 保留现场和日志,github上报issue |
|
||||||
| 0x8000072F | Job not exist | 查询JOB已经不存在 | 保留现场和日志,github上报issue |
|
| 0x8000072F | Job not exist | 查询JOB已经不存在 | 保留现场和日志,github上报issue |
|
||||||
|
| 0x80000739 | Query memory upper limit is reached | 单个查询达到内存使用上限 | 设置合理的内存上限或调整 SQL 语句 |
|
||||||
|
| 0x8000073A | Query memory exhausted | dnode查询内存到达使用上限 | 设置合理的内存上限或调整并发查询量或增大系统内存 |
|
||||||
|
| 0x8000073B | Timeout for long time no fetch | 查询被长时间中断未恢复 | 调整应用实现尽快 fetch 数据 |
|
||||||
|
|
||||||
## grant
|
## grant
|
||||||
|
|
||||||
|
@ -514,6 +528,7 @@ description: TDengine 服务端的错误码列表和详细说明
|
||||||
| 0x80003103 | Invalid tsma state | 流计算下发结果的 vgroup 与创建 TSMA index 的 vgroup 不一致 | 检查错误日志,联系开发处理 |
|
| 0x80003103 | Invalid tsma state | 流计算下发结果的 vgroup 与创建 TSMA index 的 vgroup 不一致 | 检查错误日志,联系开发处理 |
|
||||||
| 0x80003104 | Invalid tsma pointer | 在处理写入流计算下发的结果,消息体为空指针。 | 检查错误日志,联系开发处理 |
|
| 0x80003104 | Invalid tsma pointer | 在处理写入流计算下发的结果,消息体为空指针。 | 检查错误日志,联系开发处理 |
|
||||||
| 0x80003105 | Invalid tsma parameters | 在处理写入流计算下发的结果,结果数量为0。 | 检查错误日志,联系开发处理 |
|
| 0x80003105 | Invalid tsma parameters | 在处理写入流计算下发的结果,结果数量为0。 | 检查错误日志,联系开发处理 |
|
||||||
|
| 0x80003113 | Tsma optimization cannot be applied with INTERVAL AUTO offset. | 当前查询条件下使用 INTERVAL AUTO OFFSET 无法启用 tsma 优化。 | 使用 SKIP_TSMA Hint 或者手动指定 INTERVAL OFFSET。 |
|
||||||
| 0x80003150 | Invalid rsma env | Rsma 执行环境异常。 | 检查错误日志,联系开发处理 |
|
| 0x80003150 | Invalid rsma env | Rsma 执行环境异常。 | 检查错误日志,联系开发处理 |
|
||||||
| 0x80003151 | Invalid rsma state | Rsma 执行状态异常。 | 检查错误日志,联系开发处理 |
|
| 0x80003151 | Invalid rsma state | Rsma 执行状态异常。 | 检查错误日志,联系开发处理 |
|
||||||
| 0x80003152 | Rsma qtaskinfo creation error | 创建流计算环境异常。 | 检查错误日志,联系开发处理 |
|
| 0x80003152 | Rsma qtaskinfo creation error | 创建流计算环境异常。 | 检查错误日志,联系开发处理 |
|
||||||
|
|
|
@ -293,6 +293,14 @@ TDengine 采纳了一种独特的时间驱动缓存管理策略,亦称为写
|
||||||
|
|
||||||
此外,考虑到物联网数据的特点,用户通常最关注的是数据的实时性,即最新产生的数据。TDengine 很好地利用了这一特点,优先将最新到达的(即当前状态)数据存储在缓存中。具体而言,TDengine 会将最新到达的数据直接存入缓存,以便快速响应用户对最新一条或多条数据的查询和分析需求,从而在整体上提高数据库查询的响应速度。从这个角度来看,通过合理设置数据库参数,TDengine 完全可以作为数据缓存来使用,这样就无须再部署 Redis 或其他额外的缓存系统。这种做法不仅有效简化了系统架构,还有助于降低运维成本。需要注意的是,一旦 TDengine 重启,缓存中的数据将被清除,所有先前缓存的数据都会被批量写入硬盘,而不会像专业的 Key-Value 缓存系统那样自动将之前缓存的数据重新加载回缓存。
|
此外,考虑到物联网数据的特点,用户通常最关注的是数据的实时性,即最新产生的数据。TDengine 很好地利用了这一特点,优先将最新到达的(即当前状态)数据存储在缓存中。具体而言,TDengine 会将最新到达的数据直接存入缓存,以便快速响应用户对最新一条或多条数据的查询和分析需求,从而在整体上提高数据库查询的响应速度。从这个角度来看,通过合理设置数据库参数,TDengine 完全可以作为数据缓存来使用,这样就无须再部署 Redis 或其他额外的缓存系统。这种做法不仅有效简化了系统架构,还有助于降低运维成本。需要注意的是,一旦 TDengine 重启,缓存中的数据将被清除,所有先前缓存的数据都会被批量写入硬盘,而不会像专业的 Key-Value 缓存系统那样自动将之前缓存的数据重新加载回缓存。
|
||||||
|
|
||||||
|
### last/last_row 缓存
|
||||||
|
|
||||||
|
在时序数据的场景中,查询表的最后一条记录(last_row)或最后一条非 NULL 记录(last)是一个常见的需求。为了提高 TDengine 对这种查询的响应速度,TSDB 为每张表的 last 和 last_row 数据提供了 LRU 缓存。LRU 缓存采用延迟加载策略,当首次查询某张表的 last 或 last_row 时,缓存模块会去内存池和磁盘文件加载数据,处理后放入LRU 缓存,并返回给查询模块继续处理;当有新的数据插入或删除时,如果缓存需要更新,会进行相应的更新操作;如果缓存中没有当前被写入表的数据,则直接跳过,无需其它操作。
|
||||||
|
|
||||||
|
此外在缓存配置更新的时候,也会更新缓存数据。比如,缓存功能默认是关闭的,用户使用命令开启缓存功能之后,就会在首次查询时加载数据;当关闭缓存开关时,会释放之前的缓存区。当查询某一个子表的 last 或 last_row 数据时,如果缓存中没有,则从内存池和磁盘文件加载对应的 last 或 last_row 数据到缓存中;当查询某一个超级表的 last 或 last_row 数据时,这个超级表对应的所有子表都需要加载到缓存中。
|
||||||
|
|
||||||
|
通过数据库参数 cachemodel 可以配置某一个数据库的缓存参数,默认值为 "none",表示不开启缓存,另外三个值为 "last_row","last_value","both";分别是开启 last_row 缓存,开启 last 缓存,和两个同时开启。缓存当前所使用的内存数量,可在通过 show vgroups; 命令,在 cacheload 列中进行查看,单位为字节。
|
||||||
|
|
||||||
### 持久化存储
|
### 持久化存储
|
||||||
|
|
||||||
TDengine 采用了一种数据驱动的策略来实现缓存数据的持久化存储。当 vnode 中的缓存数据积累到一定量时,为了避免阻塞后续数据的写入,TDengine 会启动落盘线程,将这些缓存数据写入持久化存储设备。在此过程中,TDengine 会创建新的数据库日志文件用于数据落盘,并在落盘成功后删除旧的日志文件,以防止日志文件无限制增长。
|
TDengine 采用了一种数据驱动的策略来实现缓存数据的持久化存储。当 vnode 中的缓存数据积累到一定量时,为了避免阻塞后续数据的写入,TDengine 会启动落盘线程,将这些缓存数据写入持久化存储设备。在此过程中,TDengine 会创建新的数据库日志文件用于数据落盘,并在落盘成功后删除旧的日志文件,以防止日志文件无限制增长。
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
PROJECT(TDengine)
|
PROJECT(TDengine)
|
||||||
|
|
||||||
IF (TD_LINUX)
|
IF(TD_LINUX)
|
||||||
INCLUDE_DIRECTORIES(. ${TD_SOURCE_DIR}/src/inc ${TD_SOURCE_DIR}/src/client/inc ${TD_SOURCE_DIR}/inc)
|
INCLUDE_DIRECTORIES(. ${TD_SOURCE_DIR}/src/inc ${TD_SOURCE_DIR}/src/client/inc ${TD_SOURCE_DIR}/inc)
|
||||||
AUX_SOURCE_DIRECTORY(. SRC)
|
AUX_SOURCE_DIRECTORY(. SRC)
|
||||||
|
|
||||||
add_executable(tmq "")
|
add_executable(tmq "")
|
||||||
|
@ -12,58 +12,58 @@ IF (TD_LINUX)
|
||||||
add_executable(asyncdemo "")
|
add_executable(asyncdemo "")
|
||||||
|
|
||||||
target_sources(tmq
|
target_sources(tmq
|
||||||
PRIVATE
|
PRIVATE
|
||||||
"tmq.c"
|
"tmq.c"
|
||||||
)
|
)
|
||||||
|
|
||||||
target_sources(stream_demo
|
target_sources(stream_demo
|
||||||
PRIVATE
|
PRIVATE
|
||||||
"stream_demo.c"
|
"stream_demo.c"
|
||||||
)
|
)
|
||||||
|
|
||||||
target_sources(schemaless
|
target_sources(schemaless
|
||||||
PRIVATE
|
PRIVATE
|
||||||
"schemaless.c"
|
"schemaless.c"
|
||||||
)
|
)
|
||||||
|
|
||||||
target_sources(prepare
|
target_sources(prepare
|
||||||
PRIVATE
|
PRIVATE
|
||||||
"prepare.c"
|
"prepare.c"
|
||||||
)
|
)
|
||||||
|
|
||||||
target_sources(demo
|
target_sources(demo
|
||||||
PRIVATE
|
PRIVATE
|
||||||
"demo.c"
|
"demo.c"
|
||||||
)
|
)
|
||||||
|
|
||||||
target_sources(asyncdemo
|
target_sources(asyncdemo
|
||||||
PRIVATE
|
PRIVATE
|
||||||
"asyncdemo.c"
|
"asyncdemo.c"
|
||||||
)
|
)
|
||||||
|
|
||||||
target_link_libraries(tmq
|
target_link_libraries(tmq
|
||||||
taos
|
${TAOS_LIB}
|
||||||
)
|
)
|
||||||
|
|
||||||
target_link_libraries(stream_demo
|
target_link_libraries(stream_demo
|
||||||
taos
|
${TAOS_LIB}
|
||||||
)
|
)
|
||||||
|
|
||||||
target_link_libraries(schemaless
|
target_link_libraries(schemaless
|
||||||
taos
|
${TAOS_LIB}
|
||||||
)
|
)
|
||||||
|
|
||||||
target_link_libraries(prepare
|
target_link_libraries(prepare
|
||||||
taos
|
${TAOS_LIB}
|
||||||
)
|
)
|
||||||
|
|
||||||
target_link_libraries(demo
|
target_link_libraries(demo
|
||||||
taos
|
${TAOS_LIB}
|
||||||
)
|
)
|
||||||
|
|
||||||
target_link_libraries(asyncdemo
|
target_link_libraries(asyncdemo
|
||||||
taos
|
${TAOS_LIB}
|
||||||
)
|
)
|
||||||
|
|
||||||
SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq)
|
SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq)
|
||||||
SET_TARGET_PROPERTIES(stream_demo PROPERTIES OUTPUT_NAME stream_demo)
|
SET_TARGET_PROPERTIES(stream_demo PROPERTIES OUTPUT_NAME stream_demo)
|
||||||
|
@ -71,8 +71,9 @@ IF (TD_LINUX)
|
||||||
SET_TARGET_PROPERTIES(prepare PROPERTIES OUTPUT_NAME prepare)
|
SET_TARGET_PROPERTIES(prepare PROPERTIES OUTPUT_NAME prepare)
|
||||||
SET_TARGET_PROPERTIES(demo PROPERTIES OUTPUT_NAME demo)
|
SET_TARGET_PROPERTIES(demo PROPERTIES OUTPUT_NAME demo)
|
||||||
SET_TARGET_PROPERTIES(asyncdemo PROPERTIES OUTPUT_NAME asyncdemo)
|
SET_TARGET_PROPERTIES(asyncdemo PROPERTIES OUTPUT_NAME asyncdemo)
|
||||||
ENDIF ()
|
ENDIF()
|
||||||
IF (TD_DARWIN)
|
|
||||||
INCLUDE_DIRECTORIES(. ${TD_SOURCE_DIR}/src/inc ${TD_SOURCE_DIR}/src/client/inc ${TD_SOURCE_DIR}/inc)
|
IF(TD_DARWIN)
|
||||||
|
INCLUDE_DIRECTORIES(. ${TD_SOURCE_DIR}/src/inc ${TD_SOURCE_DIR}/src/client/inc ${TD_SOURCE_DIR}/inc)
|
||||||
AUX_SOURCE_DIRECTORY(. SRC)
|
AUX_SOURCE_DIRECTORY(. SRC)
|
||||||
ENDIF ()
|
ENDIF()
|
||||||
|
|
|
@ -64,6 +64,15 @@ typedef enum {
|
||||||
TSDB_MAX_OPTIONS
|
TSDB_MAX_OPTIONS
|
||||||
} TSDB_OPTION;
|
} TSDB_OPTION;
|
||||||
|
|
||||||
|
typedef enum {
|
||||||
|
TSDB_OPTION_CONNECTION_CLEAR = -1, // means clear all option in this connection
|
||||||
|
TSDB_OPTION_CONNECTION_CHARSET, // charset, Same as the scope supported by the system
|
||||||
|
TSDB_OPTION_CONNECTION_TIMEZONE, // timezone, Same as the scope supported by the system
|
||||||
|
TSDB_OPTION_CONNECTION_USER_IP, // user ip
|
||||||
|
TSDB_OPTION_CONNECTION_USER_APP, // user app, max lengthe is 23, truncated if longer than 23
|
||||||
|
TSDB_MAX_OPTIONS_CONNECTION
|
||||||
|
} TSDB_OPTION_CONNECTION;
|
||||||
|
|
||||||
typedef enum {
|
typedef enum {
|
||||||
TSDB_SML_UNKNOWN_PROTOCOL = 0,
|
TSDB_SML_UNKNOWN_PROTOCOL = 0,
|
||||||
TSDB_SML_LINE_PROTOCOL = 1,
|
TSDB_SML_LINE_PROTOCOL = 1,
|
||||||
|
@ -108,7 +117,7 @@ typedef struct TAOS_FIELD_STB {
|
||||||
uint8_t precision;
|
uint8_t precision;
|
||||||
uint8_t scale;
|
uint8_t scale;
|
||||||
int32_t bytes;
|
int32_t bytes;
|
||||||
TAOS_FIELD_T field_type;
|
uint8_t field_type;
|
||||||
} TAOS_FIELD_STB;
|
} TAOS_FIELD_STB;
|
||||||
|
|
||||||
#ifdef WINDOWS
|
#ifdef WINDOWS
|
||||||
|
@ -174,11 +183,12 @@ typedef struct TAOS_STMT_OPTIONS {
|
||||||
|
|
||||||
DLL_EXPORT void taos_cleanup(void);
|
DLL_EXPORT void taos_cleanup(void);
|
||||||
DLL_EXPORT int taos_options(TSDB_OPTION option, const void *arg, ...);
|
DLL_EXPORT int taos_options(TSDB_OPTION option, const void *arg, ...);
|
||||||
|
DLL_EXPORT int taos_options_connection(TAOS *taos, TSDB_OPTION_CONNECTION option, const void *arg, ...);
|
||||||
DLL_EXPORT setConfRet taos_set_config(const char *config);
|
DLL_EXPORT setConfRet taos_set_config(const char *config);
|
||||||
DLL_EXPORT int taos_init(void);
|
DLL_EXPORT int taos_init(void);
|
||||||
DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port);
|
DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port);
|
||||||
DLL_EXPORT TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port);
|
DLL_EXPORT TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port);
|
||||||
DLL_EXPORT void taos_close(TAOS *taos);
|
DLL_EXPORT void taos_close(TAOS *taos);
|
||||||
|
|
||||||
DLL_EXPORT const char *taos_data_type(int type);
|
DLL_EXPORT const char *taos_data_type(int type);
|
||||||
|
|
||||||
|
|
|
@ -44,7 +44,7 @@ typedef struct {
|
||||||
|
|
||||||
char file_path[TSDB_FILENAME_LEN]; // local file path
|
char file_path[TSDB_FILENAME_LEN]; // local file path
|
||||||
int64_t file_size; // local file size, for upload
|
int64_t file_size; // local file size, for upload
|
||||||
int32_t file_last_modified; // local file last modified time, for upload
|
int64_t file_last_modified; // local file last modified time, for upload
|
||||||
char file_md5[64]; // md5 of the local file content, for upload, reserved
|
char file_md5[64]; // md5 of the local file content, for upload, reserved
|
||||||
|
|
||||||
char object_name[128]; // object name
|
char object_name[128]; // object name
|
||||||
|
@ -67,9 +67,9 @@ int32_t cos_cp_load(char const* filepath, SCheckpoint* checkpoint);
|
||||||
int32_t cos_cp_dump(SCheckpoint* checkpoint);
|
int32_t cos_cp_dump(SCheckpoint* checkpoint);
|
||||||
void cos_cp_get_undo_parts(SCheckpoint* checkpoint, int* part_num, SCheckpointPart* parts, int64_t* consume_bytes);
|
void cos_cp_get_undo_parts(SCheckpoint* checkpoint, int* part_num, SCheckpointPart* parts, int64_t* consume_bytes);
|
||||||
void cos_cp_update(SCheckpoint* checkpoint, int32_t part_index, char const* etag, uint64_t crc64);
|
void cos_cp_update(SCheckpoint* checkpoint, int32_t part_index, char const* etag, uint64_t crc64);
|
||||||
void cos_cp_build_upload(SCheckpoint* checkpoint, char const* filepath, int64_t size, int32_t mtime,
|
void cos_cp_build_upload(SCheckpoint* checkpoint, char const* filepath, int64_t size, int64_t mtime,
|
||||||
char const* upload_id, int64_t part_size);
|
char const* upload_id, int64_t part_size);
|
||||||
bool cos_cp_is_valid_upload(SCheckpoint* checkpoint, int64_t size, int32_t mtime);
|
bool cos_cp_is_valid_upload(SCheckpoint* checkpoint, int64_t size, int64_t mtime);
|
||||||
|
|
||||||
void cos_cp_build_download(SCheckpoint* checkpoint, char const* filepath, char const* object_name, int64_t object_size,
|
void cos_cp_build_download(SCheckpoint* checkpoint, char const* filepath, char const* object_name, int64_t object_size,
|
||||||
char const* object_lmtime, char const* object_etag, int64_t part_size);
|
char const* object_lmtime, char const* object_etag, int64_t part_size);
|
||||||
|
|
|
@ -17,12 +17,23 @@
|
||||||
#define TDENGINE_STREAMMSG_H
|
#define TDENGINE_STREAMMSG_H
|
||||||
|
|
||||||
#include "tmsg.h"
|
#include "tmsg.h"
|
||||||
#include "trpc.h"
|
//#include "trpc.h"
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
extern "C" {
|
extern "C" {
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
typedef struct SStreamRetrieveReq SStreamRetrieveReq;
|
||||||
|
typedef struct SStreamDispatchReq SStreamDispatchReq;
|
||||||
|
typedef struct STokenBucket STokenBucket;
|
||||||
|
typedef struct SMetaHbInfo SMetaHbInfo;
|
||||||
|
|
||||||
|
typedef struct SNodeUpdateInfo {
|
||||||
|
int32_t nodeId;
|
||||||
|
SEpSet prevEp;
|
||||||
|
SEpSet newEp;
|
||||||
|
} SNodeUpdateInfo;
|
||||||
|
|
||||||
typedef struct SStreamUpstreamEpInfo {
|
typedef struct SStreamUpstreamEpInfo {
|
||||||
int32_t nodeId;
|
int32_t nodeId;
|
||||||
int32_t childId;
|
int32_t childId;
|
||||||
|
@ -170,15 +181,19 @@ typedef struct SStreamHbMsg {
|
||||||
SArray* pUpdateNodes; // SArray<int32_t>, needs update the epsets in stream tasks for those nodes.
|
SArray* pUpdateNodes; // SArray<int32_t>, needs update the epsets in stream tasks for those nodes.
|
||||||
} SStreamHbMsg;
|
} SStreamHbMsg;
|
||||||
|
|
||||||
int32_t tEncodeStreamHbMsg(SEncoder* pEncoder, const SStreamHbMsg* pRsp);
|
int32_t tEncodeStreamHbMsg(SEncoder* pEncoder, const SStreamHbMsg* pReq);
|
||||||
int32_t tDecodeStreamHbMsg(SDecoder* pDecoder, SStreamHbMsg* pRsp);
|
int32_t tDecodeStreamHbMsg(SDecoder* pDecoder, SStreamHbMsg* pReq);
|
||||||
void tCleanupStreamHbMsg(SStreamHbMsg* pMsg);
|
void tCleanupStreamHbMsg(SStreamHbMsg* pMsg);
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
SMsgHead head;
|
SMsgHead head;
|
||||||
int32_t msgId;
|
int32_t msgId;
|
||||||
|
SEpSet mndEpset;
|
||||||
} SMStreamHbRspMsg;
|
} SMStreamHbRspMsg;
|
||||||
|
|
||||||
|
int32_t tEncodeStreamHbRsp(SEncoder* pEncoder, const SMStreamHbRspMsg* pRsp);
|
||||||
|
int32_t tDecodeStreamHbRsp(SDecoder* pDecoder, SMStreamHbRspMsg* pRsp);
|
||||||
|
|
||||||
typedef struct SRetrieveChkptTriggerReq {
|
typedef struct SRetrieveChkptTriggerReq {
|
||||||
SMsgHead head;
|
SMsgHead head;
|
||||||
int64_t streamId;
|
int64_t streamId;
|
||||||
|
@ -189,6 +204,9 @@ typedef struct SRetrieveChkptTriggerReq {
|
||||||
int64_t downstreamTaskId;
|
int64_t downstreamTaskId;
|
||||||
} SRetrieveChkptTriggerReq;
|
} SRetrieveChkptTriggerReq;
|
||||||
|
|
||||||
|
int32_t tEncodeRetrieveChkptTriggerReq(SEncoder* pEncoder, const SRetrieveChkptTriggerReq* pReq);
|
||||||
|
int32_t tDecodeRetrieveChkptTriggerReq(SDecoder* pDecoder, SRetrieveChkptTriggerReq* pReq);
|
||||||
|
|
||||||
typedef struct SCheckpointTriggerRsp {
|
typedef struct SCheckpointTriggerRsp {
|
||||||
int64_t streamId;
|
int64_t streamId;
|
||||||
int64_t checkpointId;
|
int64_t checkpointId;
|
||||||
|
@ -198,6 +216,9 @@ typedef struct SCheckpointTriggerRsp {
|
||||||
int32_t rspCode;
|
int32_t rspCode;
|
||||||
} SCheckpointTriggerRsp;
|
} SCheckpointTriggerRsp;
|
||||||
|
|
||||||
|
int32_t tEncodeCheckpointTriggerRsp(SEncoder* pEncoder, const SCheckpointTriggerRsp* pRsp);
|
||||||
|
int32_t tDecodeCheckpointTriggerRsp(SDecoder* pDecoder, SCheckpointTriggerRsp* pRsp);
|
||||||
|
|
||||||
typedef struct SCheckpointReport {
|
typedef struct SCheckpointReport {
|
||||||
int64_t streamId;
|
int64_t streamId;
|
||||||
int32_t taskId;
|
int32_t taskId;
|
||||||
|
@ -222,7 +243,7 @@ typedef struct SRestoreCheckpointInfo {
|
||||||
int32_t nodeId;
|
int32_t nodeId;
|
||||||
} SRestoreCheckpointInfo;
|
} SRestoreCheckpointInfo;
|
||||||
|
|
||||||
int32_t tEncodeRestoreCheckpointInfo (SEncoder* pEncoder, const SRestoreCheckpointInfo* pReq);
|
int32_t tEncodeRestoreCheckpointInfo(SEncoder* pEncoder, const SRestoreCheckpointInfo* pReq);
|
||||||
int32_t tDecodeRestoreCheckpointInfo(SDecoder* pDecoder, SRestoreCheckpointInfo* pReq);
|
int32_t tDecodeRestoreCheckpointInfo(SDecoder* pDecoder, SRestoreCheckpointInfo* pReq);
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
|
@ -232,10 +253,8 @@ typedef struct {
|
||||||
int32_t reqType;
|
int32_t reqType;
|
||||||
} SStreamTaskRunReq;
|
} SStreamTaskRunReq;
|
||||||
|
|
||||||
typedef struct SCheckpointConsensusEntry {
|
int32_t tEncodeStreamTaskRunReq(SEncoder* pEncoder, const SStreamTaskRunReq* pReq);
|
||||||
SRestoreCheckpointInfo req;
|
int32_t tDecodeStreamTaskRunReq(SDecoder* pDecoder, SStreamTaskRunReq* pReq);
|
||||||
int64_t ts;
|
|
||||||
} SCheckpointConsensusEntry;
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
|
@ -61,6 +61,8 @@ extern "C" {
|
||||||
#define TSDB_INS_TABLE_MACHINES "ins_machines"
|
#define TSDB_INS_TABLE_MACHINES "ins_machines"
|
||||||
#define TSDB_INS_TABLE_ENCRYPTIONS "ins_encryptions"
|
#define TSDB_INS_TABLE_ENCRYPTIONS "ins_encryptions"
|
||||||
#define TSDB_INS_TABLE_TSMAS "ins_tsmas"
|
#define TSDB_INS_TABLE_TSMAS "ins_tsmas"
|
||||||
|
#define TSDB_INS_DISK_USAGE "ins_disk_usage"
|
||||||
|
#define TSDB_INS_TABLE_FILESETS "ins_filesets"
|
||||||
|
|
||||||
#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema"
|
#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema"
|
||||||
#define TSDB_PERFS_TABLE_SMAS "perf_smas"
|
#define TSDB_PERFS_TABLE_SMAS "perf_smas"
|
||||||
|
|
|
@ -155,6 +155,7 @@ typedef enum EStreamType {
|
||||||
STREAM_MID_RETRIEVE,
|
STREAM_MID_RETRIEVE,
|
||||||
STREAM_PARTITION_DELETE_DATA,
|
STREAM_PARTITION_DELETE_DATA,
|
||||||
STREAM_GET_RESULT,
|
STREAM_GET_RESULT,
|
||||||
|
STREAM_DROP_CHILD_TABLE,
|
||||||
} EStreamType;
|
} EStreamType;
|
||||||
|
|
||||||
#pragma pack(push, 1)
|
#pragma pack(push, 1)
|
||||||
|
@ -217,9 +218,9 @@ typedef struct SDataBlockInfo {
|
||||||
} SDataBlockInfo;
|
} SDataBlockInfo;
|
||||||
|
|
||||||
typedef struct SSDataBlock {
|
typedef struct SSDataBlock {
|
||||||
SColumnDataAgg* pBlockAgg;
|
SColumnDataAgg* pBlockAgg;
|
||||||
SArray* pDataBlock; // SArray<SColumnInfoData>
|
SArray* pDataBlock; // SArray<SColumnInfoData>
|
||||||
SDataBlockInfo info;
|
SDataBlockInfo info;
|
||||||
} SSDataBlock;
|
} SSDataBlock;
|
||||||
|
|
||||||
typedef struct SVarColAttr {
|
typedef struct SVarColAttr {
|
||||||
|
@ -301,6 +302,15 @@ typedef struct STableBlockDistInfo {
|
||||||
int32_t tSerializeBlockDistInfo(void* buf, int32_t bufLen, const STableBlockDistInfo* pInfo);
|
int32_t tSerializeBlockDistInfo(void* buf, int32_t bufLen, const STableBlockDistInfo* pInfo);
|
||||||
int32_t tDeserializeBlockDistInfo(void* buf, int32_t bufLen, STableBlockDistInfo* pInfo);
|
int32_t tDeserializeBlockDistInfo(void* buf, int32_t bufLen, STableBlockDistInfo* pInfo);
|
||||||
|
|
||||||
|
typedef struct SDBBlockUsageInfo {
|
||||||
|
uint64_t dataInDiskSize;
|
||||||
|
uint64_t walInDiskSize;
|
||||||
|
uint64_t rawDataSize;
|
||||||
|
} SDBBlockUsageInfo;
|
||||||
|
|
||||||
|
int32_t tSerializeBlockDbUsage(void* buf, int32_t bufLen, const SDBBlockUsageInfo* pInfo);
|
||||||
|
int32_t tDeserializeBlockDbUsage(void* buf, int32_t bufLen, SDBBlockUsageInfo* pInfo);
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
FUNC_PARAM_TYPE_VALUE = 0x1,
|
FUNC_PARAM_TYPE_VALUE = 0x1,
|
||||||
FUNC_PARAM_TYPE_COLUMN = 0x2,
|
FUNC_PARAM_TYPE_COLUMN = 0x2,
|
||||||
|
@ -397,11 +407,13 @@ typedef struct STUidTagInfo {
|
||||||
int32_t taosGenCrashJsonMsg(int signum, char** pMsg, int64_t clusterId, int64_t startTime);
|
int32_t taosGenCrashJsonMsg(int signum, char** pMsg, int64_t clusterId, int64_t startTime);
|
||||||
int32_t dumpConfToDataBlock(SSDataBlock* pBlock, int32_t startCol);
|
int32_t dumpConfToDataBlock(SSDataBlock* pBlock, int32_t startCol);
|
||||||
|
|
||||||
#define TSMA_RES_STB_POSTFIX "_tsma_res_stb_"
|
#define TSMA_RES_STB_POSTFIX "_tsma_res_stb_"
|
||||||
#define MD5_OUTPUT_LEN 32
|
#define MD5_OUTPUT_LEN 32
|
||||||
#define TSMA_RES_STB_EXTRA_COLUMN_NUM 4 // 3 columns: _wstart, _wend, _wduration, 1 tag: tbname
|
#define TSMA_RES_STB_EXTRA_COLUMN_NUM 4 // 3 columns: _wstart, _wend, _wduration, 1 tag: tbname
|
||||||
|
|
||||||
static inline bool isTsmaResSTb(const char* stbName) {
|
static inline bool isTsmaResSTb(const char* stbName) {
|
||||||
|
static bool showTsmaTables = false;
|
||||||
|
if (showTsmaTables) return false;
|
||||||
const char* pos = strstr(stbName, TSMA_RES_STB_POSTFIX);
|
const char* pos = strstr(stbName, TSMA_RES_STB_POSTFIX);
|
||||||
if (pos && strlen(stbName) == (pos - stbName) + strlen(TSMA_RES_STB_POSTFIX)) {
|
if (pos && strlen(stbName) == (pos - stbName) + strlen(TSMA_RES_STB_POSTFIX)) {
|
||||||
return true;
|
return true;
|
||||||
|
|
|
@ -57,9 +57,9 @@ const static uint8_t BIT2_MAP[4] = {0b11111100, 0b11110011, 0b11001111, 0b001111
|
||||||
#define ONE ((uint8_t)1)
|
#define ONE ((uint8_t)1)
|
||||||
#define THREE ((uint8_t)3)
|
#define THREE ((uint8_t)3)
|
||||||
#define DIV_8(i) ((i) >> 3)
|
#define DIV_8(i) ((i) >> 3)
|
||||||
#define MOD_8(i) ((i)&7)
|
#define MOD_8(i) ((i) & 7)
|
||||||
#define DIV_4(i) ((i) >> 2)
|
#define DIV_4(i) ((i) >> 2)
|
||||||
#define MOD_4(i) ((i)&3)
|
#define MOD_4(i) ((i) & 3)
|
||||||
#define MOD_4_TIME_2(i) (MOD_4(i) << 1)
|
#define MOD_4_TIME_2(i) (MOD_4(i) << 1)
|
||||||
#define BIT1_SIZE(n) (DIV_8((n)-1) + 1)
|
#define BIT1_SIZE(n) (DIV_8((n)-1) + 1)
|
||||||
#define BIT2_SIZE(n) (DIV_4((n)-1) + 1)
|
#define BIT2_SIZE(n) (DIV_4((n)-1) + 1)
|
||||||
|
@ -154,7 +154,7 @@ int32_t tEncodeTag(SEncoder *pEncoder, const STag *pTag);
|
||||||
int32_t tDecodeTag(SDecoder *pDecoder, STag **ppTag);
|
int32_t tDecodeTag(SDecoder *pDecoder, STag **ppTag);
|
||||||
int32_t tTagToValArray(const STag *pTag, SArray **ppArray);
|
int32_t tTagToValArray(const STag *pTag, SArray **ppArray);
|
||||||
void debugPrintSTag(STag *pTag, const char *tag, int32_t ln); // TODO: remove
|
void debugPrintSTag(STag *pTag, const char *tag, int32_t ln); // TODO: remove
|
||||||
int32_t parseJsontoTagData(const char *json, SArray *pTagVals, STag **ppTag, void *pMsgBuf);
|
int32_t parseJsontoTagData(const char *json, SArray *pTagVals, STag **ppTag, void *pMsgBuf, void *charsetCxt);
|
||||||
|
|
||||||
// SColData ================================
|
// SColData ================================
|
||||||
typedef struct {
|
typedef struct {
|
||||||
|
@ -173,6 +173,8 @@ typedef struct {
|
||||||
} SColDataCompressInfo;
|
} SColDataCompressInfo;
|
||||||
|
|
||||||
typedef void *(*xMallocFn)(void *, int32_t);
|
typedef void *(*xMallocFn)(void *, int32_t);
|
||||||
|
typedef int32_t (*checkWKBGeometryFn)(const unsigned char *geoWKB, size_t nGeom);
|
||||||
|
typedef int32_t (*initGeosFn)();
|
||||||
|
|
||||||
void tColDataDestroy(void *ph);
|
void tColDataDestroy(void *ph);
|
||||||
void tColDataInit(SColData *pColData, int16_t cid, int8_t type, int8_t cflag);
|
void tColDataInit(SColData *pColData, int16_t cid, int8_t type, int8_t cflag);
|
||||||
|
@ -191,7 +193,8 @@ int32_t tColDataCompress(SColData *colData, SColDataCompressInfo *info, SBuffer
|
||||||
int32_t tColDataDecompress(void *input, SColDataCompressInfo *info, SColData *colData, SBuffer *assist);
|
int32_t tColDataDecompress(void *input, SColDataCompressInfo *info, SColData *colData, SBuffer *assist);
|
||||||
|
|
||||||
// for stmt bind
|
// for stmt bind
|
||||||
int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind, int32_t buffMaxLen);
|
int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind, int32_t buffMaxLen, initGeosFn igeos,
|
||||||
|
checkWKBGeometryFn cgeos);
|
||||||
int32_t tColDataSortMerge(SArray **arr);
|
int32_t tColDataSortMerge(SArray **arr);
|
||||||
|
|
||||||
// for raw block
|
// for raw block
|
||||||
|
@ -378,7 +381,8 @@ int32_t tRowBuildFromBind(SBindInfo *infos, int32_t numOfInfos, bool infoSorted,
|
||||||
SArray *rowArray);
|
SArray *rowArray);
|
||||||
|
|
||||||
// stmt2 binding
|
// stmt2 binding
|
||||||
int32_t tColDataAddValueByBind2(SColData *pColData, TAOS_STMT2_BIND *pBind, int32_t buffMaxLen);
|
int32_t tColDataAddValueByBind2(SColData *pColData, TAOS_STMT2_BIND *pBind, int32_t buffMaxLen, initGeosFn igeos,
|
||||||
|
checkWKBGeometryFn cgeos);
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int32_t columnId;
|
int32_t columnId;
|
||||||
|
|
|
@ -19,6 +19,7 @@
|
||||||
#include "tarray.h"
|
#include "tarray.h"
|
||||||
#include "tconfig.h"
|
#include "tconfig.h"
|
||||||
#include "tdef.h"
|
#include "tdef.h"
|
||||||
|
#include "tmsg.h"
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
extern "C" {
|
extern "C" {
|
||||||
|
@ -30,6 +31,9 @@ extern "C" {
|
||||||
#define SLOW_LOG_TYPE_OTHERS 0x4
|
#define SLOW_LOG_TYPE_OTHERS 0x4
|
||||||
#define SLOW_LOG_TYPE_ALL 0x7
|
#define SLOW_LOG_TYPE_ALL 0x7
|
||||||
|
|
||||||
|
#define GLOBAL_CONFIG_FILE_VERSION 1
|
||||||
|
#define LOCAL_CONFIG_FILE_VERSION 1
|
||||||
|
|
||||||
typedef enum {
|
typedef enum {
|
||||||
DND_CA_SM4 = 1,
|
DND_CA_SM4 = 1,
|
||||||
} EEncryptAlgor;
|
} EEncryptAlgor;
|
||||||
|
@ -41,6 +45,8 @@ typedef enum {
|
||||||
DND_CS_MNODE_WAL = 8,
|
DND_CS_MNODE_WAL = 8,
|
||||||
} EEncryptScope;
|
} EEncryptScope;
|
||||||
|
|
||||||
|
extern SConfig *tsCfg;
|
||||||
|
|
||||||
// cluster
|
// cluster
|
||||||
extern char tsFirst[];
|
extern char tsFirst[];
|
||||||
extern char tsSecond[];
|
extern char tsSecond[];
|
||||||
|
@ -49,6 +55,9 @@ extern char tsLocalEp[];
|
||||||
extern char tsVersionName[];
|
extern char tsVersionName[];
|
||||||
extern uint16_t tsServerPort;
|
extern uint16_t tsServerPort;
|
||||||
extern int32_t tsVersion;
|
extern int32_t tsVersion;
|
||||||
|
extern int32_t tsForceReadConfig;
|
||||||
|
extern int32_t tsdmConfigVersion;
|
||||||
|
extern int32_t tsConfigInited;
|
||||||
extern int32_t tsStatusInterval;
|
extern int32_t tsStatusInterval;
|
||||||
extern int32_t tsNumOfSupportVnodes;
|
extern int32_t tsNumOfSupportVnodes;
|
||||||
extern char tsEncryptAlgorithm[];
|
extern char tsEncryptAlgorithm[];
|
||||||
|
@ -67,14 +76,26 @@ extern int64_t tsTickPerHour[3];
|
||||||
extern int32_t tsCountAlwaysReturnValue;
|
extern int32_t tsCountAlwaysReturnValue;
|
||||||
extern float tsSelectivityRatio;
|
extern float tsSelectivityRatio;
|
||||||
extern int32_t tsTagFilterResCacheSize;
|
extern int32_t tsTagFilterResCacheSize;
|
||||||
|
extern int32_t tsBypassFlag;
|
||||||
|
|
||||||
// queue & threads
|
// queue & threads
|
||||||
|
extern int32_t tsQueryMinConcurrentTaskNum;
|
||||||
|
extern int32_t tsQueryMaxConcurrentTaskNum;
|
||||||
|
extern int32_t tsQueryConcurrentTaskNum;
|
||||||
|
extern int32_t tsSingleQueryMaxMemorySize;
|
||||||
|
extern int8_t tsQueryUseMemoryPool;
|
||||||
|
extern int8_t tsMemPoolFullFunc;
|
||||||
|
// extern int32_t tsQueryBufferPoolSize;
|
||||||
|
extern int32_t tsMinReservedMemorySize;
|
||||||
|
extern int64_t tsCurrentAvailMemorySize;
|
||||||
|
extern int8_t tsNeedTrim;
|
||||||
|
extern int32_t tsQueryNoFetchTimeoutSec;
|
||||||
|
extern int32_t tsNumOfQueryThreads;
|
||||||
extern int32_t tsNumOfRpcThreads;
|
extern int32_t tsNumOfRpcThreads;
|
||||||
extern int32_t tsNumOfRpcSessions;
|
extern int32_t tsNumOfRpcSessions;
|
||||||
extern int32_t tsShareConnLimit;
|
extern int32_t tsShareConnLimit;
|
||||||
extern int32_t tsReadTimeout;
|
extern int32_t tsReadTimeout;
|
||||||
extern int32_t tsTimeToGetAvailableConn;
|
extern int32_t tsTimeToGetAvailableConn;
|
||||||
extern int32_t tsKeepAliveIdle;
|
|
||||||
extern int32_t tsNumOfCommitThreads;
|
extern int32_t tsNumOfCommitThreads;
|
||||||
extern int32_t tsNumOfTaskQueueThreads;
|
extern int32_t tsNumOfTaskQueueThreads;
|
||||||
extern int32_t tsNumOfMnodeQueryThreads;
|
extern int32_t tsNumOfMnodeQueryThreads;
|
||||||
|
@ -91,6 +112,9 @@ extern int32_t tsNumOfSnodeWriteThreads;
|
||||||
extern int64_t tsQueueMemoryAllowed;
|
extern int64_t tsQueueMemoryAllowed;
|
||||||
extern int32_t tsRetentionSpeedLimitMB;
|
extern int32_t tsRetentionSpeedLimitMB;
|
||||||
|
|
||||||
|
extern const char *tsAlterCompactTaskKeywords;
|
||||||
|
extern int32_t tsNumOfCompactThreads;
|
||||||
|
|
||||||
// sync raft
|
// sync raft
|
||||||
extern int32_t tsElectInterval;
|
extern int32_t tsElectInterval;
|
||||||
extern int32_t tsHeartbeatInterval;
|
extern int32_t tsHeartbeatInterval;
|
||||||
|
@ -154,7 +178,7 @@ extern bool tsEnableCrashReport;
|
||||||
extern char *tsTelemUri;
|
extern char *tsTelemUri;
|
||||||
extern char *tsClientCrashReportUri;
|
extern char *tsClientCrashReportUri;
|
||||||
extern char *tsSvrCrashReportUri;
|
extern char *tsSvrCrashReportUri;
|
||||||
extern int8_t tsSafetyCheckLevel;
|
extern int32_t tsSafetyCheckLevel;
|
||||||
enum {
|
enum {
|
||||||
TSDB_SAFETY_CHECK_LEVELL_NEVER = 0,
|
TSDB_SAFETY_CHECK_LEVELL_NEVER = 0,
|
||||||
TSDB_SAFETY_CHECK_LEVELL_NORMAL = 1,
|
TSDB_SAFETY_CHECK_LEVELL_NORMAL = 1,
|
||||||
|
@ -188,7 +212,6 @@ extern int32_t tsMaxRetryWaitTime;
|
||||||
extern bool tsUseAdapter;
|
extern bool tsUseAdapter;
|
||||||
extern int32_t tsMetaCacheMaxSize;
|
extern int32_t tsMetaCacheMaxSize;
|
||||||
extern int32_t tsSlowLogThreshold;
|
extern int32_t tsSlowLogThreshold;
|
||||||
extern int32_t tsSlowLogThresholdTest;
|
|
||||||
extern char tsSlowLogExceptDb[];
|
extern char tsSlowLogExceptDb[];
|
||||||
extern int32_t tsSlowLogScope;
|
extern int32_t tsSlowLogScope;
|
||||||
extern int32_t tsSlowLogMaxLen;
|
extern int32_t tsSlowLogMaxLen;
|
||||||
|
@ -244,6 +267,7 @@ extern int64_t tsmaDataDeleteMark;
|
||||||
extern int64_t tsWalFsyncDataSizeLimit;
|
extern int64_t tsWalFsyncDataSizeLimit;
|
||||||
|
|
||||||
// internal
|
// internal
|
||||||
|
extern bool tsDiskIDCheckEnabled;
|
||||||
extern int32_t tsTransPullupInterval;
|
extern int32_t tsTransPullupInterval;
|
||||||
extern int32_t tsCompactPullupInterval;
|
extern int32_t tsCompactPullupInterval;
|
||||||
extern int32_t tsMqRebalanceInterval;
|
extern int32_t tsMqRebalanceInterval;
|
||||||
|
@ -259,7 +283,7 @@ extern int32_t tsS3MigrateIntervalSec;
|
||||||
extern bool tsS3MigrateEnabled;
|
extern bool tsS3MigrateEnabled;
|
||||||
extern int32_t tsGrantHBInterval;
|
extern int32_t tsGrantHBInterval;
|
||||||
extern int32_t tsUptimeInterval;
|
extern int32_t tsUptimeInterval;
|
||||||
|
extern bool tsUpdateCacheBatch;
|
||||||
extern bool tsDisableStream;
|
extern bool tsDisableStream;
|
||||||
extern int64_t tsStreamBufferSize;
|
extern int64_t tsStreamBufferSize;
|
||||||
extern int tsStreamAggCnt;
|
extern int tsStreamAggCnt;
|
||||||
|
@ -289,6 +313,16 @@ void taosLocalCfgForbiddenToChange(char *name, bool *forbidden);
|
||||||
int8_t taosGranted(int8_t type);
|
int8_t taosGranted(int8_t type);
|
||||||
int32_t taosSetSlowLogScope(char *pScopeStr, int32_t *pScope);
|
int32_t taosSetSlowLogScope(char *pScopeStr, int32_t *pScope);
|
||||||
|
|
||||||
|
int32_t taosPersistGlobalConfig(SArray *array, const char *path, int32_t version);
|
||||||
|
int32_t taosPersistLocalConfig(const char *path);
|
||||||
|
int32_t localConfigSerialize(SArray *array, char **serialized);
|
||||||
|
int32_t tSerializeSConfigArray(SEncoder *pEncoder, SArray *array);
|
||||||
|
int32_t tDeserializeSConfigArray(SDecoder *pDecoder, SArray *array);
|
||||||
|
int32_t setAllConfigs(SConfig *pCfg);
|
||||||
|
void printConfigNotMatch(SArray *array);
|
||||||
|
|
||||||
|
int32_t compareSConfigItemArrays(SArray *mArray, const SArray *dArray, SArray *diffArray);
|
||||||
|
bool isConifgItemLazyMode(SConfigItem *item);
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -161,6 +161,8 @@ typedef enum _mgmt_table {
|
||||||
TSDB_MGMT_TABLE_USER_FULL,
|
TSDB_MGMT_TABLE_USER_FULL,
|
||||||
TSDB_MGMT_TABLE_ANODE,
|
TSDB_MGMT_TABLE_ANODE,
|
||||||
TSDB_MGMT_TABLE_ANODE_FULL,
|
TSDB_MGMT_TABLE_ANODE_FULL,
|
||||||
|
TSDB_MGMT_TABLE_USAGE,
|
||||||
|
TSDB_MGMT_TABLE_FILESETS,
|
||||||
TSDB_MGMT_TABLE_MAX,
|
TSDB_MGMT_TABLE_MAX,
|
||||||
} EShowType;
|
} EShowType;
|
||||||
|
|
||||||
|
@ -309,6 +311,7 @@ typedef enum ENodeType {
|
||||||
QUERY_NODE_DESCRIBE_STMT,
|
QUERY_NODE_DESCRIBE_STMT,
|
||||||
QUERY_NODE_RESET_QUERY_CACHE_STMT,
|
QUERY_NODE_RESET_QUERY_CACHE_STMT,
|
||||||
QUERY_NODE_COMPACT_DATABASE_STMT,
|
QUERY_NODE_COMPACT_DATABASE_STMT,
|
||||||
|
QUERY_NODE_COMPACT_VGROUPS_STMT,
|
||||||
QUERY_NODE_CREATE_FUNCTION_STMT,
|
QUERY_NODE_CREATE_FUNCTION_STMT,
|
||||||
QUERY_NODE_DROP_FUNCTION_STMT,
|
QUERY_NODE_DROP_FUNCTION_STMT,
|
||||||
QUERY_NODE_CREATE_STREAM_STMT,
|
QUERY_NODE_CREATE_STREAM_STMT,
|
||||||
|
@ -397,9 +400,11 @@ typedef enum ENodeType {
|
||||||
QUERY_NODE_SHOW_TSMAS_STMT,
|
QUERY_NODE_SHOW_TSMAS_STMT,
|
||||||
QUERY_NODE_SHOW_ANODES_STMT,
|
QUERY_NODE_SHOW_ANODES_STMT,
|
||||||
QUERY_NODE_SHOW_ANODES_FULL_STMT,
|
QUERY_NODE_SHOW_ANODES_FULL_STMT,
|
||||||
|
QUERY_NODE_SHOW_USAGE_STMT,
|
||||||
QUERY_NODE_CREATE_TSMA_STMT,
|
QUERY_NODE_CREATE_TSMA_STMT,
|
||||||
QUERY_NODE_SHOW_CREATE_TSMA_STMT,
|
QUERY_NODE_SHOW_CREATE_TSMA_STMT,
|
||||||
QUERY_NODE_DROP_TSMA_STMT,
|
QUERY_NODE_DROP_TSMA_STMT,
|
||||||
|
QUERY_NODE_SHOW_FILESETS_STMT,
|
||||||
|
|
||||||
// logic plan node
|
// logic plan node
|
||||||
QUERY_NODE_LOGIC_PLAN_SCAN = 1000,
|
QUERY_NODE_LOGIC_PLAN_SCAN = 1000,
|
||||||
|
@ -679,7 +684,7 @@ typedef struct {
|
||||||
int32_t tsSlowLogThreshold;
|
int32_t tsSlowLogThreshold;
|
||||||
int32_t tsSlowLogMaxLen;
|
int32_t tsSlowLogMaxLen;
|
||||||
int32_t tsSlowLogScope;
|
int32_t tsSlowLogScope;
|
||||||
int32_t tsSlowLogThresholdTest;
|
int32_t tsSlowLogThresholdTest; // Obsolete
|
||||||
char tsSlowLogExceptDb[TSDB_DB_NAME_LEN];
|
char tsSlowLogExceptDb[TSDB_DB_NAME_LEN];
|
||||||
} SMonitorParas;
|
} SMonitorParas;
|
||||||
|
|
||||||
|
@ -1240,14 +1245,15 @@ typedef struct {
|
||||||
} STsBufInfo;
|
} STsBufInfo;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int32_t tz; // query client timezone
|
void* timezone;
|
||||||
char intervalUnit;
|
char intervalUnit;
|
||||||
char slidingUnit;
|
char slidingUnit;
|
||||||
char offsetUnit;
|
char offsetUnit;
|
||||||
int8_t precision;
|
int8_t precision;
|
||||||
int64_t interval;
|
int64_t interval;
|
||||||
int64_t sliding;
|
int64_t sliding;
|
||||||
int64_t offset;
|
int64_t offset;
|
||||||
|
STimeWindow timeRange;
|
||||||
} SInterval;
|
} SInterval;
|
||||||
|
|
||||||
typedef struct STbVerInfo {
|
typedef struct STbVerInfo {
|
||||||
|
@ -1344,6 +1350,11 @@ typedef struct {
|
||||||
int8_t withArbitrator;
|
int8_t withArbitrator;
|
||||||
int8_t encryptAlgorithm;
|
int8_t encryptAlgorithm;
|
||||||
char dnodeListStr[TSDB_DNODE_LIST_LEN];
|
char dnodeListStr[TSDB_DNODE_LIST_LEN];
|
||||||
|
// 1. add auto-compact parameters
|
||||||
|
int32_t compactInterval; // minutes
|
||||||
|
int32_t compactStartTime; // minutes
|
||||||
|
int32_t compactEndTime; // minutes
|
||||||
|
int8_t compactTimeOffset; // hour
|
||||||
} SCreateDbReq;
|
} SCreateDbReq;
|
||||||
|
|
||||||
int32_t tSerializeSCreateDbReq(void* buf, int32_t bufLen, SCreateDbReq* pReq);
|
int32_t tSerializeSCreateDbReq(void* buf, int32_t bufLen, SCreateDbReq* pReq);
|
||||||
|
@ -1375,6 +1386,11 @@ typedef struct {
|
||||||
int32_t sqlLen;
|
int32_t sqlLen;
|
||||||
char* sql;
|
char* sql;
|
||||||
int8_t withArbitrator;
|
int8_t withArbitrator;
|
||||||
|
// 1. add auto-compact parameters
|
||||||
|
int32_t compactInterval;
|
||||||
|
int32_t compactStartTime;
|
||||||
|
int32_t compactEndTime;
|
||||||
|
int8_t compactTimeOffset;
|
||||||
} SAlterDbReq;
|
} SAlterDbReq;
|
||||||
|
|
||||||
int32_t tSerializeSAlterDbReq(void* buf, int32_t bufLen, SAlterDbReq* pReq);
|
int32_t tSerializeSAlterDbReq(void* buf, int32_t bufLen, SAlterDbReq* pReq);
|
||||||
|
@ -1507,6 +1523,10 @@ typedef struct {
|
||||||
int32_t s3ChunkSize;
|
int32_t s3ChunkSize;
|
||||||
int32_t s3KeepLocal;
|
int32_t s3KeepLocal;
|
||||||
int8_t s3Compact;
|
int8_t s3Compact;
|
||||||
|
int8_t compactTimeOffset;
|
||||||
|
int32_t compactInterval;
|
||||||
|
int32_t compactStartTime;
|
||||||
|
int32_t compactEndTime;
|
||||||
int32_t tsdbPageSize;
|
int32_t tsdbPageSize;
|
||||||
int32_t walRetentionPeriod;
|
int32_t walRetentionPeriod;
|
||||||
int32_t walRollPeriod;
|
int32_t walRollPeriod;
|
||||||
|
@ -1614,6 +1634,7 @@ typedef struct {
|
||||||
STimeWindow timeRange;
|
STimeWindow timeRange;
|
||||||
int32_t sqlLen;
|
int32_t sqlLen;
|
||||||
char* sql;
|
char* sql;
|
||||||
|
SArray* vgroupIds;
|
||||||
} SCompactDbReq;
|
} SCompactDbReq;
|
||||||
|
|
||||||
int32_t tSerializeSCompactDbReq(void* buf, int32_t bufLen, SCompactDbReq* pReq);
|
int32_t tSerializeSCompactDbReq(void* buf, int32_t bufLen, SCompactDbReq* pReq);
|
||||||
|
@ -1754,6 +1775,21 @@ typedef struct {
|
||||||
int32_t learnerProgress; // use one reservered
|
int32_t learnerProgress; // use one reservered
|
||||||
} SVnodeLoad;
|
} SVnodeLoad;
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
int32_t vgId;
|
||||||
|
int64_t numOfTables;
|
||||||
|
int64_t memSize;
|
||||||
|
int64_t l1Size;
|
||||||
|
int64_t l2Size;
|
||||||
|
int64_t l3Size;
|
||||||
|
int64_t cacheSize;
|
||||||
|
int64_t walSize;
|
||||||
|
int64_t metaSize;
|
||||||
|
int64_t rawDataSize;
|
||||||
|
int64_t s3Size;
|
||||||
|
const char* dbname;
|
||||||
|
} SDbSizeStatisInfo;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int32_t vgId;
|
int32_t vgId;
|
||||||
int64_t nTimeSeries;
|
int64_t nTimeSeries;
|
||||||
|
@ -1809,6 +1845,16 @@ int32_t tSerializeSStatusReq(void* buf, int32_t bufLen, SStatusReq* pReq);
|
||||||
int32_t tDeserializeSStatusReq(void* buf, int32_t bufLen, SStatusReq* pReq);
|
int32_t tDeserializeSStatusReq(void* buf, int32_t bufLen, SStatusReq* pReq);
|
||||||
void tFreeSStatusReq(SStatusReq* pReq);
|
void tFreeSStatusReq(SStatusReq* pReq);
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
int32_t forceReadConfig;
|
||||||
|
int32_t cver;
|
||||||
|
SArray* array;
|
||||||
|
} SConfigReq;
|
||||||
|
|
||||||
|
int32_t tSerializeSConfigReq(void* buf, int32_t bufLen, SConfigReq* pReq);
|
||||||
|
int32_t tDeserializeSConfigReq(void* buf, int32_t bufLen, SConfigReq* pReq);
|
||||||
|
void tFreeSConfigReq(SConfigReq* pReq);
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int32_t dnodeId;
|
int32_t dnodeId;
|
||||||
char machineId[TSDB_MACHINE_ID_LEN + 1];
|
char machineId[TSDB_MACHINE_ID_LEN + 1];
|
||||||
|
@ -1886,6 +1932,18 @@ int32_t tSerializeSStatusRsp(void* buf, int32_t bufLen, SStatusRsp* pRsp);
|
||||||
int32_t tDeserializeSStatusRsp(void* buf, int32_t bufLen, SStatusRsp* pRsp);
|
int32_t tDeserializeSStatusRsp(void* buf, int32_t bufLen, SStatusRsp* pRsp);
|
||||||
void tFreeSStatusRsp(SStatusRsp* pRsp);
|
void tFreeSStatusRsp(SStatusRsp* pRsp);
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
int32_t forceReadConfig;
|
||||||
|
int32_t isConifgVerified;
|
||||||
|
int32_t isVersionVerified;
|
||||||
|
int32_t cver;
|
||||||
|
SArray* array;
|
||||||
|
} SConfigRsp;
|
||||||
|
|
||||||
|
int32_t tSerializeSConfigRsp(void* buf, int32_t bufLen, SConfigRsp* pRsp);
|
||||||
|
int32_t tDeserializeSConfigRsp(void* buf, int32_t bufLen, SConfigRsp* pRsp);
|
||||||
|
void tFreeSConfigRsp(SConfigRsp* pRsp);
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int32_t reserved;
|
int32_t reserved;
|
||||||
} SMTimerReq;
|
} SMTimerReq;
|
||||||
|
@ -1984,6 +2042,8 @@ typedef struct {
|
||||||
int32_t dnodeId;
|
int32_t dnodeId;
|
||||||
int32_t numberFileset;
|
int32_t numberFileset;
|
||||||
int32_t finished;
|
int32_t finished;
|
||||||
|
int32_t progress;
|
||||||
|
int64_t remainingTime;
|
||||||
} SQueryCompactProgressRsp;
|
} SQueryCompactProgressRsp;
|
||||||
|
|
||||||
int32_t tSerializeSQueryCompactProgressRsp(void* buf, int32_t bufLen, SQueryCompactProgressRsp* pReq);
|
int32_t tSerializeSQueryCompactProgressRsp(void* buf, int32_t bufLen, SQueryCompactProgressRsp* pReq);
|
||||||
|
@ -2191,6 +2251,7 @@ typedef struct {
|
||||||
char name[TSDB_CONFIG_OPTION_LEN + 1];
|
char name[TSDB_CONFIG_OPTION_LEN + 1];
|
||||||
char value[TSDB_CONFIG_PATH_LEN + 1];
|
char value[TSDB_CONFIG_PATH_LEN + 1];
|
||||||
char scope[TSDB_CONFIG_SCOPE_LEN + 1];
|
char scope[TSDB_CONFIG_SCOPE_LEN + 1];
|
||||||
|
char category[TSDB_CONFIG_CATEGORY_LEN + 1];
|
||||||
char info[TSDB_CONFIG_INFO_LEN + 1];
|
char info[TSDB_CONFIG_INFO_LEN + 1];
|
||||||
} SVariablesInfo;
|
} SVariablesInfo;
|
||||||
|
|
||||||
|
@ -2399,8 +2460,9 @@ int32_t tDeserializeSMCfgDnodeReq(void* buf, int32_t bufLen, SMCfgDnodeReq* pReq
|
||||||
void tFreeSMCfgDnodeReq(SMCfgDnodeReq* pReq);
|
void tFreeSMCfgDnodeReq(SMCfgDnodeReq* pReq);
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
char config[TSDB_DNODE_CONFIG_LEN];
|
char config[TSDB_DNODE_CONFIG_LEN];
|
||||||
char value[TSDB_DNODE_VALUE_LEN];
|
char value[TSDB_DNODE_VALUE_LEN];
|
||||||
|
int32_t version;
|
||||||
} SDCfgDnodeReq;
|
} SDCfgDnodeReq;
|
||||||
|
|
||||||
int32_t tSerializeSDCfgDnodeReq(void* buf, int32_t bufLen, SDCfgDnodeReq* pReq);
|
int32_t tSerializeSDCfgDnodeReq(void* buf, int32_t bufLen, SDCfgDnodeReq* pReq);
|
||||||
|
@ -2740,7 +2802,7 @@ int32_t tDeserializeSResFetchReq(void* buf, int32_t bufLen, SResFetchReq* pReq);
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
SMsgHead header;
|
SMsgHead header;
|
||||||
uint64_t sId;
|
uint64_t clientId;
|
||||||
} SSchTasksStatusReq;
|
} SSchTasksStatusReq;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
|
@ -2770,7 +2832,7 @@ typedef struct SQueryNodeEpId {
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
SMsgHead header;
|
SMsgHead header;
|
||||||
uint64_t sId;
|
uint64_t clientId;
|
||||||
SQueryNodeEpId epId;
|
SQueryNodeEpId epId;
|
||||||
SArray* taskAction; // SArray<STaskAction>
|
SArray* taskAction; // SArray<STaskAction>
|
||||||
} SSchedulerHbReq;
|
} SSchedulerHbReq;
|
||||||
|
@ -3231,6 +3293,7 @@ int tDecodeSVCreateTbBatchRsp(SDecoder* pCoder, SVCreateTbBatchRsp* pRsp);
|
||||||
typedef struct {
|
typedef struct {
|
||||||
char* name;
|
char* name;
|
||||||
uint64_t suid; // for tmq in wal format
|
uint64_t suid; // for tmq in wal format
|
||||||
|
int64_t uid;
|
||||||
int8_t igNotExists;
|
int8_t igNotExists;
|
||||||
} SVDropTbReq;
|
} SVDropTbReq;
|
||||||
|
|
||||||
|
@ -3433,6 +3496,8 @@ typedef struct {
|
||||||
SAppHbReq app;
|
SAppHbReq app;
|
||||||
SQueryHbReqBasic* query;
|
SQueryHbReqBasic* query;
|
||||||
SHashObj* info; // hash<Skv.key, Skv>
|
SHashObj* info; // hash<Skv.key, Skv>
|
||||||
|
char userApp[TSDB_APP_NAME_LEN];
|
||||||
|
uint32_t userIp;
|
||||||
} SClientHbReq;
|
} SClientHbReq;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
|
@ -3811,7 +3876,14 @@ typedef struct {
|
||||||
SMsgHead head;
|
SMsgHead head;
|
||||||
int64_t streamId;
|
int64_t streamId;
|
||||||
int32_t taskId;
|
int32_t taskId;
|
||||||
} SVPauseStreamTaskReq, SVResetStreamTaskReq;
|
} SVPauseStreamTaskReq;
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
SMsgHead head;
|
||||||
|
int64_t streamId;
|
||||||
|
int32_t taskId;
|
||||||
|
int64_t chkptId;
|
||||||
|
} SVResetStreamTaskReq;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
char name[TSDB_STREAM_FNAME_LEN];
|
char name[TSDB_STREAM_FNAME_LEN];
|
||||||
|
@ -3847,7 +3919,7 @@ typedef struct {
|
||||||
int8_t igExists;
|
int8_t igExists;
|
||||||
int8_t intervalUnit;
|
int8_t intervalUnit;
|
||||||
int8_t slidingUnit;
|
int8_t slidingUnit;
|
||||||
int8_t timezone;
|
int8_t timezone; // int8_t is not enough, timezone is unit of second
|
||||||
int32_t dstVgId; // for stream
|
int32_t dstVgId; // for stream
|
||||||
int64_t interval;
|
int64_t interval;
|
||||||
int64_t offset;
|
int64_t offset;
|
||||||
|
|
|
@ -47,7 +47,7 @@ typedef int32_t (*GetQueueSizeFp)(void* pMgmt, int32_t vgId, EQueueType qtype);
|
||||||
typedef int32_t (*SendReqFp)(const SEpSet* pEpSet, SRpcMsg* pMsg);
|
typedef int32_t (*SendReqFp)(const SEpSet* pEpSet, SRpcMsg* pMsg);
|
||||||
typedef void (*SendRspFp)(SRpcMsg* pMsg);
|
typedef void (*SendRspFp)(SRpcMsg* pMsg);
|
||||||
typedef void (*RegisterBrokenLinkArgFp)(struct SRpcMsg* pMsg);
|
typedef void (*RegisterBrokenLinkArgFp)(struct SRpcMsg* pMsg);
|
||||||
typedef void (*ReleaseHandleFp)(SRpcHandleInfo* pHandle, int8_t type);
|
typedef void (*ReleaseHandleFp)(SRpcHandleInfo* pHandle, int8_t type, int32_t status);
|
||||||
typedef void (*ReportStartup)(const char* name, const char* desc);
|
typedef void (*ReportStartup)(const char* name, const char* desc);
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
|
@ -76,7 +76,7 @@ int32_t tmsgSendReq(const SEpSet* epSet, SRpcMsg* pMsg);
|
||||||
int32_t tmsgSendSyncReq(const SEpSet* epSet, SRpcMsg* pMsg);
|
int32_t tmsgSendSyncReq(const SEpSet* epSet, SRpcMsg* pMsg);
|
||||||
void tmsgSendRsp(SRpcMsg* pMsg);
|
void tmsgSendRsp(SRpcMsg* pMsg);
|
||||||
void tmsgRegisterBrokenLinkArg(SRpcMsg* pMsg);
|
void tmsgRegisterBrokenLinkArg(SRpcMsg* pMsg);
|
||||||
void tmsgReleaseHandle(SRpcHandleInfo* pHandle, int8_t type);
|
void tmsgReleaseHandle(SRpcHandleInfo* pHandle, int8_t type, int32_t code);
|
||||||
void tmsgReportStartup(const char* name, const char* desc);
|
void tmsgReportStartup(const char* name, const char* desc);
|
||||||
bool tmsgUpdateDnodeInfo(int32_t* dnodeId, int64_t* clusterId, char* fqdn, uint16_t* port);
|
bool tmsgUpdateDnodeInfo(int32_t* dnodeId, int64_t* clusterId, char* fqdn, uint16_t* port);
|
||||||
void tmsgUpdateDnodeEpSet(SEpSet* epset);
|
void tmsgUpdateDnodeEpSet(SEpSet* epset);
|
||||||
|
|
|
@ -260,6 +260,7 @@
|
||||||
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_TASK_RESET, "stream-reset-tasks", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_TASK_RESET, "stream-reset-tasks", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_MND_UPDATE_DNODE_INFO, "update-dnode-info", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_MND_UPDATE_DNODE_INFO, "update-dnode-info", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_MND_AUDIT, "audit", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_MND_AUDIT, "audit", NULL, NULL)
|
||||||
|
TD_DEF_MSG_TYPE(TDMT_MND_CONFIG, "init-config", NULL, NULL)
|
||||||
TD_CLOSE_MSG_SEG(TDMT_END_MND_MSG)
|
TD_CLOSE_MSG_SEG(TDMT_END_MND_MSG)
|
||||||
|
|
||||||
TD_NEW_MSG_SEG(TDMT_VND_MSG) // 2<<8
|
TD_NEW_MSG_SEG(TDMT_VND_MSG) // 2<<8
|
||||||
|
|
|
@ -36,6 +36,9 @@ extern "C" {
|
||||||
#define TIME_UNIT_MONTH 'n'
|
#define TIME_UNIT_MONTH 'n'
|
||||||
#define TIME_UNIT_YEAR 'y'
|
#define TIME_UNIT_YEAR 'y'
|
||||||
|
|
||||||
|
#define AUTO_DURATION_LITERAL "auto"
|
||||||
|
#define AUTO_DURATION_VALUE -1
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* @return timestamp decided by global conf variable, tsTimePrecision
|
* @return timestamp decided by global conf variable, tsTimePrecision
|
||||||
* if precision == TSDB_TIME_PRECISION_MICRO, it returns timestamp in microsecond.
|
* if precision == TSDB_TIME_PRECISION_MICRO, it returns timestamp in microsecond.
|
||||||
|
@ -58,37 +61,24 @@ static FORCE_INLINE int64_t taosGetTimestamp(int32_t precision) {
|
||||||
* precision == TSDB_TIME_PRECISION_MILLI, it returns timestamp in millisecond.
|
* precision == TSDB_TIME_PRECISION_MILLI, it returns timestamp in millisecond.
|
||||||
* precision == TSDB_TIME_PRECISION_NANO, it returns timestamp in nanosecond.
|
* precision == TSDB_TIME_PRECISION_NANO, it returns timestamp in nanosecond.
|
||||||
*/
|
*/
|
||||||
static FORCE_INLINE int64_t taosGetTimestampToday(int32_t precision) {
|
int64_t taosGetTimestampToday(int32_t precision, timezone_t tz);
|
||||||
int64_t factor = (precision == TSDB_TIME_PRECISION_MILLI) ? 1000
|
|
||||||
: (precision == TSDB_TIME_PRECISION_MICRO) ? 1000000
|
|
||||||
: 1000000000;
|
|
||||||
time_t t;
|
|
||||||
(void) taosTime(&t);
|
|
||||||
struct tm tm;
|
|
||||||
(void) taosLocalTime(&t, &tm, NULL, 0);
|
|
||||||
tm.tm_hour = 0;
|
|
||||||
tm.tm_min = 0;
|
|
||||||
tm.tm_sec = 0;
|
|
||||||
|
|
||||||
return (int64_t)taosMktime(&tm) * factor;
|
int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision, timezone_t tz);
|
||||||
}
|
|
||||||
|
|
||||||
int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision);
|
|
||||||
|
|
||||||
int64_t taosTimeTruncate(int64_t ts, const SInterval* pInterval);
|
int64_t taosTimeTruncate(int64_t ts, const SInterval* pInterval);
|
||||||
int64_t taosTimeGetIntervalEnd(int64_t ts, const SInterval* pInterval);
|
int64_t taosTimeGetIntervalEnd(int64_t ts, const SInterval* pInterval);
|
||||||
int32_t taosTimeCountIntervalForFill(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision, int32_t order);
|
int32_t taosTimeCountIntervalForFill(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision, int32_t order);
|
||||||
|
void calcIntervalAutoOffset(SInterval* interval);
|
||||||
|
|
||||||
int32_t parseAbsoluteDuration(const char* token, int32_t tokenlen, int64_t* ts, char* unit, int32_t timePrecision);
|
int32_t parseAbsoluteDuration(const char* token, int32_t tokenlen, int64_t* ts, char* unit, int32_t timePrecision);
|
||||||
int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit, int32_t timePrecision, bool negativeAllow);
|
int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit, int32_t timePrecision, bool negativeAllow);
|
||||||
|
|
||||||
int32_t taosParseTime(const char* timestr, int64_t* pTime, int32_t len, int32_t timePrec, int8_t dayligth);
|
int32_t taosParseTime(const char* timestr, int64_t* pTime, int32_t len, int32_t timePrec, timezone_t tz);
|
||||||
void deltaToUtcInitOnce();
|
|
||||||
char getPrecisionUnit(int32_t precision);
|
char getPrecisionUnit(int32_t precision);
|
||||||
|
|
||||||
int64_t convertTimePrecision(int64_t ts, int32_t fromPrecision, int32_t toPrecision);
|
int64_t convertTimePrecision(int64_t ts, int32_t fromPrecision, int32_t toPrecision);
|
||||||
int32_t convertTimeFromPrecisionToUnit(int64_t time, int32_t fromPrecision, char toUnit, int64_t* pRes);
|
int32_t convertTimeFromPrecisionToUnit(int64_t time, int32_t fromPrecision, char toUnit, int64_t* pRes);
|
||||||
int32_t convertStringToTimestamp(int16_t type, char* inputData, int64_t timePrec, int64_t* timeVal);
|
int32_t convertStringToTimestamp(int16_t type, char* inputData, int64_t timePrec, int64_t* timeVal, timezone_t tz, void* charsetCxt);
|
||||||
int32_t getDuration(int64_t val, char unit, int64_t* result, int32_t timePrecision);
|
int32_t getDuration(int64_t val, char unit, int64_t* result, int32_t timePrecision);
|
||||||
|
|
||||||
int32_t taosFormatUtcTime(char* buf, int32_t bufLen, int64_t ts, int32_t precision);
|
int32_t taosFormatUtcTime(char* buf, int32_t bufLen, int64_t ts, int32_t precision);
|
||||||
|
@ -98,8 +88,8 @@ struct STm {
|
||||||
int64_t fsec; // in NANOSECOND
|
int64_t fsec; // in NANOSECOND
|
||||||
};
|
};
|
||||||
|
|
||||||
int32_t taosTs2Tm(int64_t ts, int32_t precision, struct STm* tm);
|
int32_t taosTs2Tm(int64_t ts, int32_t precision, struct STm* tm, timezone_t tz);
|
||||||
int32_t taosTm2Ts(struct STm* tm, int64_t* ts, int32_t precision);
|
int32_t taosTm2Ts(struct STm* tm, int64_t* ts, int32_t precision, timezone_t tz);
|
||||||
|
|
||||||
/// @brief convert a timestamp to a formatted string
|
/// @brief convert a timestamp to a formatted string
|
||||||
/// @param format the timestamp format, must null terminated
|
/// @param format the timestamp format, must null terminated
|
||||||
|
@ -108,7 +98,7 @@ int32_t taosTm2Ts(struct STm* tm, int64_t* ts, int32_t precision);
|
||||||
/// formats array; If not NULL, [formats] will be used instead of [format] to skip parse formats again.
|
/// formats array; If not NULL, [formats] will be used instead of [format] to skip parse formats again.
|
||||||
/// @param out output buffer, should be initialized by memset
|
/// @param out output buffer, should be initialized by memset
|
||||||
/// @notes remember to free the generated formats
|
/// @notes remember to free the generated formats
|
||||||
int32_t taosTs2Char(const char* format, SArray** formats, int64_t ts, int32_t precision, char* out, int32_t outLen);
|
int32_t taosTs2Char(const char* format, SArray** formats, int64_t ts, int32_t precision, char* out, int32_t outLen, timezone_t tz);
|
||||||
/// @brief convert a formatted timestamp string to a timestamp
|
/// @brief convert a formatted timestamp string to a timestamp
|
||||||
/// @param format must null terminated
|
/// @param format must null terminated
|
||||||
/// @param [in, out] formats, see taosTs2Char
|
/// @param [in, out] formats, see taosTs2Char
|
||||||
|
@ -116,7 +106,7 @@ int32_t taosTs2Char(const char* format, SArray** formats, int64_t ts, int32_t pr
|
||||||
/// @retval 0 for success, otherwise error occured
|
/// @retval 0 for success, otherwise error occured
|
||||||
/// @notes remember to free the generated formats even when error occured
|
/// @notes remember to free the generated formats even when error occured
|
||||||
int32_t taosChar2Ts(const char* format, SArray** formats, const char* tsStr, int64_t* ts, int32_t precision, char* errMsg,
|
int32_t taosChar2Ts(const char* format, SArray** formats, const char* tsStr, int64_t* ts, int32_t precision, char* errMsg,
|
||||||
int32_t errMsgLen);
|
int32_t errMsgLen, timezone_t tz);
|
||||||
|
|
||||||
int32_t TEST_ts2char(const char* format, int64_t ts, int32_t precision, char* out, int32_t outLen);
|
int32_t TEST_ts2char(const char* format, int64_t ts, int32_t precision, char* out, int32_t outLen);
|
||||||
int32_t TEST_char2ts(const char* format, int64_t* ts, int32_t precision, const char* tsStr);
|
int32_t TEST_char2ts(const char* format, int64_t* ts, int32_t precision, const char* tsStr);
|
||||||
|
|