Merge branch '3.0' into 3.0test/jcy

@@ -88,4 +88,3 @@ Standard: Auto
TabWidth: 8
UseTab: Never
...

@@ -0,0 +1 @@
+*.py linguist-detectable=false

@@ -1,5 +1,6 @@
build/
compile_commands.json
CMakeSettings.json
.cache
.ycm_extra_conf.py
.tasks

@@ -34,7 +34,7 @@ endif(${BUILD_TEST})
add_subdirectory(source)
add_subdirectory(tools)
add_subdirectory(tests)
add_subdirectory(utils)
add_subdirectory(examples/c)

# docs
@@ -1,25 +0,0 @@
-{
-  "configurations": [
-    {
-      "name": "WSL-GCC-Debug",
-      "generator": "Unix Makefiles",
-      "configurationType": "Debug",
-      "buildRoot": "${projectDir}\\build\\",
-      "installRoot": "${projectDir}\\build\\",
-      "cmakeExecutable": "/usr/bin/cmake",
-      "cmakeCommandArgs": "",
-      "buildCommandArgs": "",
-      "ctestCommandArgs": "",
-      "inheritEnvironments": [ "linux_x64" ],
-      "wslPath": "${defaultWSLPath}",
-      "addressSanitizerRuntimeFlags": "detect_leaks=0",
-      "variables": [
-        {
-          "name": "CMAKE_INSTALL_PREFIX",
-          "value": "/mnt/d/TDengine/TDengine/build",
-          "type": "PATH"
-        }
-      ]
-    }
-  ]
-}

README-CN.md (16 changes)
@@ -303,14 +303,14 @@ Query OK, 2 row(s) in set (0.001700s)

TDengine provides a rich set of application development interfaces, including C/C++, Java, Python, Go, Node.js, C#, RESTful and more, so that users can develop applications quickly:

-- [Java](https://docs.taosdata.com/reference/connector/java/)
-- [C/C++](https://www.taosdata.com/cn/documentation/connector#c-cpp)
-- [Python](https://docs.taosdata.com/reference/connector/python/)
-- [Go](https://docs.taosdata.com/reference/connector/go/)
-- [Node.js](https://docs.taosdata.com/reference/connector/node/)
-- [Rust](https://docs.taosdata.com/reference/connector/rust/)
-- [C#](https://docs.taosdata.com/reference/connector/csharp/)
-- [RESTful API](https://docs.taosdata.com/reference/rest-api/)
+- [Java](https://docs.taosdata.com/connector/java/)
+- [C/C++](https://docs.taosdata.com/connector/cpp/)
+- [Python](https://docs.taosdata.com/connector/python/)
+- [Go](https://docs.taosdata.com/connector/go/)
+- [Node.js](https://docs.taosdata.com/connector/node/)
+- [Rust](https://docs.taosdata.com/connector/rust/)
+- [C#](https://docs.taosdata.com/connector/csharp/)
+- [RESTful API](https://docs.taosdata.com/connector/rest-api/)

# Become a Community Contributor

README.md (38 changes)
@@ -19,29 +19,29 @@ English | [简体中文](README-CN.md) | We are hiring, check [here](https://tde

# What is TDengine?

-TDengine is an open source, high-performance, cloud native time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other time-seires databases with the following advantages:
+TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/what-is-a-time-series-database/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other time-series databases with the following advantages:

-- **High-Performance**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
+- **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high-cardinality issue, supporting billions of data collection points while outperforming other time-series databases in data ingestion, querying and data compression.

-- **Simplified Solution**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.
+- **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.

-- **Cloud Native**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds.
+- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for Kubernetes deployment and full observability, TDengine is a cloud native time-series database and can be deployed on public, private or hybrid clouds.

-- **Ease of Use**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
+- **[Ease of Use](https://docs.tdengine.com/get-started/docker/)**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, a simplified solution and seamless integrations for third-party tools. For data users, it gives easy data access.

-- **Easy Data Analytics**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
+- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.

-- **Open Source**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered 18.8k stars on GitHub. There is an active developer community, and over 139k running instances worldwide.
+- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including the cluster feature, are all available under open source licenses. It has gathered 18.8k stars on GitHub. There is an active developer community, and over 139k running instances worldwide.

# Documentation

-For user manual, system design and architecture, please refer to [TDengine Documentation](https://docs.taosdata.com) ([TDengine 文档](https://docs.taosdata.com))
+For the user manual, system design and architecture, please refer to [TDengine Documentation](https://docs.tdengine.com) ([TDengine 文档](https://docs.taosdata.com))

# Building

-At the moment, TDengine server supports running on Linux, Windows systems.Any OS application can also choose the RESTful interface of taosAdapter to connect the taosd service . TDengine supports X64/ARM64 CPU , and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future.
+At the moment, the TDengine server supports running on Linux and Windows systems. Any application can also choose the RESTful interface provided by taosAdapter to connect to the taosd service. TDengine supports X64/ARM64 CPUs, and will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future.

-You can choose to install through source code according to your needs, [container](https://docs.taosdata.com/get-started/docker/), [installation package](https://docs.taosdata.com/get-started/package/) or [Kubernetes](https://docs.taosdata.com/deployment/k8s/) to install. This quick guide only applies to installing from source.
+You can choose to install from source code, a [container](https://docs.tdengine.com/get-started/docker/), an [installation package](https://docs.tdengine.com/get-started/package/) or [Kubernetes](https://docs.tdengine.com/deployment/k8s/). This quick guide only applies to installing from source.

TDengine provides a few useful tools such as taosBenchmark (formerly named taosdemo) and taosdump. They were once part of TDengine. By default, building TDengine does not include taosTools. You can use `cmake .. -DBUILD_TOOLS=true` to have them compiled with TDengine.
@@ -256,6 +256,7 @@ After building successfully, TDengine can be installed by:

nmake install
```

+<!--
## On macOS platform

After building successfully, TDengine can be installed by:

@@ -263,6 +264,7 @@ After building successfully, TDengine can be installed by:

```bash
sudo make install
```
+-->

## Quick Run
@@ -304,14 +306,14 @@ Query OK, 2 row(s) in set (0.001700s)

TDengine provides abundant development tools for users to develop on TDengine. Follow the links below to find your desired connectors and relevant documentation.

-- [Java](https://docs.taosdata.com/reference/connector/java/)
-- [C/C++](https://docs.taosdata.com/reference/connector/cpp/)
-- [Python](https://docs.taosdata.com/reference/connector/python/)
-- [Go](https://docs.taosdata.com/reference/connector/go/)
-- [Node.js](https://docs.taosdata.com/reference/connector/node/)
-- [Rust](https://docs.taosdata.com/reference/connector/rust/)
-- [C#](https://docs.taosdata.com/reference/connector/csharp/)
-- [RESTful API](https://docs.taosdata.com/reference/rest-api/)
+- [Java](https://docs.tdengine.com/reference/connector/java/)
+- [C/C++](https://docs.tdengine.com/reference/connector/cpp/)
+- [Python](https://docs.tdengine.com/reference/connector/python/)
+- [Go](https://docs.tdengine.com/reference/connector/go/)
+- [Node.js](https://docs.tdengine.com/reference/connector/node/)
+- [Rust](https://docs.tdengine.com/reference/connector/rust/)
+- [C#](https://docs.tdengine.com/reference/connector/csharp/)
+- [RESTful API](https://docs.tdengine.com/reference/rest-api/)

# Contribute to TDengine

TDenginelogo.png (binary file removed; was 19 KiB)
@@ -2,8 +2,6 @@ cmake_minimum_required(VERSION 3.0)

set(CMAKE_VERBOSE_MAKEFILE OFF)

SET(BUILD_SHARED_LIBS "OFF")

#set output directory
SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/lib)
SET(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/bin)

@@ -103,6 +101,9 @@ IF (TD_WINDOWS)
    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMMON_FLAGS}")

ELSE ()
+    IF (${TD_DARWIN})
+        set(CMAKE_MACOSX_RPATH 0)
+    ENDIF ()
    IF (${COVER} MATCHES "true")
        MESSAGE(STATUS "Test coverage mode, add extra flags")
        SET(GCC_COVERAGE_COMPILE_FLAGS "-fprofile-arcs -ftest-coverage")
@@ -1,3 +1,19 @@
+SET(PREPARE_ENV_CMD "prepare_env_cmd")
+SET(PREPARE_ENV_TARGET "prepare_env_target")
+ADD_CUSTOM_COMMAND(OUTPUT ${PREPARE_ENV_CMD}
+    POST_BUILD
+    COMMAND echo "make test directory"
+    DEPENDS taosd
+    COMMAND ${CMAKE_COMMAND} -E make_directory ${TD_TESTS_OUTPUT_DIR}/cfg/
+    COMMAND ${CMAKE_COMMAND} -E make_directory ${TD_TESTS_OUTPUT_DIR}/log/
+    COMMAND ${CMAKE_COMMAND} -E make_directory ${TD_TESTS_OUTPUT_DIR}/data/
+    COMMAND ${CMAKE_COMMAND} -E echo dataDir ${TD_TESTS_OUTPUT_DIR}/data > ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
+    COMMAND ${CMAKE_COMMAND} -E echo logDir ${TD_TESTS_OUTPUT_DIR}/log >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
+    COMMAND ${CMAKE_COMMAND} -E echo charset UTF-8 >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
+    COMMAND ${CMAKE_COMMAND} -E echo monitor 0 >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
+    COMMENT "prepare taosd environment")
+ADD_CUSTOM_TARGET(${PREPARE_ENV_TARGET} ALL WORKING_DIRECTORY ${TD_EXECUTABLE_OUTPUT_PATH} DEPENDS ${PREPARE_ENV_CMD})
+
IF (TD_LINUX)
    SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/tools/make_install.sh")
    INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
@@ -90,6 +90,12 @@ ELSE ()
    ENDIF ()
ENDIF ()

+option(
+    BUILD_SHARED_LIBS
+    ""
+    OFF
+)
+
option(
    RUST_BINDINGS
    "If build with rust-bindings"
@@ -46,7 +46,7 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin

MESSAGE("Current system processor is ${CMAKE_SYSTEM_PROCESSOR}.")
IF (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm64" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "x86_64")
-    MESSAGE("Current system arch is arm64")
+    MESSAGE("Current system arch is 64")
    SET(TD_DARWIN_64 TRUE)
    ADD_DEFINITIONS("-D_TD_DARWIN_64")
ENDIF ()

@@ -87,7 +87,7 @@ IF ("${CPUTYPE}" STREQUAL "")
        SET(TD_ARM_32 TRUE)
        ADD_DEFINITIONS("-D_TD_ARM_")
        ADD_DEFINITIONS("-D_TD_ARM_32")
-    ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
+    ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "(aarch64)|(arm64)")
        MESSAGE(STATUS "The current platform is aarch64")
        SET(PLATFORM_ARCH_STR "arm64")
        SET(TD_ARM_64 TRUE)
@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
    SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
-    SET(TD_VER_NUMBER "3.0.0.1")
+    SET(TD_VER_NUMBER "3.0.1.0")
ENDIF ()

IF (DEFINED VERCOMPATIBLE)
@@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
    GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
-    GIT_TAG abed566
+    GIT_TAG 71e7ccf
    SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
    BINARY_DIR ""
    #BUILD_IN_SOURCE TRUE
@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
    GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
-    GIT_TAG 2af2222
+    GIT_TAG 2dba49c
    SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
    BINARY_DIR ""
    #BUILD_IN_SOURCE TRUE
@@ -2,7 +2,7 @@
# taosws-rs
ExternalProject_Add(taosws-rs
    GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git
-    GIT_TAG 7a54d21
+    GIT_TAG e771403
    SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
    BINARY_DIR ""
    #BUILD_IN_SOURCE TRUE
@@ -4,25 +4,24 @@ sidebar_label: Documentation Home
slug: /
---

-TDengine is an [open source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud native](https://tdengine.com/tdengine/cloud-native-time-series-database/) time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design and other topics. It’s written mainly for architects, developers and system administrators.
+TDengine is an [open-source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud-native](https://tdengine.com/tdengine/cloud-native-time-series-database/) time-series database optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design, and other topics. It’s written mainly for architects, developers, and system administrators.

To get an overview of TDengine, such as a feature list, benchmarks, and competitive advantages, please browse through the [Introduction](./intro) section.

-TDengine greatly improves the efficiency of data ingestion, querying and storage by exploiting the characteristics of time series data, introducing the novel concepts of "one table for one data collection point" and "super table", and designing an innovative storage engine. To understand the new concepts in TDengine and make full use of the features and capabilities of TDengine, please read [“Concepts”](./concept) thoroughly.
+TDengine greatly improves the efficiency of data ingestion, querying, and storage by exploiting the characteristics of time series data, introducing the novel concepts of "one table for one data collection point" and "super table", and designing an innovative storage engine. To understand the new concepts in TDengine and make full use of the features and capabilities of TDengine, please read [Concepts](./concept) thoroughly.

-If you are a developer, please read the [“Developer Guide”](./develop) carefully. This section introduces the database connection, data modeling, data ingestion, query, continuous query, cache, data subscription, user-defined functions, and other functionality in detail. Sample code is provided for a variety of programming languages. In most cases, you can just copy and paste the sample code, make a few changes to accommodate your application, and it will work.
+If you are a developer, please read the [Developer Guide](./develop) carefully. This section introduces the database connection, data modeling, data ingestion, query, continuous query, cache, data subscription, user-defined functions, and other functionality in detail. Sample code is provided for a variety of programming languages. In most cases, you can just copy and paste the sample code, make a few changes to accommodate your application, and it will work.

-We live in the era of big data, and scale-up is unable to meet the growing needs of business. Any modern data system must have the ability to scale out, and clustering has become an indispensable feature of big data systems. Not only did the TDengine team develop the cluster feature, but also decided to open source this important feature. To learn how to deploy, manage and maintain a TDengine cluster please refer to ["cluster deployment"](../deployment).
+We live in the era of big data, and scale-up is unable to meet the growing needs of the business. Any modern data system must have the ability to scale out, and clustering has become an indispensable feature of big data systems. Not only did the TDengine team develop the cluster feature, but also decided to open source this important feature. To learn how to deploy, manage and maintain a TDengine cluster please refer to [Cluster Deployment](../deployment).

-TDengine uses ubiquitious SQL as its query language, which greatly reduces learning costs and migration costs. In addition to the standard SQL, TDengine has extensions to better support time series data analysis. These extensions include functions such as roll up, interpolation and time weighted average, among many others. The ["SQL Reference"](./taos-sql) chapter describes the SQL syntax in detail, and lists the various supported commands and functions.
+TDengine uses ubiquitous SQL as its query language, which greatly reduces learning costs and migration costs. In addition to the standard SQL, TDengine has extensions to better support time series data analysis. These extensions include functions such as roll-up, interpolation, and time-weighted average, among many others. The [SQL Reference](./taos-sql) chapter describes the SQL syntax in detail and lists the various supported commands and functions.

-If you are a system administrator who cares about installation, upgrade, fault tolerance, disaster recovery, data import, data export, system configuration, how to monitor whether TDengine is running healthily, and how to improve system performance, please refer to, and thoroughly read the ["Administration"](./operation) section.
+If you are a system administrator who cares about installation, upgrade, fault tolerance, disaster recovery, data import, data export, system configuration, how to monitor whether TDengine is running healthily, and how to improve system performance, please refer to, and thoroughly read the [Administration](./operation) section.

-If you want to know more about TDengine tools, the REST API, and connectors for various programming languages, please see the ["Reference"](./reference) chapter.
+If you want to know more about TDengine tools, the REST API, and connectors for various programming languages, please see the [Reference](./reference) chapter.

-If you are very interested in the internal design of TDengine, please read the chapter ["Inside TDengine”](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study TDengine code or even contribute code, please read this chapter carefully.
+If you are very interested in the internal design of TDengine, please read the chapter [Inside TDengine](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study TDengine code or even contribute code, please read this chapter carefully.

-TDengine is an open source database, and we would love for you to be a part of TDengine. If you find any errors in the documentation, or see parts where more clarity or elaboration is needed, please click "Edit this page" at the bottom of each page to edit it directly.
+TDengine is an open-source database, and we would love for you to be a part of TDengine. If you find any errors in the documentation or see parts where more clarity or elaboration is needed, please click "Edit this page" at the bottom of each page to edit it directly.

-Together, we make a difference.
+Together, we make a difference!
@@ -11,23 +11,35 @@ This section introduces the major features, competitive advantages, typical use-

The major features are listed below:

-1. While TDengine supports [using SQL to insert](/develop/insert-data/sql-writing), it also supports [Schemaless writing](/reference/schemaless/) just like NoSQL databases. TDengine also supports standard protocols like [InfluxDB LINE](/develop/insert-data/influxdb-line), [OpenTSDB Telnet](/develop/insert-data/opentsdb-telnet), [OpenTSDB JSON](/develop/insert-data/opentsdb-json) among others.
-2. TDengine supports seamless integration with third-party data collection agents like [Telegraf](/third-party/telegraf), [Prometheus](/third-party/prometheus), [StatsD](/third-party/statsd), [collectd](/third-party/collectd), [icinga2](/third-party/icinga2), [TCollector](/third-party/tcollector), [EMQX](/third-party/emq-broker), [HiveMQ](/third-party/hive-mq-broker). These agents can write data into TDengine with simple configuration and without a single line of code.
-3. Support for [all kinds of queries](/develop/query-data), including aggregation, nested query, downsampling, interpolation and others.
-4. Support for [user defined functions](/develop/udf).
-5. Support for [caching](/develop/cache). TDengine always saves the last data point in cache, so Redis is not needed in some scenarios.
-6. Support for [continuous query](../develop/stream).
-7. Support for [data subscription](../develop/tmq) with the capability to specify filter conditions.
-8. Support for [cluster](../deployment/), with the capability of increasing processing power by adding more nodes. High availability is supported by replication.
-9. Provides an interactive [command-line interface](/reference/taos-shell) for management, maintenance and ad-hoc queries.
-10. Provides many ways to [import](/operation/import) and [export](/operation/export) data.
-11. Provides [monitoring](/operation/monitor) on running instances of TDengine.
-12. Provides [connectors](/reference/connector/) for [C/C++](/reference/connector/cpp), [Java](/reference/connector/java), [Python](/reference/connector/python), [Go](/reference/connector/go), [Rust](/reference/connector/rust), [Node.js](/reference/connector/node) and other programming languages.
-13. Provides a [REST API](/reference/rest-api/).
-14. Supports seamless integration with [Grafana](/third-party/grafana) for visualization.
-15. Supports seamless integration with Google Data Studio.
+1. Insert data
+   - Supports [using SQL to insert](../develop/insert-data/sql-writing); see the SQL sketch after this list.
+   - Supports [schemaless writing](../reference/schemaless/) just like NoSQL databases. It also supports standard protocols like [InfluxDB Line](../develop/insert-data/influxdb-line), [OpenTSDB Telnet](../develop/insert-data/opentsdb-telnet), [OpenTSDB JSON](../develop/insert-data/opentsdb-json) among others.
+   - Supports seamless integration with third-party tools like [Telegraf](../third-party/telegraf/), [Prometheus](../third-party/prometheus/), [collectd](../third-party/collectd/), [StatsD](../third-party/statsd/), [TCollector](../third-party/tcollector/), [EMQX](../third-party/emq-broker), [HiveMQ](../third-party/hive-mq-broker), and [Icinga2](../third-party/icinga2/); these can write data into TDengine with simple configuration and without a single line of code.
+2. Query data
+   - Supports standard [SQL](../taos-sql/), including nested query.
+   - Supports [time series specific functions](../taos-sql/function/#time-series-extensions) and [time series specific queries](../taos-sql/distinguished), like downsampling, interpolation, cumulated sum, time weighted average, state window, session window and many others.
+   - Supports [User Defined Functions (UDF)](../taos-sql/udf).
+3. [Caching](../develop/cache/): TDengine always saves the last data point in cache, so Redis is not needed for time-series data processing.
+4. [Stream Processing](../develop/stream/): Not only is continuous query supported, but TDengine also supports event-driven stream processing, so Flink or Spark is not needed for time-series data processing.
+5. [Data Subscription](../develop/tmq/): Applications can subscribe to a table or a set of tables. The API is the same as Kafka's, but you can specify filter conditions.
+6. Visualization
+   - Supports seamless integration with [Grafana](../third-party/grafana/) for visualization.
+   - Supports seamless integration with Google Data Studio.
+7. Cluster
+   - Supports [cluster](../deployment/) with the capability of increasing processing power by adding more nodes.
+   - Supports [deployment on Kubernetes](../deployment/k8s/).
+   - Supports high availability via data replication.
+8. Administration
+   - Provides [monitoring](../operation/monitor) on running instances of TDengine.
+   - Provides many ways to [import](../operation/import) and [export](../operation/export) data.
+9. Tools
+   - Provides an interactive [Command-line Interface (CLI)](../reference/taos-shell) for management, maintenance and ad-hoc queries.
+   - Provides a tool [taosBenchmark](../reference/taosbenchmark/) for testing the performance of TDengine.
+10. Programming
+   - Provides [connectors](../reference/connector/) for [C/C++](../reference/connector/cpp), [Java](../reference/connector/java), [Python](../reference/connector/python), [Go](../reference/connector/go), [Rust](../reference/connector/rust), [Node.js](../reference/connector/node) and other programming languages.
+   - Provides a [REST API](../reference/rest-api/).
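To make the SQL-centric items above concrete, here is a minimal, illustrative sketch of ingestion and querying in TDengine SQL. It assumes the `meters` super table and `d1001` subtable described in the Concepts chapter further down; it is a sketch, not part of this changeset:

```sql
-- Insert one data point (timestamp, current, voltage, phase) into subtable d1001.
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);

-- Downsampling with a time-series extension: 10-minute average current
-- for all meters at one location.
SELECT _wstart, AVG(current) FROM meters
  WHERE location = 'California.SanFrancisco'
  INTERVAL(10m);

-- Read the cached latest value, the feature that can replace Redis in some scenarios.
SELECT LAST_ROW(current) FROM meters;
```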
For more details on features, please read through the entire documentation.

## Competitive Advantages
@@ -37,23 +49,31 @@ By making full use of [characteristics of time series data](https://tdengine.com

- **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.

-- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds.
+- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for Kubernetes deployment and full observability, TDengine is a cloud native Time-series Database and can be deployed on public, private or hybrid clouds.

- **[Ease of Use](https://tdengine.com/tdengine/easy-time-series-data-platform/)**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.

- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.

- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.

-With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced. 1: With its superior performance, the computing and storage resources are reduced significantly;2: With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly;3: With its simplified solution and nearly zero management, the operation and maintenance costs are reduced significantly.
+With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced.
+
+1. With its superior performance, the computing and storage resources are reduced significantly.
+2. With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly.
+3. With its simplified solution and nearly zero management, the operation and maintenance costs are reduced significantly.

## Technical Ecosystem

This is how TDengine would be situated, in a typical time-series data processing platform:

<figure>

![TDengine Database Technical Ecosystem](eco_system.webp)

-<center>Figure 1. TDengine Technical Ecosystem</center>
+<center><figcaption>Figure 1. TDengine Technical Ecosystem</figcaption></center>
</figure>

On the left-hand side, there are data collection agents like OPC-UA, MQTT, Telegraf and Kafka. On the right-hand side, visualization/BI tools, HMI, Python/R, and IoT Apps can be connected. TDengine itself provides an interactive command-line interface and a web interface for management and maintenance.
@@ -63,42 +83,42 @@ As a high-performance, scalable and SQL supported time-series database, TDengine

### Characteristics and Requirements of Data Sources

| **Data Source Characteristics and Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ------------------------------------------------ | ------------------ | ----------------------- | ------------------- | :-------------- |
| A massive amount of total data | | | √ | TDengine provides excellent scale-out functions in terms of capacity, and has a storage structure with matching high compression ratio to achieve the best storage efficiency in the industry. |
| Data input velocity is extremely high | | | √ | TDengine's performance is much higher than that of other similar products. It can continuously process larger amounts of input data in the same hardware environment, and provides a performance evaluation tool that can easily run in the user environment. |
| A huge number of data sources | | | √ | TDengine is optimized specifically for a huge number of data sources. It is especially suitable for efficiently ingesting, writing and querying data from billions of data sources. |

### System Architecture Requirements

| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ------------------------------------ | ------------------ | ----------------------- | ------------------- | --------------- |
| A simple and reliable system architecture | | | √ | TDengine's system architecture is very simple and reliable, with its own message queue, cache, stream computing, monitoring and other functions. There is no need to integrate any additional third-party products. |
| Fault-tolerance and high-reliability | | | √ | TDengine has cluster functions to automatically provide high-reliability and high-availability functions such as fault tolerance and disaster recovery. |
| Standardization support | | | √ | TDengine supports standard SQL and provides SQL extensions for time-series data analysis. |

### System Function Requirements

| **System Function Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| -------------------------------- | ------------------ | ----------------------- | ------------------- | --------------- |
| Complete data processing algorithms built-in | | √ | | While TDengine implements various general data processing algorithms, industry specific algorithms and special types of processing will need to be implemented at the application level. |
| A large number of crosstab queries | | √ | | This type of processing is better handled by general purpose relational database systems but TDengine can work in concert with relational database systems to provide more complete solutions. |

### System Performance Requirements

| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ----------------------------------- | ------------------ | ----------------------- | ------------------- | --------------- |
| Very large total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server coordination. |
| Extremely high-speed data processing | | | √ | TDengine’s storage and data processing are optimized for IoT, and can process data many times faster than similar products. |
| Extremely fast processing of high resolution data | | | √ | TDengine has achieved the same or better performance than other relational and NoSQL data processing systems. |

### System Maintenance Requirements

| **System Maintenance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ----------------------------------- | ------------------ | ----------------------- | ------------------- | --------------- |
| Native high-reliability | | | √ | TDengine has a very robust, reliable and easily configurable system architecture to simplify routine operation. Human errors and accidents are eliminated to the greatest extent, with a streamlined experience for operators. |
| Minimize learning and maintenance costs | | | √ | In addition to being easily configurable, standard SQL support and the TDengine CLI for ad hoc queries makes maintenance simpler, allows reuse and reduces learning costs. |
| Abundant talent supply | √ | | | Given the above, and given the extensive training and professional services provided by TDengine, it is easy to migrate from existing solutions or create a new and lasting solution based on TDengine. |

## Comparison with other databases
@@ -6,128 +6,127 @@ In order to explain the basic concepts and provide some sample code, the TDengin

<div className="center-table">
<table>
<thead>
<tr>
<th rowSpan="2">Device ID</th>
<th rowSpan="2">Timestamp</th>
<th colSpan="3">Collected Metrics</th>
<th colSpan="2">Tags</th>
</tr>
<tr>
<th>current</th>
<th>voltage</th>
<th>phase</th>
<th>location</th>
<th>groupid</th>
</tr>
</thead>
<tbody>
<tr><td>d1001</td><td>1538548685000</td><td>10.3</td><td>219</td><td>0.31</td><td>California.SanFrancisco</td><td>2</td></tr>
<tr><td>d1002</td><td>1538548684000</td><td>10.2</td><td>220</td><td>0.23</td><td>California.SanFrancisco</td><td>3</td></tr>
<tr><td>d1003</td><td>1538548686500</td><td>11.5</td><td>221</td><td>0.35</td><td>California.LosAngeles</td><td>3</td></tr>
<tr><td>d1004</td><td>1538548685500</td><td>13.4</td><td>223</td><td>0.29</td><td>California.LosAngeles</td><td>2</td></tr>
<tr><td>d1001</td><td>1538548695000</td><td>12.6</td><td>218</td><td>0.33</td><td>California.SanFrancisco</td><td>2</td></tr>
<tr><td>d1004</td><td>1538548696600</td><td>11.8</td><td>221</td><td>0.28</td><td>California.LosAngeles</td><td>2</td></tr>
<tr><td>d1002</td><td>1538548696650</td><td>10.3</td><td>218</td><td>0.25</td><td>California.SanFrancisco</td><td>3</td></tr>
<tr><td>d1001</td><td>1538548696800</td><td>12.3</td><td>221</td><td>0.31</td><td>California.SanFrancisco</td><td>2</td></tr>
</tbody>
</table>
<a href="#model_table1">Table 1: Smart meter example data</a>
</div>

-Each row contains the device ID, time stamp, collected metrics (current, voltage, phase as above), and static tags (location and groupId in Table 1) associated with the devices. Each smart meter generates a row (measurement) in a pre-defined time interval or triggered by an external event. The device produces a sequence of measurements with associated time stamps.
+Each row contains the device ID, timestamp, collected metrics (`current`, `voltage`, `phase` as above), and static tags (`location` and `groupid` in Table 1) associated with the devices. Each smart meter generates a row (measurement) in a pre-defined time interval or triggered by an external event. The device produces a sequence of measurements with associated timestamps.
## Metric

-Metric refers to the physical quantity collected by sensors, equipment or other types of data collection devices, such as current, voltage, temperature, pressure, GPS position, etc., which change with time, and the data type can be integer, float, Boolean, or strings. As time goes by, the amount of collected metric data stored increases.
+Metric refers to the physical quantity collected by sensors, equipment or other types of data collection devices, such as current, voltage, temperature, pressure, GPS position, etc., which change with time, and the data type can be integer, float, Boolean, or strings. As time goes by, the amount of collected metric data stored increases. In the smart meters example, current, voltage and phase are the metrics.

## Label/Tag

-Label/Tag refers to the static properties of sensors, equipment or other types of data collection devices, which do not change with time, such as device model, color, fixed location of the device, etc. The data type can be any type. Although static, TDengine allows users to add, delete or update tag values at any time. Unlike the collected metric data, the amount of tag data stored does not change over time.
+Label/Tag refers to the static properties of sensors, equipment or other types of data collection devices, which do not change with time, such as device model, color, fixed location of the device, etc. The data type can be any type. Although static, TDengine allows users to add, delete or update tag values at any time. Unlike the collected metric data, the amount of tag data stored does not change over time. In the meters example, `location` and `groupid` are the tags.
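Since tag values are mutable, a one-line illustrative sketch of correcting a tag (assuming the `d1001` subtable from Table 1; standard TDengine SQL, not part of this changeset):

```sql
-- Tags are static per device, but their values can be updated at any time.
ALTER TABLE d1001 SET TAG location = 'California.LosAngeles';
```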
## Data Collection Point

-Data Collection Point (DCP) refers to hardware or software that collects metrics based on preset time periods or triggered by events. A data collection point can collect one or multiple metrics, but these metrics are collected at the same time and have the same time stamp. For some complex equipment, there are often multiple data collection points, and the sampling rate of each collection point may be different, and fully independent. For example, for a car, there could be a data collection point to collect GPS position metrics, a data collection point to collect engine status metrics, and a data collection point to collect the environment metrics inside the car. So in this example the car would have three data collection points.
+Data Collection Point (DCP) refers to hardware or software that collects metrics based on preset time periods or triggered by events. A data collection point can collect one or multiple metrics, but these metrics are collected at the same time and have the same timestamp. For some complex equipment, there are often multiple data collection points, and the sampling rate of each collection point may be different, and fully independent. For example, for a car, there could be a data collection point to collect GPS position metrics, a data collection point to collect engine status metrics, and a data collection point to collect the environment metrics inside the car. So in this example the car would have three data collection points. In the smart meters example, d1001, d1002, d1003, and d1004 are the data collection points.

## Table

Since time-series data is most likely to be structured data, TDengine adopts the traditional relational database model to process them with a short learning curve. You need to create a database, create tables, then insert data points and execute queries to explore the data.

-To make full use of time-series data characteristics, TDengine adopts a strategy of "**One Table for One Data Collection Point**". TDengine requires the user to create a table for each data collection point (DCP) to store collected time-series data. For example, if there are over 10 million smart meters, it means 10 million tables should be created. For the table above, 4 tables should be created for devices D1001, D1002, D1003, and D1004 to store the data collected. This design has several benefits:
+To make full use of time-series data characteristics, TDengine adopts a strategy of "**One Table for One Data Collection Point**". TDengine requires the user to create a table for each data collection point (DCP) to store collected time-series data. For example, if there are over 10 million smart meters, it means 10 million tables should be created. For the table above, 4 tables should be created for devices d1001, d1002, d1003, and d1004 to store the data collected. This design has several benefits:

1. Since the metric data from different DCP are fully independent, the data source of each DCP is unique, and a table has only one writer. In this way, data points can be written in a lock-free manner, and the writing speed can be greatly improved.
2. For a DCP, the metric data generated by DCP is ordered by timestamp, so the write operation can be implemented by simple appending, which further greatly improves the data writing speed.
3. The metric data from a DCP is continuously stored, block by block. If you read data for a period of time, it can greatly reduce random read operations and improve read and query performance by orders of magnitude.
4. Inside a data block for a DCP, columnar storage is used, and different compression algorithms are used for different data types. Metrics generally don't vary as significantly between themselves over a time range as compared to other metrics, which allows for a higher compression rate.

-If the metric data of multiple DCPs are traditionally written into a single table, due to uncontrollable network delays, the timing of the data from different DCPs arriving at the server cannot be guaranteed, write operations must be protected by locks, and metric data from one DCP cannot be guaranteed to be continuously stored together. ** One table for one data collection point can ensure the best performance of insert and query of a single data collection point to the greatest possible extent.**
+If the metric data of multiple DCPs are traditionally written into a single table, due to uncontrollable network delays, the timing of the data from different DCPs arriving at the server cannot be guaranteed, write operations must be protected by locks, and metric data from one DCP cannot be guaranteed to be continuously stored together. **One table for one data collection point can ensure the best performance of insert and query of a single data collection point to the greatest possible extent.**

-TDengine suggests using DCP ID as the table name (like D1001 in the above table). Each DCP may collect one or multiple metrics (like the current, voltage, phase as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the time stamp as the index, and won’t build the index on any metrics stored. Column wise storage is used.
+TDengine suggests using the DCP ID as the table name (like d1001 in the above table). Each DCP may collect one or multiple metrics (like the `current`, `voltage`, `phase` above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the timestamp as the index, and won’t build an index on any metrics stored. Column-wise storage is used.
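As an illustration of the one-table-per-DCP model, a plain per-device table might be sketched like this (column types are illustrative assumptions; standard TDengine SQL, not part of this changeset):

```sql
-- One table per data collection point; the first column must be the timestamp,
-- which TDengine uses as the index.
CREATE TABLE d1001 (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT);
```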
|
||||
|
||||
Complex devices, such as connected cars, may have multiple DCPs. In this case, multiple tables are created for a single device, one table per DCP.
|
||||
|
||||
|
@ -137,7 +136,7 @@ The design of one table for one data collection point will require a huge number
A STable is a template for a type of data collection point. A STable contains a set of data collection points (tables) that have the same schema or data structure but different static attributes (tags). To describe a STable, in addition to defining the table structure of the metrics, it is also necessary to define the schema of its tags. The data type of tags can be int, float, or string, and there can be multiple tags, which can be added, deleted, or modified afterward. If the whole system has N different types of data collection points, N STables need to be established.

In the design of TDengine, **a table is used to represent a specific data collection point, and a STable is used to represent a set of data collection points of the same type**. In the smart meters example, we can create a super table named `meters`.
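In the smart meters example, the `meters` supertable could be defined as in the sketch below; the tag schema (`location`, `groupId`) follows the taosBenchmark example used later in this document:

```sql
-- Metrics schema plus a tag schema shared by all subtables of this type.
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT)
  TAGS (location BINARY(64), groupId INT);
```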
## Subtable
@ -156,11 +155,20 @@ The relationship between a STable and the subtables created based on this STable
Queries can be executed on both a table (subtable) and a STable. For a query on a STable, TDengine treats the data in all its subtables as a whole data set. TDengine first finds the subtables that meet the tag filter conditions, then scans the time-series data of only these subtables to perform aggregation. This reduces the number of data sets to be scanned and greatly improves the performance of data aggregation across multiple DCPs. In essence, querying a supertable is a very efficient aggregate query on multiple DCPs of the same type.

In TDengine, it is recommended to use a subtable instead of a regular table for a DCP. In the smart meters example, we can create subtables like d1001, d1002, d1003, and d1004 under the super table `meters`.
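For instance, subtable d1001 could be created from the `meters` supertable as in the sketch below; the tag values are illustrative:

```sql
-- The subtable inherits the metrics schema from meters and fixes its tag values.
CREATE TABLE d1001 USING meters TAGS ('California.SanFrancisco', 2);
```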
To better understand the data model using metrics, tags, supertables, and subtables, please refer to the diagram below, which demonstrates the data model of the smart meters example.
<figure>

<center><figcaption>Figure 1. Meters Data Model Diagram</figcaption></center>
</figure>
## Database
A database is a collection of tables. TDengine allows a running instance to have multiple databases, and each database can be configured with different storage policies. The [characteristics of time-series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/) from different data collection points may be different. Characteristics include collection frequency, retention policy, and others, which determine how you create and configure the database. For example, days to keep, number of replicas, data block size, whether data updates are allowed, and other configurable parameters are determined by the characteristics of your data and your business requirements. In order for TDengine to work with maximum efficiency in various scenarios, TDengine recommends that STables with different data characteristics be created in different databases.
In a database, there can be one or more STables, but a STable belongs to only one database. All tables owned by a STable are stored in only one database.
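For example, a database for the smart meters workload could be created as in the sketch below; the parameter values are illustrative, not a recommendation:

```sql
-- Keep data for 365 days, partition data files by 10-day spans, use 4 vgroups.
CREATE DATABASE power KEEP 365 DURATION 10 VGROUPS 4;
```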
|
@ -170,4 +178,4 @@ FQDN (Fully Qualified Domain Name) is the full domain name of a specific compute
Each node of a TDengine cluster is uniquely identified by an endpoint, which consists of an FQDN and a port, such as h1.tdengine.com:6030. In this way, when the IP changes, we can still use the FQDN to dynamically find the node without changing any configuration of the cluster. In addition, FQDN is used to facilitate unified access to the same cluster from the Intranet and the Internet.

TDengine does not recommend using an IP address to access the cluster. FQDN is recommended for cluster management.
@ -13,7 +13,7 @@ If Docker is already installed on your computer, run the following command:
```bash
docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
```
Note that TDengine Server 3.0 uses TCP port 6030. Port 6041 is used by taosAdapter for the REST API service. Ports 6043 through 6049 are used by taosAdapter for other connectors. You can open these ports as needed.

Run the following command to ensure that your container is running:
@ -21,7 +21,7 @@ Run the following command to ensure that your container is running:
```bash
docker ps
```
Enter the container and open the `bash` shell:
```shell
docker exec -it <container name> bash
```
@ -31,68 +31,68 @@ You can now access TDengine or run other Linux commands.
Note: For information about installing Docker, see the [official documentation](https://docs.docker.com/get-docker/).

## Insert Data into TDengine

You can use the `taosBenchmark` tool included with TDengine to write test data into your deployment.

To do so, run the following command:

```bash
taosBenchmark
```
This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `California.SanFrancisco` or `California.SanDiego`.

The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required depends on the hardware specifications of the local system.

You can customize the test deployment that taosBenchmark creates by specifying command-line parameters. For information about command-line parameters, run the `taosBenchmark --help` command. For more information about taosBenchmark, see [taosBenchmark](/reference/taosbenchmark).
## Open the TDengine CLI
On the container, run the following command to open the TDengine CLI:
```
$ taos

taos>
```
## Test data insert performance

Once your TDengine Server is running normally, you can test its insert performance by executing the `taosBenchmark` utility (formerly named `taosdemo`) in a Linux or Windows terminal:
```bash
taosBenchmark
```
This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `Campbell`, `Cupertino`, `Los Angeles`, `Mountain View`, `Palo Alto`, `San Diego`, `San Francisco`, `San Jose`, `Santa Clara` or `Sunnyvale`.

The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required to create the deployment depends on your hardware. On most modern servers, the deployment is created in ten to twenty seconds.

You can customize the test deployment that taosBenchmark creates by specifying command-line parameters. For information about command-line parameters, run the `taosBenchmark --help` command. For more information about taosBenchmark, see [taosBenchmark](../../reference/taosbenchmark).
## Test data query performance

After using `taosBenchmark` to create your test deployment, you can run queries in the TDengine CLI to test its performance:

From the TDengine CLI (taos) query the number of rows in the `meters` supertable:
```sql
SELECT COUNT(*) FROM test.meters;
```

Query the average, maximum, and minimum values of all 100 million rows of data:

```sql
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters;
```

Query the number of rows whose `location` tag is `San Francisco`:

```sql
SELECT COUNT(*) FROM test.meters WHERE location = "San Francisco";
```

Query the average, maximum, and minimum values of all rows whose `groupId` tag is `10`:

```sql
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 10;
```

Query the average, maximum, and minimum values for table `d10` in 10 second intervals:

```sql
SELECT FIRST(ts), AVG(current), MAX(voltage), MIN(phase) FROM test.d10 INTERVAL(10s);
```

In the query above you are selecting the first timestamp (ts) in the interval; another way of selecting this would be `_wstart`, which gives the start of the time window. For more information about windowed queries, see [Time-Series Extensions](../../taos-sql/distinguished/).
## Additional Information
@ -9,23 +9,24 @@ import PkgListV3 from "/components/PkgListV3";
For information about installing TDengine on Docker, see [Quick Install on Docker](../../get-started/docker). If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).

The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface (CLI, `taos`), and some tools. Note that taosAdapter supports Linux only. In addition to connectors for multiple languages, TDengine also provides a [REST API](../../reference/rest-api) through [taosAdapter](../../reference/taosadapter).

The standard server installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark`, and sample code. You can also download the Lite package that includes only `taosd` and the C/C++ connector.

The TDengine Community Edition is released as Deb and RPM packages. The Deb package can be installed on Debian, Ubuntu, and derivative systems. The RPM package can be installed on CentOS, RHEL, SUSE, and derivative systems. A .tar.gz package is also provided for enterprise customers, and you can install TDengine over `apt-get` as well. The .tar.gz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the Deb or RPM package, download and install taosTools separately. TDengine can also be installed on 64-bit Windows.
## Installation
<Tabs>
<TabItem label=".deb" value="debinst">
1. Download the Deb installation package.
   <PkgListV3 type={6}/>
2. In the directory where the package is located, use `dpkg` to install the package:

> Please replace `<version>` with the version of the package that you downloaded.
```bash
# Enter the name of the package that you downloaded.
sudo dpkg -i TDengine-server-<version>-Linux-x64.deb
```
@ -34,11 +35,12 @@ sudo dpkg -i TDengine-server-<version>-Linux-x64.deb
<TabItem label=".rpm" value="rpminst">
1. Download the .rpm installation package.
   <PkgListV3 type={5}/>
2. In the directory where the package is located, use `rpm` to install the package:

> Please replace `<version>` with the version of the package that you downloaded.
```bash
# Enter the name of the package that you downloaded.
sudo rpm -ivh TDengine-server-<version>-Linux-x64.rpm
```
@ -47,11 +49,12 @@ sudo rpm -ivh TDengine-server-<version>-Linux-x64.rpm
<TabItem label=".tar.gz" value="tarinst">
1. Download the .tar.gz installation package.
   <PkgListV3 type={0}/>
2. In the directory where the package is located, use `tar` to decompress the package:

> Please replace `<version>` with the version of the package that you downloaded.
```bash
# Enter the name of the package that you downloaded.
tar -zxvf TDengine-server-<version>-Linux-x64.tar.gz
```
@ -96,23 +99,23 @@ sudo apt-get install tdengine
This installation method is supported only for Debian and Ubuntu.
::::
</TabItem>
<TabItem label="Windows" value="windows">
Note: TDengine only supports Windows Server 2016/2019 and Windows 10/11 on the Windows platform.
1. Download the Windows installation package.
   <PkgListV3 type={3}/>
2. Run the downloaded package to install TDengine.
</TabItem>
</Tabs>
:::info
For information about TDengine releases, see [Release History](../../releases).
:::

:::note
On the first node in your TDengine cluster, leave the `Enter FQDN:` prompt blank and press **Enter**. On subsequent nodes, you can enter the endpoint of the first dnode in the cluster. You can also configure this setting after you have finished installing TDengine.
:::
@ -147,7 +150,7 @@ Active: inactive (dead)
After confirming that TDengine is running, run the `taos` command to access the TDengine CLI.

The following `systemctl` commands can help you manage the TDengine service:

- Start TDengine Server: `systemctl start taosd`
@ -159,7 +162,7 @@ The following `systemctl` commands can help you manage TDengine:
:::info

- The `systemctl` command requires _root_ privileges. If you are not logged in as the _root_ user, use the `sudo` command.
- The `systemctl stop taosd` command does not instantly stop TDengine Server. The server is stopped only after all data in memory is flushed to disk. The time required depends on the cache size.
- If your system does not include `systemd`, you can run `/usr/local/taos/bin/taosd` to start TDengine manually.
@ -174,23 +177,9 @@ After the installation is complete, run `C:\TDengine\taosd.exe` to start TDengin
</TabItem>
</Tabs>

## Command Line Interface (CLI)

You can use the TDengine CLI to monitor your TDengine deployment and execute ad hoc queries. To open the CLI, execute `taos` in a Linux terminal where TDengine is installed, or run `taos.exe` in the `C:\TDengine` directory on a Windows machine where TDengine is installed.
```bash
taos
```
@ -205,52 +194,71 @@ taos>
For example, you can create and delete databases and tables and run all types of queries. Each SQL command must end with a semicolon (;). For example:
```sql
CREATE DATABASE demo;
USE demo;
CREATE TABLE t (ts TIMESTAMP, speed INT);
INSERT INTO t VALUES ('2019-07-15 00:00:00', 10);
INSERT INTO t VALUES ('2019-07-15 01:00:00', 20);
SELECT * FROM t;

           ts            | speed |
========================================
 2019-07-15 00:00:00.000 |    10 |
 2019-07-15 01:00:00.000 |    20 |

Query OK, 2 row(s) in set (0.003128s)
```
You can also monitor the deployment status, add and remove user accounts, and manage running instances. You can run the TDengine CLI on either Linux or Windows machines. For more information, see [TDengine CLI](../../reference/taos-shell/).
## Test data insert performance

Once your TDengine Server is running normally, you can test its insert performance by executing the `taosBenchmark` utility (formerly named `taosdemo`) in a Linux or Windows terminal:
```bash
taosBenchmark
```
This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `Campbell`, `Cupertino`, `Los Angeles`, `Mountain View`, `Palo Alto`, `San Diego`, `San Francisco`, `San Jose`, `Santa Clara` or `Sunnyvale`.

The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required to create the deployment depends on your hardware. On most modern servers, the deployment is created in ten to twenty seconds.

You can customize the test deployment that taosBenchmark creates by specifying command-line parameters. For information about command-line parameters, run the `taosBenchmark --help` command. For more information about taosBenchmark, see [taosBenchmark](../../reference/taosbenchmark).
## Test data query performance

After using `taosBenchmark` to create your test deployment, you can run queries in the TDengine CLI to test its performance:

From the TDengine CLI (taos) query the number of rows in the `meters` supertable:
```sql
SELECT COUNT(*) FROM test.meters;
```

Query the average, maximum, and minimum values of all 100 million rows of data:

```sql
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters;
```

Query the number of rows whose `location` tag is `San Francisco`:

```sql
SELECT COUNT(*) FROM test.meters WHERE location = "San Francisco";
```

Query the average, maximum, and minimum values of all rows whose `groupId` tag is `10`:

```sql
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 10;
```

Query the average, maximum, and minimum values for table `d10` in 10 second intervals:

```sql
SELECT FIRST(ts), AVG(current), MAX(voltage), MIN(phase) FROM test.d10 INTERVAL(10s);
```

In the query above you are selecting the first timestamp (ts) in the interval; another way of selecting this would be `_wstart`, which gives the start of the time window. For more information about windowed queries, see [Time-Series Extensions](../../taos-sql/distinguished/).
@ -1,6 +1,7 @@
---
sidebar_label: Connect
title: Connect to TDengine
description: "How to establish connections to TDengine and how to install and use TDengine connectors."
---

import Tabs from "@theme/Tabs";
@ -279,6 +280,6 @@ Prior to establishing connection, please make sure TDengine is already running a
</Tabs>

:::tip
If the connection fails, in most cases it's caused by improper configuration of the FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](https://docs.tdengine.com/train-faq/faq).
:::
@ -0,0 +1,441 @@
---
sidebar_label: High Performance Writing
title: High Performance Writing
---

import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";

This chapter introduces how to write data into TDengine with high throughput.

## How to achieve high performance data writing

To achieve high performance writing, there are a few aspects to consider. The following sections describe the most important factors in achieving high performance writing.

### Application Program

From the perspective of the application program, you need to consider:
1. The data size of each single write, also known as batch size. Generally speaking, a higher batch size generates better writing performance. However, once the batch size exceeds a certain value, you will not get any additional benefit. When using SQL to write into TDengine, it's better to put as much data as possible in a single SQL statement. The maximum SQL length supported by TDengine is 1,048,576 bytes, i.e. 1 MB. (A multi-row insert of this kind is sketched after this list.)

2. The number of concurrent connections. Normally more connections give better results. However, once the number of connections exceeds the processing ability of the server side, performance may degrade.

3. The distribution of data to be written across tables or sub-tables. Writing to a single table in one batch is more efficient than writing to multiple tables in one batch.

4. Data writing protocol.
   - Parameter binding mode is more efficient than SQL because it doesn't have the cost of parsing SQL.
   - Writing to known existing tables is more efficient than writing to uncertain tables in automatic table creation mode, because the latter needs to check whether the table exists before actually writing data into it.
   - Writing in SQL is more efficient than writing in schemaless mode, because schemaless writing creates tables automatically and may alter table schemas.

Application programs need to take care of the above factors and try to take advantage of them. The application program should write to a single table in each write batch. The batch size needs to be tuned to a proper value on a specific system, and the number of concurrent connections also needs to be tuned, to achieve the best writing throughput.
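Below is a minimal sketch of such a batched, single-table write, assuming the `meters` supertable from the smart meters example; the subtable name, tag values, and rows are illustrative. Packing many rows into one statement amortizes the SQL parsing and network round-trip cost.

```sql
-- One statement, one subtable, many rows; the table is created on first use.
INSERT INTO d1001 USING meters TAGS ('California.SanFrancisco', 2)
  VALUES ('2018-10-03 14:38:05.000', 10.30, 219, 0.31)
         ('2018-10-03 14:38:15.000', 12.60, 218, 0.33)
         ('2018-10-03 14:38:16.800', 12.30, 221, 0.28);
```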
### Data Source

Application programs need to read data from a data source and then write it into TDengine. If you meet one or more of the situations below, you need to set up message queues between the threads reading from the data source and the threads writing into TDengine.

1. There are multiple data sources, and the data generation speed of each data source is much slower than the writing speed of a single thread. In this case, the purpose of the message queues is to consolidate the data from multiple data sources to increase the batch size of a single write.
2. The data generation speed of a single data source is much higher than the writing speed of a single thread. In this case, the purpose of the message queue is to provide a buffer so that data is not lost, and multiple writing threads can fetch data from the buffer.
3. The data for a single table comes from multiple data sources. In this case, the purpose of the message queues is to combine the data for a single table to improve write efficiency.

If the data source is Kafka, then the application program is a consumer of Kafka, and you can benefit from some Kafka features to achieve high performance writing:

1. Put the data for a table in a single partition of a single topic so that it's easier to put the data for each table together and write in batches.
2. Subscribe to multiple topics to accumulate data together.
3. Add more consumers to gain more concurrency and throughput.
4. Increase the size of a single fetch to increase the size of a write batch.
### Tune TDengine

On the server side, the database configuration parameter `vgroups` needs to be set carefully to maximize system performance. If it's set too low, the system capability can't be fully utilized; if it's set too high, unnecessary resource competition may result. A reasonable rule of thumb for the `vgroups` parameter is twice the number of CPU cores. However, depending on the actual system resources, it may still need to be tuned.

For more configuration parameters, please refer to [Database Configuration](../../../taos-sql/database) and [Server Configuration](../../../reference/config).
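As an illustration only, on a 16-core server the test database could be created with `vgroups` set to twice the core count; the database name is an assumption:

```sql
-- 16 CPU cores -> 32 vgroups, following the rule of thumb above.
CREATE DATABASE test VGROUPS 32;
```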
## Sample Programs

This section introduces sample programs that demonstrate how to write into TDengine with high performance.

### Scenario

Below is the scenario for the sample programs of high performance writing.

- The application program reads data from a data source. The sample program simulates a data source by generating data.
- The speed of a single writing thread is much slower than the speed of generating data, so the program starts multiple writing threads. Each thread establishes a connection to TDengine, and each thread has a message queue of fixed size.
- The application program maps the received data to different writing threads based on table name, to make sure all the data for each table is always processed by a specific writing thread.
- Each writing thread writes the received data into TDengine once its message queue becomes empty or the amount of read data reaches a threshold.

### Sample Programs

The sample programs listed in this section are based on the scenario described previously. If your scenario is different, please try to adjust the code based on the principles described in this chapter.

The sample programs assume the source data is for all the different subtables of the same supertable (meters). The supertable has been created before the sample programs start writing data. Subtables are created automatically according to received data. If there are multiple supertables in your case, please adjust the part that creates tables automatically.
<Tabs defaultValue="java" groupId="lang">
<TabItem label="Java" value="java">
**Program Inventory**

| Class            | Description                                                                                              |
| ---------------- | -------------------------------------------------------------------------------------------------------- |
| FastWriteExample | Main program                                                                                             |
| ReadTask         | Reads data from the simulated data source and puts it into a queue according to the hash of the table name |
| WriteTask        | Reads data from a queue, composes a write batch, and writes it into TDengine                             |
| MockDataSource   | Generates data for some subtables of the supertable meters                                               |
| SQLWriter        | WriteTask uses this class to compose SQL, create tables automatically, check SQL length, and write data  |
| StmtWriter       | Writes in parameter binding mode (not finished yet)                                                      |
| DataBaseMonitor  | Calculates the writing speed and outputs it to the console every 10 seconds                              |

Below is the complete code of the classes in the above table, with more detailed description.
<details>
<summary>FastWriteExample</summary>
The main program is responsible for:

1. Creating message queues
2. Starting writing threads
3. Starting reading threads
4. Outputting the writing speed every 10 seconds

The main program provides 4 parameters for tuning:

1. The number of reading threads, default value is 1
2. The number of writing threads, default value is 2
3. The total number of tables in the generated data, default value is 1,000. These tables are distributed evenly across all writing threads. If the number of tables is very big, it will take a long time to create them at the start.
4. The batch size of a single write, default value is 3,000

The capacity of the message queue also impacts performance and can be tuned by modifying the program. Normally it's better to have a larger message queue: a larger message queue means a lower possibility of being blocked when enqueueing and higher throughput, but it also consumes more memory. The default value used in the sample programs is already big enough.
```java
{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java}}
```

</details>
<details>
<summary>ReadTask</summary>

ReadTask reads data from the data source. Each ReadTask is associated with a simulated data source. Each data source generates data for a group of specific tables, and the data of any table is only generated from a single specific data source.

ReadTask puts data into its message queue in blocking mode. That means the put operation is blocked if the message queue is full.

```java
{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java}}
```

</details>
<details>
<summary>WriteTask</summary>

```java
{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java}}
```

</details>
<details>
<summary>MockDataSource</summary>

```java
{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java}}
```

</details>
<details>
<summary>SQLWriter</summary>

The SQLWriter class encapsulates the logic of composing SQL and writing data. Note that none of the tables are created in advance; they are created automatically when the "table doesn't exist" exception is caught. For other exceptions, the SQL statement that caused the exception is logged for you to debug.

```java
{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java}}
```

</details>
<details>
<summary>DataBaseMonitor</summary>

```java
{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java}}
```

</details>
**Steps to Launch**
<details>
<summary>Launch the Java Sample Program</summary>

You need to set the environment variable `TDENGINE_JDBC_URL` before launching the program. If the TDengine Server is set up on localhost, then the default user name, password, and port can be used, as below:

```
TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
```
**Launch in IDE**

1. Clone the TDengine repository
   ```
   git clone git@github.com:taosdata/TDengine.git --depth 1
   ```
2. Use the IDE to open the `docs/examples/java` directory
3. Configure the environment variable `TDENGINE_JDBC_URL`. If you already configured it before launching the IDE, you can skip this step.
4. Run class `com.taos.example.highvolume.FastWriteExample`
**Launch on server**

If you want to launch the sample program on a remote server, please follow the steps below:

1. Package the sample programs. Execute the command below under the directory `TDengine/docs/examples/java`:
   ```
   mvn package
   ```
2. Create an `examples/java` directory on the server
   ```
   mkdir -p examples/java
   ```
3. Copy dependencies (the commands below assume you are working on a local Windows host and launching on a remote Linux host)
   - Copy dependent packages
     ```
     scp -r .\target\lib <user>@<host>:~/examples/java
     ```
   - Copy the jar of the sample programs
     ```
     scp -r .\target\javaexample-1.0.jar <user>@<host>:~/examples/java
     ```
4. Configure the environment variable
   Edit `~/.bash_profile` or `~/.bashrc` and add the line below:

   ```
   export TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
   ```

   If your TDengine server is not deployed on localhost or doesn't use the default port, you need to change the above URL to the correct value for your environment.

5. Launch the sample program

   ```
   java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample <read_thread_count> <write_thread_count> <total_table_count> <max_batch_size>
   ```

6. The sample program doesn't exit unless you press <kbd>CTRL</kbd> + <kbd>C</kbd> to terminate it.
   Below is the output of a run on a server with 16 cores, 64 GB memory, and an SSD.
```
root@vm85$ java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample 2 12
18:56:35.896 [main] INFO c.t.e.highvolume.FastWriteExample - readTaskCount=2, writeTaskCount=12 tableCount=1000 maxBatchSize=3000
18:56:36.011 [WriteThread-0] INFO c.taos.example.highvolume.WriteTask - started
18:56:36.015 [WriteThread-0] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
18:56:36.021 [WriteThread-1] INFO c.taos.example.highvolume.WriteTask - started
18:56:36.022 [WriteThread-1] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
18:56:36.031 [WriteThread-2] INFO c.taos.example.highvolume.WriteTask - started
18:56:36.032 [WriteThread-2] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
18:56:36.041 [WriteThread-3] INFO c.taos.example.highvolume.WriteTask - started
18:56:36.042 [WriteThread-3] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
18:56:36.093 [WriteThread-4] INFO c.taos.example.highvolume.WriteTask - started
18:56:36.094 [WriteThread-4] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
18:56:36.099 [WriteThread-5] INFO c.taos.example.highvolume.WriteTask - started
18:56:36.100 [WriteThread-5] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
18:56:36.100 [WriteThread-6] INFO c.taos.example.highvolume.WriteTask - started
18:56:36.101 [WriteThread-6] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
18:56:36.103 [WriteThread-7] INFO c.taos.example.highvolume.WriteTask - started
18:56:36.104 [WriteThread-7] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
18:56:36.105 [WriteThread-8] INFO c.taos.example.highvolume.WriteTask - started
18:56:36.107 [WriteThread-8] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
18:56:36.108 [WriteThread-9] INFO c.taos.example.highvolume.WriteTask - started
18:56:36.109 [WriteThread-9] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
18:56:36.156 [WriteThread-10] INFO c.taos.example.highvolume.WriteTask - started
18:56:36.157 [WriteThread-11] INFO c.taos.example.highvolume.WriteTask - started
18:56:36.158 [WriteThread-10] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
18:56:36.158 [ReadThread-0] INFO com.taos.example.highvolume.ReadTask - started
18:56:36.158 [ReadThread-1] INFO com.taos.example.highvolume.ReadTask - started
18:56:36.158 [WriteThread-11] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
18:56:46.369 [main] INFO c.t.e.highvolume.FastWriteExample - count=18554448 speed=1855444
18:56:56.946 [main] INFO c.t.e.highvolume.FastWriteExample - count=39059660 speed=2050521
18:57:07.322 [main] INFO c.t.e.highvolume.FastWriteExample - count=59403604 speed=2034394
18:57:18.032 [main] INFO c.t.e.highvolume.FastWriteExample - count=80262938 speed=2085933
18:57:28.432 [main] INFO c.t.e.highvolume.FastWriteExample - count=101139906 speed=2087696
18:57:38.921 [main] INFO c.t.e.highvolume.FastWriteExample - count=121807202 speed=2066729
18:57:49.375 [main] INFO c.t.e.highvolume.FastWriteExample - count=142952417 speed=2114521
18:58:00.689 [main] INFO c.t.e.highvolume.FastWriteExample - count=163650306 speed=2069788
18:58:11.646 [main] INFO c.t.e.highvolume.FastWriteExample - count=185019808 speed=2136950
```

</details>
</TabItem>
<TabItem label="Python" value="python">
**Program Inventory**

The sample programs in Python use multi-processing and cross-process message queues.

| Function/Class               | Description                                                                      |
| ---------------------------- | --------------------------------------------------------------------------------- |
| main function                | Program entry point; creates child processes and message queues                   |
| run_monitor_process function | Creates the database and supertable; calculates writing speed and outputs it to the console |
| run_read_task function       | Reads data and distributes it to the message queues                               |
| MockDataSource class         | Simulates a data source; returns the next 1,000 rows of each table                |
| run_write_task function      | Reads as much data as possible from its message queue and writes it in batches    |
| SQLWriter class              | Writes in SQL and creates tables automatically                                    |
| StmtWriter class             | Writes in parameter binding mode (not finished yet)                               |
<details>
<summary>main function</summary>

The `main` function is responsible for creating message queues and forking child processes. There are 3 kinds of child processes:

1. Monitoring process, which initializes the database and calculates the writing speed
2. Reading processes (n), which read data from the data source
3. Writing processes (m), which write data into TDengine

The `main` function provides 5 parameters:

1. The number of reading tasks, default value is 1
2. The number of writing tasks, default value is 1
3. The number of tables, default value is 1,000
4. The capacity of each message queue, default value is 1,000,000 bytes
5. The batch size of a single write, default value is 3,000

```python
{{#include docs/examples/python/fast_write_example.py:main}}
```

</details>
<details>
<summary>run_monitor_process</summary>

The monitoring process initializes the database and monitors the writing speed.

```python
{{#include docs/examples/python/fast_write_example.py:monitor}}
```

</details>
<details>
<summary>run_read_task function</summary>

The reading process reads data from another data system and distributes it to its allocated message queue.

```python
{{#include docs/examples/python/fast_write_example.py:read}}
```

</details>
<details>
<summary>MockDataSource</summary>

Below is the simulated data source; we assume each generated piece of data carries its target table name.

```python
{{#include docs/examples/python/mockdatasource.py}}
```

</details>
<details>
<summary>run_write_task function</summary>

The writing process tries to read as much data as possible from its message queue and writes it in batches.

```python
{{#include docs/examples/python/fast_write_example.py:write}}
```

</details>
<details>
<summary>SQLWriter</summary>

The SQLWriter class encapsulates the logic of composing SQL and writing data. Note that none of the tables are created in advance; they are created automatically when the "table doesn't exist" exception is caught. For other exceptions, the SQL statement that caused the exception is logged for you to debug. This class also checks the SQL length and passes the maximum SQL length via the parameter maxSQLLength according to the actual TDengine limit.

```python
{{#include docs/examples/python/sql_writer.py}}
```

</details>
**Steps to Launch**

<details>
<summary>Launch the Python Sample Program</summary>

1. Prerequisites

   - TDengine client driver has been installed
   - Python3 has been installed, version >= 3.8
   - TDengine Python connector `taospy` has been installed

2. Install faster-fifo to replace the Python built-in multiprocessing.Queue

   ```
   pip3 install faster-fifo
   ```

3. Click "Copy" in the sample programs above to copy `fast_write_example.py`, `sql_writer.py`, and `mockdatasource.py`.

4. Execute the program

   ```
   python3 fast_write_example.py <READ_TASK_COUNT> <WRITE_TASK_COUNT> <TABLE_COUNT> <QUEUE_SIZE> <MAX_BATCH_SIZE>
   ```

   Below is the output of a run on a server with 16 cores, 64 GB memory, and an SSD.
```
root@vm85$ python3 fast_write_example.py 8 8
2022-07-14 19:13:45,869 [root] - READ_TASK_COUNT=8, WRITE_TASK_COUNT=8, TABLE_COUNT=1000, QUEUE_SIZE=1000000, MAX_BATCH_SIZE=3000
2022-07-14 19:13:48,882 [root] - WriteTask-0 started with pid 718347
2022-07-14 19:13:48,883 [root] - WriteTask-1 started with pid 718348
2022-07-14 19:13:48,884 [root] - WriteTask-2 started with pid 718349
2022-07-14 19:13:48,884 [root] - WriteTask-3 started with pid 718350
2022-07-14 19:13:48,885 [root] - WriteTask-4 started with pid 718351
2022-07-14 19:13:48,885 [root] - WriteTask-5 started with pid 718352
2022-07-14 19:13:48,886 [root] - WriteTask-6 started with pid 718353
2022-07-14 19:13:48,886 [root] - WriteTask-7 started with pid 718354
2022-07-14 19:13:48,887 [root] - ReadTask-0 started with pid 718355
2022-07-14 19:13:48,888 [root] - ReadTask-1 started with pid 718356
2022-07-14 19:13:48,889 [root] - ReadTask-2 started with pid 718357
2022-07-14 19:13:48,889 [root] - ReadTask-3 started with pid 718358
2022-07-14 19:13:48,890 [root] - ReadTask-4 started with pid 718359
2022-07-14 19:13:48,891 [root] - ReadTask-5 started with pid 718361
2022-07-14 19:13:48,892 [root] - ReadTask-6 started with pid 718364
2022-07-14 19:13:48,893 [root] - ReadTask-7 started with pid 718365
2022-07-14 19:13:56,042 [DataBaseMonitor] - count=6676310 speed=667631.0
2022-07-14 19:14:06,196 [DataBaseMonitor] - count=20004310 speed=1332800.0
2022-07-14 19:14:16,366 [DataBaseMonitor] - count=32290310 speed=1228600.0
2022-07-14 19:14:26,527 [DataBaseMonitor] - count=44438310 speed=1214800.0
2022-07-14 19:14:36,673 [DataBaseMonitor] - count=56608310 speed=1217000.0
2022-07-14 19:14:46,834 [DataBaseMonitor] - count=68757310 speed=1214900.0
2022-07-14 19:14:57,280 [DataBaseMonitor] - count=80992310 speed=1223500.0
2022-07-14 19:15:07,689 [DataBaseMonitor] - count=93805310 speed=1281300.0
2022-07-14 19:15:18,020 [DataBaseMonitor] - count=106111310 speed=1230600.0
2022-07-14 19:15:28,356 [DataBaseMonitor] - count=118394310 speed=1228300.0
2022-07-14 19:15:38,690 [DataBaseMonitor] - count=130742310 speed=1234800.0
2022-07-14 19:15:49,000 [DataBaseMonitor] - count=143051310 speed=1230900.0
2022-07-14 19:15:59,323 [DataBaseMonitor] - count=155276310 speed=1222500.0
2022-07-14 19:16:09,649 [DataBaseMonitor] - count=167603310 speed=1232700.0
2022-07-14 19:16:19,995 [DataBaseMonitor] - count=179976310 speed=1237300.0
```

</details>
:::note
Don't establish a connection to TDengine in the parent process if you use the Python connector in multi-process mode; otherwise, all connections in the child processes are blocked forever. This is a known issue.
:::

</TabItem>
</Tabs>
@ -16,7 +16,7 @@ import CDemo from "./_sub_c.mdx";
TDengine provides data subscription and consumption interfaces similar to those of message queue products. These interfaces make it easier for applications to obtain data written to TDengine in real time and to process data in the order that events occurred. This simplifies your time-series data processing systems and reduces your costs because it is no longer necessary to deploy a message queue product such as Kafka.

To use TDengine data subscription, you define topics like in Kafka. However, a topic in TDengine is based on query conditions for an existing supertable, table, or subtable - in other words, a SELECT statement. You can use SQL to filter data by tag, table name, column, or expression and then perform a scalar function or user-defined function on the data. Aggregate functions are not supported. This gives TDengine data subscription more flexibility than similar products. The granularity of data can be controlled on demand by applications, while filtering and preprocessing are handled by TDengine instead of the application layer. This implementation reduces the amount of data transmitted and the complexity of applications.
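For example, a topic over the smart-meter data could be defined with a SELECT statement like the sketch below; the topic name and filter condition are illustrative:

```sql
-- Consumers of this topic receive only rows that pass the filter.
CREATE TOPIC topic_meters AS
  SELECT ts, tbname, current, voltage, phase
  FROM meters
  WHERE voltage > 200;
```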
By subscribing to a topic, a consumer can obtain the latest data in that topic in real time. Multiple consumers can be formed into a consumer group that consumes messages together. Consumer groups enable faster processing through multi-threaded, distributed data consumption. Note that consumers in different groups that are subscribed to the same topic do not consume messages together. A single consumer can subscribe to multiple topics. If the data in a supertable is sharded across multiple vnodes, consumer groups can consume it much more efficiently than single consumers. TDengine also includes an acknowledgement mechanism that ensures at-least-once delivery in complicated environments where machines may crash or restart.
@ -20,11 +20,11 @@ In theory, larger cache sizes are always better. However, at a certain point, it
## Read Cache

When you create a database, you can configure whether the latest data from every subtable is cached. To do so, set the *cachemodel* parameter as follows:

- none: Caching is disabled.
- last_row: The latest row of data in each subtable is cached. This option significantly improves the performance of the `LAST_ROW` function.
- last_value: The latest non-null value in each column of each subtable is cached. This option significantly improves the performance of the `LAST` function in normal situations, such as in WHERE, ORDER BY, GROUP BY, and INTERVAL statements.
- both: Rows and columns are both cached. This option is equivalent to simultaneously enabling the last_row and last_value options.
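For example, row caching could be enabled when creating a database, or switched later, as in the sketch below; the database name is illustrative:

```sql
-- Enable last-row caching at creation time, then switch to caching both rows and columns.
CREATE DATABASE power CACHEMODEL 'last_row';
ALTER DATABASE power CACHEMODEL 'both';
```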
## Metadata Cache
@ -39,18 +39,18 @@ To get the hostname on any host, the command `hostname -f` can be executed.
On the physical machine running the application, ping the dnode that is running taosd. If the dnode is not accessible, the application cannot connect to taosd. In this case, verify the DNS and hosts settings on the physical node running the application.
|
||||
|
||||
The end point of each dnode is the output hostname and port, such as h1.taosdata.com:6030.
|
||||
The end point of each dnode is the output hostname and port, such as h1.tdengine.com:6030.
|
||||
|
||||
### Step 5
|
||||
|
||||
Modify the TDengine configuration file `/etc/taos/taos.cfg` on each node. Assuming the first dnode of TDengine cluster is "h1.taosdata.com:6030", its `taos.cfg` is configured as following.
|
||||
Modify the TDengine configuration file `/etc/taos/taos.cfg` on each node. Assuming the first dnode of the TDengine cluster is "h1.tdengine.com:6030", its `taos.cfg` is configured as follows.
|
||||
|
||||
```c
|
||||
// firstEp is the end point to connect to when any dnode starts
|
||||
firstEp h1.taosdata.com:6030
|
||||
firstEp h1.tdengine.com:6030
|
||||
|
||||
// must be configured to the FQDN of the host where the dnode is launched
|
||||
fqdn h1.taosdata.com
|
||||
fqdn h1.tdengine.com
|
||||
|
||||
// the port used by the dnode, default is 6030
|
||||
serverPort 6030
|
||||
|
@ -76,13 +76,13 @@ The first dnode can be started following the instructions in [Get Started](/get-
|
|||
taos> show dnodes;
|
||||
id | endpoint | vnodes | support_vnodes | status | create_time | note |
|
||||
============================================================================================================================================
|
||||
1 | h1.taosdata.com:6030 | 0 | 1024 | ready | 2022-07-16 10:50:42.673 | |
|
||||
1 | h1.tdengine.com:6030 | 0 | 1024 | ready | 2022-07-16 10:50:42.673 | |
|
||||
Query OK, 1 rows affected (0.007984s)
|
||||
|
||||
|
||||
```
|
||||
|
||||
From the above output, it is shown that the end point of the started dnode is "h1.taosdata.com:6030", which is the `firstEp` of the cluster.
|
||||
From the above output, it is shown that the end point of the started dnode is "h1.tdengine.com:6030", which is the `firstEp` of the cluster.
|
||||
|
||||
## Add DNODE
|
||||
|
||||
|
@ -90,7 +90,7 @@ There are a few steps necessary to add other dnodes in the cluster.
|
|||
|
||||
Second, we can start `taosd` as instructed in [Get Started](/get-started/).
|
||||
|
||||
Then, on the first dnode i.e. h1.taosdata.com in our example, use TDengine CLI `taos` to execute the following command:
|
||||
Then, on the first dnode i.e. h1.tdengine.com in our example, use TDengine CLI `taos` to execute the following command:
|
||||
|
||||
```sql
|
||||
CREATE DNODE "h2.taos.com:6030";
|
||||
|
@ -98,7 +98,7 @@ CREATE DNODE "h2.taos.com:6030";
|
|||
|
||||
This adds the end point of the new dnode (from Step 4) into the end point list of the cluster. In this command, "fqdn:port" must be enclosed in double quotation marks. Change `"h2.taos.com:6030"` to the end point of your new dnode.
|
||||
|
||||
Then on the first dnode h1.taosdata.com, execute `show dnodes` in `taos`
|
||||
Then on the first dnode h1.tdengine.com, execute `show dnodes` in `taos`
|
||||
|
||||
```sql
|
||||
SHOW DNODES;
|
||||
|
@ -114,7 +114,9 @@ The above process can be repeated to add more dnodes in the cluster.
|
|||
|
||||
Any node that is in the cluster and online can be the firstEp of new nodes.
|
||||
Nodes use the firstEp parameter only when joining a cluster for the first time. After a node has joined the cluster, it stores the latest mnode in its end point list and no longer makes use of firstEp.
|
||||
However, firstEp is used by clients that connect to the cluster. For example, if you run `taos shell` without arguments, it connects to the firstEp by default.
|
||||
|
||||
However, firstEp is used by clients that connect to the cluster. For example, if you run TDengine CLI `taos` without arguments, it connects to the firstEp by default.
|
||||
|
||||
Two dnodes that are launched without a firstEp value operate independently of each other. It is not possible to add one dnode to the other dnode and form a cluster. It is also not possible to form two independent clusters into a new cluster.
|
||||
|
||||
:::
|
||||
|
|
|
@ -9,6 +9,7 @@ TDengine is a cloud-native time-series database that can be deployed on Kubernet
|
|||
|
||||
Before deploying TDengine on Kubernetes, perform the following:
|
||||
|
||||
* These steps are compatible with Kubernetes v1.5 and later versions.
|
||||
* Install and configure minikube, kubectl, and helm.
|
||||
* Install and deploy Kubernetes and ensure that it can be accessed and used normally. Update any container registries or other services as necessary.
|
||||
|
||||
|
@ -100,7 +101,7 @@ spec:
|
|||
# Must set if you want a cluster.
|
||||
- name: TAOS_FIRST_EP
|
||||
value: "$(STS_NAME)-0.$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local:$(TAOS_SERVER_PORT)"
|
||||
# TAOS_FQND should always be setted in k8s env.
|
||||
# TAOS_FQDN should always be set in k8s env.
|
||||
- name: TAOS_FQDN
|
||||
value: "$(POD_NAME).$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local"
|
||||
volumeMounts:
|
||||
|
|
|
@ -152,7 +152,7 @@ clusterDomainSuffix: ""
|
|||
# converting an upper-snake-cased variable like `TAOS_DEBUG_FLAG`,
|
||||
# to a camelCase taos config variable `debugFlag`.
|
||||
#
|
||||
# See the variable list at https://www.taosdata.com/cn/documentation/administrator .
|
||||
# See the [Configuration Variables](../../reference/config)
|
||||
#
|
||||
# Note:
|
||||
# 1. firstEp/secondEp: should not be setted here, it's auto generated at scale-up.
|
||||
|
@ -170,71 +170,21 @@ taoscfg:
|
|||
# number of replications, for cluster only
|
||||
TAOS_REPLICA: "1"
|
||||
|
||||
|
||||
# number of days per DB file
|
||||
# TAOS_DAYS: "10"
|
||||
|
||||
# number of days to keep DB file, default is 10 years.
|
||||
#TAOS_KEEP: "3650"
|
||||
|
||||
# cache block size (Mbyte)
|
||||
#TAOS_CACHE: "16"
|
||||
|
||||
# number of cache blocks per vnode
|
||||
#TAOS_BLOCKS: "6"
|
||||
|
||||
# minimum rows of records in file block
|
||||
#TAOS_MIN_ROWS: "100"
|
||||
|
||||
# maximum rows of records in file block
|
||||
#TAOS_MAX_ROWS: "4096"
|
||||
|
||||
#
|
||||
# TAOS_NUM_OF_THREADS_PER_CORE: number of threads per CPU core
|
||||
#TAOS_NUM_OF_THREADS_PER_CORE: "1.0"
|
||||
# TAOS_NUM_OF_RPC_THREADS: number of threads for RPC
|
||||
#TAOS_NUM_OF_RPC_THREADS: "2"
|
||||
|
||||
|
||||
#
|
||||
# TAOS_NUM_OF_COMMIT_THREADS: number of threads to commit cache data
|
||||
#TAOS_NUM_OF_COMMIT_THREADS: "4"
|
||||
|
||||
#
|
||||
# TAOS_RATIO_OF_QUERY_CORES:
|
||||
# the proportion of total CPU cores available for query processing
|
||||
# 2.0: the query threads will be set to double of the CPU cores.
|
||||
# 1.0: all CPU cores are available for query processing [default].
|
||||
# 0.5: only half of the CPU cores are available for query.
|
||||
# 0.0: only one core available.
|
||||
#TAOS_RATIO_OF_QUERY_CORES: "1.0"
|
||||
|
||||
#
|
||||
# TAOS_KEEP_COLUMN_NAME:
|
||||
# the last_row/first/last aggregator will not change the original column name in the result fields
|
||||
#TAOS_KEEP_COLUMN_NAME: "0"
|
||||
|
||||
# enable/disable backuping vnode directory when removing vnode
|
||||
#TAOS_VNODE_BAK: "1"
|
||||
|
||||
# enable/disable installation / usage report
|
||||
#TAOS_TELEMETRY_REPORTING: "1"
|
||||
|
||||
# enable/disable load balancing
|
||||
#TAOS_BALANCE: "1"
|
||||
|
||||
# max timer control blocks
|
||||
#TAOS_MAX_TMR_CTRL: "512"
|
||||
|
||||
# time interval of system monitor, seconds
|
||||
#TAOS_MONITOR_INTERVAL: "30"
|
||||
|
||||
# number of seconds allowed for a dnode to be offline, for cluster only
|
||||
#TAOS_OFFLINE_THRESHOLD: "8640000"
|
||||
|
||||
# RPC re-try timer, millisecond
|
||||
#TAOS_RPC_TIMER: "1000"
|
||||
|
||||
# RPC maximum time for ack, seconds.
|
||||
#TAOS_RPC_MAX_TIME: "600"
|
||||
|
||||
# time interval of dnode status reporting to mnode, seconds, for cluster only
|
||||
#TAOS_STATUS_INTERVAL: "1"
|
||||
|
||||
|
@ -245,37 +195,7 @@ taoscfg:
|
|||
#TAOS_MIN_SLIDING_TIME: "10"
|
||||
|
||||
# minimum time window, milli-second
|
||||
#TAOS_MIN_INTERVAL_TIME: "10"
|
||||
|
||||
# maximum delay before launching a stream computation, milli-second
|
||||
#TAOS_MAX_STREAM_COMP_DELAY: "20000"
|
||||
|
||||
# maximum delay before launching a stream computation for the first time, milli-second
|
||||
#TAOS_MAX_FIRST_STREAM_COMP_DELAY: "10000"
|
||||
|
||||
# retry delay when a stream computation fails, milli-second
|
||||
#TAOS_RETRY_STREAM_COMP_DELAY: "10"
|
||||
|
||||
# the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9
|
||||
#TAOS_STREAM_COMP_DELAY_RATIO: "0.1"
|
||||
|
||||
# max number of vgroups per db, 0 means configured automatically
|
||||
#TAOS_MAX_VGROUPS_PER_DB: "0"
|
||||
|
||||
# max number of tables per vnode
|
||||
#TAOS_MAX_TABLES_PER_VNODE: "1000000"
|
||||
|
||||
# the number of acknowledgments required for successful data writing
|
||||
#TAOS_QUORUM: "1"
|
||||
|
||||
# enable/disable compression
|
||||
#TAOS_COMP: "2"
|
||||
|
||||
# write ahead log (WAL) level, 0: no wal; 1: write wal, but no fysnc; 2: write wal, and call fsync
|
||||
#TAOS_WAL_LEVEL: "1"
|
||||
|
||||
# if walLevel is set to 2, the cycle of fsync being executed, if set to 0, fsync is called right away
|
||||
#TAOS_FSYNC: "3000"
|
||||
#TAOS_MIN_INTERVAL_TIME: "1"
|
||||
|
||||
# the compressed rpc message, option:
|
||||
# -1 (no compression)
|
||||
|
@ -283,17 +203,8 @@ taoscfg:
|
|||
# > 0 (rpc message body which larger than this value will be compressed)
|
||||
#TAOS_COMPRESS_MSG_SIZE: "-1"
|
||||
|
||||
# max length of an SQL
|
||||
#TAOS_MAX_SQL_LENGTH: "1048576"
|
||||
|
||||
# the maximum number of records allowed for super table time sorting
|
||||
#TAOS_MAX_NUM_OF_ORDERED_RES: "100000"
|
||||
|
||||
# max number of connections allowed in dnode
|
||||
#TAOS_MAX_SHELL_CONNS: "5000"
|
||||
|
||||
# max number of connections allowed in client
|
||||
#TAOS_MAX_CONNECTIONS: "5000"
|
||||
#TAOS_MAX_SHELL_CONNS: "50000"
|
||||
|
||||
# stop writing logs when the disk size of the log folder is less than this value
|
||||
#TAOS_MINIMAL_LOG_DIR_G_B: "0.1"
|
||||
|
@ -313,21 +224,8 @@ taoscfg:
|
|||
# enable/disable system monitor
|
||||
#TAOS_MONITOR: "1"
|
||||
|
||||
# enable/disable recording the SQL statements via restful interface
|
||||
#TAOS_HTTP_ENABLE_RECORD_SQL: "0"
|
||||
|
||||
# number of threads used to process http requests
|
||||
#TAOS_HTTP_MAX_THREADS: "2"
|
||||
|
||||
# maximum number of rows returned by the restful interface
|
||||
#TAOS_RESTFUL_ROW_LIMIT: "10240"
|
||||
|
||||
# The following parameter is used to limit the maximum number of lines in log files.
|
||||
# max number of lines per log filters
|
||||
# numOfLogLines 10000000
|
||||
|
||||
# enable/disable async log
|
||||
#TAOS_ASYNC_LOG: "0"
|
||||
#TAOS_ASYNC_LOG: "1"
|
||||
|
||||
#
|
||||
# time of keeping log files, days
|
||||
|
@ -344,25 +242,8 @@ taoscfg:
|
|||
# debug flag for all log type, take effect when non-zero value\
|
||||
#TAOS_DEBUG_FLAG: "143"
|
||||
|
||||
# enable/disable recording the SQL in taos client
|
||||
#TAOS_ENABLE_RECORD_SQL: "0"
|
||||
|
||||
# generate core file when service crash
|
||||
#TAOS_ENABLE_CORE_FILE: "1"
|
||||
|
||||
# maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
|
||||
#TAOS_MAX_BINARY_DISPLAY_WIDTH: "30"
|
||||
|
||||
# enable/disable stream (continuous query)
|
||||
#TAOS_STREAM: "1"
|
||||
|
||||
# in retrieve blocking model, only in 50% query threads will be used in query processing in dnode
|
||||
#TAOS_RETRIEVE_BLOCKING_MODEL: "0"
|
||||
|
||||
# the maximum allowed query buffer size in MB during query processing for each data node
|
||||
# -1 no limit (default)
|
||||
# 0 no query allowed, queries are disabled
|
||||
#TAOS_QUERY_BUFFER_SIZE: "-1"
|
||||
```
|
||||
|
||||
## Scaling Out
|
||||
|
|
|
@ -11,7 +11,7 @@ When using TDengine to store and query data, the most important part of the data
|
|||
- The format must be `YYYY-MM-DD HH:mm:ss.MS`; the default time precision is milliseconds (ms), for example `2017-08-12 18:25:58.128`
|
||||
- Internal function `now` can be used to get the current timestamp on the client side
|
||||
- The current timestamp of the client side is applied when `now` is used to insert data
|
||||
- Epoch Time:timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from 1970-01-01 00:00:00.000 (UTC/GMT)
|
||||
- Epoch Time: the timestamp can also be a long integer representing the number of seconds, milliseconds, or nanoseconds, depending on the time precision, since UTC 1970-01-01 00:00:00.
|
||||
- Add/subtract operations can be carried out on timestamps. For example `now-2h` means 2 hours prior to the time at which query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `select * from t1 where ts > now-2w and ts <= now-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations.
|
||||
|
||||
Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`. The default time precision is millisecond. In the statement below, the precision is set to nanoseconds.
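A minimal sketch of such a statement, assuming a database named `test_ns`:

```sql
-- Hypothetical example: create a database whose timestamps
-- are stored with nanosecond precision.
CREATE DATABASE test_ns PRECISION 'ns';
```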
|
||||
|
|
|
@ -71,9 +71,9 @@ database_option: {
|
|||
- SINGLE_STABLE: specifies whether the database can contain more than one supertable.
|
||||
- 0: The database can contain multiple supertables.
|
||||
- 1: The database can contain only one supertable.
|
||||
- WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. Enter a time in seconds. The default value is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted.
|
||||
- WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted.
|
||||
- WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk.
|
||||
- WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. Enter a time in seconds. For single-replica databases, the default value is 0, meaning that each WAL file is deleted immediately after its contents are written to disk; -1 means that WAL files are never deleted. For multi-replica databases, the default value is 4 days.
|
||||
- WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. Enter a size in KB. For single-replica databases, the default value is 0, meaning that each WAL file is deleted immediately after its contents are written to disk; -1 means that WAL files are never deleted. For multi-replica databases, the default value is -1.
|
||||
- WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. For single-replica databases, the default value is 0, meaning that a new WAL file is created only after the previous WAL file was written to disk. For multi-replica databases, the default value is 1 day.
|
||||
- WAL_SEGMENT_SIZE: specifies the maximum size of a WAL file. After the current WAL file reaches this size, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk.
|
||||
|
||||
### Example Statement
|
||||
|
|
|
@ -57,7 +57,7 @@ table_option: {
|
|||
3. MAX_DELAY: specifies the maximum latency for pushing computation results. The default value is 15 minutes or the value of the INTERVAL parameter, whichever is smaller. Enter a value between 0 and 15 minutes in milliseconds, seconds, or minutes. You can enter multiple values separated by commas (,). Note: Retain the default value if possible. Configuring a small MAX_DELAY may cause results to be frequently pushed, affecting storage and query performance. This parameter applies only to supertables and takes effect only when the RETENTIONS parameter has been specified for the database.
|
||||
4. ROLLUP: specifies aggregate functions to roll up. Rolling up a function provides downsampled results based on multiple axes. This parameter applies only to supertables and takes effect only when the RETENTIONS parameter has been specified for the database. You can specify only one function to roll up. The rollup takes effect on all columns except TS. Enter one of the following values: avg, sum, min, max, last, or first.
|
||||
5. SMA: specifies functions on which to enable small materialized aggregates (SMA). SMA is user-defined precomputation of aggregates based on data blocks. Enter one of the following values: max, min, or sum. This parameter can be used with supertables and standard tables.
|
||||
6. TTL: specifies the time to live (TTL) for the table. If the period specified by the TTL parameter elapses without any data being written to the table, TDengine will automatically delete the table. Note: The system may not delete the table at the exact moment that the TTL expires. Enter a value in days. The default value is 0. Note: The TTL parameter has a higher priority than the KEEP parameter. If a table is marked for deletion because the TTL has expired, it will be deleted even if the time specified by the KEEP parameter has not elapsed. This parameter can be used with standard tables and subtables.
|
||||
6. TTL: specifies the time to live (TTL) for the table. If TTL is specified when creating a table, TDengine automatically deletes the table once it has existed longer than the TTL period. Note that the system may not delete the table at the exact moment that the TTL expires, but it guarantees that the table will eventually be deleted. The unit of TTL is days. The default value is 0, meaning the table never expires. A sketch follows below.
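A hedged sketch, assuming a table named `tmp_readings`:

```sql
-- Hypothetical example: this table becomes eligible for automatic
-- deletion once it has existed for more than 90 days.
CREATE TABLE tmp_readings (ts TIMESTAMP, val FLOAT) TTL 90;
```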
|
||||
|
||||
## Create Subtables
|
||||
|
||||
|
|
|
@ -52,11 +52,6 @@ window_clause: {
|
|||
| STATE_WINDOW(col)
|
||||
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
|
||||
|
||||
changes_option: {
|
||||
DURATION duration_val
|
||||
| ROWS rows_val
|
||||
}
|
||||
|
||||
group_by_clause:
|
||||
GROUP BY expr [, expr] ... HAVING condition
|
||||
|
||||
|
@ -126,7 +121,6 @@ SELECT DISTINCT col_name [, col_name ...] FROM tb_name;
|
|||
|
||||
1. Configuration parameter `maxNumOfDistinctRes` in `taos.cfg` is used to control the number of rows to output. The minimum configurable value is 100,000, the maximum configurable value is 100,000,000, the default value is 1,000,000. If the actual number of rows exceeds the value of this parameter, only the number of rows specified by this parameter will be output.
|
||||
2. It can't be guaranteed that the results selected by using `DISTINCT` on columns of `FLOAT` or `DOUBLE` are exactly unique because of the precision errors in floating point numbers.
|
||||
3. `DISTINCT` can't be used in the sub-query of a nested query statement, and can't be used together with aggregate functions, `GROUP BY` or `JOIN` in the same SQL statement.
|
||||
|
||||
:::
|
||||
|
||||
|
|
|
@ -613,6 +613,7 @@ SELECT APERCENTILE(field_name, P[, algo_type]) FROM { tb_name | stb_name } [WHER
|
|||
**Explanations**:
|
||||
- _P_ is in the range [0,100]. When _P_ is 0, the result is the same as the MIN function; when _P_ is 100, the result is the same as the MAX function.
|
||||
- `algo_type` can only be `default` or `t-digest`. Enter `default` to use a histogram-based algorithm. Enter `t-digest` to use the t-digest algorithm to calculate the approximation of the quantile. `default` is used by default.
|
||||
- The approximation result of the `t-digest` algorithm is sensitive to input data order. For example, querying a supertable with different input data orders may produce minor differences in the calculated results. Examples are shown below.
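A minimal sketch against the assumed `meters` supertable, comparing the two algorithms:

```sql
-- Hypothetical examples: approximate the 90th percentile of voltage
-- with the default histogram algorithm, then with t-digest.
SELECT APERCENTILE(voltage, 90) FROM meters;
SELECT APERCENTILE(voltage, 90, 't-digest') FROM meters;
```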
|
||||
|
||||
### AVG
|
||||
|
||||
|
@ -916,7 +917,7 @@ SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
|||
|
||||
**Return value type**: Same as the data type of the column being operated upon
|
||||
|
||||
**Applicable data types**: Numeric
|
||||
**Applicable data types**: Numeric, Timestamp
|
||||
|
||||
**Applicable table types**: standard tables and supertables
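A minimal sketch against the assumed `meters` supertable, taking advantage of the Timestamp support noted above:

```sql
-- Hypothetical example: latest timestamp and highest voltage recorded.
SELECT MAX(ts), MAX(voltage) FROM meters;
```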
|
||||
|
||||
|
@ -931,7 +932,7 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
|
|||
|
||||
**Return value type**: Same as the data type of the column being operated upon
|
||||
|
||||
**Applicable data types**: Numeric
|
||||
**Applicable data types**: Numeric, Timestamp
|
||||
|
||||
**Applicable table types**: standard tables and supertables
|
||||
|
||||
|
@ -1139,7 +1140,7 @@ SELECT STATECOUNT(field_name, oper, val) FROM { tb_name | stb_name } [WHERE clau
|
|||
|
||||
**Applicable parameter values**:
|
||||
|
||||
- oper : Can be one of `LT` (lower than), `GT` (greater than), `LE` (lower than or equal to), `GE` (greater than or equal to), `NE` (not equal to), `EQ` (equal to), the value is case insensitive
|
||||
- oper : Can be one of `'LT'` (lower than), `'GT'` (greater than), `'LE'` (lower than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), or `'EQ'` (equal to). The value is case-insensitive and must be enclosed in quotes.
|
||||
- val : Numeric types
|
||||
|
||||
**Return value type**: Integer
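A hedged sketch of the quoted operator form, assuming a table `d1001` with a `voltage` column:

```sql
-- Hypothetical example: per-row count of consecutive readings
-- whose voltage is greater than 220.
SELECT STATECOUNT(voltage, 'GT', 220) FROM d1001;
```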
|
||||
|
@ -1166,7 +1167,7 @@ SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [W
|
|||
|
||||
**Applicable parameter values**:
|
||||
|
||||
- oper : Can be one of `LT` (lower than), `GT` (greater than), `LE` (lower than or equal to), `GE` (greater than or equal to), `NE` (not equal to), `EQ` (equal to), the value is case insensitive
|
||||
- oper : Can be one of `'LT'` (lower than), `'GT'` (greater than), `'LE'` (lower than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), or `'EQ'` (equal to). The value is case-insensitive and must be enclosed in quotes.
|
||||
- val : Numeric types
|
||||
- unit: The unit of time interval. Enter one of the following options: 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks). If you do not enter a unit of time, the precision of the current database is used by default. (See the sketch below.)
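Similarly, a hedged sketch for stateDuration, with the same assumed table and column:

```sql
-- Hypothetical example: for each row, how long the voltage has
-- stayed above 220, measured in seconds.
SELECT STATEDURATION(voltage, 'GT', 220, 1s) FROM d1001;
```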
|
||||
|
||||
|
|
|
@ -44,13 +44,13 @@ For example, the following SQL statement creates a stream and automatically crea
|
|||
|
||||
```sql
|
||||
CREATE STREAM avg_vol_s INTO avg_vol AS
|
||||
SELECT _wstartts, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
|
||||
SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
|
||||
```
|
||||
|
||||
## Delete a Stream
|
||||
|
||||
```sql
|
||||
DROP STREAM [IF NOT EXISTS] stream_name
|
||||
DROP STREAM [IF EXISTS] stream_name
|
||||
```
|
||||
|
||||
This statement deletes the stream processing service only. The data generated by the stream is retained.
|
||||
|
|
|
@ -30,7 +30,7 @@ The following characters cannot occur in a password: single quotation marks ('),
|
|||
- Maximum number of columns is 4096. There must be at least 2 columns, and the first column must be timestamp.
|
||||
- The maximum length of a tag name is 64 bytes
|
||||
- Maximum number of tags is 128. There must be at least 1 tag. The total length of tag values cannot exceed 16 KB.
|
||||
- Maximum length of single SQL statement is 1 MB (1048576 bytes). It can be configured in the parameter `maxSQLLength` in the client side, the applicable range is [65480, 1048576].
|
||||
- Maximum length of single SQL statement is 1 MB (1048576 bytes).
|
||||
- At most 4096 columns can be returned by `SELECT`. Functions in the query statement constitute columns. An error is returned if the limit is exceeded.
|
||||
- Maximum numbers of databases, STables, tables are dependent only on the system resources.
|
||||
- The number of replicas can only be 1 or 3.
|
||||
|
|
|
@ -245,3 +245,35 @@ Provides dnode configuration information.
|
|||
| 1 | dnode_id | INT | Dnode ID |
|
||||
| 2 | name | BINARY(32) | Parameter |
|
||||
| 3 | value | BINARY(64) | Value |
|
||||
|
||||
## INS_TOPICS
|
||||
|
||||
| # | **Column** | **Data Type** | **Description** |
|
||||
| --- | :---------: | ------------ | ------------------------------ |
|
||||
| 1 | topic_name | BINARY(192) | Topic name |
|
||||
| 2 | db_name | BINARY(64) | Database for the topic |
|
||||
| 3 | create_time | TIMESTAMP | Creation time |
|
||||
| 4 | sql | BINARY(1024) | SQL statement used to create the topic |
|
||||
|
||||
## INS_SUBSCRIPTIONS
|
||||
|
||||
| # | **Column** | **Data Type** | **Description** |
|
||||
| --- | :------------: | ------------ | ------------------------ |
|
||||
| 1 | topic_name | BINARY(204) | Subscribed topic |
|
||||
| 2 | consumer_group | BINARY(193) | Subscribed consumer group |
|
||||
| 3 | vgroup_id | INT | Vgroup ID for the consumer |
|
||||
| 4 | consumer_id | BIGINT | Consumer ID |
|
||||
|
||||
## INS_STREAMS
|
||||
|
||||
| # | **Column** | **Data Type** | **Description** |
|
||||
| --- | :----------: | ------------ | --------------------------------------- |
|
||||
| 1 | stream_name | BINARY(64) | Stream name |
|
||||
| 2 | create_time | TIMESTAMP | Creation time |
|
||||
| 3 | sql | BINARY(1024) | SQL statement used to create the stream |
|
||||
| 4 | status | BINARY(20) | Current status |
|
||||
| 5 | source_db | BINARY(64) | Source database |
|
||||
| 6 | target_db | BINARY(64) | Target database |
|
||||
| 7 | target_table | BINARY(192) | Target table |
|
||||
| 8 | watermark | BIGINT | Watermark (see stream processing documentation) |
|
||||
| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation) |
|
||||
|
|
|
@ -61,15 +61,6 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE
|
|||
| 12 | sub_status | BINARY(1000) | Subquery status |
|
||||
| 13 | sql | BINARY(1024) | SQL statement |
|
||||
|
||||
## PERF_TOPICS
|
||||
|
||||
| # | **Column** | **Data Type** | **Description** |
|
||||
| --- | :---------: | ------------ | ------------------------------ |
|
||||
| 1 | topic_name | BINARY(192) | Topic name |
|
||||
| 2 | db_name | BINARY(64) | Database for the topic |
|
||||
| 3 | create_time | TIMESTAMP | Creation time |
|
||||
| 4 | sql | BINARY(1024) | SQL statement used to create the topic |
|
||||
|
||||
## PERF_CONSUMERS
|
||||
|
||||
| # | **Column** | **Data Type** | **Description** |
|
||||
|
@ -83,15 +74,6 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE
|
|||
| 7 | subscribe_time | TIMESTAMP | Time of first subscription |
|
||||
| 8 | rebalance_time | TIMESTAMP | Time of first rebalance triggering |
|
||||
|
||||
## PERF_SUBSCRIPTIONS
|
||||
|
||||
| # | **Column** | **Data Type** | **Description** |
|
||||
| --- | :------------: | ------------ | ------------------------ |
|
||||
| 1 | topic_name | BINARY(204) | Subscribed topic |
|
||||
| 2 | consumer_group | BINARY(193) | Subscribed consumer group |
|
||||
| 3 | vgroup_id | INT | Vgroup ID for the consumer |
|
||||
| 4 | consumer_id | BIGINT | Consumer ID |
|
||||
|
||||
## PERF_TRANS
|
||||
|
||||
| # | **Column** | **Data Type** | **Description** |
|
||||
|
@ -113,17 +95,3 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE
|
|||
| 2 | create_time | TIMESTAMP | Creation time |
|
||||
| 3 | stable_name | BINARY(192) | Supertable name |
|
||||
| 4 | vgroup_id | INT | Dedicated vgroup name |
|
||||
|
||||
## PERF_STREAMS
|
||||
|
||||
| # | **Column** | **Data Type** | **Description** |
|
||||
| --- | :----------: | ------------ | --------------------------------------- |
|
||||
| 1 | stream_name | BINARY(64) | Stream name |
|
||||
| 2 | create_time | TIMESTAMP | Creation time |
|
||||
| 3 | sql | BINARY(1024) | SQL statement used to create the stream |
|
||||
| 4 | status | BINARY(20) | Current status |
|
||||
| 5 | source_db | BINARY(64) | Source database |
|
||||
| 6 | target_db | BINARY(64) | Target database |
|
||||
| 7 | target_table | BINARY(192) | Target table |
|
||||
| 8 | watermark | BIGINT | Watermark (see stream processing documentation) |
|
||||
| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation) |
|
||||
|
|
|
@ -3,17 +3,7 @@ sidebar_label: SHOW Statement
|
|||
title: SHOW Statement for Metadata
|
||||
---
|
||||
|
||||
In addition to running SELECT statements on INFORMATION_SCHEMA, you can also use SHOW to obtain system metadata, information, and status.
|
||||
|
||||
## SHOW ACCOUNTS
|
||||
|
||||
```sql
|
||||
SHOW ACCOUNTS;
|
||||
```
|
||||
|
||||
Shows information about tenants on the system.
|
||||
|
||||
Note: TDengine Enterprise Edition only.
|
||||
The `SHOW` command can be used to get brief system information. To get details about metadata, information, and status in the system, please use `SELECT` to query the tables in the database `INFORMATION_SCHEMA`.
|
||||
|
||||
## SHOW APPS
|
||||
|
||||
|
@ -194,7 +184,7 @@ Shows information about streams in the system.
|
|||
SHOW SUBSCRIPTIONS;
|
||||
```
|
||||
|
||||
Shows all subscriptions in the current database.
|
||||
Shows all subscriptions in the system.
|
||||
|
||||
## SHOW TABLES
|
||||
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: Permissions Management
|
||||
title: Permissions Management
|
||||
sidebar_label: Access Control
|
||||
title: User and Access Control
|
||||
description: Manage user and user's permission
|
||||
---
|
||||
|
||||
This document describes how to manage permissions in TDengine.
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
title: TDengine SQL
|
||||
description: "The syntax supported by TDengine SQL "
|
||||
description: 'The syntax supported by TDengine SQL '
|
||||
---
|
||||
|
||||
This section explains the syntax of SQL to perform operations on databases, tables and STables, insert data, select data and use functions. We also provide some tips that can be used in TDengine SQL. If you have previous experience with SQL this section will be fairly easy to understand. If you do not have previous experience with SQL, you'll come to appreciate the simplicity and power of SQL. TDengine SQL has been enhanced in version 3.0, and the query engine has been rearchitected. For information about how TDengine SQL has changed, see [Changes in TDengine 3.0](../taos-sql/changes).
|
||||
|
@ -15,7 +15,7 @@ Syntax Specifications used in this chapter:
|
|||
- | means one of a few options, excluding | itself.
|
||||
- … means the item prior to it can be repeated multiple times.
|
||||
|
||||
To better demonstrate the syntax, usage and rules of TAOS SQL, hereinafter it's assumed that there is a data set of data from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below:
|
||||
To better demonstrate the syntax, usage and rules of TDengine SQL, hereinafter it's assumed that there is a data set from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below:
|
||||
|
||||
```
|
||||
taos> DESCRIBE meters;
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
---
|
||||
title: Install & Uninstall
|
||||
title: Install and Uninstall
|
||||
description: Install, Uninstall, Start, Stop and Upgrade
|
||||
---
|
||||
|
||||
import Tabs from "@theme/Tabs";
|
||||
import TabItem from "@theme/TabItem";
|
||||
|
||||
TDengine community version provides deb and rpm packages for users to choose from, based on their system environment. The deb package supports Debian, Ubuntu and derivative systems. The rpm package supports CentOS, RHEL, SUSE and derivative systems. Furthermore, a tar.gz package is provided for TDengine Enterprise customers.
|
||||
This document gives more information about installing, uninstalling, and upgrading TDengine.
|
||||
|
||||
## Install
|
||||
|
||||
|
@ -15,11 +15,48 @@ About details of installing TDenine, please refer to [Installation Guide](../../
|
|||
## Uninstall
|
||||
|
||||
<Tabs>
|
||||
<TabItem label="Uninstall apt-get" value="aptremove">
|
||||
|
||||
Apt-get package of TDengine can be uninstalled as below:
|
||||
|
||||
```bash
|
||||
$ sudo apt-get remove tdengine
|
||||
Reading package lists... Done
|
||||
Building dependency tree
|
||||
Reading state information... Done
|
||||
The following packages will be REMOVED:
|
||||
tdengine
|
||||
0 upgraded, 0 newly installed, 1 to remove and 18 not upgraded.
|
||||
After this operation, 68.3 MB disk space will be freed.
|
||||
Do you want to continue? [Y/n] y
|
||||
(Reading database ... 135625 files and directories currently installed.)
|
||||
Removing tdengine (3.0.0.0) ...
|
||||
TDengine is removed successfully!
|
||||
|
||||
```
|
||||
|
||||
Apt-get package of taosTools can be uninstalled as below:
|
||||
|
||||
```
|
||||
$ sudo apt remove taostools
|
||||
Reading package lists... Done
|
||||
Building dependency tree
|
||||
Reading state information... Done
|
||||
The following packages will be REMOVED:
|
||||
taostools
|
||||
0 upgraded, 0 newly installed, 1 to remove and 0 not upgraded.
|
||||
After this operation, 68.3 MB disk space will be freed.
|
||||
Do you want to continue? [Y/n]
|
||||
(Reading database ... 147973 files and directories currently installed.)
|
||||
Removing taostools (2.1.2) ...
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="Uninstall Deb" value="debuninst">
|
||||
|
||||
Deb package of TDengine can be uninstalled as below:
|
||||
|
||||
```bash
|
||||
```
|
||||
$ sudo dpkg -r tdengine
|
||||
(Reading database ... 137504 files and directories currently installed.)
|
||||
Removing tdengine (3.0.0.0) ...
|
||||
|
@ -27,6 +64,14 @@ TDengine is removed successfully!
|
|||
|
||||
```
|
||||
|
||||
Deb package of taosTools can be uninstalled as below:
|
||||
|
||||
```
|
||||
$ sudo dpkg -r taostools
|
||||
(Reading database ... 147973 files and directories currently installed.)
|
||||
Removing taostools (2.1.2) ...
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem label="Uninstall RPM" value="rpmuninst">
|
||||
|
@ -38,6 +83,13 @@ $ sudo rpm -e tdengine
|
|||
TDengine is removed successfully!
|
||||
```
|
||||
|
||||
RPM package of taosTools can be uninstalled as below:
|
||||
|
||||
```
|
||||
sudo rpm -e taostools
|
||||
taosToole is removed successfully!
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem label="Uninstall tar.gz" value="taruninst">
|
||||
|
@ -46,115 +98,69 @@ tar.gz package of TDengine can be uninstalled as below:
|
|||
|
||||
```
|
||||
$ rmtaos
|
||||
Nginx for TDengine is running, stopping it...
|
||||
TDengine is removed successfully!
|
||||
```
|
||||
|
||||
taosKeeper is removed successfully!
|
||||
tar.gz package of taosTools can be uninstalled as below:
|
||||
|
||||
```
|
||||
$ rmtaostools
|
||||
Start to uninstall taos tools ...
|
||||
|
||||
taos tools is uninstalled successfully!
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="Windows uninstall" value="windows">
|
||||
Run C:\TDengine\unins000.exe to uninstall TDengine on a Windows system.
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
:::note
|
||||
:::info
|
||||
|
||||
- We strongly recommend not to use multiple kinds of installation packages on a single host TDengine.
|
||||
- After deb package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. You can then reinstall if needed.
|
||||
- We strongly recommend against using multiple kinds of installation packages for TDengine on a single host. The packages may affect each other and cause errors.
|
||||
|
||||
```bash
|
||||
$ sudo rm -f /var/lib/dpkg/info/tdengine*
|
||||
```
|
||||
- After the deb package is installed, if the installation directory is removed manually, uninstallation or reinstallation will not work. This issue can be resolved by using the command below, which cleans up the TDengine package information.
|
||||
|
||||
- After rpm package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. You can then reinstall if needed.
|
||||
```
|
||||
$ sudo rm -f /var/lib/dpkg/info/tdengine*
|
||||
```
|
||||
|
||||
```bash
|
||||
$ sudo rpm -e --noscripts tdengine
|
||||
```
|
||||
You can then reinstall if needed.
|
||||
|
||||
- After the rpm package is installed, if the installation directory is removed manually, uninstallation or reinstallation will not work. This issue can be resolved by using the command below, which cleans up the TDengine package information.
|
||||
|
||||
```
|
||||
$ sudo rpm -e --noscripts tdengine
|
||||
```
|
||||
|
||||
You can then reinstall if needed.
|
||||
|
||||
:::
|
||||
|
||||
## Installation Directory
|
||||
|
||||
TDengine is installed at /usr/local/taos if successful.
|
||||
|
||||
```bash
|
||||
$ cd /usr/local/taos
|
||||
$ ll
|
||||
$ ll
|
||||
total 28
|
||||
drwxr-xr-x 7 root root 4096 Feb 22 09:34 ./
|
||||
drwxr-xr-x 12 root root 4096 Feb 22 09:34 ../
|
||||
drwxr-xr-x 2 root root 4096 Feb 22 09:34 bin/
|
||||
drwxr-xr-x 2 root root 4096 Feb 22 09:34 cfg/
|
||||
lrwxrwxrwx 1 root root 13 Feb 22 09:34 data -> /var/lib/taos/
|
||||
drwxr-xr-x 2 root root 4096 Feb 22 09:34 driver/
|
||||
drwxr-xr-x 10 root root 4096 Feb 22 09:34 examples/
|
||||
drwxr-xr-x 2 root root 4096 Feb 22 09:34 include/
|
||||
lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/
|
||||
```
|
||||
|
||||
During the installation process:
|
||||
|
||||
- Configuration directory, data directory, and log directory are created automatically if they don't exist
|
||||
- The default configuration file is located at /etc/taos/taos.cfg, which is a copy of /usr/local/taos/cfg/taos.cfg
|
||||
- The default data directory is /var/lib/taos, which is a soft link to /usr/local/taos/data
|
||||
- The default log directory is /var/log/taos, which is a soft link to /usr/local/taos/log
|
||||
- The executables at /usr/local/taos/bin are linked to /usr/bin
|
||||
- The DLL files at /usr/local/taos/driver are linked to /usr/lib
|
||||
- The header files at /usr/local/taos/include are linked to /usr/include
|
||||
|
||||
:::note
|
||||
Uninstalling and Modifying Files
|
||||
|
||||
- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, log directory /var/log/taos are kept. They can be deleted manually with caution, because data can't be recovered. Please follow data integrity, security, backup or relevant SOPs before deleting any data.
|
||||
|
||||
- When reinstalling TDengine, if the default configuration file /etc/taos/taos.cfg exists, it will be kept and the configuration file in the installation package will be renamed to taos.cfg.orig and stored at /usr/local/taos/cfg to be used as configuration sample. Otherwise the configuration file in the installation package will be installed to /etc/taos/taos.cfg and used.
|
||||
|
||||
## Start and Stop
|
||||
|
||||
Linux system services `systemd`, `systemctl` or `service` are used to start, stop and restart TDengine. The server process of TDengine is `taosd`, which is started automatically after the Linux system is started. System operators can use `systemd`, `systemctl` or `service` to start, stop or restart TDengine server.
|
||||
|
||||
For example, if using `systemctl` , the commands to start, stop, restart and check TDengine server are below:
|
||||
|
||||
- Start server:`systemctl start taosd`
|
||||
|
||||
- Stop server:`systemctl stop taosd`
|
||||
|
||||
- Restart server:`systemctl restart taosd`
|
||||
|
||||
- Check server status:`systemctl status taosd`
|
||||
|
||||
Another component, `taosAdapter`, provides HTTP services for TDengine. It should also be started and stopped using `systemctl`.
|
||||
|
||||
If the server process is OK, the output of `systemctl status` is like below:
|
||||
|
||||
```
|
||||
Active: active (running)
|
||||
```
|
||||
|
||||
Otherwise, the output is as below:
|
||||
|
||||
```
|
||||
Active: inactive (dead)
|
||||
```
|
||||
|
||||
## Upgrade
|
||||
|
||||
There are two aspects to an upgrade: upgrading the installation package and upgrading a running server.
|
||||
|
||||
To upgrade a package, follow the steps mentioned previously to first uninstall the old version then install the new version.
|
||||
|
||||
Upgrading a running server is much more complex. First, check the version numbers of the old and new versions. The version number of TDengine consists of 4 sections; the old version can be upgraded to the new version only if the first 3 sections match. The steps for upgrading a running server are as follows:
|
||||
|
||||
- Stop inserting data
|
||||
- Make sure all data is persisted to disk
|
||||
- Make some simple queries (such as total rows in stables and tables; note down the values, and follow best practices and relevant SOPs)
|
||||
- Stop the cluster of TDengine
|
||||
- Uninstall old version and install new version
|
||||
- Start the cluster of TDengine
|
||||
- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss
|
||||
- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss
|
||||
- Run some simple data insertion statements to make sure the cluster works well
|
||||
- Restore business services
|
||||
|
||||
:::warning
|
||||
|
||||
TDengine doesn't guarantee any lower version is compatible with the data generated by a higher version, so it's never recommended to downgrade the version.
|
||||
|
||||
:::
|
||||
|
|
|
@ -1,40 +1,32 @@
|
|||
---
|
||||
sidebar_label: Resource Planning
|
||||
title: Resource Planning
|
||||
---
|
||||
|
||||
It is important to plan computing and storage resources when using TDengine to build an IoT, time-series, or big data platform. This chapter describes how to plan the required CPU, memory, and disk resources.
|
||||
|
||||
## Memory Requirement of Server Side
|
||||
## Server Memory Requirements
|
||||
|
||||
By default, the number of vgroups created for each database is the same as the number of CPU cores. This can be configured by the parameter `maxVgroupsPerDb`. Each vnode in a vgroup stores one replica. Each vnode consumes a fixed amount of memory, i.e. `blocks` \* `cache`. In addition, some memory is required for tag values associated with each table. A fixed amount of memory is required for each cluster. So, the memory required for each DB can be calculated using the formula below:
|
||||
Each database creates a fixed number of vgroups. This number is 2 by default and can be configured with the `vgroups` parameter. The number of replicas can be controlled with the `replica` parameter. Each replica requires one vnode per vgroup. Altogether, the memory required by each database depends on the following configuration options:
|
||||
|
||||
- vgroups
|
||||
- replica
|
||||
- buffer
|
||||
- pages
|
||||
- pagesize
|
||||
- cachesize
|
||||
|
||||
For more information, see [Database](../../taos-sql/database).
|
||||
|
||||
The memory required by a database is therefore greater than or equal to:
|
||||
|
||||
```
|
||||
Database Memory Size = maxVgroupsPerDb * replica * (blocks * cache + 10MB) + numOfTables * (tagSizePerTable + 0.5KB)
|
||||
vgroups * replica * (buffer + pages * pagesize + cachesize)
|
||||
```
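As a rough illustration, assuming the documented defaults of buffer = 96 MB, pages = 256, pagesize = 4 KB, and cachesize = 1 MB (verify these against your version), a database with 2 vgroups and 3 replicas would need at least:

```
2 * 3 * (96 MB + 256 * 4 KB + 1 MB) = 6 * 98 MB = 588 MB
```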
|
||||
|
||||
For example, assuming the default value of `maxVgroupPerDB` is 64, the default value of `cache` is 16M, the default value of `blocks` is 6, there are 100,000 tables in a DB, the replica number is 1, total length of tag values is 256 bytes, the total memory required for this DB is: 64 \* 1 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 6792M.
|
||||
However, note that this requirement is spread over all dnodes in the cluster, not on a single physical machine. The physical servers that run dnodes meet the requirement together. If a cluster has multiple databases, the memory required increases accordingly. In complex environments where dnodes were added after initial deployment in response to increasing resource requirements, load may not be balanced among the original dnodes and newer dnodes. In this situation, the actual status of your dnodes is more important than theoretical calculations.
|
||||
|
||||
In the real operation of TDengine, we are more concerned about the memory used by each TDengine server process `taosd`.
|
||||
|
||||
```
|
||||
taosd_memory = vnode_memory + mnode_memory + query_memory
|
||||
```
|
||||
|
||||
In the above formula:
|
||||
|
||||
1. "vnode_memory" of a `taosd` process is the memory used by all vnodes hosted by this `taosd` process. It can be roughly calculated by firstly adding up the total memory of all DBs whose memory usage can be derived according to the formula for Database Memory Size, mentioned above, then dividing by number of dnodes and multiplying the number of replicas.
|
||||
|
||||
```
|
||||
vnode_memory = (sum(Database Memory Size) / number_of_dnodes) * replica
|
||||
```
|
||||
|
||||
2. "mnode_memory" of a `taosd` process is the memory consumed by a mnode. If there is one (and only one) mnode hosted in a `taosd` process, the memory consumed by "mnode" is "0.2KB \* the total number of tables in the cluster".
|
||||
|
||||
3. "query_memory" is the memory used when processing query requests. Each ongoing query consumes at least "0.2 KB \* total number of involved tables".
|
||||
|
||||
Please note that the above formulas can only be used to estimate the minimum memory requirement, not the maximum memory usage. In a real production environment, it's better to reserve some redundancy beyond the estimated minimum memory requirement. If memory is abundant, it's suggested to increase the value of the parameter `blocks` to speed up data insertion and data query.
|
||||
|
||||
## Memory Requirement of Client Side
|
||||
## Client Memory Requirements
|
||||
|
||||
Client programs that use the TDengine client driver `taosc` to connect to the server side have memory requirements as well.
|
||||
|
||||
|
@ -56,10 +48,10 @@ So, at least 3GB needs to be reserved for such a client.
|
|||
|
||||
The CPU resources required depend on two aspects:
|
||||
|
||||
- **Data Insertion** Each dnode of TDengine can process at least 10,000 insertion requests in one second, while each insertion request can have multiple rows. The difference in computing resource consumed, between inserting 1 row at a time, and inserting 10 rows at a time is very small. So, the more the number of rows that can be inserted one time, the higher the efficiency. Inserting in batch also imposes requirements on the client side which needs to cache rows to insert in batch once the number of cached rows reaches a threshold.
|
||||
- **Data Insertion** Each dnode of TDengine can process at least 10,000 insertion requests per second, and each insertion request can contain multiple rows. The difference in computing resources consumed between inserting 1 row at a time and inserting 10 rows at a time is very small. So, the more rows that can be inserted at one time, the higher the efficiency. If each insert request contains more than 200 records, a single core can process more than 1 million records per second. Inserting in batch also imposes requirements on the client side, which needs to cache rows and insert them in batch once the number of cached rows reaches a threshold.
|
||||
- **Data Query** High efficiency query is provided in TDengine, but it's hard to estimate the CPU resource required because the queries used in different use cases and the frequency of queries vary significantly. It can only be verified with the query statements, query frequency, data size to be queried, and other requirements provided by users.
|
||||
|
||||
In short, the CPU resource required for data insertion can be estimated but it's hard to do so for query use cases. In real operation, it's suggested to control CPU usage below 50%. If this threshold is exceeded, it's a reminder for system operator to add more nodes in the cluster to expand resources.
|
||||
In short, the CPU resource required for data insertion can be estimated but it's hard to do so for query use cases. If possible, ensure that CPU usage remains below 50%. If this threshold is exceeded, it's a reminder for system operator to add more nodes in the cluster to expand resources.
|
||||
|
||||
## Disk Requirement
|
||||
|
||||
|
@ -77,6 +69,6 @@ To increase performance, multiple disks can be setup for parallel data reading o
|
|||
|
||||
## Number of Hosts
|
||||
|
||||
A host can be either physical or virtual. The total memory, total CPU, total disk required can be estimated according to the formulae mentioned previously. Then, according to the system resources that a single host can provide, assuming all hosts have the same resources, the number of hosts can be derived easily.
|
||||
A host can be either physical or virtual. The total memory, total CPU, total disk required can be estimated according to the formulae mentioned previously. If the number of data replicas is not 1, the required resources are multiplied by the number of replicas.
|
||||
|
||||
**Quick Estimation for CPU, Memory and Disk** Please refer to [Resource Estimate](https://www.taosdata.com/config/config.html).
|
||||
Then, according to the system resources that a single host can provide, assuming all hosts have the same resources, the number of hosts can be derived easily.
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
---
|
||||
sidebar_label: Fault Tolerance
|
||||
title: Fault Tolerance & Disaster Recovery
|
||||
title: Fault Tolerance and Disaster Recovery
|
||||
---
|
||||
|
||||
## Fault Tolerance
|
||||
|
@ -11,22 +10,21 @@ When a data block is received by TDengine, the original data block is first writ
|
|||
|
||||
There are 2 configuration parameters related to WAL:
|
||||
|
||||
- walLevel:
|
||||
- 0:wal is disabled
|
||||
- 1:wal is enabled without fsync
|
||||
- 2:wal is enabled with fsync
|
||||
- fsync:This parameter is only valid when walLevel is set to 2. It specifies the interval, in milliseconds, of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written.
|
||||
- wal_level: Specifies the WAL level. 1 indicates that WAL is enabled but fsync is disabled. 2 indicates that WAL and fsync are both enabled. The default value is 1.
|
||||
- wal_fsync_period: This parameter is only valid when wal_level is set to 2. It specifies the interval, in milliseconds, of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written.
|
||||
|
||||
To achieve absolutely no data loss, walLevel should be set to 2 and fsync should be set to 1. There is a performance penalty to the data ingestion rate. However, if the concurrent data insertion threads on the client side can reach a big enough number, for example 50, the data ingestion performance will be still good enough. Our verification shows that the drop is only 30% when fsync is set to 3,000 milliseconds.
|
||||
To achieve absolutely no data loss, set wal_level to 2 and wal_fsync_period to 0. There is a performance penalty to the data ingestion rate. However, if the number of concurrent data insertion threads on the client side is large enough, for example 50, the data ingestion performance will still be good enough. Our verification shows that the drop is only 30% when wal_fsync_period is set to 3000 milliseconds.
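Assuming these options are set per database, as in TDengine 3.0, a hedged sketch (the database name is an assumption):

```sql
-- Hypothetical example: maximize durability by calling fsync on
-- every WAL write, at some cost to ingest throughput.
CREATE DATABASE critical_db WAL_LEVEL 2 WAL_FSYNC_PERIOD 0;
```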
|
||||
|
||||
## Disaster Recovery
|
||||
|
||||
TDengine uses replication to provide high availability and disaster recovery capability.
|
||||
TDengine uses replication to provide high availability.
|
||||
|
||||
A TDengine cluster is managed by mnode. To ensure the high availability of mnode, multiple replicas can be configured by the system parameter `numOfMnodes`. The data replication between mnode replicas is performed in a synchronous way to guarantee metadata consistency.
|
||||
A TDengine cluster is managed by mnodes. You can configure up to three mnodes to ensure high availability. The data replication between mnode replicas is performed in a synchronous way to guarantee metadata consistency.
|
||||
|
||||
The number of replicas for time series data in TDengine is associated with each database. There can be many databases in a cluster and each database can be configured with a different number of replicas. When creating a database, parameter `replica` is used to configure the number of replications. To achieve high availability, `replica` needs to be higher than 1.
|
||||
The number of replicas for time series data in TDengine is associated with each database. There can be many databases in a cluster and each database can be configured with a different number of replicas. When creating a database, the parameter `replica` is used to specify the number of replicas. To achieve high availability, set `replica` to 3.
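A minimal sketch, assuming a database named `ha_db`:

```sql
-- Hypothetical example: keep three replicas of every vgroup
-- so the database survives a single dnode failure.
CREATE DATABASE ha_db REPLICA 3;
```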
|
||||
|
||||
The number of dnodes in a TDengine cluster must NOT be lower than the number of replicas for any database; otherwise, table creation will fail.
|
||||
|
||||
As long as the dnodes of a TDengine cluster are deployed on different physical machines and the replica number is higher than 1, high availability can be achieved without any other assistance. For disaster recovery, dnodes of a TDengine cluster should be deployed in geographically different data centers.
|
||||
|
||||
Alternatively, you can use taosX to synchronize the data from one TDengine cluster to another cluster in a remote location. However, taosX is only available in the TDengine Enterprise edition; for more information, please contact tdengine.com.
|
||||
|
|
|
@ -13,110 +13,59 @@ Diagnostic steps:
|
|||
1. If the port range to be diagnosed is being occupied by a `taosd` server process, please first stop `taosd`.
|
||||
2. On the server side, execute command `taos -n server -P <port> -l <pktlen>` to monitor the port range starting from the port specified by `-P` parameter with the role of "server".
|
||||
3. On the client side, execute command `taos -n client -h <fqdn of server> -P <port> -l <pktlen>` to send a testing package to the specified server and port.
|
||||
|
||||
-l <pktlen\>: The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000. Please note that the package length must be same in the above 2 commands executed on server side and client side respectively.
|
||||
|
||||
-l <pktlen\>: The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000.
|
||||
Please note that the package length must be the same in the above 2 commands executed on the server side and client side respectively.
|
||||
|
||||
Output of the server side for the example is below:
|
||||
|
||||
```bash
|
||||
# taos -n server -P 6000
|
||||
12/21 14:50:13.522509 0x7f536f455200 UTL work as server, host:172.27.0.7 startPort:6000 endPort:6011 pkgLen:1000
|
||||
|
||||
12/21 14:50:13.522659 0x7f5352242700 UTL TCP server at port:6000 is listening
|
||||
12/21 14:50:13.522727 0x7f5351240700 UTL TCP server at port:6001 is listening
|
||||
# taos -n server -P 6030 -l 1000
|
||||
network test server is initialized, port:6030
|
||||
request is received, size:1000
|
||||
request is received, size:1000
|
||||
...
|
||||
...
|
||||
...
|
||||
12/21 14:50:13.523954 0x7f5342fed700 UTL TCP server at port:6011 is listening
|
||||
12/21 14:50:13.523989 0x7f53437ee700 UTL UDP server at port:6010 is listening
|
||||
12/21 14:50:13.524019 0x7f53427ec700 UTL UDP server at port:6011 is listening
|
||||
12/21 14:50:22.192849 0x7f5352242700 UTL TCP: read:1000 bytes from 172.27.0.8 at 6000
|
||||
12/21 14:50:22.192993 0x7f5352242700 UTL TCP: write:1000 bytes to 172.27.0.8 at 6000
|
||||
12/21 14:50:22.237082 0x7f5351a41700 UTL UDP: recv:1000 bytes from 172.27.0.8 at 6000
|
||||
12/21 14:50:22.237203 0x7f5351a41700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6000
|
||||
12/21 14:50:22.237450 0x7f5351240700 UTL TCP: read:1000 bytes from 172.27.0.8 at 6001
|
||||
12/21 14:50:22.237576 0x7f5351240700 UTL TCP: write:1000 bytes to 172.27.0.8 at 6001
|
||||
12/21 14:50:22.281038 0x7f5350a3f700 UTL UDP: recv:1000 bytes from 172.27.0.8 at 6001
|
||||
12/21 14:50:22.281141 0x7f5350a3f700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6001
|
||||
...
|
||||
...
|
||||
...
|
||||
12/21 14:50:22.677443 0x7f5342fed700 UTL TCP: read:1000 bytes from 172.27.0.8 at 6011
|
||||
12/21 14:50:22.677576 0x7f5342fed700 UTL TCP: write:1000 bytes to 172.27.0.8 at 6011
|
||||
12/21 14:50:22.721144 0x7f53427ec700 UTL UDP: recv:1000 bytes from 172.27.0.8 at 6011
|
||||
12/21 14:50:22.721261 0x7f53427ec700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6011
|
||||
request is received, size:1000
|
||||
request is received, size:1000
|
||||
```
|
||||
|
||||
Output of the client side for the example is below:
|
||||
|
||||
```bash
|
||||
# taos -n client -h 172.27.0.7 -P 6000
|
||||
12/21 14:50:22.192434 0x7fc95d859200 UTL work as client, host:172.27.0.7 startPort:6000 endPort:6011 pkgLen:1000
|
||||
taos -n client -h v3s2 -P 6030 -l 1000
|
||||
network test client is initialized, the server is v3s2:6030
|
||||
request is sent, size:1000
|
||||
response is received, size:1000
|
||||
request is sent, size:1000
|
||||
response is received, size:1000
|
||||
...
|
||||
...
|
||||
...
|
||||
request is sent, size:1000
|
||||
response is received, size:1000
|
||||
request is sent, size:1000
|
||||
response is received, size:1000
|
||||
|
||||
12/21 14:50:22.192472 0x7fc95d859200 UTL server ip:172.27.0.7 is resolved from host:172.27.0.7
|
||||
12/21 14:50:22.236869 0x7fc95d859200 UTL successed to test TCP port:6000
|
||||
12/21 14:50:22.237215 0x7fc95d859200 UTL successed to test UDP port:6000
|
||||
...
|
||||
...
|
||||
...
|
||||
12/21 14:50:22.676891 0x7fc95d859200 UTL successed to test TCP port:6010
|
||||
12/21 14:50:22.677240 0x7fc95d859200 UTL successed to test UDP port:6010
|
||||
12/21 14:50:22.720893 0x7fc95d859200 UTL successed to test TCP port:6011
|
||||
12/21 14:50:22.721274 0x7fc95d859200 UTL successed to test UDP port:6011
|
||||
total succ: 100/100 cost: 16.23 ms speed: 5.87 MB/s
|
||||
```
|
||||
|
||||
The system operator needs to check the output carefully to find the root cause of the problem and resolve it.
|
||||
|
||||
## Startup Status and RPC Diagnostic
|
||||
|
||||
`taos -n startup -h <fqdn of server>` can be used to check the startup status of a `taosd` process. This is a common task which should be performed by a system operator, especially in the case of a cluster, to determine whether `taosd` has been started successfully.
|
||||
|
||||
`taos -n rpc -h <fqdn of server>` can be used to check whether the port of a started `taosd` can be accessed or not. If `taosd` process doesn't respond or is working abnormally, this command can be used to initiate a rpc communication with the specified fqdn to determine whether it's a network problem or whether `taosd` is abnormal.
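For example, assuming a dnode runs on the hypothetical host `h1.tdengine.com`, the two checks can be combined as follows:

```bash
# Check whether taosd on the hypothetical host h1.tdengine.com started successfully
taos -n startup -h h1.tdengine.com

# Check whether the running taosd responds to RPC requests on that host
taos -n rpc -h h1.tdengine.com
```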
|
||||
|
||||
## Sync and Arbitrator Diagnostic
|
||||
|
||||
```bash
|
||||
taos -n sync -P 6040 -h <fqdn of server>
|
||||
taos -n sync -P 6042 -h <fqdn of server>
|
||||
```
|
||||
|
||||
The above commands can be executed in a Linux shell to check whether the port for sync is working well and whether the sync module on the server side is working well. Additionally, `-P 6042` is used to check whether the arbitrator is configured properly and is working well.
|
||||
|
||||
## Network Speed Diagnostic
|
||||
|
||||
`taos -n speed -h <fqdn of server> -P 6030 -N 10 -l 10000000 -S TCP`
|
||||
|
||||
From version 2.2.0.0 onwards, the above command can be executed in a Linux shell to test network speed. The command sends uncompressed packages to a running `taosd` server process or to a simulated server process started by `taos -n server`. The parameters that can be used when testing network speed are as below:
|
||||
|
||||
-n: When set to "speed", it means testing network speed.
|
||||
-h: The FQDN or IP of the server process to be connected to; if not set, the FQDN configured in `taos.cfg` is used.
|
||||
-P: The port of the server process to connect to; the default value is 6030.
|
||||
-N: The number of packages that will be sent in the test; the range is [1,10000] and the default value is 100.
|
||||
-l: The size of each package in bytes; the range is [1024, 1024 \* 1024 \* 1024] and the default value is 1024.
|
||||
-S: The type of network packages to send; it can be either TCP or UDP, and the default value is TCP.
|
||||
|
||||
## FQDN Resolution Diagnostic
|
||||
|
||||
`taos -n fqdn -h <fqdn of server>`
|
||||
|
||||
From version 2.2.0.0 onward, the above command can be executed in a Linux shell to test the resolution speed of FQDN. It can be used to try to resolve a FQDN to an IP address and record the time spent in this process. The parameters that can be used for this purpose are as below:
|
||||
|
||||
-n: When set to "fqdn", it means testing the speed of resolving FQDN.
|
||||
-h: The FQDN to be resolved. If not set, the `FQDN` parameter in `taos.cfg` is used by default.
|
||||
|
||||
## Server Log
|
||||
|
||||
The parameter `debugFlag` is used to control the log level of the `taosd` server process. The default value is 131. For debugging and tracing, it needs to be set to either 135 or 143 respectively.
|
||||
The parameter `debugFlag` is used to control the log level of the `taosd` server process. The default value is 131. For debugging and tracing, it needs to be set to either 135 or 143 respectively.
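A minimal sketch, assuming the default configuration file location `/etc/taos/taos.cfg` and a systemd-managed `taosd`:

```bash
# Raise the taosd log level to DEBUG (135); use 143 for TRACE
echo "debugFlag 135" | sudo tee -a /etc/taos/taos.cfg
# Restart taosd so the new log level takes effect
sudo systemctl restart taosd
```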
|
||||
|
||||
Once this parameter is set to 135 or 143, the log file grows very quickly especially when there is a huge volume of data insertion and data query requests. If all the logs are stored together, some important information may be missed very easily and so on the server side, important information is stored in a different place from other logs.
|
||||
|
||||
- The log at level of INFO, WARNING and ERROR is stored in `taosinfo` so that it is easy to find important information
|
||||
- The log at level of DEBUG (135) and TRACE (143) and other information not handled by `taosinfo` are stored in `taosdlog`
|
||||
Once this parameter is set to 135 or 143, the log file grows very quickly especially when there is a huge volume of data insertion and data query requests. Ensure that the disk drive on which logs are stored has sufficient space.
|
||||
|
||||
## Client Log
|
||||
|
||||
An independent log file, named as "taoslog+<seq num\>" is generated for each client program, i.e. a client process. The default value of `debugFlag` is also 131 and only logs at level of INFO/ERROR/WARNING are recorded. As stated above, for debugging and tracing, it needs to be changed to 135 or 143 respectively, so that logs at DEBUG or TRACE level can be recorded.
|
||||
An independent log file, named as "taoslog+<seq num\>" is generated for each client program, i.e. a client process. The parameter `debugFlag` is used to control the log level. The default value is 131. For debugging and tracing, it needs to be set to either 135 or 143 respectively.
|
||||
|
||||
The default value of `debugFlag` is also 131 and only logs at level of INFO/ERROR/WARNING are recorded. As stated above, for debugging and tracing, it needs to be changed to 135 or 143 respectively, so that logs at DEBUG or TRACE level can be recorded.
|
||||
|
||||
The maximum length of a single log file is controlled by parameter `numOfLogLines` and only 2 log files are kept for each `taosd` server process.
|
||||
|
||||
Log files are written in an async way to minimize the workload on disk, but the trade off for performance is that a few log lines may be lost in some extreme conditions.
|
||||
Log files are written in an async way to minimize the workload on disk, but the trade-off for performance is that a few log lines may be lost in some extreme conditions. You can set the parameter `asyncLog` to 0 when needed for troubleshooting purposes to ensure that no log information is lost.
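A sketch under the same assumptions as above (default config path, systemd service):

```bash
# Write log lines synchronously so none are lost (at some performance cost)
echo "asyncLog 0" | sudo tee -a /etc/taos/taos.cfg
sudo systemctl restart taosd
```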
|
||||
|
|
|
@ -10,7 +10,7 @@ One difference from the native connector is that the REST interface is stateless
|
|||
|
||||
## Installation
|
||||
|
||||
The REST interface does not rely on any TDengine native library, so the client application does not need to install any TDengine libraries. The client application's development language only needs to support the HTTP protocol.
|
||||
The REST interface does not rely on any TDengine native library, so the client application does not need to install any TDengine libraries. The client application's development language only needs to support the HTTP protocol. The REST interface is provided by [taosAdapter](../taosadapter); before using the REST interface, make sure that `taosAdapter` is running properly.
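On a systemd-managed installation, you can check the adapter with, for example:

```bash
# Confirm that taosAdapter is running before using the REST interface
sudo systemctl status taosadapter
```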
|
||||
|
||||
## Verification
|
||||
|
||||
|
@ -18,12 +18,12 @@ If the TDengine server is already installed, it can be verified as follows:
|
|||
|
||||
The following example is in an Ubuntu environment and uses the `curl` tool to verify that the REST interface is working. Note that the `curl` tool may need to be installed in your environment.
|
||||
|
||||
The following example lists all databases on the host h1.taosdata.com. To use it in your environment, replace `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number.
|
||||
The following example lists all databases on the host h1.tdengine.com. To use it in your environment, replace `h1.tdengine.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number.
|
||||
|
||||
```bash
|
||||
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" \
|
||||
-d "select name, ntables, status from information_schema.ins_databases;" \
|
||||
h1.taosdata.com:6041/rest/sql
|
||||
h1.tdengine.com:6041/rest/sql
|
||||
```
|
||||
|
||||
The following return value results indicate that the verification passed.
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
---
|
||||
sidebar_position: 1
|
||||
sidebar_label: C/C++
|
||||
title: C/C++ Connector
|
||||
---
|
|
@ -1,6 +1,5 @@
|
|||
---
|
||||
toc_max_heading_level: 4
|
||||
sidebar_position: 2
|
||||
sidebar_label: Java
|
||||
title: TDengine Java Connector
|
||||
description: The TDengine Java Connector is implemented on the standard JDBC API and provides native and REST connectors.
|
||||
|
@ -134,8 +133,6 @@ The configuration parameters in the URL are as follows:
|
|||
- batchfetch: true: pulls result sets in batches when executing queries; false: pulls result sets row by row. The default value is true. Enabling batch pulling and obtaining a batch of data can improve query performance when the query data volume is large.
|
||||
- batchErrorIgnore: true: when executing executeBatch, if one SQL statement in the middle fails, the following SQL statements will still be executed. false: no statements after the failed SQL statement are executed. The default value is false.
|
||||
|
||||
For more information about JDBC native connections, see [Video Tutorial](https://www.taosdata.com/blog/2020/11/11/1955.html).
|
||||
|
||||
**Connect using the TDengine client driver configuration file**
|
||||
|
||||
When you use a JDBC native connection to connect to a TDengine cluster, you can use the TDengine client driver configuration file to specify parameters such as `firstEp` and `secondEp` of the cluster in the configuration file as below:
|
|
@ -1,6 +1,5 @@
|
|||
---
|
||||
toc_max_heading_level: 4
|
||||
sidebar_position: 4
|
||||
sidebar_label: Go
|
||||
title: TDengine Go Connector
|
||||
---
|
||||
|
@ -8,7 +7,7 @@ title: TDengine Go Connector
|
|||
import Tabs from '@theme/Tabs';
|
||||
import TabItem from '@theme/TabItem';
|
||||
|
||||
import Preparition from "./_preparition.mdx"
|
||||
import Preparition from "./_preparation.mdx"
|
||||
import GoInsert from "../../07-develop/03-insert-data/_go_sql.mdx"
|
||||
import GoInfluxLine from "../../07-develop/03-insert-data/_go_line.mdx"
|
||||
import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.mdx"
|
|
@ -1,6 +1,5 @@
|
|||
---
|
||||
toc_max_heading_level: 4
|
||||
sidebar_position: 5
|
||||
sidebar_label: Rust
|
||||
title: TDengine Rust Connector
|
||||
---
|
||||
|
@ -8,7 +7,7 @@ title: TDengine Rust Connector
|
|||
import Tabs from '@theme/Tabs';
|
||||
import TabItem from '@theme/TabItem';
|
||||
|
||||
import Preparition from "./_preparition.mdx"
|
||||
import Preparition from "./_preparation.mdx"
|
||||
import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx"
|
||||
import RustBind from "../../07-develop/03-insert-data/_rust_stmt.mdx"
|
||||
import RustQuery from "../../07-develop/04-query-data/_rust.mdx"
|
|
@ -1,5 +1,4 @@
|
|||
---
|
||||
sidebar_position: 3
|
||||
sidebar_label: Python
|
||||
title: TDengine Python Connector
|
||||
description: "taospy is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. tasopy wraps both the native and REST interfaces of TDengine, corresponding to the two submodules of tasopy: taos and taosrest. In addition to wrapping the native and REST interfaces, taospy also provides a programming interface that conforms to the Python Data Access Specification (PEP 249), making it easy to integrate taospy with many third-party tools, such as SQLAlchemy and pandas."
|
||||
|
@ -8,7 +7,7 @@ description: "taospy is the official Python connector for TDengine. taospy provi
|
|||
import Tabs from "@theme/Tabs";
|
||||
import TabItem from "@theme/TabItem";
|
||||
|
||||
`taospy is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
|
||||
`taospy` is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
|
||||
In addition to wrapping the native and REST interfaces, `taospy` also provides a set of programming interfaces that conforms to the [Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/). It is easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/).
|
||||
|
||||
The direct connection to the server using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST interface provided by taosAdapter is referred to hereinafter as a "REST connection".
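As a quick sketch, `taospy` can be installed from PyPI before choosing either connection type:

```bash
# Install the official Python connector (wraps both native and REST interfaces)
pip3 install taospy
```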
|
|
@ -1,6 +1,5 @@
|
|||
---
|
||||
toc_max_heading_level: 4
|
||||
sidebar_position: 6
|
||||
sidebar_label: Node.js
|
||||
title: TDengine Node.js Connector
|
||||
---
|
||||
|
@ -8,7 +7,7 @@ title: TDengine Node.js Connector
|
|||
import Tabs from "@theme/Tabs";
|
||||
import TabItem from "@theme/TabItem";
|
||||
|
||||
import Preparition from "./_preparition.mdx";
|
||||
import Preparition from "./_preparation.mdx";
|
||||
import NodeInsert from "../../07-develop/03-insert-data/_js_sql.mdx";
|
||||
import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx";
|
||||
import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx";
|
|
@ -1,6 +1,5 @@
|
|||
---
|
||||
toc_max_heading_level: 4
|
||||
sidebar_position: 7
|
||||
sidebar_label: C#
|
||||
title: C# Connector
|
||||
---
|
||||
|
@ -8,7 +7,7 @@ title: C# Connector
|
|||
import Tabs from '@theme/Tabs';
|
||||
import TabItem from '@theme/TabItem';
|
||||
|
||||
import Preparition from "./_preparition.mdx"
|
||||
import Preparition from "./_preparation.mdx"
|
||||
import CSInsert from "../../07-develop/03-insert-data/_cs_sql.mdx"
|
||||
import CSInfluxLine from "../../07-develop/03-insert-data/_cs_line.mdx"
|
||||
import CSOpenTSDBTelnet from "../../07-develop/03-insert-data/_cs_opts_telnet.mdx"
|
||||
|
@ -173,7 +172,6 @@ namespace TDengineExample
|
|||
`Taos` is an ADO.NET connector for TDengine, supporting Linux and Windows platforms. It is contributed by community member [@maikebing](https://github.com/maikebing). Please refer to:
|
||||
|
||||
* Interface download: <https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos>
|
||||
* Usage notes: <https://www.taosdata.com/blog/2020/11/02/1901.html>
|
||||
|
||||
## Frequently Asked Questions
|
||||
|
|
@ -1,6 +1,5 @@
|
|||
---
|
||||
sidebar_position: 1
|
||||
sidebar_label: PHP (community contribution)
|
||||
sidebar_label: PHP
|
||||
title: PHP Connector
|
||||
---
|
||||
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
:::info
|
||||
|
||||
Since the TDengine client driver is written in C, using the native connection requires loading the client driver shared library file, which is usually included in the TDengine installer. You can install either standard TDengine server installation package or [TDengine client installation package](/get-started/). For Windows development, you need to install the corresponding [Windows client](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client) for TDengine.
|
||||
Since the TDengine client driver is written in C, using the native connection requires loading the client driver shared library file, which is usually included in the TDengine installer. You can install either standard TDengine server installation package or [TDengine client installation package](/get-started/). For Windows development, you need to install the corresponding Windows client, please refer to [Install TDengine](../../get-started/package).
|
||||
|
||||
- libtaos.so: After successful installation of TDengine on a Linux system, the dependent Linux version of the client driver `libtaos.so` file will be automatically linked to `/usr/lib/libtaos.so`, which is included in the Linux scannable path and does not need to be specified separately.
|
||||
- taos.dll: After installing the client on Windows, the dependent Windows version of the client driver taos.dll file will be automatically copied to the system default search path C:/Windows/System32, again without the need to specify it separately.
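A minimal check on Linux that the driver is where the connectors expect it:

```bash
# The native connection fails to load if this shared library is missing
ls -l /usr/lib/libtaos.so
```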
|
||||
|
|
|
@ -1,10 +0,0 @@
|
|||
- The client driver has been installed (required for native connections; not required for REST connections)
|
||||
|
||||
:::info
|
||||
|
||||
Since the TDengine client driver is written in C, using the native connection requires loading the locally installed client driver shared library file, which is usually included in the TDengine installation package. The TDengine Linux server package bundles the TDengine client, and the [Linux client](/get-started/) can also be installed separately. For development on Windows, install the corresponding [Windows client](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client) for TDengine.
|
||||
|
||||
- libtaos.so: After TDengine is successfully installed on a Linux system, the dependent Linux client driver file libtaos.so is automatically copied to /usr/lib/libtaos.so. This directory is on the automatic Linux scan path and does not need to be specified separately.
|
||||
- taos.dll: After the client is installed on a Windows system, the dependent Windows client driver file taos.dll is automatically copied to the default system search path C:/Windows/System32 and likewise does not need to be specified separately.
|
||||
|
||||
:::
|
|
@ -116,5 +116,4 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
|
|||
Mandatory or optional arguments to long options are also mandatory or optional
|
||||
for any corresponding short options.
|
||||
|
||||
Report bugs to <support@taosdata.com>.
|
||||
```
|
||||
|
|
|
@ -263,7 +263,7 @@ Once the import is complete, the full page view of TDinsight is shown below.
|
|||
|
||||
## TDinsight dashboard details
|
||||
|
||||
The TDinsight dashboard is designed to provide the usage and status of TDengine-related resources [dnodes, mnodes, vnodes](https://www.taosdata.com/cn/documentation/architecture#cluster) or databases.
|
||||
The TDinsight dashboard is designed to provide the usage and status of TDengine-related resources [dnodes, mnodes, vnodes](../../taos-sql/node/) or databases.
|
||||
|
||||
Details of the metrics are as follows.
|
||||
|
||||
|
|
|
@ -72,7 +72,7 @@ Next, ensure the hostname "tdengine" is resolvable in `/etc/hosts`.
|
|||
echo 127.0.0.1 tdengine | sudo tee -a /etc/hosts
|
||||
```
|
||||
|
||||
Finally, the TDengine service can be accessed from the taos shell or any connector with "tdengine" as the server address.
|
||||
Finally, the TDengine service can be accessed from the TDengine CLI or any connector with "tdengine" as the server address.
|
||||
|
||||
```shell
|
||||
taos -h tdengine -P 6030
|
||||
|
@ -116,7 +116,7 @@ If you want to start your application in a container, you need to add the corres
|
|||
FROM ubuntu:20.04
|
||||
RUN apt-get update && apt-get install -y wget
|
||||
ENV TDENGINE_VERSION=3.0.0.0
|
||||
RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
&& cd TDengine-client-${TDENGINE_VERSION} \
|
||||
&& ./install_client.sh \
|
||||
|
@ -217,7 +217,7 @@ Here is the full Dockerfile:
|
|||
```docker
|
||||
FROM golang:1.17.6-buster as builder
|
||||
ENV TDENGINE_VERSION=3.0.0.0
|
||||
RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
&& cd TDengine-client-${TDENGINE_VERSION} \
|
||||
&& ./install_client.sh \
|
||||
|
@ -233,7 +233,7 @@ RUN go build
|
|||
FROM ubuntu:20.04
|
||||
RUN apt-get update && apt-get install -y wget
|
||||
ENV TDENGINE_VERSION=3.0.0.0
|
||||
RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
&& cd TDengine-client-${TDENGINE_VERSION} \
|
||||
&& ./install_client.sh \
|
||||
|
|
|
@ -1,9 +1,10 @@
|
|||
---
|
||||
title: Schemaless Writing
|
||||
description: "The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data, as it is written to the interface."
|
||||
description: 'The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data, as it is written to the interface.'
|
||||
---
|
||||
|
||||
In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. To provide the flexibility needed in such cases and in a rapidly changing IoT landscape, TDengine provides a series of interfaces for the schemaless writing method. These interfaces eliminate the need to create super tables and subtables in advance by automatically creating the storage structure corresponding to the data as the data is written to the interface. When necessary, schemaless writing will automatically add the required columns to ensure that the data written by the user is stored correctly.
|
||||
In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. Schemaless writing automatically creates storage structures for your data as it is being written to TDengine, so that you do not need to create supertables in advance. When necessary, schemaless writing
|
||||
will automatically add the required columns to ensure that the data written by the user is stored correctly.
|
||||
|
||||
The schemaless writing method creates super tables and their corresponding subtables. These are completely indistinguishable from the super tables and subtables created directly via SQL, and you can write data into them with SQL statements as well. Note that the names of tables created by schemaless writing are generated from fixed mapping rules on tag values, so they are not meaningful names and lack readability.
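For example, assuming data has already been written schemalessly into a database named `test`, the resulting super tables can be listed with ordinary SQL:

```bash
# Super tables created by schemaless writes are regular super tables
taos -s "SHOW test.STABLES;"
```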
|
||||
|
||||
|
@ -19,12 +20,12 @@ With the following formatting conventions, schemaless writing uses a single stri
|
|||
measurement,tag_set field_set timestamp
|
||||
```
|
||||
|
||||
where :
|
||||
where:
|
||||
|
||||
- measurement will be used as the data table name. It will be separated from tag_set by a comma.
|
||||
- tag_set will be used as tag data in the format `<tag_key>=<tag_value>,<tag_key>=<tag_value>`, i.e. multiple tags' data can be separated by a comma. It is separated from field_set by space.
|
||||
- field_set will be used as normal column data in the format of `<field_key>=<field_value>,<field_key>=<field_value>`, again using a comma to separate multiple normal columns of data. It is separated from the timestamp by a space.
|
||||
- The timestamp is the primary key corresponding to the data in this row.
|
||||
- `tag_set` will be used as tags, with format like `<tag_key>=<tag_value>,<tag_key>=<tag_value>`. Enter a space between `tag_set` and `field_set`.
|
||||
- `field_set` will be used as data columns, with format like `<field_key>=<field_value>,<field_key>=<field_value>`. Enter a space between `field_set` and `timestamp`.
|
||||
- `timestamp` is the primary key timestamp corresponding to this row of data.
|
||||
|
||||
All data in tag_set is automatically converted to the NCHAR data type and does not require double quotes (").
|
||||
|
||||
|
@ -35,18 +36,20 @@ In the schemaless writing data line protocol, each data item in the field_set ne
|
|||
- Spaces, equal signs (=), commas (,), and double quotes (") need to be escaped with a backslash (\\) in front. (All refer to the ASCII character)
|
||||
- Numeric types will be distinguished from data types by the suffix.
|
||||
|
||||
| **Serial number** | **Postfix** | **Mapping type** | **Size (bytes)** |
|
||||
| -------- | -------- | ------------ | -------------- |
|
||||
| 1 | none or f64 | double | 8 |
|
||||
| 2 | f32 | float | 4 |
|
||||
| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
|
||||
| 4 | i16/u16 | SmallInt/USmallInt | 2 |
|
||||
| 5 | i32/u32 | Int/UInt | 4 |
|
||||
| 6 | i64/i/u64/u | Bigint/Bigint/UBigint/UBigint | 8 |
|
||||
| **Serial number** | **Postfix** | **Mapping type** | **Size (bytes)** |
|
||||
| ----------------- | ----------- | ----------------------------- | ---------------- |
|
||||
| 1 | None or f64 | double | 8 |
|
||||
| 2 | f32 | float | 4 |
|
||||
| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
|
||||
| 4 | i16/u16 | SmallInt/USmallInt | 2 |
|
||||
| 5 | i32/u32 | Int/UInt | 4 |
|
||||
| 6 | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8 |
|
||||
|
||||
- `t`, `T`, `true`, `True`, `TRUE`, `f`, `F`, `false`, and `False` will be handled directly as BOOL types.
|
||||
|
||||
For example, the following data rows indicate that the t1 label is "3" (NCHAR), the t2 label is "4" (NCHAR), and the t3 label is "t3" to the super table named `st` labeled "t3" (NCHAR), write c1 column as 3 (BIGINT), c2 column as false (BOOL), c3 column is "passit" (BINARY), c4 column is 4 (DOUBLE), and the primary key timestamp is 1626006833639000000 in one row.
|
||||
For example, the following line indicates that a row is written to the super table named `st` with tag t1 set to "3" (NCHAR), tag t2 set to "4" (NCHAR), and tag t3 set to "t3" (NCHAR); column c1 is 3 (BIGINT), c2 is false (BOOL), c3 is "passit" (BINARY), c4 is 4 (DOUBLE), and the primary key timestamp is 1626006833639000000.
|
||||
|
||||
```json
|
||||
st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
|
||||
|
@ -58,102 +61,105 @@ Note that if the wrong case is used when describing the data type suffix, or if
|
|||
|
||||
Schemaless writes process row data according to the following principles.
|
||||
|
||||
1. You can use the following rules to generate the subtable names: first, combine the measurement name and the key and value of the label into the next string:
|
||||
1. You can use the following rules to generate the subtable names: first, combine the measurement name and the key and value of the label into the next string:
|
||||
|
||||
```json
|
||||
"measurement,tag_key1=tag_value1,tag_key2=tag_value2"
|
||||
```
|
||||
|
||||
Note that tag_key1, tag_key2 are not the original order of the tags entered by the user but the result of using the tag names in ascending order of the strings. Therefore, tag_key1 is not the first tag entered in the line protocol.
|
||||
The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t*" is a fixed prefix that every table generated by this mapping relationship has.
|
||||
The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The hash is then prefixed with "t_" to generate the table name: "t_md5_val". Here "t_" is a fixed prefix that every table generated by this mapping relationship has (see the shell sketch after this list).
|
||||
You can configure smlChildTableName to specify table names, for example, `smlChildTableName=tname`. You can insert `st,tname=cpul,t1=4 c1=3 1626006833639000000` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
|
||||
|
||||
2. If the super table obtained by parsing the line protocol does not exist, this super table is created.
|
||||
If the subtable obtained by the parse line protocol does not exist, Schemaless creates the sub-table according to the subtable name determined in steps 1 or 2.
|
||||
3. If the subtable obtained by the parse line protocol does not exist, Schemaless creates the sub-table according to the subtable name determined in steps 1 or 2.
|
||||
4. If the specified tag or regular column in the data row does not exist, the corresponding tag or regular column is added to the super table (only incremental).
|
||||
5. If there are some tag columns or regular columns in the super table that are not specified to take values in a data row, then the values of these columns are set to NULL.
|
||||
5. If there are some tag columns or regular columns in the super table that are not specified to take values in a data row, then the values of these columns are set to NULL.
|
||||
6. For BINARY or NCHAR columns, if the length of the value provided in a data row exceeds the column type limit, the maximum length of characters allowed to be stored in the column is automatically increased (only incremented and not decremented) to ensure complete preservation of the data.
|
||||
7. Errors encountered throughout the processing will interrupt the writing process and return an error code.
|
||||
8. In order to improve the efficiency of writing, it is assumed by default that the order of the fields in the same Super is the same (the first data contains all fields, and the following data is in this order). If the order is different, the parameter smlDataFormat needs to be configured to be false. Otherwise, the data is written in the same order, and the data in the library will be abnormal.
|
||||
8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat to false. Otherwise, data will be written out of order and a database error will occur.
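The following shell sketch illustrates the naming rule from step 1 above, assuming default settings and tags already listed in ascending key order:

```bash
# "t_" plus the MD5 of "measurement,tag_key1=tag_value1,..." gives the subtable name
echo -n "st,t1=3,t2=4,t3=t3" | md5sum | awk '{print "t_" $1}'
```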
|
||||
|
||||
:::tip
|
||||
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 16k bytes. See [TAOS SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
|
||||
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 16KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
|
||||
|
||||
:::
|
||||
|
||||
## Time resolution recognition
|
||||
|
||||
Three specified modes are supported in the schemaless writing process, as follows:
|
||||
|
||||
| **Serial** | **Value** | **Description** |
|
||||
| -------- | ------------------- | ------------------------------- |
|
||||
| 1 | SML_LINE_PROTOCOL | InfluxDB Line Protocol |
|
||||
| 2 | SML_TELNET_PROTOCOL | OpenTSDB Text Line Protocol |
|
||||
| 3 | SML_JSON_PROTOCOL | JSON protocol format |
|
||||
| **Serial** | **Value** | **Description** |
|
||||
| ---------- | ------------------- | ---------------------- |
|
||||
| 1 | SML_LINE_PROTOCOL | InfluxDB Line Protocol |
|
||||
| 2 | SML_TELNET_PROTOCOL | OpenTSDB file protocol |
|
||||
| 3 | SML_JSON_PROTOCOL | OpenTSDB JSON protocol |
|
||||
|
||||
In the SML_LINE_PROTOCOL parsing mode, the user is required to specify the time resolution of the input timestamp. The available time resolutions are shown in the following table.
|
||||
In InfluxDB line protocol mode, you must specify the precision of the input timestamp. Valid precisions are described in the following table.
|
||||
|
||||
| **Serial Number** | **Time Resolution Definition** | **Meaning** |
|
||||
| -------- | --------------------------------- | -------------- |
|
||||
| 1 | TSDB_SML_TIMESTAMP_NOT_CONFIGURED | Not defined (invalid) |
|
||||
| 2 | TSDB_SML_TIMESTAMP_HOURS | hour |
|
||||
| 3 | TSDB_SML_TIMESTAMP_MINUTES | MINUTES
|
||||
| 4 | TSDB_SML_TIMESTAMP_SECONDS | SECONDS
|
||||
| 5 | TSDB_SML_TIMESTAMP_MILLI_SECONDS | milliseconds
|
||||
| 6 | TSDB_SML_TIMESTAMP_MICRO_SECONDS | microseconds
|
||||
| 7 | TSDB_SML_TIMESTAMP_NANO_SECONDS | nanoseconds |
|
||||
| **No.** | **Precision** | **Description** |
|
||||
| ------- | --------------------------------- | --------------------- |
|
||||
| 1 | TSDB_SML_TIMESTAMP_NOT_CONFIGURED | Not defined (invalid) |
|
||||
| 2 | TSDB_SML_TIMESTAMP_HOURS | Hours |
|
||||
| 3 | TSDB_SML_TIMESTAMP_MINUTES | Minutes |
|
||||
| 4 | TSDB_SML_TIMESTAMP_SECONDS | Seconds |
|
||||
| 5 | TSDB_SML_TIMESTAMP_MILLI_SECONDS | Milliseconds |
|
||||
| 6 | TSDB_SML_TIMESTAMP_MICRO_SECONDS | Microseconds |
|
||||
| 7 | TSDB_SML_TIMESTAMP_NANO_SECONDS | Nanoseconds |
|
||||
|
||||
In SML_TELNET_PROTOCOL and SML_JSON_PROTOCOL modes, the time precision is determined based on the length of the timestamp (in the same way as the OpenTSDB standard operation), and the user-specified time resolution is ignored at this point.
|
||||
In OpenTSDB file and JSON protocol modes, the precision of the timestamp is determined from its length in the standard OpenTSDB manner. User input is ignored.
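As a sketch of specifying the precision in practice, taosAdapter's InfluxDB-compatible write endpoint accepts it as a query parameter; the host, port, credentials, and database name below are assumptions:

```bash
# Write one line of InfluxDB line protocol with nanosecond timestamp precision
curl -i -XPOST "http://localhost:6041/influxdb/v1/write?db=test&precision=ns&u=root&p=taosdata" \
  --data-binary 'st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000'
```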
|
||||
|
||||
## Data schema mapping rules
|
||||
## Data Model Mapping
|
||||
|
||||
This section describes how data for line protocols are mapped to data with a schema. The data measurement in each line protocol is mapped as follows:
|
||||
- The tag name in tag_set is the name of the tag in the data schema
|
||||
- The name in field_set is the column's name.
|
||||
|
||||
The following data is used as an example to illustrate the mapping rules.
|
||||
This section describes how data in line protocol is mapped to a schema. The data measurement in each line is mapped to a supertable name. The tag name in tag_set is the tag name in the schema, and the name in field_set is the column name in the schema. The following example shows how data is mapped:
|
||||
|
||||
```json
|
||||
st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
|
||||
```
|
||||
|
||||
The row data mapping generates a super table: `st`, which contains three labels of type NCHAR: t1, t2, t3. Five data columns are ts (timestamp), c1 (bigint), c3 (binary), c2 (bool), c4 (bigint). The mapping becomes the following SQL statement.
|
||||
This row is mapped to a supertable: `st` contains three NCHAR tags: t1, t2, and t3. Five columns are created: ts (timestamp), c1 (bigint), c3 (binary), c2 (bool), and c4 (bigint). The following SQL statement is generated:
|
||||
|
||||
```json
|
||||
create stable st (_ts timestamp, c1 bigint, c2 bool, c3 binary(6), c4 bigint) tags(t1 nchar(1), t2 nchar(1), t3 nchar(2))
|
||||
```
|
||||
|
||||
## Data schema change handling
|
||||
## Processing Schema Changes
|
||||
|
||||
This section describes the impact on the data schema for different line protocol data writing cases.
|
||||
This section describes the impact on the schema caused by different data being written.
|
||||
|
||||
When writing to an explicitly identified field type using the line protocol, subsequent changes to the field's type definition will result in an explicit data schema error, i.e., will trigger a write API report error. As shown below, the
|
||||
If you use line protocol to write to a specific tag field and then later change the field type, a schema error will occur. This triggers an error on the write API. This is shown as follows:
|
||||
|
||||
```json
|
||||
st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4 1626006833639000000
|
||||
st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4i 1626006833640000000
|
||||
st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4 1626006833639000000
|
||||
st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4i 1626006833640000000
|
||||
```
|
||||
|
||||
The data type mapping in the first row defines column c4 as DOUBLE, but the data in the second row is declared as BIGINT by the numeric suffix, which triggers a parsing error with schemaless writing.
|
||||
The first row defines c4 as a double. However, in the second row, the suffix indicates that the value of c4 is a bigint. This causes schemaless writing to throw an error.
|
||||
|
||||
If the line protocol before the column declares the data column as BINARY, the subsequent one requires a longer binary length, which triggers a super table schema change.
|
||||
A supertable schema change also occurs if data written to a binary column exceeds the defined length of the column:
|
||||
|
||||
```json
|
||||
st,t1=3,t2=4,t3=t3 c1=3i64,c5="pass" 1626006833639000000
|
||||
st,t1=3,t2=4,t3=t3 c1=3i64,c5="passit" 1626006833640000000
|
||||
st,t1=3,t2=4,t3=t3 c1=3i64,c5="pass" 1626006833639000000
|
||||
st,t1=3,t2=4,t3=t3 c1=3i64,c5="passit" 1626006833640000000
|
||||
```
|
||||
|
||||
The first line of the line protocol parsing will declare column c5 is a BINARY(4) field. The second line data write will parse column c5 as a BINARY column. But in the second line, c5's width is 6 so you need to increase the width of the BINARY field to be able to accommodate the new string.
|
||||
The first row defines c5 as a binary(4), but the second row writes 6 bytes to it. This means that the length of the binary column must be expanded to contain the data.
|
||||
|
||||
```json
|
||||
st,t1=3,t2=4,t3=t3 c1=3i64 1626006833639000000
|
||||
st,t1=3,t2=4,t3=t3 c1=3i64,c6="passit" 1626006833640000000
|
||||
st,t1=3,t2=4,t3=t3 c1=3i64 1626006833639000000
|
||||
st,t1=3,t2=4,t3=t3 c1=3i64,c6="passit" 1626006833640000000
|
||||
```
|
||||
|
||||
The second line of data has an additional column c6 of type BINARY(6) compared to the first row. Then a column c6 of type BINARY(6) is automatically added at this point.
|
||||
The preceding data includes a new entry, c6, with type binary(6). When this occurs, a new column c6 with type binary(6) is added automatically.
|
||||
|
||||
## Write integrity
|
||||
## Write Integrity
|
||||
|
||||
TDengine provides idempotency guarantees for data writing, i.e., you can repeatedly call the API to write data with errors. However, it does not give atomicity guarantees for writing multiple rows of data. During the process of writing numerous rows of data in one batch, some data will be written successfully, and some data will fail.
|
||||
TDengine guarantees the idempotency of data writes. This means that you can repeatedly call the API to perform write operations with bad data. However, TDengine does not guarantee the atomicity of multi-row writes. In a multi-row write, some data may be written successfully and other data unsuccessfully.
|
||||
|
||||
## Error code
|
||||
## Error Codes
|
||||
|
||||
If it is an error in the data itself during the schemaless writing process, the application will get `TSDB_CODE_TSC_LINE_SYNTAX_ERROR` error message, which indicates that the error occurred in writing. The other error codes are consistent with the TDengine and can be obtained via the `taos_errstr()` to get the specific cause of the error.
|
||||
The error code TSDB_CODE_TSC_LINE_SYNTAX_ERROR indicates an error in the data itself during schemaless writing. Other error codes are consistent with the rest of TDengine, and the specific cause of an error can be retrieved with `taos_errstr()`.
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
sidebar_label: taosKeeper
|
||||
title: taosKeeper
|
||||
description: Instructions and tips for using taosKeeper
|
||||
description: exports TDengine monitoring metrics.
|
||||
---
|
||||
|
||||
## Introduction
|
||||
|
@ -22,26 +22,35 @@ You can compile taosKeeper separately and install it. Please refer to the [taosK
|
|||
|
||||
### Configuration and running methods
|
||||
|
||||
<!-- taosKeeper needs to be executed on the terminal of the operating system, it supports two configuration methods: [Command-line arguments](#command-line-arguments-in-detail) and [configuration file](#configuration-file-parameters-in-detail). Command-line arguments take precedence over values in the configuration file. -->
|
||||
taosKeeper needs to be executed on the terminal of the operating system. To run taosKeeper, see [configuration file](#configuration-file-parameters-in-detail).
|
||||
taosKeeper needs to be executed on the terminal of the operating system. It supports three configuration methods: [command-line arguments](#command-line-arguments-in-detail), [environment variables](#environment-variable-in-detail), and a [configuration file](#configuration-file-parameters-in-detail). The order of precedence is: command-line arguments, then environment variables, then the configuration file.
|
||||
|
||||
**Make sure that the TDengine cluster is running correctly before running taosKeeper.** Ensure that the monitoring service in TDengine has been started. For more information, see [TDengine Monitoring Configuration](../config/#monitoring).
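A minimal sketch of enabling the monitoring service in `taos.cfg` (the parameter names follow the monitoring configuration page linked above; the taosKeeper host is assumed to be localhost):

```bash
# Point taosd's monitoring reporter at a taosKeeper instance
cat <<'EOF' | sudo tee -a /etc/taos/taos.cfg
monitor 1
monitorFqdn localhost
monitorPort 6043
EOF
sudo systemctl restart taosd
```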
|
||||
|
||||
<!--
|
||||
### Command-Line Parameters
|
||||
|
||||
You can use command-line parameters to run taosBenchmark and control its behavior:
|
||||
You can use command-line parameters to run taosKeeper and control its behavior:
|
||||
|
||||
```shell
|
||||
taosKeeper
|
||||
$ taosKeeper
|
||||
```
|
||||
-->
|
||||
### Environment Variables
|
||||
|
||||
You can use environment variables to run taosKeeper and control its behavior:
|
||||
|
||||
```shell
|
||||
$ export TAOS_KEEPER_TDENGINE_HOST=192.168.64.3
|
||||
|
||||
$ taoskeeper
|
||||
```
|
||||
|
||||
You can run `taoskeeper -h` for more details.
|
||||
|
||||
### Configuration File
|
||||
|
||||
You can quickly launch taosKeeper with the following commands. If you do not specify a configuration file, `/etc/taos/keeper.toml` is used by default. If this file does not specify configurations, the default values are used.
|
||||
|
||||
```shell
|
||||
taoskeeper -c <keeper config file>
|
||||
$ taoskeeper -c <keeper config file>
|
||||
```
|
||||
|
||||
**Sample configuration files**
|
||||
|
@ -110,7 +119,7 @@ Query OK, 1 rows in database (0.036162s)
|
|||
#### Export Monitoring Metrics
|
||||
|
||||
```shell
|
||||
curl http://127.0.0.1:6043/metrics
|
||||
$ curl http://127.0.0.1:6043/metrics
|
||||
```
|
||||
|
||||
Sample result set (excerpt):
|
||||
|
|
|
@ -6,9 +6,7 @@ title: Grafana
|
|||
import Tabs from "@theme/Tabs";
|
||||
import TabItem from "@theme/TabItem";
|
||||
|
||||
TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system. The whole process does not require any code development. And you can visualize the contents of the data tables in TDengine on a dashboard.
|
||||
|
||||
You can learn more about using the TDengine plugin on [GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md).
|
||||
TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system. The whole process does not require any code development. And you can visualize the contents of the data tables in TDengine on a dashboard. You can learn more about using the TDengine plugin on [GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md).
|
||||
|
||||
## Prerequisites
|
||||
|
||||
|
@ -65,7 +63,6 @@ Restart Grafana service and open Grafana in web-browser, usually <http://localho
|
|||
Save the script and type `./install.sh --help` for the full usage of the script.
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="manual" label="Install & Configure Manually">
|
||||
|
||||
Follow the installation steps in [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) with the [``grafana-cli`` command-line tool](https://grafana.com/docs/grafana/latest/administration/cli/) for plugin installation.
|
||||
|
@ -76,7 +73,7 @@ grafana-cli plugins install tdengine-datasource
|
|||
sudo -u grafana grafana-cli plugins install tdengine-datasource
|
||||
```
|
||||
|
||||
Alternatively, you can manually download the .zip file from [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and unpack it into your grafana plugins directory.
|
||||
You can also download zip files from [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and install manually. The commands are as follows:
|
||||
|
||||
```bash
|
||||
GF_VERSION=3.2.2
|
||||
|
@ -131,7 +128,7 @@ docker run -d \
|
|||
grafana/grafana
|
||||
```
|
||||
|
||||
You can setup a zero-configuration stack for TDengine + Grafana by [docker-compose](https://docs.docker.com/compose/) and [Grafana provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) file:
|
||||
You can set up a zero-configuration stack for TDengine + Grafana by using [docker-compose](https://docs.docker.com/compose/) and a [Grafana provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) file:
|
||||
|
||||
1. Save the provisioning configuration file to `tdengine.yml`.
|
||||
|
||||
|
@ -196,7 +193,7 @@ Go back to the main interface to create a dashboard and click Add Query to enter
|
|||
|
||||
As shown above, select the `TDengine` data source in the `Query` and enter the corresponding SQL in the query box below for query.
|
||||
|
||||
- INPUT SQL: enter the statement to be queried (the result set of the SQL statement should be two columns and multiple rows), for example: `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)`, where, from, to and interval are built-in variables of the TDengine plugin, indicating the range and time interval of queries fetched from the Grafana plugin panel. In addition to the built-in variables, custom template variables are also supported.
|
||||
- INPUT SQL: Enter the desired query (the results being two columns and multiple rows), such as `select _wstart, avg(mem_system) from log.dnodes_info where ts >= $from and ts < $to interval($interval)`. In this statement, $from, $to, and $interval are variables that Grafana replaces with the query time range and interval. In addition to the built-in variables, custom template variables are also supported.
|
||||
- ALIAS BY: This allows you to set the current query alias.
|
||||
- GENERATE SQL: Clicking this button will automatically replace the corresponding variables and generate the final executed statement.
|
||||
|
||||
|
@ -208,7 +205,11 @@ Follow the default prompt to query the average system memory usage for the speci
|
|||
|
||||
### Importing the Dashboard
|
||||
|
||||
You can install TDinsight dashboard in data source configuration page (like `http://localhost:3000/datasources/edit/1/dashboards`) as a monitoring visualization tool for TDengine cluster. The dashboard is published in Grafana as [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167). Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
|
||||
You can install TDinsight dashboard in data source configuration page (like `http://localhost:3000/datasources/edit/1/dashboards`) as a monitoring visualization tool for TDengine cluster. Ensure that you use TDinsight for 3.x.
|
||||
|
||||

|
||||
|
||||
A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167). Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
|
||||
|
||||
For more dashboards using TDengine data source, [search here in Grafana](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource). Here is a sub list:
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
sidebar_label: StatsD
|
||||
title: StatsD writing
|
||||
title: StatsD Writing
|
||||
---
|
||||
|
||||
import StatsD from "../14-reference/_statsd.mdx"
|
||||
|
@ -12,8 +12,8 @@ You can write StatsD data to TDengine by simply modifying the configuration file
|
|||
## Prerequisites
|
||||
|
||||
Writing StatsD data to TDengine requires the following preparations.
|
||||
- The TDengine cluster has been deployed and is working properly
|
||||
- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](/reference/taosadapter) for details.
|
||||
1. The TDengine cluster is deployed and functioning properly
|
||||
2. taosAdapter is installed and running properly. Please refer to the taosAdapter manual for details.
|
||||
- StatsD has been installed. To install StatsD, please refer to [official documentation](https://github.com/statsd/statsd)
|
||||
|
||||
## Configuration steps
|
||||
|
@ -39,8 +39,12 @@ $ echo "foo:1|c" | nc -u -w0 127.0.0.1 8125
|
|||
Use the TDengine CLI to verify that StatsD data is written to TDengine and can be read out correctly.
|
||||
|
||||
```
|
||||
Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
|
||||
Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
|
||||
taos> show databases;
|
||||
name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
|
||||
====================================================================================================================================================================================================================================================================================
|
||||
log | 2022-04-20 07:19:50.260 | 11 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ms | 0 | ready |
|
||||
statsd | 2022-04-20 09:54:51.220 | 1 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready |
|
||||
Query OK, 2 row(s) in set (0.003142s)
|
||||
|
||||
taos> use statsd;
|
||||
Database changed.
|
||||
|
|
|
@ -9,7 +9,7 @@ MQTT is a popular IoT data transfer protocol. [EMQX](https://github.com/emqx/emq
|
|||
|
||||
The following preparations are required for EMQX to add TDengine data sources correctly.
|
||||
- The TDengine cluster is deployed and working properly
|
||||
- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](/reference/taosadapter) for details.
|
||||
- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](../../reference/taosadapter) for details.
|
||||
- If you use the emulated writers described later, you need to install the appropriate version of Node.js. V12 is recommended.
|
||||
|
||||
## Install and start EMQX
|
||||
|
@ -28,8 +28,6 @@ USE test;
|
|||
CREATE TABLE sensor_data (ts TIMESTAMP, temperature FLOAT, humidity FLOAT, volume FLOAT, pm10 FLOAT, pm25 FLOAT, so2 FLOAT, no2 FLOAT, co FLOAT, sensor_id NCHAR(255), area TINYINT, coll_time TIMESTAMP);
|
||||
```
|
||||
|
||||
Note: The table schema is based on the blog [(In Chinese) Data Transfer, Storage, Presentation, EMQX + TDengine Build MQTT IoT Data Visualization Platform](https://www.taosdata.com/blog/2020/08/04/1722.html) as an example. Subsequent operations are carried out with this blog scenario too. Please modify it according to your actual application scenario.
|
||||
|
||||
## Configuring EMQX Rules
|
||||
|
||||
Since the configuration interface of EMQX differs from version to version, here is v4.4.5 as an example. For other versions, please refer to the corresponding official documentation.
|
||||
|
@ -137,5 +135,5 @@ Use the TDengine CLI program to log in and query the appropriate databases and t
|
|||
|
||||

|
||||
|
||||
Please refer to the [TDengine official documentation](https://docs.taosdata.com/) for more details on how to use TDengine.
|
||||
Please refer to the [TDengine official documentation](https://docs.tdengine.com/) for more details on how to use TDengine.
|
||||
Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use EMQX.
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
sidebar_label: HiveMQ Broker
|
||||
title: HiveMQ Broker writing
|
||||
title: HiveMQ Broker Writing
|
||||
---
|
||||
|
||||
[HiveMQ](https://www.hivemq.com/) is an MQTT broker that provides community and enterprise editions. HiveMQ is mainly for enterprise emerging machine-to-machine M2M communication and internal transport, meeting scalability, ease of management, and security features. HiveMQ provides an open-source plug-in development kit. MQTT data can be saved to TDengine via TDengine extension for HiveMQ. Please refer to the [HiveMQ extension - TDengine documentation](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README_EN.md) for details on how to use it.
|
||||
[HiveMQ](https://www.hivemq.com/) is an MQTT broker that provides community and enterprise editions. HiveMQ is mainly for enterprise emerging machine-to-machine M2M communication and internal transport, meeting scalability, ease of management, and security features. HiveMQ provides an open-source plug-in development kit. MQTT data can be saved to TDengine via TDengine extension for HiveMQ. For more information, see [HiveMQ TDengine Extension](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README_EN.md).
|
||||
|
|
|
@ -0,0 +1,36 @@
|
|||
---
|
||||
sidebar_label: Google Data Studio
|
||||
title: Use Google Data Studio to access TDengine
|
||||
---
|
||||
|
||||
Data Studio is a powerful tool for reporting and visualization, offering a wide variety of charts and connectors and making it easy to generate reports based on predefined templates. Its ease of use and robust ecosystem have made it one of the first choices for people working in data analysis.
|
||||
|
||||
TDengine is a high-performance, scalable time-series database that supports SQL. Many businesses and developers in fields spanning from IoT and Industry Internet to IT and finance are using TDengine as their time-series database management solution.
|
||||
|
||||
The TDengine team immediately saw the benefits of pairing TDengine, for processing time-series data, with Data Studio, for analyzing it, and they set about creating a connector for Data Studio.
|
||||
|
||||
With the release of the TDengine connector in Data Studio, you can now get even more out of your data. To obtain the connector, first go to the Data Studio Connector Gallery, click Connect to Data, and search for “TDengine”.
|
||||
|
||||

|
||||
|
||||
Select the TDengine connector and click Authorize.
|
||||
|
||||

|
||||
|
||||
Then sign in to your Google Account and click Allow to enable the connection to TDengine.
|
||||
|
||||

|
||||
|
||||
In the Enter URL field, type the hostname and port of the server running the TDengine REST service. In the following fields, type your username, password, database name, table name, and the start and end times of your query range. Then, click Connect.
|
||||
|
||||

|
||||
|
||||
After the connection is established, you can use Data Studio to process your data and create reports.
|
||||
|
||||

|
||||
|
||||
In Data Studio, TDengine timestamps and tags are considered dimensions, and all other items are considered metrics. You can create all kinds of custom charts with your data; some examples are shown below.
|
||||
|
||||

|
||||
|
||||
With the ability to process petabytes of data per day and provide monitoring and alerting in real time, TDengine is a great solution for time-series data management. Now, with the Data Studio connector, we’re sure you’ll be able to gain new insights and obtain even more value from your data.
|
|
@ -12,6 +12,7 @@ The design of TDengine is based on the assumption that any hardware or software
|
|||
The logical structure diagram of TDengine's distributed architecture is as follows:
|
||||
|
||||

|
||||
|
||||
<center> Figure 1: TDengine architecture diagram </center>
|
||||
|
||||
A complete TDengine system runs on one or more physical nodes. Logically, it includes data node (dnode), TDengine client driver (TAOSC) and application (app). There are one or more data nodes in the system, which form a cluster. The application interacts with the TDengine cluster through TAOSC's API. The following is a brief introduction to each logical unit.
|
||||
|
@ -38,15 +39,16 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc
|
|||
|
||||
**Cluster external connection**: A TDengine cluster can accommodate a single data node, multiple data nodes, or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for the connection is the End Point (FQDN plus configured port number) of a data node. When starting the CLI application `taos`, the FQDN of the data node can be specified through the option `-h`, and the configured port number through `-p`. If the port is not configured, the TDengine system configuration parameter `serverPort` is adopted, as in the example below.
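For example, to connect the CLI to a specific data node by FQDN (the hostname is a placeholder; the port falls back to `serverPort` when not given):

```
taos -h h1.tdengine.com
```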
|
||||
|
||||
**Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode:
|
||||
**Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode:
|
||||
|
||||
1. Check whether the mnodeEpList file exists, if it does not exist or cannot be opened normally to obtain EP information of the mnode, skip to the second step;
|
||||
1. Check whether the mnodeEpList file exists. If it does not exist or cannot be opened to obtain the mnode's EP information, proceed to the second step;
|
||||
2. Check the system configuration file taos.cfg to obtain the node configuration parameters “firstEp” and “secondEp” (the node specified by these two parameters can be a normal node without an mnode; in this case, the node will try to redirect to the mnode when connected). If these two configuration parameters do not exist in taos.cfg or are invalid, proceed to the third step;
|
||||
3. Set its own EP as an mnode EP and run independently. After obtaining the mnode EP list, the data node initiates a connection; it joins the working cluster once the connection succeeds. If not successful, it tries the next item in the mnode EP list. If all attempts fail, it sleeps for a few seconds before trying again.
|
||||
|
||||
**The choice of MNODE**: TDengine logically has a management node, but there is no separate execution code; the server side has only one set of execution code, taosd. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it checks its End Point against the obtained mnode EP List. If its EP exists in the list, the data node starts the mnode module and becomes an mnode; otherwise, the mnode module does not start. During system operation, due to load balancing, downtime and other reasons, the mnode may migrate to a new dnode, completely transparently and without manual intervention. Any modification of configuration parameters is decided by the mnode itself according to resource usage.
|
||||
|
||||
**Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster.
|
||||
**Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster.
|
||||
|
||||
- Step 1: Connect to an existing working data node using the TDengine CLI, then add the End Point of the new data node with the command "create dnode" (see the sketch after these steps)
|
||||
- Step 2: In the system configuration parameter file taos.cfg of the new data node, set the “firstEp” and “secondEp” parameters to the EP of any two data nodes in the existing cluster. Please refer to the user tutorial for detailed steps. In this way, the cluster will be established step by step.
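A minimal sketch of the two steps, with placeholder FQDNs:

```sql
-- Step 1: in the TDengine CLI on an existing node
CREATE DNODE "h2.tdengine.com:6030";
```

```
# Step 2: in taos.cfg on the new data node
firstEp  h1.tdengine.com:6030
secondEp h2.tdengine.com:6030
```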
|
||||
|
||||
|
@ -57,6 +59,7 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc
|
|||
To explain the relationship between vnode, mnode, TAOSC and application and their respective roles, the following is an analysis of a typical data writing process.
|
||||
|
||||

|
||||
|
||||
<center> Figure 2: Typical process of TDengine </center>
|
||||
|
||||
1. Application initiates a request to insert data through JDBC, ODBC, or other APIs.
|
||||
|
@ -121,16 +124,17 @@ The load balancing process does not require any manual intervention, and it is t
|
|||
|
||||
If a database has N replicas, a virtual node group has N virtual nodes, but only one is the leader; all others are followers. When the application writes a new record to the system, only the leader vnode can accept the write request. If a follower vnode receives a write request, the system notifies TAOSC to redirect the request.
|
||||
|
||||
### Leader vnode Writing Process
|
||||
### Leader vnode Writing Process
|
||||
|
||||
The leader vnode writing process is as follows:
|
||||
|
||||

|
||||
|
||||
<center> Figure 3: TDengine Leader writing process </center>
|
||||
|
||||
1. Leader vnode receives the application data insertion request, verifies, and moves to next step;
|
||||
2. If the system configuration parameter `walLevel` is greater than 0, the vnode writes the original request packet into the database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine flushes the WAL data to disk immediately, ensuring that even if the system goes down, all data can be recovered from the log file (see the configuration sketch after this list);
|
||||
3. If there are multiple replicas, vnode will forward data packet to follower vnodes in the same virtual node group, and the forwarded packet has a version number with data;
|
||||
3. If there are multiple replicas, the vnode forwards the data packet to the follower vnodes in the same virtual node group, and the forwarded packet carries a version number with the data;
|
||||
4. Write into memory and add the record to “skip list”;
|
||||
5. Leader vnode returns a confirmation message to the application, indicating a successful write.
|
||||
6. If any of steps 2, 3 or 4 fails, the error is returned directly to the application.
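For reference, the durability behavior described in step 2 corresponds to settings along the following lines in taos.cfg (a sketch using the parameters named above):

```
walLevel 2   # write every request into the WAL
fsync    0   # 0 = flush the WAL to disk immediately
```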
|
||||
|
@ -140,6 +144,7 @@ Leader Vnode uses a writing process as follows:
|
|||
For a follower vnode, the write process is as follows:
|
||||
|
||||

|
||||
|
||||
<center> Figure 4: TDengine Follower Writing Process </center>
|
||||
|
||||
1. Follower vnode receives a data insertion request forwarded by Leader vnode;
|
||||
|
@ -212,6 +217,7 @@ When data is written to disk, the system decides whether to compress the data bas
|
|||
By default, TDengine saves all data in /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter “dataDir” to allow multiple mounted hard disks to be used by system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data older than a week is stored on local hard disk, and data older than four weeks is stored on network storage device. This reduces storage costs and ensures efficient data access. The movement of data on different storage media is automatically done by the system and is completely transparent to applications. Tiered storage of data is also configured through the system parameter “dataDir”.
|
||||
|
||||
The dataDir format is as follows:
|
||||
|
||||
```
|
||||
dataDir data_path [tier_level]
|
||||
```
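For example, the tiered layout described above (latest data on SSD, week-old data on local disk, older data on network storage) could be expressed as follows; the mount paths are placeholders:

```
dataDir /mnt/ssd   0
dataDir /mnt/disk  1
dataDir /mnt/nas   2
```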
|
||||
|
@ -270,6 +276,7 @@ For the data collected by device D1001, the number of records per hour is counte
|
|||
TDengine creates a separate table for each data collection point, but in practical applications it is often necessary to aggregate data from different data collection points. To perform such aggregations efficiently, TDengine introduces the concept of the STable (super table). An STable represents a specific type of data collection point: it is a set of tables whose schemas are identical but whose static tags differ. There can be multiple tags, and they can be added, deleted and modified at any time. By specifying tag filters, applications can aggregate or run statistics over all or a subset of the tables under an STable, which greatly simplifies application development. The process is shown in the following figure:
|
||||
|
||||

|
||||
|
||||
<center> Figure 5: Diagram of multi-table aggregation query </center>
|
||||
|
||||
1. Application sends a query condition to system;
|
||||
|
@ -279,9 +286,8 @@ TDengine creates a separate table for each data collection point, but in practic
|
|||
5. Each vnode first finds the set of tables within its own node that meet the tag filters from memory, then scans the stored time-series data, completes corresponding aggregation calculations, and returns result to TAOSC;
|
||||
6. TAOSC finally aggregates the results returned by multiple data nodes and sends them back to the application.
|
||||
|
||||
Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in aggregation operation is first found, which reduces the volume of data to be scanned and improves aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TAOS SQL for details.
|
||||
Since TDengine stores tag data and time-series data separately in the vnode, filtering the tag data in memory first determines the set of tables that need to participate in the aggregation, which reduces the volume of data to be scanned and improves aggregation speed. At the same time, because the data is distributed across multiple vnodes/dnodes, the aggregation is carried out concurrently in multiple vnodes, which further improves speed. Aggregation functions and most other operations for ordinary tables are applicable to STables, with exactly the same syntax; an illustrative query follows. Please see TDengine SQL for details.
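As an illustrative sketch, aggregating over all tables of an STable that match a tag filter is a single ordinary query; the `meters` STable and its `location` and `groupId` tags follow the examples used elsewhere in these docs:

```sql
SELECT AVG(current), MAX(voltage)
FROM meters
WHERE location LIKE 'California%' AND groupId = 2;
```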
|
||||
|
||||
### Precomputation
|
||||
|
||||
In order to effectively improve query performance, and based on the immutable nature of IoT data, statistical information about the data stored in each data block (including max value, min value, and sum) is recorded in the block header. We call this a precomputing unit. If query processing involves all the data of a whole data block, the precomputed results are used directly, with no need to read the data block contents at all. Since the amount of precomputed data is much smaller than the actual size of the data block stored on disk, for query processing bottlenecked on disk IO, using the precomputed results can greatly reduce read IO pressure and accelerate the query process. The precomputation mechanism is similar to the BRIN (Block Range Index) of PostgreSQL.
|
||||
|
||||
|
|
|
@ -41,7 +41,7 @@ The agents deployed in the application nodes are responsible for providing opera
|
|||
|
||||
- **TDengine installation and deployment**
|
||||
|
||||
First of all, please install TDengine. Download the latest stable version of TDengine from the official website and install it. For help with using various installation packages, please refer to the blog ["Installation and Uninstallation of TDengine Multiple Installation Packages"](https://www.taosdata.com/blog/2019/08/09/566.html).
|
||||
First of all, please install TDengine. Download the latest stable version of TDengine from the official website and install it. For help with using various installation packages, please refer to [Install TDengine](../../get-started/package).
|
||||
|
||||
Note that once the installation is complete, do not start the `taosd` service before properly configuring the parameters.
|
||||
|
||||
|
@ -51,7 +51,7 @@ TDengine version 2.4 and later version includes `taosAdapter`. taosAdapter is a
|
|||
|
||||
Users can flexibly deploy taosAdapter instances, based on their requirements, to improve data writing throughput and provide guarantees for data writes in different application scenarios.
|
||||
|
||||
Through taosAdapter, users can directly write the data collected by `collectd` or `StatsD` to TDengine to achieve easy, convenient and seamless migration in application scenarios. taosAdapter also supports Telegraf, Icinga, TCollector, and node_exporter data. For more details, please refer to [taosAdapter](/reference/taosadapter/).
|
||||
Through taosAdapter, users can directly write the data collected by `collectd` or `StatsD` to TDengine to achieve easy, convenient and seamless migration in application scenarios. taosAdapter also supports Telegraf, Icinga, TCollector, and node_exporter data. For more details, please refer to [taosAdapter](../../reference/taosadapter/).
|
||||
|
||||
If using collectd, modify its configuration file in the default location `/etc/collectd/collectd.conf` to point to the IP address and port of the node where taosAdapter is deployed. For example, assuming the taosAdapter IP address is 192.168.1.130 and the port is 6046, configure it as follows.
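The referenced configuration (not shown in this diff) uses collectd's standard network plugin; a minimal sketch with the address above:

```
LoadPlugin network
<Plugin network>
  Server "192.168.1.130" "6046"
</Plugin>
```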
|
||||
|
||||
|
@ -411,7 +411,7 @@ TDengine provides a wealth of help documents to explain many aspects of cluster
|
|||
|
||||
### Cluster Deployment
|
||||
|
||||
The first is TDengine installation. Download the latest stable version of TDengine from the official website, and install it. Please refer to the blog ["Installation and Uninstallation of Various Installation Packages of TDengine"](https://www.taosdata.com/blog/2019/08/09/566.html) for the various installation package formats.
|
||||
The first is TDengine installation. Download the latest stable version of TDengine from the official website, and install it. Please refer to [Install TDengine](../../get-started/package) for more details.
|
||||
|
||||
Note that once the installation is complete, do not immediately start the `taosd` service, but start it after correctly configuring the parameters.
|
||||
|
||||
|
|
|
@ -1,114 +1,163 @@
|
|||
---
|
||||
sidebar_label: FAQ
|
||||
title: Frequently Asked Questions
|
||||
---
|
||||
|
||||
## Submit an Issue
|
||||
|
||||
If the tips in FAQ don't help much, please submit an issue on [GitHub](https://github.com/taosdata/TDengine) to describe your problem. In your description please include the TDengine version, hardware and OS information, the steps to reproduce the problem and any other relevant information. It would be very helpful if you can package the contents in `/var/log/taos` and `/etc/taos` and upload. These two are the default directories used by TDengine. If you have changed the default directories in your configuration, please package the files in your configured directories. We recommended setting `debugFlag` to 135 in `taos.cfg`, restarting `taosd`, then reproducing the problem and collecting the logs. If you don't want to restart, an alternative way of setting `debugFlag` is executing `alter dnode <dnode_id> debugFlag 135` command in TDengine CLI `taos`. During normal running, however, please make sure `debugFlag` is set to 131.
|
||||
If your issue could not be resolved by reviewing this documentation, you can submit your issue on GitHub and receive support from the TDengine Team. When you submit an issue, attach the following directories from your TDengine deployment:
|
||||
|
||||
1. The directory containing TDengine logs (`/var/log/taos` by default)
|
||||
2. The directory containing TDengine configuration files (`/etc/taos` by default)
|
||||
|
||||
In your GitHub issue, provide the version of TDengine and the operating system and environment for your deployment, the operations that you performed when the issue occurred, and the time of occurrence and affected tables.
|
||||
|
||||
To obtain more debugging information, open `taos.cfg` and set the `debugFlag` parameter to `135`. Then restart TDengine Server and reproduce the issue. The debug-level logs generated help the TDengine Team to resolve your issue. If it is not possible to restart TDengine Server, you can run the following command in the TDengine CLI to set the debug flag:
|
||||
|
||||
```
|
||||
alter dnode <dnode_id> 'debugFlag' '135';
|
||||
```
|
||||
|
||||
You can run the `SHOW DNODES` command to determine the dnode ID.
|
||||
|
||||
When debugging information is no longer needed, set `debugFlag` to 131.
|
||||
|
||||
## Frequently Asked Questions
|
||||
|
||||
### 1. How to upgrade to TDengine 2.0 from older version?
|
||||
### 1. What are the best practices for upgrading a previous version of TDengine to version 3.0?
|
||||
|
||||
version 2.x is not compatible with version 1.x. With regard to the configuration and data files, please perform the following steps before upgrading. Please follow data integrity, security, backup and other relevant SOPs, best practices before removing/deleting any data.
|
||||
TDengine 3.0 is not compatible with the configuration and data files from previous versions. Before upgrading, perform the following steps:
|
||||
|
||||
1. Delete configuration files: `sudo rm -rf /etc/taos/taos.cfg`
|
||||
2. Delete log files: `sudo rm -rf /var/log/taos/`
|
||||
3. Delete data files if the data doesn't need to be kept: `sudo rm -rf /var/lib/taos/`
|
||||
4. Install latest 2.x version
|
||||
5. If the data needs to be kept and migrated to newer version, please contact professional service at TDengine for assistance.
|
||||
1. Run `sudo rm -rf /etc/taos/taos.cfg` to delete your configuration file.
|
||||
2. Run `sudo rm -rf /var/log/taos/` to delete your log files.
|
||||
3. Run `sudo rm -rf /var/lib/taos/` to delete your data files.
|
||||
4. Install TDengine 3.0.
|
||||
5. For assistance in migrating data to TDengine 3.0, contact [TDengine Support](https://tdengine.com/support).
|
||||
|
||||
### 2. How to handle "Unable to establish connection"?
|
||||
### 4. How can I resolve the "Unable to establish connection" error?
|
||||
|
||||
When the client is unable to connect to the server, you can try the following ways to troubleshoot and resolve the problem.
|
||||
This error indicates that the client could not connect to the server. Perform the following troubleshooting steps:
|
||||
|
||||
1. Check the network
|
||||
1. Check the network.
|
||||
|
||||
- Check if the hosts where the client and server are running are accessible to each other, for example by `ping` command.
|
||||
- Check if the TCP/UDP on port 6030-6042 are open for access if firewall is enabled. If possible, disable the firewall for diagnostics, but please ensure that you are following security and other relevant protocols.
|
||||
- Check if the FQDN and serverPort are configured correctly in `taos.cfg` used by the server side.
|
||||
- Check if the `firstEp` is set properly in the `taos.cfg` used by the client side.
|
||||
- For machines deployed in the cloud, verify that your security group can access ports 6030 and 6031 (TCP and UDP).
|
||||
- For virtual machines deployed locally, verify that the hosts where the client and server are running are accessible to each other. Do not use localhost as the hostname.
|
||||
- For machines deployed on a corporate network, verify that your NAT configuration allows the server to respond to the client.
|
||||
|
||||
2. Make sure the client version and server version are same.
|
||||
2. Verify that the client and server are running the same version of TDengine.
|
||||
|
||||
3. On server side, check the running status of `taosd` by executing `systemctl status taosd` . If your server is started using another way instead of `systemctl`, use the proper method to check whether the server process is running normally.
|
||||
3. On the server, run `systemctl status taosd` to verify that taosd is running normally. If taosd is stopped, run `systemctl start taosd`.
|
||||
|
||||
4. If using connector of Python, Java, Go, Rust, C#, node.JS on Linux to connect to the server, please make sure `libtaos.so` is in directory `/usr/local/taos/driver` and `/usr/local/taos/driver` is in system lib search environment variable `LD_LIBRARY_PATH`.
|
||||
4. Verify that the client is configured with the correct FQDN for the server.
|
||||
|
||||
5. If using connector on Windows, please make sure `C:\TDengine\driver\taos.dll` is in your system lib search path. We recommend putting `taos.dll` under `C:\Windows\System32`.
|
||||
5. If the server cannot be reached with the `ping` command, verify that network and DNS or hosts file settings are correct. For a TDengine cluster, the client must be able to ping the FQDN of every node in the cluster.
|
||||
|
||||
6. Some advanced network diagnostics tools
|
||||
6. Verify that your firewall settings allow all hosts in the cluster to communicate on ports 6030 and 6041 (TCP and UDP). You can run `ufw status` (Ubuntu) or `firewall-cmd --list-port` (CentOS) to check the configuration.
|
||||
|
||||
- On Linux system tool `nc` can be used to check whether the TCP/UDP can be accessible on a specified port
|
||||
Check whether a UDP port is open: `nc -vuz {hostIP} {port} `
|
||||
Check whether a TCP port on server side is open: `nc -l {port}`
|
||||
Check whether a TCP port on client side is open: `nc {hostIP} {port}`
|
||||
7. If you are using the Python, Java, Go, Rust, C#, or Node.js connector on Linux to connect to the server, verify that `libtaos.so` is in the `/usr/local/taos/driver` directory and `/usr/local/taos/driver` is in the `LD_LIBRARY_PATH` environment variable.
|
||||
|
||||
- On Windows system `Test-NetConnection -ComputerName {fqdn} -Port {port}` on PowerShell can be used to check whether the port on server side is open for access.
|
||||
8. If you are using Windows, verify that `C:\TDengine\driver\taos.dll` is in the `PATH` environment variable. If possible, move `taos.dll` to the `C:\Windows\System32` directory.
|
||||
|
||||
7. TDengine CLI `taos` can also be used to check network, please refer to [TDengine CLI](/reference/taos-shell).
|
||||
9. On Linux systems, you can use the `nc` tool to check whether a port is accessible:
|
||||
- To check whether a UDP port is open, run `nc -vuz {hostIP} {port}`.
|
||||
- To check whether a TCP port on the server side is open, run `nc -l {port}`.
|
||||
- To check whether a TCP port on client side is open, run `nc {hostIP} {port}`.
|
||||
|
||||
### 3. How to handle "Unexpected generic error in RPC" or "Unable to resolve FQDN" ?
|
||||
10. On Windows systems, you can run `Test-NetConnection -ComputerName {fqdn} -Port {port}` in PowerShell to check whether a port on the server side is accessible.
|
||||
|
||||
This error is caused because the FQDN can't be resolved. Please try following ways:
|
||||
11. You can also use the TDengine CLI to diagnose network issues. For more information, see [Problem Diagnostics](https://docs.tdengine.com/operation/diagnose/).
|
||||
|
||||
1. Check whether the FQDN is configured properly on the server side
|
||||
2. If DSN server is configured in the network, please check whether it works; otherwise, check `/etc/hosts` to see whether the FQDN is configured with correct IP
|
||||
3. If the network configuration on the server side is OK, try to ping the server from the client side.
|
||||
4. If TDengine has been used before with an old hostname then the hostname has been changed, please check `/var/lib/taos/taos/dnode/dnodeEps.json`. Before setting up a new TDengine cluster, it's better to cleanup the directories configured.
|
||||
### 5. How can I resolve the "Unable to resolve FQDN" error?
|
||||
|
||||
### 4. "Invalid SQL" is returned even though the Syntax is correct
|
||||
Clients and dnodes must be able to resolve the FQDN of each required node. You can confirm your configuration as follows:
|
||||
|
||||
"Invalid SQL" is returned when the length of SQL statement exceeds maximum allowed length or the syntax is not correct.
|
||||
1. Verify that the FQDN is configured properly on the server.
|
||||
2. If your network has a DNS server, verify that it is operational.
|
||||
3. If your network does not have a DNS server, verify that the FQDNs in the `hosts` file are correct (a sketch follows this list).
|
||||
4. On the client, use the `ping` command to test your connection to the server. If you cannot ping an FQDN, TDengine cannot reach it.
|
||||
5. If TDengine has been previously installed and the `hostname` was modified, open `dnode.json` in the `data` folder and verify that the endpoint configuration is correct. The default location of the dnode file is `/var/lib/taos/dnode`. Ensure that you clean up previous installations before reinstalling TDengine.
|
||||
6. Confirm whether FQDNs are preconfigured in `/etc/hosts` and `/etc/hostname`.
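A sketch of `/etc/hosts` entries mapping each node's FQDN (the addresses and names are placeholders):

```
192.168.1.101  h1.tdengine.com
192.168.1.102  h2.tdengine.com
```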
|
||||
|
||||
### 5. Whether validation queries are supported?
|
||||
### 6. What is the most effective way to write data to TDengine?
|
||||
|
||||
It's suggested to use a builtin database named as `log` to monitor.
|
||||
Writing data in batches provides higher efficiency in most situations. You can insert one or more data records into one or more tables in a single SQL statement.
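For example, a single INSERT statement can carry multiple rows for multiple tables; the table names and column values below follow the `meters` examples used elsewhere in these docs:

```sql
INSERT INTO d1001 VALUES
    ('2018-10-03 14:38:05.000', 10.3, 219, 0.31)
    ('2018-10-03 14:38:15.000', 12.6, 218, 0.33)
  d1002 VALUES
    ('2018-10-03 14:38:16.650', 10.3, 218, 0.25);
```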
|
||||
|
||||
<a class="anchor" id="update"></a>
|
||||
### 9. Why are table names not fully displayed?
|
||||
|
||||
### 6. Can I delete a record?
|
||||
The number of columns in the TDengine CLI terminal display is limited. This can cause table names to be cut off, and if you use an incomplete name in a statement, the "Table does not exist" error will occur. You can increase the display size with the `maxBinaryDisplayWidth` parameter or the SQL statement `set max_binary_display_width`. You can also append `\G` to your SQL statement to bypass this limitation.
|
||||
|
||||
From version 2.6.0.0 Enterprise version, deleting data can be supported.
|
||||
### 10. How can I migrate data?
|
||||
|
||||
### 7. How to create a table of over 1024 columns?
|
||||
In TDengine, the `hostname` uniquely identifies a machine. When you move data files to a new machine, you must configure the new machine to have the same `hostname` as the original machine.
|
||||
|
||||
From version 2.1.7.0, at most 4096 columns can be defined for a table.
|
||||
:::note
|
||||
|
||||
### 8. How to improve the efficiency of inserting data?
|
||||
The data structure of previous versions of TDengine is not compatible with version 3.0. To migrate from TDengine 1.x or 2.x to 3.0, you must export data from your older deployment and import it back into TDengine 3.0.
|
||||
|
||||
Inserting data in batch is a good practice. Single SQL statement can insert data for one or multiple tables in batch.
|
||||
:::
|
||||
|
||||
### 9. JDBC Error: the executed SQL is not a DML or a DDL?
|
||||
### 11. How can I temporarily change the log level from the TDengine client?
|
||||
|
||||
Please upgrade to latest JDBC driver, for details please refer to [Java Connector](/reference/connector/java)
|
||||
|
||||
### 10. Failed to connect with error "invalid timestamp"
|
||||
|
||||
The most common reason is that the time setting is not aligned on the client side and the server side. On Linux system, please use `ntpdate` command. On Windows system, please enable automatic sync in system time setting.
|
||||
|
||||
### 11. Table name is not shown in full
|
||||
|
||||
There is a display width setting in TDengine CLI `taos`. It can be controlled by configuration parameter `maxBinaryDisplayWidth`, or can be set using SQL command `set max_binary_display_width`. A more convenient way is to append `\G` in a SQL command to bypass this limitation.
|
||||
|
||||
### 12. How to change log level temporarily?
|
||||
|
||||
Below SQL command can be used to adjust log level temporarily
|
||||
To change the log level for debugging purposes, you can use the following command:
|
||||
|
||||
```sql
|
||||
ALTER LOCAL flag_name flag_value;
|
||||
ALTER LOCAL local_option
|
||||
|
||||
local_option: {
|
||||
'resetLog'
|
||||
| 'rpcDebugFlag' value
|
||||
| 'tmrDebugFlag' value
|
||||
| 'cDebugFlag' value
|
||||
| 'uDebugFlag' value
|
||||
| 'debugFlag' value
|
||||
}
|
||||
```
|
||||
- flag_name can be: debugFlag,cDebugFlag,tmrDebugFlag,uDebugFlag,rpcDebugFlag
|
||||
- flag_value can be: 131 (INFO/WARNING/ERROR), 135 (plus DEBUG), 143 (plus TRACE)
|
||||
|
||||
<a class="anchor" id="timezone"></a>
|
||||
Use `resetLog` to remove all logs generated on the local client. Use the other parameters to specify a log level for a specific component.
|
||||
|
||||
### 13. What to do if go compilation fails?
|
||||
For each parameter, you can set the value to `131` (error and warning), `135` (error, warning, and debug), or `143` (error, warning, debug, and trace).
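For example, to raise the RPC module on the local client to debug level:

```sql
ALTER LOCAL 'rpcDebugFlag' '135';
```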
|
||||
|
||||
From version 2.3.0.0, a new component named `taosAdapter` is introduced. Its' developed in Go. If you want to compile from source code and meet go compilation problems, try to do below steps to resolve Go environment problems.
|
||||
### 12. Why do TDengine components written in Go fail to compile?
|
||||
|
||||
```sh
|
||||
go env -w GO111MODULE=on
|
||||
go env -w GOPROXY=https://goproxy.cn,direct
|
||||
```
|
||||
TDengine includes taosAdapter, an independent component written in Go. This component provides the REST API as well as data access for other products such as Prometheus and Telegraf.
|
||||
When using the develop branch, you must run `git submodule update --init --recursive` to download the taosAdapter repository and then compile it.
|
||||
|
||||
TDengine Go components require Go version 1.14 or later.
|
||||
|
||||
### 13. How can I query the storage space being used by my data?
|
||||
|
||||
The TDengine data files are stored in `/var/lib/taos` by default. Log files are stored in `/var/log/taos`.
|
||||
|
||||
To see how much space your data files occupy, run `du -sh /var/lib/taos/vnode --exclude='wal'`. This excludes the write-ahead log (WAL) because its size is relatively fixed while writes are occurring, and it is written to disk and cleared when you shut down TDengine.
|
||||
|
||||
If you want to see how much space is occupied by a single database, first determine which vgroup is storing the database by running `show vgroups`. Then check `/var/lib/taos/vnode` for the files associated with the vgroup ID.
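For example, after noting a vgroup ID from `show vgroups`, you can measure that vgroup's directory (the `vnode2` directory name is illustrative):

```
du -sh /var/lib/taos/vnode/vnode2
```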
|
||||
|
||||
### 15. How is timezone information processed for timestamps?
|
||||
|
||||
TDengine uses the timezone of the client for timestamps. The server timezone does not affect timestamps. The client converts Unix timestamps in SQL statements to UTC before sending them to the server. When you query data on the server, it provides timestamps in UTC to the client, which converts them to its local time.
|
||||
|
||||
Timestamps are processed as follows:
|
||||
|
||||
1. The client uses its system timezone unless it has been configured otherwise.
|
||||
2. A timezone configured in `taos.cfg` takes precedence over the system timezone.
|
||||
3. A timezone explicitly specified when establishing a connection to TDengine through a connector takes precedence over `taos.cfg` and the system timezone. For example, the Java connector allows you to specify a timezone in the JDBC URL, as sketched after this list.
|
||||
4. If you use an RFC 3339 timestamp (2013-04-12T15:52:01.123+08:00) or an ISO 8601 timestamp (2013-04-12T15:52:01.123+0800), the timezone specified in the timestamp takes precedence over a timezone configured using any other method.
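For instance, rule 3 with the Java connector might look like the following JDBC URL; `timezone` is the connector's connection parameter, and the value is illustrative:

```
jdbc:TAOS://localhost:6030?user=root&password=taosdata&timezone=UTC-8
```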
|
||||
|
||||
### 16. Which network ports are required by TDengine?
|
||||
|
||||
See [serverPort](https://docs.tdengine.com/reference/config/#serverport) in Configuration Parameters.
|
||||
|
||||
Note that ports are specified using 6030 as the default first port. If you change this port, all other ports change as well.
|
||||
|
||||
### 17. Why do applications such as Grafana fail to connect to TDengine over the REST API?
|
||||
|
||||
In TDengine, the REST API is provided by taosAdapter. Ensure that taosAdapter is running before you connect an application to TDengine over the REST API. You can run `systemctl start taosadapter` to start the service.
|
||||
|
||||
Note that the log path for taosAdapter must be configured separately; the default path is `/var/log/taos`. You can choose one of eight log levels; the default is `info`, and setting the level to `panic` disables log output. These settings can be changed in the taosAdapter configuration file, located by default at `/etc/taos/taosadapter.toml`.
|
||||
|
||||
For more information, see [taosAdapter](https://docs.tdengine.com/reference/taosadapter/).
|
||||
|
||||
### 18. How can I resolve out-of-memory (OOM) errors?
|
||||
|
||||
OOM errors are thrown by the operating system when its memory, including swap, becomes insufficient and it needs to terminate processes to remain operational. Most OOM errors in TDengine occur for one of the following reasons: free memory is less than the value of `vm.min_free_kbytes` or free memory is less than the size of the request. If TDengine occupies reserved memory, an OOM error can occur even when free memory is sufficient.
|
||||
|
||||
TDengine preallocates memory to each vnode. The number of vnodes per database is determined by the `vgroups` parameter, and the amount of memory per vnode is determined by the `buffer` parameter. To prevent OOM errors from occurring, ensure that you prepare sufficient memory on your hosts to support the number of vnodes that your deployment requires. Configure an appropriately sized swap space. If you continue to receive OOM errors, your SQL statements may be querying too much data for your system. TDengine Enterprise Edition includes optimized memory management that increases stability for enterprise customers.
|
||||
|
|
|
@ -23,6 +23,7 @@ namespace TDengineExample
|
|||
CheckRes(conn, res, "failed to insert data");
|
||||
int affectedRows = TDengine.AffectRows(res);
|
||||
Console.WriteLine("affectedRows " + affectedRows);
|
||||
TDengine.FreeResult(res);
|
||||
ExitProgram(conn, 0);
|
||||
}
|
||||
|
||||
|
|
|
@ -16,14 +16,14 @@ public class RestInsertExample {
|
|||
|
||||
private static List<String> getRawData() {
|
||||
return Arrays.asList(
|
||||
"d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2",
|
||||
"d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2",
|
||||
"d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2",
|
||||
"d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3",
|
||||
"d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2",
|
||||
"d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2",
|
||||
"d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3",
|
||||
"d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3"
|
||||
"d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,'California.SanFrancisco',2",
|
||||
"d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,'California.SanFrancisco',2",
|
||||
"d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,'California.SanFrancisco',2",
|
||||
"d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,'California.SanFrancisco',3",
|
||||
"d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,'California.LosAngeles',2",
|
||||
"d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,'California.LosAngeles',2",
|
||||
"d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,'California.LosAngeles',3",
|
||||
"d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,'California.LosAngeles',3"
|
||||
);
|
||||
}
|
||||
|
||||
|
|
|
@ -57,7 +57,7 @@ public class SubscribeDemo {
|
|||
properties.setProperty(TMQConstants.ENABLE_AUTO_COMMIT, "true");
|
||||
properties.setProperty(TMQConstants.GROUP_ID, "test");
|
||||
properties.setProperty(TMQConstants.VALUE_DESERIALIZER,
|
||||
"com.taosdata.jdbc.MetersDeserializer");
|
||||
"com.taos.example.MetersDeserializer");
|
||||
|
||||
// poll data
|
||||
try (TaosConsumer<Meters> consumer = new TaosConsumer<>(properties)) {
|
||||
|
|
|
@ -0,0 +1,63 @@
|
|||
package com.taos.example.highvolume;
|
||||
|
||||
import java.sql.*;
|
||||
|
||||
/**
|
||||
* Prepare target database.
|
||||
* Count total records in database periodically so that we can estimate the writing speed.
|
||||
*/
|
||||
public class DataBaseMonitor {
|
||||
private Connection conn;
|
||||
private Statement stmt;
|
||||
|
||||
public DataBaseMonitor init() throws SQLException {
|
||||
if (conn == null) {
|
||||
String jdbcURL = System.getenv("TDENGINE_JDBC_URL");
|
||||
conn = DriverManager.getConnection(jdbcURL);
|
||||
stmt = conn.createStatement();
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
public void close() {
|
||||
try {
|
||||
stmt.close();
|
||||
} catch (SQLException e) {
|
||||
}
|
||||
try {
|
||||
conn.close();
|
||||
} catch (SQLException e) {
|
||||
}
|
||||
}
|
||||
|
||||
public void prepareDatabase() throws SQLException {
|
||||
stmt.execute("DROP DATABASE IF EXISTS test");
|
||||
stmt.execute("CREATE DATABASE test");
|
||||
stmt.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)");
|
||||
}
|
||||
|
||||
public Long count() throws SQLException {
|
||||
if (!stmt.isClosed()) {
|
||||
ResultSet result = stmt.executeQuery("SELECT count(*) from test.meters");
|
||||
result.next();
|
||||
return result.getLong(1);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* show test.stables;
|
||||
*
|
||||
* name | created_time | columns | tags | tables |
|
||||
* ============================================================================================
|
||||
* meters | 2022-07-20 08:39:30.902 | 4 | 2 | 620000 |
|
||||
*/
|
||||
public Long getTableCount() throws SQLException {
|
||||
if (!stmt.isClosed()) {
|
||||
ResultSet result = stmt.executeQuery("show test.stables");
|
||||
result.next();
|
||||
return result.getLong(5);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,70 @@
|
|||
package com.taos.example.highvolume;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.sql.*;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.ArrayBlockingQueue;
|
||||
import java.util.concurrent.BlockingQueue;
|
||||
|
||||
|
||||
public class FastWriteExample {
|
||||
final static Logger logger = LoggerFactory.getLogger(FastWriteExample.class);
|
||||
|
||||
final static int taskQueueCapacity = 1000000;
|
||||
final static List<BlockingQueue<String>> taskQueues = new ArrayList<>();
|
||||
final static List<ReadTask> readTasks = new ArrayList<>();
|
||||
final static List<WriteTask> writeTasks = new ArrayList<>();
|
||||
final static DataBaseMonitor databaseMonitor = new DataBaseMonitor();
|
||||
|
||||
public static void stopAll() {
|
||||
logger.info("shutting down");
|
||||
readTasks.forEach(task -> task.stop());
|
||||
writeTasks.forEach(task -> task.stop());
|
||||
databaseMonitor.close();
|
||||
}
|
||||
|
||||
public static void main(String[] args) throws InterruptedException, SQLException {
|
||||
int readTaskCount = args.length > 0 ? Integer.parseInt(args[0]) : 1;
|
||||
int writeTaskCount = args.length > 1 ? Integer.parseInt(args[1]) : 3;
|
||||
int tableCount = args.length > 2 ? Integer.parseInt(args[2]) : 1000;
|
||||
int maxBatchSize = args.length > 3 ? Integer.parseInt(args[3]) : 3000;
|
||||
|
||||
logger.info("readTaskCount={}, writeTaskCount={} tableCount={} maxBatchSize={}",
|
||||
readTaskCount, writeTaskCount, tableCount, maxBatchSize);
|
||||
|
||||
databaseMonitor.init().prepareDatabase();
|
||||
|
||||
// Create task queues and writing tasks, then start the writing threads.
|
||||
for (int i = 0; i < writeTaskCount; ++i) {
|
||||
BlockingQueue<String> queue = new ArrayBlockingQueue<>(taskQueueCapacity);
|
||||
taskQueues.add(queue);
|
||||
WriteTask task = new WriteTask(queue, maxBatchSize);
|
||||
Thread t = new Thread(task);
|
||||
t.setName("WriteThread-" + i);
|
||||
t.start();
|
||||
}
|
||||
|
||||
// create reading tasks and start reading threads
|
||||
int tableCountPerTask = tableCount / readTaskCount;
|
||||
for (int i = 0; i < readTaskCount; ++i) {
|
||||
ReadTask task = new ReadTask(i, taskQueues, tableCountPerTask);
|
||||
Thread t = new Thread(task);
|
||||
t.setName("ReadThread-" + i);
|
||||
t.start();
|
||||
}
|
||||
|
||||
Runtime.getRuntime().addShutdownHook(new Thread(FastWriteExample::stopAll));
|
||||
|
||||
long lastCount = 0;
|
||||
while (true) {
|
||||
Thread.sleep(10000);
|
||||
long numberOfTable = databaseMonitor.getTableCount();
|
||||
long count = databaseMonitor.count();
|
||||
logger.info("numberOfTable={} count={} speed={}", numberOfTable, count, (count - lastCount) / 10);
|
||||
lastCount = count;
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,53 @@
|
|||
package com.taos.example.highvolume;
|
||||
|
||||
import java.util.Iterator;
|
||||
|
||||
/**
|
||||
* Generate test data
|
||||
*/
|
||||
class MockDataSource implements Iterator {
|
||||
private String tbNamePrefix;
|
||||
private int tableCount;
|
||||
private long maxRowsPerTable = 1000000000L;
|
||||
|
||||
// 100 milliseconds between two neighbouring rows.
|
||||
long startMs = System.currentTimeMillis() - maxRowsPerTable * 100;
|
||||
private int currentRow = 0;
|
||||
private int currentTbId = -1;
|
||||
|
||||
// mock values
|
||||
String[] location = {"LosAngeles", "SanDiego", "Hollywood", "Compton", "San Francisco"};
|
||||
float[] current = {8.8f, 10.7f, 9.9f, 8.9f, 9.4f};
|
||||
int[] voltage = {119, 116, 111, 113, 118};
|
||||
float[] phase = {0.32f, 0.34f, 0.33f, 0.329f, 0.141f};
|
||||
|
||||
public MockDataSource(String tbNamePrefix, int tableCount) {
|
||||
this.tbNamePrefix = tbNamePrefix;
|
||||
this.tableCount = tableCount;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hasNext() {
|
||||
currentTbId += 1;
|
||||
if (currentTbId == tableCount) {
|
||||
currentTbId = 0;
|
||||
currentRow += 1;
|
||||
}
|
||||
return currentRow < maxRowsPerTable;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String next() {
|
||||
long ts = startMs + 100 * currentRow;
|
||||
int groupId = currentTbId % 5 == 0 ? currentTbId / 5 : currentTbId / 5 + 1;
|
||||
StringBuilder sb = new StringBuilder(tbNamePrefix + "_" + currentTbId + ","); // tbName
|
||||
sb.append(ts).append(','); // ts
|
||||
sb.append(current[currentRow % 5]).append(','); // current
|
||||
sb.append(voltage[currentRow % 5]).append(','); // voltage
|
||||
sb.append(phase[currentRow % 5]).append(','); // phase
|
||||
sb.append(location[currentRow % 5]).append(','); // location
|
||||
sb.append(groupId); // groupID
|
||||
|
||||
return sb.toString();
|
||||
}
|
||||
}
|
|
@ -0,0 +1,58 @@
|
|||
package com.taos.example.highvolume;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.BlockingQueue;
|
||||
|
||||
class ReadTask implements Runnable {
|
||||
private final static Logger logger = LoggerFactory.getLogger(ReadTask.class);
|
||||
private final int taskId;
|
||||
private final List<BlockingQueue<String>> taskQueues;
|
||||
private final int queueCount;
|
||||
private final int tableCount;
|
||||
private boolean active = true;
|
||||
|
||||
public ReadTask(int readTaskId, List<BlockingQueue<String>> queues, int tableCount) {
|
||||
this.taskId = readTaskId;
|
||||
this.taskQueues = queues;
|
||||
this.queueCount = queues.size();
|
||||
this.tableCount = tableCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* Assign data received to different queues.
|
||||
* Here we use the suffix number in table name.
|
||||
* You are expected to define your own rule in practice.
|
||||
*
|
||||
* @param line record received
|
||||
* @return which queue to use
|
||||
*/
|
||||
public int getQueueId(String line) {
|
||||
String tbName = line.substring(0, line.indexOf(',')); // For example: tb1_101
|
||||
String suffixNumber = tbName.split("_")[1];
|
||||
return Integer.parseInt(suffixNumber) % this.queueCount;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
logger.info("started");
|
||||
Iterator<String> it = new MockDataSource("tb" + this.taskId, tableCount);
|
||||
try {
|
||||
while (it.hasNext() && active) {
|
||||
String line = it.next();
|
||||
int queueId = getQueueId(line);
|
||||
taskQueues.get(queueId).put(line);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.error("Read Task Error", e);
|
||||
}
|
||||
}
|
||||
|
||||
public void stop() {
|
||||
logger.info("stop");
|
||||
this.active = false;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,205 @@
|
|||
package com.taos.example.highvolume;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.sql.*;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* A helper class encapsulating the logic of writing using SQL.
|
||||
* <p>
|
||||
* The main interfaces are two methods:
|
||||
* <ol>
|
||||
* <li>{@link SQLWriter#processLine}, which receives raw lines from WriteTask and groups them by table name.</li>
|
||||
* <li>{@link SQLWriter#flush}, which assembles the INSERT statement and executes it.</li>
|
||||
* </ol>
|
||||
* <p>
|
||||
* There is a technique worth mentioning: we create tables on demand when a "table does not exist" error occurs, instead of creating tables automatically using the syntax "INSERT INTO tb USING stb".
|
||||
* This ensures that checking table existence is a one-time-only operation.
|
||||
* </p>
|
||||
*
|
||||
|
||||
*/
|
||||
public class SQLWriter {
|
||||
final static Logger logger = LoggerFactory.getLogger(SQLWriter.class);
|
||||
|
||||
private Connection conn;
|
||||
private Statement stmt;
|
||||
|
||||
/**
|
||||
* current number of buffered records
|
||||
*/
|
||||
private int bufferedCount = 0;
|
||||
/**
|
||||
* Maximum number of buffered records.
|
||||
* Flush action will be triggered if bufferedCount reaches this value.
|
||||
*/
|
||||
private int maxBatchSize;
|
||||
|
||||
|
||||
/**
|
||||
* Maximum SQL length.
|
||||
*/
|
||||
private int maxSQLLength;
|
||||
|
||||
/**
|
||||
* Map from table name to column values. For example:
|
||||
* "tb001" -> "(1648432611249,2.1,114,0.09) (1648432611250,2.2,135,0.2)"
|
||||
*/
|
||||
private Map<String, String> tbValues = new HashMap<>();
|
||||
|
||||
/**
|
||||
* Map from table name to tag values in the same order as creating stable.
|
||||
* Used for creating table.
|
||||
*/
|
||||
private Map<String, String> tbTags = new HashMap<>();
|
||||
|
||||
public SQLWriter(int maxBatchSize) {
|
||||
this.maxBatchSize = maxBatchSize;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Get Database Connection
|
||||
*
|
||||
* @return Connection
|
||||
* @throws SQLException
|
||||
*/
|
||||
private static Connection getConnection() throws SQLException {
|
||||
String jdbcURL = System.getenv("TDENGINE_JDBC_URL");
|
||||
return DriverManager.getConnection(jdbcURL);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create Connection and Statement
|
||||
*
|
||||
* @throws SQLException
|
||||
*/
|
||||
public void init() throws SQLException {
|
||||
conn = getConnection();
|
||||
stmt = conn.createStatement();
|
||||
stmt.execute("use test");
|
||||
ResultSet rs = stmt.executeQuery("show variables");
|
||||
while (rs.next()) {
|
||||
String configName = rs.getString(1);
|
||||
if ("maxSQLLength".equals(configName)) {
|
||||
maxSQLLength = Integer.parseInt(rs.getString(2));
|
||||
logger.info("maxSQLLength={}", maxSQLLength);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert raw data to SQL fragments, group them by table name and cache them in a HashMap.
|
||||
* Trigger writing when the number of buffered records reaches maxBatchSize.
|
||||
*
|
||||
* @param line raw data received from the task queue, in format: tbName,ts,current,voltage,phase,location,groupId
|
||||
*/
|
||||
public void processLine(String line) throws SQLException {
|
||||
bufferedCount += 1;
|
||||
int firstComma = line.indexOf(',');
|
||||
String tbName = line.substring(0, firstComma);
|
||||
int lastComma = line.lastIndexOf(',');
|
||||
int secondLastComma = line.lastIndexOf(',', lastComma - 1);
|
||||
String value = "(" + line.substring(firstComma + 1, secondLastComma) + ") ";
|
||||
if (tbValues.containsKey(tbName)) {
|
||||
tbValues.put(tbName, tbValues.get(tbName) + value);
|
||||
} else {
|
||||
tbValues.put(tbName, value);
|
||||
}
|
||||
if (!tbTags.containsKey(tbName)) {
|
||||
String location = line.substring(secondLastComma + 1, lastComma);
|
||||
String groupId = line.substring(lastComma + 1);
|
||||
String tagValues = "('" + location + "'," + groupId + ')';
|
||||
tbTags.put(tbName, tagValues);
|
||||
}
|
||||
if (bufferedCount == maxBatchSize) {
|
||||
flush();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Assemble INSERT statement using buffered SQL fragments in Map {@link SQLWriter#tbValues} and execute it.
|
||||
* In case of a "Table does not exist" exception, create all tables in the SQL and retry it.
|
||||
*/
|
||||
public void flush() throws SQLException {
|
||||
StringBuilder sb = new StringBuilder("INSERT INTO ");
|
||||
for (Map.Entry<String, String> entry : tbValues.entrySet()) {
|
||||
String tableName = entry.getKey();
|
||||
String values = entry.getValue();
|
||||
String q = tableName + " values " + values + " ";
|
||||
if (sb.length() + q.length() > maxSQLLength) {
|
||||
executeSQL(sb.toString());
|
||||
logger.warn("increase maxSQLLength or decrease maxBatchSize to gain better performance");
|
||||
sb = new StringBuilder("INSERT INTO ");
|
||||
}
|
||||
sb.append(q);
|
||||
}
|
||||
executeSQL(sb.toString());
|
||||
tbValues.clear();
|
||||
bufferedCount = 0;
|
||||
}
|
||||
|
||||
private void executeSQL(String sql) throws SQLException {
|
||||
try {
|
||||
stmt.executeUpdate(sql);
|
||||
} catch (SQLException e) {
|
||||
// convert to error code defined in taoserror.h
|
||||
int errorCode = e.getErrorCode() & 0xffff;
|
||||
if (errorCode == 0x362 || errorCode == 0x218) {
|
||||
// Table does not exist
|
||||
createTables();
|
||||
executeSQL(sql);
|
||||
} else {
|
||||
logger.error("Execute SQL: {}", sql);
|
||||
throw e;
|
||||
}
|
||||
} catch (Throwable throwable) {
|
||||
logger.error("Execute SQL: {}", sql);
|
||||
throw throwable;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create tables in batch using syntax:
|
||||
* <p>
|
||||
* CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF NOT EXISTS] tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
|
||||
* </p>
|
||||
*/
|
||||
private void createTables() throws SQLException {
|
||||
StringBuilder sb = new StringBuilder("CREATE TABLE ");
|
||||
for (String tbName : tbValues.keySet()) {
|
||||
String tagValues = tbTags.get(tbName);
|
||||
sb.append("IF NOT EXISTS ").append(tbName).append(" USING meters TAGS ").append(tagValues).append(" ");
|
||||
}
|
||||
String sql = sb.toString();
|
||||
try {
|
||||
stmt.executeUpdate(sql);
|
||||
} catch (Throwable throwable) {
|
||||
logger.error("Execute SQL: {}", sql);
|
||||
throw throwable;
|
||||
}
|
||||
}
|
||||
|
||||
public boolean hasBufferedValues() {
|
||||
return bufferedCount > 0;
|
||||
}
|
||||
|
||||
public int getBufferedCount() {
|
||||
return bufferedCount;
|
||||
}
|
||||
|
||||
public void close() {
|
||||
try {
|
||||
stmt.close();
|
||||
} catch (SQLException e) {
|
||||
}
|
||||
try {
|
||||
conn.close();
|
||||
} catch (SQLException e) {
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,4 @@
|
|||
package com.taos.example.highvolume;
|
||||
|
||||
public class StmtWriter {
|
||||
}
|
|
@ -0,0 +1,58 @@
|
|||
package com.taos.example.highvolume;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.concurrent.BlockingQueue;
|
||||
|
||||
class WriteTask implements Runnable {
|
||||
private final static Logger logger = LoggerFactory.getLogger(WriteTask.class);
|
||||
private final int maxBatchSize;
|
||||
|
||||
// the queue from which this writing task gets raw data.
|
||||
private final BlockingQueue<String> queue;
|
||||
|
||||
// A flag indicating whether to continue.
|
||||
private boolean active = true;
|
||||
|
||||
public WriteTask(BlockingQueue<String> taskQueue, int maxBatchSize) {
|
||||
this.queue = taskQueue;
|
||||
this.maxBatchSize = maxBatchSize;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
logger.info("started");
|
||||
String line = null; // the line most recently taken from the queue.
|
||||
SQLWriter writer = new SQLWriter(maxBatchSize);
|
||||
try {
|
||||
writer.init();
|
||||
while (active) {
|
||||
line = queue.poll();
|
||||
if (line != null) {
|
||||
// parse raw data and buffer the data.
|
||||
writer.processLine(line);
|
||||
} else if (writer.hasBufferedValues()) {
|
||||
// write data immediately if no more data in the queue
|
||||
writer.flush();
|
||||
} else {
|
||||
// sleep a while to avoid high CPU usage if there is no more data in the queue and no buffered records.
|
||||
Thread.sleep(100);
|
||||
}
|
||||
}
|
||||
if (writer.hasBufferedValues()) {
|
||||
writer.flush();
|
||||
}
|
||||
} catch (Exception e) {
|
||||
String msg = String.format("line=%s, bufferedCount=%s", line, writer.getBufferedCount());
|
||||
logger.error(msg, e);
|
||||
} finally {
|
||||
writer.close();
|
||||
}
|
||||
}
|
||||
|
||||
public void stop() {
|
||||
logger.info("stop");
|
||||
this.active = false;
|
||||
}
|
||||
}
|
|
@ -23,16 +23,16 @@ public class TestAll {
        String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(jdbcUrl)) {
            try (Statement stmt = conn.createStatement()) {
                String sql = "INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000)\n" +
                        " power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 15:38:15.000',12.60000,218,0.33000)\n" +
                        " power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 15:38:16.800',12.30000,221,0.31000)\n" +
                        " power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 15:38:16.650',10.30000,218,0.25000)\n" +
                        " power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 15:38:05.500',11.80000,221,0.28000)\n" +
                        " power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 15:38:16.600',13.40000,223,0.29000)\n" +
                        " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:05.000',10.80000,223,0.29000)\n" +
                        " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:06.000',10.80000,223,0.29000)\n" +
                        " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:07.000',10.80000,223,0.29000)\n" +
                        " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:08.500',11.50000,221,0.35000)";
                String sql = "INSERT INTO power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000)\n" +
                        " power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 15:38:15.000',12.60000,218,0.33000)\n" +
                        " power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 15:38:16.800',12.30000,221,0.31000)\n" +
                        " power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 15:38:16.650',10.30000,218,0.25000)\n" +
                        " power.d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 15:38:05.500',11.80000,221,0.28000)\n" +
                        " power.d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 15:38:16.600',13.40000,223,0.29000)\n" +
                        " power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 15:38:05.000',10.80000,223,0.29000)\n" +
                        " power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 15:38:06.000',10.80000,223,0.29000)\n" +
                        " power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 15:38:07.000',10.80000,223,0.29000)\n" +
                        " power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 15:38:08.500',11.50000,221,0.35000)";

                stmt.execute(sql);
            }

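The change above quotes the string tag values: location is a BINARY(64) tag, so
a bare literal like California.SanFrancisco is rejected and must be written as
'California.SanFrancisco'. A minimal sketch of the corrected statement issued
through the Python connector (host and credentials are assumptions):

    import taos

    conn = taos.connect(host="localhost", user="root", password="taosdata")
    conn.execute("CREATE DATABASE IF NOT EXISTS power")
    conn.execute("CREATE STABLE IF NOT EXISTS power.meters "
                 "(ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) "
                 "TAGS (location BINARY(64), groupId INT)")
    # string tag values must be quoted inside the SQL text
    conn.execute("INSERT INTO power.d1001 USING power.meters "
                 "TAGS('California.SanFrancisco', 2) "
                 "VALUES('2018-10-03 14:38:05.000', 10.3, 219, 0.31)")
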
@ -0,0 +1,180 @@
# Install dependencies first (Python >= 3.8 recommended):
#   pip3 install faster-fifo
#

import logging
import math
import sys
import time
import os
from multiprocessing import Process
from faster_fifo import Queue
from mockdatasource import MockDataSource
from queue import Empty
from typing import List

logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format="%(asctime)s [%(name)s] - %(message)s")

READ_TASK_COUNT = 1
WRITE_TASK_COUNT = 1
TABLE_COUNT = 1000
QUEUE_SIZE = 1000000
MAX_BATCH_SIZE = 3000

read_processes = []
write_processes = []


def get_connection():
    """
    If the environment variable TDENGINE_FIRST_EP is set, it is used as the first endpoint.
    Otherwise, firstEP in /etc/taos/taos.cfg is used.
    The default username and password can be overridden with the environment
    variables TDENGINE_USER and TDENGINE_PASSWORD.
    """
    import taos
    firstEP = os.environ.get("TDENGINE_FIRST_EP")
    if firstEP:
        host, port = firstEP.split(":")
    else:
        host, port = None, 0
    user = os.environ.get("TDENGINE_USER", "root")
    password = os.environ.get("TDENGINE_PASSWORD", "taosdata")
    return taos.connect(host=host, port=int(port), user=user, password=password)
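
# A hedged usage sketch for get_connection() (editorial; the endpoint is an assumption):
#   export TDENGINE_FIRST_EP="localhost:6030"
#   export TDENGINE_USER="root"
#   export TDENGINE_PASSWORD="taosdata"
# get_connection() then connects to localhost:6030 as root; without
# TDENGINE_FIRST_EP it falls back to firstEP in /etc/taos/taos.cfg.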


# ANCHOR: read

def run_read_task(task_id: int, task_queues: List[Queue]):
    table_count_per_task = TABLE_COUNT // READ_TASK_COUNT
    data_source = MockDataSource(f"tb{task_id}", table_count_per_task)
    try:
        for batch in data_source:
            for table_id, rows in batch:
                # hash each table to a fixed queue so its rows stay ordered
                i = table_id % len(task_queues)
                # block forever if the queue is full
                task_queues[i].put_many(rows, block=True, timeout=-1)
    except KeyboardInterrupt:
        pass


# ANCHOR_END: read
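
# Editorial sanity check of the hashing in run_read_task (values illustrative):
#   >>> [table_id % 3 for table_id in range(6)]
#   [0, 1, 2, 0, 1, 2]
# With 3 queues every table is pinned to exactly one queue, so the rows of a
# given table are consumed by a single writer and stay in timestamp order.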

# ANCHOR: write
def run_write_task(task_id: int, queue: Queue):
    from sql_writer import SQLWriter
    log = logging.getLogger(f"WriteTask-{task_id}")
    writer = SQLWriter(get_connection)
    lines = None
    try:
        while True:
            try:
                # get as many rows as possible in one call
                lines = queue.get_many(block=False, max_messages_to_get=MAX_BATCH_SIZE)
                writer.process_lines(lines)
            except Empty:
                time.sleep(0.01)
    except KeyboardInterrupt:
        pass
    except BaseException as e:
        log.debug(f"lines={lines}")
        raise e


# ANCHOR_END: write

def set_global_config():
    argc = len(sys.argv)
    if argc > 1:
        global READ_TASK_COUNT
        READ_TASK_COUNT = int(sys.argv[1])
    if argc > 2:
        global WRITE_TASK_COUNT
        WRITE_TASK_COUNT = int(sys.argv[2])
    if argc > 3:
        global TABLE_COUNT
        TABLE_COUNT = int(sys.argv[3])
    if argc > 4:
        global QUEUE_SIZE
        QUEUE_SIZE = int(sys.argv[4])
    if argc > 5:
        global MAX_BATCH_SIZE
        MAX_BATCH_SIZE = int(sys.argv[5])


# ANCHOR: monitor
def run_monitor_process():
    log = logging.getLogger("DataBaseMonitor")
    conn = get_connection()
    conn.execute("DROP DATABASE IF EXISTS test")
    conn.execute("CREATE DATABASE test")
    conn.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) "
                 "TAGS (location BINARY(64), groupId INT)")

    def get_count():
        res = conn.query("SELECT count(*) FROM test.meters")
        rows = res.fetch_all()
        return rows[0][0] if rows else 0

    last_count = 0
    while True:
        time.sleep(10)
        count = get_count()
        log.info(f"count={count} speed={(count - last_count) / 10}")
        last_count = count


# ANCHOR_END: monitor
# ANCHOR: main
def main():
    set_global_config()
    logging.info(f"READ_TASK_COUNT={READ_TASK_COUNT}, WRITE_TASK_COUNT={WRITE_TASK_COUNT}, "
                 f"TABLE_COUNT={TABLE_COUNT}, QUEUE_SIZE={QUEUE_SIZE}, MAX_BATCH_SIZE={MAX_BATCH_SIZE}")

    monitor_process = Process(target=run_monitor_process)
    monitor_process.start()
    time.sleep(3)  # wait for the database to be ready

    # create task queues
    task_queues: List[Queue] = []
    for i in range(WRITE_TASK_COUNT):
        queue = Queue(max_size_bytes=QUEUE_SIZE)
        task_queues.append(queue)

    # create write processes
    for i in range(WRITE_TASK_COUNT):
        p = Process(target=run_write_task, args=(i, task_queues[i]))
        p.start()
        logging.debug(f"WriteTask-{i} started with pid {p.pid}")
        write_processes.append(p)

    # create read processes
    for i in range(READ_TASK_COUNT):
        queues = assign_queues(i, task_queues)
        p = Process(target=run_read_task, args=(i, queues))
        p.start()
        logging.debug(f"ReadTask-{i} started with pid {p.pid}")
        read_processes.append(p)

    try:
        monitor_process.join()
    except KeyboardInterrupt:
        monitor_process.terminate()
        [p.terminate() for p in read_processes]
        [p.terminate() for p in write_processes]
        [q.close() for q in task_queues]


def assign_queues(read_task_id, task_queues):
    """
    Compute the target queues for a specific read task.
    """
    ratio = WRITE_TASK_COUNT / READ_TASK_COUNT
    from_index = math.floor(read_task_id * ratio)
    end_index = math.ceil((read_task_id + 1) * ratio)
    return task_queues[from_index:end_index]


if __name__ == '__main__':
    main()
# ANCHOR_END: main

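assign_queues() splits the write queues across the read tasks in proportion to
WRITE_TASK_COUNT / READ_TASK_COUNT; because the slice bounds use floor and ceil,
adjacent read tasks may share one queue when the counts do not divide evenly. A
standalone sketch of the arithmetic (the task counts here are illustrative):

    import math

    def assign(read_task_id, queues, read_count, write_count):
        ratio = write_count / read_count
        return queues[math.floor(read_task_id * ratio):math.ceil((read_task_id + 1) * ratio)]

    queues = ["q0", "q1", "q2", "q3", "q4"]
    print(assign(0, queues, 2, 5))  # ['q0', 'q1', 'q2']
    print(assign(1, queues, 2, 5))  # ['q2', 'q3', 'q4']  (q2 is shared)
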
@ -0,0 +1,49 @@
import time


class MockDataSource:
    samples = [
        "8.8,119,0.32,LosAngeles,0",
        "10.7,116,0.34,SanDiego,1",
        "9.9,111,0.33,Hollywood,2",
        "8.9,113,0.329,Compton,3",
        "9.4,118,0.141,San Francisco,4"
    ]

    def __init__(self, tb_name_prefix, table_count):
        self.table_name_prefix = tb_name_prefix + "_"
        self.table_count = table_count
        self.max_rows = 10000000
        self.current_ts = round(time.time() * 1000) - self.max_rows * 100
        # [(tableId, tableName, values), ...]
        self.data = self._init_data()

    def _init_data(self):
        lines = self.samples * (self.table_count // 5 + 1)
        data = []
        for i in range(self.table_count):
            table_name = self.table_name_prefix + str(i)
            data.append((i, table_name, lines[i]))  # (tableId, tableName, sample values)
        return data

    def __iter__(self):
        self.row = 0
        return self

    def __next__(self):
        """
        Generate the next 1000 rows for each table.
        return: [(tableId, [row, ...]), ...]
        """
        # generate 1000 timestamps
        ts = []
        for _ in range(1000):
            self.current_ts += 100
            ts.append(str(self.current_ts))
        # add a timestamp to each row
        # [(tableId, ["tableName,ts,current,voltage,phase,location,groupId"])]
        result = []
        for table_id, table_name, values in self.data:
            rows = [table_name + ',' + t + ',' + values for t in ts]
            result.append((table_id, rows))
        return result

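Each call to MockDataSource.__next__ yields 1000 CSV lines per table, shaped
tableName,ts,current,voltage,phase,location,groupId. A short sketch of consuming
one batch (the printed timestamp depends on the wall clock at construction time):

    from mockdatasource import MockDataSource

    ds = MockDataSource("tb", 2)   # two tables: tb_0 and tb_1
    batch = next(iter(ds))         # [(table_id, [line, ...]), ...]
    table_id, rows = batch[0]
    print(len(rows))               # 1000
    print(rows[0])                 # e.g. tb_0,1666000000000,8.8,119,0.32,LosAngeles,0
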
@ -0,0 +1,90 @@
import logging
from typing import List

import taos


class SQLWriter:
    log = logging.getLogger("SQLWriter")

    def __init__(self, get_connection_func):
        self._tb_values = {}
        self._tb_tags = {}
        self._conn = get_connection_func()
        self._max_sql_length = self.get_max_sql_length()
        self._conn.execute("USE test")

    def get_max_sql_length(self):
        rows = self._conn.query("SHOW variables").fetch_all()
        for r in rows:
            name = r[0]
            if name == "maxSQLLength":
                return int(r[1])
        return 1024 * 1024

    def process_lines(self, lines: List[str]):
        """
        :param lines: ["tbName,ts,current,voltage,phase,location,groupId", ...]
        """
        for line in lines:
            ps = line.split(",")
            table_name = ps[0]
            value = '(' + ",".join(ps[1:-2]) + ') '
            if table_name in self._tb_values:
                self._tb_values[table_name] += value
            else:
                self._tb_values[table_name] = value

            if table_name not in self._tb_tags:
                location = ps[-2]
                group_id = ps[-1]
                tag_value = f"('{location}',{group_id})"
                self._tb_tags[table_name] = tag_value
        self.flush()

    def flush(self):
        """
        Assemble an INSERT statement and execute it.
        When the SQL length grows close to MAX_SQL_LENGTH, the statement is executed immediately and a new INSERT statement is started.
        In case of a "Table does not exist" error, the missing tables are created and the SQL is executed again.
        """
        sql = "INSERT INTO "
        sql_len = len(sql)
        buf = []
        for tb_name, values in self._tb_values.items():
            q = tb_name + " VALUES " + values
            if sql_len + len(q) >= self._max_sql_length:
                sql += " ".join(buf)
                self.execute_sql(sql)
                sql = "INSERT INTO "
                sql_len = len(sql)
                buf = []
            buf.append(q)
            sql_len += len(q)
        sql += " ".join(buf)
        self.execute_sql(sql)
        self._tb_values.clear()

    def execute_sql(self, sql):
        try:
            self._conn.execute(sql)
        except taos.Error as e:
            error_code = e.errno & 0xffff
            # Table does not exist
            if error_code == 9731:
                self.create_tables()
                # retry the INSERT once the tables exist
                self._conn.execute(sql)
            else:
                self.log.error("Execute SQL: %s", sql)
                raise e
        except BaseException as baseException:
            self.log.error("Execute SQL: %s", sql)
            raise baseException

    def create_tables(self):
        sql = "CREATE TABLE "
        for tb in self._tb_values.keys():
            tag_values = self._tb_tags[tb]
            sql += "IF NOT EXISTS " + tb + " USING meters TAGS " + tag_values + " "
        try:
            self._conn.execute(sql)
        except BaseException as e:
            self.log.error("Execute SQL: %s", sql)
            raise e
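
flush() concatenates the buffered per-table values into one multi-table INSERT
statement. A minimal sketch of the statement shape it assembles (table names,
timestamps, and values here are illustrative, not taken from a real run):

    # buffered state after a few process_lines() calls (hypothetical)
    tb_values = {
        "tb_0": "(1666000000000,8.8,119,0.32) (1666000000100,8.8,119,0.32) ",
        "tb_1": "(1666000000000,10.7,116,0.34) ",
    }
    sql = "INSERT INTO " + " ".join(tb + " VALUES " + v for tb, v in tb_values.items())
    print(sql)
    # INSERT INTO tb_0 VALUES (1666000000000,8.8,119,0.32) (1666000000100,8.8,119,0.32)  tb_1 VALUES (1666000000000,10.7,116,0.34)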