docs:modify docs for tmq
|
@ -315,7 +315,9 @@ def pre_test_build_win() {
|
|||
python.exe -m pip install --upgrade pip
|
||||
python -m pip uninstall taospy -y
|
||||
python -m pip install taospy==2.7.10
|
||||
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
|
||||
python -m pip uninstall taos-ws-py -y
|
||||
python -m pip install taos-ws-py==0.2.8
|
||||
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
|
||||
'''
|
||||
return 1
|
||||
}
|
||||
|
|
|
@ -0,0 +1,5 @@
|
|||
# Security Policy
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
Please submit CVE to https://github.com/taosdata/TDengine/security/advisories.
|
|
@ -0,0 +1,19 @@
|
|||
# apr-util: Apache Portable Runtime utility library, built from a pinned,
# checksum-verified release tarball and installed into the private
# ~/.cos-local.1 prefix used by the COS (Tencent Cloud Object Storage) SDK.
ExternalProject_Add(aprutil-1
    # Single slash in the URL path (upstream serves both, but the doubled
    # "//" was an accident, not a mirror convention).
    URL https://dlcdn.apache.org/apr/apr-util-1.6.3.tar.gz
    # SHA256 pins the exact release so a compromised mirror cannot
    # substitute a different archive.
    URL_HASH SHA256=2b74d8932703826862ca305b094eef2983c27b39d5c9414442e9976a9acf1983
    DOWNLOAD_NO_PROGRESS 1
    DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
    SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util"
    # autotools in-tree build; --with-apr points at the apr installed into
    # the same private prefix by the apr-1 external project.
    BUILD_IN_SOURCE TRUE
    BUILD_ALWAYS 1
    CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --with-apr=$ENV{HOME}/.cos-local.1
    BUILD_COMMAND make
    INSTALL_COMMAND make install
    TEST_COMMAND ""
)
|
|
@ -0,0 +1,19 @@
|
|||
# apr: Apache Portable Runtime, built from a pinned, checksum-verified
# release tarball as a static library and installed into the private
# ~/.cos-local.1 prefix consumed by apr-util and the COS SDK.
ExternalProject_Add(apr-1
    # Single slash in the URL path (the doubled "//" was accidental).
    URL https://dlcdn.apache.org/apr/apr-1.7.4.tar.gz
    # SHA256 pins the exact release artifact.
    URL_HASH SHA256=a4137dd82a185076fa50ba54232d920a17c6469c30b0876569e1c2a05ff311d9
    DOWNLOAD_NO_PROGRESS 1
    DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
    SOURCE_DIR "${TD_CONTRIB_DIR}/apr"
    # autotools in-tree build; disconnected update avoids re-fetching on
    # every configure, while BUILD_ALWAYS still re-runs make each build.
    BUILD_IN_SOURCE TRUE
    UPDATE_DISCONNECTED TRUE
    BUILD_ALWAYS 1
    # Static-only build: downstream links the .a into taosd directly.
    CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --enable-shared=no
    BUILD_COMMAND make
    INSTALL_COMMAND make install
    TEST_COMMAND ""
)
|
|
@ -1,5 +1,5 @@
|
|||
cmake_minimum_required(VERSION 3.0)
|
||||
set(CMAKE_VERBOSE_MAKEFILE ON)
|
||||
set(CMAKE_VERBOSE_MAKEFILE FALSE)
|
||||
set(TD_BUILD_TAOSA_INTERNAL FALSE)
|
||||
|
||||
#set output directory
|
||||
|
@ -77,6 +77,12 @@ ELSE ()
|
|||
SET(TD_TAOS_TOOLS TRUE)
|
||||
ENDIF ()
|
||||
|
||||
IF (${TD_WINDOWS})
|
||||
SET(TAOS_LIB taos_static)
|
||||
ELSE ()
|
||||
SET(TAOS_LIB taos)
|
||||
ENDIF ()
|
||||
|
||||
IF (TD_WINDOWS)
|
||||
MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
|
||||
SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")
|
||||
|
|
|
@ -125,6 +125,16 @@ option(
|
|||
ON
|
||||
)
|
||||
|
||||
IF(${TD_LINUX})
|
||||
|
||||
option(
|
||||
BUILD_WITH_COS
|
||||
"If build with cos"
|
||||
ON
|
||||
)
|
||||
|
||||
ENDIF ()
|
||||
|
||||
option(
|
||||
BUILD_WITH_SQLITE
|
||||
"If build with sqlite"
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
IF (DEFINED VERNUMBER)
|
||||
SET(TD_VER_NUMBER ${VERNUMBER})
|
||||
ELSE ()
|
||||
SET(TD_VER_NUMBER "3.1.0.2.alpha")
|
||||
SET(TD_VER_NUMBER "3.1.2.0.alpha")
|
||||
ENDIF ()
|
||||
|
||||
IF (DEFINED VERCOMPATIBLE)
|
||||
|
|
|
@ -0,0 +1,12 @@
|
|||
# cos: Tencent Cloud Object Storage C SDK, fetched at a pinned tag.
# Only the source checkout is performed here; the SDK is compiled later
# via add_subdirectory(cos-c-sdk-v5 ...) in the main contrib CMakeLists,
# so configure/build/install/test commands are deliberately empty.
# NOTE(review): a git tag is mutable — consider pinning the commit hash
# via GIT_TAG for reproducible builds; confirm with the release process.
ExternalProject_Add(cos
    GIT_REPOSITORY https://github.com/tencentyun/cos-c-sdk-v5.git
    GIT_TAG v5.0.16
    SOURCE_DIR "${TD_CONTRIB_DIR}/cos-c-sdk-v5"
    BINARY_DIR ""
    CONFIGURE_COMMAND ""
    BUILD_COMMAND ""
    INSTALL_COMMAND ""
    TEST_COMMAND ""
)
|
|
@ -0,0 +1,17 @@
|
|||
# curl: static libcurl without TLS, built into the private ~/.cos-local.1
# prefix for the COS SDK's HTTP transport.
# NOTE(review): unlike the apr/apr-util recipes this tarball has no
# URL_HASH, so the download is not integrity-checked — add the SHA256 of
# curl-8.2.1.tar.gz when it can be verified against the official release.
ExternalProject_Add(curl
    URL https://curl.se/download/curl-8.2.1.tar.gz
    DOWNLOAD_NO_PROGRESS 1
    DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
    SOURCE_DIR "${TD_CONTRIB_DIR}/curl"
    # autotools in-tree build, trimmed to the minimum the SDK needs:
    # no SSL, no LDAP(S), no brotli/zstd decompression, static-only.
    BUILD_IN_SOURCE TRUE
    BUILD_ALWAYS 1
    CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd
    BUILD_COMMAND make
    INSTALL_COMMAND make install
    TEST_COMMAND ""
)
|
|
@ -0,0 +1,14 @@
|
|||
# mxml: Mini-XML parser (a COS SDK dependency), built as a static library
# into the private ~/.cos-local.1 prefix.  (The original header comment
# said "# cos", copied from the cos recipe — this project is mxml.)
ExternalProject_Add(mxml
    GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git
    GIT_TAG v2.12
    SOURCE_DIR "${TD_CONTRIB_DIR}/mxml"
    # autotools in-tree, static-only build.
    BUILD_IN_SOURCE TRUE
    CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --enable-shared=no
    BUILD_COMMAND make
    INSTALL_COMMAND make install
    TEST_COMMAND ""
)
|
|
@ -6,6 +6,39 @@ function(cat IN_FILE OUT_FILE)
|
|||
file(APPEND ${OUT_FILE} "${CONTENTS}")
|
||||
endfunction(cat IN_FILE OUT_FILE)
|
||||
|
||||
if(${TD_LINUX})
|
||||
|
||||
set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3")
|
||||
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
|
||||
|
||||
if(${BUILD_WITH_COS})
|
||||
file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/)
|
||||
cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
|
||||
cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
|
||||
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
|
||||
endif(${BUILD_WITH_COS})
|
||||
|
||||
configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
|
||||
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
|
||||
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
|
||||
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
|
||||
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
|
||||
|
||||
set(CONTRIB_TMP_FILE2 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in2")
|
||||
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})
|
||||
|
||||
if(${BUILD_WITH_COS})
|
||||
cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})
|
||||
endif(${BUILD_WITH_COS})
|
||||
|
||||
configure_file(${CONTRIB_TMP_FILE2} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
|
||||
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
|
||||
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
|
||||
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
|
||||
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
|
||||
|
||||
endif(${TD_LINUX})
|
||||
|
||||
set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in")
|
||||
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
|
||||
|
@ -122,6 +155,16 @@ if(${BUILD_WITH_SQLITE})
|
|||
cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
endif(${BUILD_WITH_SQLITE})
|
||||
|
||||
# cos
|
||||
if(${BUILD_WITH_COS})
|
||||
#cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
#cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
#cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
#cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
add_definitions(-DUSE_COS)
|
||||
endif(${BUILD_WITH_COS})
|
||||
|
||||
# lucene
|
||||
if(${BUILD_WITH_LUCENE})
|
||||
cat("${TD_SUPPORT_DIR}/lucene_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
|
@ -347,6 +390,31 @@ if (${BUILD_WITH_ROCKSDB})
|
|||
endif()
|
||||
endif()
|
||||
|
||||
# cos
|
||||
if(${BUILD_WITH_COS})
|
||||
if(${TD_LINUX})
|
||||
set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1)
|
||||
#ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a)
|
||||
option(ENABLE_TEST "Enable the tests" OFF)
|
||||
INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include)
|
||||
MESSAGE("$ENV{HOME}/.cos-local.1/include")
|
||||
|
||||
set(CMAKE_BUILD_TYPE debug)
|
||||
set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME})
|
||||
set(CMAKE_PROJECT_NAME cos_c_sdk)
|
||||
|
||||
add_subdirectory(cos-c-sdk-v5 EXCLUDE_FROM_ALL)
|
||||
target_include_directories(
|
||||
cos_c_sdk
|
||||
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cos-c-sdk-v5/cos_c_sdk>
|
||||
)
|
||||
|
||||
set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME})
|
||||
else()
|
||||
|
||||
endif(${TD_LINUX})
|
||||
endif(${BUILD_WITH_COS})
|
||||
|
||||
# lucene
|
||||
# To support build on ubuntu: sudo apt-get install libboost-all-dev
|
||||
if(${BUILD_WITH_LUCENE})
|
||||
|
|
|
@ -3,6 +3,11 @@ if(${BUILD_WITH_ROCKSDB})
|
|||
add_subdirectory(rocksdb)
|
||||
endif(${BUILD_WITH_ROCKSDB})
|
||||
|
||||
# cos
|
||||
if(${BUILD_WITH_COS})
|
||||
add_subdirectory(cos)
|
||||
endif(${BUILD_WITH_COS})
|
||||
|
||||
if(${BUILD_WITH_LUCENE})
|
||||
add_subdirectory(lucene)
|
||||
endif(${BUILD_WITH_LUCENE})
|
||||
|
|
|
@ -0,0 +1,49 @@
|
|||
# cosTest: small smoke-test executable for the Tencent COS C SDK.
add_executable(cosTest "")
target_sources(cosTest
    PRIVATE
        "${CMAKE_CURRENT_SOURCE_DIR}/main.c"
)

# Locate apr's include directory via its config script rather than a
# hardcoded path; covers both the apr-config and apr-1-config names.
find_program(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/)

if(APR_CONFIG_BIN)
    execute_process(
        COMMAND ${APR_CONFIG_BIN} --includedir
        OUTPUT_VARIABLE APR_INCLUDE_DIR
        OUTPUT_STRIP_TRAILING_WHITESPACE
    )
endif()

# NOTE(review): prefer target_include_directories(cosTest ...) over the
# directory-scoped include_directories(); kept target-local here.
target_include_directories(
    cosTest
    PRIVATE "${APR_INCLUDE_DIR}"
    PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk"
)

# Bug fix: the original had stray closing braces — "apr-1}" and
# "aprutil-1}" — which would make CMake link against nonexistent
# library names; the braces are removed here.
target_link_libraries(cosTest
    cos_c_sdk
    apr-1
    aprutil-1
    mxml
    curl
)
|
|
@ -4,11 +4,11 @@ description: This document introduces the major features, competitive advantages
|
|||
toc_max_heading_level: 2
|
||||
---
|
||||
|
||||
TDengine is an [open source](https://tdengine.com/tdengine/open-source-time-series-database/), [high-performance](https://tdengine.com/tdengine/high-performance-time-series-database/), [cloud native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](../develop/cache), [stream processing](../develop/stream), [data subscription](../develop/tmq) and other functionalities to reduce the system complexity and cost of development and operation.
|
||||
TDengine is a big data platform designed and optimized for IoT (Internet of Things) and Industrial Internet. It can safely and effectively converge, store, process and distribute the high volume of data (TB or even PB) generated every day by a large number of devices and data acquisition units, monitor and alert on business operation status in real time, and provide real-time business insight. The core component of TDengine is TDengine OSS, which is a high performance, open source, cloud native and simplified time series database.
|
||||
|
||||
This section introduces the major features, competitive advantages, typical use-cases and benchmarks to help you get a high level overview of TDengine.
|
||||
|
||||
## Major Features
|
||||
## Major Features of TDengine OSS
|
||||
|
||||
The major features are listed below:
|
||||
|
||||
|
@ -132,3 +132,9 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
|
|||
- [Introduction to Time-Series Database](https://tdengine.com/tsdb/)
|
||||
- [Introduction to TDengine competitive advantages](https://tdengine.com/tdengine/)
|
||||
|
||||
|
||||
## Products
|
||||
|
||||
There are two products offered by TDengine: TDengine Enterprise and TDengine Cloud, for details please refer to
|
||||
- [TDengine Enterprise](https://www.taosdata.com/tdengine-pro)
|
||||
- [TDengine Cloud](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)
|
||||
|
|
|
@ -40,7 +40,6 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
|
|||
- Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h)
|
||||
- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table names if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored
|
||||
- It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur.(smlDataFormat in taos.cfg default to false after version of 3.0.1.3, smlDataFormat is discarded since 3.0.3.0)
|
||||
- Due to the fact that SQL table names do not support point(.), schemaless has also processed point(.) and automatically replaced them with underscores(_)
|
||||
|
||||
:::
|
||||
|
||||
|
|
|
@ -34,7 +34,6 @@ meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
|
|||
```
|
||||
|
||||
- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table names if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
|
||||
- Due to the fact that SQL table names do not support point(.), schemaless has also processed point(.) and automatically replaced them with underscores(_)
|
||||
|
||||
Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_telnet/put.html) for more details.
|
||||
|
||||
|
|
|
@ -49,7 +49,6 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http
|
|||
|
||||
- In JSON protocol, strings will be converted to NCHAR type and numeric values will be converted to double type.
|
||||
- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table names if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
|
||||
- Due to the fact that SQL table names do not support point(.), schemaless has also processed point(.) and automatically replaced them with underscores(_)
|
||||
|
||||
:::
|
||||
|
||||
|
|
|
@ -106,8 +106,6 @@ The related schemas and APIs in various languages are described as follows:
|
|||
DLL_EXPORT const char *tmq_err2str(int32_t code);
|
||||
```
|
||||
|
||||
For more information, see [C/C++ Connector](/reference/connector/cpp).
|
||||
|
||||
The following example is based on the smart meter table described in Data Models. For complete sample code, see the C language section below.
|
||||
|
||||
</TabItem>
|
||||
|
@ -122,7 +120,19 @@ Set<String> subscription() throws SQLException;
|
|||
|
||||
ConsumerRecords<V> poll(Duration timeout) throws SQLException;
|
||||
|
||||
Set<TopicPartition> assignment() throws SQLException;
|
||||
long position(TopicPartition partition) throws SQLException;
|
||||
Map<TopicPartition, Long> position(String topic) throws SQLException;
|
||||
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
|
||||
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
|
||||
Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;
|
||||
|
||||
void seek(TopicPartition partition, long offset) throws SQLException;
|
||||
void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
|
||||
void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
|
||||
|
||||
void commitSync() throws SQLException;
|
||||
void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;
|
||||
|
||||
void close() throws SQLException;
|
||||
```
|
||||
|
|
|
@ -887,4 +887,4 @@ The `pycumsum` function finds the cumulative sum for all data in the input colum
|
|||
|
||||
</details>
|
||||
## Manage and Use UDF
|
||||
You need to add UDF to TDengine before using it in SQL queries. For more information about how to manage UDF and how to invoke UDF, please see [Manage and Use UDF](../12-taos-sql/26-udf.md).
|
||||
You need to add UDF to TDengine before using it in SQL queries. For more information about how to manage UDF and how to invoke UDF, please see [Manage and Use UDF](../taos-sql/udf/).
|
||||
|
|
|
@ -62,12 +62,13 @@ serverPort 6030
|
|||
|
||||
For all the dnodes in a TDengine cluster, the below parameters must be configured exactly the same, any node whose configuration is different from dnodes already in the cluster can't join the cluster.
|
||||
|
||||
| **#** | **Parameter** | **Definition** |
|
||||
| ----- | ------------------ | ------------------------------------------- |
|
||||
| 1 | statusInterval | The interval by which dnode reports its status to mnode |
|
||||
| 2 | timezone | Timezone |
|
||||
| 3 | locale | System region and encoding |
|
||||
| 4 | charset | Character set |
|
||||
| **#** | **Parameter** | **Definition** |
|
||||
| ----- | ---------------- | ----------------------------------------------------------------------------- |
|
||||
| 1 | statusInterval | The interval by which dnode reports its status to mnode |
|
||||
| 2 | timezone | Timezone |
|
||||
| 3 | locale | System region and encoding |
|
||||
| 4 | charset | Character set |
|
||||
| 5 | ttlChangeOnWrite | Whether the ttl expiration time changes with the table modification operation |
|
||||
|
||||
## Start Cluster
|
||||
|
||||
|
@ -97,7 +98,7 @@ Then, on the first dnode i.e. h1.tdengine.com in our example, use TDengine CLI `
|
|||
CREATE DNODE "h2.taos.com:6030";
|
||||
```
|
||||
|
||||
This adds the end point of the new dnode (from Step 4) into the end point list of the cluster. In the command "fqdn:port" should be quoted using double quotes. Change `"h2.taos.com:6030"` to the end point of your new dnode.
|
||||
This adds the end point of the new dnode (from Step 4) into the end point list of the cluster. In the command "fqdn:port" should be quoted using double quotes. Change `"h2.taos.com:6030"` to the end point of your new dnode.
|
||||
|
||||
Then on the first dnode h1.tdengine.com, execute `show dnodes` in `taos`
|
||||
|
||||
|
|
|
@ -43,6 +43,8 @@ In TDengine, the data types below can be used when specifying a column or tag.
|
|||
| 15 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type. |
|
||||
| 16 | VARCHAR | User-defined | Alias of BINARY |
|
||||
| 17 | GEOMETRY | User-defined | Geometry |
|
||||
| 18 | VARBINARY | User-defined | Binary data with variable length
|
||||
|
||||
:::note
|
||||
|
||||
- Each row of the table cannot be longer than 48KB (64KB since version 3.0.5.0) (note that each BINARY/NCHAR/GEOMETRY column takes up an additional 2 bytes of storage space).
|
||||
|
@ -57,7 +59,7 @@ In TDengine, the data types below can be used when specifying a column or tag.
|
|||
| 3 | POLYGON((1.0 1.0, 2.0 2.0, 1.0 1.0)) | 13+3*16 | 13+4094*16 | +16 |
|
||||
|
||||
- Numeric values in SQL statements will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
|
||||
|
||||
- VARBINARY is a data type that stores binary data, with a maximum length of 65517 bytes and a maximum length of 16382 bytes for tag columns. Binary data can be written through SQL or schemaless (which needs to be converted to a string starting with \x), or written through stmt (which can directly use binary). Display starting with hexadecimal starting with \x.
|
||||
:::
|
||||
|
||||
## Constants
|
||||
|
|
|
@ -7,9 +7,9 @@ description: This document describes how to query data in TDengine.
|
|||
## Syntax
|
||||
|
||||
```sql
|
||||
SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE()}
|
||||
SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE() | CURRENT_USER() | USER() }
|
||||
|
||||
SELECT [DISTINCT] select_list
|
||||
SELECT [hints] [DISTINCT] select_list
|
||||
from_clause
|
||||
[WHERE condition]
|
||||
[partition_by_clause]
|
||||
|
@ -21,6 +21,11 @@ SELECT [DISTINCT] select_list
|
|||
[LIMIT limit_val [OFFSET offset_val]]
|
||||
[>> export_file]
|
||||
|
||||
hints: /*+ [hint([hint_param_list])] [hint([hint_param_list])] */
|
||||
|
||||
hint:
|
||||
BATCH_SCAN | NO_BATCH_SCAN
|
||||
|
||||
select_list:
|
||||
select_expr [, select_expr] ...
|
||||
|
||||
|
@ -70,6 +75,29 @@ order_expr:
|
|||
{expr | position | c_alias} [DESC | ASC] [NULLS FIRST | NULLS LAST]
|
||||
```
|
||||
|
||||
## Hints
|
||||
|
||||
Hints are a means of user control over query optimization for individual statements. Hints are ignored automatically if they are not applicable to the current query statement. The specific instructions are as follows:
|
||||
|
||||
- Hints syntax starts with `/*+` and ends with `*/`, spaces are allowed before or after.
|
||||
- Hints syntax can only follow the SELECT keyword.
|
||||
- A hints list can contain multiple hints, separated by spaces. When multiple hints conflict or are identical, whichever comes first takes effect.
|
||||
- When an error occurs with a hint in hints, the effective hint before the error is still valid, and the current and subsequent hints are ignored.
|
||||
- hint_param_list are arguments to each hint, which varies according to each hint.
|
||||
|
||||
The list of currently supported Hints is as follows:
|
||||
|
||||
| **Hint** | **Params** | **Comment** | **Scope** |
|
||||
| :-----------: | -------------- | -------------------------- | -------------------------- |
|
||||
| BATCH_SCAN | None | Batch table scan | JOIN statement for stable |
|
||||
| NO_BATCH_SCAN | None | Sequential table scan | JOIN statement for stable |
|
||||
|
||||
For example:
|
||||
|
||||
```sql
|
||||
SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
|
||||
```
|
||||
|
||||
## Lists
|
||||
|
||||
A query can be performed on some or all columns. Data and tag columns can all be included in the SELECT list.
|
||||
|
@ -167,7 +195,7 @@ The following SQL statement returns the number of subtables within the meters su
|
|||
SELECT COUNT(*) FROM (SELECT DISTINCT TBNAME FROM meters);
|
||||
```
|
||||
|
||||
In the preceding two statements, only tags can be used as filtering conditions in the WHERE clause. For example:
|
||||
In the preceding two statements, only tags can be used as filtering conditions in the WHERE clause.
|
||||
|
||||
**\_QSTART and \_QEND**
|
||||
|
||||
|
@ -209,8 +237,7 @@ You can perform INNER JOIN statements based on the primary key. The following co
|
|||
3. For supertables, the ON condition must be equivalent to the primary key. In addition, the tag columns of the tables on which the INNER JOIN is performed must have a one-to-one relationship. You cannot specify an OR condition.
|
||||
4. The tables that are included in a JOIN clause must be of the same type (supertable, standard table, or subtable).
|
||||
5. You can include subqueries before and after the JOIN keyword.
|
||||
6. You cannot include more than ten tables in a JOIN clause.
|
||||
7. You cannot include a FILL clause and a JOIN clause in the same statement.
|
||||
6. You cannot include a FILL clause and a JOIN clause in the same statement.
|
||||
|
||||
## GROUP BY
|
||||
|
||||
|
@ -301,6 +328,12 @@ SELECT TODAY();
|
|||
SELECT TIMEZONE();
|
||||
```
|
||||
|
||||
### Obtain Current User
|
||||
|
||||
```sql
|
||||
SELECT CURRENT_USER();
|
||||
```
|
||||
|
||||
## Regular Expression
|
||||
|
||||
### Syntax
|
||||
|
@ -355,7 +388,7 @@ SELECT AVG(CASE WHEN voltage < 200 or voltage > 250 THEN 220 ELSE voltage END) F
|
|||
|
||||
## JOIN
|
||||
|
||||
TDengine supports the `INTER JOIN` based on the timestamp primary key, that is, the `JOIN` condition must contain the timestamp primary key. As long as the requirement of timestamp-based primary key is met, `INTER JOIN` can be made between normal tables, sub-tables, super tables and sub-queries at will, and there is no limit on the number of tables.
|
||||
TDengine supports the `INTER JOIN` based on the timestamp primary key, that is, the `JOIN` condition must contain the timestamp primary key. As long as the requirement of timestamp-based primary key is met, `INTER JOIN` can be made between normal tables, sub-tables, super tables and sub-queries at will, and there is no limit on the number of tables, primary key and other conditions must be combined with `AND` operator.
|
||||
|
||||
For standard tables:
|
||||
|
||||
|
|
|
@ -49,3 +49,5 @@ You can also add filter conditions to limit the results.
|
|||
6. You cannot create an index on a normal table or a child table.
|
||||
|
||||
7. If the unique values of a tag column are too few, it's better not to create index on such tag columns, the benefit would be very small.
|
||||
|
||||
8. The newly created super table will randomly generate an index name for the first column of tags, which is composed of the name tag0 column with 23 random bytes, and can be rebuilt or dropped.
|
||||
|
|
|
@ -292,11 +292,11 @@ CONCAT_WS(separator_expr, expr1, expr2 [, expr] ...)
|
|||
LENGTH(expr)
|
||||
```
|
||||
|
||||
**Description**: The length in bytes of a string
|
||||
**Description**: The length in bytes
|
||||
|
||||
**Return value type**: Bigint
|
||||
|
||||
**Applicable data types**: VARCHAR and NCHAR fields or columns
|
||||
**Applicable data types**: VARCHAR and NCHAR and VARBINARY
|
||||
|
||||
**Nested query**: It can be used in both the outer query and inner query in a nested query.
|
||||
|
||||
|
@ -402,7 +402,7 @@ CAST(expr AS type_name)
|
|||
|
||||
**Return value type**: The type specified by parameter `type_name`
|
||||
|
||||
**Applicable data types**: All data types except JSON
|
||||
**Applicable data types**: All data types except JSON and VARBINARY. If type_name is VARBINARY, expr can only be VARCHAR.
|
||||
|
||||
**Nested query**: It can be used in both the outer query and inner query in a nested query.
|
||||
|
||||
|
@ -698,7 +698,7 @@ ELAPSED(ts_primary_key [, time_unit])
|
|||
LEASTSQUARES(expr, start_val, step_val)
|
||||
```
|
||||
|
||||
**Description**: The linear regression function of the specified column and the timestamp column (primary key), `start_val` is the initial value and `step_val` is the step value.
|
||||
**Description**: The linear regression function of a specified column, `start_val` is the initial value and `step_val` is the step value.
|
||||
|
||||
**Return value type**: A string in the format of "(slope, intercept)"
|
||||
|
||||
|
@ -1275,6 +1275,14 @@ SELECT SERVER_STATUS();
|
|||
|
||||
**Description**: The server status.
|
||||
|
||||
### CURRENT_USER
|
||||
|
||||
```sql
|
||||
SELECT CURRENT_USER();
|
||||
```
|
||||
|
||||
**Description**: get current user.
|
||||
|
||||
|
||||
## Geometry Functions
|
||||
|
||||
|
|
|
@ -168,3 +168,11 @@ All [scalar functions](../function/#scalar-functions) are available in stream pr
|
|||
- [unique](../function/#unique)
|
||||
- [mode](../function/#mode)
|
||||
|
||||
## Pause\Resume stream
|
||||
1.pause stream
|
||||
PAUSE STREAM [IF EXISTS] stream_name;
|
||||
If "IF EXISTS" is not specified and the stream does not exist, an error will be reported; If "IF EXISTS" is specified and the stream does not exist, success is returned; If the stream exists, paused all stream tasks.
|
||||
|
||||
2.resume stream
|
||||
RESUME STREAM [IF EXISTS] [IGNORE UNTREATED] stream_name;
|
||||
If "IF EXISTS" is not specified and the stream does not exist, an error will be reported. If "IF EXISTS" is specified and the stream does not exist, success is returned; if the stream exists, all of the stream tasks will be resumed. If "IGNORE UNTREATED" is specified, data written during the pause period of the stream is ignored when resuming the stream.
|
||||
|
|
|
@ -178,7 +178,7 @@ The following list shows all reserved keywords:
|
|||
|
||||
- MATCH
|
||||
- MAX_DELAY
|
||||
- MAX_SPEED
|
||||
- BWLIMIT
|
||||
- MAXROWS
|
||||
- MERGE
|
||||
- META
|
||||
|
|
|
@ -74,27 +74,27 @@ Provides information about the cluster.
|
|||
|
||||
Provides information about user-created databases. Similar to SHOW DATABASES.
|
||||
|
||||
| # | **Column** | **Data Type** | **Description** |
|
||||
| # | **Column** | **Data Type** | **Description** |
|
||||
| --- | :------------------: | ---------------- | ------------------------------------------------ |
|
||||
| 1| name| BINARY(32)| Database name |
|
||||
| 1 | name | VARCHAR(64) | Database name |
|
||||
| 2 | create_time | TIMESTAMP | Creation time |
|
||||
| 3 | ntables | INT | Number of standard tables and subtables (not including supertables) |
|
||||
| 4 | vgroups | INT | Number of vgroups. It should be noted that `vgroups` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 6 | replica | INT | Number of replicas. It should be noted that `replica` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 7 | strict | BINARY(4) | Obsoleted |
|
||||
| 8 | duration | INT | Duration for storage of single files. It should be noted that `duration` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 9 | keep | INT | Data retention period. It should be noted that `keep` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 7 | strict | VARCHAR(4) | Obsoleted |
|
||||
| 8 | duration | VARCHAR(10) | Duration for storage of single files. It should be noted that `duration` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 9 | keep | VARCHAR(32) | Data retention period. It should be noted that `keep` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 10 | buffer | INT | Write cache size per vnode, in MB. It should be noted that `buffer` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 11 | pagesize | INT | Page size for vnode metadata storage engine, in KB. It should be noted that `pagesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 12 | pages | INT | Number of pages per vnode metadata storage engine. It should be noted that `pages` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 13 | minrows | INT | Minimum number of records per file block. It should be noted that `minrows` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 14 | maxrows | INT | Maximum number of records per file block. It should be noted that `maxrows` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 15 | comp | INT | Compression method. It should be noted that `comp` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 16 | precision | BINARY(2) | Time precision. It should be noted that `precision` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 17 | status | BINARY(10) | Current database status |
|
||||
| 18 | retentions | BINARY (60) | Aggregation interval and retention period. It should be noted that `retentions` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 16 | precision | VARCHAR(2) | Time precision. It should be noted that `precision` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 17 | status | VARCHAR(10) | Current database status |
|
||||
| 18 | retentions | VARCHAR(60) | Aggregation interval and retention period. It should be noted that `retentions` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 19 | single_stable | BOOL | Whether the database can contain multiple supertables. It should be noted that `single_stable` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 20 | cachemodel | BINARY(60) | Caching method for the newest data. It should be noted that `cachemodel` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 20 | cachemodel | VARCHAR(60) | Caching method for the newest data. It should be noted that `cachemodel` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 21 | cachesize | INT | Memory per vnode used for caching the newest data. It should be noted that `cachesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 22 | wal_level | INT | WAL level. It should be noted that `wal_level` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 23 | wal_fsync_period | INT | Interval at which WAL is written to disk. It should be noted that `wal_fsync_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
|
|
|
@ -22,6 +22,14 @@ SHOW CLUSTER;
|
|||
|
||||
Shows information about the current cluster.
|
||||
|
||||
## SHOW CLUSTER ALIVE
|
||||
|
||||
```sql
|
||||
SHOW CLUSTER ALIVE;
|
||||
```
|
||||
|
||||
It is used to check whether the cluster is available or not. Return value: 0 means unavailable, 1 means available, 2 means partially available (some dnodes are offline, the other dnodes are available)
|
||||
|
||||
## SHOW CONNECTIONS
|
||||
|
||||
```sql
|
||||
|
|
|
@ -4,7 +4,7 @@ sidebar_label: Access Control
|
|||
description: This document describes how to manage users and permissions in TDengine.
|
||||
---
|
||||
|
||||
This document describes how to manage permissions in TDengine.
|
||||
User and Access control is a distinguished feature of TDengine enterprise edition. In this section, only the most fundamental functionalities of user and access control are demonstrated. To get the full knowledge of user and access control, please contact the TDengine team.
|
||||
|
||||
## Create a User
|
||||
|
||||
|
|
|
@ -19,6 +19,9 @@ index_option:
|
|||
functions:
|
||||
function [, function] ...
|
||||
```
|
||||
### tag Indexing
|
||||
|
||||
[tag index](../tag-index)
|
||||
|
||||
### SMA Indexing
|
||||
|
||||
|
|
|
@ -17,7 +17,7 @@ You can use the SHOW CONNECTIONS statement to find the conn_id.
|
|||
## Terminate a Query
|
||||
|
||||
```sql
|
||||
KILL QUERY kill_id;
|
||||
KILL QUERY 'kill_id';
|
||||
```
|
||||
|
||||
You can use the SHOW QUERIES statement to find the kill_id.
|
||||
|
|
|
@ -135,7 +135,7 @@ The following describes the basic API, synchronous API, asynchronous API, subscr
|
|||
|
||||
The base API is used to do things like create database connections and provide a runtime environment for the execution of other APIs.
|
||||
|
||||
- `void taos_init()`
|
||||
- `int taos_init()`
|
||||
|
||||
Initializes the runtime environment. If the API is not actively called, the driver will automatically call the API when `taos_connect()` is called, so the program generally does not need to call it manually.
|
||||
|
||||
|
@ -168,6 +168,12 @@ The base API is used to do things like create database connections and provide a
|
|||
|
||||
:::
|
||||
|
||||
- `TAOS *taos_connect_auth(const char *host, const char *user, const char *auth, const char *db, uint16_t port)`
|
||||
|
||||
This function is the same as taos_connect, except that the pass parameter is replaced by auth; the other parameters are the same as those of taos_connect.
|
||||
|
||||
- auth: the 32-bit lowercase md5 of the raw password
|
||||
|
||||
- `char *taos_get_server_info(TAOS *taos)`
|
||||
|
||||
Get server-side version information.
|
||||
|
@ -184,6 +190,14 @@ The base API is used to do things like create database connections and provide a
|
|||
- If len is less than the space required to store the db (including the last '\0'), an error is returned. The truncated data assigned in the database ends with '\0'.
|
||||
- If len is greater than or equal to the space required to store the db (including the last '\0'), return normal 0, and assign the db name ending with '\0' in the database.
|
||||
|
||||
- `int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param, int type)`
|
||||
|
||||
Set the event callback function.
|
||||
|
||||
- fp: event callback function pointer. Declaration:typedef void (*__taos_notify_fn_t)(void *param, void *ext, int type);Param is a user-defined parameter, ext is an extended parameter (depending on the event type, and returns the user password version for TAOS_NOTIFY_PASSVER), and type is the event type
|
||||
- param: user-defined parameter
|
||||
- type: event type. Value range: 1) TAOS_NOTIFY_PASSVER: User password changed
|
||||
|
||||
- `void taos_close(TAOS *taos)`
|
||||
|
||||
Closes the connection, where `taos` is the handle returned by `taos_connect()`.
|
||||
|
@ -307,21 +321,20 @@ The specific functions related to the interface are as follows (see also the [pr
|
|||
|
||||
Parse a SQL command, and bind the parsed result and parameter information to `stmt`. If the parameter length is greater than 0, use this parameter as the length of the SQL command. If it is equal to 0, the length of the SQL command will be determined automatically.
|
||||
|
||||
- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind)`
|
||||
- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind)`
|
||||
|
||||
Not as efficient as `taos_stmt_bind_param_batch()`, but can support non-INSERT type SQL statements.
|
||||
To bind parameters, bind points to an array (representing the row of data to be bound), making sure that the number and order of the elements in this array are the same as the parameters in the SQL statement. taos_bind is used similarly to MYSQL_BIND in MySQL, as defined below.
|
||||
|
||||
```c
|
||||
typedef struct TAOS_BIND {
|
||||
typedef struct TAOS_MULTI_BIND {
|
||||
int buffer_type;
|
||||
void * buffer;
|
||||
uintptr_t buffer_length; // not in use
|
||||
uintptr_t * length;
|
||||
int * is_null;
|
||||
int is_unsigned; // not in use
|
||||
int * error; // not in use
|
||||
} TAOS_BIND;
|
||||
void *buffer;
|
||||
uintptr_t buffer_length;
|
||||
uint32_t *length;
|
||||
char *is_null;
|
||||
int num;
|
||||
} TAOS_MULTI_BIND;
|
||||
```
|
||||
|
||||
- `int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name)`
|
||||
|
@ -329,7 +342,7 @@ The specific functions related to the interface are as follows (see also the [pr
|
|||
(Available in 2.1.1.0 and later versions, only supported for replacing parameter values in INSERT statements)
|
||||
When the table name in the SQL command uses `? ` placeholder, you can use this function to bind a specific table name.
|
||||
|
||||
- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags)`
|
||||
- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_MULTI_BIND* tags)`
|
||||
|
||||
(Available in 2.1.2.0 and later versions, only supported for replacing parameter values in INSERT statements)
|
||||
When the table name and TAGS in the SQL command both use `? `, you can use this function to bind the specific table name and the specific TAGS value. The most typical usage scenario is an INSERT statement that uses the automatic table building function (the current version does not support specifying specific TAGS columns.) The number of columns in the TAGS parameter needs to be the same as the number of TAGS requested in the SQL command.
|
||||
|
@ -358,6 +371,14 @@ The specific functions related to the interface are as follows (see also the [pr
|
|||
|
||||
Execute the prepared statement. Currently, a statement can only be executed once.
|
||||
|
||||
- `int taos_stmt_affected_rows(TAOS_STMT *stmt)`
|
||||
|
||||
Gets the number of rows affected by executing bind statements multiple times.
|
||||
|
||||
- `int taos_stmt_affected_rows_once(TAOS_STMT *stmt)`
|
||||
|
||||
Gets the number of rows affected by executing a bind statement once.
|
||||
|
||||
- `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)`
|
||||
|
||||
Gets the result set of a statement. Use the result set in the same way as in the non-parametric call. When finished, `taos_free_result()` should be called on this result set to free resources.
|
||||
|
@ -514,4 +535,4 @@ In addition to writing data using the SQL method or the parameter binding API, w
|
|||
- topics: a list of topics subscribed by consumers,need to be freed by tmq_list_destroy
|
||||
|
||||
**Return value**
|
||||
- Zero indicates success; non-zero indicates failure. The error message can be obtained through `char *tmq_err2str(int32_t code)`
|
||||
- Zero indicates success; non-zero indicates failure. The error message can be obtained through `char *tmq_err2str(int32_t code)`
|
||||
|
|
|
@ -36,6 +36,7 @@ REST connection supports all platforms that can run Java.
|
|||
|
||||
| taos-jdbcdriver version | major changes | TDengine version |
|
||||
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: |
|
||||
| 3.2.5 | Subscription add committed() and assignment() method | 3.1.0.3 or later |
|
||||
| 3.2.4 | Subscription add the enable.auto.commit parameter and the unsubscribe() method in the WebSocket connection | - |
|
||||
| 3.2.3 | Fixed resultSet data parsing failure in some cases | - |
|
||||
| 3.2.2 | Subscription add seek function | 3.0.5.0 or later |
|
||||
|
@ -1019,14 +1020,19 @@ while(true) {
|
|||
#### Assignment subscription Offset
|
||||
|
||||
```java
|
||||
// get topicPartition
|
||||
Set<TopicPartition> assignment() throws SQLException;
|
||||
// get offset
|
||||
long position(TopicPartition partition) throws SQLException;
|
||||
Map<TopicPartition, Long> position(String topic) throws SQLException;
|
||||
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
|
||||
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
|
||||
Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;
|
||||
|
||||
// Overrides the fetch offsets that the consumer will use on the next poll(timeout).
|
||||
void seek(TopicPartition partition, long offset) throws SQLException;
|
||||
void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
|
||||
void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
|
||||
```
|
||||
|
||||
Example usage is as follows.
|
||||
|
@ -1052,6 +1058,18 @@ try (TaosConsumer<ResultBean> consumer = new TaosConsumer<>(properties)) {
|
|||
}
|
||||
```
|
||||
|
||||
#### Commit offset
|
||||
|
||||
If `enable.auto.commit` is false, offset can be submitted manually.
|
||||
|
||||
```java
|
||||
void commitSync() throws SQLException;
|
||||
void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;
|
||||
// async commit only support jni connection
|
||||
void commitAsync(OffsetCommitCallback<V> callback) throws SQLException;
|
||||
void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback<V> callback) throws SQLException;
|
||||
```
|
||||
|
||||
#### Close subscriptions
|
||||
|
||||
```java
|
||||
|
@ -1174,7 +1192,6 @@ public abstract class ConsumerLoop {
|
|||
config.setProperty("client.id", "1");
|
||||
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
|
||||
config.setProperty("value.deserializer.encoding", "UTF-8");
|
||||
config.setProperty("experimental.snapshot.enable", "true");
|
||||
|
||||
this.consumer = new TaosConsumer<>(config);
|
||||
this.topics = Collections.singletonList("topic_speed");
|
||||
|
|
|
@ -30,6 +30,10 @@ The source code of `TDengine.Connector` is hosted on [GitHub](https://github.com
|
|||
|
||||
The supported platforms are the same as those supported by the TDengine client driver.
|
||||
|
||||
:::note
|
||||
Please note TDengine does not support 32bit Windows any more.
|
||||
:::
|
||||
|
||||
## Version support
|
||||
|
||||
Please refer to [version support list](/reference/connector#version-support)
|
||||
|
|
|
@ -102,6 +102,8 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
|
|||
-L, --loose-mode Use loose mode if the table name and column name
|
||||
use letter and number only. Default is NOT.
|
||||
-n, --no-escape No escape char '`'. Default is using it.
|
||||
-Q, --dot-replace Repalce dot character with underline character in
|
||||
the table name.
|
||||
-T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is
|
||||
8.
|
||||
-C, --cloud=CLOUD_DSN specify a DSN to access TDengine cloud service
|
||||
|
|
|
@ -13,7 +13,7 @@ After TDengine starts, it automatically writes many metrics in specific interval
|
|||
To deploy TDinsight, we need
|
||||
- a single-node TDengine server or a multi-node TDengine cluster and a [Grafana] server are required. This dashboard requires TDengine 3.0.1.0 and above, with the monitoring feature enabled. For detailed configuration, please refer to [TDengine monitoring configuration](../config/#monitoring-parameters).
|
||||
- taosAdapter has been installed and running, please refer to [taosAdapter](../taosadapter).
|
||||
- taosKeeper has been installed and running, please refer to [taosKeeper](../taosKeeper).
|
||||
- taosKeeper has been installed and running, please note the monitor-related items in taos.cfg file need be configured. Refer to [taosKeeper](../taosKeeper) for details.
|
||||
|
||||
Please record
|
||||
- The endpoint of taosAdapter REST service, for example `http://tdengine.local:6041`
|
||||
|
@ -80,7 +80,7 @@ chmod +x TDinsight.sh
|
|||
./TDinsight.sh
|
||||
```
|
||||
|
||||
This script will automatically download the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and [TDinsight dashboard](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsightV3.json) with configurable parameters for command-line options to the [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file to automate deployment and updates, etc. With the alert setting options provided by this script, you can also get built-in support for AliCloud SMS alert notifications.
|
||||
This script will automatically download the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and [TDinsight dashboard](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsightV3.json) with configurable parameters for command-line options to the [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file to automate deployment and updates, etc.
|
||||
|
||||
Assume you use TDengine and Grafana's default services on the same host. Run `. /TDinsight.sh` and open the Grafana browser window to see the TDinsight dashboard.
|
||||
|
||||
|
@ -112,9 +112,6 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
|
|||
-i, --tdinsight-uid <string> Replace with a non-space ASCII code as the dashboard id. [default: tdinsight]
|
||||
-t, --tdinsight-title <string> Dashboard title. [default: TDinsight]
|
||||
-e, --tdinsight-editable If the provisioning dashboard could be editable. [default: false]
|
||||
|
||||
-E, --external-notifier <string> Apply external notifier uid to TDinsight dashboard.
|
||||
|
||||
```
|
||||
|
||||
Most command-line options can take effect the same as environment variables.
|
||||
|
@ -132,7 +129,10 @@ Most command-line options can take effect the same as environment variables.
|
|||
| -i | --tdinsight-uid | TDINSIGHT_DASHBOARD_UID | TDinsight `uid` of the dashboard. [default: tdinsight] |
|
||||
| -t | --tdinsight-title | TDINSIGHT_DASHBOARD_TITLE | TDinsight dashboard title. [Default: TDinsight] | -e | -tdinsight-title
|
||||
| -e | --tdinsight-editable | TDINSIGHT_DASHBOARD_EDITABLE | If the dashboard is configured to be editable. [Default: false] | -e | --external
|
||||
| -E | --external-notifier | EXTERNAL_NOTIFIER | Apply the external notifier uid to the TDinsight dashboard. | -s
|
||||
|
||||
:::note
|
||||
The `-E` option is deprecated. We use Grafana unified alerting function instead.
|
||||
:::
|
||||
|
||||
Suppose you start a TDengine database on host `tdengine` with HTTP API port `6041`, user `root1`, and password `pass5ord`. Execute the script.
|
||||
|
||||
|
@ -140,18 +140,6 @@ Suppose you start a TDengine database on host `tdengine` with HTTP API port `604
|
|||
sudo . /TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord
|
||||
```
|
||||
|
||||
We provide a "-E" option to configure TDinsight to use the existing Notification Channel from the command line. Assuming your Grafana user and password is `admin:admin`, use the following command to get the `uid` of an existing notification channel.
|
||||
|
||||
```bash
|
||||
curl --no-progress-meter -u admin:admin http://localhost:3000/api/alert-notifications | jq
|
||||
```
|
||||
|
||||
Use the `uid` value obtained above as `-E` input.
|
||||
|
||||
```bash
|
||||
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
|
||||
```
|
||||
|
||||
If you want to monitor multiple TDengine clusters, you need to set up numerous TDinsight dashboards. Setting up non-default TDinsight requires some changes: the `-n` `-i` `-t` options need to be changed to non-default names, and `-N` and `-L` should also be changed if using the built-in SMS alerting feature.
|
||||
|
||||
```bash
|
||||
|
|
|
@ -32,8 +32,10 @@ All data in tag_set is automatically converted to the NCHAR data type and does n
|
|||
|
||||
In the schemaless writing data line protocol, each data item in the field_set needs to be described with its data type. Let's explain in detail:
|
||||
|
||||
- If there are English double quotes on both sides, it indicates the VARCHAR(N) type. For example, `"abc"`.
|
||||
- If there are double quotes on both sides and an L prefix, it means NCHAR(N) type. For example, `L"error message"`.
|
||||
- If there are English double quotes on both sides, it indicates the VARCHAR type. For example, `"abc"`.
|
||||
- If there are double quotes on both sides and a L/l prefix, it means NCHAR type. For example, `L"error message"`.
|
||||
- If there are double quotes on both sides and a G/g prefix, it means GEOMETRY type. For example `G"Point(4.343 89.342)"`.
|
||||
- If there are double quotes on both sides and a B/b prefix, it means VARBINARY type. Hexadecimal start with \x or string can be used in double quotes. For example `B"\x98f46e"` `B"hello"`.
|
||||
- Spaces, equals sign (=), comma (,), double quote ("), and backslash (\\) need to be escaped with a backslash (\\) in front. (All refer to the ASCII character). The rules are as follows:
|
||||
|
||||
| **Serial number** | **Element** | **Escape characters** |
|
||||
|
@ -110,7 +112,7 @@ You can configure smlChildTableName in taos.cfg to specify table names, for exam
|
|||
Note: TDengine 3.0.3.0 and later automatically detect whether order is consistent. This parameter is no longer used.
|
||||
9. Due to the fact that SQL table names do not support period (.), schemaless has also processed period (.). If there is a period (.) in the table name automatically created by schemaless, it will be automatically replaced with an underscore (\_). If you manually specify a sub table name, if there is a dot (.) in the sub table name, it will also be converted to an underscore (\_)
|
||||
10. Taos.cfg adds the configuration of smlTsDefaultName (with a string value), which only works on the client side. After configuration, the time column name of the schemaless automatic table creation can be set through this configuration. If not configured, defaults to _ts.
|
||||
|
||||
11. Supertable names and child table names are case sensitive.
|
||||
:::tip
|
||||
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 48 KB(64 KB since version 3.0.5.0) and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
|
||||
:::
|
||||
|
|
|
@ -74,7 +74,7 @@ grafana-cli plugins install tdengine-datasource
|
|||
sudo -u grafana grafana-cli plugins install tdengine-datasource
|
||||
```
|
||||
|
||||
You can also download zip files from [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and install manually. The commands are as follows:
|
||||
You can also download zip files from [GitHub](https://github.com/taosdata/grafanaplugin/releases/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and install manually. The commands are as follows:
|
||||
|
||||
```bash
|
||||
GF_VERSION=3.3.1
|
||||
|
@ -218,11 +218,11 @@ The example to query the average system memory usage for the specified interval
|
|||
|
||||
### Importing the Dashboard
|
||||
|
||||
You can install TDinsight dashboard in data source configuration page (like `http://localhost:3000/datasources/edit/1/dashboards`) as a monitoring visualization tool for TDengine cluster. Ensure that you use TDinsight for 3.x.
|
||||
You can install TDinsight dashboard in data source configuration page (like `http://localhost:3000/datasources/edit/1/dashboards`) as a monitoring visualization tool for TDengine cluster. Ensure that you use TDinsight for 3.x. Please note TDinsight for 3.x needs to configure and run taoskeeper correctly. Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
|
||||
|
||||

|
||||
|
||||
A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167)). Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
|
||||
A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167)).
|
||||
|
||||
For more dashboards using TDengine data source, [search here in Grafana](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource). Here is a sub list:
|
||||
|
||||
|
|
|
@ -0,0 +1,440 @@
|
|||
---
|
||||
sidebar_label: Seeq
|
||||
title: Seeq
|
||||
description: How to use Seeq and TDengine to perform time series data analysis
|
||||
---
|
||||
|
||||
# How to use Seeq and TDengine to perform time series data analysis
|
||||
|
||||
## Introduction
|
||||
|
||||
Seeq is an advanced analytics software for the manufacturing industry and the Industrial Internet of Things (IIoT). Seeq supports the use of machine learning innovations within process manufacturing organizations. These capabilities enable organizations to deploy their own or third-party machine learning algorithms into advanced analytics applications used by frontline process engineers and subject matter experts, thus extending the efforts of a single data scientist to many frontline workers.
|
||||
|
||||
With the TDengine Java connector, Seeq effortlessly supports querying time series data provided by TDengine and offers functionalities such as data visualization, analysis, and forecasting.
|
||||
|
||||
### Install Seeq
|
||||
|
||||
Please download Seeq Server and Seeq Data Lab software installation package from the [Seeq official website](https://www.seeq.com/customer-download).
|
||||
|
||||
### Install and start Seeq Server
|
||||
|
||||
```
|
||||
tar xvzf seeq-server-xxx.tar.gz
|
||||
cd seeq-server-installer
|
||||
sudo ./install
|
||||
|
||||
sudo seeq service enable
|
||||
sudo seeq start
|
||||
```
|
||||
|
||||
### Install and start Seeq Data Lab Server
|
||||
|
||||
Seeq Data Lab needs to be installed on a separate server from Seeq Server and connected to Seeq Server through configuration. For detailed installation and configuration instructions, please refer to [the official documentation](https://support.seeq.com/space/KB/1034059842).
|
||||
|
||||
```
|
||||
tar xvf seeq-data-lab-<version>-64bit-linux.tar.gz
|
||||
sudo seeq-data-lab-installer/install -f /opt/seeq/seeq-data-lab -g /var/opt/seeq -u seeq
|
||||
sudo seeq config set Network/DataLab/Hostname localhost
|
||||
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
|
||||
sudo seeq config set Network/Hostname <value> # the host IP or URL of the main Seeq Server
|
||||
|
||||
# If the main Seeq server is configured to listen over HTTPS
|
||||
sudo seeq config set Network/Webserver/SecurePort 443 # the secure port of the main Seeq Server (usually 443)
|
||||
|
||||
# If the main Seeq server is NOT configured to listen over HTTPS
|
||||
sudo seeq config set Network/Webserver/Port <value>
|
||||
|
||||
#On the main Seeq server, open a Seeq Command Prompt and set the hostname of the Data Lab server:
|
||||
sudo seeq config set Network/DataLab/Hostname <value> # the host IP (not URL) of the Data Lab server
|
||||
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
|
||||
```
|
||||
|
||||
### Install TDengine on-premise instance
|
||||
|
||||
See [Quick Install from Package](../../get-started).
|
||||
|
||||
### Or use TDengine Cloud
|
||||
|
||||
Register for a [TDengine Cloud](https://cloud.tdengine.com) account and log in to your account.
|
||||
|
||||
## Make Seeq be able to access TDengine
|
||||
|
||||
1. Get data location configuration
|
||||
|
||||
```
|
||||
sudo seeq config get Folders/Data
|
||||
```
|
||||
|
||||
2. Download TDengine Java connector from maven.org. Please use the latest version (Current is 3.2.5, https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar).
|
||||
|
||||
3. Restart Seeq server
|
||||
|
||||
```
|
||||
sudo seeq restart
|
||||
```
|
||||
|
||||
4. Input License
|
||||
|
||||
Use a browser to access ip:34216 and input the license according to the guide.
|
||||
|
||||
## How to use Seeq to analyze time-series data that TDengine serves
|
||||
|
||||
This chapter demonstrates how to use Seeq software in conjunction with TDengine for time series data analysis.
|
||||
|
||||
### Scenario Overview
|
||||
|
||||
The example scenario involves a power system where users collect electricity consumption data from metering devices at a power station on a daily basis. This data is stored in a TDengine cluster. The user now wants to predict how the electricity consumption will develop and purchase additional equipment to support it. The electricity consumption varies with monthly orders, and seasonal variations also affect the power consumption. Since the city is located in the Northern Hemisphere, more electricity is consumed during the summer. We will use simulated data to reflect these assumptions.
|
||||
|
||||
### Schema
|
||||
|
||||
```
|
||||
CREATE STABLE meters (ts TIMESTAMP, num INT, temperature FLOAT, goods INT) TAGS (device NCHAR(20));
|
||||
CREATE TABLE goods (ts1 TIMESTAMP, ts2 TIMESTAMP, goods FLOAT);
|
||||
```
|
||||
|
||||

|
||||
|
||||
### Mock data
|
||||
|
||||
```
|
||||
python mockdata.py
|
||||
taos -s "insert into power.goods select _wstart, _wstart + 10d, avg(goods) from power.meters interval(10d);"
|
||||
```
|
||||
|
||||
The source code is hosted at [GitHub Repository](https://github.com/sangshuduo/td-forecasting).
|
||||
|
||||
### Using Seeq for data analysis
|
||||
|
||||
#### Data Source configuration
|
||||
|
||||
Please log in as a Seeq administrator and create a few data sources as follows.
|
||||
|
||||
- Power
|
||||
|
||||
```
|
||||
{
|
||||
"QueryDefinitions": [
|
||||
{
|
||||
"Name": "PowerNum",
|
||||
"Type": "SIGNAL",
|
||||
"Sql": "SELECT ts, num FROM meters",
|
||||
"Enabled": true,
|
||||
"TestMode": false,
|
||||
"TestQueriesDuringSync": true,
|
||||
"InProgressCapsulesEnabled": false,
|
||||
"Variables": null,
|
||||
"Properties": [
|
||||
{
|
||||
"Name": "Name",
|
||||
"Value": "Num",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
},
|
||||
{
|
||||
"Name": "Interpolation Method",
|
||||
"Value": "linear",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
},
|
||||
{
|
||||
"Name": "Maximum Interpolation",
|
||||
"Value": "2day",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
}
|
||||
],
|
||||
"CapsuleProperties": null
|
||||
}
|
||||
],
|
||||
"Type": "GENERIC",
|
||||
"Hostname": null,
|
||||
"Port": 0,
|
||||
"DatabaseName": null,
|
||||
"Username": "root",
|
||||
"Password": "taosdata",
|
||||
"InitialSql": null,
|
||||
"TimeZone": null,
|
||||
"PrintRows": false,
|
||||
"UseWindowsAuth": false,
|
||||
"SqlFetchBatchSize": 100000,
|
||||
"UseSSL": false,
|
||||
"JdbcProperties": null,
|
||||
"GenericDatabaseConfig": {
|
||||
"DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
|
||||
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
|
||||
"ResolutionInNanoseconds": 1000,
|
||||
"ZonedColumnTypes": []
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- Goods
|
||||
|
||||
```
|
||||
{
|
||||
"QueryDefinitions": [
|
||||
{
|
||||
"Name": "PowerGoods",
|
||||
"Type": "CONDITION",
|
||||
"Sql": "SELECT ts1, ts2, goods FROM power.goods",
|
||||
"Enabled": true,
|
||||
"TestMode": false,
|
||||
"TestQueriesDuringSync": true,
|
||||
"InProgressCapsulesEnabled": false,
|
||||
"Variables": null,
|
||||
"Properties": [
|
||||
{
|
||||
"Name": "Name",
|
||||
"Value": "Goods",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
},
|
||||
{
|
||||
"Name": "Maximum Duration",
|
||||
"Value": "10days",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
}
|
||||
],
|
||||
"CapsuleProperties": [
|
||||
{
|
||||
"Name": "goods",
|
||||
"Value": "${columnResult}",
|
||||
"Column": "goods",
|
||||
"Uom": "string"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"Type": "GENERIC",
|
||||
"Hostname": null,
|
||||
"Port": 0,
|
||||
"DatabaseName": null,
|
||||
"Username": "root",
|
||||
"Password": "taosdata",
|
||||
"InitialSql": null,
|
||||
"TimeZone": null,
|
||||
"PrintRows": false,
|
||||
"UseWindowsAuth": false,
|
||||
"SqlFetchBatchSize": 100000,
|
||||
"UseSSL": false,
|
||||
"JdbcProperties": null,
|
||||
"GenericDatabaseConfig": {
|
||||
"DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
|
||||
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
|
||||
"ResolutionInNanoseconds": 1000,
|
||||
"ZonedColumnTypes": []
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- Temperature
|
||||
|
||||
```
|
||||
{
|
||||
"QueryDefinitions": [
|
||||
{
|
||||
"Name": "PowerNum",
|
||||
"Type": "SIGNAL",
|
||||
"Sql": "SELECT ts, temperature FROM meters",
|
||||
"Enabled": true,
|
||||
"TestMode": false,
|
||||
"TestQueriesDuringSync": true,
|
||||
"InProgressCapsulesEnabled": false,
|
||||
"Variables": null,
|
||||
"Properties": [
|
||||
{
|
||||
"Name": "Name",
|
||||
"Value": "Temperature",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
},
|
||||
{
|
||||
"Name": "Interpolation Method",
|
||||
"Value": "linear",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
},
|
||||
{
|
||||
"Name": "Maximum Interpolation",
|
||||
"Value": "2day",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
}
|
||||
],
|
||||
"CapsuleProperties": null
|
||||
}
|
||||
],
|
||||
"Type": "GENERIC",
|
||||
"Hostname": null,
|
||||
"Port": 0,
|
||||
"DatabaseName": null,
|
||||
"Username": "root",
|
||||
"Password": "taosdata",
|
||||
"InitialSql": null,
|
||||
"TimeZone": null,
|
||||
"PrintRows": false,
|
||||
"UseWindowsAuth": false,
|
||||
"SqlFetchBatchSize": 100000,
|
||||
"UseSSL": false,
|
||||
"JdbcProperties": null,
|
||||
"GenericDatabaseConfig": {
|
||||
"DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
|
||||
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
|
||||
"ResolutionInNanoseconds": 1000,
|
||||
"ZonedColumnTypes": []
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Launch Seeq Workbench
|
||||
|
||||
Please login to Seeq server with IP:port and create a new Seeq Workbench, then select data sources and choose the correct tools to do data visualization and analysis. Please refer to [the official documentation](https://support.seeq.com/space/KB/146440193/Seeq+Workbench) for the details.
|
||||
|
||||

|
||||
|
||||
#### Use Seeq Data Lab Server for advanced data analysis
|
||||
|
||||
Please log in to the Seeq service with IP:port and create a new Seeq Data Lab. Then you can use advanced tools including Python environment and machine learning add-ons for more complex analysis.
|
||||
|
||||
```Python
|
||||
from seeq import spy
|
||||
spy.options.compatibility = 189
|
||||
import pandas as pd
|
||||
import matplotlib
|
||||
import matplotlib.pyplot as plt
|
||||
import mlforecast
|
||||
import lightgbm as lgb
|
||||
from mlforecast.target_transforms import Differences
|
||||
from sklearn.linear_model import LinearRegression
|
||||
|
||||
ds = spy.search({'ID': "8C91A9C7-B6C2-4E18-AAAF-XXXXXXXXX"})
|
||||
print(ds)
|
||||
|
||||
sig = ds.loc[ds['Name'].isin(['Num'])]
|
||||
print(sig)
|
||||
|
||||
data = spy.pull(sig, start='2015-01-01', end='2022-12-31', grid=None)
|
||||
print("data.info()")
|
||||
data.info()
|
||||
print(data)
|
||||
#data.plot()
|
||||
|
||||
print("data[Num].info()")
|
||||
data['Num'].info()
|
||||
da = data['Num'].index.tolist()
|
||||
#print(da)
|
||||
|
||||
li = data['Num'].tolist()
|
||||
#print(li)
|
||||
|
||||
data2 = pd.DataFrame()
|
||||
data2['ds'] = da
|
||||
print('1st data2 ds info()')
|
||||
data2['ds'].info()
|
||||
|
||||
#data2['ds'] = pd.to_datetime(data2['ds']).to_timestamp()
|
||||
data2['ds'] = pd.to_datetime(data2['ds']).astype('int64')
|
||||
data2['y'] = li
|
||||
print('2nd data2 ds info()')
|
||||
data2['ds'].info()
|
||||
print(data2)
|
||||
|
||||
data2.insert(0, column = "unique_id", value="unique_id")
|
||||
|
||||
print("Forecasting ...")
|
||||
|
||||
forecast = mlforecast.MLForecast(
|
||||
models = lgb.LGBMRegressor(),
|
||||
freq = 1,
|
||||
lags=[365],
|
||||
target_transforms=[Differences([365])],
|
||||
)
|
||||
|
||||
forecast.fit(data2)
|
||||
predicts = forecast.predict(365)
|
||||
|
||||
pd.concat([data2, predicts]).set_index("ds").plot(title = "current data with forecast")
|
||||
plt.show()
|
||||
```
|
||||
|
||||
Example output:
|
||||
|
||||

|
||||
|
||||
### How to configure Seeq data source to access TDengine Cloud
|
||||
|
||||
Configuring a Seeq data source connection to TDengine Cloud or a local installation instance does not have any essential differences. After logging in to TDengine Cloud, select "Programming - Java" and copy the JDBC URL string with the token provided. Then, use this JDBC URL string to fill in the DatabaseJdbcUrl value in the Seeq Data Source configuration.
|
||||
|
||||
Please note that when using TDengine Cloud, you need to specify the database name in your SQL commands.
|
||||
|
||||
#### The data source of TDengine Cloud example
|
||||
|
||||
```
|
||||
{
|
||||
"QueryDefinitions": [
|
||||
{
|
||||
"Name": "CloudVoltage",
|
||||
"Type": "SIGNAL",
|
||||
"Sql": "SELECT ts, voltage FROM test.meters",
|
||||
"Enabled": true,
|
||||
"TestMode": false,
|
||||
"TestQueriesDuringSync": true,
|
||||
"InProgressCapsulesEnabled": false,
|
||||
"Variables": null,
|
||||
"Properties": [
|
||||
{
|
||||
"Name": "Name",
|
||||
"Value": "Voltage",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
},
|
||||
{
|
||||
"Name": "Interpolation Method",
|
||||
"Value": "linear",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
},
|
||||
{
|
||||
"Name": "Maximum Interpolation",
|
||||
"Value": "2day",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
}
|
||||
],
|
||||
"CapsuleProperties": null
|
||||
}
|
||||
],
|
||||
"Type": "GENERIC",
|
||||
"Hostname": null,
|
||||
"Port": 0,
|
||||
"DatabaseName": null,
|
||||
"Username": "root",
|
||||
"Password": "taosdata",
|
||||
"InitialSql": null,
|
||||
"TimeZone": null,
|
||||
"PrintRows": false,
|
||||
"UseWindowsAuth": false,
|
||||
"SqlFetchBatchSize": 100000,
|
||||
"UseSSL": false,
|
||||
"JdbcProperties": null,
|
||||
"GenericDatabaseConfig": {
|
||||
"DatabaseJdbcUrl": "jdbc:TAOS-RS://gw.cloud.taosdata.com?useSSL=true&token=41ac9d61d641b6b334e8b76f45f5a8XXXXXXXXXX",
|
||||
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
|
||||
"ResolutionInNanoseconds": 1000,
|
||||
"ZonedColumnTypes": []
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Seeq Workbench with TDengine Cloud data source example
|
||||
|
||||

|
||||
|
||||
## Conclusion
|
||||
|
||||
By integrating Seeq and TDengine, it is possible to leverage the efficient storage and querying performance of TDengine while also benefiting from Seeq's powerful data visualization and analysis capabilities provided to users.
|
||||
|
||||
This integration allows users to take advantage of TDengine's high-performance time-series data storage and retrieval, ensuring efficient handling of large volumes of data. At the same time, Seeq provides advanced analytics features such as data visualization, anomaly detection, correlation analysis, and predictive modeling, enabling users to gain valuable insights and make data-driven decisions.
|
||||
|
||||
Together, Seeq and TDengine provide a comprehensive solution for time series data analysis in diverse industries such as manufacturing, IIoT, and power systems. The combination of efficient data storage and advanced analytics empowers users to unlock the full potential of their time series data, driving operational improvements, and enabling predictive and prescriptive analytics applications.
|
After Width: | Height: | Size: 13 KiB |
After Width: | Height: | Size: 56 KiB |
After Width: | Height: | Size: 26 KiB |
After Width: | Height: | Size: 47 KiB |
|
@ -10,6 +10,18 @@ For TDengine 2.x installation packages by version, please visit [here](https://t
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 3.1.1.0
|
||||
|
||||
<Release type="tdengine" version="3.1.1.0" />
|
||||
|
||||
## 3.1.0.3
|
||||
|
||||
<Release type="tdengine" version="3.1.0.3" />
|
||||
|
||||
## 3.1.0.2
|
||||
|
||||
<Release type="tdengine" version="3.1.0.2" />
|
||||
|
||||
## 3.1.0.0
|
||||
|
||||
:::note IMPORTANT
|
||||
|
|
|
@ -78,6 +78,7 @@ int printRow(char *str, TAOS_ROW row, TAOS_FIELD *fields, int numFields) {
|
|||
} break;
|
||||
|
||||
case TSDB_DATA_TYPE_BINARY:
|
||||
case TSDB_DATA_TYPE_VARBINARY:
|
||||
case TSDB_DATA_TYPE_NCHAR:
|
||||
case TSDB_DATA_TYPE_GEOMETRY: {
|
||||
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
|
||||
|
|
|
@ -51,7 +51,7 @@ void insertData(TAOS *taos) {
|
|||
int code = taos_stmt_prepare(stmt, sql, 0);
|
||||
checkErrorCode(stmt, code, "failed to execute taos_stmt_prepare");
|
||||
// bind table name and tags
|
||||
TAOS_BIND tags[2];
|
||||
TAOS_MULTI_BIND tags[2];
|
||||
char *location = "California.SanFrancisco";
|
||||
int groupId = 2;
|
||||
tags[0].buffer_type = TSDB_DATA_TYPE_BINARY;
|
||||
|
@ -144,4 +144,4 @@ int main() {
|
|||
}
|
||||
|
||||
// output:
|
||||
// successfully inserted 2 rows
|
||||
// successfully inserted 2 rows
|
||||
|
|
|
@ -76,6 +76,7 @@ int printRow(char *str, TAOS_ROW row, TAOS_FIELD *fields, int numFields) {
|
|||
} break;
|
||||
|
||||
case TSDB_DATA_TYPE_BINARY:
|
||||
case TSDB_DATA_TYPE_VARBINARY:
|
||||
case TSDB_DATA_TYPE_NCHAR:
|
||||
case TSDB_DATA_TYPE_GEOMETRY: {
|
||||
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
|
||||
|
|
|
@ -58,7 +58,7 @@ void insertData(TAOS *taos) {
|
|||
int code = taos_stmt_prepare(stmt, sql, 0);
|
||||
checkErrorCode(stmt, code, "failed to execute taos_stmt_prepare");
|
||||
// bind table name and tags
|
||||
TAOS_BIND tags[2];
|
||||
TAOS_MULTI_BIND tags[2];
|
||||
char* location = "California.SanFrancisco";
|
||||
int groupId = 2;
|
||||
tags[0].buffer_type = TSDB_DATA_TYPE_BINARY;
|
||||
|
@ -82,7 +82,7 @@ void insertData(TAOS *taos) {
|
|||
{1648432611749, 12.6, 218, 0.33},
|
||||
};
|
||||
|
||||
TAOS_BIND values[4];
|
||||
TAOS_MULTI_BIND values[4];
|
||||
values[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
|
||||
values[0].buffer_length = sizeof(int64_t);
|
||||
values[0].length = &values[0].buffer_length;
|
||||
|
@ -138,4 +138,4 @@ int main() {
|
|||
|
||||
|
||||
// output:
|
||||
// successfully inserted 2 rows
|
||||
// successfully inserted 2 rows
|
||||
|
|
|
@ -10,7 +10,7 @@ TDengine 充分利用了时序数据的特点,提出了“一个数据采集
|
|||
|
||||
如果你是开发工程师,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要复制粘贴示例代码,针对自己的应用稍作改动,就能跑起来。
|
||||
|
||||
我们已经生活在大数据时代,纵向扩展已经无法满足日益增长的业务需求,任何系统都必须具有水平扩展的能力,集群成为大数据以及 Database 系统的不可缺失功能。TDengine 团队不仅实现了集群功能,而且将这一重要核心功能开源。怎么部署、管理和维护 TDengine 集群,请仔细参考[部署集群](./deployment)一章。
|
||||
我们已经生活在大数据时代,纵向扩展已经无法满足日益增长的业务需求,任何系统都必须具有水平扩展的能力,集群成为大数据以及 Database 系统的不可缺失功能。TDengine 团队不仅实现了集群功能,而且将这一重要核心功能开源。怎么部署、管理和维护 TDengine 集群,请仔细参考[部署集群]一章。
|
||||
|
||||
TDengine 采用 SQL 作为查询语言,大大降低学习成本、降低迁移成本,但同时针对时序数据场景,又做了一些扩展,以支持插值、降采样、时间加权平均等操作。[SQL 手册](./taos-sql)一章详细描述了 SQL 语法、详细列出了各种支持的命令和函数。
|
||||
|
||||
|
@ -18,8 +18,6 @@ TDengine 采用 SQL 作为查询语言,大大降低学习成本、降低迁移
|
|||
|
||||
如果你对 TDengine 的外围工具、REST API、各种编程语言的连接器(Connector)想做更多详细了解,请看[参考指南](./reference)一章。
|
||||
|
||||
如果你对 TDengine 的内部架构设计很有兴趣,欢迎仔细阅读[技术内幕](./tdinternal)一章,里面对集群的设计、数据分区、分片、写入、读出、查询、聚合查询的流程都做了详细的介绍。如果你想研读 TDengine 代码甚至贡献代码,请一定仔细读完这一章。
|
||||
|
||||
最后,作为一个开源软件,欢迎大家的参与。如果发现文档有任何错误、描述不清晰的地方,请在每个页面的最下方,点击“编辑本文档”直接进行修改。
|
||||
|
||||
Together, we make a difference!
|
||||
|
|
|
@ -4,13 +4,14 @@ description: 简要介绍 TDengine 的主要功能
|
|||
toc_max_heading_level: 2
|
||||
---
|
||||
|
||||
TDengine 是一款开源、高性能、云原生的[时序数据库](https://tdengine.com/tsdb/),且针对物联网、车联网、工业互联网、金融、IT 运维等场景进行了优化。TDengine 的代码,包括集群功能,都在 GNU AGPL v3.0 下开源。除核心的时序数据库功能外,TDengine 还提供[缓存](../develop/cache/)、[数据订阅](../develop/tmq)、[流式计算](../develop/stream)等其它功能以降低系统复杂度及研发和运维成本。
|
||||
TDengine 是一款专为物联网、工业互联网等场景设计并优化的大数据平台,它能安全高效地将大量设备、数据采集器每天产生的高达 TB 甚至 PB 级的数据进行汇聚、存储、分析和分发,对业务运行状态进行实时监测、预警,提供实时的商业洞察。其核心模块是高性能、集群开源、云原生、极简的时序数据库 TDengine OSS。
|
||||
|
||||
本章节介绍 TDengine 的主要功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对 TDengine 有个整体的了解。
|
||||
|
||||
本节介绍 TDengine OSS 的主要产品和功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对 TDengine OSS 有个整体了解
|
||||
|
||||
## 主要功能
|
||||
|
||||
TDengine 的主要功能如下:
|
||||
TDengine OSS 的主要功能如下:
|
||||
|
||||
1. 写入数据,支持
|
||||
- [SQL 写入](../develop/insert-data/sql-writing)
|
||||
|
@ -143,3 +144,10 @@ TDengine 的主要功能如下:
|
|||
- [TDengine VS InfluxDB ,写入性能大 PK !](https://www.taosdata.com/2021/11/05/3248.html)
|
||||
- [TDengine 和 InfluxDB 查询性能对比测试报告](https://www.taosdata.com/2022/02/22/5969.html)
|
||||
- [TDengine 与 InfluxDB、OpenTSDB、Cassandra、MySQL、ClickHouse 等数据库的对比测试报告](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf)
|
||||
|
||||
|
||||
## 主要产品
|
||||
|
||||
TDengine 有两个主要产品:TDengine Enterprise (即 TDengine 企业版)和 TDengine Cloud,关于它们的具体定义请参考
|
||||
- [TDengine 企业版](https://www.taosdata.com/tdengine-pro)
|
||||
- [TDengine 云服务](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)
|
||||
|
|
|
@ -39,8 +39,8 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
|
|||
- timestamp 支持多种时间精度。写入数据的时候需要用参数指定时间精度,支持从小时到纳秒的 6 种时间精度
|
||||
- 为了提高写入的效率,默认假设同一个超级表中 field_set 的顺序是一样的(第一条数据包含所有的 field,后面的数据按照这个顺序),如果顺序不一样,需要配置参数 smlDataFormat 为 false,否则,数据写入按照相同顺序写入,库中数据会异常。(3.0.1.3 之后的版本 smlDataFormat 默认为 false,从3.0.3.0开始,该配置废弃) [TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议)
|
||||
- 默认产生的子表名是根据规则生成的唯一 ID 值。用户也可以通过在client端的 taos.cfg 里配置 smlChildTableName 参数来指定某个标签值作为子表名。该标签值应该具有全局唯一性。举例如下:假设有个标签名为tname, 配置 smlChildTableName=tname, 插入数据为 st,tname=cpu1,t1=4 c1=3 1626006833639000000 则创建的子表名为 cpu1。注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。[TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议)
|
||||
- 由于sql建表表名不支持点号(.),所以schemaless也对点号(.)做了处理,自动替换为下划线(_)
|
||||
::
|
||||
|
||||
:::
|
||||
|
||||
要了解更多可参考:[InfluxDB Line 协议官方文档](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) 和 [TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议)
|
||||
|
||||
|
|
|
@ -33,7 +33,6 @@ meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
|
|||
```
|
||||
|
||||
- 默认生成的子表名是根据规则生成的唯一 ID 值。用户也可以通过在client端的 taos.cfg 里配置 smlChildTableName 参数来指定某个标签值作为子表名。该标签值应该具有全局唯一性。举例如下:假设有个标签名为tname, 配置 smlChildTableName=tname, 插入数据为 meters.current 1648432611250 11.3 tname=cpu1 location=California.LosAngeles groupid=3 则创建的表名为 cpu1,注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。
|
||||
- 由于sql建表表名不支持点号(.),所以schemaless也对点号(.)做了处理,自动替换为下划线(_)。
|
||||
参考 [OpenTSDB Telnet API 文档](http://opentsdb.net/docs/build/html/api_telnet/put.html)。
|
||||
|
||||
## 示例代码
|
||||
|
|
|
@ -48,7 +48,7 @@ OpenTSDB JSON 格式协议采用一个 JSON 字符串表示一行或多行数据
|
|||
|
||||
- 对于 JSON 格式协议,TDengine 并不会自动把所有标签转成 NCHAR 类型, 字符串将转为 NCHAR 类型, 数值将同样转换为 DOUBLE 类型。
|
||||
- 默认生成的子表名是根据规则生成的唯一 ID 值。用户也可以通过在client端的 taos.cfg 里配置 smlChildTableName 参数来指定某个标签值作为子表名。该标签值应该具有全局唯一性。举例如下:假设有个标签名为tname, 配置 smlChildTableName=tname, 插入数据为 `"tags": { "host": "web02","dc": "lga","tname":"cpu1"}` 则创建的子表名为 cpu1。注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。
|
||||
- 由于sql建表表名不支持点号(.),所以schemaless也对点号(.)做了处理,自动替换为下划线(_)。
|
||||
|
||||
:::
|
||||
|
||||
## 示例代码
|
||||
|
|
|
@ -60,17 +60,17 @@ import CDemo from "./_sub_c.mdx";
|
|||
typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param));
|
||||
|
||||
typedef enum tmq_conf_res_t {
|
||||
TMQ_CONF_UNKNOWN = -2,
|
||||
TMQ_CONF_INVALID = -1,
|
||||
TMQ_CONF_OK = 0,
|
||||
} tmq_conf_res_t;
|
||||
TMQ_CONF_UNKNOWN = -2,
|
||||
TMQ_CONF_INVALID = -1,
|
||||
TMQ_CONF_OK = 0,
|
||||
} tmq_conf_res_t;
|
||||
|
||||
typedef struct tmq_topic_assignment {
|
||||
int32_t vgId;
|
||||
int64_t currentOffset;
|
||||
int64_t begin;
|
||||
int64_t end; // The last version of wal + 1
|
||||
} tmq_topic_assignment;
|
||||
int32_t vgId;
|
||||
int64_t currentOffset;
|
||||
int64_t begin;
|
||||
int64_t end;
|
||||
} tmq_topic_assignment;
|
||||
|
||||
DLL_EXPORT tmq_conf_t *tmq_conf_new();
|
||||
DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
|
||||
|
@ -89,24 +89,24 @@ import CDemo from "./_sub_c.mdx";
|
|||
DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics);
|
||||
DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
|
||||
DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
|
||||
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); //Commit the msg’s offset + 1
|
||||
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
|
||||
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
|
||||
DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
|
||||
DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param);
|
||||
DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t *numOfAssignment);
|
||||
DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment);
|
||||
DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
|
||||
DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); // The current offset is the offset of the last consumed message + 1
|
||||
DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId);
|
||||
DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId);
|
||||
|
||||
DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
|
||||
DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
|
||||
DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
|
||||
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res); // Get current offset of the result
|
||||
DLL_EXPORT const char *tmq_err2str(int32_t code);
|
||||
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res);
|
||||
DLL_EXPORT const char *tmq_err2str(int32_t code);
DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
|
||||
```
|
||||
|
||||
这些 API 的文档请见 [C/C++ Connector](../../connector/cpp),下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。
|
||||
下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="java" label="Java">
|
||||
|
@ -120,7 +120,19 @@ Set<String> subscription() throws SQLException;
|
|||
|
||||
ConsumerRecords<V> poll(Duration timeout) throws SQLException;
|
||||
|
||||
Set<TopicPartition> assignment() throws SQLException;
|
||||
long position(TopicPartition partition) throws SQLException;
|
||||
Map<TopicPartition, Long> position(String topic) throws SQLException;
|
||||
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
|
||||
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
|
||||
Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;
|
||||
|
||||
void seek(TopicPartition partition, long offset) throws SQLException;
|
||||
void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
|
||||
void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
|
||||
|
||||
void commitSync() throws SQLException;
|
||||
void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;
|
||||
|
||||
void close() throws SQLException;
|
||||
```
|
||||
|
|
|
@ -296,7 +296,7 @@ ldconfig
|
|||
3. 如果 Python UDF 程序执行时,通过 PYTHONPATH 引用其它的包,可以设置 taos.cfg 的 UdfdLdLibPath 变量为PYTHONPATH的内容
|
||||
|
||||
4. 启动 `taosd` 服务
|
||||
细节请参考 [快速开始](../../get-started)
|
||||
细节请参考 [立即开始](../../get-started)
|
||||
|
||||
### 接口定义
|
||||
|
||||
|
@ -883,5 +883,5 @@ pycumsum 使用 numpy 计算输入列所有数据的累积和。
|
|||
|
||||
</details>
|
||||
## 管理和使用 UDF
|
||||
在使用 UDF 之前需要先将其加入到 TDengine 系统中。关于如何管理和使用 UDF,请参考[管理和使用 UDF](../12-taos-sql/26-udf.md)
|
||||
在使用 UDF 之前需要先将其加入到 TDengine 系统中。关于如何管理和使用 UDF,请参考[管理和使用 UDF](../../taos-sql/udf)
|
||||
|
||||
|
|
|
@ -223,7 +223,7 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
|
|||
|
||||
基础 API 用于完成创建数据库连接等工作,为其它 API 的执行提供运行时环境。
|
||||
|
||||
- `void taos_init()`
|
||||
- `int taos_init()`
|
||||
|
||||
初始化运行环境。如果没有主动调用该 API,那么调用 `taos_connect()` 时驱动将自动调用该 API,故程序一般无需手动调用。
|
||||
|
||||
|
@ -256,6 +256,12 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
|
|||
|
||||
:::
|
||||
|
||||
- `TAOS *taos_connect_auth(const char *host, const char *user, const char *auth, const char *db, uint16_t port)`
|
||||
|
||||
功能同 taos_connect。除 pass 参数替换为 auth 外,其他参数同 taos_connect。
|
||||
|
||||
- auth: 原始密码取 32 位小写 md5
|
||||
|
||||
- `char *taos_get_server_info(TAOS *taos)`
|
||||
|
||||
获取服务端版本信息。
|
||||
|
@ -272,6 +278,14 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
|
|||
- 如果,len 小于 存储db需要的空间(包含最后的'\0'),返回错误,database里赋值截断的数据,以'\0'结尾。
|
||||
- 如果,len 大于等于 存储db需要的空间(包含最后的'\0'),返回正常0,database里赋值以'\0‘结尾的db名。
|
||||
|
||||
- `int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param, int type)`
|
||||
|
||||
设置事件回调函数。
|
||||
|
||||
- fp 事件回调函数指针。函数声明:typedef void (*__taos_notify_fn_t)(void *param, void *ext, int type);其中, param 为用户自定义参数,ext 为扩展参数(依赖事件类型,针对 TAOS_NOTIFY_PASSVER 返回用户密码版本),type 为事件类型
|
||||
- param 用户自定义参数
|
||||
- type 事件类型。取值范围:1)TAOS_NOTIFY_PASSVER: 用户密码改变
|
||||
|
||||
- `void taos_close(TAOS *taos)`
|
||||
|
||||
关闭连接,其中`taos`是 `taos_connect()` 返回的句柄。
|
||||
|
@ -396,21 +410,20 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多
|
|||
|
||||
解析一条 SQL 语句,将解析结果和参数信息绑定到 stmt 上,如果参数 length 大于 0,将使用此参数作为 SQL 语句的长度,如等于 0,将自动判断 SQL 语句的长度。
|
||||
|
||||
- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind)`
|
||||
- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind)`
|
||||
|
||||
不如 `taos_stmt_bind_param_batch()` 效率高,但可以支持非 INSERT 类型的 SQL 语句。
|
||||
进行参数绑定,bind 指向一个数组(代表所要绑定的一行数据),需保证此数组中的元素数量和顺序与 SQL 语句中的参数完全一致。TAOS_BIND 的使用方法与 MySQL 中的 MYSQL_BIND 类似,具体定义如下:
|
||||
进行参数绑定,bind 指向一个数组(代表所要绑定的一行数据),需保证此数组中的元素数量和顺序与 SQL 语句中的参数完全一致。TAOS_MULTI_BIND 的使用方法与 MySQL 中的 MYSQL_BIND 类似,具体定义如下:
|
||||
|
||||
```c
|
||||
typedef struct TAOS_BIND {
|
||||
typedef struct TAOS_MULTI_BIND {
|
||||
int buffer_type;
|
||||
void * buffer;
|
||||
uintptr_t buffer_length; // not in use
|
||||
uintptr_t * length;
|
||||
int * is_null;
|
||||
int is_unsigned; // not in use
|
||||
int * error; // not in use
|
||||
} TAOS_BIND;
|
||||
void *buffer;
|
||||
uintptr_t buffer_length;
|
||||
uint32_t *length;
|
||||
char *is_null;
|
||||
int num; // the number of columns
|
||||
} TAOS_MULTI_BIND;
|
||||
```
|
||||
|
||||
- `int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name)`
|
||||
|
@ -418,7 +431,7 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多
|
|||
(2.1.1.0 版本新增,仅支持用于替换 INSERT 语句中的参数值)
|
||||
当 SQL 语句中的表名使用了 `?` 占位时,可以使用此函数绑定一个具体的表名。
|
||||
|
||||
- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags)`
|
||||
- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_MULTI_BIND* tags)`
|
||||
|
||||
(2.1.2.0 版本新增,仅支持用于替换 INSERT 语句中的参数值)
|
||||
当 SQL 语句中的表名和 TAGS 都使用了 `?` 占位时,可以使用此函数绑定具体的表名和具体的 TAGS 取值。最典型的使用场景是使用了自动建表功能的 INSERT 语句(目前版本不支持指定具体的 TAGS 列)。TAGS 参数中的列数量需要与 SQL 语句中要求的 TAGS 数量完全一致。
|
||||
|
@ -428,17 +441,6 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多
|
|||
(2.1.1.0 版本新增,仅支持用于替换 INSERT 语句中的参数值)
|
||||
以多列的方式传递待绑定的数据,需要保证这里传递的数据列的顺序、列的数量与 SQL 语句中的 VALUES 参数完全一致。TAOS_MULTI_BIND 的具体定义如下:
|
||||
|
||||
```c
|
||||
typedef struct TAOS_MULTI_BIND {
|
||||
int buffer_type;
|
||||
void * buffer;
|
||||
uintptr_t buffer_length;
|
||||
uintptr_t * length;
|
||||
char * is_null;
|
||||
int num; // the number of columns
|
||||
} TAOS_MULTI_BIND;
|
||||
```
|
||||
|
||||
- `int taos_stmt_add_batch(TAOS_STMT *stmt)`
|
||||
|
||||
将当前绑定的参数加入批处理中,调用此函数后,可以再次调用 `taos_stmt_bind_param()` 或 `taos_stmt_bind_param_batch()` 绑定新的参数。需要注意,此函数仅支持 INSERT/IMPORT 语句,如果是 SELECT 等其他 SQL 语句,将返回错误。
|
||||
|
@ -447,6 +449,14 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多
|
|||
|
||||
执行准备好的语句。目前,一条语句只能执行一次。
|
||||
|
||||
- `int taos_stmt_affected_rows(TAOS_STMT *stmt)`
|
||||
|
||||
获取执行多次绑定语句影响的行数。
|
||||
|
||||
- `int taos_stmt_affected_rows_once(TAOS_STMT *stmt)`
|
||||
|
||||
获取执行一次绑定语句影响的行数。
|
||||
|
||||
- `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)`
|
||||
|
||||
获取语句的结果集。结果集的使用方式与非参数化调用时一致,使用完成后,应对此结果集调用 `taos_free_result()` 以释放资源。
|
||||
|
@ -602,4 +612,4 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多
|
|||
- topics: 获取的 topic 列表存储在这个结构中,接口内分配内存,需调用tmq_list_destroy释放
|
||||
|
||||
**返回值**
|
||||
- 错误码,0成功,非0失败,可通过 `char *tmq_err2str(int32_t code)` 函数获取错误信息
|
||||
- 错误码,0成功,非0失败,可通过 `char *tmq_err2str(int32_t code)` 函数获取错误信息
|
||||
|
|
|
@ -36,6 +36,7 @@ REST 连接支持所有能运行 Java 的平台。
|
|||
|
||||
| taos-jdbcdriver 版本 | 主要变化 | TDengine 版本 |
|
||||
| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: |
|
||||
| 3.2.5 | 数据订阅增加 committed()、assignment() 方法 | 3.1.0.3 及更高版本 |
|
||||
| 3.2.4 | 数据订阅在 WebSocket 连接下增加 enable.auto.commit 参数,以及 unsubscribe() 方法。 | - |
|
||||
| 3.2.3 | 修复 ResultSet 在一些情况数据解析失败 | - |
|
||||
| 3.2.2 | 新增功能:数据订阅支持 seek 功能。 | 3.0.5.0 及更高版本 |
|
||||
|
@ -1003,7 +1004,7 @@ TaosConsumer consumer = new TaosConsumer<>(config);
|
|||
- httpConnectTimeout: 创建连接超时参数,单位 ms,默认为 5000 ms。仅在 WebSocket 连接下有效。
|
||||
- messageWaitTimeout: 数据传输超时参数,单位 ms,默认为 10000 ms。仅在 WebSocket 连接下有效。
|
||||
- httpPoolSize: 同一个连接下最大并行请求数。仅在 WebSocket 连接下有效。
|
||||
其他参数请参考:[Consumer 参数列表](../../../develop/tmq#创建-consumer-以及consumer-group)
|
||||
其他参数请参考:[Consumer 参数列表](../../develop/tmq#创建-consumer-以及consumer-group)
|
||||
|
||||
#### 订阅消费数据
|
||||
|
||||
|
@ -1022,14 +1023,19 @@ while(true) {
|
|||
#### 指定订阅 Offset
|
||||
|
||||
```java
|
||||
// 获取订阅的 topicPartition
|
||||
Set<TopicPartition> assignment() throws SQLException;
|
||||
// 获取 offset
|
||||
long position(TopicPartition partition) throws SQLException;
|
||||
Map<TopicPartition, Long> position(String topic) throws SQLException;
|
||||
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
|
||||
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
|
||||
Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;
|
||||
|
||||
// 指定下一次 poll 中使用的 offset
|
||||
void seek(TopicPartition partition, long offset) throws SQLException;
|
||||
void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
|
||||
void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
|
||||
```
|
||||
|
||||
示例代码:
|
||||
|
@ -1055,6 +1061,18 @@ try (TaosConsumer<ResultBean> consumer = new TaosConsumer<>(properties)) {
|
|||
}
|
||||
```
|
||||
|
||||
#### 提交 Offset
|
||||
|
||||
当`enable.auto.commit`为 false 时,可以手动提交 offset。
|
||||
|
||||
```java
|
||||
void commitSync() throws SQLException;
|
||||
void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;
|
||||
// 异步提交仅在 native 连接下有效
|
||||
void commitAsync(OffsetCommitCallback<V> callback) throws SQLException;
|
||||
void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback<V> callback) throws SQLException;
|
||||
```
|
||||
|
||||
#### 关闭订阅
|
||||
|
||||
```java
|
||||
|
@ -1064,7 +1082,7 @@ consumer.unsubscribe();
|
|||
consumer.close()
|
||||
```
|
||||
|
||||
详情请参考:[数据订阅](../../../develop/tmq)
|
||||
详情请参考:[数据订阅](../../develop/tmq)
|
||||
|
||||
#### 完整示例
|
||||
|
||||
|
@ -1355,7 +1373,7 @@ public static void main(String[] args) throws Exception {
|
|||
|
||||
**解决方法**: 更换 taos-jdbcdriver 3.0.2+ 版本。
|
||||
|
||||
其它问题请参考 [FAQ](../../../train-faq/faq)
|
||||
其它问题请参考 [FAQ](../../train-faq/faq)
|
||||
|
||||
## API 参考
|
||||
|
||||
|
|
|
@ -352,7 +352,7 @@ client.put(&sml_data)?
|
|||
|
||||
### 数据订阅
|
||||
|
||||
TDengine 通过消息队列 [TMQ](../../../taos-sql/tmq/) 启动一个订阅。
|
||||
TDengine 通过消息队列 [TMQ](../../taos-sql/tmq/) 启动一个订阅。
|
||||
|
||||
#### 创建 Topic
|
||||
|
||||
|
@ -491,7 +491,7 @@ let taos = pool.get()?;
|
|||
|
||||
## 常见问题
|
||||
|
||||
请参考 [FAQ](../../../train-faq/faq)
|
||||
请参考 [FAQ](../../train-faq/faq)
|
||||
|
||||
## API 参考
|
||||
|
||||
|
|
|
@ -29,6 +29,10 @@ import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx"
|
|||
|
||||
支持的平台和 TDengine 客户端驱动支持的平台一致。
|
||||
|
||||
:::note
|
||||
注意 TDengine 不再支持 32 位 Windows 平台。
|
||||
:::
|
||||
|
||||
## 版本支持
|
||||
|
||||
请参考[版本支持列表](../#版本支持)
|
||||
|
|
|
@ -143,6 +143,7 @@ phpize && ./configure --enable-swoole && make -j && make install
|
|||
| `TDengine\TSDB_DATA_TYPE_FLOAT` | float |
|
||||
| `TDengine\TSDB_DATA_TYPE_DOUBLE` | double |
|
||||
| `TDengine\TSDB_DATA_TYPE_BINARY` | binary |
|
||||
| `TDengine\TSDB_DATA_TYPE_VARBINARY` | varbinary |
|
||||
| `TDengine\TSDB_DATA_TYPE_TIMESTAMP` | timestamp |
|
||||
| `TDengine\TSDB_DATA_TYPE_NCHAR` | nchar |
|
||||
| `TDengine\TSDB_DATA_TYPE_UTINYINT` | utinyint |
|
||||
|
|
|
@ -4,8 +4,6 @@ import PkgListV3 from "/components/PkgListV3";
|
|||
|
||||
<PkgListV3 type={1} sys="Linux" />
|
||||
|
||||
[所有下载](../../releases/tdengine)
|
||||
|
||||
2. 解压缩软件包
|
||||
|
||||
将软件包放置在当前用户可读写的任意目录下,然后执行下面的命令:`tar -xzvf TDengine-client-VERSION.tar.gz`
|
||||
|
|
|
@ -4,8 +4,6 @@ import PkgListV3 from "/components/PkgListV3";
|
|||
|
||||
<PkgListV3 type={8} sys="macOS" />
|
||||
|
||||
[所有下载](../../releases/tdengine)
|
||||
|
||||
2. 执行安装程序,按提示选择默认值,完成安装。如果安装被阻止,可以右键或者按 Ctrl 点击安装包,选择 `打开`。
|
||||
3. 配置 taos.cfg
|
||||
|
||||
|
|
|
@ -3,9 +3,7 @@ import PkgListV3 from "/components/PkgListV3";
|
|||
1. 下载客户端安装包
|
||||
|
||||
<PkgListV3 type={4} sys="Windows" />
|
||||
|
||||
[所有下载](../../releases/tdengine)
|
||||
|
||||
|
||||
2. 执行安装程序,按提示选择默认值,完成安装
|
||||
3. 安装路径
|
||||
|
||||
|
|
|
@ -62,12 +62,13 @@ serverPort 6030
|
|||
|
||||
加入到集群中的数据节点 dnode,下表中涉及集群相关的参数必须完全相同,否则不能成功加入到集群中。
|
||||
|
||||
| **#** | **配置参数名称** | **含义** |
|
||||
| ----- | ------------------ | ------------------------------------------- |
|
||||
| 1 | statusInterval | dnode 向 mnode 报告状态时长 |
|
||||
| 2 | timezone | 时区 |
|
||||
| 3 | locale | 系统区位信息及编码格式 |
|
||||
| 4 | charset | 字符集编码 |
|
||||
| **#** | **配置参数名称** | **含义** |
|
||||
| ----- | ---------------- | ------------------------------------ |
|
||||
| 1 | statusInterval | dnode 向 mnode 报告状态时长 |
|
||||
| 2 | timezone | 时区 |
|
||||
| 3 | locale | 系统区位信息及编码格式 |
|
||||
| 4 | charset | 字符集编码 |
|
||||
| 5 | ttlChangeOnWrite | ttl 到期时间是否伴随表的修改操作改变 |
|
||||
|
||||
## 启动集群
|
||||
|
||||
|
@ -196,10 +197,10 @@ dnodeID 是集群自动分配的,不得人工指定。它在生成时是递增
|
|||
1、建立集群时使用 CREATE DNODE 增加新节点后,新节点始终显示 offline 状态?
|
||||
```sql
|
||||
1)首先要检查增加的新节点上的 taosd 服务是否已经正常启动
|
||||
|
||||
|
||||
2)如果已经启动,再检查到新节点的网络是否通畅,可以使用 ping fqdn 验证下
|
||||
|
||||
|
||||
3)如果前面两步都没有问题,这一步要检查新节点做为独立集群在运行了,可以使用 taos -h fqdn 连接上后,show dnodes; 命令查看.
|
||||
如果显示的列表与你主节点上显示的不一致,说明此节点自己单独成立了一个集群,解决的方法是停止新节点上的服务,然后清空新节点上
|
||||
如果显示的列表与你主节点上显示的不一致,说明此节点自己单独成立了一个集群,解决的方法是停止新节点上的服务,然后清空新节点上
|
||||
taos.cfg 中配置的 dataDir 目录下的所有文件,重新启动新节点服务即可解决。
|
||||
```
|
||||
```
|
||||
|
|
|
@ -42,11 +42,12 @@ CREATE DATABASE db_name PRECISION 'ns';
|
|||
| 14 | NCHAR | 自定义 | 记录包含多字节字符在内的字符串,如中文字符。每个 NCHAR 字符占用 4 字节的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\'`。NCHAR 使用时须指定字符串大小,类型为 NCHAR(10) 的列表示此列的字符串最多存储 10 个 NCHAR 字符。如果用户字符串长度超出声明长度,将会报错。 |
|
||||
| 15 | JSON | | JSON 数据类型, 只有 Tag 可以是 JSON 格式 |
|
||||
| 16 | VARCHAR | 自定义 | BINARY 类型的别名 |
|
||||
| 17 | GEOMETRY | 自定义 | 几何类型 |
|
||||
| 17 | GEOMETRY | 自定义 | 几何类型 |
|
||||
| 18 | VARBINARY | 自定义 | 可变长的二进制数据|
|
||||
|
||||
:::note
|
||||
|
||||
- 表的每行长度不能超过 48KB(从 3.0.5.0 版本开始为 64KB)(注意:每个 BINARY/NCHAR/GEOMETRY 类型的列还会额外占用 2 个字节的存储位置)。
|
||||
- 表的每行长度不能超过 48KB(从 3.0.5.0 版本开始为 64KB)(注意:每个 BINARY/NCHAR/GEOMETRY/VARBINARY 类型的列还会额外占用 2 个字节的存储位置)。
|
||||
- 虽然 BINARY 类型在底层存储上支持字节型的二进制字符,但不同编程语言对二进制数据的处理方式并不保证一致,因此建议在 BINARY 类型中只存储 ASCII 可见字符,而避免存储不可见字符。多字节的数据,例如中文字符,则需要使用 NCHAR 类型进行保存。如果强行使用 BINARY 类型保存中文字符,虽然有时也能正常读写,但并不带有字符集信息,很容易出现数据乱码甚至数据损坏等情况。
|
||||
- BINARY 类型理论上最长可以有 16,374(从 3.0.5.0 版本开始,数据列为 65,517,标签列为 16,382) 字节。BINARY 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 BINARY(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 字节的存储空间,总共固定占用 20 字节的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\'`。
|
||||
- GEOMETRY 类型数据列为最大长度为 65,517 字节,标签列最大长度为 16,382 字节。支持 2D 的 POINT、LINESTRING 和 POLYGON 子类型数据。长度计算方式如下表所示:
|
||||
|
@ -58,6 +59,7 @@ CREATE DATABASE db_name PRECISION 'ns';
|
|||
| 3 | POLYGON((1.0 1.0, 2.0 2.0, 1.0 1.0)) | 13+3*16 | 13+4094*16 | +16 |
|
||||
|
||||
- SQL 语句中的数值类型将依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999 会认为超过长整型的上边界而溢出,而 9999999999999999999.0 会被认为是有效的浮点数。
|
||||
- VARBINARY 是一种存储二进制数据的数据类型,最大长度为 65,517 字节,标签列最大长度为 16,382 字节。可以通过sql或schemaless方式写入二进制数据(需要转换为\x开头的字符串写入),也可以通过stmt方式写入(可以直接使用二进制)。显示时通过16进制\x开头。
|
||||
|
||||
:::
|
||||
|
||||
|
|
|
@ -7,9 +7,9 @@ description: 查询数据的详细语法
|
|||
## 查询语法
|
||||
|
||||
```sql
|
||||
SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE()}
|
||||
SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE() | CURRENT_USER() | USER() }
|
||||
|
||||
SELECT [DISTINCT] select_list
|
||||
SELECT [hints] [DISTINCT] select_list
|
||||
from_clause
|
||||
[WHERE condition]
|
||||
[partition_by_clause]
|
||||
|
@ -21,6 +21,11 @@ SELECT [DISTINCT] select_list
|
|||
[LIMIT limit_val [OFFSET offset_val]]
|
||||
[>> export_file]
|
||||
|
||||
hints: /*+ [hint([hint_param_list])] [hint([hint_param_list])] */
|
||||
|
||||
hint:
|
||||
BATCH_SCAN | NO_BATCH_SCAN
|
||||
|
||||
select_list:
|
||||
select_expr [, select_expr] ...
|
||||
|
||||
|
@ -70,6 +75,29 @@ order_expr:
|
|||
{expr | position | c_alias} [DESC | ASC] [NULLS FIRST | NULLS LAST]
|
||||
```
|
||||
|
||||
## Hints
|
||||
|
||||
Hints 是用户控制单个语句查询优化的一种手段,当 Hint 不适用于当前的查询语句时会被自动忽略,具体说明如下:
|
||||
|
||||
- Hints 语法以`/*+`开始,终于`*/`,前后可有空格。
|
||||
- Hints 语法只能跟随在 SELECT 关键字后。
|
||||
- 每个 Hints 可以包含多个 Hint,Hint 间以空格分开,当多个 Hint 冲突或相同时以先出现的为准。
|
||||
- 当 Hints 中某个 Hint 出现错误时,错误出现之前的有效 Hint 仍然有效,当前及之后的 Hint 被忽略。
|
||||
- hint_param_list 是每个 Hint 的参数,根据每个 Hint 的不同而不同。
|
||||
|
||||
目前支持的 Hints 列表如下:
|
||||
|
||||
| **Hint** | **参数** | **说明** | **适用范围** |
|
||||
| :-----------: | -------------- | -------------------------- | -------------------------- |
|
||||
| BATCH_SCAN | 无 | 采用批量读表的方式 | 超级表 JOIN 语句 |
|
||||
| NO_BATCH_SCAN | 无 | 采用顺序读表的方式 | 超级表 JOIN 语句 |
|
||||
|
||||
举例:
|
||||
|
||||
```sql
|
||||
SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
|
||||
```
|
||||
|
||||
## 列表
|
||||
|
||||
查询语句可以指定部分或全部列作为返回结果。数据列和标签列都可以出现在列表中。
|
||||
|
@ -167,7 +195,7 @@ SELECT table_name, tag_name, tag_type, tag_value FROM information_schema.ins_tag
|
|||
SELECT COUNT(*) FROM (SELECT DISTINCT TBNAME FROM meters);
|
||||
```
|
||||
|
||||
以上两个查询均只支持在 WHERE 条件子句中添加针对标签(TAGS)的过滤条件。例如:
|
||||
以上两个查询均只支持在 WHERE 条件子句中添加针对标签(TAGS)的过滤条件。
|
||||
|
||||
**\_QSTART/\_QEND**
|
||||
|
||||
|
@ -209,8 +237,7 @@ TDengine 支持基于时间戳主键的 INNER JOIN,规则如下:
|
|||
3. 对于超级表,ON 条件在时间戳主键的等值条件之外,还要求有可以一一对应的标签列等值条件,不支持 OR 条件。
|
||||
4. 参与 JOIN 计算的表只能是同一种类型,即只能都是超级表,或都是子表,或都是普通表。
|
||||
5. JOIN 两侧均支持子查询。
|
||||
6. 参与 JOIN 的表个数上限为 10 个。
|
||||
7. 不支持与 FILL 子句混合使用。
|
||||
6. 不支持与 FILL 子句混合使用。
|
||||
|
||||
## GROUP BY
|
||||
|
||||
|
@ -301,6 +328,12 @@ SELECT TODAY();
|
|||
SELECT TIMEZONE();
|
||||
```
|
||||
|
||||
### 获取当前用户
|
||||
|
||||
```sql
|
||||
SELECT CURRENT_USER();
|
||||
```
|
||||
|
||||
## 正则表达式过滤
|
||||
|
||||
### 语法
|
||||
|
@ -354,7 +387,7 @@ SELECT AVG(CASE WHEN voltage < 200 or voltage > 250 THEN 220 ELSE voltage END) F
|
|||
|
||||
## JOIN 子句
|
||||
|
||||
TDengine 支持基于时间戳主键的内连接,即 JOIN 条件必须包含时间戳主键。只要满足基于时间戳主键这个要求,普通表、子表、超级表和子查询之间可以随意的进行内连接,且对表个数没有限制。
|
||||
TDengine 支持基于时间戳主键的内连接,即 JOIN 条件必须包含时间戳主键。只要满足基于时间戳主键这个要求,普通表、子表、超级表和子查询之间可以随意的进行内连接,且对表个数没有限制,其它连接条件与主键间必须是 AND 操作。
|
||||
|
||||
普通表与普通表之间的 JOIN 操作:
|
||||
|
||||
|
|
|
@ -48,4 +48,6 @@ SELECT * FROM information_schema.INS_INDEXES
|
|||
|
||||
6. 不支持对普通和子表建立索引。
|
||||
|
||||
7. 如果某个 tag 列的唯一值较少时,不建议对其建立索引,这种情况下收效甚微。
|
||||
7. 如果某个 tag 列的唯一值较少时,不建议对其建立索引,这种情况下收效甚微。
|
||||
|
||||
8. 新建立的超级表,会给第一列tag,随机生成一个indexNewName, 生成规则是:tag0的name + 23个byte, 在系统表可以查,也可以按需要drop,行为和其他列tag 的索引一样
|
||||
|
|
|
@ -292,11 +292,11 @@ CONCAT_WS(separator_expr, expr1, expr2 [, expr] ...)
|
|||
LENGTH(expr)
|
||||
```
|
||||
|
||||
**功能说明**:以字节计数的字符串长度。
|
||||
**功能说明**:以字节计数的长度。
|
||||
|
||||
**返回结果类型**:BIGINT。
|
||||
|
||||
**适用数据类型**:输入参数是 VARCHAR 类型或者 NCHAR 类型的字符串或者列。
|
||||
**适用数据类型**:VARCHAR, NCHAR, VARBINARY。
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
|
@ -402,7 +402,7 @@ CAST(expr AS type_name)
|
|||
|
||||
**返回结果类型**:CAST 中指定的类型(type_name)。
|
||||
|
||||
**适用数据类型**:输入参数 expression 的类型可以是除JSON外的所有类型。
|
||||
**适用数据类型**:输入参数 expr 的类型可以是除JSON和VARBINARY外的所有类型。如果 type_name 为 VARBINARY,则 expr 只能是 VARCHAR 类型。
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
|
@ -700,7 +700,7 @@ ELAPSED(ts_primary_key [, time_unit])
|
|||
LEASTSQUARES(expr, start_val, step_val)
|
||||
```
|
||||
|
||||
**功能说明**:统计表中某列的值是主键(时间戳)的拟合直线方程。start_val 是自变量初始值,step_val 是自变量的步长值。
|
||||
**功能说明**:统计表中某列的值的拟合直线方程。start_val 是自变量初始值,step_val 是自变量的步长值。
|
||||
|
||||
**返回数据类型**:字符串表达式(斜率, 截距)。
|
||||
|
||||
|
@ -1266,6 +1266,14 @@ SELECT SERVER_STATUS();
|
|||
|
||||
**说明**:检测服务端是否所有 dnode 都在线,如果是则返回成功,否则返回无法建立连接的错误。
|
||||
|
||||
### CURRENT_USER
|
||||
|
||||
```sql
|
||||
SELECT CURRENT_USER();
|
||||
```
|
||||
|
||||
**说明**:获取当前用户。
|
||||
|
||||
|
||||
## Geometry 函数
|
||||
|
||||
|
|
|
@ -31,7 +31,7 @@ select max(current) from meters partition by location interval(10m)
|
|||
|
||||
## 窗口切分查询
|
||||
|
||||
TDengine 支持按时间窗口切分方式进行聚合结果查询,比如温度传感器每秒采集一次数据,但需查询每隔 10 分钟的温度平均值。这种场景下可以使用窗口子句来获得需要的查询结果。窗口子句用于针对查询的数据集合按照窗口切分成为查询子集并进行聚合,窗口包含时间窗口(time window)、状态窗口(status window)、会话窗口(session window)、条件窗口(event window)四种窗口。其中时间窗口又可划分为滑动时间窗口和翻转时间窗口。
|
||||
TDengine 支持按时间窗口切分方式进行聚合结果查询,比如温度传感器每秒采集一次数据,但需查询每隔 10 分钟的温度平均值。这种场景下可以使用窗口子句来获得需要的查询结果。窗口子句用于针对查询的数据集合按照窗口切分成为查询子集并进行聚合,窗口包含时间窗口(time window)、状态窗口(status window)、会话窗口(session window)、事件窗口(event window)四种窗口。其中时间窗口又可划分为滑动时间窗口和翻转时间窗口。
|
||||
|
||||
窗口子句语法如下:
|
||||
|
||||
|
|
|
@ -201,7 +201,6 @@ TDengine 对于修改数据提供两种处理方式,由 IGNORE UPDATE 选项
|
|||
对于已经存在的超级表,检查列的schema信息
|
||||
1. 检查列的schema信息是否匹配,对于不匹配的,则自动进行类型转换,当前只有数据长度大于4096byte时才报错,其余场景都能进行类型转换。
|
||||
2. 检查列的个数是否相同,如果不同,需要显示的指定超级表与subquery的列的对应关系,否则报错;如果相同,可以指定对应关系,也可以不指定,不指定则按位置顺序对应。
|
||||
3. 至少自定义一个tag,否则报错。详见 自定义TAG
|
||||
|
||||
## 自定义TAG
|
||||
|
||||
|
@ -249,3 +248,12 @@ T = 最新事件时间 - DELETE_MARK
|
|||
- [unique](../function/#unique)
|
||||
- [mode](../function/#mode)
|
||||
|
||||
## 暂停、恢复流计算
|
||||
1.流计算暂停计算任务
|
||||
PAUSE STREAM [IF EXISTS] stream_name;
|
||||
没有指定IF EXISTS,如果该stream不存在,则报错;如果存在,则暂停流计算。指定了IF EXISTS,如果该stream不存在,则返回成功;如果存在,则暂停流计算
|
||||
|
||||
2.流计算恢复计算任务
|
||||
RESUME STREAM [IF EXISTS] [IGNORE UNTREATED] stream_name;
|
||||
没有指定IF EXISTS,如果该stream不存在,则报错,如果存在,则恢复流计算;指定了IF EXISTS,如果stream不存在,则返回成功;如果存在,则恢复流计算。如果指定IGNORE UNTREATED,则恢复流计算时,忽略流计算暂停期间写入的数据。
|
||||
|
||||
|
|
|
@ -178,7 +178,7 @@ description: TDengine 保留关键字的详细列表
|
|||
|
||||
- MATCH
|
||||
- MAX_DELAY
|
||||
- MAX_SPEED
|
||||
- BWLIMIT
|
||||
- MAXROWS
|
||||
- MERGE
|
||||
- META
|
||||
|
|
|
@ -76,25 +76,25 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :------------------: | ---------------- | ------------------------------------------------ |
|
||||
| 1 | name | BINARY(32) | 数据库名 |
|
||||
| 1 | name | VARCHAR(64) | 数据库名 |
|
||||
| 2 | create_time | TIMESTAMP | 创建时间 |
|
||||
| 3 | ntables | INT | 数据库中表的数量,包含子表和普通表但不包含超级表 |
|
||||
| 4 | vgroups | INT | 数据库中有多少个 vgroup。需要注意,`vgroups` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 6 | replica | INT | 副本数。需要注意,`replica` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 7 | strict | BINARY(4) | 废弃参数 |
|
||||
| 8 | duration | INT | 单文件存储数据的时间跨度。需要注意,`duration` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 9 | keep | INT | 数据保留时长。需要注意,`keep` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 7 | strict | VARCHAR(4) | 废弃参数 |
|
||||
| 8 | duration | VARCHAR(10) | 单文件存储数据的时间跨度。需要注意,`duration` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 9 | keep | VARCHAR(32) | 数据保留时长。需要注意,`keep` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 10 | buffer | INT | 每个 vnode 写缓存的内存块大小,单位 MB。需要注意,`buffer` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 11 | pagesize | INT | 每个 VNODE 中元数据存储引擎的页大小,单位为 KB。需要注意,`pagesize` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 12 | pages | INT | 每个 vnode 元数据存储引擎的缓存页个数。需要注意,`pages` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 13 | minrows | INT | 文件块中记录的最大条数。需要注意,`minrows` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 14 | maxrows | INT | 文件块中记录的最小条数。需要注意,`maxrows` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 15 | comp | INT | 数据压缩方式。需要注意,`comp` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 16 | precision | BINARY(2) | 时间分辨率。需要注意,`precision` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 17 | status | BINARY(10) | 数据库状态 |
|
||||
| 18 | retentions | BINARY (60) | 数据的聚合周期和保存时长。需要注意,`retentions` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 16 | precision | VARCHAR(2) | 时间分辨率。需要注意,`precision` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 17 | status | VARCHAR(10) | 数据库状态 |
|
||||
| 18 | retentions | VARCHAR(60) | 数据的聚合周期和保存时长。需要注意,`retentions` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 19 | single_stable | BOOL | 表示此数据库中是否只可以创建一个超级表。需要注意,`single_stable` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 20 | cachemodel | BINARY(60) | 表示是否在内存中缓存子表的最近数据。需要注意,`cachemodel` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 20 | cachemodel | VARCHAR(60) | 表示是否在内存中缓存子表的最近数据。需要注意,`cachemodel` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 21 | cachesize | INT | 表示每个 vnode 中用于缓存子表最近数据的内存大小。需要注意,`cachesize` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 22 | wal_level | INT | WAL 级别。需要注意,`wal_level` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 23 | wal_fsync_period | INT | 数据落盘周期。需要注意,`wal_fsync_period` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
|
|
|
@ -22,6 +22,14 @@ SHOW CLUSTER;
|
|||
|
||||
显示当前集群的信息
|
||||
|
||||
## SHOW CLUSTER ALIVE
|
||||
|
||||
```sql
|
||||
SHOW CLUSTER ALIVE;
|
||||
```
|
||||
|
||||
查询当前集群的状态是否可用,返回值: 0:不可用 1:完全可用 2:部分可用(集群中部分节点下线,但其它节点仍可以正常使用)
|
||||
|
||||
## SHOW CONNECTIONS
|
||||
|
||||
```sql
|
||||
|
|
|
@ -1,138 +0,0 @@
|
|||
---
|
||||
sidebar_label: 权限管理
|
||||
title: 权限管理
|
||||
description: 企业版中才具有的权限管理功能
|
||||
---
|
||||
|
||||
本节讲述如何在 TDengine 中进行权限管理的相关操作。
|
||||
|
||||
## 创建用户
|
||||
|
||||
```sql
|
||||
CREATE USER use_name PASS 'password' [SYSINFO {1|0}];
|
||||
```
|
||||
|
||||
创建用户。
|
||||
|
||||
use_name 最长为 23 字节。
|
||||
|
||||
password 最长为 31 字节,合法字符包括"a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/",不可以出现单双引号、撇号、反斜杠和空格,且不可以为空。
|
||||
|
||||
SYSINFO 表示用户是否可以查看系统信息。1 表示可以查看,0 表示不可以查看。系统信息包括服务端配置信息、服务端各种节点信息(如 DNODE、QNODE等)、存储相关的信息等。默认为可以查看系统信息。
|
||||
|
||||
例如,创建密码为123456且可以查看系统信息的用户test如下:
|
||||
|
||||
```sql
|
||||
taos> create user test pass '123456' sysinfo 1;
|
||||
Query OK, 0 of 0 rows affected (0.001254s)
|
||||
```
|
||||
|
||||
## 查看用户
|
||||
|
||||
```sql
|
||||
SHOW USERS;
|
||||
```
|
||||
|
||||
查看用户信息。
|
||||
|
||||
```sql
|
||||
taos> show users;
|
||||
name | super | enable | sysinfo | create_time |
|
||||
================================================================================
|
||||
test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
|
||||
root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
|
||||
Query OK, 2 rows in database (0.001657s)
|
||||
```
|
||||
|
||||
也可以通过查询INFORMATION_SCHEMA.INS_USERS系统表来查看用户信息,例如:
|
||||
|
||||
```sql
|
||||
taos> select * from information_schema.ins_users;
|
||||
name | super | enable | sysinfo | create_time |
|
||||
================================================================================
|
||||
test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
|
||||
root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
|
||||
Query OK, 2 rows in database (0.001953s)
|
||||
```
|
||||
|
||||
## 删除用户
|
||||
|
||||
```sql
|
||||
DROP USER user_name;
|
||||
```
|
||||
|
||||
## 修改用户信息
|
||||
|
||||
```sql
|
||||
ALTER USER user_name alter_user_clause
|
||||
|
||||
alter_user_clause: {
|
||||
PASS 'literal'
|
||||
| ENABLE value
|
||||
| SYSINFO value
|
||||
}
|
||||
```
|
||||
|
||||
- PASS:修改用户密码。
|
||||
- ENABLE:修改用户是否启用。1 表示启用此用户,0 表示禁用此用户。
|
||||
- SYSINFO:修改用户是否可查看系统信息。1 表示可以查看系统信息,0 表示不可以查看系统信息。
|
||||
|
||||
例如,禁用 test 用户:
|
||||
|
||||
```sql
|
||||
taos> alter user test enable 0;
|
||||
Query OK, 0 of 0 rows affected (0.001160s)
|
||||
```
|
||||
|
||||
## 授权
|
||||
|
||||
```sql
|
||||
GRANT privileges ON priv_level TO user_name
|
||||
|
||||
privileges : {
|
||||
ALL
|
||||
| priv_type [, priv_type] ...
|
||||
}
|
||||
|
||||
priv_type : {
|
||||
READ
|
||||
| WRITE
|
||||
}
|
||||
|
||||
priv_level : {
|
||||
dbname.*
|
||||
| *.*
|
||||
}
|
||||
```
|
||||
|
||||
对用户授权。授权功能只包含在企业版中。
|
||||
|
||||
授权级别支持到DATABASE,权限有READ和WRITE两种。
|
||||
|
||||
TDengine 有超级用户和普通用户两类用户。超级用户缺省创建为root,拥有所有权限。使用超级用户创建出来的用户为普通用户。在未授权的情况下,普通用户可以创建DATABASE,并拥有自己创建的DATABASE的所有权限,包括删除数据库、修改数据库、查询时序数据和写入时序数据。超级用户可以给普通用户授予其他DATABASE的读写权限,使其可以在此DATABASE上读写数据,但不能对其进行删除和修改数据库的操作。
|
||||
|
||||
对于非DATABASE的对象,如USER、DNODE、UDF、QNODE等,普通用户只有读权限(一般为SHOW命令),不能创建和修改。
|
||||
|
||||
## 撤销授权
|
||||
|
||||
```sql
|
||||
REVOKE privileges ON priv_level FROM user_name
|
||||
|
||||
privileges : {
|
||||
ALL
|
||||
| priv_type [, priv_type] ...
|
||||
}
|
||||
|
||||
priv_type : {
|
||||
READ
|
||||
| WRITE
|
||||
}
|
||||
|
||||
priv_level : {
|
||||
dbname.*
|
||||
| *.*
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
收回对用户的授权。授权功能只包含在企业版中。
|
|
@ -20,6 +20,9 @@ index_option:
|
|||
functions:
|
||||
function [, function] ...
|
||||
```
|
||||
### tag 索引
|
||||
|
||||
[tag 索引](../tag-index)
|
||||
|
||||
### SMA 索引
|
||||
|
|
@ -17,7 +17,7 @@ conn_id 可以通过 `SHOW CONNECTIONS` 获取。
|
|||
## 终止查询
|
||||
|
||||
```sql
|
||||
KILL QUERY kill_id;
|
||||
KILL QUERY 'kill_id';
|
||||
```
|
||||
|
||||
kill_id 可以通过 `SHOW QUERIES` 获取。
|
||||
|
|
|
@ -180,7 +180,7 @@ AllowWebSockets
|
|||
node_export 是一个机器指标的导出器。请访问 [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) 了解更多信息。
|
||||
- 支持 Prometheus remote_read 和 remote_write
|
||||
remote_read 和 remote_write 是 Prometheus 数据读写分离的集群方案。请访问[https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) 了解更多信息。
|
||||
- 获取 table 所在的虚拟节点组(VGroup)的 VGroup ID。关于虚拟节点组(VGroup)的更多信息,请访问[整体架构文档](/tdinternal/arch/#主要逻辑单元) 。
|
||||
- 获取 table 所在的虚拟节点组(VGroup)的 VGroup ID。
|
||||
|
||||
## 接口
|
||||
|
||||
|
@ -245,7 +245,7 @@ Prometheus 使用的由 \*NIX 内核暴露的硬件和操作系统指标的输
|
|||
|
||||
### 获取 table 的 VGroup ID
|
||||
|
||||
可以访问 http 接口 `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>` 获取 table 的 VGroup ID。关于虚拟节点组(VGroup)的更多信息,请访问[整体架构文档](/tdinternal/arch/#主要逻辑单元) 。
|
||||
可以访问 http 接口 `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>` 获取 table 的 VGroup ID。
|
||||
|
||||
## 内存使用优化方法
|
||||
|
||||
|
|
|
@ -13,7 +13,7 @@ taosBenchmark (曾用名 taosdemo ) 是一个用于测试 TDengine 产品性能
|
|||
|
||||
taosBenchmark 有两种安装方式:
|
||||
|
||||
- 安装 TDengine 官方安装包的同时会自动安装 taosBenchmark, 详情请参考[ TDengine 安装](/operation/pkg-install)。
|
||||
- 安装 TDengine 官方安装包的同时会自动安装 taosBenchmark, 详情请参考 [TDengine 安装](../../get-started/)。
|
||||
|
||||
- 单独编译 taos-tools 并安装, 详情请参考 [taos-tools](https://github.com/taosdata/taos-tools) 仓库。
|
||||
|
||||
|
|
|
@ -105,6 +105,8 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
|
|||
-L, --loose-mode Using loose mode if the table name and column name
|
||||
use letter and number only. Default is NOT.
|
||||
-n, --no-escape No escape char '`'. Default is using it.
|
||||
-Q, --dot-replace Replace dot character with underline character in
|
||||
the table name.
|
||||
-T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is
|
||||
8.
|
||||
-C, --cloud=CLOUD_DSN specify a DSN to access TDengine cloud service
|
||||
|
|
|
@ -15,7 +15,7 @@ TDengine 通过 [taosKeeper](../taosKeeper) 将服务器的 CPU、内存、硬
|
|||
|
||||
- 单节点的 TDengine 服务器或多节点的 [TDengine] 集群,以及一个[Grafana]服务器。此仪表盘需要 TDengine 3.0.0.0 及以上,并开启监控服务,具体配置请参考:[TDengine 监控配置](../config/#监控相关)。
|
||||
- taosAdapter 已经安装并正常运行。具体细节请参考:[taosAdapter 使用手册](../taosadapter)
|
||||
- taosKeeper 已安装并正常运行。具体细节请参考:[taosKeeper 使用手册](../taosKeeper)
|
||||
- taosKeeper 已安装并正常运行。注意需要 taos.cfg 文件中打开 monitor 相关配置项,具体细节请参考:[taosKeeper 使用手册](../taosKeeper)
|
||||
|
||||
记录以下信息:
|
||||
|
||||
|
@ -120,7 +120,7 @@ chmod +x TDinsight.sh
|
|||
./TDinsight.sh
|
||||
```
|
||||
|
||||
这个脚本会自动下载最新的[Grafana TDengine 数据源插件](https://github.com/taosdata/grafanaplugin/releases/latest) 和 [TDinsight 仪表盘](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsightV3.json) ,将命令行选项中的可配置参数转为 [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) 配置文件,以进行自动化部署及更新等操作。利用该脚本提供的告警设置选项,你还可以获得内置的阿里云短信告警通知支持。
|
||||
这个脚本会自动下载最新的[Grafana TDengine 数据源插件](https://github.com/taosdata/grafanaplugin/releases/latest) 和 [TDinsight 仪表盘](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsightV3.json) ,将命令行选项中的可配置参数转为 [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) 配置文件,以进行自动化部署及更新等操作。
|
||||
|
||||
假设您在同一台主机上使用 TDengine 和 Grafana 的默认服务。运行 `./TDinsight.sh` 并打开 Grafana 浏览器窗口就可以看到 TDinsight 仪表盘了。
|
||||
|
||||
|
@ -152,9 +152,6 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
|
|||
-i, --tdinsight-uid <string> Replace with a non-space ASCII code as the dashboard id. [default: tdinsight]
|
||||
-t, --tdinsight-title <string> Dashboard title. [default: TDinsight]
|
||||
-e, --tdinsight-editable If the provisioning dashboard could be editable. [default: false]
|
||||
|
||||
-E, --external-notifier <string> Apply external notifier uid to TDinsight dashboard.
|
||||
|
||||
```
|
||||
|
||||
大多数命令行选项都可以通过环境变量获得同样的效果。
|
||||
|
@ -172,7 +169,10 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
|
|||
| -i | --tdinsight-uid | TDINSIGHT_DASHBOARD_UID | TDinsight 仪表盘`uid`。 [默认值:tdinsight] |
|
||||
| -t | --tdinsight-title | TDINSIGHT_DASHBOARD_TITLE | TDinsight 仪表盘标题。 [默认:TDinsight] |
|
||||
| -e | --tdinsight-editable | TDINSIGHT_DASHBOARD_EDITABLE | 如果配置仪表盘可以编辑。 [默认值:false] |
|
||||
| -E | --external-notifier | EXTERNAL_NOTIFIER | 将外部通知程序 uid 应用于 TDinsight 仪表盘。 |
|
||||
|
||||
:::note
|
||||
新版本插件使用 Grafana unified alerting 功能,`-E` 选项不再支持。
|
||||
:::
|
||||
|
||||
假设您在主机 `tdengine` 上启动 TDengine 数据库,HTTP API 端口为 `6041`,用户为 `root1`,密码为 `pass5ord`。执行脚本:
|
||||
|
||||
|
@ -180,18 +180,6 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
|
|||
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord
|
||||
```
|
||||
|
||||
我们提供了一个“-E”选项,用于从命令行配置 TDinsight 使用现有的通知通道(Notification Channel)。假设你的 Grafana 用户和密码是 `admin:admin`,使用以下命令获取已有的通知通道的`uid`:
|
||||
|
||||
```bash
|
||||
curl --no-progress-meter -u admin:admin http://localhost:3000/api/alert-notifications | jq
|
||||
```
|
||||
|
||||
使用上面获取的 `uid` 值作为 `-E` 输入。
|
||||
|
||||
```bash
|
||||
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
|
||||
```
|
||||
|
||||
如果要监控多个 TDengine 集群,则需要设置多个 TDinsight 仪表盘。设置非默认 TDinsight 需要进行一些更改: `-n` `-i` `-t` 选项需要更改为非默认名称,如果使用 内置短信告警功能,`-N` 和 `-L` 也应该改变。
|
||||
|
||||
```bash
|
||||
|
|
|
@ -33,8 +33,10 @@ tag_set 中的所有的数据自动转化为 nchar 数据类型,并不需要
|
|||
|
||||
在无模式写入数据行协议中,field_set 中的每个数据项都需要对自身的数据类型进行描述。具体来说:
|
||||
|
||||
- 如果两边有英文双引号,表示 VARCHAR(N) 类型。例如 `"abc"`。
|
||||
- 如果两边有英文双引号而且带有 L 前缀,表示 NCHAR(N) 类型。例如 `L"报错信息"`。
|
||||
- 如果两边有英文双引号,表示 VARCHAR 类型。例如 `"abc"`。
|
||||
- 如果两边有英文双引号而且带有 L或l 前缀,表示 NCHAR 类型。例如 `L"报错信息"`。
|
||||
- 如果两边有英文双引号而且带有 G或g 前缀,表示 GEOMETRY 类型。例如 `G"Point(4.343 89.342)"`。
|
||||
- 如果两边有英文双引号而且带有 B或b 前缀,表示 VARBINARY 类型,双引号内可以为\x开头的16进制或者字符串。例如 `B"\x98f46e"` `B"hello"`。
|
||||
- 对空格、等号(=)、逗号(,)、双引号(")、反斜杠(\),前面需要使用反斜杠(\)进行转义。(都指的是英文半角符号)。具体转义规则如下:
|
||||
|
||||
| **序号** | **域** | **需转义字符** |
|
||||
|
@ -106,6 +108,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
|
|||
8. 为了提高写入的效率,默认假设同一个超级表中 field_set 的顺序是一样的(第一条数据包含所有的 field,后面的数据按照这个顺序),如果顺序不一样,需要配置参数 smlDataFormat 为 false,否则,数据写入按照相同顺序写入,库中数据会异常,从3.0.3.0开始,自动检测顺序是否一致,该配置废弃。
|
||||
9. 由于sql建表表名不支持点号(.),所以schemaless也对点号(.)做了处理,如果schemaless自动建表的表名如果有点号(.),会自动替换为下划线(\_)。如果手动指定子表名的话,子表名里有点号(.),同样转化为下划线(\_)。
|
||||
10. taos.cfg 增加 smlTsDefaultName 配置(值为字符串),只在client端起作用,配置后,schemaless自动建表的时间列名字可以通过该配置设置。不配置的话,默认为 _ts
|
||||
11. 无模式写入的数据超级表或子表名区分大小写
|
||||
|
||||
:::tip
|
||||
无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过
|
||||
|
|
|
@ -16,7 +16,7 @@ taosKeeper 是 TDengine 3.0 版本监控指标的导出工具,通过简单的
|
|||
taosKeeper 有两种安装方式:
|
||||
taosKeeper 安装方式:
|
||||
|
||||
- 安装 TDengine 官方安装包的同时会自动安装 taosKeeper, 详情请参考[ TDengine 安装](/operation/pkg-install)。
|
||||
- 安装 TDengine 官方安装包的同时会自动安装 taosKeeper, 详情请参考[ TDengine 安装](../../get-started/)。
|
||||
|
||||
- 单独编译 taosKeeper 并安装,详情请参考 [taosKeeper](https://github.com/taosdata/taoskeeper) 仓库。
|
||||
|
||||
|
|
|
@ -1,207 +0,0 @@
|
|||
---
|
||||
title: 安装和卸载
|
||||
description: 安装、卸载、启动、停止和升级
|
||||
---
|
||||
|
||||
import Tabs from "@theme/Tabs";
|
||||
import TabItem from "@theme/TabItem";
|
||||
|
||||
本节将介绍一些关于安装和卸载更深层次的内容,以及升级的注意事项。
|
||||
|
||||
## 安装
|
||||
|
||||
关于安装,请参考 [使用安装包立即开始](../../get-started/package)
|
||||
|
||||
|
||||
|
||||
## 安装目录说明
|
||||
|
||||
TDengine 成功安装后,主安装目录是 /usr/local/taos,目录内容如下:
|
||||
|
||||
```
|
||||
$ cd /usr/local/taos
|
||||
$ ll
|
||||
$ ll
|
||||
total 28
|
||||
drwxr-xr-x 7 root root 4096 Feb 22 09:34 ./
|
||||
drwxr-xr-x 12 root root 4096 Feb 22 09:34 ../
|
||||
drwxr-xr-x 2 root root 4096 Feb 22 09:34 bin/
|
||||
drwxr-xr-x 2 root root 4096 Feb 22 09:34 cfg/
|
||||
lrwxrwxrwx 1 root root 13 Feb 22 09:34 data -> /var/lib/taos/
|
||||
drwxr-xr-x 2 root root 4096 Feb 22 09:34 driver/
|
||||
drwxr-xr-x 10 root root 4096 Feb 22 09:34 examples/
|
||||
drwxr-xr-x 2 root root 4096 Feb 22 09:34 include/
|
||||
lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/
|
||||
```
|
||||
|
||||
- 自动生成配置文件目录、数据库目录、日志目录。
|
||||
- 配置文件缺省目录:/etc/taos/taos.cfg, 软链接到 /usr/local/taos/cfg/taos.cfg;
|
||||
- 数据库缺省目录:/var/lib/taos, 软链接到 /usr/local/taos/data;
|
||||
- 日志缺省目录:/var/log/taos, 软链接到 /usr/local/taos/log;
|
||||
- /usr/local/taos/bin 目录下的可执行文件,会软链接到 /usr/bin 目录下;
|
||||
- /usr/local/taos/driver 目录下的动态库文件,会软链接到 /usr/lib 目录下;
|
||||
- /usr/local/taos/include 目录下的头文件,会软链接到到 /usr/include 目录下;
|
||||
|
||||
## 卸载
|
||||
|
||||
<Tabs>
|
||||
<TabItem label="apt-get 卸载" value="aptremove">
|
||||
|
||||
TDengine 卸载命令如下:
|
||||
|
||||
```
|
||||
$ sudo apt-get remove tdengine
|
||||
Reading package lists... Done
|
||||
Building dependency tree
|
||||
Reading state information... Done
|
||||
The following packages will be REMOVED:
|
||||
tdengine
|
||||
0 upgraded, 0 newly installed, 1 to remove and 18 not upgraded.
|
||||
After this operation, 68.3 MB disk space will be freed.
|
||||
Do you want to continue? [Y/n] y
|
||||
(Reading database ... 135625 files and directories currently installed.)
|
||||
Removing tdengine (3.0.0.0) ...
|
||||
TDengine is removed successfully!
|
||||
|
||||
```
|
||||
|
||||
taosTools 卸载命令如下:
|
||||
|
||||
```
|
||||
$ sudo apt remove taostools
|
||||
Reading package lists... Done
|
||||
Building dependency tree
|
||||
Reading state information... Done
|
||||
The following packages will be REMOVED:
|
||||
taostools
|
||||
0 upgraded, 0 newly installed, 1 to remove and 0 not upgraded.
|
||||
After this operation, 68.3 MB disk space will be freed.
|
||||
Do you want to continue? [Y/n]
|
||||
(Reading database ... 147973 files and directories currently installed.)
|
||||
Removing taostools (2.1.2) ...
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="Deb 卸载" value="debuninst">
|
||||
|
||||
TDengine 卸载命令如下:
|
||||
|
||||
```
|
||||
$ sudo dpkg -r tdengine
|
||||
(Reading database ... 120119 files and directories currently installed.)
|
||||
Removing tdengine (3.0.0.0) ...
|
||||
TDengine is removed successfully!
|
||||
|
||||
```
|
||||
|
||||
taosTools 卸载命令如下:
|
||||
|
||||
```
|
||||
$ sudo dpkg -r taostools
|
||||
(Reading database ... 147973 files and directories currently installed.)
|
||||
Removing taostools (2.1.2) ...
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem label="RPM 卸载" value="rpmuninst">
|
||||
|
||||
卸载 TDengine 命令如下:
|
||||
|
||||
```
|
||||
$ sudo rpm -e tdengine
|
||||
TDengine is removed successfully!
|
||||
```
|
||||
|
||||
卸载 taosTools 命令如下:
|
||||
|
||||
```
|
||||
sudo rpm -e taostools
|
||||
taosTools is removed successfully!
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem label="tar.gz 卸载" value="taruninst">
|
||||
|
||||
卸载 TDengine 命令如下:
|
||||
|
||||
```
|
||||
$ rmtaos
|
||||
TDengine is removed successfully!
|
||||
```
|
||||
|
||||
卸载 taosTools 命令如下:
|
||||
|
||||
```
|
||||
$ rmtaostools
|
||||
Start to uninstall taos tools ...
|
||||
|
||||
taos tools is uninstalled successfully!
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem label="Windows 卸载" value="windows">
|
||||
在 C:\TDengine 目录下,通过运行 unins000.exe 卸载程序来卸载 TDengine。
|
||||
</TabItem>
|
||||
|
||||
<TabItem label="Mac 卸载" value="mac">
|
||||
|
||||
卸载 TDengine 命令如下:
|
||||
|
||||
```
|
||||
$ rmtaos
|
||||
TDengine is removed successfully!
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
:::info
|
||||
|
||||
- TDengine 提供了多种安装包,但最好不要在一个系统上同时使用 tar.gz 安装包和 deb 或 rpm 安装包。否则会相互影响,导致在使用时出现问题。
|
||||
|
||||
- 对于 deb 包安装后,如果安装目录被手工误删了部分,出现卸载、或重新安装不能成功。此时,需要清除 TDengine 包的安装信息,执行如下命令:
|
||||
|
||||
```
|
||||
$ sudo rm -f /var/lib/dpkg/info/tdengine*
|
||||
```
|
||||
|
||||
然后再重新进行安装就可以了。
|
||||
|
||||
- 对于 rpm 包安装后,如果安装目录被手工误删了部分,出现卸载、或重新安装不能成功。此时,需要清除 TDengine 包的安装信息,执行如下命令:
|
||||
|
||||
```
|
||||
$ sudo rpm -e --noscripts tdengine
|
||||
```
|
||||
|
||||
然后再重新进行安装就可以了。
|
||||
|
||||
:::
|
||||
|
||||
## 卸载和更新文件说明
|
||||
|
||||
卸载安装包的时候,将保留配置文件、数据库文件和日志文件,即 /etc/taos/taos.cfg 、 /var/lib/taos 、 /var/log/taos 。如果用户确认后不需保留,可以手工删除,但一定要慎重,因为删除后,数据将永久丢失,不可以恢复!
|
||||
|
||||
如果是更新安装,当缺省配置文件( /etc/taos/taos.cfg )存在时,仍然使用已有的配置文件,安装包中携带的配置文件修改为 taos.cfg.orig 保存在 /usr/local/taos/cfg/ 目录,可以作为设置配置参数的参考样例;如果不存在配置文件,就使用安装包中自带的配置文件。
|
||||
|
||||
## 升级
|
||||
升级分为两个层面:升级安装包 和 升级运行中的实例。
|
||||
|
||||
升级安装包请遵循前述安装和卸载的步骤先卸载旧版本再安装新版本。
|
||||
|
||||
升级运行中的实例则要复杂得多,首先请注意版本号,TDengine 的版本号目前分为四段,如 2.4.0.14 和 2.4.0.16,只有前三段版本号一致(即只有第四段版本号不同)才能把一个运行中的实例进行升级。升级步骤如下:
|
||||
- 停止数据写入
|
||||
- 确保所有数据落盘,即写入时序数据库
|
||||
- 停止 TDengine 集群
|
||||
- 卸载旧版本并安装新版本
|
||||
- 重新启动 TDengine 集群
|
||||
- 进行简单的查询操作确认旧数据没有丢失
|
||||
- 进行简单的写入操作确认 TDengine 集群可用
|
||||
- 重新恢复业务数据的写入
|
||||
|
||||
:::warning
|
||||
TDengine 不保证低版本能够兼容高版本的数据,所以任何时候都不推荐降级
|
||||
|
||||
:::
|
|
@ -218,11 +218,11 @@ docker run -d \
|
|||
|
||||
### 导入 Dashboard
|
||||
|
||||
在数据源配置页面,您可以为该数据源导入 TDinsight 面板,作为 TDengine 集群的监控可视化工具。如果 TDengine 服务端为 3.0 版本请选择 `TDinsight for 3.x` 导入。
|
||||
在数据源配置页面,您可以为该数据源导入 TDinsight 面板,作为 TDengine 集群的监控可视化工具。如果 TDengine 服务端为 3.0 版本请选择 `TDinsight for 3.x` 导入。注意 TDinsight for 3.x 需要运行和配置 taoskeeper,相关使用说明请见 [TDinsight 用户手册](/reference/tdinsight/)。
|
||||
|
||||

|
||||
|
||||
其中适配 TDengine 2.* 的 Dashboard 已发布在 Grafana:[Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167)) 。其他安装方式和相关使用说明请见 [TDinsight 用户手册](/reference/tdinsight/)。
|
||||
其中适配 TDengine 2.* 的 Dashboard 已发布在 Grafana:[Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167) 。
|
||||
|
||||
使用 TDengine 作为数据源的其他面板,可以[在此搜索](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource)。以下是一份不完全列表:
|
||||
|
||||
|
|
|
@ -23,7 +23,7 @@ TDengine Source Connector 用于把数据实时地从 TDengine 读出来发送
|
|||
1. Linux 操作系统
|
||||
2. 已安装 Java 8 和 Maven
|
||||
3. 已安装 Git、curl、vi
|
||||
4. 已安装并启动 TDengine。如果还没有可参考[安装和卸载](/operation/pkg-install)
|
||||
4. 已安装并启动 TDengine。如果还没有可参考[安装和卸载](../../get-started/)
|
||||
|
||||
## 安装 Kafka
|
||||
|
||||
|
|
|
@ -0,0 +1,438 @@
|
|||
---
|
||||
sidebar_label: Seeq
|
||||
title: Seeq
|
||||
description: 如何使用 Seeq 和 TDengine 进行时序数据分析
|
||||
---
|
||||
|
||||
# 如何使用 Seeq 和 TDengine 进行时序数据分析
|
||||
|
||||
## 方案介绍
|
||||
|
||||
Seeq 是制造业和工业互联网(IIOT)高级分析软件。Seeq 支持在工艺制造组织中使用机器学习创新的新功能。这些功能使组织能够将自己或第三方机器学习算法部署到前线流程工程师和主题专家使用的高级分析应用程序,从而使单个数据科学家的努力扩展到许多前线员工。
|
||||
|
||||
通过 TDengine Java connector, Seeq 可以轻松支持查询 TDengine 提供的时序数据,并提供数据展现、分析、预测等功能。
|
||||
|
||||
### Seeq 安装方法
|
||||
|
||||
从 [Seeq 官网](https://www.seeq.com/customer-download)下载相关软件,例如 Seeq Server 和 Seeq Data Lab 等。
|
||||
|
||||
### Seeq Server 安装和启动
|
||||
|
||||
```
|
||||
tar xvzf seeq-server-xxx.tar.gz
|
||||
cd seeq-server-installer
|
||||
sudo ./install
|
||||
|
||||
sudo seeq service enable
|
||||
sudo seeq start
|
||||
```
|
||||
|
||||
### Seeq Data Lab Server 安装和启动
|
||||
|
||||
Seeq Data Lab 需要安装在和 Seeq Server 不同的服务器上,并通过配置和 Seeq Server 互联。详细安装配置指令参见 [Seeq 官方文档](https://support.seeq.com/space/KB/1034059842)。
|
||||
|
||||
```
|
||||
tar xvf seeq-data-lab-<version>-64bit-linux.tar.gz
|
||||
sudo seeq-data-lab-installer/install -f /opt/seeq/seeq-data-lab -g /var/opt/seeq -u seeq
|
||||
sudo seeq config set Network/DataLab/Hostname localhost
|
||||
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
|
||||
sudo seeq config set Network/Hostname <value> # the host IP or URL of the main Seeq Server
|
||||
|
||||
# If the main Seeq server is configured to listen over HTTPS
|
||||
sudo seeq config set Network/Webserver/SecurePort 443 # the secure port of the main Seeq Server (usually 443)
|
||||
|
||||
# If the main Seeq server is NOT configured to listen over HTTPS
|
||||
sudo seeq config set Network/Webserver/Port <value>
|
||||
|
||||
# On the main Seeq server, open a Seeq Command Prompt and set the hostname of the Data Lab server:
|
||||
sudo seeq config set Network/DataLab/Hostname <value> # the host IP (not URL) of the Data Lab server
|
||||
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
|
||||
```
|
||||
|
||||
## TDengine 本地实例安装方法
|
||||
|
||||
请参考[官网文档](../../get-started)。
|
||||
|
||||
## TDengine Cloud 访问方法
|
||||
如果使用 Seeq 连接 TDengine Cloud,请在 https://cloud.taosdata.com 申请帐号并登录查看如何访问 TDengine Cloud。
|
||||
|
||||
## 如何配置 Seeq 访问 TDengine
|
||||
|
||||
1. 查看 data 存储位置
|
||||
|
||||
```
|
||||
sudo seeq config get Folders/Data
|
||||
```
|
||||
|
||||
2. 从 maven.org 下载 TDengine Java connector 包,目前最新版本为[3.2.5](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar),并拷贝至 data 存储位置的 plugins/lib 中。
|
||||
|
||||
3. 重新启动 seeq server
|
||||
|
||||
```
|
||||
sudo seeq restart
|
||||
```
|
||||
|
||||
4. 输入 License
|
||||
|
||||
使用浏览器访问 ip:34216 并按照说明输入 license。
|
||||
|
||||
## 使用 Seeq 分析 TDengine 时序数据
|
||||
|
||||
本章节演示如何使用 Seeq 软件配合 TDengine 进行时序数据分析。
|
||||
|
||||
### 场景介绍
|
||||
|
||||
示例场景为一个电力系统,用户每天从电站仪表收集用电量数据,并将其存储在 TDengine 集群中。现在用户想要预测电力消耗将会如何发展,并购买更多设备来支持它。用户电力消耗随着每月订单变化而不同,另外考虑到季节变化,电力消耗量会有所不同。这个城市位于北半球,所以在夏天会使用更多的电力。我们模拟数据来反映这些假定。
|
||||
|
||||
### 数据 Schema
|
||||
|
||||
```
|
||||
CREATE STABLE meters (ts TIMESTAMP, num INT, temperature FLOAT, goods INT) TAGS (device NCHAR(20));
|
||||
CREATE TABLE goods (ts1 TIMESTAMP, ts2 TIMESTAMP, goods FLOAT);
|
||||
```
|
||||
|
||||

|
||||
|
||||
### 构造数据方法
|
||||
|
||||
```
|
||||
python mockdata.py
|
||||
taos -s "insert into power.goods select _wstart, _wstart + 10d, avg(goods) from power.meters interval(10d);"
|
||||
```
|
||||
|
||||
源代码托管在 [GitHub 仓库](https://github.com/sangshuduo/td-forecasting)。
|
||||
|
||||
### 使用 Seeq 进行数据分析
|
||||
|
||||
#### 配置数据源(Data Source)
|
||||
|
||||
使用 Seeq 管理员角色的帐号登录,并新建数据源。
|
||||
|
||||
- Power
|
||||
|
||||
```
|
||||
{
|
||||
"QueryDefinitions": [
|
||||
{
|
||||
"Name": "PowerNum",
|
||||
"Type": "SIGNAL",
|
||||
"Sql": "SELECT ts, num FROM meters",
|
||||
"Enabled": true,
|
||||
"TestMode": false,
|
||||
"TestQueriesDuringSync": true,
|
||||
"InProgressCapsulesEnabled": false,
|
||||
"Variables": null,
|
||||
"Properties": [
|
||||
{
|
||||
"Name": "Name",
|
||||
"Value": "Num",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
},
|
||||
{
|
||||
"Name": "Interpolation Method",
|
||||
"Value": "linear",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
},
|
||||
{
|
||||
"Name": "Maximum Interpolation",
|
||||
"Value": "2day",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
}
|
||||
],
|
||||
"CapsuleProperties": null
|
||||
}
|
||||
],
|
||||
"Type": "GENERIC",
|
||||
"Hostname": null,
|
||||
"Port": 0,
|
||||
"DatabaseName": null,
|
||||
"Username": "root",
|
||||
"Password": "taosdata",
|
||||
"InitialSql": null,
|
||||
"TimeZone": null,
|
||||
"PrintRows": false,
|
||||
"UseWindowsAuth": false,
|
||||
"SqlFetchBatchSize": 100000,
|
||||
"UseSSL": false,
|
||||
"JdbcProperties": null,
|
||||
"GenericDatabaseConfig": {
|
||||
"DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
|
||||
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
|
||||
"ResolutionInNanoseconds": 1000,
|
||||
"ZonedColumnTypes": []
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- Goods
|
||||
|
||||
```
|
||||
{
|
||||
"QueryDefinitions": [
|
||||
{
|
||||
"Name": "PowerGoods",
|
||||
"Type": "CONDITION",
|
||||
"Sql": "SELECT ts1, ts2, goods FROM power.goods",
|
||||
"Enabled": true,
|
||||
"TestMode": false,
|
||||
"TestQueriesDuringSync": true,
|
||||
"InProgressCapsulesEnabled": false,
|
||||
"Variables": null,
|
||||
"Properties": [
|
||||
{
|
||||
"Name": "Name",
|
||||
"Value": "Goods",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
},
|
||||
{
|
||||
"Name": "Maximum Duration",
|
||||
"Value": "10days",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
}
|
||||
],
|
||||
"CapsuleProperties": [
|
||||
{
|
||||
"Name": "goods",
|
||||
"Value": "${columnResult}",
|
||||
"Column": "goods",
|
||||
"Uom": "string"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"Type": "GENERIC",
|
||||
"Hostname": null,
|
||||
"Port": 0,
|
||||
"DatabaseName": null,
|
||||
"Username": "root",
|
||||
"Password": "taosdata",
|
||||
"InitialSql": null,
|
||||
"TimeZone": null,
|
||||
"PrintRows": false,
|
||||
"UseWindowsAuth": false,
|
||||
"SqlFetchBatchSize": 100000,
|
||||
"UseSSL": false,
|
||||
"JdbcProperties": null,
|
||||
"GenericDatabaseConfig": {
|
||||
"DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
|
||||
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
|
||||
"ResolutionInNanoseconds": 1000,
|
||||
"ZonedColumnTypes": []
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- Temperature
|
||||
|
||||
```
|
||||
{
|
||||
"QueryDefinitions": [
|
||||
{
|
||||
"Name": "PowerNum",
|
||||
"Type": "SIGNAL",
|
||||
"Sql": "SELECT ts, temperature FROM meters",
|
||||
"Enabled": true,
|
||||
"TestMode": false,
|
||||
"TestQueriesDuringSync": true,
|
||||
"InProgressCapsulesEnabled": false,
|
||||
"Variables": null,
|
||||
"Properties": [
|
||||
{
|
||||
"Name": "Name",
|
||||
"Value": "Temperature",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
},
|
||||
{
|
||||
"Name": "Interpolation Method",
|
||||
"Value": "linear",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
},
|
||||
{
|
||||
"Name": "Maximum Interpolation",
|
||||
"Value": "2day",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
}
|
||||
],
|
||||
"CapsuleProperties": null
|
||||
}
|
||||
],
|
||||
"Type": "GENERIC",
|
||||
"Hostname": null,
|
||||
"Port": 0,
|
||||
"DatabaseName": null,
|
||||
"Username": "root",
|
||||
"Password": "taosdata",
|
||||
"InitialSql": null,
|
||||
"TimeZone": null,
|
||||
"PrintRows": false,
|
||||
"UseWindowsAuth": false,
|
||||
"SqlFetchBatchSize": 100000,
|
||||
"UseSSL": false,
|
||||
"JdbcProperties": null,
|
||||
"GenericDatabaseConfig": {
|
||||
"DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
|
||||
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
|
||||
"ResolutionInNanoseconds": 1000,
|
||||
"ZonedColumnTypes": []
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### 使用 Seeq Workbench
|
||||
|
||||
登录 Seeq 服务页面并新建 Seeq Workbench,通过选择数据源搜索结果和根据需要选择不同的工具,可以进行数据展现或预测,详细使用方法参见[官方知识库](https://support.seeq.com/space/KB/146440193/Seeq+Workbench)。
|
||||
|
||||

|
||||
|
||||
#### 用 Seeq Data Lab Server 进行进一步的数据分析
|
||||
|
||||
登录 Seeq 服务页面并新建 Seeq Data Lab,可以进一步使用 Python 编程或其他机器学习工具进行更复杂的数据挖掘功能。
|
||||
|
||||
```Python
|
||||
from seeq import spy
|
||||
spy.options.compatibility = 189
|
||||
import pandas as pd
|
||||
import matplotlib
|
||||
import matplotlib.pyplot as plt
|
||||
import mlforecast
|
||||
import lightgbm as lgb
|
||||
from mlforecast.target_transforms import Differences
|
||||
from sklearn.linear_model import LinearRegression
|
||||
|
||||
ds = spy.search({'ID': "8C91A9C7-B6C2-4E18-AAAF-XXXXXXXXX"})
|
||||
print(ds)
|
||||
|
||||
sig = ds.loc[ds['Name'].isin(['Num'])]
|
||||
print(sig)
|
||||
|
||||
data = spy.pull(sig, start='2015-01-01', end='2022-12-31', grid=None)
|
||||
print("data.info()")
|
||||
data.info()
|
||||
print(data)
|
||||
#data.plot()
|
||||
|
||||
print("data[Num].info()")
|
||||
data['Num'].info()
|
||||
da = data['Num'].index.tolist()
|
||||
#print(da)
|
||||
|
||||
li = data['Num'].tolist()
|
||||
#print(li)
|
||||
|
||||
data2 = pd.DataFrame()
|
||||
data2['ds'] = da
|
||||
print('1st data2 ds info()')
|
||||
data2['ds'].info()
|
||||
|
||||
#data2['ds'] = pd.to_datetime(data2['ds']).to_timestamp()
|
||||
data2['ds'] = pd.to_datetime(data2['ds']).astype('int64')
|
||||
data2['y'] = li
|
||||
print('2nd data2 ds info()')
|
||||
data2['ds'].info()
|
||||
print(data2)
|
||||
|
||||
data2.insert(0, column = "unique_id", value="unique_id")
|
||||
|
||||
print("Forecasting ...")
|
||||
|
||||
forecast = mlforecast.MLForecast(
|
||||
models = lgb.LGBMRegressor(),
|
||||
freq = 1,
|
||||
lags=[365],
|
||||
target_transforms=[Differences([365])],
|
||||
)
|
||||
|
||||
forecast.fit(data2)
|
||||
predicts = forecast.predict(365)
|
||||
|
||||
pd.concat([data2, predicts]).set_index("ds").plot(title = "current data with forecast")
|
||||
plt.show()
|
||||
```
|
||||
|
||||
运行程序输出结果:
|
||||
|
||||

|
||||
|
||||
### 配置 Seeq 数据源连接 TDengine Cloud
|
||||
|
||||
配置 Seeq 数据源连接 TDengine Cloud 和连接 TDengine 本地安装实例没有本质的不同,只要登录 TDengine Cloud 后选择“编程 - Java”并拷贝带 token 字符串的 JDBC 填写为 Seeq Data Source 的 DatabaseJdbcUrl 值。
|
||||
注意使用 TDengine Cloud 时 SQL 命令中需要指定数据库名称。
|
||||
|
||||
#### 用 TDengine Cloud 作为数据源的配置内容示例:
|
||||
|
||||
```
|
||||
{
|
||||
"QueryDefinitions": [
|
||||
{
|
||||
"Name": "CloudVoltage",
|
||||
"Type": "SIGNAL",
|
||||
"Sql": "SELECT ts, voltage FROM test.meters",
|
||||
"Enabled": true,
|
||||
"TestMode": false,
|
||||
"TestQueriesDuringSync": true,
|
||||
"InProgressCapsulesEnabled": false,
|
||||
"Variables": null,
|
||||
"Properties": [
|
||||
{
|
||||
"Name": "Name",
|
||||
"Value": "Voltage",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
},
|
||||
{
|
||||
"Name": "Interpolation Method",
|
||||
"Value": "linear",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
},
|
||||
{
|
||||
"Name": "Maximum Interpolation",
|
||||
"Value": "2day",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
}
|
||||
],
|
||||
"CapsuleProperties": null
|
||||
}
|
||||
],
|
||||
"Type": "GENERIC",
|
||||
"Hostname": null,
|
||||
"Port": 0,
|
||||
"DatabaseName": null,
|
||||
"Username": "root",
|
||||
"Password": "taosdata",
|
||||
"InitialSql": null,
|
||||
"TimeZone": null,
|
||||
"PrintRows": false,
|
||||
"UseWindowsAuth": false,
|
||||
"SqlFetchBatchSize": 100000,
|
||||
"UseSSL": false,
|
||||
"JdbcProperties": null,
|
||||
"GenericDatabaseConfig": {
|
||||
"DatabaseJdbcUrl": "jdbc:TAOS-RS://gw.cloud.taosdata.com?useSSL=true&token=41ac9d61d641b6b334e8b76f45f5a8XXXXXXXXXX",
|
||||
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
|
||||
"ResolutionInNanoseconds": 1000,
|
||||
"ZonedColumnTypes": []
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### TDengine Cloud 作为数据源的 Seeq Workbench 界面示例
|
||||
|
||||

|
||||
|
||||
## 方案总结
|
||||
|
||||
通过集成 Seeq 和 TDengine,可以充分利用 TDengine 高效的存储和查询性能,同时也可以受益于 Seeq 提供给用户的强大数据可视化和分析功能。
|
||||
|
||||
这种集成使用户能够充分利用 TDengine 的高性能时序数据存储和检索,确保高效处理大量数据。同时,Seeq 提供高级分析功能,如数据可视化、异常检测、相关性分析和预测建模,使用户能够获得有价值的洞察并基于数据进行决策。
|
||||
|
||||
综合来看,Seeq 和 TDengine 共同为制造业、工业物联网和电力系统等各行各业的时序数据分析提供了综合解决方案。高效数据存储和先进的分析相结合,赋予用户充分发挥时序数据潜力的能力,推动运营改进,并支持预测和规划分析应用。
|
After Width: | Height: | Size: 13 KiB |
After Width: | Height: | Size: 56 KiB |
After Width: | Height: | Size: 26 KiB |
After Width: | Height: | Size: 47 KiB |
|
@ -10,6 +10,18 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 3.1.1.0
|
||||
|
||||
<Release type="tdengine" version="3.1.1.0" />
|
||||
|
||||
## 3.1.0.3
|
||||
|
||||
<Release type="tdengine" version="3.1.0.3" />
|
||||
|
||||
## 3.1.0.2
|
||||
|
||||
<Release type="tdengine" version="3.1.0.2" />
|
||||
|
||||
## 3.1.0.0
|
||||
|
||||
<Release type="tdengine" version="3.1.0.0" />
|
||||
|
|
|
@ -44,17 +44,17 @@ OS name: "windows 10", version: "10.0", arch: "amd64", family: "windows"
|
|||
<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
|
||||
<!-- 配置本地maven仓库的路径 -->
|
||||
<!-- 配置本地maven仓库的路径 -->
|
||||
<localRepository>D:\apache-maven-localRepository</localRepository>
|
||||
|
||||
<mirrors>
|
||||
<!-- 配置阿里云Maven镜像仓库 -->
|
||||
<mirror>
|
||||
<id>alimaven</id>
|
||||
<name>aliyun maven</name>
|
||||
<url>http://maven.aliyun.com/nexus/content/groups/public/</url>
|
||||
<mirrorOf>central</mirrorOf>
|
||||
</mirror>
|
||||
<mirror>
|
||||
<id>alimaven</id>
|
||||
<name>aliyun maven</name>
|
||||
<url>http://maven.aliyun.com/nexus/content/groups/public/</url>
|
||||
<mirrorOf>central</mirrorOf>
|
||||
</mirror>
|
||||
</mirrors>
|
||||
|
||||
<profiles>
|
||||
|
@ -126,7 +126,7 @@ https://www.taosdata.com/cn/all-downloads/
|
|||
修改client的hosts文件(C:\Windows\System32\drivers\etc\hosts),将server的hostname和ip配置到client的hosts文件中
|
||||
|
||||
```
|
||||
192.168.236.136 td01
|
||||
192.168.236.136 td01
|
||||
```
|
||||
|
||||
配置完成后,在命令行内使用TDengine CLI连接server端
|
||||
|
|
|
@ -133,6 +133,7 @@
|
|||
<configuration>
|
||||
<source>8</source>
|
||||
<target>8</target>
|
||||
<encoding>UTF-8</encoding>
|
||||
</configuration>
|
||||
</plugin>
|
||||
|
||||
|
|
|
@ -8,4 +8,4 @@ java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host <hostname> -data
|
|||
```
|
||||
|
||||
如果发生错误 Exception in thread "main" java.lang.UnsatisfiedLinkError: no taos in java.library.path
|
||||
请检查是否安装 TDengine 客户端安装包或编译 TDengine 安装。如果确定已经安装过还出现这个错误,可以在命令行 java 后加 -Djava.library.path=/usr/local/lib 来指定寻找共享库的路径。
|
||||
请检查是否安装 TDengine 客户端安装包或编译 TDengine 安装。如果确定已经安装过还出现这个错误,可以在命令行 java 后加 -Djava.library.path=/usr/lib 来指定寻找共享库的路径。
|
||||
|
|
|
@ -32,12 +32,12 @@ static void queryDB(TAOS *taos, char *command) {
|
|||
taos_free_result(pSql);
|
||||
pSql = NULL;
|
||||
}
|
||||
|
||||
|
||||
pSql = taos_query(taos, command);
|
||||
code = taos_errno(pSql);
|
||||
if (0 == code) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (code != 0) {
|
||||
|
@ -63,7 +63,7 @@ int main(int argc, char *argv[]) {
|
|||
|
||||
TAOS *taos = taos_connect(argv[1], "root", "taosdata", NULL, 0);
|
||||
if (taos == NULL) {
|
||||
printf("failed to connect to server, reason:%s\n", "null taos"/*taos_errstr(taos)*/);
|
||||
printf("failed to connect to server, reason:%s\n", taos_errstr(NULL));
|
||||
exit(1);
|
||||
}
|
||||
for (int i = 0; i < 100; i++) {
|
||||
|
@ -86,14 +86,14 @@ void Test(TAOS *taos, char *qstr, int index) {
|
|||
for (i = 0; i < 10; ++i) {
|
||||
sprintf(qstr, "insert into m1 values (%" PRId64 ", %d, %d, %d, %d, %f, %lf, '%s')", (uint64_t)(1546300800000 + i * 1000), i, i, i, i*10000000, i*1.0, i*2.0, "hello");
|
||||
printf("qstr: %s\n", qstr);
|
||||
|
||||
|
||||
// note: how do you wanna do if taos_query returns non-NULL
|
||||
// if (taos_query(taos, qstr)) {
|
||||
// printf("insert row: %i, reason:%s\n", i, taos_errstr(taos));
|
||||
// }
|
||||
TAOS_RES *result1 = taos_query(taos, qstr);
|
||||
if (result1 == NULL || taos_errno(result1) != 0) {
|
||||
printf("failed to insert row, reason:%s\n", taos_errstr(result1));
|
||||
printf("failed to insert row, reason:%s\n", taos_errstr(result1));
|
||||
taos_free_result(result1);
|
||||
exit(1);
|
||||
} else {
|
||||
|
@ -107,7 +107,7 @@ void Test(TAOS *taos, char *qstr, int index) {
|
|||
sprintf(qstr, "SELECT * FROM m1");
|
||||
result = taos_query(taos, qstr);
|
||||
if (result == NULL || taos_errno(result) != 0) {
|
||||
printf("failed to select, reason:%s\n", taos_errstr(result));
|
||||
printf("failed to select, reason:%s\n", taos_errstr(result));
|
||||
taos_free_result(result);
|
||||
exit(1);
|
||||
}
|
||||
|
|
|
@ -43,7 +43,7 @@ int main(int argc, char *argv[])
|
|||
taos_free_result(result);
|
||||
|
||||
// create table
|
||||
const char* sql = "create table m1 (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(40), blob nchar(10))";
|
||||
const char* sql = "create table m1 (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(40), blob nchar(10), varbin varbinary(16))";
|
||||
result = taos_query(taos, sql);
|
||||
code = taos_errno(result);
|
||||
if (code != 0) {
|
||||
|
@ -68,6 +68,7 @@ int main(int argc, char *argv[])
|
|||
double f8;
|
||||
char bin[40];
|
||||
char blob[80];
|
||||
int8_t varbin[16];
|
||||
} v = {0};
|
||||
|
||||
int32_t boolLen = sizeof(int8_t);
|
||||
|
@ -80,7 +81,7 @@ int main(int argc, char *argv[])
|
|||
int32_t ncharLen = 30;
|
||||
|
||||
stmt = taos_stmt_init(taos);
|
||||
TAOS_MULTI_BIND params[10];
|
||||
TAOS_MULTI_BIND params[11];
|
||||
params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
|
||||
params[0].buffer_length = sizeof(v.ts);
|
||||
params[0].buffer = &v.ts;
|
||||
|
@ -152,9 +153,19 @@ int main(int argc, char *argv[])
|
|||
params[9].is_null = NULL;
|
||||
params[9].num = 1;
|
||||
|
||||
int8_t tmp[16] = {'a', 0, 1, 13, '1'};
|
||||
int32_t vbinLen = 5;
|
||||
memcpy(v.varbin, tmp, sizeof(v.varbin));
|
||||
params[10].buffer_type = TSDB_DATA_TYPE_VARBINARY;
|
||||
params[10].buffer_length = sizeof(v.varbin);
|
||||
params[10].buffer = v.varbin;
|
||||
params[10].length = &vbinLen;
|
||||
params[10].is_null = NULL;
|
||||
params[10].num = 1;
|
||||
|
||||
char is_null = 1;
|
||||
|
||||
sql = "insert into m1 values(?,?,?,?,?,?,?,?,?,?)";
|
||||
sql = "insert into m1 values(?,?,?,?,?,?,?,?,?,?,?)";
|
||||
code = taos_stmt_prepare(stmt, sql, 0);
|
||||
if (code != 0){
|
||||
printf("failed to execute taos_stmt_prepare. code:0x%x\n", code);
|
||||
|
@ -162,7 +173,7 @@ int main(int argc, char *argv[])
|
|||
v.ts = 1591060628000;
|
||||
for (int i = 0; i < 10; ++i) {
|
||||
v.ts += 1;
|
||||
for (int j = 1; j < 10; ++j) {
|
||||
for (int j = 1; j < 11; ++j) {
|
||||
params[j].is_null = ((i == j) ? &is_null : 0);
|
||||
}
|
||||
v.b = (int8_t)i % 2;
|
||||
|
@ -216,7 +227,7 @@ int main(int argc, char *argv[])
|
|||
printf("expect two rows, but %d rows are fetched\n", rows);
|
||||
}
|
||||
|
||||
taos_free_result(result);
|
||||
// taos_free_result(result);
|
||||
taos_stmt_close(stmt);
|
||||
|
||||
return 0;
|
||||
|
|