Merge branch '3.0' into enh/TD-25601-3.0

Commit 74cddda24b
@@ -16,8 +16,6 @@ set(TD_SUPPORT_DIR "${TD_SOURCE_DIR}/cmake")
set(TD_CONTRIB_DIR "${TD_SOURCE_DIR}/contrib")

include(${TD_SUPPORT_DIR}/cmake.platform)
include(${TD_SUPPORT_DIR}/cmake.define)
include(${TD_SUPPORT_DIR}/cmake.options)

@@ -46,4 +44,4 @@ add_subdirectory(examples/c)
include(${TD_SUPPORT_DIR}/cmake.install)

# docs
add_subdirectory(docs/doxgen)
@@ -314,9 +314,9 @@ def pre_test_build_win() {
cd %WIN_CONNECTOR_ROOT%
python.exe -m pip install --upgrade pip
python -m pip uninstall taospy -y
-python -m pip install taospy==2.7.10
+python -m pip install taospy==2.7.12
python -m pip uninstall taos-ws-py -y
-python -m pip install taos-ws-py==0.2.8
+python -m pip install taos-ws-py==0.3.1
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
'''
return 1

@@ -424,7 +424,7 @@ pipeline {
echo "${WKDIR}/restore.sh -p ${BRANCH_NAME} -n ${BUILD_ID} -c {container name}"
}
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
-timeout(time: 130, unit: 'MINUTES'){
+timeout(time: 150, unit: 'MINUTES'){
pre_test()
script {
sh '''
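As a quick sanity check after the pinned installs above, a small Python sketch (standard library only; the version pins come from the diff, the check itself is illustrative):

```python
from importlib.metadata import version

# Confirm the connector pins from the CI script actually got installed.
for pkg, expected in (("taospy", "2.7.12"), ("taos-ws-py", "0.3.1")):
    installed = version(pkg)
    print(f"{pkg}: installed {installed}, expected {expected}",
          "OK" if installed == expected else "MISMATCH")
```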
@@ -0,0 +1,5 @@
# Security Policy

## Reporting a Vulnerability

Please submit CVE reports to https://github.com/taosdata/TDengine/security/advisories.
@@ -83,6 +83,18 @@ ELSE ()
SET(TAOS_LIB taos)
ENDIF ()

# build TSZ by default
IF ("${TSZ_ENABLED}" MATCHES "false")
set(VAR_TSZ "" CACHE INTERNAL "global variant empty" )
ELSE()
# define add
MESSAGE(STATUS "build with TSZ enabled")
ADD_DEFINITIONS(-DTD_TSZ)
set(VAR_TSZ "TSZ" CACHE INTERNAL "global variant tsz" )
ENDIF()

# force set all platform to JEMALLOC_ENABLED = false
SET(JEMALLOC_ENABLED OFF)
IF (TD_WINDOWS)
MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")

@@ -106,8 +118,6 @@ IF (TD_WINDOWS)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMMON_FLAGS}")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMMON_FLAGS}")

-SET(JEMALLOC_ENABLED OFF)

ELSE ()
IF (${TD_DARWIN})
set(CMAKE_MACOSX_RPATH 0)
@@ -128,11 +128,43 @@ option(
IF(${TD_LINUX})

option(
-BUILD_WITH_COS
-"If build with cos"
+BUILD_S3
+"If build with s3"
ON
)

option(
BUILD_WITH_S3
"If build with s3"
ON
)

option(
BUILD_WITH_COS
"If build with cos"
OFF
)

ENDIF ()

IF(${BUILD_S3})
IF(${BUILD_WITH_S3})
option(BUILD_WITH_COS "If build with cos" OFF)
ELSE ()
option(BUILD_WITH_COS "If build with cos" ON)
ENDIF ()
ELSE ()
option(BUILD_WITH_S3 "If build with s3" OFF)
option(BUILD_WITH_COS "If build with cos" OFF)
ENDIF ()

option(
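The option block above encodes a small decision table: BUILD_S3 is the master switch, and within it BUILD_WITH_S3 and BUILD_WITH_COS are mutually exclusive. An illustrative Python rendering of the same logic (not part of the build):

```python
def storage_options(build_s3: bool, build_with_s3: bool) -> dict:
    """Mirror the CMake defaults: S3 wins over COS, and nothing is
    enabled unless the BUILD_S3 master switch is on."""
    if build_s3:
        return {"BUILD_WITH_S3": build_with_s3,
                "BUILD_WITH_COS": not build_with_s3}
    return {"BUILD_WITH_S3": False, "BUILD_WITH_COS": False}

print(storage_options(True, True))   # S3 backend on, COS off
print(storage_options(False, True))  # master switch off: both disabled
```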
@@ -93,36 +93,42 @@ ELSEIF (${CMAKE_SYSTEM_NAME} MATCHES "Windows")
ENDIF()

IF ("${CPUTYPE}" STREQUAL "")
-IF (CMAKE_SYSTEM_PROCESSOR MATCHES "(amd64)|(AMD64)")
+IF (CMAKE_SYSTEM_PROCESSOR MATCHES "(amd64)|(AMD64)|(x86_64)|(X86_64)")
MESSAGE(STATUS "Current platform is amd64")
SET(PLATFORM_ARCH_STR "amd64")
SET(CPUTYPE "x64")
SET(TD_INTEL_64 TRUE)
ADD_DEFINITIONS("-D_TD_X86_")
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "(x86)|(X86)")
MESSAGE(STATUS "Current platform is x86")
SET(PLATFORM_ARCH_STR "i386")
SET(CPUTYPE "x86")
SET(TD_INTEL_32 TRUE)
ADD_DEFINITIONS("-D_TD_X86_")
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "armv7l")
MESSAGE(STATUS "Current platform is aarch32")
SET(PLATFORM_ARCH_STR "arm")
SET(CPUTYPE "arm32")
SET(TD_ARM_32 TRUE)
ADD_DEFINITIONS("-D_TD_ARM_")
ADD_DEFINITIONS("-D_TD_ARM_32")
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "(aarch64)|(arm64)")
MESSAGE(STATUS "Current platform is aarch64")
SET(PLATFORM_ARCH_STR "arm64")
SET(CPUTYPE "arm64")
SET(TD_ARM_64 TRUE)
ADD_DEFINITIONS("-D_TD_ARM_")
ADD_DEFINITIONS("-D_TD_ARM_64")
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "loongarch64")
MESSAGE(STATUS "The current platform is loongarch64")
SET(PLATFORM_ARCH_STR "loongarch64")
SET(CPUTYPE "loongarch64")
SET(TD_LOONGARCH_64 TRUE)
ADD_DEFINITIONS("-D_TD_LOONGARCH_")
ADD_DEFINITIONS("-D_TD_LOONGARCH_64")
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "mips64")
SET(PLATFORM_ARCH_STR "mips")
SET(CPUTYPE "mips64")
MESSAGE(STATUS "input cpuType: mips64")
SET(TD_MIPS_64 TRUE)
ADD_DEFINITIONS("-D_TD_MIPS_")

@@ -195,6 +201,20 @@ if (TD_LINUX)
ELSE()
set(TD_DEPS_DIR "x86")
ENDIF()
elseif (TD_DARWIN)
IF (TD_ARM_64 OR TD_ARM_32)
set(TD_DEPS_DIR "darwin/arm")
ELSE ()
set(TD_DEPS_DIR "darwin/x64")
ENDIF ()
elseif (TD_WINDOWS)
IF (TD_WINDOWS_64)
set(TD_DEPS_DIR "win/x64")
ELSEIF (TD_WINDOWS_32)
set(TD_DEPS_DIR "win/i386")
ENDIF ()
else ()
MESSAGE(FATAL_ERROR "unsupported platform")
endif()
MESSAGE(STATUS "DEPS_DIR: " ${TD_DEPS_DIR})
@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
-SET(TD_VER_NUMBER "3.2.0.0.alpha")
+SET(TD_VER_NUMBER "3.2.1.0.alpha")
ENDIF ()

IF (DEFINED VERCOMPATIBLE)

@@ -50,7 +50,19 @@ ENDIF ()
IF (DEFINED VERDATE)
SET(TD_VER_DATE ${VERDATE})
ELSE ()
-STRING(TIMESTAMP TD_VER_DATE "%Y-%m-%d %H:%M:%S")
+STRING(COMPARE GREATER_EQUAL "${CMAKE_VERSION}" "3.26" TD_CMAKE_SUPPORT_TZ)
+IF (TD_CMAKE_SUPPORT_TZ)
+STRING(TIMESTAMP TD_VER_DATE "%Y-%m-%d %H:%M:%S %z")
+ELSE ()
+IF (TD_WINDOWS)
+STRING(TIMESTAMP TD_VER_DATE "%Y-%m-%d %H:%M:%S")
+ELSE ()
+EXECUTE_PROCESS(COMMAND date +"%F %T %z" OUTPUT_VARIABLE TD_VER_DATE)
+STRING(REPLACE "\"" "" TD_VER_DATE ${TD_VER_DATE})
+STRING(STRIP ${TD_VER_DATE} TD_VER_DATE)
+ENDIF ()
+ENDIF ()
ENDIF ()

IF (DEFINED VERTYPE)

@@ -67,9 +79,9 @@ ELSE ()
ELSEIF (TD_LINUX_32)
SET(TD_VER_CPUTYPE "x86")
ELSEIF (TD_ARM_32)
-SET(TD_VER_CPUTYPE "x86")
+SET(TD_VER_CPUTYPE "arm32")
ELSEIF (TD_MIPS_32)
-SET(TD_VER_CPUTYPE "x86")
+SET(TD_VER_CPUTYPE "mips32")
ELSE ()
SET(TD_VER_CPUTYPE "x64")
ENDIF ()
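The new version-date logic above exists because `STRING(TIMESTAMP ... %z)` only works on CMake >= 3.26, with a `date +"%F %T %z"` fallback on non-Windows hosts. For reference, the same string shape in Python (format equivalence assumed, not taken from the source):

```python
from datetime import datetime

# Produces e.g. "2023-09-22 08:15:30 +0800", matching the CMake/date output.
print(datetime.now().astimezone().strftime("%Y-%m-%d %H:%M:%S %z"))
```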
@@ -1,17 +1,18 @@
# curl
-ExternalProject_Add(curl
+ExternalProject_Add(curl2
URL https://curl.se/download/curl-8.2.1.tar.gz
URL_HASH MD5=b25588a43556068be05e1624e0e74d41
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
#GIT_REPOSITORY https://github.com/curl/curl.git
#GIT_TAG curl-7_88_1
-SOURCE_DIR "${TD_CONTRIB_DIR}/curl"
+SOURCE_DIR "${TD_CONTRIB_DIR}/curl2"
+DEPENDS openssl
BUILD_IN_SOURCE TRUE
BUILD_ALWAYS 1
-#UPDATE_COMMAND ""
-CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd
-#CONFIGURE_COMMAND ./configure --without-ssl
-BUILD_COMMAND make
+UPDATE_COMMAND ""
+CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.2 --with-ssl=$ENV{HOME}/.cos-local.2 --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd --without-libidn2 --without-nghttp2 #--enable-debug
+BUILD_COMMAND make -j
INSTALL_COMMAND make install
TEST_COMMAND ""
)
@@ -0,0 +1,430 @@
# GNUmakefile
#
# Copyright 2008 Bryan Ischo <bryan@ischo.com>
#
# This file is part of libs3.
#
# libs3 is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, version 3 or above of the License. You can also
# redistribute and/or modify it under the terms of the GNU General Public
# License, version 2 or above of the License.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of this library and its programs with the
# OpenSSL library, and distribute linked combinations including the two.
#
# libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# version 3 along with libs3, in a file named COPYING. If not, see
# <http://www.gnu.org/licenses/>.
#
# You should also have received a copy of the GNU General Public License
# version 2 along with libs3, in a file named COPYING-GPLv2. If not, see
# <http://www.gnu.org/licenses/>.

# I tried to use the autoconf/automake/autolocal/etc (i.e. autohell) tools
# but I just couldn't stomach them. Since this is a Makefile for POSIX
# systems, I will simply do away with autohell completely and use a GNU
# Makefile. GNU make ought to be available pretty much everywhere, so I
# don't see this being a significant issue for portability.

# All commands assume a GNU compiler. For systems which do not use a GNU
# compiler, write scripts with the same names as these commands, and taking
# the same arguments, and translate the arguments and commands into the
# appropriate non-POSIX ones as needed. libs3 assumes a GNU toolchain as
# the most portable way to build software possible. Non-POSIX, non-GNU
# systems can do the work of supporting this build infrastructure.


# --------------------------------------------------------------------------
# Set libs3 version number, unless it is already set.

LIBS3_VER_MAJOR ?= 4
LIBS3_VER_MINOR ?= 1
LIBS3_VER := $(LIBS3_VER_MAJOR).$(LIBS3_VER_MINOR)


# -----------------------------------------------------------------------------
# Determine verbosity. VERBOSE_SHOW should be prepended to every command which
# should only be displayed if VERBOSE is set. QUIET_ECHO may be used to
# echo text only if VERBOSE is not set. Typically, a VERBOSE_SHOW command will
# be paired with a QUIET_ECHO command, to provide a command which is displayed
# in VERBOSE mode, along with text which is displayed in non-VERBOSE mode to
# describe the command.
#
# No matter what VERBOSE is defined to, it ends up as true if it's defined.
# This will be weird if you defined VERBOSE=false in the environment, and we
# switch it to true here; but the meaning of VERBOSE is, "if it's defined to
# any value, then verbosity is turned on". So don't define VERBOSE if you
# don't want verbosity in the build process.
# -----------------------------------------------------------------------------

ifdef VERBOSE
    VERBOSE = true
    VERBOSE_ECHO = @ echo
    VERBOSE_SHOW =
    QUIET_ECHO = @ echo > /dev/null
else
    VERBOSE = false
    VERBOSE_ECHO = @ echo > /dev/null
    VERBOSE_SHOW = @
    QUIET_ECHO = @ echo
endif


# --------------------------------------------------------------------------
# BUILD directory
ifndef BUILD
    ifdef DEBUG
        BUILD := build-debug
    else
        BUILD := build
    endif
endif


# --------------------------------------------------------------------------
# DESTDIR directory
ifndef DESTDIR
    DESTDIR := ${HOME}/.cos-local.2
endif

# --------------------------------------------------------------------------
# LIBDIR directory
ifndef LIBDIR
    LIBDIR := ${DESTDIR}/lib
endif

# --------------------------------------------------------------------------
# Compiler CC handling
ifndef CC
    CC := gcc
endif

# --------------------------------------------------------------------------
# Acquire configuration information for libraries that libs3 depends upon

ifndef CURL_LIBS
    CURL_LIBS := $(shell curl-config --libs)
endif

ifndef CURL_CFLAGS
    CURL_CFLAGS := $(shell curl-config --cflags)
endif

ifndef LIBXML2_LIBS
    LIBXML2_LIBS := $(shell xml2-config --libs)
endif

ifndef LIBXML2_CFLAGS
    LIBXML2_CFLAGS := $(shell xml2-config --cflags)
endif

ifndef OPENSSL_LIBS
    OPENSSL_LIBS := -lssl -lcrypto
endif

# --------------------------------------------------------------------------
# These CFLAGS assume a GNU compiler. For other compilers, write a script
# which converts these arguments into their equivalent for that particular
# compiler.

ifndef CFLAGS
    ifdef DEBUG
        CFLAGS := -g
    else
        CFLAGS := -O3
    endif
endif

CFLAGS += -Wall -Werror -Wshadow -Wextra -Iinc \
          $(CURL_CFLAGS) $(LIBXML2_CFLAGS) \
          -DLIBS3_VER_MAJOR=\"$(LIBS3_VER_MAJOR)\" \
          -DLIBS3_VER_MINOR=\"$(LIBS3_VER_MINOR)\" \
          -DLIBS3_VER=\"$(LIBS3_VER)\" \
          -D__STRICT_ANSI__ \
          -D_ISOC99_SOURCE \
          -D_POSIX_C_SOURCE=200112L

LDFLAGS = $(CURL_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) -lpthread

STRIP ?= strip
INSTALL := install --strip-program=$(STRIP)


# --------------------------------------------------------------------------
# Default targets are everything

.PHONY: all
all: exported test


# --------------------------------------------------------------------------
# Exported targets are the library and driver program

.PHONY: exported
exported: libs3 s3 headers
exported_static: $(LIBS3_STATIC)

# --------------------------------------------------------------------------
# Install target

.PHONY: install install_static
install_static: exported_static
	$(QUIET_ECHO) $(LIBDIR)/libs3.a: Installing static library
	$(VERBOSE_SHOW) $(INSTALL) -Dp -m u+rw,go+r $(BUILD)/lib/libs3.a \
	    $(LIBDIR)/libs3.a
	$(QUIET_ECHO) $(DESTDIR)/include/libs3.h: Installing header
	$(VERBOSE_SHOW) $(INSTALL) -Dp -m u+rw,go+r inc/libs3.h \
	    $(DESTDIR)/include/libs3.h

install: exported
	$(QUIET_ECHO) $(DESTDIR)/bin/s3: Installing executable
	$(VERBOSE_SHOW) $(INSTALL) -Dps -m u+rwx,go+rx $(BUILD)/bin/s3 \
	    $(DESTDIR)/bin/s3
	$(QUIET_ECHO) \
	    $(LIBDIR)/libs3.so.$(LIBS3_VER): Installing shared library
	$(VERBOSE_SHOW) $(INSTALL) -Dps -m u+rw,go+r \
	    $(BUILD)/lib/libs3.so.$(LIBS3_VER_MAJOR) \
	    $(LIBDIR)/libs3.so.$(LIBS3_VER)
	$(QUIET_ECHO) \
	    $(LIBDIR)/libs3.so.$(LIBS3_VER_MAJOR): Linking shared library
	$(VERBOSE_SHOW) ln -sf libs3.so.$(LIBS3_VER) \
	    $(LIBDIR)/libs3.so.$(LIBS3_VER_MAJOR)
	$(QUIET_ECHO) $(LIBDIR)/libs3.so: Linking shared library
	$(VERBOSE_SHOW) ln -sf libs3.so.$(LIBS3_VER_MAJOR) $(LIBDIR)/libs3.so
	$(QUIET_ECHO) $(LIBDIR)/libs3.a: Installing static library
	$(VERBOSE_SHOW) $(INSTALL) -Dp -m u+rw,go+r $(BUILD)/lib/libs3.a \
	    $(LIBDIR)/libs3.a
	$(QUIET_ECHO) $(DESTDIR)/include/libs3.h: Installing header
	$(VERBOSE_SHOW) $(INSTALL) -Dp -m u+rw,go+r $(BUILD)/include/libs3.h \
	    $(DESTDIR)/include/libs3.h


# --------------------------------------------------------------------------
# Uninstall target

.PHONY: uninstall
uninstall:
	$(QUIET_ECHO) Installed files: Uninstalling
	$(VERBOSE_SHOW) \
	    rm -f $(DESTDIR)/bin/s3 \
	          $(DESTDIR)/include/libs3.h \
	          $(DESTDIR)/lib/libs3.a \
	          $(DESTDIR)/lib/libs3.so \
	          $(DESTDIR)/lib/libs3.so.$(LIBS3_VER_MAJOR) \
	          $(DESTDIR)/lib/libs3.so.$(LIBS3_VER)


# --------------------------------------------------------------------------
# Compile target patterns

$(BUILD)/obj/%.o: src/%.c
	$(QUIET_ECHO) $@: Compiling object
	@ mkdir -p $(dir $(BUILD)/dep/$<)
	@ $(CC) $(CFLAGS) -M -MG -MQ $@ -DCOMPILINGDEPENDENCIES \
	    -o $(BUILD)/dep/$(<:%.c=%.d) -c $<
	@ mkdir -p $(dir $@)
	$(VERBOSE_SHOW) $(CC) $(CFLAGS) -o $@ -c $<

$(BUILD)/obj/%.do: src/%.c
	$(QUIET_ECHO) $@: Compiling dynamic object
	$(QUIET_ECHO) cflags:${CFLAGS}
	@ mkdir -p $(dir $(BUILD)/dep/$<)
	@ $(CC) $(CFLAGS) -M -MG -MQ $@ -DCOMPILINGDEPENDENCIES \
	    -o $(BUILD)/dep/$(<:%.c=%.dd) -c $<
	@ mkdir -p $(dir $@)
	$(VERBOSE_SHOW) $(CC) $(CFLAGS) -fpic -fPIC -o $@ -c $<


# --------------------------------------------------------------------------
# libs3 library targets

LIBS3_SHARED = $(BUILD)/lib/libs3.so.$(LIBS3_VER_MAJOR)
LIBS3_STATIC = $(BUILD)/lib/libs3.a

.PHONY: libs3
libs3: $(LIBS3_SHARED) $(LIBS3_STATIC)

LIBS3_SOURCES := bucket.c bucket_metadata.c error_parser.c general.c \
                 object.c request.c request_context.c \
                 response_headers_handler.c service_access_logging.c \
                 service.c simplexml.c util.c multipart.c

$(LIBS3_SHARED): $(LIBS3_SOURCES:%.c=$(BUILD)/obj/%.do)
	$(QUIET_ECHO) $@: Building shared library
	@ mkdir -p $(dir $@)
	$(VERBOSE_SHOW) $(CC) -shared -Wl,-soname,libs3.so.$(LIBS3_VER_MAJOR) \
	    -o $@ $^ $(LDFLAGS)

$(LIBS3_STATIC): $(LIBS3_SOURCES:%.c=$(BUILD)/obj/%.o)
	$(QUIET_ECHO) $@: Building static library
	@ mkdir -p $(dir $@)
	$(VERBOSE_SHOW) $(AR) cr $@ $^


# --------------------------------------------------------------------------
# Driver program targets

.PHONY: s3
s3: $(BUILD)/bin/s3

$(BUILD)/bin/s3: $(BUILD)/obj/s3.o $(LIBS3_SHARED)
	$(QUIET_ECHO) $@: Building executable
	@ mkdir -p $(dir $@)
	$(VERBOSE_SHOW) $(CC) -o $@ $^ $(LDFLAGS)


# --------------------------------------------------------------------------
# libs3 header targets

.PHONY: headers
headers: $(BUILD)/include/libs3.h

$(BUILD)/include/libs3.h: inc/libs3.h
	$(QUIET_ECHO) $@: Linking header
	@ mkdir -p $(dir $@)
	$(VERBOSE_SHOW) ln -sf $(abspath $<) $@


# --------------------------------------------------------------------------
# Test targets

.PHONY: test
test: $(BUILD)/bin/testsimplexml

$(BUILD)/bin/testsimplexml: $(BUILD)/obj/testsimplexml.o $(LIBS3_STATIC)
	$(QUIET_ECHO) $@: Building executable
	@ mkdir -p $(dir $@)
	$(VERBOSE_SHOW) $(CC) -o $@ $^ $(LIBXML2_LIBS)


# --------------------------------------------------------------------------
# Clean target

.PHONY: clean
clean:
	$(QUIET_ECHO) $(BUILD): Cleaning
	$(VERBOSE_SHOW) rm -rf $(BUILD)

.PHONY: distclean
distclean:
	$(QUIET_ECHO) $(BUILD): Cleaning
	$(VERBOSE_SHOW) rm -rf $(BUILD)


# --------------------------------------------------------------------------
# Clean dependencies target

.PHONY: cleandeps
cleandeps:
	$(QUIET_ECHO) $(BUILD)/dep: Cleaning dependencies
	$(VERBOSE_SHOW) rm -rf $(BUILD)/dep


# --------------------------------------------------------------------------
# Dependencies

ALL_SOURCES := $(LIBS3_SOURCES) s3.c testsimplexml.c

$(foreach i, $(ALL_SOURCES), $(eval -include $(BUILD)/dep/src/$(i:%.c=%.d)))
$(foreach i, $(ALL_SOURCES), $(eval -include $(BUILD)/dep/src/$(i:%.c=%.dd)))


# --------------------------------------------------------------------------
# Debian package target

DEBPKG = $(BUILD)/pkg/libs3_$(LIBS3_VER).deb
DEBDEVPKG = $(BUILD)/pkg/libs3-dev_$(LIBS3_VER).deb

.PHONY: deb
deb: $(DEBPKG) $(DEBDEVPKG)

$(DEBPKG): DEBARCH = $(shell dpkg-architecture | grep ^DEB_BUILD_ARCH= | \
                       cut -d '=' -f 2)
$(DEBPKG): exported $(BUILD)/deb/DEBIAN/control $(BUILD)/deb/DEBIAN/shlibs \
           $(BUILD)/deb/DEBIAN/postinst \
           $(BUILD)/deb/usr/share/doc/libs3/changelog.gz \
           $(BUILD)/deb/usr/share/doc/libs3/changelog.Debian.gz \
           $(BUILD)/deb/usr/share/doc/libs3/copyright
	DESTDIR=$(BUILD)/deb/usr $(MAKE) install
	rm -rf $(BUILD)/deb/usr/include
	rm -f $(BUILD)/deb/usr/lib/libs3.a
	@mkdir -p $(dir $@)
	fakeroot dpkg-deb -b $(BUILD)/deb $@
	mv $@ $(BUILD)/pkg/libs3_$(LIBS3_VER)_$(DEBARCH).deb

$(DEBDEVPKG): DEBARCH = $(shell dpkg-architecture | grep ^DEB_BUILD_ARCH= | \
                          cut -d '=' -f 2)
$(DEBDEVPKG): exported $(BUILD)/deb-dev/DEBIAN/control \
           $(BUILD)/deb-dev/usr/share/doc/libs3-dev/changelog.gz \
           $(BUILD)/deb-dev/usr/share/doc/libs3-dev/changelog.Debian.gz \
           $(BUILD)/deb-dev/usr/share/doc/libs3-dev/copyright
	DESTDIR=$(BUILD)/deb-dev/usr $(MAKE) install
	rm -rf $(BUILD)/deb-dev/usr/bin
	rm -f $(BUILD)/deb-dev/usr/lib/libs3.so*
	@mkdir -p $(dir $@)
	fakeroot dpkg-deb -b $(BUILD)/deb-dev $@
	mv $@ $(BUILD)/pkg/libs3-dev_$(LIBS3_VER)_$(DEBARCH).deb

$(BUILD)/deb/DEBIAN/control: debian/control
	@mkdir -p $(dir $@)
	echo -n "Depends: " > $@
	dpkg-shlibdeps -Sbuild -O $(BUILD)/lib/libs3.so.$(LIBS3_VER_MAJOR) | \
	    cut -d '=' -f 2- >> $@
	sed -e 's/LIBS3_VERSION/$(LIBS3_VER)/' \
	    < $< | sed -e 's/DEBIAN_ARCHITECTURE/$(DEBARCH)/' | \
	    grep -v ^Source: >> $@

$(BUILD)/deb-dev/DEBIAN/control: debian/control.dev
	@mkdir -p $(dir $@)
	sed -e 's/LIBS3_VERSION/$(LIBS3_VER)/' \
	    < $< | sed -e 's/DEBIAN_ARCHITECTURE/$(DEBARCH)/' > $@

$(BUILD)/deb/DEBIAN/shlibs:
	echo -n "libs3 $(LIBS3_VER_MAJOR) libs3 " > $@
	echo "(>= $(LIBS3_VER))" >> $@

$(BUILD)/deb/DEBIAN/postinst: debian/postinst
	@mkdir -p $(dir $@)
	cp $< $@

$(BUILD)/deb/usr/share/doc/libs3/copyright: LICENSE
	@mkdir -p $(dir $@)
	cp $< $@
	@echo >> $@
	@echo -n "An alternate location for the GNU General Public " >> $@
	@echo "License version 3 on Debian" >> $@
	@echo "systems is /usr/share/common-licenses/GPL-3." >> $@

$(BUILD)/deb-dev/usr/share/doc/libs3-dev/copyright: LICENSE
	@mkdir -p $(dir $@)
	cp $< $@
	@echo >> $@
	@echo -n "An alternate location for the GNU General Public " >> $@
	@echo "License version 3 on Debian" >> $@
	@echo "systems is /usr/share/common-licenses/GPL-3." >> $@

$(BUILD)/deb/usr/share/doc/libs3/changelog.gz: debian/changelog
	@mkdir -p $(dir $@)
	gzip --best -c $< > $@

$(BUILD)/deb-dev/usr/share/doc/libs3-dev/changelog.gz: debian/changelog
	@mkdir -p $(dir $@)
	gzip --best -c $< > $@

$(BUILD)/deb/usr/share/doc/libs3/changelog.Debian.gz: debian/changelog.Debian
	@mkdir -p $(dir $@)
	gzip --best -c $< > $@

$(BUILD)/deb-dev/usr/share/doc/libs3-dev/changelog.Debian.gz: \
    debian/changelog.Debian
	@mkdir -p $(dir $@)
	gzip --best -c $< > $@
@@ -0,0 +1,16 @@
# libs3

ExternalProject_Add(libs3
GIT_REPOSITORY https://github.com/bji/libs3
#GIT_TAG v5.0.16
DEPENDS curl2 xml2
SOURCE_DIR "${TD_CONTRIB_DIR}/libs3"
#BINARY_DIR ""
BUILD_IN_SOURCE TRUE
BUILD_ALWAYS 1
UPDATE_COMMAND ""
CONFIGURE_COMMAND cp ${TD_SUPPORT_DIR}/libs3.GNUmakefile GNUmakefile && sed -i "s|CFLAGS += -Wall -Werror|CFLAGS += -I'$ENV{HOME}/.cos-local.2/include' -L'$ENV{HOME}/.cos-local.2/lib' |" ./GNUmakefile
BUILD_COMMAND make build/lib/libs3.a
INSTALL_COMMAND make install_static
TEST_COMMAND ""
)
@@ -0,0 +1,15 @@
# openssl
ExternalProject_Add(openssl
URL https://www.openssl.org/source/openssl-3.1.3.tar.gz
URL_HASH SHA256=f0316a2ebd89e7f2352976445458689f80302093788c466692fb2a188b2eacf6
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
SOURCE_DIR "${TD_CONTRIB_DIR}/openssl"
BUILD_IN_SOURCE TRUE
#BUILD_ALWAYS 1
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ./Configure --prefix=$ENV{HOME}/.cos-local.2 no-shared
BUILD_COMMAND make -j
INSTALL_COMMAND make install_sw -j
TEST_COMMAND ""
)
@@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
-GIT_TAG main
+GIT_TAG 3.0
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
@@ -0,0 +1,18 @@

# xml2
ExternalProject_Add(xml2
URL https://download.gnome.org/sources/libxml2/2.11/libxml2-2.11.5.tar.xz
URL_HASH SHA256=3727b078c360ec69fa869de14bd6f75d7ee8d36987b071e6928d4720a28df3a6
#https://github.com/GNOME/libxml2/archive/refs/tags/v2.11.5.tar.gz
#GIT_REPOSITORY https://github.com/GNOME/libxml2
#GIT_TAG v2.11.5
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
SOURCE_DIR "${TD_CONTRIB_DIR}/xml2"
#BINARY_DIR ""
BUILD_IN_SOURCE TRUE
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.2 --enable-shared=no --enable-static=yes --without-python --without-lzma
BUILD_COMMAND make -j
INSTALL_COMMAND make install && ln -s $ENV{HOME}/.cos-local.2/include/libxml2/libxml $ENV{HOME}/.cos-local.2/include/libxml
TEST_COMMAND ""
)
@@ -6,7 +6,10 @@ function(cat IN_FILE OUT_FILE)
file(APPEND ${OUT_FILE} "${CONTENTS}")
endfunction(cat IN_FILE OUT_FILE)

if(${TD_LINUX})
if(${BUILD_WITH_S3})
file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.2/)
elseif(${BUILD_WITH_COS})
set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})

@@ -37,7 +40,8 @@ execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")

endif(${TD_LINUX})
endif()

set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@@ -155,15 +159,24 @@ if(${BUILD_WITH_SQLITE})
cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${BUILD_WITH_SQLITE})

# s3
if(${BUILD_WITH_S3})
cat("${TD_SUPPORT_DIR}/ssl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/xml2_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/libs3_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_S3)

# cos
-if(${BUILD_WITH_COS})
+elseif(${BUILD_WITH_COS})
#cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
#cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
#cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
#cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_COS)
endif(${BUILD_WITH_COS})

endif()

# lucene
if(${BUILD_WITH_LUCENE})

@@ -231,7 +244,6 @@ if(${BUILD_TEST})
)
endif(${TD_DARWIN})

endif(${BUILD_TEST})

# cJson
@@ -248,6 +260,11 @@ target_include_directories(
)
unset(CMAKE_PROJECT_INCLUDE_BEFORE)

# xml2
#if(${BUILD_WITH_S3})
#  add_subdirectory(xml2 EXCLUDE_FROM_ALL)
#endif()

# lz4
add_subdirectory(lz4/build/cmake EXCLUDE_FROM_ALL)
target_include_directories(
@@ -390,16 +407,20 @@ if (${BUILD_WITH_ROCKSDB})
endif()
endif()

if(${BUILD_WITH_S3})
INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.2/include)
MESSAGE("build with s3: ${BUILD_WITH_S3}")

# cos
-if(${BUILD_WITH_COS})
+elseif(${BUILD_WITH_COS})
if(${TD_LINUX})
set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1)
#ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a)
option(ENABLE_TEST "Enable the tests" OFF)
INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include)
-MESSAGE("$ENV{HOME}/.cos-local.1/include")
+#MESSAGE("$ENV{HOME}/.cos-local.1/include")

-set(CMAKE_BUILD_TYPE debug)
+set(CMAKE_BUILD_TYPE Release)
set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME})
set(CMAKE_PROJECT_NAME cos_c_sdk)
@@ -413,7 +434,8 @@ if(${BUILD_WITH_COS})
else()
endif(${TD_LINUX})
endif(${BUILD_WITH_COS})
endif()

# lucene
# To support build on ubuntu: sudo apt-get install libboost-all-dev

Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -4,11 +4,11 @@ description: This document introduces the major features, competitive advantages
toc_max_heading_level: 2
---

-TDengine is an [open source](https://tdengine.com/tdengine/open-source-time-series-database/), [high-performance](https://tdengine.com/tdengine/high-performance-time-series-database/), [cloud native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](../develop/cache), [stream processing](../develop/stream), [data subscription](../develop/tmq) and other functionalities to reduce the system complexity and cost of development and operation.
+TDengine is a big data platform designed and optimized for IoT (Internet of Things) and the Industrial Internet. It can safely and effectively converge, store, process and distribute the high volume of data (TB or even PB) generated every day by many devices and data acquisition units, monitor and alert on business operation status in real time, and provide real-time business insight. The core component of TDengine is TDengine OSS, a high-performance, open-source, cloud-native and simplified time-series database.

This section introduces the major features, competitive advantages, typical use-cases and benchmarks to help you get a high level overview of TDengine.

-## Major Features
+## Major Features of TDengine OSS

The major features are listed below:

@@ -132,3 +132,9 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
- [Introduction to Time-Series Database](https://tdengine.com/tsdb/)
- [Introduction to TDengine competitive advantages](https://tdengine.com/tdengine/)

## Products

There are two products offered by TDengine: TDengine Enterprise and TDengine Cloud; for details please refer to
- [TDengine Enterprise](https://www.taosdata.com/tdengine-pro)
- [TDengine Cloud](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)
@@ -221,7 +221,7 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive
&& tar -xzf php-tdengine.tar.gz -C php-tdengine --strip-components=1
```

-> Version number `v1.0.2` is only for example, it can be replaced to any newer version, please check available version from [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).
+> Version number `v1.0.2` is only for example, it can be replaced to any newer version.

**Non-Swoole Environment: **
@@ -55,7 +55,7 @@ At most 4096 columns are allowed in a STable. If there are more than 4096 of met

## Create Table

-A specific table needs to be created for each data collection point. Similar to RDBMS, table name and schema are required to create a table. Additionally, one or more tags can be created for each table. To create a table, a STable needs to be used as template and the values need to be specified for the tags. For example, for the meters in [Table 1](/tdinternal/arch#model_table1), the table can be created using below SQL statement.
+A specific table needs to be created for each data collection point. Similar to RDBMS, table name and schema are required to create a table. Additionally, one or more tags can be created for each table. To create a table, a STable needs to be used as a template and the values need to be specified for the tags. For example, for the smart meters table, the table can be created using the SQL statement below.

```sql
CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2);
@@ -38,7 +38,10 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
- All the data in `tag_set` will be converted to NCHAR type automatically
- Each data in `field_set` must be self-descriptive for its data type. For example 1.2f32 means a value 1.2 of float type. Without the "f" type suffix, it will be treated as type double
- Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h)
-- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table names if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored
+- The rules of table name:
+  - The child table name is created automatically in a rule to guarantee its uniqueness.
+  - You can configure `smlAutoChildTableNameDelimiter` in taos.cfg to specify a delimiter that joins tag values into the table name (see the sketch after this note). For example, if you set `smlAutoChildTableNameDelimiter=-` in taos.cfg and insert `st,t0=cpu1,t1=4 c1=3 1626006833639000000`, the child table will be named `cpu1-4`.
+  - You can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
- It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur. (smlDataFormat defaults to false since version 3.0.1.3; the parameter is deprecated since 3.0.3.0.)

:::
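To make the naming rules above concrete, here is a minimal sketch of writing the example record with the Python connector. It assumes taospy's schemaless API (`schemaless_insert` with `SmlProtocol`/`SmlPrecision`) and a local server, so treat the connection details and enum names as illustrative:

```python
import taos
from taos import SmlProtocol, SmlPrecision

conn = taos.connect(host="localhost", user="root", password="taosdata")
conn.execute("CREATE DATABASE IF NOT EXISTS test")
conn.select_db("test")

# With smlAutoChildTableNameDelimiter=- in taos.cfg, this row lands in a
# child table named cpu1-4 (tag values joined by the delimiter).
lines = ["st,t0=cpu1,t1=4 c1=3 1626006833639000000"]
conn.schemaless_insert(lines, SmlProtocol.LINE_PROTOCOL, SmlPrecision.NOT_CONFIGURED)
conn.close()
```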
@@ -33,7 +33,10 @@ For example:
meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
```

-- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table names if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
+- The rules of table name:
+  - The child table name is created automatically in a rule to guarantee its uniqueness.
+  - You can configure `smlAutoChildTableNameDelimiter` in taos.cfg to specify a delimiter that joins tag values into the table name. For example, if you set `smlAutoChildTableNameDelimiter=-` in taos.cfg and insert `st,t0=cpu1,t1=4 c1=3 1626006833639000000`, the child table will be named `cpu1-4`.
+  - You can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.

Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_telnet/put.html) for more details.
@@ -48,7 +48,10 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http
:::note

- In JSON protocol, strings will be converted to NCHAR type and numeric values will be converted to double type.
-- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table names if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
+- The rules of table name:
+  - The child table name is created automatically in a rule to guarantee its uniqueness.
+  - You can configure `smlAutoChildTableNameDelimiter` in taos.cfg to specify a delimiter that joins tag values into the table name. For example, if you set `smlAutoChildTableNameDelimiter=-` in taos.cfg and insert `st,t0=cpu1,t1=4 c1=3 1626006833639000000`, the child table will be named `cpu1-4`.
+  - You can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.

:::
@@ -23,20 +23,30 @@ By subscribing to a topic, a consumer can obtain the latest data in that topic i
To implement these features, TDengine indexes its write-ahead log (WAL) file for fast random access and provides configurable methods for replacing and retaining this file. You can define a retention period and size for this file. For information, see the CREATE DATABASE statement. In this way, the WAL file is transformed into a persistent storage engine that remembers the order in which events occur. However, note that configuring an overly long retention period for your WAL files makes database compression inefficient. TDengine then uses the WAL file instead of the time-series database as its storage engine for queries in the form of topics. TDengine reads the data from the WAL file; uses a unified query engine instance to perform filtering, transformations, and other operations; and finally pushes the data to consumers.
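Since the WAL doubles as the storage engine for topics, its retention settings decide how far back consumers can read. A hedged sketch of setting them at database creation time (parameter names from the CREATE DATABASE statement; host and values illustrative):

```python
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")
# Retain WAL files for one day or up to 1024 KB per vgroup, whichever
# limit is hit first; consumers must keep up within this window.
conn.execute(
    "CREATE DATABASE IF NOT EXISTS power "
    "WAL_RETENTION_PERIOD 86400 WAL_RETENTION_SIZE 1024"
)
conn.close()
```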

-Tips:(c interface for example)
-1. A consumption group consumes all data under the same topic, and different consumption groups are independent of each other;
-2. A consumption group consumes all vgroups of the same topic, which can be composed of multiple consumers, but a vgroup is only consumed by one consumer. If the number of consumers exceeds the number of vgroups, the excess consumers do not consume data;
-3. On the server side, only one offset is saved for each vgroup, and the offsets for each vgroup are monotonically increasing, but not necessarily continuous. There is no correlation between the offsets of various vgroups;
-4. Each poll server will return a result block, which belongs to a vgroup and may contain data from multiple versions of wal. This block can be accessed through tmq_get_vgroup_offset. The offset interface obtains the offset of the first record in the block;
-5. If a consumer group has never committed an offset, when its member consumers restart and pull data again, they start consuming from the set value of the parameter auto.offset.reset; In a consumer lifecycle, the client locally records the offset of the most recent pull data and will not pull duplicate data;
-6. If a consumer terminates abnormally (without calling tmq_close), they need to wait for about 12 seconds to trigger their consumer group rebalance. The consumer's status on the server will change to LOST, and after about 1 day, the consumer will be automatically deleted; Exit normally, and after exiting, the consumer will be deleted; Add a new consumer, wait for about 2 seconds to trigger Rebalance, and the consumer's status on the server will change to ready;
-7. The consumer group Rebalance will reassign Vgroups to all consumer members in the ready state of the group, and consumers can only assign/see/commit/poll operations to the Vgroups they are responsible for;
-8. Consumers can tmq_position to obtain the offset of the current consumption, seek to the specified offset, and consume again;
-9. Seek points the position to the specified offset without executing the commit operation. Once the seek is successful, it can poll the specified offset and subsequent data;
-10. Before the seek operation, tmq must be call tmq_get_topic_assignment, The assignment interface obtains the vgroup ID and offset range of the consumer. The seek operation will detect whether the vgroup ID and offset are legal, and if they are illegal, an error will be reported;
-11. Due to the existence of a WAL expiration deletion mechanism, even if the seek operation is successful, it is possible that the offset has expired when polling data. If the offset of poll is less than the WAL minimum version number, it will be consumed from the WAL minimum version number;
-12. The tmq_get_vgroup_offset interface obtains the offset of the first data in the result block where the record is located. When seeking to this offset, it will consume all the data in this block. Refer to point four;
-13. Data subscription is to consume data from the wal. If some wal files are deleted according to WAL retention policy, the deleted data can't be consumed any more. So you need to set a reasonable value for parameter `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database and make sure your application consume the data in a timely way to make sure there is no data loss. This behavior is similar to Kafka and other widely used message queue products.
+The following are some explanations about data subscription, which require some understanding of the architecture of TDengine and the various language connector interfaces.
+- A consumer group consumes all data under the same topic, and different consumer groups are independent of each other;
+- A consumer group consumes all vgroups of the same topic and can be composed of multiple consumers, but a vgroup is only consumed by one consumer. If the number of consumers exceeds the number of vgroups, the excess consumers do not consume data;
+- On the server side, only one offset is saved for each vgroup, and the offsets for each vgroup are monotonically increasing, but not necessarily continuous. There is no correlation between the offsets of different vgroups;
+- Each poll returns a result block that belongs to one vgroup and may contain data from multiple WAL versions. The offset interface obtains the offset of the first record in the block;
+- If a consumer group has never committed an offset, its member consumers start consuming from the value of the parameter auto.offset.reset when they restart and pull data again. Within a consumer lifecycle, the client locally records the offset of the most recently pulled data and will not pull duplicate data;
+- If a consumer terminates abnormally (without calling tmq_close), it takes about 12 seconds to trigger a consumer group rebalance; the consumer's status on the server changes to LOST, and after about 1 day the consumer is automatically deleted. After a normal exit, the consumer is deleted immediately. When a new consumer is added, wait about 2 seconds for a rebalance; the consumer's status on the server then changes to ready;
+- The consumer group rebalance reassigns vgroups to all consumer members in the ready state of the group, and consumers can only perform assign/seek/commit/poll operations on the vgroups they are responsible for;
+- Consumers can call the position interface to obtain the offset of the current consumption, seek to a specified offset, and consume again;
+- Seek points the position to the specified offset without executing a commit. Once the seek succeeds, poll returns the data at the specified offset and after it;
+- Position obtains the current consumption position, which is the position to be read next time, not the position already consumed;
+- Commit submits the consumption offset. Without a parameter, it submits the current position (the position to be read next time, not the position already consumed); with a parameter, it submits the offset given in the parameter (i.e. the position to be read after the next exit and restart);
+- Seek sets the consumer's position; wherever seek points, position returns that offset, which is always the position to be read next time;
+- Seek does not affect commit, and commit does not affect seek; the two are independent and different concepts;
+- The begin interface returns the offset of the first data in the WAL, and the end interface returns the offset+1 of the last data in the WAL;
+- Before a seek operation, the assignment interface must be called; it obtains the vgroup ID and offset range of the consumer. The seek operation checks whether the vgroup ID and offset are legal and reports an error if they are not;
+- Because of the WAL expiration/deletion mechanism, even if a seek succeeds, the offset may have expired by the time data is polled. If the poll offset is less than the WAL minimum version number, consumption starts from the WAL minimum version number;
+- The offset interface obtains the offset of the first data in the result block where the record is located. When seeking to this offset, all the data in that block is consumed. Refer to the fourth point above;
+- Data subscription consumes data from the WAL. If some WAL files are deleted according to the WAL retention policy, the deleted data can no longer be consumed. So you need to set a reasonable value for the `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` parameter when creating the database, and make sure your application consumes the data in a timely way so that there is no data loss. This behavior is similar to Kafka and other widely used message queue products.

This document does not introduce message queue concepts any further; if you need more background, please consult dedicated references.

Starting from version 3.2.0.0, data subscription supports vnode migration and splitting.
Because data subscription depends on WAL files, and the WAL is not synchronized during vnode migration and splitting, previously unconsumed WAL data cannot be consumed after migration or splitting. Please make sure all data has been consumed before you migrate or split a vnode; otherwise data loss may occur during consumption.
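A compact Python sketch tying the notes above together: polling, inspecting per-vgroup assignments, and committing explicitly. It assumes taospy's `taos.tmq.Consumer` exposes `assignment()` and `commit()` as in recent releases, so verify the method names against your connector version:

```python
from taos.tmq import Consumer

consumer = Consumer(
    {
        "group.id": "grp1",
        "td.connect.ip": "127.0.0.1",
        "auto.offset.reset": "latest",   # default since 3.2.0.0
        "enable.auto.commit": "false",   # commit manually, as below
    }
)
consumer.subscribe(["topic_meters"])

message = consumer.poll(1)  # one result block from one vgroup
if message is not None:
    # Each vgroup keeps its own monotonically increasing offset.
    for part in consumer.assignment():
        print(part)           # vgroup id plus begin/current/end offsets
    consumer.commit(message)  # the position to be read next time

consumer.close()
```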
## Data Schema and API
@@ -342,10 +352,11 @@ You configure the following parameters when creating a consumer:
| `td.connect.port` | string | Port of the server side | |
| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. Each topic can create up to 100 consumer groups. |
| `client.id` | string | Client ID | Maximum length: 192. |
-| `auto.offset.reset` | enum | Initial offset for the consumer group | `earliest`: subscribe from the earliest data, this is the default behavior; `latest`: subscribe from the latest data; or `none`: can't subscribe without committed offset|
+| `auto.offset.reset` | enum | Initial offset for the consumer group | `earliest`: subscribe from the earliest data, the default behavior before version 3.2.0.0; `latest`: subscribe from the latest data, the default behavior since version 3.2.0.0; or `none`: can't subscribe without a committed offset |
| `enable.auto.commit` | boolean | Commit automatically; true: user application doesn't need to explicitly commit; false: user application needs to handle commit by itself | Default value is true |
| `auto.commit.interval.ms` | integer | Interval for automatic commits, in milliseconds | |
-| `msg.with.table.name` | boolean | Specify whether to deserialize table names from messages | default value: false |
+| `msg.with.table.name` | boolean | Specify whether to deserialize table names from messages. Not applicable when subscribing to a column (tbname can be written as a column in the subquery statement during column subscriptions). (This parameter is deprecated since version 3.2.0.0 and remains true.) | default value: false |
| `enable.replay` | boolean | Specify whether the data replay function is enabled | default value: false |

The method of specifying these parameters depends on the language used:
@@ -361,7 +372,7 @@ tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
tmq_conf_set(conf, "group.id", "cgrpName");
tmq_conf_set(conf, "td.connect.user", "root");
tmq_conf_set(conf, "td.connect.pass", "taosdata");
-tmq_conf_set(conf, "auto.offset.reset", "earliest");
+tmq_conf_set(conf, "auto.offset.reset", "latest");
tmq_conf_set(conf, "msg.with.table.name", "true");
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
@@ -391,7 +402,7 @@ properties.setProperty("group.id", "cgrpName");
properties.setProperty("bootstrap.servers", "127.0.0.1:6030");
properties.setProperty("td.connect.user", "root");
properties.setProperty("td.connect.pass", "taosdata");
-properties.setProperty("auto.offset.reset", "earliest");
+properties.setProperty("auto.offset.reset", "latest");
properties.setProperty("msg.with.table.name", "true");
properties.setProperty("value.deserializer", "com.taos.example.MetersDeserializer");
@@ -411,7 +422,7 @@ public class MetersDeserializer extends ReferenceDeserializer<Meters> {
```go
conf := &tmq.ConfigMap{
"group.id": "test",
-"auto.offset.reset": "earliest",
+"auto.offset.reset": "latest",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
@@ -431,7 +442,7 @@ consumer, err := NewConsumer(conf)
let mut dsn: Dsn = "taos://".parse()?;
dsn.set("group.id", "group1");
dsn.set("client.id", "test");
-dsn.set("auto.offset.reset", "earliest");
+dsn.set("auto.offset.reset", "latest");

let tmq = TmqBuilder::from_dsn(dsn)?;
@@ -448,7 +459,19 @@ from taos.tmq import Consumer
# Syntax: `consumer = Consumer(configs)`
#
# Example:
-consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
+consumer = Consumer(
+    {
+        "group.id": "local",
+        "client.id": "1",
+        "enable.auto.commit": "true",
+        "auto.commit.interval.ms": "1000",
+        "td.connect.ip": "127.0.0.1",
+        "td.connect.user": "root",
+        "td.connect.pass": "taosdata",
+        "auto.offset.reset": "latest",
+        "msg.with.table.name": "true",
+    }
+)
```

</TabItem>
@@ -465,7 +488,7 @@ let consumer = taos.consumer({
'group.id': 'tg2',
'td.connect.user': 'root',
'td.connect.pass': 'taosdata',
-'auto.offset.reset': 'earliest',
+'auto.offset.reset': 'latest',
'msg.with.table.name': 'true',
'td.connect.ip': '127.0.0.1',
'td.connect.port': '6030'
@@ -488,7 +511,7 @@ var cfg = new ConsumerConfig
GourpId = "TDengine-TMQ-C#",
TDConnectUser = "root",
TDConnectPasswd = "taosdata",
-AutoOffsetReset = "earliest",
+AutoOffsetReset = "latest",
MsgWithTableName = "true",
TDConnectIp = "127.0.0.1",
TDConnectPort = "6030"
@@ -504,6 +527,24 @@ var consumer = new ConsumerBuilder(cfg).Build();

A consumer group is automatically created when multiple consumers are configured with the same consumer group ID.

Data replay function description:
- Subscription adds a replay function, which replays data according to the time it was written. For example, take three records written at the following times:
```sql
2023/09/22 00:00:00.000
2023/09/22 00:00:05.000
2023/09/22 00:00:08.000
```
After the first record is obtained, the second record is returned 5 seconds later, and the third record 3 seconds after the second.
- Only column subscriptions support data replay.
- Replay needs to ensure an independent timeline:
  - For a subtable subscription or a normal table subscription, only one vnode has data, which guarantees a single timeline.
  - When subscribing to a super table, the database must have only one vnode; otherwise an error is reported (because the data subscribed on multiple vnodes is not on the same timeline).
- Super table and database subscriptions do not support replay.
- The enable.replay parameter switches the function on; it is false (disabled) by default.
- Replay does not support progress saving, so when replay is enabled, auto commit is automatically turned off.
- Because replay requires processing time, its timing accuracy has an error of tens of milliseconds.
|
||||
|
||||
## Subscribe to a Topic
|
||||
|
||||
A single consumer can subscribe to multiple topics.
|
||||
|
|
|
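A topic must exist before it can be subscribed to. As a hedged sketch (the topic and supertable names here are hypothetical, not taken from this commit):

```sql
-- Create a topic over a hypothetical supertable so consumers can subscribe to it
CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage FROM meters;
```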
@ -12,7 +12,7 @@ The FQDN of all hosts must be setup properly. For e.g. FQDNs may have to be conf

### Step 1

If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. For details about uninstalling please refer to [Install and Uninstall](../../operation/pkg-install). To clean up the data, please use `rm -rf /var/lib/taos/\*` assuming the `dataDir` is configured as `/var/lib/taos`.
If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. To clean up the data, please use `rm -rf /var/lib/taos/\*` assuming the `dataDir` is configured as `/var/lib/taos`.

:::note
FQDN information is written to file. If you have started TDengine without configuring or changing the FQDN, ensure that data is backed up or no longer needed before running the `rm -rf /var/lib/taos/\*` command.
@ -56,7 +56,7 @@ database_option: {
- WAL_FSYNC_PERIOD: specifies the interval (in milliseconds) at which data is written from the WAL to disk. This parameter takes effect only when the WAL parameter is set to 2. The default value is 3000. Enter a value between 0 and 180000. The value 0 indicates that incoming data is immediately written to disk.
- MAXROWS: specifies the maximum number of rows recorded in a block. The default value is 4096.
- MINROWS: specifies the minimum number of rows recorded in a block. The default value is 100.
- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default. TDengine Enterprise supports [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage) function, thus multiple KEEP values (comma separated and up to 3 values supported, and meet keep 0 <= keep 1 <= keep 2, e.g. KEEP 100h,100d,3650d) are supported; TDengine OSS does not support Tiered Storage function (although multiple keep values are configured, they do not take effect, only the maximum keep value is used as KEEP).
- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be at least three times the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default. TDengine Enterprise supports the [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage) function, thus multiple KEEP values (comma separated, up to 3 values, satisfying keep 0 <= keep 1 <= keep 2, e.g. KEEP 100h,100d,3650d) are supported; TDengine OSS does not support Tiered Storage (even if multiple keep values are configured, they do not take effect; only the maximum keep value is used as KEEP). See the sketch after this list.
- PAGES: specifies the number of pages in the metadata storage engine cache on each vnode. Enter a value greater than or equal to 64. The default value is 256. The space occupied by metadata storage on each vnode is equal to the product of the values of the PAGESIZE and PAGES parameters. The space occupied by default is 1 MB.
- PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode. The default value is 4. Enter a value between 1 and 16384.
- PRECISION: specifies the precision at which a database records timestamps. Enter ms for milliseconds, us for microseconds, or ns for nanoseconds. The default value is ms.
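A minimal sketch of these options together (the database name is hypothetical); note that KEEP (30d) is at least three times DURATION (10d):

```sql
CREATE DATABASE power_db DURATION 10d KEEP 30d MAXROWS 4096 MINROWS 100 PRECISION 'ms';
```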
@ -24,7 +24,7 @@ SELECT [hints] [DISTINCT] select_list
hints: /*+ [hint([hint_param_list])] [hint([hint_param_list])] */

hint:
    BATCH_SCAN | NO_BATCH_SCAN
    BATCH_SCAN | NO_BATCH_SCAN | SORT_FOR_GROUP

select_list:
    select_expr [, select_expr] ...
@ -87,15 +87,17 @@ Hints are a means of user control over query optimization for individual stateme

The list of currently supported Hints is as follows:

| **Hint**      | **Params** | **Comment**            | **Scope**                 |
| :-----------: | ---------- | ---------------------- | ------------------------- |
| BATCH_SCAN    | None       | Batch table scan       | JOIN statement for stable |
| NO_BATCH_SCAN | None       | Sequential table scan  | JOIN statement for stable |
| **Hint**       | **Params** | **Comment**            | **Scope**                                |
| :------------: | ---------- | ---------------------- | ---------------------------------------- |
| BATCH_SCAN     | None       | Batch table scan       | JOIN statement for stable                |
| NO_BATCH_SCAN  | None       | Sequential table scan  | JOIN statement for stable                |
| SORT_FOR_GROUP | None       | Use sort for partition | With normal column in partition by list  |

For example:

```sql
SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
SELECT /*+ SORT_FOR_GROUP() */ count(*), c1 FROM stable1 PARTITION BY c1;
```

## Lists
@ -483,6 +483,93 @@ return_timestamp: {
- The precision of the returned timestamp is the same as the precision set for the current database in use
- return_timestamp indicates whether the returned value type is TIMESTAMP or not. If this parameter is set to 1, the function returns TIMESTAMP; otherwise it returns BIGINT. If the parameter is omitted, the default return type is BIGINT.

#### TO_CHAR

```sql
TO_CHAR(ts, format_str_literal)
```

**Description**: Converts a timestamp column to a string in the specified format

**Return value type**: VARCHAR

**Applicable column types**: TIMESTAMP

**Nested query**: It can be used in both the outer query and inner query in a nested query.

**Applicable table types**: standard tables and supertables

**Supported Formats**

| **Format** | **Comment** | **Example** |
| --- | --- | --- |
| AM,am,PM,pm | Meridiem indicator (without periods) | 07:00:00am |
| A.M.,a.m.,P.M.,p.m. | Meridiem indicator (with periods) | 07:00:00a.m. |
| YYYY,yyyy | year, 4 or more digits | 2023-10-10 |
| YYY,yyy | year, last 3 digits | 023-10-10 |
| YY,yy | year, last 2 digits | 23-10-10 |
| Y,y | year, last digit | 3-10-10 |
| MONTH | full uppercase of month | 2023-JANUARY-01 |
| Month | full capitalized month | 2023-January-01 |
| month | full lowercase of month | 2023-january-01 |
| MON | abbreviated uppercase of month (3 char) | JAN, SEP |
| Mon | abbreviated capitalized month | Jan, Sep |
| mon | abbreviated lowercase of month | jan, sep |
| MM,mm | month number, 01-12 | 2023-01-01 |
| DD,dd | month day, 01-31 | |
| DAY | full uppercase of week day | MONDAY |
| Day | full capitalized week day | Monday |
| day | full lowercase of week day | monday |
| DY | abbreviated uppercase of week day | MON |
| Dy | abbreviated capitalized week day | Mon |
| dy | abbreviated lowercase of week day | mon |
| DDD | year day, 001-366 | |
| D,d | week day number, 1-7, Sunday(1) to Saturday(7) | |
| HH24,hh24 | hour of day, 00-23 | 2023-01-30 23:59:59 |
| hh12,HH12,hh,HH | hour of day, 01-12 | 2023-01-30 12:59:59PM |
| MI,mi | minute, 00-59 | |
| SS,ss | second, 00-59 | |
| MS,ms | millisecond, 000-999 | |
| US,us | microsecond, 000000-999999 | |
| NS,ns | nanosecond, 000000000-999999999 | |
| TZH,tzh | time zone hour | 2023-01-30 11:59:59PM +08 |

**More explanations**:
- The outputs of `Month`, `Day`, etc. are left-aligned and padded to the width of the longest name, with no extra padding, like `2023-OCTOBER -01` and `2023-SEPTEMBER-01` (`September` is the longest month name). Week days are similar.
- When `ms`, `us`, `ns` are used in `to_char`, as in `to_char(ts, 'yyyy-mm-dd hh:mi:ss.ms.us.ns')`, `ms`, `us`, and `ns` all correspond to the same fractional seconds. When ts is `1697182085123`, the output of `ms` is `123`, `us` is `123000`, and `ns` is `123000000`.
- To output characters of the format literally, without conversion, surround them with double quotes: `to_char(ts, 'yyyy-mm-dd "is formatted by yyyy-mm-dd"')`. To output double quotes themselves, add a backslash before each one; for example `to_char(ts, '\"yyyy-mm-dd\"')` outputs `"2023-10-10"`.
- For formats that output digits, the uppercase and lowercase formats are the same.
- It's recommended to include the time zone in the format; otherwise, the default time zone of the server or client is used.
- The precision of the input timestamp is recognized automatically according to the precision of the table used; milliseconds are assumed if no table is specified.

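As a quick, hedged illustration of these formats (the table name `meters` is hypothetical):

```sql
-- Render a millisecond-precision timestamp with a time zone suffix
SELECT TO_CHAR(ts, 'yyyy-mm-dd hh24:mi:ss.ms TZH') FROM meters LIMIT 1;
-- Possible output: 2023-10-13 08:08:05.123 +08
```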
#### TO_TIMESTAMP

```sql
TO_TIMESTAMP(ts_str_literal, format_str_literal)
```

**Description**: Converts a formatted timestamp string to a timestamp

**Return value type**: TIMESTAMP

**Applicable column types**: VARCHAR

**Nested query**: It can be used in both the outer query and inner query in a nested query.

**Applicable table types**: standard tables and supertables

**Supported Formats**: The same as `TO_CHAR`.

**More explanations**:
- When more than one of `ms`, `us`, `ns` is specified in `to_timestamp`, the results are accumulated. For example, `to_timestamp('2023-10-10 10:10:10.123.000456.000000789', 'yyyy-mm-dd hh:mi:ss.ms.us.ns')` outputs the timestamp `2023-10-10 10:10:10.123456789`.
- The uppercase and lowercase forms of `MONTH`, `MON`, `DAY`, `DY`, and of formats that output digits, have the same effect when used in `to_timestamp`. For example, in `to_timestamp('2023-JANUARY-01', 'YYYY-month-dd')`, `month` can be replaced by `MONTH` or `Month`; case is ignored.
- If one component is specified multiple times, the earlier value is overwritten. In `to_timestamp('2023-22-10-10', 'yyyy-yy-MM-dd')`, the output year is `2022`.
- To avoid using an unexpected time zone during the conversion, it's recommended to include the time zone in the timestamp string, e.g. '2023-10-10 10:10:10+08'. If no time zone is specified, the default time zone of the server or client is used.
- Components that are not specified default to `1970-01-01 00:00:00` in the specified time zone, or in the local time zone by default.
- If `AM` or `PM` is specified in the format, the hour must be between 1 and 12.
- In some cases, `to_timestamp` can convert correctly even if the format and the timestamp string do not match exactly. For example, in `to_timestamp('200101/2', 'yyyyMM1/dd')`, the digit `1` in the format string is ignored and the output timestamp is `2001-01-02 00:00:00`. Spaces and tabs in the format and timestamp strings are also ignored automatically.
- The precision of the output timestamp is the same as that of the table in the SELECT statement; milliseconds are used if no table is specified. The output of `select to_timestamp('2023-08-1 10:10:10.123456789', 'yyyy-mm-dd hh:mi:ss.ns')` is truncated to millisecond precision. If a nanosecond-precision table is specified, as in `select to_timestamp('2023-08-1 10:10:10.123456789', 'yyyy-mm-dd hh:mi:ss.ns') from db_ns.table_ns limit 1`, no truncation is applied.
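A round-trip of the two functions, as a hedged sketch:

```sql
-- Parse a string, then format the resulting timestamp back; output precision follows the current database
SELECT TO_CHAR(TO_TIMESTAMP('2023-10-10 10:10:10.123', 'yyyy-mm-dd hh24:mi:ss.ms'), 'yyyy-mm-dd hh24:mi:ss.ms');
```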

### Time and Date Functions

|
@ -38,11 +38,16 @@ Aggregation by time window is supported in TDengine. For example, in the case wh
|
|||
window_clause: {
|
||||
SESSION(ts_col, tol_val)
|
||||
| STATE_WINDOW(col)
|
||||
| INTERVAL(interval [, offset]) [SLIDING sliding] [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
|
||||
| INTERVAL(interval_val [, offset]) [SLIDING (sliding_value)] [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
|
||||
| EVENT_WINDOW START WITH start_trigger_condition END WITH end_trigger_condition
|
||||
}
|
||||
```
|
||||
|
||||
Both interval_val and sliding_value are time durations which have 3 forms of representation.
|
||||
- INTERVAL(1s, 500a) SLIDING(1s), the unit char should be any one of a (millisecond), b (nanosecond), d (day), h (hour), m (minute), n (month), s (second), u (microsecond), w (week), y (year).
|
||||
- INTERVAL(1000, 500) SLIDING(1000), the unit will the same as the queried database, if there are more than one databases, higher precision will be used.
|
||||
- INTERVAL('1s', '500a') SLIDING('1s'), unit must be specified, no spaces allowed.
|
||||
|
||||
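A minimal windowing sketch using the first form (the table name is hypothetical):

```sql
-- 10-minute windows advanced every 5 minutes over a hypothetical supertable
SELECT _wstart, AVG(current) FROM meters INTERVAL(10m) SLIDING(5m);
```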
The following restrictions apply:

### Other Rules

@ -54,6 +54,7 @@ LIKE is used together with wildcards to match strings. Its usage is described as
MATCH and NMATCH are used together with regular expressions to match strings. Their usage is described as follows:

- Use POSIX regular expression syntax. For more information, see Regular Expressions.
- The `MATCH` operator returns true when the regular expression is matched. The `NMATCH` operator returns true when the regular expression is not matched.
- Regular expressions can be applied only to table names (i.e. `tbname`) and to tags/columns of binary/nchar types.
- The maximum length of a regular expression string is 128 bytes. The configuration parameter `maxRegexStringLen` can be used to change this limit. It is a client-side parameter and takes effect after the client is restarted.
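A hedged usage sketch (table names are hypothetical):

```sql
-- Count rows per subtable whose name starts with 'd1'
SELECT tbname, COUNT(*) FROM meters WHERE tbname MATCH '^d1' GROUP BY tbname;
```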
@ -180,6 +180,7 @@ The following list shows all reserved keywords:
- MAX_DELAY
- BWLIMIT
- MAXROWS
- MAX_SPEED
- MERGE
- META
- MINROWS
@ -26,75 +26,85 @@ This document introduces the tables of INFORMATION_SCHEMA and their structure.

## INS_DNODES

Provides information about dnodes. Similar to SHOW DNODES.
Provides information about dnodes. Similar to SHOW DNODES. Users whose SYSINFO attribute is 0 can't view this table.

| # | **Column** | **Data Type** | **Description** |
| --- | :------------: | ------------- | --------------- |
| 1 | vnodes | SMALLINT | Current number of vnodes on the dnode. It should be noted that `vnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 2 | support_vnodes | SMALLINT | Maximum number of vnodes on the dnode |
| 3 | status | BINARY(10) | Current status |
| 4 | note | BINARY(256) | Reason for going offline or other information |
| 3 | status | VARCHAR(10) | Current status |
| 4 | note | VARCHAR(256) | Reason for going offline or other information |
| 5 | id | SMALLINT | Dnode ID |
| 6 | endpoint | BINARY(134) | Dnode endpoint |
| 6 | endpoint | VARCHAR(134) | Dnode endpoint |
| 7 | create | TIMESTAMP | Creation time |

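These INFORMATION_SCHEMA tables are queried like ordinary tables; a small hedged example:

```sql
-- List each dnode's endpoint and current status
SELECT id, endpoint, status FROM information_schema.ins_dnodes;
```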
## INS_MNODES

Provides information about mnodes. Similar to SHOW MNODES.
Provides information about mnodes. Similar to SHOW MNODES. Users whose SYSINFO attribute is 0 can't view this table.

| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | ------------------------------------------ |
| 1 | id | SMALLINT | Mnode ID |
| 2 | endpoint | BINARY(134) | Mnode endpoint |
| 3 | role | BINARY(10) | Current role |
| 2 | endpoint | VARCHAR(134) | Mnode endpoint |
| 3 | role | VARCHAR(10) | Current role |
| 4 | role_time | TIMESTAMP | Time at which the current role was assumed |
| 5 | create_time | TIMESTAMP | Creation time |

## INS_QNODES

Provides information about qnodes. Similar to SHOW QNODES.
Provides information about qnodes. Similar to SHOW QNODES. Users whose SYSINFO attribute is 0 can't view this table.

| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | --------------- |
| 1 | id | SMALLINT | Qnode ID |
| 2 | endpoint | BINARY(134) | Qnode endpoint |
| 2 | endpoint | VARCHAR(134) | Qnode endpoint |
| 3 | create_time | TIMESTAMP | Creation time |

## INS_SNODES

Provides information about snodes. Similar to SHOW SNODES. Users whose SYSINFO attribute is 0 can't view this table.

| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | --------------- |
| 1 | id | SMALLINT | Snode ID |
| 2 | endpoint | VARCHAR(134) | Snode endpoint |
| 3 | create_time | TIMESTAMP | Creation time |

## INS_CLUSTER

Provides information about the cluster.
Provides information about the cluster. Users whose SYSINFO attribute is 0 can't view this table.

| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | --------------- |
| 1 | id | BIGINT | Cluster ID |
| 2 | name | BINARY(134) | Cluster name |
| 2 | name | VARCHAR(134) | Cluster name |
| 3 | create_time | TIMESTAMP | Creation time |

## INS_DATABASES

Provides information about user-created databases. Similar to SHOW DATABASES.

| # | **Column** | **Data Type** | **Description** |
| --- | :------------------: | ---------------- | ------------------------------------------------ |
| 1 | name | BINARY(32) | Database name |
| 1 | name | VARCHAR(64) | Database name |
| 2 | create_time | TIMESTAMP | Creation time |
| 3 | ntables | INT | Number of standard tables and subtables (not including supertables) |
| 4 | vgroups | INT | Number of vgroups. It should be noted that `vgroups` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6 | replica | INT | Number of replicas. It should be noted that `replica` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7 | strict | BINARY(4) | Obsoleted |
| 8 | duration | INT | Duration for storage of single files. It should be noted that `duration` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | keep | INT | Data retention period. It should be noted that `keep` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7 | strict | VARCHAR(4) | Obsoleted |
| 8 | duration | VARCHAR(10) | Duration for storage of single files. It should be noted that `duration` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | keep | VARCHAR(32) | Data retention period. It should be noted that `keep` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10 | buffer | INT | Write cache size per vnode, in MB. It should be noted that `buffer` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 11 | pagesize | INT | Page size for vnode metadata storage engine, in KB. It should be noted that `pagesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 12 | pages | INT | Number of pages per vnode metadata storage engine. It should be noted that `pages` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 13 | minrows | INT | Minimum number of records per file block. It should be noted that `minrows` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 14 | maxrows | INT | Maximum number of records per file block. It should be noted that `maxrows` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 15 | comp | INT | Compression method. It should be noted that `comp` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 16 | precision | BINARY(2) | Time precision. It should be noted that `precision` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 17 | status | BINARY(10) | Current database status |
| 18 | retentions | BINARY(60) | Aggregation interval and retention period. It should be noted that `retentions` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 16 | precision | VARCHAR(2) | Time precision. It should be noted that `precision` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 17 | status | VARCHAR(10) | Current database status |
| 18 | retentions | VARCHAR(60) | Aggregation interval and retention period. It should be noted that `retentions` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 19 | single_stable | BOOL | Whether the database can contain multiple supertables. It should be noted that `single_stable` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 20 | cachemodel | BINARY(60) | Caching method for the newest data. It should be noted that `cachemodel` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 20 | cachemodel | VARCHAR(60) | Caching method for the newest data. It should be noted that `cachemodel` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 21 | cachesize | INT | Memory per vnode used for caching the newest data. It should be noted that `cachesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 22 | wal_level | INT | WAL level. It should be noted that `wal_level` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 23 | wal_fsync_period | INT | Interval at which WAL is written to disk. It should be noted that `wal_fsync_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |

@ -111,15 +121,15 @@ Provides information about user-defined functions.

| # | **Column** | **Data Type** | **Description** |
| --- | :-----------: | ------------- | --------------- |
| 1 | name | BINARY(64) | Function name |
| 2 | comment | BINARY(255) | Function description. It should be noted that `comment` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 1 | name | VARCHAR(64) | Function name |
| 2 | comment | VARCHAR(255) | Function description. It should be noted that `comment` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 3 | aggregate | INT | Whether the UDF is an aggregate function. It should be noted that `aggregate` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 4 | output_type | BINARY(31) | Output data type |
| 4 | output_type | VARCHAR(31) | Output data type |
| 5 | create_time | TIMESTAMP | Creation time |
| 6 | code_len | INT | Length of the source code |
| 7 | bufsize | INT | Buffer size |
| 8 | func_language | BINARY(31) | UDF programming language |
| 9 | func_body | BINARY(16384) | UDF function body |
| 8 | func_language | VARCHAR(31) | UDF programming language |
| 9 | func_body | VARCHAR(16384) | UDF function body |
| 10 | func_version | INT | UDF function version, starting from 0 and increasing by 1 each time it is updated |

## INS_INDEXES

@ -128,12 +138,12 @@ Provides information about user-created indices. Similar to SHOW INDEX.

| # | **Column** | **Data Type** | **Description** |
| --- | :--------------: | ------------- | ----------------------------------------------------------------------- |
| 1 | db_name | BINARY(32) | Database containing the table with the specified index |
| 2 | table_name | BINARY(192) | Table containing the specified index |
| 3 | index_name | BINARY(192) | Index name |
| 4 | db_name | BINARY(64) | Index column |
| 5 | index_type | BINARY(10) | SMA or tag index |
| 6 | index_extensions | BINARY(256) | Other information. For SMA/tag indices, this shows a list of functions |
| 1 | db_name | VARCHAR(32) | Database containing the table with the specified index |
| 2 | table_name | VARCHAR(192) | Table containing the specified index |
| 3 | index_name | VARCHAR(192) | Index name |
| 4 | db_name | VARCHAR(64) | Index column |
| 5 | index_type | VARCHAR(10) | SMA or tag index |
| 6 | index_extensions | VARCHAR(256) | Other information. For SMA/tag indices, this shows a list of functions |

## INS_STABLES

@ -141,16 +151,16 @@ Provides information about supertables.

| # | **Column** | **Data Type** | **Description** |
| --- | :-----------: | ------------- | --------------- |
| 1 | stable_name | BINARY(192) | Supertable name |
| 2 | db_name | BINARY(64) | All databases in the supertable |
| 1 | stable_name | VARCHAR(192) | Supertable name |
| 2 | db_name | VARCHAR(64) | All databases in the supertable |
| 3 | create_time | TIMESTAMP | Creation time |
| 4 | columns | INT | Number of columns |
| 5 | tags | INT | Number of tags. It should be noted that `tags` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6 | last_update | TIMESTAMP | Last updated time |
| 7 | table_comment | BINARY(1024) | Table description |
| 8 | watermark | BINARY(64) | Window closing time. It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | max_delay | BINARY(64) | Maximum delay for pushing stream processing results. It should be noted that `max_delay` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10 | rollup | BINARY(128) | Rollup aggregate function. It should be noted that `rollup` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7 | table_comment | VARCHAR(1024) | Table description |
| 8 | watermark | VARCHAR(64) | Window closing time. It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | max_delay | VARCHAR(64) | Maximum delay for pushing stream processing results. It should be noted that `max_delay` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10 | rollup | VARCHAR(128) | Rollup aggregate function. It should be noted that `rollup` is a TDengine keyword and needs to be escaped with ` when used as a column name. |

## INS_TABLES

@ -158,37 +168,37 @@ Provides information about standard tables and subtables.

| # | **Column** | **Data Type** | **Description** |
| --- | :-----------: | ------------- | ---------------- |
| 1 | table_name | BINARY(192) | Table name |
| 2 | db_name | BINARY(64) | Database name |
| 1 | table_name | VARCHAR(192) | Table name |
| 2 | db_name | VARCHAR(64) | Database name |
| 3 | create_time | TIMESTAMP | Creation time |
| 4 | columns | INT | Number of columns |
| 5 | stable_name | BINARY(192) | Supertable name |
| 5 | stable_name | VARCHAR(192) | Supertable name |
| 6 | uid | BIGINT | Table ID |
| 7 | vgroup_id | INT | Vgroup ID |
| 8 | ttl | INT | Table time-to-live. It should be noted that `ttl` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | table_comment | BINARY(1024) | Table description |
| 10 | type | BINARY(20) | Table type |
| 9 | table_comment | VARCHAR(1024) | Table description |
| 10 | type | VARCHAR(20) | Table type |

## INS_TAGS

| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | --------------- |
| 1 | table_name | BINARY(192) | Table name |
| 2 | db_name | BINARY(64) | Database name |
| 3 | stable_name | BINARY(192) | Supertable name |
| 4 | tag_name | BINARY(64) | Tag name |
| 5 | tag_type | BINARY(64) | Tag type |
| 6 | tag_value | BINARY(16384) | Tag value |
| 1 | table_name | VARCHAR(192) | Table name |
| 2 | db_name | VARCHAR(64) | Database name |
| 3 | stable_name | VARCHAR(192) | Supertable name |
| 4 | tag_name | VARCHAR(64) | Tag name |
| 5 | tag_type | VARCHAR(64) | Tag type |
| 6 | tag_value | VARCHAR(16384) | Tag value |

## INS_COLUMNS

| # | **Column** | **Data Type** | **Description** |
| --- | :-----------: | ------------- | ---------------- |
| 1 | table_name | BINARY(192) | Table name |
| 2 | db_name | BINARY(64) | Database name |
| 3 | table_type | BINARY(21) | Table type |
| 4 | col_name | BINARY(64) | Column name |
| 5 | col_type | BINARY(32) | Column type |
| 1 | table_name | VARCHAR(192) | Table name |
| 2 | db_name | VARCHAR(64) | Database name |
| 3 | table_type | VARCHAR(21) | Table type |
| 4 | col_name | VARCHAR(64) | Column name |
| 5 | col_type | VARCHAR(32) | Column type |
| 6 | col_length | INT | Column length |
| 7 | col_precision | INT | Column precision |
| 8 | col_scale | INT | Column scale |

@ -196,51 +206,51 @@ Provides information about standard tables and subtables.

## INS_USERS

Provides information about TDengine users.
Provides information about TDengine users. Users whose SYSINFO attribute is 0 can't view this table.

| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | ---------------- |
| 1 | user_name | BINARY(23) | User name |
| 2 | privilege | BINARY(256) | User permissions |
| 1 | user_name | VARCHAR(23) | User name |
| 2 | privilege | VARCHAR(256) | User permissions |
| 3 | create_time | TIMESTAMP | Creation time |

## INS_GRANTS

Provides information about TDengine Enterprise Edition permissions.
Provides information about TDengine Enterprise Edition permissions. Users whose SYSINFO attribute is 0 can't view this table.

| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | --------------- |
| 1 | version | BINARY(9) | Whether the deployment is a licensed or trial version |
| 2 | cpu_cores | BINARY(9) | CPU cores included in license |
| 3 | dnodes | BINARY(10) | Dnodes included in license. It should be noted that `dnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 4 | streams | BINARY(10) | Streams included in license. It should be noted that `streams` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 5 | users | BINARY(10) | Users included in license. It should be noted that `users` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6 | accounts | BINARY(10) | Accounts included in license. It should be noted that `accounts` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7 | storage | BINARY(21) | Storage space included in license. It should be noted that `storage` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 8 | connections | BINARY(21) | Client connections included in license. It should be noted that `connections` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | databases | BINARY(11) | Databases included in license. It should be noted that `databases` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10 | speed | BINARY(9) | Write speed specified in license (data points per second) |
| 11 | querytime | BINARY(9) | Total query time specified in license |
| 12 | timeseries | BINARY(21) | Number of metrics included in license |
| 13 | expired | BINARY(5) | Whether the license has expired |
| 14 | expire_time | BINARY(19) | When the trial period expires |
| 1 | version | VARCHAR(9) | Whether the deployment is a licensed or trial version |
| 2 | cpu_cores | VARCHAR(9) | CPU cores included in license |
| 3 | dnodes | VARCHAR(10) | Dnodes included in license. It should be noted that `dnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 4 | streams | VARCHAR(10) | Streams included in license. It should be noted that `streams` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 5 | users | VARCHAR(10) | Users included in license. It should be noted that `users` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6 | accounts | VARCHAR(10) | Accounts included in license. It should be noted that `accounts` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7 | storage | VARCHAR(21) | Storage space included in license. It should be noted that `storage` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 8 | connections | VARCHAR(21) | Client connections included in license. It should be noted that `connections` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | databases | VARCHAR(11) | Databases included in license. It should be noted that `databases` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10 | speed | VARCHAR(9) | Write speed specified in license (data points per second) |
| 11 | querytime | VARCHAR(9) | Total query time specified in license |
| 12 | timeseries | VARCHAR(21) | Number of metrics included in license |
| 13 | expired | VARCHAR(5) | Whether the license has expired |
| 14 | expire_time | VARCHAR(19) | When the trial period expires |

## INS_VGROUPS

Provides information about vgroups.
Provides information about vgroups. Users whose SYSINFO attribute is 0 can't view this table.

| # | **Column** | **Data Type** | **Description** |
| --- | :--------: | ------------- | --------------- |
| 1 | vgroup_id | INT | Vgroup ID |
| 2 | db_name | BINARY(32) | Database name |
| 2 | db_name | VARCHAR(32) | Database name |
| 3 | tables | INT | Tables in vgroup. It should be noted that `tables` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 4 | status | BINARY(10) | Vgroup status |
| 4 | status | VARCHAR(10) | Vgroup status |
| 5 | v1_dnode | INT | Dnode ID of first vgroup member |
| 6 | v1_status | BINARY(10) | Status of first vgroup member |
| 6 | v1_status | VARCHAR(10) | Status of first vgroup member |
| 7 | v2_dnode | INT | Dnode ID of second vgroup member |
| 8 | v2_status | BINARY(10) | Status of second vgroup member |
| 8 | v2_status | VARCHAR(10) | Status of second vgroup member |
| 9 | v3_dnode | INT | Dnode ID of third vgroup member |
| 10 | v3_status | BINARY(10) | Status of third vgroup member |
| 10 | v3_status | VARCHAR(10) | Status of third vgroup member |
| 11 | nfiles | INT | Number of data and metadata files in the vgroup |
| 12 | file_size | INT | Size of the data and metadata files in the vgroup |
| 13 | tsma | TINYINT | Whether time-range-wise SMA is enabled. 1 means enabled; 0 means disabled. |

@ -251,55 +261,57 @@ Provides system configuration information.

| # | **Column** | **Data Type** | **Description** |
| --- | :--------: | ------------- | --------------- |
| 1 | name | BINARY(32) | Parameter |
| 2 | value | BINARY(64) | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 1 | name | VARCHAR(32) | Parameter |
| 2 | value | VARCHAR(64) | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |

## INS_DNODE_VARIABLES

Provides dnode configuration information.
Provides dnode configuration information. Users whose SYSINFO attribute is 0 can't view this table.

| # | **Column** | **Data Type** | **Description** |
| --- | :--------: | ------------- | --------------- |
| 1 | dnode_id | INT | Dnode ID |
| 2 | name | BINARY(32) | Parameter |
| 3 | value | BINARY(64) | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 2 | name | VARCHAR(32) | Parameter |
| 3 | value | VARCHAR(64) | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |

## INS_TOPICS

| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | -------------------------------------- |
| 1 | topic_name | BINARY(192) | Topic name |
| 2 | db_name | BINARY(64) | Database for the topic |
| 1 | topic_name | VARCHAR(192) | Topic name |
| 2 | db_name | VARCHAR(64) | Database for the topic |
| 3 | create_time | TIMESTAMP | Creation time |
| 4 | sql | BINARY(1024) | SQL statement used to create the topic |
| 4 | sql | VARCHAR(1024) | SQL statement used to create the topic |

## INS_SUBSCRIPTIONS

| # | **Column** | **Data Type** | **Description** |
| --- | :------------: | ------------- | --------------------------- |
| 1 | topic_name | BINARY(204) | Subscribed topic |
| 2 | consumer_group | BINARY(193) | Subscribed consumer group |
| 1 | topic_name | VARCHAR(204) | Subscribed topic |
| 2 | consumer_group | VARCHAR(193) | Subscribed consumer group |
| 3 | vgroup_id | INT | Vgroup ID for the consumer |
| 4 | consumer_id | BIGINT | Consumer ID |
| 5 | offset | BINARY(64) | Consumption progress |
| 5 | offset | VARCHAR(64) | Consumption progress |
| 6 | rows | BIGINT | Number of consumption items |

## INS_STREAMS

| # | **Column** | **Data Type** | **Description** |
| --- | :----------: | ------------- | --------------- |
| 1 | stream_name | BINARY(64) | Stream name |
| 1 | stream_name | VARCHAR(64) | Stream name |
| 2 | create_time | TIMESTAMP | Creation time |
| 3 | sql | BINARY(1024) | SQL statement used to create the stream |
| 4 | status | BINARY(20) | Current status |
| 5 | source_db | BINARY(64) | Source database |
| 6 | target_db | BINARY(64) | Target database |
| 7 | target_table | BINARY(192) | Target table |
| 3 | sql | VARCHAR(1024) | SQL statement used to create the stream |
| 4 | status | VARCHAR(20) | Current status |
| 5 | source_db | VARCHAR(64) | Source database |
| 6 | target_db | VARCHAR(64) | Target database |
| 7 | target_table | VARCHAR(192) | Target table |
| 8 | watermark | BIGINT | Watermark (see stream processing documentation). It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation). It should be noted that `trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |

## INS_USER_PRIVILEGES

Users whose SYSINFO attribute is 0 can't view this table.

| # | **Column** | **Data Type** | **Description** |
| --- | :--------: | ------------- | --------------- |
| 1 | user_name | VARCHAR(24) | Username |

@ -73,10 +73,10 @@ Shows the SQL statement used to create the specified table. This statement can b
## SHOW DATABASES

```sql
SHOW DATABASES;
SHOW [USER | SYSTEM] DATABASES;
```

Shows all user-created databases.
Shows all databases. The `USER` qualifier specifies only user-created databases. The `SYSTEM` qualifier specifies only system databases.

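For example, to list only user-created databases with the new qualifier:

```sql
SHOW USER DATABASES;
```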
## SHOW DNODES

@ -183,10 +183,10 @@ Shows all subscriptions in the system.
## SHOW TABLES

```sql
SHOW [db_name.]TABLES [LIKE 'pattern'];
SHOW [NORMAL | CHILD] [db_name.]TABLES [LIKE 'pattern'];
```

Shows all standard tables and subtables in the current database. You can use LIKE for fuzzy matching.
Shows all standard tables and subtables in the current database. You can use LIKE for fuzzy matching. The `NORMAL` qualifier specifies standard tables. The `CHILD` qualifier specifies subtables.

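A hedged usage sketch combining the new qualifier with fuzzy matching:

```sql
-- List only subtables whose names start with 'd'
SHOW CHILD TABLES LIKE 'd%';
```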
## SHOW TABLE DISTRIBUTED

@ -1,178 +0,0 @@
---
title: Install and Uninstall
description: This document describes how to install, upgrade, and uninstall TDengine.
---

import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";

This document gives more information about installing, uninstalling, and upgrading TDengine.

## Install

For details about installing TDengine, please refer to [Installation Guide](../../get-started/package/).

## Uninstall

<Tabs>
<TabItem label="Uninstall by apt-get" value="aptremove">

A TDengine package installed with apt-get can be uninstalled as below:

```bash
$ sudo apt-get remove tdengine
Reading package lists... Done
Building dependency tree
Reading state information... Done
The following packages will be REMOVED:
  tdengine
0 upgraded, 0 newly installed, 1 to remove and 18 not upgraded.
After this operation, 68.3 MB disk space will be freed.
Do you want to continue? [Y/n] y
(Reading database ... 135625 files and directories currently installed.)
Removing tdengine (3.0.0.0) ...
TDengine is removed successfully!

```

If you have installed taos-tools, please uninstall it first before uninstalling TDengine. The uninstall command is as follows:

```
$ sudo apt remove taostools
Reading package lists... Done
Building dependency tree
Reading state information... Done
The following packages will be REMOVED:
  taostools
0 upgraded, 0 newly installed, 1 to remove and 0 not upgraded.
After this operation, 68.3 MB disk space will be freed.
Do you want to continue? [Y/n]
(Reading database ... 147973 files and directories currently installed.)
Removing taostools (2.1.2) ...
```

</TabItem>
<TabItem label="Uninstall Deb" value="debuninst">

The Deb package of TDengine can be uninstalled as below:

```
$ sudo dpkg -r tdengine
(Reading database ... 137504 files and directories currently installed.)
Removing tdengine (3.0.0.0) ...
TDengine is removed successfully!

```

The Deb package of taosTools can be uninstalled as below:

```
$ sudo dpkg -r taostools
(Reading database ... 147973 files and directories currently installed.)
Removing taostools (2.1.2) ...
```

</TabItem>

<TabItem label="Uninstall RPM" value="rpmuninst">

The RPM package of TDengine can be uninstalled as below:

```
$ sudo rpm -e tdengine
TDengine is removed successfully!
```

The RPM package of taosTools can be uninstalled as below:

```
sudo rpm -e taostools
taosToole is removed successfully!
```

</TabItem>

<TabItem label="Uninstall tar.gz" value="taruninst">

The tar.gz package of TDengine can be uninstalled as below:

```
$ rmtaos
TDengine is removed successfully!
```

The tar.gz package of taosTools can be uninstalled as below:

```
$ rmtaostools
Start to uninstall taos tools ...

taos tools is uninstalled successfully!
```

</TabItem>

<TabItem label="Windows uninstall" value="windows">
Run C:\TDengine\unins000.exe to uninstall TDengine on a Windows system.
</TabItem>

<TabItem label="Mac uninstall" value="mac">

TDengine can be uninstalled as below:

```
$ rmtaos
TDengine is removed successfully!
```

</TabItem>
</Tabs>

:::info

- We strongly recommend not to use multiple kinds of TDengine installation packages on a single host. The packages may affect each other and cause errors.

- After a deb package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below, which cleans up TDengine package information.

```
$ sudo rm -f /var/lib/dpkg/info/tdengine*
```

You can then reinstall if needed.

- After an rpm package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below, which cleans up TDengine package information.

```
$ sudo rpm -e --noscripts tdengine
```

You can then reinstall if needed.

:::

Uninstalling and Modifying Files

- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, and log directory /var/log/taos are kept. They can be deleted manually with caution, because data can't be recovered. Please follow data integrity, security, backup, or relevant SOPs before deleting any data.

- When reinstalling TDengine, if the default configuration file /etc/taos/taos.cfg exists, it will be kept, and the configuration file in the installation package will be renamed to taos.cfg.orig and stored at /usr/local/taos/cfg to be used as a configuration sample. Otherwise the configuration file in the installation package will be installed to /etc/taos/taos.cfg and used.

## Upgrade
There are two aspects to an upgrade: upgrading the installation package and upgrading a running server.

To upgrade a package, follow the steps mentioned previously to first uninstall the old version and then install the new version.

Upgrading a running server is much more complex. First please check the version numbers of the old and new versions. The version number of TDengine consists of 4 sections; only if the first 2 sections match can the old version be upgraded to the new version. The steps for upgrading a running server are as below:
- Stop inserting data
- Make sure all data is persisted to disk, using the command `flush database`
- Stop the cluster of TDengine
- Uninstall the old version and install the new version
- Start the cluster of TDengine
- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss
- Run some simple data insertion statements to make sure the cluster works well
- Restore business services

:::warning
TDengine doesn't guarantee that any lower version is compatible with the data generated by a higher version, so it's never recommended to downgrade the version.

:::
@ -41,8 +41,6 @@ An existing Grafana Notification Channel can be specified with parameter `-E`, t

Launch `TDinsight.sh` with the command above and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`.

For more use cases and restrictions please refer to [TDinsight](/reference/tdinsight/).

## log database

The data of the TDinsight dashboard is stored in the `log` database (the default; you can change it in taoskeeper's config file — for more information, please refer to the [taoskeeper document](/reference/taosKeeper)). taoskeeper creates the log database on startup.

@ -106,22 +104,22 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|uptime|FLOAT||dnode uptime|
|uptime|FLOAT||dnode uptime in `days`|
|cpu\_engine|FLOAT||cpu usage of tdengine. read from `/proc/<taosd_pid>/stat`|
|cpu\_system|FLOAT||cpu usage of server. read from `/proc/stat`|
|cpu\_cores|FLOAT||cpu cores of server|
|mem\_engine|INT||memory usage of tdengine. read from `/proc/<taosd_pid>/status`|
|mem\_system|INT||available memory on the server|
|mem\_system|INT||available memory on the server in `KB`|
|mem\_total|INT||total memory of server in `KB`|
|disk\_engine|INT|||
|disk\_used|BIGINT||usage of data dir in `bytes`|
|disk\_total|BIGINT||the capacity of data dir in `bytes`|
|net\_in|FLOAT||network throughput rate in kb/s. read from `/proc/net/dev`|
|net\_out|FLOAT||network throughput rate in kb/s. read from `/proc/net/dev`|
|io\_read|FLOAT||io throughput rate in kb/s. read from `/proc/<taosd_pid>/io`|
|io\_write|FLOAT||io throughput rate in kb/s. read from `/proc/<taosd_pid>/io`|
|io\_read\_disk|FLOAT||io throughput rate of disk in kb/s. read from `/proc/<taosd_pid>/io`|
|io\_write\_disk|FLOAT||io throughput rate of disk in kb/s. read from `/proc/<taosd_pid>/io`|
|net\_in|FLOAT||network throughput rate in byte/s. read from `/proc/net/dev`|
|net\_out|FLOAT||network throughput rate in byte/s. read from `/proc/net/dev`|
|io\_read|FLOAT||io throughput rate in byte/s. read from `/proc/<taosd_pid>/io`|
|io\_write|FLOAT||io throughput rate in byte/s. read from `/proc/<taosd_pid>/io`|
|io\_read\_disk|FLOAT||io throughput rate of disk in byte/s. read from `/proc/<taosd_pid>/io`|
|io\_write\_disk|FLOAT||io throughput rate of disk in byte/s. read from `/proc/<taosd_pid>/io`|
|req\_select|INT||number of select queries received per dnode|
|req\_select\_rate|FLOAT||number of select queries received per dnode divided by monitor interval.|
|req\_insert|INT||number of insert queries received per dnode|
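These metrics can be queried directly from the monitoring database. As a heavily hedged sketch (the table name `dnodes_info` is an assumption about taoskeeper's schema, not taken from this commit):

```sql
-- Latest uptime (in days) per dnode from the `log` database
SELECT LAST(uptime) FROM log.dnodes_info GROUP BY dnode_ep;
```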
@ -150,9 +148,9 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|ts|TIMESTAMP||timestamp|
|name|NCHAR||data directory. default is `/var/lib/taos`|
|level|INT||level for multi-level storage|
|avail|BIGINT||available space for data directory|
|used|BIGINT||used space for data directory|
|total|BIGINT||total space for data directory|
|avail|BIGINT||available space for data directory in `bytes`|
|used|BIGINT||used space for data directory in `bytes`|
|total|BIGINT||total space for data directory in `bytes`|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
@ -165,9 +163,9 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||log directory. default is `/var/log/taos/`|
|avail|BIGINT||available space for log directory|
|used|BIGINT||used space for log directory|
|total|BIGINT||total space for log directory|
|avail|BIGINT||available space for log directory in `bytes`|
|used|BIGINT||used space for log directory in `bytes`|
|total|BIGINT||total space for log directory in `bytes`|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
@ -180,9 +178,9 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||temp directory. default is `/tmp/`|
|avail|BIGINT||available space for temp directory|
|used|BIGINT||used space for temp directory|
|total|BIGINT||total space for temp directory|
|avail|BIGINT||available space for temp directory in `bytes`|
|used|BIGINT||used space for temp directory in `bytes`|
|total|BIGINT||total space for temp directory in `bytes`|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

@ -142,8 +142,15 @@ TDengine currently supports timestamp, number, character, Boolean type, and the
|
|||
| BINARY | byte array |
|
||||
| NCHAR | java.lang.String |
|
||||
| JSON | java.lang.String |
|
||||
| VARBINARY | byte[] |
|
||||
| GEOMETRY | byte[] |
|
||||
|
||||
**Note**: Only TAG supports JSON types
|
||||
Due to historical reasons, the BINARY type data in TDengine is not truly binary data and is no longer recommended for use. Please use VARBINARY type instead.
|
||||
GEOMETRY type is binary data in little endian byte order, which complies with the WKB specification. For detailed information, please refer to [Data Type] (/tao-sql/data-type/#Data Types)
|
||||
For WKB specifications, please refer to [Well Known Binary (WKB)]( https://libgeos.org/specifications/wkb/ )
|
||||
For Java connector, the jts library can be used to easily create GEOMETRY type objects, serialize them, and write them to TDengine. Here is an example [Geometry example](https://github.com/taosdata/TDengine/blob/3.0/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/GeometryDemo.java)
|
||||
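As a concrete illustration of the jts workflow described above, here is a minimal, hedged sketch. It assumes the `org.locationtech.jts` library is on the classpath; the classes below belong to JTS, not to the TDengine connector:

```java
import org.locationtech.jts.geom.Coordinate;
import org.locationtech.jts.geom.GeometryFactory;
import org.locationtech.jts.geom.Point;
import org.locationtech.jts.io.ByteOrderValues;
import org.locationtech.jts.io.WKBWriter;

public class GeometryWkbSketch {
    public static void main(String[] args) {
        // Build POINT(1 2) with JTS.
        GeometryFactory factory = new GeometryFactory();
        Point point = factory.createPoint(new Coordinate(1.0, 2.0));

        // Serialize to 2-D little-endian WKB, the byte order TDengine expects.
        WKBWriter writer = new WKBWriter(2, ByteOrderValues.LITTLE_ENDIAN);
        byte[] wkb = writer.write(point);

        // These bytes can then be bound to a GEOMETRY column, e.g. with the
        // setGeometry(...) call shown in the parameter binding demo below.
        System.out.println(WKBWriter.toHex(wkb)); // 0101000000000000000000F03F0000000000000040
    }
}
```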
|
||||
|
||||
## Installation Steps
|
||||
|
||||
|
@ -354,7 +361,7 @@ The configuration parameters in properties are as follows.
|
|||
- TSDBDriver.PROPERTY_KEY_CONFIG_DIR: only works when using JDBC native connection. Client configuration file directory path, default value `/etc/taos` on Linux OS, default value `C:/TDengine/cfg` on Windows OS, default value `/etc/taos` on macOS.
|
||||
- TSDBDriver.PROPERTY_KEY_CHARSET: The character set used by the client; the default value is the system character set.
|
||||
- TSDBDriver.PROPERTY_KEY_LOCALE: this only takes effect when using JDBC native connection. Client language environment, the default value is system current locale.
|
||||
- TSDBDriver.PROPERTY_KEY_TIME_ZONE: only takes effect when using JDBC native connection. The time zone used by the client; the default value is the system's current time zone.
|
||||
- TSDBDriver.PROPERTY_KEY_TIME_ZONE: only takes effect when using JDBC native connection. The time zone used by the client; the default value is the system's current time zone. Due to historical reasons, we only support some specifications of the POSIX standard, such as UTC-8 (representing the timezone of Shanghai, China), GMT-7, and Europe/Paris.
|
||||
- TSDBDriver.HTTP_CONNECT_TIMEOUT: REST connection timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection.
|
||||
- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is false.
|
||||
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is true. A configuration sketch follows this list.
|
||||
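A minimal sketch of applying some of these properties follows; the `localhost:6041` endpoint and the default root/taosdata credentials are assumptions to adapt to your deployment:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

import com.taosdata.jdbc.TSDBDriver;

public class ConnPropsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Native-connection-only property, per the list above.
        props.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
        // REST-connection timeouts in milliseconds, per the list above.
        props.setProperty(TSDBDriver.HTTP_CONNECT_TIMEOUT, "60000");
        props.setProperty(TSDBDriver.HTTP_SOCKET_TIMEOUT, "60000");

        String url = "jdbc:TAOS-RS://localhost:6041/?user=root&password=taosdata"; // assumed endpoint
        try (Connection conn = DriverManager.getConnection(url, props)) {
            System.out.println("connected: " + !conn.isClosed());
        }
    }
}
```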
|
@ -456,13 +463,15 @@ public class ParameterBindingDemo {
|
|||
|
||||
private static final String host = "127.0.0.1";
|
||||
private static final Random random = new Random(System.currentTimeMillis());
|
||||
private static final int BINARY_COLUMN_SIZE = 20;
|
||||
private static final int BINARY_COLUMN_SIZE = 50;
|
||||
private static final String[] schemaList = {
|
||||
"create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
|
||||
"create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
|
||||
"create table stable3(ts timestamp, f1 bool) tags(t1 bool)",
|
||||
"create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))",
|
||||
"create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))"
|
||||
"create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))",
|
||||
"create table stable6(ts timestamp, f1 varbinary(" + BINARY_COLUMN_SIZE + ")) tags(t1 varbinary(" + BINARY_COLUMN_SIZE + "))",
|
||||
"create table stable7(ts timestamp, f1 geometry(" + BINARY_COLUMN_SIZE + ")) tags(t1 geometry(" + BINARY_COLUMN_SIZE + "))",
|
||||
};
|
||||
private static final int numOfSubTable = 10, numOfRow = 10;
|
||||
|
||||
|
@ -474,21 +483,20 @@ public class ParameterBindingDemo {
|
|||
init(conn);
|
||||
|
||||
bindInteger(conn);
|
||||
|
||||
bindFloat(conn);
|
||||
|
||||
bindBoolean(conn);
|
||||
|
||||
bindBytes(conn);
|
||||
|
||||
bindString(conn);
|
||||
bindVarbinary(conn);
|
||||
bindGeometry(conn);
|
||||
|
||||
clean(conn);
|
||||
conn.close();
|
||||
}
|
||||
|
||||
private static void init(Connection conn) throws SQLException {
|
||||
clean(conn);
|
||||
try (Statement stmt = conn.createStatement()) {
|
||||
stmt.execute("drop database if exists test_parabind");
|
||||
stmt.execute("create database if not exists test_parabind");
|
||||
stmt.execute("use test_parabind");
|
||||
for (int i = 0; i < schemaList.length; i++) {
|
||||
|
@ -496,6 +504,11 @@ public class ParameterBindingDemo {
|
|||
}
|
||||
}
|
||||
}
|
||||
private static void clean(Connection conn) throws SQLException {
|
||||
try (Statement stmt = conn.createStatement()) {
|
||||
stmt.execute("drop database if exists test_parabind");
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindInteger(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)";
|
||||
|
@ -674,10 +687,84 @@ public class ParameterBindingDemo {
|
|||
pstmt.columnDataExecuteBatch();
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindVarbinary(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable6 tags(?) values(?,?)";
|
||||
|
||||
try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
|
||||
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t6_" + i);
|
||||
// set tags
|
||||
byte[] bTag = new byte[]{0,2,3,4,5};
|
||||
bTag[0] = (byte) i;
|
||||
pstmt.setTagVarbinary(0, bTag);
|
||||
|
||||
// set columns
|
||||
ArrayList<Long> tsList = new ArrayList<>();
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++)
|
||||
tsList.add(current + j);
|
||||
pstmt.setTimestamp(0, tsList);
|
||||
|
||||
ArrayList<byte[]> f1List = new ArrayList<>();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
byte[] v = new byte[]{0,2,3,4,5,6};
|
||||
v[0] = (byte)j;
|
||||
f1List.add(v);
|
||||
}
|
||||
pstmt.setVarbinary(1, f1List, BINARY_COLUMN_SIZE);
|
||||
|
||||
// add column
|
||||
pstmt.columnDataAddBatch();
|
||||
}
|
||||
// execute
|
||||
pstmt.columnDataExecuteBatch();
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindGeometry(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable7 tags(?) values(?,?)";
|
||||
|
||||
try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
|
||||
|
||||
byte[] g1 = StringUtils.hexToBytes("0101000000000000000000F03F0000000000000040");
|
||||
byte[] g2 = StringUtils.hexToBytes("0102000020E610000002000000000000000000F03F000000000000004000000000000008400000000000001040");
|
||||
List<byte[]> listGeo = new ArrayList<>();
|
||||
listGeo.add(g1);
|
||||
listGeo.add(g2);
|
||||
|
||||
for (int i = 1; i <= 2; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t7_" + i);
|
||||
// set tags
|
||||
pstmt.setTagGeometry(0, listGeo.get(i - 1));
|
||||
|
||||
// set columns
|
||||
ArrayList<Long> tsList = new ArrayList<>();
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++)
|
||||
tsList.add(current + j);
|
||||
pstmt.setTimestamp(0, tsList);
|
||||
|
||||
ArrayList<byte[]> f1List = new ArrayList<>();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
f1List.add(listGeo.get(i - 1));
|
||||
}
|
||||
pstmt.setGeometry(1, f1List, BINARY_COLUMN_SIZE);
|
||||
|
||||
// add column
|
||||
pstmt.columnDataAddBatch();
|
||||
}
|
||||
// execute
|
||||
pstmt.columnDataExecuteBatch();
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Note**: both setString and setNString require the user to declare the width of the corresponding column in the size parameter of the table definition
|
||||
**Note**: both String and byte[] require the user to declare the width of the corresponding column in the size parameter of the table definition
|
||||
|
||||
The methods to set VALUES columns:
|
||||
|
||||
|
@ -692,6 +779,8 @@ public void setByte(int columnIndex, ArrayList<Byte> list) throws SQLException
|
|||
public void setShort(int columnIndex, ArrayList<Short> list) throws SQLException
|
||||
public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException
|
||||
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
|
||||
public void setVarbinary(int columnIndex, ArrayList<byte[]> list, int size) throws SQLException
|
||||
public void setGeometry(int columnIndex, ArrayList<byte[]> list, int size) throws SQLException
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
@ -880,6 +969,9 @@ public void setTagFloat(int index, float value)
|
|||
public void setTagDouble(int index, double value)
|
||||
public void setTagString(int index, String value)
|
||||
public void setTagNString(int index, String value)
|
||||
public void setTagJson(int index, String value)
|
||||
public void setTagVarbinary(int index, byte[] value)
|
||||
public void setTagGeometry(int index, byte[] value)
|
||||
```
|
||||
|
||||
### Schemaless Writing
|
||||
|
@ -1001,7 +1093,7 @@ TaosConsumer consumer = new TaosConsumer<>(config);
|
|||
- httpConnectTimeout: WebSocket connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using WebSocket type.
|
||||
- messageWaitTimeout: socket timeout in milliseconds, the default value is 10000 ms. It only takes effect when using WebSocket type.
|
||||
- httpPoolSize: Maximum number of concurrent requests on a connection. It only takes effect when using WebSocket type.
|
||||
- For more information, see [Consumer Parameters](../../../develop/tmq).
|
||||
- For more information, see [Consumer Parameters](../../../develop/tmq). Note that the default value of auto.offset.reset in data subscription on the TDengine server has changed since version 3.2.0.0. A WebSocket configuration sketch follows this list.
|
||||
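A hedged sketch of assembling a WebSocket consumer configuration with the parameters above; the `td.connect.type` key and the endpoint are assumptions based on the connector's WebSocket examples, so adjust them to your deployment:

```java
import java.util.Properties;

public class WsTmqConfigSketch {
    public static Properties wsConsumerConfig() {
        Properties config = new Properties();
        config.setProperty("td.connect.type", "ws");               // assumed key for WebSocket transport
        config.setProperty("bootstrap.servers", "localhost:6041"); // assumed taosAdapter endpoint
        config.setProperty("group.id", "group1");
        // WebSocket-specific tuning keys from the list above:
        config.setProperty("httpConnectTimeout", "5000");   // connection timeout, ms
        config.setProperty("messageWaitTimeout", "10000");  // socket timeout, ms
        config.setProperty("httpPoolSize", "20");           // max concurrent requests on a connection
        return config;
    }
}
```

The resulting `Properties` can then be passed to `new TaosConsumer<>(config)`, as in the ConsumerLoop example below.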
|
||||
#### Subscribe to consume data
|
||||
|
||||
|
@ -1101,7 +1193,7 @@ public abstract class ConsumerLoop {
|
|||
config.setProperty("bootstrap.servers", "localhost:6030");
|
||||
config.setProperty("td.connect.user", "root");
|
||||
config.setProperty("td.connect.pass", "taosdata");
|
||||
config.setProperty("auto.offset.reset", "earliest");
|
||||
config.setProperty("auto.offset.reset", "latest");
|
||||
config.setProperty("msg.with.table.name", "true");
|
||||
config.setProperty("enable.auto.commit", "true");
|
||||
config.setProperty("auto.commit.interval.ms", "1000");
|
||||
|
@ -1184,7 +1276,7 @@ public abstract class ConsumerLoop {
|
|||
config.setProperty("bootstrap.servers", "localhost:6041");
|
||||
config.setProperty("td.connect.user", "root");
|
||||
config.setProperty("td.connect.pass", "taosdata");
|
||||
config.setProperty("auto.offset.reset", "earliest");
|
||||
config.setProperty("auto.offset.reset", "latest");
|
||||
config.setProperty("msg.with.table.name", "true");
|
||||
config.setProperty("enable.auto.commit", "true");
|
||||
config.setProperty("auto.commit.interval.ms", "1000");
|
||||
|
|
|
@ -794,7 +794,7 @@ The TDengine Go Connector supports subscription functionality with the following
|
|||
```go
|
||||
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
|
||||
"group.id": "test",
|
||||
"auto.offset.reset": "earliest",
|
||||
"auto.offset.reset": "latest",
|
||||
"td.connect.ip": "127.0.0.1",
|
||||
"td.connect.user": "root",
|
||||
"td.connect.pass": "taosdata",
|
||||
|
@ -870,6 +870,7 @@ package main
|
|||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/taosdata/driver-go/v3/af"
|
||||
"github.com/taosdata/driver-go/v3/af/tmq"
|
||||
|
@ -890,19 +891,16 @@ func main() {
|
|||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
|
||||
"group.id": "test",
|
||||
"auto.offset.reset": "earliest",
|
||||
"td.connect.ip": "127.0.0.1",
|
||||
"td.connect.user": "root",
|
||||
"td.connect.pass": "taosdata",
|
||||
"td.connect.port": "6030",
|
||||
"client.id": "test_tmq_client",
|
||||
"enable.auto.commit": "false",
|
||||
"msg.with.table.name": "true",
|
||||
"group.id": "test",
|
||||
"auto.offset.reset": "latest",
|
||||
"td.connect.ip": "127.0.0.1",
|
||||
"td.connect.user": "root",
|
||||
"td.connect.pass": "taosdata",
|
||||
"td.connect.port": "6030",
|
||||
"client.id": "test_tmq_client",
|
||||
"enable.auto.commit": "false",
|
||||
"msg.with.table.name": "true",
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
@ -915,10 +913,16 @@ func main() {
|
|||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
_, err = db.Exec("insert into example_tmq.t1 values(now,1)")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
go func() {
|
||||
for {
|
||||
_, err = db.Exec("insert into example_tmq.t1 values(now,1)")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
time.Sleep(time.Millisecond * 100)
|
||||
}
|
||||
}()
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
ev := consumer.Poll(500)
|
||||
if ev != nil {
|
||||
|
@ -972,6 +976,7 @@ package main
|
|||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/taosdata/driver-go/v3/common"
|
||||
tmqcommon "github.com/taosdata/driver-go/v3/common/tmq"
|
||||
|
@ -995,7 +1000,7 @@ func main() {
|
|||
"td.connect.pass": "taosdata",
|
||||
"group.id": "example",
|
||||
"client.id": "example_consumer",
|
||||
"auto.offset.reset": "earliest",
|
||||
"auto.offset.reset": "latest",
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
@ -1004,29 +1009,34 @@ func main() {
|
|||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
_, err = db.Exec("create table example_ws_tmq.t_all(ts timestamp," +
|
||||
"c1 bool," +
|
||||
"c2 tinyint," +
|
||||
"c3 smallint," +
|
||||
"c4 int," +
|
||||
"c5 bigint," +
|
||||
"c6 tinyint unsigned," +
|
||||
"c7 smallint unsigned," +
|
||||
"c8 int unsigned," +
|
||||
"c9 bigint unsigned," +
|
||||
"c10 float," +
|
||||
"c11 double," +
|
||||
"c12 binary(20)," +
|
||||
"c13 nchar(20)" +
|
||||
")")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
go func() {
|
||||
_, err := db.Exec("create table example_ws_tmq.t_all(ts timestamp," +
|
||||
"c1 bool," +
|
||||
"c2 tinyint," +
|
||||
"c3 smallint," +
|
||||
"c4 int," +
|
||||
"c5 bigint," +
|
||||
"c6 tinyint unsigned," +
|
||||
"c7 smallint unsigned," +
|
||||
"c8 int unsigned," +
|
||||
"c9 bigint unsigned," +
|
||||
"c10 float," +
|
||||
"c11 double," +
|
||||
"c12 binary(20)," +
|
||||
"c13 nchar(20)" +
|
||||
")")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
_, err = db.Exec("insert into example_ws_tmq.t_all values(now,true,2,3,4,5,6,7,8,9,10.123,11.123,'binary','nchar')")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
for {
|
||||
_, err = db.Exec("insert into example_ws_tmq.t_all values(now,true,2,3,4,5,6,7,8,9,10.123,11.123,'binary','nchar')")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
time.Sleep(time.Millisecond * 100)
|
||||
}
|
||||
|
||||
}()
|
||||
for i := 0; i < 5; i++ {
|
||||
ev := consumer.Poll(500)
|
||||
|
|
|
@ -442,7 +442,7 @@ The following parameters can be configured for the TMQ DSN. Only `group.id` is m
|
|||
|
||||
- `group.id`: Within a consumer group, load balancing is implemented by consuming messages on an at-least-once basis.
|
||||
- `client.id`: Subscriber client ID.
|
||||
- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default is earliest. Note: This parameter is set per consumer group.
|
||||
- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default value varies depending on the TDengine version. For details, see [Data Subscription](https://docs.tdengine.com/develop/tmq/). Note: This parameter is set per consumer group.
|
||||
- `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential.
|
||||
- `auto.commit.interval.ms`: Interval for automatic commits.
|
||||
|
||||
|
|
|
@ -31,11 +31,13 @@ We recommend using the latest version of `taospy`, regardless of the version of
|
|||
|
||||
|Python Connector Version|major changes|
|
||||
|:-------------------:|:----:|
|
||||
|2.7.12|1. added support for `varbinary` type (not yet supported in STMT)<br/> 2. improved query performance (thanks to contributor [hadrianl](https://github.com/taosdata/taos-connector-python/pull/209))|
|
||||
|2.7.9|support for getting assignment and seek function on subscription|
|
||||
|2.7.8|add `execute_many` method|
|
||||
|
||||
|Python Websocket Connector Version|major changes|
|
||||
|:----------------------------:|:-----:|
|
||||
|0.2.9|bug fixes|
|
||||
|0.2.5|1. support for getting assignment and seek function on subscription <br/> 2. support schemaless <br/> 3. support STMT|
|
||||
|0.2.4|support `unsubscribe` on subscription|
|
||||
|
||||
|
@ -1023,10 +1025,6 @@ Due to the current imperfection of Python's nanosecond support (see link below),
|
|||
1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds
|
||||
2. https://www.python.org/dev/peps/pep-0564/
|
||||
|
||||
## Important Update
|
||||
|
||||
[**Release Notes**](https://github.com/taosdata/taos-connector-python/releases)
|
||||
|
||||
## API Reference
|
||||
|
||||
- [taos](https://docs.taosdata.com/api/taospy/taos/)
|
||||
|
|
|
@ -52,8 +52,6 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive
|
|||
&& tar -xzf php-tdengine.tar.gz -C php-tdengine --strip-components=1
|
||||
```
|
||||
|
||||
> Version number `v1.0.2` is only an example; it can be replaced with any newer version. Please find available versions in [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).
|
||||
|
||||
**Non-Swoole Environment:**
|
||||
|
||||
```shell
|
||||
|
|
|
@ -4,7 +4,6 @@ import PkgListV3 from "/components/PkgListV3";
|
|||
|
||||
<PkgListV3 type={1} sys="Linux" />
|
||||
|
||||
[All Downloads](../../releases/tdengine)
|
||||
|
||||
2. Unzip
|
||||
|
||||
|
|
|
@ -4,8 +4,6 @@ import PkgListV3 from "/components/PkgListV3";
|
|||
|
||||
<PkgListV3 type={8} sys="macOS" />
|
||||
|
||||
[All Downloads](../../releases/tdengine)
|
||||
|
||||
2. Execute the installer, select the default value as prompted, and complete the installation. If the installation is blocked, you can right-click or ctrl-click on the installation package and select `Open`.
|
||||
3. configure taos.cfg
|
||||
|
||||
|
|
|
@ -3,8 +3,6 @@ import PkgListV3 from "/components/PkgListV3";
|
|||
1. Download the client installation package
|
||||
|
||||
<PkgListV3 type={4} sys="Windows" />
|
||||
|
||||
[All Downloads](../../releases/tdengine)
|
||||
2. Execute the installer, select the default value as prompted, and complete the installation
|
||||
3. Installation path
|
||||
|
||||
|
|
|
@ -31,7 +31,7 @@ taosAdapter provides the following features.
|
|||
|
||||
### Install taosAdapter
|
||||
|
||||
If you use the TDengine server, you don't need additional steps to install taosAdapter. You can download taosAdapter from [TDengine 3.0 released versions](../../releases/tdengine) to download the TDengine server installation package. If you need to deploy taosAdapter separately on another server other than the TDengine server, you should install the full TDengine server package on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter]( https://github.com/taosdata/taosadapter/blob/3.0/BUILD.md) documentation.
|
||||
If you use the TDengine server, you don't need additional steps to install taosAdapter. If you need to deploy taosAdapter separately on another server other than the TDengine server, you should install the full TDengine server package on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter]( https://github.com/taosdata/taosadapter/blob/3.0/BUILD.md) documentation.
|
||||
|
||||
### Start/Stop taosAdapter
|
||||
|
||||
|
@ -180,7 +180,7 @@ See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/bl
|
|||
node_exporter is an exporter for machine metrics. Please visit [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) for more information.
|
||||
- Support for Prometheus remote_read and remote_write
|
||||
remote_read and remote_write are interfaces for Prometheus data read and write from/to other data storage solution. Please visit [https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) for more information.
|
||||
- Get table's VGroup ID. For more information about VGroup, please refer to [primary-logic-unit](/tdinternal/arch/#primary-logic-unit).
|
||||
- Get table's VGroup ID.
|
||||
|
||||
## Interfaces
|
||||
|
||||
|
@ -246,7 +246,7 @@ node_export is an exporter of hardware and OS metrics exposed by the \*NIX kerne
|
|||
|
||||
### Get table's VGroup ID
|
||||
|
||||
You can call `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>` to get table's VGroup ID. For more information about VGroup, please refer to [primary-logic-unit](/tdinternal/arch/#primary-logic-unit).
|
||||
You can call `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>` to get table's VGroup ID.
|
||||
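As a hedged illustration, the endpoint can be called from Java 11's `HttpClient`; the host, database, table, and the use of Basic authentication with the default root/taosdata account are all assumptions to adapt:

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.util.Base64;

public class VgidQuerySketch {
    public static void main(String[] args) throws Exception {
        String auth = Base64.getEncoder().encodeToString("root:taosdata".getBytes());
        HttpRequest req = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:6041/rest/vgid?db=test&table=d0")) // assumed db/table
                .header("Authorization", "Basic " + auth)
                .GET()
                .build();
        HttpResponse<String> resp = HttpClient.newHttpClient()
                .send(req, HttpResponse.BodyHandlers.ofString());
        System.out.println(resp.body()); // JSON response carrying the table's VGroup ID
    }
}
```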
|
||||
## Memory usage optimization methods
|
||||
|
||||
|
|
|
@ -13,7 +13,7 @@ taosBenchmark (formerly taosdemo ) is a tool for testing the performance of TDen
|
|||
|
||||
There are two ways to install taosBenchmark:
|
||||
|
||||
- Installing the official TDengine installer will automatically install taosBenchmark. Please refer to [TDengine installation](../../operation/pkg-install) for details.
|
||||
- Installing the official TDengine installer will automatically install taosBenchmark.
|
||||
|
||||
- Compile taos-tools separately and install them. Please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.
|
||||
|
||||
|
@ -397,6 +397,7 @@ The configuration parameters for specifying super table tag columns and data col
|
|||
### Query scenario configuration parameters
|
||||
|
||||
`filetype` must be set to `query` in the query scenario.
|
||||
`query_times` is the number of times each query is run.
|
||||
|
||||
You can control the query scenario by setting the `kill_slow_query_threshold` and `kill_slow_query_interval` parameters to kill the execution of slow query statements: the threshold is the execution time (exec_usec), in seconds, after which taosBenchmark kills the query; the interval is the sleep time, in seconds, between checks, so that continuously polling for slow queries does not itself consume CPU.
|
||||
|
||||
|
|
|
@ -103,7 +103,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
|
|||
use letter and number only. Default is NOT.
|
||||
-n, --no-escape No escape char '`'. Default is using it.
|
||||
-Q, --dot-replace Replace dot character with underline character in
|
||||
the table name.
|
||||
the table name. (Version 2.5.3)
|
||||
-T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is
|
||||
8.
|
||||
-C, --cloud=CLOUD_DSN specify a DSN to access TDengine cloud service
|
||||
|
@ -113,6 +113,10 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
|
|||
-?, --help Give this help list
|
||||
--usage Give a short usage message
|
||||
-V, --version Print program version
|
||||
-W, --rename=RENAME-LIST Rename database name with new name during
|
||||
importing data. RENAME-LIST:
|
||||
"db1=newDB1|db2=newDB2" means rename db1 to newDB1
|
||||
and rename db2 to newDB2 (Version 2.5.4)
|
||||
|
||||
Mandatory or optional arguments to long options are also mandatory or optional
|
||||
for any corresponding short options.
|
||||
|
|
|
@ -652,6 +652,15 @@ The charset that takes effect is UTF-8.
|
|||
| Type | String |
|
||||
| Default Value | None |
|
||||
|
||||
### smlAutoChildTableNameDelimiter
|
||||
|
||||
| Attribute | Description |
|
||||
| ------------- | ------------------------------------------ |
|
||||
| Applicable | Client only |
|
||||
| Meaning | Delimiter used to join tag values to form the child table name |
|
||||
| Type | String |
|
||||
| Default Value | None |
|
||||
|
||||
### smlTagName
|
||||
|
||||
| Attribute | Description |
|
||||
|
@ -731,16 +740,6 @@ The charset that takes effect is UTF-8.
|
|||
| Value Range | 0: not change; 1: change by modification |
|
||||
| Default Value | 0 |
|
||||
|
||||
### keepTimeOffset
|
||||
|
||||
| Attribute | Description |
|
||||
| ------------- | ------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Latency of data migration |
|
||||
| Unit | hour |
|
||||
| Value Range | 0-23 |
|
||||
| Default Value | 0 |
|
||||
|
||||
### tmqMaxTopicNum
|
||||
|
||||
| Attribute | Description |
|
||||
|
@ -807,4 +806,4 @@ The charset that takes effect is UTF-8.
|
|||
| 53 | udf | Yes | Yes | |
|
||||
| 54 | enableCoreFile | Yes | Yes | |
|
||||
| 55 | ttlChangeOnWrite | No | Yes | |
|
||||
| 56 | keepTimeOffset | Yes | Yes | |
|
||||
| 56 | keepTimeOffset | Yes | Yes (discarded since 3.2.0.0) | |
|
||||
|
|
|
@ -93,6 +93,8 @@ Note that tag_key1, tag_key2 are not the original order of the tags entered by t
|
|||
The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t\_" is a fixed prefix that every table generated by this mapping relationship has.
|
||||
:::
|
||||
|
||||
If you do not want to use an automatically generated table name, there are two ways to specify sub table names; the first one has higher priority.
|
||||
You can configure smlAutoChildTableNameDelimiter in taos.cfg to specify a delimiter (any character except `@ # space \r \t \n`) used to join the tag values into the table name, for example, `smlAutoChildTableNameDelimiter=-`. If you insert `st,t0=cpu1,t1=4 c1=3 1626006833639000000`, the table name will be cpu1-4.
|
||||
You can configure smlChildTableName in taos.cfg to specify table names, for example, `smlChildTableName=tname`. You can insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored. A naming sketch follows after this list.
|
||||
|
||||
2. If the super table obtained by parsing the line protocol does not exist, this super table is created.
|
||||
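To make the naming rules above concrete, here is a hypothetical Java sketch that reproduces them outside of TDengine. The exact canonical string that the server hashes is an assumption based on the description above, and `defaultChildTableName`/`delimiterChildTableName` are illustrative helpers, not connector APIs:

```java
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.TreeMap;

public class ChildTableNameSketch {
    // Default rule: "t_" + MD5 of the tag string sorted by tag key (canonical form assumed).
    static String defaultChildTableName(Map<String, String> tags) throws Exception {
        StringBuilder joined = new StringBuilder();
        for (Map.Entry<String, String> e : new TreeMap<>(tags).entrySet()) {
            if (joined.length() > 0) joined.append(',');
            joined.append(e.getKey()).append('=').append(e.getValue());
        }
        byte[] md5 = MessageDigest.getInstance("MD5")
                .digest(joined.toString().getBytes(StandardCharsets.UTF_8));
        StringBuilder name = new StringBuilder("t_");
        for (byte b : md5) name.append(String.format("%02x", b));
        return name.toString();
    }

    // smlAutoChildTableNameDelimiter rule: join the tag values with the delimiter.
    static String delimiterChildTableName(Map<String, String> tags, String delimiter) {
        return String.join(delimiter, tags.values());
    }

    public static void main(String[] args) throws Exception {
        Map<String, String> tags = new LinkedHashMap<>();
        tags.put("t0", "cpu1");
        tags.put("t1", "4");
        System.out.println(defaultChildTableName(tags));        // a t_<md5> style name
        System.out.println(delimiterChildTableName(tags, "-")); // cpu1-4, as in the example above
    }
}
```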
|
|
|
@ -16,7 +16,7 @@ taosKeeper is a tool for TDengine that exports monitoring metrics. With taosKeep
|
|||
There are two ways to install taosKeeper:
|
||||
Methods of installing taosKeeper:
|
||||
|
||||
- Installing the official TDengine installer will automatically install taosKeeper. Please refer to [TDengine installation](../../operation/pkg-install) for details.
|
||||
- Installing the official TDengine installer will automatically install taosKeeper.
|
||||
|
||||
- You can compile taosKeeper separately and install it. Please refer to the [taosKeeper](https://github.com/taosdata/taoskeeper) repository for details.
|
||||
## Configuration and Launch
|
||||
|
|
|
@ -21,7 +21,7 @@ TDengine Source Connector is used to read data from TDengine in real-time and se
|
|||
1. Linux operating system
|
||||
2. Java 8 and Maven installed
|
||||
3. Git/curl/vi is installed
|
||||
4. TDengine is installed and started. If not, please refer to [Installation and Uninstallation](../../operation/pkg-install)
|
||||
4. TDengine is installed and started.
|
||||
|
||||
## Install Kafka
|
||||
|
||||
|
|
|
@ -10,76 +10,60 @@ description: How to use Seeq and TDengine to perform time series data analysis
|
|||
|
||||
Seeq is an advanced analytics software for the manufacturing industry and the Industrial Internet of Things (IIoT). Seeq supports the use of machine learning innovations within process manufacturing organizations. These capabilities enable organizations to deploy their own or third-party machine learning algorithms into advanced analytics applications used by frontline process engineers and subject matter experts, thus extending the efforts of a single data scientist to many frontline workers.
|
||||
|
||||
With the TDengine Java connector, Seeq effortlessly supports querying time series data provided by TDengine and offers functionalities such as data visualization, analysis, and forecasting.
|
||||
TDengine can be added as a data source into Seeq via the JDBC connector. Once the data source is configured, Seeq can read data from TDengine and offer functionalities such as data visualization, analysis, and forecasting.
|
||||
|
||||
### Install Seeq
|
||||
## Prerequisite
|
||||
|
||||
Please download Seeq Server and Seeq Data Lab software installation package from the [Seeq official website](https://www.seeq.com/customer-download).
|
||||
1. Install Seeq Server and Seeq Data Lab software
|
||||
2. Install TDengine or register TDengine Cloud service
|
||||
|
||||
### Install and start Seeq Server
|
||||
|
||||
```
|
||||
tar xvzf seeq-server-xxx.tar.gz
|
||||
cd seeq-server-installer
|
||||
sudo ./install
|
||||
|
||||
sudo seeq service enable
|
||||
sudo seeq start
|
||||
```
|
||||
|
||||
### Install and start Seeq Data Lab Server
|
||||
|
||||
Seeq Data Lab needs to be installed on a separate server from Seeq Server and connected to Seeq Server through configuration. For detailed installation and configuration instructions, please refer to [the official documentation](https://support.seeq.com/space/KB/1034059842).
|
||||
|
||||
```
|
||||
tar xvf seeq-data-lab-<version>-64bit-linux.tar.gz
|
||||
sudo seeq-data-lab-installer/install -f /opt/seeq/seeq-data-lab -g /var/opt/seeq -u seeq
|
||||
sudo seeq config set Network/DataLab/Hostname localhost
|
||||
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
|
||||
sudo seeq config set Network/Hostname <value> # the host IP or URL of the main Seeq Server
|
||||
|
||||
# If the main Seeq server is configured to listen over HTTPS
|
||||
sudo seeq config set Network/Webserver/SecurePort 443 # the secure port of the main Seeq Server (usually 443)
|
||||
|
||||
# If the main Seeq server is NOT configured to listen over HTTPS
|
||||
sudo seeq config set Network/Webserver/Port <value>
|
||||
|
||||
# On the main Seeq server, open a Seeq Command Prompt and set the hostname of the Data Lab server:
|
||||
sudo seeq config set Network/DataLab/Hostname <value> # the host IP (not URL) of the Data Lab server
|
||||
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
|
||||
```
|
||||
|
||||
### Install TDengine on-premise instance
|
||||
|
||||
See [Quick Install from Package](../../get-started).
|
||||
|
||||
### Or use TDengine Cloud
|
||||
|
||||
Register for a [TDengine Cloud](https://cloud.tdengine.com) account and log in to your account.
|
||||
|
||||
## Make Seeq be able to access TDengine
|
||||
|
||||
1. Get data location configuration
|
||||
## Install TDengine JDBC connector
|
||||
|
||||
1. Get Seeq data location configuration
|
||||
```
|
||||
sudo seeq config get Folders/Data
|
||||
```
|
||||
|
||||
2. Download TDengine Java connector from maven.org. Please use the latest version (Current is 3.2.5, https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar).
|
||||
|
||||
2. Download the latest TDengine Java connector from maven.org (the current version is [3.2.5](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar)), and copy the JAR file into the_directory_found_in_step_1/plugins/lib/
|
||||
3. Restart Seeq server
|
||||
|
||||
```
|
||||
sudo seeq restart
|
||||
```
|
||||
|
||||
4. Input License
|
||||
## Add TDengine into Seeq's data source
|
||||
1. Open Seeq, log in as admin, go to Administration, click "Add Data Source"
|
||||
2. For connector, choose SQL connector v2
|
||||
3. Inside "Additional Configuration" input box, copy and paste the following
|
||||
|
||||
Use a browser to access ip:34216 and input the license according to the guide.
|
||||
```
|
||||
{
|
||||
"QueryDefinitions": []
|
||||
"Type": "GENERIC",
|
||||
"Hostname": null,
|
||||
"Port": 0,
|
||||
"DatabaseName": null,
|
||||
"Username": null,
|
||||
"Password": null,
|
||||
"InitialSql": null,
|
||||
"TimeZone": null,
|
||||
"PrintRows": false,
|
||||
"UseWindowsAuth": false,
|
||||
"SqlFetchBatchSize": 100000,
|
||||
"UseSSL": false,
|
||||
"JdbcProperties": null,
|
||||
"GenericDatabaseConfig": {
|
||||
"DatabaseJdbcUrl": "jdbc:TAOS-RS://localhost:6030/?user=root&password=taosdata",
|
||||
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
|
||||
"ResolutionInNanoseconds": 1000,
|
||||
"ZonedColumnTypes": []
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## How to use Seeq to analyze time-series data that TDengine serves
|
||||
Note: You need to replace DatabaseJdbcUrl with your own setting. Please log in to TDengine Cloud or open taosExplorer for the enterprise edition, then click Programming -> Java to find yours. For the "QueryDefinitions", please follow the examples below to write your own.
|
||||
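Before pasting the JDBC URL into Seeq, it can help to verify it from a standalone Java program. A minimal sketch, assuming the taos-jdbcdriver JAR is on the classpath and a local taosAdapter with default credentials:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class JdbcUrlCheck {
    public static void main(String[] args) throws Exception {
        // Same URL style as the Seeq data source config above; adjust host and credentials.
        String url = "jdbc:TAOS-RS://localhost:6041/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("select server_version()")) {
            while (rs.next()) {
                System.out.println("connected, server version: " + rs.getString(1));
            }
        }
    }
}
```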
|
||||
This chapter demonstrates how to use Seeq software in conjunction with TDengine for time series data analysis.
|
||||
## Use Seeq to analyze time-series data stored inside TDengine
|
||||
|
||||
This chapter demonstrates how to use Seeq with TDengine for time series data analysis.
|
||||
|
||||
### Scenario Overview
|
||||
|
||||
|
@ -150,8 +134,8 @@ Please login with Seeq administrator and create a few data sources as following.
|
|||
"Hostname": null,
|
||||
"Port": 0,
|
||||
"DatabaseName": null,
|
||||
"Username": "root",
|
||||
"Password": "taosdata",
|
||||
"Username": null,
|
||||
"Password": null,
|
||||
"InitialSql": null,
|
||||
"TimeZone": null,
|
||||
"PrintRows": false,
|
||||
|
@ -210,8 +194,8 @@ Please login with Seeq administrator and create a few data sources as following.
|
|||
"Hostname": null,
|
||||
"Port": 0,
|
||||
"DatabaseName": null,
|
||||
"Username": "root",
|
||||
"Password": "taosdata",
|
||||
"Username": null,
|
||||
"Password": null,
|
||||
"InitialSql": null,
|
||||
"TimeZone": null,
|
||||
"PrintRows": false,
|
||||
|
@ -269,8 +253,8 @@ Please login with Seeq administrator and create a few data sources as following.
|
|||
"Hostname": null,
|
||||
"Port": 0,
|
||||
"DatabaseName": null,
|
||||
"Username": "root",
|
||||
"Password": "taosdata",
|
||||
"Username": null,
|
||||
"Password": null,
|
||||
"InitialSql": null,
|
||||
"TimeZone": null,
|
||||
"PrintRows": false,
|
||||
|
@ -289,13 +273,13 @@ Please login with Seeq administrator and create a few data sources as following.
|
|||
|
||||
#### Launch Seeq Workbench
|
||||
|
||||
Please login to Seeq server with IP:port and create a new Seeq Workbench, then select data sources and choose the correct tools to do data visualization and analysis. Please refer to [the official documentation](https://support.seeq.com/space/KB/146440193/Seeq+Workbench) for the details.
|
||||
Please login to Seeq server and create a new Seeq Workbench, then select data sources and choose the correct tools to do data visualization and analysis. Please refer to [the official documentation](https://support.seeq.com/space/KB/146440193/Seeq+Workbench) for the details.
|
||||
|
||||

|
||||
|
||||
#### Use Seeq Data Lab Server for advanced data analysis
|
||||
|
||||
Please login to the Seeq service with IP:port and create a new Seeq Data Lab. Then you can use advanced tools including Python environment and machine learning add-ons for more complex analysis.
|
||||
Please login to the Seeq service and create a new Seeq Data Lab. Then you can use advanced tools including Python environment and machine learning add-ons for more complex analysis.
|
||||
|
||||
```Python
|
||||
from seeq import spy
|
||||
|
@ -370,13 +354,15 @@ Please note that when using TDengine Cloud, you need to specify the database nam
|
|||
|
||||
#### The data source of TDengine Cloud example
|
||||
|
||||
This data source contains the data from a smart meter in public database smartmeters.
|
||||
|
||||
```
|
||||
{
|
||||
"QueryDefinitions": [
|
||||
{
|
||||
"Name": "CloudVoltage",
|
||||
"Type": "SIGNAL",
|
||||
"Sql": "SELECT ts, voltage FROM test.meters",
|
||||
"Sql": "SELECT ts, voltage FROM smartmeters.d1000",
|
||||
"Enabled": true,
|
||||
"TestMode": false,
|
||||
"TestQueriesDuringSync": true,
|
||||
|
@ -409,8 +395,8 @@ Please note that when using TDengine Cloud, you need to specify the database nam
|
|||
"Hostname": null,
|
||||
"Port": 0,
|
||||
"DatabaseName": null,
|
||||
"Username": "root",
|
||||
"Password": "taosdata",
|
||||
"Username": null,
|
||||
"Password": null,
|
||||
"InitialSql": null,
|
||||
"TimeZone": null,
|
||||
"PrintRows": false,
|
||||
|
@ -419,7 +405,7 @@ Please note that when using TDengine Cloud, you need to specify the database nam
|
|||
"UseSSL": false,
|
||||
"JdbcProperties": null,
|
||||
"GenericDatabaseConfig": {
|
||||
"DatabaseJdbcUrl": "jdbc:TAOS-RS://gw.cloud.taosdata.com?useSSL=true&token=41ac9d61d641b6b334e8b76f45f5a8XXXXXXXXXX",
|
||||
"DatabaseJdbcUrl": "jdbc:TAOS-RS://gw.us-west-2.aws.cloud.tdengine.com?useSSL=true&token=42b874395452d36f38dd6bf4317757611b213683",
|
||||
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
|
||||
"ResolutionInNanoseconds": 1000,
|
||||
"ZonedColumnTypes": []
|
||||
|
@ -433,8 +419,8 @@ Please note that when using TDengine Cloud, you need to specify the database nam
|
|||
|
||||
## Conclusion
|
||||
|
||||
By integrating Seeq and TDengine, it is possible to leverage the efficient storage and querying performance of TDengine while also benefiting from Seeq's powerful data visualization and analysis capabilities provided to users.
|
||||
By integrating Seeq and TDengine, you can leverage the efficient storage and querying performance of TDengine while also benefiting from Seeq's powerful data visualization and analysis capabilities provided to users.
|
||||
|
||||
This integration allows users to take advantage of TDengine's high-performance time-series data storage and retrieval, ensuring efficient handling of large volumes of data. At the same time, Seeq provides advanced analytics features such as data visualization, anomaly detection, correlation analysis, and predictive modeling, enabling users to gain valuable insights and make data-driven decisions.
|
||||
This integration allows users to take advantage of TDengine's high-performance time-series data storage and query, ensuring efficient handling of large volumes of data. At the same time, Seeq provides advanced analytics features such as data visualization, anomaly detection, correlation analysis, and predictive modeling, enabling users to gain valuable insights and make data-driven decisions.
|
||||
|
||||
Together, Seeq and TDengine provide a comprehensive solution for time series data analysis in diverse industries such as manufacturing, IIoT, and power systems. The combination of efficient data storage and advanced analytics empowers users to unlock the full potential of their time series data, driving operational improvements, and enabling predictive and prescriptive analytics applications.
|
||||
|
|
|
@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://t
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 3.2.0.0
|
||||
|
||||
<Release type="tdengine" version="3.2.0.0" />
|
||||
|
||||
## 3.1.1.0
|
||||
|
||||
<Release type="tdengine" version="3.1.1.0" />
|
||||
|
|
|
@ -3,6 +3,7 @@ package main
|
|||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/taosdata/driver-go/v3/af"
|
||||
"github.com/taosdata/driver-go/v3/af/tmq"
|
||||
|
@ -27,15 +28,15 @@ func main() {
|
|||
panic(err)
|
||||
}
|
||||
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
|
||||
"group.id": "test",
|
||||
"auto.offset.reset": "earliest",
|
||||
"td.connect.ip": "127.0.0.1",
|
||||
"td.connect.user": "root",
|
||||
"td.connect.pass": "taosdata",
|
||||
"td.connect.port": "6030",
|
||||
"client.id": "test_tmq_client",
|
||||
"enable.auto.commit": "false",
|
||||
"msg.with.table.name": "true",
|
||||
"group.id": "test",
|
||||
"auto.offset.reset": "latest",
|
||||
"td.connect.ip": "127.0.0.1",
|
||||
"td.connect.user": "root",
|
||||
"td.connect.pass": "taosdata",
|
||||
"td.connect.port": "6030",
|
||||
"client.id": "test_tmq_client",
|
||||
"enable.auto.commit": "false",
|
||||
"msg.with.table.name": "true",
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
@ -48,12 +49,17 @@ func main() {
|
|||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
_, err = db.Exec("insert into example_tmq.t1 values(now,1)")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
go func() {
|
||||
for {
|
||||
_, err = db.Exec("insert into example_tmq.t1 values(now,1)")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
time.Sleep(time.Millisecond * 100)
|
||||
}
|
||||
}()
|
||||
for i := 0; i < 5; i++ {
|
||||
ev := consumer.Poll(0)
|
||||
ev := consumer.Poll(500)
|
||||
if ev != nil {
|
||||
switch e := ev.(type) {
|
||||
case *tmqcommon.DataMessage:
|
||||
|
|
|
@ -22,7 +22,7 @@
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.2.4</version>
|
||||
<version>3.2.7-SNAPSHOT</version>
|
||||
</dependency>
|
||||
<!-- ANCHOR_END: dep-->
|
||||
<dependency>
|
||||
|
|
|
@ -66,7 +66,6 @@ public class SubscribeDemo {
|
|||
properties.setProperty(TMQConstants.VALUE_DESERIALIZER,
|
||||
"com.taos.example.MetersDeserializer");
|
||||
properties.setProperty(TMQConstants.VALUE_DESERIALIZER_ENCODING, "UTF-8");
|
||||
properties.setProperty(TMQConstants.EXPERIMENTAL_SNAPSHOT_ENABLE, "true");
|
||||
|
||||
// poll data
|
||||
try (TaosConsumer<Meters> consumer = new TaosConsumer<>(properties)) {
|
||||
|
|
|
@ -66,7 +66,6 @@ public class WebsocketSubscribeDemo {
|
|||
properties.setProperty(TMQConstants.VALUE_DESERIALIZER,
|
||||
"com.taos.example.MetersDeserializer");
|
||||
properties.setProperty(TMQConstants.VALUE_DESERIALIZER_ENCODING, "UTF-8");
|
||||
properties.setProperty(TMQConstants.EXPERIMENTAL_SNAPSHOT_ENABLE, "true");
|
||||
|
||||
// poll data
|
||||
try (TaosConsumer<Meters> consumer = new TaosConsumer<>(properties)) {
|
||||
|
|
|
@ -23,9 +23,6 @@ def taos_get_assignment_and_seek_demo():
|
|||
consumer = Consumer(
|
||||
{
|
||||
"group.id": "0",
|
||||
# should disable snapshot,
|
||||
# otherwise it will cause invalid params error
|
||||
"experimental.snapshot.enable": "false",
|
||||
}
|
||||
)
|
||||
consumer.subscribe(["tmq_assignment_demo_topic"])
|
||||
|
|
|
@ -21,9 +21,6 @@ def taosws_get_assignment_and_seek_demo():
|
|||
prepare()
|
||||
consumer = taosws.Consumer(conf={
|
||||
"td.connect.websocket.scheme": "ws",
|
||||
# should disable snapshot,
|
||||
# otherwise it will cause invalid params error
|
||||
"experimental.snapshot.enable": "false",
|
||||
"group.id": "0",
|
||||
})
|
||||
consumer.subscribe(["tmq_assignment_demo_topic"])
|
||||
|
|
|
@ -4,20 +4,14 @@ description: Brief introduction to the main features of TDengine
|
|||
toc_max_heading_level: 2
|
||||
---
|
||||
|
||||
TDengine is an open-source, high-performance, cloud-native [time-series database](https://tdengine.com/tsdb/) optimized for IoT, connected vehicles, industrial IoT, finance, IT operations, and other scenarios. The TDengine code, including its cluster functionality, is open source under GNU AGPL v3.0. Besides the core time-series database features, TDengine also provides [caching](../develop/cache/), [data subscription](../develop/tmq), [stream processing](../develop/stream), and other features to reduce system complexity as well as development and operation costs.
|
||||
TDengine is a big data platform designed and optimized for IoT, industrial IoT, and similar scenarios. It can securely and efficiently aggregate, store, analyze, and distribute the TB- or even PB-scale data generated every day by massive numbers of devices and data collectors, monitor business operations in real time with alerting, and provide real-time business insights. Its core module is TDengine OSS, a high-performance, cluster-capable, open-source, cloud-native, and minimalist time-series database.
|
||||

||||
This chapter introduces the main products and features of TDengine, its competitive advantages, typical application scenarios, and benchmark comparisons with other databases, so that you can get an overall picture of TDengine.
|
||||
|
||||
## Main Products
|
||||

||||
TDengine has three main products: TDengine Enterprise, TDengine Cloud, and TDengine OSS. For their definitions, please refer to
|
||||
- [TDengine Enterprise](https://www.taosdata.com/tdengine-pro)
|
||||
- [TDengine Cloud](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)
|
||||
- [TDengine OSS](https://www.taosdata.com/tdengine-oss)
|
||||
This section introduces the main features of TDengine OSS, its competitive advantages, typical application scenarios, and benchmark comparisons with other databases, so that you can get an overall picture of TDengine OSS.
|
||||

||||
## Main Features
|
||||

||||
The main features of TDengine are as follows:
|
||||
The main features of TDengine OSS are as follows:
|
||||
|
||||
1. Data ingestion, with support for
|
||||
- [SQL writing](../develop/insert-data/sql-writing)
|
||||
|
@ -150,3 +144,10 @@ The main features of TDengine are as follows:
|
|||
- [TDengine vs. InfluxDB: a big write-performance showdown!](https://www.taosdata.com/2021/11/05/3248.html)
|
||||
- [TDengine and InfluxDB query performance comparison test report](https://www.taosdata.com/2022/02/22/5969.html)
|
||||
- [Comparison test report of TDengine vs. InfluxDB, OpenTSDB, Cassandra, MySQL, ClickHouse, and other databases](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf)
|
||||
|
||||
|
||||
## Main Products
|
||||

||||
TDengine has two main products: TDengine Enterprise and TDengine Cloud. For their definitions, please refer to
|
||||
- [TDengine Enterprise](https://www.taosdata.com/tdengine-pro)
|
||||
- [TDengine Cloud](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)
|
||||
|
|
|
@ -4,7 +4,7 @@ description: 'Quickly set up a TDengine environment and experience its efficient writing and querying'
|
|||
---
|
||||
|
||||
import xiaot from './xiaot.webp'
|
||||
import xiaot_new from './xiaot-03.webp'
|
||||
import xiaot_new from './xiaot-20231007.png'
|
||||
import channel from './channel.webp'
|
||||
import official_account from './official-account.webp'
|
||||
|
||||
|
|
Binary file not shown.
After Width: | Height: | Size: 112 KiB |
|
@ -38,7 +38,10 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
|
|||
- Each data item in field_set must declare its own data type; for example, 1.2f32 represents a FLOAT value 1.2, and a value without a type suffix is treated as DOUBLE
|
||||
- timestamp supports multiple time precisions. The precision must be specified by a parameter when writing data; 6 precisions from hours down to nanoseconds are supported
|
||||
- To improve write efficiency, it is assumed by default that the field_set order is the same within one supertable (the first row contains all fields, and later rows follow that order). If the order differs, the smlDataFormat parameter must be set to false; otherwise the data is written assuming identical order and the data in the database will be corrupted. (smlDataFormat defaults to false in versions after 3.0.1.3, and the option is deprecated since 3.0.3.0.) [TDengine Schemaless Writing Reference Guide](/reference/schemaless/#无模式写入行协议)
|
||||
- By default, the generated sub-table name is a unique ID derived from a rule. Users can also specify a tag value as the sub-table name by configuring the smlChildTableName parameter in taos.cfg on the client side. That tag value should be globally unique. For example, suppose there is a tag named tname and smlChildTableName=tname is configured; inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates the sub-table cpu1. Note that if multiple rows share the same tname but have different tag_set values, the tag_set of the first row is used when the table is auto-created and the others are ignored. [TDengine Schemaless Writing Reference Guide](/reference/schemaless/#无模式写入行协议)
|
||||
- Sub-table name generation rules
|
||||
- By default, the generated sub-table name is a unique ID derived from a rule.
|
||||
- Users can also configure the smlAutoChildTableNameDelimiter parameter in taos.cfg on the client side to specify a delimiter for joining the tag values; the joined string is used as the sub-table name. For example, with smlAutoChildTableNameDelimiter=- configured, inserting st,t0=cpu1,t1=4 c1=3 1626006833639000000 creates the sub-table cpu1-4.
|
||||
- Users can also specify a tag value as the sub-table name by configuring the smlChildTableName parameter in taos.cfg on the client side. That tag value should be globally unique. For example, suppose there is a tag named tname and smlChildTableName=tname is configured; inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates the sub-table cpu1. Note that if multiple rows share the same tname but have different tag_set values, the tag_set of the first row is used when the table is auto-created and the others are ignored. [TDengine Schemaless Writing Reference Guide](/reference/schemaless/#无模式写入行协议) A writing sketch follows after this note.
|
||||
|
||||
:::
|
||||
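A hedged Java sketch of writing the line above through the connector's schemaless interface (the `SchemalessWriter` class and enums come from taos-jdbcdriver; the local native connection and default credentials are assumptions):

```java
import java.sql.Connection;
import java.sql.DriverManager;

import com.taosdata.jdbc.SchemalessWriter;
import com.taosdata.jdbc.enums.SchemalessProtocolType;
import com.taosdata.jdbc.enums.SchemalessTimestampType;

public class SchemalessSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:TAOS://localhost:6030/test?user=root&password=taosdata"; // assumed
        try (Connection conn = DriverManager.getConnection(url)) {
            SchemalessWriter writer = new SchemalessWriter(conn);
            // Same line as the example above; with smlAutoChildTableNameDelimiter=-
            // configured on the client, the sub-table becomes cpu1-4.
            String line = "st,t0=cpu1,t1=4 c1=3 1626006833639000000";
            writer.write(line, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
        }
    }
}
```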
|
||||
|
|
|
@ -31,8 +31,11 @@ OpenTSDB line protocol also uses a single line of text to represent one row of data. OpenTSDB
|
|||
```txt
|
||||
meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
|
||||
```
|
||||
- Sub-table name generation rules
|
||||
- By default, the generated sub-table name is a unique ID derived from a rule.
|
||||
- Users can also configure the smlAutoChildTableNameDelimiter parameter in taos.cfg on the client side to specify a delimiter for joining the tag values; the joined string is used as the sub-table name. For example, with smlAutoChildTableNameDelimiter=- configured, inserting st,t0=cpu1,t1=4 c1=3 1626006833639000000 creates the sub-table cpu1-4.
|
||||
- Users can also specify a tag value as the sub-table name by configuring the smlChildTableName parameter in taos.cfg on the client side. That tag value should be globally unique. For example, suppose there is a tag named tname and smlChildTableName=tname is configured; inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates the sub-table cpu1. Note that if multiple rows share the same tname but have different tag_set values, the tag_set of the first row is used when the table is auto-created and the others are ignored. [TDengine Schemaless Writing Reference Guide](/reference/schemaless/#无模式写入行协议)
|
||||

||||
- By default, the generated sub-table name is a unique ID derived from a rule. Users can also specify a tag value as the sub-table name by configuring the smlChildTableName parameter in taos.cfg on the client side. That tag value should be globally unique. For example, suppose there is a tag named tname and smlChildTableName=tname is configured; inserting meters.current 1648432611250 11.3 tname=cpu1 location=California.LosAngeles groupid=3 creates the table cpu1. Note that if multiple rows share the same tname but have different tag_set values, the tag_set of the first row is used when the table is auto-created and the others are ignored.
|
||||
See the [OpenTSDB Telnet API documentation](http://opentsdb.net/docs/build/html/api_telnet/put.html).
|
||||
|
||||
## Sample Code
|
||||
|
|
|
@ -47,7 +47,10 @@ The OpenTSDB JSON protocol uses a single JSON string to represent one or more rows of data
|
|||
:::note
|
||||
|
||||
- For the JSON protocol, TDengine does not automatically convert all tags to the NCHAR type: strings are converted to NCHAR, and numeric values are converted to DOUBLE.
|
||||
- By default, the generated sub-table name is a unique ID derived from a rule. Users can also specify a tag value as the sub-table name by configuring the smlChildTableName parameter in taos.cfg on the client side. That tag value should be globally unique. For example, suppose there is a tag named tname and smlChildTableName=tname is configured; inserting `"tags": { "host": "web02","dc": "lga","tname":"cpu1"}` creates the sub-table cpu1. Note that if multiple rows share the same tname but have different tag_set values, the tag_set of the first row is used when the table is auto-created and the others are ignored.
|
||||
- Sub-table name generation rules
|
||||
- By default, the generated sub-table name is a unique ID derived from a rule.
|
||||
- Users can also configure the smlAutoChildTableNameDelimiter parameter in taos.cfg on the client side to specify a delimiter for joining the tag values; the joined string is used as the sub-table name. For example, with smlAutoChildTableNameDelimiter=- configured, inserting st,t0=cpu1,t1=4 c1=3 1626006833639000000 creates the sub-table cpu1-4.
|
||||
- Users can also specify a tag value as the sub-table name by configuring the smlChildTableName parameter in taos.cfg on the client side. That tag value should be globally unique. For example, suppose there is a tag named tname and smlChildTableName=tname is configured; inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates the sub-table cpu1. Note that if multiple rows share the same tname but have different tag_set values, the tag_set of the first row is used when the table is auto-created and the others are ignored. [TDengine Schemaless Writing Reference Guide](/reference/schemaless/#无模式写入行协议)
|
||||
|
||||
:::
|
||||
|
||||
|
|
|
@ -23,22 +23,30 @@ import CDemo from "./_sub_c.mdx";
|
|||
|
||||
To implement the above features, TDengine automatically creates indexes on WAL (Write-Ahead Log) files to support fast random access, and provides a flexible, configurable file rolling and retention mechanism: users can specify the retention time and size of WAL files as needed (see the create database statement). In this way, the WAL is turned into a persistent storage engine that preserves the order of event arrival (but since TSDB has a far higher compression ratio than the WAL, we do not recommend retaining WAL data for too long; generally, no more than a few days). For queries created as topics, TDengine reads from the WAL instead of TSDB as the storage engine. During consumption, TDengine reads data directly from the WAL according to the current consumption progress, applies filtering, transformation, and other operations with the unified query engine, and pushes the data to consumers.
|
||||
|
||||
This document does not cover the basics of message queues themselves; please search for them yourself if needed.
|
||||
The following notes on data subscription assume some understanding of the TDengine architecture and should be read together with the API of each language connector.
|
||||
- A consumer group consumes all data under the same topic; different consumer groups are independent of each other;
|
||||
- A consumer group consumes all vgroups of the same topic. A group may consist of multiple consumers, but one vgroup is consumed by only one consumer; if the number of consumers exceeds the number of vgroups, the extra consumers consume no data;
|
||||
- On the server side, only one offset is kept per vgroup. The offset of each vgroup increases monotonically but is not necessarily contiguous, and there is no relation between the offsets of different vgroups;
|
||||
- Each poll returns one result block from the server. The block belongs to one vgroup and may contain data of multiple WAL versions; the offset interface returns the offset of the first record in that block;
|
||||
- If a consumer group has never committed an offset, when its member consumers restart and pull data again, they start consuming from the value set by the auto.offset.reset parameter. Within a consumer's lifetime, the client locally records the offset of the most recent pull, so duplicate data is not pulled;
|
||||
- If a consumer terminates abnormally (without calling tmq_close), a rebalance of its consumer group is triggered after about 12 seconds; the consumer's state on the server becomes LOST, and it is deleted automatically after about 1 day. On a normal exit, the consumer is deleted right after exiting. When a new consumer joins, a rebalance is triggered after about 2 seconds, and the consumer's state on the server becomes ready;
|
||||
- A consumer group rebalance reassigns vgroups among all members in the ready state; a consumer can only perform assignment/seek/commit/poll operations on the vgroups it is responsible for;
|
||||
- A consumer can use position to obtain the current consumption offset and seek to a specified offset to consume again;
|
||||
- seek points position to the specified offset without performing a commit; once seek succeeds, poll can pull data at and after that offset;
|
||||
- Before a seek operation, the assignment interface must be called to obtain the consumer's vgroup IDs and offset ranges. seek checks whether the vgroup ID and offset are valid and reports an error if they are not;
|
||||
- position returns the current consumption position, i.e. the position of the next fetch, not the position consumed so far
|
||||
- commit commits the consumption position. Without an argument, it commits the current consumption position (the position of the next fetch, not the position consumed so far); with an argument, it commits the position given in the argument (i.e. the position to fetch from after the next restart)
|
||||
- seek sets the consumer's consumption position; whatever position you seek to is what position returns, both being the position of the next fetch
|
||||
- seek does not affect commit, and commit does not affect seek; they are independent of each other and are two different concepts
|
||||
- The begin interface returns the offset of the first record in the WAL, and the end interface returns the offset of the last record in the WAL + 1
|
||||
- The offset interface returns the offset of the first record in the result block that a record belongs to; when seeking to that offset, all data in that block is consumed. See the fourth point;
|
||||
- Because of the WAL expiration and deletion mechanism, an offset may already be invalid when polling even if seek succeeded. If the polled offset is smaller than the minimum WAL version, consumption starts from the minimum WAL version;
|
||||
- Data subscription consumes data from the WAL. If some WAL files are deleted under the WAL retention policy, the data in those files can no longer be consumed. You need to set `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` appropriately when creating the database according to business needs, and make sure the application consumes data in time, so that no data loss occurs. The behavior of data subscription is similar to that of widely used message queue products such as Kafka;
|
||||
|
||||
Notes (using the C interface as an example):
|
||||
1. A consumer group consumes all data under the same topic; different consumer groups are independent of each other;
|
||||
2. A consumer group consumes all vgroups of the same topic. A group may consist of multiple consumers, but one vgroup is consumed by only one consumer; if the number of consumers exceeds the number of vgroups, the extra consumers consume no data;
|
||||
3. On the server side, only one offset is kept per vgroup. The offset of each vgroup increases monotonically but is not necessarily contiguous, and there is no relation between the offsets of different vgroups;
|
||||
4. Each poll returns one result block from the server. The block belongs to one vgroup and may contain data of multiple WAL versions; the tmq_get_vgroup_offset interface returns the offset of the first record in that block;
|
||||
5. If a consumer group has never committed an offset, when its member consumers restart and pull data again, they start consuming from the value set by the auto.offset.reset parameter. Within a consumer's lifetime, the client locally records the offset of the most recent pull, so duplicate data is not pulled;
|
||||
6. If a consumer terminates abnormally (without calling tmq_close), a rebalance of its consumer group is triggered after about 12 seconds; the consumer's state on the server becomes LOST, and it is deleted automatically after about 1 day. On a normal exit, the consumer is deleted right after exiting. When a new consumer joins, a rebalance is triggered after about 2 seconds, and the consumer's state on the server becomes ready;
|
||||
7. A consumer group rebalance reassigns vgroups among all members in the ready state; a consumer can only perform assignment/seek/commit/poll operations on the vgroups it is responsible for;
|
||||
8. A consumer can use tmq_position to obtain the current consumption offset and seek to a specified offset to consume again;
|
||||
9. seek points position to the specified offset without performing a commit; once seek succeeds, poll can pull data at and after that offset;
|
||||
10. Before a seek operation, the tmq_get_topic_assignment interface must be called to obtain the consumer's vgroup IDs and offset ranges. seek checks whether the vgroup ID and offset are valid and reports an error if they are not;
|
||||
11. The tmq_get_vgroup_offset interface returns the offset of the first record in the result block that a record belongs to; when seeking to that offset, all data in that block is consumed. See the fourth point;
|
||||
12. Because of the WAL expiration and deletion mechanism, an offset may already be invalid when polling even if seek succeeded. If the polled offset is smaller than the minimum WAL version, consumption starts from the minimum WAL version;
|
||||
13. Data subscription consumes data from the WAL. If some WAL files are deleted under the WAL retention policy, the data in those files can no longer be consumed. You need to set `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` appropriately when creating the database according to business needs, and make sure the application consumes data in time, so that no data loss occurs. The behavior of data subscription is similar to that of widely used message queue products such as Kafka;
|
||||
This document does not go further into message queues themselves; please search for more information if needed.
|
||||

||||
Starting from version 3.2.0.0, data subscription supports vnode migration and splitting.
|
||||
Because data subscription relies on WAL files, and the WAL is not carried over during vnode migration or splitting, WAL data that has not been fully consumed can no longer be consumed after a migration or split. Please make sure all data has been consumed before migrating or splitting a vnode; otherwise data will be lost during consumption. A sketch of creating a database with an explicit WAL retention window follows below.
|
||||
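A hedged Java sketch of provisioning a database with an explicit WAL retention window and a topic on top of it (the statements follow the `WAL_RETENTION_PERIOD` parameter and the `CREATE TOPIC ... AS DATABASE ...` syntax referenced in this document; host and credentials are assumptions):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class TopicSetupSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:TAOS://localhost:6030/?user=root&password=taosdata"; // assumed
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement()) {
            // Keep WAL data for one day (86400 s) so subscribers have time to consume it.
            stmt.executeUpdate("create database if not exists tmq_db wal_retention_period 86400");
            stmt.executeUpdate("create topic if not exists tmq_topic as database tmq_db");
        }
    }
}
```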
|
||||
## Main Data Structures and APIs
|
||||
|
||||
|
@ -55,17 +63,17 @@ import CDemo from "./_sub_c.mdx";
|
|||
typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param));
|
||||
|
||||
typedef enum tmq_conf_res_t {
|
||||
TMQ_CONF_UNKNOWN = -2,
|
||||
TMQ_CONF_INVALID = -1,
|
||||
TMQ_CONF_OK = 0,
|
||||
} tmq_conf_res_t;
|
||||
TMQ_CONF_UNKNOWN = -2,
|
||||
TMQ_CONF_INVALID = -1,
|
||||
TMQ_CONF_OK = 0,
|
||||
} tmq_conf_res_t;
|
||||
|
||||
typedef struct tmq_topic_assignment {
|
||||
int32_t vgId;
|
||||
int64_t currentOffset;
|
||||
int64_t begin;
|
||||
int64_t end;
|
||||
} tmq_topic_assignment;
|
||||
int32_t vgId;
|
||||
int64_t currentOffset;
|
||||
int64_t begin;
|
||||
int64_t end;
|
||||
} tmq_topic_assignment;
|
||||
|
||||
DLL_EXPORT tmq_conf_t *tmq_conf_new();
|
||||
DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
|
||||
|
@ -98,7 +106,7 @@ import CDemo from "./_sub_c.mdx";
|
|||
DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
|
||||
DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
|
||||
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res);
|
||||
DLL_EXPORT const char *tmq_err2str(int32_t code);DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
|
||||
DLL_EXPORT const char *tmq_err2str(int32_t code);
|
||||
```
|
||||
|
||||
Their usage is described below (for supertable and sub-table structures, see the "Data Modeling" section); for complete sample code, see the C language example below.
|
||||
|
@ -343,10 +351,11 @@ CREATE TOPIC topic_name [with meta] AS DATABASE db_name;
|
|||
| `td.connect.port` | integer | Server port | |
|
||||
| `group.id` | string | Consumer group ID; consumers in the same group share consumption progress | <br />**Required**. Maximum length: 192.<br />Up to 100 consumer groups can be created per topic |
|
||||
| `client.id` | string | Client ID | Maximum length: 192. |
|
||||
| `auto.offset.reset` | enum | Initial position of the consumer group subscription | <br />`earliest`: default; subscribe from the beginning; <br/>`latest`: subscribe only from the latest data; <br/>`none`: cannot subscribe without a committed offset |
|
||||
| `auto.offset.reset` | enum | Initial position of the consumer group subscription | <br />`earliest`: default (version < 3.2.0.0); subscribe from the beginning; <br/>`latest`: default (version >= 3.2.0.0); subscribe only from the latest data; <br/>`none`: cannot subscribe without a committed offset |
|
||||
| `enable.auto.commit` | boolean | Whether to enable automatic commit of consumption points. true: auto commit, no manual commit needed in the client application; false: the client application must commit manually | Default is true |
|
||||
| `auto.commit.interval.ms` | integer | Interval for automatically committing consumption points, in milliseconds | Default is 5000 |
|
||||
| `msg.with.table.name` | boolean | Whether to allow parsing the table name from messages; not applicable to column subscriptions (for column subscriptions, tbname can be written into the subquery as a column) | Off by default |
|
||||
| `msg.with.table.name` | boolean | Whether to allow parsing the table name from messages; not applicable to column subscriptions (for column subscriptions, tbname can be written into the subquery as a column) (deprecated since version 3.2.0.0; always true) | Off by default |
|
||||
| `enable.replay` | boolean | Whether to enable data replay | Off by default |
|
||||
|
||||
For different programming languages, the configuration is set as follows:
|
||||
|
||||
|
@ -362,7 +371,7 @@ tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
|
|||
tmq_conf_set(conf, "group.id", "cgrpName");
|
||||
tmq_conf_set(conf, "td.connect.user", "root");
|
||||
tmq_conf_set(conf, "td.connect.pass", "taosdata");
|
||||
tmq_conf_set(conf, "auto.offset.reset", "earliest");
|
||||
tmq_conf_set(conf, "auto.offset.reset", "latest");
|
||||
tmq_conf_set(conf, "msg.with.table.name", "true");
|
||||
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
|
||||
|
||||
|
@ -392,7 +401,7 @@ properties.setProperty("group.id", "cgrpName");
|
|||
properties.setProperty("bootstrap.servers", "127.0.0.1:6030");
|
||||
properties.setProperty("td.connect.user", "root");
|
||||
properties.setProperty("td.connect.pass", "taosdata");
|
||||
properties.setProperty("auto.offset.reset", "earliest");
|
||||
properties.setProperty("auto.offset.reset", "latest");
|
||||
properties.setProperty("msg.with.table.name", "true");
|
||||
properties.setProperty("value.deserializer", "com.taos.example.MetersDeserializer");
|
||||
|
||||
|
@ -412,7 +421,7 @@ public class MetersDeserializer extends ReferenceDeserializer<Meters> {
|
|||
```go
|
||||
conf := &tmq.ConfigMap{
|
||||
"group.id": "test",
|
||||
"auto.offset.reset": "earliest",
|
||||
"auto.offset.reset": "latest",
|
||||
"td.connect.ip": "127.0.0.1",
|
||||
"td.connect.user": "root",
|
||||
"td.connect.pass": "taosdata",
|
||||
|
@ -432,7 +441,7 @@ consumer, err := NewConsumer(conf)
|
|||
let mut dsn: Dsn = "taos://".parse()?;
|
||||
dsn.set("group.id", "group1");
|
||||
dsn.set("client.id", "test");
|
||||
dsn.set("auto.offset.reset", "earliest");
|
||||
dsn.set("auto.offset.reset", "latest");
|
||||
|
||||
let tmq = TmqBuilder::from_dsn(dsn)?;
|
||||
|
||||
|
@ -451,7 +460,19 @@ from taos.tmq import Consumer
|
|||
# Syntax: `consumer = Consumer(configs)`
|
||||
#
|
||||
# Example:
|
||||
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
|
||||
consumer = Consumer(
|
||||
{
|
||||
"group.id": "local",
|
||||
"client.id": "1",
|
||||
"enable.auto.commit": "true",
|
||||
"auto.commit.interval.ms": "1000",
|
||||
"td.connect.ip": "127.0.0.1",
|
||||
"td.connect.user": "root",
|
||||
"td.connect.pass": "taosdata",
|
||||
"auto.offset.reset": "latest",
|
||||
"msg.with.table.name": "true",
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
@ -468,7 +489,7 @@ let consumer = taos.consumer({
|
|||
'group.id': 'tg2',
|
||||
'td.connect.user': 'root',
|
||||
'td.connect.pass': 'taosdata',
|
||||
'auto.offset.reset','earliest',
|
||||
'auto.offset.reset','latest',
|
||||
'msg.with.table.name': 'true',
|
||||
'td.connect.ip','127.0.0.1',
|
||||
'td.connect.port','6030'
|
||||
|
@ -491,7 +512,7 @@ var cfg = new ConsumerConfig
|
|||
GourpId = "TDengine-TMQ-C#",
|
||||
TDConnectUser = "root",
|
||||
TDConnectPasswd = "taosdata",
|
||||
AutoOffsetReset = "earliest"
|
||||
AutoOffsetReset = "latest"
|
||||
MsgWithTableName = "true",
|
||||
TDConnectIp = "127.0.0.1",
|
||||
TDConnectPort = "6030"
|
||||
|
@ -507,6 +528,24 @@ var consumer = new ConsumerBuilder(cfg).Build();
|
|||
|
||||
上述配置中包括 consumer group ID,如果多个 consumer 指定的 consumer group ID 一样,则自动形成一个 consumer group,共享消费进度。
|
||||
|
||||
数据回放功能说明:
|
||||
- 订阅增加 replay 功能,按照数据写入的时间回放。
|
||||
比如,如下时间写入三条数据
|
||||
```sql
|
||||
2023/09/22 00:00:00.000
|
||||
2023/09/22 00:00:05.000
|
||||
2023/09/22 00:00:08.000
|
||||
```
|
||||
则订阅出第一条数据 5s 后返回第二条数据,获取第二条数据 3s 后返回第三条数据。
|
||||
- 仅列订阅支持数据回放
|
||||
- 回放需要保证独立时间线
|
||||
- 如果是子表订阅或者普通表订阅,只有一个vnode上有数据,保证是一个时间线
|
||||
- 如果超级表订阅,则需保证该 DB 只有一个vnode,否则报错(因为多个vnode上订阅出的数据不在一个时间线上)
|
||||
- 超级表和库订阅不支持回放
|
||||
- 增加 enable.replay 参数,true表示开启订阅回放功能,false表示不开启订阅回放功能,默认不开启。
|
||||
- 回放不支持进度保存,所以回放参数 enable.replay = true 时,auto commit 自动关闭
|
||||
- 因为数据回放本身需要处理时间,所以回放的精度存在几十ms的误差
|
||||
|
||||
## 订阅 *topics*
|
||||
|
||||
一个 consumer 支持同时订阅多个 topic。
|
||||
|
|
|
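Since replay works only with column subscriptions, the topic itself must be created over explicit columns. A minimal sketch, assuming a `meters` supertable in a database `test` (both names are illustrative):

```sql
-- A column-subscription topic: selects explicit columns rather than a whole database.
CREATE TOPIC topic_speed AS SELECT ts, current FROM test.meters;
```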
@ -142,8 +142,14 @@ TDengine currently supports timestamp, numeric, character, and boolean types; the mapping to Java types is as follows

| BINARY | byte array |
| NCHAR | java.lang.String |
| JSON | java.lang.String |
| VARBINARY | byte[] |
| GEOMETRY | byte[] |

**Note**: the JSON type is supported in tags only.
For historical reasons, BINARY in TDengine is not truly binary data underneath and its use is no longer recommended; use the VARBINARY type instead.
GEOMETRY is binary data in little-endian byte order conforming to the WKB specification; see [Data Types](/taos-sql/data-type/#数据类型) for details.
For the WKB specification, see [Well-Known Binary (WKB)](https://libgeos.org/specifications/wkb/).
With the Java connector, the jts library makes it easy to build GEOMETRY objects, serialize them, and write them to TDengine; see the [Geometry example](https://github.com/taosdata/TDengine/blob/3.0/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/GeometryDemo.java).

## Installation Steps

@ -357,7 +363,7 @@ The configuration parameters in properties are as follows:

- TSDBDriver.PROPERTY_KEY_CONFIG_DIR: effective only for JDBC native connections. Directory of the client configuration file; defaults to `/etc/taos` on Linux and `C:/TDengine/cfg` on Windows.
- TSDBDriver.PROPERTY_KEY_CHARSET: character set used by the client; defaults to the system character set.
- TSDBDriver.PROPERTY_KEY_LOCALE: effective only for JDBC native connections. Client locale; defaults to the current system locale.
- TSDBDriver.PROPERTY_KEY_TIME_ZONE: effective only for JDBC native connections. Time zone used by the client; defaults to the current system time zone. For historical reasons, only part of the POSIX standard is supported, namely the forms UTC-8 (for Shanghai, China), GMT-8, and Asia/Shanghai.
- TSDBDriver.HTTP_CONNECT_TIMEOUT: connection timeout in ms, default 60000. REST connections only.
- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in ms, default 60000. Effective only for REST connections when batchfetch is false.
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message timeout in ms, default 60000. Effective only for REST connections when batchfetch is true.
@ -459,13 +465,15 @@ public class ParameterBindingDemo {

    private static final String host = "127.0.0.1";
    private static final Random random = new Random(System.currentTimeMillis());
    private static final int BINARY_COLUMN_SIZE = 50;
    private static final String[] schemaList = {
            "create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
            "create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
            "create table stable3(ts timestamp, f1 bool) tags(t1 bool)",
            "create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))",
            "create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))",
            "create table stable6(ts timestamp, f1 varbinary(" + BINARY_COLUMN_SIZE + ")) tags(t1 varbinary(" + BINARY_COLUMN_SIZE + "))",
            "create table stable7(ts timestamp, f1 geometry(" + BINARY_COLUMN_SIZE + ")) tags(t1 geometry(" + BINARY_COLUMN_SIZE + "))",
    };
    private static final int numOfSubTable = 10, numOfRow = 10;

@ -477,21 +485,20 @@ public class ParameterBindingDemo {

        init(conn);

        bindInteger(conn);
        bindFloat(conn);
        bindBoolean(conn);
        bindBytes(conn);
        bindString(conn);
        bindVarbinary(conn);
        bindGeometry(conn);

        clean(conn);
        conn.close();
    }

    private static void init(Connection conn) throws SQLException {
        clean(conn);
        try (Statement stmt = conn.createStatement()) {
            stmt.execute("create database if not exists test_parabind");
            stmt.execute("use test_parabind");
            for (int i = 0; i < schemaList.length; i++) {

@ -499,6 +506,11 @@ public class ParameterBindingDemo {
            }
        }
    }

    private static void clean(Connection conn) throws SQLException {
        try (Statement stmt = conn.createStatement()) {
            stmt.execute("drop database if exists test_parabind");
        }
    }

    private static void bindInteger(Connection conn) throws SQLException {
        String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)";

@ -677,10 +689,84 @@ public class ParameterBindingDemo {

            pstmt.columnDataExecuteBatch();
        }
    }

    private static void bindVarbinary(Connection conn) throws SQLException {
        String sql = "insert into ? using stable6 tags(?) values(?,?)";

        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {

            for (int i = 1; i <= numOfSubTable; i++) {
                // set table name
                pstmt.setTableName("t6_" + i);
                // set tags
                byte[] bTag = new byte[]{0, 2, 3, 4, 5};
                bTag[0] = (byte) i;
                pstmt.setTagVarbinary(0, bTag);

                // set columns
                ArrayList<Long> tsList = new ArrayList<>();
                long current = System.currentTimeMillis();
                for (int j = 0; j < numOfRow; j++)
                    tsList.add(current + j);
                pstmt.setTimestamp(0, tsList);

                ArrayList<byte[]> f1List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++) {
                    byte[] v = new byte[]{0, 2, 3, 4, 5, 6};
                    v[0] = (byte) j;
                    f1List.add(v);
                }
                pstmt.setVarbinary(1, f1List, BINARY_COLUMN_SIZE);

                // add column
                pstmt.columnDataAddBatch();
            }
            // execute
            pstmt.columnDataExecuteBatch();
        }
    }

    private static void bindGeometry(Connection conn) throws SQLException {
        String sql = "insert into ? using stable7 tags(?) values(?,?)";

        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {

            byte[] g1 = StringUtils.hexToBytes("0101000000000000000000F03F0000000000000040");
            byte[] g2 = StringUtils.hexToBytes("0102000020E610000002000000000000000000F03F000000000000004000000000000008400000000000001040");
            List<byte[]> listGeo = new ArrayList<>();
            listGeo.add(g1);
            listGeo.add(g2);

            for (int i = 1; i <= 2; i++) {
                // set table name
                pstmt.setTableName("t7_" + i);
                // set tags
                pstmt.setTagGeometry(0, listGeo.get(i - 1));

                // set columns
                ArrayList<Long> tsList = new ArrayList<>();
                long current = System.currentTimeMillis();
                for (int j = 0; j < numOfRow; j++)
                    tsList.add(current + j);
                pstmt.setTimestamp(0, tsList);

                ArrayList<byte[]> f1List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++) {
                    f1List.add(listGeo.get(i - 1));
                }
                pstmt.setGeometry(1, f1List, BINARY_COLUMN_SIZE);

                // add column
                pstmt.columnDataAddBatch();
            }
            // execute
            pstmt.columnDataExecuteBatch();
        }
    }
}
```

**Note**: string and array types require the user to declare, in the size parameter, the column width of the corresponding column in the table definition.

The methods for setting the values of VALUES data columns are:

@ -695,6 +781,8 @@ public void setByte(int columnIndex, ArrayList<Byte> list) throws SQLException

public void setShort(int columnIndex, ArrayList<Short> list) throws SQLException
public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
public void setVarbinary(int columnIndex, ArrayList<byte[]> list, int size) throws SQLException
public void setGeometry(int columnIndex, ArrayList<byte[]> list, int size) throws SQLException
```

</TabItem>
@ -883,6 +971,9 @@ public void setTagFloat(int index, float value)

public void setTagDouble(int index, double value)
public void setTagString(int index, String value)
public void setTagNString(int index, String value)
public void setTagJson(int index, String value)
public void setTagVarbinary(int index, byte[] value)
public void setTagGeometry(int index, byte[] value)
```

### Schemaless Writing
@ -1004,7 +1095,8 @@ TaosConsumer consumer = new TaosConsumer<>(config);

- httpConnectTimeout: connection-creation timeout in ms, default 5000 ms. WebSocket connections only.
- messageWaitTimeout: data-transfer timeout in ms, default 10000 ms. WebSocket connections only.
- httpPoolSize: maximum number of parallel requests on one connection. WebSocket connections only.
For other parameters, see the [Consumer parameter list](../../develop/tmq#创建-consumer-以及consumer-group); note that starting with TDengine server 3.2.0.0, the default value of auto.offset.reset in data subscription changed.

#### Subscribing and Consuming Data

@ -1102,7 +1194,7 @@ public abstract class ConsumerLoop {

        config.setProperty("bootstrap.servers", "localhost:6030");
        config.setProperty("td.connect.user", "root");
        config.setProperty("td.connect.pass", "taosdata");
        config.setProperty("auto.offset.reset", "latest");
        config.setProperty("msg.with.table.name", "true");
        config.setProperty("enable.auto.commit", "true");
        config.setProperty("auto.commit.interval.ms", "1000");

@ -1110,7 +1202,6 @@ public abstract class ConsumerLoop {

        config.setProperty("client.id", "1");
        config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
        config.setProperty("value.deserializer.encoding", "UTF-8");

        this.consumer = new TaosConsumer<>(config);
        this.topics = Collections.singletonList("topic_speed");

@ -1188,7 +1279,7 @@ public abstract class ConsumerLoop {

        config.setProperty("bootstrap.servers", "localhost:6041");
        config.setProperty("td.connect.user", "root");
        config.setProperty("td.connect.pass", "taosdata");
        config.setProperty("auto.offset.reset", "latest");
        config.setProperty("msg.with.table.name", "true");
        config.setProperty("enable.auto.commit", "true");
        config.setProperty("auto.commit.interval.ms", "1000");

@ -1196,7 +1287,6 @@ public abstract class ConsumerLoop {

        config.setProperty("client.id", "1");
        config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
        config.setProperty("value.deserializer.encoding", "UTF-8");

        this.consumer = new TaosConsumer<>(config);
        this.topics = Collections.singletonList("topic_speed");
@ -797,7 +797,7 @@ The TDengine Go connector supports the subscription feature; the API is as follows:

```go
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
    "group.id":          "test",
    "auto.offset.reset": "latest",
    "td.connect.ip":     "127.0.0.1",
    "td.connect.user":   "root",
    "td.connect.pass":   "taosdata",

@ -873,6 +873,7 @@ package main

import (
    "fmt"
    "os"
    "time"

    "github.com/taosdata/driver-go/v3/af"
    "github.com/taosdata/driver-go/v3/af/tmq"

@ -893,19 +894,16 @@ func main() {

    if err != nil {
        panic(err)
    }
    consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
        "group.id":            "test",
        "auto.offset.reset":   "latest",
        "td.connect.ip":       "127.0.0.1",
        "td.connect.user":     "root",
        "td.connect.pass":     "taosdata",
        "td.connect.port":     "6030",
        "client.id":           "test_tmq_client",
        "enable.auto.commit":  "false",
        "msg.with.table.name": "true",
    })
    if err != nil {
        panic(err)

@ -918,10 +916,16 @@ func main() {

    if err != nil {
        panic(err)
    }
    go func() {
        for {
            _, err = db.Exec("insert into example_tmq.t1 values(now,1)")
            if err != nil {
                panic(err)
            }
            time.Sleep(time.Millisecond * 100)
        }
    }()

    for i := 0; i < 5; i++ {
        ev := consumer.Poll(500)
        if ev != nil {

@ -975,6 +979,7 @@ package main

import (
    "database/sql"
    "fmt"
    "time"

    "github.com/taosdata/driver-go/v3/common"
    tmqcommon "github.com/taosdata/driver-go/v3/common/tmq"

@ -998,7 +1003,7 @@ func main() {

    "td.connect.pass":   "taosdata",
    "group.id":          "example",
    "client.id":         "example_consumer",
    "auto.offset.reset": "latest",
})
if err != nil {
    panic(err)

@ -1007,29 +1012,34 @@ func main() {

    if err != nil {
        panic(err)
    }
    go func() {
        _, err := db.Exec("create table example_ws_tmq.t_all(ts timestamp," +
            "c1 bool," +
            "c2 tinyint," +
            "c3 smallint," +
            "c4 int," +
            "c5 bigint," +
            "c6 tinyint unsigned," +
            "c7 smallint unsigned," +
            "c8 int unsigned," +
            "c9 bigint unsigned," +
            "c10 float," +
            "c11 double," +
            "c12 binary(20)," +
            "c13 nchar(20)" +
            ")")
        if err != nil {
            panic(err)
        }
        for {
            _, err = db.Exec("insert into example_ws_tmq.t_all values(now,true,2,3,4,5,6,7,8,9,10.123,11.123,'binary','nchar')")
            if err != nil {
                panic(err)
            }
            time.Sleep(time.Millisecond * 100)
        }
    }()
    for i := 0; i < 5; i++ {
        ev := consumer.Poll(500)
|
|||
|
||||
- `group.id`: 同一个消费者组,将以至少消费一次的方式进行消息负载均衡。
|
||||
- `client.id`: 可选的订阅客户端识别项。
|
||||
- `auto.offset.reset`: 可选初始化订阅起点, *earliest* 为从头开始订阅, *latest* 为仅从最新数据开始订阅,默认为从头订阅。注意,此选项在同一个 `group.id` 中仅生效一次。
|
||||
- `auto.offset.reset`: 可选初始化订阅起点, *earliest* 为从头开始订阅, *latest* 为仅从最新数据开始订阅,默认值根据 TDengine 版本有所不同,详细参见 [数据订阅](https://docs.taosdata.com/develop/tmq/)。注意,此选项在同一个 `group.id` 中仅生效一次。
|
||||
- `enable.auto.commit`: 当设置为 `true` 时,将启用自动标记模式,当对数据一致性不敏感时,可以启用此方式。
|
||||
- `auto.commit.interval.ms`: 自动标记的时间间隔。
|
||||
|
||||
|
|
|
@ -33,11 +33,13 @@ Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-con
|
|||
|
||||
|Python Connector 版本|主要变化|
|
||||
|:-------------------:|:----:|
|
||||
|2.7.12|1. 新增 varbinary 类型支持(STMT暂不支持 varbinary )<br/> 2. query 性能提升(感谢贡献者[hadrianl](https://github.com/taosdata/taos-connector-python/pull/209))|
|
||||
|2.7.9|数据订阅支持获取消费进度和重置消费进度|
|
||||
|2.7.8|新增 `execute_many`|
|
||||
|
||||
|Python Websocket Connector 版本|主要变化|
|
||||
|:----------------------------:|:-----:|
|
||||
|0.2.9|已知问题修复|
|
||||
|0.2.5|1. 数据订阅支持获取消费进度和重置消费进度 <br/> 2. 支持 schemaless <br/> 3. 支持 STMT|
|
||||
|0.2.4|数据订阅新增取消订阅方法|
|
||||
|
||||
|
|
|
@ -56,7 +56,7 @@ database_option: {

- WAL_FSYNC_PERIOD: when the WAL parameter is set to 2, the period at which data is flushed to disk. Default 3000, in milliseconds; minimum 0, meaning every write is flushed immediately; maximum 180000, i.e. three minutes.
- MAXROWS: maximum number of records in a file block. Default 4096.
- MINROWS: minimum number of records in a file block. Default 100.
- KEEP: number of days data files are retained. Default 3650, range [1, 365000]; must be at least 3 times the DURATION parameter value (see the sketch after this list). The database automatically deletes data kept longer than KEEP. KEEP accepts units, e.g. KEEP 100h or KEEP 10d, supporting m (minutes), h (hours), and d (days); without a unit, as in KEEP 50, it defaults to days. The Enterprise Edition supports [multi-tier storage](https://docs.taosdata.com/tdinternal/arch/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8), so several retention times may be set (comma-separated, at most 3, satisfying keep 0 <= keep 1 <= keep 2, e.g. KEEP 100h,100d,3650d); the Community Edition does not support multi-tier storage (configuring several retention times has no effect, and KEEP takes the largest one).
- PAGES: number of cache pages of the metadata storage engine in each VNODE. Default 256, minimum 64. VNODE metadata storage occupies PAGESIZE \* PAGES, 1 MB of memory by default.
- PAGESIZE: page size of the metadata storage engine in each VNODE, in KB. Default 4 KB; range 1 to 16384, i.e. 1 KB to 16 MB.
- PRECISION: timestamp precision of the database; ms for milliseconds, us for microseconds, ns for nanoseconds. Default ms.
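A minimal sketch of the KEEP/DURATION constraint, assuming the 3x rule described above (database name illustrative):

```sql
CREATE DATABASE db DURATION 10 KEEP 30;   -- accepted: 30 >= 3 * 10
CREATE DATABASE db DURATION 10 KEEP 20;   -- rejected under the 3x rule: 20 < 3 * 10
```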
@ -24,7 +24,7 @@ SELECT [hints] [DISTINCT] select_list

hints: /*+ [hint([hint_param_list])] [hint([hint_param_list])] */

hint:
    BATCH_SCAN | NO_BATCH_SCAN | SORT_FOR_GROUP

select_list:
    select_expr [, select_expr] ...

@ -87,15 +87,17 @@ Hints are a means for the user to steer query optimization of an individual statement; a hint is ignored where it does not apply

The currently supported hints are:

| **Hint** | **Parameters** | **Description** | **Scope** |
| :-----------: | -------------- | ------------------------ | ----------------------------- |
| BATCH_SCAN | none | read tables in batches | supertable JOIN statements |
| NO_BATCH_SCAN | none | read tables sequentially | supertable JOIN statements |
| SORT_FOR_GROUP| none | group by sorting | when the partition by list contains normal columns |

Examples:

```sql
SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
SELECT /*+ SORT_FOR_GROUP() */ count(*), c1 FROM stable1 PARTITION BY c1;
```

## Lists
@ -483,6 +483,93 @@ return_timestamp: {

- The precision of the returned timestamp matches the time precision of the current DATABASE.
- return_timestamp selects whether the return value is a timestamp type: 1 returns TIMESTAMP, 0 returns BIGINT. BIGINT is returned if unspecified.

#### TO_CHAR

```sql
TO_CHAR(ts, format_str_literal)
```

**Description**: converts a timestamp to a string in the specified format

**Return type**: VARCHAR

**Applicable fields**: TIMESTAMP

**Nested subquery support**: inner and outer queries

**Applies to**: tables and supertables

**Supported formats**

| **Format** | **Description** | **Example** |
| --- | --- | --- |
| AM,am,PM,pm | AM/PM without periods | 07:00:00am |
| A.M.,a.m.,P.M.,p.m. | AM/PM with periods | 07:00:00a.m. |
| YYYY,yyyy | year, 4 or more digits | 2023-10-10 |
| YYY,yyy | year, last 3 digits | 023-10-10 |
| YY,yy | year, last 2 digits | 23-10-10 |
| Y,y | year, last digit | 3-10-10 |
| MONTH | month, uppercase | 2023-JANUARY-01 |
| Month | month, capitalized | 2023-January-01 |
| month | month, lowercase | 2023-january-01 |
| MON | month abbreviation, uppercase (three characters) | JAN, SEP |
| Mon | month abbreviation, capitalized | Jan, Sep |
| mon | month abbreviation, lowercase | jan, sep |
| MM,mm | month, digits 01-12 | 2023-01-01 |
| DD,dd | day of month, 01-31 | |
| DAY | day of week, uppercase | MONDAY |
| Day | day of week, capitalized | Monday |
| day | day of week, lowercase | monday |
| DY | day-of-week abbreviation, uppercase | MON |
| Dy | day-of-week abbreviation, capitalized | Mon |
| dy | day-of-week abbreviation, lowercase | mon |
| DDD | day of year, 001-366 | |
| D,d | day of week as a digit, 1-7, Sunday(1) to Saturday(7) | |
| HH24,hh24 | hour, 00-23 | 2023-01-30 23:59:59 |
| hh12,HH12, hh, HH | hour, 01-12 | 2023-01-30 12:59:59PM |
| MI,mi | minute, 00-59 | |
| SS,ss | second, 00-59 | |
| MS,ms | millisecond, 000-999 | |
| US,us | microsecond, 000000-999999 | |
| NS,ns | nanosecond, 000000000-999999999 | |
| TZH,tzh | time-zone hours | 2023-01-30 11:59:59PM +08 |

**Usage notes**:
- The output of `Month`, `Day`, and the like is left-aligned and right-padded with spaces, e.g. `2023-OCTOBER -01`, `2023-SEPTEMBER-01`; September has the most letters among English month names and therefore carries no padding. Weekdays behave the same way.
- With `ms`, `us`, and `ns`, the output differs only in precision: for ts = `1697182085123`, `ms` outputs `123`, `us` outputs `123000`, and `ns` outputs `123000000`.
- Content in the format string that matches no rule is emitted as-is. To keep parts of the format string that would otherwise match a rule from being converted, wrap them in double quotes, e.g. `to_char(ts, 'yyyy-mm-dd "is formatted by yyyy-mm-dd"')`. To emit a double quote, precede it with a backslash: `to_char(ts, '\"yyyy-mm-dd\"')` outputs `"2023-10-10"`.
- For formats whose output is numeric, such as `YYYY` and `DD`, uppercase and lowercase mean the same; `yyyy` and `YYYY` are interchangeable.
- It is recommended to include time-zone information in the format; otherwise the output defaults to the time zone configured on the server or client.
- The precision of the input timestamp is determined by the table being queried; if no table is specified, the precision is milliseconds. (An example follows these notes.)
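A short sketch combining the notes above; the table name `meters` is illustrative:

```sql
-- Format a timestamp with millisecond precision and the time-zone hours appended.
SELECT TO_CHAR(ts, 'yyyy-mm-dd hh24:mi:ss.ms TZH') FROM meters LIMIT 1;
```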
#### TO_TIMESTAMP

```sql
TO_TIMESTAMP(ts_str_literal, format_str_literal)
```

**Description**: converts a string to a timestamp according to the specified format.

**Return type**: TIMESTAMP

**Applicable fields**: VARCHAR

**Nested subquery support**: inner and outer queries

**Applies to**: tables and supertables

**Supported formats**: same as `to_char`

**Usage notes**:
- If `ms`, `us`, and `ns` are all specified, the resulting timestamp contains the sum of the three fields; e.g. `to_timestamp('2023-10-10 10:10:10.123.000456.000000789', 'yyyy-mm-dd hh:mi:ss.ms.us.ns')` yields the timestamp for `2023-10-10 10:10:10.123456789`.
- `MONTH`, `MON`, `DAY`, `DY`, and the formats with numeric output are case-insensitive; in `to_timestamp('2023-JANUARY-01', 'YYYY-month-dd')`, `month` could equally be `MONTH` or `Month`.
- If the same field is specified more than once, earlier occurrences are overwritten; `to_timestamp('2023-22-10-10', 'yyyy-yy-MM-dd')` yields year `2022`.
- To avoid converting with an unintended time zone, carry time-zone information in the input, e.g. '2023-10-10 10:10:10+08'; if no time zone is specified, the default is the one configured on the server or client.
- If a full time is not given, the default is `1970-01-01 00:00:00` in the specified or default time zone, and unspecified parts take the corresponding parts of that default.
- If the format contains `AM`, `PM`, etc., the hour must be 12-hour style, in the range 01-12.
- `to_timestamp` converts with some tolerance and may succeed even when the format and timestamp strings do not correspond exactly; e.g. in `to_timestamp('200101/2', 'yyyyMM1/dd')` the extra 1 in the format is discarded, and extra whitespace characters (spaces, tabs, etc.) in either string are ignored automatically, so `to_timestamp(' 23 年 - 1 月 - 01 日 ', 'yy 年-MM月-dd日')` converts successfully. Although fields such as `MM` expect two digits (zero-padded when there is only one), a single digit also converts successfully.
- The output precision is that of the queried table, or milliseconds if no table is specified; `select to_timestamp('2023-08-1 10:10:10.123456789', 'yyyy-mm-dd hh:mi:ss.ns')` truncates the microseconds and nanoseconds, whereas querying against a nanosecond table avoids the truncation, as in `select to_timestamp('2023-08-1 10:10:10.123456789', 'yyyy-mm-dd hh:mi:ss.ns') from db_ns.table_ns limit 1`. (An example follows these notes.)
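A minimal round-trip sketch of the two conversion functions (values illustrative):

```sql
-- Parse a millisecond-precision string back into a timestamp.
SELECT TO_TIMESTAMP('2023-10-10 10:10:10.123', 'yyyy-mm-dd hh24:mi:ss.ms');
```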
### Time and Date Functions
@ -44,7 +44,11 @@ window_clause: {

}
```

interval_val and sliding_val both denote a length of time; syntactically, three forms are supported, as illustrated in the sketch after this list:
- INTERVAL(1s, 500a) SLIDING(1s) — with time units, each unit written as a single character: a (milliseconds), b (nanoseconds), d (days), h (hours), m (minutes), n (months), s (seconds), u (microseconds), w (weeks), y (years).
- INTERVAL(1000, 500) SLIDING(1000) — without time units; the queried database's time precision is used as the default unit, and when several databases are involved, the one with higher precision is used.
- INTERVAL('1s', '500a') SLIDING('1s') — with time units in string form; the string must contain no spaces or other characters.
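The three forms above are equivalent on a millisecond-precision database; a sketch with an illustrative `meters` table:

```sql
SELECT _wstart, COUNT(*) FROM meters INTERVAL(1s, 500a) SLIDING(1s);
SELECT _wstart, COUNT(*) FROM meters INTERVAL(1000, 500) SLIDING(1000);
SELECT _wstart, COUNT(*) FROM meters INTERVAL('1s', '500a') SLIDING('1s');
```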
### Rules for the Window Clause
@ -54,6 +54,7 @@ LIKE conditions perform wildcard matching; the rules are:

MATCH and NMATCH conditions match with regular expressions; the rules are as follows (examples follow this list):

- Regular expressions conforming to the POSIX specification are supported; see Regular Expressions for the details.
- MATCH returns TRUE when the regular expression matches; NMATCH returns TRUE when it does not match.
- Regular-expression filters apply only to child table names (i.e. tbname) and string-typed tag values; filtering normal columns is not supported.
- The matched string may not exceed 128 bytes. The maximum allowed length can be adjusted with the maxRegexStringLen parameter, a client-side configuration that takes effect after the client restarts.
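For instance (table and tag names illustrative):

```sql
-- Match child table names against a POSIX regular expression.
SELECT * FROM meters WHERE tbname MATCH '^d[0-9]+$';
-- Exclude rows whose string tag matches.
SELECT * FROM meters WHERE location NMATCH '^beijing';
```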
@ -180,6 +180,7 @@ description: Detailed list of TDengine reserved keywords

- MAX_DELAY
- BWLIMIT
- MAXROWS
- MAX_SPEED
- MERGE
- META
- MINROWS
@ -26,7 +26,7 @@ TDengine ships with a database named `INFORMATION_SCHEMA` that provides access to database metadata

## INS_DNODES

Provides information about dnodes, which can also be queried with SHOW DNODES. Users whose SYSINFO attribute is 0 cannot view this table.

| # | **Column** | **Data Type** | **Description** |
| --- | :------------: | ------------ | --------------- |

@ -40,7 +40,7 @@ TDengine ships with a database named `INFORMATION_SCHEMA` that provides access to database metadata

## INS_MNODES

Provides information about mnodes, which can also be queried with SHOW MNODES. Users whose SYSINFO attribute is 0 cannot view this table.

| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------ | --------------- |
@ -52,22 +52,33 @@ TDengine ships with a database named `INFORMATION_SCHEMA` that provides access to database metadata

## INS_QNODES

Information about the QNODEs in the current system, which can also be queried with SHOW QNODES. Users whose SYSINFO attribute is 0 cannot view this table.

| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------ | ---------------- |
| 1 | id | SMALLINT | qnode id |
| 2 | endpoint | VARCHAR(134) | qnode endpoint |
| 3 | create_time | TIMESTAMP | creation time |

## INS_SNODES

Information about the SNODEs in the current system, which can also be queried with SHOW SNODES. Users whose SYSINFO attribute is 0 cannot view this table.

| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------ | ---------------- |
| 1 | id | SMALLINT | snode id |
| 2 | endpoint | VARCHAR(134) | snode endpoint |
| 3 | create_time | TIMESTAMP | creation time |

## INS_CLUSTER

Stores cluster-related information. Users whose SYSINFO attribute is 0 cannot view this table.

| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------ | ---------- |
| 1 | id | BIGINT | cluster id |
| 2 | name | VARCHAR(134) | cluster name |
| 3 | create_time | TIMESTAMP | creation time |

## INS_DATABASES
@ -76,25 +87,25 @@ TDengine ships with a database named `INFORMATION_SCHEMA` that provides access to database metadata

| # | **Column** | **Data Type** | **Description** |
| --- | :------------------: | ---------------- | --------------- |
| 1 | name | VARCHAR(64) | database name |
| 2 | create_time | TIMESTAMP | creation time |
| 3 | ntables | INT | number of tables in the database, counting child and normal tables but not supertables |
| 4 | vgroups | INT | number of vgroups in the database. Note: `vgroups` is a TDengine keyword; escape it with ` when used as a column name. |
| 6 | replica | INT | replica count. Note: `replica` is a TDengine keyword; escape it with ` when used as a column name. |
| 7 | strict | VARCHAR(4) | deprecated parameter |
| 8 | duration | VARCHAR(10) | time span of the data stored in a single file. Note: `duration` is a TDengine keyword; escape it with ` when used as a column name. |
| 9 | keep | VARCHAR(32) | data retention period. Note: `keep` is a TDengine keyword; escape it with ` when used as a column name. |
| 10 | buffer | INT | size of each vnode write cache block, in MB. Note: `buffer` is a TDengine keyword; escape it with ` when used as a column name. |
| 11 | pagesize | INT | page size of the metadata storage engine in each VNODE, in KB. Note: `pagesize` is a TDengine keyword; escape it with ` when used as a column name. |
| 12 | pages | INT | number of cache pages of the metadata storage engine in each vnode. Note: `pages` is a TDengine keyword; escape it with ` when used as a column name. |
| 13 | minrows | INT | minimum number of records in a file block. Note: `minrows` is a TDengine keyword; escape it with ` when used as a column name. |
| 14 | maxrows | INT | maximum number of records in a file block. Note: `maxrows` is a TDengine keyword; escape it with ` when used as a column name. |
| 15 | comp | INT | data compression mode. Note: `comp` is a TDengine keyword; escape it with ` when used as a column name. |
| 16 | precision | VARCHAR(2) | time resolution. Note: `precision` is a TDengine keyword; escape it with ` when used as a column name. |
| 17 | status | VARCHAR(10) | database status |
| 18 | retentions | VARCHAR(60) | aggregation periods and retention times of the data. Note: `retentions` is a TDengine keyword; escape it with ` when used as a column name. |
| 19 | single_stable | BOOL | whether only one supertable may be created in this database. Note: `single_stable` is a TDengine keyword; escape it with ` when used as a column name. |
| 20 | cachemodel | VARCHAR(60) | whether the most recent data of child tables is cached in memory. Note: `cachemodel` is a TDengine keyword; escape it with ` when used as a column name. |
| 21 | cachesize | INT | memory size used in each vnode to cache the most recent data of child tables. Note: `cachesize` is a TDengine keyword; escape it with ` when used as a column name. |
| 22 | wal_level | INT | WAL level. Note: `wal_level` is a TDengine keyword; escape it with ` when used as a column name. |
| 23 | wal_fsync_period | INT | data flush period. Note: `wal_fsync_period` is a TDengine keyword; escape it with ` when used as a column name. |
@ -111,15 +122,15 @@ TDengine ships with a database named `INFORMATION_SCHEMA` that provides access to database metadata

| # | **Column** | **Data Type** | **Description** |
| --- | :-----------: | ------------- | --------------- |
| 1 | name | VARCHAR(64) | function name |
| 2 | comment | VARCHAR(255) | remarks. Note: `comment` is a TDengine keyword; escape it with ` when used as a column name. |
| 3 | aggregate | INT | whether it is an aggregate function. Note: `aggregate` is a TDengine keyword; escape it with ` when used as a column name. |
| 4 | output_type | VARCHAR(31) | output type |
| 5 | create_time | TIMESTAMP | creation time |
| 6 | code_len | INT | code length |
| 7 | bufsize | INT | buffer size |
| 8 | func_language | VARCHAR(31) | programming language of the user-defined function |
| 9 | func_body | VARCHAR(16384) | function body |
| 10 | func_version | INT | function version, starting at 0 and incremented by 1 on each replacement |
@ -129,12 +140,12 @@ TDengine ships with a database named `INFORMATION_SCHEMA` that provides access to database metadata

| # | **Column** | **Data Type** | **Description** |
| --- | :--------------: | ------------ | --------------- |
| 1 | db_name | VARCHAR(32) | database of the table containing this index |
| 2 | table_name | VARCHAR(192) | name of the table containing this index |
| 3 | index_name | VARCHAR(192) | index name |
| 4 | column_name | VARCHAR(64) | name of the indexed column |
| 5 | index_type | VARCHAR(10) | currently SMA or tag |
| 6 | index_extensions | VARCHAR(256) | additional index information; for SMA/tag indexes, the list of function names |

## INS_STABLES
@ -142,16 +153,16 @@ TDengine ships with a database named `INFORMATION_SCHEMA` that provides access to database metadata

| # | **Column** | **Data Type** | **Description** |
| --- | :-----------: | ------------ | --------------- |
| 1 | stable_name | VARCHAR(192) | supertable name |
| 2 | db_name | VARCHAR(64) | database the supertable belongs to |
| 3 | create_time | TIMESTAMP | creation time |
| 4 | columns | INT | number of columns |
| 5 | tags | INT | number of tags. Note: `tags` is a TDengine keyword; escape it with ` when used as a column name. |
| 6 | last_update | TIMESTAMP | last update time |
| 7 | table_comment | VARCHAR(1024) | table comment |
| 8 | watermark | VARCHAR(64) | window close time. Note: `watermark` is a TDengine keyword; escape it with ` when used as a column name. |
| 9 | max_delay | VARCHAR(64) | maximum delay for pushing computation results. Note: `max_delay` is a TDengine keyword; escape it with ` when used as a column name. |
| 10 | rollup | VARCHAR(128) | rollup aggregate functions. Note: `rollup` is a TDengine keyword; escape it with ` when used as a column name. |

## INS_TABLES
@ -159,37 +170,37 @@ TDengine ships with a database named `INFORMATION_SCHEMA` that provides access to database metadata

| # | **Column** | **Data Type** | **Description** |
| --- | :-----------: | ------------ | --------------- |
| 1 | table_name | VARCHAR(192) | table name |
| 2 | db_name | VARCHAR(64) | database name |
| 3 | create_time | TIMESTAMP | creation time |
| 4 | columns | INT | number of columns |
| 5 | stable_name | VARCHAR(192) | supertable it belongs to |
| 6 | uid | BIGINT | table id |
| 7 | vgroup_id | INT | vgroup id |
| 8 | ttl | INT | table time-to-live. Note: `ttl` is a TDengine keyword; escape it with ` when used as a column name. |
| 9 | table_comment | VARCHAR(1024) | table comment |
| 10 | type | VARCHAR(21) | table type |

## INS_TAGS

| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | ---------------------- |
| 1 | table_name | VARCHAR(192) | table name |
| 2 | db_name | VARCHAR(64) | database the table belongs to |
| 3 | stable_name | VARCHAR(192) | supertable it belongs to |
| 4 | tag_name | VARCHAR(64) | tag name |
| 5 | tag_type | VARCHAR(64) | tag type |
| 6 | tag_value | VARCHAR(16384) | tag value |

## INS_COLUMNS

| # | **Column** | **Data Type** | **Description** |
| --- | :-----------: | ------------ | ---------------------- |
| 1 | table_name | VARCHAR(192) | table name |
| 2 | db_name | VARCHAR(64) | database the table belongs to |
| 3 | table_type | VARCHAR(21) | table type |
| 4 | col_name | VARCHAR(64) | column name |
| 5 | col_type | VARCHAR(32) | column type |
| 6 | col_length | INT | column length |
| 7 | col_precision | INT | column precision |
| 8 | col_scale | INT | column scale |
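These metadata tables can be queried with ordinary SQL; a minimal sketch, where the database name `test` is illustrative:

```sql
-- List the columns of every table in one database.
SELECT table_name, col_name, col_type FROM information_schema.ins_columns WHERE db_name = 'test';
```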
@ -197,51 +208,51 @@ TDengine ships with a database named `INFORMATION_SCHEMA` that provides access to database metadata

## INS_USERS

Provides information about the users created in the system. Users whose SYSINFO attribute is 0 cannot view this table.

| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------ | -------- |
| 1 | user_name | VARCHAR(23) | user name |
| 2 | privilege | VARCHAR(256) | privileges |
| 3 | create_time | TIMESTAMP | creation time |

## INS_GRANTS

Provides information about Enterprise Edition licensing. Users whose SYSINFO attribute is 0 cannot view this table.

| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------ | --------------- |
| 1 | version | VARCHAR(9) | license type: official or trial |
| 2 | cpu_cores | VARCHAR(9) | number of licensed CPU cores |
| 3 | dnodes | VARCHAR(10) | number of licensed dnodes. Note: `dnodes` is a TDengine keyword; escape it with ` when used as a column name. |
| 4 | streams | VARCHAR(10) | number of streams that may be created. Note: `streams` is a TDengine keyword; escape it with ` when used as a column name. |
| 5 | users | VARCHAR(10) | number of users that may be created. Note: `users` is a TDengine keyword; escape it with ` when used as a column name. |
| 6 | accounts | VARCHAR(10) | number of accounts that may be created. Note: `accounts` is a TDengine keyword; escape it with ` when used as a column name. |
| 7 | storage | VARCHAR(21) | licensed storage space. Note: `storage` is a TDengine keyword; escape it with ` when used as a column name. |
| 8 | connections | VARCHAR(21) | number of licensed client connections. Note: `connections` is a TDengine keyword; escape it with ` when used as a column name. |
| 9 | databases | VARCHAR(11) | number of licensed databases. Note: `databases` is a TDengine keyword; escape it with ` when used as a column name. |
| 10 | speed | VARCHAR(9) | licensed write rate, in data points per second |
| 11 | querytime | VARCHAR(9) | licensed total query time |
| 12 | timeseries | VARCHAR(21) | number of licensed time series |
| 13 | expired | VARCHAR(5) | whether expired; true: expired, false: not expired |
| 14 | expire_time | VARCHAR(19) | trial expiry time |

## INS_VGROUPS

Information about all vgroups in the system. Users whose SYSINFO attribute is 0 cannot view this table. (A query example follows the table.)

| # | **Column** | **Data Type** | **Description** |
| --- | :-------: | ------------ | --------------- |
| 1 | vgroup_id | INT | vgroup id |
| 2 | db_name | VARCHAR(32) | database name |
| 3 | tables | INT | number of tables in this vgroup. Note: `tables` is a TDengine keyword; escape it with ` when used as a column name. |
| 4 | status | VARCHAR(10) | status of this vgroup |
| 5 | v1_dnode | INT | id of the dnode hosting the first member |
| 6 | v1_status | VARCHAR(10) | status of the first member |
| 7 | v2_dnode | INT | id of the dnode hosting the second member |
| 8 | v2_status | VARCHAR(10) | status of the second member |
| 9 | v3_dnode | INT | id of the dnode hosting the third member |
| 10 | v3_status | VARCHAR(10) | status of the third member |
| 11 | nfiles | INT | number of data/metadata files in this vgroup |
| 12 | file_size | INT | size of the data/metadata files in this vgroup |
| 13 | tsma | TINYINT | whether this vgroup is dedicated to time-range-wise SMA; 1: yes, 0: no |
@ -252,55 +263,57 @@ TDengine ships with a database named `INFORMATION_SCHEMA` that provides access to database metadata

| # | **Column** | **Data Type** | **Description** |
| --- | :------: | ------------ | --------------- |
| 1 | name | VARCHAR(32) | configuration item name |
| 2 | value | VARCHAR(64) | value of the configuration item. Note: `value` is a TDengine keyword; escape it with ` when used as a column name. |

## INS_DNODE_VARIABLES

Configuration parameters of each dnode in the system. Users whose SYSINFO attribute is 0 cannot view this table.

| # | **Column** | **Data Type** | **Description** |
| --- | :------: | ------------ | --------------- |
| 1 | dnode_id | INT | dnode ID |
| 2 | name | VARCHAR(32) | configuration item name |
| 3 | value | VARCHAR(64) | value of the configuration item. Note: `value` is a TDengine keyword; escape it with ` when used as a column name. |

## INS_TOPICS

| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------ | ------------------------------ |
| 1 | topic_name | VARCHAR(192) | topic name |
| 2 | db_name | VARCHAR(64) | DB related to the topic |
| 3 | create_time | TIMESTAMP | creation time of the topic |
| 4 | sql | VARCHAR(1024) | SQL statement used to create the topic |

## INS_SUBSCRIPTIONS

| # | **Column** | **Data Type** | **Description** |
| --- | :------------: | ------------ | ------------------------ |
| 1 | topic_name | VARCHAR(204) | subscribed topic |
| 2 | consumer_group | VARCHAR(193) | consumer group of the subscriber |
| 3 | vgroup_id | INT | vgroup id assigned to the consumer |
| 4 | consumer_id | BIGINT | unique id of the consumer |
| 5 | offset | VARCHAR(64) | consumption progress of the consumer |
| 6 | rows | BIGINT | number of rows consumed by the consumer |

## INS_STREAMS

| # | **Column** | **Data Type** | **Description** |
| --- | :----------: | ------------ | --------------- |
| 1 | stream_name | VARCHAR(64) | stream name |
| 2 | create_time | TIMESTAMP | creation time |
| 3 | sql | VARCHAR(1024) | SQL statement provided when the stream was created |
| 4 | status | VARCHAR(20) | current stream status |
| 5 | source_db | VARCHAR(64) | source database |
| 6 | target_db | VARCHAR(64) | target database |
| 7 | target_table | VARCHAR(192) | target table the stream writes to |
| 8 | watermark | BIGINT | watermark; see the SQL manual on stream processing. Note: `watermark` is a TDengine keyword; escape it with ` when used as a column name. |
| 9 | trigger | INT | result push mode; see the SQL manual on stream processing. Note: `trigger` is a TDengine keyword; escape it with ` when used as a column name. |

## INS_USER_PRIVILEGES

Note: users whose SYSINFO attribute is 0 cannot view this table.

| # | **Column** | **Data Type** | **Description** |
| --- | :----------: | ------------ | --------------- |
| 1 | user_name | VARCHAR(24) | user name
@ -73,10 +73,10 @@ SHOW CREATE TABLE [db_name.]tb_name

## SHOW DATABASES

```sql
SHOW [USER | SYSTEM] DATABASES;
```

Shows all databases. SYSTEM restricts the output to system databases; USER restricts it to user-created databases. Examples follow.
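For instance:

```sql
SHOW USER DATABASES;    -- only databases created by users
SHOW SYSTEM DATABASES;  -- only system databases
```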
## SHOW DNODES

@ -183,10 +183,10 @@ SHOW SUBSCRIPTIONS;

## SHOW TABLES

```sql
SHOW [NORMAL | CHILD] [db_name.]TABLES [LIKE 'pattern'];
```

Shows information about all normal tables and child tables in the current database. LIKE performs fuzzy matching on table names. NORMAL restricts the output to normal tables, CHILD to child tables. An example follows.
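For instance (database and pattern illustrative):

```sql
SHOW CHILD test.TABLES LIKE 'd%';
```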
## SHOW TABLE DISTRIBUTED
@ -395,6 +395,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)

### Query Scenario Configuration Parameters

In query scenarios, `filetype` must be set to `query`.
`query_times` specifies the number of times to run the queries; numeric type.

Query scenarios can kill slow queries by setting the `kill_slow_query_threshold` and `kill_slow_query_interval` parameters: a query whose exec_usec exceeds the threshold is killed by taosBenchmark, in seconds; interval is the sleep time between checks, in seconds, to avoid burning CPU by continuously polling for slow queries.
@ -106,7 +106,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...]

                             use letter and number only. Default is NOT.
  -n, --no-escape            No escape char '`'. Default is using it.
  -Q, --dot-replace          Replace dot character with underline character in
                             the table name. (Version 2.5.3)
  -T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is
                             8.
  -C, --cloud=CLOUD_DSN      specify a DSN to access TDengine cloud service

@ -116,6 +116,10 @@ Usage: taosdump [OPTION...] dbname [tbname ...]

  -?, --help                 Give this help list
      --usage                Give a short usage message
  -V, --version              Print program version
  -W, --rename=RENAME-LIST   Rename database name with new name during
                             importing data. RENAME-LIST:
                             "db1=newDB1|db2=newDB2" means rename db1 to newDB1
                             and rename db2 to newDB2 (Version 2.5.4)

Mandatory or optional arguments to long options are also mandatory or optional
for any corresponding short options.
@ -648,7 +648,16 @@ The valid value of charset is UTF-8.

| Applicable | client only |
| Meaning | key of the custom child-table name for schemaless writes |
| Type | string |
| Default | none |

### smlAutoChildTableNameDelimiter

| Attribute | Description |
| -------- | ------------------------------- |
| Applicable | client only |
| Meaning | delimiter placed between schemaless tags; the joined result becomes the child-table name |
| Type | string |
| Default | none |

### smlTagName

@ -716,16 +725,6 @@ The valid value of charset is UTF-8.

| Value range | 0: no change; 1: change |
| Default | 0 |

### tmqMaxTopicNum

| Attribute | Description |

@ -803,7 +802,7 @@ The valid value of charset is UTF-8.

| 53 | udf | yes | yes | |
| 54 | enableCoreFile | yes | yes | |
| 55 | ttlChangeOnWrite | no | yes | |
| 56 | keepTimeOffset | no | yes (deprecated since 3.2.0.0) | |

## Parameters Deprecated from 2.x to 3.0
@ -94,8 +94,11 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000

:::tip
Note that tag_key1, tag_key2 is not the original order in which the user entered the tags but the result of sorting the tag names in ascending string order, so tag_key1 is not the first tag in the line protocol.
After sorting, the MD5 hash "md5_val" of the string is computed, and the table name is built from it as "t_md5_val", where the "t_" prefix is fixed; every table generated through this mapping carries it.
:::tip
If you do not want the automatically generated table name, there are two ways to specify a child-table name, the first taking precedence:
Configure the smlAutoChildTableNameDelimiter parameter in taos.cfg (any character except `@ # space carriage-return newline tab`).
For example: with smlAutoChildTableNameDelimiter=-, inserting st,t0=cpu1,t1=4 c1=3 1626006833639000000 creates a table named cpu1-4.
Configure the smlChildTableName parameter in taos.cfg.
For example: with smlChildTableName=tname, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates a table named cpu1. Note that if several rows share the same tname but carry different tag_sets, the tag_set of the first row is used when the table is auto-created, and the others are ignored.

2. If the supertable obtained by parsing the line protocol does not exist, it is created (manually creating supertables is not recommended, as inserts may then behave unexpectedly).
@ -9,8 +9,6 @@ TDengine, via [taosKeeper](/reference/taosKeeper/), reports server CPU, memory, and other metrics

## TDinsight - monitoring TDengine with the monitoring database plus Grafana

We provide an automation script, `TDinsight.sh`, that deploys TDinsight.

Download `TDinsight.sh`:

@ -37,8 +35,6 @@ chmod +x TDinsight.sh

Run the script and restart the Grafana service, then open the dashboard: `http://localhost:3000/d/tdinsight`.

## The log Database

TDinsight dashboard data comes from the log database (the default database storing monitoring data; it can be changed in the taoskeeper configuration file, see the [taoskeeper documentation](/reference/taosKeeper)). taoskeeper creates the log database automatically on startup and writes monitoring data into it.

@ -102,22 +98,22 @@ TDinsight dashboard data comes from the log database

|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|uptime|FLOAT||dnode uptime, in days|
|cpu\_engine|FLOAT||taosd cpu usage, read from `/proc/<taosd_pid>/stat`|
|cpu\_system|FLOAT||server cpu usage, read from `/proc/stat`|
|cpu\_cores|FLOAT||number of server cpu cores|
|mem\_engine|INT||taosd memory usage, read from `/proc/<taosd_pid>/status`|
|mem\_system|INT||available server memory, in KB|
|mem\_total|INT||total server memory, in KB|
|disk\_engine|INT||in bytes|
|disk\_used|BIGINT||used space on the disk mounted at the data dir, in bytes|
|disk\_total|BIGINT||total capacity of the disk mounted at the data dir, in bytes|
|net\_in|FLOAT||network throughput; received bytes read from `/proc/net/dev`, in byte/s|
|net\_out|FLOAT||network throughput; transmit bytes read from `/proc/net/dev`, in byte/s|
|io\_read|FLOAT||io throughput; rate computed from rchar read from `/proc/<taosd_pid>/io` against the previous value, in byte/s|
|io\_write|FLOAT||io throughput; rate computed from wchar read from `/proc/<taosd_pid>/io` against the previous value, in byte/s|
|io\_read\_disk|FLOAT||disk io throughput; read_bytes from `/proc/<taosd_pid>/io`, in byte/s|
|io\_write\_disk|FLOAT||disk io throughput; write_bytes from `/proc/<taosd_pid>/io`, in byte/s|
|req\_select|INT||number of query requests in the interval|
|req\_select\_rate|FLOAT||query request rate in the interval = `req_select / monitorInterval`|
|req\_insert|INT||number of insert requests in the interval, counting individual rows|

@ -146,9 +142,9 @@ TDinsight dashboard data comes from the log database

|ts|TIMESTAMP||timestamp|
|name|NCHAR||data directory, typically `/var/lib/taos`|
|level|INT||multi-tier storage level 0, 1, or 2|
|avail|BIGINT||available space in the data directory, in bytes|
|used|BIGINT||used space in the data directory, in bytes|
|total|BIGINT||total space of the data directory, in bytes|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

@ -161,9 +157,9 @@ TDinsight dashboard data comes from the log database

|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||log directory name, typically `/var/log/taos/`|
|avail|BIGINT||available space in the log directory, in bytes|
|used|BIGINT||used space in the log directory, in bytes|
|total|BIGINT||total space of the log directory, in bytes|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

@ -176,9 +172,9 @@ TDinsight dashboard data comes from the log database

|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||temp directory name, typically `/tmp/`|
|avail|BIGINT||available space in the temp directory, in bytes|
|used|BIGINT||used space in the temp directory, in bytes|
|total|BIGINT||total space of the temp directory, in bytes|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
@ -14,40 +14,7 @@ Seeq is advanced analytics software for manufacturing and the Industrial Internet of Things (IIoT)

### Installing Seeq

Download the relevant software, such as Seeq Server and Seeq Data Lab, from the [Seeq website](https://www.seeq.com/customer-download). Seeq Data Lab must be installed on a separate server from Seeq Server and connected to it through configuration; see the [Seeq knowledge base](https://support.seeq.com/kb/latest/cloud/) for detailed installation and configuration instructions.

## Installing a Local TDengine Instance
@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://www.taosdata.com/all-downloads)

import Release from "/components/ReleaseV3";

## 3.2.0.0

<Release type="tdengine" version="3.2.0.0" />

## 3.1.1.0

<Release type="tdengine" version="3.1.1.0" />
@ -44,17 +44,17 @@ OS name: "windows 10", version: "10.0", arch: "amd64", family: "windows"

<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
          xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
    <!-- Path of the local maven repository -->
    <localRepository>D:\apache-maven-localRepository</localRepository>

    <mirrors>
        <!-- Aliyun Maven mirror repository -->
        <mirror>
            <id>alimaven</id>
            <name>aliyun maven</name>
            <url>http://maven.aliyun.com/nexus/content/groups/public/</url>
            <mirrorOf>central</mirrorOf>
        </mirror>
    </mirrors>

    <profiles>

@ -126,7 +126,7 @@ https://www.taosdata.com/cn/all-downloads/

Edit the client hosts file (C:\Windows\System32\drivers\etc\hosts) and add the server's hostname and IP:

```
192.168.236.136 td01
```

Once configured, connect to the server from the command line with the TDengine CLI.
@@ -11,13 +11,20 @@

    <properties>
        <project.assembly.dir>src/main/resources/assembly</project.assembly.dir>
        <java.version>1.8</java.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>com.taosdata.jdbc</groupId>
            <artifactId>taos-jdbcdriver</artifactId>
            <version>3.0.0</version>
            <version>3.2.7</version>
        </dependency>
        <dependency>
            <groupId>org.locationtech.jts</groupId>
            <artifactId>jts-core</artifactId>
            <version>1.19.0</version>
        </dependency>
    </dependencies>
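With the taos-jdbcdriver dependency above on the classpath, opening a connection takes a single JDBC call. A minimal sketch (the class name ConnectSketch is ours; host and credentials are the defaults used throughout these examples):

```java
import java.sql.Connection;
import java.sql.DriverManager;

public class ConnectSketch {
    public static void main(String[] args) throws Exception {
        // jdbc:TAOS uses the native connection on port 6030, as in the demos below
        String url = "jdbc:TAOS://localhost:6030/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url)) {
            System.out.println("connected: " + !conn.isClosed());
        }
    }
}
```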
@@ -68,12 +75,12 @@

                    </execution>

                    <execution>
                        <id>SubscribeDemo</id>
                        <id>GeometryDemo</id>
                        <configuration>
                            <finalName>SubscribeDemo</finalName>
                            <finalName>GeometryDemo</finalName>
                            <archive>
                                <manifest>
                                    <mainClass>com.taosdata.example.SubscribeDemo</mainClass>
                                    <mainClass>com.taosdata.example.GeometryDemo</mainClass>
                                </manifest>
                            </archive>
                            <descriptorRefs>
@@ -0,0 +1,94 @@

package com.taosdata.example;

import com.taosdata.jdbc.tmq.ConsumerRecord;
import com.taosdata.jdbc.tmq.ConsumerRecords;
import com.taosdata.jdbc.tmq.ReferenceDeserializer;
import com.taosdata.jdbc.tmq.TaosConsumer;

import java.sql.SQLException;
import java.sql.Timestamp;
import java.time.Duration;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

public abstract class ConsumerLoop {
    private final TaosConsumer<ResultBean> consumer;
    private final List<String> topics;
    private final AtomicBoolean shutdown;
    private final CountDownLatch shutdownLatch;

    public ConsumerLoop() throws SQLException {
        Properties config = new Properties();
        config.setProperty("td.connect.type", "jni");
        config.setProperty("bootstrap.servers", "localhost:6030");
        config.setProperty("td.connect.user", "root");
        config.setProperty("td.connect.pass", "taosdata");
        config.setProperty("auto.offset.reset", "earliest");
        config.setProperty("msg.with.table.name", "true");
        config.setProperty("enable.auto.commit", "true");
        config.setProperty("auto.commit.interval.ms", "1000");
        config.setProperty("group.id", "group1");
        config.setProperty("client.id", "1");
        // must name the deserializer class defined below
        config.setProperty("value.deserializer", "com.taosdata.example.ConsumerLoop$ResultDeserializer");
        config.setProperty("value.deserializer.encoding", "UTF-8");
        config.setProperty("experimental.snapshot.enable", "true");

        this.consumer = new TaosConsumer<>(config);
        this.topics = Collections.singletonList("topic_speed");
        this.shutdown = new AtomicBoolean(false);
        this.shutdownLatch = new CountDownLatch(1);
    }

    public abstract void process(ResultBean result);

    public void pollData() throws SQLException {
        try {
            consumer.subscribe(topics);

            while (!shutdown.get()) {
                ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<ResultBean> record : records) {
                    ResultBean bean = record.value();
                    process(bean);
                }
            }
            consumer.unsubscribe();
        } finally {
            consumer.close();
            shutdownLatch.countDown();
        }
    }

    public void shutdown() throws InterruptedException {
        shutdown.set(true);
        shutdownLatch.await();
    }

    public static class ResultDeserializer extends ReferenceDeserializer<ResultBean> {

    }

    public static class ResultBean {
        private Timestamp ts;
        private int speed;

        public Timestamp getTs() {
            return ts;
        }

        public void setTs(Timestamp ts) {
            this.ts = ts;
        }

        public int getSpeed() {
            return speed;
        }

        public void setSpeed(int speed) {
            this.speed = speed;
        }
    }
}
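ConsumerLoop is abstract, so a caller has to supply process(). A minimal driver sketch (the class name ConsumerLoopRunner is ours, not part of the example set), wiring shutdown() to a JVM shutdown hook so the poll loop drains cleanly:

```java
package com.taosdata.example;

import java.sql.SQLException;

// Hypothetical driver for the abstract ConsumerLoop above: prints each
// ResultBean and stops the poll loop when the JVM shuts down.
public class ConsumerLoopRunner {
    public static void main(String[] args) throws SQLException {
        ConsumerLoop loop = new ConsumerLoop() {
            @Override
            public void process(ResultBean result) {
                System.out.println(result.getTs() + " speed=" + result.getSpeed());
            }
        };
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            try {
                loop.shutdown(); // flips the flag, then waits for pollData() to finish
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }));
        loop.pollData(); // blocks until shutdown() is called
    }
}
```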
@@ -0,0 +1,190 @@

package com.taosdata.example;

import com.taosdata.jdbc.TSDBPreparedStatement;
import org.locationtech.jts.geom.*;
import org.locationtech.jts.io.ByteOrderValues;
import org.locationtech.jts.io.ParseException;
import org.locationtech.jts.io.WKBReader;
import org.locationtech.jts.io.WKBWriter;

import java.sql.*;
import java.util.ArrayList;
import java.util.Properties;

public class GeometryDemo {
    private static String host = "localhost";
    private static final String dbName = "test";
    private static final String tbName = "weather";
    private static final String user = "root";
    private static final String password = "taosdata";

    private Connection connection;

    public static void main(String[] args) throws SQLException {
        for (int i = 0; i < args.length; i++) {
            if ("-host".equalsIgnoreCase(args[i]) && i < args.length - 1)
                host = args[++i];
        }
        if (host == null) {
            printHelp();
        }
        GeometryDemo demo = new GeometryDemo();
        demo.init();
        demo.createDatabase();
        demo.useDatabase();
        demo.dropTable();
        demo.createTable();

        demo.insert();
        demo.stmtInsert();
        demo.select();

        demo.dropTable();
        demo.close();
    }

    private void init() {
        final String url = "jdbc:TAOS://" + host + ":6030/?user=" + user + "&password=" + password;
        // get connection
        try {
            Properties properties = new Properties();
            properties.setProperty("charset", "UTF-8");
            properties.setProperty("locale", "en_US.UTF-8");
            properties.setProperty("timezone", "UTC-8");
            System.out.println("get connection starting...");
            connection = DriverManager.getConnection(url, properties);
            if (connection != null)
                System.out.println("[ OK ] Connection established.");
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    private void createDatabase() {
        String sql = "create database if not exists " + dbName;
        execute(sql);
    }

    private void useDatabase() {
        String sql = "use " + dbName;
        execute(sql);
    }

    private void dropTable() {
        final String sql = "drop table if exists " + dbName + "." + tbName + "";
        execute(sql);
    }

    private void createTable() {
        final String sql = "create table if not exists " + dbName + "." + tbName + " (ts timestamp, temperature float, humidity int, location geometry(50))";
        execute(sql);
    }

    private void insert() {
        final String sql = "insert into " + dbName + "." + tbName + " (ts, temperature, humidity, location) values(now, 20.5, 34, 'POINT(1 2)')";
        execute(sql);
    }

    private void stmtInsert() throws SQLException {
        TSDBPreparedStatement preparedStatement = (TSDBPreparedStatement) connection.prepareStatement("insert into " + dbName + "." + tbName + " values (?, ?, ?, ?)");

        long current = System.currentTimeMillis();
        ArrayList<Long> tsList = new ArrayList<>();
        tsList.add(current);
        tsList.add(current + 1);
        preparedStatement.setTimestamp(0, tsList);
        ArrayList<Float> tempList = new ArrayList<>();
        tempList.add(20.1F);
        tempList.add(21.2F);
        preparedStatement.setFloat(1, tempList);
        ArrayList<Integer> humList = new ArrayList<>();
        humList.add(30);
        humList.add(31);
        preparedStatement.setInt(2, humList);

        ArrayList<byte[]> list = new ArrayList<>();
        GeometryFactory gf = new GeometryFactory();
        Point p1 = gf.createPoint(new Coordinate(1, 2));
        p1.setSRID(1234);

        // NOTE: the current TDengine version only supports 2D geometries in little-endian byte order
        WKBWriter w = new WKBWriter(2, ByteOrderValues.LITTLE_ENDIAN, true);
        byte[] wkb = w.write(p1);
        list.add(wkb);

        Coordinate[] coordinates = { new Coordinate(10, 20),
                new Coordinate(30, 40) };
        LineString lineString = gf.createLineString(coordinates);
        lineString.setSRID(2345);
        byte[] wkb2 = w.write(lineString);
        list.add(wkb2);

        preparedStatement.setGeometry(3, list, 50);

        preparedStatement.columnDataAddBatch();
        preparedStatement.columnDataExecuteBatch();
    }

    private void select() {
        final String sql = "select * from " + dbName + "." + tbName;
        executeQuery(sql);
    }

    private void close() {
        try {
            if (connection != null) {
                this.connection.close();
                System.out.println("connection closed.");
            }
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    private void executeQuery(String sql) {
        long start = System.currentTimeMillis();
        try (Statement statement = connection.createStatement()) {
            ResultSet resultSet = statement.executeQuery(sql);
            long end = System.currentTimeMillis();
            printSql(sql, true, (end - start));

            while (resultSet.next()) {
                byte[] result1 = resultSet.getBytes(4);
                WKBReader reader = new WKBReader();
                Geometry g1 = reader.read(result1);
                System.out.println("GEO OBJ: " + g1 + ", SRID: " + g1.getSRID());
            }

        } catch (SQLException e) {
            long end = System.currentTimeMillis();
            printSql(sql, false, (end - start));
            e.printStackTrace();
        } catch (ParseException e) {
            throw new RuntimeException(e);
        }
    }

    private void printSql(String sql, boolean succeed, long cost) {
        System.out.println("[ " + (succeed ? "OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql);
    }

    private void execute(String sql) {
        long start = System.currentTimeMillis();
        try (Statement statement = connection.createStatement()) {
            boolean execute = statement.execute(sql);
            long end = System.currentTimeMillis();
            printSql(sql, true, (end - start));
        } catch (SQLException e) {
            long end = System.currentTimeMillis();
            printSql(sql, false, (end - start));
            e.printStackTrace();
        }
    }

    private static void printHelp() {
        System.out.println("Usage: java -jar GeometryDemo.jar -host <hostname>");
        System.exit(0);
    }

}
@@ -0,0 +1,316 @@

package com.taosdata.example;

import com.taosdata.jdbc.TSDBPreparedStatement;
import com.taosdata.jdbc.utils.StringUtils;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;

public class ParameterBindingDemo {

    private static final String host = "127.0.0.1";
    private static final Random random = new Random(System.currentTimeMillis());
    private static final int BINARY_COLUMN_SIZE = 50;
    private static final String[] schemaList = {
            "create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
            "create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
            "create table stable3(ts timestamp, f1 bool) tags(t1 bool)",
            "create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))",
            "create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))",
            "create table stable6(ts timestamp, f1 varbinary(" + BINARY_COLUMN_SIZE + ")) tags(t1 varbinary(" + BINARY_COLUMN_SIZE + "))",
            "create table stable7(ts timestamp, f1 geometry(" + BINARY_COLUMN_SIZE + ")) tags(t1 geometry(" + BINARY_COLUMN_SIZE + "))",
    };
    private static final int numOfSubTable = 10, numOfRow = 10;

    public static void main(String[] args) throws SQLException {

        String jdbcUrl = "jdbc:TAOS://" + host + ":6030/";
        Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata");

        init(conn);

        bindInteger(conn);
        bindFloat(conn);
        bindBoolean(conn);
        bindBytes(conn);
        bindString(conn);
        bindVarbinary(conn);
        bindGeometry(conn);

        clean(conn);
        conn.close();
    }

    private static void init(Connection conn) throws SQLException {
        clean(conn);
        try (Statement stmt = conn.createStatement()) {
            stmt.execute("create database if not exists test_parabind");
            stmt.execute("use test_parabind");
            for (int i = 0; i < schemaList.length; i++) {
                stmt.execute(schemaList[i]);
            }
        }
    }

    private static void clean(Connection conn) throws SQLException {
        try (Statement stmt = conn.createStatement()) {
            stmt.execute("drop database if exists test_parabind");
        }
    }

    private static void bindInteger(Connection conn) throws SQLException {
        String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)";

        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {

            for (int i = 1; i <= numOfSubTable; i++) {
                // set table name
                pstmt.setTableName("t1_" + i);
                // set tags
                pstmt.setTagByte(0, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
                pstmt.setTagShort(1, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
                pstmt.setTagInt(2, random.nextInt(Integer.MAX_VALUE));
                pstmt.setTagLong(3, random.nextLong());
                // set columns
                ArrayList<Long> tsList = new ArrayList<>();
                long current = System.currentTimeMillis();
                for (int j = 0; j < numOfRow; j++)
                    tsList.add(current + j);
                pstmt.setTimestamp(0, tsList);

                ArrayList<Byte> f1List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++)
                    f1List.add(Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
                pstmt.setByte(1, f1List);

                ArrayList<Short> f2List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++)
                    f2List.add(Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
                pstmt.setShort(2, f2List);

                ArrayList<Integer> f3List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++)
                    f3List.add(random.nextInt(Integer.MAX_VALUE));
                pstmt.setInt(3, f3List);

                ArrayList<Long> f4List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++)
                    f4List.add(random.nextLong());
                pstmt.setLong(4, f4List);

                // add column
                pstmt.columnDataAddBatch();
            }
            // execute column
            pstmt.columnDataExecuteBatch();
        }
    }

    private static void bindFloat(Connection conn) throws SQLException {
        String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)";

        TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class);

        for (int i = 1; i <= numOfSubTable; i++) {
            // set table name
            pstmt.setTableName("t2_" + i);
            // set tags
            pstmt.setTagFloat(0, random.nextFloat());
            pstmt.setTagDouble(1, random.nextDouble());
            // set columns
            ArrayList<Long> tsList = new ArrayList<>();
            long current = System.currentTimeMillis();
            for (int j = 0; j < numOfRow; j++)
                tsList.add(current + j);
            pstmt.setTimestamp(0, tsList);

            ArrayList<Float> f1List = new ArrayList<>();
            for (int j = 0; j < numOfRow; j++)
                f1List.add(random.nextFloat());
            pstmt.setFloat(1, f1List);

            ArrayList<Double> f2List = new ArrayList<>();
            for (int j = 0; j < numOfRow; j++)
                f2List.add(random.nextDouble());
            pstmt.setDouble(2, f2List);

            // add column
            pstmt.columnDataAddBatch();
        }
        // execute
        pstmt.columnDataExecuteBatch();
        // close explicitly when the statement is not managed by try-with-resources
        pstmt.close();
    }

    private static void bindBoolean(Connection conn) throws SQLException {
        String sql = "insert into ? using stable3 tags(?) values(?,?)";

        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
            for (int i = 1; i <= numOfSubTable; i++) {
                // set table name
                pstmt.setTableName("t3_" + i);
                // set tags
                pstmt.setTagBoolean(0, random.nextBoolean());
                // set columns
                ArrayList<Long> tsList = new ArrayList<>();
                long current = System.currentTimeMillis();
                for (int j = 0; j < numOfRow; j++)
                    tsList.add(current + j);
                pstmt.setTimestamp(0, tsList);

                ArrayList<Boolean> f1List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++)
                    f1List.add(random.nextBoolean());
                pstmt.setBoolean(1, f1List);

                // add column
                pstmt.columnDataAddBatch();
            }
            // execute
            pstmt.columnDataExecuteBatch();
        }
    }

    private static void bindBytes(Connection conn) throws SQLException {
        String sql = "insert into ? using stable4 tags(?) values(?,?)";

        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {

            for (int i = 1; i <= numOfSubTable; i++) {
                // set table name
                pstmt.setTableName("t4_" + i);
                // set tags
                pstmt.setTagString(0, "abc");

                // set columns
                ArrayList<Long> tsList = new ArrayList<>();
                long current = System.currentTimeMillis();
                for (int j = 0; j < numOfRow; j++)
                    tsList.add(current + j);
                pstmt.setTimestamp(0, tsList);

                ArrayList<String> f1List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++) {
                    f1List.add("abc");
                }
                pstmt.setString(1, f1List, BINARY_COLUMN_SIZE);

                // add column
                pstmt.columnDataAddBatch();
            }
            // execute
            pstmt.columnDataExecuteBatch();
        }
    }

    private static void bindString(Connection conn) throws SQLException {
        String sql = "insert into ? using stable5 tags(?) values(?,?)";

        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {

            for (int i = 1; i <= numOfSubTable; i++) {
                // set table name
                pstmt.setTableName("t5_" + i);
                // set tags
                pstmt.setTagNString(0, "California.SanFrancisco");

                // set columns
                ArrayList<Long> tsList = new ArrayList<>();
                long current = System.currentTimeMillis();
                for (int j = 0; j < numOfRow; j++)
                    tsList.add(current + j);
                pstmt.setTimestamp(0, tsList);

                ArrayList<String> f1List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++) {
                    f1List.add("California.LosAngeles");
                }
                pstmt.setNString(1, f1List, BINARY_COLUMN_SIZE);

                // add column
                pstmt.columnDataAddBatch();
            }
            // execute
            pstmt.columnDataExecuteBatch();
        }
    }

    private static void bindVarbinary(Connection conn) throws SQLException {
        String sql = "insert into ? using stable6 tags(?) values(?,?)";

        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {

            for (int i = 1; i <= numOfSubTable; i++) {
                // set table name
                pstmt.setTableName("t6_" + i);
                // set tags
                byte[] bTag = new byte[]{0, 2, 3, 4, 5};
                bTag[0] = (byte) i;
                pstmt.setTagVarbinary(0, bTag);

                // set columns
                ArrayList<Long> tsList = new ArrayList<>();
                long current = System.currentTimeMillis();
                for (int j = 0; j < numOfRow; j++)
                    tsList.add(current + j);
                pstmt.setTimestamp(0, tsList);

                ArrayList<byte[]> f1List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++) {
                    byte[] v = new byte[]{0, 2, 3, 4, 5, 6};
                    v[0] = (byte) j;
                    f1List.add(v);
                }
                pstmt.setVarbinary(1, f1List, BINARY_COLUMN_SIZE);

                // add column
                pstmt.columnDataAddBatch();
            }
            // execute
            pstmt.columnDataExecuteBatch();
        }
    }

    private static void bindGeometry(Connection conn) throws SQLException {
        String sql = "insert into ? using stable7 tags(?) values(?,?)";

        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {

            byte[] g1 = StringUtils.hexToBytes("0101000000000000000000F03F0000000000000040");
            byte[] g2 = StringUtils.hexToBytes("0102000020E610000002000000000000000000F03F000000000000004000000000000008400000000000001040");
            List<byte[]> listGeo = new ArrayList<>();
            listGeo.add(g1);
            listGeo.add(g2);

            for (int i = 1; i <= 2; i++) {
                // set table name
                pstmt.setTableName("t7_" + i);
                // set tags
                pstmt.setTagGeometry(0, listGeo.get(i - 1));

                // set columns
                ArrayList<Long> tsList = new ArrayList<>();
                long current = System.currentTimeMillis();
                for (int j = 0; j < numOfRow; j++)
                    tsList.add(current + j);
                pstmt.setTimestamp(0, tsList);

                ArrayList<byte[]> f1List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++) {
                    f1List.add(listGeo.get(i - 1));
                }
                pstmt.setGeometry(1, f1List, BINARY_COLUMN_SIZE);

                // add column
                pstmt.columnDataAddBatch();
            }
            // execute
            pstmt.columnDataExecuteBatch();
        }
    }
}
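The two hex literals passed to setTagGeometry in bindGeometry are little-endian WKB. A small sketch of ours (assuming JTS is on the classpath, as in GeometryDemo above) decodes them to show what they encode:

```java
import com.taosdata.jdbc.utils.StringUtils;
import org.locationtech.jts.geom.Geometry;
import org.locationtech.jts.io.WKBReader;

public class DecodeWkbSketch {
    public static void main(String[] args) throws Exception {
        // JTS WKBReader also understands the extended-WKB SRID flag used in the second literal
        WKBReader reader = new WKBReader();
        Geometry g1 = reader.read(StringUtils.hexToBytes(
                "0101000000000000000000F03F0000000000000040"));
        Geometry g2 = reader.read(StringUtils.hexToBytes(
                "0102000020E610000002000000000000000000F03F000000000000004000000000000008400000000000001040"));
        System.out.println(g1 + " SRID=" + g1.getSRID()); // POINT (1 2) SRID=0
        System.out.println(g2 + " SRID=" + g2.getSRID()); // LINESTRING (1 2, 3 4) SRID=4326
    }
}
```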
@@ -1,74 +0,0 @@

package com.taosdata.example;

import com.taosdata.jdbc.TSDBConnection;
import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.TSDBResultSet;
import com.taosdata.jdbc.TSDBSubscribe;

import java.sql.DriverManager;
import java.sql.ResultSetMetaData;
import java.util.Properties;
import java.util.concurrent.TimeUnit;

public class SubscribeDemo {
    private static final String usage = "java -jar SubscribeDemo.jar -host <hostname> -database <database name> -topic <topic> -sql <sql>";

    public static void main(String[] args) {
        // parse args from command line
        String host = "", database = "", topic = "", sql = "";
        for (int i = 0; i < args.length; i++) {
            if ("-host".equalsIgnoreCase(args[i]) && i < args.length - 1) {
                host = args[++i];
            }
            if ("-database".equalsIgnoreCase(args[i]) && i < args.length - 1) {
                database = args[++i];
            }
            if ("-topic".equalsIgnoreCase(args[i]) && i < args.length - 1) {
                topic = args[++i];
            }
            if ("-sql".equalsIgnoreCase(args[i]) && i < args.length - 1) {
                sql = args[++i];
            }
        }
        if (host.isEmpty() || database.isEmpty() || topic.isEmpty() || sql.isEmpty()) {
            System.out.println(usage);
            return;
        }

        try {
            Properties properties = new Properties();
            properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
            final String url = "jdbc:TAOS://" + host + ":6030/" + database + "?user=root&password=taosdata";
            // get TSDBConnection
            TSDBConnection connection = (TSDBConnection) DriverManager.getConnection(url, properties);
            // create TSDBSubscribe
            TSDBSubscribe sub = connection.subscribe(topic, sql, false);

            int total = 0;
            while (true) {
                TSDBResultSet rs = sub.consume();
                int count = 0;
                ResultSetMetaData meta = rs.getMetaData();
                while (rs.next()) {
                    for (int i = 1; i <= meta.getColumnCount(); i++) {
                        System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t");
                    }
                    System.out.println();
                    count++;
                }
                total += count;
                // System.out.printf("%d rows consumed, total %d\n", count, total);
                if (total >= 10)
                    break;
                TimeUnit.SECONDS.sleep(1);
            }
            sub.close(false);
            connection.close();
        } catch (Exception e) {
            System.out.println("host: " + host + ", database: " + database + ", topic: " + topic + ", sql: " + sql);
            e.printStackTrace();
        }
    }
}
@@ -0,0 +1,170 @@

package com.taosdata.example;

import com.taosdata.jdbc.ws.TSWSPreparedStatement;

import java.sql.*;
import java.util.Random;

public class WSParameterBindingDemo {
    private static final String host = "127.0.0.1";
    private static final Random random = new Random(System.currentTimeMillis());
    private static final int BINARY_COLUMN_SIZE = 30;
    private static final String[] schemaList = {
            "create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
            "create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
            "create table stable3(ts timestamp, f1 bool) tags(t1 bool)",
            "create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))",
            "create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))"
    };
    private static final int numOfSubTable = 10, numOfRow = 10;

    public static void main(String[] args) throws SQLException {

        String jdbcUrl = "jdbc:TAOS-RS://" + host + ":6041/?batchfetch=true";
        Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata");

        init(conn);

        bindInteger(conn);

        bindFloat(conn);

        bindBoolean(conn);

        bindBytes(conn);

        bindString(conn);

        conn.close();
    }

    private static void init(Connection conn) throws SQLException {
        try (Statement stmt = conn.createStatement()) {
            stmt.execute("drop database if exists test_ws_parabind");
            stmt.execute("create database if not exists test_ws_parabind");
            stmt.execute("use test_ws_parabind");
            for (int i = 0; i < schemaList.length; i++) {
                stmt.execute(schemaList[i]);
            }
        }
    }

    private static void bindInteger(Connection conn) throws SQLException {
        String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)";

        try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {

            for (int i = 1; i <= numOfSubTable; i++) {
                // set table name
                pstmt.setTableName("t1_" + i);
                // set tags
                pstmt.setTagByte(1, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
                pstmt.setTagShort(2, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
                pstmt.setTagInt(3, random.nextInt(Integer.MAX_VALUE));
                pstmt.setTagLong(4, random.nextLong());
                // set columns
                long current = System.currentTimeMillis();
                for (int j = 0; j < numOfRow; j++) {
                    pstmt.setTimestamp(1, new Timestamp(current + j));
                    pstmt.setByte(2, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
                    pstmt.setShort(3, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
                    pstmt.setInt(4, random.nextInt(Integer.MAX_VALUE));
                    pstmt.setLong(5, random.nextLong());
                    pstmt.addBatch();
                }
                pstmt.executeBatch();
            }
        }
    }

    private static void bindFloat(Connection conn) throws SQLException {
        String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)";

        try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {

            for (int i = 1; i <= numOfSubTable; i++) {
                // set table name
                pstmt.setTableName("t2_" + i);
                // set tags
                pstmt.setTagFloat(1, random.nextFloat());
                pstmt.setTagDouble(2, random.nextDouble());
                // set columns
                long current = System.currentTimeMillis();
                for (int j = 0; j < numOfRow; j++) {
                    pstmt.setTimestamp(1, new Timestamp(current + j));
                    pstmt.setFloat(2, random.nextFloat());
                    pstmt.setDouble(3, random.nextDouble());
                    pstmt.addBatch();
                }
                pstmt.executeBatch();
            }
        }
    }

    private static void bindBoolean(Connection conn) throws SQLException {
        String sql = "insert into ? using stable3 tags(?) values(?,?)";

        try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
            for (int i = 1; i <= numOfSubTable; i++) {
                // set table name
                pstmt.setTableName("t3_" + i);
                // set tags
                pstmt.setTagBoolean(1, random.nextBoolean());
                // set columns
                long current = System.currentTimeMillis();
                for (int j = 0; j < numOfRow; j++) {
                    pstmt.setTimestamp(1, new Timestamp(current + j));
                    pstmt.setBoolean(2, random.nextBoolean());
                    pstmt.addBatch();
                }
                pstmt.executeBatch();
            }
        }
    }

    private static void bindBytes(Connection conn) throws SQLException {
        String sql = "insert into ? using stable4 tags(?) values(?,?)";

        try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {

            for (int i = 1; i <= numOfSubTable; i++) {
                // set table name
                pstmt.setTableName("t4_" + i);
                // set tags
                pstmt.setTagString(1, "abc");

                // set columns
                long current = System.currentTimeMillis();
                for (int j = 0; j < numOfRow; j++) {
                    pstmt.setTimestamp(1, new Timestamp(current + j));
                    pstmt.setString(2, "abc");
                    pstmt.addBatch();
                }
                pstmt.executeBatch();
            }
        }
    }

    private static void bindString(Connection conn) throws SQLException {
        String sql = "insert into ? using stable5 tags(?) values(?,?)";

        try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {

            for (int i = 1; i <= numOfSubTable; i++) {
                // set table name
                pstmt.setTableName("t5_" + i);
                // set tags
                pstmt.setTagNString(1, "California.SanFrancisco");

                // set columns
                long current = System.currentTimeMillis();
                for (int j = 0; j < numOfRow; j++) {
                    // parameter indices are 1-based, matching the other bind methods
                    pstmt.setTimestamp(1, new Timestamp(current + j));
                    pstmt.setNString(2, "California.SanFrancisco");
                    pstmt.addBatch();
                }
                pstmt.executeBatch();
            }
        }
    }
}
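Unlike the native demo, this one does not drop test_ws_parabind on exit, so the written rows can be queried back with plain JDBC. A minimal verification sketch of ours (same WebSocket URL and credentials as above; assumes the demo has already run):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class VerifySketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:TAOS-RS://127.0.0.1:6041/?batchfetch=true";
        try (Connection conn = DriverManager.getConnection(url, "root", "taosdata");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("select count(*) from test_ws_parabind.stable1")) {
            while (rs.next()) {
                // expect 100 rows: numOfSubTable (10) x numOfRow (10) from bindInteger
                System.out.println("rows in stable1: " + rs.getLong(1));
            }
        }
    }
}
```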
@@ -0,0 +1,3 @@

go mod init demo
go mod tidy
go build
@@ -125,7 +125,8 @@ typedef enum {

typedef enum {
  TAOS_NOTIFY_PASSVER = 0,
  TAOS_NOTIFY_WHITELIST_VER = 1
  TAOS_NOTIFY_WHITELIST_VER = 1,
  TAOS_NOTIFY_USER_DROPPED = 2,
} TAOS_NOTIFY_TYPE;

#define RET_MSG_LENGTH 1024
@@ -240,6 +241,11 @@ DLL_EXPORT int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param

typedef void (*__taos_async_whitelist_fn_t)(void *param, int code, TAOS *taos, int numOfWhiteLists, uint64_t *pWhiteLists);
DLL_EXPORT void taos_fetch_whitelist_a(TAOS *taos, __taos_async_whitelist_fn_t fp, void *param);

typedef enum {
  TAOS_CONN_MODE_BI = 0,
} TAOS_CONN_MODE;

DLL_EXPORT int taos_set_conn_mode(TAOS *taos, int mode, int value);
/* --------------------------schemaless INTERFACE------------------------------- */

DLL_EXPORT TAOS_RES *taos_schemaless_insert(TAOS *taos, char *lines[], int numLines, int protocol, int precision);
@@ -313,6 +319,7 @@ DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t

DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId);  // the current offset is the offset of the last consumed message + 1
DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId);

DLL_EXPORT TAOS *tmq_get_connect(tmq_t *tmq);
DLL_EXPORT const char *tmq_get_table_name(TAOS_RES *res);
DLL_EXPORT tmq_res_t tmq_get_res_type(TAOS_RES *res);
DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
@@ -331,8 +338,11 @@ typedef struct tmq_raw_data {

DLL_EXPORT int32_t tmq_get_raw(TAOS_RES *res, tmq_raw_data *raw);
DLL_EXPORT int32_t tmq_write_raw(TAOS *taos, tmq_raw_data raw);
DLL_EXPORT int taos_write_raw_block(TAOS *taos, int numOfRows, char *pData, const char *tbname);
DLL_EXPORT int taos_write_raw_block_with_reqid(TAOS *taos, int numOfRows, char *pData, const char *tbname, int64_t reqid);
DLL_EXPORT int taos_write_raw_block_with_fields(TAOS *taos, int rows, char *pData, const char *tbname,
                                                TAOS_FIELD *fields, int numFields);
DLL_EXPORT int taos_write_raw_block_with_fields_with_reqid(TAOS *taos, int rows, char *pData, const char *tbname,
                                                           TAOS_FIELD *fields, int numFields, int64_t reqid);
DLL_EXPORT void tmq_free_raw(tmq_raw_data raw);

// Returning null means error. The returned result needs to be freed by tmq_free_json_meta
@@ -16,21 +16,31 @@

#ifndef _TD_VND_COS_H_
#define _TD_VND_COS_H_

#include "vnd.h"
#include "os.h"

#ifdef __cplusplus
extern "C" {
#endif

extern int8_t tsS3Enabled;
#define S3_BLOCK_CACHE

extern int8_t  tsS3StreamEnabled;
extern int8_t  tsS3Enabled;
extern int32_t tsS3BlockSize;
extern int32_t tsS3BlockCacheSize;
extern int32_t tsS3PageCacheSize;
extern int32_t tsS3UploadDelaySec;

int32_t s3Init();
void    s3CleanUp();
int32_t s3PutObjectFromFile(const char *file, const char *object);
int32_t s3PutObjectFromFile2(const char *file, const char *object);
void    s3DeleteObjectsByPrefix(const char *prefix);
void    s3DeleteObjects(const char *object_name[], int nobject);
bool    s3Exists(const char *object_name);
bool    s3Get(const char *object_name, const char *path);
int32_t s3GetObjectsByPrefix(const char *prefix, const char *path);
int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t size, uint8_t **ppBlock);
void    s3EvictCache(const char *path, long object_size);
long    s3Size(const char *object_name);
@@ -0,0 +1,24 @@

//
// Created by mingming wanng on 2023/11/2.
//

#ifndef TDENGINE_RSYNC_H
#define TDENGINE_RSYNC_H

#ifdef __cplusplus
extern "C" {
#endif

#include "tarray.h"

void stopRsync();
void startRsync();
int  uploadRsync(char *id, char *path);
int  downloadRsync(char *id, char *path);
int  deleteRsync(char *id);

#ifdef __cplusplus
}
#endif

#endif  // TDENGINE_RSYNC_H
@@ -49,6 +49,7 @@ extern "C" {

#define TSDB_INS_TABLE_STREAMS         "ins_streams"
#define TSDB_INS_TABLE_STREAM_TASKS    "ins_stream_tasks"
#define TSDB_INS_TABLE_USER_PRIVILEGES "ins_user_privileges"
#define TSDB_INS_TABLE_VIEWS           "ins_views"

#define TSDB_PERFORMANCE_SCHEMA_DB     "performance_schema"
#define TSDB_PERFS_TABLE_SMAS          "perf_smas"
@@ -42,7 +42,8 @@ typedef enum {

  TSDB_TEMP_TABLE = 4,    // temp table created by nest query
  TSDB_SYSTEM_TABLE = 5,
  TSDB_TSMA_TABLE = 6,    // time-range-wise sma
  TSDB_TABLE_MAX = 7
  TSDB_VIEW_TABLE = 7,
  TSDB_TABLE_MAX = 8
} ETableType;

typedef enum {
Some files were not shown because too many files have changed in this diff.