Merge branch '3.0' of github.com:taosdata/TDengine into szhou/bi-mode-tag-scan

This commit is contained in:
shenglian zhou 2023-10-25 13:54:58 +08:00
commit 6edf954cd3
178 changed files with 9832 additions and 2863 deletions

View File

@ -16,8 +16,6 @@ set(TD_SUPPORT_DIR "${TD_SOURCE_DIR}/cmake")
set(TD_CONTRIB_DIR "${TD_SOURCE_DIR}/contrib")
include(${TD_SUPPORT_DIR}/cmake.platform)
include(${TD_SUPPORT_DIR}/cmake.define)
include(${TD_SUPPORT_DIR}/cmake.options)
@ -46,4 +44,4 @@ add_subdirectory(examples/c)
include(${TD_SUPPORT_DIR}/cmake.install)
# docs
add_subdirectory(docs/doxgen)
add_subdirectory(docs/doxgen)

View File

@ -128,11 +128,43 @@ option(
IF(${TD_LINUX})
option(
BUILD_WITH_COS
"If build with cos"
BUILD_S3
"If build with s3"
ON
)
option(
BUILD_WITH_S3
"If build with s3"
ON
)
option(
BUILD_WITH_COS
"If build with cos"
OFF
)
ENDIF ()
IF(${BUILD_S3})
IF(${BUILD_WITH_S3})
option(BUILD_WITH_COS "If build with cos" OFF)
ELSE ()
option(BUILD_WITH_COS "If build with cos" ON)
ENDIF ()
ELSE ()
option(BUILD_WITH_S3 "If build with s3" OFF)
option(BUILD_WITH_COS "If build with cos" OFF)
ENDIF ()
option(

View File

@ -93,36 +93,42 @@ ELSEIF (${CMAKE_SYSTEM_NAME} MATCHES "Windows")
ENDIF()
IF ("${CPUTYPE}" STREQUAL "")
IF (CMAKE_SYSTEM_PROCESSOR MATCHES "(amd64)|(AMD64)")
IF (CMAKE_SYSTEM_PROCESSOR MATCHES "(amd64)|(AMD64)|(x86_64)|(X86_64)")
MESSAGE(STATUS "Current platform is amd64")
SET(PLATFORM_ARCH_STR "amd64")
SET(CPUTYPE "x64")
SET(TD_INTEL_64 TRUE)
ADD_DEFINITIONS("-D_TD_X86_")
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "(x86)|(X86)")
MESSAGE(STATUS "Current platform is x86")
SET(PLATFORM_ARCH_STR "i386")
SET(CPUTYPE "x86")
SET(TD_INTEL_32 TRUE)
ADD_DEFINITIONS("-D_TD_X86_")
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "armv7l")
MESSAGE(STATUS "Current platform is aarch32")
SET(PLATFORM_ARCH_STR "arm")
SET(CPUTYPE "arm32")
SET(TD_ARM_32 TRUE)
ADD_DEFINITIONS("-D_TD_ARM_")
ADD_DEFINITIONS("-D_TD_ARM_32")
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "(aarch64)|(arm64)")
MESSAGE(STATUS "Current platform is aarch64")
SET(PLATFORM_ARCH_STR "arm64")
SET(CPUTYPE "arm64")
SET(TD_ARM_64 TRUE)
ADD_DEFINITIONS("-D_TD_ARM_")
ADD_DEFINITIONS("-D_TD_ARM_64")
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "loongarch64")
MESSAGE(STATUS "The current platform is loongarch64")
SET(PLATFORM_ARCH_STR "loongarch64")
SET(CPUTYPE "loongarch64")
SET(TD_LOONGARCH_64 TRUE)
ADD_DEFINITIONS("-D_TD_LOONGARCH_")
ADD_DEFINITIONS("-D_TD_LOONGARCH_64")
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "mips64")
SET(PLATFORM_ARCH_STR "mips")
SET(CPUTYPE "mips64")
MESSAGE(STATUS "input cpuType: mips64")
SET(TD_MIPS_64 TRUE)
ADD_DEFINITIONS("-D_TD_MIPS_")

View File

@ -50,7 +50,19 @@ ENDIF ()
IF (DEFINED VERDATE)
SET(TD_VER_DATE ${VERDATE})
ELSE ()
STRING(TIMESTAMP TD_VER_DATE "%Y-%m-%d %H:%M:%S")
STRING(COMPARE GREATER_EQUAL "${CMAKE_VERSION}" "3.26" TD_CMAKE_SUPPORT_TZ)
IF (TD_CMAKE_SUPPORT_TZ)
STRING(TIMESTAMP TD_VER_DATE "%Y-%m-%d %H:%M:%S %z")
ELSE ()
IF (TD_WINDOWS)
STRING(TIMESTAMP TD_VER_DATE "%Y-%m-%d %H:%M:%S")
ELSE ()
EXECUTE_PROCESS(COMMAND date +"%F %T %z" OUTPUT_VARIABLE TD_VER_DATE)
STRING(REPLACE "\"" "" TD_VER_DATE ${TD_VER_DATE})
STRING(STRIP ${TD_VER_DATE} TD_VER_DATE)
ENDIF ()
ENDIF ()
ENDIF ()
IF (DEFINED VERTYPE)
@ -67,9 +79,9 @@ ELSE ()
ELSEIF (TD_LINUX_32)
SET(TD_VER_CPUTYPE "x86")
ELSEIF (TD_ARM_32)
SET(TD_VER_CPUTYPE "x86")
SET(TD_VER_CPUTYPE "arm32")
ELSEIF (TD_MIPS_32)
SET(TD_VER_CPUTYPE "x86")
SET(TD_VER_CPUTYPE "mips32")
ELSE ()
SET(TD_VER_CPUTYPE "x64")
ENDIF ()

View File

@ -1,18 +1,18 @@
# curl
ExternalProject_Add(curl
ExternalProject_Add(curl2
URL https://curl.se/download/curl-8.2.1.tar.gz
URL_HASH MD5=b25588a43556068be05e1624e0e74d41
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
#GIT_REPOSITORY https://github.com/curl/curl.git
#GIT_TAG curl-7_88_1
SOURCE_DIR "${TD_CONTRIB_DIR}/curl"
SOURCE_DIR "${TD_CONTRIB_DIR}/curl2"
DEPENDS openssl
BUILD_IN_SOURCE TRUE
BUILD_ALWAYS 1
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd
#CONFIGURE_COMMAND ./configure --without-ssl
BUILD_COMMAND make
UPDATE_COMMAND ""
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.2 --with-ssl=$ENV{HOME}/.cos-local.2 --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd --without-libidn2 #--enable-debug
BUILD_COMMAND make -j
INSTALL_COMMAND make install
TEST_COMMAND ""
)

430
cmake/libs3.GNUmakefile Normal file
View File

@ -0,0 +1,430 @@
# GNUmakefile
#
# Copyright 2008 Bryan Ischo <bryan@ischo.com>
#
# This file is part of libs3.
#
# libs3 is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, version 3 or above of the License. You can also
# redistribute and/or modify it under the terms of the GNU General Public
# License, version 2 or above of the License.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of this library and its programs with the
# OpenSSL library, and distribute linked combinations including the two.
#
# libs3 is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# version 3 along with libs3, in a file named COPYING. If not, see
# <http://www.gnu.org/licenses/>.
#
# You should also have received a copy of the GNU General Public License
# version 2 along with libs3, in a file named COPYING-GPLv2. If not, see
# <http://www.gnu.org/licenses/>.
# I tried to use the autoconf/automake/autolocal/etc (i.e. autohell) tools
# but I just couldn't stomach them. Since this is a Makefile for POSIX
# systems, I will simply do away with autohell completely and use a GNU
# Makefile. GNU make ought to be available pretty much everywhere, so I
# don't see this being a significant issue for portability.
# All commands assume a GNU compiler. For systems which do not use a GNU
# compiler, write scripts with the same names as these commands, and taking
# the same arguments, and translate the arguments and commands into the
# appropriate non-POSIX ones as needed. libs3 assumes a GNU toolchain as
# the most portable way to build software possible. Non-POSIX, non-GNU
# systems can do the work of supporting this build infrastructure.
# --------------------------------------------------------------------------
# Set libs3 version number, unless it is already set.
LIBS3_VER_MAJOR ?= 4
LIBS3_VER_MINOR ?= 1
LIBS3_VER := $(LIBS3_VER_MAJOR).$(LIBS3_VER_MINOR)
# -----------------------------------------------------------------------------
# Determine verbosity. VERBOSE_SHOW should be prepended to every command which
# should only be displayed if VERBOSE is set. QUIET_ECHO may be used to
# echo text only if VERBOSE is not set. Typically, a VERBOSE_SHOW command will
# be paired with a QUIET_ECHO command, to provide a command which is displayed
# in VERBOSE mode, along with text which is displayed in non-VERBOSE mode to
# describe the command.
#
# No matter what VERBOSE is defined to, it ends up as true if it's defined.
# This will be weird if you defined VERBOSE=false in the environment, and we
# switch it to true here; but the meaning of VERBOSE is, "if it's defined to
# any value, then verbosity is turned on". So don't define VERBOSE if you
# don't want verbosity in the build process.
# -----------------------------------------------------------------------------
ifdef VERBOSE
VERBOSE = true
VERBOSE_ECHO = @ echo
VERBOSE_SHOW =
QUIET_ECHO = @ echo > /dev/null
else
VERBOSE = false
VERBOSE_ECHO = @ echo > /dev/null
VERBOSE_SHOW = @
QUIET_ECHO = @ echo
endif
# --------------------------------------------------------------------------
# BUILD directory
ifndef BUILD
ifdef DEBUG
BUILD := build-debug
else
BUILD := build
endif
endif
# --------------------------------------------------------------------------
# DESTDIR directory
ifndef DESTDIR
DESTDIR := ${HOME}/.cos-local.2
endif
# --------------------------------------------------------------------------
# LIBDIR directory
ifndef LIBDIR
LIBDIR := ${DESTDIR}/lib
endif
# --------------------------------------------------------------------------
# Compiler CC handling
ifndef CC
CC := gcc
endif
# --------------------------------------------------------------------------
# Acquire configuration information for libraries that libs3 depends upon
ifndef CURL_LIBS
CURL_LIBS := $(shell curl-config --libs)
endif
ifndef CURL_CFLAGS
CURL_CFLAGS := $(shell curl-config --cflags)
endif
ifndef LIBXML2_LIBS
LIBXML2_LIBS := $(shell xml2-config --libs)
endif
ifndef LIBXML2_CFLAGS
LIBXML2_CFLAGS := $(shell xml2-config --cflags)
endif
ifndef OPENSSL_LIBS
OPENSSL_LIBS := -lssl -lcrypto
endif
# --------------------------------------------------------------------------
# These CFLAGS assume a GNU compiler. For other compilers, write a script
# which converts these arguments into their equivalent for that particular
# compiler.
ifndef CFLAGS
ifdef DEBUG
CFLAGS := -g
else
CFLAGS := -O3
endif
endif
CFLAGS += -Wall -Werror -Wshadow -Wextra -Iinc \
$(CURL_CFLAGS) $(LIBXML2_CFLAGS) \
-DLIBS3_VER_MAJOR=\"$(LIBS3_VER_MAJOR)\" \
-DLIBS3_VER_MINOR=\"$(LIBS3_VER_MINOR)\" \
-DLIBS3_VER=\"$(LIBS3_VER)\" \
-D__STRICT_ANSI__ \
-D_ISOC99_SOURCE \
-D_POSIX_C_SOURCE=200112L
LDFLAGS = $(CURL_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) -lpthread
STRIP ?= strip
INSTALL := install --strip-program=$(STRIP)
# --------------------------------------------------------------------------
# Default targets are everything
.PHONY: all
all: exported test
# --------------------------------------------------------------------------
# Exported targets are the library and driver program
.PHONY: exported
exported: libs3 s3 headers
exported_static: $(LIBS3_STATIC)
# --------------------------------------------------------------------------
# Install target
.PHONY: install install_static
install_static: exported_static
$(QUIET_ECHO) $(LIBDIR)/libs3.a: Installing static library
$(VERBOSE_SHOW) $(INSTALL) -Dp -m u+rw,go+r $(BUILD)/lib/libs3.a \
$(LIBDIR)/libs3.a
$(QUIET_ECHO) $(DESTDIR)/include/libs3.h: Installing header
$(VERBOSE_SHOW) $(INSTALL) -Dp -m u+rw,go+r inc/libs3.h \
$(DESTDIR)/include/libs3.h
install: exported
$(QUIET_ECHO) $(DESTDIR)/bin/s3: Installing executable
$(VERBOSE_SHOW) $(INSTALL) -Dps -m u+rwx,go+rx $(BUILD)/bin/s3 \
$(DESTDIR)/bin/s3
$(QUIET_ECHO) \
$(LIBDIR)/libs3.so.$(LIBS3_VER): Installing shared library
$(VERBOSE_SHOW) $(INSTALL) -Dps -m u+rw,go+r \
$(BUILD)/lib/libs3.so.$(LIBS3_VER_MAJOR) \
$(LIBDIR)/libs3.so.$(LIBS3_VER)
$(QUIET_ECHO) \
$(LIBDIR)/libs3.so.$(LIBS3_VER_MAJOR): Linking shared library
$(VERBOSE_SHOW) ln -sf libs3.so.$(LIBS3_VER) \
$(LIBDIR)/libs3.so.$(LIBS3_VER_MAJOR)
$(QUIET_ECHO) $(LIBDIR)/libs3.so: Linking shared library
$(VERBOSE_SHOW) ln -sf libs3.so.$(LIBS3_VER_MAJOR) $(LIBDIR)/libs3.so
$(QUIET_ECHO) $(LIBDIR)/libs3.a: Installing static library
$(VERBOSE_SHOW) $(INSTALL) -Dp -m u+rw,go+r $(BUILD)/lib/libs3.a \
$(LIBDIR)/libs3.a
$(QUIET_ECHO) $(DESTDIR)/include/libs3.h: Installing header
$(VERBOSE_SHOW) $(INSTALL) -Dp -m u+rw,go+r $(BUILD)/include/libs3.h \
$(DESTDIR)/include/libs3.h
# --------------------------------------------------------------------------
# Uninstall target
.PHONY: uninstall
uninstall:
$(QUIET_ECHO) Installed files: Uninstalling
$(VERBOSE_SHOW) \
rm -f $(DESTDIR)/bin/s3 \
$(DESTDIR)/include/libs3.h \
$(DESTDIR)/lib/libs3.a \
$(DESTDIR)/lib/libs3.so \
$(DESTDIR)/lib/libs3.so.$(LIBS3_VER_MAJOR) \
$(DESTDIR)/lib/libs3.so.$(LIBS3_VER)
# --------------------------------------------------------------------------
# Compile target patterns
$(BUILD)/obj/%.o: src/%.c
$(QUIET_ECHO) $@: Compiling object
@ mkdir -p $(dir $(BUILD)/dep/$<)
@ $(CC) $(CFLAGS) -M -MG -MQ $@ -DCOMPILINGDEPENDENCIES \
-o $(BUILD)/dep/$(<:%.c=%.d) -c $<
@ mkdir -p $(dir $@)
$(VERBOSE_SHOW) $(CC) $(CFLAGS) -o $@ -c $<
$(BUILD)/obj/%.do: src/%.c
$(QUIET_ECHO) $@: Compiling dynamic object
$(QUIET_ECHO) cflags:${CFLAGS}
@ mkdir -p $(dir $(BUILD)/dep/$<)
@ $(CC) $(CFLAGS) -M -MG -MQ $@ -DCOMPILINGDEPENDENCIES \
-o $(BUILD)/dep/$(<:%.c=%.dd) -c $<
@ mkdir -p $(dir $@)
$(VERBOSE_SHOW) $(CC) $(CFLAGS) -fpic -fPIC -o $@ -c $<
# --------------------------------------------------------------------------
# libs3 library targets
LIBS3_SHARED = $(BUILD)/lib/libs3.so.$(LIBS3_VER_MAJOR)
LIBS3_STATIC = $(BUILD)/lib/libs3.a
.PHONY: libs3
libs3: $(LIBS3_SHARED) $(LIBS3_STATIC)
LIBS3_SOURCES := bucket.c bucket_metadata.c error_parser.c general.c \
object.c request.c request_context.c \
response_headers_handler.c service_access_logging.c \
service.c simplexml.c util.c multipart.c
$(LIBS3_SHARED): $(LIBS3_SOURCES:%.c=$(BUILD)/obj/%.do)
$(QUIET_ECHO) $@: Building shared library
@ mkdir -p $(dir $@)
$(VERBOSE_SHOW) $(CC) -shared -Wl,-soname,libs3.so.$(LIBS3_VER_MAJOR) \
-o $@ $^ $(LDFLAGS)
$(LIBS3_STATIC): $(LIBS3_SOURCES:%.c=$(BUILD)/obj/%.o)
$(QUIET_ECHO) $@: Building static library
@ mkdir -p $(dir $@)
$(VERBOSE_SHOW) $(AR) cr $@ $^
# --------------------------------------------------------------------------
# Driver program targets
.PHONY: s3
s3: $(BUILD)/bin/s3
$(BUILD)/bin/s3: $(BUILD)/obj/s3.o $(LIBS3_SHARED)
$(QUIET_ECHO) $@: Building executable
@ mkdir -p $(dir $@)
$(VERBOSE_SHOW) $(CC) -o $@ $^ $(LDFLAGS)
# --------------------------------------------------------------------------
# libs3 header targets
.PHONY: headers
headers: $(BUILD)/include/libs3.h
$(BUILD)/include/libs3.h: inc/libs3.h
$(QUIET_ECHO) $@: Linking header
@ mkdir -p $(dir $@)
$(VERBOSE_SHOW) ln -sf $(abspath $<) $@
# --------------------------------------------------------------------------
# Test targets
.PHONY: test
test: $(BUILD)/bin/testsimplexml
$(BUILD)/bin/testsimplexml: $(BUILD)/obj/testsimplexml.o $(LIBS3_STATIC)
$(QUIET_ECHO) $@: Building executable
@ mkdir -p $(dir $@)
$(VERBOSE_SHOW) $(CC) -o $@ $^ $(LIBXML2_LIBS)
# --------------------------------------------------------------------------
# Clean target
.PHONY: clean
clean:
$(QUIET_ECHO) $(BUILD): Cleaning
$(VERBOSE_SHOW) rm -rf $(BUILD)
.PHONY: distclean
distclean:
$(QUIET_ECHO) $(BUILD): Cleaning
$(VERBOSE_SHOW) rm -rf $(BUILD)
# --------------------------------------------------------------------------
# Clean dependencies target
.PHONY: cleandeps
cleandeps:
$(QUIET_ECHO) $(BUILD)/dep: Cleaning dependencies
$(VERBOSE_SHOW) rm -rf $(BUILD)/dep
# --------------------------------------------------------------------------
# Dependencies
ALL_SOURCES := $(LIBS3_SOURCES) s3.c testsimplexml.c
$(foreach i, $(ALL_SOURCES), $(eval -include $(BUILD)/dep/src/$(i:%.c=%.d)))
$(foreach i, $(ALL_SOURCES), $(eval -include $(BUILD)/dep/src/$(i:%.c=%.dd)))
# --------------------------------------------------------------------------
# Debian package target
DEBPKG = $(BUILD)/pkg/libs3_$(LIBS3_VER).deb
DEBDEVPKG = $(BUILD)/pkg/libs3-dev_$(LIBS3_VER).deb
.PHONY: deb
deb: $(DEBPKG) $(DEBDEVPKG)
$(DEBPKG): DEBARCH = $(shell dpkg-architecture | grep ^DEB_BUILD_ARCH= | \
cut -d '=' -f 2)
$(DEBPKG): exported $(BUILD)/deb/DEBIAN/control $(BUILD)/deb/DEBIAN/shlibs \
$(BUILD)/deb/DEBIAN/postinst \
$(BUILD)/deb/usr/share/doc/libs3/changelog.gz \
$(BUILD)/deb/usr/share/doc/libs3/changelog.Debian.gz \
$(BUILD)/deb/usr/share/doc/libs3/copyright
DESTDIR=$(BUILD)/deb/usr $(MAKE) install
rm -rf $(BUILD)/deb/usr/include
rm -f $(BUILD)/deb/usr/lib/libs3.a
@mkdir -p $(dir $@)
fakeroot dpkg-deb -b $(BUILD)/deb $@
mv $@ $(BUILD)/pkg/libs3_$(LIBS3_VER)_$(DEBARCH).deb
$(DEBDEVPKG): DEBARCH = $(shell dpkg-architecture | grep ^DEB_BUILD_ARCH= | \
cut -d '=' -f 2)
$(DEBDEVPKG): exported $(BUILD)/deb-dev/DEBIAN/control \
$(BUILD)/deb-dev/usr/share/doc/libs3-dev/changelog.gz \
$(BUILD)/deb-dev/usr/share/doc/libs3-dev/changelog.Debian.gz \
$(BUILD)/deb-dev/usr/share/doc/libs3-dev/copyright
DESTDIR=$(BUILD)/deb-dev/usr $(MAKE) install
rm -rf $(BUILD)/deb-dev/usr/bin
rm -f $(BUILD)/deb-dev/usr/lib/libs3.so*
@mkdir -p $(dir $@)
fakeroot dpkg-deb -b $(BUILD)/deb-dev $@
mv $@ $(BUILD)/pkg/libs3-dev_$(LIBS3_VER)_$(DEBARCH).deb
$(BUILD)/deb/DEBIAN/control: debian/control
@mkdir -p $(dir $@)
echo -n "Depends: " > $@
dpkg-shlibdeps -Sbuild -O $(BUILD)/lib/libs3.so.$(LIBS3_VER_MAJOR) | \
cut -d '=' -f 2- >> $@
sed -e 's/LIBS3_VERSION/$(LIBS3_VER)/' \
< $< | sed -e 's/DEBIAN_ARCHITECTURE/$(DEBARCH)/' | \
grep -v ^Source: >> $@
$(BUILD)/deb-dev/DEBIAN/control: debian/control.dev
@mkdir -p $(dir $@)
sed -e 's/LIBS3_VERSION/$(LIBS3_VER)/' \
< $< | sed -e 's/DEBIAN_ARCHITECTURE/$(DEBARCH)/' > $@
$(BUILD)/deb/DEBIAN/shlibs:
echo -n "libs3 $(LIBS3_VER_MAJOR) libs3 " > $@
echo "(>= $(LIBS3_VER))" >> $@
$(BUILD)/deb/DEBIAN/postinst: debian/postinst
@mkdir -p $(dir $@)
cp $< $@
$(BUILD)/deb/usr/share/doc/libs3/copyright: LICENSE
@mkdir -p $(dir $@)
cp $< $@
@echo >> $@
@echo -n "An alternate location for the GNU General Public " >> $@
@echo "License version 3 on Debian" >> $@
@echo "systems is /usr/share/common-licenses/GPL-3." >> $@
$(BUILD)/deb-dev/usr/share/doc/libs3-dev/copyright: LICENSE
@mkdir -p $(dir $@)
cp $< $@
@echo >> $@
@echo -n "An alternate location for the GNU General Public " >> $@
@echo "License version 3 on Debian" >> $@
@echo "systems is /usr/share/common-licenses/GPL-3." >> $@
$(BUILD)/deb/usr/share/doc/libs3/changelog.gz: debian/changelog
@mkdir -p $(dir $@)
gzip --best -c $< > $@
$(BUILD)/deb-dev/usr/share/doc/libs3-dev/changelog.gz: debian/changelog
@mkdir -p $(dir $@)
gzip --best -c $< > $@
$(BUILD)/deb/usr/share/doc/libs3/changelog.Debian.gz: debian/changelog.Debian
@mkdir -p $(dir $@)
gzip --best -c $< > $@
$(BUILD)/deb-dev/usr/share/doc/libs3-dev/changelog.Debian.gz: \
debian/changelog.Debian
@mkdir -p $(dir $@)
gzip --best -c $< > $@

View File

@ -0,0 +1,16 @@
# libs3
ExternalProject_Add(libs3
GIT_REPOSITORY https://github.com/bji/libs3
#GIT_TAG v5.0.16
DEPENDS curl2 xml2
SOURCE_DIR "${TD_CONTRIB_DIR}/libs3"
#BINARY_DIR ""
BUILD_IN_SOURCE TRUE
BUILD_ALWAYS 1
UPDATE_COMMAND ""
CONFIGURE_COMMAND cp ${TD_SUPPORT_DIR}/libs3.GNUmakefile GNUmakefile && sed -i "s|CFLAGS += -Wall -Werror|CFLAGS += -I'$ENV{HOME}/.cos-local.2/include' -L'$ENV{HOME}/.cos-local.2/lib' |" ./GNUmakefile
BUILD_COMMAND make build/lib/libs3.a
INSTALL_COMMAND make install_static
TEST_COMMAND ""
)

View File

@ -0,0 +1,15 @@
# openssl
ExternalProject_Add(openssl
URL https://www.openssl.org/source/openssl-3.1.3.tar.gz
URL_HASH SHA256=f0316a2ebd89e7f2352976445458689f80302093788c466692fb2a188b2eacf6
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
SOURCE_DIR "${TD_CONTRIB_DIR}/openssl"
BUILD_IN_SOURCE TRUE
#BUILD_ALWAYS 1
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ./Configure --prefix=$ENV{HOME}/.cos-local.2 -static #--no-shared
BUILD_COMMAND make -j
INSTALL_COMMAND make install_sw -j
TEST_COMMAND ""
)

View File

@ -0,0 +1,16 @@
# xml2
ExternalProject_Add(xml2
URL https://github.com/GNOME/libxml2/archive/refs/tags/v2.11.5.tar.gz
#GIT_REPOSITORY https://github.com/GNOME/libxml2
#GIT_TAG v2.11.5
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
SOURCE_DIR "${TD_CONTRIB_DIR}/xml2"
#BINARY_DIR ""
BUILD_IN_SOURCE TRUE
CONFIGURE_COMMAND ./autogen.sh && ./configure --prefix=$ENV{HOME}/.cos-local.2 --enable-shared=no --enable-static=yes --without-python
BUILD_COMMAND make -j
INSTALL_COMMAND make install && ln -s $ENV{HOME}/.cos-local.2/include/libxml2/libxml $ENV{HOME}/.cos-local.2/include/libxml
TEST_COMMAND ""
)

View File

@ -6,7 +6,10 @@ function(cat IN_FILE OUT_FILE)
file(APPEND ${OUT_FILE} "${CONTENTS}")
endfunction(cat IN_FILE OUT_FILE)
if(${TD_LINUX})
if(${BUILD_WITH_S3})
file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.2/)
elseif(${BUILD_WITH_COS})
set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
@ -37,7 +40,8 @@ execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
endif(${TD_LINUX})
endif()
set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@ -155,15 +159,24 @@ if(${BUILD_WITH_SQLITE})
cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${BUILD_WITH_SQLITE})
# s3
if(${BUILD_WITH_S3})
cat("${TD_SUPPORT_DIR}/ssl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/xml2_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/libs3_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_S3)
# cos
if(${BUILD_WITH_COS})
elseif(${BUILD_WITH_COS})
#cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
#cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
#cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
#cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_COS)
endif(${BUILD_WITH_COS})
endif()
# lucene
if(${BUILD_WITH_LUCENE})
@ -231,7 +244,6 @@ if(${BUILD_TEST})
)
endif(${TD_DARWIN})
endif(${BUILD_TEST})
# cJson
@ -248,6 +260,11 @@ target_include_directories(
)
unset(CMAKE_PROJECT_INCLUDE_BEFORE)
# xml2
#if(${BUILD_WITH_S3})
# add_subdirectory(xml2 EXCLUDE_FROM_ALL)
#endif()
# lz4
add_subdirectory(lz4/build/cmake EXCLUDE_FROM_ALL)
target_include_directories(
@ -390,14 +407,18 @@ if (${BUILD_WITH_ROCKSDB})
endif()
endif()
if(${BUILD_WITH_S3})
INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.2/include)
MESSAGE("build with s3: ${BUILD_WITH_S3}")
# cos
if(${BUILD_WITH_COS})
elseif(${BUILD_WITH_COS})
if(${TD_LINUX})
set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1)
#ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a)
option(ENABLE_TEST "Enable the tests" OFF)
INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include)
MESSAGE("$ENV{HOME}/.cos-local.1/include")
#MESSAGE("$ENV{HOME}/.cos-local.1/include")
set(CMAKE_BUILD_TYPE Release)
set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME})
@ -413,7 +434,8 @@ if(${BUILD_WITH_COS})
else()
endif(${TD_LINUX})
endif(${BUILD_WITH_COS})
endif()
# lucene
# To support build on ubuntu: sudo apt-get install libboost-all-dev

View File

@ -38,7 +38,10 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
- All the data in `tag_set` will be converted to NCHAR type automatically
- Each value in `field_set` must declare its own data type. For example, 1.2f32 denotes the FLOAT value 1.2; without a type suffix, the value is treated as DOUBLE
- Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h)
- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table names if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored
- Rules for child table names
- The child table name is generated automatically according to a rule that guarantees its uniqueness.
- You can configure `smlAutoChildTableNameDelimiter` in taos.cfg to specify a delimiter that joins tag values into the table name. For example, if you set `smlAutoChildTableNameDelimiter=-` in taos.cfg and insert `st,t0=cpu1,t1=4 c1=3 1626006833639000000`, the child table is named `cpu1-4` (see the Go sketch after this note)
- You can configure `smlChildTableName` in taos.cfg to use a tag value as the table name if that tag value is globally unique. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, then inserting `st,tname=cpu1,t1=4 c1=3 1626006833639000000` automatically creates the child table `cpu1`. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored
- It is assumed that the order of field_set within a supertable is consistent: the first record contains all fields and subsequent records list the fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false; otherwise data will be written in the wrong order and a database error will occur. (smlDataFormat defaults to false since version 3.0.1.3 and is deprecated since 3.0.3.0)
:::
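For reference, the rules above can be exercised through the Go connector's schemaless write API. The sketch below is illustrative only: the server address, credentials, the pre-created `test` database, and the example line are assumptions.

```go
package main

import (
	"github.com/taosdata/driver-go/v3/af"
)

func main() {
	// Assumed local TDengine server, default credentials, and an existing database named "test".
	conn, err := af.Open("localhost", "root", "taosdata", "test", 6030)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// One InfluxDB line: tag_set "location,groupid", field_set "current,voltage,phase",
	// millisecond timestamp. With smlAutoChildTableNameDelimiter=- configured, the child
	// table name would be the tag values joined by "-".
	lines := []string{
		"meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.31 1648432611249",
	}
	// The second argument declares the timestamp precision of the lines ("ms" here).
	if err := conn.InfluxDBInsertLines(lines, "ms"); err != nil {
		panic(err)
	}
}
```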

View File

@ -33,7 +33,10 @@ For example:
meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
```
- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table names if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
- Rules for child table names
- The child table name is generated automatically according to a rule that guarantees its uniqueness.
- You can configure `smlAutoChildTableNameDelimiter` in taos.cfg to specify a delimiter that joins tag values into the table name. For example, if you set `smlAutoChildTableNameDelimiter=-` in taos.cfg and insert `st,t0=cpu1,t1=4 c1=3 1626006833639000000`, the child table is named `cpu1-4`
- You can configure `smlChildTableName` in taos.cfg to use a tag value as the table name if that tag value is globally unique. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, then inserting `st,tname=cpu1,t1=4 c1=3 1626006833639000000` automatically creates the child table `cpu1`. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored
Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_telnet/put.html) for more details.
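A minimal Go sketch of writing this telnet line through the connector's schemaless call follows; the connection details and the pre-created `test` database are assumptions.

```go
package main

import (
	"github.com/taosdata/driver-go/v3/af"
)

func main() {
	conn, err := af.Open("localhost", "root", "taosdata", "test", 6030) // assumed server and credentials
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// OpenTSDB telnet format: metric, timestamp, value, then tag pairs.
	lines := []string{
		"meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3",
	}
	if err := conn.OpenTSDBInsertTelnetLines(lines); err != nil {
		panic(err)
	}
}
```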

View File

@ -48,7 +48,10 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http
:::note
- In JSON protocol, strings will be converted to NCHAR type and numeric values will be converted to double type.
- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table names if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
- Rules for child table names
- The child table name is generated automatically according to a rule that guarantees its uniqueness.
- You can configure `smlAutoChildTableNameDelimiter` in taos.cfg to specify a delimiter that joins tag values into the table name. For example, if you set `smlAutoChildTableNameDelimiter=-` in taos.cfg and insert `st,t0=cpu1,t1=4 c1=3 1626006833639000000`, the child table is named `cpu1-4`
- You can configure `smlChildTableName` in taos.cfg to use a tag value as the table name if that tag value is globally unique. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, then inserting `st,tname=cpu1,t1=4 c1=3 1626006833639000000` automatically creates the child table `cpu1`. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored (see the Go sketch after this note)
:::
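The JSON variant can be written through the connector's JSON schemaless call; again a sketch under assumed connection settings, with a hypothetical `tname` tag illustrating the `smlChildTableName=tname` rule above.

```go
package main

import (
	"github.com/taosdata/driver-go/v3/af"
)

func main() {
	conn, err := af.Open("localhost", "root", "taosdata", "test", 6030) // assumed server and credentials
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// One data point in OpenTSDB JSON format; if smlChildTableName=tname is set in
	// taos.cfg, the child table would be named cpu1 after the tname tag value.
	payload := `[{"metric":"meters.current","timestamp":1648432611250,"value":11.3,
	  "tags":{"location":"California.LosAngeles","groupid":3,"tname":"cpu1"}}]`
	if err := conn.OpenTSDBInsertJsonPayload(payload); err != nil {
		panic(err)
	}
}
```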

View File

@ -356,6 +356,7 @@ You configure the following parameters when creating a consumer:
| `enable.auto.commit` | boolean | Commit automatically; true: the user application doesn't need to commit explicitly; false: the user application needs to handle commits itself | Default value is true |
| `auto.commit.interval.ms` | integer | Interval for automatic commits, in milliseconds |
| `msg.with.table.name` | boolean | Specify whether to deserialize table names from messages. Not applicable when subscribing to a column (tbname can be written as a column in the subquery statement during column subscriptions) (This parameter has been deprecated since version 3.2.0.0 and remains true) | default value: false
| `enable.replay` | boolean | Specify whether the data replay function is enabled | default value: false |
The method of specifying these parameters depends on the language used:
@ -371,7 +372,7 @@ tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
tmq_conf_set(conf, "group.id", "cgrpName");
tmq_conf_set(conf, "td.connect.user", "root");
tmq_conf_set(conf, "td.connect.pass", "taosdata");
tmq_conf_set(conf, "auto.offset.reset", "earliest");
tmq_conf_set(conf, "auto.offset.reset", "latest");
tmq_conf_set(conf, "msg.with.table.name", "true");
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
@ -401,7 +402,7 @@ properties.setProperty("group.id", "cgrpName");
properties.setProperty("bootstrap.servers", "127.0.0.1:6030");
properties.setProperty("td.connect.user", "root");
properties.setProperty("td.connect.pass", "taosdata");
properties.setProperty("auto.offset.reset", "earliest");
properties.setProperty("auto.offset.reset", "latest");
properties.setProperty("msg.with.table.name", "true");
properties.setProperty("value.deserializer", "com.taos.example.MetersDeserializer");
@ -421,7 +422,7 @@ public class MetersDeserializer extends ReferenceDeserializer<Meters> {
```go
conf := &tmq.ConfigMap{
"group.id": "test",
"auto.offset.reset": "earliest",
"auto.offset.reset": "latest",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
@ -441,7 +442,7 @@ consumer, err := NewConsumer(conf)
let mut dsn: Dsn = "taos://".parse()?;
dsn.set("group.id", "group1");
dsn.set("client.id", "test");
dsn.set("auto.offset.reset", "earliest");
dsn.set("auto.offset.reset", "latest");
let tmq = TmqBuilder::from_dsn(dsn)?;
@ -467,7 +468,7 @@ consumer = Consumer(
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"auto.offset.reset": "earliest",
"auto.offset.reset": "latest",
"msg.with.table.name": "true",
}
)
@ -487,7 +488,7 @@ let consumer = taos.consumer({
'group.id': 'tg2',
'td.connect.user': 'root',
'td.connect.pass': 'taosdata',
'auto.offset.reset': 'earliest',
'auto.offset.reset': 'latest',
'msg.with.table.name': 'true',
'td.connect.ip': '127.0.0.1',
'td.connect.port': '6030'
@ -510,7 +511,7 @@ var cfg = new ConsumerConfig
GourpId = "TDengine-TMQ-C#",
TDConnectUser = "root",
TDConnectPasswd = "taosdata",
AutoOffsetReset = "earliest"
AutoOffsetReset = "latest"
MsgWithTableName = "true",
TDConnectIp = "127.0.0.1",
TDConnectPort = "6030"
@ -526,6 +527,24 @@ var consumer = new ConsumerBuilder(cfg).Build();
A consumer group is automatically created when multiple consumers are configured with the same consumer group ID.
Description of the data replay function:
- Subscription supports replay, which replays data according to the time at which it was originally written.
For example, suppose the following three rows were written at these times:
```sql
2023/09/22 00:00:00.000
2023/09/22 00:00:05.000
2023/09/22 00:00:08.000
```
During replay, the second row is returned 5 seconds after the first row, and the third row is returned 3 seconds after the second row.
- Only column subscriptions support data replay.
- Replay requires an independent timeline
- For a subtable or normal-table subscription, only one vnode holds the data, so a single timeline is guaranteed.
- For a supertable subscription, the database must contain only one vnode; otherwise an error is reported (because data subscribed from multiple vnodes is not on the same timeline).
- Supertable and database subscriptions do not support replay
- The enable.replay parameter controls replay: true enables the subscription replay function; false (the default) disables it (see the consumer sketch after this list).
- Replay does not support saving consumption progress, so auto commit is disabled automatically when enable.replay is set to true.
- Because data replay itself requires processing time, replay timing is accurate only to within a few tens of milliseconds.
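To make the replay options concrete, here is a minimal Go consumer sketch with replay enabled. The topic name `topic_meters` is hypothetical, and it is assumed that the Go connector forwards the `enable.replay` key to the server unchanged; treat this as an illustration rather than the connector's documented usage.

```go
package main

import (
	"fmt"

	"github.com/taosdata/driver-go/v3/af/tmq"
	tmqcommon "github.com/taosdata/driver-go/v3/common/tmq"
)

func main() {
	consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
		"group.id":          "replay_group",
		"auto.offset.reset": "earliest", // replay normally starts from the oldest data
		"td.connect.ip":     "127.0.0.1",
		"td.connect.user":   "root",
		"td.connect.pass":   "taosdata",
		"td.connect.port":   "6030",
		"enable.replay":     "true", // auto commit is disabled automatically when replay is on
	})
	if err != nil {
		panic(err)
	}
	defer consumer.Close()

	// "topic_meters" is assumed to be a column-subscription topic on a single-vnode source.
	if err = consumer.Subscribe("topic_meters", nil); err != nil {
		panic(err)
	}
	// Messages arrive with the same spacing as the original writes, so polls may
	// return nothing for several seconds between rows.
	for i := 0; i < 10; i++ {
		if ev := consumer.Poll(500); ev != nil {
			switch e := ev.(type) {
			case *tmqcommon.DataMessage:
				fmt.Println(e.Value())
			default:
				fmt.Println(e)
			}
		}
	}
}
```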
## Subscribe to a Topic
A single consumer can subscribe to multiple topics.

View File

@ -1093,7 +1093,7 @@ TaosConsumer consumer = new TaosConsumer<>(config);
- httpConnectTimeout: WebSocket connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using WebSocket type.
- messageWaitTimeout: socket timeout in milliseconds, the default value is 10000 ms. It only takes effect when using WebSocket type.
- httpPoolSize: Maximum number of concurrent requests on a connection. It only takes effect when using WebSocket type.
- For more information, see [Consumer Parameters](../../../develop/tmq).
- For more information, see [Consumer Parameters](../../../develop/tmq). Note that the default value of auto.offset.reset in data subscription on the TDengine server has changed since version 3.2.0.0.
#### Subscribe to consume data
@ -1193,7 +1193,7 @@ public abstract class ConsumerLoop {
config.setProperty("bootstrap.servers", "localhost:6030");
config.setProperty("td.connect.user", "root");
config.setProperty("td.connect.pass", "taosdata");
config.setProperty("auto.offset.reset", "earliest");
config.setProperty("auto.offset.reset", "latest");
config.setProperty("msg.with.table.name", "true");
config.setProperty("enable.auto.commit", "true");
config.setProperty("auto.commit.interval.ms", "1000");
@ -1276,7 +1276,7 @@ public abstract class ConsumerLoop {
config.setProperty("bootstrap.servers", "localhost:6041");
config.setProperty("td.connect.user", "root");
config.setProperty("td.connect.pass", "taosdata");
config.setProperty("auto.offset.reset", "earliest");
config.setProperty("auto.offset.reset", "latest");
config.setProperty("msg.with.table.name", "true");
config.setProperty("enable.auto.commit", "true");
config.setProperty("auto.commit.interval.ms", "1000");

View File

@ -794,7 +794,7 @@ The TDengine Go Connector supports subscription functionality with the following
```go
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
"group.id": "test",
"auto.offset.reset": "earliest",
"auto.offset.reset": "latest",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
@ -870,6 +870,7 @@ package main
import (
"fmt"
"os"
"time"
"github.com/taosdata/driver-go/v3/af"
"github.com/taosdata/driver-go/v3/af/tmq"
@ -890,19 +891,16 @@ func main() {
if err != nil {
panic(err)
}
if err != nil {
panic(err)
}
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
"group.id": "test",
"auto.offset.reset": "earliest",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"td.connect.port": "6030",
"client.id": "test_tmq_client",
"enable.auto.commit": "false",
"msg.with.table.name": "true",
"group.id": "test",
"auto.offset.reset": "latest",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"td.connect.port": "6030",
"client.id": "test_tmq_client",
"enable.auto.commit": "false",
"msg.with.table.name": "true",
})
if err != nil {
panic(err)
@ -915,10 +913,16 @@ func main() {
if err != nil {
panic(err)
}
_, err = db.Exec("insert into example_tmq.t1 values(now,1)")
if err != nil {
panic(err)
}
go func() {
for {
_, err = db.Exec("insert into example_tmq.t1 values(now,1)")
if err != nil {
panic(err)
}
time.Sleep(time.Millisecond * 100)
}
}()
for i := 0; i < 5; i++ {
ev := consumer.Poll(500)
if ev != nil {
@ -972,6 +976,7 @@ package main
import (
"database/sql"
"fmt"
"time"
"github.com/taosdata/driver-go/v3/common"
tmqcommon "github.com/taosdata/driver-go/v3/common/tmq"
@ -995,7 +1000,7 @@ func main() {
"td.connect.pass": "taosdata",
"group.id": "example",
"client.id": "example_consumer",
"auto.offset.reset": "earliest",
"auto.offset.reset": "latest",
})
if err != nil {
panic(err)
@ -1004,29 +1009,34 @@ func main() {
if err != nil {
panic(err)
}
_, err = db.Exec("create table example_ws_tmq.t_all(ts timestamp," +
"c1 bool," +
"c2 tinyint," +
"c3 smallint," +
"c4 int," +
"c5 bigint," +
"c6 tinyint unsigned," +
"c7 smallint unsigned," +
"c8 int unsigned," +
"c9 bigint unsigned," +
"c10 float," +
"c11 double," +
"c12 binary(20)," +
"c13 nchar(20)" +
")")
if err != nil {
panic(err)
}
go func() {
_, err := db.Exec("create table example_ws_tmq.t_all(ts timestamp," +
"c1 bool," +
"c2 tinyint," +
"c3 smallint," +
"c4 int," +
"c5 bigint," +
"c6 tinyint unsigned," +
"c7 smallint unsigned," +
"c8 int unsigned," +
"c9 bigint unsigned," +
"c10 float," +
"c11 double," +
"c12 binary(20)," +
"c13 nchar(20)" +
")")
if err != nil {
panic(err)
}
_, err = db.Exec("insert into example_ws_tmq.t_all values(now,true,2,3,4,5,6,7,8,9,10.123,11.123,'binary','nchar')")
if err != nil {
panic(err)
for {
_, err = db.Exec("insert into example_ws_tmq.t_all values(now,true,2,3,4,5,6,7,8,9,10.123,11.123,'binary','nchar')")
if err != nil {
panic(err)
}
time.Sleep(time.Millisecond * 100)
}
}()
for i := 0; i < 5; i++ {
ev := consumer.Poll(500)

View File

@ -442,7 +442,7 @@ The following parameters can be configured for the TMQ DSN. Only `group.id` is m
- `group.id`: Within a consumer group, load balancing is implemented by consuming messages on an at-least-once basis.
- `client.id`: Subscriber client ID.
- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default is earliest. Note: This parameter is set per consumer group.
- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default value varies depending on the TDengine version. For details, see [Data Subscription](https://docs.tdengine.com/develop/tmq/). Note: This parameter is set per consumer group.
- `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential.
- `auto.commit.interval.ms`: Interval for automatic commits.

View File

@ -652,6 +652,15 @@ The charset that takes effect is UTF-8.
| Type | String |
| Default Value | None |
### smlAutoChildTableNameDelimiter
| Attribute | Description |
| ------------- | ------------------------------------------ |
| Applicable | Client only |
| Meaning | Delimiter that joins tag values to form the child table name |
| Type | String |
| Default Value | None |
### smlTagName
| Attribute | Description |

View File

@ -93,6 +93,8 @@ Note that tag_key1, tag_key2 are not the original order of the tags entered by t
After the sorting is complete, the MD5 hash value "md5_val" of the string is calculated. The result is then combined with the fixed prefix "t\_" to generate the table name "t_md5_val"; every table generated through this mapping relationship has this prefix.
:::
If you do not want to use the automatically generated table name, there are two ways to specify child table names; the first takes higher priority.
You can configure smlAutoChildTableNameDelimiter in taos.cfg (any character except `@ # space \r \t \n`), for example, `smlAutoChildTableNameDelimiter=-`. If you insert `st,t0=cpu1,t1=4 c1=3 1626006833639000000`, the table name will be cpu1-4.
You can configure smlChildTableName in taos.cfg to specify table names, for example, `smlChildTableName=tname`. If you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the table cpu1 will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
2. If the super table obtained by parsing the line protocol does not exist, this super table is created.

View File

@ -3,6 +3,7 @@ package main
import (
"fmt"
"os"
"time"
"github.com/taosdata/driver-go/v3/af"
"github.com/taosdata/driver-go/v3/af/tmq"
@ -27,15 +28,15 @@ func main() {
panic(err)
}
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
"group.id": "test",
"auto.offset.reset": "earliest",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"td.connect.port": "6030",
"client.id": "test_tmq_client",
"enable.auto.commit": "false",
"msg.with.table.name": "true",
"group.id": "test",
"auto.offset.reset": "latest",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"td.connect.port": "6030",
"client.id": "test_tmq_client",
"enable.auto.commit": "false",
"msg.with.table.name": "true",
})
if err != nil {
panic(err)
@ -48,12 +49,17 @@ func main() {
if err != nil {
panic(err)
}
_, err = db.Exec("insert into example_tmq.t1 values(now,1)")
if err != nil {
panic(err)
}
go func() {
for {
_, err = db.Exec("insert into example_tmq.t1 values(now,1)")
if err != nil {
panic(err)
}
time.Sleep(time.Microsecond * 100)
}
}()
for i := 0; i < 5; i++ {
ev := consumer.Poll(0)
ev := consumer.Poll(500)
if ev != nil {
switch e := ev.(type) {
case *tmqcommon.DataMessage:

View File

@ -66,7 +66,6 @@ public class SubscribeDemo {
properties.setProperty(TMQConstants.VALUE_DESERIALIZER,
"com.taos.example.MetersDeserializer");
properties.setProperty(TMQConstants.VALUE_DESERIALIZER_ENCODING, "UTF-8");
properties.setProperty(TMQConstants.EXPERIMENTAL_SNAPSHOT_ENABLE, "true");
// poll data
try (TaosConsumer<Meters> consumer = new TaosConsumer<>(properties)) {

View File

@ -66,7 +66,6 @@ public class WebsocketSubscribeDemo {
properties.setProperty(TMQConstants.VALUE_DESERIALIZER,
"com.taos.example.MetersDeserializer");
properties.setProperty(TMQConstants.VALUE_DESERIALIZER_ENCODING, "UTF-8");
properties.setProperty(TMQConstants.EXPERIMENTAL_SNAPSHOT_ENABLE, "true");
// poll data
try (TaosConsumer<Meters> consumer = new TaosConsumer<>(properties)) {

View File

@ -23,9 +23,6 @@ def taos_get_assignment_and_seek_demo():
consumer = Consumer(
{
"group.id": "0",
# should disable snapshot,
# otherwise it will cause invalid params error
"experimental.snapshot.enable": "false",
}
)
consumer.subscribe(["tmq_assignment_demo_topic"])

View File

@ -21,9 +21,6 @@ def taosws_get_assignment_and_seek_demo():
prepare()
consumer = taosws.Consumer(conf={
"td.connect.websocket.scheme": "ws",
# should disable snapshot,
# otherwise it will cause invalid params error
"experimental.snapshot.enable": "false",
"group.id": "0",
})
consumer.subscribe(["tmq_assignment_demo_topic"])

View File

@ -38,7 +38,10 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
- Each data item in field_set must declare its own data type; for example, 1.2f32 denotes the FLOAT value 1.2, and a value without a type suffix is treated as DOUBLE
- timestamp supports multiple time precisions. The precision is specified with a parameter when writing data; six precisions from hour down to nanosecond are supported
- To improve write efficiency, it is assumed by default that the field_set order within a supertable is the same (the first row contains all fields and subsequent rows follow that order). If the order differs, set the smlDataFormat parameter to false; otherwise the data is written in the assumed order and the data in the database will be wrong. (Since version 3.0.1.3 smlDataFormat defaults to false, and since 3.0.3.0 the option is deprecated.) [TDengine Schemaless Writing Reference](/reference/schemaless/#无模式写入行协议)
- By default, the generated child table name is a unique ID produced by a rule. You can also set the smlChildTableName parameter in the client-side taos.cfg to use a tag value as the child table name; that tag value must be globally unique. For example, given a tag named tname and smlChildTableName=tname, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates the child table cpu1. Note that if multiple rows share the same tname but have different tag_set values, the tag_set of the first row that auto-created the table is used and the others are ignored. [TDengine Schemaless Writing Reference](/reference/schemaless/#无模式写入行协议)
- Rules for generating child table names
- By default, the generated child table name is a unique ID produced by a rule.
- You can also set the smlAutoChildTableNameDelimiter parameter in the client-side taos.cfg to specify a delimiter that joins the tag values into the child table name. For example, with smlAutoChildTableNameDelimiter=- configured, inserting st,t0=cpu1,t1=4 c1=3 1626006833639000000 creates the child table cpu1-4.
- You can also set the smlChildTableName parameter in the client-side taos.cfg to use a tag value as the child table name; that tag value must be globally unique. For example, given a tag named tname and smlChildTableName=tname, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates the child table cpu1. Note that if multiple rows share the same tname but have different tag_set values, the tag_set of the first row that auto-created the table is used and the others are ignored. [TDengine Schemaless Writing Reference](/reference/schemaless/#无模式写入行协议)
:::

View File

@ -31,8 +31,11 @@ The OpenTSDB line protocol likewise uses a single line of text to represent one row of data. OpenTSDB
```txt
meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
```
- Rules for generating child table names
- By default, the generated child table name is a unique ID produced by a rule.
- You can also set the smlAutoChildTableNameDelimiter parameter in the client-side taos.cfg to specify a delimiter that joins the tag values into the child table name. For example, with smlAutoChildTableNameDelimiter=- configured, inserting st,t0=cpu1,t1=4 c1=3 1626006833639000000 creates the child table cpu1-4.
- You can also set the smlChildTableName parameter in the client-side taos.cfg to use a tag value as the child table name; that tag value must be globally unique. For example, given a tag named tname and smlChildTableName=tname, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates the child table cpu1. Note that if multiple rows share the same tname but have different tag_set values, the tag_set of the first row that auto-created the table is used and the others are ignored. [TDengine Schemaless Writing Reference](/reference/schemaless/#无模式写入行协议)
- By default, the generated child table name is a unique ID produced by a rule. You can also set the smlChildTableName parameter in the client-side taos.cfg to use a tag value as the child table name; that tag value must be globally unique. For example, given a tag named tname and smlChildTableName=tname, inserting meters.current 1648432611250 11.3 tname=cpu1 location=California.LosAngeles groupid=3 creates the table cpu1. Note that if multiple rows share the same tname but have different tag_set values, the tag_set of the first row that auto-created the table is used and the others are ignored
See the [OpenTSDB Telnet API documentation](http://opentsdb.net/docs/build/html/api_telnet/put.html).
## Sample Code

View File

@ -47,7 +47,10 @@ The OpenTSDB JSON protocol uses a JSON string to represent one or more rows of data
:::note
- For the JSON protocol, TDengine does not automatically convert every tag to NCHAR; strings are converted to NCHAR and numeric values are converted to DOUBLE.
- By default, the generated child table name is a unique ID produced by a rule. You can also set the smlChildTableName parameter in the client-side taos.cfg to use a tag value as the child table name; that tag value must be globally unique. For example, given a tag named tname and smlChildTableName=tname, inserting `"tags": { "host": "web02","dc": "lga","tname":"cpu1"}` creates the child table cpu1. Note that if multiple rows share the same tname but have different tag_set values, the tag_set of the first row that auto-created the table is used and the others are ignored
- Rules for generating child table names
- By default, the generated child table name is a unique ID produced by a rule.
- You can also set the smlAutoChildTableNameDelimiter parameter in the client-side taos.cfg to specify a delimiter that joins the tag values into the child table name. For example, with smlAutoChildTableNameDelimiter=- configured, inserting st,t0=cpu1,t1=4 c1=3 1626006833639000000 creates the child table cpu1-4.
- You can also set the smlChildTableName parameter in the client-side taos.cfg to use a tag value as the child table name; that tag value must be globally unique. For example, given a tag named tname and smlChildTableName=tname, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates the child table cpu1. Note that if multiple rows share the same tname but have different tag_set values, the tag_set of the first row that auto-created the table is used and the others are ignored. [TDengine Schemaless Writing Reference](/reference/schemaless/#无模式写入行协议)
:::

View File

@ -355,6 +355,7 @@ CREATE TOPIC topic_name [with meta] AS DATABASE db_name;
| `enable.auto.commit` | boolean | Whether to enable automatic offset commits; true: commit automatically, the client application does not need to commit; false: the client application must commit by itself | Default value is true |
| `auto.commit.interval.ms` | integer | Interval for automatically committing consumption offsets, in milliseconds | Default value is 5000 |
| `msg.with.table.name` | boolean | Whether to allow parsing the table name from the message; not applicable to column subscriptions (for column subscriptions, tbname can be written as a column in the subquery statement). Deprecated since version 3.2.0.0 and always true | Disabled by default |
| `enable.replay` | boolean | Whether to enable the data replay function | Disabled by default |
The way these parameters are set depends on the programming language:
@ -370,7 +371,7 @@ tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
tmq_conf_set(conf, "group.id", "cgrpName");
tmq_conf_set(conf, "td.connect.user", "root");
tmq_conf_set(conf, "td.connect.pass", "taosdata");
tmq_conf_set(conf, "auto.offset.reset", "earliest");
tmq_conf_set(conf, "auto.offset.reset", "latest");
tmq_conf_set(conf, "msg.with.table.name", "true");
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
@ -400,7 +401,7 @@ properties.setProperty("group.id", "cgrpName");
properties.setProperty("bootstrap.servers", "127.0.0.1:6030");
properties.setProperty("td.connect.user", "root");
properties.setProperty("td.connect.pass", "taosdata");
properties.setProperty("auto.offset.reset", "earliest");
properties.setProperty("auto.offset.reset", "latest");
properties.setProperty("msg.with.table.name", "true");
properties.setProperty("value.deserializer", "com.taos.example.MetersDeserializer");
@ -420,7 +421,7 @@ public class MetersDeserializer extends ReferenceDeserializer<Meters> {
```go
conf := &tmq.ConfigMap{
"group.id": "test",
"auto.offset.reset": "earliest",
"auto.offset.reset": "latest",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
@ -440,7 +441,7 @@ consumer, err := NewConsumer(conf)
let mut dsn: Dsn = "taos://".parse()?;
dsn.set("group.id", "group1");
dsn.set("client.id", "test");
dsn.set("auto.offset.reset", "earliest");
dsn.set("auto.offset.reset", "latest");
let tmq = TmqBuilder::from_dsn(dsn)?;
@ -468,7 +469,7 @@ consumer = Consumer(
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"auto.offset.reset": "earliest",
"auto.offset.reset": "latest",
"msg.with.table.name": "true",
}
)
@ -488,7 +489,7 @@ let consumer = taos.consumer({
'group.id': 'tg2',
'td.connect.user': 'root',
'td.connect.pass': 'taosdata',
'auto.offset.reset': 'earliest',
'auto.offset.reset': 'latest',
'msg.with.table.name': 'true',
'td.connect.ip': '127.0.0.1',
'td.connect.port': '6030'
@ -511,7 +512,7 @@ var cfg = new ConsumerConfig
GourpId = "TDengine-TMQ-C#",
TDConnectUser = "root",
TDConnectPasswd = "taosdata",
AutoOffsetReset = "earliest"
AutoOffsetReset = "latest"
MsgWithTableName = "true",
TDConnectIp = "127.0.0.1",
TDConnectPort = "6030"
@ -527,6 +528,24 @@ var consumer = new ConsumerBuilder(cfg).Build();
The configuration above includes the consumer group ID. If multiple consumers specify the same consumer group ID, they automatically form a consumer group and share consumption progress.
Description of the data replay function:
- Subscription supports replay, which replays data according to the time at which it was originally written.
For example, suppose the following three rows were written at these times:
```sql
2023/09/22 00:00:00.000
2023/09/22 00:00:05.000
2023/09/22 00:00:08.000
```
During replay, the second row is returned 5 s after the first row, and the third row is returned 3 s after the second row.
- Only column subscriptions support data replay.
- Replay requires an independent timeline
- For a subtable or normal-table subscription, only one vnode holds the data, so a single timeline is guaranteed.
- For a supertable subscription, the database must contain only one vnode; otherwise an error is reported (because data subscribed from multiple vnodes is not on the same timeline).
- Supertable and database subscriptions do not support replay.
- The enable.replay parameter is added: true enables the subscription replay function; false (the default) disables it.
- Replay does not support saving consumption progress, so auto commit is automatically disabled when enable.replay = true.
- Because data replay itself requires processing time, replay timing has an error of a few tens of milliseconds.
## Subscribe to *topics*
A single consumer can subscribe to multiple topics at the same time.

View File

@ -1095,7 +1095,8 @@ TaosConsumer consumer = new TaosConsumer<>(config);
- httpConnectTimeout: connection creation timeout in ms, default 5000 ms. Effective only for WebSocket connections.
- messageWaitTimeout: data transfer timeout in ms, default 10000 ms. Effective only for WebSocket connections.
- httpPoolSize: maximum number of parallel requests on a single connection. Effective only for WebSocket connections.
For other parameters, see the [Consumer parameter list](../../develop/tmq#创建-consumer-以及consumer-group)
For other parameters, see the [Consumer parameter list](../../develop/tmq#创建-consumer-以及consumer-group). Note: starting with server version 3.2.0.0, the default value of auto.offset.reset for data subscription has changed.
#### Subscribe and Consume Data
@ -1193,7 +1194,7 @@ public abstract class ConsumerLoop {
config.setProperty("bootstrap.servers", "localhost:6030");
config.setProperty("td.connect.user", "root");
config.setProperty("td.connect.pass", "taosdata");
config.setProperty("auto.offset.reset", "earliest");
config.setProperty("auto.offset.reset", "latest");
config.setProperty("msg.with.table.name", "true");
config.setProperty("enable.auto.commit", "true");
config.setProperty("auto.commit.interval.ms", "1000");
@ -1201,7 +1202,6 @@ public abstract class ConsumerLoop {
config.setProperty("client.id", "1");
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
config.setProperty("value.deserializer.encoding", "UTF-8");
config.setProperty("experimental.snapshot.enable", "true");
this.consumer = new TaosConsumer<>(config);
this.topics = Collections.singletonList("topic_speed");
@ -1279,7 +1279,7 @@ public abstract class ConsumerLoop {
config.setProperty("bootstrap.servers", "localhost:6041");
config.setProperty("td.connect.user", "root");
config.setProperty("td.connect.pass", "taosdata");
config.setProperty("auto.offset.reset", "earliest");
config.setProperty("auto.offset.reset", "latest");
config.setProperty("msg.with.table.name", "true");
config.setProperty("enable.auto.commit", "true");
config.setProperty("auto.commit.interval.ms", "1000");
@ -1287,7 +1287,6 @@ public abstract class ConsumerLoop {
config.setProperty("client.id", "1");
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
config.setProperty("value.deserializer.encoding", "UTF-8");
config.setProperty("experimental.snapshot.enable", "true");
this.consumer = new TaosConsumer<>(config);
this.topics = Collections.singletonList("topic_speed");

View File

@ -797,7 +797,7 @@ The TDengine Go connector supports subscription; the APIs are as follows:
```go
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
"group.id": "test",
"auto.offset.reset": "earliest",
"auto.offset.reset": "latest",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
@ -873,6 +873,7 @@ package main
import (
"fmt"
"os"
"time"
"github.com/taosdata/driver-go/v3/af"
"github.com/taosdata/driver-go/v3/af/tmq"
@ -893,19 +894,16 @@ func main() {
if err != nil {
panic(err)
}
if err != nil {
panic(err)
}
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
"group.id": "test",
"auto.offset.reset": "earliest",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"td.connect.port": "6030",
"client.id": "test_tmq_client",
"enable.auto.commit": "false",
"msg.with.table.name": "true",
"group.id": "test",
"auto.offset.reset": "latest",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"td.connect.port": "6030",
"client.id": "test_tmq_client",
"enable.auto.commit": "false",
"msg.with.table.name": "true",
})
if err != nil {
panic(err)
@ -918,10 +916,16 @@ func main() {
if err != nil {
panic(err)
}
_, err = db.Exec("insert into example_tmq.t1 values(now,1)")
if err != nil {
panic(err)
}
go func() {
for {
_, err = db.Exec("insert into example_tmq.t1 values(now,1)")
if err != nil {
panic(err)
}
time.Sleep(time.Millisecond * 100)
}
}()
for i := 0; i < 5; i++ {
ev := consumer.Poll(500)
if ev != nil {
@ -975,6 +979,7 @@ package main
import (
"database/sql"
"fmt"
"time"
"github.com/taosdata/driver-go/v3/common"
tmqcommon "github.com/taosdata/driver-go/v3/common/tmq"
@ -998,7 +1003,7 @@ func main() {
"td.connect.pass": "taosdata",
"group.id": "example",
"client.id": "example_consumer",
"auto.offset.reset": "earliest",
"auto.offset.reset": "latest",
})
if err != nil {
panic(err)
@ -1007,29 +1012,34 @@ func main() {
if err != nil {
panic(err)
}
_, err = db.Exec("create table example_ws_tmq.t_all(ts timestamp," +
"c1 bool," +
"c2 tinyint," +
"c3 smallint," +
"c4 int," +
"c5 bigint," +
"c6 tinyint unsigned," +
"c7 smallint unsigned," +
"c8 int unsigned," +
"c9 bigint unsigned," +
"c10 float," +
"c11 double," +
"c12 binary(20)," +
"c13 nchar(20)" +
")")
if err != nil {
panic(err)
}
go func() {
_, err := db.Exec("create table example_ws_tmq.t_all(ts timestamp," +
"c1 bool," +
"c2 tinyint," +
"c3 smallint," +
"c4 int," +
"c5 bigint," +
"c6 tinyint unsigned," +
"c7 smallint unsigned," +
"c8 int unsigned," +
"c9 bigint unsigned," +
"c10 float," +
"c11 double," +
"c12 binary(20)," +
"c13 nchar(20)" +
")")
if err != nil {
panic(err)
}
_, err = db.Exec("insert into example_ws_tmq.t_all values(now,true,2,3,4,5,6,7,8,9,10.123,11.123,'binary','nchar')")
if err != nil {
panic(err)
for {
_, err = db.Exec("insert into example_ws_tmq.t_all values(now,true,2,3,4,5,6,7,8,9,10.123,11.123,'binary','nchar')")
if err != nil {
panic(err)
}
time.Sleep(time.Millisecond * 100)
}
}()
for i := 0; i < 5; i++ {
ev := consumer.Poll(500)

View File

@ -447,7 +447,7 @@ consumer.unsubscribe().await;
- `group.id`: within the same consumer group, messages are load-balanced and consumed on an at-least-once basis.
- `client.id`: optional subscriber client identifier.
- `auto.offset.reset`: optional initial subscription position; *earliest* subscribes from the beginning and *latest* subscribes only from the newest data. The default is to subscribe from the beginning. Note: this option takes effect only once per `group.id`.
- `auto.offset.reset`: optional initial subscription position; *earliest* subscribes from the beginning and *latest* subscribes only from the newest data. The default value depends on the TDengine version; see [Data Subscription](https://docs.taosdata.com/develop/tmq/) for details. Note: this option takes effect only once per `group.id`.
- `enable.auto.commit`: when set to `true`, automatic commit mode is enabled; it can be used when data consistency is not critical.
- `auto.commit.interval.ms`: interval for automatic commits.

View File

@ -648,7 +648,16 @@ The valid value of charset is UTF-8.
| Applicable | Client only |
| Meaning | Key of the custom child table name for schemaless writing |
| Type | String |
| Default Value | None |
| Default Value | None
### smlAutoChildTableNameDelimiter
| Attribute | Description |
| -------- | ------------------------------- |
| Applicable | Client only |
| Meaning | Delimiter between schemaless tag values; the joined result is used as the child table name |
| Type | String |
| Default Value | None |
### smlTagName

View File

@ -94,8 +94,11 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
:::tip
Note that tag_key1 and tag_key2 here do not follow the original order in which the user entered the tags; they are the result of sorting the tag names in ascending string order. So tag_key1 is not necessarily the first tag entered in the line protocol.
After sorting, the MD5 hash "md5_val" of the resulting string is computed and combined into the table name "t_md5_val". The "t_" prefix is fixed, and every table generated automatically through this mapping carries it.
:::tip
To let users specify the generated table name, the smlChildTableName parameter can be configured in taos.cfg.
:::tip
If you do not want the auto-generated table name, there are two ways to specify the child table name, and the first takes higher priority:
Configure the smlAutoChildTableNameDelimiter parameter in taos.cfg (any character except `@ # space carriage return newline tab`).
For example: with smlAutoChildTableNameDelimiter=- configured, inserting st,t0=cpu1,t1=4 c1=3 1626006833639000000 creates a table named cpu1-4 (see the sketch below).
Configure the smlChildTableName parameter in taos.cfg.
For example: with smlChildTableName=tname configured, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates a table named cpu1. Note that if multiple lines share the same tname but have different tag_set values, the tag_set of the first line is used when the table is auto-created and the others are ignored.
2. If the super table obtained by parsing the line protocol does not exist, it is created (manually creating super tables is not recommended, since inserts may otherwise behave unexpectedly).
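
As a rough illustration of the two override rules above, the following hedged Go sketch writes one InfluxDB-style line through the native connector. The `af.Open` and `InfluxDBInsertLines` helpers are assumed from the driver-go v3 `af` package, and the taos.cfg setting plus the pre-existing `test` database are assumptions made for this example:

```go
package main

import (
	"fmt"

	"github.com/taosdata/driver-go/v3/af"
)

func main() {
	// Assumes the client's taos.cfg sets smlAutoChildTableNameDelimiter to "-"
	// and that the database "test" already exists.
	conn, err := af.Open("", "root", "taosdata", "test", 0)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// The two tag values "cpu1" and "4" are joined with the configured delimiter,
	// so the child table created for this line is expected to be named cpu1-4.
	lines := []string{"st,t0=cpu1,t1=4 c1=3 1626006833639000000"}
	if err := conn.InfluxDBInsertLines(lines, "ns"); err != nil {
		panic(err)
	}
	fmt.Println("line inserted")
}
```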

View File

@ -55,8 +55,8 @@ typedef struct SSessionKey {
} SSessionKey;
typedef struct SVersionRange {
uint64_t minVer;
uint64_t maxVer;
int64_t minVer;
int64_t maxVer;
} SVersionRange;
static inline int winKeyCmprImpl(const void* pKey1, const void* pKey2) {

View File

@ -179,6 +179,7 @@ extern char tsUdfdLdLibPath[];
// schemaless
extern char tsSmlChildTableName[];
extern char tsSmlAutoChildTableNameDelimiter[];
extern char tsSmlTagName[];
extern bool tsSmlDot2Underline;
extern char tsSmlTsDefaultName[];

View File

@ -52,9 +52,9 @@ typedef enum {
int32_t grantCheck(EGrantType grant);
#ifndef TD_GRANT_OPTIMIZE
int32_t grantAlterActiveCode(const char* old, const char* new, char* out, int8_t type);
int32_t grantAlterActiveCode(const char* old, const char* newer, char* out, int8_t type);
#else
int32_t grantAlterActiveCode(int32_t did, const char* old, const char* new, char* out, int8_t type);
int32_t grantAlterActiveCode(int32_t did, const char* old, const char* newer, char* out, int8_t type);
#endif
#ifndef GRANTS_CFG
@ -114,4 +114,4 @@ int32_t grantAlterActiveCode(int32_t did, const char* old, const char* new, char
}
#endif
#endif /*_TD_COMMON_GRANT_H_*/
#endif /*_TD_COMMON_GRANT_H_*/

View File

@ -2365,17 +2365,6 @@ int32_t tSerializeSCMCreateStreamReq(void* buf, int32_t bufLen, const SCMCreateS
int32_t tDeserializeSCMCreateStreamReq(void* buf, int32_t bufLen, SCMCreateStreamReq* pReq);
void tFreeSCMCreateStreamReq(SCMCreateStreamReq* pReq);
typedef struct {
char name[TSDB_STREAM_FNAME_LEN];
int64_t streamId;
char* sql;
char* executorMsg;
} SMVCreateStreamReq, SMSCreateStreamReq;
typedef struct {
int64_t streamId;
} SMVCreateStreamRsp, SMSCreateStreamRsp;
enum {
TOPIC_SUB_TYPE__DB = 1,
TOPIC_SUB_TYPE__TABLE,
@ -2397,16 +2386,9 @@ int32_t tSerializeSCMCreateTopicReq(void* buf, int32_t bufLen, const SCMCreateTo
int32_t tDeserializeSCMCreateTopicReq(void* buf, int32_t bufLen, SCMCreateTopicReq* pReq);
void tFreeSCMCreateTopicReq(SCMCreateTopicReq* pReq);
typedef struct {
int64_t topicId;
} SCMCreateTopicRsp;
int32_t tSerializeSCMCreateTopicRsp(void* buf, int32_t bufLen, const SCMCreateTopicRsp* pRsp);
int32_t tDeserializeSCMCreateTopicRsp(void* buf, int32_t bufLen, SCMCreateTopicRsp* pRsp);
typedef struct {
int64_t consumerId;
} SMqConsumerLostMsg, SMqConsumerRecoverMsg, SMqConsumerClearMsg;
} SMqConsumerRecoverMsg, SMqConsumerClearMsg;
typedef struct {
int64_t consumerId;
@ -2418,6 +2400,7 @@ typedef struct {
int8_t autoCommit;
int32_t autoCommitInterval;
int8_t resetOffsetCfg;
int8_t enableReplay;
} SCMSubscribeReq;
static FORCE_INLINE int32_t tSerializeSCMSubscribeReq(void** buf, const SCMSubscribeReq* pReq) {
@ -2437,6 +2420,7 @@ static FORCE_INLINE int32_t tSerializeSCMSubscribeReq(void** buf, const SCMSubsc
tlen += taosEncodeFixedI8(buf, pReq->autoCommit);
tlen += taosEncodeFixedI32(buf, pReq->autoCommitInterval);
tlen += taosEncodeFixedI8(buf, pReq->resetOffsetCfg);
tlen += taosEncodeFixedI8(buf, pReq->enableReplay);
return tlen;
}
@ -2460,71 +2444,7 @@ static FORCE_INLINE void* tDeserializeSCMSubscribeReq(void* buf, SCMSubscribeReq
buf = taosDecodeFixedI8(buf, &pReq->autoCommit);
buf = taosDecodeFixedI32(buf, &pReq->autoCommitInterval);
buf = taosDecodeFixedI8(buf, &pReq->resetOffsetCfg);
return buf;
}
typedef struct SMqSubTopic {
int32_t vgId;
int64_t topicId;
SEpSet epSet;
} SMqSubTopic;
typedef struct {
int32_t topicNum;
SMqSubTopic topics[];
} SCMSubscribeRsp;
static FORCE_INLINE int32_t tSerializeSCMSubscribeRsp(void** buf, const SCMSubscribeRsp* pRsp) {
int32_t tlen = 0;
tlen += taosEncodeFixedI32(buf, pRsp->topicNum);
for (int32_t i = 0; i < pRsp->topicNum; i++) {
tlen += taosEncodeFixedI32(buf, pRsp->topics[i].vgId);
tlen += taosEncodeFixedI64(buf, pRsp->topics[i].topicId);
tlen += taosEncodeSEpSet(buf, &pRsp->topics[i].epSet);
}
return tlen;
}
static FORCE_INLINE void* tDeserializeSCMSubscribeRsp(void* buf, SCMSubscribeRsp* pRsp) {
buf = taosDecodeFixedI32(buf, &pRsp->topicNum);
for (int32_t i = 0; i < pRsp->topicNum; i++) {
buf = taosDecodeFixedI32(buf, &pRsp->topics[i].vgId);
buf = taosDecodeFixedI64(buf, &pRsp->topics[i].topicId);
buf = taosDecodeSEpSet(buf, &pRsp->topics[i].epSet);
}
return buf;
}
typedef struct {
int64_t topicId;
int64_t consumerId;
int64_t consumerGroupId;
int64_t offset;
char* sql;
char* logicalPlan;
char* physicalPlan;
} SMVSubscribeReq;
static FORCE_INLINE int32_t tSerializeSMVSubscribeReq(void** buf, SMVSubscribeReq* pReq) {
int32_t tlen = 0;
tlen += taosEncodeFixedI64(buf, pReq->topicId);
tlen += taosEncodeFixedI64(buf, pReq->consumerId);
tlen += taosEncodeFixedI64(buf, pReq->consumerGroupId);
tlen += taosEncodeFixedI64(buf, pReq->offset);
tlen += taosEncodeString(buf, pReq->sql);
tlen += taosEncodeString(buf, pReq->logicalPlan);
tlen += taosEncodeString(buf, pReq->physicalPlan);
return tlen;
}
static FORCE_INLINE void* tDeserializeSMVSubscribeReq(void* buf, SMVSubscribeReq* pReq) {
buf = taosDecodeFixedI64(buf, &pReq->topicId);
buf = taosDecodeFixedI64(buf, &pReq->consumerId);
buf = taosDecodeFixedI64(buf, &pReq->consumerGroupId);
buf = taosDecodeFixedI64(buf, &pReq->offset);
buf = taosDecodeString(buf, &pReq->sql);
buf = taosDecodeString(buf, &pReq->logicalPlan);
buf = taosDecodeString(buf, &pReq->physicalPlan);
buf = taosDecodeFixedI8(buf, &pReq->enableReplay);
return buf;
}
@ -3620,6 +3540,7 @@ typedef struct {
int64_t consumerId;
int64_t timeout;
STqOffsetVal reqOffset;
int8_t enableReplay;
} SMqPollReq;
int32_t tSerializeSMqPollReq(void* buf, int32_t bufLen, SMqPollReq* pReq);
@ -3679,6 +3600,7 @@ typedef struct {
SArray* blockData;
SArray* blockTbName;
SArray* blockSchema;
int64_t sleepTime;
} SMqDataRsp;
int32_t tEncodeMqDataRsp(SEncoder* pEncoder, const SMqDataRsp* pRsp);

View File

@ -299,8 +299,8 @@ enum { // WARN: new msg should be appended to segment tail
TD_DEF_MSG_TYPE(TDMT_SYNC_HEARTBEAT, "sync-heartbeat", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_HEARTBEAT_REPLY, "sync-heartbeat-reply", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_LOCAL_CMD, "sync-local-cmd", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_PRE_SNAPSHOT, "sync-pre-snapshot", NULL, NULL) // no longer used
TD_DEF_MSG_TYPE(TDMT_SYNC_PRE_SNAPSHOT_REPLY, "sync-pre-snapshot-reply", NULL, NULL) // no longer used
TD_DEF_MSG_TYPE(TDMT_SYNC_PREP_SNAPSHOT, "sync-prep-snapshot", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_PREP_SNAPSHOT_REPLY, "sync-prep-snapshot-reply", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_MAX_MSG, "sync-max", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_FORCE_FOLLOWER, "sync-force-become-follower", NULL, NULL)

View File

@ -227,6 +227,7 @@ typedef struct SStoreTqReader {
bool (*tqReaderNextBlockInWal)();
bool (*tqNextBlockImpl)(); // todo remove it
SSDataBlock* (*tqGetResultBlock)();
int64_t (*tqGetResultBlockTime)();
void (*tqReaderSetColIdList)();
int32_t (*tqReaderSetQueryTableList)();

View File

@ -35,6 +35,7 @@ typedef struct SRawExprNode {
char* p;
uint32_t n;
SNode* pNode;
bool isPseudoColumn;
} SRawExprNode;
typedef struct SDataType {

View File

@ -79,6 +79,9 @@ int32_t getSessionFlushedBuff(SStreamFileState* pFileState, SSessionKey* pKey, v
int32_t deleteSessionWinStateBuffFn(void* pBuff, const void *key, size_t keyLen);
int32_t deleteSessionWinStateBuffByPosFn(SStreamFileState* pFileState, SRowBuffPos* pPos);
SRowBuffPos* createSessionWinBuff(SStreamFileState* pFileState, SSessionKey* pKey, void* p, int32_t* pVLen);
int32_t recoverSesssion(SStreamFileState* pFileState, int64_t ckId);
void sessionWinStateClear(SStreamFileState* pFileState);
void sessionWinStateCleanup(void* pBuff);

View File

@ -36,8 +36,7 @@ extern "C" {
#define SYNC_DEL_WAL_MS (1000 * 60)
#define SYNC_ADD_QUORUM_COUNT 3
#define SYNC_VNODE_LOG_RETENTION (TSDB_SYNC_LOG_BUFFER_RETENTION + 1)
#define SNAPSHOT_MAX_CLOCK_SKEW_MS 1000 * 10
#define SNAPSHOT_WAIT_MS 1000 * 30
#define SNAPSHOT_WAIT_MS 1000 * 5
#define SYNC_MAX_RETRY_BACKOFF 5
#define SYNC_LOG_REPL_RETRY_WAIT_MS 100
@ -87,6 +86,11 @@ typedef enum {
TAOS_SYNC_ROLE_ERROR = 2,
} ESyncRole;
typedef enum {
SYNC_FSM_STATE_COMPLETE = 0,
SYNC_FSM_STATE_INCOMPLETE,
} ESyncFsmState;
typedef struct SNodeInfo {
int64_t clusterId;
int32_t nodeId;
@ -95,6 +99,12 @@ typedef struct SNodeInfo {
ESyncRole nodeRole;
} SNodeInfo;
typedef struct SSyncTLV {
int32_t typ;
int32_t len;
char val[];
} SSyncTLV;
typedef struct SSyncCfg {
int32_t totalReplicaNum;
int32_t replicaNum;
@ -139,10 +149,13 @@ typedef struct SReConfigCbMeta {
typedef struct SSnapshotParam {
SyncIndex start;
SyncIndex end;
SSyncTLV* data;
} SSnapshotParam;
typedef struct SSnapshot {
void* data;
int32_t type;
SSyncTLV* data;
ESyncFsmState state;
SyncIndex lastApplyIndex;
SyncTerm lastApplyTerm;
SyncIndex lastConfigIndex;
@ -171,7 +184,7 @@ typedef struct SSyncFSM {
void (*FpBecomeLearnerCb)(const struct SSyncFSM* pFsm);
int32_t (*FpGetSnapshot)(const struct SSyncFSM* pFsm, SSnapshot* pSnapshot, void* pReaderParam, void** ppReader);
void (*FpGetSnapshotInfo)(const struct SSyncFSM* pFsm, SSnapshot* pSnapshot);
int32_t (*FpGetSnapshotInfo)(const struct SSyncFSM* pFsm, SSnapshot* pSnapshot);
int32_t (*FpSnapshotStartRead)(const struct SSyncFSM* pFsm, void* pReaderParam, void** ppReader);
void (*FpSnapshotStopRead)(const struct SSyncFSM* pFsm, void* pReader);

View File

@ -163,6 +163,7 @@ int rpcReleaseHandle(void *handle, int8_t type); // just release conn to rpc in
// These functions will not be called in the child process
int rpcSendRequestWithCtx(void *thandle, const SEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid, SRpcCtx *ctx);
int rpcSendRecv(void *shandle, SEpSet *pEpSet, SRpcMsg *pReq, SRpcMsg *pRsp);
int rpcSendRecvWithTimeout(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *pRsp, int32_t timeoutMs);
int rpcSetDefaultAddr(void *thandle, const char *ip, const char *fqdn);
void *rpcAllocHandle();
void rpcSetIpWhite(void *thandl, void *arg);

View File

@ -557,7 +557,7 @@ int32_t* taosGetErrno();
// #define TSDB_CODE_SYN_TOO_MANY_FWDINFO TAOS_DEF_ERROR_CODE(0, 0x0904) // 2.x
// #define TSDB_CODE_SYN_MISMATCHED_PROTOCOL TAOS_DEF_ERROR_CODE(0, 0x0905) // 2.x
// #define TSDB_CODE_SYN_MISMATCHED_CLUSTERID TAOS_DEF_ERROR_CODE(0, 0x0906) // 2.x
// #define TSDB_CODE_SYN_MISMATCHED_SIGNATURE TAOS_DEF_ERROR_CODE(0, 0x0907) // 2.x
#define TSDB_CODE_SYN_MISMATCHED_SIGNATURE TAOS_DEF_ERROR_CODE(0, 0x0907)
// #define TSDB_CODE_SYN_INVALID_CHECKSUM TAOS_DEF_ERROR_CODE(0, 0x0908) // 2.x
// #define TSDB_CODE_SYN_INVALID_MSGLEN TAOS_DEF_ERROR_CODE(0, 0x0909) // 2.x
// #define TSDB_CODE_SYN_INVALID_MSGTYPE TAOS_DEF_ERROR_CODE(0, 0x090A) // 2.x
@ -799,6 +799,8 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TMQ_NEED_INITIALIZED TAOS_DEF_ERROR_CODE(0, 0x4010)
#define TSDB_CODE_TMQ_NO_COMMITTED TAOS_DEF_ERROR_CODE(0, 0x4011)
#define TSDB_CODE_TMQ_SAME_COMMITTED_VALUE TAOS_DEF_ERROR_CODE(0, 0x4012)
#define TSDB_CODE_TMQ_REPLAY_NEED_ONE_VGROUP TAOS_DEF_ERROR_CODE(0, 0x4013)
#define TSDB_CODE_TMQ_REPLAY_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x4014)
// stream
#define TSDB_CODE_STREAM_TASK_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x4100)

View File

@ -24,6 +24,9 @@
extern "C" {
#endif
#define HASH_FUNCTION_1 taosFastHash
#define HASH_FUNCTION_2 taosDJB2Hash
typedef struct SBloomFilter {
uint32_t hashFunctions;
uint64_t expectedEntries;
@ -37,8 +40,9 @@ typedef struct SBloomFilter {
} SBloomFilter;
SBloomFilter *tBloomFilterInit(uint64_t expectedEntries, double errorRate);
int32_t tBloomFilterPutHash(SBloomFilter *pBF, uint64_t hash1, uint64_t hash2);
int32_t tBloomFilterPut(SBloomFilter *pBF, const void *keyBuf, uint32_t len);
int32_t tBloomFilterNoContain(const SBloomFilter *pBF, const void *keyBuf, uint32_t len);
int32_t tBloomFilterNoContain(const SBloomFilter *pBF, uint64_t h1, uint64_t h2);
void tBloomFilterDestroy(SBloomFilter *pBF);
void tBloomFilterDump(const SBloomFilter *pBF);
bool tBloomFilterIsFull(const SBloomFilter *pBF);

View File

@ -151,6 +151,8 @@ void *taosCacheIterGetData(const SCacheIter *pIter, size_t *dataLen);
void *taosCacheIterGetKey(const SCacheIter *pIter, size_t *keyLen);
void taosCacheDestroyIter(SCacheIter *pIter);
void taosCacheTryExtendLifeSpan(SCacheObj *pCacheObj, void **data);
#ifdef __cplusplus
}
#endif

View File

@ -249,7 +249,7 @@ typedef enum ELogicConditionType {
#define TSDB_PASSWORD_LEN 32
#define TSDB_USET_PASSWORD_LEN 129
#define TSDB_VERSION_LEN 32
#define TSDB_LABEL_LEN 8
#define TSDB_LABEL_LEN 12
#define TSDB_JOB_STATUS_LEN 32
#define TSDB_CLUSTER_ID_LEN 40

View File

@ -26,9 +26,12 @@ typedef struct SScalableBf {
SArray *bfArray; // array of bloom filters
uint32_t growth;
uint64_t numBits;
_hash_fn_t hashFn1;
_hash_fn_t hashFn2;
} SScalableBf;
SScalableBf *tScalableBfInit(uint64_t expectedEntries, double errorRate);
int32_t tScalableBfPutNoCheck(SScalableBf *pSBf, const void *keyBuf, uint32_t len);
int32_t tScalableBfPut(SScalableBf *pSBf, const void *keyBuf, uint32_t len);
int32_t tScalableBfNoContain(const SScalableBf *pSBf, const void *keyBuf, uint32_t len);
void tScalableBfDestroy(SScalableBf *pSBf);

View File

@ -193,20 +193,46 @@ cleanup:
}
static int32_t smlParseTableName(SArray *tags, char *childTableName) {
size_t childTableNameLen = strlen(tsSmlChildTableName);
if (childTableNameLen <= 0) return TSDB_CODE_SUCCESS;
for (int i = 0; i < taosArrayGetSize(tags); i++) {
SSmlKv *tag = (SSmlKv *)taosArrayGet(tags, i);
// handle child table name
if (childTableNameLen == tag->keyLen && strncmp(tag->key, tsSmlChildTableName, tag->keyLen) == 0) {
memset(childTableName, 0, TSDB_TABLE_NAME_LEN);
strncpy(childTableName, tag->value, (tag->length < TSDB_TABLE_NAME_LEN ? tag->length : TSDB_TABLE_NAME_LEN));
if(tsSmlDot2Underline){
smlStrReplace(childTableName, strlen(childTableName));
bool autoChildName = false;
size_t delimiter = strlen(tsSmlAutoChildTableNameDelimiter);
if(delimiter > 0){
size_t totalNameLen = delimiter * (taosArrayGetSize(tags) - 1);
for (int i = 0; i < taosArrayGetSize(tags); i++) {
SSmlKv *tag = (SSmlKv *)taosArrayGet(tags, i);
totalNameLen += tag->length;
}
if(totalNameLen < TSDB_TABLE_NAME_LEN){
autoChildName = true;
}
}
if(autoChildName){
memset(childTableName, 0, TSDB_TABLE_NAME_LEN);
for (int i = 0; i < taosArrayGetSize(tags); i++) {
SSmlKv *tag = (SSmlKv *)taosArrayGet(tags, i);
strncat(childTableName, tag->value, tag->length);
if(i != taosArrayGetSize(tags) - 1){
strcat(childTableName, tsSmlAutoChildTableNameDelimiter);
}
}
if(tsSmlDot2Underline){
smlStrReplace(childTableName, strlen(childTableName));
}
}else{
size_t childTableNameLen = strlen(tsSmlChildTableName);
if (childTableNameLen <= 0) return TSDB_CODE_SUCCESS;
for (int i = 0; i < taosArrayGetSize(tags); i++) {
SSmlKv *tag = (SSmlKv *)taosArrayGet(tags, i);
// handle child table name
if (childTableNameLen == tag->keyLen && strncmp(tag->key, tsSmlChildTableName, tag->keyLen) == 0) {
memset(childTableName, 0, TSDB_TABLE_NAME_LEN);
strncpy(childTableName, tag->value, (tag->length < TSDB_TABLE_NAME_LEN ? tag->length : TSDB_TABLE_NAME_LEN));
if(tsSmlDot2Underline){
smlStrReplace(childTableName, strlen(childTableName));
}
taosArrayRemove(tags, i);
break;
}
taosArrayRemove(tags, i);
break;
}
}

View File

@ -853,7 +853,6 @@ static int64_t smlParseTSFromJSONObj(SSmlHandle *info, cJSON *root, int32_t toPr
size_t typeLen = strlen(type->valuestring);
if (typeLen == 1 && (type->valuestring[0] == 's' || type->valuestring[0] == 'S')) {
// seconds
// int8_t fromPrecision = TSDB_TIME_PRECISION_SECONDS;
if (smlFactorS[toPrecision] < INT64_MAX / tsInt64) {
return tsInt64 * smlFactorS[toPrecision];
}

View File

@ -62,7 +62,7 @@ struct tmq_conf_t {
int8_t resetOffset;
int8_t withTbName;
int8_t snapEnable;
// int32_t snapBatchSize;
int8_t replayEnable;
uint16_t port;
int32_t autoCommitInterval;
char* ip;
@ -81,6 +81,7 @@ struct tmq_t {
int8_t autoCommit;
int32_t autoCommitInterval;
int8_t resetOffsetCfg;
int8_t replayEnable;
uint64_t consumerId;
tmq_commit_cb* commitCb;
void* commitCbUserParam;
@ -89,19 +90,13 @@ struct tmq_t {
SRWLatch lock;
int8_t status;
int32_t epoch;
#if 0
int8_t epStatus;
int32_t epSkipCnt;
#endif
// poll info
int64_t pollCnt;
int64_t totalRows;
// bool needReportOffsetRows;
// timer
tmr_h hbLiveTimer;
tmr_h epTimer;
tmr_h reportTimer;
tmr_h commitTimer;
STscObj* pTscObj; // connection
SArray* clientTopics; // SArray<SMqClientTopic>
@ -149,6 +144,8 @@ typedef struct {
int32_t vgStatus;
int32_t vgSkipCnt; // here used to mark the slow vgroups
int64_t emptyBlockReceiveTs; // once empty block is received, idle for ignoreCnt then start to poll data
int64_t blockReceiveTs; // timestamp of the most recently received data block, used to pace polling in replay mode
int64_t blockSleepForReplay; // how long to wait before polling this vgroup again when replay is enabled
bool seekUpdated; // offset is updated by seek operator, therefore, not update by vnode rsp.
SEpSet epSet;
} SMqClientVg;
@ -356,24 +353,6 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
}
}
// if (strcasecmp(key, "experimental.snapshot.batch.size") == 0) {
// conf->snapBatchSize = taosStr2int64(value);
// return TMQ_CONF_OK;
// }
// if (strcasecmp(key, "enable.heartbeat.background") == 0) {
// if (strcasecmp(value, "true") == 0) {
// conf->hbBgEnable = true;
// return TMQ_CONF_OK;
// } else if (strcasecmp(value, "false") == 0) {
// conf->hbBgEnable = false;
// return TMQ_CONF_OK;
// } else {
// tscError("the default value of enable.heartbeat.background is true, can not be seted");
// return TMQ_CONF_INVALID;
// }
// }
if (strcasecmp(key, "td.connect.ip") == 0) {
conf->ip = taosStrdup(value);
return TMQ_CONF_OK;
@ -394,6 +373,18 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
return TMQ_CONF_OK;
}
if (strcasecmp(key, "enable.replay") == 0) {
if (strcasecmp(value, "true") == 0) {
conf->replayEnable = true;
return TMQ_CONF_OK;
} else if (strcasecmp(value, "false") == 0) {
conf->replayEnable = false;
return TMQ_CONF_OK;
} else {
return TMQ_CONF_INVALID;
}
}
if (strcasecmp(key, "td.connect.db") == 0) {
return TMQ_CONF_OK;
}
@ -729,6 +720,17 @@ void tmqAssignAskEpTask(void* param, void* tmrId) {
taosMemoryFree(param);
}
void tmqReplayTask(void* param, void* tmrId) {
int64_t refId = *(int64_t*)param;
tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId);
if(tmq == NULL) goto END;
tsem_post(&tmq->rspSem);
taosReleaseRef(tmqMgmt.rsetId, refId);
END:
taosMemoryFree(param);
}
void tmqAssignDelayedCommitTask(void* param, void* tmrId) {
int64_t refId = *(int64_t*)param;
generateTimedTask(refId, TMQ_DELAYED_TASK__COMMIT);
@ -1071,6 +1073,10 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
pTmq->commitCb = conf->commitCb;
pTmq->commitCbUserParam = conf->commitCbUserParam;
pTmq->resetOffsetCfg = conf->resetOffset;
pTmq->replayEnable = conf->replayEnable;
if(conf->replayEnable){
pTmq->autoCommit = false;
}
taosInitRWLatch(&pTmq->lock);
// assign consumerId
@ -1140,6 +1146,7 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
req.autoCommit = tmq->autoCommit;
req.autoCommitInterval = tmq->autoCommitInterval;
req.resetOffsetCfg = tmq->resetOffsetCfg;
req.enableReplay = tmq->replayEnable;
for (int32_t i = 0; i < sz; i++) {
char* topic = taosArrayGetP(container, i);
@ -1415,6 +1422,8 @@ static void initClientTopicFromRsp(SMqClientTopic* pTopic, SMqSubTopicEp* pTopic
.vgStatus = pInfo ? pInfo->vgStatus : TMQ_VG_STATUS__IDLE,
.vgSkipCnt = 0,
.emptyBlockReceiveTs = 0,
.blockReceiveTs = 0,
.blockSleepForReplay = 0,
.numOfRows = pInfo ? pInfo->numOfRows : 0,
};
@ -1526,6 +1535,7 @@ void tmqBuildConsumeReqImpl(SMqPollReq* pReq, tmq_t* tmq, int64_t timeout, SMqCl
pReq->head.vgId = pVg->vgId;
pReq->useSnapshot = tmq->useSnapshot;
pReq->reqId = generateRequestId();
pReq->enableReplay = tmq->replayEnable;
}
SMqMetaRspObj* tmqBuildMetaRspFromWrapper(SMqPollRspWrapper* pWrapper) {
@ -1686,6 +1696,12 @@ static int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
continue;
}
if (tmq->replayEnable && taosGetTimestampMs() - pVg->blockReceiveTs < pVg->blockSleepForReplay) { // still within the replay pacing window
tscTrace("consumer:0x%" PRIx64 " epoch %d, vgId:%d idle for %" PRId64 "ms before starting the next poll in replay mode", tmq->consumerId,
tmq->epoch, pVg->vgId, pVg->blockSleepForReplay);
continue;
}
int32_t vgStatus = atomic_val_compare_exchange_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE, TMQ_VG_STATUS__WAIT);
if (vgStatus == TMQ_VG_STATUS__WAIT) {
int32_t vgSkipCnt = atomic_add_fetch_32(&pVg->vgSkipCnt, 1);
@ -1807,6 +1823,15 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout) {
SMqRspObj* pRsp = tmqBuildRspFromWrapper(pollRspWrapper, pVg, &numOfRows);
tmq->totalRows += numOfRows;
pVg->emptyBlockReceiveTs = 0;
if(tmq->replayEnable){
pVg->blockReceiveTs = taosGetTimestampMs();
pVg->blockSleepForReplay = pRsp->rsp.sleepTime;
if(pVg->blockSleepForReplay > 0){
int64_t* pRefId1 = taosMemoryMalloc(sizeof(int64_t));
*pRefId1 = tmq->refId;
taosTmrStart(tmqReplayTask, pVg->blockSleepForReplay, pRefId1, tmqMgmt.timer);
}
}
tscDebug("consumer:0x%" PRIx64 " process poll rsp, vgId:%d, offset:%s, blocks:%d, rows:%" PRId64
", vg total:%" PRId64 ", total:%" PRId64 ", reqId:0x%" PRIx64,
tmq->consumerId, pVg->vgId, buf, pDataRsp->blockNum, numOfRows, pVg->numOfRows, tmq->totalRows,

View File

@ -17,12 +17,11 @@ IF (TD_STORAGE)
ADD_DEFINITIONS(-D_STORAGE)
TARGET_LINK_LIBRARIES(common PRIVATE storage)
IF(${TD_LINUX})
IF(${BUILD_WITH_COS})
add_definitions(-DUSE_COS)
ENDIF(${BUILD_WITH_COS})
ENDIF(${TD_LINUX})
IF(${BUILD_WITH_S3})
add_definitions(-DUSE_S3)
ELSEIF(${BUILD_WITH_COS})
add_definitions(-DUSE_COS)
ENDIF()
ENDIF ()

View File

@ -15,12 +15,12 @@
#define _DEFAULT_SOURCE
#include "tglobal.h"
#include "defines.h"
#include "os.h"
#include "tconfig.h"
#include "tgrant.h"
#include "tlog.h"
#include "tmisce.h"
#include "defines.h"
#if defined(CUS_NAME) || defined(CUS_PROMPT) || defined(CUS_EMAIL)
#include "cus_name.h"
@ -118,7 +118,8 @@ bool tsSmlDot2Underline = true;
char tsSmlTsDefaultName[TSDB_COL_NAME_LEN] = "_ts";
char tsSmlTagName[TSDB_COL_NAME_LEN] = "_tag_null";
char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; // user defined child table name can be specified in tag value.
// If set to empty system will generate table name using MD5 hash.
char tsSmlAutoChildTableNameDelimiter[TSDB_TABLE_NAME_LEN] = "";
// If set to empty system will generate table name using MD5 hash.
// true means that the name and order of cols in each line are the same(only for influx protocol)
// bool tsSmlDataFormat = false;
// int32_t tsSmlBatchSize = 10000;
@ -221,7 +222,7 @@ float tsFPrecision = 1E-8; // float column precision
double tsDPrecision = 1E-16; // double column precision
uint32_t tsMaxRange = 500; // max quantization intervals
uint32_t tsCurRange = 100; // current quantization intervals
bool tsIfAdtFse = false; // ADT-FSE algorithm or original Huffman algorithm
bool tsIfAdtFse = false; // ADT-FSE algorithm or original Huffman algorithm
char tsCompressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR
// udf
@ -266,6 +267,9 @@ char tsS3BucketName[TSDB_FQDN_LEN] = "<bucketname>";
char tsS3AppId[TSDB_FQDN_LEN] = "<appid>";
int8_t tsS3Enabled = false;
int8_t tsS3Https = true;
char tsS3Hostname[TSDB_FQDN_LEN] = "<hostname>";
int32_t tsS3BlockSize = 4096; // number of tsdb pages
int32_t tsS3BlockCacheSize = 16; // number of blocks
@ -307,6 +311,14 @@ int32_t taosSetS3Cfg(SConfig *pCfg) {
tstrncpy(tsS3AccessKeySecret, colon + 1, TSDB_FQDN_LEN);
tstrncpy(tsS3Endpoint, cfgGetItem(pCfg, "s3Endpoint")->str, TSDB_FQDN_LEN);
tstrncpy(tsS3BucketName, cfgGetItem(pCfg, "s3BucketName")->str, TSDB_FQDN_LEN);
char *proto = strstr(tsS3Endpoint, "https://");
if (!proto) {
tsS3Https = false;
tstrncpy(tsS3Hostname, tsS3Endpoint + 7, TSDB_FQDN_LEN);
} else {
tstrncpy(tsS3Hostname, tsS3Endpoint + 8, TSDB_FQDN_LEN);
}
char *cos = strstr(tsS3Endpoint, "cos.");
if (cos) {
char *appid = strrchr(tsS3BucketName, '-');
@ -318,7 +330,7 @@ int32_t taosSetS3Cfg(SConfig *pCfg) {
}
}
if (tsS3BucketName[0] != '<' && tsDiskCfgNum > 1) {
#ifdef USE_COS
#if defined(USE_COS) || defined(USE_S3)
tsS3Enabled = true;
#endif
}
@ -439,7 +451,8 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "queryNodeChunkSize", tsQueryNodeChunkSize, 1024, 128 * 1024, CFG_SCOPE_CLIENT) != 0) return -1;
if (cfgAddBool(pCfg, "queryUseNodeAllocator", tsQueryUseNodeAllocator, CFG_SCOPE_CLIENT) != 0) return -1;
if (cfgAddBool(pCfg, "keepColumnName", tsKeepColumnName, CFG_SCOPE_CLIENT) != 0) return -1;
if (cfgAddString(pCfg, "smlChildTableName", "", CFG_SCOPE_CLIENT) != 0) return -1;
if (cfgAddString(pCfg, "smlChildTableName", tsSmlChildTableName, CFG_SCOPE_CLIENT) != 0) return -1;
if (cfgAddString(pCfg, "smlAutoChildTableNameDelimiter", tsSmlAutoChildTableNameDelimiter, CFG_SCOPE_CLIENT) != 0) return -1;
if (cfgAddString(pCfg, "smlTagName", tsSmlTagName, CFG_SCOPE_CLIENT) != 0) return -1;
if (cfgAddString(pCfg, "smlTsDefaultName", tsSmlTsDefaultName, CFG_SCOPE_CLIENT) != 0) return -1;
if (cfgAddBool(pCfg, "smlDot2Underline", tsSmlDot2Underline, CFG_SCOPE_CLIENT) != 0) return -1;
@ -931,6 +944,7 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
return -1;
}
tstrncpy(tsSmlAutoChildTableNameDelimiter, cfgGetItem(pCfg, "smlAutoChildTableNameDelimiter")->str, TSDB_TABLE_NAME_LEN);
tstrncpy(tsSmlChildTableName, cfgGetItem(pCfg, "smlChildTableName")->str, TSDB_TABLE_NAME_LEN);
tstrncpy(tsSmlTagName, cfgGetItem(pCfg, "smlTagName")->str, TSDB_COL_NAME_LEN);
tstrncpy(tsSmlTsDefaultName, cfgGetItem(pCfg, "smlTsDefaultName")->str, TSDB_COL_NAME_LEN);
@ -1088,7 +1102,6 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsIfAdtFse = cfgGetItem(pCfg, "IfAdtFse")->bval;
tstrncpy(tsCompressor, cfgGetItem(pCfg, "Compressor")->str, sizeof(tsCompressor));
tsDisableStream = cfgGetItem(pCfg, "disableStream")->bval;
tsStreamBufferSize = cfgGetItem(pCfg, "streamBufferSize")->i64;
@ -1396,6 +1409,8 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) {
cfgSetItem(pCfg, "secondEp", tsSecond, pSecondpItem->stype);
} else if (strcasecmp("smlChildTableName", name) == 0) {
tstrncpy(tsSmlChildTableName, cfgGetItem(pCfg, "smlChildTableName")->str, TSDB_TABLE_NAME_LEN);
} else if (strcasecmp("smlAutoChildTableNameDelimiter", name) == 0) {
tstrncpy(tsSmlAutoChildTableNameDelimiter, cfgGetItem(pCfg, "smlAutoChildTableNameDelimiter")->str, TSDB_TABLE_NAME_LEN);
} else if (strcasecmp("smlTagName", name) == 0) {
tstrncpy(tsSmlTagName, cfgGetItem(pCfg, "smlTagName")->str, TSDB_COL_NAME_LEN);
// } else if (strcasecmp("smlDataFormat", name) == 0) {
@ -1675,14 +1690,14 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
}
return;
}
/* cannot alter s3BlockSize
if (strcasecmp(option, "s3BlockSize") == 0) {
int32_t newS3BlockSize = atoi(value);
uInfo("s3BlockSize set from %d to %d", tsS3BlockSize, newS3BlockSize);
tsS3BlockSize = newS3BlockSize;
return;
}
*/
if (strcasecmp(option, "s3BlockCacheSize") == 0) {
int32_t newS3BlockCacheSize = atoi(value);
uInfo("s3BlockCacheSize set from %d to %d", tsS3BlockCacheSize, newS3BlockCacheSize);

View File

@ -4552,31 +4552,6 @@ void tFreeSCMCreateTopicReq(SCMCreateTopicReq *pReq) {
}
}
int32_t tSerializeSCMCreateTopicRsp(void *buf, int32_t bufLen, const SCMCreateTopicRsp *pRsp) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeI64(&encoder, pRsp->topicId) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tEncoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSCMCreateTopicRsp(void *buf, int32_t bufLen, SCMCreateTopicRsp *pRsp) {
SDecoder decoder = {0};
tDecoderInit(&decoder, buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeI64(&decoder, &pRsp->topicId) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
int32_t tSerializeSConnectReq(void *buf, int32_t bufLen, SConnectReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@ -6219,6 +6194,7 @@ int32_t tSerializeSMqPollReq(void *buf, int32_t bufLen, SMqPollReq *pReq) {
if (tEncodeI64(&encoder, pReq->consumerId) < 0) return -1;
if (tEncodeI64(&encoder, pReq->timeout) < 0) return -1;
if (tSerializeSTqOffsetVal(&encoder, &pReq->reqOffset) < 0) return -1;
if (tEncodeI8(&encoder, pReq->enableReplay) < 0) return -1;
tEndEncode(&encoder);
@ -6255,6 +6231,10 @@ int32_t tDeserializeSMqPollReq(void *buf, int32_t bufLen, SMqPollReq *pReq) {
if (tDecodeI64(&decoder, &pReq->timeout) < 0) return -1;
if (tDerializeSTqOffsetVal(&decoder, &pReq->reqOffset) < 0) return -1;
if (!tDecodeIsEnd(&decoder)) {
if (tDecodeI8(&decoder, &pReq->enableReplay) < 0) return -1;
}
tEndDecode(&decoder);
tDecoderClear(&decoder);
@ -8057,6 +8037,7 @@ int32_t tEncodeMqDataRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) {
}
}
}
if (tEncodeI64(pEncoder, pRsp->sleepTime) < 0) return -1;
return 0;
}
@ -8102,6 +8083,8 @@ int32_t tDecodeMqDataRsp(SDecoder *pDecoder, SMqDataRsp *pRsp) {
}
}
}
if (tDecodeI64(pDecoder, &pRsp->sleepTime) < 0) return -1;
return 0;
}

View File

@ -665,6 +665,9 @@ int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* durati
if (*unit == 'n' || *unit == 'y') {
return 0;
}
if(isdigit(*unit)) {
*unit = getPrecisionUnit(timePrecision);
}
return getDuration(*duration, *unit, duration, timePrecision);
}

View File

@ -160,7 +160,7 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) {
SEpSet epSet = {0};
dmGetMnodeEpSet(pMgmt->pData, &epSet);
rpcSendRecv(pMgmt->msgCb.clientRpc, &epSet, &rpcMsg, &rpcRsp);
rpcSendRecvWithTimeout(pMgmt->msgCb.clientRpc, &epSet, &rpcMsg, &rpcRsp, 5000);
if (rpcRsp.code != 0) {
dmRotateMnodeEpSet(pMgmt->pData);
char tbuf[256];

View File

@ -238,7 +238,7 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_SYNC_APPEND_ENTRIES_BATCH, mmPutMsgToSyncQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SYNC_APPEND_ENTRIES_REPLY, mmPutMsgToSyncQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SYNC_SNAPSHOT_SEND, mmPutMsgToSyncQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SYNC_PRE_SNAPSHOT, mmPutMsgToSyncQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SYNC_PREP_SNAPSHOT, mmPutMsgToSyncQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SYNC_FORCE_FOLLOWER_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
@ -246,7 +246,7 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_SYNC_HEARTBEAT, mmPutMsgToSyncRdQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SYNC_HEARTBEAT_REPLY, mmPutMsgToSyncRdQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SYNC_SNAPSHOT_RSP, mmPutMsgToSyncRdQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SYNC_PRE_SNAPSHOT_REPLY, mmPutMsgToSyncRdQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SYNC_PREP_SNAPSHOT_REPLY, mmPutMsgToSyncRdQueue, 1) == NULL) goto _OVER;
code = 0;

View File

@ -32,8 +32,13 @@ static int32_t mmRequire(const SMgmtInputOpt *pInput, bool *required) {
if (!option.deploy) {
*required = mmDeployRequired(pInput);
if (*required) {
dInfo("deploy mnode required. dnodeId:%d<=0, clusterId:%" PRId64 "<=0, localEp:%s==firstEp",
pInput->pData->dnodeId, pInput->pData->clusterId, tsLocalEp);
}
} else {
*required = true;
dInfo("deploy mnode required. option deploy:%d", option.deploy);
}
return 0;

View File

@ -56,6 +56,7 @@ typedef struct {
int32_t vgVersion;
int32_t refCount;
int8_t dropped;
int8_t failed;
int8_t disable;
int32_t diskPrimary;
int32_t toVgId;

View File

@ -30,9 +30,11 @@ void vmGetVnodeLoads(SVnodeMgmt *pMgmt, SMonVloadInfo *pInfo, bool isReset) {
if (ppVnode == NULL || *ppVnode == NULL) continue;
SVnodeObj *pVnode = *ppVnode;
SVnodeLoad vload = {0};
vnodeGetLoad(pVnode->pImpl, &vload);
if (isReset) vnodeResetLoad(pVnode->pImpl, &vload);
SVnodeLoad vload = {.vgId = pVnode->vgId};
if (!pVnode->failed) {
vnodeGetLoad(pVnode->pImpl, &vload);
if (isReset) vnodeResetLoad(pVnode->pImpl, &vload);
}
taosArrayPush(pInfo->pVloads, &vload);
pIter = taosHashIterate(pMgmt->hash, pIter);
}
@ -52,9 +54,11 @@ void vmGetVnodeLoadsLite(SVnodeMgmt *pMgmt, SMonVloadInfo *pInfo) {
if (ppVnode == NULL || *ppVnode == NULL) continue;
SVnodeObj *pVnode = *ppVnode;
SVnodeLoadLite vload = {0};
if (vnodeGetLoadLite(pVnode->pImpl, &vload) == 0) {
taosArrayPush(pInfo->pVloads, &vload);
if (!pVnode->failed) {
SVnodeLoadLite vload = {0};
if (vnodeGetLoadLite(pVnode->pImpl, &vload) == 0) {
taosArrayPush(pInfo->pVloads, &vload);
}
}
pIter = taosHashIterate(pMgmt->hash, pIter);
}
@ -278,7 +282,7 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
vmGenerateWrapperCfg(pMgmt, &req, &wrapperCfg);
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, req.vgId);
if (pVnode != NULL) {
if (pVnode != NULL && !pVnode->failed) {
dError("vgId:%d, already exist", req.vgId);
tFreeSCreateVnodeReq(&req);
vmReleaseVnode(pMgmt, pVnode);
@ -287,7 +291,9 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
return 0;
}
wrapperCfg.diskPrimary = vmAllocPrimaryDisk(pMgmt, vnodeCfg.vgId);
ASSERT(pVnode == NULL || pVnode->failed);
wrapperCfg.diskPrimary = pVnode ? pVnode->diskPrimary : vmAllocPrimaryDisk(pMgmt, vnodeCfg.vgId);
int32_t diskPrimary = wrapperCfg.diskPrimary;
snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, vnodeCfg.vgId);
@ -299,7 +305,7 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
goto _OVER;
}
SVnode *pImpl = vnodeOpen(path, diskPrimary, pMgmt->pTfs, pMgmt->msgCb);
SVnode *pImpl = vnodeOpen(path, diskPrimary, pMgmt->pTfs, pMgmt->msgCb, true);
if (pImpl == NULL) {
dError("vgId:%d, failed to open vnode since %s", req.vgId, terrstr());
code = terrno;
@ -364,9 +370,10 @@ int32_t vmProcessAlterVnodeTypeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
TMSG_INFO(pMsg->msgType));
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, req.vgId);
if (pVnode == NULL) {
if (pVnode == NULL || pVnode->failed) {
dError("vgId:%d, failed to alter vnode type since %s", req.vgId, terrstr());
terrno = TSDB_CODE_VND_NOT_EXIST;
if (pVnode) vmReleaseVnode(pMgmt, pVnode);
return -1;
}
@ -445,7 +452,7 @@ int32_t vmProcessAlterVnodeTypeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
}
dInfo("vgId:%d, begin to open vnode", vgId);
SVnode *pImpl = vnodeOpen(path, diskPrimary, pMgmt->pTfs, pMgmt->msgCb);
SVnode *pImpl = vnodeOpen(path, diskPrimary, pMgmt->pTfs, pMgmt->msgCb, false);
if (pImpl == NULL) {
dError("vgId:%d, failed to open vnode at %s since %s", vgId, path, terrstr());
return -1;
@ -481,9 +488,10 @@ int32_t vmProcessCheckLearnCatchupReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
req.vgId, TMSG_INFO(pMsg->msgType));
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, req.vgId);
if (pVnode == NULL) {
if (pVnode == NULL || pVnode->failed) {
dError("vgId:%d, failed to alter vnode type since %s", req.vgId, terrstr());
terrno = TSDB_CODE_VND_NOT_EXIST;
if (pVnode) vmReleaseVnode(pMgmt, pVnode);
return -1;
}
@ -523,9 +531,10 @@ int32_t vmProcessDisableVnodeWriteReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
dInfo("vgId:%d, vnode write disable:%d", req.vgId, req.disable);
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, req.vgId);
if (pVnode == NULL) {
if (pVnode == NULL || pVnode->failed) {
dError("vgId:%d, failed to disable write since %s", req.vgId, terrstr());
terrno = TSDB_CODE_VND_NOT_EXIST;
if (pVnode) vmReleaseVnode(pMgmt, pVnode);
return -1;
}
@ -555,9 +564,10 @@ int32_t vmProcessAlterHashRangeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
dInfo("vgId:%d, start to alter vnode hashrange:[%u, %u], dstVgId:%d", req.srcVgId, req.hashBegin, req.hashEnd,
req.dstVgId);
pVnode = vmAcquireVnode(pMgmt, srcVgId);
if (pVnode == NULL) {
if (pVnode == NULL || pVnode->failed) {
dError("vgId:%d, failed to alter hashrange since %s", srcVgId, terrstr());
terrno = TSDB_CODE_VND_NOT_EXIST;
if (pVnode) vmReleaseVnode(pMgmt, pVnode);
return -1;
}
@ -592,7 +602,7 @@ int32_t vmProcessAlterHashRangeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
}
dInfo("vgId:%d, open vnode", dstVgId);
SVnode *pImpl = vnodeOpen(dstPath, diskPrimary, pMgmt->pTfs, pMgmt->msgCb);
SVnode *pImpl = vnodeOpen(dstPath, diskPrimary, pMgmt->pTfs, pMgmt->msgCb, false);
if (pImpl == NULL) {
dError("vgId:%d, failed to open vnode at %s since %s", dstVgId, dstPath, terrstr());
@ -669,9 +679,10 @@ int32_t vmProcessAlterVnodeReplicaReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
}
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, vgId);
if (pVnode == NULL) {
if (pVnode == NULL || pVnode->failed) {
dError("vgId:%d, failed to alter replica since %s", vgId, terrstr());
terrno = TSDB_CODE_VND_NOT_EXIST;
if (pVnode) vmReleaseVnode(pMgmt, pVnode);
return -1;
}
@ -696,7 +707,7 @@ int32_t vmProcessAlterVnodeReplicaReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
}
dInfo("vgId:%d, begin to open vnode", vgId);
SVnode *pImpl = vnodeOpen(path, diskPrimary, pMgmt->pTfs, pMgmt->msgCb);
SVnode *pImpl = vnodeOpen(path, diskPrimary, pMgmt->pTfs, pMgmt->msgCb, false);
if (pImpl == NULL) {
dError("vgId:%d, failed to open vnode at %s since %s", vgId, path, terrstr());
return -1;
@ -848,14 +859,14 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_SYNC_APPEND_ENTRIES_BATCH, vmPutMsgToSyncQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SYNC_APPEND_ENTRIES_REPLY, vmPutMsgToSyncQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SYNC_SNAPSHOT_SEND, vmPutMsgToSyncQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SYNC_PRE_SNAPSHOT, vmPutMsgToSyncQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SYNC_PREP_SNAPSHOT, vmPutMsgToSyncQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SYNC_FORCE_FOLLOWER, vmPutMsgToSyncQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SYNC_TIMEOUT, vmPutMsgToSyncRdQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SYNC_HEARTBEAT, vmPutMsgToSyncRdQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SYNC_HEARTBEAT_REPLY, vmPutMsgToSyncRdQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SYNC_SNAPSHOT_RSP, vmPutMsgToSyncRdQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SYNC_PRE_SNAPSHOT_REPLY, vmPutMsgToSyncRdQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SYNC_PREP_SNAPSHOT_REPLY, vmPutMsgToSyncRdQueue, 0) == NULL) goto _OVER;
code = 0;

View File

@ -112,6 +112,7 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) {
pVnode->diskPrimary = pCfg->diskPrimary;
pVnode->refCount = 0;
pVnode->dropped = 0;
pVnode->failed = 0;
pVnode->path = taosStrdup(pCfg->path);
pVnode->pImpl = pImpl;
@ -121,11 +122,15 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) {
return -1;
}
if (vmAllocQueue(pMgmt, pVnode) != 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
taosMemoryFree(pVnode->path);
taosMemoryFree(pVnode);
return -1;
if (pImpl) {
if (vmAllocQueue(pMgmt, pVnode) != 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
taosMemoryFree(pVnode->path);
taosMemoryFree(pVnode);
return -1;
}
} else {
pVnode->failed = 1;
}
taosThreadRwlockWrlock(&pMgmt->lock);
@ -267,12 +272,14 @@ static void *vmOpenVnodeInThread(void *param) {
int32_t diskPrimary = pCfg->diskPrimary;
snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, pCfg->vgId);
SVnode *pImpl = vnodeOpen(path, diskPrimary, pMgmt->pTfs, pMgmt->msgCb);
SVnode *pImpl = vnodeOpen(path, diskPrimary, pMgmt->pTfs, pMgmt->msgCb, false);
if (pImpl == NULL) {
dError("vgId:%d, failed to open vnode by thread:%d since %s", pCfg->vgId, pThread->threadIndex, terrstr());
pThread->failed++;
continue;
if (terrno != TSDB_CODE_NEED_RETRY) {
pThread->failed++;
continue;
}
}
if (vmOpenVnode(pMgmt, pCfg, pImpl) != 0) {
@ -379,6 +386,7 @@ static void *vmCloseVnodeInThread(void *param) {
for (int32_t v = 0; v < pThread->vnodeNum; ++v) {
SVnodeObj *pVnode = pThread->ppVnodes[v];
if (pVnode->failed) continue;
char stepDesc[TSDB_STEP_DESC_LEN] = {0};
snprintf(stepDesc, TSDB_STEP_DESC_LEN, "vgId:%d, start to close, %d of %d have been closed", pVnode->vgId,
@ -473,7 +481,9 @@ static void vmCheckSyncTimeout(SVnodeMgmt *pMgmt) {
if (ppVnodes != NULL) {
for (int32_t i = 0; i < numOfVnodes; ++i) {
SVnodeObj *pVnode = ppVnodes[i];
vnodeSyncCheckTimeout(pVnode->pImpl);
if (!pVnode->failed) {
vnodeSyncCheckTimeout(pVnode->pImpl);
}
vmReleaseVnode(pMgmt, pVnode);
}
taosMemoryFree(ppVnodes);
@ -605,6 +615,12 @@ static void *vmRestoreVnodeInThread(void *param) {
for (int32_t v = 0; v < pThread->vnodeNum; ++v) {
SVnodeObj *pVnode = pThread->ppVnodes[v];
if (pVnode->failed) {
dError("vgId:%d, skip restoring vnode in failure mode.", pVnode->vgId);
continue;
}
ASSERT(pVnode->pImpl);
char stepDesc[TSDB_STEP_DESC_LEN] = {0};
snprintf(stepDesc, TSDB_STEP_DESC_LEN, "vgId:%d, start to restore, %d of %d have been restored", pVnode->vgId,

View File

@ -187,9 +187,9 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp
pHead->vgId = ntohl(pHead->vgId);
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, pHead->vgId);
if (pVnode == NULL) {
dGWarn("vgId:%d, msg:%p failed to put into vnode queue since %s, type:%s qtype:%d contLen:%d", pHead->vgId, pMsg,
terrstr(), TMSG_INFO(pMsg->msgType), qtype, pHead->contLen);
if (pVnode == NULL || pVnode->failed) {
dGDebug("vgId:%d, msg:%p failed to put into vnode queue since %s, type:%s qtype:%d contLen:%d", pHead->vgId, pMsg,
terrstr(), TMSG_INFO(pMsg->msgType), qtype, pHead->contLen);
terrno = (terrno != 0) ? terrno : -1;
return terrno;
}
@ -316,7 +316,7 @@ int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
int32_t vmGetQueueSize(SVnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype) {
int32_t size = -1;
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, vgId);
if (pVnode != NULL) {
if (pVnode != NULL && !pVnode->failed) {
switch (qtype) {
case WRITE_QUEUE:
size = taosQueueItemSize(pVnode->pWriteW.queue);
@ -339,8 +339,8 @@ int32_t vmGetQueueSize(SVnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype) {
default:
break;
}
vmReleaseVnode(pMgmt, pVnode);
}
if (pVnode) vmReleaseVnode(pMgmt, pVnode);
if (size < 0) {
dTrace("vgId:%d, can't get size from queue since %s, qtype:%d", vgId, terrstr(), qtype);
size = 0;

View File

@ -115,7 +115,9 @@ int32_t dmRunDnode(SDnode *pDnode);
int32_t dmInitServer(SDnode *pDnode);
void dmCleanupServer(SDnode *pDnode);
int32_t dmInitClient(SDnode *pDnode);
int32_t dmInitStatusClient(SDnode *pDnode);
void dmCleanupClient(SDnode *pDnode);
void dmCleanupStatusClient(SDnode *pDnode);
SMsgCb dmGetMsgcb(SDnode *pDnode);
int32_t dmInitMsgHandle(SDnode *pDnode);
int32_t dmProcessNodeMsg(SMgmtWrapper *pWrapper, SRpcMsg *pMsg);

View File

@ -20,8 +20,8 @@
#include "qworker.h"
#include "tstream.h"
#ifdef TD_TSZ
#include "tglobal.h"
#include "tcompression.h"
#include "tglobal.h"
#endif
int32_t dmInitDnode(SDnode *pDnode) {
@ -66,7 +66,7 @@ int32_t dmInitDnode(SDnode *pDnode) {
goto _OVER;
}
if(dmInitModule(pDnode) != 0) {
if (dmInitModule(pDnode) != 0) {
goto _OVER;
}
@ -91,6 +91,7 @@ void dmCleanupDnode(SDnode *pDnode) {
if (pDnode == NULL) return;
dmCleanupClient(pDnode);
dmCleanupStatusClient(pDnode);
dmCleanupServer(pDnode);
dmClearVars(pDnode);
rpcCleanup();

View File

@ -358,6 +358,50 @@ int32_t dmInitClient(SDnode *pDnode) {
dDebug("dnode rpc client is initialized");
return 0;
}
int32_t dmInitStatusClient(SDnode *pDnode) {
SDnodeTrans *pTrans = &pDnode->trans;
SRpcInit rpcInit = {0};
rpcInit.label = "DND-STATUS";
rpcInit.numOfThreads = 1;
rpcInit.cfp = (RpcCfp)dmProcessRpcMsg;
rpcInit.sessions = 1024;
rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.user = TSDB_DEFAULT_USER;
rpcInit.idleTime = tsShellActivityTimer * 1000;
rpcInit.parent = pDnode;
rpcInit.rfp = rpcRfp;
rpcInit.compressSize = tsCompressMsgSize;
rpcInit.retryMinInterval = tsRedirectPeriod;
rpcInit.retryStepFactor = tsRedirectFactor;
rpcInit.retryMaxInterval = tsRedirectMaxPeriod;
rpcInit.retryMaxTimeout = tsMaxRetryWaitTime;
rpcInit.failFastInterval = 5000; // interval threshold(ms)
rpcInit.failFastThreshold = 3; // failed threshold
rpcInit.ffp = dmFailFastFp;
int32_t connLimitNum = 100;
connLimitNum = TMAX(connLimitNum, 10);
connLimitNum = TMIN(connLimitNum, 500);
rpcInit.connLimitNum = connLimitNum;
rpcInit.connLimitLock = 1;
rpcInit.supportBatch = 1;
rpcInit.batchSize = 8 * 1024;
rpcInit.timeToGetConn = tsTimeToGetAvailableConn;
taosVersionStrToInt(version, &(rpcInit.compatibilityVer));
// pTrans->statusClientRpc = rpcOpen(&rpcInit);
// if (pTrans->statusClientRpc == NULL) {
// dError("failed to init dnode rpc status client");
// return -1;
// }
dDebug("dnode rpc status client is initialized");
return 0;
}
void dmCleanupClient(SDnode *pDnode) {
SDnodeTrans *pTrans = &pDnode->trans;
@ -367,6 +411,14 @@ void dmCleanupClient(SDnode *pDnode) {
dDebug("dnode rpc client is closed");
}
}
void dmCleanupStatusClient(SDnode *pDnode) {
SDnodeTrans *pTrans = &pDnode->trans;
// if (pTrans->statusClientRpc) {
// rpcClose(pTrans->statusClientRpc);
// pTrans->statusClientRpc = NULL;
// dDebug("dnode rpc status client is closed");
// }
}
int32_t dmInitServer(SDnode *pDnode) {
SDnodeTrans *pTrans = &pDnode->trans;

View File

@ -491,32 +491,6 @@ typedef struct {
char filterTb[TSDB_TABLE_NAME_LEN];
} SShowObj;
typedef struct {
int64_t id;
int8_t type;
int8_t replica;
int16_t numOfColumns;
int32_t rowSize;
int32_t numOfRows;
int32_t numOfReads;
int32_t payloadLen;
void* pIter;
SMnode* pMnode;
char db[TSDB_DB_FNAME_LEN];
int16_t offset[TSDB_MAX_COLUMNS];
int32_t bytes[TSDB_MAX_COLUMNS];
char payload[];
} SSysTableRetrieveObj;
typedef struct {
char key[TSDB_PARTITION_KEY_LEN];
int64_t dbUid;
int64_t offset;
} SMqOffsetObj;
int32_t tEncodeSMqOffsetObj(void** buf, const SMqOffsetObj* pOffset);
void* tDecodeSMqOffsetObj(void* buf, SMqOffsetObj* pOffset);
typedef struct {
char name[TSDB_TOPIC_FNAME_LEN];
char db[TSDB_DB_FNAME_LEN];

View File

@ -18,6 +18,7 @@
#include "mndPrivilege.h"
#include "mndVgroup.h"
#include "mndShow.h"
#include "mndDb.h"
#include "mndSubscribe.h"
#include "mndTopic.h"
#include "mndTrans.h"
@ -124,30 +125,55 @@ void mndRebCntDec() {
}
}
static int32_t validateTopics(STrans *pTrans, const SArray *pTopicList, SMnode *pMnode, const char *pUser) {
int32_t numOfTopics = taosArrayGetSize(pTopicList);
static int32_t validateTopics(STrans *pTrans, const SArray *pTopicList, SMnode *pMnode, const char *pUser, bool enableReplay) {
SMqTopicObj *pTopic = NULL;
int32_t code = 0;
int32_t numOfTopics = taosArrayGetSize(pTopicList);
for (int32_t i = 0; i < numOfTopics; i++) {
char *pOneTopic = taosArrayGetP(pTopicList, i);
SMqTopicObj *pTopic = mndAcquireTopic(pMnode, pOneTopic);
pTopic = mndAcquireTopic(pMnode, pOneTopic);
if (pTopic == NULL) { // terrno has been set by callee function
return -1;
code = -1;
goto FAILED;
}
if (mndCheckTopicPrivilege(pMnode, pUser, MND_OPER_SUBSCRIBE, pTopic) != 0) {
mndReleaseTopic(pMnode, pTopic);
return -1;
code = -1;
goto FAILED;
}
if(enableReplay){
if(pTopic->subType != TOPIC_SUB_TYPE__COLUMN){
code = TSDB_CODE_TMQ_REPLAY_NOT_SUPPORT;
goto FAILED;
}else if(pTopic->ntbUid == 0 && pTopic->ctbStbUid == 0) {
SDbObj *pDb = mndAcquireDb(pMnode, pTopic->db);
if (pDb == NULL) {
code = -1;
goto FAILED;
}
if (pDb->cfg.numOfVgroups != 1) {
mndReleaseDb(pMnode, pDb);
code = TSDB_CODE_TMQ_REPLAY_NEED_ONE_VGROUP;
goto FAILED;
}
mndReleaseDb(pMnode, pDb);
}
}
mndTransSetDbName(pTrans, pOneTopic, NULL);
if(mndTransCheckConflict(pMnode, pTrans) != 0){
mndReleaseTopic(pMnode, pTopic);
return -1;
code = -1;
goto FAILED;
}
mndReleaseTopic(pMnode, pTopic);
}
return 0;
FAILED:
mndReleaseTopic(pMnode, pTopic);
return code;
}
static int32_t mndProcessConsumerRecoverMsg(SRpcMsg *pMsg) {
@ -177,7 +203,7 @@ static int32_t mndProcessConsumerRecoverMsg(SRpcMsg *pMsg) {
if (pTrans == NULL) {
goto FAIL;
}
if(validateTopics(pTrans, pConsumer->assignedTopics, pMnode, pMsg->info.conn.user) != 0){
if(validateTopics(pTrans, pConsumer->assignedTopics, pMnode, pMsg->info.conn.user, false) != 0){
goto FAIL;
}
@ -697,7 +723,7 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
goto _over;
}
code = validateTopics(pTrans, pTopicList, pMnode, pMsg->info.conn.user);
code = validateTopics(pTrans, pTopicList, pMnode, pMsg->info.conn.user, subscribe.enableReplay);
if (code != TSDB_CODE_SUCCESS) {
goto _over;
}

View File

@ -330,24 +330,6 @@ void dumpSubscribe(SSdb *pSdb, SJson *json) {
}
}
void dumpOffset(SSdb *pSdb, SJson *json) {
void *pIter = NULL;
SJson *items = tjsonAddArrayToObject(json, "offsets");
while (1) {
SMqOffsetObj *pObj = NULL;
pIter = sdbFetch(pSdb, SDB_OFFSET, pIter, (void **)&pObj);
if (pIter == NULL) break;
SJson *item = tjsonCreateObject();
tjsonAddItemToArray(items, item);
tjsonAddStringToObject(item, "key", pObj->key);
tjsonAddStringToObject(item, "dbUid", i642str(pObj->dbUid));
tjsonAddStringToObject(item, "offset", i642str(pObj->offset));
sdbRelease(pSdb, pObj);
}
}
void dumpStream(SSdb *pSdb, SJson *json) {
void *pIter = NULL;
SJson *items = tjsonAddArrayToObject(json, "streams");
@ -608,7 +590,7 @@ void mndDumpSdb() {
dumpTopic(pSdb, json);
dumpConsumer(pSdb, json);
dumpSubscribe(pSdb, json);
dumpOffset(pSdb, json);
// dumpOffset(pSdb, json);
dumpStream(pSdb, json);
dumpAcct(pSdb, json);
dumpAuth(pSdb, json);

View File

@ -59,7 +59,7 @@ static SConnObj *mndCreateConn(SMnode *pMnode, const char *user, int8_t connType
int32_t pid, const char *app, int64_t startTime);
static void mndFreeConn(SConnObj *pConn);
static SConnObj *mndAcquireConn(SMnode *pMnode, uint32_t connId);
static void mndReleaseConn(SMnode *pMnode, SConnObj *pConn);
static void mndReleaseConn(SMnode *pMnode, SConnObj *pConn, bool extendLifespan);
static void *mndGetNextConn(SMnode *pMnode, SCacheIter *pIter);
static void mndCancelGetNextConn(SMnode *pMnode, void *pIter);
static int32_t mndProcessHeartBeatReq(SRpcMsg *pReq);
@ -79,7 +79,7 @@ int32_t mndInitProfile(SMnode *pMnode) {
// in ms
int32_t checkTime = tsShellActivityTimer * 2 * 1000;
pMgmt->connCache = taosCacheInit(TSDB_DATA_TYPE_UINT, checkTime, true, (__cache_free_fn_t)mndFreeConn, "conn");
pMgmt->connCache = taosCacheInit(TSDB_DATA_TYPE_UINT, checkTime, false, (__cache_free_fn_t)mndFreeConn, "conn");
if (pMgmt->connCache == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
mError("failed to alloc profile cache since %s", terrstr());
@ -185,11 +185,12 @@ static SConnObj *mndAcquireConn(SMnode *pMnode, uint32_t connId) {
return pConn;
}
static void mndReleaseConn(SMnode *pMnode, SConnObj *pConn) {
static void mndReleaseConn(SMnode *pMnode, SConnObj *pConn, bool extendLifespan) {
if (pConn == NULL) return;
mTrace("conn:%u, released from cache, data:%p", pConn->id, pConn);
SProfileMgmt *pMgmt = &pMnode->profileMgmt;
if (extendLifespan) taosCacheTryExtendLifeSpan(pMgmt->connCache, (void **)&pConn);
taosCacheRelease(pMgmt->connCache, (void **)&pConn, false);
}
@ -323,7 +324,7 @@ _OVER:
mndReleaseUser(pMnode, pUser);
mndReleaseDb(pMnode, pDb);
mndReleaseConn(pMnode, pConn);
mndReleaseConn(pMnode, pConn, true);
return code;
}
@ -485,7 +486,7 @@ static int32_t mndProcessQueryHeartBeat(SMnode *pMnode, SRpcMsg *pMsg, SClientHb
SQueryHbRspBasic *rspBasic = taosMemoryCalloc(1, sizeof(SQueryHbRspBasic));
if (rspBasic == NULL) {
mndReleaseConn(pMnode, pConn);
mndReleaseConn(pMnode, pConn, true);
terrno = TSDB_CODE_OUT_OF_MEMORY;
mError("user:%s, conn:%u failed to process hb while since %s", pConn->user, pBasic->connId, terrstr());
return -1;
@ -508,7 +509,7 @@ static int32_t mndProcessQueryHeartBeat(SMnode *pMnode, SRpcMsg *pMsg, SClientHb
mndCreateQnodeList(pMnode, &rspBasic->pQnodeList, -1);
mndReleaseConn(pMnode, pConn);
mndReleaseConn(pMnode, pConn, true);
hbRsp.query = rspBasic;
} else {

View File

@ -858,22 +858,10 @@ int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreat
}
return 0;
}
static int32_t mndGenIdxNameForFirstTag(char *fullname, char *dbname, char *tagname) {
char randStr[TSDB_COL_NAME_LEN] = {0};
int32_t left = TSDB_COL_NAME_LEN - strlen(tagname) - 1;
if (left <= 1) {
sprintf(fullname, "%s.%s", dbname, tagname);
} else {
int8_t start = left < 8 ? 0 : 8;
int8_t end = left >= 24 ? 24 : left - 1;
// gen rand str len [base:end]
// note: ignore rand performance issues
int64_t len = taosRand() % (end - start + 1) + start;
taosRandStr2(randStr, len);
sprintf(fullname, "%s.%s_%s", dbname, tagname, randStr);
}
return 0;
static int32_t mndGenIdxNameForFirstTag(char *fullname, char *dbname, char *stbname, char *tagname) {
SName name = {0};
tNameFromString(&name, stbname, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
return snprintf(fullname, TSDB_INDEX_FNAME_LEN, "%s.%s_%s", dbname, tagname, tNameGetTableName(&name));
}
static int32_t mndCreateStb(SMnode *pMnode, SRpcMsg *pReq, SMCreateStbReq *pCreate, SDbObj *pDb) {
@ -889,7 +877,7 @@ static int32_t mndCreateStb(SMnode *pMnode, SRpcMsg *pReq, SMCreateStbReq *pCrea
if (mndBuildStbFromReq(pMnode, &stbObj, pCreate, pDb) != 0) goto _OVER;
SSchema *pSchema = &(stbObj.pTags[0]);
mndGenIdxNameForFirstTag(fullIdxName, pDb->name, pSchema->name);
mndGenIdxNameForFirstTag(fullIdxName, pDb->name, stbObj.name, pSchema->name);
SSIdx idx = {0};
if (mndAcquireGlobalIdx(pMnode, fullIdxName, SDB_IDX, &idx) == 0 && idx.pIdx != NULL) {
terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST;

View File

@ -286,9 +286,10 @@ int32_t mndSyncGetSnapshot(const SSyncFSM *pFsm, SSnapshot *pSnapshot, void *pRe
return 0;
}
static void mndSyncGetSnapshotInfo(const SSyncFSM *pFsm, SSnapshot *pSnapshot) {
static int32_t mndSyncGetSnapshotInfo(const SSyncFSM *pFsm, SSnapshot *pSnapshot) {
SMnode *pMnode = pFsm->data;
sdbGetCommitInfo(pMnode->pSdb, &pSnapshot->lastApplyIndex, &pSnapshot->lastApplyTerm, &pSnapshot->lastConfigIndex);
return 0;
}
void mndRestoreFinish(const SSyncFSM *pFsm, const SyncIndex commitIdx) {

View File

@ -298,11 +298,6 @@ static int32_t mndTopicActionUpdate(SSdb *pSdb, SMqTopicObj *pOldTopic, SMqTopic
atomic_exchange_64(&pOldTopic->updateTime, pNewTopic->updateTime);
atomic_exchange_32(&pOldTopic->version, pNewTopic->version);
/*taosWLockLatch(&pOldTopic->lock);*/
// TODO handle update
/*taosWUnLockLatch(&pOldTopic->lock);*/
return 0;
}
@ -320,23 +315,6 @@ void mndReleaseTopic(SMnode *pMnode, SMqTopicObj *pTopic) {
sdbRelease(pSdb, pTopic);
}
static SDDropTopicReq *mndBuildDropTopicMsg(SMnode *pMnode, SVgObj *pVgroup, SMqTopicObj *pTopic) {
int32_t contLen = sizeof(SDDropTopicReq);
SDDropTopicReq *pDrop = taosMemoryCalloc(1, contLen);
if (pDrop == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
pDrop->head.contLen = htonl(contLen);
pDrop->head.vgId = htonl(pVgroup->vgId);
memcpy(pDrop->name, pTopic->name, TSDB_TOPIC_FNAME_LEN);
pDrop->tuid = htobe64(pTopic->uid);
return pDrop;
}
static int32_t mndCheckCreateTopicReq(SCMCreateTopicReq *pCreate) {
terrno = TSDB_CODE_MND_INVALID_TOPIC;

View File

@ -428,6 +428,8 @@ static int32_t mndTransActionInsert(SSdb *pSdb, STrans *pTrans) {
mInfo("trans:%d, perform insert action, row:%p stage:%s, callfunc:1, startFunc:%d", pTrans->id, pTrans,
mndTransStr(pTrans->stage), pTrans->startFunc);
taosThreadMutexInit(&pTrans->mutex, NULL);
if (pTrans->startFunc > 0) {
TransCbFp fp = mndTransGetCbFp(pTrans->startFunc);
if (fp) {
@ -543,10 +545,6 @@ STrans *mndAcquireTrans(SMnode *pMnode, int32_t transId) {
STrans *pTrans = sdbAcquire(pMnode->pSdb, SDB_TRANS, &transId);
if (pTrans == NULL) {
terrno = TSDB_CODE_MND_TRANS_NOT_EXIST;
} else {
#ifdef WINDOWS
taosThreadMutexInit(&pTrans->mutex, NULL);
#endif
}
return pTrans;
}

View File

@ -161,7 +161,37 @@ target_link_libraries(
PUBLIC index
)
if(${TD_LINUX})
if(${BUILD_S3})
if(${BUILD_WITH_S3})
target_include_directories(
vnode
PUBLIC "$ENV{HOME}/.cos-local.2/include"
)
set(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.2)
find_library(S3_LIBRARY s3)
find_library(CURL_LIBRARY curl)
find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 NO_DEFAULT_PATH)
find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 NO_DEFAULT_PATH)
target_link_libraries(
vnode
# s3
PUBLIC ${S3_LIBRARY}
PUBLIC ${CURL_LIBRARY}
PUBLIC ${SSL_LIBRARY}
PUBLIC ${CRYPTO_LIBRARY}
PUBLIC xml2
)
add_definitions(-DUSE_S3)
endif()
if(${BUILD_WITH_COS})
set(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/)
find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/)
@ -194,11 +224,10 @@ target_include_directories(
PUBLIC "$ENV{HOME}/.cos-local.1/include"
)
if(${BUILD_WITH_COS})
add_definitions(-DUSE_COS)
endif(${BUILD_WITH_COS})
endif(${TD_LINUX})
endif()
IF (TD_GRANT)
TARGET_LINK_LIBRARIES(vnode PUBLIC grant)

View File

@ -58,7 +58,7 @@ int32_t vnodeAlterHashRange(const char *srcPath, const char *dstPath, SAlterVnod
int32_t vnodeRestoreVgroupId(const char *srcPath, const char *dstPath, int32_t srcVgId, int32_t dstVgId,
int32_t diskPrimary, STfs *pTfs);
void vnodeDestroy(int32_t vgId, const char *path, STfs *pTfs);
SVnode *vnodeOpen(const char *path, int32_t diskPrimary, STfs *pTfs, SMsgCb msgCb);
SVnode *vnodeOpen(const char *path, int32_t diskPrimary, STfs *pTfs, SMsgCb msgCb, bool force);
void vnodePreClose(SVnode *pVnode);
void vnodePostClose(SVnode *pVnode);
void vnodeSyncCheckTimeout(SVnode *pVnode);
@ -69,7 +69,7 @@ int32_t vnodeBegin(SVnode *pVnode);
int32_t vnodeStart(SVnode *pVnode);
void vnodeStop(SVnode *pVnode);
int64_t vnodeGetSyncHandle(SVnode *pVnode);
void vnodeGetSnapshot(SVnode *pVnode, SSnapshot *pSnapshot);
int32_t vnodeGetSnapshot(SVnode *pVnode, SSnapshot *pSnapshot);
void vnodeGetInfo(void *pVnode, const char **dbname, int32_t *vgId, int64_t *numOfTables, int64_t *numOfNormalTables);
int32_t vnodeProcessCreateTSma(SVnode *pVnode, void *pCont, uint32_t contLen);
int32_t vnodeGetTableList(void *pVnode, int8_t type, SArray *pList);
@ -226,6 +226,7 @@ typedef struct STqReader {
int64_t cachedSchemaUid;
SSchemaWrapper *pSchemaWrapper;
SSDataBlock *pResBlock;
int64_t lastTs;
} STqReader;
STqReader *tqReaderOpen(SVnode *pVnode);
@ -244,6 +245,7 @@ bool tqNextBlockInWal(STqReader *pReader, const char *idstr);
bool tqNextBlockImpl(STqReader *pReader, const char *idstr);
SWalReader *tqGetWalReader(STqReader *pReader);
SSDataBlock *tqGetResultBlock(STqReader *pReader);
int64_t tqGetResultBlockTime(STqReader *pReader);
int32_t extractMsgFromWal(SWalReader *pReader, void **pItem, int64_t maxVer, const char *id);
int32_t tqReaderSetSubmitMsg(STqReader *pReader, void *msgStr, int32_t msgLen, int64_t ver);
@ -257,11 +259,11 @@ int32_t vnodeEnqueueStreamMsg(SVnode *pVnode, SRpcMsg *pMsg);
int32_t smaGetTSmaDays(SVnodeCfg *pCfg, void *pCont, uint32_t contLen, int32_t *days);
// SVSnapReader
int32_t vnodeSnapReaderOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapReader **ppReader);
int32_t vnodeSnapReaderOpen(SVnode *pVnode, SSnapshotParam *pParam, SVSnapReader **ppReader);
void vnodeSnapReaderClose(SVSnapReader *pReader);
int32_t vnodeSnapRead(SVSnapReader *pReader, uint8_t **ppData, uint32_t *nData);
// SVSnapWriter
int32_t vnodeSnapWriterOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapWriter **ppWriter);
int32_t vnodeSnapWriterOpen(SVnode *pVnode, SSnapshotParam *pParam, SVSnapWriter **ppWriter);
int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback, SSnapshot *pSnapshot);
int32_t vnodeSnapWrite(SVSnapWriter *pWriter, uint8_t *pData, uint32_t nData);

View File

@ -46,6 +46,7 @@ typedef struct STqOffsetStore STqOffsetStore;
// tqPush
#define STREAM_EXEC_EXTRACT_DATA_IN_WAL_ID (-1)
#define STREAM_EXEC_TASK_STATUS_CHECK_ID (-2)
#define IS_OFFSET_RESET_TYPE(_t) ((_t) < 0)
// tqExec
typedef struct {
@ -90,6 +91,10 @@ typedef struct {
STqExecHandle execHandle; // exec
SRpcMsg* msg;
tq_handle_status status;
// for replay
SSDataBlock* block;
int64_t blockTime;
} STqHandle;
struct STQ {
@ -107,17 +112,13 @@ struct STQ {
SStreamMeta* pStreamMeta;
};
typedef struct {
int32_t size;
} STqOffsetHead;
int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle);
int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle);
void tqDestroyTqHandle(void* data);
// tqRead
int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, STaosxRsp* pRsp, SMqMetaRsp* pMetaRsp, STqOffsetVal* offset);
int32_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset);
int32_t tqScanData(STQ* pTq, STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset, const SMqPollReq* pRequest);
int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, uint64_t reqId);
// tqExec

View File

@ -263,6 +263,7 @@ int32_t tsdbFSRollback(STsdb *pTsdb);
int32_t tsdbFSPrepareCommit(STsdb *pTsdb, STsdbFS *pFS);
int32_t tsdbFSRef(STsdb *pTsdb, STsdbFS *pFS);
void tsdbFSUnref(STsdb *pTsdb, STsdbFS *pFS);
void tsdbGetCurrentFName(STsdb *pTsdb, char *current, char *current_t);
int32_t tsdbFSUpsertFSet(STsdbFS *pFS, SDFileSet *pSet);
int32_t tsdbFSUpsertDelFile(STsdbFS *pFS, SDelFile *pDelFile);
@ -672,6 +673,42 @@ struct SDelFWriter {
typedef struct STFileSet STFileSet;
typedef TARRAY2(STFileSet *) TFileSetArray;
typedef struct STSnapRange STSnapRange;
typedef TARRAY2(STSnapRange *) TSnapRangeArray; // disjoint snap ranges
// util
int32_t tSerializeSnapRangeArray(void *buf, int32_t bufLen, TSnapRangeArray *pSnapR);
int32_t tDeserializeSnapRangeArray(void *buf, int32_t bufLen, TSnapRangeArray *pSnapR);
void tsdbSnapRangeArrayDestroy(TSnapRangeArray **ppSnap);
SHashObj *tsdbGetSnapRangeHash(TSnapRangeArray *pRanges);
// snap partition list
typedef TARRAY2(SVersionRange) SVerRangeList;
typedef struct STsdbSnapPartition STsdbSnapPartition;
typedef TARRAY2(STsdbSnapPartition *) STsdbSnapPartList;
// util
STsdbSnapPartList *tsdbSnapPartListCreate();
void tsdbSnapPartListDestroy(STsdbSnapPartList **ppList);
int32_t tSerializeTsdbSnapPartList(void *buf, int32_t bufLen, STsdbSnapPartList *pList);
int32_t tDeserializeTsdbSnapPartList(void *buf, int32_t bufLen, STsdbSnapPartList *pList);
int32_t tsdbSnapPartListToRangeDiff(STsdbSnapPartList *pList, TSnapRangeArray **ppRanges);
enum {
TSDB_SNAP_RANGE_TYP_HEAD = 0,
TSDB_SNAP_RANGE_TYP_DATA,
TSDB_SNAP_RANGE_TYP_SMA,
TSDB_SNAP_RANGE_TYP_TOMB,
TSDB_SNAP_RANGE_TYP_STT,
TSDB_SNAP_RANGE_TYP_MAX,
};
struct STsdbSnapPartition {
int64_t fid;
int8_t stat;
SVerRangeList verRanges[TSDB_SNAP_RANGE_TYP_MAX];
};
// snap read
struct STsdbReadSnap {
SMemTable *pMem;
SQueryNode *pNode;
@ -989,6 +1026,15 @@ struct STsdbFilterInfo {
TABLEID tbid;
};
typedef enum {
TSDB_FS_STATE_NORMAL = 0,
TSDB_FS_STATE_INCOMPLETE,
} ETsdbFsState;
// utils
ETsdbFsState tsdbSnapGetFsState(SVnode *pVnode);
int32_t tsdbSnapGetDetails(SVnode *pVnode, SSnapshot *pSnap);
#ifdef __cplusplus
}
#endif

View File

@ -202,7 +202,7 @@ typedef struct SMetaInfo {
int32_t metaGetInfo(SMeta* pMeta, int64_t uid, SMetaInfo* pInfo, SMetaReader* pReader);
// tsdb
int tsdbOpen(SVnode* pVnode, STsdb** ppTsdb, const char* dir, STsdbKeepCfg* pKeepCfg, int8_t rollback);
int tsdbOpen(SVnode* pVnode, STsdb** ppTsdb, const char* dir, STsdbKeepCfg* pKeepCfg, int8_t rollback, bool force);
int tsdbClose(STsdb** pTsdb);
int32_t tsdbBegin(STsdb* pTsdb);
// int32_t tsdbPrepareCommit(STsdb* pTsdb);
@ -267,7 +267,7 @@ int32_t tqProcessTaskScanHistoryFinishRsp(STQ* pTq, SRpcMsg* pMsg);
// sma
int32_t smaInit();
void smaCleanUp();
int32_t smaOpen(SVnode* pVnode, int8_t rollback);
int32_t smaOpen(SVnode* pVnode, int8_t rollback, bool force);
int32_t smaClose(SSma* pSma);
int32_t smaBegin(SSma* pSma);
int32_t smaPrepareAsyncCommit(SSma* pSma);
@ -295,11 +295,12 @@ int32_t metaSnapWriterOpen(SMeta* pMeta, int64_t sver, int64_t ever, SMetaSnapWr
int32_t metaSnapWrite(SMetaSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
int32_t metaSnapWriterClose(SMetaSnapWriter** ppWriter, int8_t rollback);
// STsdbSnapReader ========================================
int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, int8_t type, STsdbSnapReader** ppReader);
int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, int8_t type, void* pRanges,
STsdbSnapReader** ppReader);
int32_t tsdbSnapReaderClose(STsdbSnapReader** ppReader);
int32_t tsdbSnapRead(STsdbSnapReader* pReader, uint8_t** ppData);
// STsdbSnapWriter ========================================
int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapWriter** ppWriter);
int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, void* pRanges, STsdbSnapWriter** ppWriter);
int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, SSnapDataHdr* pHdr);
int32_t tsdbSnapWriterPrepareClose(STsdbSnapWriter* pWriter);
int32_t tsdbSnapWriterClose(STsdbSnapWriter** ppWriter, int8_t rollback);
@ -356,8 +357,9 @@ int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRSmaSnapRead
int32_t rsmaSnapReaderClose(SRSmaSnapReader** ppReader);
int32_t rsmaSnapRead(SRSmaSnapReader* pReader, uint8_t** ppData);
// SRSmaSnapWriter ========================================
int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRSmaSnapWriter** ppWriter);
int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, void** ppRanges, SRSmaSnapWriter** ppWriter);
int32_t rsmaSnapWrite(SRSmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
int32_t rsmaSnapWriterPrepareClose(SRSmaSnapWriter* pWriter);
int32_t rsmaSnapWriterClose(SRSmaSnapWriter** ppWriter, int8_t rollback);
typedef struct {
@ -497,6 +499,7 @@ struct SSma {
#define SMA_RSMA_TSDB0(s) ((s)->pVnode->pTsdb)
#define SMA_RSMA_TSDB1(s) ((s)->pRSmaTsdb[TSDB_RETENTION_L0])
#define SMA_RSMA_TSDB2(s) ((s)->pRSmaTsdb[TSDB_RETENTION_L1])
#define SMA_RSMA_GET_TSDB(pVnode, level) ((level == 0) ? pVnode->pTsdb : pVnode->pSma->pRSmaTsdb[level - 1])
// sma
void smaHandleRes(void* pVnode, int64_t smaId, const SArray* data);

View File

@ -30,7 +30,7 @@ static int32_t rsmaRestore(SSma *pSma);
pKeepCfg->keepTimeOffset = 0; \
} while (0)
#define SMA_OPEN_RSMA_IMPL(v, l) \
#define SMA_OPEN_RSMA_IMPL(v, l, force) \
do { \
SRetention *r = (SRetention *)VND_RETENTIONS(v) + l; \
if (!RETENTION_VALID(r)) { \
@ -42,7 +42,7 @@ static int32_t rsmaRestore(SSma *pSma);
} \
code = smaSetKeepCfg(v, &keepCfg, pCfg, TSDB_TYPE_RSMA_L##l); \
TSDB_CHECK_CODE(code, lino, _exit); \
if (tsdbOpen(v, &SMA_RSMA_TSDB##l(pSma), VNODE_RSMA##l##_DIR, &keepCfg, rollback) < 0) { \
if (tsdbOpen(v, &SMA_RSMA_TSDB##l(pSma), VNODE_RSMA##l##_DIR, &keepCfg, rollback, force) < 0) { \
code = terrno; \
TSDB_CHECK_CODE(code, lino, _exit); \
} \
@ -118,7 +118,7 @@ int smaSetKeepCfg(SVnode *pVnode, STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int ty
return terrno;
}
int32_t smaOpen(SVnode *pVnode, int8_t rollback) {
int32_t smaOpen(SVnode *pVnode, int8_t rollback, bool force) {
int32_t code = 0;
int32_t lino = 0;
STsdbCfg *pCfg = &pVnode->config.tsdbCfg;
@ -139,11 +139,11 @@ int32_t smaOpen(SVnode *pVnode, int8_t rollback) {
STsdbKeepCfg keepCfg = {0};
for (int32_t i = 0; i < TSDB_RETENTION_MAX; ++i) {
if (i == TSDB_RETENTION_L0) {
SMA_OPEN_RSMA_IMPL(pVnode, 0);
SMA_OPEN_RSMA_IMPL(pVnode, 0, force);
} else if (i == TSDB_RETENTION_L1) {
SMA_OPEN_RSMA_IMPL(pVnode, 1);
SMA_OPEN_RSMA_IMPL(pVnode, 1, force);
} else if (i == TSDB_RETENTION_L2) {
SMA_OPEN_RSMA_IMPL(pVnode, 2);
SMA_OPEN_RSMA_IMPL(pVnode, 2, force);
}
}

View File

@ -48,7 +48,7 @@ int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRSmaSnapRead
// open rsma1/rsma2
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
if (pSma->pRSmaTsdb[i]) {
code = tsdbSnapReaderOpen(pSma->pRSmaTsdb[i], sver, ever, i == 0 ? SNAP_DATA_RSMA1 : SNAP_DATA_RSMA2,
code = tsdbSnapReaderOpen(pSma->pRSmaTsdb[i], sver, ever, (i == 0 ? SNAP_DATA_RSMA1 : SNAP_DATA_RSMA2), NULL,
&pReader->pDataReader[i]);
TSDB_CHECK_CODE(code, lino, _exit);
}
@ -128,7 +128,7 @@ struct SRSmaSnapWriter {
STsdbSnapWriter* pDataWriter[TSDB_RETENTION_L2];
};
int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRSmaSnapWriter** ppWriter) {
int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, void** ppRanges, SRSmaSnapWriter** ppWriter) {
int32_t code = 0;
int32_t lino = 0;
SVnode* pVnode = pSma->pVnode;
@ -147,7 +147,7 @@ int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRSmaSnapWrit
// rsma1/rsma2
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
if (pSma->pRSmaTsdb[i]) {
code = tsdbSnapWriterOpen(pSma->pRSmaTsdb[i], sver, ever, &pWriter->pDataWriter[i]);
code = tsdbSnapWriterOpen(pSma->pRSmaTsdb[i], sver, ever, ((void**)ppRanges)[i], &pWriter->pDataWriter[i]);
TSDB_CHECK_CODE(code, lino, _exit);
}
}
@ -165,6 +165,21 @@ _exit:
return code;
}
int32_t rsmaSnapWriterPrepareClose(SRSmaSnapWriter* pWriter) {
int32_t code = 0;
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
if (pWriter->pDataWriter[i]) {
code = tsdbSnapWriterPrepareClose(pWriter->pDataWriter[i]);
if (code) {
smaError("vgId:%d, failed to prepare close tsdbSnapWriter since %s. i: %d", SMA_VID(pWriter->pSma), terrstr(),
i);
return -1;
}
}
}
return code;
}
int32_t rsmaSnapWriterClose(SRSmaSnapWriter** ppWriter, int8_t rollback) {
int32_t code = 0;
int32_t lino = 0;

View File

@ -82,6 +82,9 @@ void tqDestroyTqHandle(void* data) {
taosMemoryFree(pData->msg);
pData->msg = NULL;
}
if (pData->block != NULL){
blockDataDestroy(pData->block);
}
}
static bool tqOffsetEqual(const STqOffset* pLeft, const STqOffset* pRight) {
@ -1197,6 +1200,9 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
pStreamTask->status.taskStatus = TASK_STATUS__HALT;
nextProcessedVer = walReaderGetCurrentVer(pStreamTask->exec.pWalReader);
if (nextProcessedVer == -1) {
nextProcessedVer = pStreamTask->dataRange.range.maxVer + 1;
}
tqDebug("s-task:%s level:%d nextProcessedVer:%" PRId64 ", sched-status:%d is halt by fill-history task:%s",
pStreamTask->id.idStr, pStreamTask->info.taskLevel, nextProcessedVer, pStreamTask->status.schedStatus,
@ -1972,4 +1978,4 @@ int32_t tqProcessTaskResetReq(STQ* pTq, SRpcMsg* pMsg) {
streamMetaReleaseTask(pMeta, pTask);
return TSDB_CODE_SUCCESS;
}
}

View File

@ -37,11 +37,9 @@ int32_t tqOffsetRestoreFromFile(STqOffsetStore* pStore, const char* fname) {
int32_t vgId = TD_VID(pStore->pTq->pVnode);
int64_t code = 0;
STqOffsetHead head = {0};
int32_t size = 0;
while (1) {
if ((code = taosReadFile(pFile, &head, sizeof(STqOffsetHead))) != sizeof(STqOffsetHead)) {
if ((code = taosReadFile(pFile, &size, INT_BYTES)) != INT_BYTES) {
if (code == 0) {
break;
} else {
@ -49,7 +47,6 @@ int32_t tqOffsetRestoreFromFile(STqOffsetStore* pStore, const char* fname) {
}
}
int32_t size = htonl(head.size);
void* pMemBuf = taosMemoryCalloc(1, size);
if (pMemBuf == NULL) {
tqError("vgId:%d failed to restore offset from file, since out of memory, malloc size:%d", vgId, size);
@ -175,11 +172,11 @@ int32_t tqOffsetCommitFile(STqOffsetStore* pStore) {
return -1;
}
int32_t totLen = sizeof(STqOffsetHead) + bodyLen;
int32_t totLen = INT_BYTES + bodyLen;
void* buf = taosMemoryCalloc(1, totLen);
void* abuf = POINTER_SHIFT(buf, sizeof(STqOffsetHead));
void* abuf = POINTER_SHIFT(buf, INT_BYTES);
((STqOffsetHead*)buf)->size = htonl(bodyLen);
*(int32_t*)buf = bodyLen;
SEncoder encoder;
tEncoderInit(&encoder, abuf, bodyLen);
tEncodeSTqOffset(&encoder, pOffset);
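After this change the offset file is framed as a plain host-endian int32 body length followed by the tEncodeSTqOffset payload, with no STqOffsetHead and no htonl conversion. A minimal standalone read-loop sketch of that framing (plain stdio stand-ins for the taos* file helpers, assuming INT_BYTES == sizeof(int32_t)):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: walk an offset file written in the new layout, i.e. a
 * host-endian int32 body length followed by one encoded offset body per
 * record. The real code uses taosReadFile/taosMemoryCalloc as in the hunk
 * above and decodes each body with tDecodeSTqOffset. */
static int readOffsetFile(const char *fname) {
  FILE *fp = fopen(fname, "rb");
  if (fp == NULL) return -1;

  int32_t size = 0;
  while (fread(&size, sizeof(size), 1, fp) == 1) {
    if (size <= 0) break;  // corrupt length prefix
    void *body = calloc(1, (size_t)size);
    if (body == NULL || fread(body, 1, (size_t)size, fp) != (size_t)size) {
      free(body);
      fclose(fp);
      return -1;
    }
    /* decode one offset record from (body, size) here */
    free(body);
  }

  fclose(fp);
  return 0;
}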

View File

@ -369,82 +369,56 @@ int32_t extractMsgFromWal(SWalReader* pReader, void** pItem, int64_t maxVer, con
// todo ignore the error in wal?
bool tqNextBlockInWal(STqReader* pReader, const char* id) {
SWalReader* pWalReader = pReader->pWalReader;
SSDataBlock* pDataBlock = NULL;
uint64_t st = taosGetTimestampMs();
while (1) {
SArray* pBlockList = pReader->submit.aSubmitTbData;
if (pBlockList == NULL || pReader->nextBlk >= taosArrayGetSize(pBlockList)) {
// try next message in wal file
// todo always retry to avoid read failure caused by wal file deletion
if (walNextValidMsg(pWalReader) < 0) {
return false;
}
void* pBody = POINTER_SHIFT(pWalReader->pHead->head.body, sizeof(SSubmitReq2Msg));
int32_t bodyLen = pWalReader->pHead->head.bodyLen - sizeof(SSubmitReq2Msg);
int64_t ver = pWalReader->pHead->head.version;
SDecoder decoder = {0};
tDecoderInit(&decoder, pBody, bodyLen);
{
int32_t nSubmitTbData = taosArrayGetSize(pReader->submit.aSubmitTbData);
for (int32_t i = 0; i < nSubmitTbData; i++) {
SSubmitTbData* pData = taosArrayGet(pReader->submit.aSubmitTbData, i);
if (pData->pCreateTbReq != NULL) {
taosArrayDestroy(pData->pCreateTbReq->ctb.tagName);
taosMemoryFreeClear(pData->pCreateTbReq);
}
pData->aRowP = taosArrayDestroy(pData->aRowP);
}
pReader->submit.aSubmitTbData = taosArrayDestroy(pReader->submit.aSubmitTbData);
}
if (tDecodeSubmitReq(&decoder, &pReader->submit) < 0) {
tDecoderClear(&decoder);
tqError("decode wal file error, msgLen:%d, ver:%" PRId64, bodyLen, ver);
return false;
}
tDecoderClear(&decoder);
pReader->nextBlk = 0;
// try next message in wal file
if (walNextValidMsg(pWalReader) < 0) {
return false;
}
void* pBody = POINTER_SHIFT(pWalReader->pHead->head.body, sizeof(SSubmitReq2Msg));
int32_t bodyLen = pWalReader->pHead->head.bodyLen - sizeof(SSubmitReq2Msg);
int64_t ver = pWalReader->pHead->head.version;
tqReaderSetSubmitMsg(pReader, pBody, bodyLen, ver);
pReader->nextBlk = 0;
int32_t numOfBlocks = taosArrayGetSize(pReader->submit.aSubmitTbData);
while (pReader->nextBlk < numOfBlocks) {
tqTrace("tq reader next data block %d/%d, len:%d %" PRId64 " %d", pReader->nextBlk,
numOfBlocks, pReader->msg.msgLen, pReader->msg.ver, pReader->nextBlk);
tqTrace("tq reader next data block %d/%d, len:%d %" PRId64, pReader->nextBlk,
numOfBlocks, pReader->msg.msgLen, pReader->msg.ver);
SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk);
if (pReader->tbIdHash == NULL) {
SSDataBlock* pRes = NULL;
int32_t code = tqRetrieveDataBlock(pReader, &pRes, NULL);
if (code == TSDB_CODE_SUCCESS && pRes->info.rows > 0) {
return true;
}
}
void* ret = taosHashGet(pReader->tbIdHash, &pSubmitTbData->uid, sizeof(int64_t));
if (ret != NULL) {
tqTrace("tq reader return submit block, uid:%" PRId64 ", ver:%" PRId64, pSubmitTbData->uid, pReader->msg.ver);
if (pReader->tbIdHash == NULL || taosHashGet(pReader->tbIdHash, &pSubmitTbData->uid, sizeof(int64_t)) != NULL) {
tqTrace("tq reader return submit block, uid:%" PRId64, pSubmitTbData->uid);
SSDataBlock* pRes = NULL;
int32_t code = tqRetrieveDataBlock(pReader, &pRes, NULL);
if (code == TSDB_CODE_SUCCESS && pRes->info.rows > 0) {
return true;
if(pDataBlock == NULL){
pDataBlock = createOneDataBlock(pRes, true);
}else{
blockDataMerge(pDataBlock, pRes);
}
}
} else {
pReader->nextBlk += 1;
tqTrace("tq reader discard submit block, uid:%" PRId64 ", continue", pSubmitTbData->uid);
}
}
qTrace("stream scan return empty, all %d submit blocks consumed, %s", numOfBlocks, id);
tDestroySubmitReq(&pReader->submit, TSDB_MSG_FLG_DECODE);
pReader->msg.msgStr = NULL;
if(pDataBlock != NULL){
blockDataCleanup(pReader->pResBlock);
copyDataBlock(pReader->pResBlock, pDataBlock);
blockDataDestroy(pDataBlock);
return true;
}else{
qTrace("stream scan return empty, all %d submit blocks consumed, %s", numOfBlocks, id);
}
if(taosGetTimestampMs() - st > 1000){
return false;
}
@ -478,6 +452,10 @@ SSDataBlock* tqGetResultBlock (STqReader* pReader) {
return pReader->pResBlock;
}
int64_t tqGetResultBlockTime(STqReader *pReader){
return pReader->lastTs;
}
bool tqNextBlockImpl(STqReader* pReader, const char* idstr) {
if (pReader->msg.msgStr == NULL) {
return false;
@ -644,7 +622,7 @@ int32_t tqRetrieveDataBlock(STqReader* pReader, SSDataBlock** pRes, const char*
int32_t sversion = pSubmitTbData->sver;
int64_t suid = pSubmitTbData->suid;
int64_t uid = pSubmitTbData->uid;
pReader->lastBlkUid = uid;
pReader->lastTs = pSubmitTbData->ctimeMs;
pBlock->info.id.uid = uid;
pBlock->info.version = pReader->msg.ver;
@ -786,7 +764,6 @@ int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas
}
int32_t sversion = pSubmitTbData->sver;
int64_t suid = pSubmitTbData->suid;
int64_t uid = pSubmitTbData->uid;
pReader->lastBlkUid = uid;

View File

@ -64,7 +64,23 @@ static int32_t tqAddTbNameToRsp(const STQ* pTq, int64_t uid, STaosxRsp* pRsp, in
return 0;
}
int32_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset) {
int32_t getDataBlock(qTaskInfo_t task, const STqHandle* pHandle, int32_t vgId, SSDataBlock** res){
uint64_t ts = 0;
qStreamSetOpen(task);
tqDebug("consumer:0x%" PRIx64 " vgId:%d, tmq one task start execute", pHandle->consumerId, vgId);
int32_t code = qExecTask(task, res, &ts);
if (code != TSDB_CODE_SUCCESS) {
tqError("consumer:0x%" PRIx64 " vgId:%d, task exec error since %s", pHandle->consumerId, vgId, tstrerror(code));
terrno = code;
return -1;
}
tqDebug("consumer:0x%" PRIx64 " vgId:%d tmq one task end executed, pDataBlock:%p", pHandle->consumerId, vgId, *res);
return 0;
}
int32_t tqScanData(STQ* pTq, STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset, const SMqPollReq* pRequest) {
const int32_t MAX_ROWS_TO_RETURN = 4096;
int32_t vgId = TD_VID(pTq->pVnode);
@ -80,34 +96,66 @@ int32_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffs
while (1) {
SSDataBlock* pDataBlock = NULL;
uint64_t ts = 0;
qStreamSetOpen(task);
tqDebug("consumer:0x%" PRIx64 " vgId:%d, tmq one task start execute", pHandle->consumerId, vgId);
code = qExecTask(task, &pDataBlock, &ts);
if (code != TSDB_CODE_SUCCESS) {
tqError("consumer:0x%" PRIx64 " vgId:%d, task exec error since %s", pHandle->consumerId, vgId, tstrerror(code));
terrno = code;
return -1;
}
tqDebug("consumer:0x%" PRIx64 " vgId:%d tmq one task end executed, pDataBlock:%p", pHandle->consumerId, vgId,
pDataBlock);
// current scan should be stopped asap, since the rebalance occurs.
if (pDataBlock == NULL) {
break;
}
code = tqAddBlockDataToRsp(pDataBlock, pRsp, pExec->numOfCols, pTq->pVnode->config.tsdbCfg.precision);
if (code != TSDB_CODE_SUCCESS) {
tqError("vgId:%d, failed to add block to rsp msg", vgId);
code = getDataBlock(task, pHandle, vgId, &pDataBlock);
if (code != 0){
return code;
}
pRsp->blockNum++;
totalRows += pDataBlock->info.rows;
if (totalRows >= MAX_ROWS_TO_RETURN) {
if(pRequest->enableReplay){
if(IS_OFFSET_RESET_TYPE(pRequest->reqOffset.type) && pHandle->block != NULL){
blockDataDestroy(pHandle->block);
pHandle->block = NULL;
}
if(pHandle->block == NULL){
if (pDataBlock == NULL) {
break;
}
STqOffsetVal offset = {0};
qStreamExtractOffset(task, &offset);
pHandle->block = createOneDataBlock(pDataBlock, true);
// pHandle->block = createDataBlock();
// copyDataBlock(pHandle->block, pDataBlock);
pHandle->blockTime = offset.ts;
code = getDataBlock(task, pHandle, vgId, &pDataBlock);
if (code != 0){
return code;
}
}
code = tqAddBlockDataToRsp(pHandle->block, pRsp, pExec->numOfCols, pTq->pVnode->config.tsdbCfg.precision);
if (code != TSDB_CODE_SUCCESS) {
tqError("vgId:%d, failed to add block to rsp msg", vgId);
return code;
}
pRsp->blockNum++;
if (pDataBlock == NULL) {
blockDataDestroy(pHandle->block);
pHandle->block = NULL;
}else{
copyDataBlock(pHandle->block, pDataBlock);
STqOffsetVal offset = {0};
qStreamExtractOffset(task, &offset);
pRsp->sleepTime = offset.ts - pHandle->blockTime;
pHandle->blockTime = offset.ts;
}
break;
}else{
if (pDataBlock == NULL) {
break;
}
code = tqAddBlockDataToRsp(pDataBlock, pRsp, pExec->numOfCols, pTq->pVnode->config.tsdbCfg.precision);
if (code != TSDB_CODE_SUCCESS) {
tqError("vgId:%d, failed to add block to rsp msg", vgId);
return code;
}
pRsp->blockNum++;
totalRows += pDataBlock->info.rows;
if (totalRows >= MAX_ROWS_TO_RETURN) {
break;
}
}
}
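The replay branch above serves exactly one cached block per poll: the handle keeps the previously fetched block together with the WAL offset timestamp it was read at, and reports the gap to the next block as pRsp->sleepTime so the consumer can reproduce the original ingestion pacing. A conceptual standalone sketch of that pacing bookkeeping (hypothetical ReplayState type, not the actual STqHandle):

#include <stdint.h>

/* Hypothetical stand-in for the two replay fields added to STqHandle. */
typedef struct {
  void   *cachedBlock;    // pHandle->block in the real code
  int64_t cachedBlockTs;  // pHandle->blockTime (WAL offset timestamp)
} ReplayState;

/* Serve the cached block, cache the next one, and return the time gap the
 * consumer should wait before the next poll (pRsp->sleepTime above). */
static int64_t replayStep(ReplayState *st, void *nextBlock, int64_t nextBlockTs) {
  int64_t sleepTime = nextBlockTs - st->cachedBlockTs;
  st->cachedBlock   = nextBlock;
  st->cachedBlockTs = nextBlockTs;
  return sleepTime;
}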

View File

@ -198,8 +198,6 @@ int32_t streamTaskSnapWriterClose(SStreamTaskWriter* pWriter, int8_t rollback) {
taosWLockLatch(&pTq->pStreamMeta->lock);
tqDebug("vgId:%d, vnode stream-task snapshot writer closed", TD_VID(pTq->pVnode));
taosWLockLatch(&pTq->pStreamMeta->lock);
if (rollback) {
tdbAbort(pTq->pStreamMeta->db, pTq->pStreamMeta->txn);
} else {
@ -208,12 +206,6 @@ int32_t streamTaskSnapWriterClose(SStreamTaskWriter* pWriter, int8_t rollback) {
code = tdbPostCommit(pTq->pStreamMeta->db, pTq->pStreamMeta->txn);
if (code) goto _err;
}
if (tdbBegin(pTq->pStreamMeta->db, &pTq->pStreamMeta->txn, tdbDefaultMalloc, tdbDefaultFree, NULL, 0) < 0) {
code = -1;
goto _err;
}
taosWUnLockLatch(&pTq->pStreamMeta->lock);
if (tdbBegin(pTq->pStreamMeta->db, &pTq->pStreamMeta->txn, tdbDefaultMalloc, tdbDefaultFree, NULL, 0) < 0) {
code = -1;

View File

@ -15,8 +15,6 @@
#include "tq.h"
#define IS_OFFSET_RESET_TYPE(_t) ((_t) < 0)
static int32_t tqSendMetaPollRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq* pReq,
const SMqMetaRsp* pRsp, int32_t vgId);
@ -152,7 +150,7 @@ static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle,
tqInitDataRsp(&dataRsp, *pOffset);
qSetTaskId(pHandle->execHandle.task, consumerId, pRequest->reqId);
int code = tqScanData(pTq, pHandle, &dataRsp, pOffset);
int code = tqScanData(pTq, pHandle, &dataRsp, pOffset, pRequest);
if (code != 0 && terrno != TSDB_CODE_WAL_LOG_NOT_EXIST) {
goto end;
}

View File

@ -128,8 +128,16 @@ static int32_t tsdbCommitTSData(SCommitter2 *committer) {
}
}
extern int8_t tsS3Enabled;
int32_t nlevel = tfsGetLevel(committer->tsdb->pVnode->pTfs);
bool skipRow = false;
if (tsS3Enabled && nlevel > 1 && committer->ctx->did.level == nlevel - 1) {
skipRow = true;
}
int64_t ts = TSDBROW_TS(&row->row);
if (ts > committer->ctx->maxKey) {
if (ts > committer->ctx->maxKey || skipRow) {
committer->ctx->nextKey = TMIN(committer->ctx->nextKey, ts);
code = tsdbIterMergerSkipTableData(committer->dataIterMerger, committer->ctx->tbid);
TSDB_CHECK_CODE(code, lino, _exit);

View File

@ -15,10 +15,6 @@
#include "tsdbDataFileRW.h"
extern int32_t tsdbFileWriteTombBlock(STsdbFD *fd, STombBlock *tombBlock, int8_t cmprAlg, int64_t *fileSize,
TTombBlkArray *tombBlkArray, uint8_t **bufArr);
extern int32_t tsdbFileWriteTombBlk(STsdbFD *fd, const TTombBlkArray *tombBlkArray, SFDataPtr *ptr, int64_t *fileSize);
// SDataFileReader =============================================
struct SDataFileReader {
SDataFileReaderConfig config[1];
@ -491,6 +487,9 @@ struct SDataFileWriter {
int32_t tombBlkArrayIdx;
STombBlock tombBlock[1];
int32_t tombBlockIdx;
// range
SVersionRange range;
SVersionRange tombRange;
} ctx[1];
STFile files[TSDB_FTYPE_MAX];
@ -589,6 +588,8 @@ static int32_t tsdbDataFileWriterDoOpen(SDataFileWriter *writer) {
.fid = writer->config->fid,
.cid = writer->config->cid,
.size = 0,
.minVer = VERSION_MAX,
.maxVer = VERSION_MIN,
};
// .data
@ -602,6 +603,8 @@ static int32_t tsdbDataFileWriterDoOpen(SDataFileWriter *writer) {
.fid = writer->config->fid,
.cid = writer->config->cid,
.size = 0,
.minVer = VERSION_MAX,
.maxVer = VERSION_MIN,
};
}
@ -616,6 +619,8 @@ static int32_t tsdbDataFileWriterDoOpen(SDataFileWriter *writer) {
.fid = writer->config->fid,
.cid = writer->config->cid,
.size = 0,
.minVer = VERSION_MAX,
.maxVer = VERSION_MIN,
};
}
@ -627,8 +632,14 @@ static int32_t tsdbDataFileWriterDoOpen(SDataFileWriter *writer) {
.fid = writer->config->fid,
.cid = writer->config->cid,
.size = 0,
.minVer = VERSION_MAX,
.maxVer = VERSION_MIN,
};
// range
writer->ctx->range = (SVersionRange){.minVer = VERSION_MAX, .maxVer = VERSION_MIN};
writer->ctx->tombRange = (SVersionRange){.minVer = VERSION_MAX, .maxVer = VERSION_MIN};
writer->ctx->opened = true;
_exit:
@ -638,8 +649,14 @@ _exit:
return code;
}
int32_t tsdbWriterUpdVerRange(SVersionRange *range, int64_t minVer, int64_t maxVer) {
range->minVer = TMIN(range->minVer, minVer);
range->maxVer = TMAX(range->maxVer, maxVer);
return 0;
}
int32_t tsdbFileWriteBrinBlock(STsdbFD *fd, SBrinBlock *brinBlock, int8_t cmprAlg, int64_t *fileSize,
TBrinBlkArray *brinBlkArray, uint8_t **bufArr) {
TBrinBlkArray *brinBlkArray, uint8_t **bufArr, SVersionRange *range) {
if (BRIN_BLOCK_SIZE(brinBlock) == 0) return 0;
int32_t code;
@ -678,6 +695,8 @@ int32_t tsdbFileWriteBrinBlock(STsdbFD *fd, SBrinBlock *brinBlock, int8_t cmprAl
}
}
tsdbWriterUpdVerRange(range, brinBlk->minVer, brinBlk->maxVer);
// write to file
for (int32_t i = 0; i < ARRAY_SIZE(brinBlock->dataArr1); i++) {
code = tsdbCmprData((uint8_t *)TARRAY2_DATA(brinBlock->dataArr1 + i), TARRAY2_DATA_LEN(brinBlock->dataArr1 + i),
@ -728,7 +747,8 @@ static int32_t tsdbDataFileWriteBrinBlock(SDataFileWriter *writer) {
int32_t lino = 0;
code = tsdbFileWriteBrinBlock(writer->fd[TSDB_FTYPE_HEAD], writer->brinBlock, writer->config->cmprAlg,
&writer->files[TSDB_FTYPE_HEAD].size, writer->brinBlkArray, writer->config->bufArr);
&writer->files[TSDB_FTYPE_HEAD].size, writer->brinBlkArray, writer->config->bufArr,
&writer->ctx->range);
TSDB_CHECK_CODE(code, lino, _exit);
_exit:
@ -795,6 +815,8 @@ static int32_t tsdbDataFileDoWriteBlockData(SDataFileWriter *writer, SBlockData
}
}
tsdbWriterUpdVerRange(&writer->ctx->range, record->minVer, record->maxVer);
// to .data file
int32_t sizeArr[5] = {0};
@ -1143,6 +1165,64 @@ int32_t tsdbFileWriteHeadFooter(STsdbFD *fd, int64_t *fileSize, const SHeadFoote
return 0;
}
int32_t tsdbFileWriteTombBlock(STsdbFD *fd, STombBlock *tombBlock, int8_t cmprAlg, int64_t *fileSize,
TTombBlkArray *tombBlkArray, uint8_t **bufArr, SVersionRange *range) {
int32_t code;
if (TOMB_BLOCK_SIZE(tombBlock) == 0) return 0;
STombBlk tombBlk[1] = {{
.dp[0] =
{
.offset = *fileSize,
.size = 0,
},
.minTbid =
{
.suid = TARRAY2_FIRST(tombBlock->suid),
.uid = TARRAY2_FIRST(tombBlock->uid),
},
.maxTbid =
{
.suid = TARRAY2_LAST(tombBlock->suid),
.uid = TARRAY2_LAST(tombBlock->uid),
},
.minVer = TARRAY2_FIRST(tombBlock->version),
.maxVer = TARRAY2_FIRST(tombBlock->version),
.numRec = TOMB_BLOCK_SIZE(tombBlock),
.cmprAlg = cmprAlg,
}};
for (int32_t i = 1; i < TOMB_BLOCK_SIZE(tombBlock); i++) {
if (tombBlk->minVer > TARRAY2_GET(tombBlock->version, i)) {
tombBlk->minVer = TARRAY2_GET(tombBlock->version, i);
}
if (tombBlk->maxVer < TARRAY2_GET(tombBlock->version, i)) {
tombBlk->maxVer = TARRAY2_GET(tombBlock->version, i);
}
}
tsdbWriterUpdVerRange(range, tombBlk->minVer, tombBlk->maxVer);
for (int32_t i = 0; i < ARRAY_SIZE(tombBlock->dataArr); i++) {
code = tsdbCmprData((uint8_t *)TARRAY2_DATA(&tombBlock->dataArr[i]), TARRAY2_DATA_LEN(&tombBlock->dataArr[i]),
TSDB_DATA_TYPE_BIGINT, tombBlk->cmprAlg, &bufArr[0], 0, &tombBlk->size[i], &bufArr[1]);
if (code) return code;
code = tsdbWriteFile(fd, *fileSize, bufArr[0], tombBlk->size[i]);
if (code) return code;
tombBlk->dp->size += tombBlk->size[i];
*fileSize += tombBlk->size[i];
}
code = TARRAY2_APPEND_PTR(tombBlkArray, tombBlk);
if (code) return code;
tTombBlockClear(tombBlock);
return 0;
}
static int32_t tsdbDataFileWriteHeadFooter(SDataFileWriter *writer) {
int32_t code = 0;
int32_t lino = 0;
@ -1164,7 +1244,8 @@ static int32_t tsdbDataFileDoWriteTombBlock(SDataFileWriter *writer) {
int32_t lino = 0;
code = tsdbFileWriteTombBlock(writer->fd[TSDB_FTYPE_TOMB], writer->tombBlock, writer->config->cmprAlg,
&writer->files[TSDB_FTYPE_TOMB].size, writer->tombBlkArray, writer->config->bufArr);
&writer->files[TSDB_FTYPE_TOMB].size, writer->tombBlkArray, writer->config->bufArr,
&writer->ctx->tombRange);
TSDB_CHECK_CODE(code, lino, _exit);
_exit:
@ -1174,6 +1255,21 @@ _exit:
return code;
}
int32_t tsdbFileWriteTombBlk(STsdbFD *fd, const TTombBlkArray *tombBlkArray, SFDataPtr *ptr, int64_t *fileSize) {
ptr->size = TARRAY2_DATA_LEN(tombBlkArray);
if (ptr->size > 0) {
ptr->offset = *fileSize;
int32_t code = tsdbWriteFile(fd, *fileSize, (const uint8_t *)TARRAY2_DATA(tombBlkArray), ptr->size);
if (code) {
return code;
}
*fileSize += ptr->size;
}
return 0;
}
static int32_t tsdbDataFileDoWriteTombBlk(SDataFileWriter *writer) {
ASSERT(TARRAY2_SIZE(writer->tombBlkArray) > 0);
@ -1306,6 +1402,12 @@ _exit:
return code;
}
int32_t tsdbTFileUpdVerRange(STFile *f, SVersionRange range) {
f->minVer = TMIN(f->minVer, range.minVer);
f->maxVer = TMAX(f->maxVer, range.maxVer);
return 0;
}
static int32_t tsdbDataFileWriterCloseCommit(SDataFileWriter *writer, TFileOpArray *opArr) {
int32_t code = 0;
int32_t lino = 0;
@ -1334,6 +1436,8 @@ static int32_t tsdbDataFileWriterCloseCommit(SDataFileWriter *writer, TFileOpArr
code = tsdbDataFileWriteHeadFooter(writer);
TSDB_CHECK_CODE(code, lino, _exit);
SVersionRange ofRange = {.minVer = VERSION_MAX, .maxVer = VERSION_MIN};
// .head
ftype = TSDB_FTYPE_HEAD;
if (writer->config->files[ftype].exist) {
@ -1342,6 +1446,7 @@ static int32_t tsdbDataFileWriterCloseCommit(SDataFileWriter *writer, TFileOpArr
.fid = writer->config->fid,
.of = writer->config->files[ftype].file,
};
ofRange = (SVersionRange){.minVer = op.of.minVer, .maxVer = op.of.maxVer};
code = TARRAY2_APPEND(opArr, op);
TSDB_CHECK_CODE(code, lino, _exit);
}
@ -1350,6 +1455,8 @@ static int32_t tsdbDataFileWriterCloseCommit(SDataFileWriter *writer, TFileOpArr
.fid = writer->config->fid,
.nf = writer->files[ftype],
};
tsdbTFileUpdVerRange(&op.nf, ofRange);
tsdbTFileUpdVerRange(&op.nf, writer->ctx->range);
code = TARRAY2_APPEND(opArr, op);
TSDB_CHECK_CODE(code, lino, _exit);
@ -1361,6 +1468,7 @@ static int32_t tsdbDataFileWriterCloseCommit(SDataFileWriter *writer, TFileOpArr
.fid = writer->config->fid,
.nf = writer->files[ftype],
};
tsdbTFileUpdVerRange(&op.nf, writer->ctx->range);
code = TARRAY2_APPEND(opArr, op);
TSDB_CHECK_CODE(code, lino, _exit);
} else if (writer->config->files[ftype].file.size != writer->files[ftype].size) {
@ -1370,6 +1478,7 @@ static int32_t tsdbDataFileWriterCloseCommit(SDataFileWriter *writer, TFileOpArr
.of = writer->config->files[ftype].file,
.nf = writer->files[ftype],
};
tsdbTFileUpdVerRange(&op.nf, writer->ctx->range);
code = TARRAY2_APPEND(opArr, op);
TSDB_CHECK_CODE(code, lino, _exit);
}
@ -1382,6 +1491,7 @@ static int32_t tsdbDataFileWriterCloseCommit(SDataFileWriter *writer, TFileOpArr
.fid = writer->config->fid,
.nf = writer->files[ftype],
};
tsdbTFileUpdVerRange(&op.nf, writer->ctx->range);
code = TARRAY2_APPEND(opArr, op);
TSDB_CHECK_CODE(code, lino, _exit);
} else if (writer->config->files[ftype].file.size != writer->files[ftype].size) {
@ -1391,6 +1501,7 @@ static int32_t tsdbDataFileWriterCloseCommit(SDataFileWriter *writer, TFileOpArr
.of = writer->config->files[ftype].file,
.nf = writer->files[ftype],
};
tsdbTFileUpdVerRange(&op.nf, writer->ctx->range);
code = TARRAY2_APPEND(opArr, op);
TSDB_CHECK_CODE(code, lino, _exit);
}
@ -1415,6 +1526,8 @@ static int32_t tsdbDataFileWriterCloseCommit(SDataFileWriter *writer, TFileOpArr
code = tsdbDataFileWriteTombFooter(writer);
TSDB_CHECK_CODE(code, lino, _exit);
SVersionRange ofRange = (SVersionRange){.minVer = VERSION_MAX, .maxVer = VERSION_MIN};
ftype = TSDB_FTYPE_TOMB;
if (writer->config->files[ftype].exist) {
op = (STFileOp){
@ -1422,6 +1535,7 @@ static int32_t tsdbDataFileWriterCloseCommit(SDataFileWriter *writer, TFileOpArr
.fid = writer->config->fid,
.of = writer->config->files[ftype].file,
};
ofRange = (SVersionRange){.minVer = op.of.minVer, .maxVer = op.of.maxVer};
code = TARRAY2_APPEND(opArr, op);
TSDB_CHECK_CODE(code, lino, _exit);
}
@ -1430,6 +1544,8 @@ static int32_t tsdbDataFileWriterCloseCommit(SDataFileWriter *writer, TFileOpArr
.fid = writer->config->fid,
.nf = writer->files[ftype],
};
tsdbTFileUpdVerRange(&op.nf, ofRange);
tsdbTFileUpdVerRange(&op.nf, writer->ctx->tombRange);
code = TARRAY2_APPEND(opArr, op);
TSDB_CHECK_CODE(code, lino, _exit);
}
@ -1598,6 +1714,7 @@ int32_t tsdbDataFileWriteBlockData(SDataFileWriter *writer, SBlockData *bData) {
) {
code = tsdbDataFileDoWriteBlockData(writer, bData);
TSDB_CHECK_CODE(code, lino, _exit);
} else {
for (int32_t i = 0; i < bData->nRow; ++i) {
TSDBROW row[1] = {tsdbRowFromBlockData(bData, i)};

View File

@ -95,10 +95,25 @@ int32_t tsdbDataFileWriteRow(SDataFileWriter *writer, SRowInfo *row);
int32_t tsdbDataFileWriteBlockData(SDataFileWriter *writer, SBlockData *bData);
int32_t tsdbDataFileFlush(SDataFileWriter *writer);
// head
int32_t tsdbFileWriteBrinBlock(STsdbFD *fd, SBrinBlock *brinBlock, int8_t cmprAlg, int64_t *fileSize,
TBrinBlkArray *brinBlkArray, uint8_t **bufArr, SVersionRange *range);
int32_t tsdbFileWriteBrinBlk(STsdbFD *fd, TBrinBlkArray *brinBlkArray, SFDataPtr *ptr, int64_t *fileSize);
int32_t tsdbFileWriteHeadFooter(STsdbFD *fd, int64_t *fileSize, const SHeadFooter *footer);
// tomb
int32_t tsdbDataFileWriteTombRecord(SDataFileWriter *writer, const STombRecord *record);
int32_t tsdbFileWriteTombBlock(STsdbFD *fd, STombBlock *tombBlock, int8_t cmprAlg, int64_t *fileSize,
TTombBlkArray *tombBlkArray, uint8_t **bufArr, SVersionRange *range);
int32_t tsdbFileWriteTombBlk(STsdbFD *fd, const TTombBlkArray *tombBlkArray, SFDataPtr *ptr, int64_t *fileSize);
int32_t tsdbFileWriteTombFooter(STsdbFD *fd, const STombFooter *footer, int64_t *fileSize);
// utils
int32_t tsdbWriterUpdVerRange(SVersionRange *range, int64_t minVer, int64_t maxVer);
int32_t tsdbTFileUpdVerRange(STFile *f, SVersionRange range);
#ifdef __cplusplus
}
#endif
#endif /*_TSDB_DATA_FILE_RW_H*/
#endif /*_TSDB_DATA_FILE_RW_H*/

View File

@ -38,13 +38,6 @@ typedef struct {
STFileHashEntry **buckets;
} STFileHash;
enum {
TSDB_FS_STATE_NONE = 0,
TSDB_FS_STATE_OPEN,
TSDB_FS_STATE_EDIT,
TSDB_FS_STATE_CLOSE,
};
static const char *gCurrentFname[] = {
[TSDB_FCURRENT] = "current.json",
[TSDB_FCURRENT_C] = "current.c.json",
@ -57,7 +50,7 @@ static int32_t create_fs(STsdb *pTsdb, STFileSystem **fs) {
fs[0]->tsdb = pTsdb;
tsem_init(&fs[0]->canEdit, 0, 1);
fs[0]->state = TSDB_FS_STATE_NONE;
fs[0]->fsstate = TSDB_FS_STATE_NORMAL;
fs[0]->neid = 0;
TARRAY2_INIT(fs[0]->fSetArr);
TARRAY2_INIT(fs[0]->fSetArrTmp);
@ -258,14 +251,6 @@ _exit:
return code;
}
static bool is_same_file(const STFile *f1, const STFile f2) {
if (f1->type != f2.type) return false;
if (f1->did.level != f2.did.level) return false;
if (f1->did.id != f2.did.id) return false;
if (f1->cid != f2.cid) return false;
return true;
}
static int32_t apply_commit(STFileSystem *fs) {
int32_t code = 0;
TFileSetArray *fsetArray1 = fs->fSetArr;
@ -504,6 +489,7 @@ static void tsdbFSDestroyFileObjHash(STFileHash *hash) {
static int32_t tsdbFSDoSanAndFix(STFileSystem *fs) {
int32_t code = 0;
int32_t lino = 0;
int32_t corrupt = false;
{ // scan each file
STFileSet *fset = NULL;
@ -511,8 +497,12 @@ static int32_t tsdbFSDoSanAndFix(STFileSystem *fs) {
// data file
for (int32_t ftype = 0; ftype < TSDB_FTYPE_MAX; ftype++) {
if (fset->farr[ftype] == NULL) continue;
code = tsdbFSDoScanAndFixFile(fs, fset->farr[ftype]);
TSDB_CHECK_CODE(code, lino, _exit);
STFileObj *fobj = fset->farr[ftype];
code = tsdbFSDoScanAndFixFile(fs, fobj);
if (code) {
fset->maxVerValid = (fobj->f->minVer <= fobj->f->maxVer) ? TMIN(fset->maxVerValid, fobj->f->minVer - 1) : -1;
corrupt = true;
}
}
// stt file
@ -521,12 +511,22 @@ static int32_t tsdbFSDoSanAndFix(STFileSystem *fs) {
STFileObj *fobj;
TARRAY2_FOREACH(lvl->fobjArr, fobj) {
code = tsdbFSDoScanAndFixFile(fs, fobj);
TSDB_CHECK_CODE(code, lino, _exit);
if (code) {
fset->maxVerValid = (fobj->f->minVer <= fobj->f->maxVer) ? TMIN(fset->maxVerValid, fobj->f->minVer - 1) : -1;
corrupt = true;
}
}
}
}
}
if (corrupt) {
tsdbError("vgId:%d, not to clear dangling files due to fset incompleteness", TD_VID(fs->tsdb->pVnode));
fs->fsstate = TSDB_FS_STATE_INCOMPLETE;
code = 0;
goto _exit;
}
{ // clear unreferenced files
STfsDir *dir = tfsOpendir(fs->tsdb->pVnode->pTfs, fs->tsdb->path);
if (dir == NULL) {
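When a file fails the scan, the hunk above no longer aborts: it pulls the file set's maxVerValid down to just below the damaged file's minVer (or to -1 when the file carries no version range), marks the whole file system TSDB_FS_STATE_INCOMPLETE, and skips the dangling-file cleanup. A standalone sketch of that clamping rule:

#include <stdint.h>

#define TMIN(a, b) ((a) < (b) ? (a) : (b))

/* Sketch of the rule used above: a damaged file makes every version at or
 * above its minVer suspect, so the set's valid ceiling drops to minVer - 1;
 * a file with no recorded range (minVer > maxVer) invalidates the set. */
static int64_t clampMaxVerValid(int64_t curMaxVerValid, int64_t fileMinVer, int64_t fileMaxVer) {
  return (fileMinVer <= fileMaxVer) ? TMIN(curMaxVerValid, fileMinVer - 1) : -1;
}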
@ -961,6 +961,13 @@ int32_t tsdbFSDestroyCopySnapshot(TFileSetArray **fsetArr) {
}
int32_t tsdbFSCreateRefSnapshot(STFileSystem *fs, TFileSetArray **fsetArr) {
taosThreadRwlockRdlock(&fs->tsdb->rwLock);
int32_t code = tsdbFSCreateRefSnapshotWithoutLock(fs, fsetArr);
taosThreadRwlockUnlock(&fs->tsdb->rwLock);
return code;
}
int32_t tsdbFSCreateRefSnapshotWithoutLock(STFileSystem *fs, TFileSetArray **fsetArr) {
int32_t code = 0;
STFileSet *fset, *fset1;
@ -991,6 +998,142 @@ int32_t tsdbFSDestroyRefSnapshot(TFileSetArray **fsetArr) {
return 0;
}
int32_t tsdbFSCreateCopyRangedSnapshot(STFileSystem *fs, TSnapRangeArray *pRanges, TFileSetArray **fsetArr,
TFileOpArray *fopArr) {
int32_t code = 0;
STFileSet *fset;
STFileSet *fset1;
SHashObj *pHash = NULL;
fsetArr[0] = taosMemoryMalloc(sizeof(TFileSetArray));
if (fsetArr == NULL) return TSDB_CODE_OUT_OF_MEMORY;
TARRAY2_INIT(fsetArr[0]);
if (pRanges) {
pHash = tsdbGetSnapRangeHash(pRanges);
if (pHash == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _out;
}
}
taosThreadRwlockRdlock(&fs->tsdb->rwLock);
TARRAY2_FOREACH(fs->fSetArr, fset) {
int64_t ever = VERSION_MAX;
if (pHash) {
int32_t fid = fset->fid;
STSnapRange *u = taosHashGet(pHash, &fid, sizeof(fid));
if (u) {
ever = u->sver - 1;
}
}
code = tsdbTFileSetFilteredInitDup(fs->tsdb, fset, ever, &fset1, fopArr);
if (code) break;
code = TARRAY2_APPEND(fsetArr[0], fset1);
if (code) break;
}
taosThreadRwlockUnlock(&fs->tsdb->rwLock);
_out:
if (code) {
TARRAY2_DESTROY(fsetArr[0], tsdbTFileSetClear);
taosMemoryFree(fsetArr[0]);
fsetArr[0] = NULL;
}
if (pHash) {
taosHashCleanup(pHash);
pHash = NULL;
}
return code;
}
SHashObj *tsdbGetSnapRangeHash(TSnapRangeArray *pRanges) {
int32_t capacity = TARRAY2_SIZE(pRanges) * 2;
SHashObj *pHash = taosHashInit(capacity, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_ENTRY_LOCK);
if (pHash == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
for (int32_t i = 0; i < TARRAY2_SIZE(pRanges); i++) {
STSnapRange *u = TARRAY2_GET(pRanges, i);
int32_t fid = u->fid;
int32_t code = taosHashPut(pHash, &fid, sizeof(fid), u, sizeof(*u));
ASSERT(code == 0);
tsdbDebug("range diff hash fid:%d, sver:%" PRId64 ", ever:%" PRId64, u->fid, u->sver, u->ever);
}
return pHash;
}
int32_t tsdbFSCreateRefRangedSnapshot(STFileSystem *fs, int64_t sver, int64_t ever, TSnapRangeArray *pRanges,
TSnapRangeArray **fsrArr) {
int32_t code = 0;
STFileSet *fset;
STSnapRange *fsr1 = NULL;
SHashObj *pHash = NULL;
fsrArr[0] = taosMemoryCalloc(1, sizeof(*fsrArr[0]));
if (fsrArr[0] == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _out;
}
tsdbInfo("pRanges size:%d", (pRanges == NULL ? 0 : TARRAY2_SIZE(pRanges)));
if (pRanges) {
pHash = tsdbGetSnapRangeHash(pRanges);
if (pHash == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _out;
}
}
taosThreadRwlockRdlock(&fs->tsdb->rwLock);
TARRAY2_FOREACH(fs->fSetArr, fset) {
int64_t sver1 = sver;
int64_t ever1 = ever;
if (pHash) {
int32_t fid = fset->fid;
STSnapRange *u = taosHashGet(pHash, &fid, sizeof(fid));
if (u) {
sver1 = u->sver;
tsdbDebug("range hash get fid:%d, sver:%" PRId64 ", ever:%" PRId64, u->fid, u->sver, u->ever);
}
}
if (sver1 > ever1) {
tsdbDebug("skip fid:%d, sver:%" PRId64 ", ever:%" PRId64, fset->fid, sver1, ever1);
continue;
}
tsdbDebug("fsrArr:%p, fid:%d, sver:%" PRId64 ", ever:%" PRId64, fsrArr, fset->fid, sver1, ever1);
code = tsdbTSnapRangeInitRef(fs->tsdb, fset, sver1, ever1, &fsr1);
if (code) break;
code = TARRAY2_APPEND(fsrArr[0], fsr1);
if (code) break;
fsr1 = NULL;
}
taosThreadRwlockUnlock(&fs->tsdb->rwLock);
if (code) {
tsdbTSnapRangeClear(&fsr1);
TARRAY2_DESTROY(fsrArr[0], tsdbTSnapRangeClear);
fsrArr[0] = NULL;
}
_out:
if (pHash) {
taosHashCleanup(pHash);
pHash = NULL;
}
return code;
}
const char *gFSBgTaskName[] = {NULL, "MERGE", "RETENTION", "COMPACT"};
static int32_t tsdbFSRunBgTask(void *arg) {
@ -1148,4 +1291,4 @@ int32_t tsdbFSEnableBgTask(STFileSystem *fs) {
fs->stop = false;
taosThreadMutexUnlock(fs->mutex);
return 0;
}
}

View File

@ -52,7 +52,15 @@ int32_t tsdbCloseFS(STFileSystem **fs);
int32_t tsdbFSCreateCopySnapshot(STFileSystem *fs, TFileSetArray **fsetArr);
int32_t tsdbFSDestroyCopySnapshot(TFileSetArray **fsetArr);
int32_t tsdbFSCreateRefSnapshot(STFileSystem *fs, TFileSetArray **fsetArr);
int32_t tsdbFSCreateRefSnapshotWithoutLock(STFileSystem *fs, TFileSetArray **fsetArr);
int32_t tsdbFSDestroyRefSnapshot(TFileSetArray **fsetArr);
int32_t tsdbFSCreateCopyRangedSnapshot(STFileSystem *fs, TSnapRangeArray *pExclude, TFileSetArray **fsetArr,
TFileOpArray *fopArr);
int32_t tsdbFSDestroyCopyRangedSnapshot(TFileSetArray **fsetArr, TFileOpArray *fopArr);
int32_t tsdbFSCreateRefRangedSnapshot(STFileSystem *fs, int64_t sver, int64_t ever, TSnapRangeArray *pRanges,
TSnapRangeArray **fsrArr);
int32_t tsdbFSDestroyRefRangedSnapshot(TSnapRangeArray **fsrArr);
// txn
int64_t tsdbFSAllocEid(STFileSystem *fs);
int32_t tsdbFSEditBegin(STFileSystem *fs, const TFileOpArray *opArray, EFEditT etype);
@ -68,6 +76,9 @@ int32_t tsdbFSEnableBgTask(STFileSystem *fs);
// other
int32_t tsdbFSGetFSet(STFileSystem *fs, int32_t fid, STFileSet **fset);
int32_t tsdbFSCheckCommit(STFileSystem *fs);
// utils
int32_t save_fs(const TFileSetArray *arr, const char *fname);
int32_t current_fname(STsdb *pTsdb, char *fname, EFCurrentT ftype);
struct STFSBgTask {
EFSBgTaskT type;
@ -91,7 +102,7 @@ struct STFSBgTask {
struct STFileSystem {
STsdb *tsdb;
tsem_t canEdit;
int32_t state;
int32_t fsstate;
int64_t neid;
EFEditT etype;
TFileSetArray fSetArr[1];

View File

@ -65,6 +65,34 @@ static int32_t tsdbSttLvlInitRef(STsdb *pTsdb, const SSttLvl *lvl1, SSttLvl **lv
return 0;
}
static int32_t tsdbSttLvlFilteredInitEx(STsdb *pTsdb, const SSttLvl *lvl1, int64_t ever, SSttLvl **lvl,
TFileOpArray *fopArr) {
int32_t code = tsdbSttLvlInit(lvl1->level, lvl);
if (code) return code;
const STFileObj *fobj1;
TARRAY2_FOREACH(lvl1->fobjArr, fobj1) {
if (fobj1->f->maxVer <= ever) {
STFileObj *fobj;
code = tsdbTFileObjInit(pTsdb, fobj1->f, &fobj);
if (code) {
tsdbSttLvlClear(lvl);
return code;
}
TARRAY2_APPEND(lvl[0]->fobjArr, fobj);
} else {
STFileOp op = {
.optype = TSDB_FOP_REMOVE,
.fid = fobj1->f->fid,
.of = fobj1->f[0],
};
TARRAY2_APPEND(fopArr, op);
}
}
return 0;
}
static void tsdbSttLvlRemoveFObj(void *data) { tsdbTFileObjRemove(*(STFileObj **)data); }
static void tsdbSttLvlRemove(SSttLvl **lvl) {
TARRAY2_DESTROY(lvl[0]->fobjArr, tsdbSttLvlRemoveFObj);
@ -424,6 +452,7 @@ int32_t tsdbTFileSetInit(int32_t fid, STFileSet **fset) {
if (fset[0] == NULL) return TSDB_CODE_OUT_OF_MEMORY;
fset[0]->fid = fid;
fset[0]->maxVerValid = VERSION_MAX;
TARRAY2_INIT(fset[0]->lvlArr);
return 0;
}
@ -458,6 +487,61 @@ int32_t tsdbTFileSetInitDup(STsdb *pTsdb, const STFileSet *fset1, STFileSet **fs
return 0;
}
int32_t tsdbTFileSetFilteredInitDup(STsdb *pTsdb, const STFileSet *fset1, int64_t ever, STFileSet **fset,
TFileOpArray *fopArr) {
int32_t code = tsdbTFileSetInit(fset1->fid, fset);
if (code) return code;
for (int32_t ftype = TSDB_FTYPE_MIN; ftype < TSDB_FTYPE_MAX; ++ftype) {
if (fset1->farr[ftype] == NULL) continue;
STFileObj *fobj = fset1->farr[ftype];
if (fobj->f->maxVer <= ever) {
code = tsdbTFileObjInit(pTsdb, fobj->f, &fset[0]->farr[ftype]);
if (code) {
tsdbTFileSetClear(fset);
return code;
}
} else {
STFileOp op = {
.optype = TSDB_FOP_REMOVE,
.fid = fobj->f->fid,
.of = fobj->f[0],
};
TARRAY2_APPEND(fopArr, op);
}
}
const SSttLvl *lvl1;
TARRAY2_FOREACH(fset1->lvlArr, lvl1) {
SSttLvl *lvl;
code = tsdbSttLvlFilteredInitEx(pTsdb, lvl1, ever, &lvl, fopArr);
if (code) {
tsdbTFileSetClear(fset);
return code;
}
code = TARRAY2_APPEND(fset[0]->lvlArr, lvl);
if (code) return code;
}
return 0;
}
int32_t tsdbTSnapRangeInitRef(STsdb *pTsdb, const STFileSet *fset1, int64_t sver, int64_t ever, STSnapRange **fsr) {
fsr[0] = taosMemoryCalloc(1, sizeof(*fsr[0]));
if (fsr[0] == NULL) return TSDB_CODE_OUT_OF_MEMORY;
fsr[0]->fid = fset1->fid;
fsr[0]->sver = sver;
fsr[0]->ever = ever;
int32_t code = tsdbTFileSetInitRef(pTsdb, fset1, &fsr[0]->fset);
if (code) {
taosMemoryFree(fsr[0]);
fsr[0] = NULL;
}
return code;
}
int32_t tsdbTFileSetInitRef(STsdb *pTsdb, const STFileSet *fset1, STFileSet **fset) {
int32_t code = tsdbTFileSetInit(fset1->fid, fset);
if (code) return code;
@ -485,6 +569,15 @@ int32_t tsdbTFileSetInitRef(STsdb *pTsdb, const STFileSet *fset1, STFileSet **fs
return 0;
}
int32_t tsdbTSnapRangeClear(STSnapRange **fsr) {
if (!fsr[0]) return 0;
tsdbTFileSetClear(&fsr[0]->fset);
taosMemoryFree(fsr[0]);
fsr[0] = NULL;
return 0;
}
int32_t tsdbTFileSetClear(STFileSet **fset) {
if (!fset[0]) return 0;
@ -545,4 +638,4 @@ bool tsdbTFileSetIsEmpty(const STFileSet *fset) {
if (fset->farr[ftype] != NULL) return false;
}
return TARRAY2_SIZE(fset->lvlArr) == 0;
}
}

View File

@ -45,6 +45,13 @@ int32_t tsdbTFileSetInitDup(STsdb *pTsdb, const STFileSet *fset1, STFileSet **fs
int32_t tsdbTFileSetInitRef(STsdb *pTsdb, const STFileSet *fset1, STFileSet **fset);
int32_t tsdbTFileSetClear(STFileSet **fset);
int32_t tsdbTFileSetRemove(STFileSet **fset);
int32_t tsdbTFileSetFilteredInitDup(STsdb *pTsdb, const STFileSet *fset1, int64_t ever, STFileSet **fset,
TFileOpArray *fopArr);
int32_t tsdbTSnapRangeInitRef(STsdb *pTsdb, const STFileSet *fset1, int64_t sver, int64_t ever, STSnapRange **fsr);
int32_t tsdbTSnapRangeClear(STSnapRange **fsr);
// to/from json
int32_t tsdbTFileSetToJson(const STFileSet *fset, cJSON *json);
int32_t tsdbJsonToTFileSet(STsdb *pTsdb, const cJSON *json, STFileSet **fset);
@ -59,6 +66,9 @@ int64_t tsdbTFileSetMaxCid(const STFileSet *fset);
SSttLvl *tsdbTFileSetGetSttLvl(STFileSet *fset, int32_t level);
// is empty
bool tsdbTFileSetIsEmpty(const STFileSet *fset);
// stt
int32_t tsdbSttLvlInit(int32_t level, SSttLvl **lvl);
int32_t tsdbSttLvlClear(SSttLvl **lvl);
struct STFileOp {
tsdb_fop_t optype;
@ -74,12 +84,20 @@ struct SSttLvl {
struct STFileSet {
int32_t fid;
int64_t maxVerValid;
STFileObj *farr[TSDB_FTYPE_MAX]; // file array
TSttLvlArray lvlArr[1]; // level array
};
struct STSnapRange {
int32_t fid;
int64_t sver;
int64_t ever;
STFileSet *fset;
};
#ifdef __cplusplus
}
#endif
#endif /*_TSDB_FILE_SET2_H*/
#endif /*_TSDB_FILE_SET2_H*/

View File

@ -292,4 +292,4 @@ _exit:
TSDB_ERROR_LOG(TD_VID(writer->config->tsdb->pVnode), lino, code);
}
return code;
}
}

View File

@ -52,4 +52,4 @@ int32_t tsdbFSetWriteTombRecord(SFSetWriter *writer, const STombRecord *tombReco
}
#endif
#endif /*_TSDB_FSET_RW_H*/
#endif /*_TSDB_FSET_RW_H*/

View File

@ -76,6 +76,17 @@ static int32_t tfile_to_json(const STFile *file, cJSON *json) {
return TSDB_CODE_OUT_OF_MEMORY;
}
if (file->minVer <= file->maxVer) {
/* minVer */
if (cJSON_AddNumberToObject(json, "minVer", file->minVer) == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
/* maxVer */
if (cJSON_AddNumberToObject(json, "maxVer", file->maxVer) == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
}
return 0;
}
@ -122,6 +133,19 @@ static int32_t tfile_from_json(const cJSON *json, STFile *file) {
return TSDB_CODE_FILE_CORRUPTED;
}
/* minVer */
file->minVer = VERSION_MAX;
item = cJSON_GetObjectItem(json, "minVer");
if (cJSON_IsNumber(item)) {
file->minVer = item->valuedouble;
}
/* maxVer */
file->maxVer = VERSION_MIN;
item = cJSON_GetObjectItem(json, "maxVer");
if (cJSON_IsNumber(item)) {
file->maxVer = item->valuedouble;
}
return 0;
}
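Older current.json files carry no minVer/maxVer keys, so tfile_from_json falls back to the VERSION_MAX/VERSION_MIN sentinels, and tfile_to_json only emits the keys when the range is real. A tiny standalone sketch of the sentinel convention (hypothetical helper and placeholder constants, not part of the patch):

#include <stdbool.h>
#include <stdint.h>

#define VERSION_MIN 0          // placeholder values assumed for this sketch
#define VERSION_MAX INT64_MAX

/* A known range satisfies minVer <= maxVer; the defaults loaded for old
 * current.json files (VERSION_MAX / VERSION_MIN) intentionally fail this
 * test, which mirrors the guard tfile_to_json applies before emitting the
 * two keys. */
static bool verRangeKnown(int64_t minVer, int64_t maxVer) {
  return minVer <= maxVer;
}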
@ -296,4 +320,4 @@ int32_t tsdbTFileObjCmpr(const STFileObj **fobj1, const STFileObj **fobj2) {
} else {
return 0;
}
}
}

View File

@ -61,6 +61,8 @@ struct STFile {
int32_t fid; // file id
int64_t cid; // commit id
int64_t size;
int64_t minVer;
int64_t maxVer;
union {
struct {
int32_t level;
@ -80,4 +82,4 @@ struct STFileObj {
}
#endif
#endif /*_TSDB_FILE_H*/
#endif /*_TSDB_FILE_H*/

View File

@ -313,6 +313,7 @@ static int32_t tsdbMergeFileSetBeginOpenWriter(SMerger *merger) {
if (merger->ctx->fset->farr[ftype]) {
config.files[ftype].exist = true;
config.files[ftype].file = merger->ctx->fset->farr[ftype]->f[0];
} else {
config.files[ftype].exist = false;
}

View File

@ -35,7 +35,7 @@ int32_t tsdbSetKeepCfg(STsdb *pTsdb, STsdbCfg *pCfg) {
* @param dir
* @return int
*/
int tsdbOpen(SVnode *pVnode, STsdb **ppTsdb, const char *dir, STsdbKeepCfg *pKeepCfg, int8_t rollback) {
int tsdbOpen(SVnode *pVnode, STsdb **ppTsdb, const char *dir, STsdbKeepCfg *pKeepCfg, int8_t rollback, bool force) {
STsdb *pTsdb = NULL;
int slen = 0;
@ -72,6 +72,11 @@ int tsdbOpen(SVnode *pVnode, STsdb **ppTsdb, const char *dir, STsdbKeepCfg *pKee
goto _err;
}
if (pTsdb->pFS->fsstate == TSDB_FS_STATE_INCOMPLETE && force == false) {
terrno = TSDB_CODE_NEED_RETRY;
goto _err;
}
if (tsdbOpenCache(pTsdb) < 0) {
goto _err;
}
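With the incomplete-state check above, tsdbOpen now fails with TSDB_CODE_NEED_RETRY instead of silently opening a damaged dataset unless force is set. One plausible caller-side pattern, sketched against the vnode code (illustrative only, not taken from this patch):

/* Retry an open that failed only because the fs scan flagged the dataset as
 * incomplete, once the caller has decided that truncating back to
 * maxVerValid is acceptable. */
static int openTsdbWithOptionalForce(SVnode *pVnode, STsdb **ppTsdb, const char *dir,
                                     STsdbKeepCfg *pKeepCfg, int8_t rollback) {
  if (tsdbOpen(pVnode, ppTsdb, dir, pKeepCfg, rollback, false) == 0) {
    return 0;
  }
  if (terrno != TSDB_CODE_NEED_RETRY) {
    return -1;  // unrelated failure: propagate
  }
  return tsdbOpen(pVnode, ppTsdb, dir, pKeepCfg, rollback, true);
}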

View File

@ -2670,6 +2670,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
STableBlockScanInfo* pScanInfo = NULL;
SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(pBlockIter);
SLastBlockReader* pLastBlockReader = pReader->status.fileIter.pLastBlockReader;
bool asc = ASCENDING_TRAVERSE(pReader->info.order);
if (pReader->pIgnoreTables && taosHashGet(*pReader->pIgnoreTables, &pBlockInfo->uid, sizeof(pBlockInfo->uid))) {
setBlockAllDumped(&pStatus->fBlockDumpInfo, pBlockInfo->record.lastKey, pReader->info.order);
@ -2705,8 +2706,8 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
} else {
bool bHasDataInLastBlock = hasDataInLastBlock(pLastBlockReader);
int64_t tsLast = bHasDataInLastBlock ? getCurrentKeyInLastBlock(pLastBlockReader) : INT64_MIN;
if (!bHasDataInLastBlock || ((ASCENDING_TRAVERSE(pReader->info.order) && pBlockInfo->record.lastKey < tsLast) ||
(!ASCENDING_TRAVERSE(pReader->info.order) && pBlockInfo->record.firstKey > tsLast))) {
if (!bHasDataInLastBlock ||
((asc && pBlockInfo->record.lastKey < tsLast) || (!asc && pBlockInfo->record.firstKey > tsLast))) {
// whole block is required, return it directly
SDataBlockInfo* pInfo = &pReader->resBlockInfo.pResBlock->info;
pInfo->rows = pBlockInfo->record.numRow;
@ -2728,26 +2729,28 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
tBlockDataReset(pBData);
SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock;
tsdbDebug("load data in last block firstly %s", pReader->idStr);
tsdbDebug("load data in last block firstly %s", pReader->idStr);
int64_t st = taosGetTimestampUs();
while (1) {
bool hasBlockLData = hasDataInLastBlock(pLastBlockReader);
// no data in last block and block, no need to proceed.
if (hasBlockLData == false) {
break;
}
// no data in last block, no need to proceed.
while (hasDataInLastBlock(pLastBlockReader)) {
code = buildComposedDataBlockImpl(pReader, pScanInfo, &pReader->status.fileBlockData, pLastBlockReader);
if (code) {
if (code != TSDB_CODE_SUCCESS) {
return code;
}
if (pResBlock->info.rows >= pReader->resBlockInfo.capacity) {
break;
}
// data in stt now overlaps with current active file data block, need to composed with file data block.
int64_t keyInStt = getCurrentKeyInLastBlock(pLastBlockReader);
if ((keyInStt >= pBlockInfo->record.firstKey && asc) || (keyInStt <= pBlockInfo->record.lastKey && (!asc))) {
tsdbDebug("%p keyInStt:%" PRId64 ", overlap with file block, brange:%" PRId64 "-%" PRId64 " %s", pReader,
keyInStt, pBlockInfo->record.firstKey, pBlockInfo->record.lastKey, pReader->idStr);
break;
}
}
double el = (taosGetTimestampUs() - st) / 1000.0;
@ -2760,7 +2763,6 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
pResBlock->info.rows, el, pReader->idStr);
}
}
}
return (pReader->code != TSDB_CODE_SUCCESS) ? pReader->code : code;
@ -4947,7 +4949,7 @@ int32_t tsdbTakeReadSnap2(STsdbReader* pReader, _query_reseek_func_t reseek, STs
}
// fs
code = tsdbFSCreateRefSnapshot(pTsdb->pFS, &pSnap->pfSetArray);
code = tsdbFSCreateRefSnapshotWithoutLock(pTsdb->pFS, &pSnap->pfSetArray);
// unlock
taosThreadRwlockUnlock(&pTsdb->rwLock);

Some files were not shown because too many files have changed in this diff.