Merge branch '3.0' into enh/changeQueue3.0

Yihao Deng, 2023-11-22 17:11:04 +08:00, committed by GitHub
commit ef4f173881 (GPG Key ID: 4AEE18F83AFDEB23)
408 changed files with 32019 additions and 13420 deletions


@@ -151,14 +151,6 @@ def pre_test(){
cd ${WKC}
git submodule update --init --recursive
'''
- sh '''
- cd ${WKPY}
- git reset --hard
- git pull
- git log -5
- echo "python connector log: `git log -5`" >>${WKDIR}/jenkins.log
- echo >>${WKDIR}/jenkins.log
- '''
return 1
}
def pre_test_build_mac() {
@@ -316,7 +308,7 @@ def pre_test_build_win() {
python -m pip uninstall taospy -y
python -m pip install taospy==2.7.12
python -m pip uninstall taos-ws-py -y
- python -m pip install taos-ws-py==0.2.9
+ python -m pip install taos-ws-py==0.3.1
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
'''
return 1
@@ -424,7 +416,7 @@ pipeline {
echo "${WKDIR}/restore.sh -p ${BRANCH_NAME} -n ${BUILD_ID} -c {container name}"
}
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
- timeout(time: 130, unit: 'MINUTES'){
+ timeout(time: 150, unit: 'MINUTES'){
pre_test()
script {
sh '''


@@ -97,7 +97,15 @@ ENDIF()
SET(JEMALLOC_ENABLED OFF)
IF (TD_WINDOWS)
MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
- SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")
+ IF (${CMAKE_BUILD_TYPE} MATCHES "Release")
+ MESSAGE("${Green} will build Release version! ${ColourReset}")
+ SET(COMMON_FLAGS "/W3 /D_WIN32 /DWIN32 /Zi- /O2 /GL /MD")
+ ELSE ()
+ MESSAGE("${Green} will build Debug version! ${ColourReset}")
+ SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")
+ ENDIF()
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO")
# IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
# SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
@@ -149,6 +157,8 @@ ELSE ()
CHECK_C_COMPILER_FLAG("-mfma" COMPILER_SUPPORT_FMA)
CHECK_C_COMPILER_FLAG("-mavx" COMPILER_SUPPORT_AVX)
CHECK_C_COMPILER_FLAG("-mavx2" COMPILER_SUPPORT_AVX2)
+ CHECK_C_COMPILER_FLAG("-mavx512f" COMPILER_SUPPORT_AVX512F)
+ CHECK_C_COMPILER_FLAG("-mavx512vbmi" COMPILER_SUPPORT_AVX512BMI)
IF (COMPILER_SUPPORT_SSE42)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -msse4.2")
@@ -169,6 +179,12 @@ ELSE ()
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx2")
ENDIF()
MESSAGE(STATUS "SIMD instructions (FMA/AVX/AVX2) is ACTIVATED")
+ IF (COMPILER_SUPPORT_AVX512F AND COMPILER_SUPPORT_AVX512BMI)
+ SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512f -mavx512vbmi")
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512f -mavx512vbmi")
+ MESSAGE(STATUS "avx512 supported by gcc")
+ ENDIF()
ENDIF()
# build mode


@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
- SET(TD_VER_NUMBER "3.2.1.0.alpha")
+ SET(TD_VER_NUMBER "3.2.2.0.alpha")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)


@@ -11,7 +11,7 @@ ExternalProject_Add(curl2
BUILD_IN_SOURCE TRUE
BUILD_ALWAYS 1
UPDATE_COMMAND ""
- CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.2 --with-ssl=$ENV{HOME}/.cos-local.2 --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd --without-libidn2 #--enable-debug
+ CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.2 --with-ssl=$ENV{HOME}/.cos-local.2 --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd --without-libidn2 --without-nghttp2 #--enable-debug
BUILD_COMMAND make -j
INSTALL_COMMAND make install
TEST_COMMAND ""


@@ -8,7 +8,7 @@ ExternalProject_Add(openssl
BUILD_IN_SOURCE TRUE
#BUILD_ALWAYS 1
#UPDATE_COMMAND ""
- CONFIGURE_COMMAND ./Configure --prefix=$ENV{HOME}/.cos-local.2 -static #--no-shared
+ CONFIGURE_COMMAND ./Configure --prefix=$ENV{HOME}/.cos-local.2 no-shared
BUILD_COMMAND make -j
INSTALL_COMMAND make install_sw -j
TEST_COMMAND ""


@@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
- GIT_TAG main
+ GIT_TAG 3.0
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE


@@ -13,6 +13,6 @@ ExternalProject_Add(xml2
BUILD_IN_SOURCE TRUE
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.2 --enable-shared=no --enable-static=yes --without-python --without-lzma
BUILD_COMMAND make -j
- INSTALL_COMMAND make install && ln -s $ENV{HOME}/.cos-local.2/include/libxml2/libxml $ENV{HOME}/.cos-local.2/include/libxml
+ INSTALL_COMMAND make install && ln -sf $ENV{HOME}/.cos-local.2/include/libxml2/libxml $ENV{HOME}/.cos-local.2/include/libxml
TEST_COMMAND ""
)


@@ -317,7 +317,8 @@ if (${BUILD_WITH_ROCKSDB})
SET(CMAKE_BUILD_TYPE Release)
endif()
endif(${TD_LINUX})
- MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
+ MESSAGE(STATUS "ROCKSDB CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
+ MESSAGE(STATUS "ROCKSDB C STATUS CONFIG: " ${CMAKE_C_FLAGS})
if(${TD_DARWIN})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
@@ -329,8 +330,12 @@ if (${BUILD_WITH_ROCKSDB})
if (${TD_WINDOWS})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
+ SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4244 /wd4819")
option(WITH_JNI "" OFF)
- option(WITH_MD_LIBRARY "build with MD" OFF)
+ if(CMAKE_C_FLAGS MATCHES "/MT" OR CMAKE_C_FLAGS MATCHES "/MTd")
+ message("Rocksdb build runtime lib use /MT or /MTd")
+ option(WITH_MD_LIBRARY "build with MD" OFF)
+ endif()
set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
endif(${TD_WINDOWS})
@@ -361,9 +366,11 @@ if (${BUILD_WITH_ROCKSDB})
)
else()
if (NOT ${TD_LINUX})
- MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
+ MESSAGE(STATUS "ROCKSDB CXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
+ MESSAGE(STATUS "ROCKSDB C STATUS CONFIG: " ${CMAKE_C_FLAGS})
if(${TD_DARWIN})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
+ SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=maybe-uninitialized")
endif(${TD_DARWIN})
if (${TD_DARWIN_ARM64})
@@ -372,8 +379,12 @@ if (${BUILD_WITH_ROCKSDB})
if (${TD_WINDOWS})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
+ SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4244 /wd4819")
option(WITH_JNI "" OFF)
- option(WITH_MD_LIBRARY "build with MD" OFF)
+ if(CMAKE_C_FLAGS MATCHES "/MT" OR CMAKE_C_FLAGS MATCHES "/MTd")
+ message("Rocksdb build runtime lib use /MT or /MTd")
+ option(WITH_MD_LIBRARY "build with MD" OFF)
+ endif()
set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
endif(${TD_WINDOWS})
@@ -456,7 +467,9 @@ endif(${BUILD_WITH_NURAFT})
# pthread
if(${BUILD_PTHREAD})
- set(CMAKE_BUILD_TYPE debug)
+ if ("${CMAKE_BUILD_TYPE}" STREQUAL "")
+ SET(CMAKE_BUILD_TYPE Release)
+ endif()
add_definitions(-DPTW32_STATIC_LIB)
add_subdirectory(pthread EXCLUDE_FROM_ALL)
set_target_properties(libpthreadVC3 PROPERTIES OUTPUT_NAME pthread)
@@ -640,13 +653,18 @@ if(${BUILD_GEOS})
if(${TD_LINUX})
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
- IF ("${CMAKE_BUILD_TYPE}" STREQUAL "")
+ if ("${CMAKE_BUILD_TYPE}" STREQUAL "")
SET(CMAKE_BUILD_TYPE Release)
endif()
endif(${TD_LINUX})
option(BUILD_SHARED_LIBS "Build GEOS with shared libraries" OFF)
add_subdirectory(geos EXCLUDE_FROM_ALL)
- unset(CMAKE_CXX_STANDARD CACHE) # undo libgeos's setting of global CMAKE_CXX_STANDARD
+ if (${TD_WINDOWS})
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
+ else ()
+ unset(CMAKE_CXX_STANDARD CACHE) # undo libgeos's setting of global CMAKE_CXX_STANDARD
+ endif(${TD_WINDOWS})
target_include_directories(
geos_c
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/geos/include>

Binary files not shown (5 files).


@@ -5,7 +5,8 @@ description: This document describes how to insert data into TDengine.
---
## Syntax
+ Writing records supports two syntaxes, the normal syntax and the super table syntax. In the normal syntax, the table name immediately following `INSERT INTO` is a subtable name or a regular table name. In the super table syntax, the table name immediately following `INSERT INTO` is the super table name.
+ ### Normal Syntax
```sql
INSERT INTO
tb_name
@@ -20,6 +21,15 @@
INSERT INTO tb_name [(field1_name, ...)] subquery
```
+ ### Super Table Syntax
+ ```sql
+ INSERT INTO
+ stb1_name [(field1_name, ...)]
+ VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
+ [stb2_name [(field1_name, ...)]
+ VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
+ ...];
+ ```
**Timestamps**
@@ -32,26 +42,34 @@ INSERT INTO tb_name [(field1_name, ...)] subquery
**Syntax**
- 1. The USING clause automatically creates the specified subtable if it does not exist. If it's unknown whether the table already exists, the table can be created automatically while inserting using the SQL statement below. To use this functionality, a STable must be used as template and tag values must be provided. Any tags that you do not specify will be assigned a null value.
- 2. You can insert data into specified columns. Any columns in which you do not insert data will be assigned a null value.
- 3. The VALUES clause inserts one or more rows of data into a table.
- 4. The FILE clause inserts tags or data from a comma-separated values (CSV) file. Do not include headers in your CSV files.
- 5. A single `INSERT ... VALUES` statement and `INSERT ... FILE` statement can write data to multiple tables.
- 6. The INSERT statement is fully parsed before being executed, so that if any element of the statement fails, the entire statement will fail. For example, the following statement will not create a table because the latter part of the statement is invalid:
+ 1. You can insert data into specified columns. Any columns in which you do not insert data will be assigned a null value.
+ 2. The VALUES clause inserts one or more rows of data into a table.
+ 3. The FILE clause inserts tags or data from a comma-separated values (CSV) file. Do not include headers in your CSV files.
+ 4. A single `INSERT ... VALUES` statement and `INSERT ... FILE` statement can write data to multiple tables.
+ 5. The INSERT statement is fully parsed before being executed, so that if any element of the statement fails, the entire statement will fail. For example, the following statement will not create a table because the latter part of the statement is invalid:
```sql
INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a');
```
- 7. However, an INSERT statement that writes data to multiple subtables can succeed for some tables and fail for others. This situation is caused because vnodes perform write operations independently of each other. One vnode failing to write data does not affect the ability of other vnodes to write successfully.
- 8. Data from TDengine can be inserted into a specified table using the `INSERT ... subquery` statement. Arbitrary query statements are supported. This syntax can only be used for subtables and normal tables, and does not support automatic table creation.
+ 6. However, an INSERT statement that writes data to multiple subtables can succeed for some tables and fail for others. This situation is caused because vnodes perform write operations independently of each other. One vnode failing to write data does not affect the ability of other vnodes to write successfully.
+ **Normal Syntax**
+ 1. The USING clause automatically creates the specified subtable if it does not exist. If it's unknown whether the table already exists, the table can be created automatically while inserting using the SQL statement below. To use this functionality, a STable must be used as template and tag values must be provided. Any tags that you do not specify will be assigned a null value.
+ 2. Data from TDengine can be inserted into a specified table using the `INSERT ... subquery` statement. Arbitrary query statements are supported. This syntax can only be used for subtables and normal tables, and does not support automatic table creation.
+ **Super Table Syntax**
+ 1. The tbname column must be included in the field_name list; it holds the name of the child table. This column is of string type, and the `.` character is not permitted in the tbname column.
+ 2. Tag columns are eligible for inclusion in the field_name list. If the specified child table doesn't exist, a new child table will be created with the provided tag values. In the absence of specified tag values, the newly created table will have all NULL tag values. Existing child table tag values remain unchanged.
+ 3. Parameter binding is not supported.
## Insert a Record
Single row or multiple rows specified with VALUES can be inserted into a specific table. A single row is inserted using the below statement.
@@ -134,3 +152,14 @@ When writing data from a file, you can automatically create the specified subtab
INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv'
d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv';
```
+ ## Super Table Syntax
+ Automatically create tables, with the table name given by the `tbname` column:
+ ```sql
+ INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase)
+ values('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 219, 0.32)
+ values('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:35.779', 10.15, 217, 0.33)
+ values('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 217, 0.33)
+ ```


@@ -539,7 +539,8 @@ TO_CHAR(ts, format_str_literal)
- When `ms`, `us`, `ns` are used in `to_char`, as in `to_char(ts, 'yyyy-mm-dd hh:mi:ss.ms.us.ns')`, the `ms`, `us`, and `ns` fields all correspond to the same fractional seconds. When ts is `1697182085123`, the output of `ms` is `123`, `us` is `123000`, and `ns` is `123000000`.
- If you want to output some characters of the format without converting them, surround them with double quotes: `to_char(ts, 'yyyy-mm-dd "is formated by yyyy-mm-dd"')`. To output double quotes, add a backslash before the double quote, e.g. `to_char(ts, '\"yyyy-mm-dd\"')` will output `"2023-10-10"`.
- For formats that output digits, the uppercase and lowercase formats are the same.
- - It's recommended to put time zone in the format, if not, the default time zone zone will be that in server or client.
+ - It's recommended to put the time zone in the format; if not, the default time zone will be the one configured in the server or client.
+ - The precision of the input timestamp is recognized automatically according to the precision of the table used; milliseconds are used if no table is specified.
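To illustrate the notes above, a minimal sketch (assuming a `meters` table with a millisecond-precision `ts` column, as used elsewhere in these docs):

```sql
-- Keep the fractional seconds in the ms field.
SELECT TO_CHAR(ts, 'yyyy-mm-dd hh:mi:ss.ms') FROM meters LIMIT 1;

-- Text wrapped in double quotes is copied to the output without conversion.
SELECT TO_CHAR(ts, 'yyyy-mm-dd "is formatted by yyyy-mm-dd"') FROM meters LIMIT 1;
```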
#### TO_TIMESTAMP
@@ -564,9 +565,10 @@ TO_TIMESTAMP(ts_str_literal, format_str_literal)
- The uppercase or lowercase of `MONTH`, `MON`, `DAY`, `DY` and formats that output digits have the same effect when used in `to_timestamp`; for example in `to_timestamp('2023-JANUARY-01', 'YYYY-month-dd')`, `month` can be replaced by `MONTH` or `Month`. The case is ignored.
- If the same component is specified multiple times, the earlier specification is overwritten. For example in `to_timestamp('2023-22-10-10', 'yyyy-yy-MM-dd')`, the output year will be `2022`.
- To avoid using an unexpected time zone during the conversion, it's recommended to put the time zone in the ts string, e.g. '2023-10-10 10:10:10+08'. If no time zone is specified, the default will be the one configured in the server or client.
- - The default timestamp if some components are not specified will be: `1970-01-01 00:00:00` with specified or default local timezone.
+ - The default timestamp if some components are not specified will be `1970-01-01 00:00:00` in the specified or default local timezone. Specifying only `DDD` without `DD` is not supported currently; e.g. the format 'yyyy-mm-ddd' is not supported, but 'yyyy-mm-dd' is.
- If `AM` or `PM` is specified in the format, the hour must be between `1-12`.
- In some cases, `to_timestamp` can convert correctly even if the format and the timestamp string do not match exactly. For example in `to_timestamp('200101/2', 'yyyyMM1/dd')`, the extra digit `1` in the format string is ignored, and the output timestamp is `2001-01-02 00:00:00`. Spaces and tabs in the format and the timestamp string are also ignored automatically.
+ - The precision of the output timestamp will be the same as the table in the SELECT statement; milliseconds are used if no table is specified. The output of `select to_timestamp('2023-08-1 10:10:10.123456789', 'yyyy-mm-dd hh:mi:ss.ns')` will be truncated to millisecond precision. If a nanosecond-precision table is specified, no truncation is applied, as in `select to_timestamp('2023-08-1 10:10:10.123456789', 'yyyy-mm-dd hh:mi:ss.ns') from db_ns.table_ns limit 1`.
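As a short illustration of the rules above (both calls use only formats described in this section):

```sql
-- Parse a timestamp string with millisecond precision.
SELECT TO_TIMESTAMP('2023-10-10 10:10:10.123', 'yyyy-mm-dd hh:mi:ss.ms');

-- A later specification overrides an earlier one: the resulting year is 2022.
SELECT TO_TIMESTAMP('2023-22-10-10', 'yyyy-yy-MM-dd');
```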
### Time and Date Functions


@@ -38,11 +38,16 @@ Aggregation by time window is supported in TDengine. For example, in the case wh
window_clause: {
SESSION(ts_col, tol_val)
| STATE_WINDOW(col)
- | INTERVAL(interval [, offset]) [SLIDING sliding] [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
+ | INTERVAL(interval_val [, offset]) [SLIDING (sliding_value)] [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
| EVENT_WINDOW START WITH start_trigger_condition END WITH end_trigger_condition
}
```
+ Both interval_val and sliding_value are time durations, which can be written in three forms:
+ - INTERVAL(1s, 500a) SLIDING(1s): with a time unit, where the unit is a single character: a (millisecond), b (nanosecond), d (day), h (hour), m (minute), n (month), s (second), u (microsecond), w (week), y (year).
+ - INTERVAL(1000, 500) SLIDING(1000): without a time unit; the unit is the time precision of the queried database, and if more than one database is involved, the higher precision is used.
+ - INTERVAL('1s', '500a') SLIDING('1s'): as strings; the unit must be specified and no spaces or other characters are allowed inside the string.
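For illustration, a minimal sketch of the three forms in a query (assuming the `meters` super table with a `current` column used in other examples in these docs):

```sql
-- Explicit single-character time units.
SELECT COUNT(*) FROM meters INTERVAL(1s, 500a) SLIDING(1s);

-- No units: 1000 and 500 are interpreted in the database's time precision.
SELECT AVG(current) FROM meters INTERVAL(1000, 500) SLIDING(1000);

-- String form: units are mandatory and the strings must contain no spaces.
SELECT COUNT(*) FROM meters INTERVAL('1s', '500a') SLIDING('1s');
```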
The following restrictions apply:
### Other Rules


@@ -59,4 +59,4 @@ Query OK, 9 row(s) affected (0.004763s)
## Import using taosdump
- A convenient tool for importing and exporting data is provided by TDengine, `taosdump`, which can be used to export data from one TDengine cluster and import into another one. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump).
+ A convenient tool for importing and exporting data is provided by TDengine, `taosdump`, which can be used to export data from one TDengine cluster and import into another one. For the details of using `taosdump` please refer to the taosdump documentation.


@@ -19,4 +19,4 @@ The data of table or STable specified by `tb_name` will be exported into a file
## Export Using taosdump
- With `taosdump`, you can choose to export the data of all databases, a database, a table or a STable, you can also choose to export the data within a time range, or even only export the schema definition of a table. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump).
+ With `taosdump`, you can choose to export the data of all databases, a database, a table or a STable, you can also choose to export the data within a time range, or even only export the schema definition of a table. For the details of using `taosdump` please refer to the taosdump documentation.


@@ -11,8 +11,6 @@ The collection of the monitoring information is enabled by default, but can be d
TDinsight is a complete solution which uses the monitoring database `log` mentioned previously, and Grafana, to monitor a TDengine cluster.
- Please refer to [TDinsight Grafana Dashboard](../../reference/tdinsight) to learn more details about using TDinsight to monitor TDengine.
A script `TDinsight.sh` is provided to deploy TDinsight automatically.
Download `TDinsight.sh` with the below command:


@@ -36,6 +36,7 @@ REST connection supports all platforms that can run Java.
| taos-jdbcdriver version | major changes | TDengine version |
| :---------------------: | :-----------: | :--------------: |
+ | 3.2.7 | Support VARBINARY and GEOMETRY types, and add time zone support for native connections. Support websocket auto reconnection | 3.2.0.0 or later |
| 3.2.5 | Subscription add committed() and assignment() method | 3.1.0.3 or later |
| 3.2.4 | Subscription add the enable.auto.commit parameter and the unsubscribe() method in the WebSocket connection | - |
| 3.2.3 | Fixed resultSet data parsing failure in some cases | - |
@@ -178,7 +179,7 @@ Add following dependency in the `pom.xml` file of your Maven project:
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
- <version>3.2.2</version>
+ <version>3.2.7</version>
</dependency>
```


@@ -436,11 +436,22 @@ The `TaosConnection` class and the `TaosResult` class already implement all the
:::note
The TaosCursor class uses native connections for write and query operations. In a client-side multi-threaded scenario, this cursor instance must remain thread exclusive and cannot be shared across threads for use, otherwise, it will result in errors in the returned results.
+ The best practice for TaosCursor is to create a cursor at the beginning of a query and close it immediately after use. Please avoid reusing the same cursor for multiple executions.
:::
</TabItem>
<TabItem value="rest" label="REST connection">
+ ##### Use of the RestClient class
+ The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
+ ```python title="Use of RestClient"
+ {{#include docs/examples/python/rest_client_example.py}}
+ ```
+ For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
##### Use of TaosRestCursor class
The `TaosRestCursor` class is an implementation of the PEP249 Cursor interface.
@@ -452,15 +463,9 @@ The `TaosRestCursor` class is an implementation of the PEP249 Cursor interface.
- `cursor.rowcount` : For write operations, returns the number of successful rows written. For query operations, returns the number of rows in the result set.
- `cursor.description` : Returns the description of the field. Please refer to [TaosRestCursor](https://docs.taosdata.com/api/taospy/taosrest/cursor.html) for the specific format of the description information.
- ##### Use of the RestClient class
+ :::note
+ The best practice for TaosRestCursor is to create a cursor at the beginning of a query and close it immediately after use. Please avoid reusing the same cursor for multiple executions.
+ :::
- The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
- ```python title="Use of RestClient"
- {{#include docs/examples/python/rest_client_example.py}}
- ```
- For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
@@ -554,6 +559,16 @@ The `TaosConnection` class and the `TaosResult` class already implement all the
</TabItem>
<TabItem value="rest" label="REST connection">
+ ##### Use of the RestClient class
+ The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
+ ```python title="Use of RestClient"
+ {{#include docs/examples/python/rest_client_with_req_id_example.py}}
+ ```
+ For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
##### Use of TaosRestCursor class
As the way to connect introduced above but add `req_id` argument.
@@ -565,16 +580,6 @@ As the way to connect introduced above but add `req_id` argument.
- `cursor.rowcount` : For write operations, returns the number of successful rows written. For query operations, returns the number of rows in the result set.
- `cursor.description` : Returns the description of the field. Please refer to [TaosRestCursor](https://docs.taosdata.com/api/taospy/taosrest/cursor.html) for the specific format of the description information.
- ##### Use of the RestClient class
- The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
- ```python title="Use of RestClient"
- {{#include docs/examples/python/rest_client_with_req_id_example.py}}
- ```
- For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
</TabItem>
<TabItem value="websocket" label="WebSocket connection">


@@ -218,7 +218,7 @@ The example to query the average system memory usage for the specified interval
### Importing the Dashboard
- You can install TDinsight dashboard in data source configuration page (like `http://localhost:3000/datasources/edit/1/dashboards`) as a monitoring visualization tool for TDengine cluster. Ensure that you use TDinsight for 3.x. Please note TDinsight for 3.x needs to configure and run taoskeeper correctly. Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
+ You can install TDinsight dashboard in data source configuration page (like `http://localhost:3000/datasources/edit/1/dashboards`) as a monitoring visualization tool for TDengine cluster. Ensure that you use TDinsight for 3.x. Please note TDinsight for 3.x needs to configure and run taoskeeper correctly.
![TDengine Database Grafana plugine import dashboard](./import_dashboard.webp)


@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://t
import Release from "/components/ReleaseV3";
+ ## 3.2.1.0
+ <Release type="tdengine" version="3.2.1.0" />
## 3.2.0.0
<Release type="tdengine" version="3.2.0.0" />


@@ -33,6 +33,8 @@ data = cursor.fetchall()
print(column_names)
for row in data:
print(row)
+ # close cursor
+ cursor.close()
# output:
# inserted row count: 8


@@ -33,6 +33,8 @@ data = cursor.fetchall()
print(column_names)
for row in data:
print(row)
+ # close cursor
+ cursor.close()
# output:
# inserted row count: 8


@@ -36,6 +36,7 @@ REST connection supports all platforms that can run Java.
| taos-jdbcdriver version | major changes | TDengine version |
| :------------------: | :-----------: | :----------------: |
+ | 3.2.7 | Support for VARBINARY and GEOMETRY types; time zone setting support for native connections; WebSocket automatic reconnection | 3.2.0.0 and later |
| 3.2.5 | Subscription adds the committed() and assignment() methods | 3.1.0.3 and later |
| 3.2.4 | Subscription adds the enable.auto.commit parameter and the unsubscribe() method for WebSocket connections | - |
| 3.2.3 | Fixed ResultSet data parsing failures in some cases | - |
@@ -177,7 +178,7 @@ In a Maven project, add the following dependency in pom.xml:
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
- <version>3.2.2</version>
+ <version>3.2.7</version>
</dependency>
```
@@ -1097,7 +1098,6 @@ TaosConsumer consumer = new TaosConsumer<>(config);
- httpPoolSize: the maximum number of parallel requests on a single connection. Effective only for WebSocket connections.
For other parameters, see the [Consumer parameter list](../../develop/tmq#创建-consumer-以及consumer-group). Note that starting with TDengine server 3.2.0.0, the default value of auto.offset.reset in message subscription has changed.
#### Subscribe to and consume data
```java


@@ -436,11 +436,23 @@ now is a built-in function; it defaults to the current time of the client machine.
:::note
The TaosCursor class uses a native connection for write and query operations. In multi-threaded client scenarios, a cursor instance must remain exclusive to a single thread and must not be shared across threads; otherwise the returned results may contain errors.
+ The best practice for TaosCursor is to create the cursor when a query starts and close it as soon as it is no longer needed; avoid reusing the same cursor for multiple executions.
:::
</TabItem>
<TabItem value="rest" label="REST 连接">
+ ##### Using the RestClient class
+ The `RestClient` class is a direct wrapper around the [REST API](../rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
+ ```python title="Using RestClient"
+ {{#include docs/examples/python/rest_client_example.py}}
+ ```
+ For a more detailed description of the `sql()` method, see [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
##### Using the TaosRestCursor class
The `TaosRestCursor` class is an implementation of the PEP249 Cursor interface.
@@ -452,15 +464,10 @@ The TaosCursor class uses a native connection for write and query operations.
- `cursor.rowcount`: for write operations, returns the number of records written successfully; for query operations, returns the number of rows in the result set.
- `cursor.description`: returns the description of the fields. For the exact format of the description, see [TaosRestCursor](https://docs.taosdata.com/api/taospy/taosrest/cursor.html).
- ##### Using the RestClient class
+ :::note
+ The best practice for TaosRestCursor is to create the cursor when a query starts and close it as soon as it is no longer needed; avoid reusing the same cursor for multiple executions.
+ :::
- The `RestClient` class is a direct wrapper around the [REST API](../rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
- ```python title="Using RestClient"
- {{#include docs/examples/python/rest_client_example.py}}
- ```
- For a more detailed description of the `sql()` method, see [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
</TabItem>
<TabItem value="websocket" label="WebSocket 连接">
@@ -557,6 +564,16 @@ The RestClient class is a direct wrapper around the REST API. It contains only a sql()
Similar to the usage described above, with an added `req_id` argument.
+ ##### Using the RestClient class
+ The `RestClient` class is a direct wrapper around the [REST API](../rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
+ ```python title="Using RestClient"
+ {{#include docs/examples/python/rest_client_with_req_id_example.py}}
+ ```
+ For a more detailed description of the `sql()` method, see [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
##### Using the TaosRestCursor class
The `TaosRestCursor` class is an implementation of the PEP249 Cursor interface.
@@ -568,15 +585,6 @@ The RestClient class is a direct wrapper around the REST API. It contains only a sql()
- `cursor.rowcount`: for write operations, returns the number of records written successfully; for query operations, returns the number of rows in the result set.
- `cursor.description`: returns the description of the fields. For the exact format of the description, see [TaosRestCursor](https://docs.taosdata.com/api/taospy/taosrest/cursor.html).
- ##### Using the RestClient class
- The `RestClient` class is a direct wrapper around the [REST API](../rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
- ```python title="Using RestClient"
- {{#include docs/examples/python/rest_client_with_req_id_example.py}}
- ```
- For a more detailed description of the `sql()` method, see [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
</TabItem>
<TabItem value="websocket" label="WebSocket 连接">


@@ -105,7 +105,7 @@ spec:
# TZ for timezone settings, we recommend to always set it.
- name: TZ
value: "Asia/Shanghai"
- # TAOS_ prefix will configured in taos.cfg, strip prefix and camelCase.
+ # Environment variables with prefix TAOS_ will be parsed and converted into corresponding parameter in taos.cfg. For example, serverPort in taos.cfg should be configured by TAOS_SERVER_PORT when using K8S to deploy
- name: TAOS_SERVER_PORT
value: "6030"
# Must set if you want a cluster.


@@ -53,7 +53,7 @@ database_option: {
- 1: one-stage compression.
- 2: two-stage compression.
- DURATION: the time span of the data stored in a data file. It can be given with a unit, e.g. DURATION 100h or DURATION 10d; the supported units are m (minutes), h (hours) and d (days). Without a unit the default is days, so DURATION 50 means 50 days.
- - WAL_FSYNC_PERIOD: when the WAL parameter is set to 2, the period at which data is flushed to disk. Default 3000, in milliseconds. The minimum is 0, meaning every write is flushed immediately; the maximum is 180000, i.e. three minutes.
+ - WAL_FSYNC_PERIOD: when the WAL_LEVEL parameter is set to 2, sets the period at which data is flushed to disk. Default 3000, in milliseconds. The minimum is 0, meaning every write is flushed immediately; the maximum is 180000, i.e. three minutes.
- MAXROWS: the maximum number of records in a file block; default 4096.
- MINROWS: the minimum number of records in a file block; default 100.
- KEEP: the number of days data files are kept. Default 3650; range [1, 365000]; it must be at least 3 times the DURATION value. The database automatically deletes data older than KEEP. KEEP can be given with a unit, e.g. KEEP 100h or KEEP 10d (supported units: m, h, d); without a unit the default is days, e.g. KEEP 50. The enterprise edition supports [multi-level storage](https://docs.taosdata.com/tdinternal/arch/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8), so several keep values can be set (comma separated, at most 3, with keep 0 <= keep 1 <= keep 2, e.g. KEEP 100h,100d,3650d); the community edition does not support multi-level storage (even if several values are configured they do not take effect, and KEEP uses the largest one).


@@ -5,7 +5,9 @@ description: Detailed syntax for writing data
---
## Insert Syntax
+ Writing records supports two syntaxes, the normal syntax and the super table syntax. In the normal syntax, the table name immediately following INSERT INTO is a subtable name or a regular table name. In the super table syntax, the table name immediately following INSERT INTO is a super table name.
+ ### Normal Syntax
```sql
INSERT INTO
tb_name
@@ -20,6 +22,15 @@
INSERT INTO tb_name [(field1_name, ...)] subquery
```
+ ### Super Table Syntax
+ ```sql
+ INSERT INTO
+ stb1_name [(field1_name, ...)]
+ VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
+ [stb2_name [(field1_name, ...)]
+ VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
+ ...];
+ ```
**About timestamps**
@@ -32,26 +43,34 @@ INSERT INTO tb_name [(field1_name, ...)] subquery
**Syntax notes**
- 1. The USING clause is the auto-create-table syntax. If you are not sure whether a table exists, you can create it automatically while inserting; if it already exists, no new table is created. Auto-creation requires a super table as the template together with the TAGS values of the data table; you may specify only some of the TAGS columns, and the unspecified ones are set to NULL.
- 2. You can specify the columns to insert into; columns that are not specified are filled with NULL by the database.
- 3. The VALUES clause specifies one or more rows of data to insert.
- 4. The FILE clause indicates that the data comes from a CSV file (comma separated, each value enclosed in single quotes); the CSV file needs no header.
- 5. Both `INSERT ... VALUES` and `INSERT ... FILE` statements can insert data into multiple tables in a single INSERT statement.
- 6. The INSERT statement is fully parsed before execution, so for a statement like the following, it can no longer happen that the data is invalid but the table is still created:
+ 1. You can specify the columns to insert into; columns that are not specified are filled with NULL by the database.
+ 2. The VALUES clause specifies one or more rows of data to insert.
+ 3. The FILE clause indicates that the data comes from a CSV file (comma separated, each value enclosed in single quotes); the CSV file needs no header.
+ 4. Both `INSERT ... VALUES` and `INSERT ... FILE` statements can insert data into multiple tables in a single INSERT statement.
+ 5. The INSERT statement is fully parsed before execution, so for a statement like the following, it can no longer happen that the data is invalid but the table is still created:
```sql
INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a');
```
- 7. When inserting into multiple subtables, it is still possible for some of the data to fail while the rest is written successfully. This is because the subtables may reside on different vnodes: after fully parsing the INSERT statement, the client sends the data to each vnode involved, and every vnode performs its write independently. If one vnode fails to write (for example because of a network problem or disk failure), writes on the other vnodes are not affected.
- 8. You can use the `INSERT ... subquery` statement to insert data from TDengine into a specified table. subquery can be an arbitrary query statement. This syntax can only be used with subtables and regular tables and does not support automatic table creation.
+ 6. When inserting into multiple subtables, it is still possible for some of the data to fail while the rest is written successfully. This is because the subtables may reside on different vnodes: after fully parsing the INSERT statement, the client sends the data to each vnode involved, and every vnode performs its write independently. If one vnode fails to write (for example because of a network problem or disk failure), writes on the other vnodes are not affected.
+ **Normal syntax notes**
+ 1. The USING clause is the auto-create-table syntax. If you are not sure whether a table exists, you can create it automatically while inserting; if it already exists, no new table is created. Auto-creation requires a super table as the template together with the TAGS values of the data table; you may specify only some of the TAGS columns, and the unspecified ones are set to NULL.
+ 2. You can use the `INSERT ... subquery` statement to insert data from TDengine into a specified table. subquery can be an arbitrary query statement. This syntax can only be used with subtables and regular tables and does not support automatic table creation.
+ **Super table syntax notes**
+ 1. The tbname column must be specified in the field_name list, otherwise an error is reported. The tbname column is the subtable name, of string type; its characters need no escaping and must not contain a dot '.'.
+ 2. Tag columns are supported in the field_name list. If the subtable already exists, specifying tag values does not modify its tags; if it does not exist, the subtable is created with the specified tag values, and if no tag column is specified, all tag values are set to NULL.
+ 3. Parameter binding is not supported.
## Insert a Record
Specify the name of an already created subtable and provide one or more rows of data with the VALUES keyword to write the data into the database. For example, the following statement writes one row:
@@ -134,3 +153,12 @@ INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/c
INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv'
d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv';
```
+ ## Super Table Syntax
+ Automatically create tables, with the table name given by the tbname column:
+ ```sql
+ INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase)
+ values('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 219, 0.32)
+ values('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:35.779', 10.15, 217, 0.33)
+ values('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 217, 0.33)
+ ```


@@ -540,6 +540,7 @@ TO_CHAR(ts, format_str_literal)
- Content in the format that cannot be matched to any rule is output as-is. To keep parts that would otherwise be converted from being converted, surround them with double quotes, e.g. `to_char(ts, 'yyyy-mm-dd "is formated by yyyy-mm-dd"')`. To output a double quote, put a backslash before it, e.g. `to_char(ts, '\"yyyy-mm-dd\"')` outputs `"2023-10-10"`.
- For formats whose output is digits, such as `YYYY` and `DD`, uppercase and lowercase have the same meaning, i.e. `yyyy` and `YYYY` are interchangeable.
- It is recommended to include time zone information in the format; otherwise the default output time zone is the one configured on the server or client.
+ - The precision of the input timestamp is determined by the precision of the queried table; if no table is specified, the precision is milliseconds.
#### TO_TIMESTAMP
@@ -560,13 +561,14 @@ TO_TIMESTAMP(ts_str_literal, format_str_literal)
**Supported formats**: the same as `to_char`.
**Usage notes**:
- - If `ms`, `us` and `ns` are all specified, the resulting timestamp contains the sum of the three fields, e.g. `to_timestamp('2023-10-10 10:10:10.123.000456.000000789', 'yyyy-mm-dd hh:mi:ss.ms.us.ns')` outputs `2023-10-10 10:10:10.123456789`.
+ - If `ms`, `us` and `ns` are all specified, the resulting timestamp contains the sum of the three fields, e.g. `to_timestamp('2023-10-10 10:10:10.123.000456.000000789', 'yyyy-mm-dd hh:mi:ss.ms.us.ns')` outputs the timestamp corresponding to `2023-10-10 10:10:10.123456789`.
- `MONTH`, `MON`, `DAY`, `DY` and other formats whose output is digits are case-insensitive, e.g. in `to_timestamp('2023-JANUARY-01', 'YYYY-month-dd')`, `month` can be replaced by `MONTH` or `Month`.
- If the same component is specified multiple times, the earlier specification is overwritten, e.g. in `to_timestamp('2023-22-10-10', 'yyyy-yy-MM-dd')` the output year is `2022`.
- To avoid using an unexpected time zone during conversion, it is recommended to include time zone information in the time string, e.g. '2023-10-10 10:10:10+08'; if no time zone is specified, the default is the one configured on the server or client.
- - If the time is not fully specified, the default value is `1970-01-01 00:00:00` in the specified or default time zone, and the unspecified parts take the corresponding parts of this default.
+ - If the time is not fully specified, the default value is `1970-01-01 00:00:00` in the specified or default time zone, and the unspecified parts take the corresponding parts of this default. Formats that specify only the day of year without the day of month, such as 'yyyy-mm-DDD', are not supported yet; 'yyyy-mm-DD' is supported.
- If the format contains `AM`, `PM`, etc., the hour must use the 12-hour clock, in the range 01-12.
- `to_timestamp` has some fault tolerance and may convert successfully even when the format and the timestamp string do not match exactly, e.g. in `to_timestamp('200101/2', 'yyyyMM1/dd')` the extra `1` in the format is discarded. Extra whitespace characters (spaces, tabs, etc.) in the format and the timestamp string are ignored automatically, so `to_timestamp(' 23 年 - 1 月 - 01 日 ', 'yy 年-MM月-dd日')` converts successfully. Although fields such as `MM` normally need two digits (padded with a leading 0), a single digit also converts successfully in `to_timestamp`.
+ - The precision of the output timestamp is the same as that of the queried table; if no table is specified, the output precision is milliseconds, so the output of `select to_timestamp('2023-08-1 10:10:10.123456789', 'yyyy-mm-dd hh:mi:ss.ns')` is truncated to millisecond precision. If a nanosecond-precision table is specified, no truncation occurs, as in `select to_timestamp('2023-08-1 10:10:10.123456789', 'yyyy-mm-dd hh:mi:ss.ns') from db_ns.table_ns limit 1`.
### Time and Date Functions


@@ -44,7 +44,11 @@ window_clause: {
}
```
- The specific restrictions in the above syntax are as follows
+ Here, interval_val and sliding_val both denote time durations, and three syntactic forms are supported, for example:
+ - INTERVAL(1s, 500a) SLIDING(1s): with a time unit, where the unit is a single character: a (milliseconds), b (nanoseconds), d (days), h (hours), m (minutes), n (months), s (seconds), u (microseconds), w (weeks), y (years).
+ - INTERVAL(1000, 500) SLIDING(1000): without a time unit; the time precision of the queried database is used as the default unit, and when several databases are involved, the one with the higher precision is used.
+ - INTERVAL('1s', '500a') SLIDING('1s'): the string form with a time unit; the string must not contain spaces or any other extra characters.
### Window Clause Rules


@@ -59,4 +59,4 @@ Query OK, 9 row(s) affected (0.004763s)
## Import Using taosdump
- TDengine provides the convenient import/export tool taosdump. Data exported with taosdump from one system can be imported into another. For details, see [TDengine data backup tool: taosdump](/reference/taosdump).
+ TDengine provides the convenient import/export tool taosdump. Data exported with taosdump from one system can be imported into another. For details, refer to the taosdump documentation.


@@ -17,5 +17,4 @@ select * from <tb_name> >> data.csv;
## Export Using taosdump
- With taosdump, you can choose to export all databases, a single database, or a single table in a database, all data or data within a time range, or even just the table definitions. For details, see:
+ With taosdump, you can choose to export all databases, a single database, or a single table in a database, all data or data within a time range, or even just the table definitions. For details, refer to the taosdump documentation.
- [TDengine data backup tool: taosdump](/reference/taosdump).


@@ -218,7 +218,7 @@ docker run -d \
### Importing the Dashboard
- On the data source configuration page, you can import the TDinsight dashboard for this data source as a monitoring visualization tool for the TDengine cluster. If the TDengine server is version 3.0, choose `TDinsight for 3.x` to import. Note that TDinsight for 3.x requires taoskeeper to be configured and running; see the [TDinsight user manual](/reference/tdinsight/) for instructions.
+ On the data source configuration page, you can import the TDinsight dashboard for this data source as a monitoring visualization tool for the TDengine cluster. If the TDengine server is version 3.0, choose `TDinsight for 3.x` to import. Note that TDinsight for 3.x requires taoskeeper to be configured and running.
![TDengine Database Grafana plugine import dashboard](./import_dashboard.webp)


@@ -0,0 +1,69 @@
---
title: TSZ Compression Algorithm
description: TDengine's algorithm for efficient compression of floating-point numbers
---

The TSZ compression algorithm is an optional compression algorithm that TDengine provides for floating-point data types. It covers the full range from lossy to lossless compression. Compared with the default compression algorithm, TSZ achieves a higher compression ratio; even in lossless mode its ratio is about twice that of the default algorithm.

## Suitable Scenarios

- TSZ compresses by means of data prediction, so it is better suited to data that changes with a regular pattern.
- TSZ compression takes longer, so it is a good choice when the server has plenty of idle CPU and storage space is limited.

## Usage Steps

- Requires TDengine 3.2.0.0 or later.
- Enable the option.

Add the following to the taos.cfg configuration to enable the TSZ compression algorithm; once enabled, it replaces the default algorithm.

The following setting applies the algorithm to both float and double columns; you can also configure only one of the two types.

```sql
lossyColumns float|double
```

- The configuration takes effect after the service is restarted.
- The taosd log prints the following line, indicating that the feature is active:

```sql
02/22 10:49:27.607990 00002933 UTL lossyColumns float|double
```

## Configuration Parameters

### fPrecision

FLOAT type precision control:

| Attribute | Description |
| --------- | ----------- |
| Scope | Server only |
| Meaning | Sets the compression precision for float values |
| Value range | 0.1 ~ 0.00000001 |
| Default | 0.00000001 |
| Note | The mantissa of floating-point values smaller than this value is truncated |

### dPrecision

DOUBLE type precision control:

| Attribute | Description |
| --------- | ----------- |
| Scope | Server only |
| Meaning | Sets the compression precision for double values |
| Value range | 0.1 ~ 0.0000000000000001 |
| Default | 0.0000000000000001 |
| Note | The mantissa of floating-point values smaller than this value is truncated |

### ifAdtFse

Optional FSE algorithm within TSZ compression; the default is HUFFMAN:

| Attribute | Description |
| --------- | ----------- |
| Scope | Server only |
| Meaning | Replaces the HUFFMAN algorithm with FSE; FSE compresses faster but decompresses slightly slower, so choose it when compression speed matters most |
| Value range | 0: off, 1: on |
| Default | 0 (off) |
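For example, a hedged taos.cfg sketch that combines the options above (all values are illustrative, shown here in the same fenced style the page uses for configuration lines):

```sql
lossyColumns float|double
fPrecision   0.00000001
dPrecision   0.0000000000000001
ifAdtFse     0
```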
## Notes

- If TSZ is enabled and you later roll back to a version earlier than 3.2.0.0, data stored in the new format cannot be recognized.


@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://www.taosdata.com/all-do
import Release from "/components/ReleaseV3";
+ ## 3.2.1.0
+ <Release type="tdengine" version="3.2.1.0" />
## 3.2.0.0
<Release type="tdengine" version="3.2.0.0" />


@@ -67,7 +67,7 @@
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
- <version>3.0.0</version>
+ <version>3.2.7</version>
<!-- <scope>system</scope>-->
<!-- <systemPath>${project.basedir}/src/main/resources/lib/taos-jdbcdriver-2.0.15-dist.jar</systemPath>-->
</dependency>


@@ -16,7 +16,7 @@
#ifndef _TD_VND_COS_H_
#define _TD_VND_COS_H_
- #include "vnd.h"
+ #include "os.h"
#ifdef __cplusplus
extern "C" {
@@ -24,9 +24,12 @@ extern "C" {
#define S3_BLOCK_CACHE
+ extern int8_t tsS3StreamEnabled;
extern int8_t tsS3Enabled;
extern int32_t tsS3BlockSize;
extern int32_t tsS3BlockCacheSize;
+ extern int32_t tsS3PageCacheSize;
+ extern int32_t tsS3UploadDelaySec;
int32_t s3Init();
void s3CleanUp();
@@ -36,7 +39,8 @@ void s3DeleteObjectsByPrefix(const char *prefix);
void s3DeleteObjects(const char *object_name[], int nobject);
bool s3Exists(const char *object_name);
bool s3Get(const char *object_name, const char *path);
- int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t size, uint8_t **ppBlock);
+ int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t size, bool check, uint8_t **ppBlock);
+ int32_t s3GetObjectsByPrefix(const char *prefix, const char *path);
void s3EvictCache(const char *path, long object_size);
long s3Size(const char *object_name);

include/common/rsync.h (new file, 24 lines)

@@ -0,0 +1,24 @@
//
// Created by mingming wanng on 2023/11/2.
//
#ifndef TDENGINE_RSYNC_H
#define TDENGINE_RSYNC_H
#ifdef __cplusplus
extern "C" {
#endif
#include "tarray.h"
void stopRsync();
void startRsync();
int uploadRsync(char* id, char* path);
int downloadRsync(char* id, char* path);
int deleteRsync(char* id);
#ifdef __cplusplus
}
#endif
#endif // TDENGINE_RSYNC_H


@@ -49,6 +49,7 @@ extern "C" {
#define TSDB_INS_TABLE_STREAMS "ins_streams"
#define TSDB_INS_TABLE_STREAM_TASKS "ins_stream_tasks"
#define TSDB_INS_TABLE_USER_PRIVILEGES "ins_user_privileges"
+ #define TSDB_INS_TABLE_VIEWS "ins_views"
#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema"
#define TSDB_PERFS_TABLE_SMAS "perf_smas"


@@ -42,7 +42,8 @@ typedef enum {
TSDB_TEMP_TABLE = 4, // temp table created by nest query
TSDB_SYSTEM_TABLE = 5,
TSDB_TSMA_TABLE = 6, // time-range-wise sma
- TSDB_TABLE_MAX = 7
+ TSDB_VIEW_TABLE = 7,
+ TSDB_TABLE_MAX = 8
} ETableType;
typedef enum {


@@ -249,6 +249,7 @@ typedef struct SQueryTableDataCond {
SColumnInfo* colList;
int32_t* pSlotList; // the column output destation slot, and it may be null
int32_t type; // data block load type:
+ bool skipRollup;
STimeWindow twindows;
int64_t startVersion;
int64_t endVersion;
@@ -364,6 +365,11 @@ typedef struct SSortExecInfo {
int32_t readBytes; // read io bytes
} SSortExecInfo;
+ typedef struct SNonSortExecInfo {
+ int32_t blkNums;
+ } SNonSortExecInfo;
typedef struct STUidTagInfo {
char* name;
uint64_t uid;


@@ -44,10 +44,6 @@ extern int32_t tsNumOfSupportVnodes;
extern int32_t tsMaxShellConns;
extern int32_t tsShellActivityTimer;
extern int32_t tsCompressMsgSize;
- extern int32_t tsCompressColData;
- extern int32_t tsMaxNumOfDistinctResults;
- extern int32_t tsCompatibleModel;
- extern bool tsPrintAuth;
extern int64_t tsTickPerMin[3];
extern int64_t tsTickPerHour[3];
extern int32_t tsCountAlwaysReturnValue;
@@ -79,8 +75,12 @@ extern int32_t tsElectInterval;
extern int32_t tsHeartbeatInterval;
extern int32_t tsHeartbeatTimeout;
- // vnode
+ // snode
- extern int64_t tsVndCommitMaxIntervalMs;
+ extern int32_t tsRsyncPort;
+ extern char tsCheckpointBackupDir[];
+ // vnode checkpoint
+ extern char tsSnodeAddress[]; //127.0.0.1:873
// mnode
extern int64_t tsMndSdbWriteDelta;
@@ -105,8 +105,6 @@ extern bool tsMonitorComp;
// audit
extern bool tsEnableAudit;
- extern char tsAuditFqdn[];
- extern uint16_t tsAuditPort;
extern bool tsEnableAuditCreateTable;
// telem
@@ -195,7 +193,8 @@ extern int64_t tsWalFsyncDataSizeLimit;
// internal
extern int32_t tsTransPullupInterval;
extern int32_t tsMqRebalanceInterval;
- extern int32_t tsStreamCheckpointTickInterval;
+ extern int32_t tsStreamCheckpointInterval;
+ extern float tsSinkDataRate;
extern int32_t tsStreamNodeCheckInterval;
extern int32_t tsTtlUnit;
extern int32_t tsTtlPushIntervalSec;
@@ -204,9 +203,6 @@ extern int32_t tsTrimVDbIntervalSec;
extern int32_t tsGrantHBInterval;
extern int32_t tsUptimeInterval;
- extern int32_t tsRpcRetryLimit;
- extern int32_t tsRpcRetryInterval;
extern bool tsDisableStream;
extern int64_t tsStreamBufferSize;
extern bool tsFilterScalarMode;
@@ -221,13 +217,13 @@ int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDi
int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile, char *apolloUrl, SArray *pArgs,
bool tsc);
void taosCleanupCfg();
- void taosCfgDynamicOptions(const char *option, const char *value);
+ int32_t taosCfgDynamicOptions(SConfig *pCfg, char *name, bool forServer);
struct SConfig *taosGetCfg();
void taosSetAllDebugFlag(int32_t flag, bool rewrite);
void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite);
+ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name);
void taosLocalCfgForbiddenToChange(char *name, bool *forbidden);
int8_t taosGranted();

View File

@ -31,6 +31,8 @@ extern "C" {
#endif #endif
#define GRANT_HEART_BEAT_MIN 2 #define GRANT_HEART_BEAT_MIN 2
#define GRANT_ACTIVE_CODE "activeCode"
#define GRANT_C_ACTIVE_CODE "cActiveCode"
typedef enum { typedef enum {
TSDB_GRANT_ALL, TSDB_GRANT_ALL,
@ -50,6 +52,11 @@ typedef enum {
TSDB_GRANT_TABLE, TSDB_GRANT_TABLE,
} EGrantType; } EGrantType;
typedef struct {
int64_t grantedTime;
int64_t connGrantedTime;
} SGrantedInfo;
int32_t grantCheck(EGrantType grant); int32_t grantCheck(EGrantType grant);
int32_t grantAlterActiveCode(int32_t did, const char* old, const char* newer, char* out, int8_t type); int32_t grantAlterActiveCode(int32_t did, const char* old, const char* newer, char* out, int8_t type);

View File

@ -107,6 +107,8 @@ enum {
HEARTBEAT_KEY_DBINFO, HEARTBEAT_KEY_DBINFO,
HEARTBEAT_KEY_STBINFO, HEARTBEAT_KEY_STBINFO,
HEARTBEAT_KEY_TMQ, HEARTBEAT_KEY_TMQ,
HEARTBEAT_KEY_DYN_VIEW,
HEARTBEAT_KEY_VIEWINFO,
}; };
typedef enum _mgmt_table { typedef enum _mgmt_table {
@ -141,6 +143,7 @@ typedef enum _mgmt_table {
TSDB_MGMT_TABLE_APPS, TSDB_MGMT_TABLE_APPS,
TSDB_MGMT_TABLE_STREAM_TASKS, TSDB_MGMT_TABLE_STREAM_TASKS,
TSDB_MGMT_TABLE_PRIVILEGES, TSDB_MGMT_TABLE_PRIVILEGES,
TSDB_MGMT_TABLE_VIEWS,
TSDB_MGMT_TABLE_MAX, TSDB_MGMT_TABLE_MAX,
} EShowType; } EShowType;
@ -168,26 +171,12 @@ typedef enum _mgmt_table {
#define TSDB_ALTER_USER_PASSWD 0x1 #define TSDB_ALTER_USER_PASSWD 0x1
#define TSDB_ALTER_USER_SUPERUSER 0x2 #define TSDB_ALTER_USER_SUPERUSER 0x2
#define TSDB_ALTER_USER_ADD_READ_DB 0x3 #define TSDB_ALTER_USER_ENABLE 0x3
#define TSDB_ALTER_USER_REMOVE_READ_DB 0x4 #define TSDB_ALTER_USER_SYSINFO 0x4
#define TSDB_ALTER_USER_ADD_WRITE_DB 0x5 #define TSDB_ALTER_USER_ADD_PRIVILEGES 0x5
#define TSDB_ALTER_USER_REMOVE_WRITE_DB 0x6 #define TSDB_ALTER_USER_DEL_PRIVILEGES 0x6
#define TSDB_ALTER_USER_ADD_ALL_DB 0x7 #define TSDB_ALTER_USER_ADD_WHITE_LIST 0x7
#define TSDB_ALTER_USER_REMOVE_ALL_DB 0x8 #define TSDB_ALTER_USER_DROP_WHITE_LIST 0x8
#define TSDB_ALTER_USER_ENABLE 0x9
#define TSDB_ALTER_USER_SYSINFO 0xA
#define TSDB_ALTER_USER_ADD_SUBSCRIBE_TOPIC 0xB
#define TSDB_ALTER_USER_REMOVE_SUBSCRIBE_TOPIC 0xC
#define TSDB_ALTER_USER_ADD_READ_TABLE 0xD
#define TSDB_ALTER_USER_REMOVE_READ_TABLE 0xE
#define TSDB_ALTER_USER_ADD_WRITE_TABLE 0xF
#define TSDB_ALTER_USER_REMOVE_WRITE_TABLE 0x10
#define TSDB_ALTER_USER_ADD_ALL_TABLE 0x11
#define TSDB_ALTER_USER_REMOVE_ALL_TABLE 0x12
#define TSDB_ALTER_USER_ADD_WHITE_LIST 0x13
#define TSDB_ALTER_USER_DROP_WHITE_LIST 0x14
#define TSDB_ALTER_USER_PRIVILEGES 0x2
#define TSDB_KILL_MSG_LEN 30 #define TSDB_KILL_MSG_LEN 30
@ -251,6 +240,7 @@ typedef enum ENodeType {
QUERY_NODE_CASE_WHEN, QUERY_NODE_CASE_WHEN,
QUERY_NODE_EVENT_WINDOW, QUERY_NODE_EVENT_WINDOW,
QUERY_NODE_HINT, QUERY_NODE_HINT,
QUERY_NODE_VIEW,
// Statement nodes are used in parser and planner module. // Statement nodes are used in parser and planner module.
QUERY_NODE_SET_OPERATOR = 100, QUERY_NODE_SET_OPERATOR = 100,
@ -333,6 +323,8 @@ typedef enum ENodeType {
QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT, QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT,
QUERY_NODE_SHOW_VNODES_STMT, QUERY_NODE_SHOW_VNODES_STMT,
QUERY_NODE_SHOW_USER_PRIVILEGES_STMT, QUERY_NODE_SHOW_USER_PRIVILEGES_STMT,
QUERY_NODE_SHOW_VIEWS_STMT,
QUERY_NODE_SHOW_CREATE_VIEW_STMT,
QUERY_NODE_SHOW_CREATE_DATABASE_STMT, QUERY_NODE_SHOW_CREATE_DATABASE_STMT,
QUERY_NODE_SHOW_CREATE_TABLE_STMT, QUERY_NODE_SHOW_CREATE_TABLE_STMT,
QUERY_NODE_SHOW_CREATE_STABLE_STMT, QUERY_NODE_SHOW_CREATE_STABLE_STMT,
@ -355,6 +347,8 @@ typedef enum ENodeType {
QUERY_NODE_RESTORE_VNODE_STMT, QUERY_NODE_RESTORE_VNODE_STMT,
QUERY_NODE_PAUSE_STREAM_STMT, QUERY_NODE_PAUSE_STREAM_STMT,
QUERY_NODE_RESUME_STREAM_STMT, QUERY_NODE_RESUME_STREAM_STMT,
QUERY_NODE_CREATE_VIEW_STMT,
QUERY_NODE_DROP_VIEW_STMT,
// logic plan node // logic plan node
QUERY_NODE_LOGIC_PLAN_SCAN = 1000, QUERY_NODE_LOGIC_PLAN_SCAN = 1000,
@ -451,7 +445,7 @@ typedef struct SRetention {
int8_t keepUnit; int8_t keepUnit;
} SRetention; } SRetention;
#define RETENTION_VALID(r) (((r)->freq > 0) && ((r)->keep > 0)) #define RETENTION_VALID(l, r) ((((l) == 0 && (r)->freq >= 0) || ((r)->freq > 0)) && ((r)->keep > 0))
#pragma pack(push, 1) #pragma pack(push, 1)
@ -943,6 +937,7 @@ typedef struct {
int8_t superUser; int8_t superUser;
int8_t sysInfo; int8_t sysInfo;
int8_t enable; int8_t enable;
int8_t isView;
char user[TSDB_USER_LEN]; char user[TSDB_USER_LEN];
char pass[TSDB_USET_PASSWORD_LEN]; char pass[TSDB_USET_PASSWORD_LEN];
char objname[TSDB_DB_FNAME_LEN]; // db or topic char objname[TSDB_DB_FNAME_LEN]; // db or topic
@ -951,6 +946,7 @@ typedef struct {
int32_t tagCondLen; int32_t tagCondLen;
int32_t numIpRanges; int32_t numIpRanges;
SIpV4Range* pIpRanges; SIpV4Range* pIpRanges;
int64_t privileges;
int32_t sqlLen; int32_t sqlLen;
char* sql; char* sql;
} SAlterUserReq; } SAlterUserReq;
@ -979,6 +975,10 @@ typedef struct {
SHashObj* writeDbs; SHashObj* writeDbs;
SHashObj* readTbs; SHashObj* readTbs;
SHashObj* writeTbs; SHashObj* writeTbs;
SHashObj* alterTbs;
SHashObj* readViews;
SHashObj* writeViews;
SHashObj* alterViews;
SHashObj* useDbs; SHashObj* useDbs;
int64_t whiteListVer; int64_t whiteListVer;
} SGetUserAuthRsp; } SGetUserAuthRsp;
@ -1568,9 +1568,7 @@ typedef struct {
typedef struct { typedef struct {
int32_t id; int32_t id;
int8_t isMnode; int8_t isMnode;
#ifdef TD_GRANT_HB_OPTIMIZE
int8_t offlineReason; int8_t offlineReason;
#endif
SEp ep; SEp ep;
char active[TSDB_ACTIVE_KEY_LEN]; char active[TSDB_ACTIVE_KEY_LEN];
char connActive[TSDB_CONN_ACTIVE_KEY_LEN]; char connActive[TSDB_CONN_ACTIVE_KEY_LEN];
@ -1811,6 +1809,15 @@ int32_t tSerializeSSTbHbRsp(void* buf, int32_t bufLen, SSTbHbRsp* pRsp);
int32_t tDeserializeSSTbHbRsp(void* buf, int32_t bufLen, SSTbHbRsp* pRsp); int32_t tDeserializeSSTbHbRsp(void* buf, int32_t bufLen, SSTbHbRsp* pRsp);
void tFreeSSTbHbRsp(SSTbHbRsp* pRsp); void tFreeSSTbHbRsp(SSTbHbRsp* pRsp);
typedef struct {
SArray* pViewRsp; // Array of SViewMetaRsp*;
} SViewHbRsp;
int32_t tSerializeSViewHbRsp(void* buf, int32_t bufLen, SViewHbRsp* pRsp);
int32_t tDeserializeSViewHbRsp(void* buf, int32_t bufLen, SViewHbRsp* pRsp);
void tFreeSViewHbRsp(SViewHbRsp* pRsp);
typedef struct { typedef struct {
int32_t numOfTables; int32_t numOfTables;
int32_t numOfVgroup; int32_t numOfVgroup;
@ -3767,6 +3774,7 @@ typedef struct {
int64_t suid; int64_t suid;
SArray* deleteReqs; // SArray<SSingleDeleteReq> SArray* deleteReqs; // SArray<SSingleDeleteReq>
int64_t ctimeMs; // fill by vnode int64_t ctimeMs; // fill by vnode
int8_t level; // 0: tsdb (default), 1: rsma1, 2: rsma2
} SBatchDeleteReq; } SBatchDeleteReq;
int32_t tEncodeSBatchDeleteReq(SEncoder* pCoder, const SBatchDeleteReq* pReq); int32_t tEncodeSBatchDeleteReq(SEncoder* pCoder, const SBatchDeleteReq* pReq);
@ -3883,6 +3891,58 @@ typedef struct {
}; };
} SPackedData; } SPackedData;
typedef struct {
char fullname[TSDB_VIEW_FNAME_LEN];
char name[TSDB_VIEW_NAME_LEN];
char dbFName[TSDB_DB_FNAME_LEN];
char* querySql;
char* sql;
int8_t orReplace;
int8_t precision;
int32_t numOfCols;
SSchema* pSchema;
} SCMCreateViewReq;
int32_t tSerializeSCMCreateViewReq(void* buf, int32_t bufLen, const SCMCreateViewReq* pReq);
int32_t tDeserializeSCMCreateViewReq(void* buf, int32_t bufLen, SCMCreateViewReq* pReq);
void tFreeSCMCreateViewReq(SCMCreateViewReq* pReq);
typedef struct {
char fullname[TSDB_VIEW_FNAME_LEN];
char name[TSDB_VIEW_NAME_LEN];
char dbFName[TSDB_DB_FNAME_LEN];
char* sql;
int8_t igNotExists;
} SCMDropViewReq;
int32_t tSerializeSCMDropViewReq(void* buf, int32_t bufLen, const SCMDropViewReq* pReq);
int32_t tDeserializeSCMDropViewReq(void* buf, int32_t bufLen, SCMDropViewReq* pReq);
void tFreeSCMDropViewReq(SCMDropViewReq* pReq);
typedef struct {
char fullname[TSDB_VIEW_FNAME_LEN];
} SViewMetaReq;
int32_t tSerializeSViewMetaReq(void* buf, int32_t bufLen, const SViewMetaReq* pReq);
int32_t tDeserializeSViewMetaReq(void* buf, int32_t bufLen, SViewMetaReq* pReq);
typedef struct {
char name[TSDB_VIEW_NAME_LEN];
char dbFName[TSDB_DB_FNAME_LEN];
char* user;
uint64_t dbId;
uint64_t viewId;
char* querySql;
int8_t precision;
int8_t type;
int32_t version;
int32_t numOfCols;
SSchema* pSchema;
} SViewMetaRsp;
int32_t tSerializeSViewMetaRsp(void* buf, int32_t bufLen, const SViewMetaRsp* pRsp);
int32_t tDeserializeSViewMetaRsp(void* buf, int32_t bufLen, SViewMetaRsp* pRsp);
void tFreeSViewMetaRsp(SViewMetaRsp* pRsp);
#pragma pack(pop) #pragma pack(pop)
#ifdef __cplusplus #ifdef __cplusplus
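
A sketch (not from the diff) of how the new create-view request above might be filled and serialized; the two-pass sizing call, the taosStrdup/taosMemoryMalloc helpers, and all names and values are assumptions.

SCMCreateViewReq req = {0};
snprintf(req.fullname, sizeof(req.fullname), "1.test_db.v1");        // hypothetical acct.db.view name
snprintf(req.name, sizeof(req.name), "v1");
snprintf(req.dbFName, sizeof(req.dbFName), "1.test_db");
req.querySql = taosStrdup("select ts, speed from test_db.meters");   // assumed strdup helper
req.orReplace = 1;

int32_t len = tSerializeSCMCreateViewReq(NULL, 0, &req);  // assumed convention: NULL buffer returns the required size
void*   buf = taosMemoryMalloc(len);                      // assumed allocator helper
tSerializeSCMCreateViewReq(buf, len, &req);
// ... send buf as a TDMT_MND_CREATE_VIEW message, then release the request
tFreeSCMCreateViewReq(&req);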

View File

@ -189,6 +189,9 @@ enum { // WARN: new msg should be appended to segment tail
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_NODECHANGE_CHECK, "stream-nodechange-check", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_STREAM_NODECHANGE_CHECK, "stream-nodechange-check", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TRIM_DB_TIMER, "trim-db-tmr", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_TRIM_DB_TIMER, "trim-db-tmr", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_GRANT_NOTIFY, "grant-notify", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_GRANT_NOTIFY, "grant-notify", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_VIEW, "create-view", SCMCreateViewReq, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_DROP_VIEW, "drop-view", SCMDropViewReq, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_VIEW_META, "view-meta", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_VND_MSG) TD_NEW_MSG_SEG(TDMT_VND_MSG)

View File

@ -118,6 +118,13 @@ int32_t taosChar2Ts(const char* format, SArray** formats, const char* tsStr, int
void TEST_ts2char(const char* format, int64_t ts, int32_t precision, char* out, int32_t outLen); void TEST_ts2char(const char* format, int64_t ts, int32_t precision, char* out, int32_t outLen);
int32_t TEST_char2ts(const char* format, int64_t* ts, int32_t precision, const char* tsStr); int32_t TEST_char2ts(const char* format, int64_t* ts, int32_t precision, const char* tsStr);
/// @brief get the offset in seconds from UTC (the zero timezone) to the input timezone
/// for a +XX timezone the offset is negative
/// @param tzStr timezone string, e.g. +0800, -0830, -08
/// @param offset offset in seconds, e.g. +08 gives -28800, -01 gives 3600
/// @return 0 on success, non-zero on failure
int32_t offsetOfTimezone(char* tzStr, int64_t* offset);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif
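
A small sketch of the documented behaviour of offsetOfTimezone() above; the expected values simply restate the examples from the doc comment.

int64_t offset = 0;
if (offsetOfTimezone("+0800", &offset) == 0) {
  // per the doc comment, offset is expected to be -28800 (+08:00 is east of UTC)
}
if (offsetOfTimezone("-01", &offset) == 0) {
  // per the doc comment, offset is expected to be 3600
}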

View File

@ -187,180 +187,183 @@
#define TK_SUBSCRIPTIONS 168 #define TK_SUBSCRIPTIONS 168
#define TK_VNODES 169 #define TK_VNODES 169
#define TK_ALIVE 170 #define TK_ALIVE 170
#define TK_NORMAL 171 #define TK_VIEWS 171
#define TK_CHILD 172 #define TK_VIEW 172
#define TK_LIKE 173 #define TK_NORMAL 173
#define TK_TBNAME 174 #define TK_CHILD 174
#define TK_QTAGS 175 #define TK_LIKE 175
#define TK_AS 176 #define TK_TBNAME 176
#define TK_SYSTEM 177 #define TK_QTAGS 177
#define TK_INDEX 178 #define TK_AS 178
#define TK_FUNCTION 179 #define TK_SYSTEM 179
#define TK_INTERVAL 180 #define TK_INDEX 180
#define TK_COUNT 181 #define TK_FUNCTION 181
#define TK_LAST_ROW 182 #define TK_INTERVAL 182
#define TK_META 183 #define TK_COUNT 183
#define TK_ONLY 184 #define TK_LAST_ROW 184
#define TK_TOPIC 185 #define TK_META 185
#define TK_CONSUMER 186 #define TK_ONLY 186
#define TK_GROUP 187 #define TK_TOPIC 187
#define TK_DESC 188 #define TK_CONSUMER 188
#define TK_DESCRIBE 189 #define TK_GROUP 189
#define TK_RESET 190 #define TK_DESC 190
#define TK_QUERY 191 #define TK_DESCRIBE 191
#define TK_CACHE 192 #define TK_RESET 192
#define TK_EXPLAIN 193 #define TK_QUERY 193
#define TK_ANALYZE 194 #define TK_CACHE 194
#define TK_VERBOSE 195 #define TK_EXPLAIN 195
#define TK_NK_BOOL 196 #define TK_ANALYZE 196
#define TK_RATIO 197 #define TK_VERBOSE 197
#define TK_NK_FLOAT 198 #define TK_NK_BOOL 198
#define TK_OUTPUTTYPE 199 #define TK_RATIO 199
#define TK_AGGREGATE 200 #define TK_NK_FLOAT 200
#define TK_BUFSIZE 201 #define TK_OUTPUTTYPE 201
#define TK_LANGUAGE 202 #define TK_AGGREGATE 202
#define TK_REPLACE 203 #define TK_BUFSIZE 203
#define TK_STREAM 204 #define TK_LANGUAGE 204
#define TK_INTO 205 #define TK_REPLACE 205
#define TK_PAUSE 206 #define TK_STREAM 206
#define TK_RESUME 207 #define TK_INTO 207
#define TK_TRIGGER 208 #define TK_PAUSE 208
#define TK_AT_ONCE 209 #define TK_RESUME 209
#define TK_WINDOW_CLOSE 210 #define TK_TRIGGER 210
#define TK_IGNORE 211 #define TK_AT_ONCE 211
#define TK_EXPIRED 212 #define TK_WINDOW_CLOSE 212
#define TK_FILL_HISTORY 213 #define TK_IGNORE 213
#define TK_UPDATE 214 #define TK_EXPIRED 214
#define TK_SUBTABLE 215 #define TK_FILL_HISTORY 215
#define TK_UNTREATED 216 #define TK_UPDATE 216
#define TK_KILL 217 #define TK_SUBTABLE 217
#define TK_CONNECTION 218 #define TK_UNTREATED 218
#define TK_TRANSACTION 219 #define TK_KILL 219
#define TK_BALANCE 220 #define TK_CONNECTION 220
#define TK_VGROUP 221 #define TK_TRANSACTION 221
#define TK_LEADER 222 #define TK_BALANCE 222
#define TK_MERGE 223 #define TK_VGROUP 223
#define TK_REDISTRIBUTE 224 #define TK_LEADER 224
#define TK_SPLIT 225 #define TK_MERGE 225
#define TK_DELETE 226 #define TK_REDISTRIBUTE 226
#define TK_INSERT 227 #define TK_SPLIT 227
#define TK_NULL 228 #define TK_DELETE 228
#define TK_NK_QUESTION 229 #define TK_INSERT 229
#define TK_NK_ALIAS 230 #define TK_NULL 230
#define TK_NK_ARROW 231 #define TK_NK_QUESTION 231
#define TK_ROWTS 232 #define TK_NK_ALIAS 232
#define TK_QSTART 233 #define TK_NK_ARROW 233
#define TK_QEND 234 #define TK_ROWTS 234
#define TK_QDURATION 235 #define TK_QSTART 235
#define TK_WSTART 236 #define TK_QEND 236
#define TK_WEND 237 #define TK_QDURATION 237
#define TK_WDURATION 238 #define TK_WSTART 238
#define TK_IROWTS 239 #define TK_WEND 239
#define TK_ISFILLED 240 #define TK_WDURATION 240
#define TK_CAST 241 #define TK_IROWTS 241
#define TK_NOW 242 #define TK_ISFILLED 242
#define TK_TODAY 243 #define TK_CAST 243
#define TK_TIMEZONE 244 #define TK_NOW 244
#define TK_CLIENT_VERSION 245 #define TK_TODAY 245
#define TK_SERVER_VERSION 246 #define TK_TIMEZONE 246
#define TK_SERVER_STATUS 247 #define TK_CLIENT_VERSION 247
#define TK_CURRENT_USER 248 #define TK_SERVER_VERSION 248
#define TK_CASE 249 #define TK_SERVER_STATUS 249
#define TK_WHEN 250 #define TK_CURRENT_USER 250
#define TK_THEN 251 #define TK_CASE 251
#define TK_ELSE 252 #define TK_WHEN 252
#define TK_BETWEEN 253 #define TK_THEN 253
#define TK_IS 254 #define TK_ELSE 254
#define TK_NK_LT 255 #define TK_BETWEEN 255
#define TK_NK_GT 256 #define TK_IS 256
#define TK_NK_LE 257 #define TK_NK_LT 257
#define TK_NK_GE 258 #define TK_NK_GT 258
#define TK_NK_NE 259 #define TK_NK_LE 259
#define TK_MATCH 260 #define TK_NK_GE 260
#define TK_NMATCH 261 #define TK_NK_NE 261
#define TK_CONTAINS 262 #define TK_MATCH 262
#define TK_IN 263 #define TK_NMATCH 263
#define TK_JOIN 264 #define TK_CONTAINS 264
#define TK_INNER 265 #define TK_IN 265
#define TK_SELECT 266 #define TK_JOIN 266
#define TK_NK_HINT 267 #define TK_INNER 267
#define TK_DISTINCT 268 #define TK_SELECT 268
#define TK_WHERE 269 #define TK_NK_HINT 269
#define TK_PARTITION 270 #define TK_DISTINCT 270
#define TK_BY 271 #define TK_WHERE 271
#define TK_SESSION 272 #define TK_PARTITION 272
#define TK_STATE_WINDOW 273 #define TK_BY 273
#define TK_EVENT_WINDOW 274 #define TK_SESSION 274
#define TK_SLIDING 275 #define TK_STATE_WINDOW 275
#define TK_FILL 276 #define TK_EVENT_WINDOW 276
#define TK_VALUE 277 #define TK_SLIDING 277
#define TK_VALUE_F 278 #define TK_FILL 278
#define TK_NONE 279 #define TK_VALUE 279
#define TK_PREV 280 #define TK_VALUE_F 280
#define TK_NULL_F 281 #define TK_NONE 281
#define TK_LINEAR 282 #define TK_PREV 282
#define TK_NEXT 283 #define TK_NULL_F 283
#define TK_HAVING 284 #define TK_LINEAR 284
#define TK_RANGE 285 #define TK_NEXT 285
#define TK_EVERY 286 #define TK_HAVING 286
#define TK_ORDER 287 #define TK_RANGE 287
#define TK_SLIMIT 288 #define TK_EVERY 288
#define TK_SOFFSET 289 #define TK_ORDER 289
#define TK_LIMIT 290 #define TK_SLIMIT 290
#define TK_OFFSET 291 #define TK_SOFFSET 291
#define TK_ASC 292 #define TK_LIMIT 292
#define TK_NULLS 293 #define TK_OFFSET 293
#define TK_ABORT 294 #define TK_ASC 294
#define TK_AFTER 295 #define TK_NULLS 295
#define TK_ATTACH 296 #define TK_ABORT 296
#define TK_BEFORE 297 #define TK_AFTER 297
#define TK_BEGIN 298 #define TK_ATTACH 298
#define TK_BITAND 299 #define TK_BEFORE 299
#define TK_BITNOT 300 #define TK_BEGIN 300
#define TK_BITOR 301 #define TK_BITAND 301
#define TK_BLOCKS 302 #define TK_BITNOT 302
#define TK_CHANGE 303 #define TK_BITOR 303
#define TK_COMMA 304 #define TK_BLOCKS 304
#define TK_CONCAT 305 #define TK_CHANGE 305
#define TK_CONFLICT 306 #define TK_COMMA 306
#define TK_COPY 307 #define TK_CONCAT 307
#define TK_DEFERRED 308 #define TK_CONFLICT 308
#define TK_DELIMITERS 309 #define TK_COPY 309
#define TK_DETACH 310 #define TK_DEFERRED 310
#define TK_DIVIDE 311 #define TK_DELIMITERS 311
#define TK_DOT 312 #define TK_DETACH 312
#define TK_EACH 313 #define TK_DIVIDE 313
#define TK_FAIL 314 #define TK_DOT 314
#define TK_FILE 315 #define TK_EACH 315
#define TK_FOR 316 #define TK_FAIL 316
#define TK_GLOB 317 #define TK_FILE 317
#define TK_ID 318 #define TK_FOR 318
#define TK_IMMEDIATE 319 #define TK_GLOB 319
#define TK_IMPORT 320 #define TK_ID 320
#define TK_INITIALLY 321 #define TK_IMMEDIATE 321
#define TK_INSTEAD 322 #define TK_IMPORT 322
#define TK_ISNULL 323 #define TK_INITIALLY 323
#define TK_KEY 324 #define TK_INSTEAD 324
#define TK_MODULES 325 #define TK_ISNULL 325
#define TK_NK_BITNOT 326 #define TK_KEY 326
#define TK_NK_SEMI 327 #define TK_MODULES 327
#define TK_NOTNULL 328 #define TK_NK_BITNOT 328
#define TK_OF 329 #define TK_NK_SEMI 329
#define TK_PLUS 330 #define TK_NOTNULL 330
#define TK_PRIVILEGE 331 #define TK_OF 331
#define TK_RAISE 332 #define TK_PLUS 332
#define TK_RESTRICT 333 #define TK_PRIVILEGE 333
#define TK_ROW 334 #define TK_RAISE 334
#define TK_SEMI 335 #define TK_RESTRICT 335
#define TK_STAR 336 #define TK_ROW 336
#define TK_STATEMENT 337 #define TK_SEMI 337
#define TK_STRICT 338 #define TK_STAR 338
#define TK_STRING 339 #define TK_STATEMENT 339
#define TK_TIMES 340 #define TK_STRICT 340
#define TK_VALUES 341 #define TK_STRING 341
#define TK_VARIABLE 342 #define TK_TIMES 342
#define TK_VIEW 343 #define TK_VALUES 343
#define TK_WAL 344 #define TK_VARIABLE 344
#define TK_WAL 345

View File

@ -45,6 +45,7 @@ typedef struct {
*/ */
SSnode *sndOpen(const char *path, const SSnodeOpt *pOption); SSnode *sndOpen(const char *path, const SSnodeOpt *pOption);
int32_t sndInit(SSnode * pSnode);
/** /**
* @brief Stop Snode in Dnode. * @brief Stop Snode in Dnode.
* *

View File

@ -0,0 +1,18 @@
//
// Created by mingming wanng on 2023/11/15.
//
#ifndef TDENGINE_STREAM_H
#define TDENGINE_STREAM_H
#define STREAM_EXEC_EXTRACT_DATA_IN_WAL_ID (-1)
#define STREAM_EXEC_START_ALL_TASKS_ID (-2)
#define STREAM_EXEC_RESTART_ALL_TASKS_ID (-3)
typedef struct STaskUpdateEntry {
int64_t streamId;
int32_t taskId;
int32_t transId;
} STaskUpdateEntry;
#endif // TDENGINE_STREAM_H

View File

@ -37,13 +37,16 @@ enum {
CTG_DBG_DB_NUM = 1, CTG_DBG_DB_NUM = 1,
CTG_DBG_META_NUM, CTG_DBG_META_NUM,
CTG_DBG_STB_NUM, CTG_DBG_STB_NUM,
CTG_DBG_VIEW_NUM,
CTG_DBG_DB_RENT_NUM, CTG_DBG_DB_RENT_NUM,
CTG_DBG_STB_RENT_NUM, CTG_DBG_STB_RENT_NUM,
CTG_DBG_VIEW_RENT_NUM,
}; };
typedef enum { typedef enum {
AUTH_TYPE_READ = 1, AUTH_TYPE_READ = 1,
AUTH_TYPE_WRITE, AUTH_TYPE_WRITE,
AUTH_TYPE_ALTER,
AUTH_TYPE_OTHER, AUTH_TYPE_OTHER,
AUTH_TYPE_READ_OR_WRITE, AUTH_TYPE_READ_OR_WRITE,
} AUTH_TYPE; } AUTH_TYPE;
@ -51,12 +54,19 @@ typedef enum {
typedef struct SUserAuthInfo { typedef struct SUserAuthInfo {
char user[TSDB_USER_LEN]; char user[TSDB_USER_LEN];
SName tbName; SName tbName;
bool isView;
AUTH_TYPE type; AUTH_TYPE type;
} SUserAuthInfo; } SUserAuthInfo;
typedef enum {
AUTH_RES_BASIC = 0,
AUTH_RES_VIEW,
AUTH_RES_MAX_VALUE
} AUTH_RES_TYPE;
typedef struct SUserAuthRes { typedef struct SUserAuthRes {
bool pass; bool pass[AUTH_RES_MAX_VALUE];
SNode* pCond; SNode* pCond[AUTH_RES_MAX_VALUE];
} SUserAuthRes; } SUserAuthRes;
typedef struct SDbInfo { typedef struct SDbInfo {
@ -83,6 +93,7 @@ typedef struct SCatalogReq {
SArray* pTableIndex; // element is SNAME SArray* pTableIndex; // element is SNAME
SArray* pTableCfg; // element is SNAME SArray* pTableCfg; // element is SNAME
SArray* pTableTag; // element is SNAME SArray* pTableTag; // element is SNAME
SArray* pView; // element is STablesReq
bool qNodeRequired; // valid qnode bool qNodeRequired; // valid qnode
bool dNodeRequired; // valid dnode bool dNodeRequired; // valid dnode
bool svrVerRequired; bool svrVerRequired;
@ -96,6 +107,7 @@ typedef struct SMetaRes {
} SMetaRes; } SMetaRes;
typedef struct SMetaData { typedef struct SMetaData {
bool ctgFree; // needs to be freed by the catalog module
SArray* pDbVgroup; // pRes = SArray<SVgroupInfo>* SArray* pDbVgroup; // pRes = SArray<SVgroupInfo>*
SArray* pDbCfg; // pRes = SDbCfgInfo* SArray* pDbCfg; // pRes = SDbCfgInfo*
SArray* pDbInfo; // pRes = SDbInfo* SArray* pDbInfo; // pRes = SDbInfo*
@ -109,21 +121,24 @@ typedef struct SMetaData {
SArray* pTableCfg; // pRes = STableCfg* SArray* pTableCfg; // pRes = STableCfg*
SArray* pTableTag; // pRes = SArray<STagVal>* SArray* pTableTag; // pRes = SArray<STagVal>*
SArray* pDnodeList; // pRes = SArray<SEpSet>* SArray* pDnodeList; // pRes = SArray<SEpSet>*
SArray* pView; // pRes = SViewMeta*
SMetaRes* pSvrVer; // pRes = char* SMetaRes* pSvrVer; // pRes = char*
} SMetaData; } SMetaData;
typedef struct SCatalogCfg { typedef struct SCatalogCfg {
uint32_t maxTblCacheNum; uint32_t maxTblCacheNum;
uint32_t maxViewCacheNum;
uint32_t maxDBCacheNum; uint32_t maxDBCacheNum;
uint32_t maxUserCacheNum; uint32_t maxUserCacheNum;
uint32_t dbRentSec; uint32_t dbRentSec;
uint32_t stbRentSec; uint32_t stbRentSec;
uint32_t viewRentSec;
} SCatalogCfg; } SCatalogCfg;
typedef struct SSTableVersion { typedef struct SSTableVersion {
char dbFName[TSDB_DB_FNAME_LEN]; char dbFName[TSDB_DB_FNAME_LEN];
char stbName[TSDB_TABLE_NAME_LEN]; char stbName[TSDB_TABLE_NAME_LEN];
uint64_t dbId; int64_t dbId;
uint64_t suid; uint64_t suid;
int32_t sversion; int32_t sversion;
int32_t tversion; int32_t tversion;
@ -139,6 +154,20 @@ typedef struct SDbCacheInfo {
int64_t stateTs; int64_t stateTs;
} SDbCacheInfo; } SDbCacheInfo;
typedef struct SDynViewVersion {
int64_t svrBootTs;
uint64_t dynViewVer;
} SDynViewVersion;
typedef struct SViewVersion {
char dbFName[TSDB_DB_FNAME_LEN];
char viewName[TSDB_VIEW_NAME_LEN];
int64_t dbId;
uint64_t viewId;
int32_t version;
} SViewVersion;
typedef struct STbSVersion { typedef struct STbSVersion {
char* tbFName; char* tbFName;
int32_t sver; int32_t sver;
@ -307,6 +336,8 @@ int32_t catalogGetDnodeList(SCatalog* pCatalog, SRequestConnInfo* pConn, SArray*
int32_t catalogGetExpiredSTables(SCatalog* pCatalog, SSTableVersion** stables, uint32_t* num); int32_t catalogGetExpiredSTables(SCatalog* pCatalog, SSTableVersion** stables, uint32_t* num);
int32_t catalogGetExpiredViews(SCatalog* pCtg, SViewVersion** views, uint32_t* num, SDynViewVersion** dynViewVersion);
int32_t catalogGetExpiredDBs(SCatalog* pCatalog, SDbCacheInfo** dbs, uint32_t* num); int32_t catalogGetExpiredDBs(SCatalog* pCatalog, SDbCacheInfo** dbs, uint32_t* num);
int32_t catalogGetExpiredUsers(SCatalog* pCtg, SUserAuthVersion** users, uint32_t* num); int32_t catalogGetExpiredUsers(SCatalog* pCtg, SUserAuthVersion** users, uint32_t* num);
@ -343,6 +374,16 @@ SMetaData* catalogCloneMetaData(SMetaData* pData);
void catalogFreeMetaData(SMetaData* pData); void catalogFreeMetaData(SMetaData* pData);
int32_t catalogRemoveViewMeta(SCatalog* pCtg, const char* dbFName, uint64_t dbId, const char* viewName, uint64_t viewId);
int32_t catalogUpdateDynViewVer(SCatalog* pCtg, SDynViewVersion* pVer);
int32_t catalogUpdateViewMeta(SCatalog* pCtg, SViewMetaRsp* pMsg);
int32_t catalogAsyncUpdateViewMeta(SCatalog* pCtg, SViewMetaRsp* pMsg);
int32_t catalogGetViewMeta(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pViewName, STableMeta** pTableMeta);
int32_t ctgdEnableDebug(char* option, bool enable); int32_t ctgdEnableDebug(char* option, bool enable);
int32_t ctgdHandleDbgCommand(char* command); int32_t ctgdHandleDbgCommand(char* command);

View File

@ -49,6 +49,7 @@ typedef struct {
uint64_t checkpointId; uint64_t checkpointId;
bool initTableReader; bool initTableReader;
bool initTqReader; bool initTqReader;
bool skipRollup;
int32_t numOfVgroups; int32_t numOfVgroups;
void* sContext; // SSnapContext* void* sContext; // SSnapContext*
void* pStateBackend; void* pStateBackend;
@ -184,11 +185,7 @@ void qDestroyTask(qTaskInfo_t tinfo);
void qProcessRspMsg(void* parent, struct SRpcMsg* pMsg, struct SEpSet* pEpSet); void qProcessRspMsg(void* parent, struct SRpcMsg* pMsg, struct SEpSet* pEpSet);
int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, SArray* pExecInfoList /*,int32_t* resNum, SExplainExecInfo** pRes*/); int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, SArray* pExecInfoList);
int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len);
int32_t qDeserializeTaskStatus(qTaskInfo_t tinfo, const char* pInput, int32_t len);
void getNextTimeWindow(const SInterval* pInterval, STimeWindow* tw, int32_t order); void getNextTimeWindow(const SInterval* pInterval, STimeWindow* tw, int32_t order);
void getInitialStartTimeWindow(SInterval* pInterval, TSKEY ts, STimeWindow* w, bool ascQuery); void getInitialStartTimeWindow(SInterval* pInterval, TSKEY ts, STimeWindow* w, bool ascQuery);
@ -217,7 +214,7 @@ int32_t qStreamSourceScanParamForHistoryScanStep1(qTaskInfo_t tinfo, SVersionRan
int32_t qStreamSourceScanParamForHistoryScanStep2(qTaskInfo_t tinfo, SVersionRange *pVerRange, STimeWindow* pWindow); int32_t qStreamSourceScanParamForHistoryScanStep2(qTaskInfo_t tinfo, SVersionRange *pVerRange, STimeWindow* pWindow);
int32_t qStreamRecoverFinish(qTaskInfo_t tinfo); int32_t qStreamRecoverFinish(qTaskInfo_t tinfo);
int32_t qRestoreStreamOperatorOption(qTaskInfo_t tinfo); int32_t qRestoreStreamOperatorOption(qTaskInfo_t tinfo);
bool qStreamRecoverScanFinished(qTaskInfo_t tinfo); bool qStreamScanhistoryFinished(qTaskInfo_t tinfo);
int32_t qStreamInfoResetTimewindowFilter(qTaskInfo_t tinfo); int32_t qStreamInfoResetTimewindowFilter(qTaskInfo_t tinfo);
void resetTaskInfo(qTaskInfo_t tinfo); void resetTaskInfo(qTaskInfo_t tinfo);

View File

@ -119,6 +119,7 @@ typedef struct SRowBuffPos {
bool beFlushed; bool beFlushed;
bool beUsed; bool beUsed;
bool needFree; bool needFree;
bool beUpdated;
} SRowBuffPos; } SRowBuffPos;
// tq // tq
@ -387,11 +388,13 @@ typedef struct SStateStore {
int32_t (*streamStateStateAddIfNotExist)(SStreamState* pState, SSessionKey* key, char* pKeyData, int32_t keyDataLen, int32_t (*streamStateStateAddIfNotExist)(SStreamState* pState, SSessionKey* key, char* pKeyData, int32_t keyDataLen,
state_key_cmpr_fn fn, void** pVal, int32_t* pVLen); state_key_cmpr_fn fn, void** pVal, int32_t* pVLen);
int32_t (*streamStateSessionGetKeyByRange)(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey); int32_t (*streamStateSessionGetKeyByRange)(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey);
int32_t (*streamStateSessionAllocWinBuffByNextPosition)(SStreamState* pState, SStreamStateCur* pCur,
const SSessionKey* pKey, void** pVal, int32_t* pVLen);
SUpdateInfo* (*updateInfoInit)(int64_t interval, int32_t precision, int64_t watermark, bool igUp); SUpdateInfo* (*updateInfoInit)(int64_t interval, int32_t precision, int64_t watermark, bool igUp);
TSKEY (*updateInfoFillBlockData)(SUpdateInfo* pInfo, SSDataBlock* pBlock, int32_t primaryTsCol); TSKEY (*updateInfoFillBlockData)(SUpdateInfo* pInfo, SSDataBlock* pBlock, int32_t primaryTsCol);
bool (*updateInfoIsUpdated)(SUpdateInfo* pInfo, uint64_t tableId, TSKEY ts); bool (*updateInfoIsUpdated)(SUpdateInfo* pInfo, uint64_t tableId, TSKEY ts);
bool (*updateInfoIsTableInserted)(SUpdateInfo* pInfo, int64_t tbUid); bool (*updateInfoIsTableInserted)(SUpdateInfo* pInfo, int64_t tbUid);
void (*updateInfoDestroy)(SUpdateInfo* pInfo); void (*updateInfoDestroy)(SUpdateInfo* pInfo);
void (*windowSBfDelete)(SUpdateInfo *pInfo, uint64_t count); void (*windowSBfDelete)(SUpdateInfo *pInfo, uint64_t count);
void (*windowSBfAdd)(SUpdateInfo *pInfo, uint64_t count); void (*windowSBfAdd)(SUpdateInfo *pInfo, uint64_t count);

View File

@ -168,7 +168,6 @@ typedef struct {
struct SStreamFileState *pFileState; struct SStreamFileState *pFileState;
int32_t number; int32_t number;
SSHashObj *parNameMap; SSHashObj *parNameMap;
int64_t checkPointId;
int32_t taskId; int32_t taskId;
int64_t streamId; int64_t streamId;
int64_t streamBackendRid; int64_t streamBackendRid;

View File

@ -26,7 +26,7 @@ extern "C" {
#define DESCRIBE_RESULT_COLS 4 #define DESCRIBE_RESULT_COLS 4
#define DESCRIBE_RESULT_FIELD_LEN (TSDB_COL_NAME_LEN - 1 + VARSTR_HEADER_SIZE) #define DESCRIBE_RESULT_FIELD_LEN (TSDB_COL_NAME_LEN - 1 + VARSTR_HEADER_SIZE)
#define DESCRIBE_RESULT_TYPE_LEN (20 + VARSTR_HEADER_SIZE) #define DESCRIBE_RESULT_TYPE_LEN (20 + VARSTR_HEADER_SIZE)
#define DESCRIBE_RESULT_NOTE_LEN (8 + VARSTR_HEADER_SIZE) #define DESCRIBE_RESULT_NOTE_LEN (16 + VARSTR_HEADER_SIZE)
#define SHOW_CREATE_DB_RESULT_COLS 2 #define SHOW_CREATE_DB_RESULT_COLS 2
#define SHOW_CREATE_DB_RESULT_FIELD1_LEN (TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE) #define SHOW_CREATE_DB_RESULT_FIELD1_LEN (TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE)
@ -36,6 +36,11 @@ extern "C" {
#define SHOW_CREATE_TB_RESULT_FIELD1_LEN (TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE) #define SHOW_CREATE_TB_RESULT_FIELD1_LEN (TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE)
#define SHOW_CREATE_TB_RESULT_FIELD2_LEN (TSDB_MAX_ALLOWED_SQL_LEN * 3) #define SHOW_CREATE_TB_RESULT_FIELD2_LEN (TSDB_MAX_ALLOWED_SQL_LEN * 3)
#define SHOW_CREATE_VIEW_RESULT_COLS 2
#define SHOW_CREATE_VIEW_RESULT_FIELD1_LEN (TSDB_VIEW_FNAME_LEN + 4 + VARSTR_HEADER_SIZE)
#define SHOW_CREATE_VIEW_RESULT_FIELD2_LEN (TSDB_MAX_ALLOWED_SQL_LEN + VARSTR_HEADER_SIZE)
#define SHOW_LOCAL_VARIABLES_RESULT_COLS 3 #define SHOW_LOCAL_VARIABLES_RESULT_COLS 3
#define SHOW_LOCAL_VARIABLES_RESULT_FIELD1_LEN (TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE) #define SHOW_LOCAL_VARIABLES_RESULT_FIELD1_LEN (TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE)
#define SHOW_LOCAL_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE) #define SHOW_LOCAL_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE)
@ -51,6 +56,7 @@ extern "C" {
#define PRIVILEGE_TYPE_READ BIT_FLAG_MASK(1) #define PRIVILEGE_TYPE_READ BIT_FLAG_MASK(1)
#define PRIVILEGE_TYPE_WRITE BIT_FLAG_MASK(2) #define PRIVILEGE_TYPE_WRITE BIT_FLAG_MASK(2)
#define PRIVILEGE_TYPE_SUBSCRIBE BIT_FLAG_MASK(3) #define PRIVILEGE_TYPE_SUBSCRIBE BIT_FLAG_MASK(3)
#define PRIVILEGE_TYPE_ALTER BIT_FLAG_MASK(4)
typedef struct SDatabaseOptions { typedef struct SDatabaseOptions {
ENodeType type; ENodeType type;
@ -297,6 +303,13 @@ typedef struct SShowCreateTableStmt {
void* pTableCfg; // STableCfg void* pTableCfg; // STableCfg
} SShowCreateTableStmt; } SShowCreateTableStmt;
typedef struct SShowCreateViewStmt {
ENodeType type;
char dbName[TSDB_DB_NAME_LEN];
char viewName[TSDB_VIEW_NAME_LEN];
void* pViewMeta;
} SShowCreateViewStmt;
typedef struct SShowTableDistributedStmt { typedef struct SShowTableDistributedStmt {
ENodeType type; ENodeType type;
char dbName[TSDB_DB_NAME_LEN]; char dbName[TSDB_DB_NAME_LEN];
@ -490,6 +503,23 @@ typedef struct SDropFunctionStmt {
bool ignoreNotExists; bool ignoreNotExists;
} SDropFunctionStmt; } SDropFunctionStmt;
typedef struct SCreateViewStmt {
ENodeType type;
char dbName[TSDB_DB_NAME_LEN];
char viewName[TSDB_VIEW_NAME_LEN];
char* pQuerySql;
bool orReplace;
SNode* pQuery;
SCMCreateViewReq createReq;
} SCreateViewStmt;
typedef struct SDropViewStmt {
ENodeType type;
char dbName[TSDB_DB_NAME_LEN];
char viewName[TSDB_VIEW_NAME_LEN];
bool ignoreNotExists;
} SDropViewStmt;
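
As an illustration (not from the diff), a drop-view statement node could be built the way other statement nodes are, assuming the nodesMakeNode() helper from the nodes module; the database and view names are examples.

SDropViewStmt* pStmt = (SDropViewStmt*)nodesMakeNode(QUERY_NODE_DROP_VIEW_STMT);  // assumed helper
snprintf(pStmt->dbName, sizeof(pStmt->dbName), "test_db");
snprintf(pStmt->viewName, sizeof(pStmt->viewName), "v1");
pStmt->ignoreNotExists = true;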
typedef struct SGrantStmt { typedef struct SGrantStmt {
ENodeType type; ENodeType type;
char userName[TSDB_USER_LEN]; char userName[TSDB_USER_LEN];

View File

@ -121,6 +121,7 @@ int32_t nodesListMakeAppend(SNodeList** pList, SNode* pNode);
int32_t nodesListMakeStrictAppend(SNodeList** pList, SNode* pNode); int32_t nodesListMakeStrictAppend(SNodeList** pList, SNode* pNode);
int32_t nodesListAppendList(SNodeList* pTarget, SNodeList* pSrc); int32_t nodesListAppendList(SNodeList* pTarget, SNodeList* pSrc);
int32_t nodesListStrictAppendList(SNodeList* pTarget, SNodeList* pSrc); int32_t nodesListStrictAppendList(SNodeList* pTarget, SNodeList* pSrc);
int32_t nodesListMakeStrictAppendList(SNodeList** pTarget, SNodeList* pSrc);
int32_t nodesListPushFront(SNodeList* pList, SNode* pNode); int32_t nodesListPushFront(SNodeList* pList, SNode* pNode);
SListCell* nodesListErase(SNodeList* pList, SListCell* pCell); SListCell* nodesListErase(SNodeList* pList, SListCell* pCell);
void nodesListInsertList(SNodeList* pTarget, SListCell* pPos, SNodeList* pSrc); void nodesListInsertList(SNodeList* pTarget, SListCell* pPos, SNodeList* pSrc);

View File

@ -40,6 +40,13 @@ typedef enum EGroupAction {
GROUP_ACTION_CLEAR GROUP_ACTION_CLEAR
} EGroupAction; } EGroupAction;
typedef enum EMergeType {
MERGE_TYPE_SORT = 1,
MERGE_TYPE_NON_SORT,
MERGE_TYPE_COLUMNS,
MERGE_TYPE_MAX_VALUE
} EMergeType;
typedef struct SLogicNode { typedef struct SLogicNode {
ENodeType type; ENodeType type;
bool dynamicOp; bool dynamicOp;
@ -138,6 +145,7 @@ typedef struct SAggLogicNode {
bool hasGroupKeyOptimized; bool hasGroupKeyOptimized;
bool isGroupTb; bool isGroupTb;
bool isPartTb; // true if partition keys has tbname bool isPartTb; // true if partition keys has tbname
bool hasGroup;
} SAggLogicNode; } SAggLogicNode;
typedef struct SProjectLogicNode { typedef struct SProjectLogicNode {
@ -221,6 +229,8 @@ typedef struct SMergeLogicNode {
SNodeList* pInputs; SNodeList* pInputs;
int32_t numOfChannels; int32_t numOfChannels;
int32_t srcGroupId; int32_t srcGroupId;
bool colsMerge;
bool needSort;
bool groupSort; bool groupSort;
bool ignoreGroupId; bool ignoreGroupId;
bool inputWithGroupId; bool inputWithGroupId;
@ -388,6 +398,7 @@ typedef struct SLastRowScanPhysiNode {
SNodeList* pGroupTags; SNodeList* pGroupTags;
bool groupSort; bool groupSort;
bool ignoreNull; bool ignoreNull;
SNodeList* pTargets;
} SLastRowScanPhysiNode; } SLastRowScanPhysiNode;
typedef SLastRowScanPhysiNode STableCountScanPhysiNode; typedef SLastRowScanPhysiNode STableCountScanPhysiNode;
@ -531,6 +542,7 @@ typedef struct SExchangePhysiNode {
typedef struct SMergePhysiNode { typedef struct SMergePhysiNode {
SPhysiNode node; SPhysiNode node;
EMergeType type;
SNodeList* pMergeKeys; SNodeList* pMergeKeys;
SNodeList* pTargets; SNodeList* pTargets;
int32_t numOfChannels; int32_t numOfChannels;

View File

@ -30,6 +30,11 @@ extern "C" {
#define VGROUPS_INFO_SIZE(pInfo) \ #define VGROUPS_INFO_SIZE(pInfo) \
(NULL == (pInfo) ? 0 : (sizeof(SVgroupsInfo) + (pInfo)->numOfVgroups * sizeof(SVgroupInfo))) (NULL == (pInfo) ? 0 : (sizeof(SVgroupsInfo) + (pInfo)->numOfVgroups * sizeof(SVgroupInfo)))
typedef struct SAssociationNode {
SNode** pPlace;
SNode* pAssociationNode;
} SAssociationNode;
typedef struct SRawExprNode { typedef struct SRawExprNode {
ENodeType nodeType; ENodeType nodeType;
char* p; char* p;
@ -182,6 +187,16 @@ typedef struct STempTableNode {
SNode* pSubquery; SNode* pSubquery;
} STempTableNode; } STempTableNode;
typedef struct SViewNode {
STableNode table; // QUERY_NODE_REAL_TABLE
struct STableMeta* pMeta;
SVgroupsInfo* pVgroupList;
char qualDbName[TSDB_DB_NAME_LEN]; // SHOW qualDbName.TABLES
double ratio;
SArray* pSmaIndexes;
int8_t cacheLastMode;
} SViewNode;
typedef enum EJoinType { typedef enum EJoinType {
JOIN_TYPE_INNER = 1, JOIN_TYPE_INNER = 1,
JOIN_TYPE_LEFT, JOIN_TYPE_LEFT,
@ -413,7 +428,8 @@ typedef struct SVgDataBlocks {
typedef void (*FFreeTableBlockHash)(SHashObj*); typedef void (*FFreeTableBlockHash)(SHashObj*);
typedef void (*FFreeVgourpBlockArray)(SArray*); typedef void (*FFreeVgourpBlockArray)(SArray*);
struct SStbRowsDataContext;
typedef void (*FFreeStbRowsDataContext)(struct SStbRowsDataContext*);
typedef struct SVnodeModifyOpStmt { typedef struct SVnodeModifyOpStmt {
ENodeType nodeType; ENodeType nodeType;
ENodeType sqlNodeType; ENodeType sqlNodeType;
@ -428,11 +444,11 @@ typedef struct SVnodeModifyOpStmt {
struct STableMeta* pTableMeta; struct STableMeta* pTableMeta;
SNode* pTagCond; SNode* pTagCond;
SArray* pTableTag; SArray* pTableTag;
SHashObj* pVgroupsHashObj; SHashObj* pVgroupsHashObj; // SHashObj<vgId, SVgInfo>
SHashObj* pTableBlockHashObj; // SHashObj<tuid, STableDataCxt*> SHashObj* pTableBlockHashObj; // SHashObj<tuid, STableDataCxt*>
SHashObj* pSubTableHashObj; SHashObj* pSubTableHashObj; // SHashObj<table_name, STableMeta*>
SHashObj* pTableNameHashObj; SHashObj* pTableNameHashObj; // set of table names for refreshing meta, sync mode
SHashObj* pDbFNameHashObj; SHashObj* pDbFNameHashObj; // set of db names for refreshing meta, sync mode
SArray* pVgDataBlocks; // SArray<SVgroupDataCxt*> SArray* pVgDataBlocks; // SArray<SVgroupDataCxt*>
SVCreateTbReq* pCreateTblReq; SVCreateTbReq* pCreateTblReq;
TdFilePtr fp; TdFilePtr fp;
@ -440,6 +456,10 @@ typedef struct SVnodeModifyOpStmt {
FFreeVgourpBlockArray freeArrayFunc; FFreeVgourpBlockArray freeArrayFunc;
bool usingTableProcessing; bool usingTableProcessing;
bool fileProcessing; bool fileProcessing;
bool stbSyntax;
struct SStbRowsDataContext* pStbRowsCxt;
FFreeStbRowsDataContext freeStbRowsCxtFunc;
} SVnodeModifyOpStmt; } SVnodeModifyOpStmt;
typedef struct SExplainOptions { typedef struct SExplainOptions {

View File

@ -22,9 +22,7 @@ extern "C" {
#include "query.h" #include "query.h"
#include "querynodes.h" #include "querynodes.h"
#include "catalog.h"
struct SCatalogReq;
struct SMetaData;
typedef struct SStmtCallback { typedef struct SStmtCallback {
TAOS_STMT* pStmt; TAOS_STMT* pStmt;
@ -33,6 +31,33 @@ typedef struct SStmtCallback {
int32_t (*getExecInfoFn)(TAOS_STMT*, SHashObj**, SHashObj**); int32_t (*getExecInfoFn)(TAOS_STMT*, SHashObj**, SHashObj**);
} SStmtCallback; } SStmtCallback;
typedef enum {
PARSE_SQL_RES_QUERY = 1,
PARSE_SQL_RES_SCHEMA,
} SParseResType;
typedef struct SParseSchemaRes {
int8_t precision;
int32_t numOfCols;
SSchema* pSchema;
} SParseSchemaRes;
typedef struct SParseQueryRes {
SNode* pQuery;
SCatalogReq* pCatalogReq;
SMetaData meta;
} SParseQueryRes;
typedef struct SParseSqlRes {
SParseResType resType;
union {
SParseSchemaRes schemaRes;
SParseQueryRes queryRes;
};
} SParseSqlRes;
typedef int32_t (*parseSqlFn)(void*, const char*, const char*, bool, const char*, SParseSqlRes*);
typedef struct SParseCsvCxt { typedef struct SParseCsvCxt {
TdFilePtr fp; // last parsed file TdFilePtr fp; // last parsed file
int32_t tableNo; // last parsed table int32_t tableNo; // last parsed table
@ -55,6 +80,8 @@ typedef struct SParseContext {
struct SCatalog* pCatalog; struct SCatalog* pCatalog;
SStmtCallback* pStmtCb; SStmtCallback* pStmtCb;
const char* pUser; const char* pUser;
const char* pEffectiveUser;
bool parseOnly;
bool isSuperUser; bool isSuperUser;
bool enableSysInfo; bool enableSysInfo;
bool async; bool async;
@ -64,7 +91,10 @@ typedef struct SParseContext {
SArray* pTableMetaPos; // sql table pos => catalog data pos SArray* pTableMetaPos; // sql table pos => catalog data pos
SArray* pTableVgroupPos; // sql table pos => catalog data pos SArray* pTableVgroupPos; // sql table pos => catalog data pos
int64_t allocatorId; int64_t allocatorId;
parseSqlFn parseSqlFp;
void* parseSqlParam;
int8_t biMode; int8_t biMode;
SArray* pSubMetaList;
} SParseContext; } SParseContext;
int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery); int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery);
@ -125,6 +155,8 @@ int rawBlockBindData(SQuery *query, STableMeta* pTableMeta, void* data, SVCr
int32_t rewriteToVnodeModifyOpStmt(SQuery* pQuery, SArray* pBufArray); int32_t rewriteToVnodeModifyOpStmt(SQuery* pQuery, SArray* pBufArray);
SArray* serializeVgroupsCreateTableBatch(SHashObj* pVgroupHashmap); SArray* serializeVgroupsCreateTableBatch(SHashObj* pVgroupHashmap);
SArray* serializeVgroupsDropTableBatch(SHashObj* pVgroupHashmap); SArray* serializeVgroupsDropTableBatch(SHashObj* pVgroupHashmap);
void destoryCatalogReq(SCatalogReq *pCatalogReq);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif
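
A sketch of a callback matching the parseSqlFn typedef above; the parameter names and the direction of data flow (the callee parses the SQL and fills SParseSqlRes) are assumptions, since the typedef leaves them unnamed.

static int32_t parseViewSqlCb(void* param, const char* dbName, const char* sql,
                              bool parseOnly, const char* effectiveUser, SParseSqlRes* pRes) {
  // assumed contract: parse `sql` on behalf of `effectiveUser` and report the outcome in pRes
  pRes->resType = PARSE_SQL_RES_SCHEMA;
  pRes->schemaRes.precision = 0;
  pRes->schemaRes.numOfCols = 0;      // a real implementation would fill the parsed schema here
  pRes->schemaRes.pSchema = NULL;
  return 0;
}

Such a callback would then be handed to the parser through the new parseSqlFp/parseSqlParam fields of SParseContext shown above.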

View File

@ -119,6 +119,17 @@ typedef struct STableMeta {
} STableMeta; } STableMeta;
#pragma pack(pop) #pragma pack(pop)
typedef struct SViewMeta {
uint64_t viewId;
char* user;
char* querySql;
int8_t precision;
int8_t type;
int32_t version;
int32_t numOfCols;
SSchema* pSchema;
} SViewMeta;
typedef struct SDBVgInfo { typedef struct SDBVgInfo {
int32_t vgVersion; int32_t vgVersion;
int16_t hashPrefix; int16_t hashPrefix;
@ -148,6 +159,15 @@ typedef struct STableMetaOutput {
STableMeta* tbMeta; STableMeta* tbMeta;
} STableMetaOutput; } STableMetaOutput;
typedef struct SViewMetaOutput {
char name[TSDB_VIEW_NAME_LEN];
char dbFName[TSDB_DB_FNAME_LEN];
char* querySql;
int8_t precision;
int32_t numOfCols;
SSchema* pSchema;
} SViewMetaOutput;
typedef struct SDataBuf { typedef struct SDataBuf {
int32_t msgType; int32_t msgType;
void* pData; void* pData;
@ -300,9 +320,11 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
(NO_RET_REDIRECT_ERROR(_code) || SYNC_UNKNOWN_LEADER_REDIRECT_ERROR(_code) || \ (NO_RET_REDIRECT_ERROR(_code) || SYNC_UNKNOWN_LEADER_REDIRECT_ERROR(_code) || \
SYNC_SELF_LEADER_REDIRECT_ERROR(_code) || SYNC_OTHER_LEADER_REDIRECT_ERROR(_code)) SYNC_SELF_LEADER_REDIRECT_ERROR(_code) || SYNC_OTHER_LEADER_REDIRECT_ERROR(_code))
#define IS_VIEW_REQUEST(_type) ((_type) == TDMT_MND_CREATE_VIEW || (_type) == TDMT_MND_DROP_VIEW)
#define NEED_CLIENT_RM_TBLMETA_REQ(_type) \ #define NEED_CLIENT_RM_TBLMETA_REQ(_type) \
((_type) == TDMT_VND_CREATE_TABLE || (_type) == TDMT_MND_CREATE_STB || (_type) == TDMT_VND_DROP_TABLE || \ ((_type) == TDMT_VND_CREATE_TABLE || (_type) == TDMT_MND_CREATE_STB || (_type) == TDMT_VND_DROP_TABLE || \
(_type) == TDMT_MND_DROP_STB) (_type) == TDMT_MND_DROP_STB || (_type) == TDMT_MND_CREATE_VIEW || (_type) == TDMT_MND_DROP_VIEW)
#define NEED_SCHEDULER_REDIRECT_ERROR(_code) \ #define NEED_SCHEDULER_REDIRECT_ERROR(_code) \
(SYNC_UNKNOWN_LEADER_REDIRECT_ERROR(_code) || SYNC_SELF_LEADER_REDIRECT_ERROR(_code) || \ (SYNC_UNKNOWN_LEADER_REDIRECT_ERROR(_code) || SYNC_SELF_LEADER_REDIRECT_ERROR(_code) || \

View File

@ -57,6 +57,8 @@ int32_t streamStateSessionDel(SStreamState* pState, const SSessionKey* key);
int32_t streamStateSessionClear(SStreamState* pState); int32_t streamStateSessionClear(SStreamState* pState);
int32_t streamStateSessionGetKVByCur(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen); int32_t streamStateSessionGetKVByCur(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen);
int32_t streamStateSessionGetKeyByRange(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey); int32_t streamStateSessionGetKeyByRange(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey);
int32_t streamStateSessionAllocWinBuffByNextPosition(SStreamState* pState, SStreamStateCur* pCur,
const SSessionKey* pKey, void** pVal, int32_t* pVLen);
SStreamStateCur* streamStateSessionSeekKeyNext(SStreamState* pState, const SSessionKey* key); SStreamStateCur* streamStateSessionSeekKeyNext(SStreamState* pState, const SSessionKey* key);
SStreamStateCur* streamStateSessionSeekKeyCurrentPrev(SStreamState* pState, const SSessionKey* key); SStreamStateCur* streamStateSessionSeekKeyCurrentPrev(SStreamState* pState, const SSessionKey* key);
@ -66,6 +68,7 @@ SStreamStateCur* streamStateSessionSeekKeyCurrentNext(SStreamState* pState, cons
int32_t streamStateStateAddIfNotExist(SStreamState* pState, SSessionKey* key, char* pKeyData, int32_t keyDataLen, int32_t streamStateStateAddIfNotExist(SStreamState* pState, SSessionKey* key, char* pKeyData, int32_t keyDataLen,
state_key_cmpr_fn fn, void** pVal, int32_t* pVLen); state_key_cmpr_fn fn, void** pVal, int32_t* pVLen);
// fill
int32_t streamStateFillPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen); int32_t streamStateFillPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t streamStateFillGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen); int32_t streamStateFillGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
int32_t streamStateFillDel(SStreamState* pState, const SWinKey* key); int32_t streamStateFillDel(SStreamState* pState, const SWinKey* key);

View File

@ -38,16 +38,25 @@ extern "C" {
#define TASK_DOWNSTREAM_READY 0x0 #define TASK_DOWNSTREAM_READY 0x0
#define TASK_DOWNSTREAM_NOT_READY 0x1 #define TASK_DOWNSTREAM_NOT_READY 0x1
#define TASK_DOWNSTREAM_NOT_LEADER 0x2 #define TASK_DOWNSTREAM_NOT_LEADER 0x2
#define TASK_SELF_NEW_STAGE 0x3 #define TASK_UPSTREAM_NEW_STAGE 0x3
#define NODE_ROLE_UNINIT 0x1 #define NODE_ROLE_UNINIT 0x1
#define NODE_ROLE_LEADER 0x2 #define NODE_ROLE_LEADER 0x2
#define NODE_ROLE_FOLLOWER 0x3 #define NODE_ROLE_FOLLOWER 0x3
typedef struct SStreamTask SStreamTask; #define HAS_RELATED_FILLHISTORY_TASK(_t) ((_t)->hTaskInfo.id.taskId != 0)
typedef struct SStreamQueue SStreamQueue; #define CLEAR_RELATED_FILLHISTORY_TASK(_t) \
do { \
(_t)->hTaskInfo.id.taskId = 0; \
(_t)->hTaskInfo.id.streamId = 0; \
} while (0)
typedef struct SStreamTask SStreamTask;
typedef struct SStreamQueue SStreamQueue;
typedef struct SStreamTaskSM SStreamTaskSM;
#define SSTREAM_TASK_VER 2 #define SSTREAM_TASK_VER 2
enum { enum {
STREAM_STATUS__NORMAL = 0, STREAM_STATUS__NORMAL = 0,
STREAM_STATUS__STOP, STREAM_STATUS__STOP,
@ -58,7 +67,7 @@ enum {
}; };
typedef enum ETaskStatus { typedef enum ETaskStatus {
TASK_STATUS__NORMAL = 0, TASK_STATUS__READY = 0,
TASK_STATUS__DROPPING, TASK_STATUS__DROPPING,
TASK_STATUS__UNINIT, // not used, a placeholder TASK_STATUS__UNINIT, // not used, a placeholder
TASK_STATUS__STOP, TASK_STATUS__STOP,
@ -66,6 +75,7 @@ typedef enum ETaskStatus {
TASK_STATUS__HALT, // pause, but not be manipulated by user command TASK_STATUS__HALT, // pause, but not be manipulated by user command
TASK_STATUS__PAUSE, // pause TASK_STATUS__PAUSE, // pause
TASK_STATUS__CK, // stream task is in checkpoint status, no data is allowed to be put into the inputQ anymore TASK_STATUS__CK, // stream task is in checkpoint status, no data is allowed to be put into the inputQ anymore
TASK_STATUS__STREAM_SCAN_HISTORY,
} ETaskStatus; } ETaskStatus;
enum { enum {
@ -118,6 +128,20 @@ enum {
STREAM_META_OK_TO_STOP = 2, STREAM_META_OK_TO_STOP = 2,
}; };
typedef enum EStreamTaskEvent {
TASK_EVENT_INIT = 0x1,
TASK_EVENT_INIT_SCANHIST = 0x2,
TASK_EVENT_INIT_STREAM_SCANHIST = 0x3,
TASK_EVENT_SCANHIST_DONE = 0x4,
TASK_EVENT_STOP = 0x5,
TASK_EVENT_GEN_CHECKPOINT = 0x6,
TASK_EVENT_CHECKPOINT_DONE = 0x7,
TASK_EVENT_PAUSE = 0x8,
TASK_EVENT_RESUME = 0x9,
TASK_EVENT_HALT = 0xA,
TASK_EVENT_DROPPING = 0xB,
} EStreamTaskEvent;
typedef struct { typedef struct {
int8_t type; int8_t type;
} SStreamQueueItem; } SStreamQueueItem;
@ -155,11 +179,6 @@ typedef struct {
SSDataBlock* pBlock; SSDataBlock* pBlock;
} SStreamRefDataBlock; } SStreamRefDataBlock;
typedef struct {
int8_t type;
SSDataBlock* pBlock;
} SStreamTrigger;
typedef struct SStreamQueueNode SStreamQueueNode; typedef struct SStreamQueueNode SStreamQueueNode;
struct SStreamQueueNode { struct SStreamQueueNode {
@ -215,6 +234,29 @@ typedef struct {
SUseDbRsp dbInfo; SUseDbRsp dbInfo;
} STaskDispatcherShuffle; } STaskDispatcherShuffle;
typedef struct {
int32_t nodeId;
SEpSet epset;
} SDownstreamTaskEpset;
typedef enum {
TASK_SCANHISTORY_CONT = 0x1,
TASK_SCANHISTORY_QUIT = 0x2,
TASK_SCANHISTORY_REXEC = 0x3,
} EScanHistoryRet;
typedef struct {
EScanHistoryRet ret;
int32_t idleTime;
} SScanhistoryDataInfo;
typedef struct {
int32_t idleDuration; // idle time before using the time slice to continue the scan-history execution
int32_t numOfTicks;
tmr_h pTimer;
int32_t execCount;
} SScanhistorySchedInfo;
typedef struct { typedef struct {
int64_t stbUid; int64_t stbUid;
char stbFullName[TSDB_TABLE_FNAME_LEN]; char stbFullName[TSDB_TABLE_FNAME_LEN];
@ -260,19 +302,21 @@ typedef struct SCheckpointInfo {
int64_t startTs; int64_t startTs;
int64_t checkpointId; int64_t checkpointId;
int64_t checkpointVer; // latest checkpointId version int64_t checkpointVer; // latest checkpointId version
int64_t processedVer; // already processed ver, that has generated results version.
int64_t nextProcessVer; // current offset in WAL, not serialized int64_t nextProcessVer; // current offset in WAL, not serialized
int64_t failedId; // record the latest failed checkpoint id int64_t failedId; // record the latest failed checkpoint id
} SCheckpointInfo; } SCheckpointInfo;
typedef struct SStreamStatus { typedef struct SStreamStatus {
int8_t taskStatus; SStreamTaskSM* pSM;
int8_t downstreamReady; // downstream tasks are all ready now, if this flag is set int8_t taskStatus;
int8_t schedStatus; int8_t downstreamReady; // downstream tasks are all ready now, if this flag is set
int8_t keepTaskStatus; int8_t schedStatus;
bool appendTranstateBlock; // has append the transfer state data block already, todo: remove it int8_t keepTaskStatus;
int8_t pauseAllowed; // allowed task status to be set to be paused bool appendTranstateBlock; // has append the transfer state data block already, todo: remove it
int32_t timerActive; // timer is active int8_t pauseAllowed; // allowed task status to be set to be paused
int32_t inScanHistorySentinel; int32_t timerActive; // timer is active
int32_t inScanHistorySentinel;
} SStreamStatus; } SStreamStatus;
typedef struct SDataRange { typedef struct SDataRange {
@ -305,15 +349,10 @@ typedef struct SDispatchMsgInfo {
void* pTimer; // used to dispatch data after a given time duration void* pTimer; // used to dispatch data after a given time duration
} SDispatchMsgInfo; } SDispatchMsgInfo;
typedef struct STaskOutputQueue { typedef struct STaskQueue {
int8_t status; int8_t status;
SStreamQueue* queue; SStreamQueue* queue;
} STaskOutputQueue; } STaskQueue;
typedef struct STaskInputInfo {
int8_t status;
SStreamQueue* queue;
} STaskInputInfo;
typedef struct STaskSchedInfo { typedef struct STaskSchedInfo {
int8_t status; int8_t status;
@ -332,7 +371,9 @@ typedef struct STaskExecStatisInfo {
int64_t init; int64_t init;
int64_t start; int64_t start;
int64_t step1Start; int64_t step1Start;
double step1El;
int64_t step2Start; int64_t step2Start;
double step2El;
int32_t updateCount; int32_t updateCount;
int64_t latestUpdateTs; int64_t latestUpdateTs;
int32_t processDataBlocks; int32_t processDataBlocks;
@ -349,18 +390,21 @@ typedef struct SHistoryTaskInfo {
int32_t tickCount; int32_t tickCount;
int32_t retryTimes; int32_t retryTimes;
int32_t waitInterval; int32_t waitInterval;
int64_t haltVer; // offset in wal when halt the stream task
} SHistoryTaskInfo; } SHistoryTaskInfo;
typedef struct STaskOutputInfo { typedef struct STaskOutputInfo {
union { union {
STaskDispatcherFixed fixedDispatcher; STaskDispatcherFixed fixedDispatcher;
STaskDispatcherShuffle shuffleDispatcher; STaskDispatcherShuffle shuffleDispatcher;
STaskSinkTb tbSink;
STaskSinkSma smaSink; STaskSinkTb tbSink;
STaskSinkFetch fetchSink; STaskSinkSma smaSink;
STaskSinkFetch fetchSink;
}; };
int8_t type; int8_t type;
STokenBucket* pTokenBucket; STokenBucket* pTokenBucket;
SArray* pDownstreamUpdateList;
} STaskOutputInfo; } STaskOutputInfo;
typedef struct SUpstreamInfo { typedef struct SUpstreamInfo {
@ -372,8 +416,8 @@ struct SStreamTask {
int64_t ver; int64_t ver;
SStreamTaskId id; SStreamTaskId id;
SSTaskBasicInfo info; SSTaskBasicInfo info;
STaskOutputQueue outputq; STaskQueue outputq;
STaskInputInfo inputInfo; STaskQueue inputq;
STaskSchedInfo schedInfo; STaskSchedInfo schedInfo;
STaskOutputInfo outputInfo; STaskOutputInfo outputInfo;
SDispatchMsgInfo msgInfo; SDispatchMsgInfo msgInfo;
@ -390,7 +434,10 @@ struct SStreamTask {
SStreamState* pState; // state backend SStreamState* pState; // state backend
SArray* pRspMsgList; SArray* pRspMsgList;
SUpstreamInfo upstreamInfo; SUpstreamInfo upstreamInfo;
// the following attributes are not serialized // the following attributes are not serialized
SScanhistorySchedInfo schedHistoryInfo;
int32_t notReadyTasks; int32_t notReadyTasks;
int32_t numOfWaitingUpstream; int32_t numOfWaitingUpstream;
int64_t checkReqId; int64_t checkReqId;
@ -408,38 +455,45 @@ struct SStreamTask {
typedef struct STaskStartInfo { typedef struct STaskStartInfo {
int64_t startTs; int64_t startTs;
int64_t readyTs; int64_t readyTs;
int32_t startedAfterNodeUpdate; int32_t tasksWillRestart;
SHashObj* pReadyTaskSet; // tasks that are all ready for running stream processing int32_t taskStarting; // restart flag, sentinel to guard the restart procedure.
int32_t elapsedTime; SHashObj* pReadyTaskSet; // tasks that are all ready for running stream processing
SHashObj* pFailedTaskSet; // tasks that have finished the downstream-check process, whether it succeeded or failed
int64_t elapsedTime;
} STaskStartInfo; } STaskStartInfo;
typedef struct STaskUpdateInfo {
SHashObj* pTasks;
int32_t transId;
} STaskUpdateInfo;
// meta // meta
typedef struct SStreamMeta { typedef struct SStreamMeta {
char* path; char* path;
TDB* db; TDB* db;
TTB* pTaskDb; TTB* pTaskDb;
TTB* pCheckpointDb; TTB* pCheckpointDb;
SHashObj* pTasksMap; SHashObj* pTasksMap;
SArray* pTaskList; // SArray<STaskId*> SArray* pTaskList; // SArray<STaskId*>
void* ahandle; void* ahandle;
TXN* txn; TXN* txn;
FTaskExpand* expandFunc; FTaskExpand* expandFunc;
int32_t vgId; int32_t vgId;
int64_t stage; int64_t stage;
int32_t role; int32_t role;
STaskStartInfo startInfo; STaskStartInfo startInfo;
SRWLatch lock; SRWLatch lock;
int32_t walScanCounter; int32_t walScanCounter;
void* streamBackend; void* streamBackend;
int64_t streamBackendRid; int64_t streamBackendRid;
SHashObj* pTaskBackendUnique; SHashObj* pTaskBackendUnique;
TdThreadMutex backendMutex; TdThreadMutex backendMutex;
SMetaHbInfo* pHbInfo; SMetaHbInfo* pHbInfo;
SHashObj* pUpdateTaskSet; STaskUpdateInfo updateInfo;
int32_t numOfStreamTasks; // this value should be increased when a new task is added into the meta int32_t numOfStreamTasks; // this value should be increased when a new task is added into the meta
int32_t numOfPausedTasks; int32_t numOfPausedTasks;
int32_t chkptNotReadyTasks; int32_t chkptNotReadyTasks;
int64_t rid; int64_t rid;
int64_t chkpId; int64_t chkpId;
SArray* chkpSaved; SArray* chkpSaved;
@ -536,7 +590,7 @@ typedef struct {
int32_t downstreamNodeId; int32_t downstreamNodeId;
int32_t downstreamTaskId; int32_t downstreamTaskId;
int32_t childId; int32_t childId;
int32_t oldStage; int64_t oldStage;
int8_t status; int8_t status;
} SStreamTaskCheckRsp; } SStreamTaskCheckRsp;
@ -601,7 +655,7 @@ int32_t tDecodeStreamCheckpointReadyMsg(SDecoder* pDecoder, SStreamCheckpointRea
typedef struct STaskStatusEntry { typedef struct STaskStatusEntry {
STaskId id; STaskId id;
int32_t status; int32_t status;
int32_t stage; int64_t stage;
int32_t nodeId; int32_t nodeId;
int64_t verStart; // start version in WAL, only valid for source task int64_t verStart; // start version in WAL, only valid for source task
int64_t verEnd; // end version in WAL, only valid for source task int64_t verEnd; // end version in WAL, only valid for source task
@ -617,7 +671,8 @@ typedef struct STaskStatusEntry {
typedef struct SStreamHbMsg { typedef struct SStreamHbMsg {
int32_t vgId; int32_t vgId;
int32_t numOfTasks; int32_t numOfTasks;
SArray* pTaskStatus; // SArray<SStreamTaskStatusEntry> SArray* pTaskStatus; // SArray<STaskStatusEntry>
SArray* pUpdateNodes; // SArray<int32_t>, needs update the epsets in stream tasks for those nodes.
} SStreamHbMsg; } SStreamHbMsg;
int32_t tEncodeStreamHbMsg(SEncoder* pEncoder, const SStreamHbMsg* pRsp); int32_t tEncodeStreamHbMsg(SEncoder* pEncoder, const SStreamHbMsg* pRsp);
@ -641,6 +696,7 @@ typedef struct SNodeUpdateInfo {
} SNodeUpdateInfo; } SNodeUpdateInfo;
typedef struct SStreamTaskNodeUpdateMsg { typedef struct SStreamTaskNodeUpdateMsg {
int32_t transId; // to identify the msg
int64_t streamId; int64_t streamId;
int32_t taskId; int32_t taskId;
SArray* pNodeList; // SArray<SNodeUpdateInfo> SArray* pNodeList; // SArray<SNodeUpdateInfo>
@ -686,24 +742,34 @@ SStreamChildEpInfo* streamTaskGetUpstreamTaskEpInfo(SStreamTask* pTask, int32_t
void streamTaskInputFail(SStreamTask* pTask); void streamTaskInputFail(SStreamTask* pTask);
int32_t streamExecTask(SStreamTask* pTask); int32_t streamExecTask(SStreamTask* pTask);
int32_t streamSchedExec(SStreamTask* pTask); int32_t streamSchedExec(SStreamTask* pTask);
bool streamTaskShouldStop(const SStreamStatus* pStatus); bool streamTaskShouldStop(const SStreamTask* pStatus);
bool streamTaskShouldPause(const SStreamStatus* pStatus); bool streamTaskShouldPause(const SStreamTask* pStatus);
bool streamTaskIsIdle(const SStreamTask* pTask); bool streamTaskIsIdle(const SStreamTask* pTask);
bool streamTaskReadyToRun(const SStreamTask* pTask, char** pStatus);
char* createStreamTaskIdStr(int64_t streamId, int32_t taskId);
ETaskStatus streamTaskGetStatus(const SStreamTask* pTask, char** pStr);
const char* streamTaskGetStatusStr(ETaskStatus status);
void streamTaskResetStatus(SStreamTask* pTask);
void streamTaskSetStatusReady(SStreamTask* pTask);
void initRpcMsg(SRpcMsg* pMsg, int32_t msgType, void* pCont, int32_t contLen); void initRpcMsg(SRpcMsg* pMsg, int32_t msgType, void* pCont, int32_t contLen);
char* createStreamTaskIdStr(int64_t streamId, int32_t taskId);
// recover and fill history // recover and fill history
void streamTaskCheckDownstream(SStreamTask* pTask); void streamTaskCheckDownstream(SStreamTask* pTask);
int32_t streamTaskStartScanHistory(SStreamTask* pTask);
int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_t vgId, int64_t stage); int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_t vgId, int64_t stage, int64_t* oldStage);
int32_t streamTaskUpdateEpsetInfo(SStreamTask* pTask, SArray* pNodeList); int32_t streamTaskUpdateEpsetInfo(SStreamTask* pTask, SArray* pNodeList);
void streamTaskResetUpstreamStageInfo(SStreamTask* pTask); void streamTaskResetUpstreamStageInfo(SStreamTask* pTask);
bool streamTaskAllUpstreamClosed(SStreamTask* pTask); bool streamTaskAllUpstreamClosed(SStreamTask* pTask);
bool streamTaskSetSchedStatusWait(SStreamTask* pTask); bool streamTaskSetSchedStatusWait(SStreamTask* pTask);
int8_t streamTaskSetSchedStatusActive(SStreamTask* pTask); int8_t streamTaskSetSchedStatusActive(SStreamTask* pTask);
int8_t streamTaskSetSchedStatusInActive(SStreamTask* pTask); int8_t streamTaskSetSchedStatusInactive(SStreamTask* pTask);
int32_t streamTaskClearHTaskAttr(SStreamTask* pTask);
int32_t streamTaskHandleEvent(SStreamTaskSM* pSM, EStreamTaskEvent event);
int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent event);
void streamTaskRestoreStatus(SStreamTask* pTask);
int32_t streamTaskStop(SStreamTask* pTask); int32_t streamTaskStop(SStreamTask* pTask);
int32_t streamSendCheckRsp(const SStreamMeta* pMeta, const SStreamTaskCheckReq* pReq, SStreamTaskCheckRsp* pRsp, int32_t streamSendCheckRsp(const SStreamMeta* pMeta, const SStreamTaskCheckReq* pReq, SStreamTaskCheckRsp* pRsp,
@ -712,19 +778,15 @@ int32_t streamProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRs
int32_t streamLaunchFillHistoryTask(SStreamTask* pTask); int32_t streamLaunchFillHistoryTask(SStreamTask* pTask);
int32_t streamTaskScanHistoryDataComplete(SStreamTask* pTask); int32_t streamTaskScanHistoryDataComplete(SStreamTask* pTask);
int32_t streamStartScanHistoryAsync(SStreamTask* pTask, int8_t igUntreated); int32_t streamStartScanHistoryAsync(SStreamTask* pTask, int8_t igUntreated);
int32_t streamReExecScanHistoryFuture(SStreamTask* pTask, int32_t idleDuration);
bool streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask, int64_t latestVer); bool streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask, int64_t latestVer);
int32_t streamQueueGetNumOfItems(const SStreamQueue* pQueue); int32_t streamQueueGetNumOfItems(const SStreamQueue* pQueue);
int32_t streamQueueGetAvailableSpace(const SStreamQueue* pQueue, int32_t* availNum, double* availSize);
// common // common
int32_t streamRestoreParam(SStreamTask* pTask); int32_t streamRestoreParam(SStreamTask* pTask);
int32_t streamSetStatusNormal(SStreamTask* pTask);
int32_t streamSetStatusUnint(SStreamTask* pTask);
const char* streamGetTaskStatusStr(int32_t status);
void streamTaskPause(SStreamTask* pTask, SStreamMeta* pMeta); void streamTaskPause(SStreamTask* pTask, SStreamMeta* pMeta);
void streamTaskResume(SStreamTask* pTask, SStreamMeta* pMeta); void streamTaskResume(SStreamTask* pTask);
void streamTaskResumeFromHalt(SStreamTask* pTask);
void streamTaskDisablePause(SStreamTask* pTask);
void streamTaskEnablePause(SStreamTask* pTask); void streamTaskEnablePause(SStreamTask* pTask);
int32_t streamTaskSetUpstreamInfo(SStreamTask* pTask, const SStreamTask* pUpstreamTask); int32_t streamTaskSetUpstreamInfo(SStreamTask* pTask, const SStreamTask* pUpstreamTask);
void streamTaskUpdateUpstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpSet* pEpSet); void streamTaskUpdateUpstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpSet* pEpSet);
@ -741,12 +803,11 @@ void streamTaskStatusCopy(STaskStatusEntry* pDst, const STaskStatusEntry* pSrc);
// source level // source level
int32_t streamSetParamForStreamScannerStep1(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow); int32_t streamSetParamForStreamScannerStep1(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow);
int32_t streamSetParamForStreamScannerStep2(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow); int32_t streamSetParamForStreamScannerStep2(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow);
int32_t streamScanHistoryData(SStreamTask* pTask); SScanhistoryDataInfo streamScanHistoryData(SStreamTask* pTask, int64_t st);
int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask); int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask);
// agg level // agg level
int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, SStreamScanHistoryFinishReq* pReq, int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, SStreamScanHistoryFinishReq* pReq, SRpcHandleInfo* pInfo);
SRpcHandleInfo* pRpcInfo);
int32_t streamProcessScanHistoryFinishRsp(SStreamTask* pTask); int32_t streamProcessScanHistoryFinishRsp(SStreamTask* pTask);
// stream task meta // stream task meta
@ -759,21 +820,28 @@ int32_t streamMetaRemoveTask(SStreamMeta* pMeta, STaskId* pKey);
int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask, bool* pAdded); int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask, bool* pAdded);
int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId); int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta); int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta);
int32_t streamMetaGetNumOfStreamTasks(SStreamMeta* pMeta);
SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId); SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask); void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
int32_t streamMetaReopen(SStreamMeta* pMeta); int32_t streamMetaReopen(SStreamMeta* pMeta);
void streamMetaInitBackend(SStreamMeta* pMeta);
int32_t streamMetaCommit(SStreamMeta* pMeta); int32_t streamMetaCommit(SStreamMeta* pMeta);
int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta); int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta);
int64_t streamMetaGetLatestCheckpointId(SStreamMeta* pMeta);
void streamMetaNotifyClose(SStreamMeta* pMeta); void streamMetaNotifyClose(SStreamMeta* pMeta);
void streamMetaStartHb(SStreamMeta* pMeta); void streamMetaStartHb(SStreamMeta* pMeta);
void streamMetaInitForSnode(SStreamMeta* pMeta); bool streamMetaTaskInTimer(SStreamMeta* pMeta);
int32_t streamMetaUpdateTaskDownstreamStatus(SStreamTask* pTask, int64_t startTs, int64_t endTs, bool succ);
void streamMetaRLock(SStreamMeta* pMeta);
void streamMetaRUnLock(SStreamMeta* pMeta);
void streamMetaWLock(SStreamMeta* pMeta);
void streamMetaWUnLock(SStreamMeta* pMeta);
void streamMetaResetStartInfo(STaskStartInfo* pMeta);
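
For illustration only (not part of this change), a minimal sketch of how the new meta lock helpers could pair with task acquire/release; the lock scope and error handling are assumptions:

    // assumes pMeta (SStreamMeta*), streamId and taskId are already in scope
    streamMetaRLock(pMeta);
    SStreamTask* pTask = streamMetaAcquireTask(pMeta, streamId, taskId);
    streamMetaRUnLock(pMeta);
    if (pTask != NULL) {
      // inspect or schedule the task here
      streamMetaReleaseTask(pMeta, pTask);
    }
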
// checkpoint // checkpoint
int32_t streamProcessCheckpointSourceReq(SStreamTask* pTask, SStreamCheckpointSourceReq* pReq); int32_t streamProcessCheckpointSourceReq(SStreamTask* pTask, SStreamCheckpointSourceReq* pReq);
int32_t streamProcessCheckpointReadyMsg(SStreamTask* pTask); int32_t streamProcessCheckpointReadyMsg(SStreamTask* pTask);
int32_t streamTaskBuildCheckpoint(SStreamTask* pTask);
void streamTaskClearCheckInfo(SStreamTask* pTask); void streamTaskClearCheckInfo(SStreamTask* pTask);
int32_t streamAlignTransferState(SStreamTask* pTask); int32_t streamAlignTransferState(SStreamTask* pTask);
int32_t streamBuildAndSendDropTaskMsg(SMsgCb* pMsgCb, int32_t vgId, SStreamTaskId* pTaskId); int32_t streamBuildAndSendDropTaskMsg(SMsgCb* pMsgCb, int32_t vgId, SStreamTaskId* pTaskId);
int32_t streamAddCheckpointSourceRspMsg(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pRpcInfo, SStreamTask* pTask, int32_t streamAddCheckpointSourceRspMsg(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pRpcInfo, SStreamTask* pTask,


@ -76,8 +76,10 @@ int32_t getRowStateRowSize(SStreamFileState* pFileState);
int32_t getSessionWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, TSKEY gap, void** pVal, int32_t* pVLen); int32_t getSessionWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, TSKEY gap, void** pVal, int32_t* pVLen);
int32_t putSessionWinResultBuff(SStreamFileState* pFileState, SRowBuffPos* pPos); int32_t putSessionWinResultBuff(SStreamFileState* pFileState, SRowBuffPos* pPos);
int32_t getSessionFlushedBuff(SStreamFileState* pFileState, SSessionKey* pKey, void** pVal, int32_t* pVLen); int32_t getSessionFlushedBuff(SStreamFileState* pFileState, SSessionKey* pKey, void** pVal, int32_t* pVLen);
int32_t deleteSessionWinStateBuffFn(void* pBuff, const void *key, size_t keyLen); int32_t deleteSessionWinStateBuffFn(void* pBuff, const void* key, size_t keyLen);
int32_t deleteSessionWinStateBuffByPosFn(SStreamFileState* pFileState, SRowBuffPos* pPos); int32_t deleteSessionWinStateBuffByPosFn(SStreamFileState* pFileState, SRowBuffPos* pPos);
int32_t allocSessioncWinBuffByNextPosition(SStreamFileState* pFileState, SStreamStateCur* pCur,
const SSessionKey* pWinKey, void** ppVal, int32_t* pVLen);
SRowBuffPos* createSessionWinBuff(SStreamFileState* pFileState, SSessionKey* pKey, void* p, int32_t* pVLen); SRowBuffPos* createSessionWinBuff(SStreamFileState* pFileState, SSessionKey* pKey, void* p, int32_t* pVLen);
int32_t recoverSesssion(SStreamFileState* pFileState, int64_t ckId); int32_t recoverSesssion(SStreamFileState* pFileState, int64_t ckId);


@ -45,7 +45,8 @@ extern "C" {
#define SYNC_HEARTBEAT_SLOW_MS 1500 #define SYNC_HEARTBEAT_SLOW_MS 1500
#define SYNC_HEARTBEAT_REPLY_SLOW_MS 1500 #define SYNC_HEARTBEAT_REPLY_SLOW_MS 1500
#define SYNC_SNAP_RESEND_MS 1000 * 60 #define SYNC_SNAP_RESEND_MS 1000 * 300
#define SYNC_SNAP_TIMEOUT_MS 1000 * 1800
#define SYNC_VND_COMMIT_MIN_MS 3000 #define SYNC_VND_COMMIT_MIN_MS 3000


@ -237,6 +237,12 @@ void syslog(int unused, const char *format, ...);
#define TD_DIRSEP "/" #define TD_DIRSEP "/"
#endif #endif
#if defined(_WIN32)
#define TD_DIRSEP_CHAR '\\'
#else
#define TD_DIRSEP_CHAR '/'
#endif
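
A small sketch (not from this commit) of how the new TD_DIRSEP_CHAR can complement the existing TD_DIRSEP string; the helper below is hypothetical:

    #include <string.h>
    // Hypothetical helper: strip a trailing directory separator in place.
    static void trimTrailingSep(char *path) {
      size_t len = strlen(path);
      if (len > 1 && path[len - 1] == TD_DIRSEP_CHAR) {
        path[len - 1] = '\0';
      }
    }
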
#define TD_LOCALE_LEN 64 #define TD_LOCALE_LEN 64
#define TD_CHARSET_LEN 64 #define TD_CHARSET_LEN 64
#define TD_TIMEZONE_LEN 96 #define TD_TIMEZONE_LEN 96


@ -36,11 +36,12 @@ extern int64_t tsStreamMax;
extern float tsNumOfCores; extern float tsNumOfCores;
extern int64_t tsTotalMemoryKB; extern int64_t tsTotalMemoryKB;
extern char *tsProcPath; extern char *tsProcPath;
extern char tsSIMDBuiltins; extern char tsSIMDEnable;
extern char tsSSE42Enable; extern char tsSSE42Enable;
extern char tsAVXEnable; extern char tsAVXEnable;
extern char tsAVX2Enable; extern char tsAVX2Enable;
extern char tsFMAEnable; extern char tsFMAEnable;
extern char tsAVX512Enable;
extern char tsTagFilterCache; extern char tsTagFilterCache;
extern char configDir[]; extern char configDir[];


@ -54,15 +54,16 @@ extern "C" {
typedef struct TdFile *TdFilePtr; typedef struct TdFile *TdFilePtr;
#define TD_FILE_CREATE 0x0001 #define TD_FILE_CREATE 0x0001
#define TD_FILE_WRITE 0x0002 #define TD_FILE_WRITE 0x0002
#define TD_FILE_READ 0x0004 #define TD_FILE_READ 0x0004
#define TD_FILE_TRUNC 0x0008 #define TD_FILE_TRUNC 0x0008
#define TD_FILE_APPEND 0x0010 #define TD_FILE_APPEND 0x0010
#define TD_FILE_TEXT 0x0020 #define TD_FILE_TEXT 0x0020
#define TD_FILE_AUTO_DEL 0x0040 #define TD_FILE_AUTO_DEL 0x0040
#define TD_FILE_EXCL 0x0080 #define TD_FILE_EXCL 0x0080
#define TD_FILE_STREAM 0x0100 // Only support taosFprintfFile, taosGetLineFile, taosEOFFile #define TD_FILE_STREAM 0x0100 // Only support taosFprintfFile, taosGetLineFile, taosEOFFile
#define TD_FILE_WRITE_THROUGH 0x0200
TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions); TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions);
TdFilePtr taosCreateFile(const char *path, int32_t tdFileOptions); TdFilePtr taosCreateFile(const char *path, int32_t tdFileOptions);
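
An illustrative combination (not from this commit) of the new TD_FILE_WRITE_THROUGH bit with the existing open flags; the path and error handling are placeholders:

    // Request durable writes for a log-like file (flag combination is illustrative).
    TdFilePtr pFile = taosOpenFile("/tmp/demo.log",
                                   TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_WRITE_THROUGH);
    if (pFile == NULL) {
      // handle the open failure (error reporting omitted in this sketch)
    }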


@ -41,7 +41,7 @@ int32_t taosGetOsReleaseName(char *releaseName, char* sName, char* ver, int32_t
int32_t taosGetCpuInfo(char *cpuModel, int32_t maxLen, float *numOfCores); int32_t taosGetCpuInfo(char *cpuModel, int32_t maxLen, float *numOfCores);
int32_t taosGetCpuCores(float *numOfCores, bool physical); int32_t taosGetCpuCores(float *numOfCores, bool physical);
void taosGetCpuUsage(double *cpu_system, double *cpu_engine); void taosGetCpuUsage(double *cpu_system, double *cpu_engine);
int32_t taosGetCpuInstructions(char* sse42, char* avx, char* avx2, char* fma); int32_t taosGetCpuInstructions(char* sse42, char* avx, char* avx2, char* fma, char* avx512);
int32_t taosGetTotalMemory(int64_t *totalKB); int32_t taosGetTotalMemory(int64_t *totalKB);
int32_t taosGetProcMemory(int64_t *usedKB); int32_t taosGetProcMemory(int64_t *usedKB);
int32_t taosGetSysMemory(int64_t *usedKB); int32_t taosGetSysMemory(int64_t *usedKB);
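
A hedged sketch (not from this commit) of probing runtime SIMD support with the widened taosGetCpuInstructions() signature; the 0-on-success convention is an assumption:

    char sse42 = 0, avx = 0, avx2 = 0, fma = 0, avx512 = 0;
    if (taosGetCpuInstructions(&sse42, &avx, &avx2, &fma, &avx512) == 0) {
      // e.g. gate an AVX512 code path on both build-time and runtime support,
      // together with the new tsAVX512Enable global declared above
    }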


@ -172,7 +172,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TSC_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0X0231) #define TSDB_CODE_TSC_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0X0231)
// mnode-common // mnode-common
// #define TSDB_CODE_MND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0300) // 2.x #define TSDB_CODE_MND_REQ_REJECTED TAOS_DEF_ERROR_CODE(0, 0x0300)
// #define TSDB_CODE_MND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0301) // 2.x // #define TSDB_CODE_MND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0301) // 2.x
// #define TSDB_CODE_MND_ACTION_NEED_REPROCESSEDTAOS_DEF_ERROR_CODE(0, 0x0302) // 2.x // #define TSDB_CODE_MND_ACTION_NEED_REPROCESSEDTAOS_DEF_ERROR_CODE(0, 0x0302) // 2.x
#define TSDB_CODE_MND_NO_RIGHTS TAOS_DEF_ERROR_CODE(0, 0x0303) #define TSDB_CODE_MND_NO_RIGHTS TAOS_DEF_ERROR_CODE(0, 0x0303)
@ -332,6 +332,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MND_INVALID_REPLICA TAOS_DEF_ERROR_CODE(0, 0x03B7) #define TSDB_CODE_MND_INVALID_REPLICA TAOS_DEF_ERROR_CODE(0, 0x03B7)
#define TSDB_CODE_MND_DNODE_IN_CREATING TAOS_DEF_ERROR_CODE(0, 0x03B8) #define TSDB_CODE_MND_DNODE_IN_CREATING TAOS_DEF_ERROR_CODE(0, 0x03B8)
#define TSDB_CODE_MND_DNODE_IN_DROPPING TAOS_DEF_ERROR_CODE(0, 0x03B9) #define TSDB_CODE_MND_DNODE_IN_DROPPING TAOS_DEF_ERROR_CODE(0, 0x03B9)
#define TSDB_CODE_MND_NO_ENOUGH_VNODES TAOS_DEF_ERROR_CODE(0, 0x03BA)
// mnode-stable-part2 // mnode-stable-part2
#define TSDB_CODE_MND_NAME_CONFLICT_WITH_TOPIC TAOS_DEF_ERROR_CODE(0, 0x03C0) #define TSDB_CODE_MND_NAME_CONFLICT_WITH_TOPIC TAOS_DEF_ERROR_CODE(0, 0x03C0)
@ -383,15 +384,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MND_TOO_MANY_STREAMS TAOS_DEF_ERROR_CODE(0, 0x03F6) #define TSDB_CODE_MND_TOO_MANY_STREAMS TAOS_DEF_ERROR_CODE(0, 0x03F6)
#define TSDB_CODE_MND_INVALID_TARGET_TABLE TAOS_DEF_ERROR_CODE(0, 0x03F7) #define TSDB_CODE_MND_INVALID_TARGET_TABLE TAOS_DEF_ERROR_CODE(0, 0x03F7)
// mnode-sma
#define TSDB_CODE_MND_SMA_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0480)
#define TSDB_CODE_MND_SMA_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0481)
#define TSDB_CODE_MND_INVALID_SMA_OPTION TAOS_DEF_ERROR_CODE(0, 0x0482)
// mnode-tag-indxe
#define TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0483)
#define TSDB_CODE_MND_TAG_INDEX_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0484)
// dnode // dnode
// #define TSDB_CODE_DND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0400) // 2.x // #define TSDB_CODE_DND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0400) // 2.x
@ -418,6 +411,22 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MNODE_NO_NEED_RESTORE TAOS_DEF_ERROR_CODE(0, 0x0415) // internal #define TSDB_CODE_MNODE_NO_NEED_RESTORE TAOS_DEF_ERROR_CODE(0, 0x0415) // internal
#define TSDB_CODE_DNODE_ONLY_USE_WHEN_OFFLINE TAOS_DEF_ERROR_CODE(0, 0x0416) #define TSDB_CODE_DNODE_ONLY_USE_WHEN_OFFLINE TAOS_DEF_ERROR_CODE(0, 0x0416)
// mnode-sma
#define TSDB_CODE_MND_SMA_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0480)
#define TSDB_CODE_MND_SMA_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0481)
#define TSDB_CODE_MND_INVALID_SMA_OPTION TAOS_DEF_ERROR_CODE(0, 0x0482)
// mnode-tag-indxe
#define TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0483)
#define TSDB_CODE_MND_TAG_INDEX_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0484)
// mnode-view
#define TSDB_CODE_MND_VIEW_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x04A0)
#define TSDB_CODE_MND_VIEW_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x04A1)
// vnode // vnode
// #define TSDB_CODE_VND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0500) // 2.x // #define TSDB_CODE_VND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0500) // 2.x
// #define TSDB_CODE_VND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0501) // 2.x // #define TSDB_CODE_VND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0501) // 2.x
@ -548,6 +557,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_GRANT_GEN_IVLD_KEY TAOS_DEF_ERROR_CODE(0, 0x0812) #define TSDB_CODE_GRANT_GEN_IVLD_KEY TAOS_DEF_ERROR_CODE(0, 0x0812)
#define TSDB_CODE_GRANT_GEN_APP_LIMIT TAOS_DEF_ERROR_CODE(0, 0x0813) #define TSDB_CODE_GRANT_GEN_APP_LIMIT TAOS_DEF_ERROR_CODE(0, 0x0813)
#define TSDB_CODE_GRANT_GEN_ENC_IVLD_KLEN TAOS_DEF_ERROR_CODE(0, 0x0814) #define TSDB_CODE_GRANT_GEN_ENC_IVLD_KLEN TAOS_DEF_ERROR_CODE(0, 0x0814)
#define TSDB_CODE_GRANT_PAR_IVLD_DIST TAOS_DEF_ERROR_CODE(0, 0x0815)
// sync // sync
// #define TSDB_CODE_SYN_INVALID_CONFIG TAOS_DEF_ERROR_CODE(0, 0x0900) // 2.x // #define TSDB_CODE_SYN_INVALID_CONFIG TAOS_DEF_ERROR_CODE(0, 0x0900) // 2.x
@ -631,6 +641,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_SCH_IGNORE_ERROR TAOS_DEF_ERROR_CODE(0, 0x2503) #define TSDB_CODE_SCH_IGNORE_ERROR TAOS_DEF_ERROR_CODE(0, 0x2503)
#define TSDB_CODE_SCH_TIMEOUT_ERROR TAOS_DEF_ERROR_CODE(0, 0x2504) #define TSDB_CODE_SCH_TIMEOUT_ERROR TAOS_DEF_ERROR_CODE(0, 0x2504)
#define TSDB_CODE_SCH_JOB_IS_DROPPING TAOS_DEF_ERROR_CODE(0, 0x2505) #define TSDB_CODE_SCH_JOB_IS_DROPPING TAOS_DEF_ERROR_CODE(0, 0x2505)
#define TSDB_CODE_SCH_JOB_NOT_EXISTS TAOS_DEF_ERROR_CODE(0, 0x2506)
//parser //parser
#define TSDB_CODE_PAR_SYNTAX_ERROR TAOS_DEF_ERROR_CODE(0, 0x2600) #define TSDB_CODE_PAR_SYNTAX_ERROR TAOS_DEF_ERROR_CODE(0, 0x2600)
@ -725,6 +736,9 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED TAOS_DEF_ERROR_CODE(0, 0x2669) #define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED TAOS_DEF_ERROR_CODE(0, 0x2669)
#define TSDB_CODE_PAR_INVALID_VARBINARY TAOS_DEF_ERROR_CODE(0, 0x266A) #define TSDB_CODE_PAR_INVALID_VARBINARY TAOS_DEF_ERROR_CODE(0, 0x266A)
#define TSDB_CODE_PAR_INVALID_IP_RANGE TAOS_DEF_ERROR_CODE(0, 0x266B) #define TSDB_CODE_PAR_INVALID_IP_RANGE TAOS_DEF_ERROR_CODE(0, 0x266B)
#define TSDB_CODE_PAR_INVALID_VIEW_QUERY TAOS_DEF_ERROR_CODE(0, 0x266C)
#define TSDB_CODE_PAR_COL_QUERY_MISMATCH TAOS_DEF_ERROR_CODE(0, 0x266D)
#define TSDB_CODE_PAR_VIEW_CONFLICT_WITH_TABLE TAOS_DEF_ERROR_CODE(0, 0x266E)
#define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF) #define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF)
//planner //planner
@ -740,7 +754,9 @@ int32_t* taosGetErrno();
#define TSDB_CODE_FUNC_FUNTION_PARA_VALUE TAOS_DEF_ERROR_CODE(0, 0x2803) #define TSDB_CODE_FUNC_FUNTION_PARA_VALUE TAOS_DEF_ERROR_CODE(0, 0x2803)
#define TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION TAOS_DEF_ERROR_CODE(0, 0x2804) #define TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION TAOS_DEF_ERROR_CODE(0, 0x2804)
#define TSDB_CODE_FUNC_DUP_TIMESTAMP TAOS_DEF_ERROR_CODE(0, 0x2805) #define TSDB_CODE_FUNC_DUP_TIMESTAMP TAOS_DEF_ERROR_CODE(0, 0x2805)
#define TSDB_CODE_FUNC_TO_TIMESTAMP_FAILED TAOS_DEF_ERROR_CODE(0, 0x2806) #define TSDB_CODE_FUNC_TO_TIMESTAMP_FAILED_FORMAT_ERR TAOS_DEF_ERROR_CODE(0, 0x2806)
#define TSDB_CODE_FUNC_TO_TIMESTAMP_FAILED_TS_ERR TAOS_DEF_ERROR_CODE(0, 0x2807)
#define TSDB_CODE_FUNC_TO_TIMESTAMP_FAILED_NOT_SUPPORTED TAOS_DEF_ERROR_CODE(0, 0x2808)
//udf //udf
#define TSDB_CODE_UDF_STOPPING TAOS_DEF_ERROR_CODE(0, 0x2901) #define TSDB_CODE_UDF_STOPPING TAOS_DEF_ERROR_CODE(0, 0x2901)
@ -807,6 +823,8 @@ int32_t* taosGetErrno();
// stream // stream
#define TSDB_CODE_STREAM_TASK_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x4100) #define TSDB_CODE_STREAM_TASK_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x4100)
#define TSDB_CODE_STREAM_EXEC_CANCELLED TAOS_DEF_ERROR_CODE(0, 0x4102) #define TSDB_CODE_STREAM_EXEC_CANCELLED TAOS_DEF_ERROR_CODE(0, 0x4102)
#define TSDB_CODE_STREAM_INVALID_STATETRANS TAOS_DEF_ERROR_CODE(0, 0x4103)
#define TSDB_CODE_STREAM_TASK_IVLD_STATUS TAOS_DEF_ERROR_CODE(0, 0x4104)
// TDLite // TDLite
#define TSDB_CODE_TDLITE_IVLD_OPEN_FLAGS TAOS_DEF_ERROR_CODE(0, 0x5100) #define TSDB_CODE_TDLITE_IVLD_OPEN_FLAGS TAOS_DEF_ERROR_CODE(0, 0x5100)


@ -30,6 +30,10 @@ extern "C" {
#define INT64MASK(_x) ((((uint64_t)1) << _x) - 1) #define INT64MASK(_x) ((((uint64_t)1) << _x) - 1)
#define INT32MASK(_x) (((uint32_t)1 << _x) - 1) #define INT32MASK(_x) (((uint32_t)1 << _x) - 1)
#define INT8MASK(_x) (((uint8_t)1 << _x) - 1) #define INT8MASK(_x) (((uint8_t)1 << _x) - 1)
#define ZIGZAG_ENCODE(T, v) (((u##T)((v) >> (sizeof(T) * 8 - 1))) ^ (((u##T)(v)) << 1)) // zigzag encode
#define ZIGZAG_DECODE(T, v) (((v) >> 1) ^ -((T)((v)&1))) // zigzag decode
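
For illustration (not part of this change), a minimal round-trip with the new zigzag macros; zigzag maps 0, -1, 1, -2, 2 to 0, 1, 2, 3, 4 so small magnitudes stay small when encoded:

    #include <stdint.h>
    #include <assert.h>
    // assumes the macros above are visible via this header
    static void zigzagRoundTripDemo(void) {
      int32_t  v = -2;
      uint32_t z = ZIGZAG_ENCODE(int32_t, v);    // u##int32_t -> uint32_t; -2 encodes to 3
      int32_t  b = ZIGZAG_DECODE(int32_t, z);    // 3 decodes back to -2
      assert(b == v);
    }
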
// Compression algorithm // Compression algorithm
#define NO_COMPRESSION 0 #define NO_COMPRESSION 0
#define ONE_STAGE_COMP 1 #define ONE_STAGE_COMP 1
@ -129,6 +133,12 @@ int32_t tsCompressBigint(void *pIn, int32_t nIn, int32_t nEle, void *pOut, int32
int32_t nBuf); int32_t nBuf);
int32_t tsDecompressBigint(void *pIn, int32_t nIn, int32_t nEle, void *pOut, int32_t nOut, uint8_t cmprAlg, void *pBuf, int32_t tsDecompressBigint(void *pIn, int32_t nIn, int32_t nEle, void *pOut, int32_t nOut, uint8_t cmprAlg, void *pBuf,
int32_t nBuf); int32_t nBuf);
// for internal usage
int32_t getWordLength(char type);
int32_t tsDecompressIntImpl_Hw(const char *const input, const int32_t nelements, char *const output, const char type);
int32_t tsDecompressFloatImplAvx512(const char *const input, const int32_t nelements, char *const output);
int32_t tsDecompressFloatImplAvx2(const char *const input, const int32_t nelements, char *const output);
/************************************************************************* /*************************************************************************
* STREAM COMPRESSION * STREAM COMPRESSION


@ -57,15 +57,31 @@ typedef enum {
CFG_SCOPE_BOTH CFG_SCOPE_BOTH
} ECfgScopeType; } ECfgScopeType;
typedef enum {
CFG_DYN_NONE = 0,
CFG_DYN_SERVER = 1,
CFG_DYN_CLIENT = 2,
CFG_DYN_BOTH = 3,
#ifdef TD_ENTERPRISE
CFG_DYN_ENT_SERVER = CFG_DYN_SERVER,
CFG_DYN_ENT_CLIENT = CFG_DYN_CLIENT,
CFG_DYN_ENT_BOTH = CFG_DYN_BOTH,
#else
CFG_DYN_ENT_SERVER = CFG_DYN_NONE,
CFG_DYN_ENT_CLIENT = CFG_DYN_NONE,
CFG_DYN_ENT_BOTH = CFG_DYN_NONE,
#endif
} ECfgDynType;
typedef struct SConfigItem { typedef struct SConfigItem {
ECfgSrcType stype; ECfgSrcType stype;
ECfgDataType dtype; ECfgDataType dtype;
int8_t scope; int8_t scope;
char *name; int8_t dynScope;
char *name;
union { union {
bool bval; bool bval;
float fval; float fval;
double dval;
int32_t i32; int32_t i32;
int64_t i64; int64_t i64;
char *str; char *str;
@ -100,16 +116,20 @@ int32_t cfgGetSize(SConfig *pCfg);
SConfigItem *cfgGetItem(SConfig *pCfg, const char *name); SConfigItem *cfgGetItem(SConfig *pCfg, const char *name);
int32_t cfgSetItem(SConfig *pCfg, const char *name, const char *value, ECfgSrcType stype); int32_t cfgSetItem(SConfig *pCfg, const char *name, const char *value, ECfgSrcType stype);
int32_t cfgAddBool(SConfig *pCfg, const char *name, bool defaultVal, int8_t scope); int32_t cfgCheckRangeForDynUpdate(SConfig *pCfg, const char *name, const char *pVal, bool isServer);
int32_t cfgAddInt32(SConfig *pCfg, const char *name, int32_t defaultVal, int64_t minval, int64_t maxval, int8_t scope);
int32_t cfgAddInt64(SConfig *pCfg, const char *name, int64_t defaultVal, int64_t minval, int64_t maxval, int8_t scope); int32_t cfgAddBool(SConfig *pCfg, const char *name, bool defaultVal, int8_t scope, int8_t dynScope);
int32_t cfgAddFloat(SConfig *pCfg, const char *name, float defaultVal, float minval, float maxval, int8_t scope); int32_t cfgAddInt32(SConfig *pCfg, const char *name, int32_t defaultVal, int64_t minval, int64_t maxval, int8_t scope,
int32_t cfgAddDouble(SConfig *pCfg, const char *name, double defaultVal, double minval, double maxval, int8_t scope); int8_t dynScope);
int32_t cfgAddString(SConfig *pCfg, const char *name, const char *defaultVal, int8_t scope); int32_t cfgAddInt64(SConfig *pCfg, const char *name, int64_t defaultVal, int64_t minval, int64_t maxval, int8_t scope,
int32_t cfgAddDir(SConfig *pCfg, const char *name, const char *defaultVal, int8_t scope); int8_t dynScope);
int32_t cfgAddLocale(SConfig *pCfg, const char *name, const char *defaultVal, int8_t scope); int32_t cfgAddFloat(SConfig *pCfg, const char *name, float defaultVal, float minval, float maxval, int8_t scope,
int32_t cfgAddCharset(SConfig *pCfg, const char *name, const char *defaultVal, int8_t scope); int8_t dynScope);
int32_t cfgAddTimezone(SConfig *pCfg, const char *name, const char *defaultVal, int8_t scope); int32_t cfgAddString(SConfig *pCfg, const char *name, const char *defaultVal, int8_t scope, int8_t dynScope);
int32_t cfgAddDir(SConfig *pCfg, const char *name, const char *defaultVal, int8_t scope, int8_t dynScope);
int32_t cfgAddLocale(SConfig *pCfg, const char *name, const char *defaultVal, int8_t scope, int8_t dynScope);
int32_t cfgAddCharset(SConfig *pCfg, const char *name, const char *defaultVal, int8_t scope, int8_t dynScope);
int32_t cfgAddTimezone(SConfig *pCfg, const char *name, const char *defaultVal, int8_t scope, int8_t dynScope);
const char *cfgStypeStr(ECfgSrcType type); const char *cfgStypeStr(ECfgSrcType type);
const char *cfgDtypeStr(ECfgDataType type); const char *cfgDtypeStr(ECfgDataType type);
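
An illustrative registration call (not from this commit) showing the extra dynScope argument; the option name and range are made up:

    // Register a hypothetical option that may be updated dynamically on both ends.
    cfgAddInt32(pCfg, "demoThreads", 4, 1, 1024, CFG_SCOPE_BOTH, CFG_DYN_BOTH);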


@ -34,7 +34,6 @@ extern "C" {
// Bytes for each type. // Bytes for each type.
extern const int32_t TYPE_BYTES[21]; extern const int32_t TYPE_BYTES[21];
// TODO: replace and remove code below
#define CHAR_BYTES sizeof(char) #define CHAR_BYTES sizeof(char)
#define SHORT_BYTES sizeof(int16_t) #define SHORT_BYTES sizeof(int16_t)
#define INT_BYTES sizeof(int32_t) #define INT_BYTES sizeof(int32_t)
@ -240,6 +239,9 @@ typedef enum ELogicConditionType {
#define TSDB_MAX_SQL_SHOW_LEN 1024 #define TSDB_MAX_SQL_SHOW_LEN 1024
#define TSDB_MAX_ALLOWED_SQL_LEN (1 * 1024 * 1024u) // sql length should be less than 1mb #define TSDB_MAX_ALLOWED_SQL_LEN (1 * 1024 * 1024u) // sql length should be less than 1mb
#define TSDB_VIEW_NAME_LEN 193
#define TSDB_VIEW_FNAME_LEN (TSDB_DB_FNAME_LEN + TSDB_VIEW_NAME_LEN + TSDB_NAME_DELIMITER_LEN)
#define TSDB_APP_NAME_LEN TSDB_UNI_LEN #define TSDB_APP_NAME_LEN TSDB_UNI_LEN
#define TSDB_TB_COMMENT_LEN 1025 #define TSDB_TB_COMMENT_LEN 1025
@ -303,6 +305,8 @@ typedef enum ELogicConditionType {
#define TSDB_SYNC_APPLYQ_SIZE_LIMIT 512 #define TSDB_SYNC_APPLYQ_SIZE_LIMIT 512
#define TSDB_SYNC_NEGOTIATION_WIN 512 #define TSDB_SYNC_NEGOTIATION_WIN 512
#define TSDB_SYNC_SNAP_BUFFER_SIZE 2048
#define TSDB_TBNAME_COLUMN_INDEX (-1) #define TSDB_TBNAME_COLUMN_INDEX (-1)
#define TSDB_MULTI_TABLEMETA_MAX_NUM 100000 // maximum batch size allowed to load table meta #define TSDB_MULTI_TABLEMETA_MAX_NUM 100000 // maximum batch size allowed to load table meta
@ -325,7 +329,7 @@ typedef enum ELogicConditionType {
#define TSDB_MAX_DAYS_PER_FILE (3650 * 1440) #define TSDB_MAX_DAYS_PER_FILE (3650 * 1440)
#define TSDB_DEFAULT_DAYS_PER_FILE (10 * 1440) #define TSDB_DEFAULT_DAYS_PER_FILE (10 * 1440)
#define TSDB_MIN_DURATION_PER_FILE 60 // unit minute #define TSDB_MIN_DURATION_PER_FILE 60 // unit minute
#define TSDB_MAX_DURATION_PER_FILE (3650 * 1440) #define TSDB_MAX_DURATION_PER_FILE (90 * 1440)
#define TSDB_DEFAULT_DURATION_PER_FILE (10 * 1440) #define TSDB_DEFAULT_DURATION_PER_FILE (10 * 1440)
#define TSDB_MIN_KEEP (1 * 1440) // data in db to be reserved. unit minute #define TSDB_MIN_KEEP (1 * 1440) // data in db to be reserved. unit minute
#define TSDB_MAX_KEEP (365000 * 1440) // data in db to be reserved. #define TSDB_MAX_KEEP (365000 * 1440) // data in db to be reserved.


@ -66,6 +66,7 @@ extern int32_t udfDebugFlag;
extern int32_t smaDebugFlag; extern int32_t smaDebugFlag;
extern int32_t idxDebugFlag; extern int32_t idxDebugFlag;
extern int32_t tdbDebugFlag; extern int32_t tdbDebugFlag;
extern int32_t sndDebugFlag;
int32_t taosInitLog(const char *logName, int32_t maxFiles); int32_t taosInitLog(const char *logName, int32_t maxFiles);
void taosCloseLog(); void taosCloseLog();


@ -101,6 +101,9 @@ struct STaosQall {
STaosQnode *current; STaosQnode *current;
STaosQnode *start; STaosQnode *start;
int32_t numOfItems; int32_t numOfItems;
int64_t memOfItems;
int32_t unAccessedNumOfItems;
int64_t unAccessMemOfItems;
}; };
STaosQueue *taosOpenQueue(); STaosQueue *taosOpenQueue();
@ -123,6 +126,9 @@ int32_t taosReadAllQitems(STaosQueue *queue, STaosQall *qall);
int32_t taosGetQitem(STaosQall *qall, void **ppItem); int32_t taosGetQitem(STaosQall *qall, void **ppItem);
void taosResetQitems(STaosQall *qall); void taosResetQitems(STaosQall *qall);
int32_t taosQallItemSize(STaosQall *qall); int32_t taosQallItemSize(STaosQall *qall);
int64_t taosQallMemSize(STaosQall *qll);
int64_t taosQallUnAccessedItemSize(STaosQall *qall);
int64_t taosQallUnAccessedMemSize(STaosQall *qall);
STaosQset *taosOpenQset(); STaosQset *taosOpenQset();
void taosCloseQset(STaosQset *qset); void taosCloseQset(STaosQset *qset);
@ -135,8 +141,6 @@ int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, SQueueInfo *qinfo)
int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, SQueueInfo *qinfo); int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, SQueueInfo *qinfo);
void taosResetQsetThread(STaosQset *qset, void *pItem); void taosResetQsetThread(STaosQset *qset, void *pItem);
extern int64_t tsRpcQueueMemoryAllowed;
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif
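
A hedged sketch of reading the new queue-accounting getters; it assumes a caller already drained a queue into a qall, and uses only the prototypes added above:

    // After taosReadAllQitems(queue, qall) has drained a queue into 'qall':
    int32_t items = taosQallItemSize(qall);            // existing item counter
    int64_t bytes = taosQallMemSize(qall);             // new: total memory of queued items
    int64_t left  = taosQallUnAccessedItemSize(qall);  // new: items not yet handed out by taosGetQitem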

include/util/tunit.h Normal file

@ -0,0 +1,35 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_UNIT_H_
#define _TD_UNIT_H_
#include "os.h"
#ifdef __cplusplus
extern "C" {
#endif
int64_t taosStrHumanToInt64(const char* str);
void taosInt64ToHumanStr(int64_t val, char* outStr);
int32_t taosStrHumanToInt32(const char* str);
void taosInt32ToHumanStr(int32_t val, char* outStr);
#ifdef __cplusplus
}
#endif
#endif /*_TD_UNIT_H_*/
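
A usage sketch for the new unit helpers; only the prototypes above come from the commit, and the unit-suffix semantics (e.g. "10M") are an assumption:

    char    buf[32] = {0};
    int64_t val = taosStrHumanToInt64("10M");  // assumed to parse a value with a unit suffix
    taosInt64ToHumanStr(val, buf);             // assumed to format it back into human-readable form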


@ -34,6 +34,7 @@ benchmarkName="taosBenchmark"
dumpName="taosdump" dumpName="taosdump"
demoName="taosdemo" demoName="taosdemo"
xname="taosx" xname="taosx"
keeperName="taoskeeper"
clientName2="taos" clientName2="taos"
serverName2="${clientName2}d" serverName2="${clientName2}d"
@ -42,6 +43,7 @@ productName2="TDengine"
emailName2="taosdata.com" emailName2="taosdata.com"
xname2="${clientName2}x" xname2="${clientName2}x"
adapterName2="${clientName2}adapter" adapterName2="${clientName2}adapter"
keeperName2="${clientName2}keeper"
explorerName="${clientName2}-explorer" explorerName="${clientName2}-explorer"
benchmarkName2="${clientName2}Benchmark" benchmarkName2="${clientName2}Benchmark"
@ -154,7 +156,7 @@ interactiveFqdn=yes # [yes | no]
verType=server # [server | client] verType=server # [server | client]
initType=systemd # [systemd | service | ...] initType=systemd # [systemd | service | ...]
while getopts "hv:e:i:" arg; do while getopts "hv:e:" arg; do
case $arg in case $arg in
e) e)
#echo "interactiveFqdn=$OPTARG" #echo "interactiveFqdn=$OPTARG"
@ -164,10 +166,6 @@ while getopts "hv:e:i:" arg; do
#echo "verType=$OPTARG" #echo "verType=$OPTARG"
verType=$(echo $OPTARG) verType=$(echo $OPTARG)
;; ;;
i)
#echo "initType=$OPTARG"
initType=$(echo $OPTARG)
;;
h) h)
echo "Usage: $(basename $0) -v [server | client] -e [yes | no]" echo "Usage: $(basename $0) -v [server | client] -e [yes | no]"
exit 0 exit 0
@ -218,6 +216,7 @@ function install_bin() {
${csudo}rm -f ${bin_link_dir}/${demoName2} || : ${csudo}rm -f ${bin_link_dir}/${demoName2} || :
${csudo}rm -f ${bin_link_dir}/${benchmarkName2} || : ${csudo}rm -f ${bin_link_dir}/${benchmarkName2} || :
${csudo}rm -f ${bin_link_dir}/${dumpName2} || : ${csudo}rm -f ${bin_link_dir}/${dumpName2} || :
${csudo}rm -f ${bin_link_dir}/${keeperName2} || :
${csudo}rm -f ${bin_link_dir}/set_core || : ${csudo}rm -f ${bin_link_dir}/set_core || :
${csudo}rm -f ${bin_link_dir}/TDinsight.sh || : ${csudo}rm -f ${bin_link_dir}/TDinsight.sh || :
@ -231,6 +230,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/${benchmarkName2} ] && ${csudo}ln -sf ${install_main_dir}/bin/${benchmarkName2} ${bin_link_dir}/${demoName2} || : [ -x ${install_main_dir}/bin/${benchmarkName2} ] && ${csudo}ln -sf ${install_main_dir}/bin/${benchmarkName2} ${bin_link_dir}/${demoName2} || :
[ -x ${install_main_dir}/bin/${benchmarkName2} ] && ${csudo}ln -sf ${install_main_dir}/bin/${benchmarkName2} ${bin_link_dir}/${benchmarkName2} || : [ -x ${install_main_dir}/bin/${benchmarkName2} ] && ${csudo}ln -sf ${install_main_dir}/bin/${benchmarkName2} ${bin_link_dir}/${benchmarkName2} || :
[ -x ${install_main_dir}/bin/${dumpName2} ] && ${csudo}ln -sf ${install_main_dir}/bin/${dumpName2} ${bin_link_dir}/${dumpName2} || : [ -x ${install_main_dir}/bin/${dumpName2} ] && ${csudo}ln -sf ${install_main_dir}/bin/${dumpName2} ${bin_link_dir}/${dumpName2} || :
[ -x ${install_main_dir}/bin/${keeperName2} ] && ${csudo}ln -sf ${install_main_dir}/bin/${keeperName2} ${bin_link_dir}/${keeperName2} || :
[ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -sf ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || : [ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -sf ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || :
if [ "$clientName2" == "${clientName}" ]; then if [ "$clientName2" == "${clientName}" ]; then
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || : [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
@ -373,42 +373,56 @@ function add_newHostname_to_hosts() {
return return
fi fi
done done
${csudo}echo "127.0.0.1 $1" >>/etc/hosts || :
if grep -q "127.0.0.1 $1" /etc/hosts; then
return
else
${csudo}chmod 666 /etc/hosts
${csudo}echo "127.0.0.1 $1" >>/etc/hosts
fi
} }
function set_hostname() { function set_hostname() {
echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:" echo -e -n "${GREEN}Host name or IP (assigned to this machine) which can be accessed by your tools or apps (must not be 'localhost')${NC}"
read newHostname read -e -p " : " -i "$(hostname)" newHostname
while true; do while true; do
if [[ ! -z "$newHostname" && "$newHostname" != "localhost" ]]; then if [ -z "$newHostname" ]; then
newHostname=$(hostname)
break
elif [ "$newHostname" != "localhost" ]; then
break break
else else
read -p "Please enter one hostname(must not be 'localhost'):" newHostname echo -e -n "${GREEN}Host name or IP (assigned to this machine) which can be accessed by your tools or apps (must not be 'localhost')${NC}"
read -e -p " : " -i "$(hostname)" newHostname
fi fi
done done
${csudo}hostname $newHostname || : # ${csudo}hostname $newHostname || :
retval=$(echo $?) # retval=$(echo $?)
if [[ $retval != 0 ]]; then # if [[ $retval != 0 ]]; then
echo # echo
echo "set hostname fail!" # echo "set hostname fail!"
return # return
fi # fi
#ubuntu/centos /etc/hostname # #ubuntu/centos /etc/hostname
if [[ -e /etc/hostname ]]; then # if [[ -e /etc/hostname ]]; then
${csudo}echo $newHostname >/etc/hostname || : # ${csudo}echo $newHostname >/etc/hostname || :
fi # fi
#debian: #HOSTNAME=yourname # #debian: #HOSTNAME=yourname
if [[ -e /etc/sysconfig/network ]]; then # if [[ -e /etc/sysconfig/network ]]; then
${csudo}sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network || : # ${csudo}sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network || :
fi # fi
${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/${configFile2} if [ -f ${cfg_install_dir}/${configFile2} ]; then
${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/${configFile2}
else
${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${script_dir}/cfg/${configFile2}
fi
serverFqdn=$newHostname serverFqdn=$newHostname
if [[ -e /etc/hosts ]]; then if [[ -e /etc/hosts ]] && [[ ! $newHostname =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
add_newHostname_to_hosts $newHostname add_newHostname_to_hosts $newHostname
fi fi
} }
@ -439,7 +453,12 @@ function set_ipAsFqdn() {
echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}" echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}"
localFqdn="127.0.0.1" localFqdn="127.0.0.1"
# Write the local FQDN to configuration file # Write the local FQDN to configuration file
${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/${configFile2}
if [ -f ${cfg_install_dir}/${configFile2} ]; then
${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/${configFile2}
else
${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${script_dir}/cfg/${configFile2}
fi
serverFqdn=$localFqdn serverFqdn=$localFqdn
echo echo
return return
@ -461,7 +480,11 @@ function set_ipAsFqdn() {
read -p "Please choose an IP from local IP list:" localFqdn read -p "Please choose an IP from local IP list:" localFqdn
else else
# Write the local FQDN to configuration file # Write the local FQDN to configuration file
${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/${configFile2} if [ -f ${cfg_install_dir}/${configFile2} ]; then
${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/${configFile2}
else
${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${script_dir}/cfg/${configFile2}
fi
serverFqdn=$localFqdn serverFqdn=$localFqdn
break break
fi fi
@ -476,37 +499,13 @@ function local_fqdn_check() {
echo echo
echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}" echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}"
echo echo
if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then set_hostname
echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}"
echo
while true; do
read -r -p "Set hostname now? [Y/n] " input
if [ ! -n "$input" ]; then
set_hostname
break
else
case $input in
[yY][eE][sS] | [yY])
set_hostname
break
;;
[nN][oO] | [nN])
set_ipAsFqdn
break
;;
*)
echo "Invalid input..."
;;
esac
fi
done
fi
} }
function install_adapter_config() { function install_adapter_config() {
if [ -f ${script_dir}/cfg/${adapterName}.toml ]; then
${csudo}sed -i -r "s/localhost/${serverFqdn}/g" ${script_dir}/cfg/${adapterName}.toml
fi
if [ ! -f "${cfg_install_dir}/${adapterName}.toml" ]; then if [ ! -f "${cfg_install_dir}/${adapterName}.toml" ]; then
${csudo}mkdir -p ${cfg_install_dir} ${csudo}mkdir -p ${cfg_install_dir}
[ -f ${script_dir}/cfg/${adapterName}.toml ] && ${csudo}cp ${script_dir}/cfg/${adapterName}.toml ${cfg_install_dir} [ -f ${script_dir}/cfg/${adapterName}.toml ] && ${csudo}cp ${script_dir}/cfg/${adapterName}.toml ${cfg_install_dir}
@ -523,13 +522,38 @@ function install_adapter_config() {
} }
function install_keeper_config() {
if [ -f ${script_dir}/cfg/${keeperName2}.toml ]; then
${csudo}sed -i -r "s/127.0.0.1/${serverFqdn}/g" ${script_dir}/cfg/${keeperName2}.toml
fi
if [ -f "${configDir}/keeper.toml" ]; then
echo "The file keeper.toml will be renamed to ${keeperName2}.toml"
${csudo}cp ${script_dir}/cfg/${keeperName2}.toml ${configDir}/${keeperName2}.toml.new
${csudo}mv ${configDir}/keeper.toml ${configDir}/${keeperName2}.toml
elif [ -f "${configDir}/${keeperName2}.toml" ]; then
# "taoskeeper.toml exists,new config is taoskeeper.toml.new"
${csudo}cp ${script_dir}/cfg/${keeperName2}.toml ${configDir}/${keeperName2}.toml.new
else
${csudo}cp ${script_dir}/cfg/${keeperName2}.toml ${configDir}/${keeperName2}.toml
fi
command -v systemctl >/dev/null 2>&1 && ${csudo}systemctl daemon-reload >/dev/null 2>&1 || true
}
function install_config() { function install_config() {
if [ ! -f "${cfg_install_dir}/${configFile2}" ]; then if [ ! -f "${cfg_install_dir}/${configFile2}" ]; then
${csudo}mkdir -p ${cfg_install_dir} ${csudo}mkdir -p ${cfg_install_dir}
[ -f ${script_dir}/cfg/${configFile2} ] && ${csudo}cp ${script_dir}/cfg/${configFile2} ${cfg_install_dir} if [ -f ${script_dir}/cfg/${configFile2} ]; then
${csudo} echo "monitor 1" >> ${script_dir}/cfg/${configFile2}
${csudo} echo "monitorFQDN ${serverFqdn}" >> ${script_dir}/cfg/${configFile2}
${csudo} echo "audit 1" >> ${script_dir}/cfg/${configFile2}
${csudo}cp ${script_dir}/cfg/${configFile2} ${cfg_install_dir}
fi
${csudo}chmod 644 ${cfg_install_dir}/* ${csudo}chmod 644 ${cfg_install_dir}/*
else else
${csudo} echo "monitor 1" >> ${script_dir}/cfg/${configFile2}
${csudo} echo "monitorFQDN ${serverFqdn}" >> ${script_dir}/cfg/${configFile2}
${csudo} echo "audit 1" >> ${script_dir}/cfg/${configFile2}
${csudo}cp -f ${script_dir}/cfg/${configFile2} ${cfg_install_dir}/${configFile2}.new ${csudo}cp -f ${script_dir}/cfg/${configFile2} ${cfg_install_dir}/${configFile2}.new
fi fi
@ -537,6 +561,8 @@ function install_config() {
[ ! -z $1 ] && return 0 || : # only install client [ ! -z $1 ] && return 0 || : # only install client
if ((${update_flag} == 1)); then if ((${update_flag} == 1)); then
return 0 return 0
fi fi
@ -554,7 +580,11 @@ function install_config() {
read firstEp read firstEp
while true; do while true; do
if [ ! -z "$firstEp" ]; then if [ ! -z "$firstEp" ]; then
${csudo}sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/${configFile2} if [ -f ${cfg_install_dir}/${configFile2} ]; then
${csudo}sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/${configFile2}
else
${csudo}sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${script_dir}/cfg/${configFile2}
fi
break break
else else
break break
@ -607,6 +637,9 @@ function install_data() {
function install_connector() { function install_connector() {
if [ -d "${script_dir}/connector/" ]; then if [ -d "${script_dir}/connector/" ]; then
${csudo}cp -rf ${script_dir}/connector/ ${install_main_dir}/ || echo "failed to copy connector" ${csudo}cp -rf ${script_dir}/connector/ ${install_main_dir}/ || echo "failed to copy connector"
${csudo}cp ${script_dir}/start-all.sh ${install_main_dir}/ || echo "failed to copy start-all.sh"
${csudo}cp ${script_dir}/stop-all.sh ${install_main_dir}/ || echo "failed to copy stop-all.sh"
${csudo}cp ${script_dir}/README.md ${install_main_dir}/ || echo "failed to copy README.md"
fi fi
} }
@ -622,6 +655,14 @@ function install_web() {
fi fi
} }
function install_taosx() {
if [ -f "${script_dir}/taosx/install_taosx.sh" ]; then
cd ${script_dir}/taosx
chmod a+x install_taosx.sh
bash install_taosx.sh -e $serverFqdn
fi
}
function clean_service_on_sysvinit() { function clean_service_on_sysvinit() {
if ps aux | grep -v grep | grep ${serverName2} &>/dev/null; then if ps aux | grep -v grep | grep ${serverName2} &>/dev/null; then
${csudo}service ${serverName2} stop || : ${csudo}service ${serverName2} stop || :
@ -702,29 +743,6 @@ function clean_service_on_systemd() {
fi fi
${csudo}systemctl disable tarbitratord &>/dev/null || echo &>/dev/null ${csudo}systemctl disable tarbitratord &>/dev/null || echo &>/dev/null
${csudo}rm -f ${tarbitratord_service_config} ${csudo}rm -f ${tarbitratord_service_config}
if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then
x_service_config="${service_config_dir}/${xName2}.service"
if [ -e "$x_service_config" ]; then
if systemctl is-active --quiet ${xName2}; then
echo "${productName2} ${xName2} is running, stopping it..."
${csudo}systemctl stop ${xName2} &>/dev/null || echo &>/dev/null
fi
${csudo}systemctl disable ${xName2} &>/dev/null || echo &>/dev/null
${csudo}rm -f ${x_service_config}
fi
explorer_service_config="${service_config_dir}/${explorerName2}.service"
if [ -e "$explorer_service_config" ]; then
if systemctl is-active --quiet ${explorerName2}; then
echo "${productName2} ${explorerName2} is running, stopping it..."
${csudo}systemctl stop ${explorerName2} &>/dev/null || echo &>/dev/null
fi
${csudo}systemctl disable ${explorerName2} &>/dev/null || echo &>/dev/null
${csudo}rm -f ${explorer_service_config}
${csudo}rm -f /etc/${clientName2}/explorer.toml
fi
fi
} }
function install_service_on_systemd() { function install_service_on_systemd() {
@ -745,15 +763,27 @@ function install_service_on_systemd() {
${csudo}systemctl daemon-reload ${csudo}systemctl daemon-reload
${csudo}systemctl enable ${serverName2} ${csudo}systemctl enable ${serverName2}
${csudo}systemctl daemon-reload ${csudo}systemctl daemon-reload
} }
function install_adapter_service() { function install_adapter_service() {
if ((${service_mod} == 0)); then if ((${service_mod} == 0)); then
[ -f ${script_dir}/cfg/${adapterName}.service ] && [ -f ${script_dir}/cfg/${adapterName2}.service ] &&
${csudo}cp ${script_dir}/cfg/${adapterName}.service \ ${csudo}cp ${script_dir}/cfg/${adapterName2}.service \
${service_config_dir}/ || : ${service_config_dir}/ || :
${csudo}systemctl enable ${adapterName2}
${csudo}systemctl daemon-reload
fi
}
function install_keeper_service() {
if ((${service_mod} == 0)); then
[ -f ${script_dir}/cfg/${clientName2}keeper.service ] &&
${csudo}cp ${script_dir}/cfg/${clientName2}keeper.service \
${service_config_dir}/ || :
${csudo}systemctl enable ${clientName2}keeper
${csudo}systemctl daemon-reload ${csudo}systemctl daemon-reload
fi fi
} }
@ -872,7 +902,7 @@ function updateProduct() {
tar -zxf ${tarName} tar -zxf ${tarName}
install_jemalloc install_jemalloc
echo -e "${GREEN}Start to update ${productName2}...${NC}" echo "Start to update ${productName2}..."
# Stop the service if running # Stop the service if running
if ps aux | grep -v grep | grep ${serverName2} &>/dev/null; then if ps aux | grep -v grep | grep ${serverName2} &>/dev/null; then
if ((${service_mod} == 0)); then if ((${service_mod} == 0)); then
@ -890,9 +920,11 @@ function updateProduct() {
install_log install_log
install_header install_header
install_lib install_lib
install_config
if [ "$verMode" == "cluster" ]; then if [ "$verMode" == "cluster" ]; then
install_connector install_connector
install_taosx
fi fi
install_examples install_examples
@ -901,52 +933,69 @@ function updateProduct() {
install_bin install_bin
install_service install_service
install_adapter_service install_adapter_service
install_config
install_adapter_config install_adapter_config
install_keeper_service
install_keeper_config
openresty_work=false openresty_work=false
echo echo
echo -e "${GREEN_DARK}To configure ${productName2} ${NC}\t: edit ${cfg_install_dir}/${configFile2}" echo -e "${GREEN_DARK}To configure ${productName2} ${NC}\t\t: edit ${cfg_install_dir}/${configFile2}"
[ -f ${configDir}/${clientName2}adapter.toml ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \ [ -f ${configDir}/${clientName2}adapter.toml ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \
echo -e "${GREEN_DARK}To configure ${clientName2}Adapter ${NC}\t: edit ${configDir}/${clientName2}adapter.toml" echo -e "${GREEN_DARK}To configure ${clientName2}Adapter ${NC}\t: edit ${configDir}/${clientName2}adapter.toml"
if [ "$verMode" == "cluster" ]; then
echo -e "${GREEN_DARK}To configure ${clientName2}-explorer ${NC}\t: edit ${configDir}/explorer.toml"
fi
if ((${service_mod} == 0)); then if ((${service_mod} == 0)); then
echo -e "${GREEN_DARK}To start ${productName2} ${NC}\t: ${csudo}systemctl start ${serverName2}${NC}" echo -e "${GREEN_DARK}To start ${productName2} ${NC}\t\t: ${csudo}systemctl start ${serverName2}${NC}"
[ -f ${service_config_dir}/${clientName2}adapter.service ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \ [ -f ${service_config_dir}/${clientName2}adapter.service ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \
echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}\t: ${csudo}systemctl start ${clientName2}adapter ${NC}" echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}\t\t: ${csudo}systemctl start ${clientName2}adapter ${NC}"
elif ((${service_mod} == 1)); then elif ((${service_mod} == 1)); then
echo -e "${GREEN_DARK}To start ${productName2} ${NC}\t: ${csudo}service ${serverName2} start${NC}" echo -e "${GREEN_DARK}To start ${productName2} ${NC}\t\t: ${csudo}service ${serverName2} start${NC}"
[ -f ${service_config_dir}/${clientName2}adapter.service ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \ [ -f ${service_config_dir}/${clientName2}adapter.service ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \
echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}\t: ${csudo}service ${clientName2}adapter start${NC}" echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}\t\t: ${csudo}service ${clientName2}adapter start${NC}"
else else
echo -e "${GREEN_DARK}To start ${productName2} ${NC}\t: ./${serverName2}${NC}" echo -e "${GREEN_DARK}To start ${productName2} ${NC}\t\t: ./${serverName2}${NC}"
[ -f ${installDir}/bin/${clientName2}adapter ] && \ [ -f ${installDir}/bin/${clientName2}adapter ] && \
echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}\t: ${clientName2}adapter ${NC}" echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}\t\t: ${clientName2}adapter ${NC}"
fi fi
echo -e "${GREEN_DARK}To enable ${clientName2}keeper ${NC}\t: sudo systemctl enable ${clientName2}keeper ${NC}" echo -e "${GREEN_DARK}To enable ${clientName2}keeper ${NC}\t\t: sudo systemctl enable ${clientName2}keeper ${NC}"
if [ "$verMode" == "cluster" ];then
if [ ${openresty_work} = 'true' ]; then echo -e "${GREEN_DARK}To start ${clientName2}x ${NC}\t\t\t: sudo systemctl start ${clientName2}x ${NC}"
echo -e "${GREEN_DARK}To access ${productName2} ${NC}\t: use ${GREEN_UNDERLINE}${clientName2} -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${web_port}${NC}" echo -e "${GREEN_DARK}To start ${clientName2}-explorer ${NC}\t\t: sudo systemctl start ${clientName2}-explorer ${NC}"
else
echo -e "${GREEN_DARK}To access ${productName2} ${NC}\t: use ${GREEN_UNDERLINE}${clientName2} -h $serverFqdn${NC} in shell${NC}"
fi fi
if ((${prompt_force} == 1)); then # if [ ${openresty_work} = 'true' ]; then
echo "" # echo -e "${GREEN_DARK}To access ${productName2} ${NC}\t\t: use ${GREEN_UNDERLINE}${clientName2} -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${web_port}${NC}"
echo -e "${RED}Please run '${serverName2} --force-keep-file' at first time for the exist ${productName2} $exist_version!${NC}" # else
fi # echo -e "${GREEN_DARK}To access ${productName2} ${NC}\t\t: use ${GREEN_UNDERLINE}${clientName2} -h $serverFqdn${NC} in shell${NC}"
# fi
# if ((${prompt_force} == 1)); then
# echo ""
# echo -e "${RED}Please run '${serverName2} --force-keep-file' at first time for the exist ${productName2} $exist_version!${NC}"
# fi
echo echo
echo -e "\033[44;32;1m${productName2} is updated successfully!${NC}" echo "${productName2} is updated successfully!"
echo -e "\033[44;32;1mTo manage ${productName2} instance, view documentation or explorer features, please install ${clientName2}Explorer ${NC}" echo
if [ "$verMode" == "cluster" ];then
echo -e "\033[44;32;1mTo start all the components : ./start-all.sh${NC}"
fi
echo -e "\033[44;32;1mTo access ${productName2} : ${clientName2} -h $serverFqdn${NC}"
if [ "$verMode" == "cluster" ];then
echo -e "\033[44;32;1mTo access the management system : http://$serverFqdn:6060${NC}"
echo -e "\033[44;32;1mTo read the user manual : http://$serverFqdn:6060/docs${NC}"
fi
else else
install_bin install_bin
install_config
echo echo
echo -e "\033[44;32;1m${productName2} client is updated successfully!${NC}" echo -e "\033[44;32;1m${productName2} client is updated successfully!${NC}"
fi fi
cd $script_dir
rm -rf $(tar -tf ${tarName} | grep -Ev "^\./$|^\/") rm -rf $(tar -tf ${tarName} | grep -Ev "^\./$|^\/")
} }
@ -958,7 +1007,7 @@ function installProduct() {
fi fi
tar -zxf ${tarName} tar -zxf ${tarName}
echo -e "${GREEN}Start to install ${productName2}...${NC}" echo "Start to install ${productName2}..."
install_main_path install_main_path
@ -972,9 +1021,11 @@ function installProduct() {
install_jemalloc install_jemalloc
#install_avro lib #install_avro lib
#install_avro lib64 #install_avro lib64
install_config
if [ "$verMode" == "cluster" ]; then if [ "$verMode" == "cluster" ]; then
install_connector install_connector
install_taosx
fi fi
install_examples install_examples
install_web install_web
@ -984,62 +1035,80 @@ function installProduct() {
install_service install_service
install_adapter_service install_adapter_service
install_adapter_config install_adapter_config
install_keeper_service
install_keeper_config
openresty_work=false openresty_work=false
install_config
# Ask if to start the service # Ask if to start the service
echo echo
echo -e "${GREEN_DARK}To configure ${productName2} ${NC}\t: edit ${cfg_install_dir}/${configFile2}" echo -e "${GREEN_DARK}To configure ${productName2} ${NC}\t\t: edit ${cfg_install_dir}/${configFile2}"
[ -f ${configDir}/${clientName2}adapter.toml ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \ [ -f ${configDir}/${clientName2}adapter.toml ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \
echo -e "${GREEN_DARK}To configure ${clientName2}Adapter ${NC}\t: edit ${configDir}/${clientName2}adapter.toml" echo -e "${GREEN_DARK}To configure ${clientName2}Adapter ${NC}\t: edit ${configDir}/${clientName2}adapter.toml"
if [ "$verMode" == "cluster" ]; then
echo -e "${GREEN_DARK}To configure ${clientName2}-explorer ${NC}\t: edit ${configDir}/explorer.toml"
fi
if ((${service_mod} == 0)); then if ((${service_mod} == 0)); then
echo -e "${GREEN_DARK}To start ${productName2} ${NC}\t: ${csudo}systemctl start ${serverName2}${NC}" echo -e "${GREEN_DARK}To start ${productName2} ${NC}\t\t: ${csudo}systemctl start ${serverName2}${NC}"
[ -f ${service_config_dir}/${clientName2}adapter.service ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \ [ -f ${service_config_dir}/${clientName2}adapter.service ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \
echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}\t: ${csudo}systemctl start ${clientName2}adapter ${NC}" echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}\t\t: ${csudo}systemctl start ${clientName2}adapter ${NC}"
elif ((${service_mod} == 1)); then elif ((${service_mod} == 1)); then
echo -e "${GREEN_DARK}To start ${productName2} ${NC}\t: ${csudo}service ${serverName2} start${NC}" echo -e "${GREEN_DARK}To start ${productName2} ${NC}\t\t: ${csudo}service ${serverName2} start${NC}"
[ -f ${service_config_dir}/${clientName2}adapter.service ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \ [ -f ${service_config_dir}/${clientName2}adapter.service ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \
echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}\t: ${csudo}service ${clientName2}adapter start${NC}" echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}\t\t: ${csudo}service ${clientName2}adapter start${NC}"
else else
echo -e "${GREEN_DARK}To start ${productName2} ${NC}\t: ${serverName2}${NC}" echo -e "${GREEN_DARK}To start ${productName2} ${NC}\t\t: ${serverName2}${NC}"
[ -f ${installDir}/bin/${clientName2}adapter ] && \ [ -f ${installDir}/bin/${clientName2}adapter ] && \
echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}\t: ${clientName2}adapter ${NC}" echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}\t\t: ${clientName2}adapter ${NC}"
fi fi
echo -e "${GREEN_DARK}To enable ${clientName2}keeper ${NC}\t: sudo systemctl enable ${clientName2}keeper ${NC}" echo -e "${GREEN_DARK}To enable ${clientName2}keeper ${NC}\t\t: sudo systemctl enable ${clientName2}keeper ${NC}"
if [ ! -z "$firstEp" ]; then if [ "$verMode" == "cluster" ];then
tmpFqdn=${firstEp%%:*} echo -e "${GREEN_DARK}To start ${clientName2}x ${NC}\t\t\t: sudo systemctl start ${clientName2}x ${NC}"
substr=":" echo -e "${GREEN_DARK}To start ${clientName2}-explorer ${NC}\t\t: sudo systemctl start ${clientName2}-explorer ${NC}"
if [[ $firstEp =~ $substr ]]; then
tmpPort=${firstEp#*:}
else
tmpPort=""
fi
if [[ "$tmpPort" != "" ]]; then
echo -e "${GREEN_DARK}To access ${productName2} ${NC}\t: ${clientName2} -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}"
else
echo -e "${GREEN_DARK}To access ${productName2} ${NC}\t: ${clientName2} -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}"
fi
echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}"
echo
elif [ ! -z "$serverFqdn" ]; then
echo -e "${GREEN_DARK}To access ${productName2} ${NC}\t: ${clientName2} -h $serverFqdn${GREEN_DARK} to login into ${productName2} server${NC}"
echo
fi fi
echo -e "\033[44;32;1m${productName2} is installed successfully!${NC}" # if [ ! -z "$firstEp" ]; then
echo -e "\033[44;32;1mTo manage ${productName2} instance, view documentation or explorer features, please install ${clientName2}Explorer ${NC}" # tmpFqdn=${firstEp%%:*}
# substr=":"
# if [[ $firstEp =~ $substr ]]; then
# tmpPort=${firstEp#*:}
# else
# tmpPort=""
# fi
# if [[ "$tmpPort" != "" ]]; then
# echo -e "${GREEN_DARK}To access ${productName2} ${NC}\t\t: ${clientName2} -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}"
# else
# echo -e "${GREEN_DARK}To access ${productName2} ${NC}\t\t: ${clientName2} -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}"
# fi
# echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}"
# echo
# elif [ ! -z "$serverFqdn" ]; then
# echo -e "${GREEN_DARK}To access ${productName2} ${NC}\t\t: ${clientName2} -h $serverFqdn${GREEN_DARK} to login into ${productName2} server${NC}"
# echo
# fi
echo
echo "${productName2} is installed successfully!"
echo
if [ "$verMode" == "cluster" ];then
echo -e "\033[44;32;1mTo start all the components : sudo ./start-all.sh${NC}"
fi
echo -e "\033[44;32;1mTo access ${productName2} : ${clientName2} -h $serverFqdn${NC}"
if [ "$verMode" == "cluster" ];then
echo -e "\033[44;32;1mTo access the management system : http://$serverFqdn:6060${NC}"
echo -e "\033[44;32;1mTo read the user manual : http://$serverFqdn:6060/docs-en${NC}"
fi
echo echo
else # Only install client else # Only install client
install_bin install_bin
install_config
echo echo
echo -e "\033[44;32;1m${productName2} client is installed successfully!${NC}" echo -e "\033[44;32;1m${productName2} client is installed successfully!${NC}"
fi fi
cd $script_dir
touch ~/.${historyFile} touch ~/.${historyFile}
rm -rf $(tar -tf ${tarName} | grep -Ev "^\./$|^\/") rm -rf $(tar -tf ${tarName} | grep -Ev "^\./$|^\/")
} }
@ -1071,3 +1140,5 @@ elif [ "$verType" == "client" ]; then
else else
echo "please input correct verType" echo "please input correct verType"
fi fi

View File

@ -129,6 +129,7 @@ function install_bin() {
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
[ -x ${install_main_dir}/bin/${demoName2} ] && ${csudo}ln -s ${install_main_dir}/bin/${demoName2} ${bin_link_dir}/${demoName2} || : [ -x ${install_main_dir}/bin/${demoName2} ] && ${csudo}ln -s ${install_main_dir}/bin/${demoName2} ${bin_link_dir}/${demoName2} || :
[ -x ${install_main_dir}/bin/${benchmarkName2} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName2} ${bin_link_dir}/${benchmarkName2} || : [ -x ${install_main_dir}/bin/${benchmarkName2} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName2} ${bin_link_dir}/${benchmarkName2} || :
[ -x ${install_main_dir}/bin/${dumpName2} ] && ${csudo}ln -s ${install_main_dir}/bin/${dumpName2} ${bin_link_dir}/${dumpName2} || :
fi fi
[ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo}ln -sf ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/${uninstallScript2} || : [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo}ln -sf ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/${uninstallScript2} || :
fi fi

View File

@ -24,10 +24,12 @@ clientName2="${12}"
productName="TDengine" productName="TDengine"
clientName="taos" clientName="taos"
benchmarkName="taosBenchmark" benchmarkName="taosBenchmark"
dumpName="taosdump"
configFile="taos.cfg" configFile="taos.cfg"
tarName="package.tar.gz" tarName="package.tar.gz"
benchmarkName2="${clientName2}Benchmark" benchmarkName2="${clientName2}Benchmark"
dumpName2="${clientName2}dump"
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
script_dir="$(dirname $(readlink -f $0))" script_dir="$(dirname $(readlink -f $0))"
@ -71,6 +73,7 @@ if [ "$osType" != "Darwin" ]; then
else else
bin_files="${build_dir}/bin/${clientName} \ bin_files="${build_dir}/bin/${clientName} \
${build_dir}/bin/${benchmarkName} \ ${build_dir}/bin/${benchmarkName} \
${build_dir}/bin/${dumpName} \
${script_dir}/remove_client.sh \ ${script_dir}/remove_client.sh \
${script_dir}/set_core.sh \ ${script_dir}/set_core.sh \
${script_dir}/get_client.sh" ${script_dir}/get_client.sh"

View File

@ -42,7 +42,7 @@ release_dir="${top_dir}/release"
#package_name='linux' #package_name='linux'
if [ "$verMode" == "cluster" ]; then if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/${productName2}-enterprise-server-${version}" install_dir="${release_dir}/${productName2}-enterprise-${version}"
elif [ "$verMode" == "cloud" ]; then elif [ "$verMode" == "cloud" ]; then
install_dir="${release_dir}/${productName2}-cloud-server-${version}" install_dir="${release_dir}/${productName2}-cloud-server-${version}"
else else
@ -92,14 +92,10 @@ else
${build_dir}/bin/tdengine-datasource.zip.md5" ${build_dir}/bin/tdengine-datasource.zip.md5"
fi fi
[ -f ${build_dir}/bin/taosx ] && taosx_bin="${build_dir}/bin/taosx"
explorer_bin_files=$(find ${build_dir}/bin/ -name '*-explorer')
bin_files="${build_dir}/bin/${serverName} \ bin_files="${build_dir}/bin/${serverName} \
${build_dir}/bin/${clientName} \ ${build_dir}/bin/${clientName} \
${taostools_bin_files} \ ${taostools_bin_files} \
${taosx_bin} \
${explorer_bin_files} \
${build_dir}/bin/${clientName}adapter \ ${build_dir}/bin/${clientName}adapter \
${build_dir}/bin/udfd \ ${build_dir}/bin/udfd \
${script_dir}/remove.sh \ ${script_dir}/remove.sh \
@ -285,6 +281,11 @@ fi
chmod a+x ${install_dir}/install.sh chmod a+x ${install_dir}/install.sh
if [[ $dbName == "taos" ]]; then if [[ $dbName == "taos" ]]; then
cp ${top_dir}/../enterprise/packaging/start-all.sh ${install_dir}
cp ${top_dir}/../enterprise/packaging/stop-all.sh ${install_dir}
cp ${top_dir}/../enterprise/packaging/README.md ${install_dir}
chmod a+x ${install_dir}/start-all.sh
chmod a+x ${install_dir}/stop-all.sh
# Copy example code # Copy example code
mkdir -p ${install_dir}/examples mkdir -p ${install_dir}/examples
examples_dir="${top_dir}/examples" examples_dir="${top_dir}/examples"
@ -330,7 +331,7 @@ fi
mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" >${install_dir}/driver/vercomp.txt mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" >${install_dir}/driver/vercomp.txt
[ -f ${wslib_files} ] && cp ${wslib_files} ${install_dir}/driver || : [ -f ${wslib_files} ] && cp ${wslib_files} ${install_dir}/driver || :
# Copy connector # Copy connector && taosx
if [ "$verMode" == "cluster" ]; then if [ "$verMode" == "cluster" ]; then
connector_dir="${code_dir}/connector" connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector mkdir -p ${install_dir}/connector
@ -364,8 +365,13 @@ if [ "$verMode" == "cluster" ]; then
git clone --depth 1 https://github.com/taosdata/taos-connector-rust ${install_dir}/connector/rust git clone --depth 1 https://github.com/taosdata/taos-connector-rust ${install_dir}/connector/rust
rm -rf ${install_dir}/connector/rust/.git ||: rm -rf ${install_dir}/connector/rust/.git ||:
# cp -r ${connector_dir}/python ${install_dir}/connector # copy taosx
# cp -r ${connector_dir}/nodejs ${install_dir}/connector if [ -d ${top_dir}/../enterprise/src/plugins/taosx/release/taosx ]; then
cp -r ${top_dir}/../enterprise/src/plugins/taosx/release/taosx ${install_dir}
cp ${top_dir}/../enterprise/packaging/install_taosx.sh ${install_dir}/taosx
cp ${top_dir}/../enterprise/src/plugins/taosx/packaging/uninstall.sh ${install_dir}/taosx
sed -i 's/target=\"\"/target=\"taosx\"/g' ${install_dir}/taosx/uninstall.sh
fi
fi fi
fi fi

View File

@ -63,6 +63,10 @@ service_config_dir="/etc/systemd/system"
taos_service_name=${serverName2} taos_service_name=${serverName2}
taosadapter_service_name="${clientName2}adapter" taosadapter_service_name="${clientName2}adapter"
tarbitrator_service_name="tarbitratord" tarbitrator_service_name="tarbitratord"
config_dir="/etc/${clientName2}"
csudo="" csudo=""
if command -v sudo >/dev/null; then if command -v sudo >/dev/null; then
csudo="sudo " csudo="sudo "
@ -113,8 +117,10 @@ function clean_bin() {
# Remove link # Remove link
${csudo}rm -f ${bin_link_dir}/${clientName} || : ${csudo}rm -f ${bin_link_dir}/${clientName} || :
${csudo}rm -f ${bin_link_dir}/${serverName} || : ${csudo}rm -f ${bin_link_dir}/${serverName} || :
echo "${serverName} is removed successfully"
${csudo}rm -f ${bin_link_dir}/udfd || : ${csudo}rm -f ${bin_link_dir}/udfd || :
${csudo}rm -f ${bin_link_dir}/${adapterName2} || : ${csudo}rm -f ${bin_link_dir}/${adapterName2} || :
echo "${adapterName2} is removed successfully"
${csudo}rm -f ${bin_link_dir}/${benchmarkName2} || : ${csudo}rm -f ${bin_link_dir}/${benchmarkName2} || :
${csudo}rm -f ${bin_link_dir}/${demoName2} || : ${csudo}rm -f ${bin_link_dir}/${demoName2} || :
${csudo}rm -f ${bin_link_dir}/${dumpName2} || : ${csudo}rm -f ${bin_link_dir}/${dumpName2} || :
@ -175,7 +181,7 @@ function clean_log() {
function clean_service_on_systemd() { function clean_service_on_systemd() {
taosd_service_config="${service_config_dir}/${taos_service_name}.service" taosd_service_config="${service_config_dir}/${taos_service_name}.service"
if systemctl is-active --quiet ${taos_service_name}; then if systemctl is-active --quiet ${taos_service_name}; then
echo "${productName2} ${serverName2} is running, stopping it..." echo "${taos_service_name} is running, stopping it..."
${csudo}systemctl stop ${taos_service_name} &>/dev/null || echo &>/dev/null ${csudo}systemctl stop ${taos_service_name} &>/dev/null || echo &>/dev/null
fi fi
${csudo}systemctl disable ${taos_service_name} &>/dev/null || echo &>/dev/null ${csudo}systemctl disable ${taos_service_name} &>/dev/null || echo &>/dev/null
@ -183,7 +189,7 @@ function clean_service_on_systemd() {
taosadapter_service_config="${service_config_dir}/${clientName2}adapter.service" taosadapter_service_config="${service_config_dir}/${clientName2}adapter.service"
if systemctl is-active --quiet ${taosadapter_service_name}; then if systemctl is-active --quiet ${taosadapter_service_name}; then
echo "${productName2} ${clientName2}Adapter is running, stopping it..." echo "${clientName2}Adapter is running, stopping it..."
${csudo}systemctl stop ${taosadapter_service_name} &>/dev/null || echo &>/dev/null ${csudo}systemctl stop ${taosadapter_service_name} &>/dev/null || echo &>/dev/null
fi fi
${csudo}systemctl disable ${taosadapter_service_name} &>/dev/null || echo &>/dev/null ${csudo}systemctl disable ${taosadapter_service_name} &>/dev/null || echo &>/dev/null
@ -196,33 +202,11 @@ function clean_service_on_systemd() {
fi fi
${csudo}systemctl disable ${tarbitrator_service_name} &>/dev/null || echo &>/dev/null ${csudo}systemctl disable ${tarbitrator_service_name} &>/dev/null || echo &>/dev/null
if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then
x_service_config="${service_config_dir}/${xName2}.service"
if [ -e "$x_service_config" ]; then
if systemctl is-active --quiet ${xName2}; then
echo "${productName2} ${xName2} is running, stopping it..."
${csudo}systemctl stop ${xName2} &>/dev/null || echo &>/dev/null
fi
${csudo}systemctl disable ${xName2} &>/dev/null || echo &>/dev/null
${csudo}rm -f ${x_service_config}
fi
explorer_service_config="${service_config_dir}/${explorerName2}.service"
if [ -e "$explorer_service_config" ]; then
if systemctl is-active --quiet ${explorerName2}; then
echo "${productName2} ${explorerName2} is running, stopping it..."
${csudo}systemctl stop ${explorerName2} &>/dev/null || echo &>/dev/null
fi
${csudo}systemctl disable ${explorerName2} &>/dev/null || echo &>/dev/null
${csudo}rm -f ${explorer_service_config}
${csudo}rm -f /etc/${clientName2}/explorer.toml
fi
fi
} }
function clean_service_on_sysvinit() { function clean_service_on_sysvinit() {
if ps aux | grep -v grep | grep ${serverName} &>/dev/null; then if ps aux | grep -v grep | grep ${serverName} &>/dev/null; then
echo "${productName2} ${serverName2} is running, stopping it..." echo "${serverName2} is running, stopping it..."
${csudo}service ${serverName} stop || : ${csudo}service ${serverName} stop || :
fi fi
@ -284,6 +268,97 @@ function clean_service() {
fi fi
} }
function remove_data_and_config() {
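  # Resolve the data and log directories from /etc/taos/taos.cfg (the last
  # non-commented dataDir/logDir entry wins), falling back to /var/lib/taos
  # and /var/log/taos, then wipe the config, data and log contents.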
data_dir=`grep dataDir /etc/taos/taos.cfg | grep -v '#' | tail -n 1 | awk {'print $2'}`
if [ X"$data_dir" == X"" ]; then
data_dir="/var/lib/taos"
fi
log_dir=`grep logDir /etc/taos/taos.cfg | grep -v '#' | tail -n 1 | awk {'print $2'}`
if [ X"$log_dir" == X"" ]; then
log_dir="/var/log/taos"
fi
[ -d "${config_dir}" ] && ${csudo}rm -rf ${config_dir}/*
[ -d "${data_dir}" ] && ${csudo}rm -rf ${data_dir}/*
[ -d "${log_dir}" ] && ${csudo}rm -rf ${log_dir}/*
}
_kill_service_of() {
_service=$1
pid=$(ps -ef | grep "$_service" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
}
_clean_service_on_systemd_of() {
_service=$1
_service_config="${service_config_dir}/${_service}.service"
if systemctl is-active --quiet ${_service}; then
echo "taoskeeper is running, stopping it..."
${csudo}systemctl stop ${_service} &>/dev/null || echo &>/dev/null
fi
${csudo}systemctl disable ${_service} &>/dev/null || echo &>/dev/null
${csudo}rm -f ${_service_config}
}
_clean_service_on_sysvinit_of() {
_service=$1
if pidof ${_service} &>/dev/null; then
echo "${_service} is running, stopping it..."
${csudo}service ${_service} stop || :
fi
if ((${initd_mod} == 1)); then
if [ -e ${service_config_dir}/${_service} ]; then
${csudo}chkconfig --del ${_service} || :
fi
elif ((${initd_mod} == 2)); then
if [ -e ${service_config_dir}/${_service} ]; then
${csudo}insserv -r ${_service} || :
fi
elif ((${initd_mod} == 3)); then
if [ -e ${service_config_dir}/${_service} ]; then
${csudo}update-rc.d -f ${_service} remove || :
fi
fi
${csudo}rm -f ${service_config_dir}/${_service} || :
if $(which init &>/dev/null); then
${csudo}init q || :
fi
}
_clean_service_of() {
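  # Dispatch on service_mod: 0 uses systemd, 1 uses sysvinit, anything else
  # falls back to killing the process directly.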
_service=$1
if ((${service_mod} == 0)); then
_clean_service_on_systemd_of $_service
elif ((${service_mod} == 1)); then
_clean_service_on_sysvinit_of $_service
else
_kill_service_of $_service
fi
}
remove_taoskeeper() {
# remove taoskeeper bin
_clean_service_of taoskeeper
[ -e "${bin_link_dir}/taoskeeper" ] && ${csudo}rm -rf ${bin_link_dir}/taoskeeper
[ -e "${installDir}/taoskeeper" ] && ${csudo}rm -rf ${installDir}/taoskeeper
[ -e "${cfg_link_dir}/metrics.toml" ] || ${csudo}rm -rf ${cfg_link_dir}/metrics.toml
echo "taosKeeper is removed successfully!"
}
function uninstall_taosx() {
if [ -f ${installDir}/uninstall.sh ]; then
cd ${installDir}
bash uninstall.sh
fi
}
if [ "$verMode" == "cluster" ]; then
uninstall_taosx
fi
remove_taoskeeper
# Stop service and disable booting start. # Stop service and disable booting start.
clean_service clean_service
# Remove binary file and links # Remove binary file and links
@ -322,5 +397,13 @@ if [ "$osType" = "Darwin" ]; then
${csudo}rm -rf /Applications/TDengine.app ${csudo}rm -rf /Applications/TDengine.app
fi fi
echo -e "${GREEN}${productName2} is removed successfully!${NC}" echo
echo "Do you want to remove all the data, log and configuration files? [y/n]"
read answer
if [ X$answer == X"y" ] || [ X$answer == X"Y" ]; then
remove_data_and_config
fi
echo
echo "${productName2} is removed successfully!"
echo echo

View File

@ -1,5 +1,9 @@
aux_source_directory(src CLIENT_SRC) aux_source_directory(src CLIENT_SRC)
IF (TD_ENTERPRISE)
LIST(APPEND CLIENT_SRC ${TD_ENTERPRISE_DIR}/src/plugins/view/src/clientView.c)
ENDIF ()
if(TD_WINDOWS) if(TD_WINDOWS)
add_library(taos SHARED ${CLIENT_SRC} ${CMAKE_CURRENT_SOURCE_DIR}/src/taos.rc.in) add_library(taos SHARED ${CLIENT_SRC} ${CMAKE_CURRENT_SOURCE_DIR}/src/taos.rc.in)
else() else()

View File

@ -269,6 +269,7 @@ typedef struct SRequestObj {
bool syncQuery; // todo refactor: async query object bool syncQuery; // todo refactor: async query object
bool stableQuery; // todo refactor bool stableQuery; // todo refactor
bool validateOnly; // todo refactor bool validateOnly; // todo refactor
bool parseOnly;
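                     // parseOnly: stop after semantic analysis and keep the resolved catalog data in parseMeta (see the launchAsyncQuery and doAsyncQueryFromAnalyse hunks below)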
bool killed; bool killed;
bool inRetry; bool inRetry;
bool isSubReq; bool isSubReq;
@ -279,6 +280,8 @@ typedef struct SRequestObj {
void* pPostPlan; void* pPostPlan;
SReqRelInfo relation; SReqRelInfo relation;
void* pWrapper; void* pWrapper;
SMetaData parseMeta;
char* effectiveUser;
} SRequestObj; } SRequestObj;
typedef struct SSyncQueryParam { typedef struct SSyncQueryParam {
@ -305,6 +308,8 @@ void taosAsyncQueryImpl(uint64_t connId, const char* sql, __taos_async_fn_t fp,
void taosAsyncQueryImplWithReqid(uint64_t connId, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly, void taosAsyncQueryImplWithReqid(uint64_t connId, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly,
int64_t reqid); int64_t reqid);
void taosAsyncFetchImpl(SRequestObj *pRequest, __taos_async_fn_t fp, void *param); void taosAsyncFetchImpl(SRequestObj *pRequest, __taos_async_fn_t fp, void *param);
int32_t clientParseSql(void* param, const char* dbName, const char* sql, bool parseOnly, const char* effectiveUser, SParseSqlRes* pRes);
void syncQueryFn(void* param, void* res, int32_t code);
int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols); int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols);
@ -403,7 +408,7 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResu
int32_t refreshMeta(STscObj* pTscObj, SRequestObj* pRequest); int32_t refreshMeta(STscObj* pTscObj, SRequestObj* pRequest);
int32_t updateQnodeList(SAppInstInfo* pInfo, SArray* pNodeList); int32_t updateQnodeList(SAppInstInfo* pInfo, SArray* pNodeList);
void doAsyncQuery(SRequestObj* pRequest, bool forceUpdateMeta); void doAsyncQuery(SRequestObj* pRequest, bool forceUpdateMeta);
int32_t removeMeta(STscObj* pTscObj, SArray* tbList); int32_t removeMeta(STscObj* pTscObj, SArray* tbList, bool isView);
int32_t handleAlterTbExecRes(void* res, struct SCatalog* pCatalog); int32_t handleAlterTbExecRes(void* res, struct SCatalog* pCatalog);
int32_t handleCreateTbExecRes(void* res, SCatalog* pCatalog); int32_t handleCreateTbExecRes(void* res, SCatalog* pCatalog);
bool qnodeRequired(SRequestObj* pRequest); bool qnodeRequired(SRequestObj* pRequest);
@ -415,6 +420,11 @@ int32_t buildPreviousRequest(SRequestObj *pRequest, const char* sql, SRequestObj
int32_t prepareAndParseSqlSyntax(SSqlCallbackWrapper **ppWrapper, SRequestObj *pRequest, bool updateMetaForce); int32_t prepareAndParseSqlSyntax(SSqlCallbackWrapper **ppWrapper, SRequestObj *pRequest, bool updateMetaForce);
void returnToUser(SRequestObj* pRequest); void returnToUser(SRequestObj* pRequest);
void stopAllQueries(SRequestObj *pRequest); void stopAllQueries(SRequestObj *pRequest);
void freeQueryParam(SSyncQueryParam* param);
#ifdef TD_ENTERPRISE
int32_t clientParseSqlImpl(void* param, const char* dbName, const char* sql, bool parseOnly, const char* effeciveUser, SParseSqlRes* pRes);
#endif
#ifdef __cplusplus #ifdef __cplusplus
} }

View File

@ -447,6 +447,7 @@ void doDestroyRequest(void *p) {
qDestroyQuery(pRequest->pQuery); qDestroyQuery(pRequest->pQuery);
nodesDestroyAllocator(pRequest->allocatorRefId); nodesDestroyAllocator(pRequest->allocatorRefId);
taosMemoryFreeClear(pRequest->effectiveUser);
taosMemoryFreeClear(pRequest->sqlstr); taosMemoryFreeClear(pRequest->sqlstr);
taosMemoryFree(pRequest); taosMemoryFree(pRequest);
tscTrace("end to destroy request %" PRIx64 " p:%p", reqId, pRequest); tscTrace("end to destroy request %" PRIx64 " p:%p", reqId, pRequest);
@ -775,7 +776,7 @@ int taos_options_imp(TSDB_OPTION option, const char *str) {
} else { } else {
tscInfo("set cfg:%s to %s", pItem->name, str); tscInfo("set cfg:%s to %s", pItem->name, str);
if (TSDB_OPTION_SHELL_ACTIVITY_TIMER == option || TSDB_OPTION_USE_ADAPTER == option) { if (TSDB_OPTION_SHELL_ACTIVITY_TIMER == option || TSDB_OPTION_USE_ADAPTER == option) {
code = taosApplyLocalCfg(pCfg, pItem->name); code = taosCfgDynamicOptions(pCfg, pItem->name, false);
} }
} }

View File

@ -53,7 +53,7 @@ static int32_t hbProcessUserAuthInfoRsp(void *value, int32_t valueLen, struct SC
int32_t numOfBatchs = taosArrayGetSize(batchRsp.pArray); int32_t numOfBatchs = taosArrayGetSize(batchRsp.pArray);
for (int32_t i = 0; i < numOfBatchs; ++i) { for (int32_t i = 0; i < numOfBatchs; ++i) {
SGetUserAuthRsp *rsp = taosArrayGet(batchRsp.pArray, i); SGetUserAuthRsp *rsp = taosArrayGet(batchRsp.pArray, i);
tscDebug("hb user auth rsp, user:%s, version:%d", rsp->user, rsp->version); tscDebug("hb to update user auth, user:%s, version:%d", rsp->user, rsp->version);
catalogUpdateUserAuthInfo(pCatalog, rsp); catalogUpdateUserAuthInfo(pCatalog, rsp);
} }
@ -205,6 +205,7 @@ static int32_t hbProcessDBInfoRsp(void *value, int32_t valueLen, struct SCatalog
rsp->useDbRsp->db, rsp->useDbRsp->vgVersion, rsp->useDbRsp->stateTs, rsp->useDbRsp->uid); rsp->useDbRsp->db, rsp->useDbRsp->vgVersion, rsp->useDbRsp->stateTs, rsp->useDbRsp->uid);
if (rsp->useDbRsp->vgVersion < 0) { if (rsp->useDbRsp->vgVersion < 0) {
tscDebug("hb to remove db, db:%s", rsp->useDbRsp->db);
code = catalogRemoveDB(pCatalog, rsp->useDbRsp->db, rsp->useDbRsp->uid); code = catalogRemoveDB(pCatalog, rsp->useDbRsp->db, rsp->useDbRsp->uid);
} else { } else {
SDBVgInfo *vgInfo = NULL; SDBVgInfo *vgInfo = NULL;
@ -213,6 +214,8 @@ static int32_t hbProcessDBInfoRsp(void *value, int32_t valueLen, struct SCatalog
goto _return; goto _return;
} }
tscDebug("hb to update db vgInfo, db:%s", rsp->useDbRsp->db);
catalogUpdateDBVgInfo(pCatalog, rsp->useDbRsp->db, rsp->useDbRsp->uid, vgInfo); catalogUpdateDBVgInfo(pCatalog, rsp->useDbRsp->db, rsp->useDbRsp->uid, vgInfo);
if (IS_SYS_DBNAME(rsp->useDbRsp->db)) { if (IS_SYS_DBNAME(rsp->useDbRsp->db)) {
@ -253,10 +256,10 @@ static int32_t hbProcessStbInfoRsp(void *value, int32_t valueLen, struct SCatalo
STableMetaRsp *rsp = taosArrayGet(hbRsp.pMetaRsp, i); STableMetaRsp *rsp = taosArrayGet(hbRsp.pMetaRsp, i);
if (rsp->numOfColumns < 0) { if (rsp->numOfColumns < 0) {
tscDebug("hb remove stb, db:%s, stb:%s", rsp->dbFName, rsp->stbName); tscDebug("hb to remove stb, db:%s, stb:%s", rsp->dbFName, rsp->stbName);
catalogRemoveStbMeta(pCatalog, rsp->dbFName, rsp->dbId, rsp->stbName, rsp->suid); catalogRemoveStbMeta(pCatalog, rsp->dbFName, rsp->dbId, rsp->stbName, rsp->suid);
} else { } else {
tscDebug("hb update stb, db:%s, stb:%s", rsp->dbFName, rsp->stbName); tscDebug("hb to update stb, db:%s, stb:%s", rsp->dbFName, rsp->stbName);
if (rsp->pSchemas[0].colId != PRIMARYKEY_TIMESTAMP_COL_ID) { if (rsp->pSchemas[0].colId != PRIMARYKEY_TIMESTAMP_COL_ID) {
tscError("invalid colId[%" PRIi16 "] for the first column in table meta rsp msg", rsp->pSchemas[0].colId); tscError("invalid colId[%" PRIi16 "] for the first column in table meta rsp msg", rsp->pSchemas[0].colId);
tFreeSSTbHbRsp(&hbRsp); tFreeSSTbHbRsp(&hbRsp);
@ -281,6 +284,108 @@ static int32_t hbProcessStbInfoRsp(void *value, int32_t valueLen, struct SCatalo
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
static int32_t hbProcessDynViewRsp(void *value, int32_t valueLen, struct SCatalog *pCatalog) {
return catalogUpdateDynViewVer(pCatalog, (SDynViewVersion*)value);
}
static void hbFreeSViewMetaInRsp(void* p) {
if (NULL == p || NULL == *(void**)p) {
return;
}
SViewMetaRsp *pRsp = *(SViewMetaRsp**)p;
tFreeSViewMetaRsp(pRsp);
taosMemoryFreeClear(pRsp);
}
static int32_t hbProcessViewInfoRsp(void *value, int32_t valueLen, struct SCatalog *pCatalog) {
int32_t code = 0;
SViewHbRsp hbRsp = {0};
if (tDeserializeSViewHbRsp(value, valueLen, &hbRsp) != 0) {
taosArrayDestroyEx(hbRsp.pViewRsp, hbFreeSViewMetaInRsp);
terrno = TSDB_CODE_INVALID_MSG;
return -1;
}
int32_t numOfMeta = taosArrayGetSize(hbRsp.pViewRsp);
for (int32_t i = 0; i < numOfMeta; ++i) {
SViewMetaRsp *rsp = taosArrayGetP(hbRsp.pViewRsp, i);
if (rsp->numOfCols < 0) {
tscDebug("hb to remove view, db:%s, view:%s", rsp->dbFName, rsp->name);
catalogRemoveViewMeta(pCatalog, rsp->dbFName, rsp->dbId, rsp->name, rsp->viewId);
tFreeSViewMetaRsp(rsp);
taosMemoryFreeClear(rsp);
} else {
tscDebug("hb to update view, db:%s, view:%s", rsp->dbFName, rsp->name);
catalogUpdateViewMeta(pCatalog, rsp);
}
}
taosArrayDestroy(hbRsp.pViewRsp);
return TSDB_CODE_SUCCESS;
}
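All three heartbeat handlers above follow one convention: a negative column or version count in the reply means evict the object from the local catalog cache, anything else means refresh it. A compact restatement of that rule, with illustrative names:

    #include <stdint.h>

    /* Sketch only: mirrors the vgVersion/numOfColumns/numOfCols checks above. */
    typedef enum { CACHE_EVICT, CACHE_UPDATE } ECacheActionSketch;

    static ECacheActionSketch cacheActionForRsp(int32_t countOrVersion) {
      return (countOrVersion < 0) ? CACHE_EVICT : CACHE_UPDATE;
    }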
static void hbProcessQueryRspKvs(int32_t kvNum, SArray* pKvs, struct SCatalog *pCatalog, SAppHbMgr *pAppHbMgr) {
for (int32_t i = 0; i < kvNum; ++i) {
SKv *kv = taosArrayGet(pKvs, i);
switch (kv->key) {
case HEARTBEAT_KEY_USER_AUTHINFO: {
if (kv->valueLen <= 0 || NULL == kv->value) {
tscError("invalid hb user auth info, len:%d, value:%p", kv->valueLen, kv->value);
break;
}
hbProcessUserAuthInfoRsp(kv->value, kv->valueLen, pCatalog, pAppHbMgr);
break;
}
case HEARTBEAT_KEY_DBINFO: {
if (kv->valueLen <= 0 || NULL == kv->value) {
tscError("invalid hb db info, len:%d, value:%p", kv->valueLen, kv->value);
break;
}
hbProcessDBInfoRsp(kv->value, kv->valueLen, pCatalog);
break;
}
case HEARTBEAT_KEY_STBINFO: {
if (kv->valueLen <= 0 || NULL == kv->value) {
tscError("invalid hb stb info, len:%d, value:%p", kv->valueLen, kv->value);
break;
}
hbProcessStbInfoRsp(kv->value, kv->valueLen, pCatalog);
break;
}
#ifdef TD_ENTERPRISE
case HEARTBEAT_KEY_DYN_VIEW: {
if (kv->valueLen <= 0 || NULL == kv->value) {
tscError("invalid dyn view info, len:%d, value:%p", kv->valueLen, kv->value);
break;
}
hbProcessDynViewRsp(kv->value, kv->valueLen, pCatalog);
break;
}
case HEARTBEAT_KEY_VIEWINFO: {
if (kv->valueLen <= 0 || NULL == kv->value) {
tscError("invalid view info, len:%d, value:%p", kv->valueLen, kv->value);
break;
}
hbProcessViewInfoRsp(kv->value, kv->valueLen, pCatalog);
break;
}
#endif
default:
tscError("invalid hb key type:%d", kv->key);
break;
}
}
}
static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) { static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
SClientHbReq *pReq = taosHashAcquire(pAppHbMgr->activeInfo, &pRsp->connKey, sizeof(SClientHbKey)); SClientHbReq *pReq = taosHashAcquire(pAppHbMgr->activeInfo, &pRsp->connKey, sizeof(SClientHbKey));
if (NULL == pReq) { if (NULL == pReq) {
@ -338,63 +443,13 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
tscDebug("hb got %d rsp kv", kvNum); tscDebug("hb got %d rsp kv", kvNum);
for (int32_t i = 0; i < kvNum; ++i) { if (kvNum > 0) {
SKv *kv = taosArrayGet(pRsp->info, i); struct SCatalog *pCatalog = NULL;
switch (kv->key) { int32_t code = catalogGetHandle(pReq->clusterId, &pCatalog);
case HEARTBEAT_KEY_USER_AUTHINFO: { if (code != TSDB_CODE_SUCCESS) {
if (kv->valueLen <= 0 || NULL == kv->value) { tscWarn("catalogGetHandle failed, clusterId:%" PRIx64 ", error:%s", pReq->clusterId, tstrerror(code));
tscError("invalid hb user auth info, len:%d, value:%p", kv->valueLen, kv->value); } else {
break; hbProcessQueryRspKvs(kvNum, pRsp->info, pCatalog, pAppHbMgr);
}
struct SCatalog *pCatalog = NULL;
int32_t code = catalogGetHandle(pReq->clusterId, &pCatalog);
if (code != TSDB_CODE_SUCCESS) {
tscWarn("catalogGetHandle failed, clusterId:%" PRIx64 ", error:%s", pReq->clusterId, tstrerror(code));
break;
}
hbProcessUserAuthInfoRsp(kv->value, kv->valueLen, pCatalog, pAppHbMgr);
break;
}
case HEARTBEAT_KEY_DBINFO: {
if (kv->valueLen <= 0 || NULL == kv->value) {
tscError("invalid hb db info, len:%d, value:%p", kv->valueLen, kv->value);
break;
}
struct SCatalog *pCatalog = NULL;
int32_t code = catalogGetHandle(pReq->clusterId, &pCatalog);
if (code != TSDB_CODE_SUCCESS) {
tscWarn("catalogGetHandle failed, clusterId:%" PRIx64 ", error:%s", pReq->clusterId, tstrerror(code));
break;
}
hbProcessDBInfoRsp(kv->value, kv->valueLen, pCatalog);
break;
}
case HEARTBEAT_KEY_STBINFO: {
if (kv->valueLen <= 0 || NULL == kv->value) {
tscError("invalid hb stb info, len:%d, value:%p", kv->valueLen, kv->value);
break;
}
struct SCatalog *pCatalog = NULL;
int32_t code = catalogGetHandle(pReq->clusterId, &pCatalog);
if (code != TSDB_CODE_SUCCESS) {
tscWarn("catalogGetHandle failed, clusterId:%" PRIx64 ", error:%s", pReq->clusterId, tstrerror(code));
break;
}
hbProcessStbInfoRsp(kv->value, kv->valueLen, pCatalog);
break;
}
default:
tscError("invalid hb key type:%d", kv->key);
break;
} }
} }
@ -740,8 +795,8 @@ int32_t hbGetExpiredStbInfo(SClientHbKey *connKey, struct SCatalog *pCatalog, SC
for (int32_t i = 0; i < stbNum; ++i) { for (int32_t i = 0; i < stbNum; ++i) {
SSTableVersion *stb = &stbs[i]; SSTableVersion *stb = &stbs[i];
stb->suid = htobe64(stb->suid); stb->suid = htobe64(stb->suid);
stb->sversion = htons(stb->sversion); stb->sversion = htonl(stb->sversion);
stb->tversion = htons(stb->tversion); stb->tversion = htonl(stb->tversion);
stb->smaVer = htonl(stb->smaVer); stb->smaVer = htonl(stb->smaVer);
} }
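The htons to htonl switch above matters because the sversion/tversion fields being byte-swapped are wider than 16 bits; a 16-bit swap truncates the value and leaves the upper bytes untouched. A standalone illustration (the 32-bit width is inferred from the new htonl call, not stated elsewhere in the patch):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      int32_t  sversion  = 70000;                      /* larger than UINT16_MAX */
      uint16_t swapped16 = htons((uint16_t)sversion);  /* old code: truncates to 16 bits */
      uint32_t swapped32 = htonl((uint32_t)sversion);  /* new code: full 32-bit round trip */
      printf("htons round trip: %u\n", (unsigned)ntohs(swapped16)); /* prints 4464 */
      printf("htonl round trip: %u\n", (unsigned)ntohl(swapped32)); /* prints 70000 */
      return 0;
    }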
@ -762,6 +817,56 @@ int32_t hbGetExpiredStbInfo(SClientHbKey *connKey, struct SCatalog *pCatalog, SC
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
int32_t hbGetExpiredViewInfo(SClientHbKey *connKey, struct SCatalog *pCatalog, SClientHbReq *req) {
SViewVersion *views = NULL;
uint32_t viewNum = 0;
int32_t code = 0;
SDynViewVersion *pDynViewVer = NULL;
code = catalogGetExpiredViews(pCatalog, &views, &viewNum, &pDynViewVer);
if (TSDB_CODE_SUCCESS != code) {
taosMemoryFree(views);
taosMemoryFree(pDynViewVer);
return code;
}
if (viewNum <= 0) {
taosMemoryFree(views);
taosMemoryFree(pDynViewVer);
return TSDB_CODE_SUCCESS;
}
for (int32_t i = 0; i < viewNum; ++i) {
SViewVersion *view = &views[i];
view->dbId = htobe64(view->dbId);
view->viewId = htobe64(view->viewId);
view->version = htonl(view->version);
}
tscDebug("hb got %d expired view, valueLen:%lu", viewNum, sizeof(SViewVersion) * viewNum);
if (NULL == req->info) {
req->info = taosHashInit(64, hbKeyHashFunc, 1, HASH_ENTRY_LOCK);
}
SKv kv = {
.key = HEARTBEAT_KEY_DYN_VIEW,
.valueLen = sizeof(SDynViewVersion),
.value = pDynViewVer,
};
taosHashPut(req->info, &kv.key, sizeof(kv.key), &kv, sizeof(kv));
kv.key = HEARTBEAT_KEY_VIEWINFO;
kv.valueLen = sizeof(SViewVersion) * viewNum;
kv.value = views;
taosHashPut(req->info, &kv.key, sizeof(kv.key), &kv, sizeof(kv));
return TSDB_CODE_SUCCESS;
}
int32_t hbGetAppInfo(int64_t clusterId, SClientHbReq *req) { int32_t hbGetAppInfo(int64_t clusterId, SClientHbReq *req) {
SAppHbReq *pApp = taosHashGet(clientHbMgr.appSummary, &clusterId, sizeof(clusterId)); SAppHbReq *pApp = taosHashGet(clientHbMgr.appSummary, &clusterId, sizeof(clusterId));
if (NULL != pApp) { if (NULL != pApp) {
@ -781,19 +886,17 @@ int32_t hbQueryHbReqHandle(SClientHbKey *connKey, void *param, SClientHbReq *req
SHbParam *hbParam = (SHbParam *)param; SHbParam *hbParam = (SHbParam *)param;
SCatalog *pCatalog = NULL; SCatalog *pCatalog = NULL;
hbGetQueryBasicInfo(connKey, req);
if (hbParam->reqCnt == 0) { if (hbParam->reqCnt == 0) {
code = catalogGetHandle(hbParam->clusterId, &pCatalog); code = catalogGetHandle(hbParam->clusterId, &pCatalog);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
tscWarn("catalogGetHandle failed, clusterId:%" PRIx64 ", error:%s", hbParam->clusterId, tstrerror(code)); tscWarn("catalogGetHandle failed, clusterId:%" PRIx64 ", error:%s", hbParam->clusterId, tstrerror(code));
return code; return code;
} }
}
hbGetAppInfo(hbParam->clusterId, req); hbGetAppInfo(hbParam->clusterId, req);
hbGetQueryBasicInfo(connKey, req);
if (hbParam->reqCnt == 0) {
if (!taosHashGet(clientHbMgr.appHbHash, &hbParam->clusterId, sizeof(hbParam->clusterId))) { if (!taosHashGet(clientHbMgr.appHbHash, &hbParam->clusterId, sizeof(hbParam->clusterId))) {
code = hbGetExpiredUserInfo(connKey, pCatalog, req); code = hbGetExpiredUserInfo(connKey, pCatalog, req);
if (TSDB_CODE_SUCCESS != code) { if (TSDB_CODE_SUCCESS != code) {
@ -819,6 +922,15 @@ int32_t hbQueryHbReqHandle(SClientHbKey *connKey, void *param, SClientHbReq *req
if (TSDB_CODE_SUCCESS != code) { if (TSDB_CODE_SUCCESS != code) {
return code; return code;
} }
#ifdef TD_ENTERPRISE
code = hbGetExpiredViewInfo(connKey, pCatalog, req);
if (TSDB_CODE_SUCCESS != code) {
return code;
}
#endif
} else {
req->app.appId = 0;
} }
++hbParam->reqCnt; // success to get catalog info ++hbParam->reqCnt; // success to get catalog info

View File

@ -1009,7 +1009,7 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
tscDebug("schedulerExecCb request type %s", TMSG_INFO(pRequest->type)); tscDebug("schedulerExecCb request type %s", TMSG_INFO(pRequest->type));
if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type) && NULL == pRequest->body.resInfo.execRes.res) { if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type) && NULL == pRequest->body.resInfo.execRes.res) {
removeMeta(pTscObj, pRequest->targetTableList); removeMeta(pTscObj, pRequest->targetTableList, IS_VIEW_REQUEST(pRequest->type));
} }
pRequest->metric.execCostUs = taosGetTimestampUs() - pRequest->metric.execStart; pRequest->metric.execCostUs = taosGetTimestampUs() - pRequest->metric.execStart;
@ -1052,6 +1052,7 @@ SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQue
} }
} }
pRequest->body.execMode = pQuery->execMode;
switch (pQuery->execMode) { switch (pQuery->execMode) {
case QUERY_EXEC_MODE_LOCAL: case QUERY_EXEC_MODE_LOCAL:
if (!pRequest->validateOnly) { if (!pRequest->validateOnly) {
@ -1097,7 +1098,7 @@ SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQue
} }
if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type) && NULL == pRequest->body.resInfo.execRes.res) { if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type) && NULL == pRequest->body.resInfo.execRes.res) {
removeMeta(pRequest->pTscObj, pRequest->targetTableList); removeMeta(pRequest->pTscObj, pRequest->targetTableList, IS_VIEW_REQUEST(pRequest->type));
} }
handleQueryExecRsp(pRequest); handleQueryExecRsp(pRequest);
@ -1116,31 +1117,34 @@ SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQue
static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultMeta, static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultMeta,
SSqlCallbackWrapper* pWrapper) { SSqlCallbackWrapper* pWrapper) {
int32_t code = TSDB_CODE_SUCCESS;
pRequest->type = pQuery->msgType; pRequest->type = pQuery->msgType;
SArray* pMnodeList = NULL;
SArray* pMnodeList = taosArrayInit(4, sizeof(SQueryNodeLoad));
SPlanContext cxt = {.queryId = pRequest->requestId,
.acctId = pRequest->pTscObj->acctId,
.mgmtEpSet = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp),
.pAstRoot = pQuery->pRoot,
.showRewrite = pQuery->showRewrite,
.pMsg = pRequest->msgBuf,
.msgLen = ERROR_MSG_BUF_DEFAULT_SIZE,
.pUser = pRequest->pTscObj->user,
.sysInfo = pRequest->pTscObj->sysInfo,
.allocatorId = pRequest->allocatorRefId};
SQueryPlan* pDag = NULL; SQueryPlan* pDag = NULL;
int64_t st = taosGetTimestampUs(); int64_t st = taosGetTimestampUs();
int32_t code = qCreateQueryPlan(&cxt, &pDag, pMnodeList);
if (code) { if (!pRequest->parseOnly) {
tscError("0x%" PRIx64 " failed to create query plan, code:%s 0x%" PRIx64, pRequest->self, tstrerror(code), pMnodeList = taosArrayInit(4, sizeof(SQueryNodeLoad));
pRequest->requestId);
} else { SPlanContext cxt = {.queryId = pRequest->requestId,
pRequest->body.subplanNum = pDag->numOfSubplans; .acctId = pRequest->pTscObj->acctId,
TSWAP(pRequest->pPostPlan, pDag->pPostPlan); .mgmtEpSet = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp),
.pAstRoot = pQuery->pRoot,
.showRewrite = pQuery->showRewrite,
.pMsg = pRequest->msgBuf,
.msgLen = ERROR_MSG_BUF_DEFAULT_SIZE,
.pUser = pRequest->pTscObj->user,
.sysInfo = pRequest->pTscObj->sysInfo,
.allocatorId = pRequest->allocatorRefId};
code = qCreateQueryPlan(&cxt, &pDag, pMnodeList);
if (code) {
tscError("0x%" PRIx64 " failed to create query plan, code:%s 0x%" PRIx64, pRequest->self, tstrerror(code),
pRequest->requestId);
} else {
pRequest->body.subplanNum = pDag->numOfSubplans;
TSWAP(pRequest->pPostPlan, pDag->pPostPlan);
}
} }
pRequest->metric.execStart = taosGetTimestampUs(); pRequest->metric.execStart = taosGetTimestampUs();
@ -1173,6 +1177,7 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat
code = schedulerExecJob(&req, &pRequest->body.queryJob); code = schedulerExecJob(&req, &pRequest->body.queryJob);
taosArrayDestroy(pNodeList); taosArrayDestroy(pNodeList);
} else { } else {
qDestroyQueryPlan(pDag);
tscDebug("0x%" PRIx64 " plan not executed, code:%s 0x%" PRIx64, pRequest->self, tstrerror(code), tscDebug("0x%" PRIx64 " plan not executed, code:%s 0x%" PRIx64, pRequest->self, tstrerror(code),
pRequest->requestId); pRequest->requestId);
destorySqlCallbackWrapper(pWrapper); destorySqlCallbackWrapper(pWrapper);
@ -1193,6 +1198,11 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat
void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultMeta, SSqlCallbackWrapper* pWrapper) { void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultMeta, SSqlCallbackWrapper* pWrapper) {
int32_t code = 0; int32_t code = 0;
if (pRequest->parseOnly) {
pRequest->body.queryFp(pRequest->body.param, pRequest, 0);
return;
}
pRequest->body.execMode = pQuery->execMode; pRequest->body.execMode = pQuery->execMode;
if (QUERY_EXEC_MODE_SCHEDULE != pRequest->body.execMode) { if (QUERY_EXEC_MODE_SCHEDULE != pRequest->body.execMode) {
destorySqlCallbackWrapper(pWrapper); destorySqlCallbackWrapper(pWrapper);
@ -1226,6 +1236,7 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
pRequest->body.queryFp(pRequest->body.param, pRequest, 0); pRequest->body.queryFp(pRequest->body.param, pRequest, 0);
break; break;
default: default:
tscError("0x%" PRIx64 " invalid execMode %d", pRequest->self, pQuery->execMode);
pRequest->body.queryFp(pRequest->body.param, pRequest, -1); pRequest->body.queryFp(pRequest->body.param, pRequest, -1);
break; break;
} }
@ -1272,7 +1283,7 @@ int32_t refreshMeta(STscObj* pTscObj, SRequestObj* pRequest) {
return code; return code;
} }
int32_t removeMeta(STscObj* pTscObj, SArray* tbList) { int32_t removeMeta(STscObj* pTscObj, SArray* tbList, bool isView) {
SCatalog* pCatalog = NULL; SCatalog* pCatalog = NULL;
int32_t tbNum = taosArrayGetSize(tbList); int32_t tbNum = taosArrayGetSize(tbList);
int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog); int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
@ -1280,9 +1291,18 @@ int32_t removeMeta(STscObj* pTscObj, SArray* tbList) {
return code; return code;
} }
for (int32_t i = 0; i < tbNum; ++i) { if (isView) {
SName* pTbName = taosArrayGet(tbList, i); for (int32_t i = 0; i < tbNum; ++i) {
catalogRemoveTableMeta(pCatalog, pTbName); SName* pViewName = taosArrayGet(tbList, i);
char dbFName[TSDB_DB_FNAME_LEN];
tNameGetFullDbName(pViewName, dbFName);
catalogRemoveViewMeta(pCatalog, dbFName, 0, pViewName->tname, 0);
}
} else {
for (int32_t i = 0; i < tbNum; ++i) {
SName* pTbName = taosArrayGet(tbList, i);
catalogRemoveTableMeta(pCatalog, pTbName);
}
} }
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
@ -2604,3 +2624,13 @@ void taosAsyncFetchImpl(SRequestObj* pRequest, __taos_async_fn_t fp, void* param
schedulerFetchRows(pRequest->body.queryJob, &req); schedulerFetchRows(pRequest->body.queryJob, &req);
} }
int32_t clientParseSql(void* param, const char* dbName, const char* sql, bool parseOnly, const char* effectiveUser, SParseSqlRes* pRes) {
#ifndef TD_ENTERPRISE
return TSDB_CODE_SUCCESS;
#else
return clientParseSqlImpl(param, dbName, sql, parseOnly, effectiveUser, pRes);
#endif
}
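clientParseSql gives the community build a no-op while the enterprise build forwards to clientParseSqlImpl, so both variants link the same call sites. A minimal sketch of the same compile-time dispatch, using purely illustrative names (featureEntry/featureImpl are not from the patch):

    #include <stdint.h>

    #define SKETCH_CODE_SUCCESS 0

    #ifdef TD_ENTERPRISE
    int32_t featureImpl(const char *arg);   /* provided by enterprise-only sources */
    #endif

    int32_t featureEntry(const char *arg) {
    #ifndef TD_ENTERPRISE
      (void)arg;                            /* community build: succeed without doing anything */
      return SKETCH_CODE_SUCCESS;
    #else
      return featureImpl(arg);              /* enterprise build: forward to the real implementation */
    #endif
    }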

View File

@ -32,7 +32,7 @@
#define TSC_VAR_RELEASED 0 #define TSC_VAR_RELEASED 0
static int32_t sentinel = TSC_VAR_NOT_RELEASE; static int32_t sentinel = TSC_VAR_NOT_RELEASE;
static int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt); static int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt, SSqlCallbackWrapper *pWrapper);
int taos_options(TSDB_OPTION option, const void *arg, ...) { int taos_options(TSDB_OPTION option, const void *arg, ...) {
static int32_t lock = 0; static int32_t lock = 0;
@ -879,39 +879,12 @@ int taos_get_current_db(TAOS *taos, char *database, int len, int *required) {
return code; return code;
} }
static void destoryTablesReq(void *p) {
STablesReq *pRes = (STablesReq *)p;
taosArrayDestroy(pRes->pTables);
}
static void destoryCatalogReq(SCatalogReq *pCatalogReq) {
if (NULL == pCatalogReq) {
return;
}
taosArrayDestroy(pCatalogReq->pDbVgroup);
taosArrayDestroy(pCatalogReq->pDbCfg);
taosArrayDestroy(pCatalogReq->pDbInfo);
if (pCatalogReq->cloned) {
taosArrayDestroy(pCatalogReq->pTableMeta);
taosArrayDestroy(pCatalogReq->pTableHash);
} else {
taosArrayDestroyEx(pCatalogReq->pTableMeta, destoryTablesReq);
taosArrayDestroyEx(pCatalogReq->pTableHash, destoryTablesReq);
}
taosArrayDestroy(pCatalogReq->pUdf);
taosArrayDestroy(pCatalogReq->pIndex);
taosArrayDestroy(pCatalogReq->pUser);
taosArrayDestroy(pCatalogReq->pTableIndex);
taosArrayDestroy(pCatalogReq->pTableCfg);
taosArrayDestroy(pCatalogReq->pTableTag);
taosMemoryFree(pCatalogReq);
}
void destorySqlCallbackWrapper(SSqlCallbackWrapper *pWrapper) { void destorySqlCallbackWrapper(SSqlCallbackWrapper *pWrapper) {
if (NULL == pWrapper) { if (NULL == pWrapper) {
return; return;
} }
destoryCatalogReq(pWrapper->pCatalogReq); destoryCatalogReq(pWrapper->pCatalogReq);
taosMemoryFree(pWrapper->pCatalogReq);
qDestroyParseContext(pWrapper->pParseCtx); qDestroyParseContext(pWrapper->pParseCtx);
taosMemoryFree(pWrapper); taosMemoryFree(pWrapper);
} }
@ -933,6 +906,7 @@ static void doAsyncQueryFromAnalyse(SMetaData *pResultMeta, void *param, int32_t
int64_t analyseStart = taosGetTimestampUs(); int64_t analyseStart = taosGetTimestampUs();
pRequest->metric.ctgCostUs = analyseStart - pRequest->metric.ctgStart; pRequest->metric.ctgCostUs = analyseStart - pRequest->metric.ctgStart;
pWrapper->pParseCtx->parseOnly = pRequest->parseOnly;
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = qAnalyseSqlSemantic(pWrapper->pParseCtx, pWrapper->pCatalogReq, pResultMeta, pQuery); code = qAnalyseSqlSemantic(pWrapper->pParseCtx, pWrapper->pCatalogReq, pResultMeta, pQuery);
@ -940,6 +914,11 @@ static void doAsyncQueryFromAnalyse(SMetaData *pResultMeta, void *param, int32_t
pRequest->metric.analyseCostUs += taosGetTimestampUs() - analyseStart; pRequest->metric.analyseCostUs += taosGetTimestampUs() - analyseStart;
if (pRequest->parseOnly) {
memcpy(&pRequest->parseMeta, pResultMeta, sizeof(*pResultMeta));
memset(pResultMeta, 0, sizeof(*pResultMeta));
}
handleQueryAnslyseRes(pWrapper, pResultMeta, code); handleQueryAnslyseRes(pWrapper, pResultMeta, code);
} }
@ -960,6 +939,7 @@ int32_t cloneCatalogReq(SCatalogReq **ppTarget, SCatalogReq *pSrc) {
pTarget->pTableIndex = taosArrayDup(pSrc->pTableIndex, NULL); pTarget->pTableIndex = taosArrayDup(pSrc->pTableIndex, NULL);
pTarget->pTableCfg = taosArrayDup(pSrc->pTableCfg, NULL); pTarget->pTableCfg = taosArrayDup(pSrc->pTableCfg, NULL);
pTarget->pTableTag = taosArrayDup(pSrc->pTableTag, NULL); pTarget->pTableTag = taosArrayDup(pSrc->pTableTag, NULL);
pTarget->pView = taosArrayDup(pSrc->pView, NULL);
pTarget->qNodeRequired = pSrc->qNodeRequired; pTarget->qNodeRequired = pSrc->qNodeRequired;
pTarget->dNodeRequired = pSrc->dNodeRequired; pTarget->dNodeRequired = pSrc->dNodeRequired;
pTarget->svrVerRequired = pSrc->svrVerRequired; pTarget->svrVerRequired = pSrc->svrVerRequired;
@ -1147,7 +1127,7 @@ void taos_query_a_with_reqid(TAOS *taos, const char *sql, __taos_async_fn_t fp,
taosAsyncQueryImplWithReqid(connId, sql, fp, param, false, reqid); taosAsyncQueryImplWithReqid(connId, sql, fp, param, false, reqid);
} }
int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt) { int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt, SSqlCallbackWrapper *pWrapper) {
const STscObj *pTscObj = pRequest->pTscObj; const STscObj *pTscObj = pRequest->pTscObj;
*pCxt = taosMemoryCalloc(1, sizeof(SParseContext)); *pCxt = taosMemoryCalloc(1, sizeof(SParseContext));
@ -1167,12 +1147,15 @@ int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt) {
.pTransporter = pTscObj->pAppInfo->pTransporter, .pTransporter = pTscObj->pAppInfo->pTransporter,
.pStmtCb = NULL, .pStmtCb = NULL,
.pUser = pTscObj->user, .pUser = pTscObj->user,
.pEffectiveUser = pRequest->effectiveUser,
.isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER)), .isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER)),
.enableSysInfo = pTscObj->sysInfo, .enableSysInfo = pTscObj->sysInfo,
.async = true, .async = true,
.svrVer = pTscObj->sVer, .svrVer = pTscObj->sVer,
.nodeOffline = (pTscObj->pAppInfo->onlineDnodes < pTscObj->pAppInfo->totalDnodes), .nodeOffline = (pTscObj->pAppInfo->onlineDnodes < pTscObj->pAppInfo->totalDnodes),
.allocatorId = pRequest->allocatorRefId}; .allocatorId = pRequest->allocatorRefId,
.parseSqlFp = clientParseSql,
.parseSqlParam = pWrapper};
int8_t biMode = atomic_load_8(&((STscObj *)pTscObj)->biMode); int8_t biMode = atomic_load_8(&((STscObj *)pTscObj)->biMode);
(*pCxt)->biMode = biMode; (*pCxt)->biMode = biMode;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
@ -1191,7 +1174,7 @@ int32_t prepareAndParseSqlSyntax(SSqlCallbackWrapper **ppWrapper, SRequestObj *p
} }
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = createParseContext(pRequest, &pWrapper->pParseCtx); code = createParseContext(pRequest, &pWrapper->pParseCtx, pWrapper);
} }
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
@ -1570,7 +1553,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
tsem_wait(&pParam->sem); tsem_wait(&pParam->sem);
_return: _return:
taosArrayDestroyEx(catalogReq.pTableMeta, destoryTablesReq); destoryCatalogReq(&catalogReq);
destroyRequest(pRequest); destroyRequest(pRequest);
return code; return code;
} }

View File

@ -35,7 +35,7 @@ int32_t genericRspCallback(void* param, SDataBuf* pMsg, int32_t code) {
setErrno(pRequest, code); setErrno(pRequest, code);
if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type)) { if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type)) {
removeMeta(pRequest->pTscObj, pRequest->targetTableList); removeMeta(pRequest->pTscObj, pRequest->targetTableList, IS_VIEW_REQUEST(pRequest->type));
} }
taosMemoryFree(pMsg->pEpSet); taosMemoryFree(pMsg->pEpSet);

File diff suppressed because it is too large

View File

@ -104,9 +104,9 @@ static int32_t smlCheckAuth(SSmlHandle *info, SRequestConnInfo* conn, const cha
SUserAuthRes authRes = {0}; SUserAuthRes authRes = {0};
code = catalogChkAuth(info->pCatalog, conn, &pAuth, &authRes); code = catalogChkAuth(info->pCatalog, conn, &pAuth, &authRes);
nodesDestroyNode(authRes.pCond); nodesDestroyNode(authRes.pCond[AUTH_RES_BASIC]);
return (code == TSDB_CODE_SUCCESS) ? (authRes.pass ? TSDB_CODE_SUCCESS : TSDB_CODE_PAR_PERMISSION_DENIED) : code; return (code == TSDB_CODE_SUCCESS) ? (authRes.pass[AUTH_RES_BASIC] ? TSDB_CODE_SUCCESS : TSDB_CODE_PAR_PERMISSION_DENIED) : code;
} }
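The change above indexes SUserAuthRes by a resource class (AUTH_RES_BASIC), which suggests the auth result now carries one verdict and one optional filter condition per class; smlCheckAuth only consumes the basic slot. A hedged sketch of such a layout:

    #include <stdbool.h>

    /* Illustrative layout; only AUTH_RES_BASIC is confirmed by the hunk above,
     * the second enum member and the array sizes are assumptions. */
    typedef enum { AUTH_RES_BASIC_SKETCH = 0, AUTH_RES_VIEW_SKETCH, AUTH_RES_MAX_SKETCH } EAuthResSketch;

    typedef struct {
      bool  pass[AUTH_RES_MAX_SKETCH];   /* one pass/deny verdict per resource class */
      void *pCond[AUTH_RES_MAX_SKETCH];  /* optional row-filter condition per class (SNode* in the client) */
    } SUserAuthResSketch;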
inline bool smlDoubleToInt64OverFlow(double num) { inline bool smlDoubleToInt64OverFlow(double num) {

View File

@ -1451,7 +1451,7 @@ static bool doUpdateLocalEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp)
bool set = false; bool set = false;
int32_t topicNumGet = taosArrayGetSize(pRsp->topics); int32_t topicNumGet = taosArrayGetSize(pRsp->topics);
if (topicNumGet <= 0 && epoch <= tmq->epoch) { if (epoch < tmq->epoch || (epoch == tmq->epoch && topicNumGet == 0)) {
tscInfo("consumer:0x%" PRIx64 " no update ep epoch from %d to epoch %d, incoming topics:%d", tscInfo("consumer:0x%" PRIx64 " no update ep epoch from %d to epoch %d, incoming topics:%d",
tmq->consumerId, tmq->epoch, epoch, topicNumGet); tmq->consumerId, tmq->epoch, epoch, topicNumGet);
return false; return false;

View File

@ -46,6 +46,75 @@ target_link_libraries(
INTERFACE api INTERFACE api
) )
if(${BUILD_S3})
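  # BUILD_S3 selects an object-storage backend: BUILD_WITH_S3 links libs3 plus
  # curl/ssl/crypto/xml2 and defines USE_S3, while BUILD_WITH_COS links the COS
  # C SDK plus apr/aprutil/mxml/curl and defines USE_COS.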
if(${BUILD_WITH_S3})
target_include_directories(
common
PUBLIC "$ENV{HOME}/.cos-local.2/include"
)
set(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.2)
find_library(S3_LIBRARY s3)
find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
find_library(XML2_LIBRARY xml2)
find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
target_link_libraries(
common
# s3
PUBLIC ${S3_LIBRARY}
PUBLIC ${CURL_LIBRARY}
PUBLIC ${SSL_LIBRARY}
PUBLIC ${CRYPTO_LIBRARY}
PUBLIC ${XML2_LIBRARY}
)
add_definitions(-DUSE_S3)
endif()
if(${BUILD_WITH_COS})
set(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/)
find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/)
find_library(MINIXML_LIBRARY mxml)
find_library(CURL_LIBRARY curl)
target_link_libraries(
common
# s3
PUBLIC cos_c_sdk_static
PUBLIC ${APR_UTIL_LIBRARY}
PUBLIC ${APR_LIBRARY}
PUBLIC ${MINIXML_LIBRARY}
PUBLIC ${CURL_LIBRARY}
)
# s3
FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/)
IF (APR_CONFIG_BIN)
EXECUTE_PROCESS(
COMMAND ${APR_CONFIG_BIN} --includedir
OUTPUT_VARIABLE APR_INCLUDE_DIR
OUTPUT_STRIP_TRAILING_WHITESPACE
)
ENDIF()
include_directories (${APR_INCLUDE_DIR})
target_include_directories(
common
PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk"
PUBLIC "$ENV{HOME}/.cos-local.1/include"
)
add_definitions(-DUSE_COS)
endif(${BUILD_WITH_COS})
endif()
if(${BUILD_TEST}) if(${BUILD_TEST})
ADD_SUBDIRECTORY(test) ADD_SUBDIRECTORY(test)
endif(${BUILD_TEST}) endif(${BUILD_TEST})

View File

@ -1,6 +1,6 @@
#define ALLOW_FORBID_FUNC #define ALLOW_FORBID_FUNC
#include "vndCos.h" #include "cos.h"
extern char tsS3Endpoint[]; extern char tsS3Endpoint[];
extern char tsS3AccessKeyId[]; extern char tsS3AccessKeyId[];
@ -13,6 +13,7 @@ extern int8_t tsS3Https;
#if defined(USE_S3) #if defined(USE_S3)
#include "libs3.h" #include "libs3.h"
#include "tarray.h"
static int verifyPeerG = 0; static int verifyPeerG = 0;
static const char *awsRegionG = NULL; static const char *awsRegionG = NULL;
@ -24,7 +25,7 @@ static S3UriStyle uriStyleG = S3UriStylePath;
static int retriesG = 5; static int retriesG = 5;
static int timeoutMsG = 0; static int timeoutMsG = 0;
static int32_t s3Begin() { int32_t s3Begin() {
S3Status status; S3Status status;
const char *hostname = tsS3Hostname; const char *hostname = tsS3Hostname;
const char *env_hn = getenv("S3_HOSTNAME"); const char *env_hn = getenv("S3_HOSTNAME");
@ -34,7 +35,7 @@ static int32_t s3Begin() {
} }
if ((status = S3_initialize("s3", verifyPeerG | S3_INIT_ALL, hostname)) != S3StatusOK) { if ((status = S3_initialize("s3", verifyPeerG | S3_INIT_ALL, hostname)) != S3StatusOK) {
vError("Failed to initialize libs3: %s\n", S3_get_status_name(status)); uError("Failed to initialize libs3: %s\n", S3_get_status_name(status));
return -1; return -1;
} }
@ -43,10 +44,12 @@ static int32_t s3Begin() {
return 0; return 0;
} }
static void s3End() { S3_deinitialize(); } void s3End() { S3_deinitialize(); }
int32_t s3Init() { return s3Begin(); }
void s3CleanUp() { s3End(); } int32_t s3Init() { return 0; /*s3Begin();*/ }
void s3CleanUp() { /*s3End();*/
}
static int should_retry() { static int should_retry() {
/* /*
@ -63,21 +66,35 @@ static int should_retry() {
return 0; return 0;
} }
static void s3PrintError(const char *func, S3Status status, char error_details[]) { static void s3PrintError(const char *filename, int lineno, const char *funcname, S3Status status,
char error_details[]) {
if (status < S3StatusErrorAccessDenied) { if (status < S3StatusErrorAccessDenied) {
vError("%s: %s", __func__, S3_get_status_name(status)); uError("%s/%s:%d-%s: %s", __func__, filename, lineno, funcname, S3_get_status_name(status));
} else { } else {
vError("%s: %s, %s", __func__, S3_get_status_name(status), error_details); uError("%s/%s:%d-%s: %s, %s", __func__, filename, lineno, funcname, S3_get_status_name(status), error_details);
} }
} }
typedef struct {
char err_msg[512];
S3Status status;
uint64_t content_length;
TdFilePtr file;
} TS3GetData;
typedef struct { typedef struct {
char err_msg[128]; char err_msg[128];
S3Status status; S3Status status;
uint64_t content_length; uint64_t content_length;
char *buf; char *buf;
int64_t buf_pos;
} TS3SizeCBD; } TS3SizeCBD;
static S3Status responsePropertiesCallbackNull(const S3ResponseProperties *properties, void *callbackData) {
// (void)callbackData;
return S3StatusOK;
}
static S3Status responsePropertiesCallback(const S3ResponseProperties *properties, void *callbackData) { static S3Status responsePropertiesCallback(const S3ResponseProperties *properties, void *callbackData) {
//(void)callbackData; //(void)callbackData;
TS3SizeCBD *cbd = callbackData; TS3SizeCBD *cbd = callbackData;
@ -97,20 +114,22 @@ static void responseCompleteCallback(S3Status status, const S3ErrorDetails *erro
int len = 0; int len = 0;
const int elen = sizeof(cbd->err_msg); const int elen = sizeof(cbd->err_msg);
if (error) { if (error) {
if (error->message) { if (error->message && elen - len > 0) {
len += snprintf(&(cbd->err_msg[len]), elen - len, " Message: %s\n", error->message); len += snprintf(&(cbd->err_msg[len]), elen - len, " Message: %s\n", error->message);
} }
if (error->resource) { if (error->resource && elen - len > 0) {
len += snprintf(&(cbd->err_msg[len]), elen - len, " Resource: %s\n", error->resource); len += snprintf(&(cbd->err_msg[len]), elen - len, " Resource: %s\n", error->resource);
} }
if (error->furtherDetails) { if (error->furtherDetails && elen - len > 0) {
len += snprintf(&(cbd->err_msg[len]), elen - len, " Further Details: %s\n", error->furtherDetails); len += snprintf(&(cbd->err_msg[len]), elen - len, " Further Details: %s\n", error->furtherDetails);
} }
if (error->extraDetailsCount) { if (error->extraDetailsCount && elen - len > 0) {
len += snprintf(&(cbd->err_msg[len]), elen - len, "%s", " Extra Details:\n"); len += snprintf(&(cbd->err_msg[len]), elen - len, "%s", " Extra Details:\n");
for (int i = 0; i < error->extraDetailsCount; i++) { for (int i = 0; i < error->extraDetailsCount; i++) {
len += snprintf(&(cbd->err_msg[len]), elen - len, " %s: %s\n", error->extraDetails[i].name, if (elen - len > 0) {
error->extraDetails[i].value); len += snprintf(&(cbd->err_msg[len]), elen - len, " %s: %s\n", error->extraDetails[i].name,
error->extraDetails[i].value);
}
} }
} }
} }
@ -201,8 +220,9 @@ static void growbuffer_destroy(growbuffer *gb) {
} }
typedef struct put_object_callback_data { typedef struct put_object_callback_data {
char err_msg[128]; char err_msg[512];
S3Status status; S3Status status;
uint64_t content_length;
// FILE *infile; // FILE *infile;
TdFilePtr infileFD; TdFilePtr infileFD;
growbuffer *gb; growbuffer *gb;
@ -211,11 +231,12 @@ typedef struct put_object_callback_data {
int noStatus; int noStatus;
} put_object_callback_data; } put_object_callback_data;
#define MULTIPART_CHUNK_SIZE (768 << 20) // multipart is 768M #define MULTIPART_CHUNK_SIZE (64 << 20) // multipart is 64M
typedef struct UploadManager { typedef struct {
char err_msg[128]; char err_msg[512];
S3Status status; S3Status status;
uint64_t content_length;
// used for initial multipart // used for initial multipart
char *upload_id; char *upload_id;
@ -229,8 +250,9 @@ typedef struct UploadManager {
} UploadManager; } UploadManager;
typedef struct list_parts_callback_data { typedef struct list_parts_callback_data {
char err_msg[128]; char err_msg[512];
S3Status status; S3Status status;
uint64_t content_length;
int isTruncated; int isTruncated;
char nextPartNumberMarker[24]; char nextPartNumberMarker[24];
char initiatorId[256]; char initiatorId[256];
@ -246,8 +268,6 @@ typedef struct list_parts_callback_data {
} list_parts_callback_data; } list_parts_callback_data;
typedef struct MultipartPartData { typedef struct MultipartPartData {
char err_msg[128];
S3Status status;
put_object_callback_data put_object_data; put_object_callback_data put_object_data;
int seq; int seq;
UploadManager *manager; UploadManager *manager;
@ -255,11 +275,12 @@ typedef struct MultipartPartData {
static int putObjectDataCallback(int bufferSize, char *buffer, void *callbackData) { static int putObjectDataCallback(int bufferSize, char *buffer, void *callbackData) {
put_object_callback_data *data = (put_object_callback_data *)callbackData; put_object_callback_data *data = (put_object_callback_data *)callbackData;
/*
if (data->infileFD == 0) { if (data->infileFD == 0) {
MultipartPartData *mpd = (MultipartPartData *)callbackData; MultipartPartData *mpd = (MultipartPartData *)callbackData;
data = &mpd->put_object_data; data = &mpd->put_object_data;
} }
*/
int ret = 0; int ret = 0;
if (data->contentLength) { if (data->contentLength) {
@ -287,11 +308,12 @@ static int putObjectDataCallback(int bufferSize, char *buffer, void *callbackDat
S3Status initial_multipart_callback(const char *upload_id, void *callbackData) { S3Status initial_multipart_callback(const char *upload_id, void *callbackData) {
UploadManager *manager = (UploadManager *)callbackData; UploadManager *manager = (UploadManager *)callbackData;
manager->upload_id = strdup(upload_id); manager->upload_id = strdup(upload_id);
manager->status = S3StatusOK;
return S3StatusOK; return S3StatusOK;
} }
S3Status MultipartResponseProperiesCallback(const S3ResponseProperties *properties, void *callbackData) { S3Status MultipartResponseProperiesCallback(const S3ResponseProperties *properties, void *callbackData) {
responsePropertiesCallback(properties, callbackData); responsePropertiesCallbackNull(properties, callbackData);
MultipartPartData *data = (MultipartPartData *)callbackData; MultipartPartData *data = (MultipartPartData *)callbackData;
int seq = data->seq; int seq = data->seq;
@ -390,7 +412,8 @@ static int try_get_parts_info(const char *bucketName, const char *key, UploadMan
S3BucketContext bucketContext = {0, tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId, tsS3AccessKeySecret, S3BucketContext bucketContext = {0, tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId, tsS3AccessKeySecret,
0, awsRegionG}; 0, awsRegionG};
S3ListPartsHandler listPartsHandler = {{&responsePropertiesCallback, &responseCompleteCallback}, &listPartsCallback}; S3ListPartsHandler listPartsHandler = {{&responsePropertiesCallbackNull, &responseCompleteCallback},
&listPartsCallback};
list_parts_callback_data data; list_parts_callback_data data;
@ -416,7 +439,7 @@ static int try_get_parts_info(const char *bucketName, const char *key, UploadMan
// printListMultipartHeader(data.allDetails); // printListMultipartHeader(data.allDetails);
} }
} else { } else {
s3PrintError(__func__, data.status, data.err_msg); s3PrintError(__FILE__, __LINE__, __func__, data.status, data.err_msg);
return -1; return -1;
} }
@ -436,22 +459,22 @@ int32_t s3PutObjectFromFile2(const char *file, const char *object) {
int metaPropertiesCount = 0; int metaPropertiesCount = 0;
S3NameValue metaProperties[S3_MAX_METADATA_COUNT]; S3NameValue metaProperties[S3_MAX_METADATA_COUNT];
char useServerSideEncryption = 0; char useServerSideEncryption = 0;
int noStatus = 0; put_object_callback_data data = {0};
put_object_callback_data data; // int noStatus = 0;
// data.infile = 0; // data.infile = 0;
data.infileFD = NULL; // data.gb = 0;
data.gb = 0; // data.infileFD = NULL;
data.noStatus = noStatus; // data.noStatus = noStatus;
if (taosStatFile(file, &contentLength, NULL, NULL) < 0) { if (taosStatFile(file, &contentLength, NULL, NULL) < 0) {
vError("ERROR: %s Failed to stat file %s: ", __func__, file); uError("ERROR: %s Failed to stat file %s: ", __func__, file);
code = TAOS_SYSTEM_ERROR(errno); code = TAOS_SYSTEM_ERROR(errno);
return code; return code;
} }
if (!(data.infileFD = taosOpenFile(file, TD_FILE_READ))) { if (!(data.infileFD = taosOpenFile(file, TD_FILE_READ))) {
vError("ERROR: %s Failed to open file %s: ", __func__, file); uError("ERROR: %s Failed to open file %s: ", __func__, file);
code = TAOS_SYSTEM_ERROR(errno); code = TAOS_SYSTEM_ERROR(errno);
return code; return code;
} }
@ -469,53 +492,52 @@ int32_t s3PutObjectFromFile2(const char *file, const char *object) {
metaProperties, useServerSideEncryption}; metaProperties, useServerSideEncryption};
if (contentLength <= MULTIPART_CHUNK_SIZE) { if (contentLength <= MULTIPART_CHUNK_SIZE) {
S3PutObjectHandler putObjectHandler = {{&responsePropertiesCallback, &responseCompleteCallback}, S3PutObjectHandler putObjectHandler = {{&responsePropertiesCallbackNull, &responseCompleteCallback},
&putObjectDataCallback}; &putObjectDataCallback};
do { do {
S3_put_object(&bucketContext, key, contentLength, &putProperties, 0, 0, &putObjectHandler, &data); S3_put_object(&bucketContext, key, contentLength, &putProperties, 0, 0, &putObjectHandler, &data);
} while (S3_status_is_retryable(data.status) && should_retry()); } while (S3_status_is_retryable(data.status) && should_retry());
if (data.infileFD) {
taosCloseFile(&data.infileFD);
} else if (data.gb) {
growbuffer_destroy(data.gb);
}
if (data.status != S3StatusOK) { if (data.status != S3StatusOK) {
s3PrintError(__func__, data.status, data.err_msg); s3PrintError(__FILE__, __LINE__, __func__, data.status, data.err_msg);
code = TAOS_SYSTEM_ERROR(EIO); code = TAOS_SYSTEM_ERROR(EIO);
} else if (data.contentLength) { } else if (data.contentLength) {
vError("ERROR: %s Failed to read remaining %llu bytes from input", __func__, uError("ERROR: %s Failed to read remaining %llu bytes from input", __func__,
(unsigned long long)data.contentLength); (unsigned long long)data.contentLength);
code = TAOS_SYSTEM_ERROR(EIO); code = TAOS_SYSTEM_ERROR(EIO);
} }
} else { } else {
uint64_t totalContentLength = contentLength; uint64_t totalContentLength = contentLength;
uint64_t todoContentLength = contentLength; uint64_t todoContentLength = contentLength;
UploadManager manager; UploadManager manager = {0};
manager.upload_id = 0; // manager.upload_id = 0;
manager.gb = 0; // manager.gb = 0;
// div round up // div round up
int seq; int seq;
uint64_t chunk_size = MULTIPART_CHUNK_SIZE >> 8; uint64_t chunk_size = MULTIPART_CHUNK_SIZE >> 3;
int totalSeq = ((contentLength + chunk_size - 1) / chunk_size); int totalSeq = (contentLength + chunk_size - 1) / chunk_size;
const int max_part_num = 10000;
if (totalSeq > max_part_num) {
chunk_size = (contentLength + max_part_num - contentLength % max_part_num) / max_part_num;
totalSeq = (contentLength + chunk_size - 1) / chunk_size;
}
MultipartPartData partData; MultipartPartData partData;
memset(&partData, 0, sizeof(MultipartPartData)); memset(&partData, 0, sizeof(MultipartPartData));
int partContentLength = 0; int partContentLength = 0;
S3MultipartInitialHandler handler = {{&responsePropertiesCallback, &responseCompleteCallback}, S3MultipartInitialHandler handler = {{&responsePropertiesCallbackNull, &responseCompleteCallback},
&initial_multipart_callback}; &initial_multipart_callback};
S3PutObjectHandler putObjectHandler = {{&MultipartResponseProperiesCallback, &responseCompleteCallback}, S3PutObjectHandler putObjectHandler = {{&MultipartResponseProperiesCallback, &responseCompleteCallback},
&putObjectDataCallback}; &putObjectDataCallback};
S3MultipartCommitHandler commit_handler = { S3MultipartCommitHandler commit_handler = {
{&responsePropertiesCallback, &responseCompleteCallback}, &multipartPutXmlCallback, 0}; {&responsePropertiesCallbackNull, &responseCompleteCallback}, &multipartPutXmlCallback, 0};
manager.etags = (char **)taosMemoryMalloc(sizeof(char *) * totalSeq); manager.etags = (char **)taosMemoryCalloc(totalSeq, sizeof(char *));
manager.next_etags_pos = 0; manager.next_etags_pos = 0;
/* /*
if (uploadId) { if (uploadId) {
@ -536,7 +558,7 @@ int32_t s3PutObjectFromFile2(const char *file, const char *object) {
} while (S3_status_is_retryable(manager.status) && should_retry()); } while (S3_status_is_retryable(manager.status) && should_retry());
if (manager.upload_id == 0 || manager.status != S3StatusOK) { if (manager.upload_id == 0 || manager.status != S3StatusOK) {
s3PrintError(__func__, manager.status, manager.err_msg); s3PrintError(__FILE__, __LINE__, __func__, manager.status, manager.err_msg);
code = TAOS_SYSTEM_ERROR(EIO); code = TAOS_SYSTEM_ERROR(EIO);
goto clean; goto clean;
} }
@ -559,9 +581,9 @@ int32_t s3PutObjectFromFile2(const char *file, const char *object) {
do { do {
S3_upload_part(&bucketContext, key, &putProperties, &putObjectHandler, seq, manager.upload_id, S3_upload_part(&bucketContext, key, &putProperties, &putObjectHandler, seq, manager.upload_id,
partContentLength, 0, timeoutMsG, &partData); partContentLength, 0, timeoutMsG, &partData);
} while (S3_status_is_retryable(partData.status) && should_retry()); } while (S3_status_is_retryable(partData.put_object_data.status) && should_retry());
if (partData.status != S3StatusOK) { if (partData.put_object_data.status != S3StatusOK) {
s3PrintError(__func__, partData.status, partData.err_msg); s3PrintError(__FILE__, __LINE__, __func__, partData.put_object_data.status, partData.put_object_data.err_msg);
code = TAOS_SYSTEM_ERROR(EIO); code = TAOS_SYSTEM_ERROR(EIO);
goto clean; goto clean;
} }
@ -575,6 +597,10 @@ int32_t s3PutObjectFromFile2(const char *file, const char *object) {
char buf[256]; char buf[256];
int n; int n;
for (i = 0; i < totalSeq; i++) { for (i = 0; i < totalSeq; i++) {
if (!manager.etags[i]) {
code = TAOS_SYSTEM_ERROR(EIO);
goto clean;
}
n = snprintf(buf, sizeof(buf), n = snprintf(buf, sizeof(buf),
"<Part><PartNumber>%d</PartNumber>" "<Part><PartNumber>%d</PartNumber>"
"<ETag>%s</ETag></Part>", "<ETag>%s</ETag></Part>",
@ -589,7 +615,7 @@ int32_t s3PutObjectFromFile2(const char *file, const char *object) {
timeoutMsG, &manager); timeoutMsG, &manager);
} while (S3_status_is_retryable(manager.status) && should_retry()); } while (S3_status_is_retryable(manager.status) && should_retry());
if (manager.status != S3StatusOK) { if (manager.status != S3StatusOK) {
s3PrintError(__func__, manager.status, manager.err_msg); s3PrintError(__FILE__, __LINE__, __func__, manager.status, manager.err_msg);
code = TAOS_SYSTEM_ERROR(EIO); code = TAOS_SYSTEM_ERROR(EIO);
goto clean; goto clean;
} }
@ -605,11 +631,17 @@ int32_t s3PutObjectFromFile2(const char *file, const char *object) {
taosMemoryFree(manager.etags); taosMemoryFree(manager.etags);
} }
if (data.infileFD) {
taosCloseFile(&data.infileFD);
} else if (data.gb) {
growbuffer_destroy(data.gb);
}
return code; return code;
} }
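The multipart path above starts from an 8 MiB part (MULTIPART_CHUNK_SIZE >> 3, with the new 64 MiB define) and enlarges it whenever the part count would exceed 10000, the S3 cap on parts per multipart upload. A minimal standalone sketch of that arithmetic, using a hypothetical object size:

// Sketch only: how chunk_size and totalSeq are derived for a large object.
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const uint64_t MiB = 1024ULL * 1024;
  uint64_t contentLength = 100ULL * 1024 * MiB;   // hypothetical 100 GiB object
  uint64_t chunk_size = (64 * MiB) >> 3;          // 8 MiB starting part, as in the code above
  int      totalSeq = (int)((contentLength + chunk_size - 1) / chunk_size);  // 12800 parts

  const int max_part_num = 10000;
  if (totalSeq > max_part_num) {
    // grow the part size so the object fits into at most 10000 parts
    chunk_size = (contentLength + max_part_num - contentLength % max_part_num) / max_part_num;
    totalSeq = (int)((contentLength + chunk_size - 1) / chunk_size);         // now 10000 parts
  }
  printf("chunk_size=%llu bytes, totalSeq=%d\n", (unsigned long long)chunk_size, totalSeq);
  return 0;
}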
typedef struct list_bucket_callback_data { typedef struct list_bucket_callback_data {
char err_msg[128]; char err_msg[512];
S3Status status; S3Status status;
int isTruncated; int isTruncated;
char nextMarker[1024]; char nextMarker[1024];
@ -658,19 +690,19 @@ static void s3FreeObjectKey(void *pItem) {
taosMemoryFree(key); taosMemoryFree(key);
} }
void s3DeleteObjectsByPrefix(const char *prefix) { static SArray *getListByPrefix(const char *prefix) {
S3BucketContext bucketContext = {0, tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId, tsS3AccessKeySecret, S3BucketContext bucketContext = {0, tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId, tsS3AccessKeySecret,
0, awsRegionG}; 0, awsRegionG};
S3ListBucketHandler listBucketHandler = {{&responsePropertiesCallback, &responseCompleteCallback}, S3ListBucketHandler listBucketHandler = {{&responsePropertiesCallbackNull, &responseCompleteCallback},
&listBucketCallback}; &listBucketCallback};
const char *marker = 0, *delimiter = 0; const char *marker = 0, *delimiter = 0;
int maxkeys = 0, allDetails = 0; int maxkeys = 0, allDetails = 0;
list_bucket_callback_data data; list_bucket_callback_data data;
data.objectArray = taosArrayInit(32, POINTER_BYTES); data.objectArray = taosArrayInit(32, sizeof(void *));
if (!data.objectArray) { if (!data.objectArray) {
vError("%s: %s", __func__, "out of memory"); uError("%s: %s", __func__, "out of memory");
return; return NULL;
} }
if (marker) { if (marker) {
snprintf(data.nextMarker, sizeof(data.nextMarker), "%s", marker); snprintf(data.nextMarker, sizeof(data.nextMarker), "%s", marker);
@ -693,18 +725,17 @@ void s3DeleteObjectsByPrefix(const char *prefix) {
if (data.status == S3StatusOK) { if (data.status == S3StatusOK) {
if (data.keyCount > 0) { if (data.keyCount > 0) {
// printListBucketHeader(allDetails); return data.objectArray;
s3DeleteObjects(TARRAY_DATA(data.objectArray), TARRAY_SIZE(data.objectArray));
} }
} else { } else {
s3PrintError(__func__, data.status, data.err_msg); s3PrintError(__FILE__, __LINE__, __func__, data.status, data.err_msg);
} }
taosArrayDestroyEx(data.objectArray, s3FreeObjectKey); taosArrayDestroyEx(data.objectArray, s3FreeObjectKey);
return NULL;
} }
void s3DeleteObjects(const char *object_name[], int nobject) { void s3DeleteObjects(const char *object_name[], int nobject) {
int status = 0;
S3BucketContext bucketContext = {0, tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId, tsS3AccessKeySecret, S3BucketContext bucketContext = {0, tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId, tsS3AccessKeySecret,
0, awsRegionG}; 0, awsRegionG};
S3ResponseHandler responseHandler = {0, &responseCompleteCallback}; S3ResponseHandler responseHandler = {0, &responseCompleteCallback};
@ -716,22 +747,33 @@ void s3DeleteObjects(const char *object_name[], int nobject) {
} while (S3_status_is_retryable(cbd.status) && should_retry()); } while (S3_status_is_retryable(cbd.status) && should_retry());
if ((cbd.status != S3StatusOK) && (cbd.status != S3StatusErrorPreconditionFailed)) { if ((cbd.status != S3StatusOK) && (cbd.status != S3StatusErrorPreconditionFailed)) {
s3PrintError(__func__, cbd.status, cbd.err_msg); s3PrintError(__FILE__, __LINE__, __func__, cbd.status, cbd.err_msg);
} }
} }
} }
void s3DeleteObjectsByPrefix(const char *prefix) {
SArray *objectArray = getListByPrefix(prefix);
if (objectArray == NULL) return;
s3DeleteObjects(TARRAY_DATA(objectArray), TARRAY_SIZE(objectArray));
taosArrayDestroyEx(objectArray, s3FreeObjectKey);
}
static S3Status getObjectDataCallback(int bufferSize, const char *buffer, void *callbackData) { static S3Status getObjectDataCallback(int bufferSize, const char *buffer, void *callbackData) {
TS3SizeCBD *cbd = callbackData; TS3SizeCBD *cbd = callbackData;
/*
if (cbd->content_length != bufferSize) { if (cbd->content_length != bufferSize) {
cbd->status = S3StatusAbortedByCallback; cbd->status = S3StatusAbortedByCallback;
return S3StatusAbortedByCallback; return S3StatusAbortedByCallback;
} }
*/
if (!cbd->buf) {
cbd->buf = taosMemoryCalloc(1, cbd->content_length);
}
char *buf = taosMemoryCalloc(1, bufferSize); if (cbd->buf) {
if (buf) { memcpy(cbd->buf + cbd->buf_pos, buffer, bufferSize);
memcpy(buf, buffer, bufferSize); cbd->buf_pos += bufferSize;
cbd->buf = buf;
cbd->status = S3StatusOK; cbd->status = S3StatusOK;
return S3StatusOK; return S3StatusOK;
} else { } else {
@ -740,7 +782,7 @@ static S3Status getObjectDataCallback(int bufferSize, const char *buffer, void *
} }
} }
int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t size, uint8_t **ppBlock) { int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t size, bool check, uint8_t **ppBlock) {
int status = 0; int status = 0;
int64_t ifModifiedSince = -1, ifNotModifiedSince = -1; int64_t ifModifiedSince = -1, ifNotModifiedSince = -1;
const char *ifMatch = 0, *ifNotMatch = 0; const char *ifMatch = 0, *ifNotMatch = 0;
@ -753,12 +795,18 @@ int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t size,
TS3SizeCBD cbd = {0}; TS3SizeCBD cbd = {0};
cbd.content_length = size; cbd.content_length = size;
cbd.buf_pos = 0;
do { do {
S3_get_object(&bucketContext, object_name, &getConditions, offset, size, 0, 0, &getObjectHandler, &cbd); S3_get_object(&bucketContext, object_name, &getConditions, offset, size, 0, 0, &getObjectHandler, &cbd);
} while (S3_status_is_retryable(cbd.status) && should_retry()); } while (S3_status_is_retryable(cbd.status) && should_retry());
if (cbd.status != S3StatusOK) { if (cbd.status != S3StatusOK) {
vError("%s: %d(%s)", __func__, cbd.status, cbd.err_msg); uError("%s: %d(%s)", __func__, cbd.status, cbd.err_msg);
return TAOS_SYSTEM_ERROR(EIO);
}
if (check && cbd.buf_pos != size) {
uError("%s: %d(%s)", __func__, cbd.status, cbd.err_msg);
return TAOS_SYSTEM_ERROR(EIO); return TAOS_SYSTEM_ERROR(EIO);
} }
@ -767,6 +815,66 @@ int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t size,
return 0; return 0;
} }
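A minimal caller-side sketch of the extended s3GetObjectBlock() signature (the prototype is assumed to come from the existing cos header; the object name is hypothetical, and the returned block is assumed to be owned and freed by the caller, matching the cbd.buf allocation above):

// Hypothetical helper: fetch one block and insist on the full range being returned.
static int32_t loadBlockChecked(const char *object, int64_t offset, int64_t size, uint8_t **ppBlock) {
  // with check = true, a short read (cbd.buf_pos != size) is reported as TAOS_SYSTEM_ERROR(EIO)
  int32_t code = s3GetObjectBlock(object, offset, size, true, ppBlock);
  if (code != 0) {
    return code;
  }
  // ... consume *ppBlock, then release it with taosMemoryFree ...
  return 0;
}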
static S3Status getObjectCallback(int bufferSize, const char *buffer, void *callbackData) {
TS3GetData *cbd = (TS3GetData *)callbackData;
size_t wrote = taosWriteFile(cbd->file, buffer, bufferSize);
return ((wrote < (size_t)bufferSize) ? S3StatusAbortedByCallback : S3StatusOK);
}
int32_t s3GetObjectToFile(const char *object_name, char *fileName) {
int64_t ifModifiedSince = -1, ifNotModifiedSince = -1;
const char *ifMatch = 0, *ifNotMatch = 0;
S3BucketContext bucketContext = {0, tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId, tsS3AccessKeySecret,
0, awsRegionG};
S3GetConditions getConditions = {ifModifiedSince, ifNotModifiedSince, ifMatch, ifNotMatch};
S3GetObjectHandler getObjectHandler = {{&responsePropertiesCallbackNull, &responseCompleteCallback},
&getObjectCallback};
TdFilePtr pFile = taosOpenFile(fileName, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
if (pFile == NULL) {
uError("[s3] open file error, errno:%d, fileName:%s", errno, fileName);
return -1;
}
TS3GetData cbd = {0};
cbd.file = pFile;
do {
S3_get_object(&bucketContext, object_name, &getConditions, 0, 0, 0, 0, &getObjectHandler, &cbd);
} while (S3_status_is_retryable(cbd.status) && should_retry());
if (cbd.status != S3StatusOK) {
uError("%s: %d(%s)", __func__, cbd.status, cbd.err_msg);
taosCloseFile(&pFile);
return TAOS_SYSTEM_ERROR(EIO);
}
taosCloseFile(&pFile);
return 0;
}
int32_t s3GetObjectsByPrefix(const char *prefix, const char *path) {
SArray *objectArray = getListByPrefix(prefix);
if (objectArray == NULL) return -1;
for (size_t i = 0; i < taosArrayGetSize(objectArray); i++) {
char *object = taosArrayGetP(objectArray, i);
const char *tmp = strchr(object, '/');
tmp = (tmp == NULL) ? object : tmp + 1;
char fileName[PATH_MAX] = {0};
if (path[strlen(path) - 1] != TD_DIRSEP_CHAR) {
snprintf(fileName, PATH_MAX, "%s%s%s", path, TD_DIRSEP, tmp);
} else {
snprintf(fileName, PATH_MAX, "%s%s", path, tmp);
}
if (s3GetObjectToFile(object, fileName) != 0) {
taosArrayDestroyEx(objectArray, s3FreeObjectKey);
return -1;
}
}
taosArrayDestroyEx(objectArray, s3FreeObjectKey);
return 0;
}
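s3GetObjectsByPrefix() keeps only the part of each object key after the first '/' when composing the local file name. A small standalone sketch of that mapping, with hypothetical keys and paths and '/' standing in for TD_DIRSEP_CHAR:

#include <stdio.h>
#include <string.h>

// Mirrors the key -> local-file mapping used by s3GetObjectsByPrefix().
static void mapKey(const char *object, const char *path, char *out, size_t cap) {
  const char *tmp = strchr(object, '/');
  tmp = (tmp == NULL) ? object : tmp + 1;          // drop the leading "<dir>/" if present
  if (path[strlen(path) - 1] != '/') {
    snprintf(out, cap, "%s/%s", path, tmp);
  } else {
    snprintf(out, cap, "%s%s", path, tmp);
  }
}

int main(void) {
  char out[512];
  mapKey("vnode2/v2f1848ver47.data", "/var/lib/taos/vnode2", out, sizeof(out));
  printf("%s\n", out);   // /var/lib/taos/vnode2/v2f1848ver47.data
  return 0;
}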
long s3Size(const char *object_name) { long s3Size(const char *object_name) {
long size = 0; long size = 0;
int status = 0; int status = 0;
@ -782,7 +890,7 @@ long s3Size(const char *object_name) {
} while (S3_status_is_retryable(cbd.status) && should_retry()); } while (S3_status_is_retryable(cbd.status) && should_retry());
if ((cbd.status != S3StatusOK) && (cbd.status != S3StatusErrorPreconditionFailed)) { if ((cbd.status != S3StatusOK) && (cbd.status != S3StatusErrorPreconditionFailed)) {
vError("%s: %d(%s)", __func__, cbd.status, cbd.err_msg); s3PrintError(__FILE__, __LINE__, __func__, cbd.status, cbd.err_msg);
} }
size = cbd.content_length; size = cbd.content_length;
@ -1045,7 +1153,8 @@ bool s3Get(const char *object_name, const char *path) {
return ret; return ret;
} }
int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t block_size, uint8_t **ppBlock) { int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t block_size, bool check, uint8_t **ppBlock) {
(void)check;
int32_t code = 0; int32_t code = 0;
cos_pool_t *p = NULL; cos_pool_t *p = NULL;
int is_cname = 0; int is_cname = 0;
@ -1237,8 +1346,11 @@ void s3DeleteObjectsByPrefix(const char *prefix) {}
void s3DeleteObjects(const char *object_name[], int nobject) {} void s3DeleteObjects(const char *object_name[], int nobject) {}
bool s3Exists(const char *object_name) { return false; } bool s3Exists(const char *object_name) { return false; }
bool s3Get(const char *object_name, const char *path) { return false; } bool s3Get(const char *object_name, const char *path) { return false; }
int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t size, uint8_t **ppBlock) { return 0; } int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t size, bool check, uint8_t **ppBlock) {
return 0;
}
void s3EvictCache(const char *path, long object_size) {} void s3EvictCache(const char *path, long object_size) {}
long s3Size(const char *object_name) { return 0; } long s3Size(const char *object_name) { return 0; }
int32_t s3GetObjectsByPrefix(const char *prefix, const char *path) { return 0; }
#endif #endif

source/common/src/rsync.c (new file, 235 lines)

@ -0,0 +1,235 @@
//
// Created by mingming wanng on 2023/11/2.
//
#include "rsync.h"
#include <stdlib.h>
#include "tglobal.h"
#define ERRNO_ERR_FORMAT "errno:%d,msg:%s"
#define ERRNO_ERR_DATA errno,strerror(errno)
// deleteRsync leaves empty directories behind, so traverse the base directory to remove them
static void removeEmptyDir(){
TdDirPtr pDir = taosOpenDir(tsCheckpointBackupDir);
if (pDir == NULL) return;
TdDirEntryPtr de = NULL;
while ((de = taosReadDir(pDir)) != NULL) {
if (!taosDirEntryIsDir(de)) {
continue;
}
if (strcmp(taosGetDirEntryName(de), ".") == 0 || strcmp(taosGetDirEntryName(de), "..") == 0) continue;
char filename[PATH_MAX] = {0};
snprintf(filename, sizeof(filename), "%s%s", tsCheckpointBackupDir, taosGetDirEntryName(de));
TdDirPtr pDirTmp = taosOpenDir(filename);
TdDirEntryPtr deTmp = NULL;
bool empty = true;
while ((deTmp = taosReadDir(pDirTmp)) != NULL){
if (strcmp(taosGetDirEntryName(deTmp), ".") == 0 || strcmp(taosGetDirEntryName(deTmp), "..") == 0) continue;
empty = false;
}
if(empty) taosRemoveDir(filename);
taosCloseDir(&pDirTmp);
}
taosCloseDir(&pDir);
}
#ifdef WINDOWS
// C:\TDengine\data\backup\checkpoint\ -> /c/TDengine/data/backup/checkpoint/
static void changeDirFromWindowsToLinux(char* from, char* to){
to[0] = '/';
to[1] = from[0];
for(int i = 2; i < strlen(from); i++) {
if (from[i] == '\\') {
to[i] = '/';
} else {
to[i] = from[i];
}
}
}
#endif
static int generateConfigFile(char* confDir){
TdFilePtr pFile = taosOpenFile(confDir, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
if (pFile == NULL) {
uError("[rsync] open conf file error, dir:%s,"ERRNO_ERR_FORMAT, confDir, ERRNO_ERR_DATA);
return -1;
}
#ifdef WINDOWS
char path[PATH_MAX] = {0};
changeDirFromWindowsToLinux(tsCheckpointBackupDir, path);
#endif
char confContent[PATH_MAX*4] = {0};
snprintf(confContent, PATH_MAX*4,
#ifndef WINDOWS
"uid = root\n"
"gid = root\n"
#endif
"use chroot = false\n"
"max connections = 200\n"
"timeout = 100\n"
"lock file = %srsync.lock\n"
"log file = %srsync.log\n"
"ignore errors = true\n"
"read only = false\n"
"list = false\n"
"[checkpoint]\n"
"path = %s", tsCheckpointBackupDir, tsCheckpointBackupDir,
#ifdef WINDOWS
path
#else
tsCheckpointBackupDir
#endif
);
uDebug("[rsync] conf:%s", confContent);
if (taosWriteFile(pFile, confContent, strlen(confContent)) <= 0){
uError("[rsync] write conf file error,"ERRNO_ERR_FORMAT, ERRNO_ERR_DATA);
taosCloseFile(&pFile);
return -1;
}
taosCloseFile(&pFile);
return 0;
}
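For reference, with a hypothetical tsCheckpointBackupDir of /var/lib/taos/backup/checkpoint/, the Linux branch of generateConfigFile() renders roughly this rsyncd module file:

uid = root
gid = root
use chroot = false
max connections = 200
timeout = 100
lock file = /var/lib/taos/backup/checkpoint/rsync.lock
log file = /var/lib/taos/backup/checkpoint/rsync.log
ignore errors = true
read only = false
list = false
[checkpoint]
path = /var/lib/taos/backup/checkpoint/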
static int execCommand(char* command){
int try = 3;
int32_t code = 0;
while(try-- > 0) {
code = system(command);
if (code == 0) {
break;
}
taosMsleep(10);
}
return code;
}
void stopRsync(){
int code =
#ifdef WINDOWS
system("taskkill /f /im rsync.exe");
#else
system("pkill rsync");
#endif
if(code != 0){
uError("[rsync] stop rsync server failed,"ERRNO_ERR_FORMAT, ERRNO_ERR_DATA);
return;
}
uDebug("[rsync] stop rsync server successful");
}
void startRsync(){
if(taosMulMkDir(tsCheckpointBackupDir) != 0){
uError("[rsync] build checkpoint backup dir failed, dir:%s,"ERRNO_ERR_FORMAT, tsCheckpointBackupDir, ERRNO_ERR_DATA);
return;
}
removeEmptyDir();
char confDir[PATH_MAX] = {0};
snprintf(confDir, PATH_MAX, "%srsync.conf", tsCheckpointBackupDir);
int code = generateConfigFile(confDir);
if(code != 0){
return;
}
char cmd[PATH_MAX] = {0};
snprintf(cmd, PATH_MAX, "rsync --daemon --port=%d --config=%s", tsRsyncPort, confDir);
// start rsync service to backup checkpoint
code = system(cmd);
if(code != 0){
uError("[rsync] start server failed, code:%d,"ERRNO_ERR_FORMAT, code, ERRNO_ERR_DATA);
return;
}
uDebug("[rsync] start server successful");
}
int uploadRsync(char* id, char* path){
#ifdef WINDOWS
char pathTransform[PATH_MAX] = {0};
changeDirFromWindowsToLinux(path, pathTransform);
#endif
char command[PATH_MAX] = {0};
#ifdef WINDOWS
if(pathTransform[strlen(pathTransform) - 1] != '/'){
#else
if(path[strlen(path) - 1] != '/'){
#endif
snprintf(command, PATH_MAX, "rsync -av --delete --timeout=10 --bwlimit=100000 %s/ rsync://%s/checkpoint/%s/",
#ifdef WINDOWS
pathTransform
#else
path
#endif
, tsSnodeAddress, id);
}else{
snprintf(command, PATH_MAX, "rsync -av --delete --timeout=10 --bwlimit=100000 %s rsync://%s/checkpoint/%s/",
#ifdef WINDOWS
pathTransform
#else
path
#endif
, tsSnodeAddress, id);
}
int code = execCommand(command);
if(code != 0){
uError("[rsync] send failed code:%d," ERRNO_ERR_FORMAT, code, ERRNO_ERR_DATA);
return -1;
}
uDebug("[rsync] upload data:%s successful", id);
return 0;
}
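With a hypothetical checkpoint id of ckpt-3, a local path of /var/lib/taos/stream/ckpt and tsSnodeAddress set to 192.168.1.10, uploadRsync() assembles roughly this command:

rsync -av --delete --timeout=10 --bwlimit=100000 /var/lib/taos/stream/ckpt/ rsync://192.168.1.10/checkpoint/ckpt-3/

Both branches make sure the source ends with '/', so rsync copies the directory contents into the checkpoint/<id>/ module path instead of nesting the directory itself.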
int downloadRsync(char* id, char* path){
#ifdef WINDOWS
char pathTransform[PATH_MAX] = {0};
changeDirFromWindowsToLinux(path, pathTransform);
#endif
char command[PATH_MAX] = {0};
snprintf(command, PATH_MAX, "rsync -av --timeout=10 --bwlimit=100000 rsync://%s/checkpoint/%s/ %s",
tsSnodeAddress, id,
#ifdef WINDOWS
pathTransform
#else
path
#endif
);
int code = execCommand(command);
if(code != 0){
uError("[rsync] get failed code:%d," ERRNO_ERR_FORMAT, code, ERRNO_ERR_DATA);
return -1;
}
uDebug("[rsync] download data:%s successful", id);
return 0;
}
int deleteRsync(char* id){
char* tmp = "./tmp_empty/";
int code = taosMkDir(tmp);
if(code != 0){
uError("[rsync] make tmp dir failed. code:%d," ERRNO_ERR_FORMAT, code, ERRNO_ERR_DATA);
return -1;
}
char command[PATH_MAX] = {0};
snprintf(command, PATH_MAX, "rsync -av --delete --timeout=10 %s rsync://%s/checkpoint/%s/",
tmp, tsSnodeAddress, id);
code = execCommand(command);
taosRemoveDir(tmp);
if(code != 0){
uError("[rsync] delete failed code:%d," ERRNO_ERR_FORMAT, code, ERRNO_ERR_DATA);
return -1;
}
uDebug("[rsync] delete data:%s successful", id);
return 0;
}
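deleteRsync() leans on rsync's --delete semantics: pushing an empty local directory into the module path removes everything stored under it. With the same hypothetical snode address and id, the assembled command is roughly:

rsync -av --delete --timeout=10 ./tmp_empty/ rsync://192.168.1.10/checkpoint/ckpt-3/

The empty <id> directories this leaves behind on the server are the ones removeEmptyDir() sweeps away the next time startRsync() runs.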


@ -24,6 +24,7 @@
#define SYSTABLE_SCH_TABLE_NAME_LEN ((TSDB_TABLE_NAME_LEN - 1) + VARSTR_HEADER_SIZE) #define SYSTABLE_SCH_TABLE_NAME_LEN ((TSDB_TABLE_NAME_LEN - 1) + VARSTR_HEADER_SIZE)
#define SYSTABLE_SCH_DB_NAME_LEN ((TSDB_DB_NAME_LEN - 1) + VARSTR_HEADER_SIZE) #define SYSTABLE_SCH_DB_NAME_LEN ((TSDB_DB_NAME_LEN - 1) + VARSTR_HEADER_SIZE)
#define SYSTABLE_SCH_COL_NAME_LEN ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE) #define SYSTABLE_SCH_COL_NAME_LEN ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE)
#define SYSTABLE_SCH_VIEW_NAME_LEN ((TSDB_VIEW_NAME_LEN - 1) + VARSTR_HEADER_SIZE)
// clang-format off // clang-format off
static const SSysDbTableSchema dnodesSchema[] = { static const SSysDbTableSchema dnodesSchema[] = {
@ -155,6 +156,8 @@ static const SSysDbTableSchema streamSchema[] = {
{.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, {.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
{.name = "trigger", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "trigger", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "sink_quota", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "history_scan_idle", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
}; };
static const SSysDbTableSchema streamTaskSchema[] = { static const SSysDbTableSchema streamTaskSchema[] = {
@ -164,7 +167,7 @@ static const SSysDbTableSchema streamTaskSchema[] = {
{.name = "node_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, {.name = "node_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
{.name = "level", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "level", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "status", .bytes = 15 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "status", .bytes = 15 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "stage", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, {.name = "stage", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
{.name = "in_queue", .bytes = 20, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "in_queue", .bytes = 20, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
// {.name = "out_queue", .bytes = 20, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, // {.name = "out_queue", .bytes = 20, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "info", .bytes = 25, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "info", .bytes = 25, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
@ -315,8 +318,23 @@ static const SSysDbTableSchema userUserPrivilegesSchema[] = {
{.name = "db_name", .bytes = TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "db_name", .bytes = TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "table_name", .bytes = TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "table_name", .bytes = TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "condition", .bytes = TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "condition", .bytes = TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "notes", .bytes = 64 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
}; };
static const SSysDbTableSchema userViewsSchema[] = {
{.name = "view_name", .bytes = SYSTABLE_SCH_VIEW_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "effective_user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
{.name = "type", .bytes = 128 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "query_sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "parameters", .bytes = 2048 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "default_values", .bytes = 2048 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
// {.name = "column_list", .bytes = 2048 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
};
static const SSysTableMeta infosMeta[] = { static const SSysTableMeta infosMeta[] = {
{TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema), true}, {TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema), true},
{TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema), true}, {TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema), true},
@ -343,6 +361,7 @@ static const SSysTableMeta infosMeta[] = {
{TSDB_INS_TABLE_STREAM_TASKS, streamTaskSchema, tListLen(streamTaskSchema), false}, {TSDB_INS_TABLE_STREAM_TASKS, streamTaskSchema, tListLen(streamTaskSchema), false},
{TSDB_INS_TABLE_VNODES, vnodesSchema, tListLen(vnodesSchema), true}, {TSDB_INS_TABLE_VNODES, vnodesSchema, tListLen(vnodesSchema), true},
{TSDB_INS_TABLE_USER_PRIVILEGES, userUserPrivilegesSchema, tListLen(userUserPrivilegesSchema), true}, {TSDB_INS_TABLE_USER_PRIVILEGES, userUserPrivilegesSchema, tListLen(userUserPrivilegesSchema), true},
{TSDB_INS_TABLE_VIEWS, userViewsSchema, tListLen(userViewsSchema), false},
}; };
static const SSysDbTableSchema connectionsSchema[] = { static const SSysDbTableSchema connectionsSchema[] = {


@ -2121,6 +2121,7 @@ _end:
char* buildCtbNameByGroupId(const char* stbFullName, uint64_t groupId) { char* buildCtbNameByGroupId(const char* stbFullName, uint64_t groupId) {
char* pBuf = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN + 1); char* pBuf = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN + 1);
if (!pBuf) { if (!pBuf) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL; return NULL;
} }
int32_t code = buildCtbNameByGroupIdImpl(stbFullName, groupId, pBuf); int32_t code = buildCtbNameByGroupIdImpl(stbFullName, groupId, pBuf);
@ -2133,6 +2134,7 @@ char* buildCtbNameByGroupId(const char* stbFullName, uint64_t groupId) {
int32_t buildCtbNameByGroupIdImpl(const char* stbFullName, uint64_t groupId, char* cname) { int32_t buildCtbNameByGroupIdImpl(const char* stbFullName, uint64_t groupId, char* cname) {
if (stbFullName[0] == 0) { if (stbFullName[0] == 0) {
terrno = TSDB_CODE_INVALID_PARA;
return TSDB_CODE_FAILED; return TSDB_CODE_FAILED;
} }
@ -2142,6 +2144,7 @@ int32_t buildCtbNameByGroupIdImpl(const char* stbFullName, uint64_t groupId, cha
} }
if (cname == NULL) { if (cname == NULL) {
terrno = TSDB_CODE_INVALID_PARA;
taosArrayDestroy(tags); taosArrayDestroy(tags);
return TSDB_CODE_FAILED; return TSDB_CODE_FAILED;
} }

File diff suppressed because it is too large


@ -1677,6 +1677,7 @@ int32_t tSerializeSAlterUserReq(void *buf, int32_t bufLen, SAlterUserReq *pReq)
if (tEncodeI8(&encoder, pReq->superUser) < 0) return -1; if (tEncodeI8(&encoder, pReq->superUser) < 0) return -1;
if (tEncodeI8(&encoder, pReq->sysInfo) < 0) return -1; if (tEncodeI8(&encoder, pReq->sysInfo) < 0) return -1;
if (tEncodeI8(&encoder, pReq->enable) < 0) return -1; if (tEncodeI8(&encoder, pReq->enable) < 0) return -1;
if (tEncodeI8(&encoder, pReq->isView) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->user) < 0) return -1; if (tEncodeCStr(&encoder, pReq->user) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->pass) < 0) return -1; if (tEncodeCStr(&encoder, pReq->pass) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->objname) < 0) return -1; if (tEncodeCStr(&encoder, pReq->objname) < 0) return -1;
@ -1691,6 +1692,7 @@ int32_t tSerializeSAlterUserReq(void *buf, int32_t bufLen, SAlterUserReq *pReq)
if (tEncodeU32(&encoder, pReq->pIpRanges[i].ip) < 0) return -1; if (tEncodeU32(&encoder, pReq->pIpRanges[i].ip) < 0) return -1;
if (tEncodeU32(&encoder, pReq->pIpRanges[i].mask) < 0) return -1; if (tEncodeU32(&encoder, pReq->pIpRanges[i].mask) < 0) return -1;
} }
if (tEncodeI64(&encoder, pReq->privileges) < 0) return -1;
ENCODESQL(); ENCODESQL();
tEndEncode(&encoder); tEndEncode(&encoder);
@ -1708,6 +1710,7 @@ int32_t tDeserializeSAlterUserReq(void *buf, int32_t bufLen, SAlterUserReq *pReq
if (tDecodeI8(&decoder, &pReq->superUser) < 0) return -1; if (tDecodeI8(&decoder, &pReq->superUser) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->sysInfo) < 0) return -1; if (tDecodeI8(&decoder, &pReq->sysInfo) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->enable) < 0) return -1; if (tDecodeI8(&decoder, &pReq->enable) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->isView) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->user) < 0) return -1; if (tDecodeCStrTo(&decoder, pReq->user) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->pass) < 0) return -1; if (tDecodeCStrTo(&decoder, pReq->pass) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->objname) < 0) return -1; if (tDecodeCStrTo(&decoder, pReq->objname) < 0) return -1;
@ -1728,6 +1731,7 @@ int32_t tDeserializeSAlterUserReq(void *buf, int32_t bufLen, SAlterUserReq *pReq
if (tDecodeU32(&decoder, &(pReq->pIpRanges[i].ip)) < 0) return -1; if (tDecodeU32(&decoder, &(pReq->pIpRanges[i].ip)) < 0) return -1;
if (tDecodeU32(&decoder, &(pReq->pIpRanges[i].mask)) < 0) return -1; if (tDecodeU32(&decoder, &(pReq->pIpRanges[i].mask)) < 0) return -1;
} }
if (tDecodeI64(&decoder, &pReq->privileges) < 0) return -1;
DECODESQL(); DECODESQL();
tEndDecode(&decoder); tEndDecode(&decoder);
@ -1802,10 +1806,18 @@ int32_t tSerializeSGetUserAuthRspImpl(SEncoder *pEncoder, SGetUserAuthRsp *pRsp)
int32_t numOfReadTbs = taosHashGetSize(pRsp->readTbs); int32_t numOfReadTbs = taosHashGetSize(pRsp->readTbs);
int32_t numOfWriteTbs = taosHashGetSize(pRsp->writeTbs); int32_t numOfWriteTbs = taosHashGetSize(pRsp->writeTbs);
int32_t numOfUseTbs = taosHashGetSize(pRsp->useDbs); int32_t numOfAlterTbs = taosHashGetSize(pRsp->alterTbs);
int32_t numOfReadViews = taosHashGetSize(pRsp->readViews);
int32_t numOfWriteViews = taosHashGetSize(pRsp->writeViews);
int32_t numOfAlterViews = taosHashGetSize(pRsp->alterViews);
int32_t numOfUseDbs = taosHashGetSize(pRsp->useDbs);
if (tEncodeI32(pEncoder, numOfReadTbs) < 0) return -1; if (tEncodeI32(pEncoder, numOfReadTbs) < 0) return -1;
if (tEncodeI32(pEncoder, numOfWriteTbs) < 0) return -1; if (tEncodeI32(pEncoder, numOfWriteTbs) < 0) return -1;
if (tEncodeI32(pEncoder, numOfUseTbs) < 0) return -1; if (tEncodeI32(pEncoder, numOfAlterTbs) < 0) return -1;
if (tEncodeI32(pEncoder, numOfReadViews) < 0) return -1;
if (tEncodeI32(pEncoder, numOfWriteViews) < 0) return -1;
if (tEncodeI32(pEncoder, numOfAlterViews) < 0) return -1;
if (tEncodeI32(pEncoder, numOfUseDbs) < 0) return -1;
char *tb = taosHashIterate(pRsp->readTbs, NULL); char *tb = taosHashIterate(pRsp->readTbs, NULL);
while (tb != NULL) { while (tb != NULL) {
@ -1837,6 +1849,66 @@ int32_t tSerializeSGetUserAuthRspImpl(SEncoder *pEncoder, SGetUserAuthRsp *pRsp)
tb = taosHashIterate(pRsp->writeTbs, tb); tb = taosHashIterate(pRsp->writeTbs, tb);
} }
tb = taosHashIterate(pRsp->alterTbs, NULL);
while (tb != NULL) {
size_t keyLen = 0;
void *key = taosHashGetKey(tb, &keyLen);
if (tEncodeI32(pEncoder, keyLen) < 0) return -1;
if (tEncodeCStr(pEncoder, key) < 0) return -1;
size_t valueLen = 0;
valueLen = strlen(tb);
if (tEncodeI32(pEncoder, valueLen) < 0) return -1;
if (tEncodeCStr(pEncoder, tb) < 0) return -1;
tb = taosHashIterate(pRsp->alterTbs, tb);
}
tb = taosHashIterate(pRsp->readViews, NULL);
while (tb != NULL) {
size_t keyLen = 0;
void *key = taosHashGetKey(tb, &keyLen);
if (tEncodeI32(pEncoder, keyLen) < 0) return -1;
if (tEncodeCStr(pEncoder, key) < 0) return -1;
size_t valueLen = 0;
valueLen = strlen(tb);
if (tEncodeI32(pEncoder, valueLen) < 0) return -1;
if (tEncodeCStr(pEncoder, tb) < 0) return -1;
tb = taosHashIterate(pRsp->readViews, tb);
}
tb = taosHashIterate(pRsp->writeViews, NULL);
while (tb != NULL) {
size_t keyLen = 0;
void *key = taosHashGetKey(tb, &keyLen);
if (tEncodeI32(pEncoder, keyLen) < 0) return -1;
if (tEncodeCStr(pEncoder, key) < 0) return -1;
size_t valueLen = 0;
valueLen = strlen(tb);
if (tEncodeI32(pEncoder, valueLen) < 0) return -1;
if (tEncodeCStr(pEncoder, tb) < 0) return -1;
tb = taosHashIterate(pRsp->writeViews, tb);
}
tb = taosHashIterate(pRsp->alterViews, NULL);
while (tb != NULL) {
size_t keyLen = 0;
void *key = taosHashGetKey(tb, &keyLen);
if (tEncodeI32(pEncoder, keyLen) < 0) return -1;
if (tEncodeCStr(pEncoder, key) < 0) return -1;
size_t valueLen = 0;
valueLen = strlen(tb);
if (tEncodeI32(pEncoder, valueLen) < 0) return -1;
if (tEncodeCStr(pEncoder, tb) < 0) return -1;
tb = taosHashIterate(pRsp->alterViews, tb);
}
int32_t *useDb = taosHashIterate(pRsp->useDbs, NULL); int32_t *useDb = taosHashIterate(pRsp->useDbs, NULL);
while (useDb != NULL) { while (useDb != NULL) {
size_t keyLen = 0; size_t keyLen = 0;
@ -1876,9 +1948,14 @@ int32_t tDeserializeSGetUserAuthRspImpl(SDecoder *pDecoder, SGetUserAuthRsp *pRs
pRsp->writeDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK); pRsp->writeDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
pRsp->readTbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK); pRsp->readTbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
pRsp->writeTbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK); pRsp->writeTbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
pRsp->alterTbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
pRsp->readViews = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
pRsp->writeViews = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
pRsp->alterViews = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
pRsp->useDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK); pRsp->useDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
if (pRsp->createdDbs == NULL || pRsp->readDbs == NULL || pRsp->writeDbs == NULL || pRsp->readTbs == NULL || if (pRsp->createdDbs == NULL || pRsp->readDbs == NULL || pRsp->writeDbs == NULL || pRsp->readTbs == NULL ||
pRsp->writeTbs == NULL || pRsp->useDbs == NULL) { pRsp->writeTbs == NULL || pRsp->alterTbs == NULL || pRsp->readViews == NULL ||
pRsp->writeViews == NULL || pRsp->alterViews == NULL ||pRsp->useDbs == NULL) {
goto _err; goto _err;
} }
@ -1900,29 +1977,37 @@ int32_t tDeserializeSGetUserAuthRspImpl(SDecoder *pDecoder, SGetUserAuthRsp *pRs
char db[TSDB_DB_FNAME_LEN] = {0}; char db[TSDB_DB_FNAME_LEN] = {0};
if (tDecodeCStrTo(pDecoder, db) < 0) goto _err; if (tDecodeCStrTo(pDecoder, db) < 0) goto _err;
int32_t len = strlen(db); int32_t len = strlen(db);
taosHashPut(pRsp->createdDbs, db, len, db, len + 1); taosHashPut(pRsp->createdDbs, db, len + 1, db, len + 1);
} }
for (int32_t i = 0; i < numOfReadDbs; ++i) { for (int32_t i = 0; i < numOfReadDbs; ++i) {
char db[TSDB_DB_FNAME_LEN] = {0}; char db[TSDB_DB_FNAME_LEN] = {0};
if (tDecodeCStrTo(pDecoder, db) < 0) goto _err; if (tDecodeCStrTo(pDecoder, db) < 0) goto _err;
int32_t len = strlen(db); int32_t len = strlen(db);
taosHashPut(pRsp->readDbs, db, len, db, len + 1); taosHashPut(pRsp->readDbs, db, len + 1, db, len + 1);
} }
for (int32_t i = 0; i < numOfWriteDbs; ++i) { for (int32_t i = 0; i < numOfWriteDbs; ++i) {
char db[TSDB_DB_FNAME_LEN] = {0}; char db[TSDB_DB_FNAME_LEN] = {0};
if (tDecodeCStrTo(pDecoder, db) < 0) goto _err; if (tDecodeCStrTo(pDecoder, db) < 0) goto _err;
int32_t len = strlen(db); int32_t len = strlen(db);
taosHashPut(pRsp->writeDbs, db, len, db, len + 1); taosHashPut(pRsp->writeDbs, db, len + 1, db, len + 1);
} }
if (!tDecodeIsEnd(pDecoder)) { if (!tDecodeIsEnd(pDecoder)) {
int32_t numOfReadTbs = 0; int32_t numOfReadTbs = 0;
int32_t numOfWriteTbs = 0; int32_t numOfWriteTbs = 0;
int32_t numOfAlterTbs = 0;
int32_t numOfReadViews = 0;
int32_t numOfWriteViews = 0;
int32_t numOfAlterViews = 0;
int32_t numOfUseDbs = 0; int32_t numOfUseDbs = 0;
if (tDecodeI32(pDecoder, &numOfReadTbs) < 0) goto _err; if (tDecodeI32(pDecoder, &numOfReadTbs) < 0) goto _err;
if (tDecodeI32(pDecoder, &numOfWriteTbs) < 0) goto _err; if (tDecodeI32(pDecoder, &numOfWriteTbs) < 0) goto _err;
if (tDecodeI32(pDecoder, &numOfAlterTbs) < 0) goto _err;
if (tDecodeI32(pDecoder, &numOfReadViews) < 0) goto _err;
if (tDecodeI32(pDecoder, &numOfWriteViews) < 0) goto _err;
if (tDecodeI32(pDecoder, &numOfAlterViews) < 0) goto _err;
if (tDecodeI32(pDecoder, &numOfUseDbs) < 0) goto _err; if (tDecodeI32(pDecoder, &numOfUseDbs) < 0) goto _err;
for (int32_t i = 0; i < numOfReadTbs; ++i) { for (int32_t i = 0; i < numOfReadTbs; ++i) {
@ -1938,7 +2023,7 @@ int32_t tDeserializeSGetUserAuthRspImpl(SDecoder *pDecoder, SGetUserAuthRsp *pRs
value = taosMemoryCalloc(valuelen + 1, sizeof(char)); value = taosMemoryCalloc(valuelen + 1, sizeof(char));
if (tDecodeCStrTo(pDecoder, value) < 0) goto _err; if (tDecodeCStrTo(pDecoder, value) < 0) goto _err;
taosHashPut(pRsp->readTbs, key, strlen(key), value, valuelen + 1); taosHashPut(pRsp->readTbs, key, keyLen, value, valuelen + 1);
taosMemoryFreeClear(key); taosMemoryFreeClear(key);
taosMemoryFreeClear(value); taosMemoryFreeClear(value);
@ -1957,7 +2042,83 @@ int32_t tDeserializeSGetUserAuthRspImpl(SDecoder *pDecoder, SGetUserAuthRsp *pRs
value = taosMemoryCalloc(valuelen + 1, sizeof(char)); value = taosMemoryCalloc(valuelen + 1, sizeof(char));
if (tDecodeCStrTo(pDecoder, value) < 0) goto _err; if (tDecodeCStrTo(pDecoder, value) < 0) goto _err;
taosHashPut(pRsp->writeTbs, key, strlen(key), value, valuelen + 1); taosHashPut(pRsp->writeTbs, key, keyLen, value, valuelen + 1);
taosMemoryFreeClear(key);
taosMemoryFreeClear(value);
}
for (int32_t i = 0; i < numOfAlterTbs; ++i) {
int32_t keyLen = 0;
if (tDecodeI32(pDecoder, &keyLen) < 0) goto _err;
key = taosMemoryCalloc(keyLen + 1, sizeof(char));
if (tDecodeCStrTo(pDecoder, key) < 0) goto _err;
int32_t valuelen = 0;
if (tDecodeI32(pDecoder, &valuelen) < 0) goto _err;
value = taosMemoryCalloc(valuelen + 1, sizeof(char));
if (tDecodeCStrTo(pDecoder, value) < 0) goto _err;
taosHashPut(pRsp->alterTbs, key, keyLen, value, valuelen + 1);
taosMemoryFreeClear(key);
taosMemoryFreeClear(value);
}
for (int32_t i = 0; i < numOfReadViews; ++i) {
int32_t keyLen = 0;
if (tDecodeI32(pDecoder, &keyLen) < 0) goto _err;
key = taosMemoryCalloc(keyLen + 1, sizeof(char));
if (tDecodeCStrTo(pDecoder, key) < 0) goto _err;
int32_t valuelen = 0;
if (tDecodeI32(pDecoder, &valuelen) < 0) goto _err;
value = taosMemoryCalloc(valuelen + 1, sizeof(char));
if (tDecodeCStrTo(pDecoder, value) < 0) goto _err;
taosHashPut(pRsp->readViews, key, keyLen, value, valuelen + 1);
taosMemoryFreeClear(key);
taosMemoryFreeClear(value);
}
for (int32_t i = 0; i < numOfWriteViews; ++i) {
int32_t keyLen = 0;
if (tDecodeI32(pDecoder, &keyLen) < 0) goto _err;
key = taosMemoryCalloc(keyLen + 1, sizeof(char));
if (tDecodeCStrTo(pDecoder, key) < 0) goto _err;
int32_t valuelen = 0;
if (tDecodeI32(pDecoder, &valuelen) < 0) goto _err;
value = taosMemoryCalloc(valuelen + 1, sizeof(char));
if (tDecodeCStrTo(pDecoder, value) < 0) goto _err;
taosHashPut(pRsp->writeViews, key, keyLen, value, valuelen + 1);
taosMemoryFreeClear(key);
taosMemoryFreeClear(value);
}
for (int32_t i = 0; i < numOfAlterViews; ++i) {
int32_t keyLen = 0;
if (tDecodeI32(pDecoder, &keyLen) < 0) goto _err;
key = taosMemoryCalloc(keyLen + 1, sizeof(char));
if (tDecodeCStrTo(pDecoder, key) < 0) goto _err;
int32_t valuelen = 0;
if (tDecodeI32(pDecoder, &valuelen) < 0) goto _err;
value = taosMemoryCalloc(valuelen + 1, sizeof(char));
if (tDecodeCStrTo(pDecoder, value) < 0) goto _err;
taosHashPut(pRsp->alterViews, key, keyLen, value, valuelen + 1);
taosMemoryFreeClear(key); taosMemoryFreeClear(key);
taosMemoryFreeClear(value); taosMemoryFreeClear(value);
@ -1973,7 +2134,7 @@ int32_t tDeserializeSGetUserAuthRspImpl(SDecoder *pDecoder, SGetUserAuthRsp *pRs
int32_t ref = 0; int32_t ref = 0;
if (tDecodeI32(pDecoder, &ref) < 0) goto _err; if (tDecodeI32(pDecoder, &ref) < 0) goto _err;
taosHashPut(pRsp->useDbs, key, strlen(key), &ref, sizeof(ref)); taosHashPut(pRsp->useDbs, key, keyLen, &ref, sizeof(ref));
taosMemoryFreeClear(key); taosMemoryFreeClear(key);
} }
// since 3.0.7.0 // since 3.0.7.0
@ -1993,8 +2154,12 @@ _err:
taosHashCleanup(pRsp->createdDbs); taosHashCleanup(pRsp->createdDbs);
taosHashCleanup(pRsp->readDbs); taosHashCleanup(pRsp->readDbs);
taosHashCleanup(pRsp->writeDbs); taosHashCleanup(pRsp->writeDbs);
taosHashCleanup(pRsp->writeTbs);
taosHashCleanup(pRsp->readTbs); taosHashCleanup(pRsp->readTbs);
taosHashCleanup(pRsp->writeTbs);
taosHashCleanup(pRsp->alterTbs);
taosHashCleanup(pRsp->readViews);
taosHashCleanup(pRsp->writeViews);
taosHashCleanup(pRsp->alterViews);
taosHashCleanup(pRsp->useDbs); taosHashCleanup(pRsp->useDbs);
taosMemoryFreeClear(key); taosMemoryFreeClear(key);
@ -2020,8 +2185,12 @@ void tFreeSGetUserAuthRsp(SGetUserAuthRsp *pRsp) {
taosHashCleanup(pRsp->createdDbs); taosHashCleanup(pRsp->createdDbs);
taosHashCleanup(pRsp->readDbs); taosHashCleanup(pRsp->readDbs);
taosHashCleanup(pRsp->writeDbs); taosHashCleanup(pRsp->writeDbs);
taosHashCleanup(pRsp->writeTbs);
taosHashCleanup(pRsp->readTbs); taosHashCleanup(pRsp->readTbs);
taosHashCleanup(pRsp->writeTbs);
taosHashCleanup(pRsp->alterTbs);
taosHashCleanup(pRsp->readViews);
taosHashCleanup(pRsp->writeViews);
taosHashCleanup(pRsp->alterViews);
taosHashCleanup(pRsp->useDbs); taosHashCleanup(pRsp->useDbs);
} }
@ -8168,6 +8337,7 @@ int32_t tEncodeSBatchDeleteReq(SEncoder *pEncoder, const SBatchDeleteReq *pReq)
if (tEncodeSSingleDeleteReq(pEncoder, pOneReq) < 0) return -1; if (tEncodeSSingleDeleteReq(pEncoder, pOneReq) < 0) return -1;
} }
if (tEncodeI64(pEncoder, pReq->ctimeMs) < 0) return -1; if (tEncodeI64(pEncoder, pReq->ctimeMs) < 0) return -1;
if (tEncodeI8(pEncoder, pReq->level) < 0) return -1;
return 0; return 0;
} }
@ -8192,6 +8362,9 @@ int32_t tDecodeSBatchDeleteReq(SDecoder *pDecoder, SBatchDeleteReq *pReq) {
if (!tDecodeIsEnd(pDecoder)) { if (!tDecodeIsEnd(pDecoder)) {
if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1; if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1;
} }
if (!tDecodeIsEnd(pDecoder)) {
if (tDecodeI8(pDecoder, &pReq->level) < 0) return -1;
}
return 0; return 0;
} }
@ -8405,7 +8578,7 @@ void tDestroySubmitTbData(SSubmitTbData *pTbData, int32_t flag) {
} else { } else {
tDestroySVCreateTbReq(pTbData->pCreateTbReq, TSDB_MSG_FLG_DECODE); tDestroySVCreateTbReq(pTbData->pCreateTbReq, TSDB_MSG_FLG_DECODE);
} }
taosMemoryFree(pTbData->pCreateTbReq); taosMemoryFreeClear(pTbData->pCreateTbReq);
} }
if (pTbData->flags & SUBMIT_REQ_COLUMN_DATA_FORMAT) { if (pTbData->flags & SUBMIT_REQ_COLUMN_DATA_FORMAT) {
@ -8634,3 +8807,286 @@ void tDeleteMqSubTopicEp(SMqSubTopicEp *pSubTopicEp) {
pSubTopicEp->schema.nCols = 0; pSubTopicEp->schema.nCols = 0;
taosArrayDestroy(pSubTopicEp->vgs); taosArrayDestroy(pSubTopicEp->vgs);
} }
int32_t tSerializeSCMCreateViewReq(void *buf, int32_t bufLen, const SCMCreateViewReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->fullname) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->name) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->dbFName) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->querySql) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->sql) < 0) return -1;
if (tEncodeI8(&encoder, pReq->orReplace) < 0) return -1;
if (tEncodeI8(&encoder, pReq->precision) < 0) return -1;
if (tEncodeI32(&encoder, pReq->numOfCols) < 0) return -1;
for (int32_t i = 0; i < pReq->numOfCols; ++i) {
SSchema *pSchema = &pReq->pSchema[i];
if (tEncodeSSchema(&encoder, pSchema) < 0) return -1;
}
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tEncoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSCMCreateViewReq(void *buf, int32_t bufLen, SCMCreateViewReq *pReq) {
SDecoder decoder = {0};
tDecoderInit(&decoder, buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->fullname) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->name) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->dbFName) < 0) return -1;
if (tDecodeCStrAlloc(&decoder, &pReq->querySql) < 0) return -1;
if (tDecodeCStrAlloc(&decoder, &pReq->sql) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->orReplace) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->precision) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->numOfCols) < 0) return -1;
if (pReq->numOfCols > 0) {
pReq->pSchema = taosMemoryCalloc(pReq->numOfCols, sizeof(SSchema));
if (pReq->pSchema == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
for (int32_t i = 0; i < pReq->numOfCols; ++i) {
SSchema* pSchema = pReq->pSchema + i;
if (tDecodeSSchema(&decoder, pSchema) < 0) return -1;
}
}
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
void tFreeSCMCreateViewReq(SCMCreateViewReq* pReq) {
if (NULL == pReq) {
return;
}
taosMemoryFreeClear(pReq->querySql);
taosMemoryFreeClear(pReq->sql);
taosMemoryFreeClear(pReq->pSchema);
}
int32_t tSerializeSCMDropViewReq(void* buf, int32_t bufLen, const SCMDropViewReq* pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->fullname) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->name) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->dbFName) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->sql) < 0) return -1;
if (tEncodeI8(&encoder, pReq->igNotExists) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tEncoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSCMDropViewReq(void* buf, int32_t bufLen, SCMDropViewReq* pReq) {
SDecoder decoder = {0};
tDecoderInit(&decoder, buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->fullname) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->name) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->dbFName) < 0) return -1;
if (tDecodeCStrAlloc(&decoder, &pReq->sql) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->igNotExists) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
void tFreeSCMDropViewReq(SCMDropViewReq* pReq) {
if (NULL == pReq) {
return;
}
taosMemoryFree(pReq->sql);
}
int32_t tSerializeSViewMetaReq(void* buf, int32_t bufLen, const SViewMetaReq* pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->fullname) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tEncoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSViewMetaReq(void* buf, int32_t bufLen, SViewMetaReq* pReq) {
SDecoder decoder = {0};
tDecoderInit(&decoder, buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->fullname) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
static int32_t tEncodeSViewMetaRsp(SEncoder *pEncoder, const SViewMetaRsp *pRsp) {
if (tEncodeCStr(pEncoder, pRsp->name) < 0) return -1;
if (tEncodeCStr(pEncoder, pRsp->dbFName) < 0) return -1;
if (tEncodeCStr(pEncoder, pRsp->user) < 0) return -1;
if (tEncodeU64(pEncoder, pRsp->dbId) < 0) return -1;
if (tEncodeU64(pEncoder, pRsp->viewId) < 0) return -1;
if (tEncodeCStr(pEncoder, pRsp->querySql) < 0) return -1;
if (tEncodeI8(pEncoder, pRsp->precision) < 0) return -1;
if (tEncodeI8(pEncoder, pRsp->type) < 0) return -1;
if (tEncodeI32(pEncoder, pRsp->version) < 0) return -1;
if (tEncodeI32(pEncoder, pRsp->numOfCols) < 0) return -1;
for (int32_t i = 0; i < pRsp->numOfCols; ++i) {
SSchema *pSchema = &pRsp->pSchema[i];
if (tEncodeSSchema(pEncoder, pSchema) < 0) return -1;
}
return 0;
}
int32_t tSerializeSViewMetaRsp(void* buf, int32_t bufLen, const SViewMetaRsp* pRsp) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeSViewMetaRsp(&encoder, pRsp) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tEncoderClear(&encoder);
return tlen;
}
static int32_t tDecodeSViewMetaRsp(SDecoder *pDecoder, SViewMetaRsp *pRsp) {
if (tDecodeCStrTo(pDecoder, pRsp->name) < 0) return -1;
if (tDecodeCStrTo(pDecoder, pRsp->dbFName) < 0) return -1;
if (tDecodeCStrAlloc(pDecoder, &pRsp->user) < 0) return -1;
if (tDecodeU64(pDecoder, &pRsp->dbId) < 0) return -1;
if (tDecodeU64(pDecoder, &pRsp->viewId) < 0) return -1;
if (tDecodeCStrAlloc(pDecoder, &pRsp->querySql) < 0) return -1;
if (tDecodeI8(pDecoder, &pRsp->precision) < 0) return -1;
if (tDecodeI8(pDecoder, &pRsp->type) < 0) return -1;
if (tDecodeI32(pDecoder, &pRsp->version) < 0) return -1;
if (tDecodeI32(pDecoder, &pRsp->numOfCols) < 0) return -1;
if (pRsp->numOfCols > 0) {
pRsp->pSchema = taosMemoryCalloc(pRsp->numOfCols, sizeof(SSchema));
if (pRsp->pSchema == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
for (int32_t i = 0; i < pRsp->numOfCols; ++i) {
SSchema* pSchema = pRsp->pSchema + i;
if (tDecodeSSchema(pDecoder, pSchema) < 0) return -1;
}
}
return 0;
}
int32_t tDeserializeSViewMetaRsp(void* buf, int32_t bufLen, SViewMetaRsp* pRsp) {
SDecoder decoder = {0};
tDecoderInit(&decoder, buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeSViewMetaRsp(&decoder, pRsp) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
void tFreeSViewMetaRsp(SViewMetaRsp* pRsp) {
if (NULL == pRsp) {
return;
}
taosMemoryFree(pRsp->user);
taosMemoryFree(pRsp->querySql);
taosMemoryFree(pRsp->pSchema);
}
int32_t tSerializeSViewHbRsp(void *buf, int32_t bufLen, SViewHbRsp *pRsp) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
int32_t numOfMeta = taosArrayGetSize(pRsp->pViewRsp);
if (tEncodeI32(&encoder, numOfMeta) < 0) return -1;
for (int32_t i = 0; i < numOfMeta; ++i) {
SViewMetaRsp *pMetaRsp = taosArrayGetP(pRsp->pViewRsp, i);
if (tEncodeSViewMetaRsp(&encoder, pMetaRsp) < 0) return -1;
}
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tEncoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSViewHbRsp(void *buf, int32_t bufLen, SViewHbRsp *pRsp) {
SDecoder decoder = {0};
tDecoderInit(&decoder, buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
int32_t numOfMeta = 0;
if (tDecodeI32(&decoder, &numOfMeta) < 0) return -1;
pRsp->pViewRsp = taosArrayInit(numOfMeta, POINTER_BYTES);
if (pRsp->pViewRsp == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
for (int32_t i = 0; i < numOfMeta; ++i) {
SViewMetaRsp* metaRsp = taosMemoryCalloc(1, sizeof(SViewMetaRsp));
if (NULL == metaRsp) return -1;
if (tDecodeSViewMetaRsp(&decoder, metaRsp) < 0) return -1;
taosArrayPush(pRsp->pViewRsp, &metaRsp);
}
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
void tFreeSViewHbRsp(SViewHbRsp *pRsp) {
int32_t numOfMeta = taosArrayGetSize(pRsp->pViewRsp);
for (int32_t i = 0; i < numOfMeta; ++i) {
SViewMetaRsp *pMetaRsp = taosArrayGetP(pRsp->pViewRsp, i);
tFreeSViewMetaRsp(pMetaRsp);
taosMemoryFree(pMetaRsp);
}
taosArrayDestroy(pRsp->pViewRsp);
}
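A minimal caller-side sketch of how these view-request helpers are typically driven, assuming the usual two-pass pattern (a NULL destination buffer turns the encoder into a pure sizing pass) and that tmsg.h is the declaring header; viewReqRoundTrip and the sample SQL strings are hypothetical and not part of this commit:

#include <stdio.h>
#include "tmsg.h"  // assumed header for SCMCreateViewReq and its (de)serializers

static int32_t viewReqRoundTrip(void) {
  SCMCreateViewReq req = {0};
  // fullname/name/dbFName are fixed-size buffers (implied by tDecodeCStrTo above)
  snprintf(req.fullname, sizeof(req.fullname), "test_db.v1");
  snprintf(req.name, sizeof(req.name), "v1");
  snprintf(req.dbFName, sizeof(req.dbFName), "test_db");
  req.querySql = taosMemoryCalloc(1, 128);
  req.sql = taosMemoryCalloc(1, 128);
  if (req.querySql == NULL || req.sql == NULL) {
    tFreeSCMCreateViewReq(&req);
    terrno = TSDB_CODE_OUT_OF_MEMORY;
    return -1;
  }
  snprintf(req.querySql, 128, "select ts, c1 from test_db.t1");
  snprintf(req.sql, 128, "create view v1 as select ts, c1 from test_db.t1");

  // pass 1: sizing only (assumption: a NULL buffer just advances encoder.pos)
  int32_t contLen = tSerializeSCMCreateViewReq(NULL, 0, &req);
  if (contLen < 0) {
    tFreeSCMCreateViewReq(&req);
    return -1;
  }
  void *pBuf = taosMemoryCalloc(1, contLen);
  if (pBuf == NULL) {
    tFreeSCMCreateViewReq(&req);
    terrno = TSDB_CODE_OUT_OF_MEMORY;
    return -1;
  }
  // pass 2: encode into the sized buffer
  tSerializeSCMCreateViewReq(pBuf, contLen, &req);

  // decode into a fresh struct; querySql/sql/pSchema are allocated by the decoder
  SCMCreateViewReq out = {0};
  int32_t code = tDeserializeSCMCreateViewReq(pBuf, contLen, &out);

  tFreeSCMCreateViewReq(&req);
  tFreeSCMCreateViewReq(&out);
  taosMemoryFree(pBuf);
  return code;
}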

View File

@@ -296,7 +296,10 @@ static int compareKv(const void* p1, const void* p2) {
 void buildChildTableName(RandTableName* rName) {
   SStringBuilder sb = {0};
   taosStringBuilderAppendStringLen(&sb, rName->stbFullName, rName->stbFullNameLen);
-  if (sb.buf == NULL) return;
+  if (sb.buf == NULL) {
+    terrno = TSDB_CODE_OUT_OF_MEMORY;
+    return;
+  }
   taosArraySort(rName->tags, compareKv);
   for (int j = 0; j < taosArrayGetSize(rName->tags); ++j) {
     taosStringBuilderAppendChar(&sb, ',');
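Since buildChildTableName() returns void, the error added above is only visible through terrno. A hedged caller-side sketch (the wrapper name is hypothetical; RandTableName and buildChildTableName are assumed to be declared in tname.h):

#include "tname.h"      // assumed header for RandTableName / buildChildTableName
#include "taoserror.h"  // terrno, TSDB_CODE_* error codes

static int32_t buildChildTableNameChecked(RandTableName *rName) {
  terrno = TSDB_CODE_SUCCESS;
  buildChildTableName(rName);
  if (terrno != TSDB_CODE_SUCCESS) {
    return terrno;  // propagate OOM instead of silently using a half-built name
  }
  return 0;
}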

Some files were not shown because too many files have changed in this diff.