Merge branch '3.0' into refact/fillhistory

commit ceb78fdad2

@@ -16,7 +16,6 @@ debug/
release/
target/
debs/
deps/
rpms/
mac/
*.pyc

@ -1,6 +1,6 @@
|
|||
cmake_minimum_required(VERSION 3.0)
|
||||
|
||||
set(CMAKE_VERBOSE_MAKEFILE OFF)
|
||||
set(CMAKE_VERBOSE_MAKEFILE ON)
|
||||
set(TD_BUILD_TAOSA_INTERNAL FALSE)
|
||||
|
||||
#set output directory
|
||||
|
|
|
@ -172,5 +172,15 @@ ENDIF()
|
|||
|
||||
MESSAGE(STATUS "Platform arch:" ${PLATFORM_ARCH_STR})
|
||||
|
||||
set(TD_DEPS_DIR "x86")
|
||||
if (TD_LINUX)
|
||||
IF (TD_ARM_64 OR TD_ARM_32)
|
||||
set(TD_DEPS_DIR "arm")
|
||||
ELSE()
|
||||
set(TD_DEPS_DIR "x86")
|
||||
ENDIF()
|
||||
endif()
|
||||
MESSAGE(STATUS "DEPS_DIR: " ${TD_DEPS_DIR})
|
||||
|
||||
MESSAGE("C Compiler: ${CMAKE_C_COMPILER} (${CMAKE_C_COMPILER_ID}, ${CMAKE_C_COMPILER_VERSION})")
|
||||
MESSAGE("CXX Compiler: ${CMAKE_CXX_COMPILER} (${CMAKE_C_COMPILER_ID}, ${CMAKE_CXX_COMPILER_VERSION})")
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
IF (DEFINED VERNUMBER)
|
||||
SET(TD_VER_NUMBER ${VERNUMBER})
|
||||
ELSE ()
|
||||
SET(TD_VER_NUMBER "3.0.5.0")
|
||||
SET(TD_VER_NUMBER "3.0.6.0.alpha")
|
||||
ENDIF ()
|
||||
|
||||
IF (DEFINED VERCOMPATIBLE)
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
|
||||
# rocksdb
|
||||
IF (NOT ${TD_LINUX})
|
||||
ExternalProject_Add(rocksdb
|
||||
URL https://github.com/facebook/rocksdb/archive/refs/tags/v8.1.1.tar.gz
|
||||
URL_HASH MD5=3b4c97ee45df9c8a5517308d31ab008b
|
||||
|
@ -11,4 +12,4 @@ ExternalProject_Add(rocksdb
|
|||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
)
|
||||
|
||||
ENDIF(NOT ${TD_LINUX})
|
||||
|
|
|
@ -78,10 +78,18 @@ if(${BUILD_WITH_LEVELDB})
|
|||
endif(${BUILD_WITH_LEVELDB})
|
||||
|
||||
# rocksdb
|
||||
IF (NOT ${TD_LINUX})
|
||||
if(${BUILD_WITH_ROCKSDB})
|
||||
cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
add_definitions(-DUSE_ROCKSDB)
|
||||
endif(${BUILD_WITH_ROCKSDB})
|
||||
ELSE()
|
||||
if(${BUILD_WITH_ROCKSDB})
|
||||
#cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
add_definitions(-DUSE_ROCKSDB)
|
||||
endif(${BUILD_WITH_ROCKSDB})
|
||||
|
||||
ENDIF(NOT ${TD_LINUX})
|
||||
|
||||
# canonical-raft
|
||||
if(${BUILD_WITH_CRAFT})
|
||||
|
@ -175,8 +183,8 @@ if(${BUILD_TEST})
|
|||
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cpp-stub/src_darwin>
|
||||
)
|
||||
endif(${TD_DARWIN})
|
||||
|
||||
|
||||
|
||||
|
||||
endif(${BUILD_TEST})
|
||||
|
||||
# cJson
|
||||
|
@ -227,6 +235,8 @@ endif(${BUILD_WITH_LEVELDB})
|
|||
|
||||
# rocksdb
|
||||
# To support rocksdb build on ubuntu: sudo apt-get install libgflags-dev
|
||||
IF (NOT ${TD_LINUX})
|
||||
|
||||
if(${BUILD_WITH_ROCKSDB})
|
||||
if(${TD_LINUX})
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-error=unused-private-field -Wno-error=unused-result")
|
||||
|
@ -245,7 +255,7 @@ if(${BUILD_WITH_ROCKSDB})
|
|||
endif(${TD_DARWIN_ARM64})
|
||||
|
||||
if (${TD_WINDOWS})
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
|
||||
endif(${TD_WINDOWS})
|
||||
|
||||
|
||||
|
@ -263,8 +273,8 @@ if(${BUILD_WITH_ROCKSDB})
|
|||
option(WITH_MD_LIBRARY "build with MD" OFF)
|
||||
set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
|
||||
endif(${TD_WINDOWS})
|
||||
|
||||
|
||||
|
||||
|
||||
option(WITH_FALLOCATE "" OFF)
|
||||
option(WITH_JEMALLOC "" OFF)
|
||||
option(WITH_GFLAGS "" OFF)
|
||||
|
@ -276,7 +286,7 @@ if(${BUILD_WITH_ROCKSDB})
|
|||
option(WITH_BENCHMARK_TOOLS "" OFF)
|
||||
option(WITH_TOOLS "" OFF)
|
||||
option(WITH_LIBURING "" OFF)
|
||||
IF (TD_LINUX)
|
||||
IF (TD_LINUX)
|
||||
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
|
||||
ELSE()
|
||||
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
|
||||
|
@ -288,16 +298,17 @@ if(${BUILD_WITH_ROCKSDB})
|
|||
)
|
||||
endif(${BUILD_WITH_ROCKSDB})
|
||||
|
||||
ENDIF(NOT ${TD_LINUX})
|
||||
# lucene
|
||||
# To support build on ubuntu: sudo apt-get install libboost-all-dev
|
||||
if(${BUILD_WITH_LUCENE})
|
||||
option(ENABLE_TEST "Enable the tests" OFF)
|
||||
add_subdirectory(lucene EXCLUDE_FROM_ALL)
|
||||
target_include_directories(
|
||||
lucene++
|
||||
lucene++
|
||||
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/lucene/include>
|
||||
)
|
||||
|
||||
)
|
||||
|
||||
endif(${BUILD_WITH_LUCENE})
|
||||
|
||||
# NuRaft
|
||||
|
@ -357,7 +368,7 @@ if(${BUILD_MSVCREGEX})
|
|||
target_include_directories(msvcregex
|
||||
PRIVATE "msvcregex"
|
||||
)
|
||||
target_link_libraries(msvcregex
|
||||
target_link_libraries(msvcregex
|
||||
INTERFACE Shell32
|
||||
)
|
||||
SET_TARGET_PROPERTIES(msvcregex PROPERTIES OUTPUT_NAME msvcregex)
|
||||
|
@ -417,8 +428,8 @@ if(${BUILD_WITH_BDB})
|
|||
IMPORTED_LOCATION "${CMAKE_CURRENT_SOURCE_DIR}/bdb/libdb.a"
|
||||
INTERFACE_INCLUDE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/bdb"
|
||||
)
|
||||
target_link_libraries(bdb
|
||||
INTERFACE pthread
|
||||
target_link_libraries(bdb
|
||||
INTERFACE pthread
|
||||
)
|
||||
endif(${BUILD_WITH_BDB})
|
||||
|
||||
|
@ -430,12 +441,12 @@ if(${BUILD_WITH_SQLITE})
|
|||
IMPORTED_LOCATION "${CMAKE_CURRENT_SOURCE_DIR}/sqlite/.libs/libsqlite3.a"
|
||||
INTERFACE_INCLUDE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/sqlite"
|
||||
)
|
||||
target_link_libraries(sqlite
|
||||
INTERFACE m
|
||||
INTERFACE pthread
|
||||
target_link_libraries(sqlite
|
||||
INTERFACE m
|
||||
INTERFACE pthread
|
||||
)
|
||||
if(NOT TD_WINDOWS)
|
||||
target_link_libraries(sqlite
|
||||
target_link_libraries(sqlite
|
||||
INTERFACE dl
|
||||
)
|
||||
endif(NOT TD_WINDOWS)
|
||||
|
@ -443,22 +454,22 @@ endif(${BUILD_WITH_SQLITE})
|
|||
|
||||
# addr2line
|
||||
if(${BUILD_ADDR2LINE})
|
||||
if(NOT ${TD_WINDOWS})
|
||||
check_include_file( "sys/types.h" HAVE_SYS_TYPES_H)
|
||||
check_include_file( "sys/stat.h" HAVE_SYS_STAT_H )
|
||||
check_include_file( "inttypes.h" HAVE_INTTYPES_H )
|
||||
check_include_file( "stddef.h" HAVE_STDDEF_H )
|
||||
check_include_file( "stdlib.h" HAVE_STDLIB_H )
|
||||
check_include_file( "string.h" HAVE_STRING_H )
|
||||
check_include_file( "memory.h" HAVE_MEMORY_H )
|
||||
check_include_file( "strings.h" HAVE_STRINGS_H )
|
||||
if(NOT ${TD_WINDOWS})
|
||||
check_include_file( "sys/types.h" HAVE_SYS_TYPES_H)
|
||||
check_include_file( "sys/stat.h" HAVE_SYS_STAT_H )
|
||||
check_include_file( "inttypes.h" HAVE_INTTYPES_H )
|
||||
check_include_file( "stddef.h" HAVE_STDDEF_H )
|
||||
check_include_file( "stdlib.h" HAVE_STDLIB_H )
|
||||
check_include_file( "string.h" HAVE_STRING_H )
|
||||
check_include_file( "memory.h" HAVE_MEMORY_H )
|
||||
check_include_file( "strings.h" HAVE_STRINGS_H )
|
||||
check_include_file( "stdint.h" HAVE_STDINT_H )
|
||||
check_include_file( "unistd.h" HAVE_UNISTD_H )
|
||||
check_include_file( "sgidefs.h" HAVE_SGIDEFS_H )
|
||||
check_include_file( "stdafx.h" HAVE_STDAFX_H )
|
||||
check_include_file( "elf.h" HAVE_ELF_H )
|
||||
check_include_file( "libelf.h" HAVE_LIBELF_H )
|
||||
check_include_file( "libelf/libelf.h" HAVE_LIBELF_LIBELF_H)
|
||||
check_include_file( "elf.h" HAVE_ELF_H )
|
||||
check_include_file( "libelf.h" HAVE_LIBELF_H )
|
||||
check_include_file( "libelf/libelf.h" HAVE_LIBELF_LIBELF_H)
|
||||
check_include_file( "alloca.h" HAVE_ALLOCA_H )
|
||||
check_include_file( "elfaccess.h" HAVE_ELFACCESS_H)
|
||||
check_include_file( "sys/elf_386.h" HAVE_SYS_ELF_386_H )
|
||||
|
@ -466,7 +477,7 @@ if(${BUILD_ADDR2LINE})
|
|||
check_include_file( "sys/elf_sparc.h" HAVE_SYS_ELF_SPARC_H)
|
||||
check_include_file( "sys/ia64/elf.h" HAVE_SYS_IA64_ELF_H )
|
||||
set(VERSION 0.3.1)
|
||||
set(PACKAGE_VERSION "\"${VERSION}\"")
|
||||
set(PACKAGE_VERSION "\"${VERSION}\"")
|
||||
configure_file(libdwarf/cmake/config.h.cmake config.h)
|
||||
file(GLOB_RECURSE LIBDWARF_SOURCES "libdwarf/src/lib/libdwarf/*.c")
|
||||
add_library(libdwarf STATIC ${LIBDWARF_SOURCES})
|
||||
|
@ -497,6 +508,7 @@ if(${BUILD_GEOS})
|
|||
endif(${TD_LINUX})
|
||||
option(BUILD_SHARED_LIBS "Build GEOS with shared libraries" OFF)
|
||||
add_subdirectory(geos EXCLUDE_FROM_ALL)
|
||||
unset(CMAKE_CXX_STANDARD CACHE) # undo libgeos's setting of global CMAKE_CXX_STANDARD
|
||||
target_include_directories(
|
||||
geos_c
|
||||
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/geos/include>
|
||||
|
|
@@ -667,6 +667,137 @@ Insert with req_id argument

</TabItem>
</Tabs>

### Parameter Binding

The Python connector provides a parameter binding API for inserting data. Like most databases, TDengine currently only supports the question mark `?` as the placeholder for the parameters to be bound.

<Tabs>
<TabItem value="native" label="native connection">

#### Create Stmt

Call the `statement` method of the `Connection` object to create the `stmt` object used for parameter binding.

```python
import taos

conn = taos.connect()
stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
```

#### Bind Parameters

Call the `new_multi_binds` function to create the parameter list used for parameter binding.

```python
from taos import new_multi_binds

params = new_multi_binds(16)
params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
params[1].bool((True, None, False))
params[2].tinyint([-128, -128, None]) # -128 is tinyint null
params[3].tinyint([0, 127, None])
params[4].smallint([3, None, 2])
params[5].int([3, 4, None])
params[6].bigint([3, 4, None])
params[7].tinyint_unsigned([3, 4, None])
params[8].smallint_unsigned([3, 4, None])
params[9].int_unsigned([3, 4, None])
params[10].bigint_unsigned([3, 4, None])
params[11].float([3, None, 1])
params[12].double([3, None, 1.2])
params[13].binary(["abc", "dddafadfadfadfadfa", None])
params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
params[15].timestamp([None, None, 1626861392591])
```

Call the `bind_param` method (for a single row) or the `bind_param_batch` method (for multiple rows) to set the values.

```python
stmt.bind_param_batch(params)
```
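
For single-row writes, a per-row bind can be used instead of the batch call above. This is only a minimal sketch: it assumes a `new_bind_params` helper in the same `taos` module whose per-column setters mirror those of `new_multi_binds` but take scalar values.

```python
from taos import new_bind_params  # assumption: single-row counterpart of new_multi_binds

# Hypothetical single-row variant of the batch example above.
row = new_bind_params(16)
row[0].timestamp(1626861392589)
row[1].bool(True)
row[2].tinyint(-128)          # -128 is tinyint null
row[3].tinyint(0)
row[4].smallint(3)
row[5].int(3)
row[6].bigint(3)
row[7].tinyint_unsigned(3)
row[8].smallint_unsigned(3)
row[9].int_unsigned(3)
row[10].bigint_unsigned(3)
row[11].float(3)
row[12].double(3)
row[13].binary("abc")
row[14].nchar("涛思数据")
row[15].timestamp(1626861392589)

stmt.bind_param(row)  # binds one row instead of a column-wise batch
```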

#### Execute SQL

Call the `execute` method to execute the prepared SQL statement.

```python
stmt.execute()
```

#### Close Stmt

```python
stmt.close()
```

#### Example

```python
{{#include docs/examples/python/stmt_example.py}}
```
</TabItem>

<TabItem value="websocket" label="WebSocket connection">

#### Create Stmt

Call the `statement` method of the `Connection` object to create the `stmt` object for parameter binding. Unlike the native connection, the WebSocket `statement` method takes no arguments; the SQL statement is supplied afterwards via `prepare`.

```python
import taosws

conn = taosws.connect('taosws://localhost:6041/test')
stmt = conn.statement()
```

#### Prepare SQL

Call the `prepare` method of the stmt object to parse the SQL statement to be bound.

```python
stmt.prepare("insert into t1 values (?, ?, ?, ?)")
```

#### Bind Parameters

Call the `bind_param` method to bind the parameter columns.

```python
stmt.bind_param([
    taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
    taosws.ints_to_column([1, 2, 3, 4]),
    taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
    taosws.varchar_to_column(['a', 'b', 'c', 'd']),
])
```

Call the `add_batch` method to add the bound parameters to the current batch.

```python
stmt.add_batch()
```
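
The same prepare/bind/add_batch flow also covers super tables. The sketch below condenses the `stmt_insert_into_stable` function from the example file added in this change; the child table name, tag values, and column values are illustrative only, and it assumes the super table `stb1` created in that example's setup step.

```python
# Sketch: binding into a super table (mirrors stmt_insert_into_stable in the new example file).
stmt = conn.statement()
stmt.prepare("insert into ? using stb1 tags (?, ?) values (?, ?, ?, ?)")
stmt.set_tbname('stb1_1')            # child table to write into
stmt.set_tags([
    taosws.int_to_tag(1),
    taosws.varchar_to_tag('aaa'),
])
stmt.bind_param([
    taosws.millis_timestamps_to_column([1686844800000, 1686844801000]),
    taosws.ints_to_column([1, 2]),
    taosws.floats_to_column([1.1, 2.2]),
    taosws.varchar_to_column(['a', 'b']),
])
stmt.add_batch()                     # one batch per child table before execute
```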

#### Execute SQL

Call the `execute` method to execute the prepared SQL statement.

```python
stmt.execute()
```
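
As the `stmt_websocket_example.py` file added in this change shows, `execute` returns the number of rows written, so the result can be checked directly:

```python
rows = stmt.execute()
assert rows == 4  # four rows were bound in the batch above
```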

#### Close Stmt

```python
stmt.close()
```

#### Example

```python
{{#include docs/examples/python/stmt_websocket_example.py}}
```
</TabItem>
</Tabs>

### Other sample programs

| Example program links | Example program content |

@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w

import Release from "/components/ReleaseV3";

## 3.0.5.1

<Release type="tdengine" version="3.0.5.1" />

## 3.0.5.0

<Release type="tdengine" version="3.0.5.0" />

@@ -10,6 +10,10 @@ For other historical version installers, please visit [here](https://www.taosdat

import Release from "/components/ReleaseV3";

## 2.5.2

<Release type="tools" version="2.5.2" />

## 2.5.1

<Release type="tools" version="2.5.1" />

@ -0,0 +1,82 @@
|
|||
#!
|
||||
|
||||
import taosws
|
||||
|
||||
import taos
|
||||
|
||||
db_name = 'test_ws_stmt'
|
||||
|
||||
|
||||
def before():
|
||||
taos_conn = taos.connect()
|
||||
taos_conn.execute("drop database if exists %s" % db_name)
|
||||
taos_conn.execute("create database %s" % db_name)
|
||||
taos_conn.select_db(db_name)
|
||||
taos_conn.execute("create table t1 (ts timestamp, a int, b float, c varchar(10))")
|
||||
taos_conn.execute(
|
||||
"create table stb1 (ts timestamp, a int, b float, c varchar(10)) tags (t1 int, t2 binary(10))")
|
||||
taos_conn.close()
|
||||
|
||||
|
||||
def stmt_insert():
|
||||
before()
|
||||
|
||||
conn = taosws.connect('taosws://root:taosdata@localhost:6041/%s' % db_name)
|
||||
|
||||
while True:
|
||||
try:
|
||||
stmt = conn.statement()
|
||||
stmt.prepare("insert into t1 values (?, ?, ?, ?)")
|
||||
|
||||
stmt.bind_param([
|
||||
taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
|
||||
taosws.ints_to_column([1, 2, 3, 4]),
|
||||
taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
|
||||
taosws.varchar_to_column(['a', 'b', 'c', 'd']),
|
||||
])
|
||||
|
||||
stmt.add_batch()
|
||||
rows = stmt.execute()
|
||||
print(rows)
|
||||
stmt.close()
|
||||
except Exception as e:
|
||||
if 'Retry needed' in e.args[0]: # deal with [0x0125] Retry needed
|
||||
continue
|
||||
else:
|
||||
raise e
|
||||
|
||||
break
|
||||
|
||||
|
||||
def stmt_insert_into_stable():
|
||||
before()
|
||||
|
||||
conn = taosws.connect("taosws://root:taosdata@localhost:6041/%s" % db_name)
|
||||
|
||||
while True:
|
||||
try:
|
||||
stmt = conn.statement()
|
||||
stmt.prepare("insert into ? using stb1 tags (?, ?) values (?, ?, ?, ?)")
|
||||
stmt.set_tbname('stb1_1')
|
||||
stmt.set_tags([
|
||||
taosws.int_to_tag(1),
|
||||
taosws.varchar_to_tag('aaa'),
|
||||
])
|
||||
stmt.bind_param([
|
||||
taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
|
||||
taosws.ints_to_column([1, 2, 3, 4]),
|
||||
taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
|
||||
taosws.varchar_to_column(['a', 'b', 'c', 'd']),
|
||||
])
|
||||
|
||||
stmt.add_batch()
|
||||
rows = stmt.execute()
|
||||
print(rows)
|
||||
stmt.close()
|
||||
except Exception as e:
|
||||
if 'Retry needed' in e.args[0]: # deal with [0x0125] Retry needed
|
||||
continue
|
||||
else:
|
||||
raise e
|
||||
|
||||
break
|
|
@ -0,0 +1,78 @@
|
|||
#!
|
||||
import time
|
||||
|
||||
import taosws
|
||||
|
||||
import taos
|
||||
|
||||
|
||||
def before_test(db_name):
|
||||
taos_conn = taos.connect()
|
||||
taos_conn.execute("drop database if exists %s" % db_name)
|
||||
taos_conn.execute("create database %s" % db_name)
|
||||
taos_conn.select_db(db_name)
|
||||
taos_conn.execute("create table t1 (ts timestamp, a int, b float, c varchar(10))")
|
||||
taos_conn.execute(
|
||||
"create table stb1 (ts timestamp, a int, b float, c varchar(10)) tags (t1 int, t2 binary(10))")
|
||||
taos_conn.close()
|
||||
|
||||
|
||||
def after_test(db_name):
|
||||
taos_conn = taos.connect()
|
||||
taos_conn.execute("drop database if exists %s" % db_name)
|
||||
taos_conn.close()
|
||||
|
||||
|
||||
def stmt_insert():
|
||||
db_name = 'test_ws_stmt_{}'.format(int(time.time()))
|
||||
before_test(db_name)
|
||||
|
||||
conn = taosws.connect('taosws://root:taosdata@localhost:6041/%s' % db_name)
|
||||
|
||||
stmt = conn.statement()
|
||||
stmt.prepare("insert into t1 values (?, ?, ?, ?)")
|
||||
|
||||
stmt.bind_param([
|
||||
taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
|
||||
taosws.ints_to_column([1, 2, 3, 4]),
|
||||
taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
|
||||
taosws.varchar_to_column(['a', 'b', 'c', 'd']),
|
||||
])
|
||||
|
||||
stmt.add_batch()
|
||||
rows = stmt.execute()
|
||||
assert rows == 4
|
||||
stmt.close()
|
||||
after_test(db_name)
|
||||
|
||||
|
||||
def stmt_insert_into_stable():
|
||||
db_name = 'test_ws_stmt_{}'.format(int(time.time()))
|
||||
before_test(db_name)
|
||||
|
||||
conn = taosws.connect("taosws://root:taosdata@localhost:6041/%s" % db_name)
|
||||
|
||||
stmt = conn.statement()
|
||||
stmt.prepare("insert into ? using stb1 tags (?, ?) values (?, ?, ?, ?)")
|
||||
stmt.set_tbname('stb1_1')
|
||||
stmt.set_tags([
|
||||
taosws.int_to_tag(1),
|
||||
taosws.varchar_to_tag('aaa'),
|
||||
])
|
||||
stmt.bind_param([
|
||||
taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
|
||||
taosws.ints_to_column([1, 2, 3, 4]),
|
||||
taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
|
||||
taosws.varchar_to_column(['a', 'b', 'c', 'd']),
|
||||
])
|
||||
|
||||
stmt.add_batch()
|
||||
rows = stmt.execute()
|
||||
assert rows == 4
|
||||
stmt.close()
|
||||
after_test(db_name)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
stmt_insert()
|
||||
stmt_insert_into_stable()
|
|
@@ -672,6 +672,141 @@ consumer.close()

</TabItem>
</Tabs>

### Writing Data via Parameter Binding

The TDengine Python connector supports writing data through a prepare-style parameter binding API. As in most databases, only the question mark `?` is currently supported as the placeholder for the parameters to be bound.

<Tabs>
<TabItem value="native" label="native connection">

#### Create Stmt

The connector's `Connection` provides a `statement` method for creating the parameter binding object stmt. The method takes an SQL string as its argument; the SQL string currently only supports `?` as the placeholder for the bound parameters.

```python
import taos

conn = taos.connect()
stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
```

#### Bind Parameters

Call the `new_multi_binds` function to create the params list used for parameter binding.

```python
params = new_multi_binds(16)
params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
params[1].bool((True, None, False))
params[2].tinyint([-128, -128, None]) # -128 is tinyint null
params[3].tinyint([0, 127, None])
params[4].smallint([3, None, 2])
params[5].int([3, 4, None])
params[6].bigint([3, 4, None])
params[7].tinyint_unsigned([3, 4, None])
params[8].smallint_unsigned([3, 4, None])
params[9].int_unsigned([3, 4, None])
params[10].bigint_unsigned([3, 4, None])
params[11].float([3, None, 1])
params[12].double([3, None, 1.2])
params[13].binary(["abc", "dddafadfadfadfadfa", None])
params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
params[15].timestamp([None, None, 1626861392591])
```

Call the stmt's `bind_param` method to set the values row by row, or `bind_param_batch` to set the values for multiple rows at once.

```python
stmt.bind_param_batch(params)
```

#### Execute SQL

Call the stmt's `execute` method to run the SQL statement.

```python
stmt.execute()
```

#### Close Stmt

Finally, the stmt must be closed.

```python
stmt.close()
```

#### Example

```python
{{#include docs/examples/python/stmt_example.py}}
```
</TabItem>

<TabItem value="websocket" label="WebSocket connection">

#### Create Stmt

The Python WebSocket connector's `Connection` also provides a `statement` method for creating the parameter binding object stmt; here the SQL statement (which likewise only supports `?` placeholders) is supplied afterwards via `prepare`.

```python
import taosws

conn = taosws.connect('taosws://localhost:6041/test')
stmt = conn.statement()
```

#### Prepare SQL

Call the stmt's `prepare` method to parse the INSERT statement.

```python
stmt.prepare("insert into t1 values (?, ?, ?, ?)")
```

#### Bind Parameters

Call the stmt's `bind_param` method to bind the parameters.

```python
stmt.bind_param([
    taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
    taosws.ints_to_column([1, 2, 3, 4]),
    taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
    taosws.varchar_to_column(['a', 'b', 'c', 'd']),
])
```

Call the stmt's `add_batch` method to add the bound parameters to the batch.

```python
stmt.add_batch()
```

#### Execute SQL

Call the stmt's `execute` method to run the SQL statement.

```python
stmt.execute()
```

#### Close Stmt

Finally, the stmt must be closed.

```python
stmt.close()
```

#### Example

```python
{{#include docs/examples/python/stmt_websocket_example.py}}
```
</TabItem>
</Tabs>

### Other sample programs

| Example program links | Example program content |

@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://www.taosdata.com/all-do

import Release from "/components/ReleaseV3";

## 3.0.5.1

<Release type="tdengine" version="3.0.5.1" />

## 3.0.5.0

<Release type="tdengine" version="3.0.5.0" />

@@ -10,6 +10,10 @@ Download links for taosTools by version are listed below:

import Release from "/components/ReleaseV3";

## 2.5.2

<Release type="tools" version="2.5.2" />

## 2.5.1

<Release type="tools" version="2.5.1" />

@ -203,6 +203,7 @@ typedef struct SDataBlockInfo {
|
|||
SBlockID id;
|
||||
int16_t hasVarCol;
|
||||
int16_t dataLoad; // denote if the data is loaded or not
|
||||
uint8_t scanFlag;
|
||||
|
||||
// TODO: optimize and remove following
|
||||
int64_t version; // used for stream, and need serialization
|
||||
|
|
|
@ -53,6 +53,8 @@ typedef struct SLogicNode {
|
|||
EDataOrderLevel requireDataOrder; // requirements for input data
|
||||
EDataOrderLevel resultDataOrder; // properties of the output data
|
||||
EGroupAction groupAction;
|
||||
EOrder inputTsOrder;
|
||||
EOrder outputTsOrder;
|
||||
} SLogicNode;
|
||||
|
||||
typedef enum EScanType {
|
||||
|
@ -111,7 +113,6 @@ typedef struct SJoinLogicNode {
|
|||
SNode* pMergeCondition;
|
||||
SNode* pOnConditions;
|
||||
bool isSingleTableJoin;
|
||||
EOrder inputTsOrder;
|
||||
SNode* pColEqualOnConditions;
|
||||
} SJoinLogicNode;
|
||||
|
||||
|
@ -229,8 +230,6 @@ typedef struct SWindowLogicNode {
|
|||
int8_t igExpired;
|
||||
int8_t igCheckUpdate;
|
||||
EWindowAlgorithm windowAlgo;
|
||||
EOrder inputTsOrder;
|
||||
EOrder outputTsOrder;
|
||||
} SWindowLogicNode;
|
||||
|
||||
typedef struct SFillLogicNode {
|
||||
|
@ -241,7 +240,6 @@ typedef struct SFillLogicNode {
|
|||
SNode* pWStartTs;
|
||||
SNode* pValues; // SNodeListNode
|
||||
STimeWindow timeRange;
|
||||
EOrder inputTsOrder;
|
||||
} SFillLogicNode;
|
||||
|
||||
typedef struct SSortLogicNode {
|
||||
|
@ -310,6 +308,8 @@ typedef struct SDataBlockDescNode {
|
|||
|
||||
typedef struct SPhysiNode {
|
||||
ENodeType type;
|
||||
EOrder inputTsOrder;
|
||||
EOrder outputTsOrder;
|
||||
SDataBlockDescNode* pOutputDataBlockDesc;
|
||||
SNode* pConditions;
|
||||
SNodeList* pChildren;
|
||||
|
@ -406,7 +406,6 @@ typedef struct SSortMergeJoinPhysiNode {
|
|||
SNode* pMergeCondition;
|
||||
SNode* pOnConditions;
|
||||
SNodeList* pTargets;
|
||||
EOrder inputTsOrder;
|
||||
SNode* pColEqualOnConditions;
|
||||
} SSortMergeJoinPhysiNode;
|
||||
|
||||
|
@ -460,8 +459,6 @@ typedef struct SWindowPhysiNode {
|
|||
int64_t watermark;
|
||||
int64_t deleteMark;
|
||||
int8_t igExpired;
|
||||
EOrder inputTsOrder;
|
||||
EOrder outputTsOrder;
|
||||
bool mergeDataBlock;
|
||||
} SWindowPhysiNode;
|
||||
|
||||
|
@ -488,7 +485,6 @@ typedef struct SFillPhysiNode {
|
|||
SNode* pWStartTs; // SColumnNode
|
||||
SNode* pValues; // SNodeListNode
|
||||
STimeWindow timeRange;
|
||||
EOrder inputTsOrder;
|
||||
} SFillPhysiNode;
|
||||
|
||||
typedef SFillPhysiNode SStreamFillPhysiNode;
|
||||
|
|
|
@ -69,6 +69,7 @@ typedef struct SColumnNode {
|
|||
uint64_t tableId;
|
||||
int8_t tableType;
|
||||
col_id_t colId;
|
||||
uint16_t projIdx; // the idx in project list, start from 1
|
||||
EColumnType colType; // column or tag
|
||||
bool hasIndex;
|
||||
char dbName[TSDB_DB_NAME_LEN];
|
||||
|
|
|
@ -281,7 +281,8 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
|
|||
(_code) == TSDB_CODE_PAR_INVALID_DROP_COL || ((_code) == TSDB_CODE_TDB_INVALID_TABLE_ID))
|
||||
#define NEED_CLIENT_REFRESH_VG_ERROR(_code) \
|
||||
((_code) == TSDB_CODE_VND_HASH_MISMATCH || (_code) == TSDB_CODE_VND_INVALID_VGROUP_ID)
|
||||
#define NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code) ((_code) == TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER)
|
||||
#define NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code) \
|
||||
((_code) == TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER || (_code) == TSDB_CODE_MND_INVALID_SCHEMA_VER)
|
||||
#define NEED_CLIENT_HANDLE_ERROR(_code) \
|
||||
(NEED_CLIENT_RM_TBLMETA_ERROR(_code) || NEED_CLIENT_REFRESH_VG_ERROR(_code) || \
|
||||
NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code))
|
||||
|
|
|
@ -1120,6 +1120,7 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
|
|||
if (NEED_CLIENT_HANDLE_ERROR(code)) {
|
||||
tscDebug("0x%" PRIx64 " client retry to handle the error, code:%d - %s, tryCount:%d, reqId:0x%" PRIx64,
|
||||
pRequest->self, code, tstrerror(code), pRequest->retry, pRequest->requestId);
|
||||
refreshMeta(pRequest->pTscObj, pRequest);
|
||||
pRequest->prevCode = code;
|
||||
doAsyncQuery(pRequest, true);
|
||||
return;
|
||||
|
|
|
@ -783,8 +783,8 @@ size_t blockDataGetRowSize(SSDataBlock* pBlock) {
|
|||
* @return
|
||||
*/
|
||||
size_t blockDataGetSerialMetaSize(uint32_t numOfCols) {
|
||||
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column
|
||||
// length |
|
||||
// | version | total length | total rows | total columns | flag seg| block group id | column schema
|
||||
// | each column length |
|
||||
return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(uint64_t) +
|
||||
numOfCols * (sizeof(int8_t) + sizeof(int32_t)) + numOfCols * sizeof(int32_t);
|
||||
}
|
||||
|
|
|
@ -46,6 +46,7 @@ typedef struct {
|
|||
int32_t vgId;
|
||||
int32_t vgVersion;
|
||||
int8_t dropped;
|
||||
int32_t toVgId;
|
||||
char path[PATH_MAX + 20];
|
||||
} SWrapperCfg;
|
||||
|
||||
|
@ -55,6 +56,7 @@ typedef struct {
|
|||
int32_t refCount;
|
||||
int8_t dropped;
|
||||
int8_t disable;
|
||||
int32_t toVgId;
|
||||
char *path;
|
||||
SVnode *pImpl;
|
||||
SMultiWorker pWriteW;
|
||||
|
@ -70,6 +72,7 @@ typedef struct {
|
|||
int32_t vnodeNum;
|
||||
int32_t opened;
|
||||
int32_t failed;
|
||||
bool updateVnodesList;
|
||||
int32_t threadIndex;
|
||||
TdThread thread;
|
||||
SVnodeMgmt *pMgmt;
|
||||
|
|
|
@ -71,6 +71,8 @@ static int32_t vmDecodeVnodeList(SJson *pJson, SVnodeMgmt *pMgmt, SWrapperCfg **
|
|||
if (code < 0) goto _OVER;
|
||||
tjsonGetInt32ValueFromDouble(vnode, "vgVersion", pCfg->vgVersion, code);
|
||||
if (code < 0) goto _OVER;
|
||||
tjsonGetInt32ValueFromDouble(vnode, "toVgId", pCfg->toVgId, code);
|
||||
if (code < 0) goto _OVER;
|
||||
|
||||
snprintf(pCfg->path, sizeof(pCfg->path), "%s%svnode%d", pMgmt->path, TD_DIRSEP, pCfg->vgId);
|
||||
}
|
||||
|
@ -165,6 +167,7 @@ static int32_t vmEncodeVnodeList(SJson *pJson, SVnodeObj **ppVnodes, int32_t num
|
|||
if (tjsonAddDoubleToObject(vnode, "vgId", pVnode->vgId) < 0) return -1;
|
||||
if (tjsonAddDoubleToObject(vnode, "dropped", pVnode->dropped) < 0) return -1;
|
||||
if (tjsonAddDoubleToObject(vnode, "vgVersion", pVnode->vgVersion) < 0) return -1;
|
||||
if (pVnode->toVgId && tjsonAddDoubleToObject(vnode, "toVgId", pVnode->toVgId) < 0) return -1;
|
||||
if (tjsonAddItemToArray(vnodes, vnode) < 0) return -1;
|
||||
}
|
||||
|
||||
|
@ -179,7 +182,7 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) {
|
|||
SVnodeObj **ppVnodes = NULL;
|
||||
char file[PATH_MAX] = {0};
|
||||
char realfile[PATH_MAX] = {0};
|
||||
snprintf(file, sizeof(file), "%s%svnodes.json.bak", pMgmt->path, TD_DIRSEP);
|
||||
snprintf(file, sizeof(file), "%s%svnodes_tmp.json", pMgmt->path, TD_DIRSEP);
|
||||
snprintf(realfile, sizeof(realfile), "%s%svnodes.json", pMgmt->path, TD_DIRSEP);
|
||||
|
||||
int32_t numOfVnodes = 0;
|
||||
|
@ -226,4 +229,4 @@ _OVER:
|
|||
dError("failed to write vnodes file:%s since %s, vnodes:%d", realfile, terrstr(), numOfVnodes);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -484,10 +484,18 @@ int32_t vmProcessAlterHashRangeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
|||
|
||||
int32_t srcVgId = req.srcVgId;
|
||||
int32_t dstVgId = req.dstVgId;
|
||||
|
||||
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, dstVgId);
|
||||
if (pVnode != NULL) {
|
||||
dError("vgId:%d, vnode already exist", dstVgId);
|
||||
vmReleaseVnode(pMgmt, pVnode);
|
||||
terrno = TSDB_CODE_VND_ALREADY_EXIST;
|
||||
return -1;
|
||||
}
|
||||
|
||||
dInfo("vgId:%d, start to alter vnode hashrange:[%u, %u], dstVgId:%d", req.srcVgId, req.hashBegin, req.hashEnd,
|
||||
req.dstVgId);
|
||||
|
||||
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, srcVgId);
|
||||
pVnode = vmAcquireVnode(pMgmt, srcVgId);
|
||||
if (pVnode == NULL) {
|
||||
dError("vgId:%d, failed to alter hashrange since %s", srcVgId, terrstr());
|
||||
terrno = TSDB_CODE_VND_NOT_EXIST;
|
||||
|
@ -501,6 +509,13 @@ int32_t vmProcessAlterHashRangeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
|||
};
|
||||
tstrncpy(wrapperCfg.path, pVnode->path, sizeof(wrapperCfg.path));
|
||||
|
||||
// prepare alter
|
||||
pVnode->toVgId = dstVgId;
|
||||
if (vmWriteVnodeListToFile(pMgmt) != 0) {
|
||||
dError("vgId:%d, failed to write vnode list since %s", dstVgId, terrstr());
|
||||
return -1;
|
||||
}
|
||||
|
||||
dInfo("vgId:%d, close vnode", srcVgId);
|
||||
vmCloseVnode(pMgmt, pVnode, true);
|
||||
|
||||
|
@ -532,6 +547,7 @@ int32_t vmProcessAlterHashRangeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
|||
return -1;
|
||||
}
|
||||
|
||||
// complete alter
|
||||
if (vmWriteVnodeListToFile(pMgmt) != 0) {
|
||||
dError("vgId:%d, failed to write vnode list since %s", dstVgId, terrstr());
|
||||
return -1;
|
||||
|
|
|
@ -158,6 +158,28 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal)
|
|||
taosMemoryFree(pVnode);
|
||||
}
|
||||
|
||||
static int32_t vmRestoreVgroupId(SWrapperCfg *pCfg, STfs *pTfs) {
|
||||
int32_t srcVgId = pCfg->vgId;
|
||||
int32_t dstVgId = pCfg->toVgId;
|
||||
if (dstVgId == 0) return 0;
|
||||
|
||||
char srcPath[TSDB_FILENAME_LEN];
|
||||
char dstPath[TSDB_FILENAME_LEN];
|
||||
|
||||
snprintf(srcPath, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, srcVgId);
|
||||
snprintf(dstPath, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, dstVgId);
|
||||
|
||||
int32_t vgId = vnodeRestoreVgroupId(srcPath, dstPath, srcVgId, dstVgId, pTfs);
|
||||
if (vgId <= 0) {
|
||||
dError("vgId:%d, failed to restore vgroup id. srcPath: %s", pCfg->vgId, srcPath);
|
||||
return -1;
|
||||
}
|
||||
|
||||
pCfg->vgId = vgId;
|
||||
pCfg->toVgId = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void *vmOpenVnodeInThread(void *param) {
|
||||
SVnodeThread *pThread = param;
|
||||
SVnodeMgmt *pMgmt = pThread->pMgmt;
|
||||
|
@ -174,17 +196,33 @@ static void *vmOpenVnodeInThread(void *param) {
|
|||
pMgmt->state.openVnodes, pMgmt->state.totalVnodes);
|
||||
tmsgReportStartup("vnode-open", stepDesc);
|
||||
|
||||
if (pCfg->toVgId) {
|
||||
if (vmRestoreVgroupId(pCfg, pMgmt->pTfs) != 0) {
|
||||
dError("vgId:%d, failed to restore vgroup id by thread:%d", pCfg->vgId, pThread->threadIndex);
|
||||
pThread->failed++;
|
||||
continue;
|
||||
}
|
||||
pThread->updateVnodesList = true;
|
||||
}
|
||||
|
||||
snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, pCfg->vgId);
|
||||
|
||||
SVnode *pImpl = vnodeOpen(path, pMgmt->pTfs, pMgmt->msgCb);
|
||||
if (pImpl == NULL) {
|
||||
dError("vgId:%d, failed to open vnode by thread:%d", pCfg->vgId, pThread->threadIndex);
|
||||
pThread->failed++;
|
||||
} else {
|
||||
vmOpenVnode(pMgmt, pCfg, pImpl);
|
||||
dInfo("vgId:%d, is opened by thread:%d", pCfg->vgId, pThread->threadIndex);
|
||||
pThread->opened++;
|
||||
atomic_add_fetch_32(&pMgmt->state.openVnodes, 1);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (vmOpenVnode(pMgmt, pCfg, pImpl) != 0) {
|
||||
dError("vgId:%d, failed to open vnode by thread:%d", pCfg->vgId, pThread->threadIndex);
|
||||
pThread->failed++;
|
||||
continue;
|
||||
}
|
||||
|
||||
dInfo("vgId:%d, is opened by thread:%d", pCfg->vgId, pThread->threadIndex);
|
||||
pThread->opened++;
|
||||
atomic_add_fetch_32(&pMgmt->state.openVnodes, 1);
|
||||
}
|
||||
|
||||
dInfo("thread:%d, numOfVnodes:%d, opened:%d failed:%d", pThread->threadIndex, pThread->vnodeNum, pThread->opened,
|
||||
|
@ -242,6 +280,8 @@ static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) {
|
|||
taosThreadAttrDestroy(&thAttr);
|
||||
}
|
||||
|
||||
bool updateVnodesList = false;
|
||||
|
||||
for (int32_t t = 0; t < threadNum; ++t) {
|
||||
SVnodeThread *pThread = &threads[t];
|
||||
if (pThread->vnodeNum > 0 && taosCheckPthreadValid(pThread->thread)) {
|
||||
|
@ -249,6 +289,7 @@ static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) {
|
|||
taosThreadClear(&pThread->thread);
|
||||
}
|
||||
taosMemoryFree(pThread->pCfgs);
|
||||
if (pThread->updateVnodesList) updateVnodesList = true;
|
||||
}
|
||||
taosMemoryFree(threads);
|
||||
taosMemoryFree(pCfgs);
|
||||
|
@ -256,10 +297,15 @@ static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) {
|
|||
if (pMgmt->state.openVnodes != pMgmt->state.totalVnodes) {
|
||||
dError("there are total vnodes:%d, opened:%d", pMgmt->state.totalVnodes, pMgmt->state.openVnodes);
|
||||
return -1;
|
||||
} else {
|
||||
dInfo("successfully opened %d vnodes", pMgmt->state.totalVnodes);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (updateVnodesList && vmWriteVnodeListToFile(pMgmt) != 0) {
|
||||
dError("failed to write vnode list since %s", terrstr());
|
||||
return -1;
|
||||
}
|
||||
|
||||
dInfo("successfully opened %d vnodes", pMgmt->state.totalVnodes);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void *vmCloseVnodeInThread(void *param) {
|
||||
|
|
|
@ -218,6 +218,7 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp
|
|||
if (pMsg->msgType != TDMT_VND_ALTER_CONFIRM && pVnode->disable) {
|
||||
dDebug("vgId:%d, msg:%p put into vnode-write queue failed since its disable", pVnode->vgId, pMsg);
|
||||
terrno = TSDB_CODE_VND_STOPPED;
|
||||
code = terrno;
|
||||
break;
|
||||
}
|
||||
dGTrace("vgId:%d, msg:%p put into vnode-write queue", pVnode->vgId, pMsg);
|
||||
|
|
|
@ -1217,6 +1217,7 @@ static int32_t mndAddAlterVnodeHashRangeAction(SMnode *pMnode, STrans *pTrans, i
|
|||
action.pCont = pReq;
|
||||
action.contLen = contLen;
|
||||
action.msgType = TDMT_VND_ALTER_HASHRANGE;
|
||||
action.acceptableCode = TSDB_CODE_VND_ALREADY_EXIST;
|
||||
|
||||
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
|
||||
taosMemoryFree(pReq);
|
||||
|
|
|
@ -80,6 +80,7 @@ IF (TD_VNODE_PLUGINS)
|
|||
)
|
||||
ENDIF ()
|
||||
|
||||
IF (NOT ${TD_LINUX})
|
||||
target_include_directories(
|
||||
vnode
|
||||
PUBLIC "inc"
|
||||
|
@ -87,7 +88,25 @@ target_include_directories(
|
|||
PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
|
||||
PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include"
|
||||
)
|
||||
ELSE()
|
||||
target_include_directories(
|
||||
vnode
|
||||
PUBLIC "inc"
|
||||
PUBLIC "src/inc"
|
||||
PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
|
||||
)
|
||||
ENDIF (NOT ${TD_LINUX})
|
||||
|
||||
IF (TD_LINUX)
|
||||
target_include_directories(
|
||||
vnode
|
||||
PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static"
|
||||
)
|
||||
|
||||
target_link_directories(
|
||||
vnode
|
||||
PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static"
|
||||
)
|
||||
target_link_libraries(
|
||||
vnode
|
||||
PUBLIC os
|
||||
|
|
|
@ -54,6 +54,7 @@ void vnodeCleanup();
|
|||
int32_t vnodeCreate(const char *path, SVnodeCfg *pCfg, STfs *pTfs);
|
||||
int32_t vnodeAlterReplica(const char *path, SAlterVnodeReplicaReq *pReq, STfs *pTfs);
|
||||
int32_t vnodeAlterHashRange(const char *srcPath, const char *dstPath, SAlterVnodeHashRangeReq *pReq, STfs *pTfs);
|
||||
int32_t vnodeRestoreVgroupId(const char *srcPath, const char *dstPath, int32_t srcVgId, int32_t dstVgId, STfs *pTfs);
|
||||
void vnodeDestroy(const char *path, STfs *pTfs);
|
||||
SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb);
|
||||
void vnodePreClose(SVnode *pVnode);
|
||||
|
|
|
@ -29,7 +29,7 @@ int32_t tdProcessTSmaInsert(SSma *pSma, int64_t indexUid, const char *msg) {
|
|||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
||||
if ((code = tdProcessTSmaInsertImpl(pSma, indexUid, msg)) < 0) {
|
||||
smaWarn("vgId:%d, insert tsma data failed since %s", SMA_VID(pSma), tstrerror(terrno));
|
||||
smaError("vgId:%d, insert tsma data failed since %s", SMA_VID(pSma), tstrerror(terrno));
|
||||
}
|
||||
|
||||
return code;
|
||||
|
@ -346,6 +346,43 @@ _end:
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t tsmaProcessDelReq(SSma *pSma, int64_t indexUid, SBatchDeleteReq *pDelReq) {
|
||||
int32_t code = 0;
|
||||
int32_t lino = 0;
|
||||
|
||||
if (taosArrayGetSize(pDelReq->deleteReqs) > 0) {
|
||||
int32_t len = 0;
|
||||
tEncodeSize(tEncodeSBatchDeleteReq, pDelReq, len, code);
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
|
||||
void *pBuf = rpcMallocCont(len + sizeof(SMsgHead));
|
||||
if (!pBuf) {
|
||||
code = terrno;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
|
||||
SEncoder encoder;
|
||||
tEncoderInit(&encoder, POINTER_SHIFT(pBuf, sizeof(SMsgHead)), len);
|
||||
tEncodeSBatchDeleteReq(&encoder, pDelReq);
|
||||
tEncoderClear(&encoder);
|
||||
|
||||
((SMsgHead *)pBuf)->vgId = TD_VID(pSma->pVnode);
|
||||
|
||||
SRpcMsg delMsg = {.msgType = TDMT_VND_BATCH_DEL, .pCont = pBuf, .contLen = len + sizeof(SMsgHead)};
|
||||
code = tmsgPutToQueue(&pSma->pVnode->msgCb, WRITE_QUEUE, &delMsg);
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
|
||||
_exit:
|
||||
taosArrayDestroy(pDelReq->deleteReqs);
|
||||
if (code) {
|
||||
smaError("vgId:%d, failed at line %d to process delete req for smaIndex %" PRIi64 " since %s", SMA_VID(pSma), lino,
|
||||
indexUid, tstrerror(code));
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Insert/Update Time-range-wise SMA data.
|
||||
*
|
||||
|
@ -355,7 +392,6 @@ _end:
|
|||
*/
|
||||
static int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
|
||||
const SArray *pDataBlocks = (const SArray *)msg;
|
||||
// TODO: destroy SSDataBlocks(msg)
|
||||
if (!pDataBlocks) {
|
||||
terrno = TSDB_CODE_TSMA_INVALID_PTR;
|
||||
smaWarn("vgId:%d, insert tsma data failed since pDataBlocks is NULL", SMA_VID(pSma));
|
||||
|
@ -419,8 +455,10 @@ static int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char
|
|||
goto _err;
|
||||
}
|
||||
|
||||
// TODO deleteReq
|
||||
taosArrayDestroy(deleteReq.deleteReqs);
|
||||
if ((terrno = tsmaProcessDelReq(pSma, indexUid, &deleteReq)) != 0) {
|
||||
goto _err;
|
||||
}
|
||||
|
||||
#if 0
|
||||
if (!strncasecmp("td.tsma.rst.tb", pTsmaStat->pTSma->dstTbName, 14)) {
|
||||
terrno = TSDB_CODE_APP_ERROR;
|
||||
|
|
|
@ -136,14 +136,13 @@ static int32_t vnodeVgroupIdLen(int32_t vgId) {
|
|||
}
|
||||
|
||||
int32_t vnodeRenameVgroupId(const char *srcPath, const char *dstPath, int32_t srcVgId, int32_t dstVgId, STfs *pTfs) {
|
||||
int32_t ret = tfsRename(pTfs, srcPath, dstPath);
|
||||
if (ret != 0) return ret;
|
||||
int32_t ret = 0;
|
||||
|
||||
char oldRname[TSDB_FILENAME_LEN] = {0};
|
||||
char newRname[TSDB_FILENAME_LEN] = {0};
|
||||
char tsdbPath[TSDB_FILENAME_LEN] = {0};
|
||||
char tsdbFilePrefix[TSDB_FILENAME_LEN] = {0};
|
||||
snprintf(tsdbPath, TSDB_FILENAME_LEN, "%s%stsdb", dstPath, TD_DIRSEP);
|
||||
snprintf(tsdbPath, TSDB_FILENAME_LEN, "%s%stsdb", srcPath, TD_DIRSEP);
|
||||
snprintf(tsdbFilePrefix, TSDB_FILENAME_LEN, "tsdb%sv", TD_DIRSEP);
|
||||
|
||||
STfsDir *tsdbDir = tfsOpendir(pTfs, tsdbPath);
|
||||
|
@ -168,7 +167,7 @@ int32_t vnodeRenameVgroupId(const char *srcPath, const char *dstPath, int32_t sr
|
|||
|
||||
ret = tfsRename(pTfs, tsdbFile->rname, newRname);
|
||||
if (ret != 0) {
|
||||
vInfo("vgId:%d, failed to rename file from %s to %s since %s", dstVgId, tsdbFile->rname, newRname, terrstr());
|
||||
vError("vgId:%d, failed to rename file from %s to %s since %s", dstVgId, tsdbFile->rname, newRname, terrstr());
|
||||
tfsClosedir(tsdbDir);
|
||||
return ret;
|
||||
}
|
||||
|
@ -176,6 +175,21 @@ int32_t vnodeRenameVgroupId(const char *srcPath, const char *dstPath, int32_t sr
|
|||
}
|
||||
|
||||
tfsClosedir(tsdbDir);
|
||||
|
||||
vInfo("vgId:%d, rename dir from %s to %s", dstVgId, srcPath, dstPath);
|
||||
ret = tfsRename(pTfs, srcPath, dstPath);
|
||||
if (ret != 0) {
|
||||
vError("vgId:%d, failed to rename dir from %s to %s since %s", dstVgId, srcPath, dstPath, terrstr());
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int32_t vnodeGetAbsDir(const char *relPath, STfs *pTfs, char *buf, size_t bufLen) {
|
||||
if (pTfs) {
|
||||
snprintf(buf, bufLen, "%s%s%s", tfsGetPrimaryPath(pTfs), TD_DIRSEP, relPath);
|
||||
} else {
|
||||
snprintf(buf, bufLen, "%s", relPath);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -184,13 +198,7 @@ int32_t vnodeAlterHashRange(const char *srcPath, const char *dstPath, SAlterVnod
|
|||
char dir[TSDB_FILENAME_LEN] = {0};
|
||||
int32_t ret = 0;
|
||||
|
||||
if (pTfs) {
|
||||
snprintf(dir, TSDB_FILENAME_LEN, "%s%s%s", tfsGetPrimaryPath(pTfs), TD_DIRSEP, srcPath);
|
||||
} else {
|
||||
snprintf(dir, TSDB_FILENAME_LEN, "%s", srcPath);
|
||||
}
|
||||
|
||||
// todo add stat file to handle exception while vnode open
|
||||
vnodeGetAbsDir(srcPath, pTfs, dir, TSDB_FILENAME_LEN);
|
||||
|
||||
ret = vnodeLoadInfo(dir, &info);
|
||||
if (ret < 0) {
|
||||
|
@ -245,6 +253,42 @@ int32_t vnodeAlterHashRange(const char *srcPath, const char *dstPath, SAlterVnod
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t vnodeRestoreVgroupId(const char *srcPath, const char *dstPath, int32_t srcVgId, int32_t dstVgId, STfs *pTfs) {
|
||||
SVnodeInfo info = {0};
|
||||
char dir[TSDB_FILENAME_LEN] = {0};
|
||||
|
||||
vnodeGetAbsDir(dstPath, pTfs, dir, TSDB_FILENAME_LEN);
|
||||
if (vnodeLoadInfo(dir, &info) == 0) {
|
||||
if (info.config.vgId != dstVgId) {
|
||||
vError("vgId:%d, unexpected vnode config.vgId:%d", dstVgId, info.config.vgId);
|
||||
return -1;
|
||||
}
|
||||
return dstVgId;
|
||||
}
|
||||
|
||||
vnodeGetAbsDir(srcPath, pTfs, dir, TSDB_FILENAME_LEN);
|
||||
if (vnodeLoadInfo(dir, &info) < 0) {
|
||||
vError("vgId:%d, failed to read vnode config from %s since %s", srcVgId, srcPath, tstrerror(terrno));
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (info.config.vgId == srcVgId) {
|
||||
vInfo("vgId:%d, rollback alter hashrange", srcVgId);
|
||||
return srcVgId;
|
||||
} else if (info.config.vgId != dstVgId) {
|
||||
vError("vgId:%d, unexpected vnode config.vgId:%d", dstVgId, info.config.vgId);
|
||||
return -1;
|
||||
}
|
||||
|
||||
vInfo("vgId:%d, rename %s to %s", dstVgId, srcPath, dstPath);
|
||||
if (vnodeRenameVgroupId(srcPath, dstPath, srcVgId, dstVgId, pTfs) < 0) {
|
||||
vError("vgId:%d, failed to rename vnode from %s to %s since %s", dstVgId, srcPath, dstPath, tstrerror(terrno));
|
||||
return -1;
|
||||
}
|
||||
|
||||
return dstVgId;
|
||||
}
|
||||
|
||||
void vnodeDestroy(const char *path, STfs *pTfs) {
|
||||
vInfo("path:%s is removed while destroy vnode", path);
|
||||
tfsRmdir(pTfs, path);
|
||||
|
|
|
@ -602,9 +602,7 @@ int32_t vnodeProcessStreamMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo)
|
|||
}
|
||||
}
|
||||
|
||||
// TODO: remove the function
|
||||
void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) {
|
||||
// TODO
|
||||
// blockDebugShowDataBlocks(data, __func__);
|
||||
tdProcessTSmaInsert(((SVnode *)pVnode)->pSma, smaId, (const char *)data);
|
||||
}
|
||||
|
|
|
@ -145,7 +145,7 @@ typedef struct SExplainCtx {
|
|||
SHashObj *groupHash; // Hash<SExplainGroup>
|
||||
} SExplainCtx;
|
||||
|
||||
#define EXPLAIN_ORDER_STRING(_order) ((ORDER_ASC == _order) ? "asc" : "desc")
|
||||
#define EXPLAIN_ORDER_STRING(_order) ((ORDER_ASC == _order) ? "asc" : ORDER_DESC == _order ? "desc" : "unknown")
|
||||
#define EXPLAIN_JOIN_STRING(_type) ((JOIN_TYPE_INNER == _type) ? "Inner join" : "Join")
|
||||
|
||||
#define INVERAL_TIME_FROM_PRECISION_TO_UNIT(_t, _u, _p) (((_u) == 'n' || (_u) == 'y') ? (_t) : (convertTimeFromPrecisionToUnit(_t, _p, _u)))
|
||||
|
|
|
@ -499,6 +499,9 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, pPrjNode->pProjections->length);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pPrjNode->node.pOutputDataBlockDesc->totalRowSize);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pPrjNode->node.inputTsOrder));
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
|
||||
|
@ -544,6 +547,9 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, pJoinNode->pTargets->length);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pJoinNode->node.pOutputDataBlockDesc->totalRowSize);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pJoinNode->node.inputTsOrder));
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
|
||||
|
@ -597,6 +603,9 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_GROUPS_FORMAT, pAggNode->pGroupKeys->length);
|
||||
}
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pAggNode->node.inputTsOrder));
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
|
||||
|
@ -716,6 +725,11 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
case QUERY_NODE_PHYSICAL_PLAN_SORT: {
|
||||
SSortPhysiNode *pSortNode = (SSortPhysiNode *)pNode;
|
||||
EXPLAIN_ROW_NEW(level, EXPLAIN_SORT_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pSortNode->node.inputTsOrder));
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_OUTPUT_ORDER_TYPE_FORMAT, EXPLAIN_ORDER_STRING(pSortNode->node.outputTsOrder));
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT);
|
||||
if (pResNode->pExecInfo) {
|
||||
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
|
||||
|
@ -796,9 +810,10 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pIntNode->window.node.pOutputDataBlockDesc->totalRowSize);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pIntNode->window.inputTsOrder));
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pIntNode->window.node.inputTsOrder));
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_OUTPUT_ORDER_TYPE_FORMAT, EXPLAIN_ORDER_STRING(pIntNode->window.node.outputTsOrder));
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_OUTPUT_ORDER_TYPE_FORMAT, EXPLAIN_ORDER_STRING(pIntNode->window.outputTsOrder));
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
|
||||
|
@ -847,6 +862,10 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pIntNode->window.pFuncs->length);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pIntNode->window.node.pOutputDataBlockDesc->totalRowSize);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pIntNode->window.node.inputTsOrder));
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_OUTPUT_ORDER_TYPE_FORMAT, EXPLAIN_ORDER_STRING(pIntNode->window.node.outputTsOrder));
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
|
||||
|
@ -895,6 +914,9 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
EXPLAIN_ROW_APPEND(EXPLAIN_MODE_FORMAT, nodesGetFillModeString(pFillNode->mode));
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pFillNode->node.pOutputDataBlockDesc->totalRowSize);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pFillNode->node.inputTsOrder));
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
|
||||
|
@ -1080,6 +1102,10 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, nodesGetOutputNumFromSlotList(pDescNode->pSlots));
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pDescNode->totalRowSize);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pMergeNode->node.inputTsOrder));
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_OUTPUT_ORDER_TYPE_FORMAT, EXPLAIN_ORDER_STRING(pMergeNode->node.outputTsOrder));
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
|
||||
|
|
|
@ -399,6 +399,8 @@ typedef struct SOptrBasicInfo {
|
|||
SResultRowInfo resultRowInfo;
|
||||
SSDataBlock* pRes;
|
||||
bool mergeResultBlock;
|
||||
int32_t inputTsOrder;
|
||||
int32_t outputTsOrder;
|
||||
} SOptrBasicInfo;
|
||||
|
||||
typedef struct SIntervalAggOperatorInfo {
|
||||
|
@ -411,8 +413,6 @@ typedef struct SIntervalAggOperatorInfo {
|
|||
STimeWindow win; // query time range
|
||||
bool timeWindowInterpo; // interpolation needed or not
|
||||
SArray* pInterpCols; // interpolation columns
|
||||
int32_t resultTsOrder; // result timestamp order
|
||||
int32_t inputOrder; // input data ts order
|
||||
EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model]
|
||||
STimeWindowAggSupp twAggSup;
|
||||
SArray* pPrevValues; // SArray<SGroupKeys> used to keep the previous not null value for interpolation.
|
||||
|
|
|
@ -146,7 +146,7 @@ void* tsortGetValue(STupleHandle* pVHandle, int32_t colId);
|
|||
* @return
|
||||
*/
|
||||
uint64_t tsortGetGroupId(STupleHandle* pVHandle);
|
||||
|
||||
void* tsortGetBlockInfo(STupleHandle* pVHandle);
|
||||
/**
|
||||
*
|
||||
* @param pSortHandle
|
||||
|
|
|
@ -108,6 +108,8 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SAggPhysiN
|
|||
pInfo->binfo.mergeResultBlock = pAggNode->mergeDataBlock;
|
||||
pInfo->groupKeyOptimized = pAggNode->groupKeyOptimized;
|
||||
pInfo->groupId = UINT64_MAX;
|
||||
pInfo->binfo.inputTsOrder = pAggNode->node.inputTsOrder;
|
||||
pInfo->binfo.outputTsOrder = pAggNode->node.outputTsOrder;
|
||||
|
||||
setOperatorInfo(pOperator, "TableAggregate", QUERY_NODE_PHYSICAL_PLAN_HASH_AGG, true, OP_NOT_OPENED, pInfo,
|
||||
pTaskInfo);
|
||||
|
@ -164,10 +166,8 @@ int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
|
|||
SOperatorInfo* downstream = pOperator->pDownstream[0];
|
||||
|
||||
int64_t st = taosGetTimestampUs();
|
||||
|
||||
int32_t order = TSDB_ORDER_ASC;
|
||||
int32_t scanFlag = MAIN_SCAN;
|
||||
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t order = pAggInfo->binfo.inputTsOrder;
|
||||
bool hasValidBlock = false;
|
||||
|
||||
while (1) {
|
||||
|
@ -185,12 +185,7 @@ int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
|
|||
}
|
||||
}
|
||||
hasValidBlock = true;
|
||||
|
||||
int32_t code = getTableScanInfo(pOperator, &order, &scanFlag, false);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
destroyDataBlockForEmptyInput(blockAllocated, &pBlock);
|
||||
T_LONG_JMP(pTaskInfo->env, code);
|
||||
}
|
||||
pAggInfo->binfo.pRes->info.scanFlag = pBlock->info.scanFlag;
|
||||
|
||||
// there is an scalar expression that needs to be calculated before apply the group aggregation.
|
||||
if (pAggInfo->scalarExprSup.pExprInfo != NULL && !blockAllocated) {
|
||||
|
@ -204,7 +199,7 @@ int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
|
|||
|
||||
// the pDataBlock are always the same one, no need to call this again
|
||||
setExecutionContext(pOperator, pOperator->exprSupp.numOfExprs, pBlock->info.id.groupId);
|
||||
setInputDataBlock(pSup, pBlock, order, scanFlag, true);
|
||||
setInputDataBlock(pSup, pBlock, order, pBlock->info.scanFlag, true);
|
||||
code = doAggregateImpl(pOperator, pSup->pCtx);
|
||||
if (code != 0) {
|
||||
destroyDataBlockForEmptyInput(blockAllocated, &pBlock);
|
||||
|
|
|
@ -208,6 +208,7 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
|
|||
|
||||
pRes->info.id.uid = *(tb_uid_t*)taosArrayGet(pInfo->pUidList, pInfo->indexOfBufferedRes);
|
||||
pRes->info.rows = 1;
|
||||
pRes->info.scanFlag = MAIN_SCAN;
|
||||
|
||||
SExprSupp* pSup = &pInfo->pseudoExprSup;
|
||||
int32_t code = addTagPseudoColumnData(&pInfo->readHandle, pSup->pExprInfo, pSup->numOfExprs, pRes,
|
||||
|
|
|
@ -120,6 +120,8 @@ SOperatorInfo* createEventwindowOperatorInfo(SOperatorInfo* downstream, SPhysiNo
|
|||
|
||||
initBasicInfo(&pInfo->binfo, pResBlock);
|
||||
initResultRowInfo(&pInfo->binfo.resultRowInfo);
|
||||
pInfo->binfo.inputTsOrder = physiNode->inputTsOrder;
|
||||
pInfo->binfo.outputTsOrder = physiNode->outputTsOrder;
|
||||
|
||||
pInfo->twAggSup = (STimeWindowAggSupp){.waterMark = pEventWindowNode->window.watermark,
|
||||
.calTrigger = pEventWindowNode->window.triggerType};
|
||||
|
@ -183,7 +185,7 @@ static SSDataBlock* eventWindowAggregate(SOperatorInfo* pOperator) {
|
|||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
|
||||
SExprSupp* pSup = &pOperator->exprSupp;
|
||||
int32_t order = TSDB_ORDER_ASC;
|
||||
int32_t order = pInfo->binfo.inputTsOrder;
|
||||
|
||||
SSDataBlock* pRes = pInfo->binfo.pRes;
|
||||
|
||||
|
@ -196,6 +198,7 @@ static SSDataBlock* eventWindowAggregate(SOperatorInfo* pOperator) {
|
|||
break;
|
||||
}
|
||||
|
||||
pRes->info.scanFlag = pBlock->info.scanFlag;
|
||||
setInputDataBlock(pSup, pBlock, order, MAIN_SCAN, true);
|
||||
blockDataUpdateTsWindow(pBlock, pInfo->tsSlotId);
|
||||
|
||||
|
|
|
@@ -520,6 +520,7 @@ int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, char* pData, SArray* pCo
// data from mnode
pRes->info.dataLoad = 1;
pRes->info.rows = pBlock->info.rows;
pRes->info.scanFlag = MAIN_SCAN;
relocateColumnData(pRes, pColList, pBlock->pDataBlock, false);
blockDataDestroy(pBlock);
}

@@ -178,16 +178,15 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
blockDataCleanup(pResBlock);
int32_t order = TSDB_ORDER_ASC;
int32_t scanFlag = MAIN_SCAN;
getTableScanInfo(pOperator, &order, &scanFlag, false);
int32_t order = pInfo->pFillInfo->order;
SOperatorInfo* pDownstream = pOperator->pDownstream[0];
#if 0
// the scan order may be different from the output result order for agg interval operator.
if (pDownstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL) {
order = ((SIntervalAggOperatorInfo*) pDownstream->info)->resultTsOrder;
}
#endif
doHandleRemainBlockFromNewGroup(pOperator, pInfo, pResultInfo, order);
if (pResBlock->info.rows > 0) {

@@ -206,13 +205,14 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
taosFillSetStartInfo(pInfo->pFillInfo, 0, pInfo->win.ekey);
} else {
pResBlock->info.scanFlag = pBlock->info.scanFlag;
pBlock->info.dataLoad = 1;
blockDataUpdateTsWindow(pBlock, pInfo->primarySrcSlotId);
blockDataCleanup(pInfo->pRes);
blockDataEnsureCapacity(pInfo->pRes, pBlock->info.rows);
blockDataEnsureCapacity(pInfo->pFinalRes, pBlock->info.rows);
doApplyScalarCalculation(pOperator, pBlock, order, scanFlag);
doApplyScalarCalculation(pOperator, pBlock, order, pBlock->info.scanFlag);
if (pInfo->curGroupId == 0 || (pInfo->curGroupId == pInfo->pRes->info.id.groupId)) {
if (pInfo->curGroupId == 0 && taosFillNotStarted(pInfo->pFillInfo)) {

@@ -405,7 +405,7 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode*
? &((SMergeAlignedIntervalAggOperatorInfo*)downstream->info)->intervalAggOperatorInfo->interval
: &((SIntervalAggOperatorInfo*)downstream->info)->interval;
int32_t order = (pPhyFillNode->inputTsOrder == ORDER_ASC) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
int32_t order = (pPhyFillNode->node.inputTsOrder == ORDER_ASC) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
int32_t type = convertFillType(pPhyFillNode->mode);
SResultInfo* pResultInfo = &pOperator->resultInfo;

@@ -378,9 +378,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
return buildGroupResultDataBlock(pOperator);
}
int32_t order = TSDB_ORDER_ASC;
int32_t scanFlag = MAIN_SCAN;
int32_t order = pInfo->binfo.inputTsOrder;
int64_t st = taosGetTimestampUs();
SOperatorInfo* downstream = pOperator->pDownstream[0];

@@ -390,13 +388,10 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
break;
}
int32_t code = getTableScanInfo(pOperator, &order, &scanFlag, false);
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code);
}
pInfo->binfo.pRes->info.scanFlag = pBlock->info.scanFlag;
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock(&pOperator->exprSupp, pBlock, order, scanFlag, true);
setInputDataBlock(&pOperator->exprSupp, pBlock, order, pBlock->info.scanFlag, true);
// there is an scalar expression that needs to be calculated right before apply the group aggregation.
if (pInfo->scalarSup.pExprInfo != NULL) {

@@ -481,6 +476,8 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode*
setOperatorInfo(pOperator, "GroupbyAggOperator", 0, true, OP_NOT_OPENED, pInfo, pTaskInfo);
pInfo->binfo.mergeResultBlock = pAggNode->mergeDataBlock;
pInfo->binfo.inputTsOrder = pAggNode->node.inputTsOrder;
pInfo->binfo.outputTsOrder = pAggNode->node.outputTsOrder;
pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, hashGroupbyAggregate, NULL, destroyGroupOperatorInfo,
optrDefaultBufFn, NULL);

@@ -762,6 +759,7 @@ static SSDataBlock* hashPartition(SOperatorInfo* pOperator) {
break;
}
pInfo->binfo.pRes->info.scanFlag = pBlock->info.scanFlag;
// there is an scalar expression that needs to be calculated right before apply the group aggregation.
if (pInfo->scalarSup.pExprInfo != NULL) {
pTaskInfo->code = projectApplyFunctions(pInfo->scalarSup.pExprInfo, pBlock, pBlock, pInfo->scalarSup.pCtx,

@@ -253,9 +253,9 @@ SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t
}
pInfo->inputOrder = TSDB_ORDER_ASC;
if (pJoinNode->inputTsOrder == ORDER_ASC) {
if (pJoinNode->node.inputTsOrder == ORDER_ASC) {
pInfo->inputOrder = TSDB_ORDER_ASC;
} else if (pJoinNode->inputTsOrder == ORDER_DESC) {
} else if (pJoinNode->node.inputTsOrder == ORDER_DESC) {
pInfo->inputOrder = TSDB_ORDER_DESC;
}

@@ -684,6 +684,7 @@ static void doMergeJoinImpl(struct SOperatorInfo* pOperator, SSDataBlock* pRes)
// the pDataBlock are always the same one, no need to call this again
pRes->info.rows = nrows;
pRes->info.dataLoad = 1;
pRes->info.scanFlag = MAIN_SCAN;
if (pRes->info.rows >= pOperator->resultInfo.threshold) {
break;
}

@@ -107,6 +107,8 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys
pInfo->binfo.pRes = pResBlock;
pInfo->pFinalRes = createOneDataBlock(pResBlock, false);
pInfo->binfo.inputTsOrder = pProjPhyNode->node.inputTsOrder;
pInfo->binfo.outputTsOrder = pProjPhyNode->node.outputTsOrder;
if (pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM) {
pInfo->mergeDataBlocks = false;

@@ -253,8 +255,9 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
}
int64_t st = 0;
int32_t order = 0;
int32_t order = pInfo->inputTsOrder;
int32_t scanFlag = 0;
int32_t code = TSDB_CODE_SUCCESS;
if (pOperator->cost.openCost == 0) {
st = taosGetTimestampUs();

@@ -299,10 +302,10 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
break;
}
// the pDataBlock are always the same one, no need to call this again
int32_t code = getTableScanInfo(downstream, &order, &scanFlag, false);
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code);
if (pProjectInfo->mergeDataBlocks) {
pFinalRes->info.scanFlag = scanFlag = pBlock->info.scanFlag;
} else {
pRes->info.scanFlag = scanFlag = pBlock->info.scanFlag;
}
setInputDataBlock(pSup, pBlock, order, scanFlag, false);

@@ -421,6 +424,8 @@ SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhy
}
pInfo->binfo.pRes = pResBlock;
pInfo->binfo.inputTsOrder = pNode->inputTsOrder;
pInfo->binfo.outputTsOrder = pNode->outputTsOrder;
pInfo->pPseudoColInfo = setRowTsColumnOutputInfo(pSup->pCtx, numOfExpr);
setOperatorInfo(pOperator, "IndefinitOperator", QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC, false, OP_NOT_OPENED, pInfo,

@@ -444,18 +449,13 @@ _error:
static void doHandleDataBlock(SOperatorInfo* pOperator, SSDataBlock* pBlock, SOperatorInfo* downstream,
SExecTaskInfo* pTaskInfo) {
int32_t order = 0;
int32_t scanFlag = 0;
SIndefOperatorInfo* pIndefInfo = pOperator->info;
SOptrBasicInfo* pInfo = &pIndefInfo->binfo;
SExprSupp* pSup = &pOperator->exprSupp;
// the pDataBlock are always the same one, no need to call this again
int32_t code = getTableScanInfo(downstream, &order, &scanFlag, false);
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code);
}
int32_t order = pInfo->inputTsOrder;
int32_t scanFlag = pBlock->info.scanFlag;
int32_t code = TSDB_CODE_SUCCESS;
// there is an scalar expression that needs to be calculated before apply the group aggregation.
SExprSupp* pScalarSup = &pIndefInfo->scalarSup;

@@ -521,6 +521,7 @@ SSDataBlock* doApplyIndefinitFunction(SOperatorInfo* pOperator) {
setOperatorCompleted(pOperator);
break;
}
pInfo->pRes->info.scanFlag = pBlock->info.scanFlag;
if (pIndefInfo->groupId == 0 && pBlock->info.id.groupId != 0) {
pIndefInfo->groupId = pBlock->info.id.groupId; // this is the initial group result

@@ -713,6 +713,7 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
pTableScanInfo->base.readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0;
pOperator->cost.totalCost = pTableScanInfo->base.readRecorder.elapsedTime;
pBlock->info.scanFlag = pTableScanInfo->base.scanFlag;
return pBlock;
}
return NULL;

@@ -69,6 +69,8 @@ SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode*
pInfo->binfo.pRes = createDataBlockFromDescNode(pDescNode);
pInfo->pSortInfo = createSortInfo(pSortNode->pSortKeys);
pInfo->binfo.inputTsOrder = pSortNode->node.inputTsOrder;
pInfo->binfo.outputTsOrder = pSortNode->node.outputTsOrder;
initLimitInfo(pSortNode->node.pLimit, pSortNode->node.pSlimit, &pInfo->limitInfo);
setOperatorInfo(pOperator, "SortOperator", QUERY_NODE_PHYSICAL_PLAN_SORT, true, OP_NOT_OPENED, pInfo, pTaskInfo);

@@ -114,6 +116,7 @@ void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle) {
}
pBlock->info.dataLoad = 1;
pBlock->info.scanFlag = ((SDataBlockInfo*)tsortGetBlockInfo(pTupleHandle))->scanFlag;
pBlock->info.rows += 1;
}

@@ -155,6 +158,7 @@ SSDataBlock* getSortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, i
pDataBlock->info.dataLoad = 1;
pDataBlock->info.rows = p->info.rows;
pDataBlock->info.scanFlag = p->info.scanFlag;
}
blockDataDestroy(p);

@@ -331,6 +335,7 @@ SSDataBlock* getGroupSortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlo
pDataBlock->info.rows = p->info.rows;
pDataBlock->info.capacity = p->info.rows;
pDataBlock->info.scanFlag = p->info.scanFlag;
}
blockDataDestroy(p);

@@ -505,6 +510,8 @@ SOperatorInfo* createGroupSortOperatorInfo(SOperatorInfo* downstream, SGroupSort
pInfo->binfo.pRes = createDataBlockFromDescNode(pDescNode);
blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
pInfo->binfo.inputTsOrder = pSortPhyNode->node.inputTsOrder;
pInfo->binfo.outputTsOrder = pSortPhyNode->node.outputTsOrder;
int32_t numOfOutputCols = 0;
int32_t code = extractColMatchInfo(pSortPhyNode->pTargets, pDescNode, &numOfOutputCols, COL_MATCH_FROM_SLOT_ID,

@@ -698,6 +705,7 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData
}
pDataBlock->info.rows = p->info.rows;
pDataBlock->info.scanFlag = p->info.scanFlag;
if (pInfo->ignoreGroupId) {
pDataBlock->info.id.groupId = 0;
} else {

@@ -799,6 +807,8 @@ SOperatorInfo* createMultiwayMergeOperatorInfo(SOperatorInfo** downStreams, size
size_t numOfCols = taosArrayGetSize(pInfo->binfo.pRes->pDataBlock);
pInfo->bufPageSize = getProperSortPageSize(rowSize, numOfCols);
pInfo->sortBufSize = pInfo->bufPageSize * (numStreams + 1); // one additional is reserved for merged result.
pInfo->binfo.inputTsOrder = pMergePhyNode->node.inputTsOrder;
pInfo->binfo.outputTsOrder = pMergePhyNode->node.outputTsOrder;
setOperatorInfo(pOperator, "MultiwayMergeOperator", QUERY_NODE_PHYSICAL_PLAN_MERGE, false, OP_NOT_OPENED, pInfo, pTaskInfo);
pOperator->fpSet = createOperatorFpSet(openMultiwayMergeOperator, doMultiwayMerge, NULL,

@@ -44,6 +44,8 @@ typedef struct STimeSliceOperatorInfo {
uint64_t groupId;
SGroupKeys* pPrevGroupKey;
SSDataBlock* pNextGroupRes;
SSDataBlock* pRemainRes; // save block unfinished processing
int32_t remainIndex; // the remaining index in the block to be processed
} STimeSliceOperatorInfo;
static void destroyTimeSliceOperatorInfo(void* param);

@@ -644,13 +646,47 @@ static int32_t resetKeeperInfo(STimeSliceOperatorInfo* pInfo) {
return TSDB_CODE_SUCCESS;
}
static bool checkThresholdReached(STimeSliceOperatorInfo* pSliceInfo, int32_t threshold) {
SSDataBlock* pResBlock = pSliceInfo->pRes;
if (pResBlock->info.rows > threshold) {
return true;
}
return false;
}
static bool checkWindowBoundReached(STimeSliceOperatorInfo* pSliceInfo) {
if (pSliceInfo->current > pSliceInfo->win.ekey) {
return true;
}
return false;
}
static void saveBlockStatus(STimeSliceOperatorInfo* pSliceInfo, SSDataBlock* pBlock, int32_t curIndex) {
SSDataBlock* pResBlock = pSliceInfo->pRes;
SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, pSliceInfo->tsCol.slotId);
if (curIndex < pBlock->info.rows - 1) {
pSliceInfo->pRemainRes = pBlock;
pSliceInfo->remainIndex = curIndex + 1;
return;
}
// all data in remaining block processed
pSliceInfo->pRemainRes = NULL;
}
static void doTimesliceImpl(SOperatorInfo* pOperator, STimeSliceOperatorInfo* pSliceInfo, SSDataBlock* pBlock,
SExecTaskInfo* pTaskInfo, bool ignoreNull) {
SSDataBlock* pResBlock = pSliceInfo->pRes;
SInterval* pInterval = &pSliceInfo->interval;
SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, pSliceInfo->tsCol.slotId);
for (int32_t i = 0; i < pBlock->info.rows; ++i) {
int32_t i = (pSliceInfo->pRemainRes == NULL) ? 0 : pSliceInfo->remainIndex;
for (; i < pBlock->info.rows; ++i) {
int64_t ts = *(int64_t*)colDataGetData(pTsCol, i);
// check for duplicate timestamps

@@ -662,10 +698,6 @@ static void doTimesliceImpl(SOperatorInfo* pOperator, STimeSliceOperatorInfo* pS
continue;
}
if (pSliceInfo->current > pSliceInfo->win.ekey) {
break;
}
if (ts == pSliceInfo->current) {
addCurrentRowToResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i);

@@ -674,9 +706,14 @@ static void doTimesliceImpl(SOperatorInfo* pOperator, STimeSliceOperatorInfo* pS
pSliceInfo->current =
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
if (pSliceInfo->current > pSliceInfo->win.ekey) {
if (checkWindowBoundReached(pSliceInfo)) {
break;
}
if (checkThresholdReached(pSliceInfo, pOperator->resultInfo.threshold)) {
saveBlockStatus(pSliceInfo, pBlock, i);
return;
}
} else if (ts < pSliceInfo->current) {
// in case of interpolation window starts and ends between two datapoints, fill(prev) need to interpolate
doKeepPrevRows(pSliceInfo, pBlock, i);

@@ -697,9 +734,13 @@ static void doTimesliceImpl(SOperatorInfo* pOperator, STimeSliceOperatorInfo* pS
}
}
if (pSliceInfo->current > pSliceInfo->win.ekey) {
if (checkWindowBoundReached(pSliceInfo)) {
break;
}
if (checkThresholdReached(pSliceInfo, pOperator->resultInfo.threshold)) {
saveBlockStatus(pSliceInfo, pBlock, i);
return;
}
} else {
// ignore current row, and do nothing
}

@@ -730,11 +771,20 @@ static void doTimesliceImpl(SOperatorInfo* pOperator, STimeSliceOperatorInfo* pS
}
doKeepPrevRows(pSliceInfo, pBlock, i);
if (pSliceInfo->current > pSliceInfo->win.ekey) {
if (checkWindowBoundReached(pSliceInfo)) {
break;
}
if (checkThresholdReached(pSliceInfo, pOperator->resultInfo.threshold)) {
saveBlockStatus(pSliceInfo, pBlock, i);
return;
}
}
}
// if reached here, meaning block processing finished naturally,
// or interpolation reach window upper bound
pSliceInfo->pRemainRes = NULL;
}
static void genInterpAfterDataBlock(STimeSliceOperatorInfo* pSliceInfo, SOperatorInfo* pOperator, int32_t index) {

@@ -781,39 +831,69 @@ static void resetTimesliceInfo(STimeSliceOperatorInfo* pSliceInfo) {
resetKeeperInfo(pSliceInfo);
}
static void doHandleTimeslice(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
STimeSliceOperatorInfo* pSliceInfo = pOperator->info;
SExprSupp* pSup = &pOperator->exprSupp;
bool ignoreNull = getIgoreNullRes(pSup);
int32_t order = TSDB_ORDER_ASC;
int32_t code = initKeeperInfo(pSliceInfo, pBlock, &pOperator->exprSupp);
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code);
}
if (pSliceInfo->scalarSup.pExprInfo != NULL) {
SExprSupp* pExprSup = &pSliceInfo->scalarSup;
projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL);
}
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock(pSup, pBlock, order, MAIN_SCAN, true);
doTimesliceImpl(pOperator, pSliceInfo, pBlock, pTaskInfo, ignoreNull);
copyPrevGroupKey(&pOperator->exprSupp, pSliceInfo->pPrevGroupKey, pBlock);
}
static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
}
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
STimeSliceOperatorInfo* pSliceInfo = pOperator->info;
SSDataBlock* pResBlock = pSliceInfo->pRes;
SExprSupp* pSup = &pOperator->exprSupp;
bool ignoreNull = getIgoreNullRes(pSup);
int32_t order = TSDB_ORDER_ASC;
SInterval* pInterval = &pSliceInfo->interval;
SOperatorInfo* downstream = pOperator->pDownstream[0];
blockDataCleanup(pResBlock);
while (1) {
if (pSliceInfo->pNextGroupRes != NULL) {
setInputDataBlock(pSup, pSliceInfo->pNextGroupRes, order, MAIN_SCAN, true);
doTimesliceImpl(pOperator, pSliceInfo, pSliceInfo->pNextGroupRes, pTaskInfo, ignoreNull);
copyPrevGroupKey(&pOperator->exprSupp, pSliceInfo->pPrevGroupKey, pSliceInfo->pNextGroupRes);
doHandleTimeslice(pOperator, pSliceInfo->pNextGroupRes);
if (checkWindowBoundReached(pSliceInfo) || checkThresholdReached(pSliceInfo, pOperator->resultInfo.threshold)) {
doFilter(pResBlock, pOperator->exprSupp.pFilterInfo, NULL);
if (pSliceInfo->pRemainRes == NULL) {
pSliceInfo->pNextGroupRes = NULL;
}
if (pResBlock->info.rows != 0) {
goto _finished;
} else {
// after fillter if result block has 0 rows, go back to
// process pNextGroupRes again for unfinished data
continue;
}
}
pSliceInfo->pNextGroupRes = NULL;
}
while (1) {
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
SSDataBlock* pBlock = pSliceInfo->pRemainRes ? pSliceInfo->pRemainRes : downstream->fpSet.getNextFn(downstream);
if (pBlock == NULL) {
setOperatorCompleted(pOperator);
break;
}
pResBlock->info.scanFlag = pBlock->info.scanFlag;
if (pSliceInfo->groupId == 0 && pBlock->info.id.groupId != 0) {
pSliceInfo->groupId = pBlock->info.id.groupId;
} else {

@@ -824,21 +904,15 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
}
}
if (pSliceInfo->scalarSup.pExprInfo != NULL) {
SExprSupp* pExprSup = &pSliceInfo->scalarSup;
projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL);
doHandleTimeslice(pOperator, pBlock);
if (checkWindowBoundReached(pSliceInfo) || checkThresholdReached(pSliceInfo, pOperator->resultInfo.threshold)) {
doFilter(pResBlock, pOperator->exprSupp.pFilterInfo, NULL);
if (pResBlock->info.rows != 0) {
goto _finished;
}
}
int32_t code = initKeeperInfo(pSliceInfo, pBlock, &pOperator->exprSupp);
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code);
}
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock(pSup, pBlock, order, MAIN_SCAN, true);
doTimesliceImpl(pOperator, pSliceInfo, pBlock, pTaskInfo, ignoreNull);
copyPrevGroupKey(&pOperator->exprSupp, pSliceInfo->pPrevGroupKey, pBlock);
}
// post work for a specific group
// check if need to interpolate after last datablock
// except for fill(next), fill(linear)

@@ -851,11 +925,12 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
// restore initial value for next group
resetTimesliceInfo(pSliceInfo);
if (pResBlock->info.rows >= 4096) {
if (pResBlock->info.rows != 0) {
break;
}
}
_finished:
// restore the value
setTaskStatus(pOperator->pTaskInfo, TASK_COMPLETED);
if (pResBlock->info.rows == 0) {

@@ -911,6 +986,8 @@ SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode
pInfo->groupId = 0;
pInfo->pPrevGroupKey = NULL;
pInfo->pNextGroupRes = NULL;
pInfo->pRemainRes = NULL;
pInfo->remainIndex = 0;
if (downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
STableScanInfo* pScanInfo = (STableScanInfo*)downstream->info;

@@ -358,7 +358,7 @@ static void setNotInterpoWindowKey(SqlFunctionCtx* pCtx, int32_t numOfOutput, in
static bool setTimeWindowInterpolationStartTs(SIntervalAggOperatorInfo* pInfo, int32_t pos, SSDataBlock* pBlock,
const TSKEY* tsCols, STimeWindow* win, SExprSupp* pSup) {
bool ascQuery = (pInfo->inputOrder == TSDB_ORDER_ASC);
bool ascQuery = (pInfo->binfo.inputTsOrder == TSDB_ORDER_ASC);
TSKEY curTs = tsCols[pos];

@@ -388,7 +388,7 @@ static bool setTimeWindowInterpolationStartTs(SIntervalAggOperatorInfo* pInfo, i
static bool setTimeWindowInterpolationEndTs(SIntervalAggOperatorInfo* pInfo, SExprSupp* pSup, int32_t endRowIndex,
SArray* pDataBlock, const TSKEY* tsCols, TSKEY blockEkey,
STimeWindow* win) {
int32_t order = pInfo->inputOrder;
int32_t order = pInfo->binfo.inputTsOrder;
TSKEY actualEndKey = tsCols[endRowIndex];
TSKEY key = (order == TSDB_ORDER_ASC) ? win->ekey : win->skey;

@@ -550,7 +550,7 @@ static void doWindowBorderInterpolation(SIntervalAggOperatorInfo* pInfo, SSDataB
if (!done) {
int32_t endRowIndex = startPos + forwardRows - 1;
TSKEY endKey = (pInfo->inputOrder == TSDB_ORDER_ASC) ? pBlock->info.window.ekey : pBlock->info.window.skey;
TSKEY endKey = (pInfo->binfo.inputTsOrder == TSDB_ORDER_ASC) ? pBlock->info.window.ekey : pBlock->info.window.skey;
bool interp = setTimeWindowInterpolationEndTs(pInfo, pSup, endRowIndex, pBlock->pDataBlock, tsCols, endKey, win);
if (interp) {
setResultRowInterpo(pResult, RESULT_ROW_END_INTERP);

@@ -888,12 +888,12 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
int32_t numOfOutput = pSup->numOfExprs;
int64_t* tsCols = extractTsCol(pBlock, pInfo);
uint64_t tableGroupId = pBlock->info.id.groupId;
bool ascScan = (pInfo->inputOrder == TSDB_ORDER_ASC);
bool ascScan = (pInfo->binfo.inputTsOrder == TSDB_ORDER_ASC);
TSKEY ts = getStartTsKey(&pBlock->info.window, tsCols);
SResultRow* pResult = NULL;
STimeWindow win =
getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval, pInfo->inputOrder);
getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval, pInfo->binfo.inputTsOrder);
int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId,
pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {

@@ -902,7 +902,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
TSKEY ekey = ascScan ? win.ekey : win.skey;
int32_t forwardRows =
getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->inputOrder);
getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->binfo.inputTsOrder);
// prev time window not interpolation yet.
if (pInfo->timeWindowInterpo) {

@@ -929,7 +929,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
STimeWindow nextWin = win;
while (1) {
int32_t prevEndPos = forwardRows - 1 + startPos;
startPos = getNextQualifiedWindow(&pInfo->interval, &nextWin, &pBlock->info, tsCols, prevEndPos, pInfo->inputOrder);
startPos = getNextQualifiedWindow(&pInfo->interval, &nextWin, &pBlock->info, tsCols, prevEndPos, pInfo->binfo.inputTsOrder);
if (startPos < 0) {
break;
}

@@ -942,7 +942,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
ekey = ascScan ? nextWin.ekey : nextWin.skey;
forwardRows =
getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->inputOrder);
getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->binfo.inputTsOrder);
// window start(end) key interpolation
doWindowBorderInterpolation(pInfo, pBlock, pResult, &nextWin, startPos, forwardRows, pSup);
// TODO: add to open window? how to close the open windows after input blocks exhausted?

@@ -1035,7 +1035,7 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) {
break;
}
getTableScanInfo(pOperator, &pInfo->inputOrder, &scanFlag, true);
pInfo->binfo.pRes->info.scanFlag = scanFlag = pBlock->info.scanFlag;
if (pInfo->scalarSupp.pExprInfo != NULL) {
SExprSupp* pExprSup = &pInfo->scalarSupp;

@@ -1043,11 +1043,11 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) {
}
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock(pSup, pBlock, pInfo->inputOrder, scanFlag, true);
setInputDataBlock(pSup, pBlock, pInfo->binfo.inputTsOrder, scanFlag, true);
hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, scanFlag);
}
initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, pInfo->resultTsOrder);
initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, pInfo->binfo.outputTsOrder);
OPTR_SET_OPENED(pOperator);
pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0;

@@ -1161,7 +1161,7 @@ static int32_t openStateWindowAggOptr(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SExprSupp* pSup = &pOperator->exprSupp;
int32_t order = TSDB_ORDER_ASC;
int32_t order = pInfo->binfo.inputTsOrder;
int64_t st = taosGetTimestampUs();
SOperatorInfo* downstream = pOperator->pDownstream[0];

@@ -1171,6 +1171,7 @@ static int32_t openStateWindowAggOptr(SOperatorInfo* pOperator) {
break;
}
pInfo->binfo.pRes->info.scanFlag = pBlock->info.scanFlag;
setInputDataBlock(pSup, pBlock, order, MAIN_SCAN, true);
blockDataUpdateTsWindow(pBlock, pInfo->tsSlotId);

@@ -1653,8 +1654,8 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SIntervalPh
};
pInfo->win = pTaskInfo->window;
pInfo->inputOrder = (pPhyNode->window.inputTsOrder == ORDER_ASC) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
pInfo->resultTsOrder = (pPhyNode->window.outputTsOrder == ORDER_ASC) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
pInfo->binfo.inputTsOrder = pPhyNode->window.node.inputTsOrder;
pInfo->binfo.outputTsOrder = pPhyNode->window.node.outputTsOrder;
pInfo->interval = interval;
pInfo->twAggSup = as;
pInfo->binfo.mergeResultBlock = pPhyNode->window.mergeDataBlock;

@@ -1805,7 +1806,7 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) {
}
int64_t st = taosGetTimestampUs();
int32_t order = TSDB_ORDER_ASC;
int32_t order = pInfo->binfo.inputTsOrder;
SOperatorInfo* downstream = pOperator->pDownstream[0];

@@ -1815,6 +1816,7 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) {
break;
}
pBInfo->pRes->info.scanFlag = pBlock->info.scanFlag;
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock(pSup, pBlock, order, MAIN_SCAN, true);
blockDataUpdateTsWindow(pBlock, pInfo->tsSlotId);

@@ -1875,6 +1877,8 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWi
if (pInfo->stateKey.pData == NULL) {
goto _error;
}
pInfo->binfo.inputTsOrder = pStateNode->window.node.inputTsOrder;
pInfo->binfo.outputTsOrder = pStateNode->window.node.outputTsOrder;
int32_t code = filterInitFromNode((SNode*)pStateNode->window.node.pConditions, &pOperator->exprSupp.pFilterInfo, 0);
if (code != TSDB_CODE_SUCCESS) {

@@ -1973,6 +1977,8 @@ SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionW
pInfo->binfo.pRes = pResBlock;
pInfo->winSup.prevTs = INT64_MIN;
pInfo->reptScan = false;
pInfo->binfo.inputTsOrder = pSessionNode->window.node.inputTsOrder;
pInfo->binfo.outputTsOrder = pSessionNode->window.node.outputTsOrder;
code = filterInitFromNode((SNode*)pSessionNode->window.node.pConditions, &pOperator->exprSupp.pFilterInfo, 0);
if (code != TSDB_CODE_SUCCESS) {
goto _error;

@@ -4497,7 +4503,6 @@ static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
SSDataBlock* pRes = pIaInfo->binfo.pRes;
SResultRowInfo* pResultRowInfo = &pIaInfo->binfo.resultRowInfo;
SOperatorInfo* downstream = pOperator->pDownstream[0];
int32_t scanFlag = MAIN_SCAN;
while (1) {
SSDataBlock* pBlock = NULL;

@@ -4544,8 +4549,8 @@ static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
}
}
getTableScanInfo(pOperator, &pIaInfo->inputOrder, &scanFlag, false);
setInputDataBlock(pSup, pBlock, pIaInfo->inputOrder, scanFlag, true);
pRes->info.scanFlag = pBlock->info.scanFlag;
setInputDataBlock(pSup, pBlock, pIaInfo->binfo.inputTsOrder, pBlock->info.scanFlag, true);
doMergeAlignedIntervalAggImpl(pOperator, &pIaInfo->binfo.resultRowInfo, pBlock, pRes);
doFilter(pRes, pOperator->exprSupp.pFilterInfo, NULL);

@@ -4618,7 +4623,8 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream,
miaInfo->curTs = INT64_MIN;
iaInfo->win = pTaskInfo->window;
iaInfo->inputOrder = TSDB_ORDER_ASC;
iaInfo->binfo.inputTsOrder = pNode->window.node.inputTsOrder;
iaInfo->binfo.outputTsOrder = pNode->window.node.outputTsOrder;
iaInfo->interval = interval;
iaInfo->primaryTsIndex = ((SColumnNode*)pNode->window.pTspk)->slotId;
iaInfo->binfo.mergeResultBlock = pNode->window.mergeDataBlock;

@@ -4695,7 +4701,7 @@ static int32_t outputPrevIntervalResult(SOperatorInfo* pOperatorInfo, uint64_t t
STimeWindow* newWin) {
SMergeIntervalAggOperatorInfo* miaInfo = pOperatorInfo->info;
SIntervalAggOperatorInfo* iaInfo = &miaInfo->intervalAggOperatorInfo;
bool ascScan = (iaInfo->inputOrder == TSDB_ORDER_ASC);
bool ascScan = (iaInfo->binfo.inputTsOrder == TSDB_ORDER_ASC);
SGroupTimeWindow groupTimeWindow = {.groupId = tableGroupId, .window = *newWin};
tdListAppend(miaInfo->groupIntervals, &groupTimeWindow);

@@ -4730,12 +4736,12 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo*
int32_t numOfOutput = pExprSup->numOfExprs;
int64_t* tsCols = extractTsCol(pBlock, iaInfo);
uint64_t tableGroupId = pBlock->info.id.groupId;
bool ascScan = (iaInfo->inputOrder == TSDB_ORDER_ASC);
bool ascScan = (iaInfo->binfo.inputTsOrder == TSDB_ORDER_ASC);
TSKEY blockStartTs = getStartTsKey(&pBlock->info.window, tsCols);
SResultRow* pResult = NULL;
STimeWindow win = getActiveTimeWindow(iaInfo->aggSup.pResultBuf, pResultRowInfo, blockStartTs, &iaInfo->interval,
iaInfo->inputOrder);
iaInfo->binfo.inputTsOrder);
int32_t ret =
setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pExprSup->pCtx,

@@ -4746,7 +4752,7 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo*
TSKEY ekey = ascScan ? win.ekey : win.skey;
int32_t forwardRows =
getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, iaInfo->inputOrder);
getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, iaInfo->binfo.inputTsOrder);
ASSERT(forwardRows > 0);
// prev time window not interpolation yet.

@@ -4777,7 +4783,7 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo*
while (1) {
int32_t prevEndPos = forwardRows - 1 + startPos;
startPos =
getNextQualifiedWindow(&iaInfo->interval, &nextWin, &pBlock->info, tsCols, prevEndPos, iaInfo->inputOrder);
getNextQualifiedWindow(&iaInfo->interval, &nextWin, &pBlock->info, tsCols, prevEndPos, iaInfo->binfo.inputTsOrder);
if (startPos < 0) {
break;
}

@@ -4792,7 +4798,7 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo*
ekey = ascScan ? nextWin.ekey : nextWin.skey;
forwardRows =
getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, iaInfo->inputOrder);
getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, iaInfo->binfo.inputTsOrder);
// window start(end) key interpolation
doWindowBorderInterpolation(iaInfo, pBlock, pResult, &nextWin, startPos, forwardRows, pExprSup);

@@ -4828,7 +4834,6 @@ static SSDataBlock* doMergeIntervalAgg(SOperatorInfo* pOperator) {
if (!miaInfo->inputBlocksFinished) {
SOperatorInfo* downstream = pOperator->pDownstream[0];
int32_t scanFlag = MAIN_SCAN;
while (1) {
SSDataBlock* pBlock = NULL;
if (miaInfo->prefetchedBlock == NULL) {

@@ -4853,9 +4858,9 @@ static SSDataBlock* doMergeIntervalAgg(SOperatorInfo* pOperator) {
break;
}
getTableScanInfo(pOperator, &iaInfo->inputOrder, &scanFlag, false);
setInputDataBlock(pExpSupp, pBlock, iaInfo->inputOrder, scanFlag, true);
doMergeIntervalAggImpl(pOperator, &iaInfo->binfo.resultRowInfo, pBlock, scanFlag, pRes);
pRes->info.scanFlag = pBlock->info.scanFlag;
setInputDataBlock(pExpSupp, pBlock, iaInfo->binfo.inputTsOrder, pBlock->info.scanFlag, true);
doMergeIntervalAggImpl(pOperator, &iaInfo->binfo.resultRowInfo, pBlock, pBlock->info.scanFlag, pRes);
if (pRes->info.rows >= pOperator->resultInfo.threshold) {
break;

@@ -4905,10 +4910,11 @@ SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SMerge
SIntervalAggOperatorInfo* pIntervalInfo = &pMergeIntervalInfo->intervalAggOperatorInfo;
pIntervalInfo->win = pTaskInfo->window;
pIntervalInfo->inputOrder = TSDB_ORDER_ASC;
pIntervalInfo->binfo.inputTsOrder = pIntervalPhyNode->window.node.inputTsOrder;
pIntervalInfo->interval = interval;
pIntervalInfo->binfo.mergeResultBlock = pIntervalPhyNode->window.mergeDataBlock;
pIntervalInfo->primaryTsIndex = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
pIntervalInfo->binfo.outputTsOrder = pIntervalPhyNode->window.node.outputTsOrder;
SExprSupp* pExprSupp = &pOperator->exprSupp;

@@ -887,6 +887,7 @@ void* tsortGetValue(STupleHandle* pVHandle, int32_t colIndex) {
}
uint64_t tsortGetGroupId(STupleHandle* pVHandle) { return pVHandle->pBlock->info.id.groupId; }
void* tsortGetBlockInfo(STupleHandle* pVHandle) { return &pVHandle->pBlock->info; }
SSortExecInfo tsortGetSortExecInfo(SSortHandle* pHandle) {
SSortExecInfo info = {0};

@@ -110,6 +110,7 @@ static int32_t columnNodeCopy(const SColumnNode* pSrc, SColumnNode* pDst) {
COPY_SCALAR_FIELD(tableId);
COPY_SCALAR_FIELD(tableType);
COPY_SCALAR_FIELD(colId);
COPY_SCALAR_FIELD(projIdx);
COPY_SCALAR_FIELD(colType);
COPY_SCALAR_FIELD(hasIndex);
COPY_CHAR_ARRAY_FIELD(dbName);

@@ -358,6 +359,8 @@ static int32_t logicNodeCopy(const SLogicNode* pSrc, SLogicNode* pDst) {
COPY_SCALAR_FIELD(requireDataOrder);
COPY_SCALAR_FIELD(resultDataOrder);
COPY_SCALAR_FIELD(groupAction);
COPY_SCALAR_FIELD(inputTsOrder);
COPY_SCALAR_FIELD(outputTsOrder);
return TSDB_CODE_SUCCESS;
}

@@ -404,7 +407,6 @@ static int32_t logicJoinCopy(const SJoinLogicNode* pSrc, SJoinLogicNode* pDst) {
CLONE_NODE_FIELD(pOnConditions);
CLONE_NODE_FIELD(pColEqualOnConditions);
COPY_SCALAR_FIELD(isSingleTableJoin);
COPY_SCALAR_FIELD(inputTsOrder);
return TSDB_CODE_SUCCESS;
}

@@ -482,8 +484,6 @@ static int32_t logicWindowCopy(const SWindowLogicNode* pSrc, SWindowLogicNode* p
COPY_SCALAR_FIELD(igExpired);
COPY_SCALAR_FIELD(igCheckUpdate);
COPY_SCALAR_FIELD(windowAlgo);
COPY_SCALAR_FIELD(inputTsOrder);
COPY_SCALAR_FIELD(outputTsOrder);
return TSDB_CODE_SUCCESS;
}

@@ -495,7 +495,6 @@ static int32_t logicFillCopy(const SFillLogicNode* pSrc, SFillLogicNode* pDst) {
CLONE_NODE_FIELD(pWStartTs);
CLONE_NODE_FIELD(pValues);
COPY_OBJECT_FIELD(timeRange, sizeof(STimeWindow));
COPY_SCALAR_FIELD(inputTsOrder);
return TSDB_CODE_SUCCESS;
}

@@ -544,6 +543,8 @@ static int32_t physiNodeCopy(const SPhysiNode* pSrc, SPhysiNode* pDst) {
CLONE_NODE_FIELD_EX(pOutputDataBlockDesc, SDataBlockDescNode*);
CLONE_NODE_FIELD(pConditions);
CLONE_NODE_LIST_FIELD(pChildren);
COPY_SCALAR_FIELD(inputTsOrder);
COPY_SCALAR_FIELD(outputTsOrder);
return TSDB_CODE_SUCCESS;
}

@@ -1904,9 +1904,6 @@ static int32_t physiJoinNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkJoinPhysiPlanJoinType, pNode->joinType);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkJoinPhysiPlanInputTsOrder, pNode->inputTsOrder);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkJoinPhysiPlanMergeCondition, nodeToJson, pNode->pMergeCondition);
}

@@ -1929,9 +1926,6 @@ static int32_t jsonToPhysiJoinNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkJoinPhysiPlanJoinType, pNode->joinType, code);
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkJoinPhysiPlanInputTsOrder, pNode->inputTsOrder, code);
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkJoinPhysiPlanOnConditions, &pNode->pOnConditions);
}

@@ -2150,7 +2144,6 @@ static const char* jkWindowPhysiPlanWatermark = "Watermark";
static const char* jkWindowPhysiPlanDeleteMark = "DeleteMark";
static const char* jkWindowPhysiPlanIgnoreExpired = "IgnoreExpired";
static const char* jkWindowPhysiPlanInputTsOrder = "InputTsOrder";
static const char* jkWindowPhysiPlanOutputTsOrder = "outputTsOrder";
static const char* jkWindowPhysiPlanMergeDataBlock = "MergeDataBlock";
static int32_t physiWindowNodeToJson(const void* pObj, SJson* pJson) {

@@ -2181,12 +2174,6 @@ static int32_t physiWindowNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkWindowPhysiPlanIgnoreExpired, pNode->igExpired);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkWindowPhysiPlanInputTsOrder, pNode->inputTsOrder);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkWindowPhysiPlanOutputTsOrder, pNode->outputTsOrder);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkWindowPhysiPlanMergeDataBlock, pNode->mergeDataBlock);
}

@@ -2222,12 +2209,6 @@ static int32_t jsonToPhysiWindowNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetTinyIntValue(pJson, jkWindowPhysiPlanIgnoreExpired, &pNode->igExpired);
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkWindowPhysiPlanInputTsOrder, pNode->inputTsOrder, code);
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkWindowPhysiPlanOutputTsOrder, pNode->outputTsOrder, code);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkWindowPhysiPlanMergeDataBlock, &pNode->mergeDataBlock);
}

@@ -2294,7 +2275,6 @@ static const char* jkFillPhysiPlanWStartTs = "WStartTs";
static const char* jkFillPhysiPlanValues = "Values";
static const char* jkFillPhysiPlanStartTime = "StartTime";
static const char* jkFillPhysiPlanEndTime = "EndTime";
static const char* jkFillPhysiPlanInputTsOrder = "inputTsOrder";
static int32_t physiFillNodeToJson(const void* pObj, SJson* pJson) {
const SFillPhysiNode* pNode = (const SFillPhysiNode*)pObj;

@@ -2321,9 +2301,6 @@ static int32_t physiFillNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkFillPhysiPlanEndTime, pNode->timeRange.ekey);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkFillPhysiPlanInputTsOrder, pNode->inputTsOrder);
}
return code;
}

@@ -2353,9 +2330,6 @@ static int32_t jsonToPhysiFillNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBigIntValue(pJson, jkFillPhysiPlanEndTime, &pNode->timeRange.ekey);
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkFillPhysiPlanInputTsOrder, pNode->inputTsOrder, code);
}
return code;
}

@@ -3053,6 +3027,7 @@ static const char* jkColumnTableId = "TableId";
static const char* jkColumnTableType = "TableType";
static const char* jkColumnColId = "ColId";
static const char* jkColumnColType = "ColType";
static const char* jkColumnProjId = "ProjId";
static const char* jkColumnDbName = "DbName";
static const char* jkColumnTableName = "TableName";
static const char* jkColumnTableAlias = "TableAlias";

@@ -3073,6 +3048,9 @@ static int32_t columnNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkColumnColId, pNode->colId);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkColumnProjId, pNode->projIdx);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkColumnColType, pNode->colType);
}

@@ -3111,6 +3089,9 @@ static int32_t jsonToColumnNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetSmallIntValue(pJson, jkColumnColId, &pNode->colId);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetSmallIntValue(pJson, jkColumnProjId, &pNode->projIdx);
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkColumnColType, pNode->colType, code);
}

@@ -1851,7 +1851,9 @@ enum {
PHY_NODE_CODE_CONDITIONS,
PHY_NODE_CODE_CHILDREN,
PHY_NODE_CODE_LIMIT,
PHY_NODE_CODE_SLIMIT
PHY_NODE_CODE_SLIMIT,
PHY_NODE_CODE_INPUT_TS_ORDER,
PHY_NODE_CODE_OUTPUT_TS_ORDER
};
static int32_t physiNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {

@@ -1870,6 +1872,12 @@ static int32_t physiNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
if (TSDB_CODE_SUCCESS == code) {
code = tlvEncodeObj(pEncoder, PHY_NODE_CODE_SLIMIT, nodeToMsg, pNode->pSlimit);
}
if (TSDB_CODE_SUCCESS == code) {
code = tlvEncodeEnum(pEncoder, PHY_NODE_CODE_INPUT_TS_ORDER, pNode->inputTsOrder);
}
if (TSDB_CODE_SUCCESS == code) {
code = tlvEncodeEnum(pEncoder, PHY_NODE_CODE_OUTPUT_TS_ORDER, pNode->outputTsOrder);
}
return code;
}

@@ -1896,6 +1904,12 @@ static int32_t msgToPhysiNode(STlvDecoder* pDecoder, void* pObj) {
case PHY_NODE_CODE_SLIMIT:
code = msgToNodeFromTlv(pTlv, (void**)&pNode->pSlimit);
break;
case PHY_NODE_CODE_INPUT_TS_ORDER:
code = tlvDecodeEnum(pTlv, &pNode->inputTsOrder, sizeof(pNode->inputTsOrder));
break;
case PHY_NODE_CODE_OUTPUT_TS_ORDER:
code = tlvDecodeEnum(pTlv, &pNode->outputTsOrder, sizeof(pNode->outputTsOrder));
break;
default:
break;
}

@@ -2339,9 +2353,6 @@ static int32_t physiJoinNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
if (TSDB_CODE_SUCCESS == code) {
code = tlvEncodeObj(pEncoder, PHY_SORT_MERGE_JOIN_CODE_TARGETS, nodeListToMsg, pNode->pTargets);
}
if (TSDB_CODE_SUCCESS == code) {
code = tlvEncodeEnum(pEncoder, PHY_SORT_MERGE_JOIN_CODE_INPUT_TS_ORDER, pNode->inputTsOrder);
}
if (TSDB_CODE_SUCCESS == code) {
code = tlvEncodeObj(pEncoder, PHY_SORT_MERGE_JOIN_CODE_TAG_EQUAL_CONDITIONS, nodeToMsg, pNode->pColEqualOnConditions);
}

@@ -2370,9 +2381,6 @@ static int32_t msgToPhysiJoinNode(STlvDecoder* pDecoder, void* pObj) {
case PHY_SORT_MERGE_JOIN_CODE_TARGETS:
code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pTargets);
break;
case PHY_SORT_MERGE_JOIN_CODE_INPUT_TS_ORDER:
code = tlvDecodeEnum(pTlv, &pNode->inputTsOrder, sizeof(pNode->inputTsOrder));
break;
case PHY_SORT_MERGE_JOIN_CODE_TAG_EQUAL_CONDITIONS:
code = msgToNodeFromTlv(pTlv, (void**)&pNode->pColEqualOnConditions);
break;

@@ -2675,12 +2683,6 @@ static int32_t physiWindowNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
if (TSDB_CODE_SUCCESS == code) {
code = tlvEncodeI8(pEncoder, PHY_WINDOW_CODE_IG_EXPIRED, pNode->igExpired);
}
if (TSDB_CODE_SUCCESS == code) {
code = tlvEncodeEnum(pEncoder, PHY_WINDOW_CODE_INPUT_TS_ORDER, pNode->inputTsOrder);
}
if (TSDB_CODE_SUCCESS == code) {
code = tlvEncodeEnum(pEncoder, PHY_WINDOW_CODE_OUTPUT_TS_ORDER, pNode->outputTsOrder);
}
if (TSDB_CODE_SUCCESS == code) {
code = tlvEncodeBool(pEncoder, PHY_WINDOW_CODE_MERGE_DATA_BLOCK, pNode->mergeDataBlock);
}

@@ -2722,12 +2724,6 @@ static int32_t msgToPhysiWindowNode(STlvDecoder* pDecoder, void* pObj) {
case PHY_WINDOW_CODE_IG_EXPIRED:
code = tlvDecodeI8(pTlv, &pNode->igExpired);
break;
case PHY_WINDOW_CODE_INPUT_TS_ORDER:
code = tlvDecodeEnum(pTlv, &pNode->inputTsOrder, sizeof(pNode->inputTsOrder));
break;
case PHY_WINDOW_CODE_OUTPUT_TS_ORDER:
code = tlvDecodeEnum(pTlv, &pNode->outputTsOrder, sizeof(pNode->outputTsOrder));
break;
case PHY_WINDOW_CODE_MERGE_DATA_BLOCK:
code = tlvDecodeBool(pTlv, &pNode->mergeDataBlock);
break;

@@ -2846,9 +2842,6 @@ static int32_t physiFillNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
if (TSDB_CODE_SUCCESS == code) {
code = tlvEncodeObj(pEncoder, PHY_FILL_CODE_TIME_RANGE, timeWindowToMsg, &pNode->timeRange);
}
if (TSDB_CODE_SUCCESS == code) {
code = tlvEncodeEnum(pEncoder, PHY_FILL_CODE_INPUT_TS_ORDER, pNode->inputTsOrder);
}
return code;
}

@@ -2881,9 +2874,6 @@ static int32_t msgToPhysiFillNode(STlvDecoder* pDecoder, void* pObj) {
case PHY_FILL_CODE_TIME_RANGE:
code = tlvDecodeObjFromTlv(pTlv, msgToTimeWindow, (void**)&pNode->timeRange);
break;
case PHY_FILL_CODE_INPUT_TS_ORDER:
code = tlvDecodeEnum(pTlv, &pNode->inputTsOrder, sizeof(pNode->inputTsOrder));
break;
default:
break;
}

@@ -3075,13 +3075,13 @@ static bool needFill(SNode* pNode) {
static int32_t convertFillValue(STranslateContext* pCxt, SDataType dt, SNodeList* pValues, int32_t index) {
SListCell* pCell = nodesListGetCell(pValues, index);
if (dataTypeEqual(&dt, &((SExprNode*)pCell->pNode)->resType)) {
if (dataTypeEqual(&dt, &((SExprNode*)pCell->pNode)->resType) && (QUERY_NODE_VALUE == nodeType(pCell->pNode))) {
return TSDB_CODE_SUCCESS;
}
SNode* pCaseFunc = NULL;
int32_t code = createCastFunc(pCxt, pCell->pNode, dt, &pCaseFunc);
SNode* pCastFunc = NULL;
int32_t code = createCastFunc(pCxt, pCell->pNode, dt, &pCastFunc);
if (TSDB_CODE_SUCCESS == code) {
code = scalarCalculateConstants(pCaseFunc, &pCell->pNode);
code = scalarCalculateConstants(pCastFunc, &pCell->pNode);
}
if (TSDB_CODE_SUCCESS == code && QUERY_NODE_VALUE != nodeType(pCell->pNode)) {
code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value can only accept constant");

@@ -436,7 +436,7 @@ static int32_t createJoinLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
pJoin->joinType = pJoinTable->joinType;
pJoin->isSingleTableJoin = pJoinTable->table.singleTable;
pJoin->inputTsOrder = ORDER_ASC;
pJoin->node.inputTsOrder = ORDER_ASC;
pJoin->node.groupAction = GROUP_ACTION_CLEAR;
pJoin->node.requireDataOrder = DATA_ORDER_LEVEL_GLOBAL;
pJoin->node.resultDataOrder = DATA_ORDER_LEVEL_GLOBAL;

@@ -741,8 +741,8 @@ static int32_t createWindowLogicNodeFinalize(SLogicPlanContext* pCxt, SSelectStm
pWindow->igExpired = pCxt->pPlanCxt->igExpired;
pWindow->igCheckUpdate = pCxt->pPlanCxt->igCheckUpdate;
}
pWindow->inputTsOrder = ORDER_ASC;
pWindow->outputTsOrder = ORDER_ASC;
pWindow->node.inputTsOrder = ORDER_ASC;
pWindow->node.outputTsOrder = ORDER_ASC;
int32_t code = nodesCollectFuncs(pSelect, SQL_CLAUSE_WINDOW, fmIsWindowClauseFunc, &pWindow->pFuncs);
if (TSDB_CODE_SUCCESS == code) {

@@ -972,7 +972,7 @@ static int32_t createFillLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
pFill->node.groupAction = getGroupAction(pCxt, pSelect);
pFill->node.requireDataOrder = getRequireDataOrder(true, pSelect);
pFill->node.resultDataOrder = pFill->node.requireDataOrder;
pFill->inputTsOrder = ORDER_ASC;
pFill->node.inputTsOrder = 0;
int32_t code = partFillExprs(pSelect, &pFill->pFillExprs, &pFill->pNotFillExprs);
if (TSDB_CODE_SUCCESS == code) {

@@ -1045,6 +1045,19 @@ static int32_t createSortLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
if (NULL == pSort->pSortKeys) {
code = TSDB_CODE_OUT_OF_MEMORY;
}
SNode* pNode = NULL;
SOrderByExprNode* firstSortKey = (SOrderByExprNode*)nodesListGetNode(pSort->pSortKeys, 0);
if (firstSortKey->pExpr->type == QUERY_NODE_COLUMN) {
SColumnNode* pCol = (SColumnNode*)firstSortKey->pExpr;
int16_t projIdx = 1;
FOREACH(pNode, pSelect->pProjectionList) {
SExprNode* pExpr = (SExprNode*)pNode;
if (0 == strcmp(pCol->node.aliasName, pExpr->aliasName)) {
pCol->projIdx = projIdx; break;
}
projIdx++;
}
}
}
if (TSDB_CODE_SUCCESS == code) {

@ -116,25 +116,33 @@ static EDealRes optRebuildTbanme(SNode** pNode, void* pContext) {
|
|||
return DEAL_RES_CONTINUE;
|
||||
}
|
||||
|
||||
static void optSetParentOrder(SLogicNode* pNode, EOrder order) {
|
||||
static void optSetParentOrder(SLogicNode* pNode, EOrder order, SLogicNode* pNodeForcePropagate) {
|
||||
if (NULL == pNode) {
|
||||
return;
|
||||
}
|
||||
pNode->inputTsOrder = order;
|
||||
switch (nodeType(pNode)) {
|
||||
case QUERY_NODE_LOGIC_PLAN_WINDOW:
|
||||
((SWindowLogicNode*)pNode)->inputTsOrder = order;
|
||||
// window has a sorting function, and the operator behind it uses its output order
|
||||
return;
|
||||
// for those nodes that will change the order, stop propagating
|
||||
//case QUERY_NODE_LOGIC_PLAN_WINDOW:
|
||||
case QUERY_NODE_LOGIC_PLAN_JOIN:
|
||||
((SJoinLogicNode*)pNode)->inputTsOrder = order;
|
||||
break;
|
||||
case QUERY_NODE_LOGIC_PLAN_FILL:
|
||||
((SFillLogicNode*)pNode)->inputTsOrder = order;
|
||||
case QUERY_NODE_LOGIC_PLAN_AGG:
|
||||
case QUERY_NODE_LOGIC_PLAN_SORT:
|
||||
if (pNode == pNodeForcePropagate) {
|
||||
pNode->outputTsOrder = order;
|
||||
break;
|
||||
} else
|
||||
return;
|
||||
case QUERY_NODE_LOGIC_PLAN_WINDOW:
|
||||
// Window output ts order default to be asc, and changed when doing sort by primary key optimization.
|
||||
// We stop propagate the original order to parents.
|
||||
// Use window output ts order instead.
|
||||
order = pNode->outputTsOrder;
|
||||
break;
|
||||
default:
|
||||
pNode->outputTsOrder = order;
|
||||
break;
|
||||
}
|
||||
optSetParentOrder(pNode->pParent, order);
|
||||
optSetParentOrder(pNode->pParent, order, pNodeForcePropagate);
|
||||
}
|
||||
|
||||
EDealRes scanPathOptHaveNormalColImpl(SNode* pNode, void* pContext) {
|
||||
|
@@ -159,25 +167,25 @@ static bool scanPathOptMayBeOptimized(SLogicNode* pNode) {
  if (QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(pNode)) {
    return false;
  }
  if (NULL == pNode->pParent || (QUERY_NODE_LOGIC_PLAN_WINDOW != nodeType(pNode->pParent) &&
                                 QUERY_NODE_LOGIC_PLAN_AGG != nodeType(pNode->pParent) &&
                                 QUERY_NODE_LOGIC_PLAN_PARTITION != nodeType(pNode->pParent))) {
    return false;
  }
  if ((QUERY_NODE_LOGIC_PLAN_WINDOW == nodeType(pNode->pParent) &&
       WINDOW_TYPE_INTERVAL == ((SWindowLogicNode*)pNode->pParent)->winType) ||
      (QUERY_NODE_LOGIC_PLAN_PARTITION == nodeType(pNode->pParent) && pNode->pParent->pParent &&
       QUERY_NODE_LOGIC_PLAN_WINDOW == nodeType(pNode->pParent->pParent) &&
       WINDOW_TYPE_INTERVAL == ((SWindowLogicNode*)pNode->pParent->pParent)->winType)) {
    return true;
  }

static bool scanPathOptShouldGetFuncs(SLogicNode* pNode) {
  if ((QUERY_NODE_LOGIC_PLAN_WINDOW == nodeType(pNode) &&
       WINDOW_TYPE_INTERVAL == ((SWindowLogicNode*)pNode)->winType) ||
      (QUERY_NODE_LOGIC_PLAN_PARTITION == nodeType(pNode) && pNode->pParent &&
       QUERY_NODE_LOGIC_PLAN_WINDOW == nodeType(pNode->pParent) &&
       WINDOW_TYPE_INTERVAL == ((SWindowLogicNode*)pNode->pParent)->winType)) {
    return true;
  }
  if (QUERY_NODE_LOGIC_PLAN_AGG == nodeType(pNode->pParent)) {
    return !scanPathOptHaveNormalCol(((SAggLogicNode*)pNode->pParent)->pGroupKeys);
  if (QUERY_NODE_LOGIC_PLAN_AGG == nodeType(pNode)) {
    return !scanPathOptHaveNormalCol(((SAggLogicNode*)pNode)->pGroupKeys);
  }
  return false;
}

static SNodeList* scanPathOptGetAllFuncs(SLogicNode* pNode) {
  if (!scanPathOptShouldGetFuncs(pNode)) return NULL;
  switch (nodeType(pNode)) {
    case QUERY_NODE_LOGIC_PLAN_WINDOW:
      return ((SWindowLogicNode*)pNode)->pFuncs;
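scanPathOptShouldGetFuncs above decides whether a node can donate its functions to the scan operator: an interval window (directly, or reached through an intervening PARTITION), or an aggregate whose group keys contain no normal columns. A hedged Python restatement of that predicate, with illustrative node shapes:

def should_get_funcs(node):
    # node is a dict such as {"kind": "window", "win_type": "interval", "parent": ...}
    parent = node.get("parent")
    if node["kind"] == "window" and node.get("win_type") == "interval":
        return True
    if (node["kind"] == "partition" and parent
            and parent["kind"] == "window" and parent.get("win_type") == "interval"):
        return True
    if node["kind"] == "agg":
        return not node.get("group_keys_have_normal_col", False)
    return False

print(should_get_funcs({"kind": "window", "win_type": "interval", "parent": None}))  # True
print(should_get_funcs({"kind": "agg", "group_keys_have_normal_col": True}))         # False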
@@ -339,12 +347,12 @@ static void scanPathOptSetScanOrder(EScanOrder scanOrder, SScanLogicNode* pScan)
    case SCAN_ORDER_ASC:
      pScan->scanSeq[0] = 1;
      pScan->scanSeq[1] = 0;
      optSetParentOrder(pScan->node.pParent, ORDER_ASC);
      optSetParentOrder(pScan->node.pParent, ORDER_ASC, NULL);
      break;
    case SCAN_ORDER_DESC:
      pScan->scanSeq[0] = 0;
      pScan->scanSeq[1] = 1;
      optSetParentOrder(pScan->node.pParent, ORDER_DESC);
      optSetParentOrder(pScan->node.pParent, ORDER_DESC, NULL);
      break;
    case SCAN_ORDER_BOTH:
      pScan->scanSeq[0] = 1;
@@ -1239,6 +1247,7 @@ static int32_t sortPriKeyOptApply(SOptimizeContext* pCxt, SLogicSubplan* pLogicS
    if ((ORDER_DESC == order && pScan->scanSeq[0] > 0) || (ORDER_ASC == order && pScan->scanSeq[1] > 0)) {
      TSWAP(pScan->scanSeq[0], pScan->scanSeq[1]);
    }
    pScan->node.outputTsOrder = order;
    if (TSDB_SUPER_TABLE == pScan->tableType) {
      pScan->scanType = SCAN_TYPE_TABLE_MERGE;
      pScan->node.resultDataOrder = DATA_ORDER_LEVEL_GLOBAL;

@@ -1246,9 +1255,9 @@ static int32_t sortPriKeyOptApply(SOptimizeContext* pCxt, SLogicSubplan* pLogicS
    }
    pScan->sortPrimaryKey = true;
  } else if (QUERY_NODE_LOGIC_PLAN_WINDOW == nodeType(pSequencingNode)) {
    ((SWindowLogicNode*)pSequencingNode)->outputTsOrder = order;
    ((SLogicNode*)pSequencingNode)->outputTsOrder = order;
  }
  optSetParentOrder(((SLogicNode*)pSequencingNode)->pParent, order);
  optSetParentOrder(((SLogicNode*)pSequencingNode)->pParent, order, (SLogicNode*)pSort);
}

  SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pSort->node.pChildren, 0);
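sortPriKeyOptApply above satisfies an ORDER BY on the primary timestamp by flipping the scan direction flags instead of keeping a sort, then records the resulting output order on the scan (or window) node. A minimal Python sketch of the flag handling; the two-element scanSeq convention is taken from the surrounding code, the rest is illustrative:

ASC, DESC = "asc", "desc"

def apply_sort_order(scan_seq, order):
    # scan_seq is a pair [forward_scans, backward_scans]; flip it if it disagrees with `order`.
    if (order == DESC and scan_seq[0] > 0) or (order == ASC and scan_seq[1] > 0):
        scan_seq[0], scan_seq[1] = scan_seq[1], scan_seq[0]   # the TSWAP above
    return scan_seq

print(apply_sort_order([1, 0], DESC))  # [0, 1]: scan backwards, no explicit sort needed
print(apply_sort_order([1, 0], ASC))   # [1, 0]: already in the requested order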
@@ -2881,10 +2890,62 @@ static int32_t tableCountScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLo
  return code;
}

static SSortLogicNode* sortNonPriKeySatisfied(SLogicNode* pNode) {
  if (QUERY_NODE_LOGIC_PLAN_SORT != nodeType(pNode)) {
    return NULL;
  }
  SSortLogicNode* pSort = (SSortLogicNode*)pNode;
  if (sortPriKeyOptIsPriKeyOrderBy(pSort->pSortKeys)) {
    return NULL;
  }
  SNode* pSortKeyNode = NULL, *pSortKeyExpr = NULL;
  FOREACH(pSortKeyNode, pSort->pSortKeys) {
    pSortKeyExpr = ((SOrderByExprNode*)pSortKeyNode)->pExpr;
    switch (nodeType(pSortKeyExpr)) {
      case QUERY_NODE_COLUMN:
        break;
      case QUERY_NODE_VALUE:
        continue;
      default:
        return NULL;
    }
  }

  if (!pSortKeyExpr || ((SColumnNode*)pSortKeyExpr)->projIdx != 1 ||
      ((SColumnNode*)pSortKeyExpr)->node.resType.type != TSDB_DATA_TYPE_TIMESTAMP) {
    return NULL;
  }
  return pSort;
}

static bool sortNonPriKeyShouldOptimize(SLogicNode* pNode, void* pInfo) {
  SSortLogicNode* pSort = sortNonPriKeySatisfied(pNode);
  if (!pSort) return false;
  SNodeList* pSortNodeList = pInfo;
  nodesListAppend(pSortNodeList, (SNode*)pSort);
  return false;
}

static int32_t sortNonPriKeyOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan) {
  SNodeList* pNodeList = nodesMakeList();
  optFindEligibleNode(pLogicSubplan->pNode, sortNonPriKeyShouldOptimize, pNodeList);
  SNode* pNode = NULL;
  FOREACH(pNode, pNodeList) {
    SSortLogicNode*   pSort = (SSortLogicNode*)pNode;
    SOrderByExprNode* pOrderByExpr = (SOrderByExprNode*)nodesListGetNode(pSort->pSortKeys, 0);
    pSort->node.outputTsOrder = pOrderByExpr->order;
    optSetParentOrder(pSort->node.pParent, pOrderByExpr->order, NULL);
  }
  pCxt->optimized = false;
  nodesClearList(pNodeList);
  return TSDB_CODE_SUCCESS;
}

// clang-format off
static const SOptimizeRule optimizeRuleSet[] = {
  {.pName = "ScanPath",              .optimizeFunc = scanPathOptimize},
  {.pName = "PushDownCondition",     .optimizeFunc = pushDownCondOptimize},
  {.pName = "sortNonPriKeyOptimize", .optimizeFunc = sortNonPriKeyOptimize},
  {.pName = "SortPrimaryKey",        .optimizeFunc = sortPrimaryKeyOptimize},
  {.pName = "SmaIndex",              .optimizeFunc = smaIndexOptimize},
  {.pName = "PartitionTags",         .optimizeFunc = partTagsOptimize},
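The new sortNonPriKeyOptimize rule registers right before SortPrimaryKey in the table above: it collects SORT nodes whose only meaningful key is the first projected column and of timestamp type, takes that key's direction as the sort's output order, and propagates it to the parents. A compact Python sketch of the eligibility test; field names are illustrative:

def non_pri_key_sort_key(sort_keys):
    # sort_keys: list of dicts like {"kind": "column", "proj_idx": 1, "is_ts": True, "order": "desc"}
    last_col = None
    for key in sort_keys:
        if key["kind"] == "value":
            continue                       # constant keys are ignored
        if key["kind"] != "column":
            return None                    # arbitrary expressions disqualify the sort
        last_col = key
    if last_col and last_col["proj_idx"] == 1 and last_col["is_ts"]:
        return last_col                    # its "order" drives ts-order propagation
    return None

print(non_pri_key_sort_key([{"kind": "column", "proj_idx": 1, "is_ts": True, "order": "desc"}]))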
@@ -366,6 +366,8 @@ static SPhysiNode* makePhysiNode(SPhysiPlanContext* pCxt, SLogicNode* pLogicNode

  TSWAP(pPhysiNode->pLimit, pLogicNode->pLimit);
  TSWAP(pPhysiNode->pSlimit, pLogicNode->pSlimit);
  pPhysiNode->inputTsOrder = pLogicNode->inputTsOrder;
  pPhysiNode->outputTsOrder = pLogicNode->outputTsOrder;

  int32_t code = createDataBlockDesc(pCxt, pLogicNode->pTargets, &pPhysiNode->pOutputDataBlockDesc);
  if (TSDB_CODE_SUCCESS != code) {
@@ -676,7 +678,7 @@ static int32_t createJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren
  int32_t code = TSDB_CODE_SUCCESS;

  pJoin->joinType = pJoinLogicNode->joinType;
  pJoin->inputTsOrder = pJoinLogicNode->inputTsOrder;
  pJoin->node.inputTsOrder = pJoinLogicNode->node.inputTsOrder;
  setNodeSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->pMergeCondition,
                &pJoin->pMergeCondition);
  if (TSDB_CODE_SUCCESS == code) {
@@ -939,6 +941,11 @@ static int32_t createIndefRowsFuncPhysiNode(SPhysiPlanContext* pCxt, SNodeList*
  SNodeList* pFuncs = NULL;
  int32_t    code = rewritePrecalcExprs(pCxt, pFuncLogicNode->pFuncs, &pPrecalcExprs, &pFuncs);

  if (pIdfRowsFunc->node.inputTsOrder == 0) {
    // default to asc
    pIdfRowsFunc->node.inputTsOrder = TSDB_ORDER_ASC;
  }

  SDataBlockDescNode* pChildTupe = (((SPhysiNode*)nodesListGetNode(pChildren, 0))->pOutputDataBlockDesc);
  // push down expression to pOutputDataBlockDesc of child node
  if (TSDB_CODE_SUCCESS == code && NULL != pPrecalcExprs) {
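The check above guards against an unset inputTsOrder reaching the executor: value 0 means "not decided", so the indefinite-rows function node falls back to ascending. The same defaulting idiom in Python, purely illustrative:

TSDB_ORDER_ASC, TSDB_ORDER_DESC = 1, 2

def effective_input_ts_order(order):
    # 0 (unset) is treated as ascending, mirroring the default applied above
    return order or TSDB_ORDER_ASC

assert effective_input_ts_order(0) == TSDB_ORDER_ASC
assert effective_input_ts_order(TSDB_ORDER_DESC) == TSDB_ORDER_DESC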
@@ -1156,9 +1163,12 @@ static int32_t createWindowPhysiNodeFinalize(SPhysiPlanContext* pCxt, SNodeList*
  pWindow->watermark = pWindowLogicNode->watermark;
  pWindow->deleteMark = pWindowLogicNode->deleteMark;
  pWindow->igExpired = pWindowLogicNode->igExpired;
  pWindow->inputTsOrder = pWindowLogicNode->inputTsOrder;
  pWindow->outputTsOrder = pWindowLogicNode->outputTsOrder;
  pWindow->mergeDataBlock = (GROUP_ACTION_KEEP == pWindowLogicNode->node.groupAction ? false : true);
  pWindow->node.inputTsOrder = pWindowLogicNode->node.inputTsOrder;
  pWindow->node.outputTsOrder = pWindowLogicNode->node.outputTsOrder;
  if (nodeType(pWindow) == QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL) {
    pWindow->node.inputTsOrder = pWindowLogicNode->node.outputTsOrder;
  }

  SNodeList* pPrecalcExprs = NULL;
  SNodeList* pFuncs = NULL;
@@ -1492,7 +1502,7 @@ static int32_t createFillPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren

  pFill->mode = pFillNode->mode;
  pFill->timeRange = pFillNode->timeRange;
  pFill->inputTsOrder = pFillNode->inputTsOrder;
  pFill->node.inputTsOrder = pFillNode->node.inputTsOrder;

  SDataBlockDescNode* pChildTupe = (((SPhysiNode*)nodesListGetNode(pChildren, 0))->pOutputDataBlockDesc);
  int32_t code = setListSlotId(pCxt, pChildTupe->dataBlockId, -1, pFillNode->pFillExprs, &pFill->pFillExprs);
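Across the window and fill hunks above, the physical nodes now read inputTsOrder/outputTsOrder from the shared logic-node base (node.inputTsOrder) rather than from per-type copies, and a merge-aligned interval additionally feeds its own output order back in as its input order. A tiny Python sketch of that copy step, with illustrative field names:

def copy_ts_order(phys, logic, merge_aligned_interval=False):
    # phys/logic are dicts standing in for the physical and logical plan nodes
    phys["input_ts_order"] = logic["input_ts_order"]
    phys["output_ts_order"] = logic["output_ts_order"]
    if merge_aligned_interval:
        # aligned merge intervals consume rows already in their own output order
        phys["input_ts_order"] = logic["output_ts_order"]
    return phys

print(copy_ts_order({}, {"input_ts_order": "asc", "output_ts_order": "desc"}, True))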
@@ -534,7 +534,9 @@ static int32_t stbSplGetNumOfVgroups(SLogicNode* pNode) {

static int32_t stbSplRewriteFromMergeNode(SMergeLogicNode* pMerge, SLogicNode* pNode) {
  int32_t code = TSDB_CODE_SUCCESS;

  pMerge->node.inputTsOrder = pNode->outputTsOrder;
  pMerge->node.outputTsOrder = pNode->outputTsOrder;

  switch (nodeType(pNode)) {
    case QUERY_NODE_LOGIC_PLAN_PROJECT: {
      SProjectLogicNode *pLogicNode = (SProjectLogicNode*)pNode;
@@ -631,7 +633,7 @@ static int32_t stbSplSplitIntervalForBatch(SSplitContext* pCxt, SStableSplitInfo
  ((SWindowLogicNode*)pInfo->pSplitNode)->windowAlgo = INTERVAL_ALGO_MERGE;
  SNodeList* pMergeKeys = NULL;
  code = stbSplCreateMergeKeysByPrimaryKey(((SWindowLogicNode*)pInfo->pSplitNode)->pTspk,
                                           ((SWindowLogicNode*)pInfo->pSplitNode)->outputTsOrder, &pMergeKeys);
                                           ((SWindowLogicNode*)pInfo->pSplitNode)->node.outputTsOrder, &pMergeKeys);
  if (TSDB_CODE_SUCCESS == code) {
    code = stbSplCreateMergeNode(pCxt, NULL, pInfo->pSplitNode, pMergeKeys, pPartWindow, true);
  }
@@ -721,7 +723,7 @@ static int32_t stbSplSplitSessionOrStateForBatch(SSplitContext* pCxt, SStableSpl

  SNodeList* pMergeKeys = NULL;
  int32_t    code = stbSplCreateMergeKeysByPrimaryKey(((SWindowLogicNode*)pWindow)->pTspk,
                                                      ((SWindowLogicNode*)pWindow)->inputTsOrder, &pMergeKeys);
                                                      ((SWindowLogicNode*)pWindow)->node.inputTsOrder, &pMergeKeys);

  if (TSDB_CODE_SUCCESS == code) {
    code = stbSplCreateMergeNode(pCxt, pInfo->pSubplan, pChild, pMergeKeys, (SLogicNode*)pChild, true);
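Both split hunks above now derive the merge keys for the super-table merge node from the window's primary timestamp column plus the order stored on the shared logic-node base. A short Python sketch of building such merge keys; names are illustrative:

def make_merge_keys(primary_ts_col, ts_order):
    # One merge key: the primary timestamp column, ordered the same way the window emits it.
    return [{"column": primary_ts_col, "order": ts_order, "null_order": "first"}]

print(make_merge_keys("ts", "desc"))  # [{'column': 'ts', 'order': 'desc', 'null_order': 'first'}]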
@@ -765,7 +765,7 @@ int32_t schSetTaskCandidateAddrs(SSchJob *pJob, SSchTask *pTask) {

  if (SCH_IS_DATA_BIND_TASK(pTask)) {
    SCH_TASK_ELOG("no execNode specifed for data src task, numOfEps:%d", pTask->plan->execNode.epSet.numOfEps);
    SCH_ERR_RET(TSDB_CODE_APP_ERROR);
    SCH_ERR_RET(TSDB_CODE_MND_INVALID_SCHEMA_VER);
  }

  SCH_ERR_RET(schSetAddrsFromNodeList(pJob, pTask));
@@ -9,6 +9,14 @@ target_include_directories(

if(${BUILD_WITH_ROCKSDB})
  IF (TD_LINUX)
    target_include_directories(
      stream
      PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static"
    )
    target_link_directories(
      stream
      PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static"
    )
    target_link_libraries(
      stream
      PUBLIC rocksdb tdb
@@ -261,8 +261,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_STB_ALTER_OPTION, "Invalid stable alter
TAOS_DEFINE_ERROR(TSDB_CODE_MND_STB_OPTION_UNCHNAGED,     "STable option unchanged")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_CONFLICT_WITH_TOPIC,"Field used by topic")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_SINGLE_STB_MODE_DB,       "Database is single stable mode")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_SCHEMA_VER,       "Invalid schema version while alter stb")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_STABLE_UID_NOT_MATCH,     "Invalid stable uid while alter stb")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_SCHEMA_VER,       "Invalid schema version")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_STABLE_UID_NOT_MATCH,     "Invalid stable uid")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_CONFLICT_WITH_TSMA, "Field used by tsma")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_IN_CREATING,        "Dnode in creating status")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_IN_DROPPING,        "Dnode in dropping status")
@ -5,423 +5,6 @@
|
|||
#unit-test
|
||||
,,y,unit-test,bash test.sh
|
||||
|
||||
#tsim test
|
||||
,,y,script,./test.sh -f tsim/user/basic.sim
|
||||
,,y,script,./test.sh -f tsim/user/password.sim
|
||||
,,y,script,./test.sh -f tsim/user/privilege_db.sim
|
||||
,,y,script,./test.sh -f tsim/user/privilege_sysinfo.sim
|
||||
,,y,script,./test.sh -f tsim/user/privilege_topic.sim
|
||||
,,y,script,./test.sh -f tsim/db/alter_option.sim
|
||||
,,y,script,./test.sh -f tsim/db/alter_replica_13.sim
|
||||
,,y,script,./test.sh -f tsim/db/alter_replica_31.sim
|
||||
,,y,script,./test.sh -f tsim/db/basic1.sim
|
||||
,,y,script,./test.sh -f tsim/db/basic2.sim
|
||||
,,y,script,./test.sh -f tsim/db/basic3.sim
|
||||
,,y,script,./test.sh -f tsim/db/basic4.sim
|
||||
,,y,script,./test.sh -f tsim/db/basic5.sim
|
||||
,,y,script,./test.sh -f tsim/db/basic6.sim
|
||||
,,y,script,./test.sh -f tsim/db/commit.sim
|
||||
,,y,script,./test.sh -f tsim/db/create_all_options.sim
|
||||
,,y,script,./test.sh -f tsim/db/delete_reuse1.sim
|
||||
,,y,script,./test.sh -f tsim/db/delete_reuse2.sim
|
||||
,,y,script,./test.sh -f tsim/db/delete_reusevnode.sim
|
||||
,,y,script,./test.sh -f tsim/db/delete_reusevnode2.sim
|
||||
,,y,script,./test.sh -f tsim/db/delete_writing1.sim
|
||||
,,y,script,./test.sh -f tsim/db/delete_writing2.sim
|
||||
,,y,script,./test.sh -f tsim/db/error1.sim
|
||||
,,y,script,./test.sh -f tsim/db/keep.sim
|
||||
,,y,script,./test.sh -f tsim/db/len.sim
|
||||
,,y,script,./test.sh -f tsim/db/repeat.sim
|
||||
,,y,script,./test.sh -f tsim/db/show_create_db.sim
|
||||
,,y,script,./test.sh -f tsim/db/show_create_table.sim
|
||||
,,y,script,./test.sh -f tsim/db/tables.sim
|
||||
,,y,script,./test.sh -f tsim/db/taosdlog.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/balance_replica1.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/balance_replica3.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/balance1.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/balance2.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/balance3.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/balancex.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/create_dnode.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_mnode.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_qnode_snode.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica1.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica3.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica1.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica3.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/drop_dnode_force.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/offline_reason.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica1.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_leader.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_follower.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v2.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v3.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/vnode_clean.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/use_dropped_dnode.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/split_vgroup_replica1.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/split_vgroup_replica3.sim
|
||||
,,y,script,./test.sh -f tsim/import/basic.sim
|
||||
,,y,script,./test.sh -f tsim/import/commit.sim
|
||||
,,y,script,./test.sh -f tsim/import/large.sim
|
||||
,,y,script,./test.sh -f tsim/import/replica1.sim
|
||||
,,y,script,./test.sh -f tsim/insert/backquote.sim
|
||||
,,y,script,./test.sh -f tsim/insert/basic.sim
|
||||
,,y,script,./test.sh -f tsim/insert/basic0.sim
|
||||
,,y,script,./test.sh -f tsim/insert/basic1.sim
|
||||
,,y,script,./test.sh -f tsim/insert/basic2.sim
|
||||
,,y,script,./test.sh -f tsim/insert/commit-merge0.sim
|
||||
,,y,script,./test.sh -f tsim/insert/insert_drop.sim
|
||||
,,y,script,./test.sh -f tsim/insert/insert_select.sim
|
||||
,,y,script,./test.sh -f tsim/insert/null.sim
|
||||
,,y,script,./test.sh -f tsim/insert/query_block1_file.sim
|
||||
,,y,script,./test.sh -f tsim/insert/query_block1_memory.sim
|
||||
,,y,script,./test.sh -f tsim/insert/query_block2_file.sim
|
||||
,,y,script,./test.sh -f tsim/insert/query_block2_memory.sim
|
||||
,,y,script,./test.sh -f tsim/insert/query_file_memory.sim
|
||||
,,y,script,./test.sh -f tsim/insert/query_multi_file.sim
|
||||
,,y,script,./test.sh -f tsim/insert/tcp.sim
|
||||
,,y,script,./test.sh -f tsim/insert/update0.sim
|
||||
,,y,script,./test.sh -f tsim/insert/update1_sort_merge.sim
|
||||
,,y,script,./test.sh -f tsim/insert/update2.sim
|
||||
,,y,script,./test.sh -f tsim/parser/alter__for_community_version.sim
|
||||
,,y,script,./test.sh -f tsim/parser/alter_column.sim
|
||||
,,y,script,./test.sh -f tsim/parser/alter_stable.sim
|
||||
,,y,script,./test.sh -f tsim/parser/alter.sim
|
||||
,,y,script,./test.sh -f tsim/parser/alter1.sim
|
||||
,,y,script,./test.sh -f tsim/parser/auto_create_tb_drop_tb.sim
|
||||
,,y,script,./test.sh -f tsim/parser/auto_create_tb.sim
|
||||
,,y,script,./test.sh -f tsim/parser/between_and.sim
|
||||
,,y,script,./test.sh -f tsim/parser/binary_escapeCharacter.sim
|
||||
,,y,script,./test.sh -f tsim/parser/col_arithmetic_operation.sim
|
||||
,,y,script,./test.sh -f tsim/parser/columnValue_bigint.sim
|
||||
,,y,script,./test.sh -f tsim/parser/columnValue_bool.sim
|
||||
,,y,script,./test.sh -f tsim/parser/columnValue_double.sim
|
||||
,,y,script,./test.sh -f tsim/parser/columnValue_float.sim
|
||||
,,y,script,./test.sh -f tsim/parser/columnValue_int.sim
|
||||
,,y,script,./test.sh -f tsim/parser/columnValue_smallint.sim
|
||||
,,y,script,./test.sh -f tsim/parser/columnValue_tinyint.sim
|
||||
,,y,script,./test.sh -f tsim/parser/columnValue_unsign.sim
|
||||
,,y,script,./test.sh -f tsim/parser/commit.sim
|
||||
,,y,script,./test.sh -f tsim/parser/condition.sim
|
||||
,,y,script,./test.sh -f tsim/parser/constCol.sim
|
||||
,,y,script,./test.sh -f tsim/parser/create_db.sim
|
||||
,,y,script,./test.sh -f tsim/parser/create_mt.sim
|
||||
,,y,script,./test.sh -f tsim/parser/create_tb_with_tag_name.sim
|
||||
,,y,script,./test.sh -f tsim/parser/create_tb.sim
|
||||
,,y,script,./test.sh -f tsim/parser/dbtbnameValidate.sim
|
||||
,,y,script,./test.sh -f tsim/parser/distinct.sim
|
||||
,,y,script,./test.sh -f tsim/parser/fill_us.sim
|
||||
,,y,script,./test.sh -f tsim/parser/fill.sim
|
||||
,,y,script,./test.sh -f tsim/parser/first_last.sim
|
||||
,,y,script,./test.sh -f tsim/parser/fill_stb.sim
|
||||
,,y,script,./test.sh -f tsim/parser/interp.sim
|
||||
,,y,script,./test.sh -f tsim/parser/fourArithmetic-basic.sim
|
||||
,,y,script,./test.sh -f tsim/parser/function.sim
|
||||
,,y,script,./test.sh -f tsim/parser/groupby-basic.sim
|
||||
,,y,script,./test.sh -f tsim/parser/groupby.sim
|
||||
,,y,script,./test.sh -f tsim/parser/having_child.sim
|
||||
,,y,script,./test.sh -f tsim/parser/having.sim
|
||||
,,y,script,./test.sh -f tsim/parser/import_commit1.sim
|
||||
,,y,script,./test.sh -f tsim/parser/import_commit2.sim
|
||||
,,y,script,./test.sh -f tsim/parser/import_commit3.sim
|
||||
,,y,script,./test.sh -f tsim/parser/import_file.sim
|
||||
,,y,script,./test.sh -f tsim/parser/import.sim
|
||||
,,y,script,./test.sh -f tsim/parser/insert_multiTbl.sim
|
||||
,,y,script,./test.sh -f tsim/parser/insert_tb.sim
|
||||
,,y,script,./test.sh -f tsim/parser/join_manyblocks.sim
|
||||
,,y,script,./test.sh -f tsim/parser/join_multitables.sim
|
||||
,,y,script,./test.sh -f tsim/parser/join_multivnode.sim
|
||||
,,y,script,./test.sh -f tsim/parser/join.sim
|
||||
,,y,script,./test.sh -f tsim/parser/last_cache.sim
|
||||
,,y,script,./test.sh -f tsim/parser/last_groupby.sim
|
||||
,,y,script,./test.sh -f tsim/parser/lastrow.sim
|
||||
,,y,script,./test.sh -f tsim/parser/lastrow2.sim
|
||||
,,y,script,./test.sh -f tsim/parser/like.sim
|
||||
,,y,script,./test.sh -f tsim/parser/limit.sim
|
||||
,,y,script,./test.sh -f tsim/parser/limit1.sim
|
||||
,,y,script,./test.sh -f tsim/parser/mixed_blocks.sim
|
||||
,,y,script,./test.sh -f tsim/parser/nchar.sim
|
||||
,,y,script,./test.sh -f tsim/parser/nestquery.sim
|
||||
,,y,script,./test.sh -f tsim/parser/null_char.sim
|
||||
,,y,script,./test.sh -f tsim/parser/precision_ns.sim
|
||||
,,y,script,./test.sh -f tsim/parser/projection_limit_offset.sim
|
||||
,,y,script,./test.sh -f tsim/parser/regex.sim
|
||||
,,y,script,./test.sh -f tsim/parser/regressiontest.sim
|
||||
,,y,script,./test.sh -f tsim/parser/select_across_vnodes.sim
|
||||
,,y,script,./test.sh -f tsim/parser/select_distinct_tag.sim
|
||||
,,y,script,./test.sh -f tsim/parser/select_from_cache_disk.sim
|
||||
,,y,script,./test.sh -f tsim/parser/select_with_tags.sim
|
||||
,,y,script,./test.sh -f tsim/parser/selectResNum.sim
|
||||
,,y,script,./test.sh -f tsim/parser/set_tag_vals.sim
|
||||
,,y,script,./test.sh -f tsim/parser/single_row_in_tb.sim
|
||||
,,y,script,./test.sh -f tsim/parser/sliding.sim
|
||||
,,y,script,./test.sh -f tsim/parser/slimit_alter_tags.sim
|
||||
,,y,script,./test.sh -f tsim/parser/slimit.sim
|
||||
,,y,script,./test.sh -f tsim/parser/slimit1.sim
|
||||
,,y,script,./test.sh -f tsim/parser/stableOp.sim
|
||||
,,y,script,./test.sh -f tsim/parser/tags_dynamically_specifiy.sim
|
||||
,,y,script,./test.sh -f tsim/parser/tags_filter.sim
|
||||
,,y,script,./test.sh -f tsim/parser/tbnameIn.sim
|
||||
,,y,script,./test.sh -f tsim/parser/timestamp.sim
|
||||
,,y,script,./test.sh -f tsim/parser/top_groupby.sim
|
||||
,,y,script,./test.sh -f tsim/parser/topbot.sim
|
||||
,,y,script,./test.sh -f tsim/parser/union.sim
|
||||
,,y,script,./test.sh -f tsim/parser/union_sysinfo.sim
|
||||
,,y,script,./test.sh -f tsim/parser/where.sim
|
||||
,,y,script,./test.sh -f tsim/query/tagLikeFilter.sim
|
||||
,,y,script,./test.sh -f tsim/query/charScalarFunction.sim
|
||||
,,y,script,./test.sh -f tsim/query/explain.sim
|
||||
,,y,script,./test.sh -f tsim/query/interval-offset.sim
|
||||
,,y,script,./test.sh -f tsim/query/interval.sim
|
||||
,,y,script,./test.sh -f tsim/query/scalarFunction.sim
|
||||
,,y,script,./test.sh -f tsim/query/scalarNull.sim
|
||||
,,y,script,./test.sh -f tsim/query/session.sim
|
||||
,,y,script,./test.sh -f tsim/query/udf.sim
|
||||
,,y,script,./test.sh -f tsim/query/udf_with_const.sim
|
||||
,,y,script,./test.sh -f tsim/query/sys_tbname.sim
|
||||
,,y,script,./test.sh -f tsim/query/groupby.sim
|
||||
,,y,script,./test.sh -f tsim/query/event.sim
|
||||
,,y,script,./test.sh -f tsim/query/forceFill.sim
|
||||
,,y,script,./test.sh -f tsim/query/emptyTsRange.sim
|
||||
,,y,script,./test.sh -f tsim/query/partitionby.sim
|
||||
,,y,script,./test.sh -f tsim/qnode/basic1.sim
|
||||
,,y,script,./test.sh -f tsim/snode/basic1.sim
|
||||
,,y,script,./test.sh -f tsim/mnode/basic1.sim
|
||||
,,y,script,./test.sh -f tsim/mnode/basic2.sim
|
||||
,,y,script,./test.sh -f tsim/mnode/basic3.sim
|
||||
,,y,script,./test.sh -f tsim/mnode/basic4.sim
|
||||
,,y,script,./test.sh -f tsim/mnode/basic5.sim
|
||||
,,y,script,./test.sh -f tsim/show/basic.sim
|
||||
,,y,script,./test.sh -f tsim/table/autocreate.sim
|
||||
,,y,script,./test.sh -f tsim/table/basic1.sim
|
||||
,,y,script,./test.sh -f tsim/table/basic2.sim
|
||||
,,y,script,./test.sh -f tsim/table/basic3.sim
|
||||
,,y,script,./test.sh -f tsim/table/bigint.sim
|
||||
,,y,script,./test.sh -f tsim/table/binary.sim
|
||||
,,y,script,./test.sh -f tsim/table/bool.sim
|
||||
,,y,script,./test.sh -f tsim/table/column_name.sim
|
||||
,,y,script,./test.sh -f tsim/table/column_num.sim
|
||||
,,y,script,./test.sh -f tsim/table/column_value.sim
|
||||
,,y,script,./test.sh -f tsim/table/column2.sim
|
||||
,,y,script,./test.sh -f tsim/table/createmulti.sim
|
||||
,,y,script,./test.sh -f tsim/table/date.sim
|
||||
,,y,script,./test.sh -f tsim/table/db.table.sim
|
||||
,,y,script,./test.sh -f tsim/table/delete_reuse1.sim
|
||||
,,y,script,./test.sh -f tsim/table/delete_reuse2.sim
|
||||
,,y,script,./test.sh -f tsim/table/delete_writing.sim
|
||||
,,y,script,./test.sh -f tsim/table/describe.sim
|
||||
,,y,script,./test.sh -f tsim/table/double.sim
|
||||
,,y,script,./test.sh -f tsim/table/float.sim
|
||||
,,y,script,./test.sh -f tsim/table/hash.sim
|
||||
,,y,script,./test.sh -f tsim/table/int.sim
|
||||
,,y,script,./test.sh -f tsim/table/limit.sim
|
||||
,,y,script,./test.sh -f tsim/table/smallint.sim
|
||||
,,y,script,./test.sh -f tsim/table/table_len.sim
|
||||
,,y,script,./test.sh -f tsim/table/table.sim
|
||||
,,y,script,./test.sh -f tsim/table/tinyint.sim
|
||||
,,y,script,./test.sh -f tsim/table/vgroup.sim
|
||||
,,n,script,./test.sh -f tsim/stream/basic0.sim -g
|
||||
,,y,script,./test.sh -f tsim/stream/basic1.sim
|
||||
,,y,script,./test.sh -f tsim/stream/basic2.sim
|
||||
,,y,script,./test.sh -f tsim/stream/basic3.sim
|
||||
,,y,script,./test.sh -f tsim/stream/basic4.sim
|
||||
,,y,script,./test.sh -f tsim/stream/checkStreamSTable1.sim
|
||||
,,y,script,./test.sh -f tsim/stream/checkStreamSTable.sim
|
||||
,,y,script,./test.sh -f tsim/stream/deleteInterval.sim
|
||||
,,y,script,./test.sh -f tsim/stream/deleteSession.sim
|
||||
,,y,script,./test.sh -f tsim/stream/deleteState.sim
|
||||
,,y,script,./test.sh -f tsim/stream/distributeInterval0.sim
|
||||
,,y,script,./test.sh -f tsim/stream/distributeIntervalRetrive0.sim
|
||||
,,y,script,./test.sh -f tsim/stream/distributeSession0.sim
|
||||
,,y,script,./test.sh -f tsim/stream/drop_stream.sim
|
||||
,,y,script,./test.sh -f tsim/stream/fillHistoryBasic1.sim
|
||||
,,y,script,./test.sh -f tsim/stream/fillHistoryBasic2.sim
|
||||
,,y,script,./test.sh -f tsim/stream/fillHistoryBasic3.sim
|
||||
,,y,script,./test.sh -f tsim/stream/fillIntervalDelete0.sim
|
||||
,,y,script,./test.sh -f tsim/stream/fillIntervalDelete1.sim
|
||||
,,y,script,./test.sh -f tsim/stream/fillIntervalLinear.sim
|
||||
,,y,script,./test.sh -f tsim/stream/fillIntervalPartitionBy.sim
|
||||
,,y,script,./test.sh -f tsim/stream/fillIntervalPrevNext1.sim
|
||||
,,y,script,./test.sh -f tsim/stream/fillIntervalPrevNext.sim
|
||||
,,y,script,./test.sh -f tsim/stream/fillIntervalRange.sim
|
||||
,,y,script,./test.sh -f tsim/stream/fillIntervalValue.sim
|
||||
,,y,script,./test.sh -f tsim/stream/ignoreCheckUpdate.sim
|
||||
,,y,script,./test.sh -f tsim/stream/ignoreExpiredData.sim
|
||||
,,y,script,./test.sh -f tsim/stream/partitionby1.sim
|
||||
,,y,script,./test.sh -f tsim/stream/partitionbyColumnInterval.sim
|
||||
,,y,script,./test.sh -f tsim/stream/partitionbyColumnSession.sim
|
||||
,,y,script,./test.sh -f tsim/stream/partitionbyColumnState.sim
|
||||
,,y,script,./test.sh -f tsim/stream/partitionby.sim
|
||||
,,y,script,./test.sh -f tsim/stream/pauseAndResume.sim
|
||||
,,y,script,./test.sh -f tsim/stream/schedSnode.sim
|
||||
,,y,script,./test.sh -f tsim/stream/session0.sim
|
||||
,,y,script,./test.sh -f tsim/stream/session1.sim
|
||||
,,y,script,./test.sh -f tsim/stream/sliding.sim
|
||||
,,y,script,./test.sh -f tsim/stream/state0.sim
|
||||
,,y,script,./test.sh -f tsim/stream/state1.sim
|
||||
,,y,script,./test.sh -f tsim/stream/triggerInterval0.sim
|
||||
,,y,script,./test.sh -f tsim/stream/triggerSession0.sim
|
||||
,,y,script,./test.sh -f tsim/stream/udTableAndTag0.sim
|
||||
,,y,script,./test.sh -f tsim/stream/udTableAndTag1.sim
|
||||
,,y,script,./test.sh -f tsim/stream/udTableAndTag2.sim
|
||||
,,y,script,./test.sh -f tsim/stream/windowClose.sim
|
||||
,,y,script,./test.sh -f tsim/trans/lossdata1.sim
|
||||
,,y,script,./test.sh -f tsim/trans/create_db.sim
|
||||
,,y,script,./test.sh -f tsim/tmq/basic1.sim
|
||||
,,y,script,./test.sh -f tsim/tmq/basic2.sim
|
||||
,,y,script,./test.sh -f tsim/tmq/basic3.sim
|
||||
,,y,script,./test.sh -f tsim/tmq/basic4.sim
|
||||
,,y,script,./test.sh -f tsim/tmq/basic1Of2Cons.sim
|
||||
,,y,script,./test.sh -f tsim/tmq/basic2Of2Cons.sim
|
||||
,,y,script,./test.sh -f tsim/tmq/basic3Of2Cons.sim
|
||||
,,y,script,./test.sh -f tsim/tmq/basic4Of2Cons.sim
|
||||
,,y,script,./test.sh -f tsim/tmq/basic2Of2ConsOverlap.sim
|
||||
,,y,script,./test.sh -f tsim/tmq/topic.sim
|
||||
,,y,script,./test.sh -f tsim/tmq/snapshot.sim
|
||||
,,y,script,./test.sh -f tsim/tmq/snapshot1.sim
|
||||
,,y,script,./test.sh -f tsim/stable/alter_comment.sim
|
||||
,,y,script,./test.sh -f tsim/stable/alter_count.sim
|
||||
,,y,script,./test.sh -f tsim/stable/alter_import.sim
|
||||
,,y,script,./test.sh -f tsim/stable/alter_insert1.sim
|
||||
,,y,script,./test.sh -f tsim/stable/alter_insert2.sim
|
||||
,,y,script,./test.sh -f tsim/stable/alter_metrics.sim
|
||||
,,y,script,./test.sh -f tsim/stable/column_add.sim
|
||||
,,y,script,./test.sh -f tsim/stable/column_drop.sim
|
||||
,,y,script,./test.sh -f tsim/stable/column_modify.sim
|
||||
,,y,script,./test.sh -f tsim/stable/disk.sim
|
||||
,,y,script,./test.sh -f tsim/stable/dnode3.sim
|
||||
,,y,script,./test.sh -f tsim/stable/metrics.sim
|
||||
,,y,script,./test.sh -f tsim/stable/refcount.sim
|
||||
,,y,script,./test.sh -f tsim/stable/tag_add.sim
|
||||
,,y,script,./test.sh -f tsim/stable/tag_drop.sim
|
||||
,,y,script,./test.sh -f tsim/stable/tag_filter.sim
|
||||
,,y,script,./test.sh -f tsim/stable/tag_modify.sim
|
||||
,,y,script,./test.sh -f tsim/stable/tag_rename.sim
|
||||
,,y,script,./test.sh -f tsim/stable/values.sim
|
||||
,,y,script,./test.sh -f tsim/stable/vnode3.sim
|
||||
,,y,script,./test.sh -f tsim/stable/metrics_idx.sim
|
||||
,,n,script,./test.sh -f tsim/sma/drop_sma.sim
|
||||
,,y,script,./test.sh -f tsim/sma/sma_leak.sim
|
||||
,,y,script,./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim
|
||||
,,y,script,./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim
|
||||
,,y,script,./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim
|
||||
,,n,script,./test.sh -f tsim/valgrind/checkError1.sim
|
||||
,,n,script,./test.sh -f tsim/valgrind/checkError2.sim
|
||||
,,n,script,./test.sh -f tsim/valgrind/checkError3.sim
|
||||
,,n,script,./test.sh -f tsim/valgrind/checkError4.sim
|
||||
,,n,script,./test.sh -f tsim/valgrind/checkError5.sim
|
||||
,,n,script,./test.sh -f tsim/valgrind/checkError6.sim
|
||||
,,n,script,./test.sh -f tsim/valgrind/checkError7.sim
|
||||
,,n,script,./test.sh -f tsim/valgrind/checkError8.sim
|
||||
,,n,script,./test.sh -f tsim/valgrind/checkUdf.sim
|
||||
,,y,script,./test.sh -f tsim/vnode/replica3_basic.sim
|
||||
,,y,script,./test.sh -f tsim/vnode/replica3_repeat.sim
|
||||
,,y,script,./test.sh -f tsim/vnode/replica3_vgroup.sim
|
||||
,,y,script,./test.sh -f tsim/vnode/replica3_many.sim
|
||||
,,y,script,./test.sh -f tsim/vnode/replica3_import.sim
|
||||
,,y,script,./test.sh -f tsim/vnode/stable_balance_replica1.sim
|
||||
,,y,script,./test.sh -f tsim/vnode/stable_dnode2_stop.sim
|
||||
,,y,script,./test.sh -f tsim/vnode/stable_dnode2.sim
|
||||
,,y,script,./test.sh -f tsim/vnode/stable_dnode3.sim
|
||||
,,y,script,./test.sh -f tsim/vnode/stable_replica3_dnode6.sim
|
||||
,,y,script,./test.sh -f tsim/vnode/stable_replica3_vnode3.sim
|
||||
,,y,script,./test.sh -f tsim/sync/3Replica1VgElect.sim
|
||||
,,y,script,./test.sh -f tsim/sync/3Replica5VgElect.sim
|
||||
,,y,script,./test.sh -f tsim/sync/oneReplica1VgElect.sim
|
||||
,,y,script,./test.sh -f tsim/sync/oneReplica5VgElect.sim
|
||||
,,y,script,./test.sh -f tsim/catalog/alterInCurrent.sim
|
||||
,,y,script,./test.sh -f tsim/scalar/in.sim
|
||||
,,y,script,./test.sh -f tsim/scalar/scalar.sim
|
||||
,,y,script,./test.sh -f tsim/scalar/filter.sim
|
||||
,,y,script,./test.sh -f tsim/scalar/caseWhen.sim
|
||||
,,y,script,./test.sh -f tsim/scalar/tsConvert.sim
|
||||
,,y,script,./test.sh -f tsim/alter/cached_schema_after_alter.sim
|
||||
,,y,script,./test.sh -f tsim/alter/dnode.sim
|
||||
,,y,script,./test.sh -f tsim/alter/table.sim
|
||||
,,y,script,./test.sh -f tsim/cache/new_metrics.sim
|
||||
,,y,script,./test.sh -f tsim/cache/restart_table.sim
|
||||
,,y,script,./test.sh -f tsim/cache/restart_metrics.sim
|
||||
,,y,script,./test.sh -f tsim/column/commit.sim
|
||||
,,y,script,./test.sh -f tsim/column/metrics.sim
|
||||
,,y,script,./test.sh -f tsim/column/table.sim
|
||||
,,y,script,./test.sh -f tsim/compress/commitlog.sim
|
||||
,,y,script,./test.sh -f tsim/compress/compress2.sim
|
||||
,,y,script,./test.sh -f tsim/compress/compress.sim
|
||||
,,y,script,./test.sh -f tsim/compress/uncompress.sim
|
||||
,,y,script,./test.sh -f tsim/compute/avg.sim
|
||||
,,y,script,./test.sh -f tsim/compute/block_dist.sim
|
||||
,,y,script,./test.sh -f tsim/compute/bottom.sim
|
||||
,,y,script,./test.sh -f tsim/compute/count.sim
|
||||
,,y,script,./test.sh -f tsim/compute/diff.sim
|
||||
,,y,script,./test.sh -f tsim/compute/diff2.sim
|
||||
,,y,script,./test.sh -f tsim/compute/first.sim
|
||||
,,y,script,./test.sh -f tsim/compute/interval.sim
|
||||
,,y,script,./test.sh -f tsim/compute/last_row.sim
|
||||
,,y,script,./test.sh -f tsim/compute/last.sim
|
||||
,,y,script,./test.sh -f tsim/compute/leastsquare.sim
|
||||
,,y,script,./test.sh -f tsim/compute/max.sim
|
||||
,,y,script,./test.sh -f tsim/compute/min.sim
|
||||
,,y,script,./test.sh -f tsim/compute/null.sim
|
||||
,,y,script,./test.sh -f tsim/compute/percentile.sim
|
||||
,,y,script,./test.sh -f tsim/compute/stddev.sim
|
||||
,,y,script,./test.sh -f tsim/compute/sum.sim
|
||||
,,y,script,./test.sh -f tsim/compute/top.sim
|
||||
,,y,script,./test.sh -f tsim/field/2.sim
|
||||
,,y,script,./test.sh -f tsim/field/3.sim
|
||||
,,y,script,./test.sh -f tsim/field/4.sim
|
||||
,,y,script,./test.sh -f tsim/field/5.sim
|
||||
,,y,script,./test.sh -f tsim/field/6.sim
|
||||
,,y,script,./test.sh -f tsim/field/binary.sim
|
||||
,,y,script,./test.sh -f tsim/field/bigint.sim
|
||||
,,y,script,./test.sh -f tsim/field/bool.sim
|
||||
,,y,script,./test.sh -f tsim/field/double.sim
|
||||
,,y,script,./test.sh -f tsim/field/float.sim
|
||||
,,y,script,./test.sh -f tsim/field/int.sim
|
||||
,,y,script,./test.sh -f tsim/field/single.sim
|
||||
,,y,script,./test.sh -f tsim/field/smallint.sim
|
||||
,,y,script,./test.sh -f tsim/field/tinyint.sim
|
||||
,,y,script,./test.sh -f tsim/field/unsigined_bigint.sim
|
||||
,,y,script,./test.sh -f tsim/vector/metrics_field.sim
|
||||
,,y,script,./test.sh -f tsim/vector/metrics_mix.sim
|
||||
,,y,script,./test.sh -f tsim/vector/metrics_query.sim
|
||||
,,y,script,./test.sh -f tsim/vector/metrics_tag.sim
|
||||
,,y,script,./test.sh -f tsim/vector/metrics_time.sim
|
||||
,,y,script,./test.sh -f tsim/vector/multi.sim
|
||||
,,y,script,./test.sh -f tsim/vector/single.sim
|
||||
,,y,script,./test.sh -f tsim/vector/table_field.sim
|
||||
,,y,script,./test.sh -f tsim/vector/table_mix.sim
|
||||
,,y,script,./test.sh -f tsim/vector/table_query.sim
|
||||
,,y,script,./test.sh -f tsim/vector/table_time.sim
|
||||
,,y,script,./test.sh -f tsim/wal/kill.sim
|
||||
,,y,script,./test.sh -f tsim/tag/3.sim
|
||||
,,y,script,./test.sh -f tsim/tag/4.sim
|
||||
,,y,script,./test.sh -f tsim/tag/5.sim
|
||||
,,y,script,./test.sh -f tsim/tag/6.sim
|
||||
,,y,script,./test.sh -f tsim/tag/add.sim
|
||||
,,y,script,./test.sh -f tsim/tag/bigint.sim
|
||||
,,y,script,./test.sh -f tsim/tag/binary_binary.sim
|
||||
,,y,script,./test.sh -f tsim/tag/binary.sim
|
||||
,,y,script,./test.sh -f tsim/tag/bool_binary.sim
|
||||
,,y,script,./test.sh -f tsim/tag/bool_int.sim
|
||||
,,y,script,./test.sh -f tsim/tag/bool.sim
|
||||
,,y,script,./test.sh -f tsim/tag/change.sim
|
||||
,,y,script,./test.sh -f tsim/tag/column.sim
|
||||
,,y,script,./test.sh -f tsim/tag/commit.sim
|
||||
,,y,script,./test.sh -f tsim/tag/create.sim
|
||||
,,y,script,./test.sh -f tsim/tag/delete.sim
|
||||
,,y,script,./test.sh -f tsim/tag/double.sim
|
||||
,,y,script,./test.sh -f tsim/tag/filter.sim
|
||||
,,y,script,./test.sh -f tsim/tag/float.sim
|
||||
,,y,script,./test.sh -f tsim/tag/int_binary.sim
|
||||
,,y,script,./test.sh -f tsim/tag/int_float.sim
|
||||
,,y,script,./test.sh -f tsim/tag/int.sim
|
||||
,,y,script,./test.sh -f tsim/tag/set.sim
|
||||
,,y,script,./test.sh -f tsim/tag/smallint.sim
|
||||
,,y,script,./test.sh -f tsim/tag/tinyint.sim
|
||||
,,y,script,./test.sh -f tsim/tag/drop_tag.sim
|
||||
,,y,script,./test.sh -f tsim/tag/tbNameIn.sim
|
||||
,,y,script,./test.sh -f tmp/monitor.sim
|
||||
|
||||
#system test
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_str.py
|
||||
|
@@ -569,6 +152,7 @@
,,n,system-test,python3 ./test.py -f 0-others/tag_index_basic.py
,,n,system-test,python3 ./test.py -f 0-others/udfpy_main.py
,,n,system-test,python3 ./test.py -N 3 -f 0-others/walRetention.py
,,n,system-test,python3 ./test.py -f 0-others/splitVGroup.py -N 5
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/alter_database.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/alter_replica.py -N 3
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/influxdb_line_taosc_insert.py

@@ -1598,6 +1182,7 @@
,,y,script,./test.sh -f tsim/tag/drop_tag.sim
,,y,script,./test.sh -f tsim/tag/tbNameIn.sim
,,y,script,./test.sh -f tmp/monitor.sim

#develop test
,,n,develop-test,python3 ./test.py -f 2-query/table_count_scan.py
,,n,develop-test,python3 ./test.py -f 2-query/show_create_db.py
@@ -0,0 +1,37 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect

sql create database test
sql use test
sql CREATE STABLE `meters` (`ts` TIMESTAMP, `c2` INT) TAGS (`cc` VARCHAR(3))

sql insert into d1 using meters tags("MY") values("2022-05-15 00:01:08.000 ",234)
sql insert into d1 using meters tags("MY") values("2022-05-16 00:01:08.000 ",136)
sql insert into d1 using meters tags("MY") values("2022-05-17 00:01:08.000 ", 59)
sql insert into d1 using meters tags("MY") values("2022-05-18 00:01:08.000 ", 58)
sql insert into d1 using meters tags("MY") values("2022-05-19 00:01:08.000 ",243)
sql insert into d1 using meters tags("MY") values("2022-05-20 00:01:08.000 ",120)
sql insert into d1 using meters tags("MY") values("2022-05-21 00:01:08.000 ", 11)
sql insert into d1 using meters tags("MY") values("2022-05-22 00:01:08.000 ",196)
sql insert into d1 using meters tags("MY") values("2022-05-23 00:01:08.000 ",116)
sql insert into d1 using meters tags("MY") values("2022-05-24 00:01:08.000 ",210)

sql insert into d2 using meters tags("HT") values("2022-05-15 00:01:08.000", 234)
sql insert into d2 using meters tags("HT") values("2022-05-16 00:01:08.000", 136)
sql insert into d2 using meters tags("HT") values("2022-05-17 00:01:08.000", 59)
sql insert into d2 using meters tags("HT") values("2022-05-18 00:01:08.000", 58)
sql insert into d2 using meters tags("HT") values("2022-05-19 00:01:08.000", 243)
sql insert into d2 using meters tags("HT") values("2022-05-20 00:01:08.000", 120)
sql insert into d2 using meters tags("HT") values("2022-05-21 00:01:08.000", 11)
sql insert into d2 using meters tags("HT") values("2022-05-22 00:01:08.000", 196)
sql insert into d2 using meters tags("HT") values("2022-05-23 00:01:08.000", 116)
sql insert into d2 using meters tags("HT") values("2022-05-24 00:01:08.000", 210)

#sleep 10000000
system taos -P7100 -s 'source tsim/query/t/explain_tsorder.sql' | grep -v 'Query OK' | grep -v 'Client Version' > /tmp/explain_tsorder.result
system echo ----------------------diff start-----------------------
system git diff --exit-code --color tsim/query/r/explain_tsorder.result /tmp/explain_tsorder.result
system echo ----------------------diff succeed-----------------------
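The sim script above is a golden-file test: it replays explain_tsorder.sql through the taos CLI, strips the noisy lines, and requires the captured output to match the checked-in result file exactly. A minimal Python equivalent of the compare step (paths and the helper name are illustrative):

import subprocess

def golden_compare(expected_path, actual_path):
    # git diff --exit-code returns 0 when the files match, non-zero otherwise
    res = subprocess.run(["git", "diff", "--exit-code", "--no-index", expected_path, actual_path])
    return res.returncode == 0

# usage (hypothetical paths):
# assert golden_compare("tsim/query/r/explain_tsorder.result", "/tmp/explain_tsorder.result")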
File diff suppressed because it is too large
@ -0,0 +1,73 @@
|
|||
use test;
|
||||
explain verbose true select _wstart, last(ts), avg(c2) from meters interval(10s) order by _wstart desc\G;
|
||||
explain verbose true select _wstart, last(ts), avg(c2) from meters interval(10s) order by _wstart asc\G;
|
||||
explain verbose true select _wstart, first(ts), avg(c2) from meters interval(10s) order by _wstart asc\G;
|
||||
explain verbose true select _wstart, first(ts), avg(c2) from meters interval(10s) order by _wstart desc\G;
|
||||
explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s)) order by d\G;
|
||||
explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s)) order by d desc\G;
|
||||
explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a) order by d\G;
|
||||
explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) order by d\G;
|
||||
explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a) order by d desc\G;
|
||||
explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) order by d desc\G;
|
||||
|
||||
|
||||
explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) order by d\G;
|
||||
explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) order by d\G;
|
||||
explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) order by d desc\G;
|
||||
explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) order by d desc\G;
|
||||
|
||||
explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) group by c order by d\G;
|
||||
explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) group by c order by d\G;
|
||||
explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) group by c order by d desc\G;
|
||||
explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) group by c order by d desc\G;
|
||||
|
||||
|
||||
explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) where a > 10000 and a < 20000 interval(10s) fill(NULL) order by d\G;
|
||||
explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > 10000 and a < 20000 interval(10s) fill(NULL) order by d\G;
|
||||
explain verbose true select last(b) as d from (select last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) where b > 10000 and b < 20000 interval(10s) fill(NULL) order by d\G;
|
||||
explain verbose true select last(b) as d from (select last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) where b > 10000 and b < 20000 interval(10s) fill(NULL) order by d desc\G;
|
||||
|
||||
|
||||
select _wstart, last(ts), avg(c2) from meters interval(10s) order by _wstart desc;
|
||||
select _wstart, last(ts), avg(c2) from meters interval(10s) order by _wstart asc;
|
||||
select _wstart, first(ts), avg(c2) from meters interval(10s) order by _wstart asc;
|
||||
select _wstart, first(ts), avg(c2) from meters interval(10s) order by _wstart desc;
|
||||
select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s)) order by d;
|
||||
select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s)) order by d desc;
|
||||
select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a) order by d;
|
||||
select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) order by d;
|
||||
select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a) order by d desc;
|
||||
select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) order by d desc;
|
||||
|
||||
|
||||
select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) order by d;
|
||||
select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) order by d;
|
||||
select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) order by d desc;
|
||||
select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) order by d desc;
|
||||
|
||||
select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) group by c order by d
|
||||
select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) group by c order by d;
|
||||
select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) group by c order by d desc;
|
||||
select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) group by c order by d desc;
|
||||
|
||||
select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-19 00:01:08.000' interval(10s) order by d;
|
||||
select last(b) as d from (select last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) where b > '2022-05-15 00:01:00.000' and b < '2022-05-19 00:01:08.000' interval(10s) order by d;
|
||||
select last(b) as d from (select last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) where b > '2022-05-15 00:01:00.000' and b < '2022-05-19 00:01:08.000' interval(10s) order by d desc;
|
||||
select _wstart, first(a) as d, avg(c) from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-21 00:01:08.000' interval(5h) fill(linear) order by d desc;
|
||||
|
||||
explain verbose true select _wstart, first(a) as d, avg(c) from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-21 00:01:08.000' interval(5h) fill(linear) order by d desc\G;
|
||||
explain verbose true select _wstart, first(a) as d, avg(c) from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a asc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-21 00:01:08.000' interval(5h) fill(linear) order by d desc\G;
|
||||
|
||||
explain verbose true select * from (select ts as a, c2 as b from meters order by c2 desc)\G;
|
||||
select * from (select ts as a, c2 as b from meters order by c2 desc);
|
||||
|
||||
explain verbose true select * from (select ts as a, c2 as b from meters order by c2 desc) order by a desc\G;
|
||||
select * from (select ts as a, c2 as b from meters order by c2 desc) order by a desc;
|
||||
|
||||
explain verbose true select a.ts, a.c2, b.c2 from meters as a join meters as b on a.ts = b.ts\G;
|
||||
explain verbose true select a.ts, a.c2, b.c2 from meters as a join meters as b on a.ts = b.ts order by a.ts\G;
|
||||
select a.ts, a.c2, b.c2 from meters as a join meters as b on a.ts = b.ts;
|
||||
select a.ts, a.c2, b.c2 from meters as a join meters as b on a.ts = b.ts order by a.ts desc;
|
||||
explain verbose true select a.ts, a.c2, b.c2 from meters as a join (select ts, c2 from meters order by ts desc) b on a.ts = b.ts order by a.ts desc\G;
|
||||
explain verbose true select a.ts, a.c2, b.c2 from meters as a join (select ts, c2 from meters order by ts desc) b on a.ts = b.ts order by a.ts asc\G;
|
||||
select a.ts, a.c2, b.c2 from meters as a join (select * from meters order by ts desc) b on a.ts = b.ts order by a.ts asc;
|
|
@@ -414,6 +414,21 @@ if $data05 != 30.000000000 then
  return -1
endi

system sh/exec.sh -n dnode1 -s stop -x SIGINT
sql delete from stb;

print =============== query after delete in common vgroups
sql select _wstart, _wend, min(c1),max(c2),max(c1),max(c3) from stb interval(5m,10s) sliding(5m) order by _wstart;
if $rows != 0 then
  print rows $rows != 0
  return -1
endi

sleep 2000
print =============== query after delete in designated vgroups
sql select _wend, min(c1),max(c2),max(c1) from stb interval(5m,10s) sliding(5m) order by _wstart;
if $rows != 0 then
  print rows $rows != 0
  return -1
endi

system sh/exec.sh -n dnode1 -s stop -x SIGINT
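The added sim block above deletes all rows and then asserts that the sliding interval query returns an empty result, both immediately and after a short wait for the delete to settle in every vnode. A hedged Python sketch of the same check with the taos Python connector; connection parameters and the database/table names are illustrative:

import taos  # TDengine Python connector

def rows_after_delete(host="localhost"):
    conn = taos.connect(host=host)
    try:
        conn.execute("delete from test.stb")
        cursor = conn.cursor()
        cursor.execute(
            "select _wstart, _wend, min(c1), max(c2) from test.stb "
            "interval(5m,10s) sliding(5m) order by _wstart")
        return len(cursor.fetchall())     # expected to be 0 once the delete is applied
    finally:
        conn.close()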
@ -0,0 +1,377 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import random
|
||||
import time
|
||||
import copy
|
||||
import string
|
||||
|
||||
import taos
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
# random string
|
||||
def random_string(self, count):
|
||||
letters = string.ascii_letters
|
||||
return ''.join(random.choice(letters) for i in range(count))
|
||||
|
||||
# get col value and total max min ...
|
||||
def getColsValue(self, i, j):
|
||||
# c1 value
|
||||
if random.randint(1, 10) == 5:
|
||||
c1 = None
|
||||
else:
|
||||
c1 = 1
|
||||
|
||||
# c2 value
|
||||
if j % 3200 == 0:
|
||||
c2 = 8764231
|
||||
elif random.randint(1, 10) == 5:
|
||||
c2 = None
|
||||
else:
|
||||
c2 = random.randint(-87654297, 98765321)
|
||||
|
||||
|
||||
value = f"({self.ts}, "
|
||||
|
||||
# c1
|
||||
if c1 is None:
|
||||
value += "null,"
|
||||
else:
|
||||
self.c1Cnt += 1
|
||||
value += f"{c1},"
|
||||
# c2
|
||||
if c2 is None:
|
||||
value += "null,"
|
||||
else:
|
||||
value += f"{c2},"
|
||||
# total count
|
||||
self.c2Cnt += 1
|
||||
# max
|
||||
if self.c2Max is None:
|
||||
self.c2Max = c2
|
||||
else:
|
||||
if c2 > self.c2Max:
|
||||
self.c2Max = c2
|
||||
# min
|
||||
if self.c2Min is None:
|
||||
self.c2Min = c2
|
||||
else:
|
||||
if c2 < self.c2Min:
|
||||
self.c2Min = c2
|
||||
# sum
|
||||
if self.c2Sum is None:
|
||||
self.c2Sum = c2
|
||||
else:
|
||||
self.c2Sum += c2
|
||||
|
||||
# c3 same with ts
|
||||
value += f"{self.ts})"
|
||||
|
||||
# move next
|
||||
self.ts += 1
|
||||
|
||||
return value
|
||||
|
||||
# insert data
|
||||
def insertData(self):
|
||||
tdLog.info("insert data ....")
|
||||
sqls = ""
|
||||
for i in range(self.childCnt):
|
||||
# insert child table
|
||||
values = ""
|
||||
pre_insert = f"insert into @db_name.t{i} values "
|
||||
for j in range(self.childRow):
|
||||
if values == "":
|
||||
values = self.getColsValue(i, j)
|
||||
else:
|
||||
values += "," + self.getColsValue(i, j)
|
||||
|
||||
# batch insert
|
||||
if j % self.batchSize == 0 and values != "":
|
||||
sql = pre_insert + values
|
||||
self.exeDouble(sql)
|
||||
values = ""
|
||||
# append last
|
||||
if values != "":
|
||||
sql = pre_insert + values
|
||||
self.exeDouble(sql)
|
||||
values = ""
|
||||
|
||||
# insert normal table
|
||||
for i in range(20):
|
||||
self.ts += 1000
|
||||
name = self.random_string(20)
|
||||
sql = f"insert into @db_name.ta values({self.ts}, {i}, {self.ts%100000}, '{name}', false)"
|
||||
self.exeDouble(sql)
|
||||
|
||||
# insert finished
|
||||
tdLog.info(f"insert data successfully.\n"
|
||||
f" inserted child table = {self.childCnt}\n"
|
||||
f" inserted child rows = {self.childRow}\n"
|
||||
f" total inserted rows = {self.childCnt*self.childRow}\n")
|
||||
return
|
||||
|
||||
def exeDouble(self, sql):
|
||||
# dbname replace
|
||||
sql1 = sql.replace("@db_name", self.db1)
|
||||
|
||||
if len(sql1) > 100:
|
||||
tdLog.info(sql1[:100])
|
||||
else:
|
||||
tdLog.info(sql1)
|
||||
tdSql.execute(sql1)
|
||||
|
||||
sql2 = sql.replace("@db_name", self.db2)
|
||||
if len(sql1) > 100:
|
||||
tdLog.info(sql1[:100])
|
||||
else:
|
||||
tdLog.info(sql1)
|
||||
tdSql.execute(sql2)
|
||||
|
||||
|
||||
# prepareEnv
|
||||
def prepareEnv(self):
|
||||
# init
|
||||
self.ts = 1680000000000
|
||||
self.childCnt = 10
|
||||
self.childRow = 10000
|
||||
self.batchSize = 5000
|
||||
self.vgroups1 = 4
|
||||
self.vgroups2 = 4
|
||||
self.db1 = "db1"
|
||||
self.db2 = "db2"
|
||||
|
||||
# total
|
||||
self.c1Cnt = 0
|
||||
self.c2Cnt = 0
|
||||
self.c2Max = None
|
||||
self.c2Min = None
|
||||
self.c2Sum = None
|
||||
|
||||
# create database db
|
||||
sql = f"create database @db_name vgroups {self.vgroups1} replica 3"
|
||||
self.exeDouble(sql)
|
||||
|
||||
# create super table st
|
||||
sql = f"create table @db_name.st(ts timestamp, c1 int, c2 bigint, ts1 timestamp) tags(area int)"
|
||||
self.exeDouble(sql)
|
||||
|
||||
# create child table
|
||||
for i in range(self.childCnt):
|
||||
sql = f"create table @db_name.t{i} using @db_name.st tags({i}) "
|
||||
self.exeDouble(sql)
|
||||
|
||||
# create normal table
|
||||
sql = f"create table @db_name.ta(ts timestamp, c1 int, c2 bigint, c3 binary(32), c4 bool)"
|
||||
self.exeDouble(sql)
|
||||
|
||||
# insert data
|
||||
self.insertData()
|
||||
|
||||
# check data correct
|
||||
def checkExpect(self, sql, expectVal):
|
||||
tdSql.query(sql)
|
||||
rowCnt = tdSql.getRows()
|
||||
for i in range(rowCnt):
|
||||
val = tdSql.getData(i,0)
|
||||
if val != expectVal:
|
||||
tdLog.exit(f"Not expect . query={val} expect={expectVal} i={i} sql={sql}")
|
||||
return False
|
||||
|
||||
tdLog.info(f"check expect ok. sql={sql} expect ={expectVal} rowCnt={rowCnt}")
|
||||
return True
|
||||
|
||||
# init
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
seed = time.clock_gettime(time.CLOCK_REALTIME)
|
||||
random.seed(seed)
|
||||
self.replicaVar = int(replicaVar)
|
||||
tdLog.debug(f"start to execute {__file__}")
|
||||
tdSql.init(conn.cursor(), True)
|
||||
|
||||
# check query result same
|
||||
def queryDouble(self, sql):
|
||||
# sql
|
||||
sql1 = sql.replace('@db_name', self.db1)
|
||||
tdLog.info(sql1)
|
||||
start1 = time.time()
|
||||
rows1 = tdSql.query(sql1)
|
||||
spend1 = time.time() - start1
|
||||
res1 = copy.copy(tdSql.queryResult)
|
||||
|
||||
sql2 = sql.replace('@db_name', self.db2)
|
||||
tdLog.info(sql2)
|
||||
start2 = time.time()
|
||||
tdSql.query(sql2)
|
||||
spend2 = time.time() - start2
|
||||
res2 = tdSql.queryResult
|
||||
|
||||
rowlen1 = len(res1)
|
||||
rowlen2 = len(res2)
|
||||
|
||||
if rowlen1 != rowlen2:
|
||||
tdLog.exit(f"rowlen1={rowlen1} rowlen2={rowlen2} both not equal.")
|
||||
return False
|
||||
|
||||
for i in range(rowlen1):
|
||||
row1 = res1[i]
|
||||
row2 = res2[i]
|
||||
collen1 = len(row1)
|
||||
collen2 = len(row2)
|
||||
if collen1 != collen2:
|
||||
tdLog.exit(f"collen1={collen1} collen2={collen2} both not equal.")
|
||||
return False
|
||||
for j in range(collen1):
|
||||
if row1[j] != row2[j]:
|
||||
tdLog.exit(f"col={j} col1={row1[j]} col2={row2[j]} both col not equal.")
|
||||
return False
|
||||
|
||||
# warning performance
|
||||
diff = (spend2 - spend1)*100/spend1
|
||||
tdLog.info("spend1=%.6fs spend2=%.6fs diff=%.1f%%"%(spend1, spend2, diff))
|
||||
if spend2 > spend1 and diff > 20:
|
||||
tdLog.info("warning: the performance diff after splitting is over 20%")
|
||||
|
||||
return True
|
||||
|
||||
|
||||
    # check result
    def checkResult(self):
        # check vgroupid
        sql = f"select vgroup_id from information_schema.ins_vgroups where db_name='{self.db2}'"
        tdSql.query(sql)
        tdSql.checkRows(self.vgroups2)

        # check child table count same
        sql = "select table_name from information_schema.ins_tables where db_name='@db_name' order by table_name"
        self.queryDouble(sql)

        # check row value is ok
        sql = "select * from @db_name.st order by ts"
        self.queryDouble(sql)

        # where
        sql = "select *,tbname from @db_name.st where c1 < 1000 order by ts"
        self.queryDouble(sql)

        # max
        sql = "select max(c1) from @db_name.st"
        self.queryDouble(sql)

        # min
        sql = "select min(c2) from @db_name.st"
        self.queryDouble(sql)

        # sum
        sql = "select sum(c1) from @db_name.st"
        self.queryDouble(sql)

        # normal table

        # all rows
        sql = "select * from @db_name.ta"
        self.queryDouble(sql)

        # count
        sql = "select count(*) from @db_name.ta"
        self.queryDouble(sql)

        # sum
        sql = "select sum(c1) from @db_name.ta"
        self.queryDouble(sql)
    # get vgroup list
    def getVGroup(self, db_name):
        vgidList = []
        sql = f"select vgroup_id from information_schema.ins_vgroups where db_name='{db_name}'"
        res = tdSql.getResult(sql)
        rows = len(res)
        for i in range(rows):
            vgidList.append(res[i][0])

        return vgidList
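    # splitVGroup (below) picks a random vgroup of the given database and issues
    # "split vgroup <id>". The split is carried out by a background transaction, so the
    # test polls "show transactions" once per second (up to 100 times) until no
    # transaction rows remain.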
    # split vgroup on db2
    def splitVGroup(self, db_name):
        vgids = self.getVGroup(db_name)
        selid = random.choice(vgids)
        sql = f"split vgroup {selid}"
        tdLog.info(sql)
        tdSql.execute(sql)

        # wait for the split transaction to finish
        for i in range(100):
            sql = "show transactions;"
            rows = tdSql.query(sql)
            if rows == 0:
                tdLog.info("split vgroup finished.")
                return True
            #tdLog.info(f"i={i} wait split vgroup ...")
            time.sleep(1)

        tdLog.exit("split vgroup transaction did not finish within 100s")
        return False
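    # splitEmptyDB (below) covers the empty-database case: a database created with
    # "vgroups 2" is split once, after which exactly vgNum + 1 (i.e. 3) vgroups are expected.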
    # split empty database
    def splitEmptyDB(self):
        dbName = "emptydb"
        vgNum = 2
        # create database
        sql = f"create database {dbName} vgroups {vgNum}"
        tdLog.info(sql)
        tdSql.execute(sql)

        # split vgroup
        self.splitVGroup(dbName)
        vgList = self.getVGroup(dbName)
        vgNum1 = len(vgList)
        vgNum2 = vgNum + 1
        if vgNum1 != vgNum2:
            tdLog.exit(f"vglist len={vgNum1} does not equal the expected {vgNum2}")
            return
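    # run (below) repeats the split-and-verify cycle five times on db2, bumping the
    # expected vgroup count after each split, and finally exercises the empty-database split.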
    # run
    def run(self):
        # prepare env
        self.prepareEnv()

        for i in range(5):
            # split vgroup on db2
            self.splitVGroup(self.db2)
            self.vgroups2 += 1

            # check two db query result same
            self.checkResult()

            tdLog.info(f"split vgroup i={i} passed.")

        # split empty db
        self.splitEmptyDB()
    # stop
    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@ -226,6 +226,7 @@ class TDTestCase:
        tdSql.checkData(3, 0, 12)

        ## test fill value with scalar expression
        # data types
        tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2)")
        tdSql.checkRows(4)
        tdSql.checkData(0, 0, 3)
@ -233,6 +234,49 @@ class TDTestCase:
        tdSql.checkData(2, 0, 3)
        tdSql.checkData(3, 0, 3)

        tdSql.query(f"select interp(c1) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2)")
        tdSql.checkRows(4)
        tdSql.checkData(0, 0, 3)
        tdSql.checkData(1, 0, 3)
        tdSql.checkData(2, 0, 3)
        tdSql.checkData(3, 0, 3)

        tdSql.query(f"select interp(c2) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2)")
        tdSql.checkRows(4)
        tdSql.checkData(0, 0, 3)
        tdSql.checkData(1, 0, 3)
        tdSql.checkData(2, 0, 3)
        tdSql.checkData(3, 0, 3)

        tdSql.query(f"select interp(c3) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2)")
        tdSql.checkRows(4)
        tdSql.checkData(0, 0, 3)
        tdSql.checkData(1, 0, 3)
        tdSql.checkData(2, 0, 3)
        tdSql.checkData(3, 0, 3)

        tdSql.query(f"select interp(c4) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2)")
        tdSql.checkRows(4)
        tdSql.checkData(0, 0, 3.0)
        tdSql.checkData(1, 0, 3.0)
        tdSql.checkData(2, 0, 3.0)
        tdSql.checkData(3, 0, 3.0)

        tdSql.query(f"select interp(c5) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2)")
        tdSql.checkRows(4)
        tdSql.checkData(0, 0, 3.0)
        tdSql.checkData(1, 0, 3.0)
        tdSql.checkData(2, 0, 3.0)
        tdSql.checkData(3, 0, 3.0)

        tdSql.query(f"select interp(c6) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2)")
        tdSql.checkRows(4)
        tdSql.checkData(0, 0, True)
        tdSql.checkData(1, 0, True)
        tdSql.checkData(2, 0, True)
        tdSql.checkData(3, 0, True)

        # expr types
        tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1.0 + 2.0)")
        tdSql.checkRows(4)
        tdSql.checkData(0, 0, 3)
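        # These cases check fill(value, <expr>) with a scalar expression: "1 + 2" evaluates
        # to a constant that is returned for every filled row, appearing as 3 for columns
        # c0-c3, 3.0 for c4 and c5, and true for the bool column c6; the expr-type cases use
        # a floating-point expression (1.0 + 2.0) for the same check.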
@ -275,6 +319,7 @@ class TDTestCase:
        tdSql.checkData(2, 0, 3)
        tdSql.checkData(3, 0, 3)


        tdLog.printNoPrefix("==========step5:fill prev")

        ## {. . .}
@ -434,7 +434,7 @@ bool simExecuteSystemCmd(SScript *script, char *option) {
  simLogSql(buf, true);
  int32_t code = system(buf);
  int32_t repeatTimes = 0;
  while (code < 0) {
  while (code != 0) {
    simError("script:%s, failed to execute %s , code %d, errno:%d %s, repeatTimes:%d", script->fileName, buf, code,
             errno, strerror(errno), repeatTimes);
    taosMsleep(1000);
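  // The loop condition changes from (code < 0) to (code != 0): any nonzero exit status
  // from system() now keeps the retry loop going (with a 1 second sleep between attempts),
  // instead of only negative return values.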