Merge branch 'develop' of github.com:taosdata/TDengine into release/ver-2.1.4.1

bryanchang0603 2021-07-09 17:59:31 +08:00
commit b2592f3168
95 changed files with 1220 additions and 429 deletions

.gitignore (vendored), 2 changes
View File

@@ -68,6 +68,8 @@ CMakeError.log
*.o
version.c
taos.rc
+src/connector/jdbc/.classpath
+src/connector/jdbc/.project
src/connector/jdbc/.settings/
tests/comparisonTest/cassandra/cassandratest/.classpath
tests/comparisonTest/cassandra/cassandratest/.project

View File

@@ -1,4 +1,3 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
IF (CMAKE_VERSION VERSION_LESS 3.0)
  PROJECT(TDengine CXX)
  SET(PROJECT_VERSION_MAJOR "${LIB_MAJOR_VERSION}")
@@ -10,6 +9,12 @@ ELSE ()
  PROJECT(TDengine VERSION "${LIB_VERSION_STRING}" LANGUAGES CXX)
ENDIF ()
+IF (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
+  CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
+ELSE ()
+  CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+ENDIF ()
SET(TD_ACCOUNT FALSE)
SET(TD_ADMIN FALSE)
SET(TD_GRANT FALSE)
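Since the same `CMAKE_MINIMUM_REQUIRED` pattern recurs throughout this commit, a quick sketch of the configure step it affects may help (an illustrative out-of-source build; the directory name is arbitrary, and on macOS the Darwin branch above selects the 2.8...3.20 policy range):

```bash
mkdir -p build && cd build
cmake .. && cmake --build .
```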

View File

@@ -185,9 +185,10 @@ cmake .. && cmake --build .
# Install
-If you do not want to install TDengine, you can run it directly from the shell. After the build completes, install TDengine:
+After the build completes, install TDengine (the commands below use Linux as an example; on Windows the corresponding command is `nmake install`):
```bash
-make install
+sudo make install
```
Users can learn more about the directories and files created in the operating system in [directory structure](https://www.taosdata.com/cn/documentation/administrator#directories).
@@ -195,7 +196,7 @@ make install
After a successful installation, start the TDengine service in a terminal:
```bash
-taosd
+sudo systemctl start taosd
```
Users can use the TDengine shell to connect to the TDengine service. In a terminal, enter:
@@ -208,7 +209,7 @@ taos
## Quick Run
-After TDengine is built, run the following command in a terminal:
+If you do not want to run TDengine as a service, you can also run it directly in a terminal; that is, after the build completes, run the following command (on Windows the generated executable carries an .exe suffix, e.g. it will be named taosd.exe):
```bash
./build/bin/taosd -c test/cfg

View File

@@ -175,7 +175,7 @@ cmake .. && cmake --build .
# Installing
-After building successfully, TDengine can be installed by:
+After building successfully, TDengine can be installed by: (On Windows platform, the following command should be `nmake install`)
```bash
sudo make install
```
@@ -197,7 +197,7 @@ If TDengine shell connects the server successfully, welcome messages and version
## Quick Run
-If you don't want to run TDengine as a service, you can run it in current shell. For example, to quickly start a TDengine server after building, run the command below in terminal:
+If you don't want to run TDengine as a service, you can run it in current shell. For example, to quickly start a TDengine server after building, run the command below in terminal: (We take Linux as an example, command on Windows will be `taosd.exe`)
```bash
./build/bin/taosd -c test/cfg
```
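Taken together with the Chinese README above, the documented flow on a Linux host is roughly the following sketch (a summary only; the Windows equivalents called out in the diff are `nmake install` and `taosd.exe`):

```bash
# Build, install, start the service, then connect with the TDengine shell.
cmake .. && cmake --build .
sudo make install
sudo systemctl start taosd
taos
# Or run the freshly built server directly instead of as a service:
./build/bin/taosd -c test/cfg
```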

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
IF (TD_ACCOUNT)

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
SET(CMAKE_C_STANDARD 11)

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
IF (${ACCOUNT} MATCHES "true")

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
#

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
IF (DEFINED VERNUMBER)

deps/CMakeLists.txt (vendored), 11 changes
View File

@@ -1,6 +1,11 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)
+IF (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
+  CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
+ELSE ()
+  CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+ENDIF ()
ADD_SUBDIRECTORY(zlib-1.2.11)
ADD_SUBDIRECTORY(pthread)
ADD_SUBDIRECTORY(regex)
@@ -11,9 +16,7 @@ ADD_SUBDIRECTORY(wepoll)
ADD_SUBDIRECTORY(MsvcLibX)
ADD_SUBDIRECTORY(rmonotonic)
-IF (TD_LINUX_64)
-  ADD_SUBDIRECTORY(lua)
-ENDIF ()
+ADD_SUBDIRECTORY(lua)
IF (TD_LINUX AND TD_MQTT)
  ADD_SUBDIRECTORY(MQTT-C)

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
# MQTT-C build options
option(MQTT_C_OpenSSL_SUPPORT "Build MQTT-C with OpenSSL support?" OFF)

View File

@@ -1,6 +1,11 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)
+IF (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
+  CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
+ELSE ()
+  CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+ENDIF ()
IF (TD_WINDOWS)
  INCLUDE_DIRECTORIES(include)
  AUX_SOURCE_DIRECTORY(src SRC)

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
IF (TD_WINDOWS)

View File

@@ -2,3 +2,6 @@ AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR}/src SOURCE_LIST)
ADD_LIBRARY(lua ${SOURCE_LIST})
TARGET_INCLUDE_DIRECTORIES(lua PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/inc)
+SET_SOURCE_FILES_PROPERTIES(./src/lgc.c PROPERTIES COMPILE_FLAGS -w)
+SET_SOURCE_FILES_PROPERTIES(./src/ltable.c PROPERTIES COMPILE_FLAGS -w)
+SET_SOURCE_FILES_PROPERTIES(./src/loslib.c PROPERTIES COMPILE_FLAGS -w)

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
IF (TD_WINDOWS)

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
IF (TD_WINDOWS)

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
IF (TD_WINDOWS)

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
IF (TD_WINDOWS)

View File

@@ -69,6 +69,39 @@ mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/*
if [ -f ${build_dir}/bin/jemalloc-config ]; then
mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
if [ -f ${build_dir}/bin/jemalloc.sh ]; then
cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/bin/jeprof ]; then
cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
fi
if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
fi
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
fi
if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
fi
if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
fi
fi
cd ${install_dir}
if [ "$osType" != "Darwin" ]; then

View File

@@ -91,6 +91,39 @@ else
fi
chmod a+x ${install_dir}/bin/* || :
if [ -f ${build_dir}/bin/jemalloc-config ]; then
mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
if [ -f ${build_dir}/bin/jemalloc.sh ]; then
cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/bin/jeprof ]; then
cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
fi
if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
fi
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
fi
if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
fi
if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
fi
fi
cd ${install_dir}
if [ "$osType" != "Darwin" ]; then

View File

@@ -91,6 +91,39 @@ else
fi
chmod a+x ${install_dir}/bin/* || :
if [ -f ${build_dir}/bin/jemalloc-config ]; then
mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
if [ -f ${build_dir}/bin/jemalloc.sh ]; then
cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/bin/jeprof ]; then
cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
fi
if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
fi
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
fi
if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
fi
if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
fi
fi
cd ${install_dir}
if [ "$osType" != "Darwin" ]; then
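A hypothetical spot check of the staged payload after one of these packaging scripts runs (paths are the ones used above; `jemalloc-config --version` is part of the stock jemalloc distribution):

```bash
ls ${install_dir}/jemalloc/bin          # jemalloc-config, jemalloc.sh, jeprof (when present)
ls -l ${install_dir}/jemalloc/lib       # libjemalloc.so.2 plus the libjemalloc.so symlink
${install_dir}/jemalloc/bin/jemalloc-config --version
```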

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
# Base compile

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/mnode/inc)

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc)

View File

@@ -961,6 +961,10 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
      break;
    }
+   if (sToken.n == 0 || sToken.type == TK_SEMI || index == 0) {
+     return tscSQLSyntaxErrMsg(pCmd->payload, "unexpected token", sql);
+   }
    sql += index;
    ++numOfColsAfterTags;
  }

View File

@@ -2005,7 +2005,6 @@ static SVgroupsInfo* createVgroupInfoFromMsg(char* pMsg, int32_t* size, uint64_t
    SVgroupMsg *vmsg = &pVgroupMsg->vgroups[j];
    vmsg->vgId = htonl(vmsg->vgId);
-   vmsg->numOfEps = vmsg->numOfEps;
    for (int32_t k = 0; k < vmsg->numOfEps; ++k) {
      vmsg->epAddr[k].port = htons(vmsg->epAddr[k].port);
    }

View File

@@ -634,7 +634,9 @@ static void setResRawPtrImpl(SSqlRes* pRes, SInternalField* pInfo, int32_t i, bo
void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
  assert(pRes->numOfCols > 0);
+ if (pRes->numOfRows == 0) {
+   return;
+ }
  int32_t offset = 0;
  for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
    SInternalField* pInfo = (SInternalField*)TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i);
@@ -746,6 +748,37 @@ typedef struct SJoinOperatorInfo {
  SRspResultInfo resultInfo; // todo refactor, add this info for each operator
} SJoinOperatorInfo;
static void converNcharFilterColumn(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols, int32_t rows, bool *gotNchar) {
for (int32_t i = 0; i < numOfFilterCols; ++i) {
if (pFilterInfo[i].info.type == TSDB_DATA_TYPE_NCHAR) {
pFilterInfo[i].pData2 = pFilterInfo[i].pData;
pFilterInfo[i].pData = malloc(rows * pFilterInfo[i].info.bytes);
int32_t bufSize = pFilterInfo[i].info.bytes - VARSTR_HEADER_SIZE;
for (int32_t j = 0; j < rows; ++j) {
char* dst = (char *)pFilterInfo[i].pData + j * pFilterInfo[i].info.bytes;
char* src = (char *)pFilterInfo[i].pData2 + j * pFilterInfo[i].info.bytes;
int32_t len = 0;
taosMbsToUcs4(varDataVal(src), varDataLen(src), varDataVal(dst), bufSize, &len);
varDataLen(dst) = len;
}
*gotNchar = true;
}
}
}
static void freeNcharFilterColumn(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols) {
for (int32_t i = 0; i < numOfFilterCols; ++i) {
if (pFilterInfo[i].info.type == TSDB_DATA_TYPE_NCHAR) {
if (pFilterInfo[i].pData2) {
tfree(pFilterInfo[i].pData);
pFilterInfo[i].pData = pFilterInfo[i].pData2;
pFilterInfo[i].pData2 = NULL;
}
}
}
}
static void doSetupSDataBlock(SSqlRes* pRes, SSDataBlock* pBlock, SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols) {
  int32_t offset = 0;
  char* pData = pRes->data;
@@ -764,8 +797,13 @@ static void doSetupSDataBlock(SSqlRes* pRes, SSDataBlock* pBlock, SSingleColumnF
  // filter data if needed
  if (numOfFilterCols > 0) {
    doSetFilterColumnInfo(pFilterInfo, numOfFilterCols, pBlock);
+   bool gotNchar = false;
+   converNcharFilterColumn(pFilterInfo, numOfFilterCols, pBlock->info.rows, &gotNchar);
    int8_t* p = calloc(pBlock->info.rows, sizeof(int8_t));
    bool all = doFilterDataBlock(pFilterInfo, numOfFilterCols, pBlock->info.rows, p);
+   if (gotNchar) {
+     freeNcharFilterColumn(pFilterInfo, numOfFilterCols);
+   }
    if (!all) {
      doCompactSDataBlock(pBlock, pBlock->info.rows, p);
    }

View File

@@ -1,10 +1,11 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest)
-FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib)
+FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib /usr/lib64)
+FIND_LIBRARY(LIB_GTEST_SHARED_DIR libgtest.so /usr/lib/ /usr/local/lib /usr/lib64)
-IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR)
+IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR))
  MESSAGE(STATUS "gTest library found, build unit test")
  # GoogleTest requires at least C++11
@@ -17,4 +18,4 @@ IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR)
  ADD_EXECUTABLE(cliTest ${SOURCE_LIST})
  TARGET_LINK_LIBRARIES(cliTest taos tutil common gtest pthread)
ENDIF()

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc)

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)

View File

@@ -57,8 +57,8 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
                parameterCnt++;
            }
        }
-       parameters = new Object[parameterCnt];
    }
+   parameters = new Object[parameterCnt];
    if (parameterCnt > 1) {
        // the table name is also a parameter, so ignore it.

View File

@@ -22,16 +22,16 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar
        super(conn, database);
        this.rawSql = sql;
+       int parameterCnt = 0;
        if (sql.contains("?")) {
-           int parameterCnt = 0;
            for (int i = 0; i < sql.length(); i++) {
                if ('?' == sql.charAt(i)) {
                    parameterCnt++;
                }
            }
-           parameters = new Object[parameterCnt];
            this.isPrepared = true;
        }
+       parameters = new Object[parameterCnt];
        // build parameterMetaData
        this.parameterMetaData = new RestfulParameterMetaData(parameters);

View File

@@ -15,6 +15,8 @@ public class RestfulPreparedStatementTest {
    private static PreparedStatement pstmt_insert;
    private static final String sql_select = "select * from t1 where ts > ? and ts <= ? and f1 >= ?";
    private static PreparedStatement pstmt_select;
+   private static final String sql_without_parameters = "select count(*) from t1";
+   private static PreparedStatement pstmt_without_parameters;
    @Test
    public void executeQuery() throws SQLException {
@@ -237,6 +239,7 @@
    @Test
    public void clearParameters() throws SQLException {
        pstmt_insert.clearParameters();
+       pstmt_without_parameters.clearParameters();
    }
    @Test
@@ -382,6 +385,7 @@
        pstmt_insert = conn.prepareStatement(sql_insert);
        pstmt_select = conn.prepareStatement(sql_select);
+       pstmt_without_parameters = conn.prepareStatement(sql_without_parameters);
    } catch (SQLException e) {
        e.printStackTrace();
    }
@@ -394,6 +398,8 @@
        pstmt_insert.close();
        if (pstmt_select != null)
            pstmt_select.close();
+       if (pstmt_without_parameters != null)
+           pstmt_without_parameters.close();
        if (conn != null)
            conn.close();
    } catch (SQLException e) {

View File

@@ -15,36 +15,18 @@ const { NULL_POINTER } = require('ref-napi');
module.exports = CTaosInterface;
-function convertMillisecondsToDatetime(time) {
-  return new TaosObjects.TaosTimestamp(time);
-}
-function convertMicrosecondsToDatetime(time) {
-  return new TaosObjects.TaosTimestamp(time * 0.001, true);
-}
-function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
-  timestampConverter = convertMillisecondsToDatetime;
-  if (micro == true) {
-    timestampConverter = convertMicrosecondsToDatetime;
-  }
+function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
  data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
  let res = [];
  let currOffset = 0;
  while (currOffset < data.length) {
-    let queue = [];
-    let time = 0;
-    for (let i = currOffset; i < currOffset + nbytes; i++) {
-      queue.push(data[i]);
-    }
-    for (let i = queue.length - 1; i >= 0; i--) {
-      time += queue[i] * Math.pow(16, i * 2);
-    }
+    let time = data.readInt64LE(currOffset);
    currOffset += nbytes;
-    res.push(timestampConverter(time));
+    res.push(new TaosObjects.TaosTimestamp(time, precision));
  }
  return res;
}
-function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
+function convertBool(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
  data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
  let res = new Array(data.length);
  for (let i = 0; i < data.length; i++) {
@ -60,7 +42,7 @@ function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
} }
return res; return res;
} }
function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = []; let res = [];
let currOffset = 0; let currOffset = 0;
@ -71,7 +53,7 @@ function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, micro = false
} }
return res; return res;
} }
function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = []; let res = [];
let currOffset = 0; let currOffset = 0;
@ -82,7 +64,7 @@ function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, micro = fals
} }
return res; return res;
} }
function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { function convertInt(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = []; let res = [];
let currOffset = 0; let currOffset = 0;
@ -93,7 +75,7 @@ function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
} }
return res; return res;
} }
function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = []; let res = [];
let currOffset = 0; let currOffset = 0;
@ -104,7 +86,7 @@ function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro = false)
} }
return res; return res;
} }
function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = []; let res = [];
let currOffset = 0; let currOffset = 0;
@ -115,7 +97,7 @@ function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro = false)
} }
return res; return res;
} }
function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = []; let res = [];
let currOffset = 0; let currOffset = 0;
@ -127,7 +109,7 @@ function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro = false)
return res; return res;
} }
function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = []; let res = [];
@@ -272,7 +254,7 @@ CTaosInterface.prototype.config = function config() {
CTaosInterface.prototype.connect = function connect(host = null, user = "root", password = "taosdata", db = null, port = 0) {
  let _host, _user, _password, _db, _port;
  try {
-    _host = host != null ? ref.allocCString(host) : ref.alloc(ref.types.char_ptr, ref.NULL);
+    _host = host != null ? ref.allocCString(host) : ref.NULL;
  }
  catch (err) {
    throw "Attribute Error: host is expected as a str";
@@ -290,7 +272,7 @@ CTaosInterface.prototype.connect = function connect(host = null, user = "root",
    throw "Attribute Error: password is expected as a str";
  }
  try {
-    _db = db != null ? ref.allocCString(db) : ref.alloc(ref.types.char_ptr, ref.NULL);
+    _db = db != null ? ref.allocCString(db) : ref.NULL;
  }
  catch (err) {
    throw "Attribute Error: db is expected as a str";
@@ -345,8 +327,7 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
  }
  var fieldL = this.libtaos.taos_fetch_lengths(result);
-  let isMicro = (this.libtaos.taos_result_precision(result) == FieldTypes.C_TIMESTAMP_MICRO);
+  let precision = this.libtaos.taos_result_precision(result);
  var fieldlens = [];
@@ -373,7 +354,7 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
      if (!convertFunctions[fields[i]['type']]) {
        throw new errors.DatabaseError("Invalid data type returned from database");
      }
-      blocks[i] = convertFunctions[fields[i]['type']](pdata, num_of_rows, fieldlens[i], offset, isMicro);
+      blocks[i] = convertFunctions[fields[i]['type']](pdata, num_of_rows, fieldlens[i], offset, precision);
    }
  }
  return { blocks: blocks, num_of_rows }
@@ -423,7 +404,7 @@ CTaosInterface.prototype.fetch_rows_a = function fetch_rows_a(result, callback,
    let row = cti.libtaos.taos_fetch_row(result2);
    let fields = cti.fetchFields_a(result2);
-    let isMicro = (cti.libtaos.taos_result_precision(result2) == FieldTypes.C_TIMESTAMP_MICRO);
+    let precision = cti.libtaos.taos_result_precision(result2);
    let blocks = new Array(fields.length);
    blocks.fill(null);
    numOfRows2 = Math.abs(numOfRows2);
@@ -449,7 +430,7 @@
        let prow = ref.reinterpret(row, 8, i * 8);
        prow = prow.readPointer();
        prow = ref.ref(prow);
-        blocks[i] = convertFunctions[fields[i]['type']](prow, 1, fieldlens[i], offset, isMicro);
+        blocks[i] = convertFunctions[fields[i]['type']](prow, 1, fieldlens[i], offset, precision);
        //offset += fields[i]['bytes'] * numOfRows2;
      }
    }
@@ -572,7 +553,7 @@ CTaosInterface.prototype.openStream = function openStream(connection, sql, callb
  var cti = this;
  let asyncCallbackWrapper = function (param2, result2, row) {
    let fields = cti.fetchFields_a(result2);
-    let isMicro = (cti.libtaos.taos_result_precision(result2) == FieldTypes.C_TIMESTAMP_MICRO);
+    let precision = cti.libtaos.taos_result_precision(result2);
    let blocks = new Array(fields.length);
    blocks.fill(null);
    let numOfRows2 = 1;
@@ -582,7 +563,7 @@
      if (!convertFunctions[fields[i]['type']]) {
        throw new errors.DatabaseError("Invalid data type returned from database");
      }
-      blocks[i] = convertFunctions[fields[i]['type']](row, numOfRows2, fields[i]['bytes'], offset, isMicro);
+      blocks[i] = convertFunctions[fields[i]['type']](row, numOfRows2, fields[i]['bytes'], offset, precision);
      offset += fields[i]['bytes'] * numOfRows2;
    }
  }

View File

@@ -1,5 +1,5 @@
const FieldTypes = require('./constants');
+const util = require('util');
/**
 * Various objects such as TaosRow and TaosColumn that help make parsing data easier
 * @module TaosObjects
@@ -14,7 +14,7 @@ const FieldTypes = require('./constants');
 * var trow = new TaosRow(row);
 * console.log(trow.data);
 */
-function TaosRow (row) {
+function TaosRow(row) {
  this.data = row;
  this.length = row.length;
  return this;
@@ -29,10 +29,10 @@ function TaosRow (row) {
 */
function TaosField(field) {
  this._field = field;
  this.name = field.name;
  this.type = FieldTypes.getType(field.type);
  return this;
}
/**
@@ -42,39 +42,110 @@ function TaosField(field) {
 * @param {Date} date - A Javascript date time object or the time in milliseconds past 1970-1-1 00:00:00.000
 */
class TaosTimestamp extends Date {
-  constructor(date, micro = false) {
-    super(date);
-    this._type = 'TaosTimestamp';
-    if (micro) {
-      this.microTime = date - Math.floor(date);
-    }
-  }
+  constructor(date, precision = 0) {
+    if (precision === 1) {
+      super(Math.floor(date / 1000));
+      this.precisionExtras = date % 1000;
+    } else if (precision === 2) {
super(parseInt(date / 1000000));
// use BigInt to fix: 1625801548423914405 % 1000000 = 914496 which not expected (914405)
this.precisionExtras = parseInt(BigInt(date) % 1000000n);
} else {
super(parseInt(date));
}
this.precision = precision;
}
/**
* TDengine raw timestamp.
* @returns raw taos timestamp (int64)
*/
taosTimestamp() {
if (this.precision == 1) {
return (this * 1000 + this.precisionExtras);
} else if (this.precision == 2) {
return (this * 1000000 + this.precisionExtras);
} else {
return Math.floor(this);
}
}
/**
* Gets the microseconds of a Date.
* @return {Int} A microseconds integer
*/
getMicroseconds() {
if (this.precision == 1) {
return this.getMilliseconds() * 1000 + this.precisionExtras;
} else if (this.precision == 2) {
return this.getMilliseconds() * 1000 + this.precisionExtras / 1000;
} else {
return 0;
}
}
/**
* Gets the nanoseconds of a TaosTimestamp.
* @return {Int} A nanoseconds integer
*/
getNanoseconds() {
if (this.precision == 1) {
return this.getMilliseconds() * 1000000 + this.precisionExtras * 1000;
} else if (this.precision == 2) {
return this.getMilliseconds() * 1000000 + this.precisionExtras;
} else {
return 0;
}
}
/**
* @returns {String} a string for timestamp string format
*/
_precisionExtra() {
if (this.precision == 1) {
return String(this.precisionExtras).padStart(3, '0');
} else if (this.precision == 2) {
return String(this.precisionExtras).padStart(6, '0');
} else {
return '';
    }
  }
  /**
   * @function Returns the date into a string usable by TDengine
   * @return {string} A Taos Timestamp String
   */
-  toTaosString(){
+  toTaosString() {
    var tzo = -this.getTimezoneOffset(),
      dif = tzo >= 0 ? '+' : '-',
-      pad = function(num) {
+      pad = function (num) {
        var norm = Math.floor(Math.abs(num));
        return (norm < 10 ? '0' : '') + norm;
      },
-      pad2 = function(num) {
+      pad2 = function (num) {
        var norm = Math.floor(Math.abs(num));
        if (norm < 10) return '00' + norm;
        if (norm < 100) return '0' + norm;
        if (norm < 1000) return norm;
      };
    return this.getFullYear() +
      '-' + pad(this.getMonth() + 1) +
      '-' + pad(this.getDate()) +
      ' ' + pad(this.getHours()) +
      ':' + pad(this.getMinutes()) +
      ':' + pad(this.getSeconds()) +
      '.' + pad2(this.getMilliseconds()) +
-      '' + (this.microTime ? pad2(Math.round(this.microTime * 1000)) : '');
+      '' + this._precisionExtra();
}
/**
* Custom console.log
* @returns {String} string format for debug
*/
[util.inspect.custom](depth, opts) {
return this.toTaosString() + JSON.stringify({ precision: this.precision, precisionExtras: this.precisionExtras }, opts);
}
toString() {
return this.toTaosString();
} }
}
-module.exports = {TaosRow, TaosField, TaosTimestamp}
+module.exports = { TaosRow, TaosField, TaosTimestamp }

View File

@@ -1,13 +1,13 @@
{
  "name": "td2.0-connector",
-  "version": "2.0.8",
+  "version": "2.0.9",
  "description": "A Node.js connector for TDengine.",
  "main": "tdengine.js",
  "directories": {
    "test": "test"
  },
  "scripts": {
-    "test": "node test/test.js"
+    "test": "node test/test.js && node test/testMicroseconds.js && node test/testNanoseconds.js"
  },
  "repository": {
    "type": "git",

View File

@@ -1,4 +1,4 @@
var TDengineConnection = require('./nodetaos/connection.js')
-module.exports.connect = function (connection=null) {
+module.exports.connect = function (connection={}) {
  return new TDengineConnection(connection);
}

View File

@@ -1,5 +1,5 @@
const taos = require('../tdengine');
-var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:10});
+var conn = taos.connect();
var c1 = conn.cursor();
let stime = new Date();
let interval = 1000;

View File

@ -0,0 +1,49 @@
const taos = require('../tdengine');
var conn = taos.connect();
var c1 = conn.cursor();
let stime = new Date();
let interval = 1000;
function convertDateToTS(date) {
let tsArr = date.toISOString().split("T")
return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length - 1) + "\"";
}
function R(l, r) {
return Math.random() * (r - l) - r;
}
function randomBool() {
if (Math.random() < 0.5) {
return true;
}
return false;
}
// Initialize
//c1.execute('drop database td_connector_test;');
const dbname = 'nodejs_test_us';
c1.execute('create database if not exists ' + dbname + ' precision "us"');
c1.execute('use ' + dbname)
c1.execute('create table if not exists tstest (ts timestamp, _int int);');
c1.execute('insert into tstest values(1625801548423914, 0)');
// Select
console.log('select * from tstest');
c1.execute('select * from tstest');
var d = c1.fetchall();
console.log(c1.fields);
let ts = d[0][0];
console.log(ts);
if (ts.taosTimestamp() != 1625801548423914) {
throw "microseconds not match!";
}
if (ts.getMicroseconds() % 1000 !== 914) {
throw "micronsecond precision error";
}
setTimeout(function () {
c1.query('drop database nodejs_us_test;');
}, 200);
setTimeout(function () {
conn.close();
}, 2000);

View File

@ -0,0 +1,49 @@
const taos = require('../tdengine');
var conn = taos.connect();
var c1 = conn.cursor();
let stime = new Date();
let interval = 1000;
function convertDateToTS(date) {
let tsArr = date.toISOString().split("T")
return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length - 1) + "\"";
}
function R(l, r) {
return Math.random() * (r - l) - r;
}
function randomBool() {
if (Math.random() < 0.5) {
return true;
}
return false;
}
// Initialize
//c1.execute('drop database td_connector_test;');
const dbname = 'nodejs_test_ns';
c1.execute('create database if not exists ' + dbname + ' precision "ns"');
c1.execute('use ' + dbname)
c1.execute('create table if not exists tstest (ts timestamp, _int int);');
c1.execute('insert into tstest values(1625801548423914405, 0)');
// Select
console.log('select * from tstest');
c1.execute('select * from tstest');
var d = c1.fetchall();
console.log(c1.fields);
let ts = d[0][0];
console.log(ts);
if (ts.taosTimestamp() != 1625801548423914405) {
throw "nanosecond not match!";
}
if (ts.getNanoseconds() % 1000000 !== 914405) {
throw "nanosecond precision error";
}
setTimeout(function () {
c1.query('drop database nodejs_ns_test;');
}, 200);
setTimeout(function () {
conn.close();
}, 2000);
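The package.json change earlier in this commit wires both new test files into `npm test`; they can also be run one at a time (a sketch, assuming the Node.js connector sources live under src/connector/nodejs as elsewhere in this repository and that a local taosd instance is reachable):

```bash
cd src/connector/nodejs
npm test                        # test.js, testMicroseconds.js, testNanoseconds.js
node test/testNanoseconds.js    # run a single precision test
```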

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
IF (TD_LINUX_64)
@@ -20,8 +20,8 @@ IF (TD_LINUX_64)
  if (CMAKE_C_COMPILER_ID STREQUAL "GNU" AND CMAKE_C_COMPILER_VERSION VERSION_LESS 5.0.0)
    message(WARNING "gcc 4.8.0 will complain too much about flex-generated code, we just bypass building ODBC driver in such case")
  else ()
-    SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Wconversion")
-    SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Wconversion")
+    SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ")
+    SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ")
    ADD_SUBDIRECTORY(src)
    ADD_SUBDIRECTORY(tools)
    ADD_SUBDIRECTORY(examples)

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
add_subdirectory(base)

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
aux_source_directory(. SRC)

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc)

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
LIST(APPEND CQTEST_SRC ./cqtest.c)

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
ADD_SUBDIRECTORY(shell)

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)

View File

@@ -45,7 +45,7 @@ extern void updateBuffer(Command *cmd);
extern int isReadyGo(Command *cmd);
extern void resetCommand(Command *cmd, const char s[]);
-int countPrefixOnes(char c);
+int countPrefixOnes(unsigned char c);
void clearScreen(int ecmd_pos, int cursor_pos);
void printChar(char c, int times);
void positionCursor(int step, int direction);

View File

@@ -26,7 +26,7 @@ typedef struct {
  char widthOnScreen;
} UTFCodeInfo;
-int countPrefixOnes(char c) {
+int countPrefixOnes(unsigned char c) {
  unsigned char mask = 127;
  mask = ~mask;
  int ret = 0;
@@ -48,7 +48,7 @@ void getPrevCharSize(const char *str, int pos, int *size, int *width) {
  while (--pos >= 0) {
    *size += 1;
-    if (str[pos] > 0 || countPrefixOnes(str[pos]) > 1) break;
+    if (str[pos] > 0 || countPrefixOnes((unsigned char )str[pos]) > 1) break;
  }
  int rc = mbtowc(&wc, str + pos, MB_CUR_MAX);

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)

View File

@@ -3216,13 +3216,6 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) {
    return 0;
}
-#if 0
-int readSampleFromJsonFileToMem(SSuperTable * superTblInfo) {
-    // TODO
-    return 0;
-}
-#endif
/*
  Read 10000 lines at most. If more than 10000 lines, continue to read after using
*/
@@ -5122,13 +5115,13 @@ static int32_t generateStbDataTail(
        } else {
            lenOfRow = getRowDataFromSample(
                    data,
-                   remainderBufLen < MAX_DATA_SIZE ? remainderBufLen : MAX_DATA_SIZE,
+                   (remainderBufLen < MAX_DATA_SIZE)?remainderBufLen:MAX_DATA_SIZE,
                    startTime + superTblInfo->timeStampStep * k,
                    superTblInfo,
                    pSamplePos);
        }
-       if (lenOfRow > remainderBufLen) {
+       if ((lenOfRow + 1) > remainderBufLen) {
            break;
        }
@@ -5338,7 +5331,7 @@ static int64_t generateInterlaceDataWithoutStb(
#if STMT_IFACE_ENABLED == 1
static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
-        char *dataType, int32_t dataLen, char **ptr)
+        char *dataType, int32_t dataLen, char **ptr, char *value)
{
    if (0 == strncasecmp(dataType,
            "BINARY", strlen("BINARY"))) {
@ -5348,12 +5341,18 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
return -1; return -1;
} }
char *bind_binary = (char *)*ptr; char *bind_binary = (char *)*ptr;
rand_string(bind_binary, dataLen);
bind->buffer_type = TSDB_DATA_TYPE_BINARY; bind->buffer_type = TSDB_DATA_TYPE_BINARY;
bind->buffer_length = dataLen; if (value) {
bind->buffer = bind_binary; strncpy(bind_binary, value, strlen(value));
bind->buffer_length = strlen(bind_binary);
} else {
rand_string(bind_binary, dataLen);
bind->buffer_length = dataLen;
}
bind->length = &bind->buffer_length; bind->length = &bind->buffer_length;
bind->buffer = bind_binary;
bind->is_null = NULL; bind->is_null = NULL;
*ptr += bind->buffer_length; *ptr += bind->buffer_length;
@ -5365,9 +5364,14 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
return -1; return -1;
} }
char *bind_nchar = (char *)*ptr; char *bind_nchar = (char *)*ptr;
rand_string(bind_nchar, dataLen);
bind->buffer_type = TSDB_DATA_TYPE_NCHAR; bind->buffer_type = TSDB_DATA_TYPE_NCHAR;
if (value) {
strncpy(bind_nchar, value, strlen(value));
} else {
rand_string(bind_nchar, dataLen);
}
bind->buffer_length = strlen(bind_nchar); bind->buffer_length = strlen(bind_nchar);
bind->buffer = bind_nchar; bind->buffer = bind_nchar;
bind->length = &bind->buffer_length; bind->length = &bind->buffer_length;
@ -5378,7 +5382,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
"INT", strlen("INT"))) { "INT", strlen("INT"))) {
int32_t *bind_int = (int32_t *)*ptr; int32_t *bind_int = (int32_t *)*ptr;
*bind_int = rand_int(); if (value) {
*bind_int = atoi(value);
} else {
*bind_int = rand_int();
}
bind->buffer_type = TSDB_DATA_TYPE_INT; bind->buffer_type = TSDB_DATA_TYPE_INT;
bind->buffer_length = sizeof(int32_t); bind->buffer_length = sizeof(int32_t);
bind->buffer = bind_int; bind->buffer = bind_int;
@ -5390,7 +5398,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
"BIGINT", strlen("BIGINT"))) { "BIGINT", strlen("BIGINT"))) {
int64_t *bind_bigint = (int64_t *)*ptr; int64_t *bind_bigint = (int64_t *)*ptr;
*bind_bigint = rand_bigint(); if (value) {
*bind_bigint = atoll(value);
} else {
*bind_bigint = rand_bigint();
}
bind->buffer_type = TSDB_DATA_TYPE_BIGINT; bind->buffer_type = TSDB_DATA_TYPE_BIGINT;
bind->buffer_length = sizeof(int64_t); bind->buffer_length = sizeof(int64_t);
bind->buffer = bind_bigint; bind->buffer = bind_bigint;
@ -5402,7 +5414,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
"FLOAT", strlen("FLOAT"))) { "FLOAT", strlen("FLOAT"))) {
float *bind_float = (float *) *ptr; float *bind_float = (float *) *ptr;
*bind_float = rand_float(); if (value) {
*bind_float = (float)atof(value);
} else {
*bind_float = rand_float();
}
bind->buffer_type = TSDB_DATA_TYPE_FLOAT; bind->buffer_type = TSDB_DATA_TYPE_FLOAT;
bind->buffer_length = sizeof(float); bind->buffer_length = sizeof(float);
bind->buffer = bind_float; bind->buffer = bind_float;
@ -5414,7 +5430,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
"DOUBLE", strlen("DOUBLE"))) { "DOUBLE", strlen("DOUBLE"))) {
double *bind_double = (double *)*ptr; double *bind_double = (double *)*ptr;
*bind_double = rand_double(); if (value) {
*bind_double = atof(value);
} else {
*bind_double = rand_double();
}
bind->buffer_type = TSDB_DATA_TYPE_DOUBLE; bind->buffer_type = TSDB_DATA_TYPE_DOUBLE;
bind->buffer_length = sizeof(double); bind->buffer_length = sizeof(double);
bind->buffer = bind_double; bind->buffer = bind_double;
@ -5426,7 +5446,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
"SMALLINT", strlen("SMALLINT"))) { "SMALLINT", strlen("SMALLINT"))) {
int16_t *bind_smallint = (int16_t *)*ptr; int16_t *bind_smallint = (int16_t *)*ptr;
*bind_smallint = rand_smallint(); if (value) {
*bind_smallint = (int16_t)atoi(value);
} else {
*bind_smallint = rand_smallint();
}
bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; bind->buffer_type = TSDB_DATA_TYPE_SMALLINT;
bind->buffer_length = sizeof(int16_t); bind->buffer_length = sizeof(int16_t);
bind->buffer = bind_smallint; bind->buffer = bind_smallint;
@ -5438,7 +5462,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
"TINYINT", strlen("TINYINT"))) { "TINYINT", strlen("TINYINT"))) {
int8_t *bind_tinyint = (int8_t *)*ptr; int8_t *bind_tinyint = (int8_t *)*ptr;
*bind_tinyint = rand_tinyint(); if (value) {
*bind_tinyint = (int8_t)atoi(value);
} else {
*bind_tinyint = rand_tinyint();
}
bind->buffer_type = TSDB_DATA_TYPE_TINYINT; bind->buffer_type = TSDB_DATA_TYPE_TINYINT;
bind->buffer_length = sizeof(int8_t); bind->buffer_length = sizeof(int8_t);
bind->buffer = bind_tinyint; bind->buffer = bind_tinyint;
@ -5461,7 +5489,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
"TIMESTAMP", strlen("TIMESTAMP"))) { "TIMESTAMP", strlen("TIMESTAMP"))) {
int64_t *bind_ts2 = (int64_t *) *ptr; int64_t *bind_ts2 = (int64_t *) *ptr;
*bind_ts2 = rand_bigint(); if (value) {
*bind_ts2 = atoll(value);
} else {
*bind_ts2 = rand_bigint();
}
bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
bind->buffer_length = sizeof(int64_t); bind->buffer_length = sizeof(int64_t);
bind->buffer = bind_ts2; bind->buffer = bind_ts2;
@@ -5527,12 +5559,13 @@
    ptr += bind->buffer_length;
    for (int i = 0; i < g_args.num_of_CPR; i ++) {
-       bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * (i + 1)));
+       bind = (TAOS_BIND *)((char *)bindArray
+               + (sizeof(TAOS_BIND) * (i + 1)));
        if ( -1 == prepareStmtBindArrayByType(
                bind,
                data_type[i],
                g_args.len_of_binary,
-               &ptr)) {
+               &ptr, NULL)) {
            return -1;
        }
    }
@@ -5551,12 +5584,14 @@
    return k;
}
-static int32_t prepareStbStmt(SSuperTable *stbInfo,
+static int32_t prepareStbStmt(
+       SSuperTable *stbInfo,
        TAOS_STMT *stmt,
        char *tableName, uint32_t batch,
        uint64_t insertRows,
        uint64_t recordFrom,
-       int64_t startTime, char *buffer)
+       int64_t startTime,
+       int64_t *pSamplePos)
{
    int ret = taos_stmt_set_tbname(stmt, tableName);
    if (ret != 0) {
@@ -5567,16 +5602,24 @@ static int32_t prepareStbStmt(SSuperTable *stbInfo,
    char *bindArray = malloc(sizeof(TAOS_BIND) * (stbInfo->columnCount + 1));
    if (bindArray == NULL) {
-       errorPrint("Failed to allocate %d bind params\n",
-               (stbInfo->columnCount + 1));
+       errorPrint("%s() LN%d, Failed to allocate %d bind params\n",
+               __func__, __LINE__, (stbInfo->columnCount + 1));
        return -1;
    }
-   bool tsRand;
+   bool sourceRand;
    if (0 == strncasecmp(stbInfo->dataSource, "rand", strlen("rand"))) {
-       tsRand = true;
+       sourceRand = true;
    } else {
-       tsRand = false;
+       sourceRand = false; // from sample data file
    }
+
+   char *bindBuffer = malloc(g_args.len_of_binary);
+   if (bindBuffer == NULL) {
+       errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n",
+               __func__, __LINE__, g_args.len_of_binary);
+       free(bindArray);
+       return -1;
+   }
    uint32_t k;
@@ -5592,7 +5635,7 @@ static int32_t prepareStbStmt(SSuperTable *stbInfo,
    bind_ts = (int64_t *)ptr;
    bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
-   if (tsRand) {
+   if (sourceRand) {
        *bind_ts = startTime + getTSRandTail(
                stbInfo->timeStampStep, k,
                stbInfo->disorderRatio,
@ -5607,14 +5650,46 @@ static int32_t prepareStbStmt(SSuperTable *stbInfo,
ptr += bind->buffer_length; ptr += bind->buffer_length;
int cursor = 0;
for (int i = 0; i < stbInfo->columnCount; i ++) { for (int i = 0; i < stbInfo->columnCount; i ++) {
bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * (i + 1))); bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * (i + 1)));
if ( -1 == prepareStmtBindArrayByType(
bind, if (sourceRand) {
stbInfo->columns[i].dataType, if ( -1 == prepareStmtBindArrayByType(
stbInfo->columns[i].dataLen, bind,
&ptr)) { stbInfo->columns[i].dataType,
return -1; stbInfo->columns[i].dataLen,
&ptr,
NULL)) {
free(bindArray);
free(bindBuffer);
return -1;
}
} else {
char *restStr = stbInfo->sampleDataBuf + cursor;
int lengthOfRest = strlen(restStr);
int index = 0;
for (index = 0; index < lengthOfRest; index ++) {
if (restStr[index] == ',') {
break;
}
}
memset(bindBuffer, 0, g_args.len_of_binary);
strncpy(bindBuffer, restStr, index);
cursor += index + 1; // skip ',' too
if ( -1 == prepareStmtBindArrayByType(
bind,
stbInfo->columns[i].dataType,
stbInfo->columns[i].dataLen,
&ptr,
bindBuffer)) {
free(bindArray);
free(bindBuffer);
return -1;
}
} }
} }
taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray); taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray);
@ -5623,11 +5698,16 @@ static int32_t prepareStbStmt(SSuperTable *stbInfo,
k++; k++;
recordFrom ++; recordFrom ++;
if (!sourceRand) {
(*pSamplePos) ++;
}
if (recordFrom >= insertRows) { if (recordFrom >= insertRows) {
break; break;
} }
} }
free(bindBuffer);
free(bindArray); free(bindArray);
return k; return k;
} }
@@ -5820,13 +5900,14 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
        if (superTblInfo) {
            if (superTblInfo->iface == STMT_IFACE) {
#if STMT_IFACE_ENABLED == 1
-               generated = prepareStbStmt(superTblInfo,
+               generated = prepareStbStmt(
+                       superTblInfo,
                        pThreadInfo->stmt,
                        tableName,
                        batchPerTbl,
                        insertRows, i,
                        startTime,
-                       pThreadInfo->buffer);
+                       &(pThreadInfo->samplePos));
#else
                generated = -1;
#endif
@@ -6051,7 +6132,8 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
                        pThreadInfo->stmt,
                        tableName,
                        g_args.num_of_RPR,
-                       insertRows, i, start_time, pstr);
+                       insertRows, i, start_time,
+                       &(pThreadInfo->samplePos));
#else
                generated = -1;
#endif
@ -7332,6 +7414,7 @@ static void *superSubscribe(void *sarg) {
TAOS_RES* res = NULL; TAOS_RES* res = NULL;
uint64_t st = 0, et = 0; uint64_t st = 0, et = 0;
while ((g_queryInfo.superQueryInfo.endAfterConsume == -1) while ((g_queryInfo.superQueryInfo.endAfterConsume == -1)
|| (g_queryInfo.superQueryInfo.endAfterConsume > || (g_queryInfo.superQueryInfo.endAfterConsume >
consumed[pThreadInfo->end_table_to consumed[pThreadInfo->end_table_to

View File

@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)

View File

@@ -27,6 +27,8 @@
#include "tutil.h"
#include <taos.h>
+#define TSDB_SUPPORT_NANOSECOND 1
#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255
#define COMMAND_SIZE 65536
#define MAX_RECORDS_PER_REQ 32766
@@ -228,7 +230,11 @@ static struct argp_option options[] = {
    {"avro", 'V', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 2},
    {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 4},
    {"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 5},
+#if TSDB_SUPPORT_NANOSECOND == 1
    {"precision", 'C', "PRECISION", 0, "Epoch precision. Valid value is one of ms, us, and ns. Default is ms.", 6},
+#else
+    {"precision", 'C', "PRECISION", 0, "Epoch precision. Valid value is one of ms and us. Default is ms.", 6},
+#endif
    {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 3},
    {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3},
    {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3},
@ -527,7 +533,10 @@ static void parse_precision_first(
} }
if ((0 != strncasecmp(tmp, "ms", strlen("ms"))) if ((0 != strncasecmp(tmp, "ms", strlen("ms")))
&& (0 != strncasecmp(tmp, "us", strlen("us"))) && (0 != strncasecmp(tmp, "us", strlen("us")))
&& (0 != strncasecmp(tmp, "ns", strlen("ns")))) { #if TSDB_SUPPORT_NANOSECOND == 1
&& (0 != strncasecmp(tmp, "ns", strlen("ns")))
#endif
) {
// //
errorPrint("input precision: %s is invalid value\n", tmp); errorPrint("input precision: %s is invalid value\n", tmp);
free(tmp); free(tmp);
@ -564,9 +573,11 @@ static void parse_timestamp(
} else if (0 == strncasecmp(arguments->precision, } else if (0 == strncasecmp(arguments->precision,
"us", strlen("us"))) { "us", strlen("us"))) {
timePrec = TSDB_TIME_PRECISION_MICRO; timePrec = TSDB_TIME_PRECISION_MICRO;
#if TSDB_SUPPORT_NANOSECOND == 1
} else if (0 == strncasecmp(arguments->precision, } else if (0 == strncasecmp(arguments->precision,
"ns", strlen("ns"))) { "ns", strlen("ns"))) {
timePrec = TSDB_TIME_PRECISION_NANO; timePrec = TSDB_TIME_PRECISION_NANO;
#endif
} else { } else {
errorPrint("Invalid time precision: %s", errorPrint("Invalid time precision: %s",
arguments->precision); arguments->precision);
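Aside: both taosdump hunks above gate nanosecond handling behind the same macro, so a build without nanosecond support rejects "ns" at argument-parsing time. A compressed, standalone illustration of that compile-time gate (parsePrecision is an invented helper for this example; only the macro name comes from the diff):

#include <stdio.h>
#include <string.h>
#include <strings.h>

#define TSDB_SUPPORT_NANOSECOND 1

/* Returns 0 for ms, 1 for us, 2 for ns; ns is accepted only when nanosecond support is compiled in. */
static int parsePrecision(const char *p) {
    if (0 == strncasecmp(p, "ms", 2)) return 0;
    if (0 == strncasecmp(p, "us", 2)) return 1;
#if TSDB_SUPPORT_NANOSECOND == 1
    if (0 == strncasecmp(p, "ns", 2)) return 2;
#endif
    return -1;   /* invalid precision, mirroring taosdump's errorPrint path */
}

int main(void) {
    printf("ms=%d us=%d ns=%d\n", parsePrecision("ms"), parsePrecision("us"), parsePrecision("ns"));
    return 0;
}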
View File
@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
View File
@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
IF (TD_LINUX) IF (TD_LINUX)
View File
@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
AUX_SOURCE_DIRECTORY(. SRC) AUX_SOURCE_DIRECTORY(. SRC)
View File
@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
INCLUDE_DIRECTORIES(.) INCLUDE_DIRECTORIES(.)
View File
@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
AUX_SOURCE_DIRECTORY(. SRC) AUX_SOURCE_DIRECTORY(. SRC)
View File
@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
AUX_SOURCE_DIRECTORY(. SRC) AUX_SOURCE_DIRECTORY(. SRC)
View File
@ -1,10 +1,11 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest) FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest)
FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib) FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib /usr/lib64)
FIND_LIBRARY(LIB_GTEST_SHARED_DIR libgtest.so /usr/lib/ /usr/local/lib /usr/lib64)
IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR) IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR))
MESSAGE(STATUS "gTest library found, build unit test") MESSAGE(STATUS "gTest library found, build unit test")
# GoogleTest requires at least C++11 # GoogleTest requires at least C++11
@ -17,4 +18,4 @@ IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR)
ADD_EXECUTABLE(osTest ${SOURCE_LIST}) ADD_EXECUTABLE(osTest ${SOURCE_LIST})
TARGET_LINK_LIBRARIES(osTest taos os tutil common gtest pthread) TARGET_LINK_LIBRARIES(osTest taos os tutil common gtest pthread)
ENDIF() ENDIF()
View File
@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
ADD_SUBDIRECTORY(monitor) ADD_SUBDIRECTORY(monitor)
View File
@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/zlib-1.2.11/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/zlib-1.2.11/inc)
View File
@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc) INCLUDE_DIRECTORIES(inc)
View File
@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc) INCLUDE_DIRECTORIES(inc)
View File
@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/tsdb/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/tsdb/inc)
View File
@ -73,14 +73,14 @@ typedef struct SResultRowPool {
typedef struct SResultRow { typedef struct SResultRow {
int32_t pageId; // pageId & rowId is the position of current result in disk-based output buffer int32_t pageId; // pageId & rowId is the position of current result in disk-based output buffer
int32_t offset:29; // row index in buffer page int32_t offset:29; // row index in buffer page
bool startInterp; // the time window start timestamp has done the interpolation already. bool startInterp; // the time window start timestamp has done the interpolation already.
bool endInterp; // the time window end timestamp has done the interpolation already. bool endInterp; // the time window end timestamp has done the interpolation already.
bool closed; // this result status: closed or opened bool closed; // this result status: closed or opened
uint32_t numOfRows; // number of rows of current time window uint32_t numOfRows; // number of rows of current time window
SResultRowCellInfo* pCellInfo; // For each result column, there is a resultInfo SResultRowCellInfo* pCellInfo; // For each result column, there is a resultInfo
STimeWindow win; STimeWindow win;
char* key; // start key of current result row char *key; // start key of current result row
} SResultRow; } SResultRow;
typedef struct SGroupResInfo { typedef struct SGroupResInfo {
@ -105,7 +105,7 @@ typedef struct SResultRowInfo {
int16_t type:8; // data type for hash key int16_t type:8; // data type for hash key
int32_t size:24; // number of result set int32_t size:24; // number of result set
int32_t capacity; // max capacity int32_t capacity; // max capacity
int32_t curIndex; // current start active index SResultRow* current; // current start active index
int64_t prevSKey; // previous (not completed) sliding window start key int64_t prevSKey; // previous (not completed) sliding window start key
} SResultRowInfo; } SResultRowInfo;
@ -118,6 +118,7 @@ typedef struct SColumnFilterElem {
typedef struct SSingleColumnFilterInfo { typedef struct SSingleColumnFilterInfo {
void* pData; void* pData;
void* pData2; //used for nchar column
int32_t numOfFilters; int32_t numOfFilters;
SColumnInfo info; SColumnInfo info;
SColumnFilterElem* pFilters; SColumnFilterElem* pFilters;
@ -276,6 +277,7 @@ typedef struct SQueryRuntimeEnv {
bool enableGroupData; bool enableGroupData;
SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file
SHashObj* pResultRowHashTable; // quick locate the window object for each result SHashObj* pResultRowHashTable; // quick locate the window object for each result
SHashObj* pResultRowListSet; // used to check if current ResultRowInfo has ResultRow object or not
char* keyBuf; // window key buffer char* keyBuf; // window key buffer
SResultRowPool* pool; // window result object pool SResultRowPool* pool; // window result object pool
char** prevRow; char** prevRow;
View File
@ -410,10 +410,25 @@ static void prepareResultListBuffer(SResultRowInfo* pResultRowInfo, SQueryRuntim
pResultRowInfo->capacity = (int32_t)newCapacity; pResultRowInfo->capacity = (int32_t)newCapacity;
} }
static SResultRow *doPrepareResultRowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, char *pData, //static int32_t ascResultRowCompareFn(const void* p1, const void* p2) {
int16_t bytes, bool masterscan, uint64_t uid) { // SResultRow* pRow1 = *(SResultRow**)p1;
// SResultRow* pRow2 = *(SResultRow**)p2;
//
// if (pRow1 == pRow2) {
// return 0;
// } else {
// return pRow1->win.skey < pRow2->win.skey? -1:1;
// }
//}
//static int32_t descResultRowCompareFn(const void* p1, const void* p2) {
// return -ascResultRowCompareFn(p1, p2);
//}
static SResultRow *doPrepareResultRowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, int64_t tid, char *pData,
int16_t bytes, bool masterscan, uint64_t tableGroupId) {
bool existed = false; bool existed = false;
SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, uid); SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tableGroupId);
SResultRow **p1 = SResultRow **p1 =
(SResultRow **)taosHashGet(pRuntimeEnv->pResultRowHashTable, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes)); (SResultRow **)taosHashGet(pRuntimeEnv->pResultRowHashTable, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
@ -425,16 +440,26 @@ static SResultRow *doPrepareResultRowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SRes
} }
if (p1 != NULL) { if (p1 != NULL) {
for(int32_t i = pResultRowInfo->size - 1; i >= 0; --i) { pResultRowInfo->current = (*p1);
if (pResultRowInfo->pResult[i] == (*p1)) {
pResultRowInfo->curIndex = i; if (pResultRowInfo->size == 0) {
existed = true; existed = false;
break; } else if (pResultRowInfo->size == 1) {
} existed = (pResultRowInfo->pResult[0] == (*p1));
} else { // check if current pResultRowInfo contains the existed pResultRow
SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid);
void* ptr = taosHashGet(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
existed = (ptr != NULL);
// __compar_fn_t fn = QUERY_IS_ASC_QUERY(pRuntimeEnv->pQueryAttr)? ascResultRowCompareFn:descResultRowCompareFn;
// void* ptr = taosbsearch(p1, pResultRowInfo->pResult, pResultRowInfo->size, POINTER_BYTES, fn, TD_EQ);
// if (ptr != NULL) {
// existed = true;
// }
} }
} }
} else { } else {
if (p1 != NULL) { // group by column query // In case of group by column query, the required SResultRow object must already exist in the pResultRowInfo object.
if (p1 != NULL) {
return *p1; return *p1;
} }
} }
@ -456,8 +481,12 @@ static SResultRow *doPrepareResultRowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SRes
pResult = *p1; pResult = *p1;
} }
pResultRowInfo->pResult[pResultRowInfo->size] = pResult; pResultRowInfo->pResult[pResultRowInfo->size++] = pResult;
pResultRowInfo->curIndex = pResultRowInfo->size++; pResultRowInfo->current = pResult;
int64_t dummyVal = 0;
SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid);
taosHashPut(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &dummyVal, POINTER_BYTES);
} }
// too many time window in query // too many time window in query
@ -465,7 +494,7 @@ static SResultRow *doPrepareResultRowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SRes
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW); longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW);
} }
return getResultRow(pResultRowInfo, pResultRowInfo->curIndex); return pResultRowInfo->current;
} }
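Aside: the refactor above replaces the backwards scan over pResultRowInfo->pResult with an O(1) membership test in the new pResultRowListSet, keyed by the window key plus the table id. A rough sketch of that lookup-then-record pattern, reusing only identifiers visible in this diff (it assumes the surrounding declarations and the SET_RES_WINDOW_KEY / GET_RES_WINDOW_KEY_LEN macros are in scope, so it is not standalone):

/* Has this (window key, tid) pair already been attached to the current SResultRowInfo? */
SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid);
bool existed = (taosHashGet(pRuntimeEnv->pResultRowListSet,
                            pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes)) != NULL);

if (!existed) {
    /* When a brand-new row is appended, record the pair so later checks stay O(1)
     * instead of walking pResultRowInfo->pResult as the old curIndex code did. */
    int64_t dummyVal = 0;
    taosHashPut(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf,
                GET_RES_WINDOW_KEY_LEN(bytes), &dummyVal, POINTER_BYTES);
}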
static void getInitialStartTimeWindow(SQueryAttr* pQueryAttr, TSKEY ts, STimeWindow* w) { static void getInitialStartTimeWindow(SQueryAttr* pQueryAttr, TSKEY ts, STimeWindow* w) {
@ -496,7 +525,7 @@ static void getInitialStartTimeWindow(SQueryAttr* pQueryAttr, TSKEY ts, STimeWin
static STimeWindow getActiveTimeWindow(SResultRowInfo * pResultRowInfo, int64_t ts, SQueryAttr *pQueryAttr) { static STimeWindow getActiveTimeWindow(SResultRowInfo * pResultRowInfo, int64_t ts, SQueryAttr *pQueryAttr) {
STimeWindow w = {0}; STimeWindow w = {0};
if (pResultRowInfo->curIndex == -1) { // the first window, from the previous stored value if (pResultRowInfo->current == NULL) { // the first window, from the previous stored value
if (pResultRowInfo->prevSKey == TSKEY_INITIAL_VAL) { if (pResultRowInfo->prevSKey == TSKEY_INITIAL_VAL) {
getInitialStartTimeWindow(pQueryAttr, ts, &w); getInitialStartTimeWindow(pQueryAttr, ts, &w);
pResultRowInfo->prevSKey = w.skey; pResultRowInfo->prevSKey = w.skey;
@ -510,8 +539,9 @@ static STimeWindow getActiveTimeWindow(SResultRowInfo * pResultRowInfo, int64_t
w.ekey = w.skey + pQueryAttr->interval.interval - 1; w.ekey = w.skey + pQueryAttr->interval.interval - 1;
} }
} else { } else {
int32_t slot = curTimeWindowIndex(pResultRowInfo); // int32_t slot = curTimeWindowIndex(pResultRowInfo);
SResultRow* pWindowRes = getResultRow(pResultRowInfo, slot); // SResultRow* pWindowRes = getResultRow(pResultRowInfo, slot);
SResultRow* pWindowRes = pResultRowInfo->current;
w = pWindowRes->win; w = pWindowRes->win;
} }
@ -592,13 +622,13 @@ static int32_t addNewWindowResultBuf(SResultRow *pWindowRes, SDiskbasedResultBuf
return 0; return 0;
} }
static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, STimeWindow *win, static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, int64_t tid, STimeWindow *win,
bool masterscan, SResultRow **pResult, int64_t groupId, SQLFunctionCtx* pCtx, bool masterscan, SResultRow **pResult, int64_t tableGroupId, SQLFunctionCtx* pCtx,
int32_t numOfOutput, int32_t* rowCellInfoOffset) { int32_t numOfOutput, int32_t* rowCellInfoOffset) {
assert(win->skey <= win->ekey); assert(win->skey <= win->ekey);
SDiskbasedResultBuf *pResultBuf = pRuntimeEnv->pResultBuf; SDiskbasedResultBuf *pResultBuf = pRuntimeEnv->pResultBuf;
SResultRow *pResultRow = doPrepareResultRowFromKey(pRuntimeEnv, pResultRowInfo, (char *)&win->skey, TSDB_KEYSIZE, masterscan, groupId); SResultRow *pResultRow = doPrepareResultRowFromKey(pRuntimeEnv, pResultRowInfo, tid, (char *)&win->skey, TSDB_KEYSIZE, masterscan, tableGroupId);
if (pResultRow == NULL) { if (pResultRow == NULL) {
*pResult = NULL; *pResult = NULL;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
@ -606,7 +636,7 @@ static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRow
// not assign result buffer yet, add new result buffer // not assign result buffer yet, add new result buffer
if (pResultRow->pageId == -1) { if (pResultRow->pageId == -1) {
int32_t ret = addNewWindowResultBuf(pResultRow, pResultBuf, (int32_t) groupId, pRuntimeEnv->pQueryAttr->intermediateResultRowSize); int32_t ret = addNewWindowResultBuf(pResultRow, pResultBuf, (int32_t) tableGroupId, pRuntimeEnv->pQueryAttr->intermediateResultRowSize);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
return -1; return -1;
} }
@ -697,7 +727,12 @@ static void doUpdateResultRowIndex(SResultRowInfo*pResultRowInfo, TSKEY lastKey,
// all result rows are closed, set the last one to be the skey // all result rows are closed, set the last one to be the skey
if (skey == TSKEY_INITIAL_VAL) { if (skey == TSKEY_INITIAL_VAL) {
pResultRowInfo->curIndex = pResultRowInfo->size - 1; if (pResultRowInfo->size == 0) {
// assert(pResultRowInfo->current == NULL);
pResultRowInfo->current = NULL;
} else {
pResultRowInfo->current = pResultRowInfo->pResult[pResultRowInfo->size - 1];
}
} else { } else {
for (i = pResultRowInfo->size - 1; i >= 0; --i) { for (i = pResultRowInfo->size - 1; i >= 0; --i) {
@ -708,12 +743,12 @@ static void doUpdateResultRowIndex(SResultRowInfo*pResultRowInfo, TSKEY lastKey,
} }
if (i == pResultRowInfo->size - 1) { if (i == pResultRowInfo->size - 1) {
pResultRowInfo->curIndex = i; pResultRowInfo->current = pResultRowInfo->pResult[i];
} else { } else {
pResultRowInfo->curIndex = i + 1; // current not closed result object pResultRowInfo->current = pResultRowInfo->pResult[i + 1]; // current not closed result object
} }
pResultRowInfo->prevSKey = pResultRowInfo->pResult[pResultRowInfo->curIndex]->win.skey; pResultRowInfo->prevSKey = pResultRowInfo->current->win.skey;
} }
} }
@ -721,7 +756,7 @@ static void updateResultRowInfoActiveIndex(SResultRowInfo* pResultRowInfo, SQuer
bool ascQuery = QUERY_IS_ASC_QUERY(pQueryAttr); bool ascQuery = QUERY_IS_ASC_QUERY(pQueryAttr);
if ((lastKey > pQueryAttr->window.ekey && ascQuery) || (lastKey < pQueryAttr->window.ekey && (!ascQuery))) { if ((lastKey > pQueryAttr->window.ekey && ascQuery) || (lastKey < pQueryAttr->window.ekey && (!ascQuery))) {
closeAllResultRows(pResultRowInfo); closeAllResultRows(pResultRowInfo);
pResultRowInfo->curIndex = pResultRowInfo->size - 1; pResultRowInfo->current = pResultRowInfo->pResult[pResultRowInfo->size - 1];
} else { } else {
int32_t step = ascQuery ? 1 : -1; int32_t step = ascQuery ? 1 : -1;
doUpdateResultRowIndex(pResultRowInfo, lastKey - step, ascQuery, pQueryAttr->timeWindowInterpo); doUpdateResultRowIndex(pResultRowInfo, lastKey - step, ascQuery, pQueryAttr->timeWindowInterpo);
@ -1220,7 +1255,7 @@ static void doWindowBorderInterpolation(SOperatorInfo* pOperatorInfo, SSDataBloc
} }
} }
static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pSDataBlock, int32_t groupId) { static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pSDataBlock, int32_t tableGroupId) {
STableIntervalOperatorInfo* pInfo = (STableIntervalOperatorInfo*) pOperatorInfo->info; STableIntervalOperatorInfo* pInfo = (STableIntervalOperatorInfo*) pOperatorInfo->info;
SQueryRuntimeEnv* pRuntimeEnv = pOperatorInfo->pRuntimeEnv; SQueryRuntimeEnv* pRuntimeEnv = pOperatorInfo->pRuntimeEnv;
@ -1230,7 +1265,8 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQueryAttr->order.order); int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQueryAttr->order.order);
bool ascQuery = QUERY_IS_ASC_QUERY(pQueryAttr); bool ascQuery = QUERY_IS_ASC_QUERY(pQueryAttr);
int32_t prevIndex = curTimeWindowIndex(pResultRowInfo); SResultRow* prevRow = pResultRowInfo->current;
// int32_t prevIndex = curTimeWindowIndex(pResultRowInfo);
TSKEY* tsCols = NULL; TSKEY* tsCols = NULL;
if (pSDataBlock->pDataBlock != NULL) { if (pSDataBlock->pDataBlock != NULL) {
@ -1247,7 +1283,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
bool masterScan = IS_MASTER_SCAN(pRuntimeEnv); bool masterScan = IS_MASTER_SCAN(pRuntimeEnv);
SResultRow* pResult = NULL; SResultRow* pResult = NULL;
int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, pResultRowInfo, &win, masterScan, &pResult, groupId, pInfo->pCtx, int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, tableGroupId, pInfo->pCtx,
numOfOutput, pInfo->rowCellInfoOffset); numOfOutput, pInfo->rowCellInfoOffset);
if (ret != TSDB_CODE_SUCCESS || pResult == NULL) { if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
@ -1259,18 +1295,24 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
getNumOfRowsInTimeWindow(pRuntimeEnv, &pSDataBlock->info, tsCols, startPos, ekey, binarySearchForKey, true); getNumOfRowsInTimeWindow(pRuntimeEnv, &pSDataBlock->info, tsCols, startPos, ekey, binarySearchForKey, true);
// prev time window not interpolation yet. // prev time window not interpolation yet.
int32_t curIndex = curTimeWindowIndex(pResultRowInfo); // int32_t curIndex = curTimeWindowIndex(pResultRowInfo);
if (prevIndex != -1 && prevIndex < curIndex && pQueryAttr->timeWindowInterpo) { // if (prevIndex != -1 && prevIndex < curIndex && pQueryAttr->timeWindowInterpo) {
for (int32_t j = prevIndex; j < curIndex; ++j) { // previous time window may be all closed already. // for (int32_t j = prevIndex; j < curIndex; ++j) { // previous time window may be all closed already.
if (prevRow != NULL && prevRow != pResultRowInfo->current && pQueryAttr->timeWindowInterpo) {
int32_t j = 0;
while(pResultRowInfo->pResult[j] != prevRow) {
j++;
}
for(; pResultRowInfo->pResult[j] != pResultRowInfo->current; ++j) {
SResultRow* pRes = pResultRowInfo->pResult[j]; SResultRow* pRes = pResultRowInfo->pResult[j];
if (pRes->closed) { if (pRes->closed) {
assert(resultRowInterpolated(pRes, RESULT_ROW_START_INTERP) && assert(resultRowInterpolated(pRes, RESULT_ROW_START_INTERP) && resultRowInterpolated(pRes, RESULT_ROW_END_INTERP));
resultRowInterpolated(pRes, RESULT_ROW_END_INTERP));
continue; continue;
} }
STimeWindow w = pRes->win; STimeWindow w = pRes->win;
ret = setWindowOutputBufByKey(pRuntimeEnv, pResultRowInfo, &w, masterScan, &pResult, groupId, pInfo->pCtx, ret = setWindowOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &w, masterScan, &pResult, tableGroupId, pInfo->pCtx,
numOfOutput, pInfo->rowCellInfoOffset); numOfOutput, pInfo->rowCellInfoOffset);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
@ -1288,7 +1330,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
} }
// restore current time window // restore current time window
ret = setWindowOutputBufByKey(pRuntimeEnv, pResultRowInfo, &win, masterScan, &pResult, groupId, pInfo->pCtx, ret = setWindowOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, tableGroupId, pInfo->pCtx,
numOfOutput, pInfo->rowCellInfoOffset); numOfOutput, pInfo->rowCellInfoOffset);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
@ -1308,7 +1350,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
} }
// null data, failed to allocate more memory buffer // null data, failed to allocate more memory buffer
int32_t code = setWindowOutputBufByKey(pRuntimeEnv, pResultRowInfo, &nextWin, masterScan, &pResult, groupId, int32_t code = setWindowOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &nextWin, masterScan, &pResult, tableGroupId,
pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset); pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset);
if (code != TSDB_CODE_SUCCESS || pResult == NULL) { if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
@ -1449,7 +1491,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInf
SResultRow* pResult = NULL; SResultRow* pResult = NULL;
pInfo->curWindow.ekey = pInfo->curWindow.skey; pInfo->curWindow.ekey = pInfo->curWindow.skey;
int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, &pInfo->curWindow, masterScan, int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, pSDataBlock->info.tid, &pInfo->curWindow, masterScan,
&pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput, &pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput,
pBInfo->rowCellInfoOffset); pBInfo->rowCellInfoOffset);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
@ -1470,7 +1512,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInf
SResultRow* pResult = NULL; SResultRow* pResult = NULL;
pInfo->curWindow.ekey = pInfo->curWindow.skey; pInfo->curWindow.ekey = pInfo->curWindow.skey;
int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, &pInfo->curWindow, masterScan, int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, pSDataBlock->info.tid, &pInfo->curWindow, masterScan,
&pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput, &pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput,
pBInfo->rowCellInfoOffset); pBInfo->rowCellInfoOffset);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
@ -1513,7 +1555,8 @@ static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasic
len = varDataLen(pData); len = varDataLen(pData);
} }
SResultRow *pResultRow = doPrepareResultRowFromKey(pRuntimeEnv, pResultRowInfo, d, len, true, groupIndex); int64_t tid = 0;
SResultRow *pResultRow = doPrepareResultRowFromKey(pRuntimeEnv, pResultRowInfo, tid, d, len, true, groupIndex);
assert (pResultRow != NULL); assert (pResultRow != NULL);
setResultRowKey(pResultRow, pData, type); setResultRowKey(pResultRow, pData, type);
@ -1777,6 +1820,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
pRuntimeEnv->pQueryAttr = pQueryAttr; pRuntimeEnv->pQueryAttr = pQueryAttr;
pRuntimeEnv->pResultRowHashTable = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); pRuntimeEnv->pResultRowHashTable = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
pRuntimeEnv->pResultRowListSet = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
pRuntimeEnv->keyBuf = malloc(pQueryAttr->maxTableColumnWidth + sizeof(int64_t)); pRuntimeEnv->keyBuf = malloc(pQueryAttr->maxTableColumnWidth + sizeof(int64_t));
pRuntimeEnv->pool = initResultRowPool(getResultRowSize(pRuntimeEnv)); pRuntimeEnv->pool = initResultRowPool(getResultRowSize(pRuntimeEnv));
@ -2026,6 +2070,9 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {
taosHashCleanup(pRuntimeEnv->pTableRetrieveTsMap); taosHashCleanup(pRuntimeEnv->pTableRetrieveTsMap);
pRuntimeEnv->pTableRetrieveTsMap = NULL; pRuntimeEnv->pTableRetrieveTsMap = NULL;
taosHashCleanup(pRuntimeEnv->pResultRowListSet);
pRuntimeEnv->pResultRowListSet = NULL;
destroyOperatorInfo(pRuntimeEnv->proot); destroyOperatorInfo(pRuntimeEnv->proot);
pRuntimeEnv->pool = destroyResultRowPool(pRuntimeEnv->pool); pRuntimeEnv->pool = destroyResultRowPool(pRuntimeEnv->pool);
@ -2756,7 +2803,7 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa
TSKEY k = ascQuery? pBlock->info.window.skey : pBlock->info.window.ekey; TSKEY k = ascQuery? pBlock->info.window.skey : pBlock->info.window.ekey;
STimeWindow win = getActiveTimeWindow(pTableScanInfo->pResultRowInfo, k, pQueryAttr); STimeWindow win = getActiveTimeWindow(pTableScanInfo->pResultRowInfo, k, pQueryAttr);
if (setWindowOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, &win, masterScan, &pResult, groupId, if (setWindowOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, pBlock->info.tid, &win, masterScan, &pResult, groupId,
pTableScanInfo->pCtx, pTableScanInfo->numOfOutput, pTableScanInfo->pCtx, pTableScanInfo->numOfOutput,
pTableScanInfo->rowCellInfoOffset) != TSDB_CODE_SUCCESS) { pTableScanInfo->rowCellInfoOffset) != TSDB_CODE_SUCCESS) {
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
@ -2802,7 +2849,7 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa
TSKEY k = ascQuery? pBlock->info.window.skey : pBlock->info.window.ekey; TSKEY k = ascQuery? pBlock->info.window.skey : pBlock->info.window.ekey;
STimeWindow win = getActiveTimeWindow(pTableScanInfo->pResultRowInfo, k, pQueryAttr); STimeWindow win = getActiveTimeWindow(pTableScanInfo->pResultRowInfo, k, pQueryAttr);
if (setWindowOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, &win, masterScan, &pResult, groupId, if (setWindowOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, pBlock->info.tid, &win, masterScan, &pResult, groupId,
pTableScanInfo->pCtx, pTableScanInfo->numOfOutput, pTableScanInfo->pCtx, pTableScanInfo->numOfOutput,
pTableScanInfo->rowCellInfoOffset) != TSDB_CODE_SUCCESS) { pTableScanInfo->rowCellInfoOffset) != TSDB_CODE_SUCCESS) {
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
@ -3146,7 +3193,7 @@ void copyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, int32_t threshold, SSDataBl
} }
static void updateTableQueryInfoForReverseScan(SQueryAttr *pQueryAttr, STableQueryInfo *pTableQueryInfo) { static void updateTableQueryInfoForReverseScan(STableQueryInfo *pTableQueryInfo) {
if (pTableQueryInfo == NULL) { if (pTableQueryInfo == NULL) {
return; return;
} }
@ -3158,7 +3205,12 @@ static void updateTableQueryInfoForReverseScan(SQueryAttr *pQueryAttr, STableQue
pTableQueryInfo->cur.vgroupIndex = -1; pTableQueryInfo->cur.vgroupIndex = -1;
// set the index to be the end slot of result rows array // set the index to be the end slot of result rows array
pTableQueryInfo->resInfo.curIndex = pTableQueryInfo->resInfo.size - 1; SResultRowInfo* pResRowInfo = &pTableQueryInfo->resInfo;
if (pResRowInfo->size > 0) {
pResRowInfo->current = pResRowInfo->pResult[pResRowInfo->size - 1];
} else {
pResRowInfo->current = NULL;
}
} }
static void setupQueryRangeForReverseScan(SQueryRuntimeEnv* pRuntimeEnv) { static void setupQueryRangeForReverseScan(SQueryRuntimeEnv* pRuntimeEnv) {
@ -3172,7 +3224,7 @@ static void setupQueryRangeForReverseScan(SQueryRuntimeEnv* pRuntimeEnv) {
size_t t = taosArrayGetSize(group); size_t t = taosArrayGetSize(group);
for (int32_t j = 0; j < t; ++j) { for (int32_t j = 0; j < t; ++j) {
STableQueryInfo *pCheckInfo = taosArrayGetP(group, j); STableQueryInfo *pCheckInfo = taosArrayGetP(group, j);
updateTableQueryInfoForReverseScan(pQueryAttr, pCheckInfo); updateTableQueryInfoForReverseScan(pCheckInfo);
// update the last key in tableKeyInfo list, the tableKeyInfo is used to build the tsdbQueryHandle and decide // update the last key in tableKeyInfo list, the tableKeyInfo is used to build the tsdbQueryHandle and decide
// the start check timestamp of tsdbQueryHandle // the start check timestamp of tsdbQueryHandle
@ -3211,8 +3263,8 @@ void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, i
int32_t* rowCellInfoOffset = pInfo->rowCellInfoOffset; int32_t* rowCellInfoOffset = pInfo->rowCellInfoOffset;
SResultRowInfo* pResultRowInfo = &pInfo->resultRowInfo; SResultRowInfo* pResultRowInfo = &pInfo->resultRowInfo;
int32_t tid = 0; int64_t tid = 0;
SResultRow* pRow = doPrepareResultRowFromKey(pRuntimeEnv, pResultRowInfo, (char *)&tid, sizeof(tid), true, uid); SResultRow* pRow = doPrepareResultRowFromKey(pRuntimeEnv, pResultRowInfo, tid, (char *)&tid, sizeof(tid), true, uid);
for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) { for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) {
SColumnInfoData* pData = taosArrayGet(pDataBlock->pDataBlock, i); SColumnInfoData* pData = taosArrayGet(pDataBlock->pDataBlock, i);
@ -3443,10 +3495,13 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe
} }
void doSetTableGroupOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo* pResultRowInfo, SQLFunctionCtx* pCtx, void doSetTableGroupOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo* pResultRowInfo, SQLFunctionCtx* pCtx,
int32_t* rowCellInfoOffset, int32_t numOfOutput, int32_t groupIndex) { int32_t* rowCellInfoOffset, int32_t numOfOutput, int32_t tableGroupId) {
// for simple group by query without interval, all the tables belong to one group result.
int64_t uid = 0; int64_t uid = 0;
int64_t tid = 0;
SResultRow* pResultRow = SResultRow* pResultRow =
doPrepareResultRowFromKey(pRuntimeEnv, pResultRowInfo, (char*)&groupIndex, sizeof(groupIndex), true, uid); doPrepareResultRowFromKey(pRuntimeEnv, pResultRowInfo, tid, (char*)&tableGroupId, sizeof(tableGroupId), true, uid);
assert (pResultRow != NULL); assert (pResultRow != NULL);
/* /*
@ -3454,7 +3509,7 @@ void doSetTableGroupOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo* pRe
* all group belong to one result set, and each group result has different group id so set the id to be one * all group belong to one result set, and each group result has different group id so set the id to be one
*/ */
if (pResultRow->pageId == -1) { if (pResultRow->pageId == -1) {
int32_t ret = addNewWindowResultBuf(pResultRow, pRuntimeEnv->pResultBuf, groupIndex, pRuntimeEnv->pQueryAttr->resultRowSize); int32_t ret = addNewWindowResultBuf(pResultRow, pRuntimeEnv->pResultBuf, tableGroupId, pRuntimeEnv->pQueryAttr->resultRowSize);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
return; return;
} }
@ -3463,20 +3518,20 @@ void doSetTableGroupOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo* pRe
setResultRowOutputBufInitCtx(pRuntimeEnv, pResultRow, pCtx, numOfOutput, rowCellInfoOffset); setResultRowOutputBufInitCtx(pRuntimeEnv, pResultRow, pCtx, numOfOutput, rowCellInfoOffset);
} }
void setExecutionContext(SQueryRuntimeEnv* pRuntimeEnv, SOptrBasicInfo* pInfo, int32_t numOfOutput, int32_t groupIndex, void setExecutionContext(SQueryRuntimeEnv* pRuntimeEnv, SOptrBasicInfo* pInfo, int32_t numOfOutput, int32_t tableGroupId,
TSKEY nextKey) { TSKEY nextKey) {
STableQueryInfo *pTableQueryInfo = pRuntimeEnv->current; STableQueryInfo *pTableQueryInfo = pRuntimeEnv->current;
// lastKey needs to be updated // lastKey needs to be updated
pTableQueryInfo->lastKey = nextKey; pTableQueryInfo->lastKey = nextKey;
if (pRuntimeEnv->prevGroupId != INT32_MIN && pRuntimeEnv->prevGroupId == groupIndex) { if (pRuntimeEnv->prevGroupId != INT32_MIN && pRuntimeEnv->prevGroupId == tableGroupId) {
return; return;
} }
doSetTableGroupOutputBuf(pRuntimeEnv, &pInfo->resultRowInfo, pInfo->pCtx, pInfo->rowCellInfoOffset, numOfOutput, groupIndex); doSetTableGroupOutputBuf(pRuntimeEnv, &pInfo->resultRowInfo, pInfo->pCtx, pInfo->rowCellInfoOffset, numOfOutput, tableGroupId);
// record the current active group id // record the current active group id
pRuntimeEnv->prevGroupId = groupIndex; pRuntimeEnv->prevGroupId = tableGroupId;
} }
void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLFunctionCtx* pCtx, void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLFunctionCtx* pCtx,
@ -4574,7 +4629,7 @@ static SSDataBlock* doTableScan(void* param, bool *newgroup) {
} }
if (pResultRowInfo->size > 0) { if (pResultRowInfo->size > 0) {
pResultRowInfo->curIndex = 0; pResultRowInfo->current = pResultRowInfo->pResult[0];
pResultRowInfo->prevSKey = pResultRowInfo->pResult[0]->win.skey; pResultRowInfo->prevSKey = pResultRowInfo->pResult[0]->win.skey;
} }
@ -4600,8 +4655,8 @@ static SSDataBlock* doTableScan(void* param, bool *newgroup) {
pTableScanInfo->order = cond.order; pTableScanInfo->order = cond.order;
if (pResultRowInfo->size > 0) { if (pResultRowInfo->size > 0) {
pResultRowInfo->curIndex = pResultRowInfo->size-1; pResultRowInfo->current = pResultRowInfo->pResult[pResultRowInfo->size - 1];
pResultRowInfo->prevSKey = pResultRowInfo->pResult[pResultRowInfo->size-1]->win.skey; pResultRowInfo->prevSKey = pResultRowInfo->current->win.skey;
} }
p = doTableScanImpl(pOperator, newgroup); p = doTableScanImpl(pOperator, newgroup);
@ -5461,7 +5516,7 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI
} else { } else {
SResultRow* pResult = NULL; SResultRow* pResult = NULL;
pInfo->curWindow.ekey = pInfo->curWindow.skey; pInfo->curWindow.ekey = pInfo->curWindow.skey;
int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, &pInfo->curWindow, masterScan, int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, pSDataBlock->info.tid, &pInfo->curWindow, masterScan,
&pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput, &pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput,
pBInfo->rowCellInfoOffset); pBInfo->rowCellInfoOffset);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
@ -5481,7 +5536,7 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI
SResultRow* pResult = NULL; SResultRow* pResult = NULL;
pInfo->curWindow.ekey = pInfo->curWindow.skey; pInfo->curWindow.ekey = pInfo->curWindow.skey;
int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, &pInfo->curWindow, masterScan, int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, pSDataBlock->info.tid, &pInfo->curWindow, masterScan,
&pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput, &pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput,
pBInfo->rowCellInfoOffset); pBInfo->rowCellInfoOffset);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
View File
@ -938,7 +938,7 @@ void SqlInfoDestroy(SSqlInfo *pInfo) {
taosArrayDestroy(pInfo->pMiscInfo->a); taosArrayDestroy(pInfo->pMiscInfo->a);
} }
if (pInfo->pMiscInfo != NULL && pInfo->type == TSDB_SQL_CREATE_DB) { if (pInfo->pMiscInfo != NULL && (pInfo->type == TSDB_SQL_CREATE_DB || pInfo->type == TSDB_SQL_ALTER_DB)) {
taosArrayDestroyEx(pInfo->pMiscInfo->dbOpt.keep, freeVariant); taosArrayDestroyEx(pInfo->pMiscInfo->dbOpt.keep, freeVariant);
} }
View File
@ -45,7 +45,7 @@ int32_t initResultRowInfo(SResultRowInfo *pResultRowInfo, int32_t size, int16_t
pResultRowInfo->type = type; pResultRowInfo->type = type;
pResultRowInfo->size = 0; pResultRowInfo->size = 0;
pResultRowInfo->prevSKey = TSKEY_INITIAL_VAL; pResultRowInfo->prevSKey = TSKEY_INITIAL_VAL;
pResultRowInfo->curIndex = -1; pResultRowInfo->current = NULL;
pResultRowInfo->capacity = size; pResultRowInfo->capacity = size;
pResultRowInfo->pResult = calloc(pResultRowInfo->capacity, POINTER_BYTES); pResultRowInfo->pResult = calloc(pResultRowInfo->capacity, POINTER_BYTES);
@ -90,9 +90,9 @@ void resetResultRowInfo(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRo
SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, &groupIndex, sizeof(groupIndex), uid); SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, &groupIndex, sizeof(groupIndex), uid);
taosHashRemove(pRuntimeEnv->pResultRowHashTable, (const char *)pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(sizeof(groupIndex))); taosHashRemove(pRuntimeEnv->pResultRowHashTable, (const char *)pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(sizeof(groupIndex)));
} }
pResultRowInfo->curIndex = -1; pResultRowInfo->size = 0;
pResultRowInfo->size = 0; pResultRowInfo->current = NULL;
pResultRowInfo->prevSKey = TSKEY_INITIAL_VAL; pResultRowInfo->prevSKey = TSKEY_INITIAL_VAL;
} }
View File
@ -1,10 +1,11 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest) FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest)
FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib) FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib /usr/lib64)
FIND_LIBRARY(LIB_GTEST_SHARED_DIR libgtest.so /usr/lib/ /usr/local/lib /usr/lib64)
IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR) IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR))
MESSAGE(STATUS "gTest library found, build unit test") MESSAGE(STATUS "gTest library found, build unit test")
# GoogleTest requires at least C++11 # GoogleTest requires at least C++11
View File
@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc) INCLUDE_DIRECTORIES(inc)
View File
@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/rpc/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/rpc/inc)
View File
@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc) INCLUDE_DIRECTORIES(inc)
View File
@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
IF (TD_LINUX) IF (TD_LINUX)
View File
@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc) INCLUDE_DIRECTORIES(inc)
View File
@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc) INCLUDE_DIRECTORIES(inc)
View File
@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/rpc/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/rpc/inc)
View File
@ -126,15 +126,38 @@ _hash_fn_t taosGetDefaultHashFunction(int32_t type) {
_hash_fn_t fn = NULL; _hash_fn_t fn = NULL;
switch(type) { switch(type) {
case TSDB_DATA_TYPE_TIMESTAMP: case TSDB_DATA_TYPE_TIMESTAMP:
case TSDB_DATA_TYPE_BIGINT: fn = taosIntHash_64;break; case TSDB_DATA_TYPE_UBIGINT:
case TSDB_DATA_TYPE_BINARY: fn = MurmurHash3_32;break; case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_NCHAR: fn = MurmurHash3_32;break; fn = taosIntHash_64;
case TSDB_DATA_TYPE_INT: fn = taosIntHash_32; break; break;
case TSDB_DATA_TYPE_SMALLINT: fn = taosIntHash_16; break; case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_TINYINT: fn = taosIntHash_8; break; fn = MurmurHash3_32;
case TSDB_DATA_TYPE_FLOAT: fn = taosFloatHash; break; break;
case TSDB_DATA_TYPE_DOUBLE: fn = taosDoubleHash; break; case TSDB_DATA_TYPE_NCHAR:
default: fn = taosIntHash_32;break; fn = MurmurHash3_32;
break;
case TSDB_DATA_TYPE_UINT:
case TSDB_DATA_TYPE_INT:
fn = taosIntHash_32;
break;
case TSDB_DATA_TYPE_SMALLINT:
case TSDB_DATA_TYPE_USMALLINT:
fn = taosIntHash_16;
break;
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_UTINYINT:
case TSDB_DATA_TYPE_TINYINT:
fn = taosIntHash_8;
break;
case TSDB_DATA_TYPE_FLOAT:
fn = taosFloatHash;
break;
case TSDB_DATA_TYPE_DOUBLE:
fn = taosDoubleHash;
break;
default:
fn = taosIntHash_32;
break;
} }
return fn; return fn;
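Aside: with the widened switch above, unsigned integer keys now pick the same width-matched hash routine as their signed counterparts instead of falling through to the 32-bit default. A hedged usage sketch, assuming the hash declarations used elsewhere in this diff are in scope (no new API is introduced here):

_hash_fn_t fn  = taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT);  /* now resolves to taosIntHash_64, shared with BIGINT and TIMESTAMP */
SHashObj  *set = taosHashInit(1024, fn, false, HASH_NO_LOCK);         /* unsigned keys hash consistently with their signed peers */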
View File
@ -1,15 +1,16 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest) FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest)
FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib) FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib /usr/lib64)
FIND_LIBRARY(LIB_GTEST_SHARED_DIR libgtest.so /usr/lib/ /usr/local/lib /usr/lib64)
IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR) IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR))
MESSAGE(STATUS "gTest library found, build unit test") MESSAGE(STATUS "gTest library found, build unit test")
INCLUDE_DIRECTORIES(${HEADER_GTEST_INCLUDE_DIR}) INCLUDE_DIRECTORIES(${HEADER_GTEST_INCLUDE_DIR})
AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST)
LIST(REMOVE_ITEM SOURCE_LIST ${CMAKE_CURRENT_SOURCE_DIR}/trefTest.c) LIST(REMOVE_ITEM SOURCE_LIST ${CMAKE_CURRENT_SOURCE_DIR}/trefTest.c)
ADD_EXECUTABLE(utilTest ${SOURCE_LIST}) ADD_EXECUTABLE(utilTest ${SOURCE_LIST})
TARGET_LINK_LIBRARIES(utilTest tutil common os gtest pthread gcov) TARGET_LINK_LIBRARIES(utilTest tutil common os gtest pthread gcov)
View File
@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
View File
@ -95,7 +95,7 @@ void vnodeCtrlFlow(int32_t vgId, int32_t level) {
} }
void vnodeStartSyncFile(int32_t vgId) { void vnodeStartSyncFile(int32_t vgId) {
SVnodeObj *pVnode = vnodeAcquire(vgId); SVnodeObj *pVnode = vnodeAcquireNotClose(vgId);
if (pVnode == NULL) { if (pVnode == NULL) {
vError("vgId:%d, vnode not found while start filesync", vgId); vError("vgId:%d, vnode not found while start filesync", vgId);
return; return;
@ -155,7 +155,7 @@ int32_t vnodeWriteToCache(int32_t vgId, void *wparam, int32_t qtype, void *rpara
} }
int32_t vnodeGetVersion(int32_t vgId, uint64_t *fver, uint64_t *wver) { int32_t vnodeGetVersion(int32_t vgId, uint64_t *fver, uint64_t *wver) {
SVnodeObj *pVnode = vnodeAcquire(vgId); SVnodeObj *pVnode = vnodeAcquireNotClose(vgId);
if (pVnode == NULL) { if (pVnode == NULL) {
vError("vgId:%d, vnode not found while write to cache", vgId); vError("vgId:%d, vnode not found while write to cache", vgId);
return -1; return -1;
View File
@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
ADD_DEFINITIONS(-DWAL_CHECKSUM_WHOLE) ADD_DEFINITIONS(-DWAL_CHECKSUM_WHOLE)
View File
@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
IF (TD_LINUX) IF (TD_LINUX)
View File
@ -3,7 +3,7 @@
# generate release version: # generate release version:
# mkdir release; cd release; cmake -DCMAKE_BUILD_TYPE=Release .. # mkdir release; cd release; cmake -DCMAKE_BUILD_TYPE=Release ..
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
SET(CMAKE_C_STANDARD 11) SET(CMAKE_C_STANDARD 11)
View File
@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8) CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine) PROJECT(TDengine)
IF (TD_LINUX) IF (TD_LINUX)
View File
@ -345,12 +345,14 @@ python3 ./test.py -f tag_lite/unsignedTinyint.py
python3 ./test.py -f functions/function_percentile2.py python3 ./test.py -f functions/function_percentile2.py
python3 ./test.py -f insert/boundary2.py python3 ./test.py -f insert/boundary2.py
python3 ./test.py -f insert/insert_locking.py
python3 ./test.py -f alter/alter_debugFlag.py python3 ./test.py -f alter/alter_debugFlag.py
python3 ./test.py -f query/queryBetweenAnd.py python3 ./test.py -f query/queryBetweenAnd.py
python3 ./test.py -f tag_lite/alter_tag.py python3 ./test.py -f tag_lite/alter_tag.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
python3 test.py -f tools/taosdemoAllTest/TD-4985/query-limit-offset.py
python3 ./test.py -f tag_lite/drop_auto_create.py python3 ./test.py -f tag_lite/drop_auto_create.py
python3 test.py -f insert/insert_before_use_db.py python3 test.py -f insert/insert_before_use_db.py
python3 test.py -f alter/alter_keep.py python3 test.py -f alter/alter_keep.py
View File
@ -0,0 +1,178 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
import random
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
# test case for https://jira.taosdata.com:18080/browse/TD-5021
tdLog.info("\n\n----------step1 : drop db and create db----------\n")
tdSql.execute('''drop database if exists db ;''')
tdSql.execute('''create database db ;''')
sql = '''show databases;'''
tdSql.query(sql)
tdSql.checkRows(1)
tdLog.info("\n\n----------step2 : create stable----------\n")
tdSql.execute('''create stable
db.stable_1 (ts timestamp, payload binary(256))
tags(t1 binary(16),t2 int);''')
sql = '''show db.stables;'''
tdSql.query(sql)
tdSql.checkRows(1)
tdLog.info("\n\n----------step3 : create table and insert----------\n")
sql = '''insert into db.table1 using db.stable_1 (t1 , t2) tags ("table_1" , 111) ( values (now, ;'''
tdLog.info(sql)
tdSql.error(sql)
try:
tdSql.execute(sql)
tdLog.exit(" unexpected token")
except Exception as e:
tdLog.info(repr(e))
tdLog.info("DB error: syntax error near ', ;' (unexpected token)")
sql = '''insert into db.table1(ts , payload) using db.stable_1 (t1 , t2) tags ("table_1" , 111) ( values (now, ;'''
tdLog.info(sql)
tdSql.error(sql)
try:
tdSql.execute(sql)
tdLog.exit(" bind columns again")
except Exception as e:
tdLog.info(repr(e))
tdLog.info("DB error: syntax error near ', ;' (bind columns again)")
sql = '''insert into db.table1 using db.stable_1 (t1 , t2) tags ("table_1",111) (ts , payload) ( values (now, ;'''
tdLog.info(sql)
tdSql.error(sql)
try:
tdSql.execute(sql)
tdLog.exit(" keyword VALUES or FILE required ")
except Exception as e:
tdLog.info(repr(e))
tdLog.info("DB error: invalid SQL: (keyword VALUES or FILE required)")
tdSql.execute('''insert into db.table1 using db.stable_1 (t1 , t2)
tags ("table_1" , 111) values ( now , 1) ''')
sql = '''select * from db.stable_1;'''
tdSql.query(sql)
tdSql.checkRows(1)
tdSql.checkData(0,1,1)
tdSql.checkData(0,2,'table_1')
tdLog.info("\n\n----------step4 : create table and insert again----------\n")
sql = '''insert into db.table2 using db.stable_1 (t1) tags ("table_2") ( values (now, ;'''
tdLog.info(sql)
tdSql.error(sql)
try:
tdSql.execute(sql)
tdLog.exit(" unexpected token")
except Exception as e:
tdLog.info(repr(e))
tdLog.info("DB error: syntax error near ', ;' (unexpected token)")
tdSql.execute('''insert into db.table2 using db.stable_1 (t1)
tags ("table_2") values ( now , 2) ''')
sql = '''select * from db.stable_1;'''
tdSql.query(sql)
tdSql.checkRows(2)
tdSql.checkData(1,1,2)
tdSql.checkData(1,2,'table_2')
tdLog.info("\n\n----------step5 : create table and insert without db----------\n")
tdSql.execute('''use db''')
sql = '''insert into table3 using stable_1 (t1) tags ("table_3") ( values (now, ;'''
tdLog.info(sql)
tdSql.error(sql)
try:
tdSql.execute(sql)
tdLog.exit(" unexpected token")
except Exception as e:
tdLog.info(repr(e))
tdLog.info("DB error: syntax error near ', ;' (unexpected token)")
tdSql.execute('''insert into table3 using stable_1 (t1 , t2)
tags ("table_3" , 333) values ( now , 3) ''')
sql = '''select * from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(3)
tdSql.checkData(2,1,3)
tdSql.checkData(2,2,'table_3')
tdLog.info("\n\n----------step6 : create tables in one sql ----------\n")
sql = '''insert into table4 using stable_1 (t1) tags ("table_4") values (now, 4)
table5 using stable_1 (t1) tags ("table_5") ( values (now, ;'''
tdLog.info(sql)
tdSql.error(sql)
try:
tdSql.execute(sql)
tdLog.exit(" unexpected token")
except Exception as e:
tdLog.info(repr(e))
tdLog.info("DB error: syntax error near ', ;' (unexpected token)")
tdSql.execute('''insert into table4 using stable_1 (t1) tags ("table_4") values (now, 4)
table5 using stable_1 (t1) tags ("table_5") values (now, 5) ''')
sql = '''select * from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(5)
tdSql.checkData(3,1,4)
tdSql.checkData(3,2,'table_4')
tdSql.checkData(4,1,5)
tdSql.checkData(4,2,'table_5')
sql = '''insert into table6 using stable_1 (t1) tags ("table_6") ( values (now,
table7 using stable_1 (t1) tags ("table_7") values (now, 7);'''
tdLog.info(sql)
tdSql.error(sql)
try:
tdSql.execute(sql)
tdLog.exit(" invalid SQL")
except Exception as e:
tdLog.info(repr(e))
tdLog.info("invalid SQL")
tdSql.execute('''insert into table6 using stable_1 (t1 , t2) tags ("table_6" , 666) values (now, 6)
table7 using stable_1 (t1) tags ("table_7") values (now, 7) ''')
sql = '''select * from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(7)
tdSql.checkData(5,1,6)
tdSql.checkData(5,2,'table_6')
tdSql.checkData(6,1,7)
tdSql.checkData(6,2,'table_7')
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
View File
@ -0,0 +1,10 @@
0,0,'TAOSdata-0'
1,1,'TAOSdata-1'
2,22,'TAOSdata-2'
3,333,'TAOSdata-3'
4,4444,'TAOSdata-4'
5,55555,'TAOSdata-5'
6,666666,'TAOSdata-6'
7,7777777,'TAOSdata-7'
8,88888888,'TAOSdata-8'
9,999999999,'TAOSdata-9'
View File
@ -0,0 +1,62 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 10,
"thread_count_create_tbl": 10,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 1,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 10000,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 1,
"data_source": "sample",
"insert_mode": "taosc",
"insert_rows": 10,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/TD-4985/query-limit-offset.csv",
"tags_file": "./tools/taosdemoAllTest/TD-4985/query-limit-offset.csv",
"columns": [{"type": "INT","count":2}, {"type": "BINARY", "len": 16, "count":1}],
"tags": [{"type": "INT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
}]
}]
}
View File
@ -0,0 +1,191 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        # default to "" so the empty-path check in run() works when taosd is not found
        buildPath = ""
        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath
    def run(self):
        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"
        # insert: create one or multiple tables per sql and insert multiple rows per sql
        # test case for https://jira.taosdata.com:18080/browse/TD-4985
        os.system("%staosdemo -f tools/taosdemoAllTest/TD-4985/query-limit-offset.json -y " % binPath)
tdSql.execute("use db")
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, 10000)
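        # append 1000 extra rows to ten specific child tables so limit/offset can be checked per table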
        for i in range(1000):
            tdSql.execute('''insert into stb00_9999 values(%d, %d, %d,'test99.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_8888 values(%d, %d, %d,'test98.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_7777 values(%d, %d, %d,'test97.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_6666 values(%d, %d, %d,'test96.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_5555 values(%d, %d, %d,'test95.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_4444 values(%d, %d, %d,'test94.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_3333 values(%d, %d, %d,'test93.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_2222 values(%d, %d, %d,'test92.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_1111 values(%d, %d, %d,'test91.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_100 values(%d, %d, %d,'test90.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
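        # each tuple is (col2 value prefix, child table, limit) for the limit/offset checks below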
tdSql.query("select * from stb0 where col2 like 'test99%' ")
tdSql.checkRows(1000)
tdSql.query("select * from stb0 where tbname like 'stb00_9999' limit 10" )
tdSql.checkData(0, 1, 0)
tdSql.checkData(1, 1, 1)
tdSql.checkData(2, 1, 2)
tdSql.query("select * from stb0 where tbname like 'stb00_9999' limit 10 offset 5" )
tdSql.checkData(0, 1, 5)
tdSql.checkData(1, 1, 6)
tdSql.checkData(2, 1, 7)
tdSql.query("select * from stb0 where col2 like 'test98%' ")
tdSql.checkRows(1000)
tdSql.query("select * from stb0 where tbname like 'stb00_8888' limit 10" )
tdSql.checkData(0, 1, 0)
tdSql.checkData(1, 1, 1)
tdSql.checkData(2, 1, 2)
tdSql.query("select * from stb0 where tbname like 'stb00_8888' limit 10 offset 5" )
tdSql.checkData(0, 1, 5)
tdSql.checkData(1, 1, 6)
tdSql.checkData(2, 1, 7)
tdSql.query("select * from stb0 where col2 like 'test97%' ")
tdSql.checkRows(1000)
tdSql.query("select * from stb0 where tbname like 'stb00_7777' limit 10" )
tdSql.checkData(0, 1, 0)
tdSql.checkData(1, 1, 1)
tdSql.checkData(2, 1, 2)
tdSql.query("select * from stb0 where tbname like 'stb00_7777' limit 10 offset 5" )
tdSql.checkData(0, 1, 5)
tdSql.checkData(1, 1, 6)
tdSql.checkData(2, 1, 7)
tdSql.query("select * from stb0 where col2 like 'test96%' ")
tdSql.checkRows(1000)
tdSql.query("select * from stb0 where tbname like 'stb00_6666' limit 10" )
tdSql.checkData(0, 1, 0)
tdSql.checkData(1, 1, 1)
tdSql.checkData(2, 1, 2)
tdSql.query("select * from stb0 where tbname like 'stb00_6666' limit 10 offset 5" )
tdSql.checkData(0, 1, 5)
tdSql.checkData(1, 1, 6)
tdSql.checkData(2, 1, 7)
tdSql.query("select * from stb0 where col2 like 'test95%' ")
tdSql.checkRows(1000)
tdSql.query("select * from stb0 where tbname like 'stb00_5555' limit 10" )
tdSql.checkData(0, 1, 0)
tdSql.checkData(1, 1, 1)
tdSql.checkData(2, 1, 2)
tdSql.query("select * from stb0 where tbname like 'stb00_5555' limit 10 offset 5" )
tdSql.checkData(0, 1, 5)
tdSql.checkData(1, 1, 6)
tdSql.checkData(2, 1, 7)
tdSql.query("select * from stb0 where col2 like 'test94%' ")
tdSql.checkRows(1000)
tdSql.query("select * from stb0 where tbname like 'stb00_4444' limit 10" )
tdSql.checkData(0, 1, 0)
tdSql.checkData(1, 1, 1)
tdSql.checkData(2, 1, 2)
tdSql.query("select * from stb0 where tbname like 'stb00_4444' limit 10 offset 5" )
tdSql.checkData(0, 1, 5)
tdSql.checkData(1, 1, 6)
tdSql.checkData(2, 1, 7)
tdSql.query("select * from stb0 where col2 like 'test93%' ")
tdSql.checkRows(1000)
tdSql.query("select * from stb0 where tbname like 'stb00_3333' limit 100" )
tdSql.checkData(0, 1, 0)
tdSql.checkData(1, 1, 1)
tdSql.checkData(2, 1, 2)
tdSql.query("select * from stb0 where tbname like 'stb00_3333' limit 100 offset 5" )
tdSql.checkData(0, 1, 5)
tdSql.checkData(1, 1, 6)
tdSql.checkData(2, 1, 7)
tdSql.query("select * from stb0 where col2 like 'test92%' ")
tdSql.checkRows(1000)
tdSql.query("select * from stb0 where tbname like 'stb00_2222' limit 100" )
tdSql.checkData(0, 1, 0)
tdSql.checkData(1, 1, 1)
tdSql.checkData(2, 1, 2)
tdSql.query("select * from stb0 where tbname like 'stb00_2222' limit 100 offset 5" )
tdSql.checkData(0, 1, 5)
tdSql.checkData(1, 1, 6)
tdSql.checkData(2, 1, 7)
tdSql.query("select * from stb0 where col2 like 'test91%' ")
tdSql.checkRows(1000)
tdSql.query("select * from stb0 where tbname like 'stb00_1111' limit 100" )
tdSql.checkData(0, 1, 0)
tdSql.checkData(1, 1, 1)
tdSql.checkData(2, 1, 2)
tdSql.query("select * from stb0 where tbname like 'stb00_1111' limit 100 offset 5" )
tdSql.checkData(0, 1, 5)
tdSql.checkData(1, 1, 6)
tdSql.checkData(2, 1, 7)
tdSql.query("select * from stb0 where col2 like 'test90%' ")
tdSql.checkRows(1000)
tdSql.query("select * from stb0 where tbname like 'stb00_100' limit 100" )
tdSql.checkData(0, 1, 0)
tdSql.checkData(1, 1, 1)
tdSql.checkData(2, 1, 2)
tdSql.query("select * from stb0 where tbname like 'stb00_100' limit 100 offset 5" )
tdSql.checkData(0, 1, 5)
tdSql.checkData(1, 1, 6)
tdSql.checkData(2, 1, 7)
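        # drop the sql statement log written for this test run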
os.system("rm -rf tools/taosdemoAllTest/TD-4985/query-limit-offset.py.sql")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -16,6 +16,8 @@ import pandas as pd
import argparse
import os.path
import json
from util.log import tdLog
from util.sql import tdSql
class taosdemoPerformace:

View File

@ -63,7 +63,6 @@ class TDTestCase:
tdSql.execute("create database db days 11 keep 3649 blocks 8 ") tdSql.execute("create database db days 11 keep 3649 blocks 8 ")
tdSql.execute("create database db1 days 12 keep 3640 blocks 7 ") tdSql.execute("create database db1 days 12 keep 3640 blocks 7 ")
tdSql.execute("use db") tdSql.execute("use db")
tdSql.execute( tdSql.execute(
"create table st(ts timestamp, c1 int, c2 nchar(10)) tags(t1 int, t2 binary(10))") "create table st(ts timestamp, c1 int, c2 nchar(10)) tags(t1 int, t2 binary(10))")
tdSql.execute("create table t1 using st tags(1, 'beijing')") tdSql.execute("create table t1 using st tags(1, 'beijing')")
@ -86,9 +85,10 @@ class TDTestCase:
tdLog.info("taosdump found in %s" % buildPath) tdLog.info("taosdump found in %s" % buildPath)
binPath = buildPath + "/build/bin/" binPath = buildPath + "/build/bin/"
os.system("rm ./taosdumptest/tmp1/*.sql")
os.system("%staosdump --databases db -o ./taosdumptest/tmp1" % binPath) os.system("%staosdump --databases db -o ./taosdumptest/tmp1" % binPath)
os.system("%staosdump --databases db1 -o ./taosdumptest/tmp2" % binPath) os.system(
"%staosdump --databases db1 -o ./taosdumptest/tmp2" %
binPath)
tdSql.execute("drop database db") tdSql.execute("drop database db")
tdSql.execute("drop database db1") tdSql.execute("drop database db1")

View File

@ -1,147 +0,0 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/exec.sh -n dnode1 -s start
sleep 100
sql connect
sleep 100
print ========== sub_in_from.sim
$i = 0
$dbPrefix = subdb
$tbPrefix = sub_tb
$stbPrefix = sub_stb
$tbNum = 10
$rowNum = 1000
$totalNum = $tbNum * $rowNum
$loops = 200000
$log = 10000
$ts0 = 1537146000000
$delta = 600000
$i = 0
$db = $dbPrefix . $i
$stb = $stbPrefix . $i
sql drop database $db -x step1
step1:
sql create database $db cache 16 maxrows 4096 keep 36500
print ====== create tables
sql use $db
sql create table $stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 smallint, c6 tinyint, c7 bool, c8 binary(10), c9 nchar(10)) tags(t1 int)
$i = 0
$ts = $ts0
$halfNum = $tbNum / 2
while $i < $halfNum
$tbId = $i + $halfNum
$tb = $tbPrefix . $i
$tb1 = $tbPrefix . $tbId
sql create table $tb using $stb tags( $i )
sql create table $tb1 using $stb tags( $tbId )
$x = 0
while $x < $rowNum
$xs = $x * $delta
$ts = $ts0 + $xs
$c = $x / 10
$c = $c * 10
$c = $x - $c
$binary = 'binary . $c
$binary = $binary . '
$nchar = 'nchar . $c
$nchar = $nchar . '
sql insert into $tb values ( $ts , $c , $c , $c , $c , $c , $c , true, $binary , $nchar )
sql insert into $tb1 values ( $ts , $c , NULL , $c , NULL , $c , $c , true, $binary , $nchar )
$x = $x + 1
endw
$i = $i + 1
endw
print ====== tables created
sql_error select count(*) from (select count(*) from abc.sub_stb0)
sql_error select val + 20 from (select count(*) from sub_stb0 interval(10h))
sql_error select abc+20 from (select count(*) from sub_stb0 interval(1s))
sql select count(*) from (select count(*) from sub_stb0 interval(10h))
if $rows != 1 then
return -1
endi
if $data00 != 18 then
print expect 18, actual: $data00
return -1
endi
sql select ts from (select count(*) from sub_stb0 interval(10h))
if $rows != 18 then
return -1
endi
if $data00 != @18-09-17 04:00:00.000@ then
return -1
endi
if $data01 != @18-09-17 14:00:00.000@ then
return -1
endi
sql select val + 20, val from (select count(*) as val from sub_stb0 interval(10h))
if $rows != 18 then
return -1
endi
if $data00 != 320.000000 then
return -1
endi
if $data01 != 300 then
return -1
endi
if $data10 != 620 then
return -1
endi
if $data11 != 600 then
return -1
endi
if $data20 != 620 then
return -1
endi
if $data21 != 600 then
return -1
endi
sql select max(val), min(val), max(val) - min(val) from (select count(*) val from sub_stb0 interval(10h))
if $rows != 1 then
return -1
endi
if $data00 != 600 then
return -1
endi
if $data01 != 100 then
return -1
endi
if $data02 != 500.000000 then
return -1
endi
sql select first(ts,val),last(ts,val) from (select count(*) val from sub_stb0 interval(10h))
sql select top(val, 5) from (select count(*) val from sub_stb0 interval(10h))
sql select diff(val) from (select count(*) val from sub_stb0 interval(10h))
sql select apercentile(val, 50) from (select count(*) val from sub_stb0 interval(10h))
# not supported yet
sql select percentile(val, 50) from (select count(*) val from sub_stb0 interval(10h))
sql select stddev(val) from (select count(*) val from sub_stb0 interval(10h))
print ====================>complex query

View File

@ -1,4 +1,4 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc)