Merge from develop

Shengliang Guan 2021-05-18 14:11:01 +08:00
commit 875072a474
117 changed files with 5278 additions and 964 deletions


@ -7,41 +7,22 @@ platform:
arch: amd64
steps:
- name: smoke_test
image: python:3.8
- name: build
image: gcc
commands:
- apt-get update
- apt-get install -y cmake build-essential gcc
- pip3 install psutil
- pip3 install guppy3
- pip3 install src/connector/python/linux/python3/
- apt-get install -y cmake build-essential
- mkdir debug
- cd debug
- cmake ..
- make
- cd ../tests
- ./test-all.sh smoke
trigger:
event:
- pull_request
when:
branch:
- develop
- master
- name: crash_gen
image: python:3.8
commands:
- pip3 install requests
- pip3 install src/connector/python/linux/python3/
- pip3 install psutil
- pip3 install guppy3
- cd tests/pytest
- ./crash_gen.sh -a -p -t 4 -s 2000
when:
branch:
- develop
- master
---
kind: pipeline
name: test_arm64
@ -60,6 +41,9 @@ steps:
- cd debug
- cmake .. -DCPUTYPE=aarch64 > /dev/null
- make
trigger:
event:
- pull_request
when:
branch:
- develop
@ -82,6 +66,9 @@ steps:
- cd debug
- cmake .. -DCPUTYPE=aarch32 > /dev/null
- make
trigger:
event:
- pull_request
when:
branch:
- develop
@ -106,11 +93,13 @@ steps:
- cd debug
- cmake ..
- make
trigger:
event:
- pull_request
when:
branch:
- develop
- master
---
kind: pipeline
name: build_xenial
@ -129,6 +118,9 @@ steps:
- cd debug
- cmake ..
- make
trigger:
event:
- pull_request
when:
branch:
- develop
@ -151,6 +143,32 @@ steps:
- cd debug
- cmake ..
- make
trigger:
event:
- pull_request
when:
branch:
- develop
- master
---
kind: pipeline
name: build_centos7
platform:
os: linux
arch: amd64
steps:
- name: build
image: ansible/centos7-ansible
commands:
- yum install -y gcc gcc-c++ make cmake
- mkdir debug
- cd debug
- cmake ..
- make
trigger:
event:
- pull_request
when:
branch:
- develop

.gitignore (vendored)

@ -2,6 +2,7 @@ build/
.vscode/
.idea/
cmake-build-debug/
cmake-build-release/
cscope.out
.DS_Store
debug/


@ -3,7 +3,7 @@ IF (CMAKE_VERSION VERSION_LESS 3.0)
PROJECT(TDengine CXX)
SET(PROJECT_VERSION_MAJOR "${LIB_MAJOR_VERSION}")
SET(PROJECT_VERSION_MINOR "${LIB_MINOR_VERSION}")
SET(PROJECT_VERSION_PATCH"${LIB_PATCH_VERSION}")
SET(PROJECT_VERSION_PATCH "${LIB_PATCH_VERSION}")
SET(PROJECT_VERSION "${LIB_VERSION_STRING}")
ELSE ()
CMAKE_POLICY(SET CMP0048 NEW)
@ -42,6 +42,13 @@ INCLUDE(cmake/env.inc)
INCLUDE(cmake/version.inc)
INCLUDE(cmake/install.inc)
IF (CMAKE_SYSTEM_NAME MATCHES "Linux")
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pipe -Wall -Wshadow -Werror")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pipe -Wall -Wshadow -Werror")
ENDIF ()
MESSAGE(STATUS "CMAKE_C_FLAGS: ${CMAKE_C_FLAGS}")
MESSAGE(STATUS "CMAKE_CXX_FLAGS: ${CMAKE_CXX_FLAGS}")
ADD_SUBDIRECTORY(deps)
ADD_SUBDIRECTORY(src)
ADD_SUBDIRECTORY(tests)

Jenkinsfile (vendored)

@ -94,7 +94,7 @@ def pre_test(){
make > /dev/null
make install > /dev/null
cd ${WKC}/tests
pip3 install ${WKC}/src/connector/python/linux/python3/
pip3 install ${WKC}/src/connector/python
'''
return 1
}


@ -57,7 +57,7 @@ IF (TD_LINUX_64)
ADD_DEFINITIONS(-D_M_X64)
ADD_DEFINITIONS(-D_TD_LINUX_64)
MESSAGE(STATUS "linux64 is defined")
SET(COMMON_FLAGS "-Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ADD_DEFINITIONS(-DUSE_LIBICONV)
ENDIF ()
@ -65,7 +65,7 @@ IF (TD_LINUX_32)
ADD_DEFINITIONS(-D_TD_LINUX_32)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "linux32 is defined")
SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_ARM_64)
@ -73,7 +73,7 @@ IF (TD_ARM_64)
ADD_DEFINITIONS(-D_TD_ARM_)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "arm64 is defined")
SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_ARM_32)
@ -81,7 +81,7 @@ IF (TD_ARM_32)
ADD_DEFINITIONS(-D_TD_ARM_)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "arm32 is defined")
SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ")
ENDIF ()
IF (TD_MIPS_64)
@ -89,7 +89,7 @@ IF (TD_MIPS_64)
ADD_DEFINITIONS(-D_TD_MIPS_64)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "mips64 is defined")
SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_MIPS_32)
@ -97,7 +97,7 @@ IF (TD_MIPS_32)
ADD_DEFINITIONS(-D_TD_MIPS_32)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "mips32 is defined")
SET(COMMON_FLAGS "-Wall -Werror -fPIC -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_APLHINE)
@ -139,7 +139,7 @@ IF (TD_DARWIN_64)
ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "darwin64 is defined")
SET(COMMON_FLAGS "-Wall -Werror -Wno-missing-braces -fPIC -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -Wno-missing-braces -fPIC -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
SET(RELEASE_FLAGS "-Og")
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
@ -159,7 +159,7 @@ IF (TD_WINDOWS)
IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
ENDIF ()
SET(DEBUG_FLAGS "/Zi /W3 /GL")
SET(DEBUG_FLAGS "/fsanitize=thread /fsanitize=leak /fsanitize=memory /fsanitize=undefined /fsanitize=hwaddress /Zi /W3 /GL")
SET(RELEASE_FLAGS "/W0 /O3 /GL")
ENDIF ()


@ -345,7 +345,7 @@ TDengine provides a time-driven real-time stream computing API that can, at every specified interval
* taos: an established database connection
* sql: the SQL query statement (only query statements are allowed)
* fp: a user-defined callback function pointer. After each round of stream computing completes, TDengine passes the query result (TAOS_ROW), the query state (TAOS_RES), and the user-defined parameter (PARAM) to the callback; inside the callback, the user can call taos_num_fields to get the number of columns in the result set and taos_fetch_fields to get the type of each column.
* stime: the time when stream computing starts. 0 means starting from now; a non-zero value means starting from the specified time (a UTC timestamp in milliseconds since 1970/1/1)
* stime: the time when stream computing starts. The "minimum value of a 64-bit integer" means starting from now; any other value means starting from the specified time (a UTC timestamp in milliseconds since 1970/1/1)
* param: a parameter supplied by the application, handed back to the application when the callback is invoked
* callback: a second callback function, invoked when the continuous query stops automatically.
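Taken together, a minimal C sketch of how these parameters might be wired up is shown below. The taos_open_stream signature is reconstructed from the parameter list above rather than taken from this commit, so treat the names and types as assumptions.

```c
#include <stdio.h>
#include <taos.h>

/* fp: called after each round of stream computing completes */
static void stream_cb(void *param, TAOS_RES *res, TAOS_ROW row) {
    (void)row;
    int cols = taos_num_fields(res);  /* number of columns in the result set */
    printf("stream result: %d columns (param=%s)\n", cols, (const char *)param);
}

/* callback: called when the continuous query stops automatically */
static void stopped_cb(void *param) {
    (void)param;
    printf("continuous query stopped\n");
}

int main(void) {
    TAOS *taos = taos_connect("127.0.0.1", "root", "taosdata", "power", 0);
    if (taos == NULL) return 1;

    /* stime = 0: "start from now" under the pre-change wording above;
       the new wording uses the 64-bit integer minimum for the same meaning */
    void *stream = taos_open_stream(taos, "select avg(current) from meters interval(10s)",
                                    stream_cb, 0, (void *)"demo-param", stopped_cb);
    if (stream == NULL) { taos_close(taos); return 1; }

    getchar();                    /* keep the process alive so the stream can fire */
    taos_close_stream(stream);
    taos_close(taos);
    return 0;
}
```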
@ -400,27 +400,22 @@ For usage of the Python connector, see the [video tutorial](https://www.taosdata.com/blog/2020/
#### Linux
Users can find connector installation packages for python2 and python3 under src/connector/python in the source code, or under /connector/python in the tar.gz package, and install one with pip:
Users can find the connector installation package under src/connector/python in the source code, or under /connector/python in the tar.gz package, and install it with pip:
`pip install src/connector/python/linux/python2/`
`pip install src/connector/python/`
`pip3 install src/connector/python/linux/python3/`
`pip3 install src/connector/python/`
#### Windows
With the TDengine Windows client installed, copy the file "C:\TDengine\driver\taos.dll" to the "C:\windows\system32" directory, then open a Windows cmd command-line window:
```cmd
cd C:\TDengine\connector\python\windows
python -m pip install python2\
```
```cmd
cd C:\TDengine\connector\python\windows
python -m pip install python3\
cd C:\TDengine\connector\python
python -m pip install .
```
* If the pip command is not available on the machine, copy the taos folder under src/connector/python/python3 or src/connector/python/python2 into the application directory.
* If the pip command is not available on the machine, copy the taos folder under src/connector/python into the application directory.
For the Windows client: after installing the TDengine Windows client, copy C:\TDengine\driver\taos.dll to the C:\windows\system32 directory.
### Usage


@ -16,7 +16,7 @@ The TDengine Grafana plugin ships in the installation package under /usr/local/taos/connector/grafanaplugin
Taking CentOS 7.2 as an example, copy the grafanaplugin directory into /var/lib/grafana/plugins and restart Grafana:
```bash
sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/tdengine
sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
```
### Using Grafana


@ -135,6 +135,14 @@ TDengine's default timestamp precision is millisecond, but it can be changed via the configuration parameter enableM
SHOW DATABASES;
```
- **Show the CREATE statement of a database**
```mysql
SHOW CREATE DATABASE db_name;
```
Commonly used for database migration: for an existing database, this returns its CREATE statement, and executing that statement in another cluster yields a database with identical settings (a sketch of the round trip follows).
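To make the migration round trip concrete, here is a hedged C sketch using the client API's taos_query/taos_fetch_row. The helper name is hypothetical, and the assumption that the CREATE statement sits in the second column of the single result row is mine, not something this commit states.

```c
#include <stdio.h>
#include <string.h>
#include <taos.h>

/* Hypothetical helper: fetch a database's CREATE statement from the source
 * cluster and replay it on the target cluster. Assumes SHOW CREATE DATABASE
 * returns one row whose second column holds the statement. */
static int migrate_database(TAOS *src, TAOS *dst, const char *db) {
    char sql[256];
    snprintf(sql, sizeof(sql), "SHOW CREATE DATABASE %s", db);

    TAOS_RES *res = taos_query(src, sql);
    if (taos_errno(res) != 0) { taos_free_result(res); return -1; }

    int rc = -1;
    TAOS_ROW row = taos_fetch_row(res);
    int *lens = taos_fetch_lengths(res);   /* column values are not null-terminated */
    if (row != NULL && row[1] != NULL) {
        char create_sql[4096];
        int n = lens[1] < (int)sizeof(create_sql) - 1 ? lens[1] : (int)sizeof(create_sql) - 1;
        memcpy(create_sql, row[1], n);
        create_sql[n] = '\0';

        TAOS_RES *res2 = taos_query(dst, create_sql);  /* replay on the target cluster */
        rc = taos_errno(res2);
        taos_free_result(res2);
    }
    taos_free_result(res);
    return rc;
}
```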
## <a class="anchor" id="table"></a>Table Management
- **Create a table**
@ -200,6 +208,13 @@ TDengine's default timestamp precision is millisecond, but it can be changed via the configuration parameter enableM
Wildcard matching: 1) % (percent sign) matches zero or more characters; 2) _ (underscore) matches exactly one character.
- **Show the CREATE statement of a table**
```mysql
SHOW CREATE TABLE tb_name;
```
Commonly used for database migration: for an existing table, this returns its CREATE statement, and executing that statement in another cluster yields a table with an identical schema.
- **Modify the display character width online**
```mysql
@ -265,6 +280,13 @@ TDengine's default timestamp precision is millisecond, but it can be changed via the configuration parameter enableM
```
Lists all STables in the current database and related information, including each STable's name, creation time, number of columns, number of tags, and the number of tables created from it.
- **Show the CREATE statement of a super table (STable)**
```mysql
SHOW CREATE STABLE stb_name;
```
Commonly used for database migration: for an existing super table, this returns its CREATE statement, and executing that statement in another cluster yields a super table with an identical schema.
- **Get the schema of a super table**
```mysql


@ -58,7 +58,7 @@ cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_pat
cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include
cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples
cp -r ${top_dir}/src/connector/grafanaplugin ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/grafanaplugin/dist ${pkg_dir}${install_home_path}/connector/grafanaplugin
cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector


@ -1,11 +1,11 @@
#!/bin/bash
#
# Generate the deb package for ubunt, or rpm package for centos, or tar.gz package for other linux os
# Generate the deb package for ubuntu, or rpm package for centos, or tar.gz package for other linux os
set -e
#set -x
# releash.sh -v [cluster | edge]
# release.sh -v [cluster | edge]
# -c [aarch32 | aarch64 | x64 | x86 | mips64 ...]
# -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
# -V [stable | beta]


@ -66,7 +66,7 @@ cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include
cp -r %{_compiledir}/../src/connector/grafanaplugin %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/grafanaplugin/dist %{buildroot}%{homepath}/connector/grafanaplugin
cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector


@ -607,6 +607,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStartPre=/usr/local/taos/bin/startPre.sh' >> ${taosd_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}"
@ -630,6 +631,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
@ -655,6 +657,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}"
${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}"


@ -205,6 +205,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"


@ -205,6 +205,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"


@ -577,6 +577,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo 'Type=simple' >> ${powerd_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/powerd' >> ${powerd_service_config}"
${csudo} bash -c "echo 'ExecStartPre=/usr/local/power/bin/startPre.sh' >> ${powerd_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${powerd_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${powerd_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${powerd_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${powerd_service_config}"
@ -599,6 +600,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
@ -624,6 +626,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}"
${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}"


@ -243,7 +243,7 @@ function install_data() {
}
function install_connector() {
${csudo} cp -rf ${source_dir}/src/connector/grafanaplugin ${install_main_dir}/connector
${csudo} cp -rf ${source_dir}/src/connector/grafanaplugin/dist ${install_main_dir}/connector/grafanaplugin
${csudo} cp -rf ${source_dir}/src/connector/python ${install_main_dir}/connector
${csudo} cp -rf ${source_dir}/src/connector/go ${install_main_dir}/connector
@ -333,6 +333,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStartPre=/usr/local/taos/bin/startPre.sh' >> ${taosd_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}"


@ -117,10 +117,10 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector
cp -r ${connector_dir}/nodejs ${install_dir}/connector
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector
cp -r ${connector_dir}/nodejs ${install_dir}/connector
fi
# Copy release note
# cp ${script_dir}/release_note ${install_dir}


@ -144,24 +144,15 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/subscription.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/connection.py
fi
# Copy release note
# cp ${script_dir}/release_note ${install_dir}


@ -131,7 +131,7 @@ connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector
cp -r ${connector_dir}/nodejs ${install_dir}/connector


@ -166,24 +166,15 @@ connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/subscription.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/connection.py
fi
# Copy release note
# cp ${script_dir}/release_note ${install_dir}


@ -405,6 +405,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStartPre=/usr/local/taos/bin/startPre.sh' >> ${taosd_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}"


@ -713,13 +713,12 @@ static int32_t tscProcessShowCreateDatabase(SSqlObj *pSql) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SCreateBuilder *param = (SCreateBuilder *)malloc(sizeof(SCreateBuilder));
SCreateBuilder *param = (SCreateBuilder *)calloc(1, sizeof(SCreateBuilder));
if (param == NULL) {
free(pInterSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
strncpy(param->buf, tNameGetTableName(&pTableMetaInfo->name), TSDB_TABLE_NAME_LEN);
tNameGetDbName(&pTableMetaInfo->name, param->buf);
param->pParentSql = pSql;
param->pInterSql = pInterSql;


@ -577,13 +577,14 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SSq
index = 0;
sToken = tStrGetToken(*str, &index, false);
*str += index;
if (sToken.n == 0 || sToken.type != TK_RP) {
tscSQLSyntaxErrMsg(pCmd->payload, ") expected", *str);
code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
return -1;
return code;
}
*str += index;
(*numOfRows)++;
}
@ -712,6 +713,9 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlock
int32_t numOfRows = 0;
code = tsParseValues(str, dataBuf, maxNumOfRows, pCmd, &numOfRows, tmpTokenBuf);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
for (uint32_t i = 0; i < dataBuf->numOfParams; ++i) {
SParamInfo *param = dataBuf->params + i;


@ -276,6 +276,60 @@ static char* normalStmtBuildSql(STscStmt* stmt) {
return taosStringBuilderGetResult(&sb, NULL);
}
static int fillColumnsNull(STableDataBlocks* pBlock, int32_t rowNum) {
SParsedDataColInfo* spd = &pBlock->boundColumnInfo;
int32_t offset = 0;
SSchema *schema = (SSchema*)pBlock->pTableMeta->schema;
for (int32_t i = 0; i < spd->numOfCols; ++i) {
if (!spd->cols[i].hasVal) { // the current column has no value to insert; set it to null
for (int32_t n = 0; n < rowNum; ++n) {
char *ptr = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * n + offset;
if (schema[i].type == TSDB_DATA_TYPE_BINARY) {
varDataSetLen(ptr, sizeof(int8_t));
*(uint8_t*) varDataVal(ptr) = TSDB_DATA_BINARY_NULL;
} else if (schema[i].type == TSDB_DATA_TYPE_NCHAR) {
varDataSetLen(ptr, sizeof(int32_t));
*(uint32_t*) varDataVal(ptr) = TSDB_DATA_NCHAR_NULL;
} else {
setNull(ptr, schema[i].type, schema[i].bytes);
}
}
}
offset += schema[i].bytes;
}
return TSDB_CODE_SUCCESS;
}
int32_t fillTablesColumnsNull(SSqlObj* pSql) {
SSqlCmd* pCmd = &pSql->cmd;
STableDataBlocks** p = taosHashIterate(pCmd->pTableBlockHashList, NULL);
STableDataBlocks* pOneTableBlock = *p;
while(pOneTableBlock) {
SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
if (pBlocks->numOfRows > 0 && pOneTableBlock->boundColumnInfo.numOfBound < pOneTableBlock->boundColumnInfo.numOfCols) {
fillColumnsNull(pOneTableBlock, pBlocks->numOfRows);
}
p = taosHashIterate(pCmd->pTableBlockHashList, p);
if (p == NULL) {
break;
}
pOneTableBlock = *p;
}
return TSDB_CODE_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////
// functions for insertion statement preparation
static int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param, TAOS_BIND* bind, int32_t colNum) {
@ -1027,6 +1081,8 @@ static int insertStmtExecute(STscStmt* stmt) {
pBlk->uid = pTableMeta->id.uid;
pBlk->tid = pTableMeta->id.tid;
fillTablesColumnsNull(stmt->pSql);
int code = tscMergeTableDataBlocks(stmt->pSql, false);
if (code != TSDB_CODE_SUCCESS) {
return code;
@ -1120,10 +1176,15 @@ static int insertBatchStmtExecute(STscStmt* pStmt) {
pStmt->pSql->retry = pStmt->pSql->maxRetry + 1; //no retry
if (taosHashGetSize(pStmt->pSql->cmd.pTableBlockHashList) > 0) { // merge according to vgId
if ((code = tscMergeTableDataBlocks(pStmt->pSql, false)) != TSDB_CODE_SUCCESS) {
return code;
}
if (taosHashGetSize(pStmt->pSql->cmd.pTableBlockHashList) <= 0) { // merge according to vgId
tscError("0x%"PRIx64" no data block to insert", pStmt->pSql->self);
return TSDB_CODE_TSC_APP_ERROR;
}
fillTablesColumnsNull(pStmt->pSql);
if ((code = tscMergeTableDataBlocks(pStmt->pSql, false)) != TSDB_CODE_SUCCESS) {
return code;
}
code = tscHandleMultivnodeInsert(pStmt->pSql);


@ -54,14 +54,14 @@ void tscAddIntoSqlList(SSqlObj *pSql) {
pSql->next = pObj->sqlList;
if (pObj->sqlList) pObj->sqlList->prev = pSql;
pObj->sqlList = pSql;
pSql->queryId = queryId++;
pSql->queryId = atomic_fetch_add_32(&queryId, 1);
pthread_mutex_unlock(&pObj->mutex);
pSql->stime = taosGetTimestampMs();
pSql->listed = 1;
tscDebug("0x%"PRIx64" added into sqlList", pSql->self);
tscDebug("0x%"PRIx64" added into sqlList, queryId:%u", pSql->self, pSql->queryId);
}
void tscSaveSlowQueryFpCb(void *param, TAOS_RES *result, int code) {


@ -610,8 +610,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
if (pToken->n > TSDB_DB_NAME_LEN) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
return tscSetTableFullName(pTableMetaInfo, pToken, pSql);
return tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), pToken);
}
case TSDB_SQL_CFG_DNODE: {
const char* msg2 = "invalid configure options or values, such as resetlog / debugFlag 135 / balance 'vnode:2-dnode:2' / monitor 1 ";


@ -56,9 +56,9 @@ static void skipRemainValue(STSBuf* pTSBuf, tVariant* tag1) {
}
while (tsBufNextPos(pTSBuf)) {
STSElem el1 = tsBufGetElem(pTSBuf);
el1 = tsBufGetElem(pTSBuf);
int32_t res = tVariantCompare(el1.tag, tag1);
res = tVariantCompare(el1.tag, tag1);
if (res != 0) { // it is a record with new tag
return;
}
@ -2881,7 +2881,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
tscDebug("0x%"PRIx64" sub:%p retrieve numOfRows:%d totalNumOfRows:%" PRIu64 " from ep:%s, orderOfSub:%d",
pParentSql->self, pSql, pRes->numOfRows, pState->numOfRetrievedRows, pSql->epSet.fqdn[pSql->epSet.inUse], idx);
if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0)) {
if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0) && !(tscGetQueryInfo(&pParentSql->cmd, 0)->distinctTag)) {
tscError("0x%"PRIx64" sub:0x%"PRIx64" num of OrderedRes is too many, max allowed:%" PRId32 " , current:%" PRId64,
pParentSql->self, pSql->self, tsMaxNumOfOrderedResults, num);
tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_SORTED_RES_TOO_MANY);


@ -2944,16 +2944,21 @@ void tscDoQuery(SSqlObj* pSql) {
return;
}
if (pCmd->command == TSDB_SQL_SELECT) {
tscAddIntoSqlList(pSql);
}
if (pCmd->dataSourceType == DATA_FROM_DATA_FILE) {
tscImportDataFromFile(pSql);
} else {
SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex);
uint16_t type = pQueryInfo->type;
if ((pCmd->command == TSDB_SQL_SELECT) && (!TSDB_QUERY_HAS_TYPE(type, TSDB_QUERY_TYPE_SUBQUERY)) && (!TSDB_QUERY_HAS_TYPE(type, TSDB_QUERY_TYPE_STABLE_SUBQUERY))) {
tscAddIntoSqlList(pSql);
}
if (TSDB_QUERY_HAS_TYPE(type, TSDB_QUERY_TYPE_INSERT)) { // multi-vnodes insertion
tscHandleMultivnodeInsert(pSql);
return;
}
if (QUERY_IS_JOIN_QUERY(type)) {
if (!TSDB_QUERY_HAS_TYPE(type, TSDB_QUERY_TYPE_SUBQUERY)) {
tscHandleMasterJoinQuery(pSql);


@ -98,7 +98,7 @@ TEST(testCase, parse_time) {
taosParseTime(t41, &time, strlen(t41), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, 852048000999);
int64_t k = timezone;
// int64_t k = timezone;
char t42[] = "1997-1-1T0:0:0.999999999Z";
taosParseTime(t42, &time, strlen(t42), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, 852048000999 - timezone * MILLISECOND_PER_SECOND);
@ -163,7 +163,7 @@ TEST(testCase, parse_time) {
taosParseTime(t13, &time, strlen(t13), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, -28800 * MILLISECOND_PER_SECOND);
char* t = "2021-01-08T02:11:40.000+00:00";
char t[] = "2021-01-08T02:11:40.000+00:00";
taosParseTime(t, &time, strlen(t), TSDB_TIME_PRECISION_MILLI, 0);
printf("%ld\n", time);
}


@ -87,6 +87,8 @@ tExprNode* exprTreeFromBinary(const void* data, size_t size);
tExprNode* exprTreeFromTableName(const char* tbnameCond);
tExprNode* exprdup(tExprNode* pTree);
void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree);
bool exprTreeApplyFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param);
void arithmeticTreeTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order,

@ -1 +1 @@
Subproject commit d99751356e285696f57bc604304ffafd10287439
Subproject commit 7a26c432f8b4203e42344ff3290b9b9b01b983d5


@ -122,6 +122,7 @@
<exclude>**/FailOverTest.java</exclude>
<exclude>**/InvalidResultSetPointerTest.java</exclude>
<exclude>**/RestfulConnectionTest.java</exclude>
<exclude>**/TD4144Test.java</exclude>
</excludes>
<testFailureIgnore>true</testFailureIgnore>
</configuration>


@ -30,6 +30,7 @@ public abstract class TSDBConstants {
public static final int JNI_FETCH_END = -6;
public static final int JNI_OUT_OF_MEMORY = -7;
// TSDB Data Types
public static final int TSDB_DATA_TYPE_NULL = 0;
public static final int TSDB_DATA_TYPE_BOOL = 1;
public static final int TSDB_DATA_TYPE_TINYINT = 2;
public static final int TSDB_DATA_TYPE_SMALLINT = 3;


@ -6,11 +6,13 @@ import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import com.google.common.primitives.Shorts;
import com.taosdata.jdbc.*;
import com.taosdata.jdbc.utils.Utils;
import java.math.BigDecimal;
import java.sql.*;
import java.time.Instant;
import java.time.ZoneOffset;
import java.time.format.DateTimeParseException;
import java.util.ArrayList;
import java.util.Calendar;
@ -18,14 +20,13 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
private volatile boolean isClosed;
private int pos = -1;
private final String database;
private final Statement statement;
// data
private final ArrayList<ArrayList<Object>> resultSet;
private final ArrayList<ArrayList<Object>> resultSet = new ArrayList<>();
// meta
private ArrayList<String> columnNames;
private ArrayList<Field> columns;
private ArrayList<String> columnNames = new ArrayList<>();
private ArrayList<Field> columns = new ArrayList<>();
private RestfulResultSetMetaData metaData;
/**
@ -37,10 +38,46 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
this.database = database;
this.statement = statement;
// column metadata
// get column metadata
JSONArray columnMeta = resultJson.getJSONArray("column_meta");
columnNames = new ArrayList<>();
columns = new ArrayList<>();
// get row data
JSONArray data = resultJson.getJSONArray("data");
if (data == null || data.isEmpty()) {
columnNames.clear();
columns.clear();
this.resultSet.clear();
return;
}
// get head
JSONArray head = resultJson.getJSONArray("head");
// get rows
Integer rows = resultJson.getInteger("rows");
// parse column_meta
if (columnMeta != null) {
parseColumnMeta_new(columnMeta);
} else {
parseColumnMeta_old(head, data, rows);
}
this.metaData = new RestfulResultSetMetaData(this.database, columns, this);
// parse row data
resultSet.clear();
for (int rowIndex = 0; rowIndex < data.size(); rowIndex++) {
ArrayList row = new ArrayList();
JSONArray jsonRow = data.getJSONArray(rowIndex);
for (int colIndex = 0; colIndex < this.metaData.getColumnCount(); colIndex++) {
row.add(parseColumnData(jsonRow, colIndex, columns.get(colIndex).taos_type));
}
resultSet.add(row);
}
}
/**
* use this method after TDengine-2.0.18.0 to parse column meta; the RESTful interface adds column_meta to the result set
* @param columnMeta
*/
private void parseColumnMeta_new(JSONArray columnMeta) throws SQLException {
columnNames.clear();
columns.clear();
for (int colIndex = 0; colIndex < columnMeta.size(); colIndex++) {
JSONArray col = columnMeta.getJSONArray(colIndex);
String col_name = col.getString(0);
@ -50,23 +87,55 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
columnNames.add(col_name);
columns.add(new Field(col_name, col_type, col_length, "", taos_type));
}
this.metaData = new RestfulResultSetMetaData(this.database, columns, this);
}
// row data
JSONArray data = resultJson.getJSONArray("data");
resultSet = new ArrayList<>();
for (int rowIndex = 0; rowIndex < data.size(); rowIndex++) {
ArrayList row = new ArrayList();
JSONArray jsonRow = data.getJSONArray(rowIndex);
for (int colIndex = 0; colIndex < jsonRow.size(); colIndex++) {
row.add(parseColumnData(jsonRow, colIndex, columns.get(colIndex).taos_type));
/**
* use this method before TDengine-2.0.18.0 to parse column meta
*/
private void parseColumnMeta_old(JSONArray head, JSONArray data, int rows) {
columnNames.clear();
columns.clear();
for (int colIndex = 0; colIndex < head.size(); colIndex++) {
String col_name = head.getString(colIndex);
columnNames.add(col_name);
int col_type = Types.NULL;
int col_length = 0;
int taos_type = TSDBConstants.TSDB_DATA_TYPE_NULL;
JSONArray row0Json = data.getJSONArray(0);
if (colIndex < row0Json.size()) {
Object value = row0Json.get(colIndex);
if (value instanceof Boolean) {
col_type = Types.BOOLEAN;
col_length = 1;
taos_type = TSDBConstants.TSDB_DATA_TYPE_BOOL;
}
if (value instanceof Byte || value instanceof Short || value instanceof Integer || value instanceof Long) {
col_type = Types.BIGINT;
col_length = 8;
taos_type = TSDBConstants.TSDB_DATA_TYPE_BIGINT;
}
if (value instanceof Float || value instanceof Double || value instanceof BigDecimal) {
col_type = Types.DOUBLE;
col_length = 8;
taos_type = TSDBConstants.TSDB_DATA_TYPE_DOUBLE;
}
if (value instanceof String) {
col_type = Types.NCHAR;
col_length = ((String) value).length();
taos_type = TSDBConstants.TSDB_DATA_TYPE_NCHAR;
}
}
resultSet.add(row);
columns.add(new Field(col_name, col_type, col_length, "", taos_type));
}
}
private Object parseColumnData(JSONArray row, int colIndex, int taosType) throws SQLException {
switch (taosType) {
case TSDBConstants.TSDB_DATA_TYPE_NULL:
return null;
case TSDBConstants.TSDB_DATA_TYPE_BOOL:
return row.getBoolean(colIndex);
case TSDBConstants.TSDB_DATA_TYPE_TINYINT:
@ -290,8 +359,10 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
return 0;
}
wasNull = false;
if (value instanceof Float || value instanceof Double)
if (value instanceof Float)
return (float) value;
if (value instanceof Double)
return new Float((Double) value);
return Float.parseFloat(value.toString());
}
@ -329,6 +400,9 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
return Shorts.toByteArray((short) value);
if (value instanceof Byte)
return new byte[]{(byte) value};
if (value instanceof Timestamp) {
return Utils.formatTimestamp((Timestamp) value).getBytes();
}
return value.toString().getBytes();
}
@ -342,7 +416,9 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
return null;
if (value instanceof Timestamp)
return new Date(((Timestamp) value).getTime());
return Date.valueOf(value.toString());
Date date = null;
date = Utils.parseDate(value.toString());
return date;
}
@Override
@ -354,7 +430,13 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
return null;
if (value instanceof Timestamp)
return new Time(((Timestamp) value).getTime());
return Time.valueOf(value.toString());
Time time = null;
try {
time = Utils.parseTime(value.toString());
} catch (DateTimeParseException e) {
time = null;
}
return time;
}
@Override
@ -366,14 +448,20 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
return null;
if (value instanceof Timestamp)
return (Timestamp) value;
// if (value instanceof Long) {
// if (1_0000_0000_0000_0L > (long) value)
// return Timestamp.from(Instant.ofEpochMilli((long) value));
// long epochSec = (long) value / 1000_000L;
// long nanoAdjustment = (long) ((long) value % 1000_000L * 1000);
// return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
// }
return Timestamp.valueOf(value.toString());
if (value instanceof Long) {
if (1_0000_0000_0000_0L > (long) value)
return Timestamp.from(Instant.ofEpochMilli((long) value));
long epochSec = (long) value / 1000_000L;
long nanoAdjustment = (long) value % 1000_000L * 1000;
return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
}
Timestamp ret;
try {
ret = Utils.parseTimestamp(value.toString());
} catch (Exception e) {
ret = null;
}
return ret;
}
@Override
@ -415,7 +503,13 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
return new BigDecimal(Double.valueOf(value.toString()));
if (value instanceof Timestamp)
return new BigDecimal(((Timestamp) value).getTime());
return new BigDecimal(value.toString());
BigDecimal ret;
try {
ret = new BigDecimal(value.toString());
} catch (Exception e) {
ret = null;
}
return ret;
}
@Override


@ -1,12 +0,0 @@
package com.taosdata.jdbc.utils;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeFormatterBuilder;
public class UtcTimestampUtil {
public static final DateTimeFormatter formatter = new DateTimeFormatterBuilder()
.appendPattern("yyyy-MM-ddTHH:mm:ss.SSS+")
// .appendFraction(ChronoField.NANO_OF_SECOND, 0, 9, true)
.toFormatter();
}


@ -5,7 +5,15 @@ import com.google.common.collect.RangeSet;
import com.google.common.collect.TreeRangeSet;
import java.nio.charset.Charset;
import java.sql.Date;
import java.sql.Time;
import java.sql.Timestamp;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeFormatterBuilder;
import java.time.format.DateTimeParseException;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
@ -17,6 +25,41 @@ public class Utils {
private static Pattern ptn = Pattern.compile(".*?'");
private static final DateTimeFormatter formatter = new DateTimeFormatterBuilder()
.appendPattern("yyyy-MM-dd HH:mm:ss.SSS").toFormatter();
private static final DateTimeFormatter formatter2 = new DateTimeFormatterBuilder()
.appendPattern("yyyy-MM-dd HH:mm:ss.SSSSSS").toFormatter();
public static Time parseTime(String timestampStr) throws DateTimeParseException {
LocalTime time;
try {
time = LocalTime.parse(timestampStr, formatter);
} catch (DateTimeParseException e) {
time = LocalTime.parse(timestampStr, formatter2);
}
return Time.valueOf(time);
}
public static Date parseDate(String timestampStr) throws DateTimeParseException {
LocalDate date;
try {
date = LocalDate.parse(timestampStr, formatter);
} catch (DateTimeParseException e) {
date = LocalDate.parse(timestampStr, formatter2);
}
return Date.valueOf(date);
}
public static Timestamp parseTimestamp(String timeStampStr) {
LocalDateTime dateTime;
try {
dateTime = LocalDateTime.parse(timeStampStr, formatter);
} catch (DateTimeParseException e) {
dateTime = LocalDateTime.parse(timeStampStr, formatter2);
}
return Timestamp.valueOf(dateTime);
}
public static String escapeSingleQuota(String origin) {
Matcher m = ptn.matcher(origin);
StringBuffer sb = new StringBuffer();
@ -133,4 +176,13 @@ public class Utils {
}).collect(Collectors.joining());
}
public static String formatTimestamp(Timestamp timestamp) {
int nanos = timestamp.getNanos();
if (nanos % 1000000l != 0)
return timestamp.toLocalDateTime().format(formatter2);
return timestamp.toLocalDateTime().format(formatter);
}
}


@ -8,6 +8,8 @@ import org.junit.Test;
import java.io.IOException;
import java.io.Serializable;
import java.sql.*;
import java.util.ArrayList;
import java.util.Random;
public class TSDBPreparedStatementTest {
private static final String host = "127.0.0.1";
@ -97,6 +99,118 @@ public class TSDBPreparedStatementTest {
Assert.assertEquals(1, result);
}
@Test
public void executeTest() throws SQLException {
Statement stmt = conn.createStatement();
int numOfRows = 1000;
for (int loop = 0; loop < 10; loop++){
stmt.execute("drop table if exists weather_test");
stmt.execute("create table weather_test(ts timestamp, f1 nchar(4), f2 float, f3 double, f4 timestamp, f5 int, f6 bool, f7 binary(10))");
TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? values(?, ?, ?, ?, ?, ?, ?, ?)");
Random r = new Random();
s.setTableName("weather_test");
ArrayList<Long> ts = new ArrayList<Long>();
for(int i = 0; i < numOfRows; i++) {
ts.add(System.currentTimeMillis() + i);
}
s.setTimestamp(0, ts);
int random = 10 + r.nextInt(5);
ArrayList<String> s2 = new ArrayList<String>();
for(int i = 0; i < numOfRows; i++) {
if(i % random == 0) {
s2.add(null);
}else{
s2.add("分支" + i % 4);
}
}
s.setNString(1, s2, 4);
random = 10 + r.nextInt(5);
ArrayList<Float> s3 = new ArrayList<Float>();
for(int i = 0; i < numOfRows; i++) {
if(i % random == 0) {
s3.add(null);
}else{
s3.add(r.nextFloat());
}
}
s.setFloat(2, s3);
random = 10 + r.nextInt(5);
ArrayList<Double> s4 = new ArrayList<Double>();
for(int i = 0; i < numOfRows; i++) {
if(i % random == 0) {
s4.add(null);
}else{
s4.add(r.nextDouble());
}
}
s.setDouble(3, s4);
random = 10 + r.nextInt(5);
ArrayList<Long> ts2 = new ArrayList<Long>();
for(int i = 0; i < numOfRows; i++) {
if(i % random == 0) {
ts2.add(null);
}else{
ts2.add(System.currentTimeMillis() + i);
}
}
s.setTimestamp(4, ts2);
random = 10 + r.nextInt(5);
ArrayList<Integer> vals = new ArrayList<>();
for(int i = 0; i < numOfRows; i++) {
if(i % random == 0) {
vals.add(null);
}else{
vals.add(r.nextInt());
}
}
s.setInt(5, vals);
random = 10 + r.nextInt(5);
ArrayList<Boolean> sb = new ArrayList<>();
for(int i = 0; i < numOfRows; i++) {
if(i % random == 0) {
sb.add(null);
}else{
sb.add(i % 2 == 0 ? true : false);
}
}
s.setBoolean(6, sb);
random = 10 + r.nextInt(5);
ArrayList<String> s5 = new ArrayList<String>();
for(int i = 0; i < numOfRows; i++) {
if(i % random == 0) {
s5.add(null);
}else{
s5.add("test" + i % 10);
}
}
s.setString(7, s5, 10);
s.columnDataAddBatch();
s.columnDataExecuteBatch();
s.columnDataCloseBatch();
String sql = "select * from weather_test";
PreparedStatement statement = conn.prepareStatement(sql);
ResultSet rs = statement.executeQuery();
int rows = 0;
while(rs.next()) {
rows++;
}
Assert.assertEquals(numOfRows, rows);
}
}
@Test
public void setBoolean() throws SQLException {
pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));


@ -7,7 +7,6 @@ import java.sql.*;
public class InsertSpecialCharacterRestfulTest {
private static final String host = "127.0.0.1";
// private static final String host = "master";
private static Connection conn;
private static String dbName = "spec_char_test";
private static String tbname1 = "test";


@ -0,0 +1,105 @@
package com.taosdata.jdbc.cases;
import com.taosdata.jdbc.TSDBConnection;
import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.TSDBResultSet;
import com.taosdata.jdbc.TSDBSubscribe;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import java.sql.DriverManager;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
public class TD4144Test {
private static TSDBConnection connection;
private static final String host = "127.0.0.1";
private static final String topic = "topic-meter-current-bg-10";
private static final String sql = "select * from meters where current > 10";
private static final String sql2 = "select * from meters where ts >= '2020-08-15 12:20:00.000'";
@Test
public void test() throws SQLException {
TSDBSubscribe subscribe = null;
TSDBResultSet res = null;
boolean hasNext = false;
try {
subscribe = connection.subscribe(topic, sql, false);
int count = 0;
while (true) {
// wait one second to avoid calling consume too frequently and putting pressure on the server
TimeUnit.SECONDS.sleep(1);
if (res == null) {
// consume data
res = subscribe.consume();
hasNext = res.next();
}
if (res == null) {
continue;
}
ResultSetMetaData metaData = res.getMetaData();
int number = 0;
while (hasNext) {
int columnCount = metaData.getColumnCount();
for (int i = 1; i <= columnCount; i++) {
System.out.print(metaData.getColumnLabel(i) + ": " + res.getString(i) + "\t");
}
System.out.println();
count++;
number++;
hasNext = res.next();
if (!hasNext) {
res.close();
res = null;
System.out.println("rows " + count);
}
if (hasNext == true && number >= 10) {
System.out.println("batch" + number);
break;
}
}
}
} catch (SQLException | InterruptedException throwables) {
throwables.printStackTrace();
} finally {
if (subscribe != null)
subscribe.close(true);
}
}
@BeforeClass
public static void beforeClass() throws SQLException {
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
connection = (DriverManager.getConnection(url, properties)).unwrap(TSDBConnection.class);
try (Statement stmt = connection.createStatement()) {
stmt.execute("drop database if exists power");
stmt.execute("create database if not exists power");
stmt.execute("use power");
stmt.execute("create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupId int)");
stmt.execute("create table d1001 using meters tags(\"Beijing.Chaoyang\", 2)");
stmt.execute("create table d1002 using meters tags(\"Beijing.Haidian\", 2)");
stmt.execute("insert into d1001 values(\"2020-08-15 12:00:00.000\", 12, 220, 1),(\"2020-08-15 12:10:00.000\", 12.3, 220, 2),(\"2020-08-15 12:20:00.000\", 12.2, 220, 1)");
stmt.execute("insert into d1002 values(\"2020-08-15 12:00:00.000\", 9.9, 220, 1),(\"2020-08-15 12:10:00.000\", 10.3, 220, 1),(\"2020-08-15 12:20:00.000\", 11.2, 220, 1)");
}
}
@AfterClass
public static void afterClass() throws SQLException {
if (connection != null)
connection.close();
}
}


@ -0,0 +1,62 @@
package com.taosdata.jdbc.cases;
import com.alibaba.fastjson.JSONObject;
import com.taosdata.jdbc.TSDBDriver;
import org.junit.*;
import java.sql.*;
import java.util.Properties;
public class TD4174Test {
private Connection conn;
private static final String host = "127.0.0.1";
@Test
public void test() {
long ts = System.currentTimeMillis();
try (PreparedStatement pstmt = conn.prepareStatement("insert into weather values(" + ts + ", ?)")) {
JSONObject value = new JSONObject();
value.put("name", "John Smith");
value.put("age", 20);
Assert.assertEquals("{\"name\":\"John Smith\",\"age\":20}",value.toJSONString());
pstmt.setString(1, value.toJSONString());
int ret = pstmt.executeUpdate();
Assert.assertEquals(1, ret);
} catch (SQLException e) {
e.printStackTrace();
}
}
public static void main(String[] args) {
JSONObject value = new JSONObject();
value.put("name", "John Smith");
value.put("age", 20);
System.out.println(value.toJSONString());
}
@Before
public void before() throws SQLException {
String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
conn = DriverManager.getConnection(url, properties);
try (Statement stmt = conn.createStatement()) {
stmt.execute("drop database if exists td4174");
stmt.execute("create database if not exists td4174");
stmt.execute("use td4174");
stmt.execute("create table weather(ts timestamp, text binary(64))");
}
}
@After
public void after() throws SQLException {
if (conn != null)
conn.close();
}
}


@ -13,6 +13,7 @@ import java.util.Properties;
public class TwoTypeTimestampPercisionInRestfulTest {
private static final String host = "127.0.0.1";
private static final String ms_timestamp_db = "ms_precision_test";
private static final String us_timestamp_db = "us_precision_test";
private static final long timestamp1 = System.currentTimeMillis();
@ -94,7 +95,8 @@ public class TwoTypeTimestampPercisionInRestfulTest {
try (Statement stmt = conn3.createStatement()) {
ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather");
rs.next();
long ts = rs.getTimestamp(1).getTime();
Timestamp actual = rs.getTimestamp(1);
long ts = actual == null ? 0 : actual.getTime();
Assert.assertEquals(timestamp1, ts);
ts = rs.getLong(1);
Assert.assertEquals(timestamp1, ts);
@ -110,7 +112,7 @@ public class TwoTypeTimestampPercisionInRestfulTest {
rs.next();
Timestamp timestamp = rs.getTimestamp(1);
long ts = timestamp.getTime();
long ts = timestamp == null ? 0 : timestamp.getTime();
Assert.assertEquals(timestamp1, ts);
int nanos = timestamp.getNanos();
Assert.assertEquals(timestamp2 % 1000_000l * 1000, nanos);


@ -9,19 +9,19 @@ import java.util.Properties;
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class UnsignedNumberJniTest {
private static final String host = "127.0.0.1";
private static Connection conn;
private static long ts;
@Test
public void testCase001() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select * from us_table");
ResultSetMetaData meta = rs.getMetaData();
assertResultSetMetaData(meta);
while (rs.next()) {
for (int i = 1; i <= meta.getColumnCount(); i++) {
System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t");
}
System.out.println();
Assert.assertEquals(ts, rs.getTimestamp(1).getTime());
Assert.assertEquals("127", rs.getString(2));
Assert.assertEquals("32767", rs.getString(3));
Assert.assertEquals("2147483647", rs.getString(4));
@ -37,13 +37,10 @@ public class UnsignedNumberJniTest {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select * from us_table");
ResultSetMetaData meta = rs.getMetaData();
assertResultSetMetaData(meta);
while (rs.next()) {
System.out.print(meta.getColumnLabel(1) + ": " + rs.getTimestamp(1) + "\t");
System.out.print(meta.getColumnLabel(2) + ": " + rs.getByte(2) + "\t");
System.out.print(meta.getColumnLabel(3) + ": " + rs.getShort(3) + "\t");
System.out.print(meta.getColumnLabel(4) + ": " + rs.getInt(4) + "\t");
System.out.print(meta.getColumnLabel(5) + ": " + rs.getLong(5) + "\t");
System.out.println();
Assert.assertEquals(ts, rs.getTimestamp(1).getTime());
Assert.assertEquals(127, rs.getByte(2));
Assert.assertEquals(32767, rs.getShort(3));
Assert.assertEquals(2147483647, rs.getInt(4));
@ -61,16 +58,14 @@ public class UnsignedNumberJniTest {
stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(" + now + ", 127, 32767,2147483647, 18446744073709551614)");
ResultSet rs = stmt.executeQuery("select * from us_table where ts = " + now);
ResultSetMetaData meta = rs.getMetaData();
assertResultSetMetaData(meta);
while (rs.next()) {
System.out.print(meta.getColumnLabel(1) + ": " + rs.getTimestamp(1) + "\t");
System.out.print(meta.getColumnLabel(2) + ": " + rs.getByte(2) + "\t");
System.out.print(meta.getColumnLabel(3) + ": " + rs.getShort(3) + "\t");
System.out.print(meta.getColumnLabel(4) + ": " + rs.getInt(4) + "\t");
System.out.print(meta.getColumnLabel(5) + ": " + rs.getLong(5) + "\t");
System.out.println();
Assert.assertEquals(now, rs.getTimestamp(1).getTime());
Assert.assertEquals(127, rs.getByte(2));
Assert.assertEquals(32767, rs.getShort(3));
Assert.assertEquals(2147483647, rs.getInt(4));
Assert.assertEquals("18446744073709551614", rs.getString(5));
rs.getLong(5);
}
}
}
@ -82,15 +77,15 @@ public class UnsignedNumberJniTest {
stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(" + now + ", 127, 32767,4294967294, 18446744073709551614)");
ResultSet rs = stmt.executeQuery("select * from us_table where ts = " + now);
ResultSetMetaData meta = rs.getMetaData();
assertResultSetMetaData(meta);
while (rs.next()) {
System.out.print(meta.getColumnLabel(1) + ": " + rs.getTimestamp(1) + "\t");
System.out.print(meta.getColumnLabel(2) + ": " + rs.getByte(2) + "\t");
System.out.print(meta.getColumnLabel(3) + ": " + rs.getShort(3) + "\t");
System.out.print(meta.getColumnLabel(4) + ": " + rs.getInt(4) + "\t");
System.out.print(meta.getColumnLabel(5) + ": " + rs.getLong(5) + "\t");
System.out.println();
Assert.assertEquals(now, rs.getTimestamp(1).getTime());
Assert.assertEquals(127, rs.getByte(2));
Assert.assertEquals(32767, rs.getShort(3));
Assert.assertEquals("4294967294", rs.getString(4));
Assert.assertEquals("18446744073709551614", rs.getString(5));
rs.getInt(4);
}
}
}
@ -102,15 +97,15 @@ public class UnsignedNumberJniTest {
stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(" + now + ", 127, 65534,4294967294, 18446744073709551614)");
ResultSet rs = stmt.executeQuery("select * from us_table where ts = " + now);
ResultSetMetaData meta = rs.getMetaData();
while (rs.next()) {
System.out.print(meta.getColumnLabel(1) + ": " + rs.getTimestamp(1) + "\t");
System.out.print(meta.getColumnLabel(2) + ": " + rs.getByte(2) + "\t");
System.out.print(meta.getColumnLabel(3) + ": " + rs.getShort(3) + "\t");
System.out.print(meta.getColumnLabel(4) + ": " + rs.getInt(4) + "\t");
System.out.print(meta.getColumnLabel(5) + ": " + rs.getLong(5) + "\t");
System.out.println();
assertResultSetMetaData(meta);
while (rs.next()) {
Assert.assertEquals(now, rs.getTimestamp(1).getTime());
Assert.assertEquals(127, rs.getByte(2));
Assert.assertEquals("65534", rs.getString(3));
Assert.assertEquals("4294967294", rs.getString(4));
Assert.assertEquals("18446744073709551614", rs.getString(5));
rs.getShort(3);
}
}
}
@ -122,37 +117,27 @@ public class UnsignedNumberJniTest {
stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(" + now + ", 254, 65534,4294967294, 18446744073709551614)");
ResultSet rs = stmt.executeQuery("select * from us_table where ts = " + now);
ResultSetMetaData meta = rs.getMetaData();
while (rs.next()) {
System.out.print(meta.getColumnLabel(1) + ": " + rs.getTimestamp(1) + "\t");
System.out.print(meta.getColumnLabel(2) + ": " + rs.getByte(2) + "\t");
System.out.print(meta.getColumnLabel(3) + ": " + rs.getShort(3) + "\t");
System.out.print(meta.getColumnLabel(4) + ": " + rs.getInt(4) + "\t");
System.out.print(meta.getColumnLabel(5) + ": " + rs.getLong(5) + "\t");
System.out.println();
}
}
}
assertResultSetMetaData(meta);
@Test
public void testCase007() throws SQLException {
try (Statement stmt = conn.createStatement()) {
long now = System.currentTimeMillis();
stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(" + now + ", 254, 65534,4294967294, 18446744073709551614)");
ResultSet rs = stmt.executeQuery("select * from us_table where ts = " + now);
ResultSetMetaData meta = rs.getMetaData();
while (rs.next()) {
for (int i = 1; i <= meta.getColumnCount(); i++) {
System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t");
}
System.out.println();
Assert.assertEquals(now, rs.getTimestamp(1).getTime());
Assert.assertEquals("254", rs.getString(2));
Assert.assertEquals("65534", rs.getString(3));
Assert.assertEquals("4294967294", rs.getString(4));
Assert.assertEquals("18446744073709551614", rs.getString(5));
rs.getByte(2);
}
}
}
private void assertResultSetMetaData(ResultSetMetaData meta) throws SQLException {
Assert.assertEquals(5, meta.getColumnCount());
Assert.assertEquals("ts", meta.getColumnLabel(1));
Assert.assertEquals("f1", meta.getColumnLabel(2));
Assert.assertEquals("f2", meta.getColumnLabel(3));
Assert.assertEquals("f3", meta.getColumnLabel(4));
Assert.assertEquals("f4", meta.getColumnLabel(5));
}
@BeforeClass
public static void beforeClass() {
@ -160,20 +145,19 @@ public class UnsignedNumberJniTest {
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
ts = System.currentTimeMillis();
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
conn = DriverManager.getConnection(url, properties);
Statement stmt = conn.createStatement();
stmt.execute("drop database if exists unsign_jni");
stmt.execute("create database if not exists unsign_jni");
stmt.execute("use unsign_jni");
stmt.execute("create table us_table(ts timestamp, f1 tinyint unsigned, f2 smallint unsigned, f3 int unsigned, f4 bigint unsigned)");
stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(now, 127, 32767,2147483647, 9223372036854775807)");
stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(" + ts + ", 127, 32767,2147483647, 9223372036854775807)");
stmt.close();
} catch (ClassNotFoundException | SQLException e) {
} catch (SQLException e) {
e.printStackTrace();
}
}
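The assertions above encode the key constraint for unsigned columns: 18446744073709551614 fits in a BIGINT UNSIGNED column but overflows Java's signed long, so it is only readable losslessly through getString(), and the bare rs.getLong(5) style calls exercise the narrower getters on out-of-range values. A minimal C sketch of the underlying range problem (illustration only, not TDengine code):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void) {
    uint64_t big = 18446744073709551614ULL;  /* UINT64_MAX - 1, a valid BIGINT UNSIGNED value */
    int64_t  as_signed = (int64_t)big;       /* the same bits reinterpreted as signed */

    printf("unsigned view: %" PRIu64 "\n", big);        /* prints 18446744073709551614 */
    printf("signed view:   %" PRId64 "\n", as_signed);  /* prints -2 on two's-complement machines */
    return 0;
}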


@ -13,17 +13,20 @@ public class UnsignedNumberRestfulTest {
private static final String host = "127.0.0.1";
private static Connection conn;
private static long ts;
@Test
public void testCase001() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select * from us_table");
ResultSetMetaData meta = rs.getMetaData();
assertResultSetMetaData(meta);
while (rs.next()) {
for (int i = 1; i <= meta.getColumnCount(); i++) {
System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t");
}
System.out.println();
Assert.assertEquals(ts, rs.getTimestamp(1).getTime());
Assert.assertEquals("127", rs.getString(2));
Assert.assertEquals("32767", rs.getString(3));
Assert.assertEquals("2147483647", rs.getString(4));
Assert.assertEquals("9223372036854775807", rs.getString(5));
}
} catch (SQLException e) {
e.printStackTrace();
@ -35,13 +38,14 @@ public class UnsignedNumberRestfulTest {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select * from us_table");
ResultSetMetaData meta = rs.getMetaData();
assertResultSetMetaData(meta);
while (rs.next()) {
System.out.print(meta.getColumnLabel(1) + ": " + rs.getTimestamp(1) + "\t");
System.out.print(meta.getColumnLabel(2) + ": " + rs.getByte(2) + "\t");
System.out.print(meta.getColumnLabel(3) + ": " + rs.getShort(3) + "\t");
System.out.print(meta.getColumnLabel(4) + ": " + rs.getInt(4) + "\t");
System.out.print(meta.getColumnLabel(5) + ": " + rs.getLong(5) + "\t");
System.out.println();
Assert.assertEquals(ts, rs.getTimestamp(1).getTime());
Assert.assertEquals(127, rs.getByte(2));
Assert.assertEquals(32767, rs.getShort(3));
Assert.assertEquals(2147483647, rs.getInt(4));
Assert.assertEquals(9223372036854775807L, rs.getLong(5));
}
} catch (SQLException e) {
e.printStackTrace();
@ -55,13 +59,14 @@ public class UnsignedNumberRestfulTest {
stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(" + now + ", 127, 32767,2147483647, 18446744073709551614)");
ResultSet rs = stmt.executeQuery("select * from us_table where ts = " + now);
ResultSetMetaData meta = rs.getMetaData();
assertResultSetMetaData(meta);
while (rs.next()) {
System.out.print(meta.getColumnLabel(1) + ": " + rs.getTimestamp(1) + "\t");
System.out.print(meta.getColumnLabel(2) + ": " + rs.getByte(2) + "\t");
System.out.print(meta.getColumnLabel(3) + ": " + rs.getShort(3) + "\t");
System.out.print(meta.getColumnLabel(4) + ": " + rs.getInt(4) + "\t");
System.out.print(meta.getColumnLabel(5) + ": " + rs.getLong(5) + "\t");
System.out.println();
Assert.assertEquals(now, rs.getTimestamp(1).getTime());
Assert.assertEquals(127, rs.getByte(2));
Assert.assertEquals(32767, rs.getShort(3));
Assert.assertEquals(2147483647, rs.getInt(4));
Assert.assertEquals("18446744073709551614", rs.getString(5));
rs.getLong(5);
}
}
}
@ -73,13 +78,15 @@ public class UnsignedNumberRestfulTest {
stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(" + now + ", 127, 32767,4294967294, 18446744073709551614)");
ResultSet rs = stmt.executeQuery("select * from us_table where ts = " + now);
ResultSetMetaData meta = rs.getMetaData();
assertResultSetMetaData(meta);
while (rs.next()) {
System.out.print(meta.getColumnLabel(1) + ": " + rs.getTimestamp(1) + "\t");
System.out.print(meta.getColumnLabel(2) + ": " + rs.getByte(2) + "\t");
System.out.print(meta.getColumnLabel(3) + ": " + rs.getShort(3) + "\t");
System.out.print(meta.getColumnLabel(4) + ": " + rs.getInt(4) + "\t");
System.out.print(meta.getColumnLabel(5) + ": " + rs.getLong(5) + "\t");
System.out.println();
Assert.assertEquals(now, rs.getTimestamp(1).getTime());
Assert.assertEquals(127, rs.getByte(2));
Assert.assertEquals(32767, rs.getShort(3));
Assert.assertEquals("4294967294", rs.getString(4));
Assert.assertEquals("18446744073709551614", rs.getString(5));
rs.getInt(4);
}
}
}
@ -91,13 +98,15 @@ public class UnsignedNumberRestfulTest {
stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(" + now + ", 127, 65534,4294967294, 18446744073709551614)");
ResultSet rs = stmt.executeQuery("select * from us_table where ts = " + now);
ResultSetMetaData meta = rs.getMetaData();
assertResultSetMetaData(meta);
while (rs.next()) {
System.out.print(meta.getColumnLabel(1) + ": " + rs.getTimestamp(1) + "\t");
System.out.print(meta.getColumnLabel(2) + ": " + rs.getByte(2) + "\t");
System.out.print(meta.getColumnLabel(3) + ": " + rs.getShort(3) + "\t");
System.out.print(meta.getColumnLabel(4) + ": " + rs.getInt(4) + "\t");
System.out.print(meta.getColumnLabel(5) + ": " + rs.getLong(5) + "\t");
System.out.println();
Assert.assertEquals(now, rs.getTimestamp(1).getTime());
Assert.assertEquals(127, rs.getByte(2));
Assert.assertEquals("65534", rs.getString(3));
Assert.assertEquals("4294967294", rs.getString(4));
Assert.assertEquals("18446744073709551614", rs.getString(5));
rs.getShort(3);
}
}
}
@ -109,57 +118,47 @@ public class UnsignedNumberRestfulTest {
stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(" + now + ", 254, 65534,4294967294, 18446744073709551614)");
ResultSet rs = stmt.executeQuery("select * from us_table where ts = " + now);
ResultSetMetaData meta = rs.getMetaData();
while (rs.next()) {
System.out.print(meta.getColumnLabel(1) + ": " + rs.getTimestamp(1) + "\t");
System.out.print(meta.getColumnLabel(2) + ": " + rs.getByte(2) + "\t");
System.out.print(meta.getColumnLabel(3) + ": " + rs.getShort(3) + "\t");
System.out.print(meta.getColumnLabel(4) + ": " + rs.getInt(4) + "\t");
System.out.print(meta.getColumnLabel(5) + ": " + rs.getLong(5) + "\t");
System.out.println();
}
}
}
assertResultSetMetaData(meta);
@Test
public void testCase007() throws SQLException {
try (Statement stmt = conn.createStatement()) {
long now = System.currentTimeMillis();
stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(" + now + ", 254, 65534,4294967294, 18446744073709551614)");
ResultSet rs = stmt.executeQuery("select * from us_table where ts = " + now);
ResultSetMetaData meta = rs.getMetaData();
while (rs.next()) {
for (int i = 1; i <= meta.getColumnCount(); i++) {
System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t");
}
System.out.println();
Assert.assertEquals(now, rs.getTimestamp(1).getTime());
Assert.assertEquals("254", rs.getString(2));
Assert.assertEquals("65534", rs.getString(3));
Assert.assertEquals("4294967294", rs.getString(4));
Assert.assertEquals("18446744073709551614", rs.getString(5));
rs.getByte(2);
}
}
}
private void assertResultSetMetaData(ResultSetMetaData meta) throws SQLException {
Assert.assertEquals(5, meta.getColumnCount());
Assert.assertEquals("ts", meta.getColumnLabel(1));
Assert.assertEquals("f1", meta.getColumnLabel(2));
Assert.assertEquals("f2", meta.getColumnLabel(3));
Assert.assertEquals("f3", meta.getColumnLabel(4));
Assert.assertEquals("f4", meta.getColumnLabel(5));
}
@BeforeClass
public static void beforeClass() {
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
ts = System.currentTimeMillis();
try {
Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
conn = DriverManager.getConnection(url, properties);
Statement stmt = conn.createStatement();
stmt.execute("drop database if exists unsign_restful");
stmt.execute("create database if not exists unsign_restful");
stmt.execute("use unsign_restful");
stmt.execute("create table us_table(ts timestamp, f1 tinyint unsigned, f2 smallint unsigned, f3 int unsigned, f4 bigint unsigned)");
stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(now, 127, 32767,2147483647, 9223372036854775807)");
stmt.executeUpdate("insert into us_table(ts,f1,f2,f3,f4) values(" + ts + ", 127, 32767,2147483647, 9223372036854775807)");
stmt.close();
} catch (ClassNotFoundException | SQLException e) {
} catch (SQLException e) {
e.printStackTrace();
}
}


@ -10,7 +10,6 @@ import java.sql.*;
public class RestfulPreparedStatementTest {
private static final String host = "127.0.0.1";
// private static final String host = "master";
private static Connection conn;
private static final String sql_insert = "insert into t1 values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
private static PreparedStatement pstmt_insert;
@ -371,7 +370,6 @@ public class RestfulPreparedStatementTest {
pstmt_insert.setSQLXML(1, null);
}
@BeforeClass
public static void beforeClass() {
try {


@ -18,7 +18,6 @@ import java.text.SimpleDateFormat;
public class RestfulResultSetTest {
private static final String host = "127.0.0.1";
private static Connection conn;
private static Statement stmt;
private static ResultSet rs;
@ -95,7 +94,8 @@ public class RestfulResultSetTest {
@Test
public void getBigDecimal() throws SQLException {
BigDecimal f1 = rs.getBigDecimal("f1");
Assert.assertEquals(1609430400000L, f1.longValue());
long actual = (f1 == null) ? 0 : f1.longValue();
Assert.assertEquals(1609430400000L, actual);
BigDecimal f2 = rs.getBigDecimal("f2");
Assert.assertEquals(1, f2.intValue());
@ -119,7 +119,7 @@ public class RestfulResultSetTest {
@Test
public void getBytes() throws SQLException {
byte[] f1 = rs.getBytes("f1");
Assert.assertEquals("2021-01-01 00:00:00.0", new String(f1));
Assert.assertEquals("2021-01-01 00:00:00.000", new String(f1));
byte[] f2 = rs.getBytes("f2");
Assert.assertEquals(1, Ints.fromByteArray(f2));


@ -18,8 +18,8 @@
#define CHK_TEST(statement) \
do { \
D("testing: %s", #statement); \
int r = (statement); \
if (r) { \
int _r = (statement); \
if (_r) { \
D("testing failed: %s", #statement); \
return 1; \
} \
@ -181,7 +181,7 @@ static int do_statement(SQLHSTMT stmt, const char *statement) {
r = traverse_cols(stmt, cols);
char buf[4096];
while (1) {
SQLRETURN r = SQLFetch(stmt);
r = SQLFetch(stmt);
if (r==SQL_NO_DATA) break;
CHK_RESULT(r, SQL_HANDLE_STMT, stmt, "");
for (size_t i=0; i<cols; ++i) {
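The r to _r renames here (and the _version, _len, _args renames throughout the rest of this commit) follow one pattern: an inner declaration was shadowing an outer one, which a build with -Wshadow -Werror turns into a hard error, and macros are the classic trigger because they inject declarations into the caller's scope. A hedged sketch of the failure mode (hypothetical macro, not the real test harness):

/* Reproduce the error with: gcc -Wshadow -Werror -c shadow_demo.c */
#define CHK(stmt)                                               \
    do {                                                        \
        int r = (stmt); /* injected into the caller's scope */  \
        if (r) return 1;                                        \
    } while (0)

static int do_work(void) { return 0; }

int run(void) {
    int r = 0;       /* caller's own result variable */
    CHK(do_work());  /* -Wshadow: declaration of 'r' shadows a previous local */
    return r;
}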


@ -1762,8 +1762,8 @@ static SQLRETURN tsdb_conn_prepare(stmt_t *stmt) {
tsdb_stmt->tsdb_params = tsdb_params;
for (int i=0; i<nums; ++i) {
SQLRETURN r = do_fill_param(stmt, i);
if (r) return r;
SQLRETURN _r = do_fill_param(stmt, i);
if (_r) return _r;
}
}


@ -1 +0,0 @@
../


@ -1 +0,0 @@
../


@ -1 +0,0 @@
../


@ -1 +0,0 @@
../


@ -1 +0,0 @@
../


@ -148,15 +148,15 @@ static void *shellCheckThreadFp(void *arg) {
return NULL;
}
static void shellRunCheckThreads(TAOS *con, SShellArguments *args) {
static void shellRunCheckThreads(TAOS *con, SShellArguments *_args) {
pthread_attr_t thattr;
ShellThreadObj *threadObj = (ShellThreadObj *)calloc(args->threadNum, sizeof(ShellThreadObj));
for (int t = 0; t < args->threadNum; ++t) {
ShellThreadObj *threadObj = (ShellThreadObj *)calloc(_args->threadNum, sizeof(ShellThreadObj));
for (int t = 0; t < _args->threadNum; ++t) {
ShellThreadObj *pThread = threadObj + t;
pThread->threadIndex = t;
pThread->totalThreads = args->threadNum;
pThread->totalThreads = _args->threadNum;
pThread->taos = con;
pThread->db = args->database;
pThread->db = _args->database;
pthread_attr_init(&thattr);
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
@ -167,31 +167,31 @@ static void shellRunCheckThreads(TAOS *con, SShellArguments *args) {
}
}
for (int t = 0; t < args->threadNum; ++t) {
for (int t = 0; t < _args->threadNum; ++t) {
pthread_join(threadObj[t].threadID, NULL);
}
for (int t = 0; t < args->threadNum; ++t) {
for (int t = 0; t < _args->threadNum; ++t) {
taos_close(threadObj[t].taos);
}
free(threadObj);
}
void shellCheck(TAOS *con, SShellArguments *args) {
void shellCheck(TAOS *con, SShellArguments *_args) {
int64_t start = taosGetTimestampMs();
if (shellUseDb(con, args->database) != 0) {
if (shellUseDb(con, _args->database) != 0) {
shellFreeTbnames();
return;
}
if (shellShowTables(con, args->database) != 0) {
if (shellShowTables(con, _args->database) != 0) {
shellFreeTbnames();
return;
}
fprintf(stdout, "total %d tables will be checked by %d threads\n", tbNum, args->threadNum);
shellRunCheckThreads(con, args);
fprintf(stdout, "total %d tables will be checked by %d threads\n", tbNum, _args->threadNum);
shellRunCheckThreads(con, _args);
int64_t end = taosGetTimestampMs();
fprintf(stdout, "total %d tables checked, failed:%d, time spent %.2f seconds\n", checkedNum, errorNum,


@ -56,24 +56,24 @@ extern TAOS *taos_connect_auth(const char *ip, const char *user, const char *aut
/*
* FUNCTION: Initialize the shell.
*/
TAOS *shellInit(SShellArguments *args) {
TAOS *shellInit(SShellArguments *_args) {
printf("\n");
printf(CLIENT_VERSION, tsOsName, taos_get_client_info());
fflush(stdout);
// set options before initializing
if (args->timezone != NULL) {
taos_options(TSDB_OPTION_TIMEZONE, args->timezone);
if (_args->timezone != NULL) {
taos_options(TSDB_OPTION_TIMEZONE, _args->timezone);
}
if (args->is_use_passwd) {
if (args->password == NULL) args->password = getpass("Enter password: ");
if (_args->is_use_passwd) {
if (_args->password == NULL) _args->password = getpass("Enter password: ");
} else {
args->password = TSDB_DEFAULT_PASS;
_args->password = TSDB_DEFAULT_PASS;
}
if (args->user == NULL) {
args->user = TSDB_DEFAULT_USER;
if (_args->user == NULL) {
_args->user = TSDB_DEFAULT_USER;
}
if (taos_init()) {
@ -84,10 +84,10 @@ TAOS *shellInit(SShellArguments *args) {
// Connect to the database.
TAOS *con = NULL;
if (args->auth == NULL) {
con = taos_connect(args->host, args->user, args->password, args->database, args->port);
if (_args->auth == NULL) {
con = taos_connect(_args->host, _args->user, _args->password, _args->database, _args->port);
} else {
con = taos_connect_auth(args->host, args->user, args->auth, args->database, args->port);
con = taos_connect_auth(_args->host, _args->user, _args->auth, _args->database, _args->port);
}
if (con == NULL) {
@ -100,14 +100,14 @@ TAOS *shellInit(SShellArguments *args) {
read_history();
// Check if it is a temporary run
if (args->commands != NULL || args->file[0] != 0) {
if (args->commands != NULL) {
printf("%s%s\n", PROMPT_HEADER, args->commands);
shellRunCommand(con, args->commands);
if (_args->commands != NULL || _args->file[0] != 0) {
if (_args->commands != NULL) {
printf("%s%s\n", PROMPT_HEADER, _args->commands);
shellRunCommand(con, _args->commands);
}
if (args->file[0] != 0) {
source_file(con, args->file);
if (_args->file[0] != 0) {
source_file(con, _args->file);
}
taos_close(con);
@ -116,14 +116,14 @@ TAOS *shellInit(SShellArguments *args) {
}
#ifndef WINDOWS
if (args->dir[0] != 0) {
source_dir(con, args);
if (_args->dir[0] != 0) {
source_dir(con, _args);
taos_close(con);
exit(EXIT_SUCCESS);
}
if (args->check != 0) {
shellCheck(con, args);
if (_args->check != 0) {
shellCheck(con, _args);
taos_close(con);
exit(EXIT_SUCCESS);
}


@ -233,15 +233,15 @@ void* shellImportThreadFp(void *arg)
return NULL;
}
static void shellRunImportThreads(SShellArguments* args)
static void shellRunImportThreads(SShellArguments* _args)
{
pthread_attr_t thattr;
ShellThreadObj *threadObj = (ShellThreadObj *)calloc(args->threadNum, sizeof(ShellThreadObj));
for (int t = 0; t < args->threadNum; ++t) {
ShellThreadObj *threadObj = (ShellThreadObj *)calloc(_args->threadNum, sizeof(ShellThreadObj));
for (int t = 0; t < _args->threadNum; ++t) {
ShellThreadObj *pThread = threadObj + t;
pThread->threadIndex = t;
pThread->totalThreads = args->threadNum;
pThread->taos = taos_connect(args->host, args->user, args->password, args->database, tsDnodeShellPort);
pThread->totalThreads = _args->threadNum;
pThread->taos = taos_connect(_args->host, _args->user, _args->password, _args->database, tsDnodeShellPort);
if (pThread->taos == NULL) {
fprintf(stderr, "ERROR: thread:%d failed connect to TDengine, error:%s\n", pThread->threadIndex, "null taos"/*taos_errstr(pThread->taos)*/);
exit(0);
@ -256,18 +256,18 @@ static void shellRunImportThreads(SShellArguments* args)
}
}
for (int t = 0; t < args->threadNum; ++t) {
for (int t = 0; t < _args->threadNum; ++t) {
pthread_join(threadObj[t].threadID, NULL);
}
for (int t = 0; t < args->threadNum; ++t) {
for (int t = 0; t < _args->threadNum; ++t) {
taos_close(threadObj[t].taos);
}
free(threadObj);
}
void source_dir(TAOS* con, SShellArguments* args) {
shellGetDirectoryFileList(args->dir);
void source_dir(TAOS* con, SShellArguments* _args) {
shellGetDirectoryFileList(_args->dir);
int64_t start = taosGetTimestampMs();
if (shellTablesSQLFile[0] != 0) {
@ -276,7 +276,7 @@ void source_dir(TAOS* con, SShellArguments* args) {
fprintf(stdout, "import %s finished, time spent %.2f seconds\n", shellTablesSQLFile, (end - start) / 1000.0);
}
shellRunImportThreads(args);
shellRunImportThreads(_args);
int64_t end = taosGetTimestampMs();
fprintf(stdout, "import %s finished, time spent %.2f seconds\n", args->dir, (end - start) / 1000.0);
fprintf(stdout, "import %s finished, time spent %.2f seconds\n", _args->dir, (end - start) / 1000.0);
}


@ -415,7 +415,7 @@ void set_terminal_mode() {
}
}
void get_history_path(char *history) { snprintf(history, TSDB_FILENAME_LEN, "%s/%s", getenv("HOME"), HISTORY_FILE); }
void get_history_path(char *_history) { snprintf(_history, TSDB_FILENAME_LEN, "%s/%s", getenv("HOME"), HISTORY_FILE); }
void clearScreen(int ecmd_pos, int cursor_pos) {
struct winsize w;


@ -10,7 +10,11 @@ IF (GIT_FOUND)
COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -n 1 ${CMAKE_CURRENT_LIST_DIR}/taosdemo.c
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_COMMIT_SHA1)
STRING(SUBSTRING "${TAOSDEMO_COMMIT_SHA1}" 0 7 TAOSDEMO_COMMIT_SHA1)
IF ("${TAOSDEMO_COMMIT_SHA1}" STREQUAL "")
MESSAGE("taosdemo's latest commit in short is:" ${TAOSDEMO_COMMIT_SHA1})
ELSE ()
STRING(SUBSTRING "${TAOSDEMO_COMMIT_SHA1}" 0 7 TAOSDEMO_COMMIT_SHA1)
ENDIF ()
EXECUTE_PROCESS(
COMMAND ${GIT_EXECUTABLE} status -z -s ${CMAKE_CURRENT_LIST_DIR}/taosdemo.c
RESULT_VARIABLE RESULT

File diff suppressed because it is too large


@ -1017,7 +1017,7 @@ int taosDumpOut(struct arguments *arguments) {
sprintf(command, "use %s", dbInfos[0]->name);
result = taos_query(taos, command);
int32_t code = taos_errno(result);
code = taos_errno(result);
if (code != 0) {
fprintf(stderr, "invalid database %s\n", dbInfos[0]->name);
goto _exit_failure;


@ -522,13 +522,13 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
pStatus->lastReboot = htonl(pStatus->lastReboot);
pStatus->numOfCores = htons(pStatus->numOfCores);
uint32_t version = htonl(pStatus->version);
if (version != tsVersion) {
uint32_t _version = htonl(pStatus->version);
if (_version != tsVersion) {
pDnode = mnodeGetDnodeByEp(pStatus->dnodeEp);
if (pDnode != NULL && pDnode->status != TAOS_DN_STATUS_READY) {
pDnode->offlineReason = TAOS_DN_OFF_VERSION_NOT_MATCH;
}
mError("dnode:%d, status msg version:%d not equal with cluster:%d", pStatus->dnodeId, version, tsVersion);
mError("dnode:%d, status msg version:%d not equal with cluster:%d", pStatus->dnodeId, _version, tsVersion);
return TSDB_CODE_MND_INVALID_MSG_VERSION;
}
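The status message's version field arrives in network byte order, so it has to be byte-swapped before it can be compared with the node's own tsVersion; the _version rename also keeps the local from shadowing another version identifier in scope. A small illustrative sketch of the conversion step (not the mnode code):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* wire_version is as received from the network, i.e. big-endian. */
static int check_version(uint32_t wire_version, uint32_t cluster_version) {
    uint32_t host_version = ntohl(wire_version);  /* swap before any comparison */
    if (host_version != cluster_version) {
        fprintf(stderr, "version mismatch: msg %u, cluster %u\n",
                (unsigned)host_version, (unsigned)cluster_version);
        return -1;
    }
    return 0;
}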


@ -123,8 +123,9 @@ SConnObj *mnodeAccquireConn(int32_t connId, char *user, uint32_t ip, uint16_t po
if (/* pConn->ip != ip || */ pConn->port != port /* || strcmp(pConn->user, user) != 0 */) {
mDebug("connId:%d, incoming conn user:%s ip:%s:%u, not match exist conn user:%s ip:%s:%u", connId, user,
taosIpStr(ip), port, pConn->user, taosIpStr(pConn->ip), pConn->port);
taosCacheRelease(tsMnodeConnCache, (void **)&pConn, false);
return NULL;
pConn->port = port;
//taosCacheRelease(tsMnodeConnCache, (void **)&pConn, false);
//return NULL;
}
// mDebug("connId:%d, is incoming, user:%s ip:%s:%u", connId, pConn->user, taosIpStr(pConn->ip), pConn->port);


@ -253,10 +253,6 @@ static int32_t mnodeProcessHeartBeatMsg(SMnodeMsg *pMsg) {
int32_t connId = htonl(pHBMsg->connId);
SConnObj *pConn = mnodeAccquireConn(connId, connInfo.user, connInfo.clientIp, connInfo.clientPort);
if (pConn == NULL) {
pHBMsg->pid = htonl(pHBMsg->pid);
pConn = mnodeCreateConn(connInfo.user, connInfo.clientIp, connInfo.clientPort, pHBMsg->pid, pHBMsg->appName);
}
if (pConn == NULL) {
// do not close existing links, otherwise


@ -132,10 +132,10 @@ int32_t ehttp_gzip_write(ehttp_gzip_t *gzip, const char *buf, int32_t len) {
if (ret != Z_STREAM_END) continue;
}
int32_t len = (int32_t)(gzip->gzip->next_out - (z_const Bytef *)gzip->chunk);
int32_t _len = (int32_t)(gzip->gzip->next_out - (z_const Bytef *)gzip->chunk);
gzip->gzip->next_out[0] = '\0';
gzip->callbacks.on_data(gzip, gzip->arg, gzip->chunk, len);
gzip->callbacks.on_data(gzip, gzip->arg, gzip->chunk, _len);
gzip->gzip->next_out = (z_const Bytef *)gzip->chunk;
gzip->gzip->avail_out = gzip->conf.chunk_size;
}


@ -163,9 +163,9 @@ static int32_t httpOnRequestLine(HttpParser *pParser, char *method, char *target
// parse decode method
for (int32_t i = 0; i < tsHttpServer.methodScannerLen; i++) {
HttpDecodeMethod *method = tsHttpServer.methodScanner[i];
if (strcmp(method->module, pParser->path[0].str) == 0) {
pContext->decodeMethod = method;
HttpDecodeMethod *_method = tsHttpServer.methodScanner[i];
if (strcmp(_method->module, pParser->path[0].str) == 0) {
pContext->decodeMethod = _method;
break;
}
}


@ -209,7 +209,7 @@ void tgParseSchemaMetric(cJSON *metric) {
parsedOk = false;
goto ParseEnd;
}
int32_t nameLen = (int32_t)strlen(field->valuestring);
nameLen = (int32_t)strlen(field->valuestring);
if (nameLen == 0 || nameLen >= TSDB_TABLE_NAME_LEN) {
parsedOk = false;
goto ParseEnd;


@ -116,6 +116,8 @@ int64_t genQueryId(void) {
uid |= sid;
qDebug("gen qid:0x%"PRIx64, uid);
return uid;
}
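genQueryId assembles a 64-bit id by OR-ing component fields into disjoint bit ranges, which keeps ids unique as long as each field stays within its allotted width. A self-contained sketch of the same packing idea (the field widths here are invented for illustration):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Hypothetical layout: 16-bit node id | 32-bit serial | 16-bit sequence. */
static uint64_t pack_id(uint16_t node, uint32_t serial, uint16_t seq) {
    return ((uint64_t)node << 48) | ((uint64_t)serial << 16) | (uint64_t)seq;
}

int main(void) {
    uint64_t qid = pack_id(7, 123456, 42);
    printf("qid:0x%" PRIx64 "\n", qid);  /* mirrors the qDebug("gen qid:0x%"PRIx64, ...) trace */
    return 0;
}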


@ -593,14 +593,14 @@ void tSetDbName(SStrToken *pCpxName, SStrToken *pDb) {
void tSetColumnInfo(TAOS_FIELD *pField, SStrToken *pName, TAOS_FIELD *pType) {
int32_t maxLen = sizeof(pField->name) / sizeof(pField->name[0]);
// truncate the column name
if ((int32_t)pName->n >= maxLen) {
pName->n = maxLen - 1;
// column name is too long, set it to be invalid.
if ((int32_t) pName->n >= maxLen) {
pName->n = -1;
} else {
strncpy(pField->name, pName->z, pName->n);
pField->name[pName->n] = 0;
}
strncpy(pField->name, pName->z, pName->n);
pField->name[pName->n] = 0;
pField->type = pType->type;
if(!isValidDataType(pField->type)){
pField->bytes = 0;
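The behavior change above replaces silent truncation with explicit rejection: an over-long column name used to be clipped to maxLen - 1, now pName->n is set to -1 so later validation can fail the statement instead. A hedged sketch contrasting the two policies (stand-in sizes and names):

#include <string.h>

#define NAME_LEN 8  /* stand-in for sizeof(pField->name) */

/* Returns 0 on success, -1 when the name is too long: reject, don't truncate. */
static int set_name(char dst[NAME_LEN], const char *src, int n) {
    if (n >= NAME_LEN) {
        return -1;        /* old policy: n = NAME_LEN - 1 and copy anyway */
    }
    strncpy(dst, src, n);
    dst[n] = '\0';
    return 0;
}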


@ -13,3 +13,10 @@ IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR)
ADD_EXECUTABLE(queryTest ${SOURCE_LIST})
TARGET_LINK_LIBRARIES(queryTest taos query gtest pthread gcov)
ENDIF()
SET_SOURCE_FILES_PROPERTIES(./astTest.cpp PROPERTIES COMPILE_FLAGS -w)
SET_SOURCE_FILES_PROPERTIES(./histogramTest.cpp PROPERTIES COMPILE_FLAGS -w)
SET_SOURCE_FILES_PROPERTIES(./percentileTest.cpp PROPERTIES COMPILE_FLAGS -w)
SET_SOURCE_FILES_PROPERTIES(./resultBufferTest.cpp PROPERTIES COMPILE_FLAGS -w)
SET_SOURCE_FILES_PROPERTIES(./tsBufTest.cpp PROPERTIES COMPILE_FLAGS -w)
SET_SOURCE_FILES_PROPERTIES(./unitTest.cpp PROPERTIES COMPILE_FLAGS -w)


@ -1471,7 +1471,7 @@ static int32_t rpcCompressRpcMsg(char* pCont, int32_t contLen) {
* only the compressed size is less than the value of contLen - overhead, the compression is applied
* The first four bytes is set to 0, the second four bytes are utilized to keep the original length of message
*/
if (compLen < contLen - overhead) {
if (compLen > 0 && compLen < contLen - overhead) {
SRpcComp *pComp = (SRpcComp *)pCont;
pComp->reserved = 0;
pComp->contLen = htonl(contLen);
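The added compLen > 0 check matters because compressors commonly report failure with a non-positive return value (LZ4_compress_default, for instance, returns 0 when the destination buffer is too small); without the guard, a failed compression could still satisfy compLen < contLen - overhead whenever contLen is large enough. A hedged sketch of the guarded pattern (hypothetical compressor API):

#include <string.h>

/* Hypothetical: returns the compressed size, or 0 on failure. */
int compress_buf(const char *src, char *dst, int srcLen, int dstCap);

/* Only swap in the compressed payload when compression succeeded AND saved space. */
static int maybe_compress(char *cont, int contLen, int overhead, char *tmp, int tmpCap) {
    int compLen = compress_buf(cont, tmp, contLen, tmpCap);
    if (compLen > 0 && compLen < contLen - overhead) {  /* the fixed condition */
        memcpy(cont + overhead, tmp, compLen);          /* header slot keeps the original length */
        return compLen + overhead;
    }
    return contLen;  /* fall back to sending uncompressed */
}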


@ -576,7 +576,7 @@ static void *taosProcessTcpData(void *param) {
}
while (pThreadObj->pHead) {
SFdObj *pFdObj = pThreadObj->pHead;
pFdObj = pThreadObj->pHead;
pThreadObj->pHead = pFdObj->next;
taosReportBrokenLink(pFdObj);
}


@ -389,17 +389,17 @@ int32_t syncForwardToPeer(int64_t rid, void *data, void *mhandle, int32_t qtype,
return code;
}
void syncConfirmForward(int64_t rid, uint64_t version, int32_t code, bool force) {
void syncConfirmForward(int64_t rid, uint64_t _version, int32_t code, bool force) {
SSyncNode *pNode = syncAcquireNode(rid);
if (pNode == NULL) return;
SSyncPeer *pPeer = pNode->pMaster;
if (pPeer && (pNode->quorum > 1 || force)) {
SFwdRsp rsp;
syncBuildSyncFwdRsp(&rsp, pNode->vgId, version, code);
syncBuildSyncFwdRsp(&rsp, pNode->vgId, _version, code);
if (taosWriteMsg(pPeer->peerFd, &rsp, sizeof(SFwdRsp)) == sizeof(SFwdRsp)) {
sTrace("%s, forward-rsp is sent, code:0x%x hver:%" PRIu64, pPeer->id, code, version);
sTrace("%s, forward-rsp is sent, code:0x%x hver:%" PRIu64, pPeer->id, code, _version);
} else {
sDebug("%s, failed to send forward-rsp, restart", pPeer->id);
syncRestartConnection(pPeer);
@ -1302,14 +1302,14 @@ static void syncProcessBrokenLink(int64_t rid) {
syncReleasePeer(pPeer);
}
static int32_t syncSaveFwdInfo(SSyncNode *pNode, uint64_t version, void *mhandle) {
static int32_t syncSaveFwdInfo(SSyncNode *pNode, uint64_t _version, void *mhandle) {
SSyncFwds *pSyncFwds = pNode->pSyncFwds;
int64_t time = taosGetTimestampMs();
if (pSyncFwds->fwds >= SYNC_MAX_FWDS) {
// pSyncFwds->first = (pSyncFwds->first + 1) % SYNC_MAX_FWDS;
// pSyncFwds->fwds--;
sError("vgId:%d, failed to save fwd info, hver:%" PRIu64 " fwds:%d", pNode->vgId, version, pSyncFwds->fwds);
sError("vgId:%d, failed to save fwd info, hver:%" PRIu64 " fwds:%d", pNode->vgId, _version, pSyncFwds->fwds);
return TSDB_CODE_SYN_TOO_MANY_FWDINFO;
}
@ -1319,12 +1319,12 @@ static int32_t syncSaveFwdInfo(SSyncNode *pNode, uint64_t version, void *mhandle
SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + pSyncFwds->last;
memset(pFwdInfo, 0, sizeof(SFwdInfo));
pFwdInfo->version = version;
pFwdInfo->version = _version;
pFwdInfo->mhandle = mhandle;
pFwdInfo->time = time;
pSyncFwds->fwds++;
sTrace("vgId:%d, fwd info is saved, hver:%" PRIu64 " fwds:%d ", pNode->vgId, version, pSyncFwds->fwds);
sTrace("vgId:%d, fwd info is saved, hver:%" PRIu64 " fwds:%d ", pNode->vgId, _version, pSyncFwds->fwds);
return 0;
}


@ -61,13 +61,13 @@ void syncBuildSyncFwdMsg(SSyncHead *pHead, int32_t vgId, int32_t len) {
syncBuildHead(pHead);
}
void syncBuildSyncFwdRsp(SFwdRsp *pMsg, int32_t vgId, uint64_t version, int32_t code) {
void syncBuildSyncFwdRsp(SFwdRsp *pMsg, int32_t vgId, uint64_t _version, int32_t code) {
pMsg->head.type = TAOS_SMSG_SYNC_FWD_RSP;
pMsg->head.vgId = vgId;
pMsg->head.len = sizeof(SFwdRsp) - sizeof(SSyncHead);
syncBuildHead(&pMsg->head);
pMsg->version = version;
pMsg->version = _version;
pMsg->code = code;
}


@ -28,6 +28,7 @@ typedef struct {
int bufBlockSize;
int tBufBlocks;
int nBufBlocks;
int nRecycleBlocks;
int64_t index;
SList* bufBlockList;
} STsdbBufPool;
@ -39,5 +40,7 @@ void tsdbFreeBufPool(STsdbBufPool* pBufPool);
int tsdbOpenBufPool(STsdbRepo* pRepo);
void tsdbCloseBufPool(STsdbRepo* pRepo);
SListNode* tsdbAllocBufBlockFromPool(STsdbRepo* pRepo);
int tsdbExpendPool(STsdbRepo* pRepo, int32_t oldTotalBlocks);
void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode);
#endif /* _TD_TSDB_BUFFER_H_ */


@ -69,7 +69,7 @@ void tsdbFreeMeta(STsdbMeta* pMeta);
int tsdbOpenMeta(STsdbRepo* pRepo);
int tsdbCloseMeta(STsdbRepo* pRepo);
STable* tsdbGetTableByUid(STsdbMeta* pMeta, uint64_t uid);
STSchema* tsdbGetTableSchemaByVersion(STable* pTable, int16_t version);
STSchema* tsdbGetTableSchemaByVersion(STable* pTable, int16_t _version);
int tsdbWLockRepoMeta(STsdbRepo* pRepo);
int tsdbRLockRepoMeta(STsdbRepo* pRepo);
int tsdbUnlockRepoMeta(STsdbRepo* pRepo);
@ -89,16 +89,16 @@ static FORCE_INLINE int tsdbCompareSchemaVersion(const void *key1, const void *k
}
}
static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock, bool copy, int16_t version) {
static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock, bool copy, int16_t _version) {
STable* pDTable = (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ? pTable->pSuper : pTable;
STSchema* pSchema = NULL;
STSchema* pTSchema = NULL;
if (lock) TSDB_RLOCK_TABLE(pDTable);
if (version < 0) { // get the latest version of schema
if (_version < 0) { // get the latest version of schema
pTSchema = pDTable->schema[pDTable->numOfSchemas - 1];
} else { // get the schema with version
void* ptr = taosbsearch(&version, pDTable->schema, pDTable->numOfSchemas, sizeof(STSchema*),
void* ptr = taosbsearch(&_version, pDTable->schema, pDTable->numOfSchemas, sizeof(STSchema*),
tsdbCompareSchemaVersion, TD_EQ);
if (ptr == NULL) {
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;


@ -71,6 +71,11 @@ struct STsdbRepo {
uint8_t state;
STsdbCfg config;
STsdbCfg save_config; // save apply config
bool config_changed; // config changed flag
pthread_mutex_t save_mutex; // protect save config
STsdbAppH appH;
STsdbStat stat;
STsdbMeta* tsdbMeta;


@ -70,6 +70,7 @@ int tsdbOpenBufPool(STsdbRepo *pRepo) {
pPool->tBufBlocks = pCfg->totalBlocks;
pPool->nBufBlocks = 0;
pPool->index = 0;
pPool->nRecycleBlocks = 0;
for (int i = 0; i < pCfg->totalBlocks; i++) {
STsdbBufBlock *pBufBlock = tsdbNewBufBlock(pPool->bufBlockSize);
@ -157,3 +158,45 @@ _err:
}
static void tsdbFreeBufBlock(STsdbBufBlock *pBufBlock) { tfree(pBufBlock); }
int tsdbExpendPool(STsdbRepo* pRepo, int32_t oldTotalBlocks) {
if (oldTotalBlocks == pRepo->config.totalBlocks) {
return TSDB_CODE_SUCCESS;
}
int err = TSDB_CODE_SUCCESS;
if (tsdbLockRepo(pRepo) < 0) return terrno;
STsdbBufPool* pPool = pRepo->pPool;
if (pRepo->config.totalBlocks > oldTotalBlocks) {
for (int i = 0; i < pRepo->config.totalBlocks - oldTotalBlocks; i++) {
STsdbBufBlock *pBufBlock = tsdbNewBufBlock(pPool->bufBlockSize);
if (pBufBlock == NULL) goto err;
if (tdListAppend(pPool->bufBlockList, (void *)(&pBufBlock)) < 0) {
tsdbFreeBufBlock(pBufBlock);
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
err = TSDB_CODE_TDB_OUT_OF_MEMORY;
goto err;
}
pPool->nBufBlocks++;
}
pthread_cond_signal(&pPool->poolNotEmpty);
} else {
pPool->nRecycleBlocks = oldTotalBlocks - pRepo->config.totalBlocks;
}
err:
tsdbUnlockRepo(pRepo);
return err;
}
void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode) {
STsdbBufBlock *pBufBlock = NULL;
tdListNodeGetData(pPool->bufBlockList, pNode, (void *)(&pBufBlock));
tsdbFreeBufBlock(pBufBlock);
free(pNode);
pPool->nBufBlocks--;
}
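Despite the name, tsdbExpendPool handles both directions: growing allocates and appends new blocks immediately (then signals poolNotEmpty), while shrinking only records how many blocks to drop in nRecycleBlocks and leaves the actual frees to the return path in tsdbUnRefMemTable further down in this diff. A condensed sketch of that grow-now/shrink-lazily policy (simplified types, illustrative only):

/* Simplified pool: grow eagerly, shrink lazily as blocks come back. */
typedef struct {
    int nBufBlocks;      /* blocks currently owned by the pool        */
    int nRecycleBlocks;  /* blocks to free as they are returned to us */
} Pool;

static void pool_resize(Pool *p, int oldTotal, int newTotal) {
    if (newTotal > oldTotal) {
        p->nBufBlocks += newTotal - oldTotal;     /* real code allocates and appends here */
    } else {
        p->nRecycleBlocks = oldTotal - newTotal;  /* defer the frees to the return path */
    }
}

static void pool_return_block(Pool *p) {
    if (p->nRecycleBlocks > 0) {
        p->nRecycleBlocks--;                      /* free instead of re-queuing */
        p->nBufBlocks--;
    }
    /* else: append the block back onto the free list */
}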


@ -958,11 +958,11 @@ static int tsdbWriteBlockInfo(SCommitH *pCommih) {
}
static int tsdbWriteBlockIdx(SCommitH *pCommih) {
SBlockIdx *pBlkIdx;
SBlockIdx *pBlkIdx = NULL;
SDFile * pHeadf = TSDB_COMMIT_HEAD_FILE(pCommih);
size_t nidx = taosArrayGetSize(pCommih->aBlkIdx);
int tlen = 0, size;
int64_t offset;
int tlen = 0, size = 0;
int64_t offset = 0;
if (nidx <= 0) {
// All data are deleted


@ -112,6 +112,32 @@ int tsdbScheduleCommit(STsdbRepo *pRepo) {
return 0;
}
static void tsdbApplyRepoConfig(STsdbRepo *pRepo) {
pRepo->config_changed = false;
STsdbCfg * pSaveCfg = &pRepo->save_config;
int32_t oldTotalBlocks = pRepo->config.totalBlocks;
pRepo->config.compression = pRepo->save_config.compression;
pRepo->config.keep = pRepo->save_config.keep;
pRepo->config.keep1 = pRepo->save_config.keep1;
pRepo->config.keep2 = pRepo->save_config.keep2;
pRepo->config.cacheLastRow = pRepo->save_config.cacheLastRow;
pRepo->config.totalBlocks = pRepo->save_config.totalBlocks;
tsdbInfo("vgId:%d apply new config: compression(%d), keep(%d,%d,%d), totalBlocks(%d), cacheLastRow(%d),totalBlocks(%d)",
REPO_ID(pRepo),
pSaveCfg->compression, pSaveCfg->keep,pSaveCfg->keep1, pSaveCfg->keep2,
pSaveCfg->totalBlocks, pSaveCfg->cacheLastRow, pSaveCfg->totalBlocks);
int err = tsdbExpendPool(pRepo, oldTotalBlocks);
if (!TAOS_SUCCEEDED(err)) {
tsdbError("vgId:%d expand pool from %d to %d fail,reason:%s",
REPO_ID(pRepo), oldTotalBlocks, pSaveCfg->totalBlocks, tstrerror(err));
}
}
static void *tsdbLoopCommit(void *arg) {
SCommitQueue *pQueue = &tsCommitQueue;
SListNode * pNode = NULL;
@ -138,6 +164,13 @@ static void *tsdbLoopCommit(void *arg) {
pRepo = ((SCommitReq *)pNode->data)->pRepo;
// check if need to apply new config
if (pRepo->config_changed) {
pthread_mutex_lock(&pRepo->save_mutex);
tsdbApplyRepoConfig(pRepo);
pthread_mutex_unlock(&pRepo->save_mutex);
}
tsdbCommitData(pRepo);
listNodeFree(pNode);
}
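The staged-config handoff works in two steps: a control thread writes the new values into save_config under save_mutex and raises config_changed (tsdbConfigRepo, later in this diff), and the commit thread applies them at a safe point before committing data. A minimal sketch of that one-writer/one-applier handoff (simplified config struct, assumptions labeled in comments):

#include <pthread.h>
#include <stdbool.h>

typedef struct { int keep; int totalBlocks; } Cfg;

typedef struct {
    Cfg             config;       /* live copy, touched only by the commit thread */
    Cfg             save_config;  /* staged copy, written by the control thread   */
    bool            config_changed;
    pthread_mutex_t save_mutex;
} Repo;

/* Control thread: stage new values and raise the flag. */
static void stage_config(Repo *r, const Cfg *next) {
    pthread_mutex_lock(&r->save_mutex);
    r->save_config = *next;
    r->config_changed = true;
    pthread_mutex_unlock(&r->save_mutex);
}

/* Commit thread: apply at a safe point, before committing data. */
static void maybe_apply_config(Repo *r) {
    if (!r->config_changed) return;  /* cheap check outside the lock */
    pthread_mutex_lock(&r->save_mutex);
    r->config = r->save_config;      /* the tsdbApplyRepoConfig step */
    r->config_changed = false;
    pthread_mutex_unlock(&r->save_mutex);
}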


@ -957,10 +957,10 @@ static int tsdbRestoreMeta(STsdbRepo *pRepo) {
regfree(&regex);
return -1;
} else {
uint32_t version = 0;
uint32_t _version = 0;
if (strcmp(bname, "meta") != 0) {
sscanf(bname, "meta-ver%" PRIu32, &version);
pfs->cstatus->meta.version = version;
sscanf(bname, "meta-ver%" PRIu32, &_version);
pfs->cstatus->meta.version = _version;
}
pfs->cstatus->pmf = &(pfs->cstatus->mf);
@ -1103,10 +1103,10 @@ static int tsdbRestoreDFileSet(STsdbRepo *pRepo) {
int tvid, tfid;
TSDB_FILE_T ttype;
uint32_t tversion;
char bname[TSDB_FILENAME_LEN];
char _bname[TSDB_FILENAME_LEN];
tfsbasename(pf, bname);
tsdbParseDFilename(bname, &tvid, &tfid, &ttype, &tversion);
tfsbasename(pf, _bname);
tsdbParseDFilename(_bname, &tvid, &tfid, &ttype, &tversion);
ASSERT(tvid == REPO_ID(pRepo));


@ -410,7 +410,7 @@ int tsdbUpdateDFileHeader(SDFile *pDFile) {
int tsdbLoadDFileHeader(SDFile *pDFile, SDFInfo *pInfo) {
char buf[TSDB_FILE_HEAD_SIZE] = "\0";
uint32_t version;
uint32_t _version;
ASSERT(TSDB_FILE_OPENED(pDFile));
@ -428,7 +428,7 @@ int tsdbLoadDFileHeader(SDFile *pDFile, SDFInfo *pInfo) {
}
void *pBuf = buf;
pBuf = taosDecodeFixedU32(pBuf, &version);
pBuf = taosDecodeFixedU32(pBuf, &_version);
pBuf = tsdbDecodeDFInfo(pBuf, pInfo);
return 0;
}
@ -660,12 +660,12 @@ int tsdbScanAndTryFixDFileSet(STsdbRepo *pRepo, SDFileSet *pSet) {
return 0;
}
int tsdbParseDFilename(const char *fname, int *vid, int *fid, TSDB_FILE_T *ftype, uint32_t *version) {
int tsdbParseDFilename(const char *fname, int *vid, int *fid, TSDB_FILE_T *ftype, uint32_t *_version) {
char *p = NULL;
*version = 0;
*_version = 0;
*ftype = TSDB_FILE_MAX;
sscanf(fname, "v%df%d.%m[a-z]-ver%" PRIu32, vid, fid, &p, version);
sscanf(fname, "v%df%d.%m[a-z]-ver%" PRIu32, vid, fid, &p, _version);
for (TSDB_FILE_T i = 0; i < TSDB_FILE_MAX; i++) {
if (strcmp(p, TSDB_FNAME_SUFFIX[i]) == 0) {
*ftype = i;
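tsdbParseDFilename leans on sscanf's POSIX %m modifier: the m tells sscanf to allocate the matched suffix string itself and hand ownership to the caller. A standalone sketch of parsing the same v<vid>f<fid>.<suffix>-ver<version> shape (glibc or another POSIX.1-2008 libc assumed):

#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>

int main(void) {
    int vid = 0, fid = 0;
    uint32_t ver = 0;
    char *suffix = NULL;  /* allocated by sscanf because of %m */

    if (sscanf("v3f1800.head-ver2", "v%df%d.%m[a-z]-ver%" SCNu32,
               &vid, &fid, &suffix, &ver) == 4) {
        printf("vid=%d fid=%d type=%s ver=%" PRIu32 "\n", vid, fid, suffix, ver);
    }
    free(suffix);  /* the %m allocation is the caller's to free */
    return 0;
}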


@ -203,6 +203,70 @@ void tsdbReportStat(void *repo, int64_t *totalPoints, int64_t *totalStorage, int
int32_t tsdbConfigRepo(STsdbRepo *repo, STsdbCfg *pCfg) {
// TODO: think about multithread cases
if (tsdbCheckAndSetDefaultCfg(pCfg) < 0) return -1;
STsdbCfg * pRCfg = &repo->config;
ASSERT(pRCfg->tsdbId == pCfg->tsdbId);
ASSERT(pRCfg->cacheBlockSize == pCfg->cacheBlockSize);
ASSERT(pRCfg->daysPerFile == pCfg->daysPerFile);
ASSERT(pRCfg->minRowsPerFileBlock == pCfg->minRowsPerFileBlock);
ASSERT(pRCfg->maxRowsPerFileBlock == pCfg->maxRowsPerFileBlock);
ASSERT(pRCfg->precision == pCfg->precision);
bool configChanged = false;
if (pRCfg->compression != pCfg->compression) {
configChanged = true;
}
if (pRCfg->keep != pCfg->keep) {
configChanged = true;
}
if (pRCfg->keep1 != pCfg->keep1) {
configChanged = true;
}
if (pRCfg->keep2 != pCfg->keep2) {
configChanged = true;
}
if (pRCfg->cacheLastRow != pCfg->cacheLastRow) {
configChanged = true;
}
if (pRCfg->totalBlocks != pCfg->totalBlocks) {
configChanged = true;
}
if (!configChanged) {
tsdbError("vgId:%d no config changed", REPO_ID(repo));
}
int code = pthread_mutex_lock(&repo->save_mutex);
if (code != 0) {
tsdbError("vgId:%d failed to lock tsdb save config mutex since %s", REPO_ID(repo), strerror(errno));
terrno = TAOS_SYSTEM_ERROR(code);
return -1;
}
STsdbCfg * pSaveCfg = &repo->save_config;
*pSaveCfg = repo->config;
pSaveCfg->compression = pCfg->compression;
pSaveCfg->keep = pCfg->keep;
pSaveCfg->keep1 = pCfg->keep1;
pSaveCfg->keep2 = pCfg->keep2;
pSaveCfg->cacheLastRow = pCfg->cacheLastRow;
pSaveCfg->totalBlocks = pCfg->totalBlocks;
tsdbInfo("vgId:%d old config: compression(%d), keep(%d,%d,%d), cacheLastRow(%d),totalBlocks(%d)",
REPO_ID(repo),
pRCfg->compression, pRCfg->keep, pRCfg->keep1,pRCfg->keep2,
pRCfg->cacheLastRow, pRCfg->totalBlocks);
tsdbInfo("vgId:%d new config: compression(%d), keep(%d,%d,%d), cacheLastRow(%d),totalBlocks(%d)",
REPO_ID(repo),
pSaveCfg->compression, pSaveCfg->keep,pSaveCfg->keep1, pSaveCfg->keep2,
pSaveCfg->cacheLastRow,pSaveCfg->totalBlocks);
repo->config_changed = true;
pthread_mutex_unlock(&repo->save_mutex);
return 0;
#if 0
STsdbRepo *pRepo = (STsdbRepo *)repo;
@ -474,6 +538,14 @@ static STsdbRepo *tsdbNewRepo(STsdbCfg *pCfg, STsdbAppH *pAppH) {
return NULL;
}
code = pthread_mutex_init(&(pRepo->save_mutex), NULL);
if (code != 0) {
terrno = TAOS_SYSTEM_ERROR(code);
tsdbFreeRepo(pRepo);
return NULL;
}
pRepo->config_changed = false;
code = tsem_init(&(pRepo->readyToCommit), 0, 1);
if (code != 0) {
code = errno;


@ -98,17 +98,26 @@ int tsdbUnRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) {
STsdbBufPool *pBufPool = pRepo->pPool;
SListNode *pNode = NULL;
bool recycleBlocks = pBufPool->nRecycleBlocks > 0;
if (tsdbLockRepo(pRepo) < 0) return -1;
while ((pNode = tdListPopHead(pMemTable->bufBlockList)) != NULL) {
tdListAppendNode(pBufPool->bufBlockList, pNode);
if (pBufPool->nRecycleBlocks > 0) {
tsdbRecycleBufferBlock(pBufPool, pNode);
pBufPool->nRecycleBlocks -= 1;
} else {
tdListAppendNode(pBufPool->bufBlockList, pNode);
}
}
int code = pthread_cond_signal(&pBufPool->poolNotEmpty);
if (code != 0) {
if (tsdbUnlockRepo(pRepo) < 0) return -1;
tsdbError("vgId:%d failed to signal pool not empty since %s", REPO_ID(pRepo), strerror(code));
terrno = TAOS_SYSTEM_ERROR(code);
return -1;
if (!recycleBlocks) {
int code = pthread_cond_signal(&pBufPool->poolNotEmpty);
if (code != 0) {
if (tsdbUnlockRepo(pRepo) < 0) return -1;
tsdbError("vgId:%d failed to signal pool not empty since %s", REPO_ID(pRepo), strerror(code));
terrno = TAOS_SYSTEM_ERROR(code);
return -1;
}
}
if (tsdbUnlockRepo(pRepo) < 0) return -1;
for (int i = 0; i < pMemTable->maxTables; i++) {
@ -958,6 +967,15 @@ static void tsdbFreeRows(STsdbRepo *pRepo, void **rows, int rowCounter) {
static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SDataRow row) {
STsdbCfg *pCfg = &pRepo->config;
// if cacheLastRow config has been reset, free the lastRow
if (!pCfg->cacheLastRow && pTable->lastRow != NULL) {
taosTZfree(pTable->lastRow);
TSDB_WLOCK_TABLE(pTable);
pTable->lastRow = NULL;
pTable->lastKey = TSKEY_INITIAL_VAL;
TSDB_WUNLOCK_TABLE(pTable);
}
if (tsdbGetTableLastKeyImpl(pTable) < dataRowKey(row)) {
if (pCfg->cacheLastRow || pTable->lastRow != NULL) {
SDataRow nrow = pTable->lastRow;


@ -531,8 +531,8 @@ STable *tsdbGetTableByUid(STsdbMeta *pMeta, uint64_t uid) {
return *(STable **)ptr;
}
STSchema *tsdbGetTableSchemaByVersion(STable *pTable, int16_t version) {
return tsdbGetTableSchemaImpl(pTable, true, false, version);
STSchema *tsdbGetTableSchemaByVersion(STable *pTable, int16_t _version) {
return tsdbGetTableSchemaImpl(pTable, true, false, _version);
}
int tsdbWLockRepoMeta(STsdbRepo *pRepo) {
@ -891,9 +891,9 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro
maxCols = 0;
maxRowBytes = 0;
for (int i = 0; i < pMeta->maxTables; i++) {
STable *pTable = pMeta->tables[i];
if (pTable != NULL) {
pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
STable *_pTable = pMeta->tables[i];
if (_pTable != NULL) {
pSchema = tsdbGetTableSchemaImpl(_pTable, false, false, -1);
maxCols = MAX(maxCols, schemaNCols(pSchema));
maxRowBytes = MAX(maxRowBytes, schemaTLen(pSchema));
}


@ -155,26 +155,26 @@ void generate_key(unsigned char* key) {
}
}
void print_key_set(key_set key_set) {
void print_key_set(key_set _key_set) {
int i;
printf("K: \n");
for (i = 0; i < 8; i++) {
printf("%02X : ", key_set.k[i]);
print_char_as_binary(key_set.k[i]);
printf("%02X : ", _key_set.k[i]);
print_char_as_binary(_key_set.k[i]);
printf("\n");
}
printf("\nC: \n");
for (i = 0; i < 4; i++) {
printf("%02X : ", key_set.c[i]);
print_char_as_binary(key_set.c[i]);
printf("%02X : ", _key_set.c[i]);
print_char_as_binary(_key_set.c[i]);
printf("\n");
}
printf("\nD: \n");
for (i = 0; i < 4; i++) {
printf("%02X : ", key_set.d[i]);
print_char_as_binary(key_set.d[i]);
printf("%02X : ", _key_set.d[i]);
print_char_as_binary(_key_set.d[i]);
printf("\n");
}
printf("\n");


@ -51,7 +51,6 @@ SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint16_t keyLen, _
pSkipList->flags = flags;
pSkipList->keyFn = fn;
pSkipList->seed = rand();
if (comparFn == NULL) {
pSkipList->comparFn = getKeyComparFunc(keyType);
} else {
@ -547,7 +546,11 @@ static FORCE_INLINE int32_t getSkipListNodeRandomHeight(SSkipList *pSkipList) {
const uint32_t factor = 4;
int32_t n = 1;
#if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)
while ((rand() % factor) == 0 && n <= pSkipList->maxLevel) {
#else
while ((rand_r(&(pSkipList->seed)) % factor) == 0 && n <= pSkipList->maxLevel) {
#endif
n++;
}
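The new #ifdef exists because Windows has no rand_r; on POSIX builds the per-list seed (pSkipList->seed, set from rand() at creation) makes level generation reentrant instead of sharing rand()'s hidden global state. The loop draws a geometric level: each extra level has probability 1/4. A hedged standalone version of that draw:

#include <stdlib.h>

/* Draw a skip-list node level: each additional level has probability 1/factor. */
static int random_level(unsigned int *seed, int maxLevel) {
    const unsigned int factor = 4;
    int n = 1;
#if defined(_WIN32)
    while ((rand() % factor) == 0 && n < maxLevel) n++;        /* no rand_r on Windows */
#else
    while ((rand_r(seed) % factor) == 0 && n < maxLevel) n++;  /* reentrant, per-list seed */
#endif
    return n;
}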


@ -170,29 +170,31 @@ static int32_t vnodeAlterImp(SVnodeObj *pVnode, SCreateVnodeMsg *pVnodeCfg) {
vDebug("vgId:%d, tsdbchanged:%d syncchanged:%d while alter vnode", pVnode->vgId, tsdbCfgChanged, syncCfgChanged);
if (/*tsdbCfgChanged || */syncCfgChanged) {
if (tsdbCfgChanged || syncCfgChanged) {
// vnode in non-ready state and still needs to return success instead of TSDB_CODE_VND_INVALID_STATUS
// dbCfgVersion can be corrected by status msg
if (!vnodeSetUpdatingStatus(pVnode)) {
vDebug("vgId:%d, vnode is not ready, do alter operation later", pVnode->vgId);
pVnode->dbCfgVersion = dbCfgVersion;
pVnode->vgCfgVersion = vgCfgVersion;
pVnode->syncCfg = syncCfg;
pVnode->tsdbCfg = tsdbCfg;
return TSDB_CODE_SUCCESS;
if (syncCfgChanged) {
if (!vnodeSetUpdatingStatus(pVnode)) {
vDebug("vgId:%d, vnode is not ready, do alter operation later", pVnode->vgId);
pVnode->dbCfgVersion = dbCfgVersion;
pVnode->vgCfgVersion = vgCfgVersion;
pVnode->syncCfg = syncCfg;
pVnode->tsdbCfg = tsdbCfg;
return TSDB_CODE_SUCCESS;
}
code = syncReconfig(pVnode->sync, &pVnode->syncCfg);
if (code != TSDB_CODE_SUCCESS) {
pVnode->dbCfgVersion = dbCfgVersion;
pVnode->vgCfgVersion = vgCfgVersion;
pVnode->syncCfg = syncCfg;
pVnode->tsdbCfg = tsdbCfg;
vnodeSetReadyStatus(pVnode);
return code;
}
}
code = syncReconfig(pVnode->sync, &pVnode->syncCfg);
if (code != TSDB_CODE_SUCCESS) {
pVnode->dbCfgVersion = dbCfgVersion;
pVnode->vgCfgVersion = vgCfgVersion;
pVnode->syncCfg = syncCfg;
pVnode->tsdbCfg = tsdbCfg;
vnodeSetReadyStatus(pVnode);
return code;
}
if (pVnode->tsdb) {
if (tsdbCfgChanged && pVnode->tsdb) {
code = tsdbConfigRepo(pVnode->tsdb, &pVnode->tsdbCfg);
if (code != TSDB_CODE_SUCCESS) {
pVnode->dbCfgVersion = dbCfgVersion;


@ -199,7 +199,7 @@ int32_t walRestore(void *handle, void *pVnode, FWalWrite writeFp) {
snprintf(walName, sizeof(pWal->name), "%s/%s%" PRId64, pWal->path, WAL_PREFIX, fileId);
wInfo("vgId:%d, file:%s, will be restored", pWal->vgId, walName);
int32_t code = walRestoreWalFile(pWal, pVnode, writeFp, walName, fileId);
code = walRestoreWalFile(pWal, pVnode, writeFp, walName, fileId);
if (code != TSDB_CODE_SUCCESS) {
wError("vgId:%d, file:%s, failed to restore since %s", pWal->vgId, walName, tstrerror(code));
continue;
@ -426,8 +426,8 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch
#endif
offset = offset + sizeof(SWalHead) + pHead->len;
wTrace("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d", pWal->vgId,
fileId, pHead->version, pWal->version, pHead->len);
wTrace("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d offset:%" PRId64,
pWal->vgId, fileId, pHead->version, pWal->version, pHead->len, offset);
pWal->version = pHead->version;
(*writeFp)(pVnode, pHead, TAOS_QTYPE_WAL, NULL);


@ -8,8 +8,8 @@
3. mkdir debug; cd debug; cmake ..; make ; sudo make install
4. pip install ../src/connector/python/linux/python2 ; pip3 install
../src/connector/python/linux/python3
4. pip install ../src/connector/python ; pip3 install
../src/connector/python
5. pip install numpy; pip3 install numpy (numpy is required only if you need to run querySort.py)

tests/Jenkinsfile vendored

@ -21,7 +21,7 @@ def pre_test(){
cmake .. > /dev/null
make > /dev/null
make install > /dev/null
pip3 install ${WKC}/src/connector/python/linux/python3/
pip3 install ${WKC}/src/connector/python
'''
return 1
}


@ -189,8 +189,8 @@ void writeDataImp(void *param) {
counter++;
if (counter >= arguments.rowsPerRequest) {
TAOS_RES *result = taos_query(taos, sql);
int32_t code = taos_errno(result);
result = taos_query(taos, sql);
code = taos_errno(result);
if (code != 0) {
printf("thread:%d error:%d reason:%s\n", pThread->threadId, code, taos_errstr(taos));
}
@ -207,8 +207,8 @@ void writeDataImp(void *param) {
}
if (counter > 0) {
TAOS_RES *result = taos_query(taos, sql);
int32_t code = taos_errno(result);
result = taos_query(taos, sql);
code = taos_errno(result);
if (code != 0) {
printf("thread:%d error:%d reason:%s\n", pThread->threadId, code, taos_errstr(taos));
}

tests/fuzz/sql-fuzzer.c Normal file

@ -0,0 +1,15 @@
#include "qSqlparser.h"
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size){
char *new_str = (char *)malloc(size+1);
if (new_str == NULL){
return 0;
}
memcpy(new_str, data, size);
new_str[size] = '\0';
qSqlParse(new_str);
free(new_str);
return 0;
}
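The new harness is a libFuzzer target: the fuzzing runtime supplies main() and calls LLVMFuzzerTestOneInput in a loop with mutated buffers (typically built with something like clang -g -fsanitize=fuzzer,address). To replay a single saved input without libFuzzer, a small driver in this style is a common companion (hypothetical, not part of this commit):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

extern int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size);

/* Feed one saved corpus file to the harness exactly once. */
int main(int argc, char **argv) {
    if (argc != 2) {
        fprintf(stderr, "usage: %s <input-file>\n", argv[0]);
        return 1;
    }
    FILE *f = fopen(argv[1], "rb");
    if (f == NULL) { perror("fopen"); return 1; }
    fseek(f, 0, SEEK_END);
    long n = ftell(f);
    fseek(f, 0, SEEK_SET);
    uint8_t *buf = malloc(n > 0 ? (size_t)n : 1);
    if (buf == NULL) { fclose(f); return 1; }
    size_t got = fread(buf, 1, (size_t)n, f);
    fclose(f);
    LLVMFuzzerTestOneInput(buf, got);
    free(buf);
    return 0;
}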


@ -48,7 +48,7 @@ fi
PYTHON_EXEC=python3.8
# First we need to set up a path for Python to find our own TAOS modules, so that "import" can work.
export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3:$(pwd)
export PYTHONPATH=$(pwd)/../../src/connector/python:$(pwd)
# Then let us set up the library path so that our compiled SO file can be loaded by Python
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIB_DIR


@ -48,7 +48,7 @@ fi
PYTHON_EXEC=python3.8
# First we need to set up a path for Python to find our own TAOS modules, so that "import" can work.
export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3:$(pwd)
export PYTHONPATH=$(pwd)/../../src/connector/python:$(pwd)
# Then let us set up the library path so that our compiled SO file can be loaded by Python
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIB_DIR


@ -0,0 +1,2 @@
# Helpful Ref: https://stackoverflow.com/questions/24100558/how-can-i-split-a-module-into-multiple-files-without-breaking-a-backwards-compa/24100645
from crash_gen.service_manager import ServiceManager, TdeInstance, TdeSubProcess


@ -1,6 +1,6 @@
# -----!/usr/bin/python3.7
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# Copyright (c) 2016-2021 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
@ -15,7 +15,7 @@
# https://stackoverflow.com/questions/33533148/how-do-i-specify-that-the-return-type-of-a-method-is-the-same-as-the-class-itsel
from __future__ import annotations
from typing import Set
from typing import Any, Set, Tuple
from typing import Dict
from typing import List
from typing import Optional # Type hinting, ref: https://stackoverflow.com/questions/19202633/python-3-type-hinting-for-none
@ -24,29 +24,34 @@ import textwrap
import time
import datetime
import random
import logging
import threading
import copy
import argparse
import getopt
import sys
import os
import io
import signal
import traceback
import resource
import requests
# from guppy import hpy
import gc
from crash_gen.service_manager import ServiceManager, TdeInstance
from crash_gen.misc import Logging, Status, CrashGenError, Dice, Helper, Progress
from crash_gen.db import DbConn, MyTDSql, DbConnNative, DbManager
import crash_gen.settings
import taos
import requests
crash_gen.settings.init()
from .shared.types import TdColumns, TdTags
# from crash_gen import ServiceManager, TdeInstance, TdeSubProcess
# from crash_gen import ServiceManager, Config, DbConn, DbConnNative, Dice, DbManager, Status, Logging, Helper, \
# CrashGenError, Progress, MyTDSql, \
# TdeInstance
from .service_manager import ServiceManager, TdeInstance
from .shared.config import Config
from .shared.db import DbConn, DbManager, DbConnNative, MyTDSql
from .shared.misc import Dice, Logging, Helper, Status, CrashGenError, Progress
from .shared.types import TdDataType
# Config.init()
# Require Python 3
if sys.version_info[0] < 3:
@ -56,8 +61,8 @@ if sys.version_info[0] < 3:
# Command-line/Environment Configurations, will set a bit later
# ConfigNameSpace = argparse.Namespace
gConfig: argparse.Namespace
gSvcMgr: ServiceManager # TODO: refactor this hack, use dep injection
# gConfig: argparse.Namespace
gSvcMgr: Optional[ServiceManager] # TODO: refactor this hack, use dep injection
# logger: logging.Logger
gContainer: Container
@ -80,20 +85,20 @@ class WorkerThread:
self._stepGate = threading.Event()
# Let us have a DB connection of our own
if (gConfig.per_thread_db_connection): # type: ignore
if (Config.getConfig().per_thread_db_connection): # type: ignore
# print("connector_type = {}".format(gConfig.connector_type))
tInst = gContainer.defTdeInstance
if gConfig.connector_type == 'native':
if Config.getConfig().connector_type == 'native':
self._dbConn = DbConn.createNative(tInst.getDbTarget())
elif gConfig.connector_type == 'rest':
elif Config.getConfig().connector_type == 'rest':
self._dbConn = DbConn.createRest(tInst.getDbTarget())
elif gConfig.connector_type == 'mixed':
elif Config.getConfig().connector_type == 'mixed':
if Dice.throw(2) == 0: # 1/2 chance
self._dbConn = DbConn.createNative()
self._dbConn = DbConn.createNative(tInst.getDbTarget())
else:
self._dbConn = DbConn.createRest()
self._dbConn = DbConn.createRest(tInst.getDbTarget())
else:
raise RuntimeError("Unexpected connector type: {}".format(gConfig.connector_type))
raise RuntimeError("Unexpected connector type: {}".format(Config.getConfig().connector_type))
# self._dbInUse = False # if "use db" was executed already
@ -122,14 +127,14 @@ class WorkerThread:
# self.isSleeping = False
Logging.info("Starting to run thread: {}".format(self._tid))
if (gConfig.per_thread_db_connection): # type: ignore
if (Config.getConfig().per_thread_db_connection): # type: ignore
Logging.debug("Worker thread openning database connection")
self._dbConn.open()
self._doTaskLoop()
# clean up
if (gConfig.per_thread_db_connection): # type: ignore
if (Config.getConfig().per_thread_db_connection): # type: ignore
if self._dbConn.isOpen: #sometimes it is not open
self._dbConn.close()
else:
@ -157,7 +162,7 @@ class WorkerThread:
# Before we fetch the task and run it, let's ensure we properly "use" the database (not needed any more)
try:
if (gConfig.per_thread_db_connection): # most likely TRUE
if (Config.getConfig().per_thread_db_connection): # most likely TRUE
if not self._dbConn.isOpen: # might have been closed during server auto-restart
self._dbConn.open()
# self.useDb() # might encounter exceptions. TODO: catch
@ -231,7 +236,7 @@ class WorkerThread:
return self.getDbConn().getQueryResult()
def getDbConn(self) -> DbConn :
if (gConfig.per_thread_db_connection):
if (Config.getConfig().per_thread_db_connection):
return self._dbConn
else:
return self._tc.getDbManager().getDbConn()
@ -253,7 +258,7 @@ class ThreadCoordinator:
self._pool = pool
# self._wd = wd
self._te = None # prepare for every new step
self._dbManager = dbManager
self._dbManager = dbManager # type: Optional[DbManager] # may be freed
self._executedTasks: List[Task] = [] # in a given step
self._lock = threading.RLock() # sync access for a few things
@ -265,9 +270,13 @@ class ThreadCoordinator:
self._stepStartTime = None # Track how long it takes to execute each step
def getTaskExecutor(self):
if self._te is None:
raise CrashGenError("Unexpected empty TE")
return self._te
def getDbManager(self) -> DbManager:
if self._dbManager is None:
raise ChildProcessError("Unexpected empty _dbManager")
return self._dbManager
def crossStepBarrier(self, timeout=None):
@ -278,7 +287,7 @@ class ThreadCoordinator:
self._execStats.registerFailure("User Interruption")
def _runShouldEnd(self, transitionFailed, hasAbortedTask, workerTimeout):
maxSteps = gConfig.max_steps # type: ignore
maxSteps = Config.getConfig().max_steps # type: ignore
if self._curStep >= (maxSteps - 1): # maxStep==10, last curStep should be 9
return True
if self._runStatus != Status.STATUS_RUNNING:
@ -383,7 +392,7 @@ class ThreadCoordinator:
hasAbortedTask = False
workerTimeout = False
while not self._runShouldEnd(transitionFailed, hasAbortedTask, workerTimeout):
if not gConfig.debug: # print this only if we are not in debug mode
if not Config.getConfig().debug: # print this only if we are not in debug mode
Progress.emit(Progress.STEP_BOUNDARY)
# print(".", end="", flush=True)
# if (self._curStep % 2) == 0: # print memory usage once every 10 steps
@ -468,7 +477,7 @@ class ThreadCoordinator:
self._pool = None
self._te = None
self._dbManager = None
self._executedTasks = None
self._executedTasks = []
self._lock = None
self._stepBarrier = None
self._execStats = None
@ -507,18 +516,18 @@ class ThreadCoordinator:
''' Initialize multiple databases, invoked at __init__() time '''
self._dbs = [] # type: List[Database]
dbc = self.getDbManager().getDbConn()
if gConfig.max_dbs == 0:
if Config.getConfig().max_dbs == 0:
self._dbs.append(Database(0, dbc))
else:
baseDbNumber = int(datetime.datetime.now().timestamp( # Don't use Dice/random, as they are deterministic
)*333) % 888 if gConfig.dynamic_db_table_names else 0
for i in range(gConfig.max_dbs):
)*333) % 888 if Config.getConfig().dynamic_db_table_names else 0
for i in range(Config.getConfig().max_dbs):
self._dbs.append(Database(baseDbNumber + i, dbc))
def pickDatabase(self):
idxDb = 0
if gConfig.max_dbs != 0 :
idxDb = Dice.throw(gConfig.max_dbs) # 0 to N-1
if Config.getConfig().max_dbs != 0 :
idxDb = Dice.throw(Config.getConfig().max_dbs) # 0 to N-1
db = self._dbs[idxDb] # type: Database
return db
@ -562,7 +571,7 @@ class ThreadPool:
workerThread._thread.join()
def cleanup(self):
self.threadList = None # maybe clean up each?
self.threadList = [] # maybe clean up each?
# A queue of contiguous POSITIVE integers, used by DbManager to generate continuous numbers
# for new table names
@ -672,7 +681,7 @@ class AnyState:
# Each sub state tells us the "info", about itself, so we can determine
# on things like canDropDB()
def getInfo(self):
def getInfo(self) -> List[Any]:
raise RuntimeError("Must be overriden by child classes")
def equals(self, other):
@ -700,7 +709,7 @@ class AnyState:
def canDropDb(self):
# If user requests to run up to a number of DBs,
# we'd then not do drop_db operations any more
if gConfig.max_dbs > 0 or gConfig.use_shadow_db :
if Config.getConfig().max_dbs > 0 or Config.getConfig().use_shadow_db :
return False
return self._info[self.CAN_DROP_DB]
@ -708,7 +717,7 @@ class AnyState:
return self._info[self.CAN_CREATE_FIXED_SUPER_TABLE]
def canDropFixedSuperTable(self):
if gConfig.use_shadow_db: # duplicate writes to shadow DB, in which case let's disable dropping s-table
if Config.getConfig().use_shadow_db: # duplicate writes to shadow DB, in which case let's disable dropping s-table
return False
return self._info[self.CAN_DROP_FIXED_SUPER_TABLE]
@ -910,7 +919,7 @@ class StateMechine:
# May be slow, use cautiously...
def getTaskTypes(self): # those that can run (directly/indirectly) from the current state
def typesToStrings(types):
def typesToStrings(types) -> List:
ss = []
for t in types:
ss.append(t.__name__)
@ -1029,13 +1038,14 @@ class StateMechine:
# ref:
# https://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python/
def _weighted_choice_sub(self, weights):
def _weighted_choice_sub(self, weights) -> int:
# TODO: use our dice to ensure it is deterministic?
rnd = random.random() * sum(weights)
for i, w in enumerate(weights):
rnd -= w
if rnd < 0:
return i
raise CrashGenError("Unexpected no choice")
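# A minimal, self-contained sketch of the weighted-selection idea used by
# _weighted_choice_sub() above; names and the example weights are
# illustrative, not from the source. Each index is returned with
# probability proportional to its weight.
import random

def weighted_choice(weights) -> int:
    rnd = random.random() * sum(weights)  # a point in [0, sum(weights))
    for i, w in enumerate(weights):
        rnd -= w                          # walk the cumulative distribution
        if rnd < 0:
            return i
    raise RuntimeError("no choice made; weights may be all zero")

# weighted_choice([1, 1, 2]) yields index 2 about half the time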
class Database:
''' We use this to represent an actual TDengine database inside a service instance,
@ -1047,8 +1057,8 @@ class Database:
'''
_clsLock = threading.Lock() # class wide lock
_lastInt = 101 # next one is initial integer
_lastTick = 0
_lastLaggingTick = 0 # lagging tick, for out-of-sequence (oos) data insertions
_lastTick = None # Optional[datetime]
_lastLaggingTick = None # Optional[datetime] # lagging tick, for out-of-sequence (oos) data insertions
def __init__(self, dbNum: int, dbc: DbConn): # TODO: remove dbc
self._dbNum = dbNum # we assign a number to databases, for our testing purpose
@ -1104,7 +1114,7 @@ class Database:
t3 = datetime.datetime(2012, 1, 1) # default "keep" is 10 years
t4 = datetime.datetime.fromtimestamp(
t3.timestamp() + elSec2) # see explanation above
Logging.info("Setting up TICKS to start from: {}".format(t4))
Logging.debug("Setting up TICKS to start from: {}".format(t4))
return t4
@classmethod
@ -1113,14 +1123,14 @@ class Database:
Fetch a timestamp tick, with some random factor, may not be unique.
'''
with cls._clsLock: # prevent duplicate tick
if cls._lastLaggingTick==0 or cls._lastTick==0 : # not initialized
if cls._lastLaggingTick is None or cls._lastTick is None : # not initialized
# 10k at 1/20 chance, should be enough to avoid overlaps
tick = cls.setupLastTick()
cls._lastTick = tick
cls._lastLaggingTick = tick + datetime.timedelta(0, -60*2) # lagging behind 2 minutes, should catch up fast
# if : # should be quite a bit into the future
if gConfig.mix_oos_data and Dice.throw(20) == 0: # if asked to do so, and 1 in 20 chance, return lagging tick
if Config.isSet('mix_oos_data') and Dice.throw(20) == 0: # if asked to do so, and 1 in 20 chance, return lagging tick
cls._lastLaggingTick += datetime.timedelta(0, 1) # pick the next sequence from the lagging tick sequence
return cls._lastLaggingTick
else: # regular
@ -1302,10 +1312,10 @@ class Task():
]:
return True # These are the ALWAYS-ACCEPTABLE ones
# This case handled below already.
# elif (errno in [ 0x0B ]) and gConfig.auto_start_service:
# elif (errno in [ 0x0B ]) and Settings.getConfig().auto_start_service:
# return True # We may get "network unavilable" when restarting service
elif gConfig.ignore_errors: # something is specified on command line
moreErrnos = [int(v, 0) for v in gConfig.ignore_errors.split(',')]
elif Config.getConfig().ignore_errors: # something is specified on command line
moreErrnos = [int(v, 0) for v in Config.getConfig().ignore_errors.split(',')]
if errno in moreErrnos:
return True
elif errno == 0x200 : # invalid SQL, we need to dive in a bit more
@ -1341,7 +1351,7 @@ class Task():
self._executeInternal(te, wt) # TODO: no return value?
except taos.error.ProgrammingError as err:
errno2 = Helper.convertErrno(err.errno)
if (gConfig.continue_on_exception): # user choose to continue
if (Config.getConfig().continue_on_exception): # user choose to continue
self.logDebug("[=] Continue after TAOS exception: errno=0x{:X}, msg: {}, SQL: {}".format(
errno2, err, wt.getDbConn().getLastSql()))
self._err = err
@ -1356,7 +1366,7 @@ class Task():
self.__class__.__name__,
errno2, err, wt.getDbConn().getLastSql())
self.logDebug(errMsg)
if gConfig.debug:
if Config.getConfig().debug:
# raise # so that we see full stack
traceback.print_exc()
print(
@ -1370,13 +1380,13 @@ class Task():
self._err = e
self._aborted = True
traceback.print_exc()
except BaseException as e:
except BaseException as e2:
self.logInfo("Python base exception encountered")
self._err = e
# self._err = e2 # Exception/BaseException incompatible!
self._aborted = True
traceback.print_exc()
except BaseException: # TODO: what is this again??!!
raise RuntimeError("Punt")
# except BaseException: # TODO: what is this again??!!
# raise RuntimeError("Punt")
# self.logDebug(
# "[=] Unexpected exception, SQL: {}".format(
# wt.getDbConn().getLastSql()))
@ -1421,11 +1431,11 @@ class Task():
class ExecutionStats:
def __init__(self):
# total/success times for a task
self._execTimes: Dict[str, [int, int]] = {}
self._execTimes: Dict[str, List[int]] = {}
self._tasksInProgress = 0
self._lock = threading.Lock()
self._firstTaskStartTime = None
self._execStartTime = None
self._firstTaskStartTime = 0.0
self._execStartTime = 0.0
self._errors = {}
self._elapsedTime = 0.0 # total elapsed time
self._accRunTime = 0.0 # accumulated run time
@ -1470,7 +1480,7 @@ class ExecutionStats:
self._tasksInProgress -= 1
if self._tasksInProgress == 0: # all tasks have stopped
self._accRunTime += (time.time() - self._firstTaskStartTime)
self._firstTaskStartTime = None
self._firstTaskStartTime = 0.0
def registerFailure(self, reason):
self._failed = True
@ -1554,7 +1564,7 @@ class StateTransitionTask(Task):
def getRegTableName(cls, i):
if ( StateTransitionTask._baseTableNumber is None): # Set it one time
StateTransitionTask._baseTableNumber = Dice.throw(
999) if gConfig.dynamic_db_table_names else 0
999) if Config.getConfig().dynamic_db_table_names else 0
return "reg_table_{}".format(StateTransitionTask._baseTableNumber + i)
def execute(self, wt: WorkerThread):
@ -1574,14 +1584,14 @@ class TaskCreateDb(StateTransitionTask):
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
# was: self.execWtSql(wt, "create database db")
repStr = ""
if gConfig.num_replicas != 1:
# numReplica = Dice.throw(gConfig.max_replicas) + 1 # 1,2 ... N
numReplica = gConfig.num_replicas # fixed, always
if Config.getConfig().num_replicas != 1:
# numReplica = Dice.throw(Settings.getConfig().max_replicas) + 1 # 1,2 ... N
numReplica = Config.getConfig().num_replicas # fixed, always
repStr = "replica {}".format(numReplica)
updatePostfix = "update 1" if gConfig.verify_data else "" # allow update only when "verify data" is active
updatePostfix = "update 1" if Config.getConfig().verify_data else "" # allow update only when "verify data" is active
dbName = self._db.getName()
self.execWtSql(wt, "create database {} {} {} ".format(dbName, repStr, updatePostfix ) )
if dbName == "db_0" and gConfig.use_shadow_db:
if dbName == "db_0" and Config.getConfig().use_shadow_db:
self.execWtSql(wt, "create database {} {} {} ".format("db_s", repStr, updatePostfix ) )
class TaskDropDb(StateTransitionTask):
@ -1615,9 +1625,10 @@ class TaskCreateSuperTable(StateTransitionTask):
# wt.execSql("use db") # should always be in place
sTable.create(wt.getDbConn(),
{'ts':'TIMESTAMP', 'speed':'INT', 'color':'BINARY(16)'}, {'b':'BINARY(200)', 'f':'FLOAT'},
dropIfExists = True
)
{'ts': TdDataType.TIMESTAMP, 'speed': TdDataType.INT, 'color': TdDataType.BINARY16}, {
'b': TdDataType.BINARY200, 'f': TdDataType.FLOAT},
dropIfExists=True
)
# self.execWtSql(wt,"create table db.{} (ts timestamp, speed int) tags (b binary(200), f float) ".format(tblName))
# No need to create the regular tables, INSERT will do that
# automatically
@ -1645,9 +1656,7 @@ class TdSuperTable:
return dbc.existsSuperTable(self._stName)
# TODO: odd semantic, create() method is usually static?
def create(self, dbc, cols: dict, tags: dict,
dropIfExists = False
):
def create(self, dbc, cols: TdColumns, tags: TdTags, dropIfExists = False):
'''Creating a super table'''
dbName = self._dbName
@ -1662,13 +1671,13 @@ class TdSuperTable:
# Now let's create
sql = "CREATE TABLE {} ({})".format(
fullTableName,
",".join(['%s %s'%(k,v) for (k,v) in cols.items()]))
if tags is None :
sql += " TAGS (dummy int) "
else:
",".join(['%s %s'%(k,v.value) for (k,v) in cols.items()]))
if tags :
sql += " TAGS ({})".format(
",".join(['%s %s'%(k,v) for (k,v) in tags.items()])
",".join(['%s %s'%(k,v.value) for (k,v) in tags.items()])
)
else:
sql += " TAGS (dummy int) "
dbc.execute(sql)
def getRegTables(self, dbc: DbConn):
@ -1686,7 +1695,7 @@ class TdSuperTable:
def hasRegTables(self, dbc: DbConn):
return dbc.query("SELECT * FROM {}.{}".format(self._dbName, self._stName)) > 0
def ensureTable(self, task: Task, dbc: DbConn, regTableName: str):
def ensureRegTable(self, task: Optional[Task], dbc: DbConn, regTableName: str):
dbName = self._dbName
sql = "select tbname from {}.{} where tbname in ('{}')".format(dbName, self._stName, regTableName)
if dbc.query(sql) >= 1 : # reg table exists already
@ -1694,7 +1703,7 @@ class TdSuperTable:
# acquire a lock first, so as to be able to *verify*. More details in TD-1471
fullTableName = dbName + '.' + regTableName
if task is not None: # optional lock
if task is not None: # TODO: what happens if we don't lock the table
task.lockTable(fullTableName)
Progress.emit(Progress.CREATE_TABLE_ATTEMPT) # ATTEMPT to create a new table
# print("(" + fullTableName[-3:] + ")", end="", flush=True)
@ -1886,7 +1895,7 @@ class TaskDropSuperTable(StateTransitionTask):
if Dice.throw(2) == 0:
# print("_7_", end="", flush=True)
tblSeq = list(range(
2 + (self.LARGE_NUMBER_OF_TABLES if gConfig.larger_data else self.SMALL_NUMBER_OF_TABLES)))
2 + (self.LARGE_NUMBER_OF_TABLES if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_TABLES)))
random.shuffle(tblSeq)
tickOutput = False # if we have spit out a "d" character for "drop regular table"
isSuccess = True
@ -1952,13 +1961,13 @@ class TaskRestartService(StateTransitionTask):
@classmethod
def canBeginFrom(cls, state: AnyState):
if gConfig.auto_start_service:
if Config.getConfig().auto_start_service:
return state.canDropFixedSuperTable() # Basically when we have the super table
return False # don't run this otherwise
CHANCE_TO_RESTART_SERVICE = 200
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
if not gConfig.auto_start_service: # only execute when we are in -a mode
if not Config.getConfig().auto_start_service: # only execute when we are in -a mode
print("_a", end="", flush=True)
return
@ -1980,12 +1989,12 @@ class TaskAddData(StateTransitionTask):
activeTable: Set[int] = set()
# We use these two files to record operations to DB, useful for power-off tests
fAddLogReady = None # type: TextIOWrapper
fAddLogDone = None # type: TextIOWrapper
fAddLogReady = None # type: Optional[io.TextIOWrapper]
fAddLogDone = None # type: Optional[io.TextIOWrapper]
@classmethod
def prepToRecordOps(cls):
if gConfig.record_ops:
if Config.getConfig().record_ops:
if (cls.fAddLogReady is None):
Logging.info(
"Recording in a file operations to be performed...")
@ -2003,7 +2012,7 @@ class TaskAddData(StateTransitionTask):
return state.canAddData()
def _addDataInBatch(self, db, dbc, regTableName, te: TaskExecutor):
numRecords = self.LARGE_NUMBER_OF_RECORDS if gConfig.larger_data else self.SMALL_NUMBER_OF_RECORDS
numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS
fullTableName = db.getName() + '.' + regTableName
sql = "INSERT INTO {} VALUES ".format(fullTableName)
@ -2015,21 +2024,23 @@ class TaskAddData(StateTransitionTask):
dbc.execute(sql)
def _addData(self, db: Database, dbc, regTableName, te: TaskExecutor): # implied: NOT in batches
numRecords = self.LARGE_NUMBER_OF_RECORDS if gConfig.larger_data else self.SMALL_NUMBER_OF_RECORDS
numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS
for j in range(numRecords): # number of records per table
nextInt = db.getNextInt()
nextTick = db.getNextTick()
nextColor = db.getNextColor()
if gConfig.record_ops:
if Config.getConfig().record_ops:
self.prepToRecordOps()
if self.fAddLogReady is None:
raise CrashGenError("Unexpected empty fAddLogReady")
self.fAddLogReady.write("Ready to write {} to {}\n".format(nextInt, regTableName))
self.fAddLogReady.flush()
os.fsync(self.fAddLogReady)
os.fsync(self.fAddLogReady.fileno())
# TODO: too ugly trying to lock the table reliably, refactor...
fullTableName = db.getName() + '.' + regTableName
if gConfig.verify_data:
if Config.getConfig().verify_data:
self.lockTable(fullTableName)
# print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written
@ -2042,7 +2053,7 @@ class TaskAddData(StateTransitionTask):
dbc.execute(sql)
# Quick hack, attach an update statement here. TODO: create an "update" task
if (not gConfig.use_shadow_db) and Dice.throw(5) == 0: # 1 in N chance, plus not using shadow DB
if (not Config.getConfig().use_shadow_db) and Dice.throw(5) == 0: # 1 in N chance, plus not using shadow DB
nextInt = db.getNextInt()
nextColor = db.getNextColor()
sql = "INSERt INTO {} VALUES ('{}', {}, '{}');".format( # "INSERt" means "update" here
@ -2053,12 +2064,12 @@ class TaskAddData(StateTransitionTask):
dbc.execute(sql)
except: # Any exception at all
if gConfig.verify_data:
if Config.getConfig().verify_data:
self.unlockTable(fullTableName)
raise
# Now read it back and verify, we might encounter an error if table is dropped
if gConfig.verify_data: # only if command line asks for it
if Config.getConfig().verify_data: # only if command line asks for it
try:
readBack = dbc.queryScalar("SELECT speed from {}.{} WHERE ts='{}'".
format(db.getName(), regTableName, nextTick))
@ -2085,17 +2096,19 @@ class TaskAddData(StateTransitionTask):
# Successfully wrote the data into the DB, let's record it somehow
te.recordDataMark(nextInt)
if gConfig.record_ops:
if Config.getConfig().record_ops:
if self.fAddLogDone is None:
raise CrashGenError("Unexpected empty fAddLogDone")
self.fAddLogDone.write("Wrote {} to {}\n".format(nextInt, regTableName))
self.fAddLogDone.flush()
os.fsync(self.fAddLogDone)
os.fsync(self.fAddLogDone.fileno())
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
# ds = self._dbManager # Quite DANGEROUS here, may result in multi-thread client access
db = self._db
dbc = wt.getDbConn()
numTables = self.LARGE_NUMBER_OF_TABLES if gConfig.larger_data else self.SMALL_NUMBER_OF_TABLES
numRecords = self.LARGE_NUMBER_OF_RECORDS if gConfig.larger_data else self.SMALL_NUMBER_OF_RECORDS
numTables = self.LARGE_NUMBER_OF_TABLES if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_TABLES
numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS
tblSeq = list(range(numTables ))
random.shuffle(tblSeq) # now we have random sequence
for i in tblSeq:
@ -2110,7 +2123,7 @@ class TaskAddData(StateTransitionTask):
regTableName = self.getRegTableName(i) # "db.reg_table_{}".format(i)
fullTableName = dbName + '.' + regTableName
# self._lockTable(fullTableName) # "create table" below. Stop it if the table is "locked"
sTable.ensureTable(self, wt.getDbConn(), regTableName) # Ensure the table exists
sTable.ensureRegTable(self, wt.getDbConn(), regTableName) # Ensure the table exists
# self._unlockTable(fullTableName)
if Dice.throw(1) == 0: # 1 in 2 chance
@ -2126,6 +2139,8 @@ class ThreadStacks: # stack info for all threads
self._allStacks = {}
allFrames = sys._current_frames()
for th in threading.enumerate():
if th.ident is None:
continue
stack = traceback.extract_stack(allFrames[th.ident])
self._allStacks[th.native_id] = stack
@ -2246,14 +2261,15 @@ class ClientManager:
def run(self, svcMgr):
# self._printLastNumbers()
global gConfig
# global gConfig
# Prepare Tde Instance
global gContainer
tInst = gContainer.defTdeInstance = TdeInstance() # "subdir to hold the instance"
dbManager = DbManager(gConfig.connector_type, tInst.getDbTarget()) # Regular function
thPool = ThreadPool(gConfig.num_threads, gConfig.max_steps)
cfg = Config.getConfig()
dbManager = DbManager(cfg.connector_type, tInst.getDbTarget()) # Regular function
thPool = ThreadPool(cfg.num_threads, cfg.max_steps)
self.tc = ThreadCoordinator(thPool, dbManager)
Logging.info("Starting client instance: {}".format(tInst))
@ -2266,7 +2282,8 @@ class ClientManager:
# Release global variables
gConfig = None
# gConfig = None
Config.clearConfig()
gSvcMgr = None
logger = None
@ -2297,7 +2314,7 @@ class ClientManager:
class MainExec:
def __init__(self):
self._clientMgr = None
self._svcMgr = None # type: ServiceManager
self._svcMgr = None # type: Optional[ServiceManager]
signal.signal(signal.SIGTERM, self.sigIntHandler)
signal.signal(signal.SIGINT, self.sigIntHandler)
@ -2317,7 +2334,7 @@ class MainExec:
def runClient(self):
global gSvcMgr
if gConfig.auto_start_service:
if Config.getConfig().auto_start_service:
gSvcMgr = self._svcMgr = ServiceManager(1) # hack alert
gSvcMgr.startTaosServices() # we start, don't run
@ -2326,26 +2343,18 @@ class MainExec:
try:
ret = self._clientMgr.run(self._svcMgr) # stop TAOS service inside
except requests.exceptions.ConnectionError as err:
Logging.warning("Failed to open REST connection to DB: {}".format(err.getMessage()))
Logging.warning("Failed to open REST connection to DB: {}".format(err))
# don't raise
return ret
def runService(self):
global gSvcMgr
gSvcMgr = self._svcMgr = ServiceManager(gConfig.num_dnodes) # save it in a global variable TODO: hack alert
gSvcMgr = self._svcMgr = ServiceManager(Config.getConfig().num_dnodes) # save it in a global variable TODO: hack alert
gSvcMgr.run() # run to some end state
gSvcMgr = self._svcMgr = None
def init(self): # TODO: refactor
global gContainer
gContainer = Container() # mickey-mouse DI
global gSvcMgr # TODO: refactor away
gSvcMgr = None
# Super cool Python argument library:
# https://docs.python.org/3/library/argparse.html
def _buildCmdLineParser(self):
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
@ -2466,20 +2475,29 @@ class MainExec:
action='store_true',
help='Continue execution after encountering unexpected/disallowed errors/exceptions (default: false)')
global gConfig
gConfig = parser.parse_args()
crash_gen.settings.gConfig = gConfig # TODO: fix this hack, consolidate this global var
return parser
def init(self): # TODO: refactor
global gContainer
gContainer = Container() # mickey-mouse DI
global gSvcMgr # TODO: refactor away
gSvcMgr = None
parser = self._buildCmdLineParser()
Config.init(parser)
# Sanity check for arguments
if gConfig.use_shadow_db and gConfig.max_dbs>1 :
if Config.getConfig().use_shadow_db and Config.getConfig().max_dbs>1 :
raise CrashGenError("Cannot combine use-shadow-db with max-dbs of more than 1")
Logging.clsInit(gConfig)
Logging.clsInit(Config.getConfig().debug)
Dice.seed(0) # initial seeding of dice
def run(self):
if gConfig.run_tdengine: # run server
if Config.getConfig().run_tdengine: # run server
try:
self.runService()
return 0 # success
@ -1,25 +1,33 @@
from __future__ import annotations
import os
import io
import sys
from enum import Enum
import threading
import signal
import logging
import time
import subprocess
from typing import IO, List
from subprocess import PIPE, Popen, TimeoutExpired
from typing import BinaryIO, Generator, IO, List, NewType, Optional
import typing
try:
import psutil
except:
print("Psutil module needed, please install: sudo pip3 install psutil")
sys.exit(-1)
from queue import Queue, Empty
from .misc import Logging, Status, CrashGenError, Dice, Helper, Progress
from .db import DbConn, DbTarget
import crash_gen.settings
from .shared.config import Config
from .shared.db import DbTarget, DbConn
from .shared.misc import Logging, Helper, CrashGenError, Status, Progress, Dice
from .shared.types import DirPath
# from crash_gen.misc import CrashGenError, Dice, Helper, Logging, Progress, Status
# from crash_gen.db import DbConn, DbTarget
# from crash_gen.settings import Config
# from crash_gen.types import DirPath
class TdeInstance():
"""
@ -68,7 +76,10 @@ class TdeInstance():
self._fepPort = fepPort
self._tInstNum = tInstNum
self._smThread = ServiceManagerThread()
# An "Tde Instance" will *contain* a "sub process" object, with will/may use a thread internally
# self._smThread = ServiceManagerThread()
self._subProcess = None # type: Optional[TdeSubProcess]
def getDbTarget(self):
return DbTarget(self.getCfgDir(), self.getHostAddr(), self._port)
@ -153,23 +164,24 @@ quorum 2
def getExecFile(self): # .../taosd
return self._buildDir + "/build/bin/taosd"
def getRunDir(self): # TODO: rename to "root dir" ?!
return self._buildDir + self._subdir
def getRunDir(self) -> DirPath : # TODO: rename to "root dir" ?!
return DirPath(self._buildDir + self._subdir)
def getCfgDir(self): # path, not file
return self.getRunDir() + "/cfg"
def getCfgDir(self) -> DirPath : # path, not file
return DirPath(self.getRunDir() + "/cfg")
def getLogDir(self):
return self.getRunDir() + "/log"
def getLogDir(self) -> DirPath :
return DirPath(self.getRunDir() + "/log")
def getHostAddr(self):
return "127.0.0.1"
def getServiceCmdLine(self): # to start the instance
cmdLine = []
if crash_gen.settings.gConfig.track_memory_leaks:
if Config.getConfig().track_memory_leaks:
Logging.info("Invoking VALGRIND on service...")
cmdLine = ['valgrind', '--leak-check=yes']
# TODO: move "exec -c" into Popen(), we can both "use shell" and NOT fork so ask to lose kill control
cmdLine += ["exec " + self.getExecFile(), '-c', self.getCfgDir()] # used in subproce.Popen()
return cmdLine
@ -196,27 +208,46 @@ quorum 2
dbc.close()
def getStatus(self):
return self._smThread.getStatus()
# return self._smThread.getStatus()
if self._subProcess is None:
return Status(Status.STATUS_EMPTY)
return self._subProcess.getStatus()
def getSmThread(self):
return self._smThread
# def getSmThread(self):
# return self._smThread
def start(self):
if not self.getStatus().isStopped():
if self.getStatus().isActive():
raise CrashGenError("Cannot start instance from status: {}".format(self.getStatus()))
Logging.info("Starting TDengine instance: {}".format(self))
self.generateCfgFile() # service side generates config file, client does not
self.rotateLogs()
self._smThread.start(self.getServiceCmdLine(), self.getLogDir()) # May raise exceptions
# self._smThread.start(self.getServiceCmdLine(), self.getLogDir()) # May raise exceptions
self._subProcess = TdeSubProcess(self.getServiceCmdLine(), self.getLogDir())
def stop(self):
self._smThread.stop()
self._subProcess.stop()
self._subProcess = None
def isFirst(self):
return self._tInstNum == 0
def printFirst10Lines(self):
if self._subProcess is None:
Logging.warning("Incorrect TI status for procIpcBatch-10 operation")
return
self._subProcess.procIpcBatch(trimToTarget=10, forceOutput=True)
def procIpcBatch(self):
if self._subProcess is None:
Logging.warning("Incorrect TI status for procIpcBatch operation")
return
self._subProcess.procIpcBatch() # may encounter EOF and change status to STOPPED
if self._subProcess.getStatus().isStopped():
self._subProcess.stop()
self._subProcess = None
class TdeSubProcess:
"""
@ -225,41 +256,56 @@ class TdeSubProcess:
It takes a TdeInstance object as its parameter, with the rationale being
"a sub process runs an instance".
We aim to ensure that this object has exactly the same life-cycle as the
underlying sub process.
"""
# RET_ALREADY_STOPPED = -1
# RET_TIME_OUT = -3
# RET_SUCCESS = -4
def __init__(self):
self.subProcess = None # type: subprocess.Popen
# if tInst is None:
# raise CrashGenError("Empty instance not allowed in TdeSubProcess")
# self._tInst = tInst # Default create at ServiceManagerThread
def __init__(self, cmdLine: List[str], logDir: DirPath):
# Create the process + managing thread immediately
Logging.info("Attempting to start TAOS sub process...")
self._popen = self._start(cmdLine) # the actual sub process
self._smThread = ServiceManagerThread(self, logDir) # A thread to manage the sub process, mostly to process the IO
Logging.info("Successfully started TAOS process: {}".format(self))
def __repr__(self):
if self.subProcess is None:
return '[TdeSubProc: Empty]'
return '[TdeSubProc: pid = {}]'.format(self.getPid())
# if self.subProcess is None:
# return '[TdeSubProc: Empty]'
return '[TdeSubProc: pid = {}, status = {}]'.format(
self.getPid(), self.getStatus() )
def getStdOut(self):
return self.subProcess.stdout
def getStdOut(self) -> BinaryIO :
if self._popen.universal_newlines : # alias of text_mode
raise CrashGenError("We need binary mode for STDOUT IPC")
# Logging.info("Type of stdout is: {}".format(type(self._popen.stdout)))
return typing.cast(BinaryIO, self._popen.stdout)
def getStdErr(self):
return self.subProcess.stderr
def getStdErr(self) -> BinaryIO :
if self._popen.universal_newlines : # alias of text_mode
raise CrashGenError("We need binary mode for STDERR IPC")
return typing.cast(BinaryIO, self._popen.stderr)
def isRunning(self):
return self.subProcess is not None
# Now it's always running, since we matched the life cycle
# def isRunning(self):
# return self.subProcess is not None
def getPid(self):
return self.subProcess.pid
return self._popen.pid
def start(self, cmdLine):
def _start(self, cmdLine) -> Popen :
ON_POSIX = 'posix' in sys.builtin_module_names
# Sanity check
if self.subProcess: # already there
raise RuntimeError("Corrupt process state")
# if self.subProcess: # already there
# raise RuntimeError("Corrupt process state")
# Prepare environment variables for coverage information
# Ref: https://stackoverflow.com/questions/2231227/python-subprocess-popen-with-a-modified-environment
@ -270,15 +316,12 @@ class TdeSubProcess:
# print("Starting TDengine with env: ", myEnv.items())
# print("Starting TDengine via Shell: {}".format(cmdLineStr))
useShell = True # Needed to pass environments into it
self.subProcess = subprocess.Popen(
# ' '.join(cmdLine) if useShell else cmdLine,
# shell=useShell,
' '.join(cmdLine),
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
# bufsize=1, # not supported in binary mode
# useShell = True # Needed to pass environments into it
return Popen(
' '.join(cmdLine), # ' '.join(cmdLine) if useShell else cmdLine,
shell=True, # Always use shell, since we need to pass ENV vars
stdout=PIPE,
stderr=PIPE,
close_fds=ON_POSIX,
env=myEnv
) # had text=True, which interfered with reading EOF
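# Self-contained sketch of the launch pattern above: shell=True so exported
# environment variables reach the child, and binary (non-text) pipes so EOF
# detection on STDOUT/STDERR stays reliable. The command line is an
# assumption for demonstration only.
import os
import sys
from subprocess import Popen, PIPE

def demo_start_service(cmdLine):
    myEnv = os.environ.copy()            # e.g. pass coverage variables through
    return Popen(
        ' '.join(cmdLine),
        shell=True,                      # needed to pass ENV vars in
        stdout=PIPE,
        stderr=PIPE,                     # binary mode: no text=True
        close_fds=('posix' in sys.builtin_module_names),
        env=myEnv)

# p = demo_start_service(['exec', 'sleep', '5'])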
@ -288,7 +331,9 @@ class TdeSubProcess:
def stop(self):
"""
Stop a sub process, DO NOT return anything, process all conditions INSIDE
Stop a sub process, DO NOT return anything, process all conditions INSIDE.
Calling function should immediately delete/unreference the object
Common POSIX signal values (from man -7 signal):
SIGHUP 1
@ -306,29 +351,39 @@ class TdeSubProcess:
SIGSEGV 11
SIGUSR2 12
"""
if not self.subProcess:
Logging.error("Sub process already stopped")
# self._popen should always be valid.
Logging.info("Terminating TDengine service running as the sub process...")
if self.getStatus().isStopped():
Logging.info("Service already stopped")
return
if self.getStatus().isStopping():
Logging.info("Service is already being stopped, pid: {}".format(self.getPid()))
return
retCode = self.subProcess.poll() # ret -N means killed with signal N, otherwise it's from exit(N)
self.setStatus(Status.STATUS_STOPPING)
retCode = self._popen.poll() # ret -N means killed with signal N, otherwise it's from exit(N)
if retCode: # valid return code, process ended
# retCode = -retCode # only if valid
Logging.warning("TSP.stop(): process ended itself")
self.subProcess = None
# self.subProcess = None
return
# process still alive, let's interrupt it
self._stopForSure(self.subProcess, self.STOP_SIGNAL) # success if no exception
self.subProcess = None
self._stopForSure(self._popen, self.STOP_SIGNAL) # success if no exception
# sub process should end, then IPC queue should end, causing IO thread to end
self._smThread.stop() # stop for sure too
self.setStatus(Status.STATUS_STOPPED)
@classmethod
def _stopForSure(cls, proc: subprocess.Popen, sig: int):
def _stopForSure(cls, proc: Popen, sig: int):
'''
Stop a process and all sub processes with a signal, and SIGKILL if necessary
'''
def doKillTdService(proc: subprocess.Popen, sig: int):
def doKillTdService(proc: Popen, sig: int):
Logging.info("Killing sub-sub process {} with signal {}".format(proc.pid, sig))
proc.send_signal(sig)
try:
@ -340,7 +395,7 @@ class TdeSubProcess:
else:
Logging.warning("TD service terminated, EXPECTING ret code {}, got {}".format(sig, -retCode))
return True # terminated successfully
except subprocess.TimeoutExpired as err:
except TimeoutExpired as err:
Logging.warning("Failed to kill sub-sub process {} with signal {}".format(proc.pid, sig))
return False # failed to terminate
@ -349,22 +404,22 @@ class TdeSubProcess:
Logging.info("Killing sub-sub process {} with signal {}".format(child.pid, sig))
child.send_signal(sig)
try:
retCode = child.wait(20)
if (- retCode) == signal.SIGSEGV: # Crashed
retCode = child.wait(20) # type: ignore
if (- retCode) == signal.SIGSEGV: # type: ignore # Crashed
Logging.warning("Process {} CRASHED, please check CORE file!".format(child.pid))
elif (- retCode) == sig :
elif (- retCode) == sig : # type: ignore
Logging.info("Sub-sub process terminated with expected return code {}".format(sig))
else:
Logging.warning("Process terminated, EXPECTING ret code {}, got {}".format(sig, -retCode))
Logging.warning("Process terminated, EXPECTING ret code {}, got {}".format(sig, -retCode)) # type: ignore
return True # terminated successfully
except psutil.TimeoutExpired as err:
Logging.warning("Failed to kill sub-sub process {} with signal {}".format(child.pid, sig))
return False # did not terminate
def doKill(proc: subprocess.Popen, sig: int):
def doKill(proc: Popen, sig: int):
pid = proc.pid
try:
topSubProc = psutil.Process(pid)
topSubProc = psutil.Process(pid) # Now that we are doing "exec -c", should not have children any more
for child in topSubProc.children(recursive=True): # or parent.children() for recursive=False
Logging.warning("Unexpected child to be killed")
doKillChild(child, sig)
@ -391,17 +446,24 @@ class TdeSubProcess:
def hardKill(proc):
return doKill(proc, signal.SIGKILL)
pid = proc.pid
Logging.info("Terminate running processes under {}, with SIG #{} and wait...".format(pid, sig))
if softKill(proc, sig):
return# success
return # success
if sig != signal.SIGKILL: # really was soft above
if hardKill(proc):
return
raise CrashGenError("Failed to stop process, pid={}".format(pid))
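# Sketch of the two-stage termination performed by _stopForSure(): try the
# requested signal first, then escalate to SIGKILL if the process survives
# the wait. Names are illustrative; the real code also walks child processes.
import signal
from subprocess import Popen, TimeoutExpired

def demo_stop_for_sure(proc: Popen, sig: int, waitSec: int = 20):
    def tryKill(s: int) -> bool:
        proc.send_signal(s)
        try:
            proc.wait(waitSec)
            return True                  # process ended
        except TimeoutExpired:
            return False                 # still alive after the grace period
    if tryKill(sig):
        return                           # soft kill worked
    if sig != signal.SIGKILL and tryKill(signal.SIGKILL):
        return                           # hard kill worked
    raise RuntimeError("Failed to stop process, pid={}".format(proc.pid))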
def getStatus(self):
return self._smThread.getStatus()
def setStatus(self, status):
self._smThread.setStatus(status)
def procIpcBatch(self, trimToTarget=0, forceOutput=False):
self._smThread.procIpcBatch(trimToTarget, forceOutput)
class ServiceManager:
PAUSE_BETWEEN_IPC_CHECK = 1.2 # seconds between checks on STDOUT of sub process
@ -498,10 +560,10 @@ class ServiceManager:
def isActive(self):
"""
Determine if the service/cluster is active at all, i.e. at least
one thread is not "stopped".
one instance is active
"""
for ti in self._tInsts:
if not ti.getStatus().isStopped():
if ti.getStatus().isActive():
return True
return False
@ -539,10 +601,10 @@ class ServiceManager:
# while self.isRunning() or self.isRestarting() : # for as long as the svc mgr thread is still here
status = ti.getStatus()
if status.isRunning():
th = ti.getSmThread()
th.procIpcBatch() # regular processing,
# th = ti.getSmThread()
ti.procIpcBatch() # regular processing,
if status.isStopped():
th.procIpcBatch() # one last time?
ti.procIpcBatch() # one last time?
# self._updateThreadStatus()
time.sleep(self.PAUSE_BETWEEN_IPC_CHECK) # pause, before next round
@ -572,7 +634,8 @@ class ServiceManager:
if not ti.isFirst():
tFirst = self._getFirstInstance()
tFirst.createDnode(ti.getDbTarget())
ti.getSmThread().procIpcBatch(trimToTarget=10, forceOutput=True) # for printing 10 lines
ti.printFirst10Lines()
# ti.getSmThread().procIpcBatch(trimToTarget=10, forceOutput=True) # for printing 10 lines
def stopTaosServices(self):
with self._lock:
@ -618,21 +681,24 @@ class ServiceManagerThread:
"""
MAX_QUEUE_SIZE = 10000
def __init__(self):
def __init__(self, subProc: TdeSubProcess, logDir: str):
# Set the sub process
self._tdeSubProcess = None # type: TdeSubProcess
# self._tdeSubProcess = None # type: TdeSubProcess
# Arrange the TDengine instance
# self._tInstNum = tInstNum # instance serial number in cluster, ZERO based
# self._tInst = tInst or TdeInstance() # Need an instance
self._thread = None # The actual thread, # type: threading.Thread
self._thread2 = None # watching stderr
# self._thread = None # type: Optional[threading.Thread] # The actual thread, # type: threading.Thread
# self._thread2 = None # type: Optional[threading.Thread] Thread # watching stderr
self._status = Status(Status.STATUS_STOPPED) # The status of the underlying service, actually.
self._start(subProc, logDir)
def __repr__(self):
return "[SvcMgrThread: status={}, subProc={}]".format(
self.getStatus(), self._tdeSubProcess)
raise CrashGenError("SMT status moved to TdeSubProcess")
# return "[SvcMgrThread: status={}, subProc={}]".format(
# self.getStatus(), self._tdeSubProcess)
def getStatus(self):
'''
@ -640,30 +706,33 @@ class ServiceManagerThread:
'''
return self._status
def setStatus(self, statusVal: int):
self._status.set(statusVal)
# Start the thread (with sub process), and wait for the sub service
# to become fully operational
def start(self, cmdLine : str, logDir: str):
def _start(self, subProc :TdeSubProcess, logDir: str):
'''
Request the manager thread to start a new sub process, and manage it.
:param cmdLine: the command line to invoke
:param logDir: the logging directory, to hold stdout/stderr files
'''
if self._thread:
raise RuntimeError("Unexpected _thread")
if self._tdeSubProcess:
raise RuntimeError("TDengine sub process already created/running")
# if self._thread:
# raise RuntimeError("Unexpected _thread")
# if self._tdeSubProcess:
# raise RuntimeError("TDengine sub process already created/running")
Logging.info("Attempting to start TAOS service: {}".format(self))
# Moved to TdeSubProcess
# Logging.info("Attempting to start TAOS service: {}".format(self))
self._status.set(Status.STATUS_STARTING)
self._tdeSubProcess = TdeSubProcess()
self._tdeSubProcess.start(cmdLine) # TODO: verify process is running
# self._tdeSubProcess = TdeSubProcess.start(cmdLine) # TODO: verify process is running
self._ipcQueue = Queue()
self._ipcQueue = Queue() # type: Queue
self._thread = threading.Thread( # First thread captures server OUTPUT
target=self.svcOutputReader,
args=(self._tdeSubProcess.getStdOut(), self._ipcQueue, logDir))
args=(subProc.getStdOut(), self._ipcQueue, logDir))
self._thread.daemon = True # thread dies with the program
self._thread.start()
time.sleep(0.01)
@ -675,7 +744,7 @@ class ServiceManagerThread:
self._thread2 = threading.Thread( # 2nd thread captures server ERRORs
target=self.svcErrorReader,
args=(self._tdeSubProcess.getStdErr(), self._ipcQueue, logDir))
args=(subProc.getStdErr(), self._ipcQueue, logDir))
self._thread2.daemon = True # thread dies with the program
self._thread2.start()
time.sleep(0.01)
@ -690,14 +759,14 @@ class ServiceManagerThread:
Progress.emit(Progress.SERVICE_START_NAP)
# print("_zz_", end="", flush=True)
if self._status.isRunning():
Logging.info("[] TDengine service READY to process requests")
Logging.info("[] TAOS service started: {}".format(self))
Logging.info("[] TDengine service READY to process requests: pid={}".format(subProc.getPid()))
# Logging.info("[] TAOS service started: {}".format(self))
# self._verifyDnode(self._tInst) # query and ensure dnode is ready
# Logging.debug("[] TAOS Dnode verified: {}".format(self))
return # now we've started
# TODO: handle failure-to-start better?
self.procIpcBatch(100, True) # display output before conking out, trim to last 20 msgs, force output
raise RuntimeError("TDengine service did not start successfully: {}".format(self))
raise RuntimeError("TDengine service DID NOT achieve READY status: pid={}".format(subProc.getPid()))
def _verifyDnode(self, tInst: TdeInstance):
dbc = DbConn.createNative(tInst.getDbTarget())
@ -717,70 +786,45 @@ class ServiceManagerThread:
break
if not isValid:
print("Failed to start dnode, sleep for a while")
time.sleep(600)
time.sleep(10.0)
raise RuntimeError("Failed to start Dnode, expected port not found: {}".
format(tInst.getPort()))
dbc.close()
def stop(self):
# can be called from both main thread or signal handler
Logging.info("Terminating TDengine service running as the sub process...")
if self.getStatus().isStopped():
Logging.info("Service already stopped")
return
if self.getStatus().isStopping():
Logging.info("Service is already being stopped, pid: {}".format(self._tdeSubProcess.getPid()))
return
# Linux will send Control-C generated SIGINT to the TDengine process
# already, ref:
# https://unix.stackexchange.com/questions/176235/fork-and-how-signals-are-delivered-to-processes
if not self._tdeSubProcess:
raise RuntimeError("sub process object missing")
self._status.set(Status.STATUS_STOPPING)
# retCode = self._tdeSubProcess.stop()
# try:
# retCode = self._tdeSubProcess.stop()
# # print("Attempted to stop sub process, got return code: {}".format(retCode))
# if retCode == signal.SIGSEGV : # SGV
# Logging.error("[[--ERROR--]]: TDengine service SEGV fault (check core file!)")
# except subprocess.TimeoutExpired as err:
# Logging.info("Time out waiting for TDengine service process to exit")
if not self._tdeSubProcess.stop(): # everything within
if self._tdeSubProcess.isRunning(): # still running, should now never happen
Logging.error("FAILED to stop sub process, it is still running... pid = {}".format(
self._tdeSubProcess.getPid()))
else:
self._tdeSubProcess = None # not running any more
self.join() # stop the thread, change the status, etc.
# Linux will send Control-C generated SIGINT to the TDengine process already, ref:
# https://unix.stackexchange.com/questions/176235/fork-and-how-signals-are-delivered-to-processes
self.join() # stop the thread, status change moved to TdeSubProcess
# Check if it's really stopped
outputLines = 10 # for last output
if self.getStatus().isStopped():
self.procIpcBatch(outputLines) # one last time
Logging.debug("End of TDengine Service Output: {}".format(self))
Logging.debug("End of TDengine Service Output")
Logging.info("----- TDengine Service (managed by SMT) is now terminated -----\n")
else:
print("WARNING: SMT did not terminate as expected: {}".format(self))
print("WARNING: SMT did not terminate as expected")
def join(self):
# TODO: sanity check
if not self.getStatus().isStopping():
s = self.getStatus()
if s.isStopping() or s.isStopped(): # we may be stopping ourselves, or have been stopped/killed by others
if self._thread or self._thread2 :
if self._thread:
self._thread.join()
self._thread = None
if self._thread2: # STD ERR thread
self._thread2.join()
self._thread2 = None
else:
Logging.warning("Joining empty thread, doing nothing")
else:
raise RuntimeError(
"SMT.Join(): Unexpected status: {}".format(self._status))
if self._thread or self._thread2 :
if self._thread:
self._thread.join()
self._thread = None
if self._thread2: # STD ERR thread
self._thread2.join()
self._thread2 = None
else:
print("Joining empty thread, doing nothing")
self._status.set(Status.STATUS_STOPPED)
def _trimQueue(self, targetSize):
if targetSize <= 0:
return # do nothing
@ -799,6 +843,10 @@ class ServiceManagerThread:
TD_READY_MSG = "TDengine is initialized successfully"
def procIpcBatch(self, trimToTarget=0, forceOutput=False):
'''
Process a batch of STDOUT/STDERR data, until we read EMPTY from
the queue.
'''
self._trimQueue(trimToTarget) # trim if necessary
# Process all the output generated by the underlying sub process,
# managed by IO thread
@ -827,35 +875,54 @@ class ServiceManagerThread:
print(pBar, end="", flush=True)
print('\b\b\b\b', end="", flush=True)
def svcOutputReader(self, out: IO, queue, logDir: str):
BinaryChunk = NewType('BinaryChunk', bytes) # line with binary data, directly from STDOUT, etc.
TextChunk = NewType('TextChunk', str) # properly decoded, suitable for printing, etc.
@classmethod
def _decodeBinaryChunk(cls, bChunk: bytes) -> Optional[TextChunk] :
try:
tChunk = bChunk.decode("utf-8").rstrip()
return cls.TextChunk(tChunk)
except UnicodeError:
print("\nNon-UTF8 server output: {}\n".format(bChunk.decode('cp437')))
return None
def _textChunkGenerator(self, streamIn: BinaryIO, logDir: str, logFile: str
) -> Generator[TextChunk, None, None]:
'''
Take an input stream with binary data, produce a generator of decoded
"text chunks", and also save the original binary data in a log file.
'''
os.makedirs(logDir, exist_ok=True)
logF = open(os.path.join(logDir, logFile), 'wb')
for bChunk in iter(streamIn.readline, b''):
logF.write(bChunk) # Write to log file immediately
tChunk = self._decodeBinaryChunk(bChunk) # decode
if tChunk is not None:
yield tChunk # TODO: split into actual text lines
# At the end...
streamIn.close() # Close the stream
logF.close() # Close the output file
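# Usage sketch for the generator above (names other than
# _textChunkGenerator are placeholders): feed any binary stream through it,
# decoded text chunks come out, and the raw bytes are mirrored to the log
# file as a side effect.
import io

def demo_feed(smThread, queue):
    stream = io.BytesIO(b"line one\nline two\n")
    for tChunk in smThread._textChunkGenerator(stream, "/tmp/demo-logs", "demo.log"):
        queue.put(tChunk)   # same hand-off the reader threads perform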
def svcOutputReader(self, stdOut: BinaryIO, queue, logDir: str):
'''
The infinite routine that processes the STDOUT stream for the sub process being managed.
:param out: the IO stream object used to fetch the data from
:param queue: the queue where we dump the roughly parsed line-by-line data
:param stdOut: the IO stream object used to fetch the data from
:param queue: the queue where we dump the roughly parsed chunk-by-chunk text data
:param logDir: where we should dump a verbatim output file
'''
os.makedirs(logDir, exist_ok=True)
logFile = os.path.join(logDir,'stdout.log')
fOut = open(logFile, 'wb')
# Important Reference: https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
# print("This is the svcOutput Reader...")
# for line in out :
for line in iter(out.readline, b''):
fOut.write(line)
# print("Finished reading a line: {}".format(line))
# print("Adding item to queue...")
try:
line = line.decode("utf-8").rstrip()
except UnicodeError:
print("\nNon-UTF8 server output: {}\n".format(line))
# This might block, and then cause the "out" buffer to block
queue.put(line)
# stdOut.readline() # Skip the first output? TODO: remove?
for tChunk in self._textChunkGenerator(stdOut, logDir, 'stdout.log') :
queue.put(tChunk) # tChunk guaranteed not to be None
self._printProgress("_i")
if self._status.isStarting(): # we are starting, let's see if we have started
if line.find(self.TD_READY_MSG) != -1: # found
if tChunk.find(self.TD_READY_MSG) != -1: # found
Logging.info("Waiting for the service to become FULLY READY")
time.sleep(1.0) # wait for the server to truly start. TODO: remove this
Logging.info("Service is now FULLY READY") # TODO: more ID info here?
@ -869,18 +936,17 @@ class ServiceManagerThread:
print("_w", end="", flush=True)
# queue.put(line)
# meaning sub process must have died
Logging.info("EOF for TDengine STDOUT: {}".format(self))
out.close() # Close the stream
fOut.close() # Close the output file
# stdOut has no more data, meaning sub process must have died
Logging.info("EOF found TDengine STDOUT, marking the process as terminated")
self.setStatus(Status.STATUS_STOPPED)
def svcErrorReader(self, err: IO, queue, logDir: str):
os.makedirs(logDir, exist_ok=True)
logFile = os.path.join(logDir,'stderr.log')
fErr = open(logFile, 'wb')
for line in iter(err.readline, b''):
fErr.write(line)
Logging.info("TDengine STDERR: {}".format(line))
Logging.info("EOF for TDengine STDERR: {}".format(self))
err.close()
fErr.close()
def svcErrorReader(self, stdErr: BinaryIO, queue, logDir: str):
# os.makedirs(logDir, exist_ok=True)
# logFile = os.path.join(logDir,'stderr.log')
# fErr = open(logFile, 'wb')
# for line in iter(err.readline, b''):
for tChunk in self._textChunkGenerator(stdErr, logDir, 'stderr.log') :
queue.put(tChunk) # tChunk guaranteed not to be None
# fErr.write(line)
Logging.info("TDengine STDERR: {}".format(tChunk))
Logging.info("EOF for TDengine STDERR")
@ -1,8 +0,0 @@
from __future__ import annotations
import argparse
gConfig: argparse.Namespace
def init():
global gConfig
gConfig = []
@ -0,0 +1,42 @@
from __future__ import annotations
import argparse
from typing import Optional
from .misc import CrashGenError
# from crash_gen.misc import CrashGenError
# gConfig: Optional[argparse.Namespace]
class Config:
_config = None # type: Optional[argparse.Namespace]
@classmethod
def init(cls, parser: argparse.ArgumentParser):
if cls._config is not None:
raise CrashGenError("Config can only be initialized once")
cls._config = parser.parse_args()
# print(cls._config)
@classmethod
def setConfig(cls, config: argparse.Namespace):
cls._config = config
@classmethod
# TODO: check items instead of exposing everything
def getConfig(cls) -> argparse.Namespace:
if cls._config is None:
raise CrashGenError("invalid state")
return cls._config
@classmethod
def clearConfig(cls):
cls._config = None
@classmethod
def isSet(cls, cfgKey):
cfg = cls.getConfig()
if cfgKey not in cfg:
return False
return cfg.__getattribute__(cfgKey)
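# Usage sketch for the Config wrapper above, assuming an argparse parser has
# been built elsewhere:
#
#   parser = argparse.ArgumentParser(description="crash_gen demo")
#   Config.init(parser)                      # parse once, at startup
#   if Config.getConfig().debug: ...         # read anywhere, no module globals
#   if Config.isSet('use_shadow_db'): ...    # safe even if the flag is absent
#   Config.clearConfig()                     # release at shutdown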
@ -1,24 +1,26 @@
from __future__ import annotations
import sys
import os
import datetime
import time
import threading
import requests
from requests.auth import HTTPBasicAuth
import taos
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.log import *
from .misc import Logging, CrashGenError, Helper, Dice
import os
import datetime
import traceback
# from .service_manager import TdeInstance
import crash_gen.settings
from .config import Config
from .misc import Logging, CrashGenError, Helper
from .types import QueryResult
class DbConn:
TYPE_NATIVE = "native-c"
@ -79,7 +81,7 @@ class DbConn:
raise RuntimeError("Cannot query database until connection is open")
nRows = self.query(sql)
if nRows != 1:
raise taos.error.ProgrammingError(
raise CrashGenError(
"Unexpected result for query: {}, rows = {}".format(sql, nRows),
(CrashGenError.INVALID_EMPTY_RESULT if nRows==0 else CrashGenError.INVALID_MULTIPLE_RESULT)
)
@ -115,7 +117,7 @@ class DbConn:
try:
self.execute(sql)
return True # ignore num of results, return success
except taos.error.ProgrammingError as err:
except taos.error.Error as err:
return False # failed, for whatever TAOS reason
# Not possible to reach here, non-TAOS exception would have been thrown
@ -126,7 +128,7 @@ class DbConn:
def openByType(self):
raise RuntimeError("Unexpected execution, should be overriden")
def getQueryResult(self):
def getQueryResult(self) -> QueryResult :
raise RuntimeError("Unexpected execution, should be overriden")
def getResultRows(self):
@ -221,7 +223,7 @@ class DbConnRest(DbConn):
class MyTDSql:
# Class variables
_clsLock = threading.Lock() # class wide locking
longestQuery = None # type: str
longestQuery = '' # type: str
longestQueryTime = 0.0 # seconds
lqStartTime = 0.0
# lqEndTime = 0.0 # Not needed, as we have the two above already
@ -249,7 +251,13 @@ class MyTDSql:
def _execInternal(self, sql):
startTime = time.time()
# Logging.debug("Executing SQL: " + sql)
# ret = None # TODO: use strong type here
# try: # Let's not capture the error, and let taos.error.ProgrammingError pass through
ret = self._cursor.execute(sql)
# except taos.error.ProgrammingError as err:
# Logging.warning("Taos SQL execution error: {}, SQL: {}".format(err.msg, sql))
# raise CrashGenError(err.msg)
# print("\nSQL success: {}".format(sql))
queryTime = time.time() - startTime
# Record the query time
@ -261,7 +269,7 @@ class MyTDSql:
cls.lqStartTime = startTime
# Now write to the shadow database
if crash_gen.settings.gConfig.use_shadow_db:
if Config.isSet('use_shadow_db'):
if sql[:11] == "INSERT INTO":
if sql[:16] == "INSERT INTO db_0":
sql2 = "INSERT INTO db_s" + sql[16:]
@ -453,31 +461,11 @@ class DbManager():
''' Release the underlying DB connection upon deletion of DbManager '''
self.cleanUp()
def getDbConn(self):
def getDbConn(self) -> DbConn :
if self._dbConn is None:
raise CrashGenError("Unexpected empty DbConn")
return self._dbConn
# TODO: not used any more, to delete
def pickAndAllocateTable(self): # pick any table, and "use" it
return self.tableNumQueue.pickAndAllocate()
# TODO: Not used any more, to delete
def addTable(self):
with self._lock:
tIndex = self.tableNumQueue.push()
return tIndex
# Not used any more, to delete
def releaseTable(self, i): # return the table back, so others can use it
self.tableNumQueue.release(i)
# TODO: not used any more, delete
def getTableNameToDelete(self):
tblNum = self.tableNumQueue.pop() # TODO: race condition!
if (not tblNum): # maybe false
return False
return "table_{}".format(tblNum)
def cleanUp(self):
if self._dbConn:
self._dbConn.close()
@ -3,6 +3,7 @@ import random
import logging
import os
import sys
from typing import Optional
import taos
@ -39,14 +40,14 @@ class MyLoggingAdapter(logging.LoggerAdapter):
class Logging:
logger = None
logger = None # type: Optional[MyLoggingAdapter]
@classmethod
def getLogger(cls):
return logger
return cls.logger
@classmethod
def clsInit(cls, gConfig): # TODO: refactor away gConfig
def clsInit(cls, debugMode: bool):
if cls.logger:
return
@ -60,12 +61,8 @@ class Logging:
# Logging adapter, to be used as a logger
# print("setting logger variable")
# global logger
cls.logger = MyLoggingAdapter(_logger, [])
if (gConfig.debug):
cls.logger.setLevel(logging.DEBUG) # default seems to be INFO
else:
cls.logger.setLevel(logging.INFO)
cls.logger = MyLoggingAdapter(_logger, {})
cls.logger.setLevel(logging.DEBUG if debugMode else logging.INFO) # default seems to be INFO
@classmethod
def info(cls, msg):
@ -84,6 +81,7 @@ class Logging:
cls.logger.error(msg)
class Status:
STATUS_EMPTY = 99
STATUS_STARTING = 1
STATUS_RUNNING = 2
STATUS_STOPPING = 3
@ -95,12 +93,16 @@ class Status:
def __repr__(self):
return "[Status: v={}]".format(self._status)
def set(self, status):
def set(self, status: int):
self._status = status
def get(self):
return self._status
def isEmpty(self):
''' Empty/Undefined '''
return self._status == Status.STATUS_EMPTY
def isStarting(self):
return self._status == Status.STATUS_STARTING
@ -117,6 +119,9 @@ class Status:
def isStable(self):
return self.isRunning() or self.isStopped()
def isActive(self):
return self.isStarting() or self.isRunning() or self.isStopping()
# Deterministic random number generator
class Dice():
seeded = False # static, uninitialized
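# Hedged sketch of a seeded, deterministic dice of the kind used here: one
# class-wide random.Random instance seeded exactly once, so test runs are
# reproducible. Names mirror Dice.seed()/Dice.throw() but details are assumed.
import random

class DemoDice:
    _rng = None  # class-wide RNG, set by seed()

    @classmethod
    def seed(cls, s: int):
        cls._rng = random.Random(s)

    @classmethod
    def throw(cls, stop: int) -> int:
        assert cls._rng is not None, "seed() must be called first"
        return cls._rng.randrange(0, stop)  # 0 .. stop-1

# DemoDice.seed(0); DemoDice.throw(20) gives the same sequence every run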