Merge branch 'master' into cq with continuous query function
commit f3b4252526
@@ -1,12 +1,12 @@
 [submodule "src/connector/go"]
 path = src/connector/go
-url = https://github.com/taosdata/driver-go
+url = git@github.com:taosdata/driver-go.git
 [submodule "src/connector/grafanaplugin"]
 path = src/connector/grafanaplugin
-url = https://github.com/taosdata/grafanaplugin
+url = git@github.com:taosdata/grafanaplugin.git
 [submodule "src/connector/hivemq-tdengine-extension"]
 path = src/connector/hivemq-tdengine-extension
-url = https://github.com/huskar-t/hivemq-tdengine-extension.git
+url = git@github.com:taosdata/hivemq-tdengine-extension.git
 [submodule "tests/examples/rust"]
 path = tests/examples/rust
 url = https://github.com/songtianyi/tdengine-rust-bindings.git

@@ -102,6 +102,12 @@ IF ("${CPUTYPE}" STREQUAL "")
 SET(TD_LINUX TRUE)
 SET(TD_LINUX_64 FALSE)
 SET(TD_ARM_64 TRUE)
+ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "mips64")
+SET(CPUTYPE "mips64")
+MESSAGE(STATUS "Set CPUTYPE to mips64")
+SET(TD_LINUX TRUE)
+SET(TD_LINUX_64 FALSE)
+SET(TD_MIPS_64 TRUE)
 ENDIF ()
 
 ELSE ()

@@ -4,7 +4,7 @@ PROJECT(TDengine)
 IF (DEFINED VERNUMBER)
 SET(TD_VER_NUMBER ${VERNUMBER})
 ELSE ()
-SET(TD_VER_NUMBER "2.0.20.2")
+SET(TD_VER_NUMBER "2.0.20.5")
 ENDIF ()
 
 IF (DEFINED VERCOMPATIBLE)

@@ -58,7 +58,12 @@ cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_pat
 cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include
 cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include
 cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples
-cp -r ${top_dir}/src/connector/grafanaplugin ${pkg_dir}${install_home_path}/connector
+if [ -d "${top_dir}/src/connector/grafanaplugin/dist" ]; then
+cp -r ${top_dir}/src/connector/grafanaplugin/dist ${pkg_dir}${install_home_path}/connector/grafanaplugin
+else
+echo "grafanaplugin bundled directory not found!"
+exit 1
+fi
 cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
 cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
 cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector

@@ -66,7 +66,12 @@ cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin
 cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
 cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
 cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include
-cp -r %{_compiledir}/../src/connector/grafanaplugin %{buildroot}%{homepath}/connector
+if [ -d %{_compiledir}/../src/connector/grafanaplugin/dist ]; then
+cp -r %{_compiledir}/../src/connector/grafanaplugin/dist %{buildroot}%{homepath}/connector/grafanaplugin
+else
+echo grafanaplugin bundled directory not found!
+exit 1
+fi
 cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
 cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
 cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector

@@ -243,9 +243,17 @@ function install_data() {
 }
 
 function install_connector() {
-${csudo} cp -rf ${source_dir}/src/connector/grafanaplugin ${install_main_dir}/connector
+if [ -d "${source_dir}/src/connector/grafanaplugin/dist" ]; then
+${csudo} cp -rf ${source_dir}/src/connector/grafanaplugin/dist ${install_main_dir}/connector/grafanaplugin
+else
+echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
+fi
+if find ${source_dir}/src/connector/go -mindepth 1 -maxdepth 1 | read; then
+${csudo} cp -r ${source_dir}/src/connector/go ${install_main_dir}/connector
+else
+echo "WARNING: go connector not found, please check if want to use it!"
+fi
 ${csudo} cp -rf ${source_dir}/src/connector/python ${install_main_dir}/connector
-${csudo} cp -rf ${source_dir}/src/connector/go ${install_main_dir}/connector
 
 ${csudo} cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &> /dev/null && ${csudo} chmod 777 ${install_main_dir}/connector/*.jar || echo &> /dev/null
 }

@@ -117,10 +117,18 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
 if [ "$osType" != "Darwin" ]; then
 cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
 fi
-cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
-cp -r ${connector_dir}/python ${install_dir}/connector/
-cp -r ${connector_dir}/go ${install_dir}/connector
-cp -r ${connector_dir}/nodejs ${install_dir}/connector
+if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
+cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
+else
+echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
+fi
+if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
+cp -r ${connector_dir}/go ${install_dir}/connector
+else
+echo "WARNING: go connector not found, please check if want to use it!"
+fi
+cp -r ${connector_dir}/python ${install_dir}/connector
+cp -r ${connector_dir}/nodejs ${install_dir}/connector
 fi
 # Copy release note
 # cp ${script_dir}/release_note ${install_dir}

@@ -144,9 +144,17 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
 if [ "$osType" != "Darwin" ]; then
 cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
 fi
-cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
-cp -r ${connector_dir}/python ${install_dir}/connector/
-cp -r ${connector_dir}/go ${install_dir}/connector
+if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
+cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
+else
+echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!"
+fi
+if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
+cp -r ${connector_dir}/go ${install_dir}/connector
+else
+echo "WARNING: go connector not found, please check if want to use it!"
+fi
+cp -r ${connector_dir}/python ${install_dir}/connector
 
 sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py
 sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py

@@ -114,6 +114,25 @@ mkdir -p ${install_dir}/examples
 examples_dir="${top_dir}/tests/examples"
 cp -r ${examples_dir}/c ${install_dir}/examples
 if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+if [ -d ${examples_dir}/JDBC/connectionPools/target ]; then
+rm -rf ${examples_dir}/JDBC/connectionPools/target
+fi
+if [ -d ${examples_dir}/JDBC/JDBCDemo/target ]; then
+rm -rf ${examples_dir}/JDBC/JDBCDemo/target
+fi
+if [ -d ${examples_dir}/JDBC/mybatisplus-demo/target ]; then
+rm -rf ${examples_dir}/JDBC/mybatisplus-demo/target
+fi
+if [ -d ${examples_dir}/JDBC/springbootdemo/target ]; then
+rm -rf ${examples_dir}/JDBC/springbootdemo/target
+fi
+if [ -d ${examples_dir}/JDBC/SpringJdbcTemplate/target ]; then
+rm -rf ${examples_dir}/JDBC/SpringJdbcTemplate/target
+fi
+if [ -d ${examples_dir}/JDBC/taosdemo/target ]; then
+rm -rf ${examples_dir}/JDBC/taosdemo/target
+fi
+
 cp -r ${examples_dir}/JDBC ${install_dir}/examples
 cp -r ${examples_dir}/matlab ${install_dir}/examples
 cp -r ${examples_dir}/python ${install_dir}/examples
@@ -131,9 +150,17 @@ connector_dir="${code_dir}/connector"
 mkdir -p ${install_dir}/connector
 if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
 cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
-cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
-cp -r ${connector_dir}/python ${install_dir}/connector/
-cp -r ${connector_dir}/go ${install_dir}/connector
+if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
+cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
+else
+echo "WARNING: grafanaplugin bundled dir not found, please check if you want to use it!"
+fi
+if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
+cp -r ${connector_dir}/go ${install_dir}/connector
+else
+echo "WARNING: go connector not found, please check if want to use it!"
+fi
+cp -r ${connector_dir}/python ${install_dir}/connector
+cp -r ${connector_dir}/nodejs ${install_dir}/connector
 fi
 # Copy release note

@@ -166,9 +166,18 @@ connector_dir="${code_dir}/connector"
 mkdir -p ${install_dir}/connector
 if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
 cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
-cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
+if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
+cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
+else
+echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
+fi
+if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
+cp -r ${connector_dir}/go ${install_dir}/connector
+else
+echo "WARNING: go connector not found, please check if want to use it!"
+fi
 cp -r ${connector_dir}/python ${install_dir}/connector/
 cp -r ${connector_dir}/go ${install_dir}/connector
 
 sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py
 sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py

@@ -1,6 +1,6 @@
 name: tdengine
 base: core18
-version: '2.0.20.2'
+version: '2.0.20.5'
 icon: snap/gui/t-dengine.svg
 summary: an open-source big data platform designed and optimized for IoT.
 description: |
@@ -72,7 +72,7 @@ parts:
 - usr/bin/taosd
 - usr/bin/taos
 - usr/bin/taosdemo
-- usr/lib/libtaos.so.2.0.20.2
+- usr/lib/libtaos.so.2.0.20.5
 - usr/lib/libtaos.so.1
 - usr/lib/libtaos.so
 

@@ -6494,7 +6494,6 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
 
 size_t valSize = taosArrayGetSize(pValList);
 
-
 // too long tag values will return invalid sql, not be truncated automatically
 SSchema *pTagSchema = tscGetTableTagSchema(pStableMetaInfo->pTableMeta);
 STagData *pTag = &pCreateTableInfo->tagdata;
@@ -6504,7 +6503,6 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
 return TSDB_CODE_TSC_OUT_OF_MEMORY;
 }
 
-
 SArray* pNameList = NULL;
 size_t nameSize = 0;
 int32_t schemaSize = tscGetNumOfTags(pStableMetaInfo->pTableMeta);
@@ -7119,6 +7117,7 @@ int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t i
 const char* msg6 = "too many tables in from clause";
 const char* msg7 = "invalid table alias name";
 const char* msg8 = "alias name too long";
+const char* msg9 = "only tag query not compatible with normal column filter";
 
 int32_t code = TSDB_CODE_SUCCESS;
 
@@ -7288,6 +7287,20 @@ int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t i
 if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) {
 return TSDB_CODE_TSC_INVALID_SQL;
 }
 
+if(tscQueryTags(pQueryInfo)) {
+SSqlExpr* pExpr1 = tscSqlExprGet(pQueryInfo, 0);
+
+if (pExpr1->functionId != TSDB_FUNC_TID_TAG) {
+int32_t numOfCols = (int32_t)taosArrayGetSize(pQueryInfo->colList);
+for (int32_t i = 0; i < numOfCols; ++i) {
+SColumn* pCols = taosArrayGetP(pQueryInfo->colList, i);
+if (pCols->numOfFilters > 0) {
+return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9);
+}
+}
+}
+}
+}
 
 if (parseSessionClause(pCmd, pQueryInfo, pQuerySqlNode) != TSDB_CODE_SUCCESS) {

@@ -2020,8 +2020,9 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
 }
 }
 
-tscDebug("0x%"PRIx64" recv table meta, uid:%" PRIu64 ", tid:%d, name:%s", pSql->self, pTableMeta->id.uid, pTableMeta->id.tid,
-tNameGetTableName(&pTableMetaInfo->name));
+tscDebug("0x%"PRIx64" recv table meta, uid:%" PRIu64 ", tid:%d, name:%s, numOfCols:%d, numOfTags:%d", pSql->self,
+pTableMeta->id.uid, pTableMeta->id.tid, tNameGetTableName(&pTableMetaInfo->name), pTableMeta->tableInfo.numOfColumns,
+pTableMeta->tableInfo.numOfTags);
 
 free(pTableMeta);
 return TSDB_CODE_SUCCESS;
@@ -2164,8 +2165,7 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) {
 
 pInfo->vgroupList->numOfVgroups = pVgroupMsg->numOfVgroups;
 if (pInfo->vgroupList->numOfVgroups <= 0) {
-//tfree(pInfo->vgroupList);
-tscError("0x%"PRIx64" empty vgroup info", pSql->self);
+tscDebug("0x%"PRIx64" empty vgroup info", pSql->self);
 } else {
 for (int32_t j = 0; j < pInfo->vgroupList->numOfVgroups; ++j) {
 // just init, no need to lock

@@ -215,7 +215,7 @@ static void tscProcessSubscriptionTimer(void *handle, void *tmrId) {
 taosTmrReset(tscProcessSubscriptionTimer, pSub->interval, pSub, tscTmr, &pSub->pTimer);
 }
 
-
+//TODO refactor: extract table list name not simply from the sql
 static SArray* getTableList( SSqlObj* pSql ) {
 const char* p = strstr( pSql->sqlstr, " from " );
 assert(p != NULL); // we are sure this is a 'select' statement
@@ -224,11 +224,11 @@ static SArray* getTableList( SSqlObj* pSql ) {
 
 SSqlObj* pNew = taos_query(pSql->pTscObj, sql);
 if (pNew == NULL) {
-tscError("0x%"PRIx64"failed to retrieve table id: cannot create new sql object.", pSql->self);
+tscError("0x%"PRIx64" failed to retrieve table id: cannot create new sql object.", pSql->self);
 return NULL;
 
 } else if (taos_errno(pNew) != TSDB_CODE_SUCCESS) {
-tscError("0x%"PRIx64"failed to retrieve table id,error: %s", pSql->self, tstrerror(taos_errno(pNew)));
+tscError("0x%"PRIx64" failed to retrieve table id,error: %s", pSql->self, tstrerror(taos_errno(pNew)));
 return NULL;
 }
 

@@ -1 +1 @@
-Subproject commit 7a26c432f8b4203e42344ff3290b9b9b01b983d5
+Subproject commit 8ce6d86558afc8c0b50c10f990fd2b4270cf06fc

@@ -9,7 +9,7 @@ const ffi = require('ffi-napi');
 const ArrayType = require('ref-array-napi');
 const Struct = require('ref-struct-napi');
 const FieldTypes = require('./constants');
-const errors = require ('./error');
+const errors = require('./error');
 const TaosObjects = require('./taosobjects');
 const { NULL_POINTER } = require('ref-napi');
 
@@ -22,7 +22,7 @@ function convertMicrosecondsToDatetime(time) {
 return new TaosObjects.TaosTimestamp(time * 0.001, true);
 }
 
-function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
+function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
 timestampConverter = convertMillisecondsToDatetime;
 if (micro == true) {
 timestampConverter = convertMicrosecondsToDatetime;
@@ -44,14 +44,14 @@ function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, micro=false
 }
 return res;
 }
-function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
+function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
 data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
 let res = new Array(data.length);
 for (let i = 0; i < data.length; i++) {
 if (data[i] == 0) {
 res[i] = false;
 }
-else if (data[i] == 1){
+else if (data[i] == 1) {
 res[i] = true;
 }
 else if (data[i] == FieldTypes.C_BOOL_NULL) {
@@ -60,29 +60,29 @@ function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
 }
 return res;
 }
-function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
+function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
 data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
 let res = [];
 let currOffset = 0;
 while (currOffset < data.length) {
-let d = data.readIntLE(currOffset,1);
+let d = data.readIntLE(currOffset, 1);
 res.push(d == FieldTypes.C_TINYINT_NULL ? null : d);
 currOffset += nbytes;
 }
 return res;
 }
-function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
+function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
 data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
 let res = [];
 let currOffset = 0;
 while (currOffset < data.length) {
-let d = data.readIntLE(currOffset,2);
+let d = data.readIntLE(currOffset, 2);
 res.push(d == FieldTypes.C_SMALLINT_NULL ? null : d);
 currOffset += nbytes;
 }
 return res;
 }
-function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
+function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
 data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
 let res = [];
 let currOffset = 0;
@@ -93,7 +93,7 @@ function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
 }
 return res;
 }
-function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
+function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
 data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
 let res = [];
 let currOffset = 0;
@@ -104,7 +104,7 @@ function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
 }
 return res;
 }
-function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
+function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
 data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
 let res = [];
 let currOffset = 0;
@@ -115,7 +115,7 @@ function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
 }
 return res;
 }
-function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
+function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
 data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
 let res = [];
 let currOffset = 0;
@@ -126,7 +126,7 @@ function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
 }
 return res;
 }
-function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
+function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
 data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
 let res = [];
 let currOffset = 0;
@@ -142,7 +142,7 @@ function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
 }
 return res;
 }
-function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
+function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
 data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
 let res = [];
 let dataEntry = data.slice(0, nbytes); //one entry in a row under a column;
@@ -153,23 +153,23 @@ function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
 
 // Object with all the relevant converters from pblock data to javascript readable data
 let convertFunctions = {
-[FieldTypes.C_BOOL] : convertBool,
-[FieldTypes.C_TINYINT] : convertTinyint,
-[FieldTypes.C_SMALLINT] : convertSmallint,
-[FieldTypes.C_INT] : convertInt,
-[FieldTypes.C_BIGINT] : convertBigint,
-[FieldTypes.C_FLOAT] : convertFloat,
-[FieldTypes.C_DOUBLE] : convertDouble,
-[FieldTypes.C_BINARY] : convertBinary,
-[FieldTypes.C_TIMESTAMP] : convertTimestamp,
-[FieldTypes.C_NCHAR] : convertNchar
+[FieldTypes.C_BOOL]: convertBool,
+[FieldTypes.C_TINYINT]: convertTinyint,
+[FieldTypes.C_SMALLINT]: convertSmallint,
+[FieldTypes.C_INT]: convertInt,
+[FieldTypes.C_BIGINT]: convertBigint,
+[FieldTypes.C_FLOAT]: convertFloat,
+[FieldTypes.C_DOUBLE]: convertDouble,
+[FieldTypes.C_BINARY]: convertBinary,
+[FieldTypes.C_TIMESTAMP]: convertTimestamp,
+[FieldTypes.C_NCHAR]: convertNchar
 }
 
 // Define TaosField structure
 var char_arr = ArrayType(ref.types.char);
 var TaosField = Struct({
-'name': char_arr,
-});
+'name': char_arr,
+});
 TaosField.fields.name.type.size = 65;
 TaosField.defineProperty('type', ref.types.char);
 TaosField.defineProperty('bytes', ref.types.short);
@@ -183,7 +183,7 @@ TaosField.defineProperty('bytes', ref.types.short);
 * @classdesc The CTaosInterface is the interface through which Node.JS communicates data back and forth with TDengine. It is not advised to
 * access this class directly and use it unless you understand what these functions do.
 */
-function CTaosInterface (config = null, pass = false) {
+function CTaosInterface(config = null, pass = false) {
 ref.types.char_ptr = ref.refType(ref.types.char);
 ref.types.void_ptr = ref.refType(ref.types.void);
 ref.types.void_ptr2 = ref.refType(ref.types.void_ptr);

@@ -196,64 +196,65 @@ function CTaosInterface (config = null, pass = false) {
 taoslibname = 'libtaos';
 }
 this.libtaos = ffi.Library(taoslibname, {
-'taos_options': [ ref.types.int, [ ref.types.int , ref.types.void_ptr ] ],
-'taos_init': [ ref.types.void, [ ] ],
+'taos_options': [ref.types.int, [ref.types.int, ref.types.void_ptr]],
+'taos_init': [ref.types.void, []],
 //TAOS *taos_connect(char *ip, char *user, char *pass, char *db, int port)
-'taos_connect': [ ref.types.void_ptr, [ ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.int ] ],
+'taos_connect': [ref.types.void_ptr, [ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.int]],
 //void taos_close(TAOS *taos)
-'taos_close': [ ref.types.void, [ ref.types.void_ptr ] ],
-//int *taos_fetch_lengths(TAOS_RES *taos);
-'taos_fetch_lengths': [ ref.types.void_ptr, [ ref.types.void_ptr ] ],
+'taos_close': [ref.types.void, [ref.types.void_ptr]],
+//int *taos_fetch_lengths(TAOS_RES *res);
+'taos_fetch_lengths': [ref.types.void_ptr, [ref.types.void_ptr]],
 //int taos_query(TAOS *taos, char *sqlstr)
-'taos_query': [ ref.types.void_ptr, [ ref.types.void_ptr, ref.types.char_ptr ] ],
-//int taos_affected_rows(TAOS *taos)
-'taos_affected_rows': [ ref.types.int, [ ref.types.void_ptr] ],
+'taos_query': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.char_ptr]],
+//int taos_affected_rows(TAOS_RES *res)
+'taos_affected_rows': [ref.types.int, [ref.types.void_ptr]],
 //int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows)
-'taos_fetch_block': [ ref.types.int, [ ref.types.void_ptr, ref.types.void_ptr] ],
+'taos_fetch_block': [ref.types.int, [ref.types.void_ptr, ref.types.void_ptr]],
 //int taos_num_fields(TAOS_RES *res);
-'taos_num_fields': [ ref.types.int, [ ref.types.void_ptr] ],
+'taos_num_fields': [ref.types.int, [ref.types.void_ptr]],
 //TAOS_ROW taos_fetch_row(TAOS_RES *res)
 //TAOS_ROW is void **, but we set the return type as a reference instead to get the row
-'taos_fetch_row': [ ref.refType(ref.types.void_ptr2), [ ref.types.void_ptr ] ],
+'taos_fetch_row': [ref.refType(ref.types.void_ptr2), [ref.types.void_ptr]],
+'taos_print_row': [ref.types.int, [ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr, ref.types.int]],
 //int taos_result_precision(TAOS_RES *res)
-'taos_result_precision': [ ref.types.int, [ ref.types.void_ptr ] ],
+'taos_result_precision': [ref.types.int, [ref.types.void_ptr]],
 //void taos_free_result(TAOS_RES *res)
-'taos_free_result': [ ref.types.void, [ ref.types.void_ptr] ],
+'taos_free_result': [ref.types.void, [ref.types.void_ptr]],
 //int taos_field_count(TAOS *taos)
-'taos_field_count': [ ref.types.int, [ ref.types.void_ptr ] ],
+'taos_field_count': [ref.types.int, [ref.types.void_ptr]],
 //TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)
-'taos_fetch_fields': [ ref.refType(TaosField), [ ref.types.void_ptr ] ],
+'taos_fetch_fields': [ref.refType(TaosField), [ref.types.void_ptr]],
 //int taos_errno(TAOS *taos)
-'taos_errno': [ ref.types.int, [ ref.types.void_ptr] ],
+'taos_errno': [ref.types.int, [ref.types.void_ptr]],
 //char *taos_errstr(TAOS *taos)
-'taos_errstr': [ ref.types.char_ptr, [ ref.types.void_ptr] ],
+'taos_errstr': [ref.types.char_ptr, [ref.types.void_ptr]],
 //void taos_stop_query(TAOS_RES *res);
-'taos_stop_query': [ ref.types.void, [ ref.types.void_ptr] ],
+'taos_stop_query': [ref.types.void, [ref.types.void_ptr]],
 //char *taos_get_server_info(TAOS *taos);
-'taos_get_server_info': [ ref.types.char_ptr, [ ref.types.void_ptr ] ],
+'taos_get_server_info': [ref.types.char_ptr, [ref.types.void_ptr]],
 //char *taos_get_client_info();
-'taos_get_client_info': [ ref.types.char_ptr, [ ] ],
+'taos_get_client_info': [ref.types.char_ptr, []],
 
 // ASYNC
 // void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *, TAOS_RES *, int), void *param)
-'taos_query_a': [ ref.types.void, [ ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr ] ],
+'taos_query_a': [ref.types.void, [ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr]],
 // void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);
-'taos_fetch_rows_a': [ ref.types.void, [ ref.types.void_ptr, ref.types.void_ptr, ref.types.void_ptr ]],
+'taos_fetch_rows_a': [ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.void_ptr]],
 
 // Subscription
 //TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)
-'taos_subscribe': [ ref.types.void_ptr, [ ref.types.void_ptr, ref.types.int, ref.types.char_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr, ref.types.int] ],
+'taos_subscribe': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.int, ref.types.char_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr, ref.types.int]],
 // TAOS_RES *taos_consume(TAOS_SUB *tsub)
-'taos_consume': [ ref.types.void_ptr, [ref.types.void_ptr] ],
+'taos_consume': [ref.types.void_ptr, [ref.types.void_ptr]],
 //void taos_unsubscribe(TAOS_SUB *tsub);
-'taos_unsubscribe': [ ref.types.void, [ ref.types.void_ptr ] ],
+'taos_unsubscribe': [ref.types.void, [ref.types.void_ptr]],
 
 // Continuous Query
 //TAOS_STREAM *taos_open_stream(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
 // int64_t stime, void *param, void (*callback)(void *));
-'taos_open_stream': [ ref.types.void_ptr, [ ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.int64, ref.types.void_ptr, ref.types.void_ptr ] ],
+'taos_open_stream': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.int64, ref.types.void_ptr, ref.types.void_ptr]],
 //void taos_close_stream(TAOS_STREAM *tstr);
-'taos_close_stream': [ ref.types.void, [ ref.types.void_ptr ] ]
+'taos_close_stream': [ref.types.void, [ref.types.void_ptr]]
 
 });
 if (pass == false) {
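Note: the binding table above follows the ffi-napi convention 'symbol': [returnType, [argTypes]]; ffi.Library loads the shared library and exposes each declared symbol as a callable JavaScript function. A minimal self-contained sketch of the same pattern, reusing only two of the symbols declared in this diff (library-name resolution and error handling are simplified):

const ffi = require('ffi-napi');
const ref = require('ref-napi');

const voidPtr = ref.refType(ref.types.void);
const charPtr = ref.refType(ref.types.char);

// Load libtaos and declare two entry points exactly as in the diff above.
const libtaos = ffi.Library('libtaos', {
  // char *taos_get_client_info();
  'taos_get_client_info': [charPtr, []],
  // void taos_close(TAOS *taos)
  'taos_close': [ref.types.void, [voidPtr]],
});

// Each declared symbol is now an ordinary function:
console.log(ref.readCString(libtaos.taos_get_client_info(), 0));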
@@ -264,7 +265,7 @@ function CTaosInterface (config = null, pass = false) {
 try {
 this._config = ref.allocCString(config);
 }
-catch(err){
+catch (err) {
 throw "Attribute Error: config is expected as a str";
 }
 }
@@ -276,38 +277,38 @@ function CTaosInterface (config = null, pass = false) {
 return this;
 }
 CTaosInterface.prototype.config = function config() {
-return this._config;
-}
-CTaosInterface.prototype.connect = function connect(host=null, user="root", password="taosdata", db=null, port=0) {
-let _host,_user,_password,_db,_port;
-try {
+return this._config;
+}
+CTaosInterface.prototype.connect = function connect(host = null, user = "root", password = "taosdata", db = null, port = 0) {
+let _host, _user, _password, _db, _port;
+try {
 _host = host != null ? ref.allocCString(host) : ref.alloc(ref.types.char_ptr, ref.NULL);
 }
-catch(err) {
+catch (err) {
 throw "Attribute Error: host is expected as a str";
 }
 try {
 _user = ref.allocCString(user)
 }
-catch(err) {
+catch (err) {
 throw "Attribute Error: user is expected as a str";
 }
 try {
 _password = ref.allocCString(password);
 }
-catch(err) {
+catch (err) {
 throw "Attribute Error: password is expected as a str";
 }
 try {
 _db = db != null ? ref.allocCString(db) : ref.alloc(ref.types.char_ptr, ref.NULL);
 }
-catch(err) {
+catch (err) {
 throw "Attribute Error: db is expected as a str";
 }
 try {
 _port = ref.alloc(ref.types.int, port);
 }
-catch(err) {
+catch (err) {
 throw TypeError("port is expected as an int")
 }
 let connection = this.libtaos.taos_connect(_host, _user, _password, _db, _port);
@@ -324,10 +325,10 @@ CTaosInterface.prototype.close = function close(connection) {
 console.log("Connection is closed");
 }
 CTaosInterface.prototype.query = function query(connection, sql) {
-return this.libtaos.taos_query(connection, ref.allocCString(sql));
+return this.libtaos.taos_query(connection, ref.allocCString(sql));
 }
-CTaosInterface.prototype.affectedRows = function affectedRows(connection) {
-return this.libtaos.taos_affected_rows(connection);
+CTaosInterface.prototype.affectedRows = function affectedRows(result) {
+return this.libtaos.taos_affected_rows(result);
 }
 CTaosInterface.prototype.useResult = function useResult(result) {
 
@@ -337,8 +338,8 @@ CTaosInterface.prototype.useResult = function useResult(result) {
 pfields = ref.reinterpret(pfields, this.fieldsCount(result) * 68, 0);
 for (let i = 0; i < pfields.length; i += 68) {
 //0 - 63 = name //64 - 65 = bytes, 66 - 67 = type
-fields.push( {
-name: ref.readCString(ref.reinterpret(pfields,65,i)),
+fields.push({
+name: ref.readCString(ref.reinterpret(pfields, 65, i)),
 type: pfields[i + 65],
 bytes: pfields[i + 66]
 })

@@ -347,11 +348,10 @@ CTaosInterface.prototype.useResult = function useResult(result) {
 return fields;
 }
 CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
-//let pblock = ref.ref(ref.ref(ref.NULL)); // equal to our raw data
-let pblock = this.libtaos.taos_fetch_row(result);
-let num_of_rows = 1;
-if (ref.isNull(pblock) == true) {
-return {block:null, num_of_rows:0};
+let pblock = ref.NULL_POINTER;
+let num_of_rows = this.libtaos.taos_fetch_block(result, pblock);
+if (ref.isNull(pblock.deref()) == true) {
+return { block: null, num_of_rows: 0 };
 }
 
 var fieldL = this.libtaos.taos_fetch_lengths(result);
@@ -359,10 +359,10 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
 let isMicro = (this.libtaos.taos_result_precision(result) == FieldTypes.C_TIMESTAMP_MICRO);
 
 var fieldlens = [];
 
 if (ref.isNull(fieldL) == false) {
-for (let i = 0; i < fields.length; i ++) {
-let plen = ref.reinterpret(fieldL, 4, i*4);
+for (let i = 0; i < fields.length; i++) {
+let plen = ref.reinterpret(fieldL, 4, i * 4);
 let len = plen.readInt32LE(0);
 fieldlens.push(len);
 }
@@ -370,21 +370,23 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
 
 let blocks = new Array(fields.length);
 blocks.fill(null);
-//num_of_rows = Math.abs(num_of_rows);
+num_of_rows = Math.abs(num_of_rows);
 let offset = 0;
+let ptr = pblock.deref();
+
 for (let i = 0; i < fields.length; i++) {
-pdata = ref.reinterpret(pblock,8,i*8);
-if(ref.isNull(pdata.readPointer())){
-blocks[i] = new Array();
-}else{
-pdata = ref.ref(pdata.readPointer());
-if (!convertFunctions[fields[i]['type']] ) {
-throw new errors.DatabaseError("Invalid data type returned from database");
-}
-blocks[i] = convertFunctions[fields[i]['type']](pdata, 1, fieldlens[i], offset, isMicro);
-}
+pdata = ref.reinterpret(ptr, 8, i * 8);
+if (ref.isNull(pdata.readPointer())) {
+blocks[i] = new Array();
+} else {
+pdata = ref.ref(pdata.readPointer());
+if (!convertFunctions[fields[i]['type']]) {
+throw new errors.DatabaseError("Invalid data type returned from database");
+}
+blocks[i] = convertFunctions[fields[i]['type']](pdata, num_of_rows, fieldlens[i], offset, isMicro);
+}
 }
-return {blocks: blocks, num_of_rows:Math.abs(num_of_rows)}
+return { blocks: blocks, num_of_rows }
 }
 CTaosInterface.prototype.fetchRow = function fetchRow(result, fields) {
 let row = this.libtaos.taos_fetch_row(result);
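Note: the fetchBlock rewrite above is the heart of this change: instead of taos_fetch_row with a hard-coded num_of_rows = 1, it hands a NULL_POINTER to taos_fetch_block, which fills the pointer with a whole block of column data and returns the row count (negative counts are normalized with Math.abs). A sketch of the resulting calling pattern, assuming a CTaosInterface instance cti and a result handle from cti.query (function and variable names are illustrative):

// Drain a result set block by block; fields comes from cti.useResult(result).
function drainResult(cti, result, fields) {
  let total = 0;
  while (true) {
    const { blocks, num_of_rows } = cti.fetchBlock(result, fields);
    if (num_of_rows == 0) break; // no more blocks
    // blocks[i] holds num_of_rows converted values for column i
    total += num_of_rows;
  }
  return total;
}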
@@ -414,7 +416,7 @@ CTaosInterface.prototype.errStr = function errStr(result) {
 // Async
 CTaosInterface.prototype.query_a = function query_a(connection, sql, callback, param = ref.ref(ref.NULL)) {
 // void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, int), void *param)
-callback = ffi.Callback(ref.types.void, [ ref.types.void_ptr, ref.types.void_ptr, ref.types.int ], callback);
+callback = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.int], callback);
 this.libtaos.taos_query_a(connection, ref.allocCString(sql), callback, param);
 return param;
 }
@@ -439,46 +441,46 @@ CTaosInterface.prototype.fetch_rows_a = function fetch_rows_a(result, callback,
 var fieldL = cti.libtaos.taos_fetch_lengths(result);
 var fieldlens = [];
 if (ref.isNull(fieldL) == false) {
-
-for (let i = 0; i < fields.length; i ++) {
-let plen = ref.reinterpret(fieldL, 8, i*8);
-let len = ref.get(plen,0,ref.types.int32);
+
+for (let i = 0; i < fields.length; i++) {
+let plen = ref.reinterpret(fieldL, 8, i * 8);
+let len = ref.get(plen, 0, ref.types.int32);
 fieldlens.push(len);
 }
 }
-if (numOfRows2 > 0){
+if (numOfRows2 > 0) {
 for (let i = 0; i < fields.length; i++) {
-if(ref.isNull(pdata.readPointer())){
-blocks[i] = new Array();
-}else{
-if (!convertFunctions[fields[i]['type']] ) {
-throw new errors.DatabaseError("Invalid data type returned from database");
-}
-let prow = ref.reinterpret(row,8,i*8);
-prow = prow.readPointer();
-prow = ref.ref(prow);
-blocks[i] = convertFunctions[fields[i]['type']](prow, 1, fieldlens[i], offset, isMicro);
-//offset += fields[i]['bytes'] * numOfRows2;
-}
+if (ref.isNull(pdata.readPointer())) {
+blocks[i] = new Array();
+} else {
+if (!convertFunctions[fields[i]['type']]) {
+throw new errors.DatabaseError("Invalid data type returned from database");
+}
+let prow = ref.reinterpret(row, 8, i * 8);
+prow = prow.readPointer();
+prow = ref.ref(prow);
+blocks[i] = convertFunctions[fields[i]['type']](prow, 1, fieldlens[i], offset, isMicro);
+//offset += fields[i]['bytes'] * numOfRows2;
+}
 }
 }
 callback(param2, result2, numOfRows2, blocks);
 }
-asyncCallbackWrapper = ffi.Callback(ref.types.void, [ ref.types.void_ptr, ref.types.void_ptr, ref.types.int ], asyncCallbackWrapper);
+asyncCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.int], asyncCallbackWrapper);
 this.libtaos.taos_fetch_rows_a(result, asyncCallbackWrapper, param);
 return param;
 }
 // Fetch field meta data by result handle
-CTaosInterface.prototype.fetchFields_a = function fetchFields_a (result) {
+CTaosInterface.prototype.fetchFields_a = function fetchFields_a(result) {
 let pfields = this.fetchFields(result);
 let pfieldscount = this.numFields(result);
 let fields = [];
 if (ref.isNull(pfields) == false) {
-pfields = ref.reinterpret(pfields, 68 * pfieldscount , 0);
+pfields = ref.reinterpret(pfields, 68 * pfieldscount, 0);
 for (let i = 0; i < pfields.length; i += 68) {
 //0 - 64 = name //65 = type, 66 - 67 = bytes
-fields.push( {
-name: ref.readCString(ref.reinterpret(pfields,65,i)),
+fields.push({
+name: ref.readCString(ref.reinterpret(pfields, 65, i)),
 type: pfields[i + 65],
 bytes: pfields[i + 66]
 })
@@ -488,7 +490,7 @@ CTaosInterface.prototype.fetchFields_a = function fetchFields_a (result) {
 }
 // Stop a query by result handle
 CTaosInterface.prototype.stopQuery = function stopQuery(result) {
-if (result != null){
+if (result != null) {
 this.libtaos.taos_stop_query(result);
 }
 else {
@@ -509,13 +511,13 @@ CTaosInterface.prototype.subscribe = function subscribe(connection, restart, top
 try {
 sql = sql != null ? ref.allocCString(sql) : ref.alloc(ref.types.char_ptr, ref.NULL);
 }
-catch(err) {
+catch (err) {
 throw "Attribute Error: sql is expected as a str";
 }
 try {
 topic = topic != null ? ref.allocCString(topic) : ref.alloc(ref.types.char_ptr, ref.NULL);
 }
-catch(err) {
+catch (err) {
 throw TypeError("topic is expected as a str");
 }
 
@@ -539,8 +541,8 @@ CTaosInterface.prototype.consume = function consume(subscription) {
 pfields = ref.reinterpret(pfields, this.numFields(result) * 68, 0);
 for (let i = 0; i < pfields.length; i += 68) {
 //0 - 63 = name //64 - 65 = bytes, 66 - 67 = type
-fields.push( {
-name: ref.readCString(ref.reinterpret(pfields,64,i)),
+fields.push({
+name: ref.readCString(ref.reinterpret(pfields, 64, i)),
 bytes: pfields[i + 64],
 type: pfields[i + 66]
 })
@@ -548,7 +550,7 @@ CTaosInterface.prototype.consume = function consume(subscription) {
 }
 
 let data = [];
-while(true) {
+while (true) {
 let { blocks, num_of_rows } = this.fetchBlock(result, fields);
 if (num_of_rows == 0) {
 break;
@@ -559,7 +561,7 @@ CTaosInterface.prototype.consume = function consume(subscription) {
 for (let j = 0; j < fields.length; j++) {
 rowBlock[j] = blocks[j][i];
 }
-data[data.length-1] = (rowBlock);
+data[data.length - 1] = (rowBlock);
 }
 }
 return { data: data, fields: fields, result: result };
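Note: consume() above reuses the same fetchBlock loop to drain everything a subscription has buffered, returning { data, fields, result }. A sketch of a polling consumer built on it, assuming a subscription handle obtained from cti.subscribe (handle and names are illustrative):

// Print every row delivered by a TDengine subscription.
function pollOnce(cti, subscription) {
  const { data, fields } = cti.consume(subscription);
  for (const row of data) {
    // Each row is an array ordered like 'fields'.
    console.log(fields.map((f, i) => f.name + '=' + row[i]).join(' '));
  }
}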
@@ -570,11 +572,11 @@ CTaosInterface.prototype.unsubscribe = function unsubscribe(subscription) {
 }
 
 // Continuous Query
-CTaosInterface.prototype.openStream = function openStream(connection, sql, callback, stime,stoppingCallback, param = ref.ref(ref.NULL)) {
+CTaosInterface.prototype.openStream = function openStream(connection, sql, callback, stime, stoppingCallback, param = ref.ref(ref.NULL)) {
 try {
 sql = ref.allocCString(sql);
 }
-catch(err) {
+catch (err) {
 throw "Attribute Error: sql string is expected as a str";
 }
 var cti = this;
@@ -587,7 +589,7 @@ CTaosInterface.prototype.openStream = function openStream(connection, sql, callb
 let offset = 0;
 if (numOfRows2 > 0) {
 for (let i = 0; i < fields.length; i++) {
-if (!convertFunctions[fields[i]['type']] ) {
+if (!convertFunctions[fields[i]['type']]) {
 throw new errors.DatabaseError("Invalid data type returned from database");
 }
 blocks[i] = convertFunctions[fields[i]['type']](row, numOfRows2, fields[i]['bytes'], offset, isMicro);
@@ -596,8 +598,8 @@ CTaosInterface.prototype.openStream = function openStream(connection, sql, callb
 }
 callback(param2, result2, blocks, fields);
 }
-asyncCallbackWrapper = ffi.Callback(ref.types.void, [ ref.types.void_ptr, ref.types.void_ptr, ref.refType(ref.types.void_ptr2) ], asyncCallbackWrapper);
-asyncStoppingCallbackWrapper = ffi.Callback( ref.types.void, [ ref.types.void_ptr ], stoppingCallback);
+asyncCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.refType(ref.types.void_ptr2)], asyncCallbackWrapper);
+asyncStoppingCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr], stoppingCallback);
 let streamHandle = this.libtaos.taos_open_stream(connection, sql, asyncCallbackWrapper, stime, param, asyncStoppingCallbackWrapper);
 if (ref.isNull(streamHandle)) {
 throw new errors.TDError('Failed to open a stream with TDengine');

@@ -1,7 +1,7 @@
 const ref = require('ref-napi');
 require('./globalfunc.js')
 const CTaosInterface = require('./cinterface')
-const errors = require ('./error')
+const errors = require('./error')
 const TaosQuery = require('./taosquery')
 const { PerformanceObserver, performance } = require('perf_hooks');
 module.exports = TDengineCursor;
@@ -22,7 +22,7 @@ module.exports = TDengineCursor;
 * @property {fields} - Array of the field objects in order from left to right of the latest data retrieved
 * @since 1.0.0
 */
-function TDengineCursor(connection=null) {
+function TDengineCursor(connection = null) {
 //All parameters are store for sync queries only.
 this._rowcount = -1;
 this._connection = null;
@@ -91,7 +91,7 @@ TDengineCursor.prototype.execute = function execute(operation, options, callback
 return null;
 }
 
-if (typeof options == 'function') {
+if (typeof options == 'function') {
 callback = options;
 }
 if (typeof options != 'object') options = {}
@@ -144,10 +144,10 @@ TDengineCursor.prototype.execute = function execute(operation, options, callback
 
 }
 TDengineCursor.prototype._createAffectedResponse = function (num, time) {
-return "Query OK, " + num + " row(s) affected (" + (time * 0.001).toFixed(8) + "s)";
+return "Query OK, " + num + " row(s) affected (" + (time * 0.001).toFixed(8) + "s)";
 }
 TDengineCursor.prototype._createSetResponse = function (num, time) {
-return "Query OK, " + num + " row(s) in set (" + (time * 0.001).toFixed(8) + "s)";
+return "Query OK, " + num + " row(s) in set (" + (time * 0.001).toFixed(8) + "s)";
 }
 TDengineCursor.prototype.executemany = function executemany() {
 

@@ -176,27 +176,22 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) {
 throw new errors.OperationalError("Invalid use of fetchall, either result or fields from query are null. First execute a query first");
 }
 
-let data = [];
+let num_of_rows = this._chandle.affectedRows(this._result);
+let data = new Array(num_of_rows);
 
 this._rowcount = 0;
-//let nodetime = 0;
 
 let time = 0;
 const obs = new PerformanceObserver((items) => {
 time += items.getEntries()[0].duration;
 performance.clearMarks();
 });
-/*
-const obs2 = new PerformanceObserver((items) => {
-nodetime += items.getEntries()[0].duration;
-performance.clearMarks();
-});
-obs2.observe({ entryTypes: ['measure'] });
-performance.mark('nodea');
-*/
 obs.observe({ entryTypes: ['measure'] });
 performance.mark('A');
-while(true) {
+
+while (true) {
 let blockAndRows = this._chandle.fetchBlock(this._result, this._fields);
+// console.log(blockAndRows);
+// break;
 let block = blockAndRows.blocks;
 let num_of_rows = blockAndRows.num_of_rows;
 if (num_of_rows == 0) {
@@ -205,22 +200,24 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) {
 this._rowcount += num_of_rows;
 let numoffields = this._fields.length;
 for (let i = 0; i < num_of_rows; i++) {
-data.push([]);
+
+// data.push([]);
+
 let rowBlock = new Array(numoffields);
 for (let j = 0; j < numoffields; j++) {
 rowBlock[j] = block[j][i];
 }
-data[data.length-1] = (rowBlock);
+data[this._rowcount - num_of_rows + i] = (rowBlock);
+// data.push(rowBlock);
 }
 
 }
 
 performance.mark('B');
 performance.measure('query', 'A', 'B');
 let response = this._createSetResponse(this._rowcount, time)
 console.log(response);
 
-// this._connection._clearResultSet();
+// this._connection._clearResultSet();
 let fields = this.fields;
 this._reset_result();
 this.data = data;
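Note: the fetchall rewrite above pre-sizes the result array from taos_affected_rows (via this._chandle.affectedRows) and writes each row at a computed index instead of pushing, so the array is allocated once even for large result sets. The index arithmetic relies on _rowcount already including the current block; a simplified standalone version of the same bookkeeping (names are illustrative):

// After this._rowcount += num_of_rows, row i of the current block
// belongs at global index (_rowcount - num_of_rows + i).
function placeBlock(data, rowcountAfterBlock, blockRows) {
  const base = rowcountAfterBlock - blockRows.length;
  for (let i = 0; i < blockRows.length; i++) {
    data[base + i] = blockRows[i];
  }
}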
@@ -239,12 +236,12 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) {
 * @return {number | Buffer} Number of affected rows or a Buffer that points to the results of the query
 * @since 1.0.0
 */
-TDengineCursor.prototype.execute_a = function execute_a (operation, options, callback, param) {
+TDengineCursor.prototype.execute_a = function execute_a(operation, options, callback, param) {
 if (operation == undefined) {
 throw new errors.ProgrammingError('No operation passed as argument');
 return null;
 }
-if (typeof options == 'function') {
+if (typeof options == 'function') {
 //we expect the parameter after callback to be param
 param = callback;
 callback = options;
@@ -265,14 +262,14 @@ TDengineCursor.prototype.execute_a = function execute_a (operation, options, cal
 }
 
 if (resCode >= 0) {
-// let fieldCount = cr._chandle.numFields(res2);
-// if (fieldCount == 0) {
-// //cr._chandle.freeResult(res2);
-// return res2;
-// }
-// else {
-// return res2;
-// }
+// let fieldCount = cr._chandle.numFields(res2);
+// if (fieldCount == 0) {
+// //cr._chandle.freeResult(res2);
+// return res2;
+// }
+// else {
+// return res2;
+// }
 return res2;
 
 }
@@ -317,7 +314,7 @@ TDengineCursor.prototype.execute_a = function execute_a (operation, options, cal
 * })
 */
 TDengineCursor.prototype.fetchall_a = function fetchall_a(result, options, callback, param = {}) {
-if (typeof options == 'function') {
+if (typeof options == 'function') {
 //we expect the parameter after callback to be param
 param = callback;
 callback = options;
@@ -360,17 +357,17 @@ TDengineCursor.prototype.fetchall_a = function fetchall_a(result, options, callb
 for (let k = 0; k < fields.length; k++) {
 rowBlock[k] = block[k][j];
 }
-data[data.length-1] = rowBlock;
+data[data.length - 1] = rowBlock;
 }
 }
 cr._chandle.freeResult(result2); // free result, avoid seg faults and mem leaks!
-callback(param2, result2, numOfRows2, {data:data,fields:fields});
+callback(param2, result2, numOfRows2, { data: data, fields: fields });
 
 }
 }
 ref.writeObject(buf, 0, param);
 param = this._chandle.fetch_rows_a(result, asyncCallbackWrapper, buf); //returned param
-return {param:param,result:result};
+return { param: param, result: result };
 }
 /**
 * Stop a query given the result handle.
@@ -428,7 +425,7 @@ TDengineCursor.prototype.subscribe = function subscribe(config) {
 */
 TDengineCursor.prototype.consumeData = async function consumeData(subscription, callback) {
 while (true) {
-let { data, fields, result} = this._chandle.consume(subscription);
+let { data, fields, result } = this._chandle.consume(subscription);
 callback(data, fields, result);
 }
 }
@@ -450,30 +447,30 @@ TDengineCursor.prototype.unsubscribe = function unsubscribe(subscription) {
 * @return {Buffer} A buffer pointing to the stream handle
 * @since 1.3.0
 */
-TDengineCursor.prototype.openStream = function openStream(sql, callback, stime = 0, stoppingCallback, param = {}) {
-let buf = ref.alloc('Object');
-ref.writeObject(buf, 0, param);
+TDengineCursor.prototype.openStream = function openStream(sql, callback, stime = 0, stoppingCallback, param = {}) {
+let buf = ref.alloc('Object');
+ref.writeObject(buf, 0, param);
 
-let asyncCallbackWrapper = function (param2, result2, blocks, fields) {
-let data = [];
-let num_of_rows = blocks[0].length;
-for (let j = 0; j < num_of_rows; j++) {
-data.push([]);
-let rowBlock = new Array(fields.length);
-for (let k = 0; k < fields.length; k++) {
-rowBlock[k] = blocks[k][j];
-}
-data[data.length-1] = rowBlock;
-}
-callback(param2, result2, blocks, fields);
-}
-return this._chandle.openStream(this._connection._conn, sql, asyncCallbackWrapper, stime, stoppingCallback, buf);
-}
-/**
-* Close a stream
-* @param {Buffer} - A buffer pointing to the handle of the stream to be closed
-* @since 1.3.0
-*/
-TDengineCursor.prototype.closeStream = function closeStream(stream) {
-this._chandle.closeStream(stream);
-}
+let asyncCallbackWrapper = function (param2, result2, blocks, fields) {
+let data = [];
+let num_of_rows = blocks[0].length;
+for (let j = 0; j < num_of_rows; j++) {
+data.push([]);
+let rowBlock = new Array(fields.length);
+for (let k = 0; k < fields.length; k++) {
+rowBlock[k] = blocks[k][j];
+}
+data[data.length - 1] = rowBlock;
+}
+callback(param2, result2, blocks, fields);
+}
+return this._chandle.openStream(this._connection._conn, sql, asyncCallbackWrapper, stime, stoppingCallback, buf);
+}
+/**
+* Close a stream
+* @param {Buffer} - A buffer pointing to the handle of the stream to be closed
+* @since 1.3.0
+*/
+TDengineCursor.prototype.closeStream = function closeStream(stream) {
+this._chandle.closeStream(stream);
+}
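Note: openStream/closeStream above carry the continuous-query feature this branch merges: taos_open_stream re-runs the given SQL as the stream window advances and hands each result block to the callback, and closeStream tears the stream down. A usage sketch against the cursor API shown above; the connection options, database, table, and SQL are made up for illustration:

const taos = require('td2.0-connector');
const conn = taos.connect({ host: '127.0.0.1', user: 'root', password: 'taosdata', port: 0 });
const cursor = conn.cursor();

// Continuous query: aggregate every 10s window and receive each
// window's rows in the callback (signature per openStream above).
const stream = cursor.openStream(
  'select count(*) from db.weather interval(10s)',
  function (param, result, blocks, fields) {
    console.log(blocks); // one array per column
  },
  0, // stime = 0: start from now
  function (param) { console.log('stream stopped'); }
);

// ...later, tear the stream down:
// cursor.closeStream(stream);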
@@ -1,285 +0,0 @@
-{
-"name": "td2.0-connector",
-"version": "2.0.6",
-"lockfileVersion": 1,
-"requires": true,
-"dependencies": {
-"array-index": {
-"version": "1.0.0",
-"resolved": "https://registry.npmjs.org/array-index/-/array-index-1.0.0.tgz",
-"integrity": "sha1-7FanSe4QPk4Ix5C5w1PfFgVbl/k=",
-"requires": {
-"debug": "^2.2.0",
-"es6-symbol": "^3.0.2"
-},
-"dependencies": {
-"debug": {
-"version": "2.6.9",
-"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
-"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
-"requires": {
-"ms": "2.0.0"
-}
-},
-"ms": {
-"version": "2.0.0",
-"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
-"integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
-}
-}
-},
-"d": {
-"version": "1.0.1",
-"resolved": "https://registry.npmjs.org/d/-/d-1.0.1.tgz",
-"integrity": "sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==",
-"requires": {
-"es5-ext": "^0.10.50",
-"type": "^1.0.1"
-}
-},
-"debug": {
-"version": "4.3.1",
-"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
-"integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
-"requires": {
-"ms": "2.1.2"
-}
-},
-"es5-ext": {
-"version": "0.10.53",
-"resolved": "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.53.tgz",
-"integrity": "sha512-Xs2Stw6NiNHWypzRTY1MtaG/uJlwCk8kH81920ma8mvN8Xq1gsfhZvpkImLQArw8AHnv8MT2I45J3c0R8slE+Q==",
-"requires": {
-"es6-iterator": "~2.0.3",
-"es6-symbol": "~3.1.3",
-"next-tick": "~1.0.0"
-}
-},
-"es6-iterator": {
-"version": "2.0.3",
-"resolved": "https://registry.npmjs.org/es6-iterator/-/es6-iterator-2.0.3.tgz",
-"integrity": "sha1-p96IkUGgWpSwhUQDstCg+/qY87c=",
-"requires": {
-"d": "1",
-"es5-ext": "^0.10.35",
-"es6-symbol": "^3.1.1"
-}
-},
-"es6-symbol": {
-"version": "3.1.3",
-"resolved": "https://registry.npmjs.org/es6-symbol/-/es6-symbol-3.1.3.tgz",
-"integrity": "sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==",
-"requires": {
-"d": "^1.0.1",
-"ext": "^1.1.2"
-}
-},
-"ext": {
-"version": "1.4.0",
-"resolved": "https://registry.npmjs.org/ext/-/ext-1.4.0.tgz",
-"integrity": "sha512-Key5NIsUxdqKg3vIsdw9dSuXpPCQ297y6wBjL30edxwPgt2E44WcWBZey/ZvUc6sERLTxKdyCu4gZFmUbk1Q7A==",
-"requires": {
-"type": "^2.0.0"
-},
-"dependencies": {
-"type": {
-"version": "2.1.0",
-"resolved": "https://registry.npmjs.org/type/-/type-2.1.0.tgz",
-"integrity": "sha512-G9absDWvhAWCV2gmF1zKud3OyC61nZDwWvBL2DApaVFogI07CprggiQAOOjvp2NRjYWFzPyu7vwtDrQFq8jeSA=="
-}
-}
-},
-"ffi-napi": {
-"version": "3.1.0",
-"resolved": "https://registry.npmjs.org/ffi-napi/-/ffi-napi-3.1.0.tgz",
-"integrity": "sha512-EsHO+sP2p/nUC/3l/l8m9niee1BLm4asUFDzkkBGR4kYVgp2KqdAYUomZhkKtzim4Fq7mcYHjpUaIHsMqs+E1g==",
-"requires": {
-"debug": "^4.1.1",
-"get-uv-event-loop-napi-h": "^1.0.5",
-"node-addon-api": "^2.0.0",
-"node-gyp-build": "^4.2.1",
-"ref-napi": "^2.0.1",
-"ref-struct-di": "^1.1.0"
-},
-"dependencies": {
-"ref-napi": {
-"version": "2.1.2",
-"resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-2.1.2.tgz",
-"integrity": "sha512-aFl+vrIuLWUXMUTQGAwGAuSNLX3Ub5W3iVP8b7KyFFZUdn4+i4U1TXXTop0kCTUfGNu8glBGVz4lowkwMcPVVA==",
-"requires": {
-"debug": "^4.1.1",
-"get-symbol-from-current-process-h": "^1.0.2",
-"node-addon-api": "^2.0.0",
-"node-gyp-build": "^4.2.1"
-}
-}
-}
-},
-"get-symbol-from-current-process-h": {
-"version": "1.0.2",
-"resolved": "https://registry.npmjs.org/get-symbol-from-current-process-h/-/get-symbol-from-current-process-h-1.0.2.tgz",
-"integrity": "sha512-syloC6fsCt62ELLrr1VKBM1ggOpMdetX9hTrdW77UQdcApPHLmf7CI7OKcN1c9kYuNxKcDe4iJ4FY9sX3aw2xw=="
-},
-"get-uv-event-loop-napi-h": {
-"version": "1.0.6",
-"resolved": "https://registry.npmjs.org/get-uv-event-loop-napi-h/-/get-uv-event-loop-napi-h-1.0.6.tgz",
-"integrity": "sha512-t5c9VNR84nRoF+eLiz6wFrEp1SE2Acg0wS+Ysa2zF0eROes+LzOfuTaVHxGy8AbS8rq7FHEJzjnCZo1BupwdJg==",
-"requires": {
-"get-symbol-from-current-process-h": "^1.0.1"
-}
-},
-"ms": {
-"version": "2.1.2",
-"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
-"integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
-},
-"next-tick": {
-"version": "1.0.0",
-"resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.0.0.tgz",
-"integrity": "sha1-yobR/ogoFpsBICCOPchCS524NCw="
-},
-"node-addon-api": {
-"version": "2.0.2",
-"resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-2.0.2.tgz",
-"integrity": "sha512-Ntyt4AIXyaLIuMHF6IOoTakB3K+RWxwtsHNRxllEoA6vPwP9o4866g6YWDLUdnucilZhmkxiHwHr11gAENw+QA=="
-},
-"node-gyp-build": {
-"version": "4.2.3",
-"resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.2.3.tgz",
-"integrity": "sha512-MN6ZpzmfNCRM+3t57PTJHgHyw/h4OWnZ6mR8P5j/uZtqQr46RRuDE/P+g3n0YR/AiYXeWixZZzaip77gdICfRg=="
-},
-"ref-array-napi": {
-"version": "1.2.1",
-"resolved": "https://registry.npmjs.org/ref-array-napi/-/ref-array-napi-1.2.1.tgz",
-"integrity": "sha512-jQp2WWSucmxkqVfoNfm7yDlDeGu3liAbzqfwjNybL80ooLOCnCZpAK2woDInY+lxNOK/VlIVSqeDEYb4gVPuNQ==",
-"requires": {
-"array-index": "1",
-"debug": "2",
-"ref-napi": "^1.4.2"
-},
-"dependencies": {
-"debug": {
-"version": "2.6.9",
-"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
-"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
-"requires": {
-"ms": "2.0.0"
-}
-},
-"ms": {
-"version": "2.0.0",
-"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
-"integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
-},
-"ref-napi": {
-"version": "1.5.2",
-"resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-1.5.2.tgz",
-"integrity": "sha512-hwyNmWpUkt1bDWDW4aiwCoC+SJfJO69UIdjqssNqdaS0sYJpgqzosGg/rLtk69UoQ8drZdI9yyQefM7eEMM3Gw==",
-"requires": {
-"debug": "^3.1.0",
-"node-addon-api": "^2.0.0",
-"node-gyp-build": "^4.2.1"
-},
-"dependencies": {
-"debug": {
-"version": "3.2.7",
-"resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
-"integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
-"requires": {
-"ms": "^2.1.1"
-}
-},
-"ms": {
-"version": "2.1.3",
-"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
|
||||
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"ref-napi": {
|
||||
"version": "3.0.1",
|
||||
"resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-3.0.1.tgz",
|
||||
"integrity": "sha512-W3rcb0E+tlO9u9ySFnX5vifInwwPGToOfFgTZUHJBNiOBsW0NNvgHz2zJN7ctABo/2yIlgdPQUvuqqfORIF4LA==",
|
||||
"requires": {
|
||||
"debug": "^4.1.1",
|
||||
"get-symbol-from-current-process-h": "^1.0.2",
|
||||
"node-addon-api": "^2.0.0",
|
||||
"node-gyp-build": "^4.2.1"
|
||||
}
|
||||
},
|
||||
"ref-struct-di": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/ref-struct-di/-/ref-struct-di-1.1.1.tgz",
|
||||
"integrity": "sha512-2Xyn/0Qgz89VT+++WP0sTosdm9oeowLP23wRJYhG4BFdMUrLj3jhwHZNEytYNYgtPKLNTP3KJX4HEgBvM1/Y2g==",
|
||||
"requires": {
|
||||
"debug": "^3.1.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"debug": {
|
||||
"version": "3.2.7",
|
||||
"resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
|
||||
"integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
|
||||
"requires": {
|
||||
"ms": "^2.1.1"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"ref-struct-napi": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/ref-struct-napi/-/ref-struct-napi-1.1.1.tgz",
|
||||
"integrity": "sha512-YgS5/d7+kT5zgtySYI5ieH0hREdv+DabgDvoczxsui0f9VLm0rrDcWEj4DHKehsH+tJnVMsLwuyctWgvdEcVRw==",
|
||||
"requires": {
|
||||
"debug": "2",
|
||||
"ref-napi": "^1.4.2"
|
||||
},
|
||||
"dependencies": {
|
||||
"debug": {
|
||||
"version": "2.6.9",
|
||||
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
|
||||
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
|
||||
"requires": {
|
||||
"ms": "2.0.0"
|
||||
}
|
||||
},
|
||||
"ms": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
|
||||
"integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
|
||||
},
|
||||
"ref-napi": {
|
||||
"version": "1.5.2",
|
||||
"resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-1.5.2.tgz",
|
||||
"integrity": "sha512-hwyNmWpUkt1bDWDW4aiwCoC+SJfJO69UIdjqssNqdaS0sYJpgqzosGg/rLtk69UoQ8drZdI9yyQefM7eEMM3Gw==",
|
||||
"requires": {
|
||||
"debug": "^3.1.0",
|
||||
"node-addon-api": "^2.0.0",
|
||||
"node-gyp-build": "^4.2.1"
|
||||
},
|
||||
"dependencies": {
|
||||
"debug": {
|
||||
"version": "3.2.7",
|
||||
"resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
|
||||
"integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
|
||||
"requires": {
|
||||
"ms": "^2.1.1"
|
||||
}
|
||||
},
|
||||
"ms": {
|
||||
"version": "2.1.3",
|
||||
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
|
||||
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"type": {
|
||||
"version": "1.2.0",
|
||||
"resolved": "https://registry.npmjs.org/type/-/type-1.2.0.tgz",
|
||||
"integrity": "sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg=="
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,6 +1,6 @@
 {
   "name": "td2.0-connector",
-  "version": "2.0.6",
+  "version": "2.0.7",
   "description": "A Node.js connector for TDengine.",
   "main": "tdengine.js",
   "directories": {
@@ -219,6 +219,7 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_VND_NO_WRITE_AUTH            TAOS_DEF_ERROR_CODE(0, 0x0512)  //"Database write operation denied")
 #define TSDB_CODE_VND_IS_SYNCING               TAOS_DEF_ERROR_CODE(0, 0x0513)  //"Database is syncing")
 #define TSDB_CODE_VND_INVALID_TSDB_STATE       TAOS_DEF_ERROR_CODE(0, 0x0514)  //"Invalid tsdb state")
+#define TSDB_CODE_VND_IS_CLOSING               TAOS_DEF_ERROR_CODE(0, 0x0515)  //"Database is closing")
 
 // tsdb
 #define TSDB_CODE_TDB_INVALID_TABLE_ID         TAOS_DEF_ERROR_CODE(0, 0x0600)  //"Invalid table ID")
(File diff suppressed because it is too large.)
@@ -712,13 +712,13 @@ static int32_t sdbProcessWrite(void *wparam, void *hparam, int32_t qtype, void *
   if (action == SDB_ACTION_INSERT) {
     return sdbPerformInsertAction(pHead, pTable);
   } else if (action == SDB_ACTION_DELETE) {
-    if (qtype == TAOS_QTYPE_FWD) {
+    //if (qtype == TAOS_QTYPE_FWD) {
       // Drop database/stable may take a long time and cause a timeout, so we confirm first then reput it into queue
-      sdbWriteFwdToQueue(1, hparam, TAOS_QTYPE_QUERY, unused);
-      return TSDB_CODE_SUCCESS;
-    } else {
+    //  sdbWriteFwdToQueue(1, hparam, TAOS_QTYPE_QUERY, unused);
+    //  return TSDB_CODE_SUCCESS;
+    //} else {
       return sdbPerformDeleteAction(pHead, pTable);
-    }
+    //}
   } else if (action == SDB_ACTION_UPDATE) {
     return sdbPerformUpdateAction(pHead, pTable);
   } else {
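The comment in the hunk above describes a two-phase pattern worth spelling out: a slow delete (dropping a database or stable) can exceed the forward timeout, so the original path confirmed the forwarded message first and re-queued the actual work for asynchronous processing. The sketch below illustrates that confirm-first pattern in Python; it is an illustration only, with hypothetical names (handle_delete, confirm, apply_delete), not TDengine's queue code.

    import queue

    def handle_delete(msg, work_queue, confirm, apply_delete):
        """Confirm-first handling of a slow delete: acknowledge now, apply later."""
        if msg.forwarded:
            confirm(msg)             # reply before the slow work, avoiding a peer timeout
            work_queue.put(msg)      # re-queue; a worker applies the delete asynchronously
            return 0                 # success reported immediately
        return apply_delete(msg)     # locally originated messages are applied inline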
@@ -1189,8 +1189,8 @@ static int32_t mnodeFindSuperTableTagIndex(SSTableObj *pStable, const char *tagN
 
 static int32_t mnodeAddSuperTableTagCb(SMnodeMsg *pMsg, int32_t code) {
   SSTableObj *pStable = (SSTableObj *)pMsg->pTable;
-  mLInfo("msg:%p, app:%p stable %s, add tag result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
-         tstrerror(code));
+  mLInfo("msg:%p, app:%p stable %s, add tag result:%s, numOfTags:%d", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
+         tstrerror(code), pStable->numOfTags);
 
   return code;
 }
@@ -23,14 +23,14 @@
 typedef void (*FLinuxSignalHandler)(int32_t signum, siginfo_t *sigInfo, void *context);
 
 void taosSetSignal(int32_t signum, FSignalHandler sigfp) {
-  struct sigaction act = {{0}};
+  struct sigaction act; memset(&act, 0, sizeof(act));
 #if 1
   act.sa_flags = SA_SIGINFO;
   act.sa_sigaction = (FLinuxSignalHandler)sigfp;
 #else
   act.sa_handler = sigfp;
 #endif
   sigaction(signum, &act, NULL);
 }
 
 void taosIgnSignal(int32_t signum) {
@@ -709,7 +709,7 @@ static void syncChooseMaster(SSyncNode *pNode) {
 }
 
 static SSyncPeer *syncCheckMaster(SSyncNode *pNode) {
-  int32_t onlineNum = 0;
+  int32_t onlineNum = 0, arbOnlineNum = 0;
   int32_t masterIndex = -1;
   int32_t replica = pNode->replica;
 
@@ -723,13 +723,15 @@ static SSyncPeer *syncCheckMaster(SSyncNode *pNode) {
   SSyncPeer *pArb = pNode->peerInfo[TAOS_SYNC_MAX_REPLICA];
   if (pArb && pArb->role != TAOS_SYNC_ROLE_OFFLINE) {
     onlineNum++;
+    ++arbOnlineNum;
     replica = pNode->replica + 1;
   }
 
   if (onlineNum <= replica * 0.5) {
     if (nodeRole != TAOS_SYNC_ROLE_UNSYNCED) {
-      if (nodeRole == TAOS_SYNC_ROLE_MASTER && onlineNum == replica * 0.5 && onlineNum >= 1) {
+      if (nodeRole == TAOS_SYNC_ROLE_MASTER && onlineNum == replica * 0.5 && ((replica > 2 && onlineNum - arbOnlineNum > 1) || pNode->replica < 3)) {
         sInfo("vgId:%d, self keep work as master, online:%d replica:%d", pNode->vgId, onlineNum, replica);
         masterIndex = pNode->selfIndex;
       } else {
         nodeRole = TAOS_SYNC_ROLE_UNSYNCED;
         sInfo("vgId:%d, self change to unsynced state, online:%d replica:%d", pNode->vgId, onlineNum, replica);
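The quorum change above counts an online arbitrator toward onlineNum but also tracks it separately in arbOnlineNum, so a master whose only remaining supporter is the arbitrator no longer keeps its role in a 3-replica group. A worked example of the arithmetic, as a hedged Python sketch (plain function with hypothetical names, not the C implementation):

    def keep_master(replica_cfg: int, online: int, arb_online: int) -> bool:
        """Mirror of the keep-as-master decision in syncCheckMaster (sketch)."""
        replica = replica_cfg + (1 if arb_online else 0)  # arbitrator joins the vote count
        if online > replica * 0.5:
            return True                                   # clear majority, nothing to decide
        return online == replica * 0.5 and (
            (replica > 2 and online - arb_online > 1) or replica_cfg < 3)

    # replica_cfg=2 plus arbitrator: replica=3; self + arb online -> keep_master(2, 2, 1) is True (majority)
    # replica_cfg=3 plus arbitrator: replica=4; only self + arb online -> keep_master(3, 2, 1) is False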
@@ -1002,6 +1004,7 @@ static void syncProcessForwardFromPeer(char *cont, SSyncPeer *pPeer) {
   if (nodeRole == TAOS_SYNC_ROLE_SLAVE) {
     // nodeVersion = pHead->version;
     code = (*pNode->writeToCacheFp)(pNode->vgId, pHead, TAOS_QTYPE_FWD, NULL);
+    syncConfirmForward(pNode->rid, pHead->version, code, false);
   } else {
     if (nodeSStatus != TAOS_SYNC_STATUS_INIT) {
       code = syncSaveIntoBuffer(pPeer, pHead);
@@ -1404,7 +1407,7 @@ static void syncMonitorFwdInfos(void *param, void *tmrId) {
     pthread_mutex_lock(&pNode->mutex);
     for (int32_t i = 0; i < pSyncFwds->fwds; ++i) {
       SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + (pSyncFwds->first + i) % SYNC_MAX_FWDS;
-      if (ABS(time - pFwdInfo->time) < 2000) break;
+      if (ABS(time - pFwdInfo->time) < 10000) break;
 
       sDebug("vgId:%d, forward info expired, hver:%" PRIu64 " curtime:%" PRIu64 " savetime:%" PRIu64, pNode->vgId,
              pFwdInfo->version, time, pFwdInfo->time);
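The monitor above walks the forward-info ring buffer oldest-first via (first + i) % SYNC_MAX_FWDS and stops at the first entry younger than the threshold, which this commit widens from 2 to 10 seconds. A minimal Python sketch of that oldest-first expiry scan, with hypothetical names (expire_fwd_infos, fwd_ring):

    SYNC_MAX_FWDS = 512
    EXPIRE_MS = 10000  # threshold raised from 2000 ms in this commit

    def expire_fwd_infos(fwd_ring, first, count, now_ms):
        """Collect entries older than EXPIRE_MS; entries are time-ordered, so stop early."""
        expired = []
        for i in range(count):
            info = fwd_ring[(first + i) % SYNC_MAX_FWDS]   # oldest entry first
            if abs(now_ms - info["time"]) < EXPIRE_MS:
                break                                      # everything newer is still in flight
            expired.append(info)
        return expired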
@@ -613,7 +613,7 @@ void doCleanupDataCache(SCacheObj *pCacheObj) {
 
   // todo memory leak if there are object with refcount greater than 0 in hash table?
   taosHashCleanup(pCacheObj->pHashTable);
-  taosTrashcanEmpty(pCacheObj, true);
+  taosTrashcanEmpty(pCacheObj, false);
 
   __cache_lock_destroy(pCacheObj);
 
@@ -17,7 +17,7 @@
    misrepresented as being the original software.
    3. This notice may not be removed or altered from any source distribution.
 */
-#ifndef _TD_ARM_
+#if !defined(_TD_ARM_) && !defined(_TD_MIPS_)
 #include <nmmintrin.h>
 #endif
 
@@ -419,7 +419,11 @@ void vnodeDestroy(SVnodeObj *pVnode) {
   }
 
   if (pVnode->tsdb) {
-    code = tsdbCloseRepo(pVnode->tsdb, 1);
+    // the deleted vnode does not need to commit, so as to speed up the deletion
+    int toCommit = 1;
+    if (pVnode->dropped) toCommit = 0;
+
+    code = tsdbCloseRepo(pVnode->tsdb, toCommit);
     pVnode->tsdb = NULL;
   }
 
@@ -126,11 +126,16 @@ void vnodeStopSyncFile(int32_t vgId, uint64_t fversion) {
 }
 
 void vnodeConfirmForard(int32_t vgId, void *wparam, int32_t code) {
-  void *pVnode = vnodeAcquire(vgId);
+  SVnodeObj *pVnode = vnodeAcquire(vgId);
   if (pVnode == NULL) {
     vError("vgId:%d, vnode not found while confirm forward", vgId);
   }
 
+  if (code == TSDB_CODE_SYN_CONFIRM_EXPIRED && pVnode->status == TAOS_VN_STATUS_CLOSING) {
+    vDebug("vgId:%d, db:%s, vnode is closing while confirm forward", vgId, pVnode->db);
+    code = TSDB_CODE_VND_IS_CLOSING;
+  }
+
   dnodeSendRpcVWriteRsp(pVnode, wparam, code);
   vnodeRelease(pVnode);
 }
@@ -37,7 +37,7 @@ pipeline {
     stage('Parallel test stage') {
       parallel {
         stage('pytest') {
-          agent{label '184'}
+          agent{label 'slad1'}
           steps {
             pre_test()
             sh '''

@@ -62,7 +62,7 @@ pipeline {
         }
 
         stage('test_crash_gen') {
-          agent{label "185"}
+          agent{label "slad2"}
           steps {
             pre_test()
             sh '''

@@ -149,7 +149,7 @@ pipeline {
         }
 
         stage('test_valgrind') {
-          agent{label "186"}
+          agent{label "slad3"}
 
           steps {
             pre_test()
@@ -0,0 +1,309 @@
def pre_test(){

    sh '''
    sudo rmtaos||echo 'no taosd installed'
    '''
    sh '''
    cd ${WKC}
    git reset --hard
    git checkout $BRANCH_NAME
    git pull
    git submodule update
    cd ${WK}
    git reset --hard
    git checkout $BRANCH_NAME
    git pull
    export TZ=Asia/Harbin
    date
    rm -rf ${WK}/debug
    mkdir debug
    cd debug
    cmake .. > /dev/null
    make > /dev/null
    make install > /dev/null
    pip3 install ${WKC}/src/connector/python/linux/python3/
    '''
    return 1
}
pipeline {
  agent none
  environment{
      WK = '/var/lib/jenkins/workspace/TDinternal'
      WKC= '/var/lib/jenkins/workspace/TDinternal/community'
  }

  stages {
    stage('Parallel test stage') {
      parallel {
        stage('pytest') {
          agent{label 'slam1'}
          steps {
            pre_test()
            sh '''
            cd ${WKC}/tests
            find pytest -name '*'sql|xargs rm -rf
            ./test-all.sh pytest
            date'''
          }
        }
        stage('test_b1') {
          agent{label 'slam2'}
          steps {
            pre_test()
            sh '''
            cd ${WKC}/tests
            ./test-all.sh b1
            date'''
          }
        }

        stage('test_crash_gen') {
          agent{label "slam3"}
          steps {
            pre_test()
            sh '''
            cd ${WKC}/tests/pytest
            '''
            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                sh '''
                cd ${WKC}/tests/pytest
                ./crash_gen.sh -a -p -t 4 -s 2000
                '''
            }
            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                sh '''
                cd ${WKC}/tests/pytest
                rm -rf /var/lib/taos/*
                rm -rf /var/log/taos/*
                ./handle_crash_gen_val_log.sh
                '''
            }
            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                sh '''
                cd ${WKC}/tests/pytest
                rm -rf /var/lib/taos/*
                rm -rf /var/log/taos/*
                ./handle_taosd_val_log.sh
                '''
            }

            sh '''
            systemctl start taosd
            sleep 10
            '''
            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                sh '''
                cd ${WKC}/tests/gotest
                bash batchtest.sh
                '''
            }
            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                sh '''
                cd ${WKC}/tests/examples/python/PYTHONConnectorChecker
                python3 PythonChecker.py
                '''
            }
            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                sh '''
                cd ${WKC}/tests/examples/JDBC/JDBCDemo/
                mvn clean package assembly:single -DskipTests >/dev/null
                java -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
                '''
            }
            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                sh '''
                cd ${WKC}/src/connector/jdbc
                mvn clean package -Dmaven.test.skip=true >/dev/null
                cd ${WKC}/tests/examples/JDBC/JDBCDemo/
                java --class-path=../../../../src/connector/jdbc/target:$JAVA_HOME/jre/lib/ext -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
                '''
            }
            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                sh '''
                cp -rf ${WKC}/tests/examples/nodejs ${JENKINS_HOME}/workspace/
                cd ${JENKINS_HOME}/workspace/nodejs
                node nodejsChecker.js host=localhost
                '''
            }
            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                sh '''
                cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC#
                dotnet run
                '''
            }
            sh '''
            systemctl stop taosd
            cd ${WKC}/tests
            ./test-all.sh b2
            date
            '''
            sh '''
            cd ${WKC}/tests
            ./test-all.sh full unit
            date'''
          }
        }

        stage('test_valgrind') {
          agent{label "slam4"}

          steps {
            pre_test()
            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                sh '''
                cd ${WKC}/tests/pytest
                nohup taosd >/dev/null &
                sleep 10
                python3 concurrent_inquiry.py -c 1
                '''
            }
            sh '''
            cd ${WKC}/tests
            ./test-all.sh full jdbc
            date'''
            sh '''
            cd ${WKC}/tests/pytest
            ./valgrind-test.sh 2>&1 > mem-error-out.log
            ./handle_val_log.sh

            date
            cd ${WKC}/tests
            ./test-all.sh b3
            date'''
            sh '''
            date
            cd ${WKC}/tests
            ./test-all.sh full example
            date'''
          }
        }

        stage('arm64_build'){
          agent{label 'arm64'}
          steps{
              sh '''
              cd ${WK}
              git fetch
              git checkout develop
              git pull
              cd ${WKC}
              git fetch
              git checkout develop
              git pull
              git submodule update
              cd ${WKC}/packaging
              ./release.sh -v cluster -c aarch64 -n 2.0.0.0 -m 2.0.0.0
              '''
          }
        }
        stage('arm32_build'){
          agent{label 'arm32'}
          steps{
              catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                  sh '''
                  cd ${WK}
                  git fetch
                  git checkout develop
                  git pull
                  cd ${WKC}
                  git fetch
                  git checkout develop
                  git pull
                  git submodule update
                  cd ${WKC}/packaging
                  ./release.sh -v cluster -c aarch32 -n 2.0.0.0 -m 2.0.0.0
                  '''
              }
          }
        }
      }
    }
  }

  post {
    success {
      emailext (
        subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
        body: '''<!DOCTYPE html>
        <html>
        <head>
        <meta charset="UTF-8">
        </head>
        <body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
            <table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
                <tr>
                    <td><br />
                        <b><font color="#0B610B"><font size="6">Build Information</font></font></b>
                        <hr size="2" width="100%" align="center" /></td>
                </tr>
                <tr>
                    <td>
                        <ul>
                        <div style="font-size:18px">
                            <li>Build name &gt;&gt; branch: ${PROJECT_NAME}</li>
                            <li>Build result: <span style="color:green"> Successful </span></li>
                            <li>Build number: ${BUILD_NUMBER}</li>
                            <li>Triggered by: ${CAUSE}</li>
                            <li>Change summary: ${CHANGES}</li>
                            <li>Build URL: <a href=${BUILD_URL}>${BUILD_URL}</a></li>
                            <li>Build log: <a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
                            <li>Change set: ${JELLY_SCRIPT}</li>
                        </div>
                        </ul>
                    </td>
                </tr>
            </table></font>
        </body>
        </html>''',
        to: "yqliu@taosdata.com,pxiao@taosdata.com",
        from: "support@taosdata.com"
      )
    }
    failure {
      emailext (
        subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
        body: '''<!DOCTYPE html>
        <html>
        <head>
        <meta charset="UTF-8">
        </head>
        <body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
            <table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
                <tr>
                    <td><br />
                        <b><font color="#0B610B"><font size="6">Build Information</font></font></b>
                        <hr size="2" width="100%" align="center" /></td>
                </tr>
                <tr>
                    <td>
                        <ul>
                        <div style="font-size:18px">
                            <li>Build name &gt;&gt; branch: ${PROJECT_NAME}</li>
                            <li>Build result: <span style="color:red"> Failure </span></li>
                            <li>Build number: ${BUILD_NUMBER}</li>
                            <li>Triggered by: ${CAUSE}</li>
                            <li>Change summary: ${CHANGES}</li>
                            <li>Build URL: <a href=${BUILD_URL}>${BUILD_URL}</a></li>
                            <li>Build log: <a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
                            <li>Change set: ${JELLY_SCRIPT}</li>
                        </div>
                        </ul>
                    </td>
                </tr>
            </table></font>
        </body>
        </html>''',
        to: "yqliu@taosdata.com,pxiao@taosdata.com",
        from: "support@taosdata.com"
      )
    }
  }
}
@@ -64,18 +64,25 @@ function runQueryPerfTest {
   [ -f $PERFORMANCE_TEST_REPORT ] && rm $PERFORMANCE_TEST_REPORT
   nohup $WORK_DIR/TDengine/debug/build/bin/taosd -c /etc/taosperf/ > /dev/null 2>&1 &
   echoInfo "Wait TDengine to start"
-  sleep 300
+  sleep 60
   echoInfo "Run Performance Test"
   cd $WORK_DIR/TDengine/tests/pytest
 
   python3 query/queryPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT
 
+  mkdir -p /var/lib/perf/
+  mkdir -p /var/log/perf/
+  rm -rf /var/lib/perf/*
+  rm -rf /var/log/perf/*
+  nohup $WORK_DIR/TDengine/debug/build/bin/taosd -c /etc/perf/ > /dev/null 2>&1 &
+  echoInfo "Wait TDengine to start"
+  sleep 10
+  echoInfo "Run Performance Test"
+  cd $WORK_DIR/TDengine/tests/pytest
+
   python3 insert/insertFromCSVPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT
 
   python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT
+
+  python3 perfbenchmark/joinPerformance.py | tee -a $PERFORMANCE_TEST_REPORT
 
 }
@@ -22,7 +22,7 @@ from queue import Queue, Empty
 from .shared.config import Config
 from .shared.db import DbTarget, DbConn
 from .shared.misc import Logging, Helper, CrashGenError, Status, Progress, Dice
-from .shared.types import DirPath
+from .shared.types import DirPath, IpcStream
 
 # from crash_gen.misc import CrashGenError, Dice, Helper, Logging, Progress, Status
 # from crash_gen.db import DbConn, DbTarget
@@ -177,13 +177,12 @@ quorum 2
         return "127.0.0.1"
 
     def getServiceCmdLine(self): # to start the instance
-        cmdLine = []
         if Config.getConfig().track_memory_leaks:
             Logging.info("Invoking VALGRIND on service...")
-            cmdLine = ['valgrind', '--leak-check=yes']
-        # TODO: move "exec -c" into Popen(), we can both "use shell" and NOT fork so ask to lose kill control
-        cmdLine += ["exec " + self.getExecFile(), '-c', self.getCfgDir()] # used in subprocess.Popen()
-        return cmdLine
+            return ['exec /usr/bin/valgrind', '--leak-check=yes', self.getExecFile(), '-c', self.getCfgDir()]
+        else:
+            # TODO: move "exec -c" into Popen(), we can both "use shell" and NOT fork so ask to lose kill control
+            return ["exec " + self.getExecFile(), '-c', self.getCfgDir()] # used in subprocess.Popen()
 
     def _getDnodes(self, dbc):
         dbc.query("show dnodes")
@@ -281,16 +280,16 @@ class TdeSubProcess:
         return '[TdeSubProc: pid = {}, status = {}]'.format(
             self.getPid(), self.getStatus() )
 
-    def getStdOut(self) -> BinaryIO :
+    def getIpcStdOut(self) -> IpcStream :
         if self._popen.universal_newlines : # alias of text_mode
             raise CrashGenError("We need binary mode for STDOUT IPC")
         # Logging.info("Type of stdout is: {}".format(type(self._popen.stdout)))
-        return typing.cast(BinaryIO, self._popen.stdout)
+        return typing.cast(IpcStream, self._popen.stdout)
 
-    def getStdErr(self) -> BinaryIO :
+    def getIpcStdErr(self) -> IpcStream :
         if self._popen.universal_newlines : # alias of text_mode
             raise CrashGenError("We need binary mode for STDERR IPC")
-        return typing.cast(BinaryIO, self._popen.stderr)
+        return typing.cast(IpcStream, self._popen.stderr)
 
     # Now it's always running, since we matched the life cycle
     # def isRunning(self):
@@ -301,11 +300,6 @@ class TdeSubProcess:
 
     def _start(self, cmdLine) -> Popen :
         ON_POSIX = 'posix' in sys.builtin_module_names
 
-        # Sanity check
-        # if self.subProcess:  # already there
-        #     raise RuntimeError("Corrupt process state")
-
-
         # Prepare environment variables for coverage information
         # Ref: https://stackoverflow.com/questions/2231227/python-subprocess-popen-with-a-modified-environment
@@ -314,9 +308,8 @@ class TdeSubProcess:
 
         # print(myEnv)
         # print("Starting TDengine with env: ", myEnv.items())
-        # print("Starting TDengine via Shell: {}".format(cmdLineStr))
+        print("Starting TDengine: {}".format(cmdLine))
 
         # useShell = True # Needed to pass environments into it
         return Popen(
             ' '.join(cmdLine), # ' '.join(cmdLine) if useShell else cmdLine,
             shell=True, # Always use shell, since we need to pass ENV vars
@@ -732,19 +725,19 @@ class ServiceManagerThread:
         self._ipcQueue = Queue() # type: Queue
         self._thread = threading.Thread( # First thread captures server OUTPUT
             target=self.svcOutputReader,
-            args=(subProc.getStdOut(), self._ipcQueue, logDir))
+            args=(subProc.getIpcStdOut(), self._ipcQueue, logDir))
         self._thread.daemon = True # thread dies with the program
         self._thread.start()
         time.sleep(0.01)
         if not self._thread.is_alive(): # What happened?
-            Logging.info("Failed to started process to monitor STDOUT")
+            Logging.info("Failed to start process to monitor STDOUT")
             self.stop()
             raise CrashGenError("Failed to start thread to monitor STDOUT")
         Logging.info("Successfully started process to monitor STDOUT")
 
         self._thread2 = threading.Thread( # 2nd thread captures server ERRORs
             target=self.svcErrorReader,
-            args=(subProc.getStdErr(), self._ipcQueue, logDir))
+            args=(subProc.getIpcStdErr(), self._ipcQueue, logDir))
         self._thread2.daemon = True # thread dies with the program
         self._thread2.start()
         time.sleep(0.01)
@@ -887,14 +880,19 @@ class ServiceManagerThread:
             print("\nNon-UTF8 server output: {}\n".format(bChunk.decode('cp437')))
             return None
 
-    def _textChunkGenerator(self, streamIn: BinaryIO, logDir: str, logFile: str
+    def _textChunkGenerator(self, streamIn: IpcStream, logDir: str, logFile: str
                             ) -> Generator[TextChunk, None, None]:
         '''
-        Take an input stream with binary data, produce a generator of decoded
-        "text chunks", and also save the original binary data in a log file.
+        Take an input stream with binary data (likely from Popen), produce a generator of decoded
+        "text chunks".
+
+        Side effect: it also saves the original binary data in a log file.
         '''
         os.makedirs(logDir, exist_ok=True)
         logF = open(os.path.join(logDir, logFile), 'wb')
         if logF is None:
             Logging.error("Failed to open log file (binary write): {}/{}".format(logDir, logFile))
             return
         for bChunk in iter(streamIn.readline, b''):
             logF.write(bChunk) # Write to log file immediately
             tChunk = self._decodeBinaryChunk(bChunk) # decode
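The refactor above narrows the generator's contract: it reads raw bytes from the child process's pipe, tees each chunk into a binary log file, and yields decoded text. A self-contained sketch of that read-log-decode loop (assumed command and file names; not the crash_gen module itself):

    import os
    import subprocess

    def text_chunks(cmd, log_path):
        """Yield decoded lines from a child process while logging the raw bytes verbatim."""
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)   # binary mode: no text=True
        os.makedirs(os.path.dirname(log_path) or ".", exist_ok=True)
        with open(log_path, "wb") as log_f:
            for b_chunk in iter(proc.stdout.readline, b""):
                log_f.write(b_chunk)                           # side effect: verbatim binary log
                yield b_chunk.decode("utf-8", errors="replace")
        proc.stdout.close()
        proc.wait()

    # for line in text_chunks(["taosd", "-c", "/etc/taos"], "logs/stdout.log"):
    #     print(line, end="")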
@@ -902,14 +900,14 @@ class ServiceManagerThread:
             yield tChunk # TODO: split into actual text lines
 
         # At the end...
-        streamIn.close() # Close the stream
-        logF.close() # Close the output file
+        streamIn.close() # Close the incoming stream
+        logF.close() # Close the log file
 
-    def svcOutputReader(self, stdOut: BinaryIO, queue, logDir: str):
+    def svcOutputReader(self, ipcStdOut: IpcStream, queue, logDir: str):
         '''
         The infinite routine that processes the STDOUT stream for the sub process being managed.
 
-        :param stdOut: the IO stream object used to fetch the data from
+        :param ipcStdOut: the IO stream object used to fetch the data from
         :param queue: the queue where we dump the roughly parsed chunk-by-chunk text data
         :param logDir: where we should dump a verbatim output file
         '''

@@ -917,7 +915,7 @@ class ServiceManagerThread:
         # Important Reference: https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
         # print("This is the svcOutput Reader...")
         # stdOut.readline() # Skip the first output? TODO: remove?
-        for tChunk in self._textChunkGenerator(stdOut, logDir, 'stdout.log') :
+        for tChunk in self._textChunkGenerator(ipcStdOut, logDir, 'stdout.log') :
             queue.put(tChunk) # tChunk guaranteed not to be None
             self._printProgress("_i")

@@ -940,12 +938,12 @@ class ServiceManagerThread:
         Logging.info("EOF found TDengine STDOUT, marking the process as terminated")
         self.setStatus(Status.STATUS_STOPPED)
 
-    def svcErrorReader(self, stdErr: BinaryIO, queue, logDir: str):
+    def svcErrorReader(self, ipcStdErr: IpcStream, queue, logDir: str):
         # os.makedirs(logDir, exist_ok=True)
         # logFile = os.path.join(logDir,'stderr.log')
         # fErr = open(logFile, 'wb')
         # for line in iter(err.readline, b''):
-        for tChunk in self._textChunkGenerator(stdErr, logDir, 'stderr.log') :
+        for tChunk in self._textChunkGenerator(ipcStdErr, logDir, 'stderr.log') :
             queue.put(tChunk) # tChunk guaranteed not to be None
             # fErr.write(line)
             Logging.info("TDengine STDERR: {}".format(tChunk))
@@ -1,4 +1,4 @@
-from typing import Any, List, Dict, NewType
+from typing import Any, BinaryIO, List, Dict, NewType
 from enum import Enum
 
 DirPath = NewType('DirPath', str)
@@ -26,3 +26,5 @@ class TdDataType(Enum):
 
 TdColumns = Dict[str, TdDataType]
 TdTags = Dict[str, TdDataType]
+
+IpcStream = NewType('IpcStream', BinaryIO)
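IpcStream here is a typing.NewType alias over BinaryIO: it costs nothing at runtime but lets the type checker distinguish IPC pipe handles from arbitrary binary files. A small usage sketch (read_header is a hypothetical function, not part of the codebase):

    import io
    import typing
    from typing import BinaryIO, NewType

    IpcStream = NewType('IpcStream', BinaryIO)

    def read_header(stream: IpcStream) -> bytes:
        return stream.read(4)  # the type checker knows this is a binary stream

    raw: BinaryIO = io.BytesIO(b"\x00\x01\x02\x03rest")
    header = read_header(typing.cast(IpcStream, raw))  # cast at the boundary, as the diff does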
@@ -183,7 +183,7 @@ python3 ./test.py -f stable/query_after_reset.py
 # perfbenchmark
 python3 ./test.py -f perfbenchmark/bug3433.py
 #python3 ./test.py -f perfbenchmark/bug3589.py
+python3 ./test.py -f perfbenchmark/taosdemoInsert.py
 
 #query
 python3 ./test.py -f query/filter.py
@@ -31,7 +31,7 @@ class insertFromCSVPerformace:
         self.host = "127.0.0.1"
         self.user = "root"
         self.password = "taosdata"
-        self.config = "/etc/taosperf"
+        self.config = "/etc/perf"
         self.conn = taos.connect(
             self.host,
             self.user,
@@ -0,0 +1,387 @@
###################################################################
#           Copyright (c) 2016 by TAOS Technologies, Inc.
#                     All rights reserved.
#
#  This file is proprietary and confidential to TAOS Technologies.
#  No part of this file may be reproduced, stored, transmitted,
#  disclosed or used in any form or by any means other than as
#  expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import taos
import sys
import os
import json
import argparse
import subprocess
import datetime
import re
import time  # needed for time.sleep() below


from multiprocessing import cpu_count
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.dnodes import TDDnode

class Taosdemo:
    def __init__(self, clearCache, dbName, keep):
        self.clearCache = clearCache
        self.dbname = dbName
        self.drop = "yes"
        self.keep = keep
        self.host = "127.0.0.1"
        self.user = "root"
        self.password = "taosdata"
        # self.config = "/etc/taosperf"
        # self.conn = taos.connect(
        #     self.host,
        #     self.user,
        #     self.password,
        #     self.config)

    # env config
    def getBuildPath(self) -> str:
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/debug/build/bin")]
                    break
        return buildPath

    def getExeToolsDir(self) -> str:
        self.debugdir = self.getBuildPath() + "/debug/build/bin"
        return self.debugdir

    def getCfgDir(self) -> str:
        self.config = self.getBuildPath() + "/sim/dnode1/cfg"
        return self.config

    # taosdemo insert file config
    def dbinfocfg(self) -> dict:
        return {
            "name": self.dbname,
            "drop": self.drop,
            "replica": 1,
            "days": 10,
            "cache": 16,
            "blocks": 8,
            "precision": "ms",
            "keep": self.keep,
            "minRows": 100,
            "maxRows": 4096,
            "comp": 2,
            "walLevel": 1,
            "cachelast": 0,
            "quorum": 1,
            "fsync": 3000,
            "update": 0
        }

    def type_check(func):
        def wrapper(self, **kwargs):
            num_types = ["int", "float", "bigint", "tinyint", "smallint", "double"]
            str_types = ["binary", "nchar"]
            for k, v in kwargs.items():
                if k.lower() not in num_types and k.lower() not in str_types:
                    return f"args {k} type error, not allowed"
                elif not isinstance(v, (int, list, tuple)):
                    return f"value {v} type error, not allowed"
                elif k.lower() in num_types and not isinstance(v, int):
                    return f"arg {v} takes 1 positional argument must be type int"
                elif isinstance(v, (list, tuple)) and len(v) > 2:
                    return f"arg {v} takes from 1 to 2 positional arguments but more than 2 were given"
                elif isinstance(v, (list, tuple)) and [False for _ in v if not isinstance(_, int)]:
                    return f"arg {v} takes from 1 to 2 positional arguments must be type int"
                else:
                    pass
            return func(self, **kwargs)
        return wrapper

    @type_check
    def column_tag_count(self, **column_tag) -> list:
        init_column_tag = []
        for k, v in column_tag.items():
            if re.search(k, "int, float, bigint, tinyint, smallint, double", re.IGNORECASE):
                init_column_tag.append({"type": k, "count": v})
            elif re.search(k, "binary, nchar", re.IGNORECASE):
                if isinstance(v, int):
                    init_column_tag.append({"type": k, "count": v, "len": 8})
                elif len(v) == 1:
                    init_column_tag.append({"type": k, "count": v[0], "len": 8})
                else:
                    init_column_tag.append({"type": k, "count": v[0], "len": v[1]})
        return init_column_tag

    def stbcfg(self, stb: str, child_tab_count: int, rows: int, prechildtab: str, columns: dict, tags: dict) -> dict:
        return {
            "name": stb,
            "child_table_exists": "no",
            "childtable_count": child_tab_count,
            "childtable_prefix": prechildtab,
            "auto_create_table": "no",
            "batch_create_tbl_num": 10,
            "data_source": "rand",
            "insert_mode": "taosc",
            "insert_rows": rows,
            "childtable_limit": 0,
            "childtable_offset": 0,
            "rows_per_tbl": 1,
            "max_sql_len": 65480,
            "disorder_ratio": 0,
            "disorder_range": 1000,
            "timestamp_step": 10,
            "start_timestamp": f"{datetime.datetime.now():%F %X}",
            "sample_format": "csv",
            "sample_file": "./sample.csv",
            "tags_file": "",
            "columns": self.column_tag_count(**columns),
            "tags": self.column_tag_count(**tags)
        }

    def schemecfg(self, intcount=1, floatcount=0, bcount=0, tcount=0, scount=0, doublecount=0, binarycount=0, ncharcount=0):
        return {
            "INT": intcount,
            "FLOAT": floatcount,
            "BIGINT": bcount,
            "TINYINT": tcount,
            "SMALLINT": scount,
            "DOUBLE": doublecount,
            "BINARY": binarycount,
            "NCHAR": ncharcount
        }

    def insertcfg(self, db: dict, stbs: list) -> dict:
        return {
            "filetype": "insert",
            "cfgdir": self.config,
            "host": self.host,
            "port": 6030,
            "user": self.user,
            "password": self.password,
            "thread_count": cpu_count(),
            "thread_count_create_tbl": cpu_count(),
            "result_file": "/tmp/insert_res.txt",
            "confirm_parameter_prompt": "no",
            "insert_interval": 0,
            "num_of_records_per_req": 100,
            "max_sql_len": 1024000,
            "databases": [{
                "dbinfo": db,
                "super_tables": stbs
            }]
        }

    def createinsertfile(self, db: dict, stbs: list) -> str:
        date = datetime.datetime.now()
        file_create_table = f"/tmp/insert_{date:%F-%H%M}.json"

        with open(file_create_table, 'w') as f:
            json.dump(self.insertcfg(db, stbs), f)

        return file_create_table

    # taosdemo query file config
    def querysqls(self, sql: str) -> list:
        return [{"sql": sql, "result": ""}]

    def querycfg(self, sql: str) -> dict:
        return {
            "filetype": "query",
            "cfgdir": self.config,
            "host": self.host,
            "port": 6030,
            "user": self.user,
            "password": self.password,
            "confirm_parameter_prompt": "yes",
            "query_times": 10,
            "query_mode": "taosc",
            "databases": self.dbname,
            "specified_table_query": {
                "query_interval": 0,
                "concurrent": cpu_count(),
                "sqls": self.querysqls(sql)
            }
        }

    def createqueryfile(self, sql: str):
        date = datetime.datetime.now()
        file_query_table = f"/tmp/query_{date:%F-%H%M}.json"

        with open(file_query_table, "w") as f:
            json.dump(self.querycfg(sql), f)

        return file_query_table

    # Execute taosdemo, and delete temporary files when finished
    def taosdemotable(self, filepath: str, resultfile="/dev/null"):
        taosdemopath = self.getBuildPath() + "/debug/build/bin"
        with open(filepath, "r") as f:
            filetype = json.load(f)["filetype"]
        if filetype == "insert":
            taosdemo_table_cmd = f"{taosdemopath}/taosdemo -f {filepath} > {resultfile} 2>&1"
        else:
            taosdemo_table_cmd = f"yes | {taosdemopath}/taosdemo -f {filepath} > {resultfile} 2>&1"
        try:
            _ = subprocess.check_output(taosdemo_table_cmd, shell=True).decode("utf-8")
        except subprocess.CalledProcessError as e:
            _ = e.output

    def droptmpfile(self, filepath: str):
        drop_file_cmd = f"[ -f {filepath} ] && rm -f {filepath}"
        try:
            _ = subprocess.check_output(drop_file_cmd, shell=True).decode("utf-8")
        except subprocess.CalledProcessError as e:
            _ = e.output

    # TODO: complete the TD-4153 data insertion and client-side query performance test.
    def td4153insert(self):

        tdLog.printNoPrefix("========== start to create table and insert data ==========")
        self.dbname = "td4153"
        db = self.dbinfocfg()
        stblist = []

        columntype = self.schemecfg(intcount=1, ncharcount=100)
        tagtype = self.schemecfg(intcount=1)
        stbname = "stb1"
        prechild = "t1"
        stable = self.stbcfg(
            stb=stbname,
            prechildtab=prechild,
            child_tab_count=2,
            rows=10000,
            columns=columntype,
            tags=tagtype
        )
        stblist.append(stable)
        insertfile = self.createinsertfile(db=db, stbs=stblist)

        nmon_file = f"/tmp/insert_{datetime.datetime.now():%F-%H%M}.nmon"
        cmd = f"nmon -s5 -F {nmon_file} -m /tmp/"
        try:
            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
        except subprocess.CalledProcessError as e:
            _ = e.output

        self.taosdemotable(insertfile)
        self.droptmpfile(insertfile)
        self.droptmpfile("/tmp/insert_res.txt")

        # In order to prevent too many performance files from being generated, the nmon file is deleted,
        # and the delete statement can be cancelled during the actual test.
        self.droptmpfile(nmon_file)

        cmd = f"ps -ef|grep -w nmon| grep -v grep | awk '{{print $2}}'"
        try:
            time.sleep(10)
            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
        except BaseException as e:
            raise e

    def td4153query(self):
        tdLog.printNoPrefix("========== start to query operation ==========")

        sqls = {
            "select_all": "select * from stb1",
            "select_join": "select * from t10, t11 where t10.ts=t11.ts"
        }
        for type, sql in sqls.items():
            result_file = f"/tmp/queryResult_{type}.log"
            query_file = self.createqueryfile(sql)
            try:
                self.taosdemotable(query_file, resultfile=result_file)
            except subprocess.CalledProcessError as e:
                out_put = e.output
            if result_file:
                print(f"execute rows {type.split('_')[1]} sql, the sql is: {sql}")
                max_sql_time_cmd = f'''
                grep -o Spent.*s {result_file} |awk 'NR==1{{max=$2;next}}{{max=max>$2?max:$2}}END{{print "Max=",max,"s"}}'
                '''
                max_sql_time = subprocess.check_output(max_sql_time_cmd, shell=True).decode("UTF-8")
                print(f"{type.split('_')[1]} rows sql time : {max_sql_time}")

                min_sql_time_cmd = f'''
                grep -o Spent.*s {result_file} |awk 'NR==1{{min=$2;next}}{{min=min<$2?min:$2}}END{{print "Min=",min,"s"}}'
                '''
                min_sql_time = subprocess.check_output(min_sql_time_cmd, shell=True).decode("UTF-8")
                print(f"{type.split('_')[1]} rows sql time : {min_sql_time}")

                avg_sql_time_cmd = f'''
                grep -o Spent.*s {result_file} |awk '{{sum+=$2}}END{{print "Average=",sum/NR,"s"}}'
                '''
                avg_sql_time = subprocess.check_output(avg_sql_time_cmd, shell=True).decode("UTF-8")
                print(f"{type.split('_')[1]} rows sql time : {avg_sql_time}")

            self.droptmpfile(query_file)
            self.droptmpfile(result_file)

        drop_query_tmt_file_cmd = " find ./ -name 'querySystemInfo-*' -type f -exec rm {} \; "
        try:
            _ = subprocess.check_output(drop_query_tmt_file_cmd, shell=True).decode("utf-8")
        except subprocess.CalledProcessError as e:
            _ = e.output
        pass

    def td4153(self):
        self.td4153insert()
        self.td4153query()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-r',
        '--remove-cache',
        action='store_true',
        default=False,
        help='clear cache before query (default: False)')
    parser.add_argument(
        '-d',
        '--database-name',
        action='store',
        default='db',
        type=str,
        help='Database name to be created (default: db)')
    parser.add_argument(
        '-k',
        '--keep-time',
        action='store',
        default=3650,
        type=int,
        help='Database keep parameter (default: 3650)')

    args = parser.parse_args()
    taosdemo = Taosdemo(args.remove_cache, args.database_name, args.keep_time)
    # taosdemo.conn = taos.connect(
    #     taosdemo.host,
    #     taosdemo.user,
    #     taosdemo.password,
    #     taosdemo.config
    # )

    debugdir = taosdemo.getExeToolsDir()
    cfgdir = taosdemo.getCfgDir()
    cmd = f"{debugdir}/taosd -c {cfgdir} >/dev/null 2>&1 &"
    try:
        _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
    except subprocess.CalledProcessError as e:
        _ = e.output

    if taosdemo.clearCache:
        # must be run with root permission
        subprocess.check_output("echo 3 > /proc/sys/vm/drop_caches", shell=True).decode("utf-8")

    taosdemo.td4153()
@@ -24,7 +24,7 @@ class taosdemoPerformace:
         self.host = "127.0.0.1"
         self.user = "root"
         self.password = "taosdata"
-        self.config = "/etc/taosperf"
+        self.config = "/etc/perf"
         self.conn = taos.connect(
             self.host,
             self.user,
@@ -77,7 +77,7 @@ class taosdemoPerformace:
 
         insert_data = {
             "filetype": "insert",
-            "cfgdir": "/etc/taosperf",
+            "cfgdir": "/etc/perf",
             "host": "127.0.0.1",
             "port": 6030,
             "user": "root",
@@ -104,7 +104,7 @@ class taosdemoPerformace:
         return output
 
     def insertData(self):
-        os.system("taosdemo -f %s > taosdemoperf.txt" % self.generateJson())
+        os.system("taosdemo -f %s > taosdemoperf.txt 2>&1" % self.generateJson())
         self.createTableTime = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'")
         self.insertRecordsTime = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'")
        self.recordsPerSecond = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $16}'")
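insertData above scrapes timing numbers out of taosdemo's report with grep/awk over lines containing "Spent". An equivalent extraction in Python, offered as a hedged sketch (the exact wording of taosdemo's output may differ by version):

    import re

    def spent_values(report_path):
        """Return the numbers following 'Spent' in each matching line, in file order."""
        values = []
        with open(report_path) as f:
            for line in f:
                m = re.search(r"Spent\s+([0-9.]+)", line)
                if m:
                    values.append(float(m.group(1)))
        return values

    # vals = spent_values("taosdemoperf.txt")
    # create_table_time, insert_records_time = vals[0], vals[1]  # mirrors awk 'NR==1'/'NR==2'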
@@ -847,10 +847,16 @@ sql_error select tbname, t1 from select_tags_mt0 interval(1y);
 #valid sql: select first(c1), last(c2), count(*) from select_tags_mt0 group by tbname, t1;
 #valid sql: select first(c1), tbname, t1 from select_tags_mt0 group by t2;
 
+print ==================================>TD-4231
+sql_error select t1,tbname from select_tags_mt0 where c1<0
+sql_error select t1,tbname from select_tags_mt0 where c1<0 and tbname in ('select_tags_tb12')
+
+sql select tbname from select_tags_mt0 where tbname in ('select_tags_tb12');
+
 sql_error select first(c1), last(c2), t1 from select_tags_mt0 group by tbname;
 sql_error select first(c1), last(c2), tbname, t2 from select_tags_mt0 group by tbname;
 sql_error select first(c1), count(*), t2, t1, tbname from select_tags_mt0 group by tbname;
-# this sql is valid: select first(c1), t2 from select_tags_mt0 group by tbname;
+#valid sql: select first(c1), t2 from select_tags_mt0 group by tbname;
 
 #sql select first(ts), tbname from select_tags_mt0 group by tbname;
 #sql select count(c1) from select_tags_mt0 where c1=99 group by tbname;
@@ -158,7 +158,7 @@ if $dnode4Vtatus != offline then
   sleep 2000
   goto wait_dnode4_vgroup_offline
 endi
-if $dnode3Vtatus != master then
+if $dnode3Vtatus != unsynced then
   sleep 2000
   goto wait_dnode4_vgroup_offline
 endi