Merge branch 'master' into feature/qrefactor

Haojun Liao 2021-04-19 17:05:03 +08:00
commit cc4c5d26aa
140 changed files with 9944 additions and 2683 deletions

Jenkinsfile vendored
View File

@ -42,20 +42,52 @@ def pre_test(){
killall -9 taosd ||echo "no taosd running"
killall -9 gdb || echo "no gdb running"
cd ${WKC}
git checkout develop
git reset --hard HEAD~10 >/dev/null
'''
script {
if (env.CHANGE_TARGET == 'master') {
sh '''
cd ${WKC}
git checkout master
'''
}
else {
sh '''
cd ${WKC}
git checkout develop
'''
}
}
sh'''
cd ${WKC}
git pull >/dev/null
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
find ${WKC}/tests/pytest -name \'*\'.sql -exec rm -rf {} \\;
git clean -dfx
cd ${WK}
git reset --hard HEAD~10
git checkout develop
git pull >/dev/null
'''
script {
if (env.CHANGE_TARGET == 'master') {
sh '''
cd ${WK}
git checkout master
'''
}
else {
sh '''
cd ${WK}
git checkout develop
'''
}
}
sh '''
cd ${WK}
git pull >/dev/null
export TZ=Asia/Harbin
date
rm -rf ${WK}/debug
git clean -dfx
mkdir debug
cd debug
cmake .. > /dev/null
@ -93,6 +125,7 @@ pipeline {
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
'''
script{
env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD develop|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true)
}
@ -185,14 +218,12 @@ pipeline {
rm -rf /var/log/taos/*
./handle_crash_gen_val_log.sh
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_taosd_val_log.sh
'''
}
timeout(time: 45, unit: 'MINUTES'){
sh '''
date

View File

@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.24-dist.jar DESTINATION connector/jdbc)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.25-dist.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")

View File

@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.0.18.0")
SET(TD_VER_NUMBER "2.0.19.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)

View File

@ -101,7 +101,7 @@ $ taos -h 192.168.0.1 -s "use db; show tables;"
### Running SQL command scripts
The TDengine terminal can run a SQL command script via the `source` command.
```mysql
taos> source <filename>;
@ -109,10 +109,10 @@ taos> source <filename>;
### Shell tips
- Use the up and down arrow keys to browse the command history
- Change a user's password: use the alter user command in the shell
- Press Ctrl+C to abort a query in progress
- Run `RESET QUERY CACHE` to clear the locally cached table schemas (see the example below)
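As a small illustrative session combining these tips (the user name, password, and table name below are placeholders, not part of the original document):
```mysql
taos> alter user root pass 'taosdata';
taos> RESET QUERY CACHE;
taos> select count(*) from test.t10;
```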
## <a class="anchor" id="demo"></a>Experience TDengine's Lightning Speed
@ -179,19 +179,20 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
### List of platforms supported by the TDengine server
| | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | **UnionTech UOS** | **Galaxy Kylin / NeoKylin** | **Linx V60/V80** |
| -------------- | ---------------- | ------------------- | --------------- | ----------------- | --------------------------- | ---------------- |
| X64 | ● | ● | | ○ | ● | ● |
| Raspberry Pi ARM32 | | ● | ● | | | |
| Loongson MIPS64 | | | ● | | | |
| Kunpeng ARM64 | | ○ | ○ | | ● | |
| Sunway Alpha64 | | | ○ | ● | | |
| Phytium ARM64 | | ○ Ubuntu Kylin | | | | |
| Hygon X64 | ● | ● | ● | ○ | ● | ● |
| Rockchip ARM64/32 | | | ○ | | | |
| Allwinner ARM64/32 | | | ○ | | | |
| Actions ARM64/32 | | | ○ | | | |
| TI ARM32 | | | ○ | | | |
| | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | **UnionTech UOS** | **Galaxy Kylin / NeoKylin** | **Linx V60/V80** | **Huawei EulerOS** |
| -------------- | ---------------- | ------------------- | --------------- | ----------------- | --------------------------- | ---------------- | ------------------ |
| X64 | ● | ● | | ○ | ● | ● | ● |
| Raspberry Pi ARM32 | | ● | ● | | | | |
| Loongson MIPS64 | | | ● | | | | |
| Kunpeng ARM64 | | ○ | ○ | | ● | | |
| Sunway Alpha64 | | | ○ | ● | | | |
| Phytium ARM64 | | ○ Ubuntu Kylin | | | | | |
| Hygon X64 | ● | ● | ● | ○ | ● | ● | |
| Rockchip ARM64/32 | | | ○ | | | | |
| Allwinner ARM64/32 | | | ○ | | | | |
| Actions ARM64/32 | | | ○ | | | | |
| TI ARM32 | | | ○ | | | | |
| Huawei Cloud ARM64 | | | | | | | ● |
Note: ● means officially tested and verified, and ○ means verified by non-official testing.
@ -211,7 +212,7 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
| **Python** | ● | ● | ● | ○ | ● | ● | ● | -- | ● |
| **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- |
| **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- |
| **C#** | ○ | ● | ● | ○ | ○ | ○ | ○ | -- | -- |
| **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- |
| **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● | ● |
Note: ● means officially tested and verified, and ○ means verified by non-official testing.

View File

@ -14,7 +14,7 @@ TDengine provides a rich set of application development interfaces, including C/C++, Java,
| **Python** | ● | ● | ● | ○ | ● | ● | ○ | -- | ○ |
| **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- |
| **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- |
| **C#** | ○ | ● | ● | ○ | ○ | ○ | ○ | -- | -- |
| **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- |
| **RESTful** | ● | ● | ● | ● | ● | ● | ○ | ○ | ○ |
Here ● means officially tested and verified, and ○ means verified by non-official testing.
@ -23,7 +23,7 @@ TDengine provides a rich set of application development interfaces, including C/C++, Java,
* On systems without the TDengine server software installed, using a connector (other than RESTful) to access a TDengine database requires installing the matching version of the client package, so that the application driver (libtaos.so on Linux, taos.dll on Windows) is present on the system; otherwise an error about the missing library file will be raised.
* All APIs that execute SQL statements, such as `taos_query`, `taos_query_a`, and `taos_subscribe` in the C/C++ Connector and their counterparts in other languages, can execute only one SQL statement per call; if the actual argument contains multiple statements, the behavior is undefined.
* Users upgrading to TDengine 2.0.8.0 or later must update the JDBC connection: taos-jdbcdriver must be upgraded to 2.0.12 or above.
* Users upgrading to TDengine 2.0.8.0 or later must update the JDBC connection: taos-jdbcdriver must be upgraded to 2.0.12 or above. For detailed version dependencies, see the [taos-jdbcdriver documentation](https://www.taosdata.com/cn/documentation/connector/java#version).
* Whichever programming language's connector is used, with TDengine 2.0 and later it is recommended that each thread of a database application establish its own connection, or that a connection pool be built on a per-thread basis, so that the "USE statement" state inside a connection does not interfere across threads (query and write operations on a connection are themselves thread-safe).
## <a class="anchor" id="driver"></a>Steps to install the connector driver

View File

@ -29,21 +29,21 @@ taos> DESCRIBE meters;
## <a class="anchor" id="data-type"></a>Supported data types
The most important concept in TDengine is the timestamp: one must be specified when creating tables and inserting records, as well as when querying historical records. Timestamps follow these rules:
- The time format is ```YYYY-MM-DD HH:mm:ss.MS```, with a default resolution of milliseconds, e.g. ```2017-08-12 18:25:58.128```
- The built-in function now returns the server's current time
- When inserting a record, if the timestamp is now, the server's current time is used
- Epoch Time: a timestamp can also be a long integer, the number of milliseconds since 1970-01-01 08:00:00.000
- Times can be added and subtracted: for example, now-2h means two hours before the query time (the last 2 hours). The unit after the number can be a (milliseconds), s (seconds), m (minutes), h (hours), d (days), or w (weeks). For example, select * from t1 where ts > now-2w and ts <= now-1w queries exactly one week of data from two weeks ago. When specifying the time window (interval) of a down-sampling operation, the units n (calendar month) and y (calendar year) can also be used.
- The time format is ```YYYY-MM-DD HH:mm:ss.MS```, with a default resolution of milliseconds, e.g. ```2017-08-12 18:25:58.128```
- The built-in function now returns the client's current time
- When inserting a record, if the timestamp is now, the current time of the client submitting the record is used
- Epoch Time: a timestamp can also be a long integer, the number of milliseconds since 1970-01-01 08:00:00.000
- Times can be added and subtracted: for example, now-2h means two hours before the query time (the last 2 hours). The unit after the number can be u (microseconds), a (milliseconds), s (seconds), m (minutes), h (hours), d (days), or w (weeks). For example, `select * from t1 where ts > now-2w and ts <= now-1w` queries exactly one week of data from two weeks ago. When specifying the time window (interval) of a down-sampling operation, the units n (calendar month) and y (calendar year) can also be used (see the example below).
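As a quick, illustrative sketch of these rules (the table `test.t10` and column `f1` are taken from the quick-start example above; adjust the names to your own schema):
```mysql
-- exactly one week of data, starting two weeks ago
taos> select * from test.t10 where ts > now-2w and ts <= now-1w;
-- 10-second down-sampling over the last 2 hours
taos> select avg(f1) from test.t10 where ts > now-2h interval(10s);
```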
TDengine's default timestamp precision is milliseconds, but microseconds can be enabled by changing the configuration parameter enableMicrosecond.
In TDengine, the data model of a regular table may use the following 10 data types (a short example follows the table).
| | Type | Bytes | Description |
| ---- | :-------: | ------ | ------------------------------------------------------------ |
| 1 | TIMESTAMP | 8 | Timestamp. Default precision is milliseconds; microseconds are supported. Time starts at 1970-01-01 00:00:00.000 (UTC/GMT) and cannot be earlier than that. |
| 1 | TIMESTAMP | 8 | Timestamp. Default precision is milliseconds; microseconds are supported. Time starts at 1970-01-01 00:00:00.000 (UTC/GMT) and cannot be earlier than that. (Since version 2.0.18 this time-range restriction has been removed.) |
| 2 | INT | 4 | Integer, range [-2^31+1, 2^31-1]; -2^31 is used for NULL |
| 3 | BIGINT | 8 | Long integer, range [-2^63+1, 2^63-1]; -2^63 is used for NULL |
| 4 | FLOAT | 4 | Float, 6-7 significant digits, range [-3.4E38, 3.4E38] |
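A minimal sketch using several of these types (the table and column names below are illustrative only, not part of the original document):
```mysql
taos> create table sensor_data (ts timestamp, val int, total bigint, temp float);
taos> insert into sensor_data values (now, 10, 1000, 23.5);
```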

View File

@ -120,7 +120,7 @@ function clean_service_on_systemd() {
if [ "$verMode" == "cluster" ]; then
nginx_service_config="${service_config_dir}/${nginx_service_name}.service"
if [ -d ${bin_dir}/web ]; then
if [ -d ${install_nginxd_dir} ]; then
if systemctl is-active --quiet ${nginx_service_name}; then
echo "Nginx for TDengine is running, stopping it..."
${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null

View File

@ -1,6 +1,6 @@
name: tdengine
base: core18
version: '2.0.18.0'
version: '2.0.19.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
@ -72,7 +72,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- usr/lib/libtaos.so.2.0.18.0
- usr/lib/libtaos.so.2.0.19.0
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so

View File

@ -96,6 +96,25 @@ typedef struct STableMetaInfo {
SArray *tagColList; // SArray<SColumn*>, involved tag columns
} STableMetaInfo;
typedef struct SColumnIndex {
int16_t tableIndex;
int16_t columnIndex;
} SColumnIndex;
typedef struct SFieldInfo {
int16_t numOfOutput; // number of column in result
TAOS_FIELD* final;
SArray *internalField; // SArray<SInternalField>
} SFieldInfo;
typedef struct SColumn {
SColumnIndex colIndex;
int32_t numOfFilters;
SColumnFilterInfo *filterInfo;
} SColumn;
/* the structure for sql function in select clause */
typedef struct SSqlExpr {
char aliasName[TSDB_COL_NAME_LEN]; // as aliasName
@ -109,32 +128,24 @@ typedef struct SSqlExpr {
tVariant param[3]; // parameters are not more than 3
int32_t offset; // sub result column value of arithmetic expression.
int16_t resColId; // result column id
SColumn *pFilter; // expr filter
} SSqlExpr;
typedef struct SColumnIndex {
int16_t tableIndex;
int16_t columnIndex;
} SColumnIndex;
typedef struct SExprFilter {
tSqlExpr *pExpr; //used for having parse
SSqlExpr *pSqlExpr;
SArray *fp;
SColumn *pFilters; //having filter info
}SExprFilter;
typedef struct SInternalField {
TAOS_FIELD field;
bool visible;
SExprInfo *pArithExprInfo;
SSqlExpr *pSqlExpr;
SExprFilter *pFieldFilters;
} SInternalField;
typedef struct SFieldInfo {
int16_t numOfOutput; // number of column in result
TAOS_FIELD* final;
SArray *internalField; // SArray<SInternalField>
} SFieldInfo;
typedef struct SColumn {
SColumnIndex colIndex;
int32_t numOfFilters;
SColumnFilterInfo *filterInfo;
} SColumn;
typedef struct SCond {
uint64_t uid;
int32_t len; // length of tag query condition data
@ -243,6 +254,7 @@ typedef struct SQueryInfo {
int32_t round; // 0/1/....
int32_t bufLen;
char* buf;
int32_t havingFieldNum;
} SQueryInfo;
typedef struct {
@ -316,6 +328,7 @@ typedef struct {
char ** buffer; // Buffer used to put multibytes encoded using unicode (wchar_t)
SColumnIndex* pColumnIndex;
TAOS_FIELD* final;
SArithmeticSupport *pArithSup; // support the arithmetic expression calculation on agg functions
struct SLocalMerger *pLocalMerger;
} SSqlRes;

View File

@ -22,6 +22,7 @@
#include "tscUtil.h"
#include "tschemautil.h"
#include "tsclient.h"
#include "qUtil.h"
typedef struct SCompareParam {
SLocalDataSource **pLocalData;
@ -1243,6 +1244,76 @@ static bool saveGroupResultInfo(SSqlObj *pSql) {
return false;
}
bool doFilterFieldData(char *input, SExprFilter* pFieldFilters, int16_t type, bool* notSkipped) {
bool qualified = false;
for(int32_t k = 0; k < pFieldFilters->pFilters->numOfFilters; ++k) {
__filter_func_t fp = taosArrayGetP(pFieldFilters->fp, k);
SColumnFilterElem filterElem = {.filterInfo = pFieldFilters->pFilters->filterInfo[k]};
bool isnull = isNull(input, type);
if (isnull) {
if (fp == isNullOperator) {
qualified = true;
break;
} else {
continue;
}
} else {
if (fp == notNullOperator) {
qualified = true;
break;
} else if (fp == isNullOperator) {
continue;
}
}
if (fp(&filterElem, input, input, type)) {
qualified = true;
break;
}
}
*notSkipped = qualified;
return TSDB_CODE_SUCCESS;
}
int32_t doHavingFilter(SQueryInfo* pQueryInfo, tFilePage* pOutput, bool* notSkipped) {
*notSkipped = true;
if (pQueryInfo->havingFieldNum <= 0) {
return TSDB_CODE_SUCCESS;
}
//int32_t exprNum = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
size_t numOfOutput = tscNumOfFields(pQueryInfo);
for(int32_t i = 0; i < numOfOutput; ++i) {
SInternalField* pInterField = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i);
SExprFilter* pFieldFilters = pInterField->pFieldFilters;
if (pFieldFilters == NULL) {
continue;
}
int32_t type = pInterField->field.type;
char* pInput = pOutput->data + pOutput->num* pFieldFilters->pSqlExpr->offset;
doFilterFieldData(pInput, pFieldFilters, type, notSkipped);
if (*notSkipped == false) {
return TSDB_CODE_SUCCESS;
}
}
return TSDB_CODE_SUCCESS;
}
/**
*
* @param pSql
@ -1283,6 +1354,22 @@ bool genFinalResults(SSqlObj *pSql, SLocalMerger *pLocalMerge, bool noMoreCurren
doArithmeticCalculate(pQueryInfo, pResBuf, pModel->rowSize, pLocalMerge->finalModel->rowSize);
}
bool notSkipped = true;
doHavingFilter(pQueryInfo, pResBuf, &notSkipped);
if (!notSkipped) {
pRes->numOfRows = 0;
pLocalMerge->discard = !noMoreCurrentGroupRes;
if (pLocalMerge->discard) {
SColumnModel *pInternModel = pLocalMerge->pDesc->pColumnModel;
tColModelAppend(pInternModel, pLocalMerge->discardData, pLocalMerge->pTempBuffer->data, 0, 1, 1);
}
return notSkipped;
}
// no interval query, no fill operation
if (pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
genFinalResWithoutFill(pRes, pLocalMerge, pQueryInfo);

View File

@ -118,7 +118,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
if (sToken.type == TK_PLUS) {
useconds += interval;
} else {
useconds = (useconds >= interval) ? useconds - interval : 0;
useconds = useconds - interval;
}
*next = pTokenEnd;
@ -937,6 +937,10 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
return ret;
}
if (sql == NULL) {
return TSDB_CODE_TSC_INVALID_SQL;
}
code = tscGetTableMetaEx(pSql, pTableMetaInfo, true);
if (TSDB_CODE_TSC_ACTION_IN_PROGRESS == code) {
return code;
@ -945,6 +949,10 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
} else {
sql = sToken.z;
if (sql == NULL) {
return TSDB_CODE_TSC_INVALID_SQL;
}
code = tscGetTableMetaEx(pSql, pTableMetaInfo, false);
if (pCmd->curSql == NULL) {
assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS);
@ -953,10 +961,6 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
*sqlstr = sql;
if (*sqlstr == NULL) {
code = TSDB_CODE_TSC_INVALID_SQL;
}
return code;
}

View File

@ -34,6 +34,7 @@
#include "tstoken.h"
#include "tstrbuild.h"
#include "ttokendef.h"
#include "qUtil.h"
#define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0"
@ -1097,6 +1098,7 @@ static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd) {
return true;
}
static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd) {
assert(pTagsList != NULL);
@ -1606,7 +1608,7 @@ bool isValidDistinctSql(SQueryInfo* pQueryInfo) {
int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, SArray* pSelectList, bool isSTable, bool joinQuery, bool timeWindowQuery) {
assert(pSelectList != NULL && pCmd != NULL);
const char* msg1 = "too many columns in selection clause";
const char* msg2 = "functions or others can not be mixed up";
const char* msg3 = "not support query expression";
const char* msg5 = "invalid function name";
@ -1655,7 +1657,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, SArray* pSelectLis
}
if (pQueryInfo->fieldsInfo.numOfOutput > TSDB_MAX_COLUMNS) {
return TSDB_CODE_TSC_INVALID_SQL;
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
@ -1676,18 +1678,6 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, SArray* pSelectLis
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
/*
* transfer sql functions that need secondary merge into another format
* in dealing with super table queries such as: count/first/last
*/
if (isSTable) {
tscTansformFuncForSTableQuery(pQueryInfo);
if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
return TSDB_CODE_SUCCESS;
}
@ -3065,6 +3055,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd)
return TSDB_CODE_SUCCESS;
}
static SColumnFilterInfo* addColumnFilterInfo(SColumn* pColumn) {
if (pColumn == NULL) {
return NULL;
@ -3088,15 +3079,11 @@ static SColumnFilterInfo* addColumnFilterInfo(SColumn* pColumn) {
}
static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SColumnFilterInfo* pColumnFilter,
SColumnIndex* columnIndex, tSqlExpr* pExpr) {
int16_t colType, tSqlExpr* pExpr) {
const char* msg = "not supported filter condition";
tSqlExpr* pRight = pExpr->pRight;
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, columnIndex->tableIndex);
SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex->columnIndex);
int16_t colType = pSchema->type;
if (colType >= TSDB_DATA_TYPE_TINYINT && colType <= TSDB_DATA_TYPE_BIGINT) {
colType = TSDB_DATA_TYPE_BIGINT;
} else if (colType == TSDB_DATA_TYPE_FLOAT || colType == TSDB_DATA_TYPE_DOUBLE) {
@ -3301,7 +3288,10 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SC
}
pColumn->colIndex = *pIndex;
return doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, pIndex, pExpr);
int16_t colType = pSchema->type;
return doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, colType, pExpr);
}
static int32_t getTablenameCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pTableCond, SStringBuilder* sb) {
@ -6030,7 +6020,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
if (TSDB_COL_IS_TAG(pColIndex->flag)) {
SColumnIndex index = {.tableIndex = pQueryInfo->groupbyExpr.tableIndex, .columnIndex = colIndex};
SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, type, bytes, getNewResColId(pQueryInfo), bytes, true);
SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, (int32_t)size - pQueryInfo->havingFieldNum, TSDB_FUNC_TAG, &index, type, bytes, getNewResColId(pQueryInfo), bytes, true);
memset(pExpr->aliasName, 0, sizeof(pExpr->aliasName));
tstrncpy(pExpr->aliasName, name, sizeof(pExpr->aliasName));
@ -6039,7 +6029,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
// NOTE: tag column does not add to source column list
SColumnList ids = getColumnList(1, 0, pColIndex->colIndex);
insertResultField(pQueryInfo, (int32_t)size, &ids, bytes, (int8_t)type, name, pExpr);
insertResultField(pQueryInfo, (int32_t)size - pQueryInfo->havingFieldNum, &ids, bytes, (int8_t)type, name, pExpr);
} else {
// if this query is "group by" normal column, time window query is not allowed
if (isTimeWindowQuery(pQueryInfo)) {
@ -6166,7 +6156,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
}
// projection query on super table does not compatible with "group by" syntax
if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
if (tscIsProjectionQuery(pQueryInfo)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
@ -6336,7 +6326,7 @@ void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex) {
int32_t totalBufSize = 1024;
char str[1024] = {0};
char str[1024+1] = {0};
int32_t offset = 0;
offset += sprintf(str, "num:%d [", size);
@ -6769,6 +6759,313 @@ static int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
return TSDB_CODE_SUCCESS;
}
int32_t tscInsertExprFields(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SInternalField** interField) {
tSqlExprItem item = {.pNode = pExpr, .aliasName = NULL, .distinct = false};
int32_t outputIndex = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
// ADD TRUE FOR TEST
if (addExprAndResultField(pCmd, pQueryInfo, outputIndex, &item, true) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
++pQueryInfo->havingFieldNum;
size_t n = tscSqlExprNumOfExprs(pQueryInfo);
SSqlExpr* pSqlExpr = tscSqlExprGet(pQueryInfo, (int32_t)n - 1);
int32_t slot = tscNumOfFields(pQueryInfo) - 1;
SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, slot);
pInfo->visible = false;
if (pInfo->pFieldFilters == NULL) {
SExprFilter* pFieldFilters = calloc(1, sizeof(SExprFilter));
if (pFieldFilters == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SColumn* pFilters = calloc(1, sizeof(SColumn));
if (pFilters == NULL) {
tfree(pFieldFilters);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
pFieldFilters->pFilters = pFilters;
pFieldFilters->pSqlExpr = pSqlExpr;
pSqlExpr->pFilter = pFilters;
pInfo->pFieldFilters = pFieldFilters;
}
pInfo->pFieldFilters->pExpr = pExpr;
*interField = pInfo;
return TSDB_CODE_SUCCESS;
}
int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SInternalField** pField) {
SInternalField* pInfo = NULL;
for (int32_t i = pQueryInfo->havingFieldNum - 1; i >= 0; --i) {
pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, pQueryInfo->fieldsInfo.numOfOutput - 1 - i);
if (pInfo->pFieldFilters && 0 == tSqlExprCompare(pInfo->pFieldFilters->pExpr, pExpr)) {
*pField = pInfo;
return TSDB_CODE_SUCCESS;
}
}
int32_t ret = tscInsertExprFields(pCmd, pQueryInfo, pExpr, &pInfo);
if (ret) {
return ret;
}
*pField = pInfo;
return TSDB_CODE_SUCCESS;
}
static int32_t genExprFilter(SExprFilter * exprFilter) {
exprFilter->fp = taosArrayInit(4, sizeof(__filter_func_t));
if (exprFilter->fp == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
for (int32_t i = 0; i < exprFilter->pFilters->numOfFilters; ++i) {
SColumnFilterInfo *filterInfo = &exprFilter->pFilters->filterInfo[i];
int32_t lower = filterInfo->lowerRelOptr;
int32_t upper = filterInfo->upperRelOptr;
if (lower == TSDB_RELATION_INVALID && upper == TSDB_RELATION_INVALID) {
tscError("invalid rel optr");
return TSDB_CODE_TSC_APP_ERROR;
}
__filter_func_t ffp = getFilterOperator(lower, upper);
if (ffp == NULL) {
tscError("invalid filter info");
return TSDB_CODE_TSC_APP_ERROR;
}
taosArrayPush(exprFilter->fp, &ffp);
}
return TSDB_CODE_SUCCESS;
}
static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, int32_t sqlOptr) {
const char* msg1 = "non binary column not support like operator";
const char* msg2 = "invalid operator for binary column in having clause";
const char* msg3 = "invalid operator for bool column in having clause";
SColumn* pColumn = NULL;
SColumnFilterInfo* pColFilter = NULL;
SInternalField* pInfo = NULL;
/*
* in case of a TK_AND filter condition, we first find the corresponding column and combine the new query condition with
* the existing condition.
*/
if (sqlOptr == TK_AND) {
int32_t ret = tscGetExprFilters(pCmd, pQueryInfo, pExpr->pLeft, &pInfo);
if (ret) {
return ret;
}
pColumn = pInfo->pFieldFilters->pFilters;
// this is a new filter condition on this column
if (pColumn->numOfFilters == 0) {
pColFilter = addColumnFilterInfo(pColumn);
} else { // update the existed column filter information, find the filter info here
pColFilter = &pColumn->filterInfo[0];
}
if (pColFilter == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
} else if (sqlOptr == TK_OR) {
int32_t ret = tscGetExprFilters(pCmd, pQueryInfo, pExpr->pLeft, &pInfo);
if (ret) {
return ret;
}
pColumn = pInfo->pFieldFilters->pFilters;
// TODO fixme: failed to invalid the filter expression: "col1 = 1 OR col2 = 2"
pColFilter = addColumnFilterInfo(pColumn);
if (pColFilter == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
} else { // error;
return TSDB_CODE_TSC_INVALID_SQL;
}
pColFilter->filterstr =
((pInfo->field.type == TSDB_DATA_TYPE_BINARY || pInfo->field.type == TSDB_DATA_TYPE_NCHAR) ? 1 : 0);
if (pColFilter->filterstr) {
if (pExpr->tokenId != TK_EQ
&& pExpr->tokenId != TK_NE
&& pExpr->tokenId != TK_ISNULL
&& pExpr->tokenId != TK_NOTNULL
&& pExpr->tokenId != TK_LIKE
) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
} else {
if (pExpr->tokenId == TK_LIKE) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pInfo->field.type == TSDB_DATA_TYPE_BOOL) {
if (pExpr->tokenId != TK_EQ && pExpr->tokenId != TK_NE) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
}
int32_t ret = doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, pInfo->field.type, pExpr);
if (ret) {
return ret;
}
return genExprFilter(pInfo->pFieldFilters);
}
int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, int32_t parentOptr) {
if (pExpr == NULL) {
return TSDB_CODE_SUCCESS;
}
const char* msg1 = "invalid having clause";
tSqlExpr* pLeft = pExpr->pLeft;
tSqlExpr* pRight = pExpr->pRight;
if (pExpr->tokenId == TK_AND || pExpr->tokenId == TK_OR) {
int32_t ret = getHavingExpr(pCmd, pQueryInfo, pExpr->pLeft, pExpr->tokenId);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
return getHavingExpr(pCmd, pQueryInfo, pExpr->pRight, pExpr->tokenId);
}
if (pLeft == NULL || pRight == NULL) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pLeft->type == pRight->type) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
exchangeExpr(pExpr);
pLeft = pExpr->pLeft;
pRight = pExpr->pRight;
if (pLeft->type != SQL_NODE_SQLFUNCTION) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pRight->type != SQL_NODE_VALUE) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pExpr->tokenId >= TK_BITAND) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
//if (pLeft->pParam == NULL || pLeft->pParam->nExpr < 1) {
// return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
//}
if (pLeft->pParam) {
size_t size = taosArrayGetSize(pLeft->pParam);
for (int32_t i = 0; i < size; i++) {
tSqlExprItem* pParamElem = taosArrayGet(pLeft->pParam, i);
if (pParamElem->pNode->tokenId != TK_ALL &&
pParamElem->pNode->tokenId != TK_ID &&
pParamElem->pNode->tokenId != TK_STRING &&
pParamElem->pNode->tokenId != TK_INTEGER &&
pParamElem->pNode->tokenId != TK_FLOAT) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pParamElem->pNode->tokenId == TK_ID && (pParamElem->pNode->colInfo.z == NULL && pParamElem->pNode->colInfo.n == 0)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pParamElem->pNode->tokenId == TK_ID) {
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if ((getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
if (index.columnIndex <= 0 ||
index.columnIndex >= tscGetNumOfColumns(pTableMeta)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
}
}
pLeft->functionId = isValidFunction(pLeft->operand.z, pLeft->operand.n);
if (pLeft->functionId < 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
return handleExprInHavingClause(pCmd, pQueryInfo, pExpr, parentOptr);
}
int32_t parseHavingClause(SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SSqlCmd* pCmd, bool isSTable, int32_t joinQuery, int32_t timeWindowQuery) {
const char* msg1 = "having only works with group by";
const char* msg2 = "functions or others can not be mixed up";
const char* msg3 = "invalid expression in having clause";
if (pExpr == NULL) {
return TSDB_CODE_SUCCESS;
}
if (pQueryInfo->groupbyExpr.numOfGroupCols <= 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pExpr->pLeft == NULL || pExpr->pRight == NULL) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (pQueryInfo->colList == NULL) {
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
}
int32_t ret = 0;
if ((ret = getHavingExpr(pCmd, pQueryInfo, pExpr, TK_AND)) != TSDB_CODE_SUCCESS) {
return ret;
}
//REDO function check
if (!functionCompatibleCheck(pQueryInfo, joinQuery, timeWindowQuery)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
return TSDB_CODE_SUCCESS;
}
int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t index) {
assert(pQuerySqlNode != NULL && (pQuerySqlNode->from == NULL || taosArrayGetSize(pQuerySqlNode->from->tableList) > 0));
@ -6934,6 +7231,23 @@ int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t i
}
}
// parse the having clause in the first place
if (parseHavingClause(pQueryInfo, pQuerySqlNode->pHaving, pCmd, isSTable, joinQuery, timeWindowQuery) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
/*
* transfer sql functions that need secondary merge into another format
* in dealing with super table queries such as: count/first/last
*/
if (isSTable) {
tscTansformFuncForSTableQuery(pQueryInfo);
if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
if (parseSessionClause(pCmd, pQueryInfo, pQuerySqlNode) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@ -7125,3 +7439,10 @@ bool hasNormalColumnFilter(SQueryInfo* pQueryInfo) {
return false;
}

View File

@ -144,8 +144,9 @@ SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg) {
SNewVgroupInfo info = {0};
info.numOfEps = pVgroupMsg->numOfEps;
info.vgId = pVgroupMsg->vgId;
info.inUse = 0;
info.inUse = 0; // 0 is the default value of inUse in case of multiple replica
assert(info.numOfEps >= 1 && info.vgId >= 1);
for(int32_t i = 0; i < pVgroupMsg->numOfEps; ++i) {
tstrncpy(info.ep[i].fqdn, pVgroupMsg->epAddr[i].fqdn, TSDB_FQDN_LEN);
info.ep[i].port = pVgroupMsg->epAddr[i].port;

View File

@ -34,6 +34,7 @@ int tscKeepConn[TSDB_SQL_MAX] = {0};
TSKEY tscGetSubscriptionProgress(void* sub, int64_t uid, TSKEY dflt);
void tscUpdateSubscriptionProgress(void* sub, int64_t uid, TSKEY ts);
void tscSaveSubscriptionProgress(void* sub);
static int32_t extractSTableQueryVgroupId(STableMetaInfo* pTableMetaInfo);
static int32_t minMsgSize() { return tsRpcHeadSize + 100; }
static int32_t getWaitingTimeInterval(int32_t count) {
@ -79,6 +80,7 @@ static void tscEpSetHtons(SRpcEpSet *s) {
s->port[i] = htons(s->port[i]);
}
}
bool tscEpSetIsEqual(SRpcEpSet *s1, SRpcEpSet *s2) {
if (s1->numOfEps != s2->numOfEps || s1->inUse != s2->inUse) {
return false;
@ -111,19 +113,22 @@ static void tscDumpEpSetFromVgroupInfo(SRpcEpSet *pEpSet, SNewVgroupInfo *pVgrou
}
}
static void tscUpdateVgroupInfo(SSqlObj *pObj, SRpcEpSet *pEpSet) {
SSqlCmd *pCmd = &pObj->cmd;
static void tscUpdateVgroupInfo(SSqlObj *pSql, SRpcEpSet *pEpSet) {
SSqlCmd *pCmd = &pSql->cmd;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
if (pTableMetaInfo == NULL || pTableMetaInfo->pTableMeta == NULL) {
return;
}
int32_t vgId = pTableMetaInfo->pTableMeta->vgId;
int32_t vgId = -1;
if (pTableMetaInfo->pTableMeta->tableType == TSDB_SUPER_TABLE) {
assert(vgId == 0);
return;
vgId = extractSTableQueryVgroupId(pTableMetaInfo);
} else {
vgId = pTableMetaInfo->pTableMeta->vgId;
}
assert(vgId > 0);
SNewVgroupInfo vgroupInfo = {.vgId = -1};
taosHashGetClone(tscVgroupMap, &vgId, sizeof(vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo));
assert(vgroupInfo.numOfEps > 0 && vgroupInfo.vgId > 0);
@ -138,6 +143,33 @@ static void tscUpdateVgroupInfo(SSqlObj *pObj, SRpcEpSet *pEpSet) {
tscDebug("after: EndPoint in use:%d, numOfEps:%d", vgroupInfo.inUse, vgroupInfo.numOfEps);
taosHashPut(tscVgroupMap, &vgId, sizeof(vgId), &vgroupInfo, sizeof(SNewVgroupInfo));
// Update the local cached epSet info cached by SqlObj
int32_t inUse = pSql->epSet.inUse;
tscDumpEpSetFromVgroupInfo(&pSql->epSet, &vgroupInfo);
tscDebug("%p update the epSet in SqlObj, in use before:%d, after:%d", pSql, inUse, pSql->epSet.inUse);
}
int32_t extractSTableQueryVgroupId(STableMetaInfo* pTableMetaInfo) {
assert(pTableMetaInfo != NULL);
int32_t vgIndex = pTableMetaInfo->vgroupIndex;
int32_t vgId = -1;
if (pTableMetaInfo->pVgroupTables == NULL) {
SVgroupsInfo *pVgroupInfo = pTableMetaInfo->vgroupList;
assert(pVgroupInfo->vgroups[vgIndex].vgId > 0 && vgIndex < pTableMetaInfo->vgroupList->numOfVgroups);
vgId = pVgroupInfo->vgroups[vgIndex].vgId;
} else {
int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables);
assert(vgIndex >= 0 && vgIndex < numOfVgroups);
SVgroupTableInfo *pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, vgIndex);
vgId = pTableIdList->vgInfo.vgId;
}
return vgId;
}
void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) {
@ -515,21 +547,22 @@ int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
int32_t vgIndex = pTableMetaInfo->vgroupIndex;
int32_t vgId = -1;
if (pTableMetaInfo->pVgroupTables == NULL) {
SVgroupsInfo *pVgroupInfo = pTableMetaInfo->vgroupList;
assert(pVgroupInfo->vgroups[vgIndex].vgId > 0 && vgIndex < pTableMetaInfo->vgroupList->numOfVgroups);
pRetrieveMsg->header.vgId = htonl(pVgroupInfo->vgroups[vgIndex].vgId);
tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d, qhandle:%" PRIX64, pSql, pVgroupInfo->vgroups[vgIndex].vgId, vgIndex, pSql->res.qId);
vgId = pVgroupInfo->vgroups[vgIndex].vgId;
} else {
int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables);
assert(vgIndex >= 0 && vgIndex < numOfVgroups);
SVgroupTableInfo* pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, vgIndex);
pRetrieveMsg->header.vgId = htonl(pTableIdList->vgInfo.vgId);
tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d, qId:%" PRIu64, pSql, pTableIdList->vgInfo.vgId, vgIndex, pSql->res.qId);
vgId = pTableIdList->vgInfo.vgId;
}
pRetrieveMsg->header.vgId = htonl(vgId);
tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d, qId:%" PRIu64, pSql, vgId, vgIndex, pSql->res.qId);
} else {
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
pRetrieveMsg->header.vgId = htonl(pTableMeta->vgId);
@ -862,8 +895,44 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pSqlFuncExpr->functionId = htons(pExpr->functionId);
pSqlFuncExpr->numOfParams = htons(pExpr->numOfParams);
pSqlFuncExpr->resColId = htons(pExpr->resColId);
if (pTableMeta->tableType != TSDB_SUPER_TABLE && pExpr->pFilter && pExpr->pFilter->numOfFilters > 0) {
pSqlFuncExpr->filterNum = htonl(pExpr->pFilter->numOfFilters);
} else {
pSqlFuncExpr->filterNum = 0;
}
pMsg += sizeof(SSqlFuncMsg);
if (pSqlFuncExpr->filterNum) {
pMsg += sizeof(SColumnFilterInfo) * pExpr->pFilter->numOfFilters;
// append the filter information after the basic column information
for (int32_t f = 0; f < pExpr->pFilter->numOfFilters; ++f) {
SColumnFilterInfo *pColFilter = &pExpr->pFilter->filterInfo[f];
SColumnFilterInfo *pFilterMsg = &pSqlFuncExpr->filterInfo[f];
pFilterMsg->filterstr = htons(pColFilter->filterstr);
if (pColFilter->filterstr) {
pFilterMsg->len = htobe64(pColFilter->len);
memcpy(pMsg, (void *)pColFilter->pz, (size_t)(pColFilter->len + 1));
pMsg += (pColFilter->len + 1); // append the additional filter binary info
} else {
pFilterMsg->lowerBndi = htobe64(pColFilter->lowerBndi);
pFilterMsg->upperBndi = htobe64(pColFilter->upperBndi);
}
pFilterMsg->lowerRelOptr = htons(pColFilter->lowerRelOptr);
pFilterMsg->upperRelOptr = htons(pColFilter->upperRelOptr);
if (pColFilter->lowerRelOptr == TSDB_RELATION_INVALID && pColFilter->upperRelOptr == TSDB_RELATION_INVALID) {
tscError("invalid filter info");
return TSDB_CODE_TSC_INVALID_SQL;
}
}
}
for (int32_t j = 0; j < pExpr->numOfParams; ++j) { // todo add log
pSqlFuncExpr->arg[j].argType = htons((uint16_t)pExpr->param[j].nType);
pSqlFuncExpr->arg[j].argBytes = htons(pExpr->param[j].nLen);
@ -1944,7 +2013,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
(vgroupInfo.inUse < 0)) { // vgroup info exists, compare with it
vgroupInfo = createNewVgroupInfo(&pMetaMsg->vgroup);
taosHashPut(tscVgroupMap, &vgId, sizeof(vgId), &vgroupInfo, sizeof(vgroupInfo));
tscDebug("add new VgroupInfo, vgId:%d, total:%d", vgId, (int32_t) taosHashGetSize(tscVgroupMap));
tscDebug("add new VgroupInfo, vgId:%d, total cached:%d", vgId, (int32_t) taosHashGetSize(tscVgroupMap));
}
}
@ -2096,18 +2165,33 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) {
tscError("%p empty vgroup info", pSql);
} else {
for (int32_t j = 0; j < pInfo->vgroupList->numOfVgroups; ++j) {
//just init, no need to lock
SVgroupInfo *pVgroups = &pInfo->vgroupList->vgroups[j];
// just init, no need to lock
SVgroupInfo *pVgroup = &pInfo->vgroupList->vgroups[j];
SVgroupMsg *vmsg = &pVgroupMsg->vgroups[j];
pVgroups->vgId = htonl(vmsg->vgId);
pVgroups->numOfEps = vmsg->numOfEps;
vmsg->vgId = htonl(vmsg->vgId);
vmsg->numOfEps = vmsg->numOfEps;
for (int32_t k = 0; k < vmsg->numOfEps; ++k) {
vmsg->epAddr[k].port = htons(vmsg->epAddr[k].port);
}
assert(pVgroups->numOfEps >= 1 && pVgroups->vgId >= 1);
SNewVgroupInfo newVi = createNewVgroupInfo(vmsg);
pVgroup->numOfEps = newVi.numOfEps;
pVgroup->vgId = newVi.vgId;
for (int32_t k = 0; k < vmsg->numOfEps; ++k) {
pVgroup->epAddr[k].port = newVi.ep[k].port;
pVgroup->epAddr[k].fqdn = strndup(newVi.ep[k].fqdn, TSDB_FQDN_LEN);
}
for (int32_t k = 0; k < pVgroups->numOfEps; ++k) {
pVgroups->epAddr[k].port = htons(vmsg->epAddr[k].port);
pVgroups->epAddr[k].fqdn = strndup(vmsg->epAddr[k].fqdn, tListLen(vmsg->epAddr[k].fqdn));
// check if current buffer contains the vgroup info.
// If not, add it
SNewVgroupInfo existVgroupInfo = {.inUse = -1};
taosHashGetClone(tscVgroupMap, &newVi.vgId, sizeof(newVi.vgId), NULL, &existVgroupInfo, sizeof(SNewVgroupInfo));
if (((existVgroupInfo.inUse >= 0) && !vgroupInfoIdentical(&existVgroupInfo, vmsg)) ||
(existVgroupInfo.inUse < 0)) { // vgroup info exists, compare with it
taosHashPut(tscVgroupMap, &newVi.vgId, sizeof(newVi.vgId), &newVi, sizeof(newVi));
tscDebug("add new VgroupInfo, vgId:%d, total cached:%d", newVi.vgId, (int32_t) taosHashGetSize(tscVgroupMap));
}
}
}

View File

@ -405,6 +405,7 @@ int taos_affected_rows(TAOS_RES *tres) {
TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) {
SSqlObj *pSql = (SSqlObj *)res;
SSqlRes *pRes = &pSql->res;
if (pSql == NULL || pSql->signature != pSql) return 0;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
@ -419,7 +420,7 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) {
SFieldInfo *pFieldInfo = &pQueryInfo->fieldsInfo;
if (pFieldInfo->final == NULL) {
if (pRes->final == NULL) {
TAOS_FIELD* f = calloc(pFieldInfo->numOfOutput, sizeof(TAOS_FIELD));
int32_t j = 0;
@ -439,10 +440,10 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) {
}
}
pFieldInfo->final = f;
pRes->final = f;
}
return pFieldInfo->final;
return pRes->final;
}
static bool needToFetchNewBlock(SSqlObj* pSql) {

View File

@ -619,7 +619,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
if (code == TSDB_CODE_SUCCESS) {
tscCreateStream(pStream, pSql, code);
} else if (code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
tscError("%p open stream failed, sql:%s, code:%s", pSql, sqlstr, tstrerror(pRes->code));
tscError("%p open stream failed, sql:%s, code:%s", pSql, sqlstr, tstrerror(code));
taosReleaseRef(tscObjRef, pSql->self);
free(pStream);
return NULL;

View File

@ -2986,7 +2986,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
}
tscDebug("%p sub:%p query complete, ep:%s, vgId:%d, orderOfSub:%d, retrieve data", trsupport->pParentSql, pSql,
pVgroup->epAddr[0].fqdn, pVgroup->vgId, trsupport->subqueryIndex);
pVgroup->epAddr[pSql->epSet.inUse].fqdn, pVgroup->vgId, trsupport->subqueryIndex);
if (pSql->res.qId == 0) { // qhandle is NULL, code is TSDB_CODE_SUCCESS means no results generated from this vnode
tscRetrieveFromDnodeCallBack(param, pSql, 0);

View File

@ -430,6 +430,8 @@ static void tscDestroyResPointerInfo(SSqlRes* pRes) {
tfree(pRes->pArithSup);
}
tfree(pRes->final);
pRes->data = NULL; // pRes->data points to the buffer of pRsp, no need to free
}
@ -1045,6 +1047,7 @@ SInternalField* tscFieldInfoAppend(SFieldInfo* pFieldInfo, TAOS_FIELD* pField) {
.pSqlExpr = NULL,
.pArithExprInfo = NULL,
.visible = true,
.pFieldFilters = NULL,
};
info.field = *pField;
@ -1057,6 +1060,7 @@ SInternalField* tscFieldInfoInsert(SFieldInfo* pFieldInfo, int32_t index, TAOS_F
.pSqlExpr = NULL,
.pArithExprInfo = NULL,
.visible = true,
.pFieldFilters = NULL,
};
info.field = *field;
@ -1130,6 +1134,22 @@ int32_t tscGetResRowLength(SArray* pExprList) {
return size;
}
static void destroyFilterInfo(SColumnFilterInfo* pFilterInfo, int32_t numOfFilters) {
for(int32_t i = 0; i < numOfFilters; ++i) {
if (pFilterInfo[i].filterstr) {
tfree(pFilterInfo[i].pz);
}
}
tfree(pFilterInfo);
}
static void tscColumnDestroy(SColumn* pCol) {
destroyFilterInfo(pCol->filterInfo, pCol->numOfFilters);
free(pCol);
}
void tscFieldInfoClear(SFieldInfo* pFieldInfo) {
if (pFieldInfo == NULL) {
return;
@ -1150,10 +1170,14 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo) {
tfree(pInfo->pArithExprInfo);
}
if (pInfo->pFieldFilters != NULL) {
tscColumnDestroy(pInfo->pFieldFilters->pFilters);
tfree(pInfo->pFieldFilters);
}
}
taosArrayDestroy(pFieldInfo->internalField);
tfree(pFieldInfo->final);
memset(pFieldInfo, 0, sizeof(SFieldInfo));
}
@ -1411,15 +1435,7 @@ SColumn* tscColumnListInsert(SArray* pColumnList, SColumnIndex* pColIndex) {
return taosArrayGetP(pColumnList, i);
}
static void destroyFilterInfo(SColumnFilterInfo* pFilterInfo, int32_t numOfFilters) {
for(int32_t i = 0; i < numOfFilters; ++i) {
if (pFilterInfo[i].filterstr) {
tfree(pFilterInfo[i].pz);
}
}
tfree(pFilterInfo);
}
SColumn* tscColumnClone(const SColumn* src) {
assert(src != NULL);
@ -1436,10 +1452,6 @@ SColumn* tscColumnClone(const SColumn* src) {
return dst;
}
static void tscColumnDestroy(SColumn* pCol) {
destroyFilterInfo(pCol->filterInfo, pCol->numOfFilters);
free(pCol);
}
void tscColumnListCopy(SArray* dst, const SArray* src, int16_t tableIndex) {
assert(src != NULL && dst != NULL);

View File

@ -39,6 +39,7 @@ extern int8_t tsEnableTelemetryReporting;
extern char tsEmail[];
extern char tsArbitrator[];
extern int8_t tsArbOnline;
extern int32_t tsDnodeId;
// common
extern int tsRpcTimer;

View File

@ -43,6 +43,7 @@ int8_t tsEnableVnodeBak = 1;
int8_t tsEnableTelemetryReporting = 1;
int8_t tsArbOnline = 0;
char tsEmail[TSDB_FQDN_LEN] = {0};
int32_t tsDnodeId = 0;
// common
int32_t tsRpcTimer = 1000;
@ -212,7 +213,7 @@ float tsAvailTmpDirectorySpace = 0;
float tsAvailDataDirGB = 0;
float tsUsedDataDirGB = 0;
float tsReservedTmpDirectorySpace = 1.0f;
float tsMinimalDataDirGB = 1.0f;
float tsMinimalDataDirGB = 2.0f;
int32_t tsTotalMemoryMB = 0;
uint32_t tsVersion = 0;

View File

@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
POST_BUILD
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.24-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.25-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})

View File

@ -5,7 +5,7 @@
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.24</version>
<version>2.0.25</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>

View File

@ -3,7 +3,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.24</version>
<version>2.0.25</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
@ -62,6 +62,14 @@
</dependencies>
<build>
<resources>
<resource>
<directory>src/main/resources</directory>
<excludes>
<exclude>**/*.md</exclude>
</excludes>
</resource>
</resources>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>

View File

@ -15,7 +15,7 @@ public abstract class AbstractParameterMetaData extends WrapperImpl implements P
@Override
public int getParameterCount() throws SQLException {
return parameters.length;
return parameters == null ? 0 : parameters.length;
}
@Override

View File

@ -29,45 +29,25 @@ public class TSDBJNIConnector {
private static volatile Boolean isInitialized = false;
private TaosInfo taosInfo = TaosInfo.getInstance();
// Connection pointer used in C
private long taos = TSDBConstants.JNI_NULL_POINTER;
// result set status in current connection
private boolean isResultsetClosed = true;
private int affectedRows = -1;
static {
System.loadLibrary("taos");
System.out.println("java.library.path:" + System.getProperty("java.library.path"));
}
/**
* Connection pointer used in C
*/
private long taos = TSDBConstants.JNI_NULL_POINTER;
/**
* Result set pointer for the current connection
*/
// private long taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER;
/**
* result set status in current connection
*/
private boolean isResultsetClosed = true;
private int affectedRows = -1;
/**
* Whether the connection is closed
*/
public boolean isClosed() {
return this.taos == TSDBConstants.JNI_NULL_POINTER;
}
/**
* Returns the status of last result set in current connection
*/
public boolean isResultsetClosed() {
return this.isResultsetClosed;
}
/**
* Initialize static variables in JNI to optimize performance
*/
public static void init(String configDir, String locale, String charset, String timezone) throws SQLWarning {
synchronized (isInitialized) {
if (!isInitialized) {
@ -93,11 +73,6 @@ public class TSDBJNIConnector {
public static native String getTsCharset();
/**
* Get connection pointer
*
* @throws SQLException
*/
public boolean connect(String host, int port, String dbName, String user, String password) throws SQLException {
if (this.taos != TSDBConstants.JNI_NULL_POINTER) {
// this.closeConnectionImp(this.taos);
@ -185,13 +160,6 @@ public class TSDBJNIConnector {
private native String getErrMsgImp(long pSql);
/**
* Get resultset pointer
* Each connection should have a single open result set at a time
*/
// public long getResultSet() {
// return taosResultSetPointer;
// }
private native long getResultSetImp(long connection, long pSql);
public boolean isUpdateQuery(long pSql) {
@ -231,6 +199,7 @@ public class TSDBJNIConnector {
// }
// return resCode;
// }
private native int freeResultSetImp(long connection, long result);
/**
@ -323,8 +292,7 @@ public class TSDBJNIConnector {
* Validate if a <I>create table</I> sql statement is correct without actually creating that table
*/
public boolean validateCreateTableSql(String sql) {
long connection = taos;
int res = validateCreateTableSqlImp(connection, sql.getBytes());
int res = validateCreateTableSqlImp(taos, sql.getBytes());
return res != 0 ? false : true;
}

View File

@ -84,9 +84,9 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
return new Timestamp(row.getDate(colIndex).getTime());
case TSDBConstants.TSDB_DATA_TYPE_BINARY:
return row.getString(colIndex).getBytes();
return row.getString(colIndex) == null ? null : row.getString(colIndex).getBytes();
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
return row.getString(colIndex);
return row.getString(colIndex) == null ? null : row.getString(colIndex);
default:
return row.get(colIndex);
}

View File

@ -0,0 +1,17 @@
package com.taosdata.jdbc.utils;
public class OSUtils {
private static final String OS = System.getProperty("os.name").toLowerCase();
public static boolean isWindows() {
return OS.indexOf("win") >= 0;
}
public static boolean isMac() {
return OS.indexOf("mac") >= 0;
}
public static boolean isLinux() {
return OS.indexOf("nux") >= 0;
}
}

View File

@ -17,7 +17,6 @@ public class DriverAutoloadTest {
@Test
public void testRestful() throws SQLException {
// Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(url, properties);
Assert.assertNotNull(conn);

View File

@ -0,0 +1,64 @@
package com.taosdata.jdbc.cases;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.sql.*;
public class NullValueInResultSetForJdbcRestfulTest {
private static final String host = "127.0.0.1";
Connection conn;
@Test
public void test() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select * from weather");
ResultSetMetaData meta = rs.getMetaData();
while (rs.next()) {
for (int i = 1; i <= meta.getColumnCount(); i++) {
Object value = rs.getObject(i);
System.out.print(meta.getColumnLabel(i) + ": " + value + "\t");
}
System.out.println();
}
} catch (SQLException e) {
e.printStackTrace();
}
}
@Before
public void before() throws SQLException {
final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
conn = DriverManager.getConnection(url);
try (Statement stmt = conn.createStatement()) {
stmt.execute("drop database if exists test_null");
stmt.execute("create database if not exists test_null");
stmt.execute("use test_null");
stmt.execute("create table weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(64), f9 nchar(64))");
stmt.executeUpdate("insert into weather(ts, f1) values(now+1s, 1)");
stmt.executeUpdate("insert into weather(ts, f2) values(now+2s, 2)");
stmt.executeUpdate("insert into weather(ts, f3) values(now+3s, 3.0)");
stmt.executeUpdate("insert into weather(ts, f4) values(now+4s, 4.0)");
stmt.executeUpdate("insert into weather(ts, f5) values(now+5s, 5)");
stmt.executeUpdate("insert into weather(ts, f6) values(now+6s, 6)");
stmt.executeUpdate("insert into weather(ts, f7) values(now+7s, true)");
stmt.executeUpdate("insert into weather(ts, f8) values(now+8s, 'hello')");
stmt.executeUpdate("insert into weather(ts, f9) values(now+9s, '涛思数据')");
} catch (SQLException e) {
e.printStackTrace();
}
}
@After
public void after() {
try {
if (conn != null)
conn.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}

View File

@ -0,0 +1,30 @@
package com.taosdata.jdbc.utils;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class OSUtilsTest {
private String OS;
@Test
public void inWindows() {
Assert.assertEquals(OS.indexOf("win") >= 0, OSUtils.isWindows());
}
@Test
public void isMac() {
Assert.assertEquals(OS.indexOf("mac") >= 0, OSUtils.isMac());
}
@Test
public void isLinux() {
Assert.assertEquals(OS.indexOf("nux") >= 0, OSUtils.isLinux());
}
@Before
public void before() {
OS = System.getProperty("os.name").toLowerCase();
}
}

View File

@ -73,6 +73,7 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row);
static void cqCreateStream(SCqContext *pContext, SCqObj *pObj);
int32_t cqObjRef = -1;
int32_t cqVnodeNum = 0;
void cqRmFromList(SCqObj *pObj) {
//LOCK in caller
@ -166,6 +167,8 @@ void *cqOpen(void *ahandle, const SCqCfg *pCfg) {
return NULL;
}
atomic_add_fetch_32(&cqVnodeNum, 1);
cqCreateRef();
pContext->tmrCtrl = taosTmrInit(0, 0, 0, "CQ");
@ -240,6 +243,13 @@ void cqClose(void *handle) {
if (hasCq == 0) {
freeSCqContext(pContext);
}
int32_t remainn = atomic_sub_fetch_32(&cqVnodeNum, 1);
if (remainn <= 0) {
int32_t ref = cqObjRef;
cqObjRef = -1;
taosCloseRef(ref);
}
}
void cqStart(void *handle) {
@ -294,14 +304,29 @@ void cqStop(void *handle) {
pthread_mutex_unlock(&pContext->mutex);
}
void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema) {
void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema, int start) {
if (tsEnableStream == 0) {
return NULL;
}
SCqContext *pContext = handle;
int64_t rid = 0;
SCqObj *pObj = calloc(sizeof(SCqObj), 1);
pthread_mutex_lock(&pContext->mutex);
SCqObj *pObj = pContext->pHead;
while (pObj) {
if (pObj->uid == uid) {
rid = pObj->rid;
pthread_mutex_unlock(&pContext->mutex);
return (void *)rid;
}
pObj = pObj->next;
}
pthread_mutex_unlock(&pContext->mutex);
pObj = calloc(sizeof(SCqObj), 1);
if (pObj == NULL) return NULL;
pObj->uid = uid;
@ -326,7 +351,11 @@ void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, ch
pObj->rid = taosAddRef(cqObjRef, pObj);
if(start && pContext->master) {
cqCreateStream(pContext, pObj);
} else {
pObj->pContext = pContext;
}
rid = pObj->rid;
@ -375,9 +404,12 @@ static void doCreateStream(void *param, TAOS_RES *result, int32_t code) {
SCqContext* pContext = pObj->pContext;
SSqlObj* pSql = (SSqlObj*)result;
if (code == TSDB_CODE_SUCCESS) {
if (atomic_val_compare_exchange_ptr(&(pContext->dbConn), NULL, pSql->pTscObj) != NULL) {
taos_close(pSql->pTscObj);
}
}
pthread_mutex_lock(&pContext->mutex);
cqCreateStream(pContext, pObj);
pthread_mutex_unlock(&pContext->mutex);
@ -413,6 +445,7 @@ static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) {
pObj->tmrId = taosTmrStart(cqProcessCreateTimer, 1000, (void *)pObj->rid, pContext->tmrCtrl);
return;
}
pObj->tmrId = 0;
if (pObj->pStream == NULL) {

View File

@ -70,7 +70,7 @@ int main(int argc, char *argv[]) {
tdDestroyTSchemaBuilder(&schemaBuilder);
for (int sid =1; sid<10; ++sid) {
cqCreate(pCq, sid, sid, NULL, "select avg(speed) from demo.t1 sliding(1s) interval(5s)", pSchema);
cqCreate(pCq, sid, sid, NULL, "select avg(speed) from demo.t1 sliding(1s) interval(5s)", pSchema, 1);
}
tdFreeSchema(pSchema);

View File

@ -17,6 +17,7 @@
#include "os.h"
#include "cJSON.h"
#include "dnodeCfg.h"
#include "tglobal.h"
static SDnodeCfg tsCfg = {0};
static pthread_mutex_t tsCfgMutex;
@ -70,6 +71,7 @@ static void dnodeResetCfg(SDnodeCfg *cfg) {
pthread_mutex_lock(&tsCfgMutex);
tsCfg.dnodeId = cfg->dnodeId;
tsDnodeId = cfg->dnodeId;
tstrncpy(tsCfg.clusterId, cfg->clusterId, TSDB_CLUSTER_ID_LEN);
dnodePrintCfg(cfg);
dnodeWriteCfg();

View File

@ -237,6 +237,20 @@ static int32_t dnodeInitStorage() {
return -1;
}
TDIR *tdir = tfsOpendir("vnode_bak/.staging");
bool stagingNotEmpty = tfsReaddir(tdir) != NULL;
tfsClosedir(tdir);
if (stagingNotEmpty) {
dError("vnode_bak/.staging dir not empty, fix it first.");
return -1;
}
if (tfsMkdir("vnode_bak/.staging") < 0) {
dError("failed to create vnode_bak/.staging dir since %s", tstrerror(terrno));
return -1;
}
dnodeCheckDataDirOpenned(tsDnodeDir);
dInfo("dnode storage is initialized at %s", tsDnodeDir);

View File

@ -205,7 +205,7 @@ static void *dnodeProcessVWriteQueue(void *wparam) {
pWrite->rpcMsg.ahandle, taosMsg[pWrite->pHead.msgType], qtypeStr[qtype], pWrite->pHead.version);
pWrite->code = vnodeProcessWrite(pVnode, &pWrite->pHead, qtype, pWrite);
if (pWrite->code <= 0) pWrite->processedCount = 1;
if (pWrite->code <= 0) atomic_add_fetch_32(&pWrite->processedCount, 1);
if (pWrite->code > 0) pWrite->code = 0;
if (pWrite->code == 0 && pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT) forceFsync = true;
@ -222,7 +222,7 @@ static void *dnodeProcessVWriteQueue(void *wparam) {
dnodeSendRpcVWriteRsp(pVnode, pWrite, pWrite->code);
} else {
if (qtype == TAOS_QTYPE_FWD) {
vnodeConfirmForward(pVnode, pWrite->pHead.version, 0, pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT);
vnodeConfirmForward(pVnode, pWrite->pHead.version, pWrite->code, pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT);
}
if (pWrite->rspRet.rsp) {
rpcFreeCont(pWrite->rspRet.rsp);

View File

@ -88,10 +88,11 @@ void* qOpenQueryMgmt(int32_t vgId);
void qQueryMgmtNotifyClosed(void* pExecutor);
void qQueryMgmtReOpen(void *pExecutor);
void qCleanupQueryMgmt(void* pExecutor);
void** qRegisterQInfo(void* pMgmt, uint64_t qId, uint64_t qInfo);
void** qRegisterQInfo(void* pMgmt, uint64_t qId, void *qInfo);
void** qAcquireQInfo(void* pMgmt, uint64_t key);
void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool freeHandle);
bool checkQIdEqual(void *qHandle, uint64_t qId);
int64_t genQueryId(void);
#ifdef __cplusplus
}

View File

@ -259,7 +259,7 @@ do { \
#define TSDB_MIN_TABLES 4
#define TSDB_MAX_TABLES 10000000
#define TSDB_DEFAULT_TABLES 1000000
#define TSDB_TABLES_STEP 100
#define TSDB_TABLES_STEP 1000
#define TSDB_MIN_DAYS_PER_FILE 1
#define TSDB_MAX_DAYS_PER_FILE 3650

View File

@ -163,6 +163,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MND_INVALID_TABLE_NAME TAOS_DEF_ERROR_CODE(0, 0x0362) //"Table does not exist")
#define TSDB_CODE_MND_INVALID_TABLE_TYPE TAOS_DEF_ERROR_CODE(0, 0x0363) //"Invalid table type in tsdb")
#define TSDB_CODE_MND_TOO_MANY_TAGS TAOS_DEF_ERROR_CODE(0, 0x0364) //"Too many tags")
#define TSDB_CODE_MND_TOO_MANY_COLUMNS TAOS_DEF_ERROR_CODE(0, 0x0365) //"Too many columns")
#define TSDB_CODE_MND_TOO_MANY_TIMESERIES TAOS_DEF_ERROR_CODE(0, 0x0366) //"Too many time series")
#define TSDB_CODE_MND_NOT_SUPER_TABLE TAOS_DEF_ERROR_CODE(0, 0x0367) //"Not super table") // operation only available for super table
#define TSDB_CODE_MND_COL_NAME_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x0368) //"Tag name too long")

View File

@ -399,36 +399,6 @@ typedef struct SColIndex {
char name[TSDB_COL_NAME_LEN]; // TODO remove it
} SColIndex;
/* sql function msg, to describe the message to vnode about sql function
* operations in select clause */
typedef struct SSqlFuncMsg {
int16_t functionId;
int16_t numOfParams;
int16_t resColId; // result column id, id of the current output column
int16_t colType;
int16_t colBytes;
SColIndex colInfo;
struct ArgElem {
int16_t argType;
int16_t argBytes;
union {
double d;
int64_t i64;
char * pz;
} argValue;
} arg[3];
} SSqlFuncMsg;
typedef struct SExprInfo {
SSqlFuncMsg base;
struct tExprNode* pExpr;
int16_t bytes;
int16_t type;
int32_t interBytes;
int64_t uid;
} SExprInfo;
typedef struct SColumnFilterInfo {
int16_t lowerRelOptr;
@ -451,6 +421,42 @@ typedef struct SColumnFilterInfo {
};
} SColumnFilterInfo;
/* sql function msg, to describe the message to vnode about sql function
* operations in select clause */
typedef struct SSqlFuncMsg {
int16_t functionId;
int16_t numOfParams;
int16_t resColId; // result column id, id of the current output column
int16_t colType;
int16_t colBytes;
SColIndex colInfo;
struct ArgElem {
int16_t argType;
int16_t argBytes;
union {
double d;
int64_t i64;
char * pz;
} argValue;
} arg[3];
int32_t filterNum;
SColumnFilterInfo filterInfo[];
} SSqlFuncMsg;
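Because filterInfo[] is a flexible array member, the serialized size of each function message now varies with filterNum. A minimal sizing sketch, assuming the filter entries follow the fixed part contiguously as the struct suggests and that the definitions above are in scope (sqlFuncMsgSize is an illustrative helper, not part of this change):

static size_t sqlFuncMsgSize(int32_t filterNum) {
  // fixed header plus one SColumnFilterInfo per filter entry
  return sizeof(SSqlFuncMsg) + (size_t)filterNum * sizeof(SColumnFilterInfo);
}

String-typed filters additionally append their pattern bytes after the filter entries, as the message deserialization later in this change shows.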
typedef struct SExprInfo {
SColumnFilterInfo * pFilter;
struct tExprNode* pExpr;
int16_t bytes;
int16_t type;
int32_t interBytes;
int64_t uid;
SSqlFuncMsg base;
} SExprInfo;
/*
* for client side struct, we only need the column id, type, bytes are not necessary
* But for data in vnode side, we need all the following information.

View File

@ -42,7 +42,7 @@ void cqStart(void *handle);
void cqStop(void *handle);
// cqCreate is called by TSDB to start an instance of CQ
void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema);
void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema, int start);
// cqDrop is called by TSDB to stop an instance of CQ, handle is the return value of cqCreate
void cqDrop(void *handle);

View File

@ -51,7 +51,7 @@ typedef struct {
void *cqH;
int (*notifyStatus)(void *, int status, int eno);
int (*eventCallBack)(void *);
void *(*cqCreateFunc)(void *handle, uint64_t uid, int32_t sid, const char *dstTable, char *sqlStr, STSchema *pSchema);
void *(*cqCreateFunc)(void *handle, uint64_t uid, int32_t sid, const char *dstTable, char *sqlStr, STSchema *pSchema, int start);
void (*cqDropFunc)(void *handle);
} STsdbAppH;

View File

@ -79,9 +79,6 @@ typedef void (*FStopSyncFile)(int32_t vgId, uint64_t fversion);
// get file version
typedef int32_t (*FGetVersion)(int32_t vgId, uint64_t *fver, uint64_t *vver);
// reset version
typedef int32_t (*FResetVersion)(int32_t vgId, uint64_t fver);
typedef int32_t (*FSendFile)(void *tsdb, SOCKET socketFd);
typedef int32_t (*FRecvFile)(void *tsdb, SOCKET socketFd);
@ -99,7 +96,6 @@ typedef struct {
FStartSyncFile startSyncFileFp;
FStopSyncFile stopSyncFileFp;
FGetVersion getVersionFp;
FResetVersion resetVersionFp;
FSendFile sendFileFp;
FRecvFile recvFileFp;
} SSyncInfo;

View File

@ -205,6 +205,11 @@
#define TK_VALUES 186
#define TK_SPACE 300
#define TK_COMMENT 301
#define TK_ILLEGAL 302

View File

@ -27,7 +27,7 @@
#define MAX_IP_SIZE 20
#define MAX_PASSWORD_SIZE 20
#define MAX_HISTORY_SIZE 1000
#define MAX_COMMAND_SIZE 65536
#define MAX_COMMAND_SIZE 1048586
#define HISTORY_FILE ".taos_history"
#define DEFAULT_RES_SHOW_NUM 100

View File

@ -238,7 +238,7 @@ void resetCommand(Command *cmd, const char s[]) {
clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size);
memset(cmd->buffer, 0, MAX_COMMAND_SIZE);
memset(cmd->command, 0, MAX_COMMAND_SIZE);
strcpy(cmd->command, s);
strncpy(cmd->command, s, MAX_COMMAND_SIZE);
int size = 0;
int width = 0;
getMbSizeInfo(s, &size, &width);

View File

@ -199,15 +199,19 @@ void updateBuffer(Command *cmd) {
}
int isReadyGo(Command *cmd) {
char total[MAX_COMMAND_SIZE];
char *total = malloc(MAX_COMMAND_SIZE);
memset(total, 0, MAX_COMMAND_SIZE);
sprintf(total, "%s%s", cmd->buffer, cmd->command);
char *reg_str =
"(^.*;\\s*$)|(^\\s*$)|(^\\s*exit\\s*$)|(^\\s*q\\s*$)|(^\\s*quit\\s*$)|(^"
"\\s*clear\\s*$)";
if (regex_match(total, reg_str, REG_EXTENDED | REG_ICASE)) return 1;
if (regex_match(total, reg_str, REG_EXTENDED | REG_ICASE)) {
free(total);
return 1;
}
free(total);
return 0;
}

View File

@ -9,19 +9,18 @@ IF (GIT_FOUND)
EXECUTE_PROCESS(
COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -n 1 ${CMAKE_CURRENT_LIST_DIR}/taosdemo.c
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_COMMIT)
EXECUTE_PROCESS(
COMMAND bash "-c" "echo '${TAOSDEMO_COMMIT}' | awk '{print $1}' | cut -c -9"
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_COMMIT_SHA1)
STRING(SUBSTRING "${TAOSDEMO_COMMIT_SHA1}" 0 7 TAOSDEMO_COMMIT_SHA1)
EXECUTE_PROCESS(
COMMAND ${GIT_EXECUTABLE} status -z -s ${CMAKE_CURRENT_LIST_DIR}/taosdemo.c
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_STATUS)
IF (TD_LINUX)
EXECUTE_PROCESS(
COMMAND bash "-c" "echo '${TAOSDEMO_STATUS}' | awk '{print $1}'"
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_STATUS)
ENDIF (TD_LINUX)
MESSAGE("taosdemo.c status: " ${TAOSDEMO_STATUS})
ELSE()
MESSAGE("Git not found")
@ -29,9 +28,9 @@ ELSE()
SET(TAOSDEMO_STATUS "unknown")
ENDIF (GIT_FOUND)
STRING(STRIP ${TAOSDEMO_COMMIT_SHA1} TAOSDEMO_COMMIT_SHA1)
STRING(STRIP "${TAOSDEMO_COMMIT_SHA1}" TAOSDEMO_COMMIT_SHA1)
MESSAGE("taosdemo's latest commit in short is:" ${TAOSDEMO_COMMIT_SHA1})
STRING(STRIP ${TAOSDEMO_STATUS} TAOSDEMO_STATUS)
STRING(STRIP "${TAOSDEMO_STATUS}" TAOSDEMO_STATUS)
IF (TAOSDEMO_STATUS MATCHES "M")
SET(TAOSDEMO_STATUS "modified")

View File

@ -40,7 +40,6 @@
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 1000,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 20,
"max_sql_len": 1024000,
"disorder_ratio": 0,

View File

@ -40,7 +40,6 @@
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100000,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,

View File

@ -1,5 +1,5 @@
{
"filetype":"query",
"filetype": "query",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
@ -7,13 +7,30 @@
"password": "taosdata",
"confirm_parameter_prompt": "yes",
"databases": "dbx",
"specified_table_query":
{"query_interval":1, "concurrent":4,
"sqls": [{"sql": "select last_row(*) from stb where color='red'", "result": "./query_res0.txt"},
{"sql": "select count(*) from stb_01", "result": "./query_res1.txt"}]
"query_times": 1,
"specified_table_query": {
"query_interval": 1,
"concurrent": 4,
"sqls": [
{
"sql": "select last_row(*) from stb where color='red'",
"result": "./query_res0.txt"
},
"super_table_query":
{"stblname": "stb", "query_interval":1, "threads":4,
"sqls": [{"sql": "select last_row(*) from xxxx", "result": "./query_res2.txt"}]
{
"sql": "select count(*) from stb_01",
"result": "./query_res1.txt"
}
]
},
"super_table_query": {
"stblname": "stb",
"query_interval": 1,
"threads": 4,
"sqls": [
{
"sql": "select last_row(*) from xxxx",
"result": "./query_res2.txt"
}
]
}
}

File diff suppressed because it is too large

View File

@ -39,6 +39,22 @@ typedef struct {
int8_t type;
} SOColInfo;
#define debugPrint(fmt, ...) \
do { if (g_args.debug_print || g_args.verbose_print) \
fprintf(stderr, "DEBG: "fmt, __VA_ARGS__); } while(0)
#define verbosePrint(fmt, ...) \
do { if (g_args.verbose_print) \
fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0)
#define performancePrint(fmt, ...) \
do { if (g_args.performance_print) \
fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0)
#define errorPrint(fmt, ...) \
do { fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); } while(0)
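These are variadic fprintf wrappers gated on the global flags, so the format string must be a literal (it is concatenated with a prefix such as "DEBG: " or "ERROR: "). Two illustrative calls, with made-up messages:

debugPrint("%s() LN%d, parsing argument %s\n", __func__, __LINE__, "-E");
errorPrint("failed to open %s for writing\n", "dump.sql");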
// -------------------------- SHOW DATABASE INTERFACE-----------------------
enum _show_db_index {
TSDB_SHOW_DB_NAME_INDEX,
@ -199,16 +215,18 @@ static struct argp_option options[] = {
{"schemaonly", 's', 0, 0, "Only dump schema.", 3},
{"with-property", 'M', 0, 0, "Dump schema with properties.", 3},
{"start-time", 'S', "START_TIME", 0, "Start time to dump.", 3},
{"end-time", 'E', "END_TIME", 0, "End time to dump.", 3},
{"end-time", 'E', "END_TIME", 0, "End time to dump. Epoch or ISO8601/RFC3339 format is acceptable. For example: 2017-10-01T18:00:00+0800", 3},
{"data-batch", 'N', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3},
{"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3},
{"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3},
{"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3},
{"allow-sys", 'a', 0, 0, "Allow to dump sys database", 3},
{"debug", 'g', 0, 0, "Print debug info.", 1},
{"verbose", 'v', 0, 0, "Print verbose debug info.", 1},
{0}};
/* Used by main to communicate with parse_opt. */
struct arguments {
typedef struct arguments {
// connection option
char *host;
char *user;
@ -240,7 +258,10 @@ struct arguments {
char **arg_list;
int arg_list_len;
bool isDumpIn;
};
bool debug_print;
bool verbose_print;
bool performance_print;
} SArguments;
/* Parse a single option. */
static error_t parse_opt(int key, char *arg, struct argp_state *state) {
@ -286,6 +307,9 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
tstrncpy(arguments->outpath, full_path.we_wordv[0], TSDB_FILENAME_LEN);
wordfree(&full_path);
break;
case 'g':
arguments->debug_print = true;
break;
case 'i':
arguments->isDumpIn = true;
if (wordexp(arg, &full_path, 0) != 0) {
@ -387,7 +411,7 @@ int taosCheckParam(struct arguments *arguments);
void taosFreeDbInfos();
static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName);
struct arguments tsArguments = {
struct arguments g_args = {
// connection option
NULL,
"root",
@ -421,7 +445,10 @@ struct arguments tsArguments = {
0,
NULL,
0,
false
false,
false, // debug_print
false, // verbose_print
false // performance_print
};
static int queryDbImpl(TAOS *taos, char *command) {
@ -453,13 +480,48 @@ static int queryDbImpl(TAOS *taos, char *command) {
return 0;
}
static void parse_args(int argc, char *argv[], SArguments *arguments) {
for (int i = 1; i < argc; i++) {
if (strcmp(argv[i], "-E") == 0) {
char *tmp = strdup(argv[++i]);
if (tmp) {
int64_t tmpEpoch;
if (strchr(tmp, ':') && strchr(tmp, '-')) {
if (TSDB_CODE_SUCCESS != taosParseTime(
tmp, &tmpEpoch, strlen(tmp), TSDB_TIME_PRECISION_MILLI, 0)) {
fprintf(stderr, "Input end time error!\n");
free(tmp);
return;
}
} else {
tmpEpoch = atoll(tmp);
}
sprintf(argv[i], "%"PRId64"", tmpEpoch);
debugPrint("%s() LN%d, tmp is: %s, argv[%d]: %s\n",
__func__, __LINE__, tmp, i, argv[i]);
free(tmp);
} else {
errorPrint("%s() LN%d, strdup() cannot allocate memory\n", __func__, __LINE__);
exit(-1);
}
} else if (strcmp(argv[i], "-g") == 0) {
arguments->debug_print = true;
}
}
}
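This pre-pass rewrites the -E value in argv before argp runs: a value containing both ':' and '-' is treated as an ISO8601/RFC3339 timestamp and converted with taosParseTime, anything else is assumed to already be an epoch value. As a worked example, -E 2017-10-01T18:00:00+0800 is 2017-10-01 10:00:00 UTC, i.e. 1506852000 seconds since the epoch, so the argument would be rewritten to 1506852000000 (milliseconds) before option parsing continues.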
int main(int argc, char *argv[]) {
/* Parse our arguments; every option seen by parse_opt will be
reflected in arguments. */
argp_parse(&argp, argc, argv, 0, 0, &tsArguments);
parse_args(argc, argv, &g_args);
if (tsArguments.abort) {
argp_parse(&argp, argc, argv, 0, 0, &g_args);
if (g_args.abort) {
#ifndef _ALPINE
error(10, 0, "ABORTED");
#else
@ -469,81 +531,82 @@ int main(int argc, char *argv[]) {
printf("====== arguments config ======\n");
{
printf("host: %s\n", tsArguments.host);
printf("user: %s\n", tsArguments.user);
printf("password: %s\n", tsArguments.password);
printf("port: %u\n", tsArguments.port);
printf("cversion: %s\n", tsArguments.cversion);
printf("mysqlFlag: %d\n", tsArguments.mysqlFlag);
printf("outpath: %s\n", tsArguments.outpath);
printf("inpath: %s\n", tsArguments.inpath);
printf("resultFile: %s\n", tsArguments.resultFile);
printf("encode: %s\n", tsArguments.encode);
printf("all_databases: %d\n", tsArguments.all_databases);
printf("databases: %d\n", tsArguments.databases);
printf("schemaonly: %d\n", tsArguments.schemaonly);
printf("with_property: %d\n", tsArguments.with_property);
printf("start_time: %" PRId64 "\n", tsArguments.start_time);
printf("end_time: %" PRId64 "\n", tsArguments.end_time);
printf("data_batch: %d\n", tsArguments.data_batch);
printf("max_sql_len: %d\n", tsArguments.max_sql_len);
printf("table_batch: %d\n", tsArguments.table_batch);
printf("thread_num: %d\n", tsArguments.thread_num);
printf("allow_sys: %d\n", tsArguments.allow_sys);
printf("abort: %d\n", tsArguments.abort);
printf("isDumpIn: %d\n", tsArguments.isDumpIn);
printf("arg_list_len: %d\n", tsArguments.arg_list_len);
printf("host: %s\n", g_args.host);
printf("user: %s\n", g_args.user);
printf("password: %s\n", g_args.password);
printf("port: %u\n", g_args.port);
printf("cversion: %s\n", g_args.cversion);
printf("mysqlFlag: %d\n", g_args.mysqlFlag);
printf("outpath: %s\n", g_args.outpath);
printf("inpath: %s\n", g_args.inpath);
printf("resultFile: %s\n", g_args.resultFile);
printf("encode: %s\n", g_args.encode);
printf("all_databases: %d\n", g_args.all_databases);
printf("databases: %d\n", g_args.databases);
printf("schemaonly: %d\n", g_args.schemaonly);
printf("with_property: %d\n", g_args.with_property);
printf("start_time: %" PRId64 "\n", g_args.start_time);
printf("end_time: %" PRId64 "\n", g_args.end_time);
printf("data_batch: %d\n", g_args.data_batch);
printf("max_sql_len: %d\n", g_args.max_sql_len);
printf("table_batch: %d\n", g_args.table_batch);
printf("thread_num: %d\n", g_args.thread_num);
printf("allow_sys: %d\n", g_args.allow_sys);
printf("abort: %d\n", g_args.abort);
printf("isDumpIn: %d\n", g_args.isDumpIn);
printf("arg_list_len: %d\n", g_args.arg_list_len);
printf("debug_print: %d\n", g_args.debug_print);
for (int32_t i = 0; i < tsArguments.arg_list_len; i++) {
printf("arg_list[%d]: %s\n", i, tsArguments.arg_list[i]);
for (int32_t i = 0; i < g_args.arg_list_len; i++) {
printf("arg_list[%d]: %s\n", i, g_args.arg_list[i]);
}
}
printf("==============================\n");
if (tsArguments.cversion[0] != 0){
tstrncpy(version, tsArguments.cversion, 11);
if (g_args.cversion[0] != 0){
tstrncpy(version, g_args.cversion, 11);
}
if (taosCheckParam(&tsArguments) < 0) {
if (taosCheckParam(&g_args) < 0) {
exit(EXIT_FAILURE);
}
g_fpOfResult = fopen(tsArguments.resultFile, "a");
g_fpOfResult = fopen(g_args.resultFile, "a");
if (NULL == g_fpOfResult) {
fprintf(stderr, "Failed to open %s for save result\n", tsArguments.resultFile);
fprintf(stderr, "Failed to open %s for save result\n", g_args.resultFile);
return 1;
};
fprintf(g_fpOfResult, "#############################################################################\n");
fprintf(g_fpOfResult, "============================== arguments config =============================\n");
{
fprintf(g_fpOfResult, "host: %s\n", tsArguments.host);
fprintf(g_fpOfResult, "user: %s\n", tsArguments.user);
fprintf(g_fpOfResult, "password: %s\n", tsArguments.password);
fprintf(g_fpOfResult, "port: %u\n", tsArguments.port);
fprintf(g_fpOfResult, "cversion: %s\n", tsArguments.cversion);
fprintf(g_fpOfResult, "mysqlFlag: %d\n", tsArguments.mysqlFlag);
fprintf(g_fpOfResult, "outpath: %s\n", tsArguments.outpath);
fprintf(g_fpOfResult, "inpath: %s\n", tsArguments.inpath);
fprintf(g_fpOfResult, "resultFile: %s\n", tsArguments.resultFile);
fprintf(g_fpOfResult, "encode: %s\n", tsArguments.encode);
fprintf(g_fpOfResult, "all_databases: %d\n", tsArguments.all_databases);
fprintf(g_fpOfResult, "databases: %d\n", tsArguments.databases);
fprintf(g_fpOfResult, "schemaonly: %d\n", tsArguments.schemaonly);
fprintf(g_fpOfResult, "with_property: %d\n", tsArguments.with_property);
fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", tsArguments.start_time);
fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", tsArguments.end_time);
fprintf(g_fpOfResult, "data_batch: %d\n", tsArguments.data_batch);
fprintf(g_fpOfResult, "max_sql_len: %d\n", tsArguments.max_sql_len);
fprintf(g_fpOfResult, "table_batch: %d\n", tsArguments.table_batch);
fprintf(g_fpOfResult, "thread_num: %d\n", tsArguments.thread_num);
fprintf(g_fpOfResult, "allow_sys: %d\n", tsArguments.allow_sys);
fprintf(g_fpOfResult, "abort: %d\n", tsArguments.abort);
fprintf(g_fpOfResult, "isDumpIn: %d\n", tsArguments.isDumpIn);
fprintf(g_fpOfResult, "arg_list_len: %d\n", tsArguments.arg_list_len);
fprintf(g_fpOfResult, "host: %s\n", g_args.host);
fprintf(g_fpOfResult, "user: %s\n", g_args.user);
fprintf(g_fpOfResult, "password: %s\n", g_args.password);
fprintf(g_fpOfResult, "port: %u\n", g_args.port);
fprintf(g_fpOfResult, "cversion: %s\n", g_args.cversion);
fprintf(g_fpOfResult, "mysqlFlag: %d\n", g_args.mysqlFlag);
fprintf(g_fpOfResult, "outpath: %s\n", g_args.outpath);
fprintf(g_fpOfResult, "inpath: %s\n", g_args.inpath);
fprintf(g_fpOfResult, "resultFile: %s\n", g_args.resultFile);
fprintf(g_fpOfResult, "encode: %s\n", g_args.encode);
fprintf(g_fpOfResult, "all_databases: %d\n", g_args.all_databases);
fprintf(g_fpOfResult, "databases: %d\n", g_args.databases);
fprintf(g_fpOfResult, "schemaonly: %d\n", g_args.schemaonly);
fprintf(g_fpOfResult, "with_property: %d\n", g_args.with_property);
fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", g_args.start_time);
fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", g_args.end_time);
fprintf(g_fpOfResult, "data_batch: %d\n", g_args.data_batch);
fprintf(g_fpOfResult, "max_sql_len: %d\n", g_args.max_sql_len);
fprintf(g_fpOfResult, "table_batch: %d\n", g_args.table_batch);
fprintf(g_fpOfResult, "thread_num: %d\n", g_args.thread_num);
fprintf(g_fpOfResult, "allow_sys: %d\n", g_args.allow_sys);
fprintf(g_fpOfResult, "abort: %d\n", g_args.abort);
fprintf(g_fpOfResult, "isDumpIn: %d\n", g_args.isDumpIn);
fprintf(g_fpOfResult, "arg_list_len: %d\n", g_args.arg_list_len);
for (int32_t i = 0; i < tsArguments.arg_list_len; i++) {
fprintf(g_fpOfResult, "arg_list[%d]: %s\n", i, tsArguments.arg_list[i]);
for (int32_t i = 0; i < g_args.arg_list_len; i++) {
fprintf(g_fpOfResult, "arg_list[%d]: %s\n", i, g_args.arg_list[i]);
}
}
@ -552,11 +615,11 @@ int main(int argc, char *argv[]) {
time_t tTime = time(NULL);
struct tm tm = *localtime(&tTime);
if (tsArguments.isDumpIn) {
if (g_args.isDumpIn) {
fprintf(g_fpOfResult, "============================== DUMP IN ============================== \n");
fprintf(g_fpOfResult, "# DumpIn start time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1,
tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
if (taosDumpIn(&tsArguments) < 0) {
if (taosDumpIn(&g_args) < 0) {
fprintf(g_fpOfResult, "\n");
fclose(g_fpOfResult);
return -1;
@ -565,7 +628,7 @@ int main(int argc, char *argv[]) {
fprintf(g_fpOfResult, "============================== DUMP OUT ============================== \n");
fprintf(g_fpOfResult, "# DumpOut start time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1,
tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
if (taosDumpOut(&tsArguments) < 0) {
if (taosDumpOut(&g_args) < 0) {
fprintf(g_fpOfResult, "\n");
fclose(g_fpOfResult);
return -1;
@ -1236,8 +1299,8 @@ void* taosDumpOutWorkThreadFp(void *arg)
FILE *fp = NULL;
memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
if (tsArguments.outpath[0] != 0) {
sprintf(tmpBuf, "%s/%s.tables.%d.sql", tsArguments.outpath, pThread->dbName, pThread->threadIndex);
if (g_args.outpath[0] != 0) {
sprintf(tmpBuf, "%s/%s.tables.%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex);
} else {
sprintf(tmpBuf, "%s.tables.%d.sql", pThread->dbName, pThread->threadIndex);
}
@ -1270,7 +1333,7 @@ void* taosDumpOutWorkThreadFp(void *arg)
ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord));
if (readLen <= 0) break;
int ret = taosDumpTable(tableRecord.name, tableRecord.metric, &tsArguments, fp, pThread->taosCon, pThread->dbName);
int ret = taosDumpTable(tableRecord.name, tableRecord.metric, &g_args, fp, pThread->taosCon, pThread->dbName);
if (ret >= 0) {
// TODO: sum table count and table rows by self
pThread->tablesOfDumpOut++;
@ -1282,13 +1345,13 @@ void* taosDumpOutWorkThreadFp(void *arg)
}
tablesInOneFile++;
if (tablesInOneFile >= tsArguments.table_batch) {
if (tablesInOneFile >= g_args.table_batch) {
fclose(fp);
tablesInOneFile = 0;
memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
if (tsArguments.outpath[0] != 0) {
sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql", tsArguments.outpath, pThread->dbName, pThread->threadIndex, fileNameIndex);
if (g_args.outpath[0] != 0) {
sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex, fileNameIndex);
} else {
sprintf(tmpBuf, "%s.tables.%d-%d.sql", pThread->dbName, pThread->threadIndex, fileNameIndex);
}
@ -1491,14 +1554,14 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
taos_free_result(res);
lseek(fd, 0, SEEK_SET);
int maxThreads = tsArguments.thread_num;
int maxThreads = g_args.thread_num;
int tableOfPerFile ;
if (numOfTable <= tsArguments.thread_num) {
if (numOfTable <= g_args.thread_num) {
tableOfPerFile = 1;
maxThreads = numOfTable;
} else {
tableOfPerFile = numOfTable / tsArguments.thread_num;
if (0 != numOfTable % tsArguments.thread_num) {
tableOfPerFile = numOfTable / g_args.thread_num;
if (0 != numOfTable % g_args.thread_num) {
tableOfPerFile += 1;
}
}
@ -2214,7 +2277,7 @@ void* taosDumpInWorkThreadFp(void *arg)
continue;
}
fprintf(stderr, "Success Open input file: %s\n", SQLFileName);
taosDumpInOneFile(pThread->taosCon, fp, tsfCharset, tsArguments.encode, SQLFileName);
taosDumpInOneFile(pThread->taosCon, fp, tsfCharset, g_args.encode, SQLFileName);
}
}

View File

@ -437,14 +437,14 @@ static int32_t mnodeCheckClusterCfgPara(const SClusterCfg *clusterCfg) {
return TAOS_DN_OFF_TIME_ZONE_NOT_MATCH;
}
if (0 != strncasecmp(clusterCfg->locale, tsLocale, strlen(tsLocale))) {
mError("\"locale\"[%s - %s] cfg parameters inconsistent", clusterCfg->locale, tsLocale);
return TAOS_DN_OFF_LOCALE_NOT_MATCH;
}
if (0 != strncasecmp(clusterCfg->charset, tsCharset, strlen(tsCharset))) {
mError("\"charset\"[%s - %s] cfg parameters inconsistent.", clusterCfg->charset, tsCharset);
return TAOS_DN_OFF_CHARSET_NOT_MATCH;
}
// if (0 != strncasecmp(clusterCfg->locale, tsLocale, strlen(tsLocale))) {
// mError("\"locale\"[%s - %s] cfg parameters inconsistent", clusterCfg->locale, tsLocale);
// return TAOS_DN_OFF_LOCALE_NOT_MATCH;
// }
// if (0 != strncasecmp(clusterCfg->charset, tsCharset, strlen(tsCharset))) {
// mError("\"charset\"[%s - %s] cfg parameters inconsistent.", clusterCfg->charset, tsCharset);
// return TAOS_DN_OFF_CHARSET_NOT_MATCH;
// }
if (clusterCfg->enableBalance != tsEnableBalance) {
mError("\"balance\"[%d - %d] cfg parameters inconsistent", clusterCfg->enableBalance, tsEnableBalance);
@ -628,6 +628,11 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
bnNotify();
}
if (!tsEnableBalance) {
int32_t numOfMnodes = mnodeGetMnodesNum();
if (numOfMnodes < tsNumOfMnodes) bnNotify();
}
if (openVnodes != pDnode->openVnodes) {
mnodeCheckUnCreatedVgroup(pDnode, pStatus->load, openVnodes);
}

View File

@ -381,6 +381,8 @@ static bool mnodeAllOnline() {
void *pIter = NULL;
bool allOnline = true;
sdbUpdateMnodeRoles();
while (1) {
SMnodeObj *pMnode = NULL;
pIter = mnodeGetNextMnode(pIter, &pMnode);

View File

@ -315,6 +315,10 @@ void sdbUpdateAsync() {
taosTmrReset(sdbUpdateSyncTmrFp, 200, NULL, tsMnodeTmr, &tsSdbTmr);
}
static int node_cmp(const void *l, const void *r) {
return ((SNodeInfo *)l)->nodeId - ((SNodeInfo *)r)->nodeId;
}
int32_t sdbUpdateSync(void *pMnodes) {
SMInfos *pMinfos = pMnodes;
if (!mnodeIsRunning()) {
@ -382,6 +386,8 @@ int32_t sdbUpdateSync(void *pMnodes) {
return TSDB_CODE_SUCCESS;
}
qsort(syncCfg.nodeInfo, syncCfg.replica, sizeof(syncCfg.nodeInfo[0]), node_cmp);
sdbInfo("vgId:1, work as mnode, replica:%d", syncCfg.replica);
for (int32_t i = 0; i < syncCfg.replica; ++i) {
sdbInfo("vgId:1, mnode:%d, %s:%d", syncCfg.nodeInfo[i].nodeId, syncCfg.nodeInfo[i].nodeFqdn,
@ -1019,7 +1025,7 @@ static int32_t sdbWriteToQueue(SSdbRow *pRow, int32_t qtype) {
int32_t queued = atomic_add_fetch_32(&tsSdbMgmt.queuedMsg, 1);
if (queued > MAX_QUEUED_MSG_NUM) {
sdbDebug("vgId:1, too many msg:%d in sdb queue, flow control", queued);
sdbInfo("vgId:1, too many msg:%d in sdb queue, flow control", queued);
taosMsleep(1);
}

View File

@ -1037,6 +1037,19 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
SCreateTableMsg* pCreate = (SCreateTableMsg*)((char*)pCreate1 + sizeof(SCMCreateTableMsg));
int16_t numOfTags = htons(pCreate->numOfTags);
if (numOfTags > TSDB_MAX_TAGS) {
mError("msg:%p, app:%p table:%s, failed to create, too many tags", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
return TSDB_CODE_MND_TOO_MANY_TAGS;
}
int16_t numOfColumns = htons(pCreate->numOfColumns);
int32_t numOfCols = numOfColumns + numOfTags;
if (numOfCols > TSDB_MAX_COLUMNS) {
mError("msg:%p, app:%p table:%s, failed to create, too many columns", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
return TSDB_CODE_MND_TOO_MANY_COLUMNS;
}
SSTableObj * pStable = calloc(1, sizeof(SSTableObj));
if (pStable == NULL) {
mError("msg:%p, app:%p table:%s, failed to create, no enough memory", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
@ -1050,10 +1063,9 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
pStable->uid = (us << 24) + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
pStable->sversion = 0;
pStable->tversion = 0;
pStable->numOfColumns = htons(pCreate->numOfColumns);
pStable->numOfTags = htons(pCreate->numOfTags);
pStable->numOfColumns = numOfColumns;
pStable->numOfTags = numOfTags;
int32_t numOfCols = pStable->numOfColumns + pStable->numOfTags;
int32_t schemaSize = numOfCols * sizeof(SSchema);
pStable->schema = (SSchema *)calloc(1, schemaSize);
if (pStable->schema == NULL) {
@ -1064,11 +1076,6 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
memcpy(pStable->schema, pCreate->schema, numOfCols * sizeof(SSchema));
if (pStable->numOfColumns > TSDB_MAX_COLUMNS || pStable->numOfTags > TSDB_MAX_TAGS) {
mError("msg:%p, app:%p table:%s, failed to create, too many columns", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
return TSDB_CODE_MND_INVALID_TABLE_NAME;
}
pStable->nextColId = 0;
for (int32_t col = 0; col < numOfCols; col++) {
@ -1340,6 +1347,11 @@ static int32_t mnodeAddSuperTableColumn(SMnodeMsg *pMsg, SSchema schema[], int32
return TSDB_CODE_MND_APP_ERROR;
}
if (pStable->numOfColumns + ncols + pStable->numOfTags > TSDB_MAX_COLUMNS) {
mError("msg:%p, app:%p stable:%s, add column, too many columns", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId);
return TSDB_CODE_MND_TOO_MANY_COLUMNS;
}
for (int32_t i = 0; i < ncols; i++) {
if (mnodeFindSuperTableColumnIndex(pStable, schema[i].name) > 0) {
mError("msg:%p, app:%p stable:%s, add column, column:%s already exist", pMsg, pMsg->rpcMsg.ahandle,

View File

@ -994,6 +994,7 @@ void mnodeSendSyncVgroupMsg(SVgObj *pVgroup) {
mDebug("vgId:%d, send sync all vnodes msg, numOfVnodes:%d db:%s", pVgroup->vgId, pVgroup->numOfVnodes,
pVgroup->dbName);
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
if (pVgroup->vnodeGid[i].role != TAOS_SYNC_ROLE_SLAVE) continue;
SRpcEpSet epSet = mnodeGetEpSetFromIp(pVgroup->vnodeGid[i].pDnode->dnodeEp);
mDebug("vgId:%d, index:%d, send sync vnode msg to dnode %s", pVgroup->vgId, i,
pVgroup->vnodeGid[i].pDnode->dnodeEp);

View File

@ -77,6 +77,7 @@ extern "C" {
#include <sys/resource.h>
#include <error.h>
#include <math.h>
#include <poll.h>
#define TAOS_OS_FUNC_LZ4
#define BUILDIN_CLZL(val) __builtin_clzll(val)

View File

@ -78,6 +78,7 @@ extern "C" {
#include <error.h>
#include <linux/sysctl.h>
#include <math.h>
#include <poll.h>
#ifdef __cplusplus
}

View File

@ -77,6 +77,7 @@ extern "C" {
#include <sys/resource.h>
#include <error.h>
#include <math.h>
#include <poll.h>
#define TAOS_OS_FUNC_LZ4
#define BUILDIN_CLZL(val) __builtin_clzll(val)

View File

@ -75,6 +75,7 @@ extern "C" {
#include <fcntl.h>
#include <sys/utsname.h>
#include <sys/resource.h>
#include <poll.h>
#ifndef _ALPINE
#include <error.h>
#endif

View File

@ -68,6 +68,8 @@ void taosSetMaskSIGPIPE();
// TAOS_OS_FUNC_SOCKET_SETSOCKETOPT
int32_t taosSetSockOpt(SOCKET socketfd, int32_t level, int32_t optname, void *optval, int32_t optlen);
int32_t taosGetSockOpt(SOCKET socketfd, int32_t level, int32_t optname, void *optval, int32_t* optlen);
// TAOS_OS_FUNC_SOCKET_INET
uint32_t taosInetAddr(char *ipAddr);
const char *taosInetNtoa(struct in_addr ipInt);

View File

@ -71,6 +71,9 @@ int32_t taosSetSockOpt(SOCKET socketfd, int32_t level, int32_t optname, void *op
return setsockopt(socketfd, level, optname, optval, (socklen_t)optlen);
}
int32_t taosGetSockOpt(SOCKET socketfd, int32_t level, int32_t optname, void *optval, int32_t* optlen) {
return getsockopt(socketfd, level, optname, optval, (socklen_t *)optlen);
}
#endif
#ifndef TAOS_OS_FUNC_SOCKET_INET

View File

@ -86,7 +86,8 @@ typedef struct SResultRow {
bool closed; // this result status: closed or opened
uint32_t numOfRows; // number of rows of current time window
SResultRowCellInfo* pCellInfo; // For each result column, there is a resultInfo
union {STimeWindow win; char* key;}; // start key of current result row
STimeWindow win;
char* key; // start key of current result row
} SResultRow;
typedef struct SGroupResInfo {
@ -189,6 +190,8 @@ typedef struct SQuery {
bool stabledev; // super table stddev query
int32_t interBufSize; // intermediate buffer size
int32_t havingNum; // having expr number
SOrderVal order;
int16_t numOfCols;
int16_t numOfTags;
@ -284,6 +287,7 @@ enum OPERATOR_TYPE_E {
OP_Fill = 13,
OP_MultiTableAggregate = 14,
OP_MultiTableTimeInterval = 15,
OP_Having = 16,
};
typedef struct SOperatorInfo {
@ -401,6 +405,11 @@ typedef struct SOffsetOperatorInfo {
int64_t offset;
} SOffsetOperatorInfo;
typedef struct SHavingOperatorInfo {
SArray* fp;
} SHavingOperatorInfo;
typedef struct SFillOperatorInfo {
SFillInfo *pFillInfo;
SSDataBlock *pRes;

View File

@ -98,6 +98,7 @@ typedef struct SQuerySqlNode {
SLimitVal limit; // limit offset [optional]
SLimitVal slimit; // group limit offset [optional]
SStrToken sqlstr; // sql string in select clause
struct tSqlExpr *pHaving; // having clause [optional]
} SQuerySqlNode;
typedef struct STableNamePair {
@ -253,6 +254,11 @@ SArray *tVariantListAppend(SArray *pList, tVariant *pVar, uint8_t sortOrder);
SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int32_t index);
SArray *tVariantListAppendToken(SArray *pList, SStrToken *pAliasToken, uint8_t sortOrder);
tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType);
int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right);
tSqlExpr *tSqlExprClone(tSqlExpr *pSrc);
SFromInfo *setTableNameList(SFromInfo* pFromInfo, SStrToken *pName, SStrToken* pAlias);
SFromInfo *setSubquery(SFromInfo* pFromInfo, SQuerySqlNode *pSqlNode);
void *destroyFromInfo(SFromInfo* pFromInfo);
@ -272,7 +278,7 @@ void tSqlExprListDestroy(SArray *pList);
SQuerySqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelectList, SFromInfo *pFrom, tSqlExpr *pWhere,
SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SSessionWindowVal *ps,
SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pgLimit);
SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pgLimit, tSqlExpr *pHaving);
SCreateTableSql *tSetCreateTableInfo(SArray *pCols, SArray *pTags, SQuerySqlNode *pSelect, int32_t type);

View File

@ -52,11 +52,20 @@ static FORCE_INLINE SResultRow *getResultRow(SResultRowInfo *pResultRowInfo, int
return pResultRowInfo->pResult[slot];
}
static FORCE_INLINE char *getPosInResultPage(SQuery *pQuery, tFilePage* page, int32_t rowOffset, int16_t offset) {
assert(rowOffset >= 0 && pQuery != NULL);
static FORCE_INLINE char* getPosInResultPage(SQueryRuntimeEnv* pRuntimeEnv, tFilePage* page, int32_t rowOffset,
int16_t offset, int32_t size) {
assert(rowOffset >= 0 && pRuntimeEnv != NULL);
SQuery* pQuery = pRuntimeEnv->pQuery;
int64_t pageSize = pRuntimeEnv->pResultBuf->pageSize;
int32_t numOfRows = (int32_t)GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pQuery->topBotQuery, pQuery->stableQuery);
return ((char *)page->data) + rowOffset + offset * numOfRows;
// buffer overflow check
int64_t bufEnd = (rowOffset + offset * numOfRows + size);
assert(page->num <= pageSize && bufEnd <= page->num);
return ((char*)page->data) + rowOffset + offset * numOfRows;
}
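The arithmetic reflects a column-wise layout inside each row's slice of the result page: all numOfRows values of one output column sit contiguously, so a column whose preceding columns occupy offset bytes starts offset * numOfRows bytes after rowOffset. Restated as a standalone sketch (cellAddr is illustrative only):

#include <stdint.h>

static char* cellAddr(char* pageData, int32_t rowOffset, int16_t offset, int32_t numOfRows) {
  // e.g. numOfRows = 100 and offset = 12 puts the column 1200 bytes after rowOffset
  return pageData + rowOffset + offset * numOfRows;
}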
bool isNullOperator(SColumnFilterElem *pFilter, const char* minval, const char* maxval, int16_t type);

View File

@ -453,7 +453,7 @@ tagitem(A) ::= PLUS(X) FLOAT(Y). {
%type select {SQuerySqlNode*}
%destructor select {destroyQuerySqlNode($$);}
select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) session_option(H) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). {
A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &S, F, &L, &G);
A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &S, F, &L, &G, N);
}
select(A) ::= LP select(B) RP. {A = B;}
@ -471,7 +471,7 @@ cmd ::= union(X). { setSqlInfo(pInfo, X, NULL, TSDB_SQL_SELECT); }
// select client_version()
// select server_state()
select(A) ::= SELECT(T) selcollist(W). {
A = tSetQuerySqlNode(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
A = tSetQuerySqlNode(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
}
// selcollist is a list of expressions that are to become the return

View File

@ -2482,6 +2482,29 @@ static STopBotInfo *getTopBotOutputInfo(SQLFunctionCtx *pCtx) {
}
}
/*
* keep the intermediate results during scan data blocks in the format of:
* +-----------------------------------+-------------one value pair-----------+------------next value pair-----------+
* |-------------pointer area----------|----ts---+-----+-----n tags-----------|----ts---+-----+-----n tags-----------|
* +..[Value Pointer1][Value Pointer2].|timestamp|value|tags1|tags2|....|tagsn|timestamp|value|tags1|tags2|....|tagsn+
*/
static void buildTopBotStruct(STopBotInfo *pTopBotInfo, SQLFunctionCtx *pCtx) {
char *tmp = (char *)pTopBotInfo + sizeof(STopBotInfo);
pTopBotInfo->res = (tValuePair**) tmp;
tmp += POINTER_BYTES * pCtx->param[0].i64;
size_t size = sizeof(tValuePair) + pCtx->tagInfo.tagsLen;
// assert(pCtx->param[0].i64 > 0);
for (int32_t i = 0; i < pCtx->param[0].i64; ++i) {
pTopBotInfo->res[i] = (tValuePair*) tmp;
pTopBotInfo->res[i]->pTags = tmp + sizeof(tValuePair);
tmp += size;
}
}
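buildTopBotStruct only re-wires pointers inside one flat allocation laid out as the comment above describes: the STopBotInfo header, then k result pointers, then k value-pair slots each followed by its tag bytes. A sketch of the total size such a buffer needs, assuming that layout and the surrounding type definitions (topBotBufSize is illustrative, not part of the source):

static size_t topBotBufSize(int64_t k, int32_t tagsLen) {
  return sizeof(STopBotInfo)                            // header
       + (size_t)k * POINTER_BYTES                      // pointer area
       + (size_t)k * (sizeof(tValuePair) + tagsLen);    // value pairs with trailing tags
}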
bool topbot_datablock_filter(SQLFunctionCtx *pCtx, const char *minval, const char *maxval) {
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
if (pResInfo == NULL) {
@ -2495,6 +2518,10 @@ bool topbot_datablock_filter(SQLFunctionCtx *pCtx, const char *minval, const cha
return true;
}
if ((void *)pTopBotInfo->res[0] != (void *)((char *)pTopBotInfo + sizeof(STopBotInfo) + POINTER_BYTES * pCtx->param[0].i64)) {
buildTopBotStruct(pTopBotInfo, pCtx);
}
tValuePair **pRes = (tValuePair**) pTopBotInfo->res;
if (pCtx->functionId == TSDB_FUNC_TOP) {
@ -2534,27 +2561,6 @@ bool topbot_datablock_filter(SQLFunctionCtx *pCtx, const char *minval, const cha
}
}
/*
* keep the intermediate results during scan data blocks in the format of:
* +-----------------------------------+-------------one value pair-----------+------------next value pair-----------+
* |-------------pointer area----------|----ts---+-----+-----n tags-----------|----ts---+-----+-----n tags-----------|
* +..[Value Pointer1][Value Pointer2].|timestamp|value|tags1|tags2|....|tagsn|timestamp|value|tags1|tags2|....|tagsn+
*/
static void buildTopBotStruct(STopBotInfo *pTopBotInfo, SQLFunctionCtx *pCtx) {
char *tmp = (char *)pTopBotInfo + sizeof(STopBotInfo);
pTopBotInfo->res = (tValuePair**) tmp;
tmp += POINTER_BYTES * pCtx->param[0].i64;
size_t size = sizeof(tValuePair) + pCtx->tagInfo.tagsLen;
// assert(pCtx->param[0].i64 > 0);
for (int32_t i = 0; i < pCtx->param[0].i64; ++i) {
pTopBotInfo->res[i] = (tValuePair*) tmp;
pTopBotInfo->res[i]->pTags = tmp + sizeof(tValuePair);
tmp += size;
}
}
static bool top_bottom_function_setup(SQLFunctionCtx *pCtx) {
if (!function_setup(pCtx)) {
return false;
@ -2771,14 +2777,16 @@ static void percentile_function(SQLFunctionCtx *pCtx) {
SPercentileInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo);
if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) {
pInfo->stage += 1;
// all data are null, mark the result as complete
if (pInfo->numOfElems == 0) {
pResInfo->complete = true;
return;
} else {
pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval);
}
pInfo->stage += 1;
}
// the first stage, only acquire the min/max value
@ -2857,14 +2865,16 @@ static void percentile_function_f(SQLFunctionCtx *pCtx, int32_t index) {
SPercentileInfo *pInfo = (SPercentileInfo *)GET_ROWCELL_INTERBUF(pResInfo);
if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) {
pInfo->stage += 1;
// all data are null, mark the result as complete
if (pInfo->numOfElems == 0) {
pResInfo->complete = true;
return;
} else {
pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval);
}
pInfo->stage += 1;
}
if (pInfo->stage == 0) {

View File

@ -105,6 +105,30 @@ int32_t getMaximumIdleDurationSec() {
return tsShellActivityTimer * 2;
}
int64_t genQueryId(void) {
int64_t uid = 0;
int64_t did = tsDnodeId;
uid = did << 54;
int64_t pid = ((int64_t)taosGetPId()) & 0x3FF;
uid |= pid << 44;
int64_t ts = taosGetTimestampMs() & 0x1FFFFFFFF;
uid |= ts << 11;
int64_t sid = atomic_add_fetch_64(&queryHandleId, 1) & 0x7FF;
uid |= sid;
return uid;
}
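The generated ID packs four fields into 64 bits: the dnode id in the top 10 bits (assumed to fit, since it is not masked here), the process id in the next 10, a 33-bit millisecond timestamp, and an 11-bit serial number. A decoding sketch for the same layout, useful only for logging (decodeQueryId and its parameter names are illustrative):

#include <stdint.h>

static void decodeQueryId(uint64_t qid, int32_t *dnodeId, int32_t *pid, int64_t *ts, int32_t *serial) {
  *dnodeId = (int32_t)((qid >> 54) & 0x3FF);        // 10 bits
  *pid     = (int32_t)((qid >> 44) & 0x3FF);        // 10 bits
  *ts      = (int64_t)((qid >> 11) & 0x1FFFFFFFF);  // 33 bits of the ms timestamp
  *serial  = (int32_t)( qid        & 0x7FF);        // 11 bits
}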
static void getNextTimeWindow(SQuery* pQuery, STimeWindow* tw) {
int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
if (pQuery->interval.intervalUnit != 'n' && pQuery->interval.intervalUnit != 'y') {
@ -181,6 +205,7 @@ static SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntime
static SOperatorInfo* createMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
static SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInfo* pExpr, int32_t numOfOutput);
static SOperatorInfo* createTableBlockInfoScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv);
static SOperatorInfo* createHavingOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
static void destroyBasicOperatorInfo(void* param, int32_t numOfOutput);
static void destroySFillOperatorInfo(void* param, int32_t numOfOutput);
@ -200,6 +225,7 @@ static bool isPointInterpoQuery(SQuery *pQuery);
static void setResultBufSize(SQuery* pQuery, SRspResultInfo* pResultInfo);
static void setCtxTagForJoin(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, SExprInfo* pExprInfo, void* pTable);
static void setParamForStableStddev(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExpr);
static void setParamForStableStddevByColData(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExpr, char* val, int16_t bytes);
static void doSetTableGroupOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo* pResultRowInfo,
SQLFunctionCtx* pCtx, int32_t* rowCellInfoOffset, int32_t numOfOutput,
int32_t groupIndex);
@ -1330,6 +1356,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn
SColumnInfoData* pColInfoData = taosArrayGet(pSDataBlock->pDataBlock, pInfo->colIndex);
int16_t bytes = pColInfoData->info.bytes;
int16_t type = pColInfoData->info.type;
SQuery *pQuery = pRuntimeEnv->pQuery;
if (type == TSDB_DATA_TYPE_FLOAT || type == TSDB_DATA_TYPE_DOUBLE) {
qError("QInfo:%"PRIu64" group by not supported on double/float columns, abort", GET_QID(pRuntimeEnv));
@ -1350,6 +1377,10 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn
memcpy(pInfo->prevData, val, bytes);
if (pQuery->stableQuery && pQuery->stabledev && (pRuntimeEnv->prevResult != NULL)) {
setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, val, bytes);
}
int32_t ret =
setGroupResultOutputBuf(pRuntimeEnv, pInfo, pOperator->numOfOutput, val, type, bytes, item->groupIndex);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
@ -1813,6 +1844,10 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
}
}
if (pQuery->havingNum > 0) {
pRuntimeEnv->proot = createHavingOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQuery->pExpr1, pQuery->numOfOutput);
}
if (pQuery->limit.offset > 0) {
pRuntimeEnv->proot = createOffsetOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot);
}
@ -1843,6 +1878,19 @@ static void doFreeQueryHandle(SQueryRuntimeEnv* pRuntimeEnv) {
assert(pMemRef->ref == 0 && pMemRef->snapshot.imem == NULL && pMemRef->snapshot.mem == NULL);
}
static void destroyTsComp(SQueryRuntimeEnv *pRuntimeEnv, SQuery *pQuery) {
if (isTsCompQuery(pQuery) && pRuntimeEnv->outputBuf && pRuntimeEnv->outputBuf->pDataBlock && taosArrayGetSize(pRuntimeEnv->outputBuf->pDataBlock) > 0) {
SColumnInfoData* pColInfoData = taosArrayGet(pRuntimeEnv->outputBuf->pDataBlock, 0);
if (pColInfoData) {
FILE *f = *(FILE **)pColInfoData->pData; // TODO refactor
if (f) {
fclose(f);
*(FILE **)pColInfoData->pData = NULL;
}
}
}
}
static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {
SQuery *pQuery = pRuntimeEnv->pQuery;
SQInfo* pQInfo = (SQInfo*) pRuntimeEnv->qinfo;
@ -1861,6 +1909,8 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {
destroyResultBuf(pRuntimeEnv->pResultBuf);
doFreeQueryHandle(pRuntimeEnv);
destroyTsComp(pRuntimeEnv, pQuery);
pRuntimeEnv->pTsBuf = tsBufDestroy(pRuntimeEnv->pTsBuf);
tfree(pRuntimeEnv->keyBuf);
@ -1870,14 +1920,15 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {
taosHashCleanup(pRuntimeEnv->pResultRowHashTable);
pRuntimeEnv->pResultRowHashTable = NULL;
pRuntimeEnv->pool = destroyResultRowPool(pRuntimeEnv->pool);
taosArrayDestroyEx(pRuntimeEnv->prevResult, freeInterResult);
pRuntimeEnv->prevResult = NULL;
taosHashCleanup(pRuntimeEnv->pTableRetrieveTsMap);
pRuntimeEnv->pTableRetrieveTsMap = NULL;
destroyOperatorInfo(pRuntimeEnv->proot);
pRuntimeEnv->pool = destroyResultRowPool(pRuntimeEnv->pool);
taosArrayDestroyEx(pRuntimeEnv->prevResult, freeInterResult);
pRuntimeEnv->prevResult = NULL;
}
static bool needBuildResAfterQueryComplete(SQInfo* pQInfo) {
@ -2101,6 +2152,40 @@ static bool onlyFirstQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSD
static bool onlyLastQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSDB_FUNC_LAST, TSDB_FUNC_LAST_DST); }
static int32_t updateBlockLoadStatus(SQuery *pQuery, int32_t status) {
bool hasFirstLastFunc = false;
bool hasOtherFunc = false;
if (status == BLK_DATA_ALL_NEEDED || status == BLK_DATA_DISCARD) {
return status;
}
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
int32_t functionId = pQuery->pExpr1[i].base.functionId;
if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_TAG ||
functionId == TSDB_FUNC_TAG_DUMMY) {
continue;
}
if (functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_LAST_DST) {
hasFirstLastFunc = true;
} else {
hasOtherFunc = true;
}
}
if (hasFirstLastFunc && status == BLK_DATA_NO_NEEDED) {
if(!hasOtherFunc) {
return BLK_DATA_DISCARD;
} else{
return BLK_DATA_ALL_NEEDED;
}
}
return status;
}
static void doExchangeTimeWindow(SQInfo* pQInfo, STimeWindow* win) {
SQuery* pQuery = &pQInfo->query;
size_t t = taosArrayGetSize(pQuery->tableGroupInfo.pGroupList);
@ -2542,11 +2627,12 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa
pBlock->pDataBlock = NULL;
pBlock->pBlockStatis = NULL;
SQInfo* pQInfo = pRuntimeEnv->qinfo;
SQuery* pQuery = pRuntimeEnv->pQuery;
int64_t groupId = pQuery->current->groupIndex;
bool ascQuery = QUERY_IS_ASC_QUERY(pQuery);
SQInfo* pQInfo = pRuntimeEnv->qinfo;
SQueryCostInfo* pCost = &pQInfo->summary;
if (pRuntimeEnv->pTsBuf != NULL) {
@ -2603,7 +2689,9 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa
}
SDataBlockInfo* pBlockInfo = &pBlock->info;
if ((*status) == BLK_DATA_NO_NEEDED) {
*status = updateBlockLoadStatus(pRuntimeEnv->pQuery, *status);
if ((*status) == BLK_DATA_NO_NEEDED || (*status) == BLK_DATA_DISCARD) {
qDebug("QInfo:%"PRIu64" data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo->qId, pBlockInfo->window.skey,
pBlockInfo->window.ekey, pBlockInfo->rows);
pCost->discardBlocks += 1;
@ -3236,7 +3324,7 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe
continue;
}
pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv->pQuery, bufPage, pResult->offset, offset);
pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv, bufPage, pResult->offset, offset, pCtx[i].outputBytes);
offset += pCtx[i].outputBytes;
int32_t functionId = pCtx[i].functionId;
@ -3294,7 +3382,7 @@ void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLF
int16_t offset = 0;
for (int32_t i = 0; i < numOfCols; ++i) {
pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv->pQuery, page, pResult->offset, offset);
pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv, page, pResult->offset, offset, pCtx[i].outputBytes);
offset += pCtx[i].outputBytes;
int32_t functionId = pCtx[i].functionId;
@ -3411,6 +3499,42 @@ void setParamForStableStddev(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx
}
void setParamForStableStddevByColData(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExpr, char* val, int16_t bytes) {
SQuery* pQuery = pRuntimeEnv->pQuery;
int32_t numOfExprs = pQuery->numOfOutput;
for(int32_t i = 0; i < numOfExprs; ++i) {
SExprInfo* pExprInfo = &(pExpr[i]);
if (pExprInfo->base.functionId != TSDB_FUNC_STDDEV_DST) {
continue;
}
SSqlFuncMsg* pFuncMsg = &pExprInfo->base;
pCtx[i].param[0].arr = NULL;
pCtx[i].param[0].nType = TSDB_DATA_TYPE_INT; // avoid freeing the memory by setting the type to be int
// TODO use hash to speedup this loop
int32_t numOfGroup = (int32_t)taosArrayGetSize(pRuntimeEnv->prevResult);
for (int32_t j = 0; j < numOfGroup; ++j) {
SInterResult* p = taosArrayGet(pRuntimeEnv->prevResult, j);
if (bytes == 0 || memcmp(p->tags, val, bytes) == 0) {
int32_t numOfCols = (int32_t)taosArrayGetSize(p->pResult);
for (int32_t k = 0; k < numOfCols; ++k) {
SStddevInterResult* pres = taosArrayGet(p->pResult, k);
if (pres->colId == pFuncMsg->colInfo.colId) {
pCtx[i].param[0].arr = pres->pResult;
break;
}
}
}
}
}
}
/*
* There are two cases to handle:
*
@ -3466,8 +3590,6 @@ void setIntervalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, TSKEY key) {
*/
static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* pGroupResInfo, int32_t orderType, SSDataBlock* pBlock) {
SQuery *pQuery = pRuntimeEnv->pQuery;
int32_t numOfRows = getNumOfTotalRes(pGroupResInfo);
int32_t numOfResult = pBlock->info.rows; // there are already exists result rows
@ -3502,7 +3624,7 @@ static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo*
int32_t bytes = pColInfoData->info.bytes;
char *out = pColInfoData->pData + numOfResult * bytes;
char *in = getPosInResultPage(pQuery, page, pRow->offset, offset);
char *in = getPosInResultPage(pRuntimeEnv, page, pRow->offset, offset, bytes);
memcpy(out, in, bytes * numOfRowsToCopy);
offset += bytes;
@ -3972,7 +4094,7 @@ static SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfOutput, in
return pFillCol;
}
int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void *tsdb, int32_t vgId, bool isSTableQuery) {
int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bool isSTableQuery) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
@ -3983,8 +4105,6 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void *ts
pQuery->timeWindowInterpo = timeWindowInterpoRequired(pQuery);
pQuery->stabledev = isStabledev(pQuery);
pRuntimeEnv->prevResult = prevResult;
setScanLimitationByResultBuffer(pQuery);
int32_t code = setupQueryHandle(tsdb, pQInfo, isSTableQuery);
@ -4626,6 +4746,111 @@ static SSDataBlock* doOffset(void* param) {
}
}
bool doFilterData(SColumnInfoData* p, int32_t rid, SColumnFilterElem *filterElem, __filter_func_t fp) {
char* input = p->pData + p->info.bytes * rid;
bool isnull = isNull(input, p->info.type);
if (isnull) {
return (fp == isNullOperator) ? true : false;
} else {
if (fp == notNullOperator) {
return true;
} else if (fp == isNullOperator) {
return false;
}
}
if (fp(filterElem, input, input, p->info.type)) {
return true;
}
return false;
}
void doHavingImpl(SOperatorInfo *pOperator, SSDataBlock *pBlock) {
SHavingOperatorInfo* pInfo = pOperator->info;
int32_t f = 0;
int32_t allQualified = 1;
int32_t exprQualified = 0;
for (int32_t r = 0; r < pBlock->info.rows; ++r) {
allQualified = 1;
for (int32_t i = 0; i < pOperator->numOfOutput; ++i) {
SExprInfo* pExprInfo = &(pOperator->pExpr[i]);
if (pExprInfo->pFilter == NULL) {
continue;
}
SArray* es = taosArrayGetP(pInfo->fp, i);
assert(es);
size_t fpNum = taosArrayGetSize(es);
exprQualified = 0;
for (int32_t m = 0; m < fpNum; ++m) {
__filter_func_t fp = taosArrayGetP(es, m);
assert(fp);
//SColIndex* colIdx = &pExprInfo->base.colInfo;
SColumnInfoData* p = taosArrayGet(pBlock->pDataBlock, i);
SColumnFilterElem filterElem = {.filterInfo = pExprInfo->pFilter[m]};
if (doFilterData(p, r, &filterElem, fp)) {
exprQualified = 1;
break;
}
}
if (exprQualified == 0) {
allQualified = 0;
break;
}
}
if (allQualified == 0) {
continue;
}
for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
SColumnInfoData *pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
int16_t bytes = pColInfoData->info.bytes;
memmove(pColInfoData->pData + f * bytes, pColInfoData->pData + bytes * r, bytes);
}
++f;
}
pBlock->info.rows = f;
}
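Rows that fail the having filters are removed by compacting the survivors toward the front of every column and shrinking the block's row count, so no new block is allocated. The same compaction pattern in isolation (compactRows and keep[] are illustrative, assuming keep[r] marks rows that passed):

#include <stdint.h>
#include <string.h>

static int compactRows(char *col, int16_t bytes, const int *keep, int rows) {
  int f = 0;
  for (int r = 0; r < rows; ++r) {
    if (!keep[r]) continue;                                        // row filtered out
    memmove(col + (size_t)f * bytes, col + (size_t)r * bytes, bytes);
    ++f;
  }
  return f;  // new number of rows in this column
}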
static SSDataBlock* doHaving(void* param) {
SOperatorInfo *pOperator = (SOperatorInfo *)param;
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
}
SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
while (1) {
SSDataBlock *pBlock = pOperator->upstream->exec(pOperator->upstream);
if (pBlock == NULL) {
setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
pOperator->status = OP_EXEC_DONE;
return NULL;
}
doHavingImpl(pOperator, pBlock);
return pBlock;
}
}
static SSDataBlock* doIntervalAgg(void* param) {
SOperatorInfo* pOperator = (SOperatorInfo*) param;
if (pOperator->status == OP_EXEC_DONE) {
@ -4976,6 +5201,13 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) {
pInfo->pRes = destroyOutputBuf(pInfo->pRes);
}
static void destroyHavingOperatorInfo(void* param, int32_t numOfOutput) {
SHavingOperatorInfo* pInfo = (SHavingOperatorInfo*) param;
if (pInfo->fp) {
taosArrayDestroy(pInfo->fp);
}
}
SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
SAggOperatorInfo* pInfo = calloc(1, sizeof(SAggOperatorInfo));
@ -5032,6 +5264,83 @@ SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI
return pOperator;
}
int32_t initFilterFp(SExprInfo* pExpr, int32_t numOfOutput, SArray** fps) {
__filter_func_t fp = NULL;
*fps = taosArrayInit(numOfOutput, sizeof(SArray*));
if (*fps == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
for (int32_t i = 0; i < numOfOutput; ++i) {
SExprInfo* pExprInfo = &(pExpr[i]);
SColIndex* colIdx = &pExprInfo->base.colInfo;
if (pExprInfo->pFilter == NULL || !TSDB_COL_IS_NORMAL_COL(colIdx->flag)) {
taosArrayPush(*fps, &fp);
continue;
}
int32_t filterNum = pExprInfo->base.filterNum;
SColumnFilterInfo *filterInfo = pExprInfo->pFilter;
SArray* es = taosArrayInit(filterNum, sizeof(__filter_func_t));
for (int32_t j = 0; j < filterNum; ++j) {
int32_t lower = filterInfo->lowerRelOptr;
int32_t upper = filterInfo->upperRelOptr;
if (lower == TSDB_RELATION_INVALID && upper == TSDB_RELATION_INVALID) {
qError("invalid rel optr");
taosArrayDestroy(es);
return TSDB_CODE_QRY_APP_ERROR;
}
__filter_func_t ffp = getFilterOperator(lower, upper);
if (ffp == NULL) {
qError("invalid filter info");
taosArrayDestroy(es);
return TSDB_CODE_QRY_APP_ERROR;
}
taosArrayPush(es, &ffp);
filterInfo += 1;
}
taosArrayPush(*fps, &es);
}
return TSDB_CODE_SUCCESS;
}
SOperatorInfo* createHavingOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
SHavingOperatorInfo* pInfo = calloc(1, sizeof(SHavingOperatorInfo));
initFilterFp(pExpr, numOfOutput, &pInfo->fp);
assert(pInfo->fp);
SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
pOperator->name = "HavingOperator";
pOperator->operatorType = OP_Having;
pOperator->blockingOptr = false;
pOperator->status = OP_IN_EXECUTING;
pOperator->numOfOutput = numOfOutput;
pOperator->pExpr = pExpr;
pOperator->upstream = upstream;
pOperator->exec = doHaving;
pOperator->info = pInfo;
pOperator->pRuntimeEnv = pRuntimeEnv;
pOperator->cleanup = destroyHavingOperatorInfo;
return pOperator;
}
SOperatorInfo* createLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream) {
SLimitOperatorInfo* pInfo = calloc(1, sizeof(SLimitOperatorInfo));
pInfo->limit = pRuntimeEnv->pQuery->limit.limit;
@ -5603,9 +5912,35 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) {
pExprMsg->functionId = htons(pExprMsg->functionId);
pExprMsg->numOfParams = htons(pExprMsg->numOfParams);
pExprMsg->resColId = htons(pExprMsg->resColId);
pExprMsg->filterNum = htonl(pExprMsg->filterNum);
pMsg += sizeof(SSqlFuncMsg);
SColumnFilterInfo* pExprFilterInfo = pExprMsg->filterInfo;
pMsg += sizeof(SColumnFilterInfo) * pExprMsg->filterNum;
for (int32_t f = 0; f < pExprMsg->filterNum; ++f) {
SColumnFilterInfo *pFilterMsg = (SColumnFilterInfo *)pExprFilterInfo;
pFilterMsg->filterstr = htons(pFilterMsg->filterstr);
if (pFilterMsg->filterstr) {
pFilterMsg->len = htobe64(pFilterMsg->len);
pFilterMsg->pz = (int64_t)pMsg;
pMsg += (pFilterMsg->len + 1);
} else {
pFilterMsg->lowerBndi = htobe64(pFilterMsg->lowerBndi);
pFilterMsg->upperBndi = htobe64(pFilterMsg->upperBndi);
}
pFilterMsg->lowerRelOptr = htons(pFilterMsg->lowerRelOptr);
pFilterMsg->upperRelOptr = htons(pFilterMsg->upperRelOptr);
pExprFilterInfo++;
}
for (int32_t j = 0; j < pExprMsg->numOfParams; ++j) {
pExprMsg->arg[j].argType = htons(pExprMsg->arg[j].argType);
pExprMsg->arg[j].argBytes = htons(pExprMsg->arg[j].argBytes);
@ -5790,6 +6125,42 @@ _cleanup:
return code;
}
int32_t cloneExprFilterInfo(SColumnFilterInfo **dst, SColumnFilterInfo* src, int32_t filterNum) {
if (filterNum <= 0) {
return TSDB_CODE_SUCCESS;
}
*dst = calloc(filterNum, sizeof(*src));
if (*dst == NULL) {
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
memcpy(*dst, src, sizeof(*src) * filterNum);
for (int32_t i = 0; i < filterNum; i++) {
if ((*dst)[i].filterstr && (*dst)[i].len > 0) {
void *pz = calloc(1, (size_t)(*dst)[i].len + 1);
if (pz == NULL) {
if (i == 0) {
free(*dst);
} else {
freeColumnFilterInfo(*dst, i);
}
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
memcpy(pz, (void *)src[i].pz, (size_t)src[i].len + 1);
(*dst)[i].pz = (int64_t)pz;
}
}
return TSDB_CODE_SUCCESS;
}
static int32_t buildArithmeticExprFromMsg(SExprInfo *pArithExprInfo, SQueryTableMsg *pQueryMsg) {
qDebug("qmsg:%p create arithmetic expr from binary", pQueryMsg);
@ -5903,6 +6274,13 @@ int32_t createQueryFuncExprFromMsg(SQueryTableMsg* pQueryMsg, int32_t numOfOutpu
type = s->type;
bytes = s->bytes;
}
if (pExprs[i].base.filterNum > 0) {
int32_t ret = cloneExprFilterInfo(&pExprs[i].pFilter, pExprMsg[i]->filterInfo, pExprMsg[i]->filterNum);
if (ret) {
return ret;
}
}
}
int32_t param = (int32_t)pExprs[i].base.arg[0].argValue.i64;
@ -6143,6 +6521,8 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr
goto _cleanup_qinfo;
}
pQInfo->qId = *qId;
// to make sure third party won't overwrite this structure
pQInfo->signature = pQInfo;
SQuery* pQuery = &pQInfo->query;
@ -6192,6 +6572,10 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr
if (TSDB_COL_IS_TAG(pExprs[col].base.colInfo.flag)) {
pQuery->tagLen += pExprs[col].bytes;
}
if (pExprs[col].pFilter) {
++pQuery->havingNum;
}
}
doUpdateExprColumnIndex(pQuery);
@ -6275,8 +6659,6 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr
// todo refactor
pQInfo->query.queryBlockDist = (numOfOutput == 1 && pExprs[0].base.colInfo.colId == TSDB_BLOCK_DIST_COLUMN_INDEX);
pQInfo->qId = atomic_add_fetch_64(&queryHandleId, 1);
*qId = pQInfo->qId;
qDebug("qmsg:%p QInfo:%" PRIu64 "-%p created", pQueryMsg, pQInfo->qId, pQInfo);
return pQInfo;
@ -6295,6 +6677,10 @@ _cleanup_qinfo:
tExprTreeDestroy(pExprInfo->pExpr, NULL);
pExprInfo->pExpr = NULL;
}
if (pExprInfo->pFilter) {
freeColumnFilterInfo(pExprInfo->pFilter, pExprInfo->base.filterNum);
}
}
tfree(pExprs);
@ -6340,6 +6726,8 @@ int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *p
SArray* prevResult = NULL;
if (pQueryMsg->prevResultLen > 0) {
prevResult = interResFromBinary(param->prevResult, pQueryMsg->prevResultLen);
pRuntimeEnv->prevResult = prevResult;
}
pQuery->precision = tsdbGetCfg(tsdb)->precision;
@ -6361,7 +6749,7 @@ int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *p
}
// filter the qualified
if ((code = doInitQInfo(pQInfo, pTsBuf, prevResult, tsdb, vgId, isSTable)) != TSDB_CODE_SUCCESS) {
if ((code = doInitQInfo(pQInfo, pTsBuf, tsdb, vgId, isSTable)) != TSDB_CODE_SUCCESS) {
goto _error;
}
@ -6379,7 +6767,7 @@ void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters) {
}
for (int32_t i = 0; i < numOfFilters; i++) {
if (pFilter[i].filterstr) {
if (pFilter[i].filterstr && pFilter[i].pz) {
free((void*)(pFilter[i].pz));
}
}
@ -6421,6 +6809,10 @@ static void* destroyQueryFuncExpr(SExprInfo* pExprInfo, int32_t numOfExpr) {
if (pExprInfo[i].pExpr != NULL) {
tExprTreeDestroy(pExprInfo[i].pExpr, NULL);
}
if (pExprInfo[i].pFilter) {
freeColumnFilterInfo(pExprInfo[i].pFilter, pExprInfo[i].base.filterNum);
}
}
tfree(pExprInfo);
@ -6436,6 +6828,9 @@ void freeQInfo(SQInfo *pQInfo) {
SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
releaseQueryBuf(pRuntimeEnv->tableqinfoGroupInfo.numOfTables);
doDestroyTableQueryInfo(&pRuntimeEnv->tableqinfoGroupInfo);
teardownQueryRuntimeEnv(&pQInfo->runtimeEnv);
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
@ -6471,7 +6866,6 @@ void freeQInfo(SQInfo *pQInfo) {
}
}
doDestroyTableQueryInfo(&pRuntimeEnv->tableqinfoGroupInfo);
tfree(pQInfo->pBuf);
tfree(pQInfo->sql);
@ -6521,6 +6915,7 @@ int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) {
}
fclose(f);
*(FILE **)pColInfoData->pData = NULL;
}
// all data returned, set query over

View File

@ -266,6 +266,7 @@ int32_t tExtMemBufferFlush(tExtMemBuffer *pMemBuffer) {
size_t retVal = fwrite((char *)&(first->item), pMemBuffer->pageSize, 1, pMemBuffer->file);
if (retVal <= 0) { // failed to write to the buffer, possibly not enough space
ret = TAOS_SYSTEM_ERROR(errno);
pMemBuffer->pHead = first;
return ret;
}

View File

@ -124,7 +124,7 @@ bool greaterEqualOperator(SColumnFilterElem *pFilter, const char *minval, const
bool equalOperator(SColumnFilterElem *pFilter, const char *minval, const char *maxval, int16_t type) {
SColumnFilterInfo *pFilterInfo = &pFilter->filterInfo;
if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) {
if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL || type == TSDB_DATA_TYPE_TIMESTAMP) {
int64_t minv = -1, maxv = -1;
GET_TYPED_DATA(minv, int64_t, type, minval);
GET_TYPED_DATA(maxv, int64_t, type, maxval);
@ -202,7 +202,7 @@ bool likeOperator(SColumnFilterElem *pFilter, const char *minval, const char *ma
bool notEqualOperator(SColumnFilterElem *pFilter, const char *minval, const char *maxval, int16_t type) {
SColumnFilterInfo *pFilterInfo = &pFilter->filterInfo;
if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) {
if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL || type == TSDB_DATA_TYPE_TIMESTAMP) {
int64_t minv = -1, maxv = -1;
GET_TYPED_DATA(minv, int64_t, type, minval);
GET_TYPED_DATA(maxv, int64_t, type, maxval);

View File

@ -287,6 +287,10 @@ static void lruListMoveToFront(SList *pList, SPageInfo* pi) {
tdListPrependNode(pList, pi->pn);
}
static FORCE_INLINE size_t getAllocPageSize(int32_t pageSize) {
return pageSize + POINTER_BYTES + 2 + sizeof(tFilePage);
}
tFilePage* getNewDataBuf(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32_t* pageId) {
pResultBuf->statis.getPages += 1;
@ -311,7 +315,7 @@ tFilePage* getNewDataBuf(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32
// allocate buf
if (availablePage == NULL) {
pi->pData = calloc(1, pResultBuf->pageSize + POINTER_BYTES + 2); // add extract bytes in case of zipped buffer increased.
pi->pData = calloc(1, getAllocPageSize(pResultBuf->pageSize)); // add extra bytes in case the compressed buffer grows.
} else {
pi->pData = availablePage;
}
@ -355,7 +359,7 @@ tFilePage* getResBufPage(SDiskbasedResultBuf* pResultBuf, int32_t id) {
}
if (availablePage == NULL) {
(*pi)->pData = calloc(1, pResultBuf->pageSize + POINTER_BYTES);
(*pi)->pData = calloc(1, getAllocPageSize(pResultBuf->pageSize));
} else {
(*pi)->pData = availablePage;
}
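Both allocation sites in this file now go through getAllocPageSize, so every page buffer carries the same headroom: the page itself, POINTER_BYTES plus 2 spare bytes in case the compressed page grows, and room for the tFilePage header. A rough illustration (sizes are assumptions for a 64-bit build; sizeof(tFilePage) depends on its definition in this repo):
// pageSize = 4096, POINTER_BYTES = 8, sizeof(tFilePage) assumed to be 8
// getAllocPageSize(4096) == 4096 + 8 + 2 + 8 == 4114 bytes per page buffer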

View File

@ -310,6 +310,77 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) {
return pExpr;
}
static FORCE_INLINE int32_t tStrTokenCompare(SStrToken* left, SStrToken* right) {
return (left->type == right->type && left->n == right->n && strncasecmp(left->z, right->z, left->n) == 0) ? 0 : 1;
}
int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right) {
if (left == NULL && right == NULL) {
return 0;
}
if ((left == NULL && right) || (left && right == NULL)) {
return 1;
}
if (left->type != right->type) {
return 1;
}
if (left->tokenId != right->tokenId) {
return 1;
}
if (left->functionId != right->functionId) {
return 1;
}
if ((left->pLeft && right->pLeft == NULL)
|| (left->pLeft == NULL && right->pLeft)
|| (left->pRight && right->pRight == NULL)
|| (left->pRight == NULL && right->pRight)
|| (left->pParam && right->pParam == NULL)
|| (left->pParam == NULL && right->pParam)) {
return 1;
}
if (tVariantCompare(&left->value, &right->value)) {
return 1;
}
if (tStrTokenCompare(&left->colInfo, &right->colInfo)) {
return 1;
}
if (right->pParam && left->pParam) {
size_t size = taosArrayGetSize(right->pParam);
if (left->pParam && taosArrayGetSize(left->pParam) != size) {
return 1;
}
for (int32_t i = 0; i < size; i++) {
tSqlExprItem* pLeftElem = taosArrayGet(left->pParam, i);
tSqlExpr* pSubLeft = pLeftElem->pNode;
tSqlExprItem* pRightElem = taosArrayGet(right->pParam, i);
tSqlExpr* pSubRight = pRightElem->pNode;
if (tSqlExprCompare(pSubLeft, pSubRight)) {
return 1;
}
}
}
if (left->pLeft && tSqlExprCompare(left->pLeft, right->pLeft)) {
return 1;
}
if (left->pRight && tSqlExprCompare(left->pRight, right->pRight)) {
return 1;
}
return 0;
}
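tSqlExprCompare returns 0 only when the two trees match node for node: type, token id, function id, value, column token, the parameter list, and both subtrees. As a rough illustration of how such a structural comparator can be used, for instance to check whether a HAVING expression duplicates an entry already present in the select list, here is a hypothetical lookup helper built only on the functions visible above:
// hypothetical helper (not part of this change): index of an expression in pExprList
// that is structurally equal to pTarget, or -1 if none matches
static int32_t findEqualExpr(SArray *pExprList, tSqlExpr *pTarget) {
  size_t n = taosArrayGetSize(pExprList);
  for (size_t i = 0; i < n; ++i) {
    tSqlExprItem *pItem = taosArrayGet(pExprList, i);
    if (tSqlExprCompare(pItem->pNode, pTarget) == 0) {
      return (int32_t)i;
    }
  }
  return -1;
}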
tSqlExpr *tSqlExprClone(tSqlExpr *pSrc) {
tSqlExpr *pExpr = calloc(1, sizeof(tSqlExpr));
@ -640,7 +711,7 @@ void tSetColumnType(TAOS_FIELD *pField, SStrToken *type) {
SQuerySqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelectList, SFromInfo *pFrom, tSqlExpr *pWhere,
SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval,
SSessionWindowVal *pSession, SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit,
SLimitVal *psLimit) {
SLimitVal *psLimit, tSqlExpr *pHaving) {
assert(pSelectList != NULL);
SQuerySqlNode *pSqlNode = calloc(1, sizeof(SQuerySqlNode));
@ -655,6 +726,7 @@ SQuerySqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelectList, SF
pSqlNode->pSortOrder = pSortOrder;
pSqlNode->pWhere = pWhere;
pSqlNode->fillType = pFill;
pSqlNode->pHaving = pHaving;
if (pLimit != NULL) {
pSqlNode->limit = *pLimit;
@ -718,6 +790,9 @@ void destroyQuerySqlNode(SQuerySqlNode *pQuerySql) {
tSqlExprDestroy(pQuerySql->pWhere);
pQuerySql->pWhere = NULL;
tSqlExprDestroy(pQuerySql->pHaving);
pQuerySql->pHaving = NULL;
taosArrayDestroyEx(pQuerySql->pSortOrder, freeVariant);
pQuerySql->pSortOrder = NULL;

View File

@ -66,8 +66,8 @@ void cleanupResultRowInfo(SResultRowInfo *pResultRowInfo) {
return;
}
if (pResultRowInfo->type == TSDB_DATA_TYPE_BINARY || pResultRowInfo->type == TSDB_DATA_TYPE_NCHAR) {
for(int32_t i = 0; i < pResultRowInfo->size; ++i) {
if (pResultRowInfo->pResult[i]) {
tfree(pResultRowInfo->pResult[i]->key);
}
}
@ -140,7 +140,7 @@ void clearResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResultRow, int16
SResultRowCellInfo *pResultInfo = &pResultRow->pCellInfo[i];
int16_t size = pRuntimeEnv->pQuery->pExpr1[i].bytes;
char * s = getPosInResultPage(pRuntimeEnv->pQuery, page, pResultRow->offset, offset);
char * s = getPosInResultPage(pRuntimeEnv, page, pResultRow->offset, offset, size);
memset(s, 0, size);
offset += size;
@ -153,11 +153,8 @@ void clearResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResultRow, int16
pResultRow->offset = -1;
pResultRow->closed = false;
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
tfree(pResultRow->key);
} else {
pResultRow->win = TSWINDOW_INITIALIZER;
}
}
// TODO refactor: use macro

View File

@ -197,6 +197,7 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi
return code;
}
bool qTableQuery(qinfo_t qinfo, uint64_t *qId) {
SQInfo *pQInfo = (SQInfo *)qinfo;
assert(pQInfo && pQInfo->signature == pQInfo);
@ -475,7 +476,7 @@ void qCleanupQueryMgmt(void* pQMgmt) {
qDebug("vgId:%d, queryMgmt cleanup completed", vgId);
}
void** qRegisterQInfo(void* pMgmt, uint64_t qId, uint64_t qInfo) {
void** qRegisterQInfo(void* pMgmt, uint64_t qId, void *qInfo) {
if (pMgmt == NULL) {
terrno = TSDB_CODE_VND_INVALID_VGROUP_ID;
return NULL;
@ -516,8 +517,7 @@ void** qAcquireQInfo(void* pMgmt, uint64_t _key) {
return NULL;
}
TSDB_CACHE_PTR_TYPE key = (TSDB_CACHE_PTR_TYPE)_key;
void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, &key, sizeof(TSDB_CACHE_PTR_TYPE));
void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, &_key, sizeof(_key));
if (handle == NULL || *handle == NULL) {
terrno = TSDB_CODE_QRY_INVALID_QHANDLE;
return NULL;

File diff suppressed because it is too large

View File

@ -295,7 +295,7 @@ void *rpcOpen(const SRpcInit *pInit) {
return NULL;
}
} else {
pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime);
pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime * 30);
if ( pRpc->pCache == NULL ) {
tError("%s failed to init connection cache", pRpc->label);
rpcClose(pRpc);
@ -470,7 +470,7 @@ void rpcSendResponse(const SRpcMsg *pRsp) {
taosTmrStopA(&pConn->pTimer);
// set the idle timer to monitor the activity
taosTmrReset(rpcProcessIdleTimer, pRpc->idleTime, pConn, pRpc->tmrCtrl, &pConn->pIdleTimer);
taosTmrReset(rpcProcessIdleTimer, pRpc->idleTime * 30, pConn, pRpc->tmrCtrl, &pConn->pIdleTimer);
rpcSendMsgToPeer(pConn, msg, msgLen);
// if not set to secured, set it except in the NOT_READY case, since the client won't treat it as secured
@ -997,8 +997,8 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv, SRpcReqCont
}
if ( rpcIsReq(pHead->msgType) ) {
terrno = rpcProcessReqHead(pConn, pHead);
pConn->connType = pRecv->connType;
terrno = rpcProcessReqHead(pConn, pHead);
// stop idle timer
taosTmrStopA(&pConn->pIdleTimer);
@ -1367,7 +1367,7 @@ static void rpcProcessConnError(void *param, void *id) {
tDebug("%s %p, connection error happens", pRpc->label, pContext->ahandle);
if (pContext->numOfTry >= pContext->epSet.numOfEps) {
if (pContext->numOfTry >= pContext->epSet.numOfEps || pContext->msgType == TSDB_MSG_TYPE_FETCH) {
rpcMsg.msgType = pContext->msgType+1;
rpcMsg.ahandle = pContext->ahandle;
rpcMsg.code = pContext->code;

View File

@ -35,7 +35,7 @@ extern "C" {
#define SYNC_MAX_SIZE (TSDB_MAX_WAL_SIZE + sizeof(SWalHead) + sizeof(SSyncHead) + 16)
#define SYNC_RECV_BUFFER_SIZE (5*1024*1024)
#define SYNC_MAX_FWDS 512
#define SYNC_MAX_FWDS 4096
#define SYNC_FWD_TIMER 300
#define SYNC_ROLE_TIMER 15000 // ms
#define SYNC_CHECK_INTERVAL 1000 // ms
@ -117,7 +117,6 @@ typedef struct SSyncNode {
FStartSyncFile startSyncFileFp;
FStopSyncFile stopSyncFileFp;
FGetVersion getVersionFp;
FResetVersion resetVersionFp;
FSendFile sendFileFp;
FRecvFile recvFileFp;
pthread_mutex_t mutex;

View File

@ -182,7 +182,6 @@ int64_t syncStart(const SSyncInfo *pInfo) {
pNode->startSyncFileFp = pInfo->startSyncFileFp;
pNode->stopSyncFileFp = pInfo->stopSyncFileFp;
pNode->getVersionFp = pInfo->getVersionFp;
pNode->resetVersionFp = pInfo->resetVersionFp;
pNode->sendFileFp = pInfo->sendFileFp;
pNode->recvFileFp = pInfo->recvFileFp;
@ -410,23 +409,22 @@ void syncConfirmForward(int64_t rid, uint64_t version, int32_t code, bool force)
syncReleaseNode(pNode);
}
#if 1
void syncRecover(int64_t rid) {
SSyncPeer *pPeer;
SSyncNode *pNode = syncAcquireNode(rid);
if (pNode == NULL) return;
// TODO: add a few lines to check whether recovery is OK
// if this node is taken to the unsynced state, the whole system may not work
nodeRole = TAOS_SYNC_ROLE_UNSYNCED;
(*pNode->notifyRoleFp)(pNode->vgId, nodeRole);
nodeVersion = 0;
pthread_mutex_lock(&pNode->mutex);
nodeVersion = 0;
for (int32_t i = 0; i < pNode->replica; ++i) {
if (i == pNode->selfIndex) continue;
pPeer = pNode->peerInfo[i];
if (pPeer->peerFd >= 0) {
syncRestartConnection(pPeer);
@ -437,7 +435,6 @@ void syncRecover(int64_t rid) {
syncReleaseNode(pNode);
}
#endif
int32_t syncGetNodesRole(int64_t rid, SNodesRole *pNodesRole) {
SSyncNode *pNode = syncAcquireNode(rid);
@ -552,7 +549,10 @@ static void syncClosePeerConn(SSyncPeer *pPeer) {
if (pPeer->peerFd >= 0) {
pPeer->peerFd = -1;
void *pConn = pPeer->pConn;
if (pConn != NULL) syncFreeTcpConn(pPeer->pConn);
if (pConn != NULL) {
syncFreeTcpConn(pPeer->pConn);
pPeer->pConn = NULL;
}
}
}
@ -998,17 +998,24 @@ static void syncProcessForwardFromPeer(char *cont, SSyncPeer *pPeer) {
sTrace("%s, forward is received, hver:%" PRIu64 ", len:%d", pPeer->id, pHead->version, pHead->len);
int32_t code = 0;
if (nodeRole == TAOS_SYNC_ROLE_SLAVE) {
// nodeVersion = pHead->version;
(*pNode->writeToCacheFp)(pNode->vgId, pHead, TAOS_QTYPE_FWD, NULL);
code = (*pNode->writeToCacheFp)(pNode->vgId, pHead, TAOS_QTYPE_FWD, NULL);
} else {
if (nodeSStatus != TAOS_SYNC_STATUS_INIT) {
syncSaveIntoBuffer(pPeer, pHead);
code = syncSaveIntoBuffer(pPeer, pHead);
} else {
sError("%s, forward discarded since sstatus:%s, hver:%" PRIu64, pPeer->id, syncStatus[nodeSStatus],
pHead->version);
code = -1;
}
}
if (code != 0) {
sError("%s, failed to process fwd msg, hver:%" PRIu64 ", len:%d", pPeer->id, pHead->version, pHead->len);
syncRestartConnection(pPeer);
}
}
static void syncProcessPeersStatusMsg(SPeersStatus *pPeersStatus, SSyncPeer *pPeer) {
@ -1373,7 +1380,7 @@ static void syncMonitorNodeRole(void *param, void *tmrId) {
if (/*pPeer->role > TAOS_SYNC_ROLE_UNSYNCED && */ nodeRole > TAOS_SYNC_ROLE_UNSYNCED) continue;
if (/*pPeer->sstatus > TAOS_SYNC_STATUS_INIT || */ nodeSStatus > TAOS_SYNC_STATUS_INIT) continue;
sDebug("%s, check roles since self:%s sstatus:%s, peer:%s sstatus:%s", pPeer->id, syncRole[pPeer->role],
sDebug("%s, check roles since peer:%s sstatus:%s, self:%s sstatus:%s", pPeer->id, syncRole[pPeer->role],
syncStatus[pPeer->sstatus], syncRole[nodeRole], syncStatus[nodeSStatus]);
syncSendPeersStatusMsgToPeer(pPeer, 1, SYNC_STATUS_CHECK_ROLE, syncGenTranId());
break;
@ -1460,7 +1467,12 @@ static int32_t syncForwardToPeerImpl(SSyncNode *pNode, void *data, void *mhandle
if ((pNode->quorum > 1 || force) && code == 0) {
code = syncSaveFwdInfo(pNode, pWalHead->version, mhandle);
if (code >= 0) code = 1;
if (code >= 0) {
code = 1;
} else {
pthread_mutex_unlock(&pNode->mutex);
return code;
}
}
int32_t retLen = taosWriteMsg(pPeer->peerFd, pSyncHead, fwdLen);

View File

@ -238,7 +238,6 @@ static int32_t syncRestoreDataStepByStep(SSyncPeer *pPeer) {
(*pNode->stopSyncFileFp)(pNode->vgId, fversion);
nodeVersion = fversion;
if (pNode->resetVersionFp) (*pNode->resetVersionFp)(pNode->vgId, fversion);
sInfo("%s, start to restore wal, fver:%" PRIu64, pPeer->id, nodeVersion);
uint64_t wver = 0;

View File

@ -526,7 +526,7 @@ static void tsdbStartStream(STsdbRepo *pRepo) {
STable *pTable = pMeta->tables[i];
if (pTable && pTable->type == TSDB_STREAM_TABLE) {
pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), TABLE_NAME(pTable)->data, pTable->sql,
tsdbGetTableSchemaImpl(pTable, false, false, -1));
tsdbGetTableSchemaImpl(pTable, false, false, -1), 0);
}
}
}

View File

@ -840,7 +840,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, boo
if (lock && tsdbUnlockRepoMeta(pRepo) < 0) return -1;
if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE && addIdx) {
pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), TABLE_NAME(pTable)->data, pTable->sql,
tsdbGetTableSchemaImpl(pTable, false, false, -1));
tsdbGetTableSchemaImpl(pTable, false, false, -1), 1);
}
tsdbDebug("vgId:%d table %s tid %d uid %" PRIu64 " is added to meta", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),

View File

@ -2861,12 +2861,6 @@ int32_t tsdbRetrieveDataBlockStatisInfo(TsdbQueryHandleT* pQueryHandle, SDataSta
if (pHandle->statis[i].numOfNull == -1) { // set the column data are all NULL
pHandle->statis[i].numOfNull = pBlockInfo->compBlock->numOfRows;
}
SColumnInfo* pColInfo = taosArrayGet(pHandle->pColumns, i);
if (pColInfo->type == TSDB_DATA_TYPE_TIMESTAMP) {
pHandle->statis[i].min = pBlockInfo->compBlock->keyFirst;
pHandle->statis[i].max = pBlockInfo->compBlock->keyLast;
}
}
int64_t elapsed = taosGetTimestampUs() - stime;

View File

@ -175,6 +175,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_ID, "Table name too long")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_NAME, "Table does not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_TYPE, "Invalid table type in tsdb")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TAGS, "Too many tags")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_COLUMNS, "Too many columns")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TIMESERIES, "Too many time series")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_NOT_SUPER_TABLE, "Not super table") // operation only available for super table
TAOS_DEFINE_ERROR(TSDB_CODE_MND_COL_NAME_TOO_LONG, "Tag name too long")

View File

@ -22,6 +22,8 @@
#define SIGPIPE EPIPE
#endif
#define TCP_CONN_TIMEOUT 3000 // conn timeout
int32_t taosGetFqdn(char *fqdn) {
char hostname[1024];
hostname[1023] = '\0';
@ -346,10 +348,47 @@ SOCKET taosOpenTcpClientSocket(uint32_t destIp, uint16_t destPort, uint32_t clie
serverAddr.sin_addr.s_addr = destIp;
serverAddr.sin_port = (uint16_t)htons((uint16_t)destPort);
#ifdef _TD_LINUX
taosSetNonblocking(sockFd, 1);
ret = connect(sockFd, (struct sockaddr *)&serverAddr, sizeof(serverAddr));
if (ret == -1) {
if (errno == EHOSTUNREACH) {
uError("failed to connect socket, ip:0x%x, port:%hu(%s)", destIp, destPort, strerror(errno));
taosCloseSocket(sockFd);
return -1;
} else if (errno == EINPROGRESS || errno == EAGAIN || errno == EWOULDBLOCK) {
struct pollfd wfd[1];
wfd[0].fd = sockFd;
wfd[0].events = POLLOUT;
int res = poll(wfd, 1, TCP_CONN_TIMEOUT);
if (res == -1 || res == 0) {
uError("failed to connect socket, ip:0x%x, port:%hu(poll error/conn timeout)", destIp, destPort);
taosCloseSocket(sockFd); //
return -1;
}
int optVal = -1, optLen = sizeof(int);
if ((0 != taosGetSockOpt(sockFd, SOL_SOCKET, SO_ERROR, &optVal, &optLen)) || (optVal != 0)) {
uError("failed to connect socket, ip:0x%x, port:%hu(connect host error)", destIp, destPort);
taosCloseSocket(sockFd); //
return -1;
}
ret = 0;
} else { // Other error
uError("failed to connect socket, ip:0x%x, port:%hu(target host cannot be reached)", destIp, destPort);
taosCloseSocket(sockFd); //
return -1;
}
}
taosSetNonblocking(sockFd, 0);
#else
ret = connect(sockFd, (struct sockaddr *)&serverAddr, sizeof(serverAddr));
#endif
if (ret != 0) {
// uError("failed to connect socket, ip:0x%x, port:%hu(%s)", destIp, destPort, strerror(errno));
uError("failed to connect socket, ip:0x%x, port:%hu(%s)", destIp, destPort, strerror(errno));
taosCloseSocket(sockFd);
sockFd = -1;
} else {

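The _TD_LINUX branch above implements a connect-with-timeout: the socket is switched to non-blocking mode, connect() is issued, EINPROGRESS is waited out with poll() for up to TCP_CONN_TIMEOUT milliseconds, SO_ERROR is checked to confirm the connection actually succeeded, and finally the socket is switched back to blocking mode. A minimal sketch of the same pattern in plain POSIX calls (a hypothetical helper, not the taos* wrappers; error handling trimmed, and the caller is assumed to close the socket on failure):
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <sys/socket.h>

static int connectWithTimeout(int fd, const struct sockaddr *addr, socklen_t addrLen, int timeoutMs) {
  int flags = fcntl(fd, F_GETFL, 0);
  fcntl(fd, F_SETFL, flags | O_NONBLOCK);            // switch to non-blocking mode
  int ret = connect(fd, addr, addrLen);
  if (ret == -1 && errno == EINPROGRESS) {
    struct pollfd pfd = { .fd = fd, .events = POLLOUT };
    if (poll(&pfd, 1, timeoutMs) <= 0) return -1;    // poll error or connect timeout
    int err = 0;
    socklen_t errLen = sizeof(err);
    if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &errLen) != 0 || err != 0) return -1;
    ret = 0;                                         // connection completed within the timeout
  }
  fcntl(fd, F_SETFL, flags);                         // restore blocking mode
  return ret;
}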
View File

@ -0,0 +1,32 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_VNODE_BACKUP_H
#define TDENGINE_VNODE_BACKUP_H
#ifdef __cplusplus
extern "C" {
#endif
#include "vnodeInt.h"
int32_t vnodeInitBackup();
void vnodeCleanupBackup();
int32_t vnodeBackup(int32_t vgId);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -37,6 +37,7 @@ extern int32_t vDebugFlag;
typedef struct {
int32_t vgId; // global vnode group ID
int32_t refCount; // reference count
int64_t queuedWMsgSize;
int32_t queuedWMsg;
int32_t queuedRMsg;
int32_t flowctrlLevel;

View File

@ -30,7 +30,6 @@ void vnodeStopSyncFile(int32_t vgId, uint64_t fversion);
void vnodeConfirmForard(int32_t vgId, void *wparam, int32_t code);
int32_t vnodeWriteToCache(int32_t vgId, void *wparam, int32_t qtype, void *rparam);
int32_t vnodeGetVersion(int32_t vgId, uint64_t *fver, uint64_t *wver);
int32_t vnodeResetVersion(int32_t vgId, uint64_t fver);
void vnodeConfirmForward(void *pVnode, uint64_t version, int32_t code, bool force);

172
src/vnode/src/vnodeBackup.c Normal file
View File

@ -0,0 +1,172 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "os.h"
#include "taoserror.h"
#include "taosmsg.h"
#include "tutil.h"
#include "tqueue.h"
#include "tglobal.h"
#include "tfs.h"
#include "vnodeBackup.h"
#include "vnodeMain.h"
typedef struct {
int32_t vgId;
} SVBackupMsg;
typedef struct {
pthread_t thread;
int32_t workerId;
} SVBackupWorker;
typedef struct {
int32_t num;
SVBackupWorker *worker;
} SVBackupWorkerPool;
static SVBackupWorkerPool tsVBackupPool;
static taos_qset tsVBackupQset;
static taos_queue tsVBackupQueue;
static void vnodeProcessBackupMsg(SVBackupMsg *pMsg) {
int32_t vgId = pMsg->vgId;
char newDir[TSDB_FILENAME_LEN] = {0};
char stagingDir[TSDB_FILENAME_LEN] = {0};
sprintf(newDir, "%s/vnode%d", "vnode_bak", vgId);
sprintf(stagingDir, "%s/.staging/vnode%d", "vnode_bak", vgId);
if (tsEnableVnodeBak) {
tfsRmdir(newDir);
tfsRename(stagingDir, newDir);
} else {
vInfo("vgId:%d, vnode backup not enabled", vgId);
tfsRmdir(stagingDir);
}
}
static void *vnodeBackupFunc(void *param) {
while (1) {
SVBackupMsg *pMsg = NULL;
if (taosReadQitemFromQset(tsVBackupQset, NULL, (void **)&pMsg, NULL) == 0) {
vDebug("qset:%p, vbackup got no message from qset, exiting", tsVBackupQset);
break;
}
vTrace("vgId:%d, will be processed in vbackup queue", pMsg->vgId);
vnodeProcessBackupMsg(pMsg);
vTrace("vgId:%d, disposed in vbackup worker", pMsg->vgId);
taosFreeQitem(pMsg);
}
return NULL;
}
static int32_t vnodeStartBackup() {
tsVBackupQueue = taosOpenQueue();
if (tsVBackupQueue == NULL) return TSDB_CODE_DND_OUT_OF_MEMORY;
taosAddIntoQset(tsVBackupQset, tsVBackupQueue, NULL);
for (int32_t i = 0; i < tsVBackupPool.num; ++i) {
SVBackupWorker *pWorker = tsVBackupPool.worker + i;
pWorker->workerId = i;
pthread_attr_t thAttr;
pthread_attr_init(&thAttr);
pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE);
if (pthread_create(&pWorker->thread, &thAttr, vnodeBackupFunc, pWorker) != 0) {
vError("failed to create thread to process vbackup queue, reason:%s", strerror(errno));
}
pthread_attr_destroy(&thAttr);
vDebug("vbackup:%d is launched, total:%d", pWorker->workerId, tsVBackupPool.num);
}
vDebug("vbackup queue:%p is allocated", tsVBackupQueue);
return TSDB_CODE_SUCCESS;
}
static int32_t vnodeWriteIntoBackupWorker(int32_t vgId) {
SVBackupMsg *pMsg = taosAllocateQitem(sizeof(SVBackupMsg));
if (pMsg == NULL) return TSDB_CODE_VND_OUT_OF_MEMORY;
pMsg->vgId = vgId;
int32_t code = taosWriteQitem(tsVBackupQueue, TAOS_QTYPE_RPC, pMsg);
if (code == 0) code = TSDB_CODE_DND_ACTION_IN_PROGRESS;
return code;
}
int32_t vnodeBackup(int32_t vgId) {
vTrace("vgId:%d, will backup", vgId);
return vnodeWriteIntoBackupWorker(vgId);
}
int32_t vnodeInitBackup() {
tsVBackupQset = taosOpenQset();
tsVBackupPool.num = 1;
tsVBackupPool.worker = calloc(sizeof(SVBackupWorker), tsVBackupPool.num);
if (tsVBackupPool.worker == NULL) return -1;
for (int32_t i = 0; i < tsVBackupPool.num; ++i) {
SVBackupWorker *pWorker = tsVBackupPool.worker + i;
pWorker->workerId = i;
vDebug("vbackup:%d is created", i);
}
vDebug("vbackup is initialized, num:%d qset:%p", tsVBackupPool.num, tsVBackupQset);
return vnodeStartBackup();
}
void vnodeCleanupBackup() {
for (int32_t i = 0; i < tsVBackupPool.num; ++i) {
SVBackupWorker *pWorker = tsVBackupPool.worker + i;
if (taosCheckPthreadValid(pWorker->thread)) {
taosQsetThreadResume(tsVBackupQset);
}
vDebug("vbackup:%d is closed", i);
}
for (int32_t i = 0; i < tsVBackupPool.num; ++i) {
SVBackupWorker *pWorker = tsVBackupPool.worker + i;
vDebug("vbackup:%d start to join", i);
if (taosCheckPthreadValid(pWorker->thread)) {
pthread_join(pWorker->thread, NULL);
}
vDebug("vbackup:%d join success", i);
}
vDebug("vbackup is closed, qset:%p", tsVBackupQset);
taosCloseQset(tsVBackupQset);
tsVBackupQset = NULL;
tfree(tsVBackupPool.worker);
vDebug("vbackup queue:%p is freed", tsVBackupQueue);
taosCloseQueue(tsVBackupQueue);
tsVBackupQueue = NULL;
}
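The new vnodeBackup module is a single background worker fed through a taos queue/qset pair: vnodeBackup(vgId) allocates a queue item and posts it, and the worker thread drains the qset, either moving vnode_bak/.staging/vnodeN into vnode_bak/vnodeN or discarding the staging copy when vnode backup is disabled. The expected producer-side sequence, mirroring the vnodeDestroy change further below, is roughly:
tfsRename(rootDir, stagingDir); // first move the dropped vnode into vnode_bak/.staging
vnodeBackup(vgId);              // then hand the final rename (or removal) to the background worker
tfsRmdir(rootDir);              // and clean up whatever remains of the original directory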

View File

@ -27,6 +27,7 @@
#include "vnodeVersion.h"
#include "vnodeMgmt.h"
#include "vnodeWorker.h"
#include "vnodeBackup.h"
#include "vnodeMain.h"
static int32_t vnodeProcessTsdbStatus(void *arg, int32_t status, int32_t eno);
@ -98,8 +99,13 @@ int32_t vnodeSync(int32_t vgId) {
return TSDB_CODE_VND_INVALID_VGROUP_ID;
}
if (pVnode->role != TAOS_SYNC_ROLE_MASTER) {
if (pVnode->role == TAOS_SYNC_ROLE_SLAVE) {
vInfo("vgId:%d, vnode will sync, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode);
pVnode->version = 0;
pVnode->fversion = 0;
walResetVersion(pVnode->wal, pVnode->fversion);
syncRecover(pVnode->sync);
}
@ -364,7 +370,6 @@ int32_t vnodeOpen(int32_t vgId) {
syncInfo.startSyncFileFp = vnodeStartSyncFile;
syncInfo.stopSyncFileFp = vnodeStopSyncFile;
syncInfo.getVersionFp = vnodeGetVersion;
syncInfo.resetVersionFp = vnodeResetVersion;
syncInfo.sendFileFp = tsdbSyncSend;
syncInfo.recvFileFp = tsdbSyncRecv;
syncInfo.pTsdb = pVnode->tsdb;
@ -448,18 +453,14 @@ void vnodeDestroy(SVnodeObj *pVnode) {
if (pVnode->dropped) {
char rootDir[TSDB_FILENAME_LEN] = {0};
char newDir[TSDB_FILENAME_LEN] = {0};
char stagingDir[TSDB_FILENAME_LEN] = {0};
sprintf(rootDir, "%s/vnode%d", "vnode", vgId);
sprintf(newDir, "%s/vnode%d", "vnode_bak", vgId);
sprintf(stagingDir, "%s/.staging/vnode%d", "vnode_bak", vgId);
if (0 == tsEnableVnodeBak) {
vInfo("vgId:%d, vnode backup not enabled", pVnode->vgId);
} else {
tfsRmdir(newDir);
tfsRename(rootDir, newDir);
}
tfsRename(rootDir, stagingDir);
vnodeBackup(vgId);
tfsRmdir(rootDir);
dnodeSendStatusMsgToMnode();
}

View File

@ -17,6 +17,7 @@
#include "os.h"
#include "dnode.h"
#include "vnodeStatus.h"
#include "vnodeBackup.h"
#include "vnodeWorker.h"
#include "vnodeRead.h"
#include "vnodeWrite.h"
@ -29,6 +30,7 @@ static void vnodeCleanupHash(void);
static void vnodeIncRef(void *ptNode);
static SStep tsVnodeSteps[] = {
{"vnode-backup", vnodeInitBackup, vnodeCleanupBackup},
{"vnode-worker", vnodeInitMWorker, vnodeCleanupMWorker},
{"vnode-write", vnodeInitWrite, vnodeCleanupWrite},
{"vnode-read", vnodeInitRead, vnodeCleanupRead},

View File

@ -208,6 +208,7 @@ static void vnodeBuildNoResultQueryRsp(SRspRet *pRet) {
pRsp->completed = true;
}
static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
void * pCont = pRead->pCont;
int32_t contLen = pRead->contLen;
@ -226,7 +227,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
if (contLen != 0) {
qinfo_t pQInfo = NULL;
uint64_t qId = 0;
uint64_t qId = genQueryId();
code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, &pQInfo, &qId);
SQueryTableRsp *pRsp = (SQueryTableRsp *)rpcMallocCont(sizeof(SQueryTableRsp));
@ -239,7 +240,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
// current connect is broken
if (code == TSDB_CODE_SUCCESS) {
handle = qRegisterQInfo(pVnode->qMgmt, qId, (uint64_t)pQInfo);
handle = qRegisterQInfo(pVnode->qMgmt, qId, pQInfo);
if (handle == NULL) { // failed to register qhandle
pRsp->code = terrno;
terrno = 0;

View File

@ -107,8 +107,9 @@ void vnodeStopSyncFile(int32_t vgId, uint64_t fversion) {
pVnode->fversion = fversion;
pVnode->version = fversion;
vnodeSaveVersion(pVnode);
walResetVersion(pVnode->wal, fversion);
vDebug("vgId:%d, datafile is synced, fver:%" PRIu64 " vver:%" PRIu64, vgId, fversion, fversion);
vInfo("vgId:%d, datafile is synced, fver:%" PRIu64 " vver:%" PRIu64, vgId, fversion, fversion);
vnodeSetReadyStatus(pVnode);
vnodeRelease(pVnode);
@ -158,22 +159,6 @@ int32_t vnodeGetVersion(int32_t vgId, uint64_t *fver, uint64_t *wver) {
return code;
}
int32_t vnodeResetVersion(int32_t vgId, uint64_t fver) {
SVnodeObj *pVnode = vnodeAcquire(vgId);
if (pVnode == NULL) {
vError("vgId:%d, vnode not found while reset version", vgId);
return -1;
}
pVnode->fversion = fver;
pVnode->version = fver;
walResetVersion(pVnode->wal, fver);
vInfo("vgId:%d, version reset to %" PRIu64, vgId, fver);
vnodeRelease(pVnode);
return 0;
}
void vnodeConfirmForward(void *vparam, uint64_t version, int32_t code, bool force) {
SVnodeObj *pVnode = vparam;
syncConfirmForward(pVnode->sync, version, code, force);

View File

@ -25,6 +25,7 @@
#include "vnodeStatus.h"
#define MAX_QUEUED_MSG_NUM 100000
#define MAX_QUEUED_MSG_SIZE 1024*1024*1024 //1GB
extern void * tsDnodeTmr;
static int32_t (*vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *, void *pCont, SRspRet *);
@ -91,12 +92,17 @@ int32_t vnodeProcessWrite(void *vparam, void *wparam, int32_t qtype, void *rpara
int32_t syncCode = 0;
bool force = (pWrite == NULL ? false : pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT);
syncCode = syncForwardToPeer(pVnode->sync, pHead, pWrite, qtype, force);
if (syncCode < 0) return syncCode;
if (syncCode < 0) {
pHead->version = 0;
return syncCode;
}
// write into WAL
code = walWrite(pVnode->wal, pHead);
if (code < 0) {
if (syncCode > 0) atomic_sub_fetch_32(&pWrite->processedCount, 1);
vError("vgId:%d, hver:%" PRIu64 " vver:%" PRIu64 " code:0x%x", pVnode->vgId, pHead->version, pVnode->version, code);
pHead->version = 0;
return code;
}
@ -104,7 +110,10 @@ int32_t vnodeProcessWrite(void *vparam, void *wparam, int32_t qtype, void *rpara
// write data locally
code = (*vnodeProcessWriteMsgFp[pHead->msgType])(pVnode, pHead->cont, pRspRet);
if (code < 0) return code;
if (code < 0) {
if (syncCode > 0) atomic_sub_fetch_32(&pWrite->processedCount, 1);
return code;
}
return syncCode;
}
@ -261,6 +270,13 @@ static int32_t vnodeWriteToWQueueImp(SVWriteMsg *pWrite) {
}
}
if (tsAvailDataDirGB <= tsMinimalDataDirGB) {
vError("vgId:%d, failed to write into vwqueue since no diskspace, avail:%fGB", pVnode->vgId, tsAvailDataDirGB);
taosFreeQitem(pWrite);
vnodeRelease(pVnode);
return TSDB_CODE_VND_NO_DISKSPACE;
}
if (!vnodeInReadyOrUpdatingStatus(pVnode)) {
vError("vgId:%d, failed to write into vwqueue, vstatus is %s, refCount:%d pVnode:%p", pVnode->vgId,
vnodeStatus[pVnode->status], pVnode->refCount, pVnode);
@ -270,14 +286,17 @@ static int32_t vnodeWriteToWQueueImp(SVWriteMsg *pWrite) {
}
int32_t queued = atomic_add_fetch_32(&pVnode->queuedWMsg, 1);
if (queued > MAX_QUEUED_MSG_NUM) {
int64_t queuedSize = atomic_add_fetch_64(&pVnode->queuedWMsgSize, pWrite->pHead.len);
if (queued > MAX_QUEUED_MSG_NUM || queuedSize > MAX_QUEUED_MSG_SIZE) {
int32_t ms = (queued / MAX_QUEUED_MSG_NUM) * 10 + 3;
if (ms > 100) ms = 100;
vDebug("vgId:%d, too many msg:%d in vwqueue, flow control %dms", pVnode->vgId, queued, ms);
taosMsleep(ms);
}
vTrace("vgId:%d, write into vwqueue, refCount:%d queued:%d", pVnode->vgId, pVnode->refCount, pVnode->queuedWMsg);
vTrace("vgId:%d, write into vwqueue, refCount:%d queued:%d size:%" PRId64, pVnode->vgId, pVnode->refCount,
pVnode->queuedWMsg, pVnode->queuedWMsgSize);
taosWriteQitem(pVnode->wqueue, pWrite->qtype, pWrite);
return TSDB_CODE_SUCCESS;
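Flow control on the write queue is now two-dimensional: the writer is throttled when either the number of queued messages exceeds MAX_QUEUED_MSG_NUM (100000) or the total queued payload exceeds MAX_QUEUED_MSG_SIZE (1 GB). For reference, the delay computed above grows stepwise with queue depth and is capped at 100 ms (integer arithmetic):
// queued =  250000  ->  ms = ( 250000 / 100000) * 10 + 3 =  23 ms
// queued = 1200000  ->  ms = (1200000 / 100000) * 10 + 3 = 123  -> capped at 100 ms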
@ -300,7 +319,10 @@ void vnodeFreeFromWQueue(void *vparam, SVWriteMsg *pWrite) {
SVnodeObj *pVnode = vparam;
int32_t queued = atomic_sub_fetch_32(&pVnode->queuedWMsg, 1);
vTrace("vgId:%d, msg:%p, app:%p, free from vwqueue, queued:%d", pVnode->vgId, pWrite, pWrite->rpcMsg.ahandle, queued);
int64_t queuedSize = atomic_sub_fetch_64(&pVnode->queuedWMsgSize, pWrite->pHead.len);
vTrace("vgId:%d, msg:%p, app:%p, free from vwqueue, queued:%d size:%" PRId64, pVnode->vgId, pWrite,
pWrite->rpcMsg.ahandle, queued, queuedSize);
taosFreeQitem(pWrite);
vnodeRelease(pVnode);
@ -336,7 +358,9 @@ static void vnodeFlowCtrlMsgToWQueue(void *param, void *tmrId) {
static int32_t vnodePerformFlowCtrl(SVWriteMsg *pWrite) {
SVnodeObj *pVnode = pWrite->pVnode;
if (pWrite->qtype != TAOS_QTYPE_RPC) return 0;
if (pVnode->queuedWMsg < MAX_QUEUED_MSG_NUM && pVnode->flowctrlLevel <= 0) return 0;
if (pVnode->queuedWMsg < MAX_QUEUED_MSG_NUM && pVnode->queuedWMsgSize < MAX_QUEUED_MSG_SIZE &&
pVnode->flowctrlLevel <= 0)
return 0;
if (tsEnableFlowCtrl == 0) {
int32_t ms = (int32_t)pow(2, pVnode->flowctrlLevel + 2);

View File

@ -104,7 +104,7 @@ int32_t walAlter(void *handle, SWalCfg *pCfg) {
pWal->level = pCfg->walLevel;
pWal->fsyncPeriod = pCfg->fsyncPeriod;
pWal->fsyncSeq = pCfg->fsyncPeriod % 1000;
pWal->fsyncSeq = pCfg->fsyncPeriod / 1000;
if (pWal->fsyncSeq <= 0) pWal->fsyncSeq = 1;
return TSDB_CODE_SUCCESS;
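The corrected line derives fsyncSeq from fsyncPeriod, which is configured in milliseconds, so fsyncSeq now represents the period in whole seconds with a floor of 1. For example:
// fsyncPeriod = 3000 ms  ->  fsyncSeq = 3000 / 1000 = 3
// fsyncPeriod =  500 ms  ->  fsyncSeq =  500 / 1000 = 0  ->  raised to 1 by the check above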

View File

@ -68,12 +68,12 @@ public class JDBCDemo {
}
private void insert() {
final String sql = "insert into test.weather (ts, temperature, humidity) values(now, 20.5, 34)";
final String sql = "insert into " + dbName + "." + tbName + " (ts, temperature, humidity) values(now, 20.5, 34)";
exuete(sql);
}
private void select() {
final String sql = "select * from test.weather";
final String sql = "select * from "+ dbName + "." + tbName;
executeQuery(sql);
}

View File

@ -36,8 +36,6 @@
"insert_mode": "taosc",
"insert_rate": 0,
"insert_rows": 100,
"multi_thread_write_one_tbl": "no",
"number_of_tbl_in_one_sql": 0,
"interlace_rows": 3,
"max_sql_len": 1024,
"disorder_ratio": 0,

View File

@ -28,7 +28,8 @@
int points = 5;
int numOfTables = 3;
int tablesProcessed = 0;
int tablesInsertProcessed = 0;
int tablesSelectProcessed = 0;
int64_t st, et;
typedef struct {
@ -134,6 +135,9 @@ int main(int argc, char *argv[])
gettimeofday(&systemTime, NULL);
st = systemTime.tv_sec * 1000000 + systemTime.tv_usec;
tablesInsertProcessed = 0;
tablesSelectProcessed = 0;
for (i = 0; i<numOfTables; ++i) {
// insert records in asynchronous API
sprintf(sql, "insert into %s values(%ld, 0)", tableList[i].name, 1546300800000 + i);
@ -143,10 +147,20 @@ int main(int argc, char *argv[])
printf("once insert finished, presse any key to query\n");
getchar();
while(1) {
if (tablesInsertProcessed < numOfTables) {
printf("wait for process finished\n");
sleep(1);
continue;
}
break;
}
printf("start to query...\n");
gettimeofday(&systemTime, NULL);
st = systemTime.tv_sec * 1000000 + systemTime.tv_usec;
tablesProcessed = 0;
for (i = 0; i < numOfTables; ++i) {
// select records in asynchronous API
@ -157,14 +171,8 @@ int main(int argc, char *argv[])
printf("\nonce finished, press any key to exit\n");
getchar();
for (i = 0; i<numOfTables; ++i) {
printf("%s inserted:%d retrieved:%d\n", tableList[i].name, tableList[i].rowsInserted, tableList[i].rowsRetrieved);
}
getchar();
while(1) {
if (tablesProcessed < numOfTables) {
if (tablesSelectProcessed < numOfTables) {
printf("wait for process finished\n");
sleep(1);
continue;
@ -173,6 +181,10 @@ int main(int argc, char *argv[])
break;
}
for (i = 0; i<numOfTables; ++i) {
printf("%s inserted:%d retrieved:%d\n", tableList[i].name, tableList[i].rowsInserted, tableList[i].rowsRetrieved);
}
taos_close(taos);
free(tableList);
@ -214,8 +226,8 @@ void taos_insert_call_back(void *param, TAOS_RES *tres, int code)
}
else {
printf("%d rows data are inserted into %s\n", points, pTable->name);
tablesProcessed++;
if (tablesProcessed >= numOfTables) {
tablesInsertProcessed++;
if (tablesInsertProcessed >= numOfTables) {
gettimeofday(&systemTime, NULL);
et = systemTime.tv_sec * 1000000 + systemTime.tv_usec;
printf("%lld mseconds to insert %d data points\n", (et - st) / 1000, points*numOfTables);
@ -251,15 +263,17 @@ void taos_retrieve_call_back(void *param, TAOS_RES *tres, int numOfRows)
//taos_free_result(tres);
printf("%d rows data retrieved from %s\n", pTable->rowsRetrieved, pTable->name);
tablesProcessed++;
if (tablesProcessed >= numOfTables) {
tablesSelectProcessed++;
if (tablesSelectProcessed >= numOfTables) {
gettimeofday(&systemTime, NULL);
et = systemTime.tv_sec * 1000000 + systemTime.tv_usec;
printf("%lld mseconds to query %d data rows\n", (et - st) / 1000, points * numOfTables);
}
}
taos_free_result(tres);
}
}
void taos_select_call_back(void *param, TAOS_RES *tres, int code)
@ -276,6 +290,4 @@ void taos_select_call_back(void *param, TAOS_RES *tres, int code)
taos_cleanup();
exit(1);
}
taos_free_result(tres);
}

View File

@ -66,7 +66,7 @@ int main(int argc, char *argv[]) {
printf("failed to connect to server, reason:%s\n", "null taos"/*taos_errstr(taos)*/);
exit(1);
}
for (int i = 0; i < 4000000; i++) {
for (int i = 0; i < 100; i++) {
Test(taos, qstr, i);
}
taos_close(taos);

View File

@ -16,24 +16,26 @@ package main
import (
"database/sql"
"flag"
"fmt"
_ "github.com/taosdata/driver-go/taosSql"
"math/rand"
"os"
"sync"
"runtime"
"strconv"
"sync"
"time"
"flag"
"math/rand"
_ "github.com/taosdata/driver-go/taosSql"
//"golang.org/x/sys/unix"
)
const (
maxLocationSize = 32
maxSqlBufSize = 65480
//maxSqlBufSize = 65480
)
var locations = [maxLocationSize]string {
var locations = [maxLocationSize]string{
"Beijing", "Shanghai", "Guangzhou", "Shenzhen",
"HangZhou", "Tianjin", "Wuhan", "Changsha",
"Nanjing", "Xian"}
@ -62,7 +64,7 @@ var taosDriverName = "taosSql"
var url string
func init() {
flag.StringVar(&configPara.hostName, "h", "127.0.0.1","The host to connect to TDengine server.")
flag.StringVar(&configPara.hostName, "h", "127.0.0.1", "The host to connect to TDengine server.")
flag.IntVar(&configPara.serverPort, "p", 6030, "The TCP/IP port number to use for the connection to TDengine server.")
flag.StringVar(&configPara.user, "u", "root", "The TDengine user name to use when connecting to the server.")
flag.StringVar(&configPara.password, "P", "taosdata", "The password to use when connecting to the server.")
@ -80,7 +82,7 @@ func init() {
configPara.supTblName = "meters"
startTs, err := time.ParseInLocation("2006-01-02 15:04:05", configPara.startTimestamp, time.Local)
if err==nil {
if err == nil {
configPara.startTs = startTs.UnixNano() / 1e6
}
}
@ -104,7 +106,7 @@ func printAllArgs() {
func main() {
printAllArgs()
fmt.Printf("Please press enter key to continue....\n")
fmt.Scanln()
_, _ = fmt.Scanln()
url = "root:taosdata@/tcp(" + configPara.hostName + ":" + strconv.Itoa(configPara.serverPort) + ")/"
//url = fmt.Sprintf("%s:%s@/tcp(%s:%d)/%s?interpolateParams=true", configPara.user, configPara.password, configPara.hostName, configPara.serverPort, configPara.dbName)
@ -138,7 +140,7 @@ func main() {
func createDatabase(dbName string, supTblName string) {
db, err := sql.Open(taosDriverName, url)
if err != nil {
fmt.Println("Open database error: %s\n", err)
fmt.Printf("Open database error: %s\n", err)
os.Exit(1)
}
defer db.Close()
@ -165,27 +167,27 @@ func createDatabase(dbName string, supTblName string) {
checkErr(err, sqlStr)
}
func multiThreadCreateTable(threads int, ntables int, dbName string, tablePrefix string) {
func multiThreadCreateTable(threads int, nTables int, dbName string, tablePrefix string) {
st := time.Now().UnixNano()
if (threads < 1) {
threads = 1;
if threads < 1 {
threads = 1
}
a := ntables / threads;
if (a < 1) {
threads = ntables;
a = 1;
a := nTables / threads
if a < 1 {
threads = nTables
a = 1
}
b := ntables % threads;
b := nTables % threads
last := 0;
last := 0
endTblId := 0
wg := sync.WaitGroup{}
for i := 0; i < threads; i++ {
startTblId := last
if (i < b ) {
if i < b {
endTblId = last + a
} else {
endTblId = last + a - 1
@ -206,7 +208,7 @@ func createTable(dbName string, childTblPrefix string, startTblId int, endTblId
db, err := sql.Open(taosDriverName, url)
if err != nil {
fmt.Println("Open database error: %s\n", err)
fmt.Printf("Open database error: %s\n", err)
os.Exit(1)
}
defer db.Close()
@ -228,20 +230,21 @@ func generateRowData(ts int64) string {
values := "( " + strconv.FormatInt(ts, 10) + ", " + strconv.FormatFloat(float64(current), 'f', 6, 64) + ", " + strconv.Itoa(voltage) + ", " + strconv.FormatFloat(float64(phase), 'f', 6, 64) + " ) "
return values
}
func insertData(dbName string, childTblPrefix string, startTblId int, endTblId int, wg *sync.WaitGroup) {
//fmt.Printf("subThread[%d]: insert data to table from %d to %d \n", unix.Gettid(), startTblId, endTblId)
// windows.GetCurrentThreadId()
db, err := sql.Open(taosDriverName, url)
if err != nil {
fmt.Println("Open database error: %s\n", err)
fmt.Printf("Open database error: %s\n", err)
os.Exit(1)
}
defer db.Close()
tmpTs := configPara.startTs;
tmpTs := configPara.startTs
//rand.New(rand.NewSource(time.Now().UnixNano()))
for tID := startTblId; tID <= endTblId; tID++{
for tID := startTblId; tID <= endTblId; tID++ {
totalNum := 0
for {
sqlStr := "insert into " + dbName + "." + childTblPrefix + strconv.Itoa(tID) + " values "
@ -254,7 +257,7 @@ func insertData(dbName string, childTblPrefix string, startTblId int, endTblId i
sqlStr = fmt.Sprintf("%s %s", sqlStr, valuesOfRow)
if (currRowNum >= configPara.numOfRecordsPerReq || totalNum >= configPara.numOfRecordsPerTable) {
if currRowNum >= configPara.numOfRecordsPerReq || totalNum >= configPara.numOfRecordsPerTable {
break
}
}
@ -265,12 +268,12 @@ func insertData(dbName string, childTblPrefix string, startTblId int, endTblId i
count, err := res.RowsAffected()
checkErr(err, "rows affected")
if (count != int64(currRowNum)) {
if count != int64(currRowNum) {
fmt.Printf("insert data, expect affected:%d, actual:%d\n", currRowNum, count)
os.Exit(1)
}
if (totalNum >= configPara.numOfRecordsPerTable) {
if totalNum >= configPara.numOfRecordsPerTable {
break
}
}
@ -279,44 +282,46 @@ func insertData(dbName string, childTblPrefix string, startTblId int, endTblId i
wg.Done()
runtime.Goexit()
}
func multiThreadInsertData(threads int, ntables int, dbName string, tablePrefix string) {
func multiThreadInsertData(threads int, nTables int, dbName string, tablePrefix string) {
st := time.Now().UnixNano()
if (threads < 1) {
threads = 1;
if threads < 1 {
threads = 1
}
a := ntables / threads;
if (a < 1) {
threads = ntables;
a = 1;
a := nTables / threads
if a < 1 {
threads = nTables
a = 1
}
b := ntables % threads;
b := nTables % threads
last := 0;
last := 0
endTblId := 0
wg := sync.WaitGroup{}
for i := 0; i < threads; i++ {
startTblId := last
if (i < b ) {
if i < b {
endTblId = last + a
} else {
endTblId = last + a - 1
}
last = endTblId + 1
wg.Add(1)
go insertData(dbName, tablePrefix, startTblId , endTblId, &wg)
go insertData(dbName, tablePrefix, startTblId, endTblId, &wg)
}
wg.Wait()
et := time.Now().UnixNano()
fmt.Printf("insert data spent duration: %6.6fs\n", (float32(et-st))/1e9)
}
func selectTest(dbName string, tbPrefix string, supTblName string){
func selectTest(dbName string, tbPrefix string, supTblName string) {
db, err := sql.Open(taosDriverName, url)
if err != nil {
fmt.Println("Open database error: %s\n", err)
fmt.Printf("Open database error: %s\n", err)
os.Exit(1)
}
defer db.Close()
@ -352,7 +357,7 @@ func selectTest(dbName string, tbPrefix string, supTblName string){
}
// select sql 2
sqlStr = "select avg(voltage), min(voltage), max(voltage) from " + dbName + "." + tbPrefix + strconv.Itoa( rand.Int() % configPara.numOftables)
sqlStr = "select avg(voltage), min(voltage), max(voltage) from " + dbName + "." + tbPrefix + strconv.Itoa(rand.Int()%configPara.numOftables)
rows, err = db.Query(sqlStr)
checkErr(err, sqlStr)

Some files were not shown because too many files have changed in this diff