Merge branch 'develop' into feature/TD-3188

dapan1121 2021-03-29 10:31:00 +08:00
commit 15d412fe5b
85 changed files with 6313 additions and 2234 deletions


@ -17,6 +17,7 @@ SET(TD_MQTT FALSE)
SET(TD_TSDB_PLUGINS FALSE)
SET(TD_STORAGE FALSE)
SET(TD_TOPIC FALSE)
SET(TD_MODULE FALSE)
SET(TD_COVER FALSE)
SET(TD_MEM_CHECK FALSE)

Jenkinsfile

@ -6,6 +6,7 @@ node {
}
def skipstage=0
def abortPreviousBuilds() {
def currentJobName = env.JOB_NAME
def currentBuildNumber = env.BUILD_NUMBER.toInteger()
@ -24,7 +25,7 @@ def abortPreviousBuilds() {
build.doKill() //doTerm(),doKill(),doTerm()
}
}
//abort previous build
// abort previous build
abortPreviousBuilds()
def abort_previous(){
def buildNumber = env.BUILD_NUMBER as int
@ -32,19 +33,20 @@ def abort_previous(){
milestone(buildNumber)
}
def pre_test(){
sh '''
sudo rmtaos || echo "taosd has not installed"
'''
sh '''
killall -9 taosd ||echo "no taosd running"
killall -9 gdb || echo "no gdb running"
cd ${WKC}
git checkout develop
git reset --hard HEAD~10 >/dev/null
git pull >/dev/null
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD develop)|grep -v -E '.*md|//src//connector|Jenkinsfile'
find ${WKC}/tests/pytest -name \'*\'.sql -exec rm -rf {} \\;
cd ${WK}
git reset --hard HEAD~10
@ -79,6 +81,10 @@ pipeline {
changeRequest()
}
steps {
script{
abort_previous()
abortPreviousBuilds()
}
sh'''
cp -r ${WORKSPACE} ${WORKSPACE}.tes
cd ${WORKSPACE}.tes
@ -179,6 +185,14 @@ pipeline {
rm -rf /var/log/taos/*
./handle_crash_gen_val_log.sh
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_taosd_val_log.sh
'''
}
timeout(time: 45, unit: 'MINUTES'){
sh '''
date
@ -209,6 +223,11 @@ pipeline {
cd ${WKC}/tests
./test-all.sh b3fq
date'''
sh '''
date
cd ${WKC}/tests
./test-all.sh full example
date'''
}
}
}
@ -266,7 +285,7 @@ pipeline {
date
cd ${WKC}/tests
./test-all.sh b7fq
date'''
date'''
}
}
}


@ -258,10 +258,16 @@ TDengine 社区生态中也有一些非常友好的第三方连接器,可以
TDengine 的测试框架和所有测试例全部开源。
点击[这里](tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md),了解如何运行测试例和添加新的测试例。
点击 [这里](tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md),了解如何运行测试例和添加新的测试例。
# 成为社区贡献者
点击[这里](https://www.taosdata.com/cn/contributor/),了解如何成为 TDengine 的贡献者。
#加入技术交流群
TDengine官方社群「物联网大数据群」对外开放欢迎您加入讨论。搜索微信号 "tdengine"加小T为好友即可入群。
点击 [这里](https://www.taosdata.com/cn/contributor/),了解如何成为 TDengine 的贡献者。
# 加入技术交流群
TDengine 官方社群「物联网大数据群」对外开放,欢迎您加入讨论。搜索微信号 "tdengine"加小T为好友即可入群。
# [谁在使用TDengine](https://github.com/taosdata/TDengine/issues/2432)
欢迎所有 TDengine 用户及贡献者在 [这里](https://github.com/taosdata/TDengine/issues/2432) 分享您在当前工作中开发/使用 TDengine 的故事。


@ -250,3 +250,6 @@ Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to th
Add WeChat “tdengine” to join the groupyou can communicate with other users.
# [User List](https://github.com/taosdata/TDengine/issues/2432)
If you are using TDengine and feel it helps or you'd like to do some contributions, please add your company to [user list](https://github.com/taosdata/TDengine/issues/2432) and let us know your needs.


@ -29,6 +29,10 @@ IF (TD_TOPIC)
ADD_DEFINITIONS(-D_TOPIC)
ENDIF ()
IF (TD_MODULE)
ADD_DEFINITIONS(-D_MODULE)
ENDIF ()
IF (TD_GODLL)
ADD_DEFINITIONS(-D_TD_GO_DLL_)
ENDIF ()


@ -17,6 +17,14 @@ ELSEIF (${TOPIC} MATCHES "false")
MESSAGE(STATUS "Build without topic plugins")
ENDIF ()
IF (${TD_MODULE} MATCHES "true")
SET(TD_MODULE TRUE)
MESSAGE(STATUS "Build with module plugins")
ELSEIF (${TOPIC} MATCHES "false")
SET(TD_MODULE FALSE)
MESSAGE(STATUS "Build without module plugins")
ENDIF ()
IF (${COVER} MATCHES "true")
SET(TD_COVER TRUE)
MESSAGE(STATUS "Build with test coverage")


@ -120,7 +120,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台
* [TDengine性能对比测试工具](https://www.taosdata.com/blog/2020/01/18/1166.html)
* [IDEA数据库管理工具可视化使用TDengine](https://www.taosdata.com/blog/2020/08/27/1767.html)
* [基于eletron开发的跨平台TDengine图形化管理工具](https://github.com/skye0207/TDengineGUI)
* [DataX支持TDengine的离线数据采集/同步工具](https://github.com/alibaba/DataX)
* [DataX支持TDengine的离线数据采集/同步工具](https://github.com/wgzhao/DataX)(文档:[读取插件](https://github.com/wgzhao/DataX/blob/master/docs/src/main/sphinx/reader/tdenginereader.md)、[写入插件](https://github.com/wgzhao/DataX/blob/master/docs/src/main/sphinx/writer/tdenginewriter.md)
## TDengine与其他数据库的对比测试


@ -179,18 +179,18 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
### TDengine服务器支持的平台列表
| | **CentOS** **6/7/8** | **Ubuntu** **16/18/20** | **Other Linux** | **统信****UOS** | **银河****/****中标麒麟** | **凝思** **V60/V80** |
| | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** |
| -------------- | --------------------- | ------------------------ | --------------- | --------------- | ------------------------- | --------------------- |
| X64 | ● | ● | | ○ | ● | ● |
| 树莓派ARM32 | | ● | ● | | | |
| 龙芯MIPS64 | | | ● | | | |
| 鲲鹏 ARM64 | | ○ | ○ | | ● | |
| 申威 Alpha64 | | | ○ | ● | | |
| 飞腾ARM64 | | ○优麒麟 | | | | |
| 海光X64 | ● | ● | ● | ○ | ● | ● |
| 瑞芯微ARM64/32 | | | ○ | | | |
| 全志ARM64/32 | | | ○ | | | |
| 炬力ARM64/32 | | | ○ | | | |
| 树莓派 ARM32 | | ● | ● | | | |
| 龙芯 MIPS64 | | | ● | | | |
| 鲲鹏 ARM64 | | ○ | ○ | | ● | |
| 申威 Alpha64 | | | ○ | ● | | |
| 飞腾 ARM64 | | ○ 优麒麟 | | | | |
| 海光 X64 | ● | ● | ● | ○ | ● | ● |
| 瑞芯微 ARM64/32 | | | ○ | | | |
| 全志 ARM64/32 | | | ○ | | | |
| 炬力 ARM64/32 | | | ○ | | | |
| TI ARM32 | | | ○ | | | |
注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。
@ -203,7 +203,7 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
对照矩阵如下:
| **CPU** | **X64 64bit** | | | **X86 32bit** | **ARM64** | **ARM32** | **MIPS ** **龙芯** | **Alpha ** **申威** | **X64 ** **海光** |
| **CPU** | **X64 64bit** | | | **X86 32bit** | **ARM64** | **ARM32** | **MIPS 龙芯** | **Alpha 申威** | **X64 海光** |
| ----------- | --------------- | --------- | --------- | --------------- | --------- | --------- | ------------------- | -------------------- | ------------------ |
| **OS** | **Linux** | **Win64** | **Win32** | **Win32** | **Linux** | **Linux** | **Linux** | **Linux** | **Linux** |
| **C/C++** | ● | ● | ● | ○ | ● | ● | ● | ● | ● |


@ -471,9 +471,11 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT, TINYINT | java.lang.Short |
| SMALLINT | java.lang.Short |
| TINYINT | java.lang.Byte |
| BOOL | java.lang.Boolean |
| BINARY, NCHAR | java.lang.String |
| BINARY | byte array |
| NCHAR | java.lang.String |


@ -111,9 +111,10 @@ taos>
**提示:**
- 任何已经加入集群在线的数据节点都可以作为后续待加入节点的firstEP。
- firstEp这个参数仅仅在该数据节点首次加入集群时有作用加入集群后该数据节点会保存最新的mnode的End Point列表不再依赖这个参数。
- 两个没有配置firstEp参数的数据节点dnode启动后会独立运行起来。这个时候无法将其中一个数据节点加入到另外一个数据节点形成集群。**无法将两个独立的集群合并成为新的集群**。
- 任何已经加入集群在线的数据节点,都可以作为后续待加入节点的 firstEP。
- firstEp 这个参数仅仅在该数据节点首次加入集群时有作用,加入集群后,该数据节点会保存最新的 mnode 的 End Point 列表,不再依赖这个参数。
- 接下来,配置文件中的 firstEp 参数就主要在客户端连接的时候使用了,例如 taos shell 如果不加参数,会默认连接由 firstEp 指定的节点。
- 两个没有配置 firstEp 参数的数据节点 dnode 启动后,会独立运行起来。这个时候,无法将其中一个数据节点加入到另外一个数据节点,形成集群。**无法将两个独立的集群合并成为新的集群**。
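For illustration only (this block is not part of the diff above): a minimal sketch of the dnode configuration the bullets refer to. The parameter names firstEp, fqdn and serverPort are the standard taos.cfg settings; the host names and port are made-up example values.

```
# taos.cfg on a dnode that is about to join the cluster (example values)
firstEp      h1.taosdata.com:6030   # any dnode already online in the cluster
fqdn         h2.taosdata.com        # this dnode's own FQDN
serverPort   6030                   # this dnode's own service port
```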
## <a class="anchor" id="management"></a>数据节点管理


@ -6,19 +6,27 @@
### 内存需求
每个 DB 可以创建固定数目的 vgroup默认与 CPU 核数相同,可通过 maxVgroupsPerDb 配置vgroup 中的每个副本会是一个 vnode每个 vnode 会占用固定大小的内存(大小与数据库的配置参数 blocks 和 cache 有关);每个 Table 会占用与标签总长度有关的内存;此外,系统会有一些固定的内存开销。因此,每个 DB 需要的系统内存可通过如下公式计算:
每个 Database 可以创建固定数目的 vgroup默认与 CPU 核数相同,可通过 maxVgroupsPerDb 配置vgroup 中的每个副本会是一个 vnode每个 vnode 会占用固定大小的内存(大小与数据库的配置参数 blocks 和 cache 有关);每个 Table 会占用与标签总长度有关的内存;此外,系统会有一些固定的内存开销。因此,每个 DB 需要的系统内存可通过如下公式计算:
```
Memory Size = maxVgroupsPerDb * (blocks * cache + 10MB) + numOfTables * (tagSizePerTable + 0.5KB)
Database Memory Size = maxVgroupsPerDb * (blocks * cache + 10MB) + numOfTables * (tagSizePerTable + 0.5KB)
```
示例:假设是 4 核机器cache 是缺省大小 16M, blocks 是缺省值 6假设有 10 万张表,标签总长度是 256 字节则总的内存需求为4 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 499M。
示例:假设是 4 核机器cache 是缺省大小 16M, blocks 是缺省值 6并且一个 DB 中有 10 万张表,标签总长度是 256 字节,则这个 DB 总的内存需求为4 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 499M。
注意:从这个公式计算得到的内存容量,应理解为系统的“必要需求”,而不是“内存总数”。在实际运行的生产系统中,由于操作系统缓存、资源管理调度等方面的需要,内存规划应当在计算结果的基础上保留一定冗余,以维持系统状态和系统性能的稳定性。
在实际的系统运维中,我们通常会更关心 TDengine 服务进程taosd会占用的内存量。
```
taosd 内存总量 = vnode 内存 + mnode 内存 + 查询内存
```
实际运行的系统往往会根据数据特点的不同,将数据存放在不同的 DB 里。因此做规划时,也需要考虑。
其中:
1. “vnode 内存”指的是集群中所有的 Database 存储分摊到当前 taosd 节点上所占用的内存资源。可以按上文“Database Memory Size”计算公式估算每个 DB 的内存占用量进行加总,再按集群中总共的 TDengine 节点数做平均(如果设置为多副本,则还需要乘以对应的副本倍数)。
2. “mnode 内存”指的是集群中管理节点所占用的资源。如果一个 taosd 节点上分布有 mnode 管理节点则内存消耗还需要增加“0.2KB * 集群中数据表总数”。
3. “查询内存”指的是服务端处理查询请求时所需要占用的内存。单条查询语句至少会占用“0.2KB * 查询涉及的数据表总数”的内存量。
如果内存充裕,可以加大 Blocks 的配置,这样更多数据将保存在内存里,提高查询速度。
注意:以上内存估算方法,主要讲解了系统的“必须内存需求”,而不是“内存总数上限”。在实际运行的生产环境中,由于操作系统缓存、资源管理调度等方面的原因,内存规划应当在估算结果的基础上保留一定冗余,以维持系统状态和系统性能的稳定性。并且,生产环境通常会配置系统资源的监控工具,以便及时发现硬件资源的紧缺情况。
最后,如果内存充裕,可以考虑加大 Blocks 的配置,这样更多数据将保存在内存里,提高查询速度。
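As a quick sanity check of the figures quoted in this hunk, here is a minimal C sketch (not part of the commit) that simply replays the documentation's arithmetic; all parameter values are the assumptions stated in the example above.

```
#include <stdio.h>

int main(void) {
  /* Assumptions from the documentation example above, not measurements. */
  int    maxVgroupsPerDb = 4;       /* defaults to the number of CPU cores     */
  int    blocks          = 6;       /* default blocks value                    */
  double cacheMB         = 16.0;    /* default cache block size, in MB         */
  long   numOfTables     = 100000;  /* tables in this DB                       */
  double tagSizeKB       = 0.25;    /* 256-byte total tag length per table     */
  double perTableKB      = 0.5;     /* fixed per-table overhead in the formula */

  /* Database Memory Size = maxVgroupsPerDb * (blocks * cache + 10MB)
                            + numOfTables * (tagSizePerTable + 0.5KB)          */
  double vnodePartMB = maxVgroupsPerDb * (blocks * cacheMB + 10.0);     /* 424 MB */
  double tablePartMB = numOfTables * (tagSizeKB + perTableKB) / 1000.0; /*  75 MB */

  printf("estimated memory for this DB: %.0f MB\n", vnodePartMB + tablePartMB); /* ~499 MB */
  return 0;
}
```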
### CPU 需求


@ -83,6 +83,22 @@ typedef struct SJoinSupporter {
SArray* pVgroupTables;
} SJoinSupporter;
typedef struct SMergeCtx {
SJoinSupporter* p;
int32_t idx;
SArray* res;
int8_t compared;
}SMergeCtx;
typedef struct SMergeTsCtx {
SJoinSupporter* p;
STSBuf* res;
int64_t numOfInput;
int8_t compared;
}SMergeTsCtx;
typedef struct SVgroupTableInfo {
SVgroupInfo vgInfo;
SArray* itemList; //SArray<STableIdInfo>
@ -183,6 +199,7 @@ int32_t tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deep
void tscSqlExprInfoDestroy(SArray* pExprInfo);
SColumn* tscColumnClone(const SColumn* src);
bool tscColumnExists(SArray* pColumnList, SColumnIndex* pColIndex);
SColumn* tscColumnListInsert(SArray* pColList, SColumnIndex* colIndex);
SArray* tscColumnListClone(const SArray* src, int16_t tableIndex);
void tscColumnListDestroy(SArray* pColList);


@ -142,15 +142,15 @@ typedef struct SCond {
} SCond;
typedef struct SJoinNode {
char tableName[TSDB_TABLE_FNAME_LEN];
uint64_t uid;
int16_t tagColId;
SArray* tsJoin;
SArray* tagJoin;
} SJoinNode;
typedef struct SJoinInfo {
bool hasJoin;
SJoinNode left;
SJoinNode right;
bool hasJoin;
SJoinNode* joinTables[TSDB_MAX_JOIN_TABLE_NUM];
} SJoinInfo;
typedef struct STagCond {
@ -285,7 +285,7 @@ typedef struct {
char * pRsp;
int32_t rspType;
int32_t rspLen;
uint64_t qhandle;
uint64_t qId;
int64_t useconds;
int64_t offset; // offset value from vnode during projection query of stable
int32_t row;
@ -368,7 +368,7 @@ typedef struct SSqlObj {
int64_t svgroupRid;
int64_t squeryLock;
int32_t retryReason; // previous error code
struct SSqlObj *prev, *next;
int64_t self;
} SSqlObj;


@ -160,8 +160,8 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
if ((pRes->qhandle == 0 || numOfRows != 0) && pCmd->command < TSDB_SQL_LOCAL) {
if (pRes->qhandle == 0 && numOfRows != 0) {
if ((pRes->qId == 0 || numOfRows != 0) && pCmd->command < TSDB_SQL_LOCAL) {
if (pRes->qId == 0 && numOfRows != 0) {
tscError("qhandle is NULL");
} else {
pRes->code = numOfRows;
@ -208,7 +208,7 @@ void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) {
pSql->fetchFp = fp;
pSql->fp = tscAsyncFetchRowsProxy;
if (pRes->qhandle == 0) {
if (pRes->qId == 0) {
tscError("qhandle is NULL");
pRes->code = TSDB_CODE_TSC_INVALID_QHANDLE;
pSql->param = param;
@ -310,10 +310,51 @@ void tscAsyncResultOnError(SSqlObj* pSql) {
taosScheduleTask(tscQhandle, &schedMsg);
}
int tscSendMsgToServer(SSqlObj *pSql);
static int32_t updateMetaBeforeRetryQuery(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SQueryInfo* pQueryInfo) {
// handle the invalid table error code for super table.
// update the pExpr info, colList info, number of table columns
// TODO Re-parse this sql and issue the corresponding subquery as an alternative for this case.
if (pSql->retryReason == TSDB_CODE_TDB_INVALID_TABLE_ID) {
int32_t numOfExprs = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
int32_t numOfTags = tscGetNumOfTags(pTableMetaInfo->pTableMeta);
SSchema *pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
for (int32_t i = 0; i < numOfExprs; ++i) {
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i);
pExpr->uid = pTableMetaInfo->pTableMeta->id.uid;
if (pExpr->colInfo.colIndex >= 0) {
int32_t index = pExpr->colInfo.colIndex;
if ((TSDB_COL_IS_NORMAL_COL(pExpr->colInfo.flag) && index >= numOfCols) ||
(TSDB_COL_IS_TAG(pExpr->colInfo.flag) && (index < numOfCols || index >= (numOfCols + numOfTags)))) {
return pSql->retryReason;
}
if ((pSchema[pExpr->colInfo.colIndex].colId != pExpr->colInfo.colId) &&
strcasecmp(pExpr->colInfo.name, pSchema[pExpr->colInfo.colIndex].name) != 0) {
return pSql->retryReason;
}
}
}
// validate the table columns information
for (int32_t i = 0; i < taosArrayGetSize(pQueryInfo->colList); ++i) {
SColumn *pCol = taosArrayGetP(pQueryInfo->colList, i);
if (pCol->colIndex.columnIndex >= numOfCols) {
return pSql->retryReason;
}
}
} else {
// do nothing
}
return TSDB_CODE_SUCCESS;
}
void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, (int64_t)param);
if (pSql == NULL) return;
@ -339,7 +380,8 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY|TSDB_QUERY_TYPE_SUBQUERY|TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) {
tscDebug("%p update local table meta, continue to process sql and send the corresponding query", pSql);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
code = tscGetTableMeta(pSql, pTableMetaInfo);
assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS || code == TSDB_CODE_SUCCESS);
@ -349,6 +391,10 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
}
assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0));
code = updateMetaBeforeRetryQuery(pSql, pTableMetaInfo, pQueryInfo);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
// tscProcessSql can add error into async res
tscProcessSql(pSql);


@ -309,7 +309,7 @@ TAOS_ROW tscFetchRow(void *param) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
if (pRes->qhandle == 0 ||
if (pRes->qId == 0 ||
pCmd->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT ||
pCmd->command == TSDB_SQL_INSERT) {
return NULL;
@ -905,7 +905,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
* set the qhandle to be 1 in order to pass the qhandle check, and to call partial release function to
* free allocated resources and remove the SqlObj from sql query linked list
*/
pRes->qhandle = 0x1;
pRes->qId = 0x1;
pRes->numOfRows = 0;
} else if (pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE) {
pRes->code = tscProcessShowCreateTable(pSql);


@ -1606,7 +1606,7 @@ void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen)
tscDestroyLocalMerger(pObj);
}
pRes->qhandle = 1; // hack to pass the safety check in fetch_row function
pRes->qId = 1; // hack to pass the safety check in fetch_row function
pRes->numOfRows = 0;
pRes->row = 0;


@ -903,7 +903,7 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
pRes->qhandle = 0;
pRes->qId = 0;
pRes->numOfRows = 1;
strtolower(pSql->sqlstr, sql);


@ -249,8 +249,8 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
pQdesc->stime = htobe64(pSql->stime);
pQdesc->queryId = htonl(pSql->queryId);
//pQdesc->useconds = htobe64(pSql->res.useconds);
pQdesc->useconds = htobe64(now - pSql->stime);
pQdesc->qHandle = htobe64(pSql->res.qhandle);
pQdesc->useconds = htobe64(now - pSql->stime); // use local time instead of sever rsp elapsed time
pQdesc->qHandle = htobe64(pSql->res.qId);
pHeartbeat->numOfQueries++;
pQdesc++;


@ -807,7 +807,18 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
if ((code = setKillInfo(pSql, pInfo, pInfo->type)) != TSDB_CODE_SUCCESS) {
return code;
}
break;
}
case TSDB_SQL_SYNC_DB_REPLICA: {
const char* msg1 = "invalid db name";
SStrToken* pzName = taosArrayGet(pInfo->pMiscInfo->a, 0);
assert(taosArrayGetSize(pInfo->pMiscInfo->a) == 1);
code = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), pzName);
if (code != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
break;
}
@ -1103,12 +1114,18 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* pSl
int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pTableName, SSqlObj* pSql) {
const char* msg1 = "name too long";
const char* msg2 = "acctId too long";
const char* msg3 = "no acctId";
SSqlCmd* pCmd = &pSql->cmd;
int32_t code = TSDB_CODE_SUCCESS;
if (hasSpecifyDB(pTableName)) { // db has been specified in sql string so we ignore current db path
code = tNameSetAcctId(&pTableMetaInfo->name, getAccountId(pSql));
char* acctId = getAccountId(pSql);
if (acctId == NULL || strlen(acctId) <= 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
code = tNameSetAcctId(&pTableMetaInfo->name, acctId);
if (code != 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
@ -3613,24 +3630,26 @@ static int32_t getColumnQueryCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSq
}
}
static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr) {
const char* msg1 = "invalid join query condition";
const char* msg2 = "invalid table name in join query";
static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr) {
int32_t code = 0;
const char* msg1 = "timestamp required for join tables";
const char* msg3 = "type of join columns must be identical";
const char* msg4 = "invalid column name in join condition";
const char* msg5 = "only support one join tag for each table";
if (pExpr == NULL) {
return TSDB_CODE_SUCCESS;
}
if (!tSqlExprIsParentOfLeaf(pExpr)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
code = checkAndSetJoinCondInfo(pCmd, pQueryInfo, pExpr->pLeft);
if (code) {
return code;
}
return checkAndSetJoinCondInfo(pCmd, pQueryInfo, pExpr->pRight);
}
STagCond* pTagCond = &pQueryInfo->tagCond;
SJoinNode* pLeft = &pTagCond->joinInfo.left;
SJoinNode* pRight = &pTagCond->joinInfo.right;
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if (getColumnIndexByName(pCmd, &pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
@ -3639,13 +3658,28 @@ static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr*
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
SSchema* pTagSchema1 = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
pLeft->uid = pTableMetaInfo->pTableMeta->id.uid;
pLeft->tagColId = pTagSchema1->colId;
assert(index.tableIndex >= 0 && index.tableIndex < TSDB_MAX_JOIN_TABLE_NUM);
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pLeft->tableName);
if (code != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
SJoinNode **leftNode = &pQueryInfo->tagCond.joinInfo.joinTables[index.tableIndex];
if (*leftNode == NULL) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
(*leftNode)->uid = pTableMetaInfo->pTableMeta->id.uid;
(*leftNode)->tagColId = pTagSchema1->colId;
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
if (!tscColumnExists(pTableMetaInfo->tagColList, &index)) {
tscColumnListInsert(pTableMetaInfo->tagColList, &index);
if (taosArrayGetSize(pTableMetaInfo->tagColList) > 1) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
}
}
int16_t leftIdx = index.tableIndex;
index = (SColumnIndex)COLUMN_INDEX_INITIALIZER;
if (getColumnIndexByName(pCmd, &pExpr->pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
@ -3655,20 +3689,55 @@ static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr*
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
SSchema* pTagSchema2 = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
pRight->uid = pTableMetaInfo->pTableMeta->id.uid;
pRight->tagColId = pTagSchema2->colId;
assert(index.tableIndex >= 0 && index.tableIndex < TSDB_MAX_JOIN_TABLE_NUM);
code = tNameExtractFullName(&pTableMetaInfo->name, pRight->tableName);
if (code != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
SJoinNode **rightNode = &pQueryInfo->tagCond.joinInfo.joinTables[index.tableIndex];
if (*rightNode == NULL) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
(*rightNode)->uid = pTableMetaInfo->pTableMeta->id.uid;
(*rightNode)->tagColId = pTagSchema2->colId;
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
if (!tscColumnExists(pTableMetaInfo->tagColList, &index)) {
tscColumnListInsert(pTableMetaInfo->tagColList, &index);
if (taosArrayGetSize(pTableMetaInfo->tagColList) > 1) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
}
}
int16_t rightIdx = index.tableIndex;
if (pTagSchema1->type != pTagSchema2->type) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
pTagCond->joinInfo.hasJoin = true;
if ((*leftNode)->tagJoin == NULL) {
(*leftNode)->tagJoin = taosArrayInit(2, sizeof(int16_t));
}
if ((*rightNode)->tagJoin == NULL) {
(*rightNode)->tagJoin = taosArrayInit(2, sizeof(int16_t));
}
taosArrayPush((*leftNode)->tagJoin, &rightIdx);
taosArrayPush((*rightNode)->tagJoin, &leftIdx);
pQueryInfo->tagCond.joinInfo.hasJoin = true;
return TSDB_CODE_SUCCESS;
}
static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr) {
if (pExpr == NULL) {
return TSDB_CODE_SUCCESS;
}
return checkAndSetJoinCondInfo(pCmd, pQueryInfo, pExpr);
}
static int32_t validateSQLExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList,
@ -3932,7 +4001,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
const char* msg1 = "table query cannot use tags filter";
const char* msg2 = "illegal column name";
const char* msg3 = "only one query time range allowed";
const char* msg4 = "only one join condition allowed";
const char* msg4 = "too many join tables";
const char* msg5 = "not support ordinary column join";
const char* msg6 = "only one query condition on tbname allowed";
const char* msg7 = "only in/like allowed in filter table name";
@ -3963,6 +4032,47 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_QUERY);
pCondExpr->tsJoin = true;
assert(index.tableIndex >= 0 && index.tableIndex < TSDB_MAX_JOIN_TABLE_NUM);
SJoinNode **leftNode = &pQueryInfo->tagCond.joinInfo.joinTables[index.tableIndex];
if (*leftNode == NULL) {
*leftNode = calloc(1, sizeof(SJoinNode));
if (*leftNode == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
}
int16_t leftIdx = index.tableIndex;
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if (getColumnIndexByName(pCmd, &pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
if (index.tableIndex < 0 || index.tableIndex >= TSDB_MAX_JOIN_TABLE_NUM) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
SJoinNode **rightNode = &pQueryInfo->tagCond.joinInfo.joinTables[index.tableIndex];
if (*rightNode == NULL) {
*rightNode = calloc(1, sizeof(SJoinNode));
if (*rightNode == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
}
int16_t rightIdx = index.tableIndex;
if ((*leftNode)->tsJoin == NULL) {
(*leftNode)->tsJoin = taosArrayInit(2, sizeof(int16_t));
}
if ((*rightNode)->tsJoin == NULL) {
(*rightNode)->tsJoin = taosArrayInit(2, sizeof(int16_t));
}
taosArrayPush((*leftNode)->tsJoin, &rightIdx);
taosArrayPush((*rightNode)->tsJoin, &leftIdx);
/*
* to release expression, e.g., m1.ts = m2.ts,
* since this expression is used to set the join query type
@ -4020,10 +4130,6 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
return TSDB_CODE_TSC_INVALID_SQL;
}
if (pCondExpr->pJoinExpr != NULL) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_QUERY;
ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pQueryInfo->msg);
*pExpr = NULL;
@ -4251,7 +4357,8 @@ static bool validateFilterExpr(SQueryInfo* pQueryInfo) {
static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr) {
const char* msg0 = "invalid timestamp";
const char* msg1 = "only one time stamp window allowed";
int32_t code = 0;
if (pExpr == NULL) {
return TSDB_CODE_SUCCESS;
}
@ -4261,8 +4368,11 @@ static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlE
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
getTimeRangeFromExpr(pCmd, pQueryInfo, pExpr->pLeft);
code = getTimeRangeFromExpr(pCmd, pQueryInfo, pExpr->pLeft);
if (code) {
return code;
}
return getTimeRangeFromExpr(pCmd, pQueryInfo, pExpr->pRight);
} else {
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
@ -4343,6 +4453,7 @@ static void cleanQueryExpr(SCondExpr* pCondExpr) {
}
}
/*
static void doAddJoinTagsColumnsIntoTagList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr* pCondExpr) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (QUERY_IS_JOIN_QUERY(pQueryInfo->type) && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
@ -4365,6 +4476,7 @@ static void doAddJoinTagsColumnsIntoTagList(SSqlCmd* pCmd, SQueryInfo* pQueryInf
tscColumnListInsert(pTableMetaInfo->tagColList, &index);
}
}
*/
static int32_t validateTagCondExpr(SSqlCmd* pCmd, tExprNode *p) {
const char *msg1 = "invalid tag operator";
@ -4508,6 +4620,102 @@ static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondE
return ret;
}
int32_t validateJoinNodes(SQueryInfo* pQueryInfo, SSqlObj* pSql) {
const char* msg1 = "timestamp required for join tables";
const char* msg2 = "tag required for join stables";
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
SJoinNode *node = pQueryInfo->tagCond.joinInfo.joinTables[i];
if (node == NULL || node->tsJoin == NULL || taosArrayGetSize(node->tsJoin) <= 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg1);
}
}
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
SJoinNode *node = pQueryInfo->tagCond.joinInfo.joinTables[i];
if (node == NULL || node->tagJoin == NULL || taosArrayGetSize(node->tagJoin) <= 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg2);
}
}
}
return TSDB_CODE_SUCCESS;
}
void mergeJoinNodesImpl(int8_t* r, int8_t* p, int16_t* tidx, SJoinNode** nodes, int32_t type) {
SJoinNode *node = nodes[*tidx];
SArray* arr = (type == 0) ? node->tsJoin : node->tagJoin;
size_t size = taosArrayGetSize(arr);
p[*tidx] = 1;
for (int32_t j = 0; j < size; j++) {
int16_t* idx = taosArrayGet(arr, j);
r[*idx] = 1;
if (p[*idx] == 0) {
mergeJoinNodesImpl(r, p, idx, nodes, type);
}
}
}
int32_t mergeJoinNodes(SQueryInfo* pQueryInfo, SSqlObj* pSql) {
const char* msg1 = "not all join tables have same timestamp";
const char* msg2 = "not all join tables have same tag";
int8_t r[TSDB_MAX_JOIN_TABLE_NUM] = {0};
int8_t p[TSDB_MAX_JOIN_TABLE_NUM] = {0};
for (int16_t i = 0; i < pQueryInfo->numOfTables; ++i) {
mergeJoinNodesImpl(r, p, &i, pQueryInfo->tagCond.joinInfo.joinTables, 0);
taosArrayClear(pQueryInfo->tagCond.joinInfo.joinTables[i]->tsJoin);
for (int32_t j = 0; j < TSDB_MAX_JOIN_TABLE_NUM; ++j) {
if (r[j]) {
taosArrayPush(pQueryInfo->tagCond.joinInfo.joinTables[i]->tsJoin, &j);
}
}
memset(r, 0, sizeof(r));
memset(p, 0, sizeof(p));
}
if (taosArrayGetSize(pQueryInfo->tagCond.joinInfo.joinTables[0]->tsJoin) != pQueryInfo->numOfTables) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg1);
}
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
for (int16_t i = 0; i < pQueryInfo->numOfTables; ++i) {
mergeJoinNodesImpl(r, p, &i, pQueryInfo->tagCond.joinInfo.joinTables, 1);
taosArrayClear(pQueryInfo->tagCond.joinInfo.joinTables[i]->tagJoin);
for (int32_t j = 0; j < TSDB_MAX_JOIN_TABLE_NUM; ++j) {
if (r[j]) {
taosArrayPush(pQueryInfo->tagCond.joinInfo.joinTables[i]->tagJoin, &j);
}
}
memset(r, 0, sizeof(r));
memset(p, 0, sizeof(p));
}
if (taosArrayGetSize(pQueryInfo->tagCond.joinInfo.joinTables[0]->tagJoin) != pQueryInfo->numOfTables) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg2);
}
}
return TSDB_CODE_SUCCESS;
}
int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql) {
if (pExpr == NULL) {
return TSDB_CODE_SUCCESS;
@ -4553,17 +4761,17 @@ int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql
// 4. get the table name query condition
if ((ret = getTablenameCond(&pSql->cmd, pQueryInfo, condExpr.pTableCond, &sb)) != TSDB_CODE_SUCCESS) {
return ret;
goto PARSE_WHERE_EXIT;
}
// 5. other column query condition
if ((ret = getColumnQueryCondInfo(&pSql->cmd, pQueryInfo, condExpr.pColumnCond, TK_AND)) != TSDB_CODE_SUCCESS) {
return ret;
goto PARSE_WHERE_EXIT;
}
// 6. join condition
if ((ret = getJoinCondInfo(&pSql->cmd, pQueryInfo, condExpr.pJoinExpr)) != TSDB_CODE_SUCCESS) {
return ret;
goto PARSE_WHERE_EXIT;
}
// 7. query condition for table name
@ -4571,12 +4779,29 @@ int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql
ret = setTableCondForSTableQuery(&pSql->cmd, pQueryInfo, getAccountId(pSql), condExpr.pTableCond, condExpr.tableCondIndex, &sb);
taosStringBuilderDestroy(&sb);
if (!validateFilterExpr(pQueryInfo)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg2);
if (ret) {
goto PARSE_WHERE_EXIT;
}
doAddJoinTagsColumnsIntoTagList(&pSql->cmd, pQueryInfo, &condExpr);
if (!validateFilterExpr(pQueryInfo)) {
ret = invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg2);
goto PARSE_WHERE_EXIT;
}
//doAddJoinTagsColumnsIntoTagList(&pSql->cmd, pQueryInfo, &condExpr);
if (condExpr.tsJoin) {
ret = validateJoinNodes(pQueryInfo, pSql);
if (ret) {
goto PARSE_WHERE_EXIT;
}
ret = mergeJoinNodes(pQueryInfo, pSql);
if (ret) {
goto PARSE_WHERE_EXIT;
}
}
PARSE_WHERE_EXIT:
cleanQueryExpr(&condExpr);
return ret;
@ -6818,7 +7043,6 @@ int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t i
const char* msg1 = "point interpolation query needs timestamp";
const char* msg2 = "fill only available for interval query";
const char* msg3 = "start(end) time of query range required or time range too large";
const char* msg4 = "illegal number of tables in from clause";
const char* msg5 = "too many columns in selection clause";
const char* msg6 = "too many tables in from clause";
const char* msg7 = "invalid table alias name";
@ -6855,14 +7079,11 @@ int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t i
}
size_t fromSize = taosArrayGetSize(pQuerySqlNode->from->tableList);
if (fromSize > TSDB_MAX_JOIN_TABLE_NUM) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
if (fromSize > TSDB_MAX_JOIN_TABLE_NUM * 2) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
pQueryInfo->command = TSDB_SQL_SELECT;
if (fromSize > 2) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
// set all query tables, which are maybe more than one.
for (int32_t i = 0; i < fromSize; ++i) {


@ -350,8 +350,8 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
taosMsleep(duration);
}
pSql->retryReason = rpcMsg->code;
rpcMsg->code = tscRenewTableMeta(pSql, 0);
// if there is an error occurring, proceed to the following error handling procedure.
if (rpcMsg->code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
taosReleaseRef(tscObjRef, handle);
@ -509,7 +509,7 @@ int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
pRetrieveMsg->free = htons(pQueryInfo->type);
pRetrieveMsg->qhandle = htobe64(pSql->res.qhandle);
pRetrieveMsg->qId = htobe64(pSql->res.qId);
// todo valid the vgroupId at the client side
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
@ -521,7 +521,7 @@ int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
assert(pVgroupInfo->vgroups[vgIndex].vgId > 0 && vgIndex < pTableMetaInfo->vgroupList->numOfVgroups);
pRetrieveMsg->header.vgId = htonl(pVgroupInfo->vgroups[vgIndex].vgId);
tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d, qhandle:%" PRIX64, pSql, pVgroupInfo->vgroups[vgIndex].vgId, vgIndex, pSql->res.qhandle);
tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d, qhandle:%" PRIX64, pSql, pVgroupInfo->vgroups[vgIndex].vgId, vgIndex, pSql->res.qId);
} else {
int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables);
assert(vgIndex >= 0 && vgIndex < numOfVgroups);
@ -529,12 +529,12 @@ int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SVgroupTableInfo* pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, vgIndex);
pRetrieveMsg->header.vgId = htonl(pTableIdList->vgInfo.vgId);
tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d, qhandle:%" PRIX64, pSql, pTableIdList->vgInfo.vgId, vgIndex, pSql->res.qhandle);
tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d, qId:%" PRIu64, pSql, pTableIdList->vgInfo.vgId, vgIndex, pSql->res.qId);
}
} else {
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
pRetrieveMsg->header.vgId = htonl(pTableMeta->vgId);
tscDebug("%p build fetch msg from only one vgroup, vgId:%d, qhandle:%" PRIX64, pSql, pTableMeta->vgId, pSql->res.qhandle);
tscDebug("%p build fetch msg from only one vgroup, vgId:%d, qId:%" PRIu64, pSql, pTableMeta->vgId, pSql->res.qId);
}
pSql->cmd.payloadLen = sizeof(SRetrieveTableMsg);
@ -614,7 +614,7 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql, int32_t clauseIndex) {
tableSerialize + sqlLen + 4096 + pQueryInfo->bufLen;
}
static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char *pMsg) {
static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char *pMsg, int32_t *succeed) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, 0);
TSKEY dfltKey = htobe64(pQueryMsg->window.skey);
@ -627,9 +627,14 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
assert(index >= 0);
SVgroupInfo* pVgroupInfo = NULL;
if (pTableMetaInfo->vgroupList->numOfVgroups > 0) {
if (pTableMetaInfo->vgroupList && pTableMetaInfo->vgroupList->numOfVgroups > 0) {
assert(index < pTableMetaInfo->vgroupList->numOfVgroups);
pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index];
} else {
tscError("%p No vgroup info found", pSql);
*succeed = 0;
return pMsg;
}
vgId = pVgroupInfo->vgId;
@ -955,8 +960,13 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pQueryMsg->secondStageOutput = 0;
}
int32_t succeed = 1;
// serialize the table info (sid, uid, tags)
pMsg = doSerializeTableInfo(pQueryMsg, pSql, pMsg);
pMsg = doSerializeTableInfo(pQueryMsg, pSql, pMsg, &succeed);
if (succeed == 0) {
return TSDB_CODE_TSC_APP_ERROR;
}
SSqlGroupbyExpr *pGroupbyExpr = &pQueryInfo->groupbyExpr;
if (pGroupbyExpr->numOfGroupCols > 0) {
@ -1339,6 +1349,23 @@ int32_t tscBuildUseDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_SUCCESS;
}
int32_t tscBuildSyncDbReplicaMsg(SSqlObj* pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SSyncDbMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("%p failed to malloc for query msg", pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SSyncDbMsg *pSyncMsg = (SSyncDbMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
tNameExtractFullName(&pTableMetaInfo->name, pSyncMsg->db);
pCmd->msgType = TSDB_MSG_TYPE_CM_SYNC_DB;
return TSDB_CODE_SUCCESS;
}
int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
STscObj *pObj = pSql->pTscObj;
SSqlCmd *pCmd = &pSql->cmd;
@ -1619,7 +1646,7 @@ int tscBuildRetrieveFromMgmtMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
SRetrieveTableMsg *pRetrieveMsg = (SRetrieveTableMsg*)pCmd->payload;
pRetrieveMsg->qhandle = htobe64(pSql->res.qhandle);
pRetrieveMsg->qId = htobe64(pSql->res.qId);
pRetrieveMsg->free = htons(pQueryInfo->type);
return TSDB_CODE_SUCCESS;
@ -2195,19 +2222,24 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) {
assert(pInfo->vgroupList != NULL);
pInfo->vgroupList->numOfVgroups = pVgroupMsg->numOfVgroups;
for (int32_t j = 0; j < pInfo->vgroupList->numOfVgroups; ++j) {
//just init, no need to lock
SVgroupInfo *pVgroups = &pInfo->vgroupList->vgroups[j];
if (pInfo->vgroupList->numOfVgroups <= 0) {
//tfree(pInfo->vgroupList);
tscError("%p empty vgroup info", pSql);
} else {
for (int32_t j = 0; j < pInfo->vgroupList->numOfVgroups; ++j) {
//just init, no need to lock
SVgroupInfo *pVgroups = &pInfo->vgroupList->vgroups[j];
SVgroupMsg *vmsg = &pVgroupMsg->vgroups[j];
pVgroups->vgId = htonl(vmsg->vgId);
pVgroups->numOfEps = vmsg->numOfEps;
SVgroupMsg *vmsg = &pVgroupMsg->vgroups[j];
pVgroups->vgId = htonl(vmsg->vgId);
pVgroups->numOfEps = vmsg->numOfEps;
assert(pVgroups->numOfEps >= 1 && pVgroups->vgId >= 1);
assert(pVgroups->numOfEps >= 1 && pVgroups->vgId >= 1);
for (int32_t k = 0; k < pVgroups->numOfEps; ++k) {
pVgroups->epAddr[k].port = htons(vmsg->epAddr[k].port);
pVgroups->epAddr[k].fqdn = strndup(vmsg->epAddr[k].fqdn, tListLen(vmsg->epAddr[k].fqdn));
for (int32_t k = 0; k < pVgroups->numOfEps; ++k) {
pVgroups->epAddr[k].port = htons(vmsg->epAddr[k].port);
pVgroups->epAddr[k].fqdn = strndup(vmsg->epAddr[k].fqdn, tListLen(vmsg->epAddr[k].fqdn));
}
}
}
@ -2233,7 +2265,7 @@ int tscProcessShowRsp(SSqlObj *pSql) {
pShow = (SShowRsp *)pRes->pRsp;
pShow->qhandle = htobe64(pShow->qhandle);
pRes->qhandle = pShow->qhandle;
pRes->qId = pShow->qhandle;
tscResetForNextRetrieve(pRes);
pMetaMsg = &(pShow->tableMeta);
@ -2415,11 +2447,12 @@ int tscProcessQueryRsp(SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;
SQueryTableRsp *pQuery = (SQueryTableRsp *)pRes->pRsp;
pQuery->qhandle = htobe64(pQuery->qhandle);
pRes->qhandle = pQuery->qhandle;
pQuery->qId = htobe64(pQuery->qId);
pRes->qId = pQuery->qId;
pRes->data = NULL;
tscResetForNextRetrieve(pRes);
tscDebug("%p query rsp received, qId:%"PRIu64, pSql, pRes->qId);
return 0;
}
@ -2477,7 +2510,8 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) {
}
pRes->row = 0;
tscDebug("%p numOfRows:%d, offset:%" PRId64 ", complete:%d", pSql, pRes->numOfRows, pRes->offset, pRes->completed);
tscDebug("%p numOfRows:%d, offset:%" PRId64 ", complete:%d, qId:%"PRIu64, pSql, pRes->numOfRows, pRes->offset,
pRes->completed, pRes->qId);
return 0;
}
@ -2736,6 +2770,7 @@ void tscInitMsgsFp() {
tscBuildMsg[TSDB_SQL_DROP_ACCT] = tscBuildDropUserAcctMsg;
tscBuildMsg[TSDB_SQL_DROP_DB] = tscBuildDropDbMsg;
tscBuildMsg[TSDB_SQL_DROP_FUNCTION] = tscBuildDropFuncMsg;
tscBuildMsg[TSDB_SQL_SYNC_DB_REPLICA] = tscBuildSyncDbReplicaMsg;
tscBuildMsg[TSDB_SQL_DROP_TABLE] = tscBuildDropTableMsg;
tscBuildMsg[TSDB_SQL_ALTER_USER] = tscBuildUserMsg;
tscBuildMsg[TSDB_SQL_CREATE_DNODE] = tscBuildCreateDnodeMsg;


@ -476,7 +476,7 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
if (pRes->qhandle == 0 ||
if (pRes->qId == 0 ||
pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED ||
pCmd->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT ||
pCmd->command == TSDB_SQL_INSERT) {
@ -508,7 +508,7 @@ int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
if (pRes->qhandle == 0 ||
if (pRes->qId == 0 ||
pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED ||
pCmd->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT ||
pCmd->command == TSDB_SQL_INSERT) {
@ -554,7 +554,7 @@ static bool tscKillQueryInDnode(SSqlObj* pSql) {
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;
if (pRes == NULL || pRes->qhandle == 0) {
if (pRes == NULL || pRes->qId == 0) {
return true;
}
@ -1050,7 +1050,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
* If qhandle is NOT set 0, the function of taos_free_result() will send message to server by calling tscProcessSql()
* to free connection, which may cause segment fault, when the parse phrase is not even successfully executed.
*/
pRes->qhandle = 0;
pRes->qId = 0;
free(str);
if (code != TSDB_CODE_SUCCESS) {


@ -149,7 +149,7 @@ static SSub* tscCreateSubscription(STscObj* pObj, const char* topic, const char*
}
strtolower(pSql->sqlstr, pSql->sqlstr);
pRes->qhandle = 0;
pRes->qId = 0;
pRes->numOfRows = 1;
code = tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE);
@ -448,7 +448,7 @@ SSqlObj* recreateSqlObj(SSub* pSub) {
return NULL;
}
pRes->qhandle = 0;
pRes->qId = 0;
pRes->numOfRows = 1;
int code = tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE);
@ -546,7 +546,7 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
uint32_t type = pQueryInfo->type;
tscFreeSqlResult(pSql);
pRes->numOfRows = 1;
pRes->qhandle = 0;
pRes->qId = 0;
pSql->cmd.command = TSDB_SQL_SELECT;
pQueryInfo->type = type;


@ -46,6 +46,13 @@ static int32_t tsCompare(int32_t order, int64_t left, int64_t right) {
}
static void skipRemainValue(STSBuf* pTSBuf, tVariant* tag1) {
STSElem el1 = tsBufGetElem(pTSBuf);
int32_t res = tVariantCompare(el1.tag, tag1);
if (res != 0) { // it is a record with new tag
return;
}
while (tsBufNextPos(pTSBuf)) {
STSElem el1 = tsBufGetElem(pTSBuf);
@ -118,123 +125,233 @@ static bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) {
static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJoinSupporter* pSupporter2, STimeWindow * win) {
static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
STSBuf* output1 = tsBufCreate(true, pQueryInfo->order.order);
STSBuf* output2 = tsBufCreate(true, pQueryInfo->order.order);
win->skey = INT64_MAX;
win->ekey = INT64_MIN;
SLimitVal* pLimit = &pQueryInfo->limit;
int32_t order = pQueryInfo->order.order;
int32_t joinNum = pSql->subState.numOfSub;
SMergeTsCtx ctxlist[TSDB_MAX_JOIN_TABLE_NUM] = {{0}};
SMergeTsCtx* ctxStack[TSDB_MAX_JOIN_TABLE_NUM] = {0};
int32_t slot = 0;
size_t tableNum = 0;
int16_t* tableMIdx = 0;
int32_t equalNum = 0;
int32_t stackidx = 0;
SMergeTsCtx* ctx = NULL;
SMergeTsCtx* pctx = NULL;
SMergeTsCtx* mainCtx = NULL;
STSElem cur;
STSElem prev;
SArray* tsCond = NULL;
int32_t mergeDone = 0;
SQueryInfo* pSubQueryInfo1 = tscGetQueryInfoDetail(&pSql->pSubs[0]->cmd, 0);
SQueryInfo* pSubQueryInfo2 = tscGetQueryInfoDetail(&pSql->pSubs[1]->cmd, 0);
for (int32_t i = 0; i < joinNum; ++i) {
STSBuf* output = tsBufCreate(true, pQueryInfo->order.order);
SQueryInfo* pSubQueryInfo = tscGetQueryInfoDetail(&pSql->pSubs[i]->cmd, 0);
pSubQueryInfo1->tsBuf = output1;
pSubQueryInfo2->tsBuf = output2;
pSubQueryInfo->tsBuf = output;
SJoinSupporter* pSupporter = pSql->pSubs[i]->param;
if (pSupporter->pTSBuf == NULL) {
tscDebug("%p at least one ts-comp is empty, 0 for secondary query after ts blocks intersecting", pSql);
return 0;
}
tsBufResetPos(pSupporter->pTSBuf);
if (!tsBufNextPos(pSupporter->pTSBuf)) {
tscDebug("%p input1 is empty, 0 for secondary query after ts blocks intersecting", pSql);
return 0;
}
tscDebug("%p sub:%p table idx:%d, input group number:%d", pSql, pSql->pSubs[i], i, pSupporter->pTSBuf->numOfGroups);
ctxlist[i].p = pSupporter;
ctxlist[i].res = output;
}
TSKEY st = taosGetTimestampUs();
// no result generated, return directly
if (pSupporter1->pTSBuf == NULL || pSupporter2->pTSBuf == NULL) {
tscDebug("%p at least one ts-comp is empty, 0 for secondary query after ts blocks intersecting", pSql);
return 0;
}
tsBufResetPos(pSupporter1->pTSBuf);
tsBufResetPos(pSupporter2->pTSBuf);
if (!tsBufNextPos(pSupporter1->pTSBuf)) {
tsBufFlush(output1);
tsBufFlush(output2);
tscDebug("%p input1 is empty, 0 for secondary query after ts blocks intersecting", pSql);
return 0;
}
if (!tsBufNextPos(pSupporter2->pTSBuf)) {
tsBufFlush(output1);
tsBufFlush(output2);
tscDebug("%p input2 is empty, 0 for secondary query after ts blocks intersecting", pSql);
return 0;
}
int64_t numOfInput1 = 1;
int64_t numOfInput2 = 1;
while(1) {
STSElem elem = tsBufGetElem(pSupporter1->pTSBuf);
// no data in pSupporter1 anymore, jump out of loop
if (!tsBufIsValidElem(&elem)) {
break;
for (int16_t tidx = 0; tidx < joinNum; tidx++) {
pctx = &ctxlist[tidx];
if (pctx->compared) {
continue;
}
// find the data in supporter2 with the same tag value
STSElem e2 = tsBufFindElemStartPosByTag(pSupporter2->pTSBuf, elem.tag);
assert(pctx->numOfInput == 0);
/**
* there are elements in pSupporter2 with the same tag, continue
*/
tVariant tag1 = {0};
tVariantAssign(&tag1, elem.tag);
tsCond = pQueryInfo->tagCond.joinInfo.joinTables[tidx]->tsJoin;
tableNum = taosArrayGetSize(tsCond);
assert(tableNum >= 2);
for (int32_t i = 0; i < tableNum; ++i) {
tableMIdx = taosArrayGet(tsCond, i);
SMergeTsCtx* tctx = &ctxlist[*tableMIdx];
tctx->compared = 1;
}
tableMIdx = taosArrayGet(tsCond, 0);
pctx = &ctxlist[*tableMIdx];
mainCtx = pctx;
while (1) {
pctx = mainCtx;
prev = tsBufGetElem(pctx->p->pTSBuf);
ctxStack[stackidx++] = pctx;
if (!tsBufIsValidElem(&prev)) {
break;
}
tVariant tag = {0};
tVariantAssign(&tag, prev.tag);
int32_t skipped = 0;
for (int32_t i = 1; i < tableNum; ++i) {
SMergeTsCtx* tctx = &ctxlist[i];
// find the data in supporter2 with the same tag value
STSElem e2 = tsBufFindElemStartPosByTag(tctx->p->pTSBuf, &tag);
if (!tsBufIsValidElem(&e2)) {
skipRemainValue(pctx->p->pTSBuf, &tag);
skipped = 1;
break;
}
}
if (skipped) {
slot = 0;
stackidx = 0;
continue;
}
tableMIdx = taosArrayGet(tsCond, ++slot);
equalNum = 1;
if (tsBufIsValidElem(&e2)) {
while (1) {
STSElem elem1 = tsBufGetElem(pSupporter1->pTSBuf);
STSElem elem2 = tsBufGetElem(pSupporter2->pTSBuf);
ctx = &ctxlist[*tableMIdx];
prev = tsBufGetElem(pctx->p->pTSBuf);
cur = tsBufGetElem(ctx->p->pTSBuf);
// data with current are exhausted
if (!tsBufIsValidElem(&elem1) || tVariantCompare(elem1.tag, &tag1) != 0) {
if (!tsBufIsValidElem(&prev) || tVariantCompare(prev.tag, &tag) != 0) {
break;
}
if (!tsBufIsValidElem(&elem2) || tVariantCompare(elem2.tag, &tag1) != 0) { // ignore all records with the same tag
skipRemainValue(pSupporter1->pTSBuf, &tag1);
if (!tsBufIsValidElem(&cur) || tVariantCompare(cur.tag, &tag) != 0) { // ignore all records with the same tag
break;
}
/*
* in case of stable query, limit/offset is not applied here. the limit/offset is applied to the
* final results which is acquired after the secondary merge of in the client.
*/
int32_t re = tsCompare(order, elem1.ts, elem2.ts);
if (re < 0) {
tsBufNextPos(pSupporter1->pTSBuf);
numOfInput1++;
} else if (re > 0) {
tsBufNextPos(pSupporter2->pTSBuf);
numOfInput2++;
} else {
ctxStack[stackidx++] = ctx;
int32_t ret = tsCompare(order, prev.ts, cur.ts);
if (ret == 0) {
if (++equalNum < tableNum) {
pctx = ctx;
if (++slot >= tableNum) {
slot = 0;
}
tableMIdx = taosArrayGet(tsCond, slot);
continue;
}
assert(stackidx == tableNum);
if (pLimit->offset == 0 || pQueryInfo->interval.interval > 0 || QUERY_IS_STABLE_QUERY(pQueryInfo->type)) {
if (win->skey > elem1.ts) {
win->skey = elem1.ts;
if (win->skey > prev.ts) {
win->skey = prev.ts;
}
if (win->ekey < prev.ts) {
win->ekey = prev.ts;
}
if (win->ekey < elem1.ts) {
win->ekey = elem1.ts;
for (int32_t i = 0; i < stackidx; ++i) {
SMergeTsCtx* tctx = ctxStack[i];
prev = tsBufGetElem(tctx->p->pTSBuf);
tsBufAppend(tctx->res, prev.id, prev.tag, (const char*)&prev.ts, sizeof(prev.ts));
}
tsBufAppend(output1, elem1.id, elem1.tag, (const char*)&elem1.ts, sizeof(elem1.ts));
tsBufAppend(output2, elem2.id, elem2.tag, (const char*)&elem2.ts, sizeof(elem2.ts));
} else {
pLimit->offset -= 1;//offset apply to projection?
}
tsBufNextPos(pSupporter1->pTSBuf);
numOfInput1++;
for (int32_t i = 0; i < stackidx; ++i) {
SMergeTsCtx* tctx = ctxStack[i];
if (!tsBufNextPos(tctx->p->pTSBuf) && tctx == mainCtx) {
mergeDone = 1;
}
tctx->numOfInput++;
}
tsBufNextPos(pSupporter2->pTSBuf);
numOfInput2++;
if (mergeDone) {
break;
}
stackidx = 0;
equalNum = 1;
ctxStack[stackidx++] = pctx;
} else if (ret > 0) {
if (!tsBufNextPos(ctx->p->pTSBuf) && ctx == mainCtx) {
mergeDone = 1;
break;
}
ctx->numOfInput++;
stackidx--;
} else {
stackidx--;
for (int32_t i = 0; i < stackidx; ++i) {
SMergeTsCtx* tctx = ctxStack[i];
if (!tsBufNextPos(tctx->p->pTSBuf) && tctx == mainCtx) {
mergeDone = 1;
}
tctx->numOfInput++;
}
if (mergeDone) {
break;
}
stackidx = 0;
equalNum = 1;
ctxStack[stackidx++] = pctx;
}
}
} else { // no data in pSupporter2, ignore current data in pSupporter2
skipRemainValue(pSupporter1->pTSBuf, &tag1);
if (mergeDone) {
break;
}
slot = 0;
stackidx = 0;
skipRemainValue(mainCtx->p->pTSBuf, &tag);
}
stackidx = 0;
slot = 0;
mergeDone = 0;
}
/*
@ -242,28 +359,32 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ
* 1. only one element
* 2. only one element for each tag.
*/
if (output1->tsOrder == -1) {
output1->tsOrder = TSDB_ORDER_ASC;
output2->tsOrder = TSDB_ORDER_ASC;
if (ctxlist[0].res->tsOrder == -1) {
for (int32_t i = 0; i < joinNum; ++i) {
ctxlist[i].res->tsOrder = TSDB_ORDER_ASC;
}
}
tsBufFlush(output1);
tsBufFlush(output2);
tsBufDestroy(pSupporter1->pTSBuf);
pSupporter1->pTSBuf = NULL;
tsBufDestroy(pSupporter2->pTSBuf);
pSupporter2->pTSBuf = NULL;
for (int32_t i = 0; i < joinNum; ++i) {
tsBufFlush(ctxlist[i].res);
tsBufDestroy(ctxlist[i].p->pTSBuf);
ctxlist[i].p->pTSBuf = NULL;
}
TSKEY et = taosGetTimestampUs();
tscDebug("%p input1:%" PRId64 ", input2:%" PRId64 ", final:%" PRId64 " in %d vnodes for secondary query after ts blocks "
"intersecting, skey:%" PRId64 ", ekey:%" PRId64 ", numOfVnode:%d, elapsed time:%" PRId64 " us",
pSql, numOfInput1, numOfInput2, output1->numOfTotal, output1->numOfGroups, win->skey, win->ekey,
tsBufGetNumOfGroup(output1), et - st);
return output1->numOfTotal;
for (int32_t i = 0; i < joinNum; ++i) {
tscDebug("%p sub:%p tblidx:%d, input:%" PRId64 ", final:%" PRId64 " in %d vnodes for secondary query after ts blocks "
"intersecting, skey:%" PRId64 ", ekey:%" PRId64 ", numOfVnode:%d, elapsed time:%" PRId64 " us",
pSql, pSql->pSubs[i], i, ctxlist[i].numOfInput, ctxlist[i].res->numOfTotal, ctxlist[i].res->numOfGroups, win->skey, win->ekey,
tsBufGetNumOfGroup(ctxlist[i].res), et - st);
}
return ctxlist[0].res->numOfTotal;
}
// todo handle failed to create sub query
SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t index) {
SJoinSupporter* pSupporter = calloc(1, sizeof(SJoinSupporter));
@ -768,76 +889,218 @@ static bool checkForDuplicateTagVal(SSchema* pColSchema, SJoinSupporter* p1, SSq
return true;
}
static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pParentSql, SArray** s1, SArray** s2) {
SJoinSupporter* p1 = pParentSql->pSubs[0]->param;
SJoinSupporter* p2 = pParentSql->pSubs[1]->param;
tscDebug("%p all subquery retrieve <tid, tags> complete, do tags match, %d, %d", pParentSql, p1->num, p2->num);
// sort according to the tag value
qsort(p1->pIdTagList, p1->num, p1->tagSize, tagValCompar);
qsort(p2->pIdTagList, p2->num, p2->tagSize, tagValCompar);
static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pParentSql, SArray* resList) {
int16_t joinNum = pParentSql->subState.numOfSub;
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
int16_t tagColId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid);
SSchema* pColSchema = tscGetColumnSchemaById(pTableMetaInfo->pTableMeta, tagColId);
SJoinSupporter* p0 = pParentSql->pSubs[0]->param;
SMergeCtx ctxlist[TSDB_MAX_JOIN_TABLE_NUM] = {{0}};
SMergeCtx* ctxStack[TSDB_MAX_JOIN_TABLE_NUM] = {0};
// int16_t for padding
int32_t size = p1->tagSize - sizeof(int16_t);
*s1 = taosArrayInit(p1->num, size);
*s2 = taosArrayInit(p2->num, size);
int32_t size = p0->tagSize - sizeof(int16_t);
if (!(checkForDuplicateTagVal(pColSchema, p1, pParentSql) && checkForDuplicateTagVal(pColSchema, p2, pParentSql))) {
return TSDB_CODE_QRY_DUP_JOIN_KEY;
}
SSchema* pColSchema = tscGetColumnSchemaById(pTableMetaInfo->pTableMeta, tagColId);
tscDebug("%p all subquery retrieve <tid, tags> complete, do tags match", pParentSql);
int32_t i = 0, j = 0;
while(i < p1->num && j < p2->num) {
STidTags* pp1 = (STidTags*) varDataVal(p1->pIdTagList + i * p1->tagSize);
STidTags* pp2 = (STidTags*) varDataVal(p2->pIdTagList + j * p2->tagSize);
assert(pp1->tid != 0 && pp2->tid != 0);
for (int32_t i = 0; i < joinNum; i++) {
SJoinSupporter* p = pParentSql->pSubs[i]->param;
int32_t ret = doCompare(pp1->tag, pp2->tag, pColSchema->type, pColSchema->bytes);
if (ret == 0) {
tscDebug("%p tag matched, vgId:%d, val:%d, tid:%d, uid:%"PRIu64", tid:%d, uid:%"PRIu64, pParentSql, pp1->vgId,
*(int*) pp1->tag, pp1->tid, pp1->uid, pp2->tid, pp2->uid);
taosArrayPush(*s1, pp1);
taosArrayPush(*s2, pp2);
j++;
i++;
} else if (ret > 0) {
j++;
} else {
i++;
ctxlist[i].p = p;
ctxlist[i].res = taosArrayInit(p->num, size);
tscDebug("Join %d - num:%d", i, p->num);
// sort according to the tag valu
qsort(p->pIdTagList, p->num, p->tagSize, tagValCompar);
if (!checkForDuplicateTagVal(pColSchema, p, pParentSql)) {
for (int32_t j = 0; j <= i; j++) {
taosArrayDestroy(ctxlist[j].res);
}
return TSDB_CODE_QRY_DUP_JOIN_KEY;
}
}
// reorganize the tid-tag value according to both the vgroup id and tag values
// sort according to the tag value
size_t t1 = taosArrayGetSize(*s1);
size_t t2 = taosArrayGetSize(*s2);
int32_t slot = 0;
size_t tableNum = 0;
int16_t* tableMIdx = 0;
int32_t equalNum = 0;
int32_t stackidx = 0;
int32_t mergeDone = 0;
SMergeCtx* ctx = NULL;
SMergeCtx* pctx = NULL;
STidTags* cur = NULL;
STidTags* prev = NULL;
SArray* tagCond = NULL;
qsort((*s1)->pData, t1, size, tidTagsCompar);
qsort((*s2)->pData, t2, size, tidTagsCompar);
for (int16_t tidx = 0; tidx < joinNum; tidx++) {
pctx = &ctxlist[tidx];
if (pctx->compared) {
continue;
}
#if 0
for(int32_t k = 0; k < t1; ++k) {
STidTags* p = (*s1)->pData + size * k;
printf("%d, tag:%s\n", p->vgId, ((tstr*)(p->tag))->data);
assert(pctx->idx == 0 && taosArrayGetSize(pctx->res) == 0);
tagCond = pQueryInfo->tagCond.joinInfo.joinTables[tidx]->tagJoin;
tableNum = taosArrayGetSize(tagCond);
assert(tableNum >= 2);
for (int32_t i = 0; i < tableNum; ++i) {
tableMIdx = taosArrayGet(tagCond, i);
SMergeCtx* tctx = &ctxlist[*tableMIdx];
tctx->compared = 1;
}
for (int32_t i = 0; i < tableNum; ++i) {
tableMIdx = taosArrayGet(tagCond, i);
SMergeCtx* tctx = &ctxlist[*tableMIdx];
if (tctx->p->num <= 0 || tctx->p->pIdTagList == NULL) {
mergeDone = 1;
break;
}
}
if (mergeDone) {
mergeDone = 0;
continue;
}
tableMIdx = taosArrayGet(tagCond, slot);
pctx = &ctxlist[*tableMIdx];
prev = (STidTags*) varDataVal(pctx->p->pIdTagList + pctx->idx * pctx->p->tagSize);
ctxStack[stackidx++] = pctx;
tableMIdx = taosArrayGet(tagCond, ++slot);
equalNum = 1;
while (1) {
ctx = &ctxlist[*tableMIdx];
cur = (STidTags*) varDataVal(ctx->p->pIdTagList + ctx->idx * ctx->p->tagSize);
assert(cur->tid != 0 && prev->tid != 0);
ctxStack[stackidx++] = ctx;
int32_t ret = doCompare(prev->tag, cur->tag, pColSchema->type, pColSchema->bytes);
if (ret == 0) {
if (++equalNum < tableNum) {
prev = cur;
pctx = ctx;
if (++slot >= tableNum) {
slot = 0;
}
tableMIdx = taosArrayGet(tagCond, slot);
continue;
}
tscDebug("%p tag matched, vgId:%d, val:%d, tid:%d, uid:%"PRIu64", tid:%d, uid:%"PRIu64, pParentSql, prev->vgId,
*(int*) prev->tag, prev->tid, prev->uid, cur->tid, cur->uid);
assert(stackidx == tableNum);
for (int32_t i = 0; i < stackidx; ++i) {
SMergeCtx* tctx = ctxStack[i];
prev = (STidTags*) varDataVal(tctx->p->pIdTagList + tctx->idx * tctx->p->tagSize);
taosArrayPush(tctx->res, prev);
}
for (int32_t i = 0; i < stackidx; ++i) {
SMergeCtx* tctx = ctxStack[i];
if (++tctx->idx >= tctx->p->num) {
mergeDone = 1;
break;
}
}
if (mergeDone) {
break;
}
stackidx = 0;
equalNum = 1;
prev = (STidTags*) varDataVal(pctx->p->pIdTagList + pctx->idx * pctx->p->tagSize);
ctxStack[stackidx++] = pctx;
} else if (ret > 0) {
stackidx--;
if (++ctx->idx >= ctx->p->num) {
break;
}
} else {
stackidx--;
for (int32_t i = 0; i < stackidx; ++i) {
SMergeCtx* tctx = ctxStack[i];
if (++tctx->idx >= tctx->p->num) {
mergeDone = 1;
break;
}
}
if (mergeDone) {
break;
}
stackidx = 0;
equalNum = 1;
prev = (STidTags*) varDataVal(pctx->p->pIdTagList + pctx->idx * pctx->p->tagSize);
ctxStack[stackidx++] = pctx;
}
}
slot = 0;
mergeDone = 0;
stackidx = 0;
}
for(int32_t k = 0; k < t1; ++k) {
STidTags* p = (*s2)->pData + size * k;
printf("%d, tag:%s\n", p->vgId, ((tstr*)(p->tag))->data);
}
#endif
for (int32_t i = 0; i < joinNum; ++i) {
// reorganize the tid-tag value according to both the vgroup id and tag values
// sort according to the tag value
size_t num = taosArrayGetSize(ctxlist[i].res);
qsort((ctxlist[i].res)->pData, num, size, tidTagsCompar);
tscDebug("%p tags match complete, result: %"PRIzu", %"PRIzu, pParentSql, t1, t2);
taosArrayPush(resList, &ctxlist[i].res);
tscDebug("%p tags match complete, result num: %"PRIzu, pParentSql, num);
}
return TSDB_CODE_SUCCESS;
}
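
Note: the loop above is an N-way merge over per-table tag lists that are each sorted by tag value; one head index (idx) per list advances from the smallest value until all heads agree, and each agreement is pushed into every table's res array. A minimal standalone sketch of that idea on plain sorted int arrays (MergeCtx and intersectSorted are illustrative names, not the tsc structures) is:

/* Sketch of the k-way intersection used above: every list is sorted, advance
 * whichever heads hold the current minimum, and emit a match only when all
 * k heads agree. Simplified ints instead of STidTags. */
#include <stdio.h>

typedef struct { const int *vals; int num; int idx; } MergeCtx;

/* returns the number of common values written to out */
static int intersectSorted(MergeCtx *ctx, int k, int *out, int outCap) {
  int matches = 0;
  for (;;) {
    for (int i = 0; i < k; ++i) {
      if (ctx[i].idx >= ctx[i].num) return matches;   // one list exhausted, done
    }
    int minVal = ctx[0].vals[ctx[0].idx];
    int maxVal = minVal;
    for (int i = 1; i < k; ++i) {
      int v = ctx[i].vals[ctx[i].idx];
      if (v < minVal) minVal = v;
      if (v > maxVal) maxVal = v;
    }
    if (minVal == maxVal) {                           // all heads equal -> match
      if (matches < outCap) out[matches] = minVal;
      matches++;
      for (int i = 0; i < k; ++i) ctx[i].idx++;       // advance every head
    } else {
      for (int i = 0; i < k; ++i) {                   // advance only the minima
        if (ctx[i].vals[ctx[i].idx] == minVal) ctx[i].idx++;
      }
    }
  }
}

int main(void) {
  const int a[] = {1, 3, 5, 7}, b[] = {3, 4, 5, 9}, c[] = {0, 3, 5, 7};
  MergeCtx ctx[3] = {{a, 4, 0}, {b, 4, 0}, {c, 4, 0}};
  int out[4];
  int n = intersectSorted(ctx, 3, out, 4);
  for (int i = 0; i < n; ++i) printf("%d ", out[i]);  // prints: 3 5
  printf("\n");
  return 0;
}
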
bool emptyTagList(SArray* resList, int32_t size) {
size_t rsize = taosArrayGetSize(resList);
if (rsize != size) {
return true;
}
for (int32_t i = 0; i < size; ++i) {
SArray** s = taosArrayGet(resList, i);
if (taosArrayGetSize(*s) <= 0) {
return true;
}
}
return false;
}
static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRows) {
SJoinSupporter* pSupporter = (SJoinSupporter*)param;
@ -939,19 +1202,19 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
return;
}
SArray *s1 = NULL, *s2 = NULL;
int32_t code = getIntersectionOfTableTuple(pQueryInfo, pParentSql, &s1, &s2);
SArray* resList = taosArrayInit(pParentSql->subState.numOfSub, sizeof(SArray *));
int32_t code = getIntersectionOfTableTuple(pQueryInfo, pParentSql, resList);
if (code != TSDB_CODE_SUCCESS) {
freeJoinSubqueryObj(pParentSql);
pParentSql->res.code = code;
tscAsyncResultOnError(pParentSql);
taosArrayDestroy(s1);
taosArrayDestroy(s2);
taosArrayDestroy(resList);
return;
}
if (taosArrayGetSize(s1) == 0 || taosArrayGetSize(s2) == 0) { // no results,return.
if (emptyTagList(resList, pParentSql->subState.numOfSub)) { // no results,return.
assert(pParentSql->fp != tscJoinQueryCallback);
tscDebug("%p tag intersect does not generated qualified tables for join, free all sub SqlObj and quit", pParentSql);
@ -963,37 +1226,34 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
(*pParentSql->fp)(pParentSql->param, pParentSql, 0);
} else {
// proceed to for ts_comp query
SSqlCmd* pSubCmd1 = &pParentSql->pSubs[0]->cmd;
SSqlCmd* pSubCmd2 = &pParentSql->pSubs[1]->cmd;
SQueryInfo* pQueryInfo1 = tscGetQueryInfoDetail(pSubCmd1, 0);
STableMetaInfo* pTableMetaInfo1 = tscGetMetaInfo(pQueryInfo1, 0);
tscBuildVgroupTableInfo(pParentSql, pTableMetaInfo1, s1);
SQueryInfo* pQueryInfo2 = tscGetQueryInfoDetail(pSubCmd2, 0);
STableMetaInfo* pTableMetaInfo2 = tscGetMetaInfo(pQueryInfo2, 0);
tscBuildVgroupTableInfo(pParentSql, pTableMetaInfo2, s2);
SSqlObj* psub1 = pParentSql->pSubs[0];
((SJoinSupporter*)psub1->param)->pVgroupTables = tscVgroupTableInfoDup(pTableMetaInfo1->pVgroupTables);
SSqlObj* psub2 = pParentSql->pSubs[1];
((SJoinSupporter*)psub2->param)->pVgroupTables = tscVgroupTableInfoDup(pTableMetaInfo2->pVgroupTables);
pParentSql->subState.numOfSub = 2;
memset(pParentSql->subState.states, 0, sizeof(pParentSql->subState.states[0]) * pParentSql->subState.numOfSub);
tscDebug("%p reset all sub states to 0", pParentSql);
for (int32_t m = 0; m < pParentSql->subState.numOfSub; ++m) {
SSqlObj* sub = pParentSql->pSubs[m];
issueTsCompQuery(sub, sub->param, pParentSql);
// proceed to for ts_comp query
SSqlCmd* pSubCmd = &pParentSql->pSubs[m]->cmd;
SArray** s = taosArrayGet(resList, m);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pSubCmd, 0);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
tscBuildVgroupTableInfo(pParentSql, pTableMetaInfo, *s);
SSqlObj* psub = pParentSql->pSubs[m];
((SJoinSupporter*)psub->param)->pVgroupTables = tscVgroupTableInfoDup(pTableMetaInfo->pVgroupTables);
memset(pParentSql->subState.states, 0, sizeof(pParentSql->subState.states[0]) * pParentSql->subState.numOfSub);
tscDebug("%p reset all sub states to 0", pParentSql);
issueTsCompQuery(psub, psub->param, pParentSql);
}
}
taosArrayDestroy(s1);
taosArrayDestroy(s2);
size_t rsize = taosArrayGetSize(resList);
for (int32_t i = 0; i < rsize; ++i) {
SArray** s = taosArrayGet(resList, i);
if (*s) {
taosArrayDestroy(*s);
}
}
taosArrayDestroy(resList);
}
static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRows) {
@ -1124,12 +1384,8 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
tscDebug("%p all subquery retrieve ts complete, do ts block intersect", pParentSql);
// proceeds to launched secondary query to retrieve final data
SJoinSupporter* p1 = pParentSql->pSubs[0]->param;
SJoinSupporter* p2 = pParentSql->pSubs[1]->param;
STimeWindow win = TSWINDOW_INITIALIZER;
int64_t num = doTSBlockIntersect(pParentSql, p1, p2, &win);
int64_t num = doTSBlockIntersect(pParentSql, &win);
if (num <= 0) { // no result during ts intersect
tscDebug("%p no results generated in ts intersection, free all sub SqlObj and quit", pParentSql);
freeJoinSubqueryObj(pParentSql);
@ -1584,7 +1840,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
SSqlCmd * pCmd = &pSql->cmd;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
pSql->res.qhandle = 0x1;
pSql->res.qId = 0x1;
assert(pSql->res.numOfRows == 0);
if (pSql->pSubs == NULL) {
@ -1639,6 +1895,8 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
pNewQueryInfo->limit.limit = -1;
pNewQueryInfo->limit.offset = 0;
pNewQueryInfo->order.orderColId = INT32_MIN;
// backup the data and clear it in the sqlcmd object
memset(&pNewQueryInfo->groupbyExpr, 0, sizeof(SSqlGroupbyExpr));
@ -2182,7 +2440,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
SColumnModel *pModel = NULL;
SColumnModel *pFinalModel = NULL;
pRes->qhandle = 0x1; // hack the qhandle check
pRes->qId = 0x1; // hack the qhandle check
const uint32_t nBufferSize = (1u << 16u); // 64KB
@ -2730,7 +2988,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
tscDebug("%p sub:%p query complete, ep:%s, vgId:%d, orderOfSub:%d, retrieve data", trsupport->pParentSql, pSql,
pVgroup->epAddr[0].fqdn, pVgroup->vgId, trsupport->subqueryIndex);
if (pSql->res.qhandle == 0) { // qhandle is NULL, code is TSDB_CODE_SUCCESS means no results generated from this vnode
if (pSql->res.qId == 0) { // qhandle is NULL, code is TSDB_CODE_SUCCESS means no results generated from this vnode
tscRetrieveFromDnodeCallBack(param, pSql, 0);
} else {
taos_fetch_rows_a(tres, tscRetrieveFromDnodeCallBack, param);

View File

@ -1279,6 +1279,34 @@ int32_t tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepco
return 0;
}
bool tscColumnExists(SArray* pColumnList, SColumnIndex* pColIndex) {
// ignore the tbname columnIndex to be inserted into source list
if (pColIndex->columnIndex < 0) {
return false;
}
size_t numOfCols = taosArrayGetSize(pColumnList);
int16_t col = pColIndex->columnIndex;
int32_t i = 0;
while (i < numOfCols) {
SColumn* pCol = taosArrayGetP(pColumnList, i);
if ((pCol->colIndex.columnIndex != col) || (pCol->colIndex.tableIndex != pColIndex->tableIndex)) {
++i;
continue;
} else {
break;
}
}
if (i >= numOfCols || numOfCols == 0) {
return false;
}
return true;
}
SColumn* tscColumnListInsert(SArray* pColumnList, SColumnIndex* pColIndex) {
// ignore the tbname columnIndex to be inserted into source list
if (pColIndex->columnIndex < 0) {
@ -1583,7 +1611,25 @@ int32_t tscTagCondCopy(STagCond* dest, const STagCond* src) {
dest->tbnameCond.uid = src->tbnameCond.uid;
dest->tbnameCond.len = src->tbnameCond.len;
memcpy(&dest->joinInfo, &src->joinInfo, sizeof(SJoinInfo));
dest->joinInfo.hasJoin = src->joinInfo.hasJoin;
for (int32_t i = 0; i < TSDB_MAX_JOIN_TABLE_NUM; ++i) {
if (src->joinInfo.joinTables[i]) {
dest->joinInfo.joinTables[i] = calloc(1, sizeof(SJoinNode));
memcpy(dest->joinInfo.joinTables[i], src->joinInfo.joinTables[i], sizeof(SJoinNode));
if (src->joinInfo.joinTables[i]->tsJoin) {
dest->joinInfo.joinTables[i]->tsJoin = taosArrayDup(src->joinInfo.joinTables[i]->tsJoin);
}
if (src->joinInfo.joinTables[i]->tagJoin) {
dest->joinInfo.joinTables[i]->tagJoin = taosArrayDup(src->joinInfo.joinTables[i]->tagJoin);
}
}
}
dest->relType = src->relType;
if (src->pCond == NULL) {
@ -1629,6 +1675,23 @@ void tscTagCondRelease(STagCond* pTagCond) {
taosArrayDestroy(pTagCond->pCond);
}
for (int32_t i = 0; i < TSDB_MAX_JOIN_TABLE_NUM; ++i) {
SJoinNode *node = pTagCond->joinInfo.joinTables[i];
if (node == NULL) {
continue;
}
if (node->tsJoin != NULL) {
taosArrayDestroy(node->tsJoin);
}
if (node->tagJoin != NULL) {
taosArrayDestroy(node->tagJoin);
}
tfree(node);
}
memset(pTagCond, 0, sizeof(STagCond));
}
@ -2318,16 +2381,21 @@ void tscDoQuery(SSqlObj* pSql) {
}
int16_t tscGetJoinTagColIdByUid(STagCond* pTagCond, uint64_t uid) {
if (pTagCond->joinInfo.left.uid == uid) {
return pTagCond->joinInfo.left.tagColId;
} else if (pTagCond->joinInfo.right.uid == uid) {
return pTagCond->joinInfo.right.tagColId;
} else {
assert(0);
return -1;
int32_t i = 0;
while (i < TSDB_MAX_JOIN_TABLE_NUM) {
SJoinNode* node = pTagCond->joinInfo.joinTables[i];
if (node && node->uid == uid) {
return node->tagColId;
}
i++;
}
assert(0);
return -1;
}
int16_t tscGetTagColIndexById(STableMeta* pTableMeta, int16_t colId) {
int32_t numOfTags = tscGetNumOfTags(pTableMeta);

View File

@ -53,6 +53,7 @@ enum {
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_ALTER_ACCT, "alter-acct" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_ALTER_TABLE, "alter-table" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_ALTER_DB, "alter-db" )
TSDB_DEFINE_SQL_TYPE(TSDB_SQL_SYNC_DB_REPLICA, "sync db-replica")
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_MNODE, "create-mnode" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DROP_MNODE, "drop-mnode" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_DNODE, "create-dnode" )
@ -90,13 +91,13 @@ enum {
*/
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RETRIEVE_EMPTY_RESULT, "retrieve-empty-result" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RESET_CACHE, "reset-cache" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SERV_STATUS, "serv-status" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CURRENT_DB, "current-db" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RESET_CACHE, "reset-cache" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SERV_STATUS, "serv-status" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CURRENT_DB, "current-db" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SERV_VERSION, "serv-version" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CLI_VERSION, "cli-version" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CLI_VERSION, "cli-version" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CURRENT_USER, "current-user ")
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CFG_LOCAL, "cfg-local" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CFG_LOCAL, "cfg-local" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_MAX, "max" )
};

View File

@ -71,7 +71,7 @@ int32_t tsMaxBinaryDisplayWidth = 30;
int32_t tsCompressMsgSize = -1;
// client
int32_t tsMaxSQLStringLen = TSDB_MAX_SQL_LEN;
int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN;
int8_t tsTscEnableRecordSql = 0;
// the maximum number of results for projection query on super table that are returned from

View File

@ -35,6 +35,10 @@ IF (TD_TOPIC)
TARGET_LINK_LIBRARIES(taosd topic)
ENDIF ()
IF (TD_MODULE AND TD_LINUX)
TARGET_LINK_LIBRARIES(taosd module dl)
ENDIF ()
SET(PREPARE_ENV_CMD "prepare_env_cmd")
SET(PREPARE_ENV_TARGET "prepare_env_target")
ADD_CUSTOM_COMMAND(OUTPUT ${PREPARE_ENV_CMD}

View File

@ -39,6 +39,13 @@
#include "dnodeMPeer.h"
#include "dnodeShell.h"
#include "dnodeTelemetry.h"
#include "module.h"
#if !defined(_MODULE) || !defined(_TD_LINUX)
int32_t moduleStart() { return 0; }
void moduleStop() {}
#endif
void *tsDnodeTmr = NULL;
static SRunStatus tsRunStatus = TSDB_RUN_STATUS_STOPPED;
@ -146,6 +153,7 @@ int32_t dnodeInitSystem() {
}
dnodeSetRunStatus(TSDB_RUN_STATUS_RUNING);
moduleStart();
dnodeReportStep("TDengine", "initialized successfully", 1);
dInfo("TDengine is initialized successfully");
@ -155,6 +163,7 @@ int32_t dnodeInitSystem() {
void dnodeCleanUpSystem() {
if (dnodeGetRunStatus() != TSDB_RUN_STATUS_STOPPED) {
moduleStop();
dnodeSetRunStatus(TSDB_RUN_STATUS_STOPPED);
dnodeCleanupTmr();
dnodeCleanupComponents();

View File

@ -43,6 +43,7 @@ int32_t dnodeInitServer() {
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = dnodeDispatchToVMgmtQueue;
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE] = dnodeDispatchToVMgmtQueue;
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_SYNC_VNODE] = dnodeDispatchToVMgmtQueue;
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = dnodeDispatchToVMgmtQueue;
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = dnodeDispatchToVMgmtQueue;
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeDispatchToVMgmtQueue;

View File

@ -50,6 +50,7 @@ int32_t dnodeInitShell() {
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_TP] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_FUNCTION] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_DB] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_SYNC_DB] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_TP] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_FUNCTION] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_DB] = dnodeDispatchToMWriteQueue;

View File

@ -30,6 +30,7 @@ static taos_queue tsVMgmtQueue = NULL;
static void * dnodeProcessMgmtQueue(void *param);
static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *pMsg);
static int32_t dnodeProcessAlterVnodeMsg(SRpcMsg *pMsg);
static int32_t dnodeProcessSyncVnodeMsg(SRpcMsg *pMsg);
static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *pMsg);
static int32_t dnodeProcessAlterStreamMsg(SRpcMsg *pMsg);
static int32_t dnodeProcessConfigDnodeMsg(SRpcMsg *pMsg);
@ -39,6 +40,7 @@ static int32_t (*dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *pMsg);
int32_t dnodeInitVMgmt() {
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = dnodeProcessCreateVnodeMsg;
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE] = dnodeProcessAlterVnodeMsg;
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_SYNC_VNODE] = dnodeProcessSyncVnodeMsg;
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = dnodeProcessDropVnodeMsg;
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = dnodeProcessAlterStreamMsg;
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeProcessConfigDnodeMsg;
@ -179,6 +181,13 @@ static int32_t dnodeProcessAlterVnodeMsg(SRpcMsg *rpcMsg) {
}
}
static int32_t dnodeProcessSyncVnodeMsg(SRpcMsg *rpcMsg) {
SSyncVnodeMsg *pSyncVnode = rpcMsg->pCont;
pSyncVnode->vgId = htonl(pSyncVnode->vgId);
return vnodeSync(pSyncVnode->vgId);
}
static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *rpcMsg) {
SDropVnodeMsg *pDrop = rpcMsg->pCont;
pDrop->vgId = htonl(pDrop->vgId);

View File

@ -202,10 +202,11 @@ static void dnodeProcessStatusRsp(SRpcMsg *pMsg) {
char clusterId[TSDB_CLUSTER_ID_LEN];
dnodeGetClusterId(clusterId);
if (clusterId[0] != '\0') {
dError("exit zombie dropped dnode");
exit(EXIT_FAILURE);
dError("exit zombie dropped dnode");
exit(EXIT_FAILURE);
}
}
taosTmrReset(dnodeSendStatusMsg, tsStatusInterval * 1000, NULL, tsDnodeTmr, &tsStatusTimer);
return;
}

src/inc/module.h (new file, 30 lines)
View File

@ -0,0 +1,30 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_MODULE
#define TDENGINE_MODULE
#ifdef __cplusplus
extern "C" {
#endif
int32_t moduleStart();
void moduleStop();
#ifdef __cplusplus
}
#endif
#endif
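
module.h only declares the two hooks; when _MODULE or _TD_LINUX is not defined, dnodeMain.c falls back to the no-op stubs shown earlier, and when TD_MODULE and TD_LINUX are both set, taosd links a separate module library (see the CMake change above). What that library contains is not part of this diff; a purely hypothetical implementation matching the declared signatures might look like the sketch below, where everything except the two exported symbols is illustrative.

/* Hypothetical module.c, built only in a TD_MODULE configuration.
 * Only moduleStart/moduleStop come from module.h; the body is made up. */
#include <stdint.h>
#include <stdio.h>
#include "module.h"

static int32_t tsModuleStarted = 0;   // illustrative internal state

int32_t moduleStart() {
  if (tsModuleStarted) return 0;
  tsModuleStarted = 1;
  printf("module started\n");         // real code would initialize plugin resources
  return 0;                           // 0 means success, mirroring the stub
}

void moduleStop() {
  if (!tsModuleStarted) return;
  tsModuleStarted = 0;
  printf("module stopped\n");         // real code would release plugin resources
}
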

View File

@ -38,7 +38,7 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryTableMs
* @param qinfo
* @return
*/
bool qTableQuery(qinfo_t qinfo);
bool qTableQuery(qinfo_t qinfo, uint64_t *qId);
/**
* Retrieve the produced results information, if current query is not paused or completed,

View File

@ -262,7 +262,7 @@ do { \
#define TSDB_MIN_TABLES 4
#define TSDB_MAX_TABLES 10000000
#define TSDB_DEFAULT_TABLES 1000000
#define TSDB_TABLES_STEP 1000
#define TSDB_TABLES_STEP 100
#define TSDB_MIN_DAYS_PER_FILE 1
#define TSDB_MAX_DAYS_PER_FILE 3650
@ -320,7 +320,7 @@ do { \
#define TSDB_MAX_DB_QUORUM_OPTION 2
#define TSDB_DEFAULT_DB_QUORUM_OPTION 1
#define TSDB_MAX_JOIN_TABLE_NUM 5
#define TSDB_MAX_JOIN_TABLE_NUM 10
#define TSDB_MAX_UNION_CLAUSE 5
#define TSDB_MAX_BINARY_LEN (TSDB_MAX_BYTES_PER_ROW-TSDB_KEYSIZE)

View File

@ -59,6 +59,7 @@ TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_DROP_STABLE, "drop-stable" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_ALTER_STREAM, "alter-stream" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_CONFIG_DNODE, "config-dnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_ALTER_VNODE, "alter-vnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_SYNC_VNODE, "sync-vnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_CREATE_MNODE, "create-mnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY6, "dummy6" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY7, "dummy7" )
@ -79,6 +80,7 @@ TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_DB, "drop-db" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_FUNCTION, "drop-function" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_USE_DB, "use-db" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_ALTER_DB, "alter-db" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_SYNC_DB, "sync-db-replica" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CREATE_TABLE, "create-table" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_TABLE, "drop-table" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_ALTER_TABLE, "alter-table" )
@ -391,7 +393,7 @@ typedef struct {
typedef struct {
int32_t vgId;
} SDropVnodeMsg;
} SDropVnodeMsg, SSyncVnodeMsg;
typedef struct SColIndex {
int16_t colId; // column id
@ -519,12 +521,13 @@ typedef struct {
typedef struct {
int32_t code;
uint64_t qhandle; // query handle
union{uint64_t qhandle; uint64_t qId;}; // query handle
} SQueryTableRsp;
// todo: the show handle should be replaced with id
typedef struct {
SMsgHead header;
uint64_t qhandle;
union{uint64_t qhandle; uint64_t qId;}; // query handle
uint16_t free;
} SRetrieveTableMsg;
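
The qhandle-to-qId rename in these message structs relies on an anonymous union so that both names alias the same 8 bytes and existing readers keep working. A standalone illustration of that aliasing trick, with a made-up struct name (SDemoRsp) and assuming a compiler that accepts C11 anonymous union members:

/* SDemoRsp is not TDengine code; it only demonstrates the union-based
 * field rename used in SQueryTableRsp/SRetrieveTableMsg above. */
#include <stdint.h>
#include <stdio.h>

typedef struct {
  int32_t code;
  union { uint64_t qhandle; uint64_t qId; };   // old and new name, same storage
} SDemoRsp;

int main(void) {
  SDemoRsp rsp = {0};
  rsp.qId = 0x1;                                          // new code writes qId
  printf("%llu\n", (unsigned long long)rsp.qhandle);      // old readers still see 1
  return 0;
}
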
@ -613,7 +616,7 @@ typedef struct {
typedef struct {
char db[TSDB_TABLE_FNAME_LEN];
uint8_t ignoreNotExists;
} SDropDbMsg, SUseDbMsg;
} SDropDbMsg, SUseDbMsg, SSyncDbMsg;
// IMPORTANT: sizeof(SVnodeStatisticInfo) should not exceed
// TSDB_FILE_HEADER_LEN/4 - TSDB_FILE_HEADER_VERSION_SIZE

View File

@ -245,7 +245,7 @@ typedef struct {
* @param qinfo query info handle from query processor
* @return
*/
TsdbQueryHandleT *tsdbQueryTables(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *tableInfoGroup, void *qinfo,
TsdbQueryHandleT *tsdbQueryTables(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *tableInfoGroup, uint64_t qId,
SMemRef *pRef);
/**
@ -258,7 +258,7 @@ TsdbQueryHandleT *tsdbQueryTables(STsdbRepo *tsdb, STsdbQueryCond *pCond, STable
* @param tableInfo table list.
* @return
*/
TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *tableInfo, void *qinfo,
TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *tableInfo, uint64_t qId,
SMemRef *pRef);
/**
@ -277,7 +277,7 @@ SArray *tsdbGetQueriedTableList(TsdbQueryHandleT *pHandle);
* @return
*/
TsdbQueryHandleT tsdbQueryRowsInExternalWindow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList,
void *qinfo, SMemRef *pRef);
uint64_t qId, SMemRef *pRef);
/**

View File

@ -156,56 +156,58 @@
#define TK_NOW 137
#define TK_RESET 138
#define TK_QUERY 139
#define TK_ADD 140
#define TK_COLUMN 141
#define TK_TAG 142
#define TK_CHANGE 143
#define TK_SET 144
#define TK_KILL 145
#define TK_CONNECTION 146
#define TK_STREAM 147
#define TK_COLON 148
#define TK_ABORT 149
#define TK_AFTER 150
#define TK_ATTACH 151
#define TK_BEFORE 152
#define TK_BEGIN 153
#define TK_CASCADE 154
#define TK_CLUSTER 155
#define TK_CONFLICT 156
#define TK_COPY 157
#define TK_DEFERRED 158
#define TK_DELIMITERS 159
#define TK_DETACH 160
#define TK_EACH 161
#define TK_END 162
#define TK_EXPLAIN 163
#define TK_FAIL 164
#define TK_FOR 165
#define TK_IGNORE 166
#define TK_IMMEDIATE 167
#define TK_INITIALLY 168
#define TK_INSTEAD 169
#define TK_MATCH 170
#define TK_KEY 171
#define TK_OF 172
#define TK_RAISE 173
#define TK_REPLACE 174
#define TK_RESTRICT 175
#define TK_ROW 176
#define TK_STATEMENT 177
#define TK_TRIGGER 178
#define TK_VIEW 179
#define TK_SEMI 180
#define TK_NONE 181
#define TK_PREV 182
#define TK_LINEAR 183
#define TK_IMPORT 184
#define TK_TBNAME 185
#define TK_JOIN 186
#define TK_INSERT 187
#define TK_INTO 188
#define TK_VALUES 189
#define TK_SYNCDB 140
#define TK_ADD 141
#define TK_COLUMN 142
#define TK_TAG 143
#define TK_CHANGE 144
#define TK_SET 145
#define TK_KILL 146
#define TK_CONNECTION 147
#define TK_STREAM 148
#define TK_COLON 149
#define TK_ABORT 150
#define TK_AFTER 151
#define TK_ATTACH 152
#define TK_BEFORE 153
#define TK_BEGIN 154
#define TK_CASCADE 155
#define TK_CLUSTER 156
#define TK_CONFLICT 157
#define TK_COPY 158
#define TK_DEFERRED 159
#define TK_DELIMITERS 160
#define TK_DETACH 161
#define TK_EACH 162
#define TK_END 163
#define TK_EXPLAIN 164
#define TK_FAIL 165
#define TK_FOR 166
#define TK_IGNORE 167
#define TK_IMMEDIATE 168
#define TK_INITIALLY 169
#define TK_INSTEAD 170
#define TK_MATCH 171
#define TK_KEY 172
#define TK_OF 173
#define TK_RAISE 174
#define TK_REPLACE 175
#define TK_RESTRICT 176
#define TK_ROW 177
#define TK_STATEMENT 178
#define TK_TRIGGER 179
#define TK_VIEW 180
#define TK_SEMI 181
#define TK_NONE 182
#define TK_PREV 183
#define TK_LINEAR 184
#define TK_IMPORT 185
#define TK_TBNAME 186
#define TK_JOIN 187
#define TK_INSERT 188
#define TK_INTO 189
#define TK_VALUES 190

View File

@ -60,6 +60,7 @@ int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg);
int32_t vnodeDrop(int32_t vgId);
int32_t vnodeOpen(int32_t vgId);
int32_t vnodeAlter(void *pVnode, SCreateVnodeMsg *pVnodeCfg);
int32_t vnodeSync(int32_t vgId);
int32_t vnodeClose(int32_t vgId);
// vnodeMgmt
@ -89,4 +90,4 @@ int32_t vnodeProcessRead(void *pVnode, SVReadMsg *pRead);
}
#endif
#endif
#endif

View File

@ -74,6 +74,7 @@ void source_file(TAOS* con, char* fptr);
void source_dir(TAOS* con, SShellArguments* args);
void shellCheck(TAOS* con, SShellArguments* args);
void get_history_path(char* history);
void shellCheck(TAOS* con, SShellArguments* args);
void cleanup_handler(void* arg);
void exitShell();
int shellDumpResult(TAOS_RES* con, char* fname, int* error_no, bool printMode);

View File

@ -132,7 +132,6 @@ TAOS *shellInit(SShellArguments *args) {
return con;
}
static bool isEmptyCommand(const char* cmd) {
for (char c = *cmd++; c != 0; c = *cmd++) {
if (c != ' ' && c != '\t' && c != ';') {

View File

@ -3,6 +3,55 @@ PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
FIND_PACKAGE(Git)
IF (GIT_FOUND)
MESSAGE("Git found")
EXECUTE_PROCESS(
COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -n 1 ${CMAKE_CURRENT_LIST_DIR}/taosdemo.c
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_COMMIT)
EXECUTE_PROCESS(
COMMAND bash "-c" "echo '${TAOSDEMO_COMMIT}' | awk '{print $1}' | cut -c -9"
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_COMMIT_SHA1)
EXECUTE_PROCESS(
COMMAND ${GIT_EXECUTABLE} status -z -s ${CMAKE_CURRENT_LIST_DIR}/taosdemo.c
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_STATUS)
EXECUTE_PROCESS(
COMMAND bash "-c" "echo '${TAOSDEMO_STATUS}' | awk '{print $1}'"
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_STATUS)
MESSAGE("taosdemo.c status: " ${TAOSDEMO_STATUS})
ELSE()
MESSAGE("Git not found")
SET(TAOSDEMO_COMMIT_SHA1 "unknown")
SET(TAOSDEMO_STATUS "unknown")
ENDIF (GIT_FOUND)
STRING(STRIP ${TAOSDEMO_COMMIT_SHA1} TAOSDEMO_COMMIT_SHA1)
MESSAGE("taosdemo's latest commit in short is:" ${TAOSDEMO_COMMIT_SHA1})
STRING(STRIP ${TAOSDEMO_STATUS} TAOSDEMO_STATUS)
IF (TAOSDEMO_STATUS MATCHES "M")
SET(TAOSDEMO_STATUS "modified")
ELSE()
SET(TAOSDEMO_STATUS "")
ENDIF ()
MESSAGE("taosdemo's status is:" ${TAOSDEMO_STATUS})
ADD_DEFINITIONS(-DTAOSDEMO_COMMIT_SHA1="${TAOSDEMO_COMMIT_SHA1}")
ADD_DEFINITIONS(-DTAOSDEMO_STATUS="${TAOSDEMO_STATUS}")
MESSAGE("VERNUMBER is:" ${VERNUMBER})
IF ("${VERNUMBER}" STREQUAL "")
SET(TD_VERSION_NUMBER "TDengine-version-unknown")
ELSE()
SET(TD_VERSION_NUMBER ${VERNUMBER})
ENDIF ()
MESSAGE("TD_VERSION_NUMBER is:" ${TD_VERSION_NUMBER})
ADD_DEFINITIONS(-DTD_VERNUMBER="${TD_VERSION_NUMBER}")
IF (TD_LINUX)
AUX_SOURCE_DIRECTORY(. SRC)
ADD_EXECUTABLE(taosdemo ${SRC})

View File

@ -41,7 +41,7 @@
"insert_mode": "taosc",
"insert_rows": 1000,
"multi_thread_write_one_tbl": "no",
"rows_per_tbl": 20,
"interlace_rows": 20,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,

View File

@ -41,7 +41,7 @@
"insert_mode": "taosc",
"insert_rows": 100000,
"multi_thread_write_one_tbl": "no",
"rows_per_tbl": 0,
"interlace_rows": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,

File diff suppressed because it is too large

View File

@ -49,6 +49,7 @@ void mnodeRemoveTableFromVgroup(SVgObj *pVgroup, SCTableObj *pTable);
void mnodeSendDropVnodeMsg(int32_t vgId, SRpcEpSet *epSet, void *ahandle);
void mnodeSendCreateVgroupMsg(SVgObj *pVgroup, void *ahandle);
void mnodeSendAlterVgroupMsg(SVgObj *pVgroup);
void mnodeSendSyncVgroupMsg(SVgObj *pVgroup);
SRpcEpSet mnodeGetEpSetFromVgroup(SVgObj *pVgroup);
SRpcEpSet mnodeGetEpSetFromIp(char *ep);

View File

@ -50,7 +50,7 @@ static int32_t mnodeGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn
static int32_t mnodeRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void *pConn);
static int32_t mnodeProcessCreateDbMsg(SMnodeMsg *pMsg);
static int32_t mnodeProcessDropDbMsg(SMnodeMsg *pMsg);
static int32_t mnodeProcessSyncDbMsg(SMnodeMsg *pMsg);
int32_t mnodeProcessAlterDbMsg(SMnodeMsg *pMsg);
#ifndef _TOPIC
@ -179,6 +179,7 @@ int32_t mnodeInitDbs() {
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_CREATE_DB, mnodeProcessCreateDbMsg);
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_ALTER_DB, mnodeProcessAlterDbMsg);
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_DROP_DB, mnodeProcessDropDbMsg);
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_SYNC_DB, mnodeProcessSyncDbMsg);
mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_DB, mnodeGetDbMeta);
mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_DB, mnodeRetrieveDbs);
mnodeAddShowFreeIterHandle(TSDB_MGMT_TABLE_DB, mnodeCancelGetNextDb);
@ -1188,6 +1189,46 @@ static int32_t mnodeProcessDropDbMsg(SMnodeMsg *pMsg) {
return mnodeDropDb(pMsg);
}
static int32_t mnodeSyncDb(SDbObj *pDb, SMnodeMsg *pMsg) {
void *pIter = NULL;
SVgObj *pVgroup = NULL;
while (1) {
pIter = mnodeGetNextVgroup(pIter, &pVgroup);
if (pVgroup == NULL) break;
if (pVgroup->pDb == pDb) {
mnodeSendSyncVgroupMsg(pVgroup);
}
mnodeDecVgroupRef(pVgroup);
}
mLInfo("db:%s, is synced by %s", pDb->name, mnodeGetUserFromMsg(pMsg));
return TSDB_CODE_SUCCESS;
}
static int32_t mnodeProcessSyncDbMsg(SMnodeMsg *pMsg) {
SSyncDbMsg *pSyncDb = pMsg->rpcMsg.pCont;
mDebug("db:%s, syncdb is received from thandle:%p, ignore:%d", pSyncDb->db, pMsg->rpcMsg.handle, pSyncDb->ignoreNotExists);
if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDb(pSyncDb->db);
if (pMsg->pDb == NULL) {
if (pSyncDb->ignoreNotExists) {
mDebug("db:%s, db is not exist, treat as success", pSyncDb->db);
return TSDB_CODE_SUCCESS;
} else {
mError("db:%s, failed to sync, invalid db", pSyncDb->db);
return TSDB_CODE_MND_INVALID_DB;
}
}
if (pMsg->pDb->status != TSDB_DB_STATUS_READY) {
mError("db:%s, status:%d, in dropping", pSyncDb->db, pMsg->pDb->status);
return TSDB_CODE_MND_DB_IN_DROPPING;
}
return mnodeSyncDb(pMsg->pDb, pMsg);
}
void mnodeDropAllDbs(SAcctObj *pAcct) {
int32_t numOfDbs = 0;
SDbObj *pDb = NULL;

View File

@ -60,6 +60,7 @@ static int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *p
static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pConn);
static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg);
static void mnodeProcessAlterVnodeRsp(SRpcMsg *rpcMsg);
static void mnodeProcessSyncVnodeRsp(SRpcMsg *rpcMsg);
static void mnodeProcessDropVnodeRsp(SRpcMsg *rpcMsg);
static int32_t mnodeProcessVnodeCfgMsg(SMnodeMsg *pMsg) ;
static void mnodeSendDropVgroupMsg(SVgObj *pVgroup, void *ahandle);
@ -236,6 +237,7 @@ int32_t mnodeInitVgroups() {
mnodeAddShowFreeIterHandle(TSDB_MGMT_TABLE_VGROUP, mnodeCancelGetNextVgroup);
mnodeAddPeerRspHandle(TSDB_MSG_TYPE_MD_CREATE_VNODE_RSP, mnodeProcessCreateVnodeRsp);
mnodeAddPeerRspHandle(TSDB_MSG_TYPE_MD_ALTER_VNODE_RSP, mnodeProcessAlterVnodeRsp);
mnodeAddPeerRspHandle(TSDB_MSG_TYPE_MD_ALTER_VNODE_RSP, mnodeProcessSyncVnodeRsp);
mnodeAddPeerRspHandle(TSDB_MSG_TYPE_MD_DROP_VNODE_RSP, mnodeProcessDropVnodeRsp);
mnodeAddPeerMsgHandle(TSDB_MSG_TYPE_DM_CONFIG_VNODE, mnodeProcessVnodeCfgMsg);
@ -967,6 +969,38 @@ void mnodeSendAlterVgroupMsg(SVgObj *pVgroup) {
}
}
static SSyncVnodeMsg *mnodeBuildSyncVnodeMsg(int32_t vgId) {
SSyncVnodeMsg *pSyncVnode = rpcMallocCont(sizeof(SSyncVnodeMsg));
if (pSyncVnode == NULL) return NULL;
pSyncVnode->vgId = htonl(vgId);
return pSyncVnode;
}
static void mnodeSendSyncVnodeMsg(SVgObj *pVgroup, SRpcEpSet *epSet) {
SSyncVnodeMsg *pSyncVnode = mnodeBuildSyncVnodeMsg(pVgroup->vgId);
SRpcMsg rpcMsg = {
.ahandle = NULL,
.pCont = pSyncVnode,
.contLen = pSyncVnode ? sizeof(SSyncVnodeMsg) : 0,
.code = 0,
.msgType = TSDB_MSG_TYPE_MD_SYNC_VNODE
};
dnodeSendMsgToDnode(epSet, &rpcMsg);
}
void mnodeSendSyncVgroupMsg(SVgObj *pVgroup) {
mDebug("vgId:%d, send sync all vnodes msg, numOfVnodes:%d db:%s", pVgroup->vgId, pVgroup->numOfVnodes,
pVgroup->dbName);
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
SRpcEpSet epSet = mnodeGetEpSetFromIp(pVgroup->vnodeGid[i].pDnode->dnodeEp);
mDebug("vgId:%d, index:%d, send sync vnode msg to dnode %s", pVgroup->vgId, i,
pVgroup->vnodeGid[i].pDnode->dnodeEp);
mnodeSendSyncVnodeMsg(pVgroup, &epSet);
}
}
static void mnodeSendCreateVnodeMsg(SVgObj *pVgroup, SRpcEpSet *epSet, void *ahandle) {
SCreateVnodeMsg *pCreate = mnodeBuildVnodeMsg(pVgroup);
SRpcMsg rpcMsg = {
@ -994,6 +1028,10 @@ static void mnodeProcessAlterVnodeRsp(SRpcMsg *rpcMsg) {
mDebug("alter vnode rsp received");
}
static void mnodeProcessSyncVnodeRsp(SRpcMsg *rpcMsg) {
mDebug("sync vnode rsp received");
}
static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg) {
if (rpcMsg->ahandle == NULL) return;

View File

@ -72,7 +72,7 @@ typedef struct SDiskbasedResultBuf {
bool comp; // compressed before flushed to disk
int32_t nextPos; // next page flush position
const void* handle; // for debug purpose
uint64_t qId; // for debug purpose
SResultBufStatis statis;
} SDiskbasedResultBuf;
@ -88,7 +88,7 @@ typedef struct SDiskbasedResultBuf {
* @param handle
* @return
*/
int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t pagesize, int32_t inMemBufSize, const void* handle);
int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t pagesize, int32_t inMemBufSize, uint64_t qId);
/**
*

View File

@ -25,6 +25,7 @@
} while (0)
#define GET_RES_WINDOW_KEY_LEN(_l) ((_l) + sizeof(uint64_t))
#define GET_QID(_r) (((SQInfo*)((_r)->qinfo))->qId)
#define curTimeWindowIndex(_winres) ((_winres)->curIndex)
#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!(sq)))? (_q)->pExpr1[1].base.arg->argValue.i64:1)

View File

@ -730,6 +730,9 @@ expritem(A) ::= . {A = 0;}
///////////////////////////////////reset query cache//////////////////////////////////////
cmd ::= RESET QUERY CACHE. { setDCLSqlElems(pInfo, TSDB_SQL_RESET_CACHE, 0);}
///////////////////////////////////sync replica database//////////////////////////////////
cmd ::= SYNCDB ids(X) REPLICA.{ setDCLSqlElems(pInfo, TSDB_SQL_SYNC_DB_REPLICA, 1, &X);}
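
With this grammar rule the new statement can be issued from any client. A hedged usage sketch through the C connector follows; the connection parameters and the database name db_test are assumptions for the example, and only taos_connect/taos_query/taos_errno/taos_errstr/taos_free_result/taos_close are taken from the public client API (link with -ltaos).

/* Illustrative only: trigger the new SYNCDB ... REPLICA path from a client. */
#include <stdio.h>
#include <taos.h>

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (conn == NULL) return 1;

  TAOS_RES *res = taos_query(conn, "SYNCDB db_test REPLICA");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "syncdb failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  taos_close(conn);
  return 0;
}
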
///////////////////////////////////ALTER TABLE statement//////////////////////////////////
cmd ::= ALTER TABLE ids(X) cpxName(F) ADD COLUMN columnlist(A). {
X.n += F.n;

View File

@ -1364,7 +1364,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn
int16_t type = pColInfoData->info.type;
if (type == TSDB_DATA_TYPE_FLOAT || type == TSDB_DATA_TYPE_DOUBLE) {
qError("QInfo:%p group by not supported on double/float columns, abort", pRuntimeEnv->qinfo);
qError("QInfo:%"PRIu64" group by not supported on double/float columns, abort", GET_QID(pRuntimeEnv));
return;
}
@ -1749,7 +1749,7 @@ static void* destroySQLFunctionCtx(SQLFunctionCtx* pCtx, int32_t numOfOutput) {
}
static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOfTables) {
qDebug("QInfo:%p setup runtime env", pRuntimeEnv->qinfo);
qDebug("QInfo:%"PRIu64" setup runtime env", GET_QID(pRuntimeEnv));
SQuery *pQuery = pRuntimeEnv->pQuery;
pRuntimeEnv->prevGroupId = INT32_MIN;
@ -1782,7 +1782,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
*(int64_t*) pRuntimeEnv->prevRow[0] = INT64_MIN;
}
qDebug("QInfo:%p init runtime environment completed", pRuntimeEnv->qinfo);
qDebug("QInfo:%"PRIu64" init runtime environment completed", GET_QID(pRuntimeEnv));
// group by normal column, sliding window query, interval query are handled by interval query processor
// interval (down sampling operation)
@ -1886,7 +1886,7 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {
SQuery *pQuery = pRuntimeEnv->pQuery;
SQInfo* pQInfo = (SQInfo*) pRuntimeEnv->qinfo;
qDebug("QInfo:%p teardown runtime env", pQInfo);
qDebug("QInfo:%"PRIu64" teardown runtime env", pQInfo->qId);
if (pRuntimeEnv->sasArray != NULL) {
for(int32_t i = 0; i < pQuery->numOfOutput; ++i) {
@ -1936,8 +1936,8 @@ bool isQueryKilled(SQInfo *pQInfo) {
(!needBuildResAfterQueryComplete(pQInfo))) {
assert(pQInfo->startExecTs != 0);
qDebug("QInfo:%p retrieve not arrive beyond %d sec, abort current query execution, start:%"PRId64", current:%d", pQInfo, 1,
pQInfo->startExecTs, taosGetTimestampSec());
qDebug("QInfo:%" PRIu64 " retrieve not arrive beyond %d sec, abort current query execution, start:%" PRId64
", current:%d", pQInfo->qId, 1, pQInfo->startExecTs, taosGetTimestampSec());
return true;
}
@ -2113,11 +2113,11 @@ static void setScanLimitationByResultBuffer(SQuery *pQuery) {
/*
* todo add more parameters to check soon..
*/
bool colIdCheck(SQuery *pQuery, void* qinfo) {
bool colIdCheck(SQuery *pQuery, uint64_t qId) {
// load data column information is incorrect
for (int32_t i = 0; i < pQuery->numOfCols - 1; ++i) {
if (pQuery->colList[i].colId == pQuery->colList[i + 1].colId) {
qError("QInfo:%p invalid data load column for query", qinfo);
qError("QInfo:%"PRIu64" invalid data load column for query", qId);
return false;
}
}
@ -2170,13 +2170,13 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo
SQuery* pQuery = pQInfo->runtimeEnv.pQuery;
// in case of point-interpolation query, use asc order scan
char msg[] = "QInfo:%p scan order changed for %s query, old:%d, new:%d, qrange exchanged, old qrange:%" PRId64
char msg[] = "QInfo:%"PRIu64" scan order changed for %s query, old:%d, new:%d, qrange exchanged, old qrange:%" PRId64
"-%" PRId64 ", new qrange:%" PRId64 "-%" PRId64;
// todo handle the case the the order irrelevant query type mixed up with order critical query type
// descending order query for last_row query
if (isFirstLastRowQuery(pQuery)) {
qDebug("QInfo:%p scan order changed for last_row query, old:%d, new:%d", pQInfo, pQuery->order.order, TSDB_ORDER_ASC);
qDebug("QInfo:%"PRIu64" scan order changed for last_row query, old:%d, new:%d", pQInfo->qId, pQuery->order.order, TSDB_ORDER_ASC);
pQuery->order.order = TSDB_ORDER_ASC;
if (pQuery->window.skey > pQuery->window.ekey) {
@ -2198,7 +2198,7 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo
if (isPointInterpoQuery(pQuery) && pQuery->interval.interval == 0) {
if (!QUERY_IS_ASC_QUERY(pQuery)) {
qDebug(msg, pQInfo, "interp", pQuery->order.order, TSDB_ORDER_ASC, pQuery->window.skey, pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey);
qDebug(msg, pQInfo->qId, "interp", pQuery->order.order, TSDB_ORDER_ASC, pQuery->window.skey, pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey);
SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
}
@ -2209,7 +2209,7 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo
if (pQuery->interval.interval == 0) {
if (onlyFirstQuery(pQuery)) {
if (!QUERY_IS_ASC_QUERY(pQuery)) {
qDebug(msg, pQInfo, "only-first", pQuery->order.order, TSDB_ORDER_ASC, pQuery->window.skey,
qDebug(msg, pQInfo->qId, "only-first", pQuery->order.order, TSDB_ORDER_ASC, pQuery->window.skey,
pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey);
SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
@ -2219,7 +2219,7 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo
pQuery->order.order = TSDB_ORDER_ASC;
} else if (onlyLastQuery(pQuery)) {
if (QUERY_IS_ASC_QUERY(pQuery)) {
qDebug(msg, pQInfo, "only-last", pQuery->order.order, TSDB_ORDER_DESC, pQuery->window.skey,
qDebug(msg, pQInfo->qId, "only-last", pQuery->order.order, TSDB_ORDER_DESC, pQuery->window.skey,
pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey);
SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
@ -2233,7 +2233,7 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo
if (stableQuery) {
if (onlyFirstQuery(pQuery)) {
if (!QUERY_IS_ASC_QUERY(pQuery)) {
qDebug(msg, pQInfo, "only-first stable", pQuery->order.order, TSDB_ORDER_ASC,
qDebug(msg, pQInfo->qId, "only-first stable", pQuery->order.order, TSDB_ORDER_ASC,
pQuery->window.skey, pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey);
SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
@ -2243,7 +2243,7 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo
pQuery->order.order = TSDB_ORDER_ASC;
} else if (onlyLastQuery(pQuery)) {
if (QUERY_IS_ASC_QUERY(pQuery)) {
qDebug(msg, pQInfo, "only-last stable", pQuery->order.order, TSDB_ORDER_DESC,
qDebug(msg, pQInfo->qId, "only-last stable", pQuery->order.order, TSDB_ORDER_DESC,
pQuery->window.skey, pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey);
SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
@ -2656,7 +2656,7 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa
SDataBlockInfo* pBlockInfo = &pBlock->info;
if ((*status) == BLK_DATA_NO_NEEDED) {
qDebug("QInfo:%p data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo, pBlockInfo->window.skey,
qDebug("QInfo:%"PRIu64" data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo->qId, pBlockInfo->window.skey,
pBlockInfo->window.ekey, pBlockInfo->rows);
pCost->discardBlocks += 1;
} else if ((*status) == BLK_DATA_STATIS_NEEDED) {
@ -2684,7 +2684,7 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa
(char*)&(pBlock->pBlockStatis[i].max));
if (!load) { // current block has been discard due to filter applied
pCost->discardBlocks += 1;
qDebug("QInfo:%p data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo,
qDebug("QInfo:%"PRIu64" data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo->qId,
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
(*status) = BLK_DATA_DISCARD;
return TSDB_CODE_SUCCESS;
@ -2696,7 +2696,7 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa
// current block has been discard due to filter applied
if (!doFilterByBlockStatistics(pRuntimeEnv, pBlock->pBlockStatis, pTableScanInfo->pCtx, pBlockInfo->rows)) {
pCost->discardBlocks += 1;
qDebug("QInfo:%p data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo, pBlockInfo->window.skey,
qDebug("QInfo:%"PRIu64" data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo->qId, pBlockInfo->window.skey,
pBlockInfo->window.ekey, pBlockInfo->rows);
(*status) = BLK_DATA_DISCARD;
return TSDB_CODE_SUCCESS;
@ -3400,10 +3400,10 @@ void setCtxTagForJoin(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, SExpr
int16_t tagType = pCtx[0].tag.nType;
if (tagType == TSDB_DATA_TYPE_BINARY || tagType == TSDB_DATA_TYPE_NCHAR) {
qDebug("QInfo:%p set tag value for join comparison, colId:%" PRId64 ", val:%s", pRuntimeEnv->qinfo,
qDebug("QInfo:%"PRIu64" set tag value for join comparison, colId:%" PRId64 ", val:%s", GET_QID(pRuntimeEnv),
pExprInfo->base.arg->argValue.i64, pCtx[0].tag.pz);
} else {
qDebug("QInfo:%p set tag value for join comparison, colId:%" PRId64 ", val:%" PRId64, pRuntimeEnv->qinfo,
qDebug("QInfo:%"PRIu64" set tag value for join comparison, colId:%" PRId64 ", val:%" PRId64, GET_QID(pRuntimeEnv),
pExprInfo->base.arg->argValue.i64, pCtx[0].tag.i64);
}
}
@ -3423,9 +3423,9 @@ int32_t setTimestampListJoinInfo(SQueryRuntimeEnv* pRuntimeEnv, tVariant* pTag,
// failed to find data with the specified tag value and vnodeId
if (!tsBufIsValidElem(&elem)) {
if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) {
qError("QInfo:%p failed to find tag:%s in ts_comp", pRuntimeEnv->qinfo, pTag->pz);
qError("QInfo:%"PRIu64" failed to find tag:%s in ts_comp", GET_QID(pRuntimeEnv), pTag->pz);
} else {
qError("QInfo:%p failed to find tag:%" PRId64 " in ts_comp", pRuntimeEnv->qinfo, pTag->i64);
qError("QInfo:%"PRIu64" failed to find tag:%" PRId64 " in ts_comp", GET_QID(pRuntimeEnv), pTag->i64);
}
return -1;
@ -3434,17 +3434,17 @@ int32_t setTimestampListJoinInfo(SQueryRuntimeEnv* pRuntimeEnv, tVariant* pTag,
// Keep the cursor info of current table
pTableQueryInfo->cur = tsBufGetCursor(pRuntimeEnv->pTsBuf);
if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) {
qDebug("QInfo:%p find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", pRuntimeEnv->qinfo, pTag->pz, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex);
qDebug("QInfo:%"PRIu64" find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", GET_QID(pRuntimeEnv), pTag->pz, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex);
} else {
qDebug("QInfo:%p find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", pRuntimeEnv->qinfo, pTag->i64, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex);
qDebug("QInfo:%"PRIu64" find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", GET_QID(pRuntimeEnv), pTag->i64, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex);
}
} else {
tsBufSetCursor(pRuntimeEnv->pTsBuf, &pTableQueryInfo->cur);
if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) {
qDebug("QInfo:%p find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", pRuntimeEnv->qinfo, pTag->pz, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex);
qDebug("QInfo:%"PRIu64" find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", GET_QID(pRuntimeEnv), pTag->pz, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex);
} else {
qDebug("QInfo:%p find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", pRuntimeEnv->qinfo, pTag->i64, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex);
qDebug("QInfo:%"PRIu64" find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", GET_QID(pRuntimeEnv), pTag->i64, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex);
}
}
@ -3548,7 +3548,7 @@ static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo*
int32_t start = 0;
int32_t step = -1;
qDebug("QInfo:%p start to copy data from windowResInfo to output buf", pRuntimeEnv->qinfo);
qDebug("QInfo:%"PRIu64" start to copy data from windowResInfo to output buf", GET_QID(pRuntimeEnv));
if (orderType == TSDB_ORDER_ASC) {
start = pGroupResInfo->index;
step = 1;
@ -3588,7 +3588,7 @@ static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo*
}
}
qDebug("QInfo:%p copy data to query buf completed", pRuntimeEnv->qinfo);
qDebug("QInfo:%"PRIu64" copy data to query buf completed", GET_QID(pRuntimeEnv));
pBlock->info.rows = numOfResult;
return 0;
}
@ -3674,11 +3674,11 @@ static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data
data += sizeof(STableIdInfo);
total++;
qDebug("QInfo:%p set subscribe info, tid:%d, uid:%"PRIu64", skey:%"PRId64, pQInfo, item->tid, item->uid, item->key);
qDebug("QInfo:%"PRIu64" set subscribe info, tid:%d, uid:%"PRIu64", skey:%"PRId64, pQInfo->qId, item->tid, item->uid, item->key);
item = taosHashIterate(pRuntimeEnv->pTableRetrieveTsMap, item);
}
qDebug("QInfo:%p set %d subscribe info", pQInfo, total);
qDebug("QInfo:%"PRIu64" set %d subscribe info", pQInfo->qId, total);
// Check if query is completed or not for stable query or normal table query respectively.
if (Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED) && pRuntimeEnv->proot->status == OP_EXEC_DONE) {
setQueryStatus(pRuntimeEnv, QUERY_OVER);
@ -3717,12 +3717,12 @@ void queryCostStatis(SQInfo *pQInfo) {
pSummary->numOfTimeWindows = 0;
}
qDebug("QInfo:%p :cost summary: elapsed time:%"PRId64" us, first merge:%"PRId64" us, total blocks:%d, "
qDebug("QInfo:%"PRIu64" :cost summary: elapsed time:%"PRId64" us, first merge:%"PRId64" us, total blocks:%d, "
"load block statis:%d, load data block:%d, total rows:%"PRId64 ", check rows:%"PRId64,
pQInfo, pSummary->elapsedTime, pSummary->firstStageMergeTime, pSummary->totalBlocks, pSummary->loadBlockStatis,
pQInfo->qId, pSummary->elapsedTime, pSummary->firstStageMergeTime, pSummary->totalBlocks, pSummary->loadBlockStatis,
pSummary->loadBlocks, pSummary->totalRows, pSummary->totalCheckedRows);
qDebug("QInfo:%p :cost summary: winResPool size:%.2f Kb, numOfWin:%"PRId64", tableInfoSize:%.2f Kb, hashTable:%.2f Kb", pQInfo, pSummary->winInfoSize/1024.0,
qDebug("QInfo:%"PRIu64" :cost summary: winResPool size:%.2f Kb, numOfWin:%"PRId64", tableInfoSize:%.2f Kb, hashTable:%.2f Kb", pQInfo->qId, pSummary->winInfoSize/1024.0,
pSummary->numOfTimeWindows, pSummary->tableInfoSize/1024.0, pSummary->hashSize/1024.0);
}
@ -3758,7 +3758,7 @@ void queryCostStatis(SQInfo *pQInfo) {
//
// int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, NULL, binarySearchForKey, pDataBlock);
//
// qDebug("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%"PRId64, pRuntimeEnv->qinfo,
// qDebug("QInfo:%"PRIu64" check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%"PRId64, GET_QID(pRuntimeEnv),
// pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, numOfRes, pQuery->current->lastKey);
//}
@ -3788,7 +3788,7 @@ void queryCostStatis(SQInfo *pQInfo) {
// pTableQueryInfo->lastKey = (QUERY_IS_ASC_QUERY(pQuery)) ? blockInfo.window.ekey : blockInfo.window.skey;
// pTableQueryInfo->lastKey += step;
//
// qDebug("QInfo:%p skip rows:%d, offset:%" PRId64, pRuntimeEnv->qinfo, blockInfo.rows,
// qDebug("QInfo:%"PRIu64" skip rows:%d, offset:%" PRId64, GET_QID(pRuntimeEnv), blockInfo.rows,
// pQuery->limit.offset);
// } else { // find the appropriated start position in current block
// updateOffsetVal(pRuntimeEnv, &blockInfo);
@ -3836,8 +3836,8 @@ void queryCostStatis(SQInfo *pQInfo) {
// int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, NULL, binarySearchForKey, pDataBlock);
// pRuntimeEnv->resultRowInfo.curIndex = index; // restore the window index
//
// qDebug("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%" PRId64,
// pRuntimeEnv->qinfo, pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, numOfRes,
// qDebug("QInfo:%"PRIu64" check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%" PRId64,
// GET_QID(pRuntimeEnv), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, numOfRes,
// pQuery->current->lastKey);
//
// return key;
@ -3992,7 +3992,7 @@ static int32_t setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery)
terrno = TSDB_CODE_SUCCESS;
if (isFirstLastRowQuery(pQuery)) {
pRuntimeEnv->pQueryHandle = tsdbQueryLastRow(tsdb, &cond, &pQuery->tableGroupInfo, pQInfo, &pQuery->memRef);
pRuntimeEnv->pQueryHandle = tsdbQueryLastRow(tsdb, &cond, &pQuery->tableGroupInfo, pQInfo->qId, &pQuery->memRef);
// update the query time window
pQuery->window = cond.twindow;
@ -4013,9 +4013,9 @@ static int32_t setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery)
}
}
} else if (isPointInterpoQuery(pQuery)) {
pRuntimeEnv->pQueryHandle = tsdbQueryRowsInExternalWindow(tsdb, &cond, &pQuery->tableGroupInfo, pQInfo, &pQuery->memRef);
pRuntimeEnv->pQueryHandle = tsdbQueryRowsInExternalWindow(tsdb, &cond, &pQuery->tableGroupInfo, pQInfo->qId, &pQuery->memRef);
} else {
pRuntimeEnv->pQueryHandle = tsdbQueryTables(tsdb, &cond, &pQuery->tableGroupInfo, pQInfo, &pQuery->memRef);
pRuntimeEnv->pQueryHandle = tsdbQueryTables(tsdb, &cond, &pQuery->tableGroupInfo, pQInfo->qId, &pQuery->memRef);
}
return terrno;
@ -4058,7 +4058,6 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void *ts
pQuery->stabledev = isStabledev(pQuery);
pRuntimeEnv->prevResult = prevResult;
pRuntimeEnv->qinfo = pQInfo;
setScanLimitationByResultBuffer(pQuery);
@ -4102,7 +4101,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void *ts
getIntermediateBufInfo(pRuntimeEnv, &ps, &pQuery->intermediateResultRowSize);
int32_t TENMB = 1024*1024*10;
code = createDiskbasedResultBuffer(&pRuntimeEnv->pResultBuf, ps, TENMB, pQInfo);
code = createDiskbasedResultBuffer(&pRuntimeEnv->pResultBuf, ps, TENMB, pQInfo->qId);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@ -4270,8 +4269,8 @@ static SSDataBlock* doTableScan(void* param) {
pResultRowInfo->prevSKey = pResultRowInfo->pResult[0]->win.skey;
}
qDebug("QInfo:%p start to repeat scan data blocks due to query func required, qrange:%" PRId64 "-%" PRId64,
pRuntimeEnv->qinfo, cond.twindow.skey, cond.twindow.ekey);
qDebug("QInfo:%"PRIu64" start to repeat scan data blocks due to query func required, qrange:%" PRId64 "-%" PRId64,
GET_QID(pRuntimeEnv), cond.twindow.skey, cond.twindow.ekey);
}
if (pTableScanInfo->reverseTimes > 0) {
@ -4280,8 +4279,8 @@ static SSDataBlock* doTableScan(void* param) {
STsdbQueryCond cond = createTsdbQueryCond(pQuery, &pQuery->window);
tsdbResetQueryHandle(pTableScanInfo->pQueryHandle, &cond);
qDebug("QInfo:%p start to reverse scan data blocks due to query func required, qrange:%" PRId64 "-%" PRId64,
pRuntimeEnv->qinfo, cond.twindow.skey, cond.twindow.ekey);
qDebug("QInfo:%"PRIu64" start to reverse scan data blocks due to query func required, qrange:%" PRId64 "-%" PRId64,
GET_QID(pRuntimeEnv), cond.twindow.skey, cond.twindow.ekey);
pRuntimeEnv->scanFlag = REVERSE_SCAN;
@ -5350,14 +5349,14 @@ static SSDataBlock* doTagScan(void* param) {
count += 1;
}
qDebug("QInfo:%p create (tableId, tag) info completed, rows:%d", pRuntimeEnv->qinfo, count);
qDebug("QInfo:%"PRIu64" create (tableId, tag) info completed, rows:%d", GET_QID(pRuntimeEnv), count);
} else if (functionId == TSDB_FUNC_COUNT) {// handle the "count(tbname)" query
SColumnInfoData* pColInfo = taosArrayGet(pRes->pDataBlock, 0);
*(int64_t*)pColInfo->pData = pInfo->totalTables;
count = 1;
pOperator->status = OP_EXEC_DONE;
qDebug("QInfo:%p create count(tbname) query, res:%d rows:1", pRuntimeEnv->qinfo, count);
qDebug("QInfo:%"PRIu64" create count(tbname) query, res:%d rows:1", GET_QID(pRuntimeEnv), count);
} else { // return only the tags|table name etc.
SExprInfo* pExprInfo = pOperator->pExpr; // todo use the column list instead of exprinfo
@ -5396,7 +5395,7 @@ static SSDataBlock* doTagScan(void* param) {
pOperator->status = OP_EXEC_DONE;
}
qDebug("QInfo:%p create tag values results completed, rows:%d", pRuntimeEnv->qinfo, count);
qDebug("QInfo:%"PRIu64" create tag values results completed, rows:%d", GET_QID(pRuntimeEnv), count);
}
pRes->info.rows = count;
@ -6209,7 +6208,7 @@ SSqlGroupbyExpr *createGroupbyExprFromMsg(SQueryTableMsg *pQueryMsg, SColIndex *
return pGroupbyExpr;
}
static int32_t createFilterInfo(void *pQInfo, SQuery *pQuery) {
static int32_t createFilterInfo(SQuery *pQuery, uint64_t qId) {
for (int32_t i = 0; i < pQuery->numOfCols; ++i) {
if (pQuery->colList[i].numOfFilters > 0) {
pQuery->numOfFilterCols++;
@ -6245,13 +6244,13 @@ static int32_t createFilterInfo(void *pQInfo, SQuery *pQuery) {
int32_t lower = pSingleColFilter->filterInfo.lowerRelOptr;
int32_t upper = pSingleColFilter->filterInfo.upperRelOptr;
if (lower == TSDB_RELATION_INVALID && upper == TSDB_RELATION_INVALID) {
qError("QInfo:%p invalid filter info", pQInfo);
qError("QInfo:%"PRIu64" invalid filter info", qId);
return TSDB_CODE_QRY_INVALID_MSG;
}
pSingleColFilter->fp = getFilterOperator(lower, upper);
if (pSingleColFilter->fp == NULL) {
qError("QInfo:%p invalid filter info", pQInfo);
qError("QInfo:%"PRIu64" invalid filter info", qId);
return TSDB_CODE_QRY_INVALID_MSG;
}
@ -6411,7 +6410,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr
}
doUpdateExprColumnIndex(pQuery);
int32_t ret = createFilterInfo(pQInfo, pQuery);
int32_t ret = createFilterInfo(pQuery, pQInfo->qId);
if (ret != TSDB_CODE_SUCCESS) {
goto _cleanup;
}
@ -6486,7 +6485,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr
}
}
colIdCheck(pQuery, pQInfo);
colIdCheck(pQuery, pQInfo->qId);
// todo refactor
pQInfo->query.queryBlockDist = (numOfOutput == 1 && pExprs[0].base.colInfo.colId == TSDB_BLOCK_DIST_COLUMN_INDEX);
@ -6538,7 +6537,9 @@ int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *p
int32_t code = TSDB_CODE_SUCCESS;
SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
pRuntimeEnv->qinfo = pQInfo;
SQuery *pQuery = pRuntimeEnv->pQuery;
STSBuf *pTsBuf = NULL;
if (pQueryMsg->tsLen > 0) { // open new file to save the result
@ -6560,7 +6561,7 @@ int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *p
if ((QUERY_IS_ASC_QUERY(pQuery) && (pQuery->window.skey > pQuery->window.ekey)) ||
(!QUERY_IS_ASC_QUERY(pQuery) && (pQuery->window.ekey > pQuery->window.skey))) {
qDebug("QInfo:%p no result in time range %" PRId64 "-%" PRId64 ", order %d", pQInfo, pQuery->window.skey,
qDebug("QInfo:%"PRIu64" no result in time range %" PRId64 "-%" PRId64 ", order %d", pQInfo->qId, pQuery->window.skey,
pQuery->window.ekey, pQuery->order.order);
setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
pRuntimeEnv->tableqinfoGroupInfo.numOfTables = 0;
@ -6569,7 +6570,7 @@ int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *p
}
if (pRuntimeEnv->tableqinfoGroupInfo.numOfTables == 0) {
qDebug("QInfo:%p no table qualified for tag filter, abort query", pQInfo);
qDebug("QInfo:%"PRIu64" no table qualified for tag filter, abort query", pQInfo->qId);
setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
return TSDB_CODE_SUCCESS;
}
@ -6646,7 +6647,7 @@ void freeQInfo(SQInfo *pQInfo) {
return;
}
qDebug("QInfo:%p start to free QInfo", pQInfo);
qDebug("QInfo:%"PRIu64" start to free QInfo", pQInfo->qId);
SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
releaseQueryBuf(pRuntimeEnv->tableqinfoGroupInfo.numOfTables);
@ -6695,7 +6696,7 @@ void freeQInfo(SQInfo *pQInfo) {
taosArrayDestroy(pRuntimeEnv->groupResInfo.pRows);
pQInfo->signature = 0;
qDebug("QInfo:%p QInfo is freed", pQInfo);
qDebug("QInfo:%"PRIu64" QInfo is freed", pQInfo->qId);
tfree(pQInfo);
}
@ -6715,7 +6716,7 @@ int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) {
off_t s = lseek(fileno(f), 0, SEEK_END);
assert(s == pRuntimeEnv->outputBuf->info.rows);
qDebug("QInfo:%p ts comp data return, file:%p, size:%"PRId64, pQInfo, f, (uint64_t)s);
qDebug("QInfo:%"PRIu64" ts comp data return, file:%p, size:%"PRId64, pQInfo->qId, f, (uint64_t)s);
if (fseek(f, 0, SEEK_SET) >= 0) {
size_t sz = fread(data, 1, s, f);
if(sz < s) { // todo handle error
@ -6746,11 +6747,11 @@ int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) {
}
pRuntimeEnv->resultInfo.total += pRuntimeEnv->outputBuf->info.rows;
qDebug("QInfo:%p current numOfRes rows:%d, total:%" PRId64, pQInfo,
qDebug("QInfo:%"PRIu64" current numOfRes rows:%d, total:%" PRId64, pQInfo->qId,
pRuntimeEnv->outputBuf->info.rows, pRuntimeEnv->resultInfo.total);
if (pQuery->limit.limit > 0 && pQuery->limit.limit == pRuntimeEnv->resultInfo.total) {
qDebug("QInfo:%p results limitation reached, limitation:%"PRId64, pQInfo, pQuery->limit.limit);
qDebug("QInfo:%"PRIu64" results limitation reached, limitation:%"PRId64, pQInfo->qId, pQuery->limit.limit);
setQueryStatus(pRuntimeEnv, QUERY_OVER);
}

View File

@ -254,7 +254,7 @@ tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval,
resetSlotInfo(pBucket);
int32_t ret = createDiskbasedResultBuffer(&pBucket->pBuffer, pBucket->bufPageSize, pBucket->bufPageSize * 512, NULL);
int32_t ret = createDiskbasedResultBuffer(&pBucket->pBuffer, pBucket->bufPageSize, pBucket->bufPageSize * 512, 1);
if (ret != TSDB_CODE_SUCCESS) {
tMemBucketDestroy(pBucket);
return NULL;


@ -9,7 +9,7 @@
#define GET_DATA_PAYLOAD(_p) ((char *)(_p)->pData + POINTER_BYTES)
#define NO_IN_MEM_AVAILABLE_PAGES(_b) (listNEles((_b)->lruList) >= (_b)->inMemPages)
int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t pagesize, int32_t inMemBufSize, const void* handle) {
int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t pagesize, int32_t inMemBufSize, uint64_t qId) {
*pResultBuf = calloc(1, sizeof(SDiskbasedResultBuf));
SDiskbasedResultBuf* pResBuf = *pResultBuf;
@ -24,7 +24,7 @@ int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t pa
pResBuf->allocateId = -1;
pResBuf->comp = true;
pResBuf->file = NULL;
pResBuf->handle = handle;
pResBuf->qId = qId;
pResBuf->fileSize = 0;
// at least more than 2 pages must be in memory
@ -43,7 +43,7 @@ int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t pa
pResBuf->emptyDummyIdList = taosArrayInit(1, sizeof(int32_t));
qDebug("QInfo:%p create resBuf for output, page size:%d, inmem buf pages:%d, file:%s", handle, pResBuf->pageSize,
qDebug("QInfo:%"PRIu64" create resBuf for output, page size:%d, inmem buf pages:%d, file:%s", qId, pResBuf->pageSize,
pResBuf->inMemPages, pResBuf->path);
return TSDB_CODE_SUCCESS;
@ -406,13 +406,13 @@ void destroyResultBuf(SDiskbasedResultBuf* pResultBuf) {
}
if (pResultBuf->file != NULL) {
qDebug("QInfo:%p res output buffer closed, total:%.2f Kb, inmem size:%.2f Kb, file size:%.2f Kb",
pResultBuf->handle, pResultBuf->totalBufSize/1024.0, listNEles(pResultBuf->lruList) * pResultBuf->pageSize / 1024.0,
qDebug("QInfo:%"PRIu64" res output buffer closed, total:%.2f Kb, inmem size:%.2f Kb, file size:%.2f Kb",
pResultBuf->qId, pResultBuf->totalBufSize/1024.0, listNEles(pResultBuf->lruList) * pResultBuf->pageSize / 1024.0,
pResultBuf->fileSize/1024.0);
fclose(pResultBuf->file);
} else {
qDebug("QInfo:%p res output buffer closed, total:%.2f Kb, no file created", pResultBuf->handle,
qDebug("QInfo:%"PRIu64" res output buffer closed, total:%.2f Kb, no file created", pResultBuf->qId,
pResultBuf->totalBufSize/1024.0);
}


@ -911,6 +911,7 @@ void setDCLSqlElems(SSqlInfo *pInfo, int32_t type, int32_t nParam, ...) {
SStrToken *pToken = va_arg(va, SStrToken *);
taosArrayPush(pInfo->pMiscInfo->a, pToken);
}
va_end(va);
}
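The one-line addition above supplies the va_end that pairs with the earlier va_start; the C standard requires every va_start to be matched by va_end before the function returns. A small self-contained sketch of the pattern, using a hypothetical helper rather than the TDengine API:

#include <stdarg.h>
#include <stdio.h>

/* sums 'n' int arguments; shows the required va_start/va_end pairing */
static int sum_ints(int n, ...) {
  va_list va;
  va_start(va, n);
  int total = 0;
  for (int i = 0; i < n; ++i) {
    total += va_arg(va, int);
  }
  va_end(va);   /* must run on every path that follows va_start */
  return total;
}

int main(void) {
  printf("%d\n", sum_ints(3, 1, 2, 3));  /* prints 6 */
  return 0;
}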


@ -100,6 +100,7 @@ static SKeyword keywordTable[] = {
{"ACCOUNT", TK_ACCOUNT},
{"USE", TK_USE},
{"DESCRIBE", TK_DESCRIBE},
{"SYNCDB", TK_SYNCDB},
{"ALTER", TK_ALTER},
{"PASS", TK_PASS},
{"PRIVILEGE", TK_PRIVILEGE},


@ -468,7 +468,7 @@ static int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupRes
pTableQueryInfoList = malloc(POINTER_BYTES * size);
if (pTableQueryInfoList == NULL || posList == NULL || pGroupResInfo->pRows == NULL || pGroupResInfo->pRows == NULL) {
qError("QInfo:%p failed alloc memory", pRuntimeEnv->qinfo);
qError("QInfo:%"PRIu64" failed alloc memory", GET_QID(pRuntimeEnv));
code = TSDB_CODE_QRY_OUT_OF_MEMORY;
goto _end;
}
@ -540,7 +540,7 @@ static int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupRes
int64_t endt = taosGetTimestampMs();
qDebug("QInfo:%p result merge completed for group:%d, elapsed time:%" PRId64 " ms", pRuntimeEnv->qinfo,
qDebug("QInfo:%"PRIu64" result merge completed for group:%d, elapsed time:%" PRId64 " ms", GET_QID(pRuntimeEnv),
pGroupResInfo->currentGroup, endt - startt);
_end:
@ -567,13 +567,13 @@ int32_t mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQueryRuntimeEnv* pRu
break;
}
qDebug("QInfo:%p no result in group %d, continue", pRuntimeEnv->qinfo, pGroupResInfo->currentGroup);
qDebug("QInfo:%"PRIu64" no result in group %d, continue", GET_QID(pRuntimeEnv), pGroupResInfo->currentGroup);
cleanupGroupResInfo(pGroupResInfo);
incNextGroup(pGroupResInfo);
}
int64_t elapsedTime = taosGetTimestampUs() - st;
qDebug("QInfo:%p merge res data into group, index:%d, total group:%d, elapsed time:%" PRId64 "us", pRuntimeEnv->qinfo,
qDebug("QInfo:%"PRIu64" merge res data into group, index:%d, total group:%d, elapsed time:%" PRId64 "us", GET_QID(pRuntimeEnv),
pGroupResInfo->currentGroup, pGroupResInfo->totalGroup, elapsedTime);
// pQInfo->summary.firstStageMergeTime += elapsedTime;


@ -200,29 +200,30 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi
return code;
}
bool qTableQuery(qinfo_t qinfo) {
bool qTableQuery(qinfo_t qinfo, uint64_t *qId) {
SQInfo *pQInfo = (SQInfo *)qinfo;
assert(pQInfo && pQInfo->signature == pQInfo);
int64_t threadId = taosGetSelfPthreadId();
int64_t curOwner = 0;
if ((curOwner = atomic_val_compare_exchange_64(&pQInfo->owner, 0, threadId)) != 0) {
qError("QInfo:%p qhandle is now executed by thread:%p", pQInfo, (void*) curOwner);
qError("QInfo:%"PRIu64"-%p qhandle is now executed by thread:%p", pQInfo->qId, pQInfo, (void*) curOwner);
pQInfo->code = TSDB_CODE_QRY_IN_EXEC;
return false;
}
*qId = pQInfo->qId;
pQInfo->startExecTs = taosGetTimestampSec();
if (isQueryKilled(pQInfo)) {
qDebug("QInfo:%p it is already killed, abort", pQInfo);
qDebug("QInfo:%"PRIu64" it is already killed, abort", pQInfo->qId);
return doBuildResCheck(pQInfo);
}
SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
if (pRuntimeEnv->tableqinfoGroupInfo.numOfTables == 0) {
qDebug("QInfo:%p no table exists for query, abort", pQInfo);
qDebug("QInfo:%"PRIu64" no table exists for query, abort", pQInfo->qId);
setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
return doBuildResCheck(pQInfo);
}
@ -231,22 +232,22 @@ bool qTableQuery(qinfo_t qinfo) {
int32_t ret = setjmp(pQInfo->runtimeEnv.env);
if (ret != TSDB_CODE_SUCCESS) {
pQInfo->code = ret;
qDebug("QInfo:%p query abort due to error/cancel occurs, code:%s", pQInfo, tstrerror(pQInfo->code));
qDebug("QInfo:%"PRIu64" query abort due to error/cancel occurs, code:%s", pQInfo->qId, tstrerror(pQInfo->code));
return doBuildResCheck(pQInfo);
}
qDebug("QInfo:%p query task is launched", pQInfo);
qDebug("QInfo:%"PRIu64" query task is launched", pQInfo->qId);
pRuntimeEnv->outputBuf = pRuntimeEnv->proot->exec(pRuntimeEnv->proot);
if (isQueryKilled(pQInfo)) {
qDebug("QInfo:%p query is killed", pQInfo);
qDebug("QInfo:%"PRIu64" query is killed", pQInfo->qId);
} else if (GET_NUM_OF_RESULTS(pRuntimeEnv) == 0) {
qDebug("QInfo:%p over, %u tables queried, %"PRId64" rows are returned", pQInfo, pRuntimeEnv->tableqinfoGroupInfo.numOfTables,
qDebug("QInfo:%"PRIu64" over, %u tables queried, %"PRId64" rows are returned", pQInfo->qId, pRuntimeEnv->tableqinfoGroupInfo.numOfTables,
pRuntimeEnv->resultInfo.total);
} else {
qDebug("QInfo:%p query paused, %d rows returned, numOfTotal:%" PRId64 " rows",
pQInfo, GET_NUM_OF_RESULTS(pRuntimeEnv), pRuntimeEnv->resultInfo.total + GET_NUM_OF_RESULTS(pRuntimeEnv));
qDebug("QInfo:%"PRIu64" query paused, %d rows returned, numOfTotal:%" PRId64 " rows",
pQInfo->qId, GET_NUM_OF_RESULTS(pRuntimeEnv), pRuntimeEnv->resultInfo.total + GET_NUM_OF_RESULTS(pRuntimeEnv));
}
return doBuildResCheck(pQInfo);
@ -256,13 +257,13 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContex
SQInfo *pQInfo = (SQInfo *)qinfo;
if (pQInfo == NULL || !isValidQInfo(pQInfo)) {
qError("QInfo:%p invalid qhandle", pQInfo);
qError("QInfo:%"PRIu64" invalid qhandle", pQInfo->qId);
return TSDB_CODE_QRY_INVALID_QHANDLE;
}
*buildRes = false;
if (IS_QUERY_KILLED(pQInfo)) {
qDebug("QInfo:%p query is killed, code:0x%08x", pQInfo, pQInfo->code);
qDebug("QInfo:%"PRIu64" query is killed, code:0x%08x", pQInfo->qId, pQInfo->code);
return pQInfo->code;
}
@ -282,11 +283,11 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContex
assert(pQInfo->rspContext == NULL);
if (pQInfo->dataReady == QUERY_RESULT_READY) {
*buildRes = true;
qDebug("QInfo:%p retrieve result info, rowsize:%d, rows:%d, code:%s", pQInfo, pQuery->resultRowSize,
qDebug("QInfo:%"PRIu64" retrieve result info, rowsize:%d, rows:%d, code:%s", pQInfo->qId, pQuery->resultRowSize,
GET_NUM_OF_RESULTS(pRuntimeEnv), tstrerror(pQInfo->code));
} else {
*buildRes = false;
qDebug("QInfo:%p retrieve req set query return result after paused", pQInfo);
qDebug("QInfo:%"PRIu64" retrieve req set query return result after paused", pQInfo->qId);
pQInfo->rspContext = pRspContext;
assert(pQInfo->rspContext != NULL);
}
@ -345,9 +346,10 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co
// here current thread hold the refcount, so it is safe to free tsdbQueryHandle.
*continueExec = false;
(*pRsp)->completed = 1; // notify no more result to client
qDebug("QInfo:%"PRIu64" no more results to retrieve", pQInfo->qId);
} else {
*continueExec = true;
qDebug("QInfo:%p has more results to retrieve", pQInfo);
qDebug("QInfo:%"PRIu64" has more results to retrieve", pQInfo->qId);
}
// the memory should be freed if the code of pQInfo is not TSDB_CODE_SUCCESS
@ -400,7 +402,7 @@ void qDestroyQueryInfo(qinfo_t qHandle) {
return;
}
qDebug("QInfo:%p query completed", pQInfo);
qDebug("QInfo:%"PRIu64" query completed", pQInfo->qId);
queryCostStatis(pQInfo); // print the query cost summary
freeQInfo(pQInfo);
}
@ -483,7 +485,7 @@ void** qRegisterQInfo(void* pMgmt, uint64_t qId, uint64_t qInfo) {
SQueryMgmt *pQueryMgmt = pMgmt;
if (pQueryMgmt->qinfoPool == NULL) {
qError("QInfo:%p failed to add qhandle into qMgmt, since qMgmt is closed", (void *)qInfo);
qError("QInfo:%"PRIu64"-%p failed to add qhandle into qMgmt, since qMgmt is closed", qId, (void*)qInfo);
terrno = TSDB_CODE_VND_INVALID_VGROUP_ID;
return NULL;
}
@ -491,7 +493,7 @@ void** qRegisterQInfo(void* pMgmt, uint64_t qId, uint64_t qInfo) {
pthread_mutex_lock(&pQueryMgmt->lock);
if (pQueryMgmt->closed) {
pthread_mutex_unlock(&pQueryMgmt->lock);
qError("QInfo:%p failed to add qhandle into cache, since qMgmt is colsing", (void *)qInfo);
qError("QInfo:%"PRIu64"-%p failed to add qhandle into cache, since qMgmt is colsing", qId, (void*)qInfo);
terrno = TSDB_CODE_VND_INVALID_VGROUP_ID;
return NULL;
} else {
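Callers of qTableQuery now pass a uint64_t out-parameter that receives the query id, as the vnode read path further below does with "qTableQuery(*qhandle, &qId)". A condensed sketch of that calling convention, with a stub standing in for the real engine call and simplified surrounding names:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* stand-in for the real engine call: copies the query id out and reports whether a result is ready */
static bool qTableQueryStub(void *qinfo, uint64_t *qId) {
  (void)qinfo;
  *qId = 12345;     /* the engine fills this from pQInfo->qId */
  return true;      /* true => a result block can be built */
}

int main(void) {
  void *qhandle = NULL;        /* would come from qAcquireQInfo() in the real code */
  uint64_t qId = 0;
  bool buildRes = qTableQueryStub(qhandle, &qId);
  printf("QInfo:%" PRIu64 " buildRes:%d\n", qId, buildRes);
  return 0;
}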

File diff suppressed because it is too large.


@ -1017,6 +1017,13 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv, SRpcReqCont
return pConn;
}
static void doRpcReportBrokenLinkToServer(void *param, void *id) {
SRpcMsg *pRpcMsg = (SRpcMsg *)(param);
SRpcConn *pConn = (SRpcConn *)(pRpcMsg->handle);
SRpcInfo *pRpc = pConn->pRpc;
(*(pRpc->cfp))(pRpcMsg, NULL);
free(pRpcMsg);
}
static void rpcReportBrokenLinkToServer(SRpcConn *pConn) {
SRpcInfo *pRpc = pConn->pRpc;
if (pConn->pReqMsg == NULL) return;
@ -1025,16 +1032,20 @@ static void rpcReportBrokenLinkToServer(SRpcConn *pConn) {
rpcAddRef(pRpc);
tDebug("%s, notify the server app, connection is gone", pConn->info);
SRpcMsg rpcMsg;
rpcMsg.pCont = pConn->pReqMsg; // pReqMsg is re-used to store the APP context from server
rpcMsg.contLen = pConn->reqMsgLen; // reqMsgLen is re-used to store the APP context length
rpcMsg.ahandle = pConn->ahandle;
rpcMsg.handle = pConn;
rpcMsg.msgType = pConn->inType;
rpcMsg.code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
SRpcMsg *rpcMsg = malloc(sizeof(SRpcMsg));
rpcMsg->pCont = pConn->pReqMsg; // pReqMsg is re-used to store the APP context from server
rpcMsg->contLen = pConn->reqMsgLen; // reqMsgLen is re-used to store the APP context length
rpcMsg->ahandle = pConn->ahandle;
rpcMsg->handle = pConn;
rpcMsg->msgType = pConn->inType;
rpcMsg->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
pConn->pReqMsg = NULL;
pConn->reqMsgLen = 0;
if (pRpc->cfp) (*(pRpc->cfp))(&rpcMsg, NULL);
if (pRpc->cfp) {
taosTmrStart(doRpcReportBrokenLinkToServer, 0, rpcMsg, pRpc->tmrCtrl);
} else {
free(rpcMsg);
}
}
static void rpcProcessBrokenLink(SRpcConn *pConn) {
@ -1051,7 +1062,7 @@ static void rpcProcessBrokenLink(SRpcConn *pConn) {
pConn->pReqMsg = NULL;
taosTmrStart(rpcProcessConnError, 0, pContext, pRpc->tmrCtrl);
}
if (pConn->inType) rpcReportBrokenLinkToServer(pConn);
rpcReleaseConn(pConn);
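The rewrite above moves the SRpcMsg onto the heap and hands it to a timer so the application callback runs outside the current connection context; whichever path consumes the message must free it exactly once, and the no-callback path frees it immediately. A minimal sketch of that ownership hand-off, with a plain function pointer standing in for taosTmrStart and all names illustrative:

#include <stdio.h>
#include <stdlib.h>

typedef struct { int code; void *pCont; } Msg;

/* deferred callback: consumes and frees the heap-allocated message */
static void deliver_later(void *param) {
  Msg *m = (Msg *)param;
  printf("delivering code %d\n", m->code);
  free(m);                      /* ownership was transferred to the callback */
}

static void report_broken_link(void (*cfp)(void *)) {
  Msg *m = malloc(sizeof(Msg));
  if (m == NULL) return;
  m->code = -1;
  m->pCont = NULL;
  if (cfp) {
    cfp(m);                     /* real code: taosTmrStart(doRpcReportBrokenLinkToServer, 0, m, tmrCtrl) */
  } else {
    free(m);                    /* no consumer registered: free here to avoid a leak */
  }
}

int main(void) {
  report_broken_link(deliver_later);
  report_broken_link(NULL);
  return 0;
}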


@ -410,7 +410,7 @@ void syncConfirmForward(int64_t rid, uint64_t version, int32_t code, bool force)
syncReleaseNode(pNode);
}
#if 0
#if 1
void syncRecover(int64_t rid) {
SSyncPeer *pPeer;


@ -17,7 +17,7 @@
#define TSDB_MAX_SUBBLOCKS 8
static FORCE_INLINE int TSDB_KEY_FID(TSKEY key, int32_t days, int8_t precision) {
if (key < 0) {
return (int)(-((-key) / tsMsPerDay[precision] / days + 1));
return (int)((key + 1) / tsMsPerDay[precision] / days + 1);
} else {
return (int)((key / tsMsPerDay[precision] / days));
}
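Context for the separate key < 0 branch above: C integer division truncates toward zero, so a naive key / span would map keys just below zero into the same file id as the non-negative keys; negative timestamps therefore need their own formula. A tiny demonstration of the truncation behaviour (the span value is arbitrary and unrelated to tsMsPerDay):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  int64_t span = 10;                       /* pretend one file covers 10 time units */
  int64_t keys[] = {-11, -10, -1, 0, 9, 10};
  for (int i = 0; i < 6; ++i) {
    /* truncation toward zero: -1/10 == 0, which would collide with keys 0..9 */
    printf("key %lld -> key/span = %lld\n", (long long)keys[i], (long long)(keys[i] / span));
  }
  return 0;
}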
@ -452,6 +452,14 @@ static int tsdbCommitToFile(SCommitH *pCommith, SDFileSet *pSet, int fid) {
return -1;
}
if (tsdbUpdateDFileSetHeader(&(pCommith->wSet)) < 0) {
tsdbError("vgId:%d failed to update FSET %d header since %s", REPO_ID(pRepo), fid, tstrerror(terrno));
tsdbCloseCommitFile(pCommith, true);
// revert the file change
tsdbApplyDFileSetChange(TSDB_COMMIT_WRITE_FSET(pCommith), pSet);
return -1;
}
// Close commit file
tsdbCloseCommitFile(pCommith, false);


@ -134,14 +134,14 @@ int tsdbCreateMFile(SMFile *pMFile, bool updateHeader) {
return 0;
}
pMFile->info.size += TSDB_FILE_HEAD_SIZE;
if (tsdbUpdateMFileHeader(pMFile) < 0) {
tsdbCloseMFile(pMFile);
tsdbRemoveMFile(pMFile);
return -1;
}
pMFile->info.size += TSDB_FILE_HEAD_SIZE;
return 0;
}
@ -378,14 +378,14 @@ int tsdbCreateDFile(SDFile *pDFile, bool updateHeader) {
return 0;
}
pDFile->info.size += TSDB_FILE_HEAD_SIZE;
if (tsdbUpdateDFileHeader(pDFile) < 0) {
tsdbCloseDFile(pDFile);
tsdbRemoveDFile(pDFile);
return -1;
}
pDFile->info.size += TSDB_FILE_HEAD_SIZE;
return 0;
}
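Both hunks above move the size accounting to after the header write, so a failed update does not leave info.size inflated while the file is closed and removed on the error path. A compact sketch of that ordering, under a hypothetical file abstraction rather than the tsdb types:

#include <stdbool.h>
#include <stdio.h>

typedef struct { long size; } FileInfo;

static bool write_header(FileInfo *f) { (void)f; return true; }  /* pretend header write */

static int create_file(FileInfo *f, long headSize) {
  if (!write_header(f)) {
    /* close/remove would happen here; size was never bumped, so there is nothing to undo */
    return -1;
  }
  f->size += headSize;   /* account for the header only once it is actually on disk */
  return 0;
}

int main(void) {
  FileInfo f = { .size = 0 };
  if (create_file(&f, 512) == 0) printf("size=%ld\n", f.size);
  return 0;
}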


@ -111,7 +111,7 @@ typedef struct STsdbQueryHandle {
bool loadExternalRow; // load time window external data rows
bool currentLoadExternalRows; // current load external rows
int32_t loadType; // block load type
void* qinfo; // query info handle, for debug purpose
uint64_t qId; // query info handle, for debug purpose
int32_t type; // query type: retrieve all data blocks, 2. retrieve only last row, 3. retrieve direct prev|next rows
SDFileSet* pFileGroup;
SFSIter fileIter;
@ -286,8 +286,8 @@ static SArray* createCheckInfoFromTableGroup(STsdbQueryHandle* pQueryHandle, STa
}
taosArrayPush(pTableCheckInfo, &info);
tsdbDebug("%p check table uid:%"PRId64", tid:%d from lastKey:%"PRId64" %p", pQueryHandle, info.tableId.uid,
info.tableId.tid, info.lastKey, pQueryHandle->qinfo);
tsdbDebug("%p check table uid:%"PRId64", tid:%d from lastKey:%"PRId64" %"PRIu64, pQueryHandle, info.tableId.uid,
info.tableId.tid, info.lastKey, pQueryHandle->qId);
}
}
@ -339,7 +339,7 @@ static SArray* createCheckInfoFromCheckInfo(STableCheckInfo* pCheckInfo, TSKEY s
return pNew;
}
static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pCond, void* qinfo, SMemRef* pMemRef) {
static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pCond, uint64_t qId, SMemRef* pMemRef) {
STsdbQueryHandle* pQueryHandle = calloc(1, sizeof(STsdbQueryHandle));
if (pQueryHandle == NULL) {
goto out_of_memory;
@ -353,7 +353,7 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC
pQueryHandle->cur.win = TSWINDOW_INITIALIZER;
pQueryHandle->checkFiles = true;
pQueryHandle->activeIndex = 0; // current active table index
pQueryHandle->qinfo = qinfo;
pQueryHandle->qId = qId;
pQueryHandle->outputCapacity = ((STsdbRepo*)tsdb)->config.maxRowsPerFileBlock;
pQueryHandle->allocSize = 0;
pQueryHandle->locateStart = false;
@ -406,7 +406,7 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC
pQueryHandle->pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pQueryHandle->pTsdb->config.maxRowsPerFileBlock);
if (pQueryHandle->pDataCols == NULL) {
tsdbError("%p failed to malloc buf for pDataCols, %p", pQueryHandle, pQueryHandle->qinfo);
tsdbError("%p failed to malloc buf for pDataCols, %"PRIu64, pQueryHandle, pQueryHandle->qId);
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
goto out_of_memory;
}
@ -422,8 +422,8 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC
return NULL;
}
TsdbQueryHandleT* tsdbQueryTables(STsdbRepo* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList, void* qinfo, SMemRef* pRef) {
STsdbQueryHandle* pQueryHandle = tsdbQueryTablesImpl(tsdb, pCond, qinfo, pRef);
TsdbQueryHandleT* tsdbQueryTables(STsdbRepo* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList, uint64_t qId, SMemRef* pRef) {
STsdbQueryHandle* pQueryHandle = tsdbQueryTablesImpl(tsdb, pCond, qId, pRef);
STsdbMeta* pMeta = tsdbGetMeta(tsdb);
assert(pMeta != NULL);
@ -440,7 +440,7 @@ TsdbQueryHandleT* tsdbQueryTables(STsdbRepo* tsdb, STsdbQueryCond* pCond, STable
tsdbMayTakeMemSnapshot(pQueryHandle, psTable);
tsdbDebug("%p total numOfTable:%" PRIzu " in query, %p", pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo), pQueryHandle->qinfo);
tsdbDebug("%p total numOfTable:%" PRIzu " in query, %"PRIu64, pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo), pQueryHandle->qId);
return (TsdbQueryHandleT) pQueryHandle;
}
@ -512,7 +512,7 @@ void tsdbResetQueryHandleForNewTable(TsdbQueryHandleT queryHandle, STsdbQueryCon
pQueryHandle->next = doFreeColumnInfoData(pQueryHandle->next);
}
TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, void* qinfo, SMemRef* pMemRef) {
TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, uint64_t qId, SMemRef* pMemRef) {
pCond->twindow = updateLastrowForEachGroup(groupList);
// no qualified table
@ -520,7 +520,7 @@ TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STable
return NULL;
}
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qinfo, pMemRef);
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qId, pMemRef);
int32_t code = checkForCachedLastRow(pQueryHandle, groupList);
if (code != TSDB_CODE_SUCCESS) { // set the numOfTables to be 0
terrno = code;
@ -581,10 +581,10 @@ static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGr
return pNew;
}
TsdbQueryHandleT tsdbQueryRowsInExternalWindow(STsdbRepo *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, void* qinfo, SMemRef* pRef) {
TsdbQueryHandleT tsdbQueryRowsInExternalWindow(STsdbRepo *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, uint64_t qId, SMemRef* pRef) {
STableGroupInfo* pNew = trimTableGroup(&pCond->twindow, groupList);
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, pNew, qinfo, pRef);
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, pNew, qId, pRef);
pQueryHandle->loadExternalRow = true;
pQueryHandle->currentLoadExternalRows = true;
@ -651,9 +651,9 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
SDataRow row = (SDataRow)SL_GET_NODE_DATA(node);
TSKEY key = dataRowKey(row); // first timestamp in buffer
tsdbDebug("%p uid:%" PRId64 ", tid:%d check data in mem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64
"-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", %p",
"-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", %"PRIu64,
pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pMem->keyFirst, pMem->keyLast,
pCheckInfo->lastKey, pMem->numOfRows, pHandle->qinfo);
pCheckInfo->lastKey, pMem->numOfRows, pHandle->qId);
if (ASCENDING_TRAVERSE(order)) {
assert(pCheckInfo->lastKey <= key);
@ -662,8 +662,8 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
}
} else {
tsdbDebug("%p uid:%"PRId64", tid:%d no data in mem, %p", pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid,
pHandle->qinfo);
tsdbDebug("%p uid:%"PRId64", tid:%d no data in mem, %"PRIu64, pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid,
pHandle->qId);
}
if (!imemEmpty) {
@ -673,9 +673,9 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
SDataRow row = (SDataRow)SL_GET_NODE_DATA(node);
TSKEY key = dataRowKey(row); // first timestamp in buffer
tsdbDebug("%p uid:%" PRId64 ", tid:%d check data in imem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64
"-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", %p",
"-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", %"PRIu64,
pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pIMem->keyFirst, pIMem->keyLast,
pCheckInfo->lastKey, pIMem->numOfRows, pHandle->qinfo);
pCheckInfo->lastKey, pIMem->numOfRows, pHandle->qId);
if (ASCENDING_TRAVERSE(order)) {
assert(pCheckInfo->lastKey <= key);
@ -683,8 +683,8 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
assert(pCheckInfo->lastKey >= key);
}
} else {
tsdbDebug("%p uid:%"PRId64", tid:%d no data in imem, %p", pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid,
pHandle->qinfo);
tsdbDebug("%p uid:%"PRId64", tid:%d no data in imem, %"PRIu64, pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid,
pHandle->qId);
}
return true;
@ -811,8 +811,8 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) {
}
pCheckInfo->lastKey = dataRowKey(row); // first timestamp in buffer
tsdbDebug("%p uid:%" PRId64", tid:%d check data in buffer from skey:%" PRId64 ", order:%d, %p", pHandle,
pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, pCheckInfo->lastKey, pHandle->order, pHandle->qinfo);
tsdbDebug("%p uid:%" PRId64", tid:%d check data in buffer from skey:%" PRId64 ", order:%d, %"PRIu64, pHandle,
pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, pCheckInfo->lastKey, pHandle->order, pHandle->qId);
// all data in mem are checked already.
if ((pCheckInfo->lastKey > pHandle->window.ekey && ASCENDING_TRAVERSE(pHandle->order)) ||
@ -984,21 +984,21 @@ static int32_t doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SBlock* pBloc
STSchema *pSchema = tsdbGetTableSchema(pCheckInfo->pTableObj);
int32_t code = tdInitDataCols(pQueryHandle->pDataCols, pSchema);
if (code != TSDB_CODE_SUCCESS) {
tsdbError("%p failed to malloc buf for pDataCols, %p", pQueryHandle, pQueryHandle->qinfo);
tsdbError("%p failed to malloc buf for pDataCols, %"PRIu64, pQueryHandle, pQueryHandle->qId);
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
goto _error;
}
code = tdInitDataCols(pQueryHandle->rhelper.pDCols[0], pSchema);
if (code != TSDB_CODE_SUCCESS) {
tsdbError("%p failed to malloc buf for rhelper.pDataCols[0], %p", pQueryHandle, pQueryHandle->qinfo);
tsdbError("%p failed to malloc buf for rhelper.pDataCols[0], %"PRIu64, pQueryHandle, pQueryHandle->qId);
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
goto _error;
}
code = tdInitDataCols(pQueryHandle->rhelper.pDCols[1], pSchema);
if (code != TSDB_CODE_SUCCESS) {
tsdbError("%p failed to malloc buf for rhelper.pDataCols[1], %p", pQueryHandle, pQueryHandle->qinfo);
tsdbError("%p failed to malloc buf for rhelper.pDataCols[1], %"PRIu64, pQueryHandle, pQueryHandle->qId);
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
goto _error;
}
@ -1034,15 +1034,15 @@ static int32_t doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SBlock* pBloc
int64_t elapsedTime = (taosGetTimestampUs() - st);
pQueryHandle->cost.blockLoadTime += elapsedTime;
tsdbDebug("%p load file block into buffer, index:%d, brange:%"PRId64"-%"PRId64", rows:%d, elapsed time:%"PRId64 " us, %p",
pQueryHandle, slotIndex, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfRows, elapsedTime, pQueryHandle->qinfo);
tsdbDebug("%p load file block into buffer, index:%d, brange:%"PRId64"-%"PRId64", rows:%d, elapsed time:%"PRId64 " us, %"PRIu64,
pQueryHandle, slotIndex, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfRows, elapsedTime, pQueryHandle->qId);
return TSDB_CODE_SUCCESS;
_error:
pBlock->numOfRows = 0;
tsdbError("%p error occurs in loading file block, index:%d, brange:%"PRId64"-%"PRId64", rows:%d, %p",
pQueryHandle, slotIndex, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfRows, pQueryHandle->qinfo);
tsdbError("%p error occurs in loading file block, index:%d, brange:%"PRId64"-%"PRId64", rows:%d, %"PRIu64,
pQueryHandle, slotIndex, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfRows, pQueryHandle->qId);
return terrno;
}
@ -1064,7 +1064,7 @@ static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SBlock* p
assert(cur->pos >= 0 && cur->pos <= binfo.rows);
TSKEY key = (row != NULL)? dataRowKey(row):TSKEY_INITIAL_VAL;
tsdbDebug("%p key in mem:%"PRId64", %p", pQueryHandle, key, pQueryHandle->qinfo);
tsdbDebug("%p key in mem:%"PRId64", %"PRIu64, pQueryHandle, key, pQueryHandle->qId);
if ((ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key <= binfo.window.ekey)) ||
(!ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key >= binfo.window.skey))) {
@ -1545,9 +1545,9 @@ static void copyAllRemainRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, STabl
updateInfoAfterMerge(pQueryHandle, pCheckInfo, numOfRows, pos);
doCheckGeneratedBlockRange(pQueryHandle);
tsdbDebug("%p uid:%" PRIu64",tid:%d data block created, mixblock:%d, brange:%"PRIu64"-%"PRIu64" rows:%d, %p",
tsdbDebug("%p uid:%" PRIu64",tid:%d data block created, mixblock:%d, brange:%"PRIu64"-%"PRIu64" rows:%d, %"PRIu64,
pQueryHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, cur->mixBlock, cur->win.skey,
cur->win.ekey, cur->rows, pQueryHandle->qinfo);
cur->win.ekey, cur->rows, pQueryHandle->qId);
}
int32_t getEndPosInDataBlock(STsdbQueryHandle* pQueryHandle, SDataBlockInfo* pBlockInfo) {
@ -1599,9 +1599,9 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
int32_t endPos = getEndPosInDataBlock(pQueryHandle, &blockInfo);
tsdbDebug("%p uid:%" PRIu64",tid:%d start merge data block, file block range:%"PRIu64"-%"PRIu64" rows:%d, start:%d,"
"end:%d, %p",
"end:%d, %"PRIu64,
pQueryHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, blockInfo.window.skey, blockInfo.window.ekey,
blockInfo.rows, cur->pos, endPos, pQueryHandle->qinfo);
blockInfo.rows, cur->pos, endPos, pQueryHandle->qId);
// compared with the data from in-memory buffer, to generate the correct timestamp array list
int32_t numOfRows = 0;
@ -1741,9 +1741,9 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
updateInfoAfterMerge(pQueryHandle, pCheckInfo, numOfRows, pos);
doCheckGeneratedBlockRange(pQueryHandle);
tsdbDebug("%p uid:%" PRIu64",tid:%d data block created, mixblock:%d, brange:%"PRIu64"-%"PRIu64" rows:%d, %p",
tsdbDebug("%p uid:%" PRIu64",tid:%d data block created, mixblock:%d, brange:%"PRIu64"-%"PRIu64" rows:%d, %"PRIu64,
pQueryHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, cur->mixBlock, cur->win.skey,
cur->win.ekey, cur->rows, pQueryHandle->qinfo);
cur->win.ekey, cur->rows, pQueryHandle->qId);
}
int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order) {
@ -1917,13 +1917,13 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
memcpy(pQueryHandle->pDataBlockInfo, sup.pDataBlockInfo[0], sizeof(STableBlockInfo) * numOfBlocks);
cleanBlockOrderSupporter(&sup, numOfQualTables);
tsdbDebug("%p create data blocks info struct completed for 1 table, %d blocks not sorted %p ", pQueryHandle, cnt,
pQueryHandle->qinfo);
tsdbDebug("%p create data blocks info struct completed for 1 table, %d blocks not sorted %"PRIu64, pQueryHandle, cnt,
pQueryHandle->qId);
return TSDB_CODE_SUCCESS;
}
tsdbDebug("%p create data blocks info struct completed, %d blocks in %d tables %p", pQueryHandle, cnt,
numOfQualTables, pQueryHandle->qinfo);
tsdbDebug("%p create data blocks info struct completed, %d blocks in %d tables %"PRIu64, pQueryHandle, cnt,
numOfQualTables, pQueryHandle->qId);
assert(cnt <= numOfBlocks && numOfQualTables <= numOfTables); // the pTableQueryInfo[j]->numOfBlocks may be 0
sup.numOfTables = numOfQualTables;
@ -1959,7 +1959,7 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
* }
*/
tsdbDebug("%p %d data blocks sort completed, %p", pQueryHandle, cnt, pQueryHandle->qinfo);
tsdbDebug("%p %d data blocks sort completed, %"PRIu64, pQueryHandle, cnt, pQueryHandle->qId);
cleanBlockOrderSupporter(&sup, numOfTables);
free(pTree);
@ -2017,8 +2017,8 @@ static int32_t getFirstFileDataBlock(STsdbQueryHandle* pQueryHandle, bool* exist
if ((ASCENDING_TRAVERSE(pQueryHandle->order) && win.skey > pQueryHandle->window.ekey) ||
(!ASCENDING_TRAVERSE(pQueryHandle->order) && win.ekey < pQueryHandle->window.ekey)) {
tsdbUnLockFS(REPO_FS(pQueryHandle->pTsdb));
tsdbDebug("%p remain files are not qualified for qrange:%" PRId64 "-%" PRId64 ", ignore, %p", pQueryHandle,
pQueryHandle->window.skey, pQueryHandle->window.ekey, pQueryHandle->qinfo);
tsdbDebug("%p remain files are not qualified for qrange:%" PRId64 "-%" PRId64 ", ignore, %"PRIu64, pQueryHandle,
pQueryHandle->window.skey, pQueryHandle->window.ekey, pQueryHandle->qId);
pQueryHandle->pFileGroup = NULL;
assert(pQueryHandle->numOfBlocks == 0);
break;
@ -2041,8 +2041,8 @@ static int32_t getFirstFileDataBlock(STsdbQueryHandle* pQueryHandle, bool* exist
break;
}
tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, %p", pQueryHandle, numOfBlocks, numOfTables,
pQueryHandle->pFileGroup->fid, pQueryHandle->qinfo);
tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, %"PRIu64, pQueryHandle, numOfBlocks, numOfTables,
pQueryHandle->pFileGroup->fid, pQueryHandle->qId);
assert(numOfBlocks >= 0);
if (numOfBlocks == 0) {
@ -2133,8 +2133,8 @@ int32_t tsdbGetFileBlocksDistInfo(TsdbQueryHandleT* queryHandle, STableBlockDist
if ((ASCENDING_TRAVERSE(pQueryHandle->order) && win.skey > pQueryHandle->window.ekey) ||
(!ASCENDING_TRAVERSE(pQueryHandle->order) && win.ekey < pQueryHandle->window.ekey)) {
tsdbUnLockFS(REPO_FS(pQueryHandle->pTsdb));
tsdbDebug("%p remain files are not qualified for qrange:%" PRId64 "-%" PRId64 ", ignore, %p", pQueryHandle,
pQueryHandle->window.skey, pQueryHandle->window.ekey, pQueryHandle->qinfo);
tsdbDebug("%p remain files are not qualified for qrange:%" PRId64 "-%" PRId64 ", ignore, %"PRIu64, pQueryHandle,
pQueryHandle->window.skey, pQueryHandle->window.ekey, pQueryHandle->qId);
pQueryHandle->pFileGroup = NULL;
break;
}
@ -2157,8 +2157,8 @@ int32_t tsdbGetFileBlocksDistInfo(TsdbQueryHandleT* queryHandle, STableBlockDist
break;
}
tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, %p", pQueryHandle, numOfBlocks, numOfTables,
pQueryHandle->pFileGroup->fid, pQueryHandle->qinfo);
tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, %"PRIu64, pQueryHandle, numOfBlocks, numOfTables,
pQueryHandle->pFileGroup->fid, pQueryHandle->qId);
if (numOfBlocks == 0) {
continue;
@ -2205,8 +2205,8 @@ static int32_t getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle, bool* exists
if ((!cur->mixBlock) || cur->blockCompleted) {
// all data blocks in current file has been checked already, try next file if exists
} else {
tsdbDebug("%p continue in current data block, index:%d, pos:%d, %p", pQueryHandle, cur->slot, cur->pos,
pQueryHandle->qinfo);
tsdbDebug("%p continue in current data block, index:%d, pos:%d, %"PRIu64, pQueryHandle, cur->slot, cur->pos,
pQueryHandle->qId);
int32_t code = handleDataMergeIfNeeded(pQueryHandle, pBlockInfo->compBlock, pCheckInfo);
*exists = (pQueryHandle->realNumOfRows > 0);
@ -2334,8 +2334,8 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int
}
int64_t elapsedTime = taosGetTimestampUs() - st;
tsdbDebug("%p build data block from cache completed, elapsed time:%"PRId64" us, numOfRows:%d, numOfCols:%d, %p", pQueryHandle,
elapsedTime, numOfRows, numOfCols, pQueryHandle->qinfo);
tsdbDebug("%p build data block from cache completed, elapsed time:%"PRId64" us, numOfRows:%d, numOfCols:%d, %"PRIu64, pQueryHandle,
elapsedTime, numOfRows, numOfCols, pQueryHandle->qId);
return numOfRows;
}
@ -2593,7 +2593,7 @@ static int32_t doGetExternalRow(STsdbQueryHandle* pQueryHandle, int16_t type, SM
memcpy(&cond.colList[i], &pColInfoData->info, sizeof(SColumnInfo));
}
pSecQueryHandle = tsdbQueryTablesImpl(pQueryHandle->pTsdb, &cond, pQueryHandle->qinfo, pMemRef);
pSecQueryHandle = tsdbQueryTablesImpl(pQueryHandle->pTsdb, &cond, pQueryHandle->qId, pMemRef);
tfree(cond.colList);
// current table, only one table
@ -3393,8 +3393,8 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) {
pQueryHandle->next = doFreeColumnInfoData(pQueryHandle->next);
SIOCostSummary* pCost = &pQueryHandle->cost;
tsdbDebug("%p :io-cost summary: statis-info:%"PRId64" us, datablock:%" PRId64" us, check data:%"PRId64" us, %p",
pQueryHandle, pCost->statisInfoLoadTime, pCost->blockLoadTime, pCost->checkForNextTime, pQueryHandle->qinfo);
tsdbDebug("%p :io-cost summary: statis-info:%"PRId64" us, datablock:%" PRId64" us, check data:%"PRId64" us, %"PRIu64,
pQueryHandle, pCost->statisInfoLoadTime, pCost->blockLoadTime, pCost->checkForNextTime, pQueryHandle->qId);
tfree(pQueryHandle);
}


@ -25,6 +25,7 @@ int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg);
int32_t vnodeDrop(int32_t vgId);
int32_t vnodeOpen(int32_t vgId);
int32_t vnodeAlter(void *pVnode, SCreateVnodeMsg *pVnodeCfg);
int32_t vnodeSync(int32_t vgId);
int32_t vnodeClose(int32_t vgId);
void vnodeCleanUp(SVnodeObj *pVnode);
void vnodeDestroy(SVnodeObj *pVnode);
@ -33,4 +34,4 @@ void vnodeDestroy(SVnodeObj *pVnode);
}
#endif
#endif
#endif


@ -91,6 +91,23 @@ int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg) {
return code;
}
int32_t vnodeSync(int32_t vgId) {
SVnodeObj *pVnode = vnodeAcquire(vgId);
if (pVnode == NULL) {
vDebug("vgId:%d, failed to sync, vnode not find", vgId);
return TSDB_CODE_VND_INVALID_VGROUP_ID;
}
if (pVnode->role != TAOS_SYNC_ROLE_MASTER) {
vInfo("vgId:%d, vnode will sync, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode);
syncRecover(pVnode->sync);
}
vnodeRelease(pVnode);
return TSDB_CODE_SUCCESS;
}
int32_t vnodeDrop(int32_t vgId) {
SVnodeObj *pVnode = vnodeAcquire(vgId);
if (pVnode == NULL) {


@ -25,7 +25,7 @@ static int32_t (*vnodeProcessReadMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *pVnode, SV
static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead);
static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead);
static int32_t vnodeNotifyCurrentQhandle(void* handle, void* qhandle, int32_t vgId);
static int32_t vnodeNotifyCurrentQhandle(void* handle, uint64_t qId, void* qhandle, int32_t vgId);
int32_t vnodeInitRead(void) {
vnodeProcessReadMsgFp[TSDB_MSG_TYPE_QUERY] = vnodeProcessQueryMsg;
@ -167,7 +167,7 @@ static int32_t vnodePutItemIntoReadQueue(SVnodeObj *pVnode, void **qhandle, void
* @param ahandle sqlObj address at client side
* @return
*/
static int32_t vnodeDumpQueryResult(SRspRet *pRet, void *pVnode, void **handle, bool *freeHandle, void *ahandle) {
static int32_t vnodeDumpQueryResult(SRspRet *pRet, void *pVnode, uint64_t qId, void **handle, bool *freeHandle, void *ahandle) {
bool continueExec = false;
int32_t code = TSDB_CODE_SUCCESS;
@ -183,7 +183,7 @@ static int32_t vnodeDumpQueryResult(SRspRet *pRet, void *pVnode, void **handle,
}
} else {
*freeHandle = true;
vTrace("QInfo:%p exec completed, free handle:%d", *handle, *freeHandle);
vTrace("QInfo:%"PRIu64"-%p exec completed, free handle:%d", qId, *handle, *freeHandle);
}
} else {
SRetrieveTableRsp *pRsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp));
@ -220,27 +220,6 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
if (pRead->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
vError("error rpc msg in query, %s", tstrerror(pRead->code));
}
// assert(pRead->code != TSDB_CODE_RPC_NETWORK_UNAVAIL);
// if (pRead->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
// SCancelQueryMsg *pMsg = (SCancelQueryMsg *)pRead->pCont;
//// pMsg->free = htons(killQueryMsg->free);
// pMsg->qhandle = htobe64(pMsg->qhandle);
//
// vWarn("QInfo:%p connection %p broken, kill query", (void *)pMsg->qhandle, pRead->rpcHandle);
//// assert(pRead->contLen > 0 && pMsg->free == 1);
//
// void **qhandle = qAcquireQInfo(pVnode->qMgmt, (uint64_t)pMsg->qhandle);
// if (qhandle == NULL || *qhandle == NULL) {
// vWarn("QInfo:%p invalid qhandle, no matched query handle, conn:%p", (void *)pMsg->qhandle, pRead->rpcHandle);
// } else {
// assert(*qhandle == (void *)pMsg->qhandle);
//
// qKillQuery(*qhandle);
// qReleaseQInfo(pVnode->qMgmt, (void **)&qhandle, true);
// }
//
// return TSDB_CODE_TSC_QUERY_CANCELLED;
// }
int32_t code = TSDB_CODE_SUCCESS;
void ** handle = NULL;
@ -274,7 +253,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
}
if (handle != NULL &&
vnodeNotifyCurrentQhandle(pRead->rpcHandle, *handle, pVnode->vgId) != TSDB_CODE_SUCCESS) {
vnodeNotifyCurrentQhandle(pRead->rpcHandle, qId, *handle, pVnode->vgId) != TSDB_CODE_SUCCESS) {
vError("vgId:%d, QInfo:%"PRIu64 "-%p, query discarded since link is broken, %p", pVnode->vgId, qId, *handle,
pRead->rpcHandle);
pRsp->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
@ -297,16 +276,17 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
} else {
assert(pCont != NULL);
void **qhandle = (void **)pRead->qhandle;
uint64_t qId = 0;
vTrace("vgId:%d, QInfo:%p, dnode continues to exec query", pVnode->vgId, *qhandle);
// In the retrieve blocking model, only 50% CPU will be used in query processing
if (tsRetrieveBlockingModel) {
qTableQuery(*qhandle); // do execute query
qTableQuery(*qhandle, &qId); // do execute query
qReleaseQInfo(pVnode->qMgmt, (void **)&qhandle, false);
} else {
bool freehandle = false;
bool buildRes = qTableQuery(*qhandle); // do execute query
bool buildRes = qTableQuery(*qhandle, &qId); // do execute query
// build query rsp, the retrieve request has reached here already
if (buildRes) {
@ -318,7 +298,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
pRead->rpcHandle);
// set the real rsp error code
pRead->code = vnodeDumpQueryResult(&pRead->rspRet, pVnode, qhandle, &freehandle, pRead->rpcHandle);
pRead->code = vnodeDumpQueryResult(&pRead->rspRet, pVnode, qId, qhandle, &freehandle, pRead->rpcHandle);
// NOTE: set return code to be TSDB_CODE_QRY_HAS_RSP to notify dnode to return msg to client
code = TSDB_CODE_QRY_HAS_RSP;
@ -348,32 +328,32 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
SRetrieveTableMsg *pRetrieve = pCont;
pRetrieve->free = htons(pRetrieve->free);
pRetrieve->qhandle = htobe64(pRetrieve->qhandle);
pRetrieve->qId = htobe64(pRetrieve->qId);
vTrace("vgId:%d, QInfo:%" PRIu64 ", retrieve msg is disposed, free:%d, conn:%p", pVnode->vgId, pRetrieve->qhandle,
vTrace("vgId:%d, qId:%" PRIu64 ", retrieve msg is disposed, free:%d, conn:%p", pVnode->vgId, pRetrieve->qId,
pRetrieve->free, pRead->rpcHandle);
memset(pRet, 0, sizeof(SRspRet));
terrno = TSDB_CODE_SUCCESS;
int32_t code = TSDB_CODE_SUCCESS;
void ** handle = qAcquireQInfo(pVnode->qMgmt, pRetrieve->qhandle);
void ** handle = qAcquireQInfo(pVnode->qMgmt, pRetrieve->qId);
if (handle == NULL) {
code = terrno;
terrno = TSDB_CODE_SUCCESS;
} else if (!checkQIdEqual(*handle, pRetrieve->qhandle)) {
} else if (!checkQIdEqual(*handle, pRetrieve->qId)) {
code = TSDB_CODE_QRY_INVALID_QHANDLE;
}
if (code != TSDB_CODE_SUCCESS) {
vError("vgId:%d, invalid handle in retrieving result, code:%s, QInfo:%" PRIu64, pVnode->vgId, tstrerror(code), pRetrieve->qhandle);
vError("vgId:%d, invalid qId in retrieving result, code:%s, QInfo:%" PRIu64, pVnode->vgId, tstrerror(code), pRetrieve->qId);
vnodeBuildNoResultQueryRsp(pRet);
return code;
}
// kill current query and free corresponding resources.
if (pRetrieve->free == 1) {
vWarn("vgId:%d, QInfo:%"PRIu64 "-%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, pRetrieve->qhandle, *handle);
vWarn("vgId:%d, QInfo:%"PRIu64 "-%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, pRetrieve->qId, *handle);
qKillQuery(*handle);
qReleaseQInfo(pVnode->qMgmt, (void **)&handle, true);
@ -383,7 +363,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
}
// register the qhandle to connect to quit query immediate if connection is broken
if (vnodeNotifyCurrentQhandle(pRead->rpcHandle, *handle, pVnode->vgId) != TSDB_CODE_SUCCESS) {
if (vnodeNotifyCurrentQhandle(pRead->rpcHandle, pRetrieve->qId, *handle, pVnode->vgId) != TSDB_CODE_SUCCESS) {
vError("vgId:%d, QInfo:%"PRIu64 "-%p, retrieve discarded since link is broken, %p", pVnode->vgId, pRetrieve->qhandle, *handle, pRead->rpcHandle);
code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
qKillQuery(*handle);
@ -413,7 +393,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
}
// ahandle is the sqlObj pointer
code = vnodeDumpQueryResult(pRet, pVnode, handle, &freeHandle, pRead->rpcHandle);
code = vnodeDumpQueryResult(pRet, pVnode, pRetrieve->qId, handle, &freeHandle, pRead->rpcHandle);
}
// If qhandle is not added into vread queue, the query should be completed already or paused with error.
@ -427,13 +407,13 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
// notify connection(handle) that current qhandle is created, if current connection from
// client is broken, the query needs to be killed immediately.
int32_t vnodeNotifyCurrentQhandle(void *handle, void *qhandle, int32_t vgId) {
int32_t vnodeNotifyCurrentQhandle(void *handle, uint64_t qId, void *qhandle, int32_t vgId) {
SRetrieveTableMsg *pMsg = rpcMallocCont(sizeof(SRetrieveTableMsg));
pMsg->qhandle = htobe64((uint64_t)qhandle);
pMsg->header.vgId = htonl(vgId);
pMsg->header.contLen = htonl(sizeof(SRetrieveTableMsg));
vTrace("QInfo:%p register qhandle to connect:%p", qhandle, handle);
vTrace("QInfo:%"PRIu64"-%p register qhandle to connect:%p", qId, qhandle, handle);
return rpcReportProgress(handle, (char *)pMsg, sizeof(SRetrieveTableMsg));
}

tests/Jenkinsfile

@ -1,9 +1,8 @@
def pre_test(){
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
sudo rmtaos
'''
}
sh '''
sudo rmtaos||echo 'no taosd installed'
'''
sh '''
cd ${WKC}
git reset --hard
@ -56,14 +55,8 @@ pipeline {
cd ${WKC}/tests
./test-all.sh b1
date'''
sh '''
cd ${WKC}/tests
./test-all.sh full jdbc
date'''
sh '''
cd ${WKC}/tests
./test-all.sh full unit
date'''
}
}
@ -83,9 +76,20 @@ pipeline {
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_crash_gen_val_log.sh
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_taosd_val_log.sh
'''
}
sh'''
systemctl start taosd
sleep 10
@ -136,6 +140,10 @@ pipeline {
./test-all.sh b2
date
'''
sh '''
cd ${WKC}/tests
./test-all.sh full unit
date'''
}
}
@ -154,6 +162,10 @@ pipeline {
'''
}
sh '''
cd ${WKC}/tests
./test-all.sh full jdbc
date'''
sh '''
cd ${WKC}/tests/pytest
./valgrind-test.sh 2>&1 > mem-error-out.log
./handle_val_log.sh
@ -162,6 +174,11 @@ pipeline {
cd ${WKC}/tests
./test-all.sh b3
date'''
sh '''
date
cd ${WKC}/tests
./test-all.sh full example
date'''
}
}


@ -38,7 +38,7 @@
"insert_rows": 100,
"multi_thread_write_one_tbl": "no",
"number_of_tbl_in_one_sql": 0,
"rows_per_tbl": 3,
"interlace_rows": 3,
"max_sql_len": 1024,
"disorder_ratio": 0,
"disorder_range": 1000,


@ -163,6 +163,16 @@ int main(int argc, char *argv[])
getchar();
while(1) {
if (tablesProcessed < numOfTables) {
printf("wait for process finished\n");
sleep(1);
continue;
}
break;
}
taos_close(taos);
free(tableList);
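The loop added above polls until the asynchronous callbacks have processed every table before the connection is closed, sleeping one second per iteration to keep the wait cheap. A stripped-down sketch of the same wait-before-close shape, with a worker thread standing in for the taos_query_a callbacks and placeholder names for the example's globals:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int tablesProcessed;       /* the real example bumps a counter from its query callback */
static const int numOfTables = 5;

static void *worker(void *arg) {
  (void)arg;
  for (int i = 0; i < numOfTables; ++i) {
    sleep(1);                            /* pretend one async insert completes per second */
    atomic_fetch_add(&tablesProcessed, 1);
  }
  return NULL;
}

int main(void) {
  pthread_t t;
  pthread_create(&t, NULL, worker, NULL);
  while (atomic_load(&tablesProcessed) < numOfTables) {  /* same shape as the loop added above */
    printf("wait for process finished\n");
    sleep(1);
  }
  pthread_join(t, NULL);
  printf("all %d tables processed, safe to close the connection\n", numOfTables);
  return 0;
}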


@ -68,20 +68,14 @@ function prepareBuild {
rm -rf $CURR_DIR/../../../../release/*
fi
if [ ! -e $DOCKER_DIR/TDengine-server-$VERSION-Linux-x64.tar.gz ] || [ ! -e $DOCKER_DIR/TDengine-arbitrator-$VERSION-Linux-x64.tar.gz ]; then
cd $CURR_DIR/../../../../packaging
echo $CURR_DIR
echo $IN_TDINTERNAL
echo "generating TDeninger packages"
if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then
pwd
./release.sh -v cluster -n $VERSION >> /dev/null 2>&1
else
pwd
./release.sh -v edge -n $VERSION >> /dev/null 2>&1
fi
cd $CURR_DIR/../../../../packaging
if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then
if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then
if [ ! -e $DOCKER_DIR/TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz ] || [ ! -e $DOCKER_DIR/TDengine-enterprise-arbitrator-$VERSION-Linux-x64.tar.gz ]; then
echo "generating TDeninge enterprise packages"
./release.sh -v cluster -n $VERSION >> /dev/null 2>&1
if [ ! -e $CURR_DIR/../../../../release/TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz ]; then
echo "no TDengine install package found"
exit 1
@ -91,7 +85,17 @@ function prepareBuild {
echo "no arbitrator install package found"
exit 1
fi
else
cd $CURR_DIR/../../../../release
mv TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
mv TDengine-enterprise-arbitrator-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
fi
else
if [ ! -e $DOCKER_DIR/TDengine-server-$VERSION-Linux-x64.tar.gz ] || [ ! -e $DOCKER_DIR/TDengine-arbitrator-$VERSION-Linux-x64.tar.gz ]; then
echo "generating TDeninge community packages"
./release.sh -v edge -n $VERSION >> /dev/null 2>&1
if [ ! -e $CURR_DIR/../../../../release/TDengine-server-$VERSION-Linux-x64.tar.gz ]; then
echo "no TDengine install package found"
exit 1
@ -101,16 +105,11 @@ function prepareBuild {
echo "no arbitrator install package found"
exit 1
fi
fi
cd $CURR_DIR/../../../../release
if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then
mv TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
mv TDengine-enterprise-arbitrator-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
else
cd $CURR_DIR/../../../../release
mv TDengine-server-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
mv TDengine-arbitrator-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
fi
fi
fi
rm -rf $DOCKER_DIR/*.yml


@ -39,7 +39,7 @@
"insert_rows": 100000,
"multi_thread_write_one_tbl": "no",
"number_of_tbl_in_one_sql": 1,
"rows_per_tbl": 100,
"interlace_rows": 100,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,


@ -196,6 +196,9 @@ python3 ./test.py -f query/bug2119.py
python3 ./test.py -f query/isNullTest.py
python3 ./test.py -f query/queryWithTaosdKilled.py
python3 ./test.py -f query/floatCompare.py
python3 ./test.py -f query/bug3375.py
#stream
python3 ./test.py -f stream/metric_1.py
@ -230,16 +233,6 @@ python3 client/twoClients.py
python3 test.py -f query/queryInterval.py
python3 test.py -f query/queryFillTest.py
# tools
python3 test.py -f tools/taosdemoTest.py
python3 test.py -f tools/taosdemoTestWithoutMetric.py
python3 test.py -f tools/taosdemoTestWithJson.py
python3 test.py -f tools/taosdemoTestLimitOffset.py
python3 test.py -f tools/taosdumpTest.py
python3 test.py -f tools/taosdemoTest2.py
python3 test.py -f tools/taosdemoTestSampleData.py
python3 test.py -f tools/taosdemoTestInterlace.py
# subscribe
python3 test.py -f subscribe/singlemeter.py
#python3 test.py -f subscribe/stability.py
@ -249,6 +242,18 @@ python3 test.py -f subscribe/supertable.py
#======================p3-end===============
#======================p4-start===============
# tools
python3 test.py -f tools/taosdumpTest.py
python3 test.py -f tools/taosdemoTest.py
python3 test.py -f tools/taosdemoTestWithoutMetric.py
python3 test.py -f tools/taosdemoTestWithJson.py
python3 test.py -f tools/taosdemoTestLimitOffset.py
python3 test.py -f tools/taosdemoTest2.py
python3 test.py -f tools/taosdemoTestSampleData.py
python3 test.py -f tools/taosdemoTestInterlace.py
python3 test.py -f tools/taosdemoTestQuery.py
python3 ./test.py -f update/merge_commit_data-0.py
# wal
python3 ./test.py -f wal/addOldWalTest.py


@ -0,0 +1,68 @@
#!/bin/bash
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
IN_TDINTERNAL="community"
TDIR=`pwd`
if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then
cd ../..
else
cd ../../..
fi
TOP_DIR=`pwd`
TAOSD_DIR=`find . -name "taosd"|grep -v community|grep debug|head -n1`
VALGRIND_OUT=taosd_valgrind.out
VALGRIND_ERR=taosd_valgrind.err
rm -rf /var/lib/taos/*
# nohup valgrind --tool=memcheck --leak-check=yes $TAOSD_DIR > $TDIR/$VALGRIND_OUT 2> $TDIR/$VALGRIND_ERR &
nohup valgrind --leak-check=yes $TAOSD_DIR > $TDIR/$VALGRIND_OUT 2> $TDIR/$VALGRIND_ERR &
sleep 20
cd -
./crash_gen.sh -p -t 10 -s 200
ps -ef |grep valgrind|grep -v grep|awk '{print $2}'|xargs kill -term
while true
do
monitoring=` ps -ef|grep valgrind |grep -v grep| wc -l`
if [ $monitoring -eq 0 ]
then
echo "Manipulator is not running "
break
else
sleep 1
fi
done
grep 'start to execute\|ERROR SUMMARY' $VALGRIND_ERR | grep -v 'grep' | uniq | tee taosd_mem_err.log
for memError in `grep 'ERROR SUMMARY' taosd_mem_err.log | awk '{print $4}'`
do
memError=(${memError//,/})
if [ -n "$memError" ]; then
if [ "$memError" -gt 12 ]; then
echo -e "${RED} ## Memory errors number valgrind reports is $memError.\
More than our threshold! ## ${NC}"
fi
fi
done
grep 'start to execute\|definitely lost:' $VALGRIND_ERR|grep -v 'grep'|uniq|tee taosd-definitely-lost-out.log
for defiMemError in `grep 'definitely lost:' taosd-definitely-lost-out.log | awk '{print $7}'`
do
defiMemError=(${defiMemError//,/})
if [ -n "$defiMemError" ]; then
if [ "$defiMemError" -gt 0 -a "$defiMemError" -lt 1013 ]; then
cat $VALGRIND_ERR
echo -e "${RED} ## Memory errors number valgrind reports \
Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
exit 8
elif [ "$defiMemError" -gt 1013 ];then #add for azure
cat $VALGRIND_ERR
echo -e "${RED} ## Memory errors number valgrind reports \
Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
exit 8
fi
fi
done


@ -0,0 +1,61 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
tdSql.execute("drop database if exists db")
tdSql.execute("create database if not exists db keep 36500")
tdSql.execute("use db")
tdLog.printNoPrefix("==========step1:create table && insert data")
tdSql.execute(
"create table stb1 (ts timestamp, c11 int) TAGS(t11 int, t12 int )"
)
tdSql.execute(
"create table stb2 (ts timestamp, c21 int) TAGS(t21 int, t22 int )"
)
tdSql.execute("create table t10 using stb1 tags(1, 10)")
tdSql.execute("create table t20 using stb2 tags(1, 12)")
tdSql.execute("insert into t10 values (1600000000000, 1)")
tdSql.execute("insert into t10 values (1610000000000, 2)")
tdSql.execute("insert into t20 values (1600000000000, 3)")
tdSql.execute("insert into t20 values (1610000000000, 4)")
tdLog.printNoPrefix("==========step2:query crash test")
tdSql.query("select stb1.c11, stb1.t11, stb1.t12 from stb2,stb1 where stb2.t21 = stb1.t11 and stb1.ts = stb2.ts")
tdSql.checkRows(2)
tdSql.query("select stb2.c21, stb2.t21, stb2.t21 from stb1, stb2 where stb2.t21 = stb1.t11 and stb1.ts = stb2.ts")
tdSql.checkRows(2)
tdSql.query("select top(stb2.c21,2) from stb1, stb2 where stb2.t21 = stb1.t11 and stb1.ts = stb2.ts")
tdSql.checkRows(2)
tdSql.query("select last(stb2.c21) from stb1, stb2 where stb2.t21 = stb1.t11 and stb1.ts = stb2.ts")
tdSql.checkRows(1)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())


@ -0,0 +1,230 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import taos
import sys
import os
import json
import subprocess
import datetime
from util.log import *
from util.sql import *
from util.cases import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def getCfgDir(self, path):
binPath = os.path.dirname(os.path.realpath(__file__))
binPath = binPath + "/../../../debug/"
tdLog.debug("binPath %s" % (binPath))
binPath = os.path.realpath(binPath)
tdLog.debug("binPath real path %s" % (binPath))
if path == "":
self.path = os.path.abspath(binPath + "../../")
else:
self.path = os.path.realpath(path)
self.cfgDir = "%s/sim/psim/cfg" % (self.path)
return self.cfgDir
def creatcfg(self):
dbinfo = {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 16,
"blocks": 8,
"precision": "ms",
"keep": 36500,
"minRows": 100,
"maxRows": 4096,
"comp": 2,
"walLevel": 1,
"cachelast": 0,
"quorum": 1,
"fsync": 3000,
"update": 0
}
# define the format of the super table to be created
stable1 = {
"name": "stb2",
"child_table_exists": "no",
"childtable_count": 10,
"childtable_prefix": "t",
"auto_create_table": "no",
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 5000,
"multi_thread_write_one_tbl": "no",
"number_of_tbl_in_one_sql": 0,
"rows_per_tbl": 1000,
"max_sql_len": 65480,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 19000000,
"start_timestamp": "1969-01-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [
{"type": "INT"},
{"type": "DOUBLE", "count": 10},
{"type": "BINARY", "len": 16, "count": 3},
{"type": "BINARY", "len": 32, "count": 6}
],
"tags": [
{"type": "TINYINT", "count": 2},
{"type": "BINARY", "len": 16, "count": 5}
]
}
# to create multiple super tables, just define additional table formats and append them to super_tables
super_tables = [stable1]
database = {
"dbinfo": dbinfo,
"super_tables": super_tables
}
cfgdir = self.getCfgDir("")
create_table = {
"filetype": "insert",
"cfgdir": cfgdir,
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "/tmp/insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"num_of_records_per_req": 100,
"databases": [database]
}
return create_table
def inserttable(self):
create_table = self.creatcfg()
date = datetime.datetime.now().strftime("%Y%m%d%H%M")
file_create_table = f"/tmp/insert_{date}.json"
with open(file_create_table, 'w') as f:
json.dump(create_table, f)
create_table_cmd = f"taosdemo -f {file_create_table}"
_ = subprocess.check_output(create_table_cmd, shell=True).decode("utf-8")
def run(self):
s = 'reset query cache'
tdSql.execute(s)
s = 'create database if not exists db'
tdSql.execute(s)
s = 'use db'
tdSql.execute(s)
tdLog.info("==========step1:create table stable and child table,then insert data automatically")
self.inserttable()
# tdSql.execute(
# '''create table if not exists supt
# (ts timestamp, c1 int, c2 float, c3 bigint, c4 double, c5 smallint, c6 tinyint)
# tags(location binary(64), type int, isused bool , family nchar(64))'''
# )
# tdSql.execute("create table t1 using supt tags('beijing', 1, 1, '自行车')")
# tdSql.execute("create table t2 using supt tags('shanghai', 2, 0, '拖拉机')")
# tdSql.execute(
# f"insert into t1 values (-31564800000, 6, 5, 4, 3, 2, 1)"
# )
tdLog.info("==========step2:query join")
# stable query
tdSql.query(
"select * from stb2 where stb2.ts < '1970-01-01 00:00:00.000' "
)
tdSql.checkRows(16600)
tdSql.query(
"select * from stb2 where stb2.ts >= '1970-01-01 00:00:00.000' "
)
tdSql.checkRows(33400)
tdSql.query(
"select * from stb2 where stb2.ts > '1969-12-01 00:00:00.000' and stb2.ts <'1970-01-31 00:00:00.000' "
)
tdSql.checkRows(2780)
# child-table query
tdSql.query(
"select * from t0 where t0.ts < '1970-01-01 00:00:00.000' "
)
tdSql.checkRows(1660)
tdSql.query(
"select * from t1 where t1.ts >= '1970-01-01 00:00:00.000' "
)
tdSql.checkRows(3340)
tdSql.query(
"select * from t9 where t9.ts > '1969-12-01 00:00:00.000' and t9.ts <'1970-01-31 00:00:00.000' "
)
tdSql.checkRows(278)
tdSql.query(
"select * from t0,t1 where t0.ts=t1.ts and t1.ts >= '1970-01-01 00:00:00.000' "
)
tdSql.checkRows(3340)
tdSql.query(
"select diff(col1) from t0 where t0.ts >= '1970-01-01 00:00:00.000' "
)
tdSql.checkRows(3339)
tdSql.query(
"select t0,col1 from stb2 where stb2.ts < '1970-01-01 00:00:00.000' order by ts"
)
tdSql.checkRows(16600)
# query with timestamp in 'where ...'
tdSql.query(
"select * from stb2 where stb2.ts > -28800000 "
)
tdSql.checkRows(33400)
tdSql.query(
"select * from stb2 where stb2.ts > -28800000 and stb2.ts < '1970-01-01 08:00:00.000' "
)
tdSql.checkRows(20)
tdSql.query(
"select * from stb2 where stb2.ts < -28800000 and stb2.ts > '1969-12-31 16:00:00.000' "
)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -10,7 +10,7 @@
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 5000,
"rows_per_tbl": 50,
"interlace_rows": 50,
"num_of_records_per_req": 100,
"max_sql_len": 1024000,
"databases": [{
@ -42,7 +42,7 @@
"insert_mode": "taosc",
"insert_rows": 250,
"multi_thread_write_one_tbl": "no",
"rows_per_tbl": 80,
"interlace_rows": 80,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,

View File

@ -0,0 +1,22 @@
{
"filetype": "query",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"confirm_parameter_prompt": "no",
"databases": "test",
"query_times": 1,
"super_table_query": {
"stblname": "meters",
"query_interval": 10,
"threads": 8,
"sqls": [
{
"sql": "select last_row(ts) from xxxx",
"result": ""
}
]
}
}

View File

@ -51,7 +51,7 @@ class TDTestCase:
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath + "/build/bin/"
os.system("%staosdemo -y -M -t %d -n %d -x" %
os.system("%staosdemo -y -t %d -n %d" %
(binPath, self.numberOfTables, self.numberOfRecords))
tdSql.execute("use test")

View File

@ -31,12 +31,20 @@ class TDTestCase:
def insertDataAndAlterTable(self, threadID):
if(threadID == 0):
os.system("taosdemo -M -y -t %d -n %d -x" %
os.system("taosdemo -y -t %d -n %d" %
(self.numberOfTables, self.numberOfRecords))
if(threadID == 1):
time.sleep(2)
print("use test")
tdSql.execute("use test")
while True:
try:
tdSql.execute("use test")
break
except Exception as e:
tdLog.info("use database test failed")
time.sleep(1)
continue
# check if all the tables have been created
while True:
tdSql.query("show tables")

View File

@ -1,4 +1,4 @@
###################################################################
##################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
@ -25,9 +25,6 @@ class TDTestCase:
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.numberOfTables = 10000
self.numberOfRecords = 100
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))

View File

@ -0,0 +1,78 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
import time
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
import subprocess
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.numberOfTables = 1000
self.numberOfRecords = 100
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def run(self):
tdSql.prepare()
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath + "/build/bin/"
os.system("%staosdemo -y -t %d -n %d" %
(binPath, self.numberOfTables, self.numberOfRecords))
print("Sleep 2 seconds..")
time.sleep(2)
os.system('%staosdemo -f tools/query.json ' % binPath)
# taosdemoCmd = '%staosdemo -f tools/query.json ' % binPath
# threads = subprocess.check_output(
# taosdemoCmd, shell=True).decode("utf-8")
# print("threads: %d" % int(threads))
# if (int(threads) != 8):
# caller = inspect.getframeinfo(inspect.stack()[0][0])
# tdLog.exit(
# "%s(%d) failed: expected threads 8, actual %d" %
# (caller.filename, caller.lineno, int(threads)))
#
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -50,7 +50,7 @@ class TDTestCase:
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath + "/build/bin/"
os.system("%staosdemo -y -t %d -n %d -x" %
os.system("%staosdemo -N -y -t %d -n %d" %
(binPath, self.numberOfTables, self.numberOfRecords))
tdSql.query("show databases")

File diff suppressed because it is too large

View File

@ -29,7 +29,25 @@ function dohavecore(){
proc=`echo $corefile|cut -d "_" -f3`
if [ -n "$corefile" ];then
echo 'taosd or taos has generated core'
tar -zcPf $corepath'taos_'`date "+%Y_%m_%d_%H_%M_%S"`.tar.gz /usr/local/taos/
if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]] && [[ $1 == 1 ]]; then
cd ../../../
tar -zcPf $corepath'taos_'`date "+%Y_%m_%d_%H_%M_%S"`.tar.gz debug/build/bin/taosd debug/build/bin/tsim debug/build/lib/libtaos*so*
if [[ $2 == 1 ]];then
cp -r sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S"`
rm -rf sim/case.log
else
cd community
cp -r sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" `
rm -rf sim/case.log
fi
else
cd ../../
if [[ $1 == 1 ]];then
tar -zcPf $corepath'taos_'`date "+%Y_%m_%d_%H_%M_%S"`.tar.gz debug/build/bin/taosd debug/build/bin/tsim debug/build/lib/libtaos*so*
cp -r sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" `
rm -rf sim/case.log
fi
fi
if [[ $1 == 1 ]];then
echo '\n'|gdb /usr/local/taos/bin/$proc $core_file -ex "bt 10" -ex quit
exit 8
@ -100,11 +118,14 @@ function runSimCaseOneByOnefq {
cp -r ../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" `
rm -rf ../../sim/case.log
fi
exit 8
dohavecore $2 1
if [[ $2 == 1 ]];then
exit 8
fi
fi
end_time=`date +%s`
echo execution time of $case was `expr $end_time - $start_time`s. | tee -a out.log
dohavecore $2
dohavecore $2 1
fi
done
rm -rf ../../../sim/case.log
@ -169,16 +190,19 @@ function runPyCaseOneByOnefq() {
out_log=`tail -1 pytest-out.log `
if [[ $out_log =~ 'failed' ]];then
cp -r ../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" `
echo '=====================log====================='
echo '=====================log===================== '
cat ../../sim/case.log
rm -rf ../../sim/case.log
exit 8
dohavecore $2 2
if [[ $2 == 1 ]];then
exit 8
fi
fi
echo execution time of $case was `expr $end_time - $start_time`s. | tee -a pytest-out.log
else
$line > /dev/null 2>&1
fi
dohavecore $2
dohavecore $2 2
fi
done
rm -rf ../../sim/case.log
@ -188,9 +212,10 @@ totalFailed=0
totalPyFailed=0
totalJDBCFailed=0
totalUnitFailed=0
totalExampleFailed=0
corepath=`grep -oP '.*(?=core_)' /proc/sys/kernel/core_pattern||grep -oP '.*(?=core-)' /proc/sys/kernel/core_pattern`
if [ "$2" != "jdbc" ] && [ "$2" != "python" ] && [ "$2" != "unit" ]; then
if [ "$2" != "jdbc" ] && [ "$2" != "python" ] && [ "$2" != "unit" ] && [ "$2" != "example" ]; then
echo "### run TSIM test case ###"
cd $tests_dir/script
@ -205,15 +230,15 @@ if [ "$2" != "jdbc" ] && [ "$2" != "python" ] && [ "$2" != "unit" ]; then
echo "### run TSIM b1 test ###"
runSimCaseOneByOnefq b1 0
runSimCaseOneByOnefq b4 0
runSimCaseOneByOnefq b5 0
runSimCaseOneByOnefq b6 0
runSimCaseOneByOnefq b7 0
elif [ "$1" == "b2" ]; then
echo "### run TSIM b2 test ###"
runSimCaseOneByOnefq b2 0
runSimCaseOneByOnefq b5 0
elif [ "$1" == "b3" ]; then
echo "### run TSIM b3 test ###"
runSimCaseOneByOnefq b3 0
runSimCaseOneByOnefq b6 0
elif [ "$1" == "b1fq" ]; then
echo "### run TSIM b1 test ###"
runSimCaseOneByOnefq b1 1
@ -259,7 +284,7 @@ if [ "$2" != "jdbc" ] && [ "$2" != "python" ] && [ "$2" != "unit" ]; then
fi
fi
if [ "$2" != "sim" ] && [ "$2" != "jdbc" ] && [ "$2" != "unit" ]; then
if [ "$2" != "sim" ] && [ "$2" != "jdbc" ] && [ "$2" != "unit" ] && [ "$2" != "example" ]; then
echo "### run Python test case ###"
cd $tests_dir
@ -328,7 +353,7 @@ if [ "$2" != "sim" ] && [ "$2" != "jdbc" ] && [ "$2" != "unit" ]; then
fi
if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "unit" ] && [ "$1" == "full" ]; then
if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "unit" ] && [ "$2" != "example" ] && [ "$1" == "full" ]; then
echo "### run JDBC test cases ###"
cd $tests_dir
@ -372,7 +397,7 @@ if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "unit" ] && [ "$1" ==
dohavecore 1
fi
if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "jdbc" ] && [ "$1" == "full" ]; then
if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "jdbc" ] && [ "$2" != "example" ] && [ "$1" == "full" ]; then
echo "### run Unit tests ###"
stopTaosd
@ -408,5 +433,80 @@ if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "jdbc" ] && [ "$1" ==
dohavecore 1
fi
if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "jdbc" ] && [ "$2" != "unit" ] && [ "$1" == "full" ]; then
echo "### run Example tests ###"
exit $(($totalFailed + $totalPyFailed + $totalJDBCFailed + $totalUnitFailed))
stopTaosd
cd $tests_dir
if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then
cd ../../
else
cd ../
fi
pwd
cd debug/build/bin
rm -rf /var/lib/taos/*
nohup ./taosd -c /etc/taos/ > /dev/null 2>&1 &
echo "sleeping for 30 seconds"
#sleep 30
cd $tests_dir
echo "current dir: "
pwd
cd examples/c
echo "building applications"
make > /dev/null
totalExamplePass=0
echo "Running tests"
./apitest > /dev/null 2>&1
if [ $? != "0" ]; then
echo "prepare failed"
totalExampleFailed=`expr $totalExampleFailed + 1`
else
echo "prepare pass"
totalExamplePass=`expr $totalExamplePass + 1`
fi
./prepare 127.0.0.1 > /dev/null 2>&1
if [ $? != "0" ]; then
echo "prepare failed"
totalExampleFailed=`expr $totalExampleFailed + 1`
else
echo "prepare pass"
totalExamplePass=`expr $totalExamplePass + 1`
fi
./subscribe -test > /dev/null 2>&1
if [ $? != "0" ]; then
echo "prepare failed"
totalExampleFailed=`expr $totalExampleFailed + 1`
else
echo "prepare pass"
totalExamplePass=`expr $totalExamplePass + 1`
fi
yes |./asyncdemo 127.0.0.1 test 1000 10 > /dev/null 2>&1
if [ $? != "0" ]; then
echo "prepare failed"
totalExampleFailed=`expr $totalExampleFailed + 1`
else
echo "prepare pass"
totalExamplePass=`expr $totalExamplePass + 1`
fi
if [ "$totalExamplePass" -gt "0" ]; then
echo -e "\n${GREEN} ### Total $totalExamplePass examples succeed! ### ${NC}"
fi
if [ "$totalExampleFailed" -ne "0" ]; then
echo -e "\n${RED} ### Total $totalExampleFailed examples failed! ### ${NC}"
fi
dohavecore 1
fi
exit $(($totalFailed + $totalPyFailed + $totalJDBCFailed + $totalUnitFailed + $totalExampleFailed))