Merge remote-tracking branch 'origin/3.0' into enh/msgRefactor0

dapan1121 2022-11-17 10:42:29 +08:00
commit 3a642f06d0
84 changed files with 4834 additions and 6480 deletions

View File

@ -2,7 +2,7 @@
# taosws-rs
ExternalProject_Add(taosws-rs
GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git
GIT_TAG 38c4599
GIT_TAG 9843872
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

View File

@ -1,4 +1,4 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="-0.5 -1 32 32">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="-0.5 -1 32 32" width="50" height="50">
<g fill="#5865f2">
<path
d="M26.0015 6.9529C24.0021 6.03845 21.8787 5.37198 19.6623 5C19.3833 5.48048 19.0733 6.13144 18.8563 6.64292C16.4989 6.30193 14.1585 6.30193 11.8336 6.64292C11.6166 6.13144 11.2911 5.48048 11.0276 5C8.79575 5.37198 6.67235 6.03845 4.6869 6.9529C0.672601 12.8736 -0.41235 18.6548 0.130124 24.3585C2.79599 26.2959 5.36889 27.4739 7.89682 28.2489C8.51679 27.4119 9.07477 26.5129 9.55525 25.5675C8.64079 25.2265 7.77283 24.808 6.93587 24.312C7.15286 24.1571 7.36986 23.9866 7.57135 23.8161C12.6241 26.1255 18.0969 26.1255 23.0876 23.8161C23.3046 23.9866 23.5061 24.1571 23.7231 24.312C22.8861 24.808 22.0182 25.2265 21.1037 25.5675C21.5842 26.5129 22.1422 27.4119 22.7621 28.2489C25.2885 27.4739 27.8769 26.2959 30.5288 24.3585C31.1952 17.7559 29.4733 12.0212 26.0015 6.9529ZM10.2527 20.8402C8.73376 20.8402 7.49382 19.4608 7.49382 17.7714C7.49382 16.082 8.70276 14.7025 10.2527 14.7025C11.7871 14.7025 13.0425 16.082 13.0115 17.7714C13.0115 19.4608 11.7871 20.8402 10.2527 20.8402ZM20.4373 20.8402C18.9183 20.8402 17.6768 19.4608 17.6768 17.7714C17.6768 16.082 18.8873 14.7025 20.4373 14.7025C21.9717 14.7025 23.2271 16.082 23.1961 17.7714C23.1961 19.4608 21.9872 20.8402 20.4373 20.8402Z"


View File

@ -1,6 +1,6 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="-1 -2 18 18">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="-1 -2 18 18" width="50" height="50">
<path
fill="currentColor"
fill="#000"
d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0016 8c0-4.42-3.58-8-8-8z"
></path>
</svg>


View File

@ -3,11 +3,11 @@ title: Get Started
description: This article describes how to install TDengine and test its performance.
---
import github from './github.svg'
import discord from './discord.svg'
import twitter from './twitter.svg'
import youtube from './youtube.svg'
import linkedin from './linkedin.svg'
import GitHubSVG from './github.svg'
import DiscordSVG from './discord.svg'
import TwitterSVG from './twitter.svg'
import YouTubeSVG from './youtube.svg'
import LinkedInSVG from './linkedin.svg'
You can install and run TDengine on Linux/Windows/macOS machines as well as Docker containers. You can also deploy TDengine as a managed service with TDengine Cloud.
@ -23,11 +23,11 @@ import {useCurrentSidebarCategory} from '@docusaurus/theme-common';
### Join TDengine Community
<table width="100%">
<tr align="center">
<td width="20%" style="border:0"><a href="https://github.com/taosdata/TDengine" target="_blank"><img src={github} alt="Star GitHub" width="50" /><p>Star GitHub</p></a></td>
<td width="20%" style="border:0"><a href="https://discord.com/invite/VZdSuUg4pS" target="_blank"><img src={discord} alt="Join Discord" width="50" /><p>Join Discord</p></a></td>
<td width="20%" style="border:0"><a href="https://twitter.com/TDengineDB" target="_blank"><img src={twitter} alt="Follow Twitter" width="50" /><p>Follow Twitter</p></a></td>
<td width="20%" style="border:0"><a href="https://www.youtube.com/@tdengine" target="_blank"><img src={youtube} alt="Subscribe YouTube" width="50" /><p>Subscribe YouTube</p></a></td>
<td width="20%" style="border:0"><a href="https://www.linkedin.com/company/tdengine" target="_blank"><img src={linkedin} alt="Follow LinkedIn" width="50" /><p>Follow LinkedIn</p></a></td>
<tr align="center" style={{border:0}}>
<td width="20%" style={{border:0}}><a href="https://github.com/taosdata/TDengine" target="_blank"><GitHubSVG /><p>Star GitHub</p></a></td>
<td width="20%" style={{border:0}}><a href="https://discord.com/invite/VZdSuUg4pS" target="_blank"><DiscordSVG /><p>Join Discord</p></a></td>
<td width="20%" style={{border:0}}><a href="https://twitter.com/TDengineDB" target="_blank"><TwitterSVG /><p>Follow Twitter</p></a></td>
<td width="20%" style={{border:0}}><a href="https://www.youtube.com/@tdengine" target="_blank"><YouTubeSVG /><p>Subscribe YouTube</p></a></td>
<td width="20%" style={{border:0}}><a href="https://www.linkedin.com/company/tdengine" target="_blank"><LinkedInSVG /><p>Follow LinkedIn</p></a></td>
</tr>
</table>

View File

@ -1,4 +1,4 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 -2 24 24">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 -2 24 24" width="50" height="50">
<path
fill="rgb(10, 102, 194)"
d="M20.5 2h-17A1.5 1.5 0 002 3.5v17A1.5 1.5 0 003.5 22h17a1.5 1.5 0 001.5-1.5v-17A1.5 1.5 0 0020.5 2zM8 19H5v-9h3zM6.5 8.25A1.75 1.75 0 118.3 6.5a1.78 1.78 0 01-1.8 1.75zM19 19h-3v-4.74c0-1.42-.6-1.93-1.38-1.93A1.74 1.74 0 0013 14.19a.66.66 0 000 .14V19h-3v-9h2.9v1.3a3.11 3.11 0 012.7-1.4c1.55 0 3.36.86 3.36 3.66z"


View File

@ -1,4 +1,4 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 -2 24 24">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 -2 24 24" width="50" height="50">
<g fill="rgb(29, 155, 240)">
<path
d="M23.643 4.937c-.835.37-1.732.62-2.675.733.962-.576 1.7-1.49 2.048-2.578-.9.534-1.897.922-2.958 1.13-.85-.904-2.06-1.47-3.4-1.47-2.572 0-4.658 2.086-4.658 4.66 0 .364.042.718.12 1.06-3.873-.195-7.304-2.05-9.602-4.868-.4.69-.63 1.49-.63 2.342 0 1.616.823 3.043 2.072 3.878-.764-.025-1.482-.234-2.11-.583v.06c0 2.257 1.605 4.14 3.737 4.568-.392.106-.803.162-1.227.162-.3 0-.593-.028-.877-.082.593 1.85 2.313 3.198 4.352 3.234-1.595 1.25-3.604 1.995-5.786 1.995-.376 0-.747-.022-1.112-.065 2.062 1.323 4.51 2.093 7.14 2.093 8.57 0 13.255-7.098 13.255-13.254 0-.2-.005-.402-.014-.602.91-.658 1.7-1.477 2.323-2.41z"


View File

@ -1,4 +1,4 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="-2 -8 32 32">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="-2 -8 32 32" width="50" height="50">
<g>
<g>
<path


View File

@ -142,7 +142,7 @@ The preceding SQL statement can be used in migration scenarios. This command can
### View Database Configuration
```sql
SHOW DATABASES \G;
SELECT * FROM INFORMATION_SCHEMA.INS_DATABASES WHERE NAME='DBNAME' \G;
```
The preceding SQL statement shows the value of each parameter for the specified database. One value is displayed per line.
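For example, for a hypothetical database named `power`, the query could be run in the TDengine CLI as follows; the `\G` suffix prints each column value on its own line:

```sql
SELECT * FROM INFORMATION_SCHEMA.INS_DATABASES WHERE NAME='power' \G;
```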

View File

@ -8,6 +8,9 @@ will automatically add the required columns to ensure that the data written by t
The schemaless writing method creates super tables and their corresponding subtables. These are completely indistinguishable from the super tables and subtables created directly via SQL, and you can write data to them directly via SQL statements. Note that the names of tables created by schemaless writing are generated from tag values according to fixed mapping rules, so they do not convey any meaning and lack readability.
Tip:
Schemaless writes create tables automatically; you do not need to create tables manually, and doing so may cause unknown errors.
## Schemaless Writing Line Protocol
TDengine's schemaless writing line protocol supports InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. When using any of these three protocols, you must specify in the API which protocol standard should be used to parse the input.
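As a rough illustration, one record in each of the three protocols could look like the following. The measurement, tag, and field names here are made up for this example, and the timestamp precision is whatever you declare in the API call:

```text
# InfluxDB line protocol: measurement,tag_set field_set timestamp
meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221 1648432611249000000

# OpenTSDB telnet-style line protocol: metric timestamp value tag_set
meters.current 1648432611 11.8 location=California.LosAngeles groupid=2

# OpenTSDB JSON protocol
{"metric": "meters.current", "timestamp": 1648432611, "value": 11.8, "tags": {"location": "California.LosAngeles", "groupid": 2}}
```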

View File

@ -24,13 +24,13 @@ import {useCurrentSidebarCategory} from '@docusaurus/theme-common';
<table width="100%">
<tr align="center">
<td style="padding:1em;border:0"><img src={xiaot} alt="小 T 的二维码" width="200" /></td>
<td style="padding:1em;border:0"><img src={channel} alt="TDengine 微信视频号" width="200" /></td>
<td style="padding:1em;border:0"><img src={official_account} alt="TDengine 微信公众号" width="200" /></td>
<td style={{padding:'1em 3em',border:0}}><img src={xiaot} alt="小 T 的二维码" width="200" /></td>
<td style={{padding:'1em 3em',border:0}}><img src={channel} alt="TDengine 微信视频号" width="200" /></td>
<td style={{padding:'1em 3em',border:0}}><img src={official_account} alt="TDengine 微信公众号" width="200" /></td>
</tr>
<tr align="center">
<td style="padding:1em;border:0">加入“物联网大数据技术前沿群”<br/>与大家进行技术交流</td>
<td style="padding:1em;border:0">关注 TDengine 微信视频号<br/>收看技术直播与教学视频</td>
<td style="padding:1em;border:0">关注 TDengine 微信公众号<br/>阅读核心技术与行业案例文章</td>
<td style={{padding:'1em 3em',border:0}}>加入“物联网大数据技术前沿群”<br/>与大家进行技术交流</td>
<td style={{padding:'1em 3em',border:0}}>关注 TDengine 微信视频号<br/>收看技术直播与教学视频</td>
<td style={{padding:'1em 3em',border:0}}>关注 TDengine 微信公众号<br/>阅读核心技术与行业案例文章</td>
</tr>
</table>

View File

@ -142,10 +142,10 @@ SHOW CREATE DATABASE db_name;
### View Database Parameters
```sql
SHOW DATABASES \G;
SELECT * FROM INFORMATION_SCHEMA.INS_DATABASES WHERE NAME='DBNAME' \G;
```
Lists the configuration parameters of every database in the system, displaying one parameter per line.
Lists the configuration parameters of the specified database, displaying one parameter per line.
## Delete Expired Data

View File

@ -32,7 +32,7 @@ taosAdapter provides the following features:
taosAdapter is part of the TDengine server software. If you use TDengine server, no extra steps are needed to install taosAdapter. You can download the TDengine server installation package from the [official TAOS Data website](https://taosdata.com/cn/all-downloads/). If you need to deploy taosAdapter on a server other than the TDengine server, install the full TDengine package on that server to obtain taosAdapter. To build taosAdapter from source, refer to the [Building taosAdapter](https://github.com/taosdata/taosadapter/blob/3.0/BUILD-CN.md) document.
### start/stop taosAdapter
### Start/Stop taosAdapter
On Linux, the taosAdapter service is managed by systemd by default. Use `systemctl start taosadapter` to start the taosAdapter service and `systemctl stop taosadapter` to stop it.

View File

@ -4,6 +4,9 @@ sidebar_label: TDinsight
description: A zero-dependency TDengine monitoring solution based on Grafana
---
import Tabs from '@theme/Tabs'
import TabItem from '@theme/TabItem'
TDinsight is a solution for monitoring TDengine with a monitoring database and [Grafana].
Using [taosKeeper](../taosKeeper), TDengine periodically writes server metrics such as CPU, memory, disk space, bandwidth, request counts, disk read/write speed, and slow queries into a designated database, and records important system operations (such as logins and database creation or deletion) as well as error and alert information. Through [Grafana] and the [TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases), TDinsight visualizes cluster status, node information, insert and query requests, resource usage, and more, and also supports alerting on abnormal vnode, dnode, and mnode states, making it easy for developers to monitor the running state of a TDengine cluster in real time. This document guides you through installing the Grafana server, automatically installing the TDengine data source plugin via the `TDinsight.sh` install script, and deploying the TDinsight dashboard.
@ -41,6 +44,7 @@ sudo apt-get install grafana
```
### Install Grafana on CentOS / RHEL
</TabItem>
<TabItem label="redhat" value="基于 CentOS / RHEL 系统">
@ -127,20 +131,20 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
Most command-line options can equally be set through environment variables.
| Short Option | Long Option | Environment Variable | Description |
| ------ | -------------------------- | ---------------------------- | --------------------------------------------------------------------------- |
| -v | --plugin-version | TDENGINE_PLUGIN_VERSION | TDengine data source plugin version; defaults to the latest release. |
| -P | --grafana-provisioning-dir | GF_PROVISIONING_DIR | Grafana provisioning directory; defaults to `/etc/grafana/provisioning/`. |
| -G | --grafana-plugins-dir | GF_PLUGINS_DIR | Grafana plugins directory; defaults to `/var/lib/grafana/plugins`. |
| -O | --grafana-org-id | GF_ORG_ID | Grafana organization ID; defaults to 1. |
| -n | --tdengine-ds-name | TDENGINE_DS_NAME | TDengine data source name; defaults to TDengine. |
| -a | --tdengine-api | TDENGINE_API | TDengine REST API endpoint; defaults to `http://127.0.0.1:6041`. |
| -u | --tdengine-user | TDENGINE_USER | TDengine user name. [default: root] |
| -p | --tdengine-password | TDENGINE_PASSWORD | TDengine password. [default: taosdata] |
| -i | --tdinsight-uid | TDINSIGHT_DASHBOARD_UID | TDinsight dashboard `uid`. [default: tdinsight] |
| -t | --tdinsight-title | TDINSIGHT_DASHBOARD_TITLE | TDinsight dashboard title. [default: TDinsight] |
| -e | --tdinsight-editable | TDINSIGHT_DASHBOARD_EDITABLE | Whether the dashboard is editable. [default: false] |
| -E | --external-notifier | EXTERNAL_NOTIFIER | Apply an external notifier uid to the TDinsight dashboard. |
| Short Option | Long Option | Environment Variable | Description |
| ------ | -------------------------- | ---------------------------- | ------------------------------------------------------- |
| -v | --plugin-version | TDENGINE_PLUGIN_VERSION | TDengine data source plugin version; defaults to the latest release. |
| -P | --grafana-provisioning-dir | GF_PROVISIONING_DIR | Grafana provisioning directory; defaults to `/etc/grafana/provisioning/`. |
| -G | --grafana-plugins-dir | GF_PLUGINS_DIR | Grafana plugins directory; defaults to `/var/lib/grafana/plugins`. |
| -O | --grafana-org-id | GF_ORG_ID | Grafana organization ID; defaults to 1. |
| -n | --tdengine-ds-name | TDENGINE_DS_NAME | TDengine data source name; defaults to TDengine. |
| -a | --tdengine-api | TDENGINE_API | TDengine REST API endpoint; defaults to `http://127.0.0.1:6041`. |
| -u | --tdengine-user | TDENGINE_USER | TDengine user name. [default: root] |
| -p | --tdengine-password | TDENGINE_PASSWORD | TDengine password. [default: taosdata] |
| -i | --tdinsight-uid | TDINSIGHT_DASHBOARD_UID | TDinsight dashboard `uid`. [default: tdinsight] |
| -t | --tdinsight-title | TDINSIGHT_DASHBOARD_TITLE | TDinsight dashboard title. [default: TDinsight] |
| -e | --tdinsight-editable | TDINSIGHT_DASHBOARD_EDITABLE | Whether the dashboard is editable. [default: false] |
| -E | --external-notifier | EXTERNAL_NOTIFIER | Apply an external notifier uid to the TDinsight dashboard. |
Suppose you start the TDengine database on host `tdengine`, with the HTTP API on port `6041`, user `root1`, and password `pass5ord`. Run the script:
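As a sketch only (not the original document's exact command), the install script could be invoked with the flags from the option table above; adjust the path to wherever `TDinsight.sh` was downloaded:

```bash
sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord
```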
@ -196,6 +200,7 @@ sudo grafana-cli \
[plugins]
allow_loading_unsigned_plugins = tdengine-datasource
```
:::
### Start the Grafana Service

View File

@ -8,6 +8,8 @@ description: 'Schemaless writing, which eliminates the step of pre-creating supertables/subtables
Supertables and their corresponding subtables created through schemaless writing are indistinguishable from supertables and subtables created directly via SQL, and you can also write data into them directly with SQL statements. Note that the names of tables created through schemaless writing are generated from tag values according to fixed mapping rules, so they do not convey any meaning and lack readability.
Note: Schemaless writing creates tables automatically; there is no need to create tables manually, and doing so may lead to unknown errors.
## Schemaless Writing Line Protocol
TDengine's schemaless writing line protocol is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON protocol. When using any of these three protocols, you must specify in the API which protocol standard should be used to parse the input.

View File

@ -2627,6 +2627,7 @@ typedef struct {
int32_t tEncodeSTqCheckInfo(SEncoder* pEncoder, const STqCheckInfo* pInfo);
int32_t tDecodeSTqCheckInfo(SDecoder* pDecoder, STqCheckInfo* pInfo);
void tDeleteSTqCheckInfo(STqCheckInfo* pInfo);
typedef struct {
char topic[TSDB_TOPIC_FNAME_LEN];

View File

@ -338,7 +338,7 @@ typedef struct SStreamTask {
int32_t recoverWaitingUpstream;
int64_t checkReqId;
SArray* checkReqIds; // shuffle
int32_t refCnt;
} SStreamTask;
int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo);
@ -565,6 +565,7 @@ typedef struct SStreamMeta {
TXN txn;
FTaskExpand* expandFunc;
int32_t vgId;
SRWLatch lock;
} SStreamMeta;
SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId);
@ -575,6 +576,10 @@ int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t startVer, c
int32_t streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId);
SStreamTask* streamMetaGetTask(SStreamMeta* pMeta, int32_t taskId);
SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int32_t taskId);
void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
void streamMetaRemoveTask1(SStreamMeta* pMeta, int32_t taskId);
int32_t streamMetaBegin(SStreamMeta* pMeta);
int32_t streamMetaCommit(SStreamMeta* pMeta);
int32_t streamMetaRollBack(SStreamMeta* pMeta);

View File

@ -2,6 +2,7 @@
%define userlocalpath /usr/local
%define cfg_install_dir /etc/taos
%define __strip /bin/true
%global __python /usr/bin/python3
Name: tdengine
Version: %{_version}

View File

@ -1,3 +1,6 @@
TDengine is a highly efficient, scalable, and highly available distributed time-series database. It is heavily optimized for data ingestion and querying, making it far more efficient than ordinary databases, so it can meet the demanding storage and query requirements of IoT and other fields that handle large volumes of data.
TDengine will be installed under C:\TDengine. Users can modify the configuration file C:\TDengine\cfg\taos.cfg to set the log file path and other parameters.
TDengine will be installed under C:\TDengine. Users can modify the configuration file C:\TDengine\cfg\taos.cfg to set the log file path and other parameters.
To start/stop TDengine with administrator privileges: sc start/stop taosd
To start/stop taosAdapter with administrator privileges: sc start/stop taosadapter
Please manually remove C:\TDengine from your system PATH environment after you remove TDengine software.

View File

@ -6036,6 +6036,7 @@ int32_t tDecodeSTqCheckInfo(SDecoder *pDecoder, STqCheckInfo *pInfo) {
}
return 0;
}
void tDeleteSTqCheckInfo(STqCheckInfo *pInfo) { taosArrayDestroy(pInfo->colIdList); }
int32_t tEncodeDeleteRes(SEncoder *pCoder, const SDeleteRes *pRes) {
int32_t nUid = taosArrayGetSize(pRes->uidList);
@ -6159,9 +6160,13 @@ int32_t tDecodeSMqDataRsp(SDecoder *pDecoder, SMqDataRsp *pRsp) {
void tDeleteSMqDataRsp(SMqDataRsp *pRsp) {
taosArrayDestroy(pRsp->blockDataLen);
pRsp->blockDataLen = NULL;
taosArrayDestroyP(pRsp->blockData, (FDelete)taosMemoryFree);
pRsp->blockData = NULL;
taosArrayDestroyP(pRsp->blockSchema, (FDelete)tDeleteSSchemaWrapper);
pRsp->blockSchema = NULL;
taosArrayDestroyP(pRsp->blockTbName, (FDelete)taosMemoryFree);
pRsp->blockTbName = NULL;
}
int32_t tEncodeSTaosxRsp(SEncoder *pEncoder, const STaosxRsp *pRsp) {

View File

@ -122,6 +122,7 @@ void smStopWorker(SSnodeMgmt *pMgmt) {
for (int32_t i = 0; i < taosArrayGetSize(pMgmt->writeWroker); i++) {
SMultiWorker *pWorker = taosArrayGetP(pMgmt->writeWroker, i);
tMultiWorkerCleanup(pWorker);
taosMemoryFree(pWorker);
}
taosArrayDestroy(pMgmt->writeWroker);
tSingleWorkerCleanup(&pMgmt->streamWorker);

View File

@ -287,7 +287,7 @@ int32_t vmGetQueueSize(SVnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype) {
vmReleaseVnode(pMgmt, pVnode);
}
if (size < 0) {
dError("vgId:%d, can't get size from queue since %s, qtype:%d", vgId, terrstr(), qtype);
dTrace("vgId:%d, can't get size from queue since %s, qtype:%d", vgId, terrstr(), qtype);
size = 0;
}
return size;

View File

@ -508,6 +508,7 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
qDestroyQueryPlan(pPlan);
return -1;
}
sdbRelease(pSdb, pVgroup);
}
}
qDestroyQueryPlan(pPlan);

View File

@ -295,6 +295,7 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj,
return -1;
}
pObj->sourceDbUid = pSourceDb->uid;
mndReleaseDb(pMnode, pSourceDb);
memcpy(pObj->targetSTbName, pCreate->targetStbFullName, TSDB_TABLE_FNAME_LEN);
@ -307,6 +308,7 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj,
pObj->targetStbUid = mndGenerateUid(pObj->targetSTbName, TSDB_TABLE_FNAME_LEN);
pObj->targetDbUid = pTargetDb->uid;
mndReleaseDb(pMnode, pTargetDb);
pObj->sql = pCreate->sql;
pObj->ast = pCreate->ast;
@ -523,6 +525,7 @@ static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre
tFreeSMCreateStbReq(&createReq);
mndFreeStb(&stbObj);
mndReleaseDb(pMnode, pDb);
return 0;
_OVER:
@ -738,6 +741,7 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
}
sdbRelease(pMnode->pSdb, pStream);
mndTransDrop(pTrans);
return TSDB_CODE_ACTION_IN_PROGRESS;
}

View File

@ -235,6 +235,7 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR
taosHashPut(pHash, &pVgEp->vgId, sizeof(int32_t), &outputVg, sizeof(SMqRebOutputVg));
mInfo("mq rebalance: remove vgId:%d from consumer:%" PRId64, pVgEp->vgId, consumerId);
}
taosArrayDestroy(pConsumerEp->vgs);
taosHashRemove(pOutput->pSub->consumerHash, &consumerId, sizeof(int64_t));
// put into removed
taosArrayPush(pOutput->removedConsumers, &consumerId);
@ -470,8 +471,12 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu
pConsumerNew->updateType = CONSUMER_UPDATE__TOUCH;
mndReleaseConsumer(pMnode, pConsumerOld);
if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) {
tDeleteSMqConsumerObj(pConsumerNew);
taosMemoryFree(pConsumerNew);
goto REB_FAIL;
}
tDeleteSMqConsumerObj(pConsumerNew);
taosMemoryFree(pConsumerNew);
}
// 3.2 set new consumer
consumerNum = taosArrayGetSize(pOutput->newConsumers);
@ -991,7 +996,7 @@ static int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)&pConsumerEp->consumerId, false);
mDebug("mnd show subscrptions: topic %s, consumer %" PRId64 "cgroup %s vgid %d", varDataVal(topic),
mDebug("mnd show subscriptions: topic %s, consumer %" PRId64 " cgroup %s vgid %d", varDataVal(topic),
pConsumerEp->consumerId, varDataVal(cgroup), pVgEp->vgId);
// offset
@ -1039,7 +1044,7 @@ static int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, NULL, true);
mDebug("mnd show subscrptions(unassigned): topic %s, cgroup %s vgid %d", varDataVal(topic), varDataVal(cgroup),
mDebug("mnd show subscriptions(unassigned): topic %s, cgroup %s vgid %d", varDataVal(topic), varDataVal(cgroup),
pVgEp->vgId);
// offset

View File

@ -349,6 +349,7 @@ int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw, int32_t transId) {
}
rpcFreeCont(req.pCont);
req.pCont = NULL;
if (code != 0) {
mError("trans:%d, failed to propose, code:0x%x", pMgmt->transId, code);
return code;

View File

@ -36,13 +36,14 @@ void sndEnqueueStreamDispatch(SSnode *pSnode, SRpcMsg *pMsg) {
int32_t taskId = req.taskId;
SStreamTask *pTask = streamMetaGetTask(pSnode->pMeta, taskId);
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, taskId);
if (pTask) {
SRpcMsg rsp = {
.info = pMsg->info,
.code = 0,
};
streamProcessDispatchReq(pTask, &req, &rsp, false);
streamMetaReleaseTask(pSnode->pMeta, pTask);
rpcFreeCont(pMsg->pCont);
taosFreeQitem(pMsg);
return;
@ -63,6 +64,7 @@ int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t ver) {
ASSERT(pTask->taskLevel == TASK_LEVEL__AGG);
ASSERT(taosArrayGetSize(pTask->childEpInfo) != 0);
pTask->refCnt = 1;
pTask->schedStatus = TASK_SCHED_STATUS__INACTIVE;
pTask->inputQueue = streamQueueOpen();
pTask->outputQueue = streamQueueOpen();
@ -166,15 +168,17 @@ int32_t sndProcessTaskDeployReq(SSnode *pSnode, char *msg, int32_t msgLen) {
int32_t sndProcessTaskDropReq(SSnode *pSnode, char *msg, int32_t msgLen) {
SVDropStreamTaskReq *pReq = (SVDropStreamTaskReq *)msg;
return streamMetaRemoveTask(pSnode->pMeta, pReq->taskId);
streamMetaRemoveTask1(pSnode->pMeta, pReq->taskId);
return 0;
}
int32_t sndProcessTaskRunReq(SSnode *pSnode, SRpcMsg *pMsg) {
SStreamTaskRunReq *pReq = pMsg->pCont;
int32_t taskId = pReq->taskId;
SStreamTask *pTask = streamMetaGetTask(pSnode->pMeta, taskId);
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, taskId);
if (pTask) {
streamProcessRunReq(pTask);
streamMetaReleaseTask(pSnode->pMeta, pTask);
return 0;
} else {
return -1;
@ -191,13 +195,14 @@ int32_t sndProcessTaskDispatchReq(SSnode *pSnode, SRpcMsg *pMsg, bool exec) {
tDecodeStreamDispatchReq(&decoder, &req);
int32_t taskId = req.taskId;
SStreamTask *pTask = streamMetaGetTask(pSnode->pMeta, taskId);
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, taskId);
if (pTask) {
SRpcMsg rsp = {
.info = pMsg->info,
.code = 0,
};
streamProcessDispatchReq(pTask, &req, &rsp, exec);
streamMetaReleaseTask(pSnode->pMeta, pTask);
return 0;
} else {
return -1;
@ -215,13 +220,14 @@ int32_t sndProcessTaskRetrieveReq(SSnode *pSnode, SRpcMsg *pMsg) {
tDecodeStreamRetrieveReq(&decoder, &req);
tDecoderClear(&decoder);
int32_t taskId = req.dstTaskId;
SStreamTask *pTask = streamMetaGetTask(pSnode->pMeta, taskId);
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, taskId);
if (pTask) {
SRpcMsg rsp = {
.info = pMsg->info,
.code = 0,
};
streamProcessRetrieveReq(pTask, &req, &rsp);
streamMetaReleaseTask(pSnode->pMeta, pTask);
tDeleteStreamRetrieveReq(&req);
return 0;
} else {
@ -232,9 +238,10 @@ int32_t sndProcessTaskRetrieveReq(SSnode *pSnode, SRpcMsg *pMsg) {
int32_t sndProcessTaskDispatchRsp(SSnode *pSnode, SRpcMsg *pMsg) {
SStreamDispatchRsp *pRsp = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
int32_t taskId = ntohl(pRsp->upstreamTaskId);
SStreamTask *pTask = streamMetaGetTask(pSnode->pMeta, taskId);
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, taskId);
if (pTask) {
streamProcessDispatchRsp(pTask, pRsp, pMsg->code);
streamMetaReleaseTask(pSnode->pMeta, pTask);
return 0;
} else {
return -1;
@ -274,15 +281,17 @@ int32_t sndProcessTaskRecoverFinishReq(SSnode *pSnode, SRpcMsg *pMsg) {
tDecoderClear(&decoder);
// find task
SStreamTask *pTask = streamMetaGetTask(pSnode->pMeta, req.taskId);
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, req.taskId);
if (pTask == NULL) {
return -1;
}
// do process request
if (streamProcessRecoverFinishReq(pTask, req.childId) < 0) {
streamMetaReleaseTask(pSnode->pMeta, pTask);
return -1;
}
streamMetaReleaseTask(pSnode->pMeta, pTask);
return 0;
}

View File

@ -88,8 +88,7 @@ typedef struct {
STqExecTb execTb;
STqExecDb execDb;
};
int32_t numOfCols; // number of output columns, temporarily used
SSchemaWrapper* pSchemaWrapper; // columns that are involved in query
int32_t numOfCols; // number of output columns, temporarily used
} STqExecHandle;
typedef struct {

View File

@ -55,6 +55,7 @@ static void destroySTqHandle(void* data) {
STqHandle* pData = (STqHandle*)data;
qDestroyTask(pData->execHandle.task);
if (pData->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
taosMemoryFreeClear(pData->execHandle.execCol.qmsg);
} else if (pData->execHandle.subType == TOPIC_SUB_TYPE__DB) {
tqCloseReader(pData->execHandle.pExecReader);
walCloseReader(pData->pWalReader);
@ -67,6 +68,7 @@ static void destroySTqHandle(void* data) {
static void tqPushEntryFree(void* data) {
STqPushEntry* p = *(void**)data;
tDeleteSMqDataRsp(&p->dataRsp);
taosMemoryFree(p);
}
@ -80,7 +82,6 @@ STQ* tqOpen(const char* path, SVnode* pVnode) {
pTq->pVnode = pVnode;
pTq->pHandle = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK);
taosHashSetFreeFp(pTq->pHandle, destroySTqHandle);
taosInitRWLatch(&pTq->pushLock);
@ -88,6 +89,7 @@ STQ* tqOpen(const char* path, SVnode* pVnode) {
taosHashSetFreeFp(pTq->pPushMgr, tqPushEntryFree);
pTq->pCheckInfo = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK);
taosHashSetFreeFp(pTq->pCheckInfo, (FDelete)tDeleteSTqCheckInfo);
if (tqMetaOpen(pTq) < 0) {
ASSERT(0);
@ -575,8 +577,8 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
return 0;
}
}
taosWUnLockLatch(&pTq->pushLock);
#endif
taosWUnLockLatch(&pTq->pushLock);
if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) {
code = -1;
@ -779,6 +781,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t version, char* msg, int32_t msgL
}
if (req.newConsumerId == -1) {
tqError("vgId:%d, tq invalid rebalance request, new consumerId %" PRId64 "", req.vgId, req.newConsumerId);
taosMemoryFree(req.qmsg);
return 0;
}
STqHandle tqHandle = {0};
@ -815,8 +818,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t version, char* msg, int32_t msgL
req.qmsg = NULL;
pHandle->execHandle.task =
qCreateQueueExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle, &pHandle->execHandle.numOfCols,
&pHandle->execHandle.pSchemaWrapper);
qCreateQueueExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle, &pHandle->execHandle.numOfCols, NULL);
ASSERT(pHandle->execHandle.task);
void* scanner = NULL;
qExtractStreamScanner(pHandle->execHandle.task, &scanner);
@ -864,6 +866,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t version, char* msg, int32_t msgL
atomic_store_32(&pHandle->epoch, -1);
atomic_store_64(&pHandle->consumerId, req.newConsumerId);
atomic_add_fetch_32(&pHandle->epoch, 1);
taosMemoryFree(req.qmsg);
if (tqMetaSaveHandle(pTq, req.subKey, pHandle) < 0) {
// TODO
ASSERT(0);
@ -879,6 +882,7 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
ASSERT(taosArrayGetSize(pTask->childEpInfo) != 0);
}
pTask->refCnt = 1;
pTask->schedStatus = TASK_SCHED_STATUS__INACTIVE;
pTask->inputQueue = streamQueueOpen();
@ -972,13 +976,15 @@ int32_t tqProcessStreamTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) {
.upstreamNodeId = req.upstreamNodeId,
.upstreamTaskId = req.upstreamTaskId,
};
SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, taskId);
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
if (pTask && atomic_load_8(&pTask->taskStatus) == TASK_STATUS__NORMAL) {
rsp.status = 1;
} else {
rsp.status = 0;
}
if (pTask) streamMetaReleaseTask(pTq->pStreamMeta, pTask);
tqDebug("tq recv task check req(reqId: %" PRId64 ") %d at node %d check req from task %d at node %d, status %d",
rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
@ -1024,12 +1030,14 @@ int32_t tqProcessStreamTaskCheckRsp(STQ* pTq, int64_t version, char* msg, int32_
tqDebug("tq recv task check rsp(reqId: %" PRId64 ") %d at node %d check req from task %d at node %d, status %d",
rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, rsp.upstreamTaskId);
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, rsp.upstreamTaskId);
if (pTask == NULL) {
return -1;
}
return streamProcessTaskCheckRsp(pTask, &rsp, version);
code = streamProcessTaskCheckRsp(pTask, &rsp, version);
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return code;
}
int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
@ -1074,15 +1082,17 @@ int32_t tqProcessTaskRecover1Req(STQ* pTq, SRpcMsg* pMsg) {
int32_t msgLen = pMsg->contLen;
SStreamRecoverStep1Req* pReq = (SStreamRecoverStep1Req*)msg;
SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, pReq->taskId);
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, pReq->taskId);
if (pTask == NULL) {
return -1;
}
ASSERT(pReq->taskId == pTask->taskId);
// check param
int64_t fillVer1 = pTask->startVer;
if (fillVer1 <= 0) {
ASSERT(0);
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return -1;
}
@ -1093,10 +1103,11 @@ int32_t tqProcessTaskRecover1Req(STQ* pTq, SRpcMsg* pMsg) {
SStreamRecoverStep2Req req;
code = streamBuildSourceRecover2Req(pTask, &req);
if (code < 0) {
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return -1;
}
ASSERT(pReq->taskId == pTask->taskId);
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
// serialize msg
int32_t len = sizeof(SStreamRecoverStep1Req);
@ -1124,7 +1135,7 @@ int32_t tqProcessTaskRecover1Req(STQ* pTq, SRpcMsg* pMsg) {
int32_t tqProcessTaskRecover2Req(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
int32_t code;
SStreamRecoverStep2Req* pReq = (SStreamRecoverStep2Req*)msg;
SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, pReq->taskId);
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, pReq->taskId);
if (pTask == NULL) {
return -1;
}
@ -1132,27 +1143,33 @@ int32_t tqProcessTaskRecover2Req(STQ* pTq, int64_t version, char* msg, int32_t m
// do recovery step 2
code = streamSourceRecoverScanStep2(pTask, version);
if (code < 0) {
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return -1;
}
// restore param
code = streamRestoreParam(pTask);
if (code < 0) {
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return -1;
}
// set status normal
code = streamSetStatusNormal(pTask);
if (code < 0) {
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return -1;
}
// dispatch recover finish req to all related downstream task
code = streamDispatchRecoverFinishReq(pTask);
if (code < 0) {
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return -1;
}
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return 0;
}
@ -1169,15 +1186,17 @@ int32_t tqProcessTaskRecoverFinishReq(STQ* pTq, SRpcMsg* pMsg) {
tDecoderClear(&decoder);
// find task
SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, req.taskId);
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.taskId);
if (pTask == NULL) {
return -1;
}
// do process request
if (streamProcessRecoverFinishReq(pTask, req.childId) < 0) {
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return -1;
}
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return 0;
}
@ -1351,9 +1370,10 @@ int32_t tqProcessSubmitReq(STQ* pTq, SSubmitReq* pReq, int64_t ver) {
int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) {
SStreamTaskRunReq* pReq = pMsg->pCont;
int32_t taskId = pReq->taskId;
SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, taskId);
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
if (pTask) {
streamProcessRunReq(pTask);
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return 0;
} else {
return -1;
@ -1370,13 +1390,14 @@ int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec) {
tDecodeStreamDispatchReq(&decoder, &req);
int32_t taskId = req.taskId;
SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, taskId);
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
if (pTask) {
SRpcMsg rsp = {
.info = pMsg->info,
.code = 0,
};
streamProcessDispatchReq(pTask, &req, &rsp, exec);
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return 0;
} else {
return -1;
@ -1386,10 +1407,11 @@ int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec) {
int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) {
SStreamDispatchRsp* pRsp = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
int32_t taskId = ntohl(pRsp->upstreamTaskId);
SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, taskId);
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
tqDebug("recv dispatch rsp, code: %x", pMsg->code);
if (pTask) {
streamProcessDispatchRsp(pTask, pRsp, pMsg->code);
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return 0;
} else {
return -1;
@ -1398,7 +1420,8 @@ int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) {
int32_t tqProcessTaskDropReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
SVDropStreamTaskReq* pReq = (SVDropStreamTaskReq*)msg;
return streamMetaRemoveTask(pTq->pStreamMeta, pReq->taskId);
streamMetaRemoveTask1(pTq->pStreamMeta, pReq->taskId);
return 0;
}
int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg) {
@ -1411,13 +1434,14 @@ int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg) {
tDecodeStreamRetrieveReq(&decoder, &req);
tDecoderClear(&decoder);
int32_t taskId = req.dstTaskId;
SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, taskId);
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
if (pTask) {
SRpcMsg rsp = {
.info = pMsg->info,
.code = 0,
};
streamProcessRetrieveReq(pTask, &req, &rsp);
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
tDeleteStreamRetrieveReq(&req);
return 0;
} else {
@ -1449,13 +1473,14 @@ int32_t vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) {
int32_t taskId = req.taskId;
SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, taskId);
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
if (pTask) {
SRpcMsg rsp = {
.info = pMsg->info,
.code = 0,
};
streamProcessDispatchReq(pTask, &req, &rsp, false);
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
rpcFreeCont(pMsg->pCont);
taosFreeQitem(pMsg);
return 0;

View File

@ -179,6 +179,7 @@ int32_t tqMetaRestoreCheckInfo(STQ* pTq) {
if (tDecodeSTqCheckInfo(&decoder, &info) < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
tdbFree(pKey);
tdbFree(pVal);
tdbTbcClose(pCur);
return -1;
}
@ -186,11 +187,13 @@ int32_t tqMetaRestoreCheckInfo(STQ* pTq) {
if (taosHashPut(pTq->pCheckInfo, info.topic, strlen(info.topic), &info, sizeof(STqCheckInfo)) < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
tdbFree(pKey);
tdbFree(pVal);
tdbTbcClose(pCur);
return -1;
}
}
tdbFree(pKey);
tdbFree(pVal);
tdbTbcClose(pCur);
return 0;
}
@ -305,8 +308,8 @@ int32_t tqMetaRestoreHandle(STQ* pTq) {
};
if (handle.execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
handle.execHandle.task = qCreateQueueExecTaskInfo(
handle.execHandle.execCol.qmsg, &reader, &handle.execHandle.numOfCols, &handle.execHandle.pSchemaWrapper);
handle.execHandle.task =
qCreateQueueExecTaskInfo(handle.execHandle.execCol.qmsg, &reader, &handle.execHandle.numOfCols, NULL);
ASSERT(handle.execHandle.task);
void* scanner = NULL;
qExtractStreamScanner(handle.execHandle.task, &scanner);

View File

@ -425,6 +425,7 @@ int32_t tqRetrieveDataBlock(SSDataBlock* pBlock, STqReader* pReader) {
tqWarn("cannot found tsschema for table: uid:%" PRId64 " (suid:%" PRId64 "), version %d, possibly dropped table",
pReader->msgIter.uid, pReader->msgIter.suid, pReader->cachedSchemaVer);
/*ASSERT(0);*/
pReader->cachedSchemaSuid = 0;
terrno = TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND;
return -1;
}
@ -435,6 +436,7 @@ int32_t tqRetrieveDataBlock(SSDataBlock* pBlock, STqReader* pReader) {
tqWarn("cannot found schema wrapper for table: suid:%" PRId64 ", version %d, possibly dropped table",
pReader->msgIter.uid, pReader->cachedSchemaVer);
/*ASSERT(0);*/
pReader->cachedSchemaSuid = 0;
terrno = TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND;
return -1;
}

View File

@ -31,7 +31,11 @@ int32_t tqBuildDeleteReq(SVnode* pVnode, const char* stbFullName, const SSDataBl
int64_t ts = *(int64_t*)colDataGetData(pTsCol, row);
int64_t groupId = *(int64_t*)colDataGetData(pGidCol, row);
char* name;
void* varTbName = colDataGetVarData(pTbNameCol, row);
void* varTbName = NULL;
if (!colDataIsNull(pTbNameCol, totRow, row, NULL)) {
varTbName = colDataGetVarData(pTbNameCol, row);
}
if (varTbName != NULL && varTbName != (void*)-1) {
name = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN);
memcpy(name, varDataVal(varTbName), varDataLen(varTbName));

View File

@ -88,6 +88,7 @@ typedef struct SBlockLoadSuppInfo {
int16_t* colIds; // column ids for loading file block data
int32_t numOfCols;
char** buildBuf; // build string tmp buffer, todo remove it later after all string format being updated.
bool smaValid; // the sma on all queried columns are activated
} SBlockLoadSuppInfo;
typedef struct SLastBlockReader {
@ -213,11 +214,10 @@ static bool hasDataInFileBlock(const SBlockData* pBlockData, const SFil
static bool outOfTimeWindow(int64_t ts, STimeWindow* pWindow) { return (ts > pWindow->ekey) || (ts < pWindow->skey); }
static int32_t setColumnIdSlotList(STsdbReader* pReader, SSDataBlock* pBlock) {
SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
static int32_t setColumnIdSlotList(SBlockLoadSuppInfo* pSupInfo, SSDataBlock* pBlock) {
size_t numOfCols = blockDataGetNumOfCols(pBlock);
pSupInfo->smaValid = true;
pSupInfo->numOfCols = numOfCols;
pSupInfo->colIds = taosMemoryMalloc(numOfCols * sizeof(int16_t));
pSupInfo->buildBuf = taosMemoryCalloc(numOfCols, POINTER_BYTES);
@ -239,6 +239,28 @@ static int32_t setColumnIdSlotList(STsdbReader* pReader, SSDataBlock* pBlock) {
return TSDB_CODE_SUCCESS;
}
static void updateBlockSMAInfo(STSchema* pSchema, SBlockLoadSuppInfo* pSupInfo) {
int32_t i = 0, j = 0;
while(i < pSchema->numOfCols && j < pSupInfo->numOfCols) {
STColumn* pTCol = &pSchema->columns[i];
if (pTCol->colId == pSupInfo->colIds[j]) {
if (!IS_BSMA_ON(pTCol)) {
pSupInfo->smaValid = false;
return;
}
i += 1;
j += 1;
} else if (pTCol->colId < pSupInfo->colIds[j]) {
// do nothing
i += 1;
} else {
ASSERT(0);
}
}
}
static int32_t initBlockScanInfoBuf(SBlockInfoBuf* pBuf, int32_t numOfTables) {
int32_t num = numOfTables / pBuf->numPerBucket;
int32_t remainder = numOfTables % pBuf->numPerBucket;
@ -580,7 +602,7 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd
// allocate buffer in order to load data blocks from file
SBlockLoadSuppInfo* pSup = &pReader->suppInfo;
pSup->pColAgg = taosArrayInit(4, sizeof(SColumnDataAgg));
pSup->pColAgg = taosArrayInit(pCond->numOfCols, sizeof(SColumnDataAgg));
pSup->plist = taosMemoryCalloc(pCond->numOfCols, POINTER_BYTES);
if (pSup->pColAgg == NULL || pSup->plist == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
@ -601,7 +623,7 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd
goto _end;
}
setColumnIdSlotList(pReader, pReader->pResBlock);
setColumnIdSlotList(&pReader->suppInfo, pReader->pResBlock);
*ppReader = pReader;
return code;
@ -3763,6 +3785,10 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL
}
}
if (pReader->pSchema != NULL) {
updateBlockSMAInfo(pReader->pSchema, &pReader->suppInfo);
}
STsdbReader* p = pReader->innerReader[0] != NULL ? pReader->innerReader[0] : pReader;
pReader->status.pTableMap = createDataBlockScanInfo(p, pTableList, numOfTables);
@ -4020,7 +4046,7 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SColumnDataAgg*** pBlockS
}
// there is no statistics data for composed block
if (pReader->status.composedDataBlock) {
if (pReader->status.composedDataBlock || (!pReader->suppInfo.smaValid)) {
*pBlockStatis = NULL;
return TSDB_CODE_SUCCESS;
}
@ -4060,7 +4086,7 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SColumnDataAgg*** pBlockS
int32_t i = 0, j = 0;
size_t size = taosArrayGetSize(pSup->pColAgg);
#if 0
while (j < numOfCols && i < size) {
SColumnDataAgg* pAgg = taosArrayGet(pSup->pColAgg, i);
if (pAgg->colId == pSup->colIds[j]) {
@ -4068,6 +4094,7 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SColumnDataAgg*** pBlockS
pSup->plist[j] = pAgg;
} else {
*allHave = false;
break;
}
i += 1;
j += 1;
@ -4077,12 +4104,43 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SColumnDataAgg*** pBlockS
j += 1;
}
}
#else
// fill the all null data column
SArray* pNewAggList = taosArrayInit(numOfCols, sizeof(SColumnDataAgg));
while (j < numOfCols && i < size) {
SColumnDataAgg* pAgg = taosArrayGet(pSup->pColAgg, i);
if (pAgg->colId == pSup->colIds[j]) {
taosArrayPush(pNewAggList, pAgg);
i += 1;
j += 1;
} else if (pAgg->colId < pSup->colIds[j]) {
i += 1;
} else if (pSup->colIds[j] < pAgg->colId) {
// all data in this block are null
SColumnDataAgg nullColAgg = {.colId = pSup->colIds[j], .numOfNull = pBlock->nRow};
taosArrayPush(pNewAggList, &nullColAgg);
j += 1;
}
}
taosArrayClear(pSup->pColAgg);
taosArrayAddAll(pSup->pColAgg, pNewAggList);
size_t num = taosArrayGetSize(pSup->pColAgg);
for(int32_t k = 0; k < num; ++k) {
pSup->plist[k] = taosArrayGet(pSup->pColAgg, k);
}
taosArrayDestroy(pNewAggList);
#endif
pReader->cost.smaDataLoad += 1;
*pBlockStatis = pSup->plist;
tsdbDebug("vgId:%d, succeed to load block SMA for uid %" PRIu64 ", %s", 0, pFBlock->uid, pReader->idStr);
return code;
}

View File

@ -517,7 +517,7 @@ static int32_t tsdbWriteBlockSma(SDataFWriter *pWriter, SBlockData *pBlockData,
for (int32_t iColData = 0; iColData < pBlockData->nColData; iColData++) {
SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iColData);
if ((!pColData->smaOn) || IS_VAR_DATA_TYPE(pColData->type)) continue;
if ((!pColData->smaOn) || IS_VAR_DATA_TYPE(pColData->type) || ((pColData->flag & HAS_VALUE) == 0)) continue;
SColumnDataAgg sma = {.colId = pColData->cid};
tColDataCalcSMA[pColData->type](pColData, &sma.sum, &sma.max, &sma.min, &sma.numOfNull);

View File

@ -1189,6 +1189,8 @@ static int32_t vnodeProcessBatchDeleteReq(SVnode *pVnode, int64_t version, void
vError("vgId:%d, delete error since %s, suid:%" PRId64 ", uid:%" PRId64 ", start ts:%" PRId64 ", end ts:%" PRId64,
TD_VID(pVnode), terrstr(), deleteReq.suid, uid, pOneReq->ts, pOneReq->ts);
}
tDecoderClear(&mr.coder);
}
metaReaderClear(&mr);
taosArrayDestroy(deleteReq.deleteReqs);

View File

@ -154,8 +154,8 @@ typedef struct {
} SSchemaInfo;
typedef struct {
int32_t operatorType;
int64_t refId;
int32_t operatorType;
int64_t refId;
} SExchangeOpStopInfo;
typedef struct {
@ -261,10 +261,10 @@ typedef struct SLimitInfo {
} SLimitInfo;
typedef struct SExchangeInfo {
SArray* pSources;
SArray* pSourceDataInfo;
tsem_t ready;
void* pTransporter;
SArray* pSources;
SArray* pSourceDataInfo;
tsem_t ready;
void* pTransporter;
// SArray<SSDataBlock*>, result block list, used to keep the multi-block that
// passed by downstream operator
SArray* pResultBlockList;
@ -275,7 +275,7 @@ typedef struct SExchangeInfo {
SLoadRemoteDataInfo loadInfo;
uint64_t self;
SLimitInfo limitInfo;
int64_t openedTs; // start exec time stamp
int64_t openedTs; // start exec time stamp
} SExchangeInfo;
typedef struct SScanInfo {
@ -310,9 +310,9 @@ typedef struct {
} SAggOptrPushDownInfo;
typedef struct STableMetaCacheInfo {
SLRUCache* pTableMetaEntryCache; // 100 by default
uint64_t metaFetch;
uint64_t cacheHit;
SLRUCache* pTableMetaEntryCache; // 100 by default
uint64_t metaFetch;
uint64_t cacheHit;
} STableMetaCacheInfo;
typedef struct STableScanInfo {
@ -343,7 +343,7 @@ typedef struct STableMergeScanInfo {
int32_t tableEndIndex;
bool hasGroupId;
uint64_t groupId;
SArray* queryConds; // array of queryTableDataCond
SArray* queryConds; // array of queryTableDataCond
STsdbReader* pReader;
SReadHandle readHandle;
int32_t bufPageSize;
@ -358,7 +358,7 @@ typedef struct STableMergeScanInfo {
int64_t numOfRows;
SScanInfo scanInfo;
int32_t scanTimes;
SqlFunctionCtx* pCtx; // which belongs to the direct upstream operator's query context
SqlFunctionCtx* pCtx; // which belongs to the direct upstream operator's query context
SResultRowInfo* pResultRowInfo;
int32_t* rowEntryInfoOffset;
SExprInfo* pExpr;
@ -504,6 +504,7 @@ typedef struct SStreamScanInfo {
STimeWindow updateWin;
STimeWindowAggSupp twAggSup;
SSDataBlock* pUpdateDataRes;
SHashObj* pGroupIdTbNameMap;
// status for tmq
SNodeList* pGroupTags;
SNode* pTagCond;
@ -550,10 +551,10 @@ typedef struct SSysTableScanInfo {
} SSysTableScanInfo;
typedef struct SBlockDistInfo {
SSDataBlock* pResBlock;
STsdbReader* pHandle;
SReadHandle readHandle;
uint64_t uid; // table uid
SSDataBlock* pResBlock;
STsdbReader* pHandle;
SReadHandle readHandle;
uint64_t uid; // table uid
} SBlockDistInfo;
// todo remove this
@ -663,9 +664,9 @@ typedef struct SGroupbyOperatorInfo {
SAggSupporter aggSup;
SArray* pGroupCols; // group by columns, SArray<SColumn>
SArray* pGroupColVals; // current group column values, SArray<SGroupKeys>
bool isInit; // denote if current val is initialized or not
char* keyBuf; // group by keys for hash
int32_t groupKeyLen; // total group by column width
bool isInit; // denote if current val is initialized or not
char* keyBuf; // group by keys for hash
int32_t groupKeyLen; // total group by column width
SGroupResInfo groupResInfo;
SExprSupp scalarSup;
} SGroupbyOperatorInfo;
@ -890,14 +891,14 @@ STimeWindow getFirstQualifiedTimeWindow(int64_t ts, STimeWindow* pWindow, SInter
int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scanFlag);
int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaultBufsz);
void doDestroyExchangeOperatorInfo(void* param);
void doDestroyExchangeOperatorInfo(void* param);
void setOperatorCompleted(SOperatorInfo* pOperator);
void setOperatorInfo(SOperatorInfo* pOperator, const char* name, int32_t type, bool blocking, int32_t status, void* pInfo,
SExecTaskInfo* pTaskInfo);
void setOperatorInfo(SOperatorInfo* pOperator, const char* name, int32_t type, bool blocking, int32_t status,
void* pInfo, SExecTaskInfo* pTaskInfo);
void doFilter(SSDataBlock* pBlock, SFilterInfo* pFilterInfo, SColMatchInfo* pColMatchInfo);
int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int32_t numOfExpr,
SSDataBlock* pBlock, int32_t rows, const char* idStr, STableMetaCacheInfo * pCache);
int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int32_t numOfExpr, SSDataBlock* pBlock,
int32_t rows, const char* idStr, STableMetaCacheInfo* pCache);
void cleanupAggSup(SAggSupporter* pAggSup);
void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle);
@ -992,7 +993,7 @@ void setTaskKilled(SExecTaskInfo* pTaskInfo);
void queryCostStatis(SExecTaskInfo* pTaskInfo);
void doDestroyTask(SExecTaskInfo* pTaskInfo);
void destroyOperatorInfo(SOperatorInfo* pOperator);
void destroyOperatorInfo(SOperatorInfo* pOperator);
int32_t getMaximumIdleDurationSec();
/*
@ -1038,6 +1039,7 @@ void appendOneRowToStreamSpecialBlock(SSDataBlock* pBlock, TSKEY* pStartTs, TSKE
uint64_t* pGp, void* pTbName);
void printDataBlock(SSDataBlock* pBlock, const char* flag);
uint64_t calGroupIdByData(SPartitionBySupporter* pParSup, SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t rowId);
void calBlockTbName(SStreamScanInfo* pInfo, SSDataBlock* pBlock);
int32_t finalizeResultRows(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition, SExprSupp* pSup,
SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo);
@ -1061,7 +1063,7 @@ int32_t setOutputBuf(SStreamState* pState, STimeWindow* win, SResultRow** pResul
int32_t releaseOutputBuf(SStreamState* pState, SWinKey* pKey, SResultRow* pResult);
int32_t saveOutputBuf(SStreamState* pState, SWinKey* pKey, SResultRow* pResult, int32_t resSize);
void getNextIntervalWindow(SInterval* pInterval, STimeWindow* tw, int32_t order);
int32_t qAppendTaskStopInfo(SExecTaskInfo* pTaskInfo, SExchangeOpStopInfo *pInfo);
int32_t qAppendTaskStopInfo(SExecTaskInfo* pTaskInfo, SExchangeOpStopInfo* pInfo);
#ifdef __cplusplus
}

View File

@ -208,7 +208,7 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
if (pInfo->pseudoExprSup.numOfExprs > 0) {
SExprSupp* pSup = &pInfo->pseudoExprSup;
STableKeyInfo* pKeyInfo = &((STableKeyInfo*)pTableList)[0];
STableKeyInfo* pKeyInfo = &((STableKeyInfo*)pList)[0];
pInfo->pRes->info.groupId = pKeyInfo->groupId;
if (taosArrayGetSize(pInfo->pUidList) > 0) {

View File

@ -246,7 +246,9 @@ qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* n
}
}
*pSchema = tCloneSSchemaWrapper(((SExecTaskInfo*)pTaskInfo)->schemaInfo.qsw);
if (pSchema) {
*pSchema = tCloneSSchemaWrapper(((SExecTaskInfo*)pTaskInfo)->schemaInfo.qsw);
}
return pTaskInfo;
}
@ -659,7 +661,7 @@ int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t* useconds) {
return pTaskInfo->code;
}
int32_t qAppendTaskStopInfo(SExecTaskInfo* pTaskInfo, SExchangeOpStopInfo *pInfo) {
int32_t qAppendTaskStopInfo(SExecTaskInfo* pTaskInfo, SExchangeOpStopInfo* pInfo) {
taosWLockLatch(&pTaskInfo->stopInfo.lock);
taosArrayPush(pTaskInfo->stopInfo.pStopInfo, pInfo);
taosWUnLockLatch(&pTaskInfo->stopInfo.lock);
@ -680,7 +682,7 @@ int32_t stopInfoComp(void const* lp, void const* rp) {
return 0;
}
void qRemoveTaskStopInfo(SExecTaskInfo* pTaskInfo, SExchangeOpStopInfo *pInfo) {
void qRemoveTaskStopInfo(SExecTaskInfo* pTaskInfo, SExchangeOpStopInfo* pInfo) {
taosWLockLatch(&pTaskInfo->stopInfo.lock);
int32_t idx = taosArraySearchIdx(pTaskInfo->stopInfo.pStopInfo, pInfo, stopInfoComp, TD_EQ);
if (idx >= 0) {
@ -696,8 +698,8 @@ void qStopTaskOperators(SExecTaskInfo* pTaskInfo) {
int32_t num = taosArrayGetSize(pTaskInfo->stopInfo.pStopInfo);
for (int32_t i = 0; i < num; ++i) {
SExchangeOpStopInfo *pStop = taosArrayGet(pTaskInfo->stopInfo.pStopInfo, i);
SExchangeInfo* pExchangeInfo = taosAcquireRef(exchangeObjRefPool, pStop->refId);
SExchangeOpStopInfo* pStop = taosArrayGet(pTaskInfo->stopInfo.pStopInfo, i);
SExchangeInfo* pExchangeInfo = taosAcquireRef(exchangeObjRefPool, pStop->refId);
if (pExchangeInfo) {
tsem_post(&pExchangeInfo->ready);
taosReleaseRef(exchangeObjRefPool, pStop->refId);
@ -715,11 +717,11 @@ int32_t qAsyncKillTask(qTaskInfo_t qinfo) {
}
qDebug("%s execTask async killed", GET_TASKID(pTaskInfo));
setTaskKilled(pTaskInfo);
qStopTaskOperators(pTaskInfo);
return TSDB_CODE_SUCCESS;
}
@ -1178,4 +1180,4 @@ void qProcessRspMsg(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
pSendInfo->fp(pSendInfo->param, &buf, pMsg->code);
rpcFreeCont(pMsg->pCont);
destroySendMsgInfo(pSendInfo);
}
}

View File

@ -659,7 +659,11 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc
pfCtx->pDstBlock = pResult;
}
numOfRows = pfCtx->fpSet.process(pfCtx);
int32_t code = pfCtx->fpSet.process(pfCtx);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
numOfRows = pResInfo->numOfRes;
} else if (fmIsAggFunc(pfCtx->functionId)) {
// selective value output should be set during corresponding function execution
if (fmIsSelectValueFunc(pfCtx->functionId)) {

View File

@ -1515,10 +1515,17 @@ static int32_t generateDeleteResultBlock(SStreamScanInfo* pInfo, SSDataBlock* pS
for (int32_t i = 0; i < pSrcBlock->info.rows; i++) {
uint64_t srcUid = srcUidData[i];
uint64_t groupId = srcGp[i];
char* tbname[VARSTR_HEADER_SIZE + TSDB_TABLE_NAME_LEN] = {0};
if (groupId == 0) {
groupId = getGroupIdByData(pInfo, srcUid, srcStartTsCol[i], version);
}
appendOneRowToStreamSpecialBlock(pDestBlock, srcStartTsCol + i, srcEndTsCol + i, srcUidData + i, &groupId, NULL);
if (pInfo->tbnameCalSup.pExprInfo) {
char* parTbname = taosHashGet(pInfo->pGroupIdTbNameMap, &groupId, sizeof(int64_t));
memcpy(varDataVal(tbname), parTbname, TSDB_TABLE_NAME_LEN);
varDataSetLen(tbname, strlen(varDataVal(tbname)));
}
appendOneRowToStreamSpecialBlock(pDestBlock, srcStartTsCol + i, srcEndTsCol + i, srcUidData + i, &groupId,
tbname[0] == 0 ? NULL : tbname);
}
return TSDB_CODE_SUCCESS;
}
@ -1562,9 +1569,16 @@ static void calBlockTag(SExprSupp* pTagCalSup, SSDataBlock* pBlock, SSDataBlock*
blockDataDestroy(pSrcBlock);
}
static void calBlockTbName(SExprSupp* pTbNameCalSup, SSDataBlock* pBlock) {
void calBlockTbName(SStreamScanInfo* pInfo, SSDataBlock* pBlock) {
SExprSupp* pTbNameCalSup = &pInfo->tbnameCalSup;
if (pTbNameCalSup == NULL || pTbNameCalSup->numOfExprs == 0) return;
if (pBlock == NULL || pBlock->info.rows == 0) return;
if (pBlock->info.groupId) {
char* tbname = taosHashGet(pInfo->pGroupIdTbNameMap, &pBlock->info.groupId, sizeof(int64_t));
if (tbname != NULL) {
memcpy(pBlock->info.parTbName, tbname, TSDB_TABLE_NAME_LEN);
}
}
SSDataBlock* pSrcBlock = blockCopyOneRow(pBlock, 0);
ASSERT(pSrcBlock->info.rows == 1);
@ -1592,6 +1606,11 @@ static void calBlockTbName(SExprSupp* pTbNameCalSup, SSDataBlock* pBlock) {
pBlock->info.parTbName[0] = 0;
}
if (pBlock->info.groupId) {
taosHashPut(pInfo->pGroupIdTbNameMap, &pBlock->info.groupId, sizeof(int64_t), pBlock->info.parTbName,
TSDB_TABLE_NAME_LEN);
}
blockDataDestroy(pSrcBlock);
blockDataDestroy(pResBlock);
}
@ -1713,7 +1732,7 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock
blockDataUpdateTsWindow(pInfo->pRes, pInfo->primaryTsIndex);
blockDataFreeRes((SSDataBlock*)pBlock);
calBlockTbName(&pInfo->tbnameCalSup, pInfo->pRes);
calBlockTbName(pInfo, pInfo->pRes);
return 0;
}
@ -1960,8 +1979,10 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
if (pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__SCAN) {
SSDataBlock* pBlock = doTableScan(pInfo->pTableScanOp);
if (pBlock != NULL) {
calBlockTbName(&pInfo->tbnameCalSup, pBlock);
updateInfoFillBlockData(pInfo->pUpdateInfo, pBlock, pInfo->primaryTsIndex);
calBlockTbName(pInfo, pBlock);
if (pInfo->pUpdateInfo) {
updateInfoFillBlockData(pInfo->pUpdateInfo, pBlock, pInfo->primaryTsIndex);
}
qDebug("stream recover scan get block, rows %d", pBlock->info.rows);
return pBlock;
}
@ -2081,7 +2102,7 @@ FETCH_NEXT_BLOCK:
pSDB->info.type = pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER_RANGE ? STREAM_NORMAL : STREAM_PULL_DATA;
checkUpdateData(pInfo, true, pSDB, false);
// printDataBlock(pSDB, "stream scan update");
calBlockTbName(&pInfo->tbnameCalSup, pSDB);
calBlockTbName(pInfo, pSDB);
return pSDB;
}
blockDataCleanup(pInfo->pUpdateDataRes);
@ -2385,6 +2406,9 @@ static void destroyStreamScanOperatorInfo(void* param) {
taosMemoryFree(pStreamScan->pPseudoExpr);
}
cleanupExprSupp(&pStreamScan->tbnameCalSup);
taosHashCleanup(pStreamScan->pGroupIdTbNameMap);
updateInfoDestroy(pStreamScan->pUpdateInfo);
blockDataDestroy(pStreamScan->pRes);
blockDataDestroy(pStreamScan->pUpdateRes);
@ -2441,6 +2465,8 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
if (initExprSupp(&pInfo->tbnameCalSup, pSubTableExpr, 1) != 0) {
goto _error;
}
pInfo->pGroupIdTbNameMap =
taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, HASH_NO_LOCK);
}
if (pTableScanNode->pTags != NULL) {

View File

@ -4296,7 +4296,7 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT ||
pBlock->info.type == STREAM_CLEAR) {
// gap must be 0
SArray* pWins = taosArrayInit(16, sizeof(SResultWindowInfo));
SArray* pWins = taosArrayInit(16, sizeof(SSessionKey));
doDeleteTimeWindows(&pInfo->streamAggSup, pBlock, pWins);
removeSessionResults(pStUpdated, pWins);
copyDeleteWindowInfo(pWins, pInfo->pStDeleted);

View File

@ -93,7 +93,7 @@ static SHNode *doCreateHashNode(const void *key, size_t keyLen, const void *data
pNewNode->keyLen = keyLen;
pNewNode->dataLen = dataLen;
pNewNode->next = NULL;
memcpy(GET_SHASH_NODE_DATA(pNewNode), data, dataLen);
if (data) memcpy(GET_SHASH_NODE_DATA(pNewNode), data, dataLen);
memcpy(GET_SHASH_NODE_KEY(pNewNode, dataLen), key, keyLen);
return pNewNode;
}
@ -203,7 +203,7 @@ int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, size_t keyLen, cons
pNewNode->next = pHashObj->hashList[slot];
pHashObj->hashList[slot] = pNewNode;
atomic_add_fetch_64(&pHashObj->size, 1);
} else { // update data
} else if (data) { // update data
memcpy(GET_SHASH_NODE_DATA(pNode), data, dataLen);
}

View File

@ -2640,8 +2640,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "diff",
.type = FUNCTION_TYPE_DIFF,
.classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_KEEP_ORDER_FUNC |
FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_CUMULATIVE_FUNC,
.classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC |
FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_CUMULATIVE_FUNC,
.translateFunc = translateDiff,
.getEnvFunc = getDiffFuncEnv,
.initFunc = diffFunctionSetup,
@ -2653,7 +2653,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "statecount",
.type = FUNCTION_TYPE_STATE_COUNT,
.classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
.classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC |
FUNC_MGT_FORBID_STREAM_FUNC,
.translateFunc = translateStateCount,
.getEnvFunc = getStateFuncEnv,
.initFunc = functionSetup,
@ -2664,7 +2665,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "stateduration",
.type = FUNCTION_TYPE_STATE_DURATION,
.classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
.classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC |
FUNC_MGT_FORBID_STREAM_FUNC,
.translateFunc = translateStateDuration,
.getEnvFunc = getStateFuncEnv,
.initFunc = functionSetup,
@ -2675,7 +2677,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "csum",
.type = FUNCTION_TYPE_CSUM,
.classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC |
.classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC |
FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_CUMULATIVE_FUNC | FUNC_MGT_KEEP_ORDER_FUNC,
.translateFunc = translateCsum,
.getEnvFunc = getCsumFuncEnv,
@ -2688,7 +2690,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "mavg",
.type = FUNCTION_TYPE_MAVG,
.classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
.classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC |
FUNC_MGT_FORBID_STREAM_FUNC,
.translateFunc = translateMavg,
.getEnvFunc = getMavgFuncEnv,
.initFunc = mavgFunctionSetup,
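The classification field above is a bitmask of FUNC_MGT_* capability flags, so adding FUNC_MGT_IMPLICIT_TS_FUNC simply ORs in one more bit. A generic sketch of how such a flag is composed and tested follows; the bit positions and the helper name are illustrative, not the actual functionMgt definitions.

#include <stdbool.h>
#include <stdint.h>

#define TOY_SELECT_FUNC      (1u << 1)  /* bit positions are illustrative */
#define TOY_IMPLICIT_TS_FUNC (1u << 2)

static const uint64_t kToyDiffClassification = TOY_SELECT_FUNC | TOY_IMPLICIT_TS_FUNC;

/* A caller checks a capability by masking the classification word. */
static bool toyNeedsImplicitTs(uint64_t classification) {
  return (classification & TOY_IMPLICIT_TS_FUNC) != 0;
}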

View File

@ -48,6 +48,9 @@ typedef struct SSumRes {
double dsum;
};
int16_t type;
int64_t prevTs; // used for csum only
bool isPrevTsSet; //used for csum only
} SSumRes;
typedef struct SAvgRes {
@ -190,6 +193,8 @@ typedef struct SStateInfo {
int64_t count;
int64_t durationStart;
};
int64_t prevTs;
bool isPrevTsSet;
} SStateInfo;
typedef enum {
@ -205,6 +210,8 @@ typedef enum {
typedef struct SMavgInfo {
int32_t pos;
double sum;
int64_t prevTs;
bool isPrevTsSet;
int32_t numOfPoints;
bool pointsMeet;
double points[];
@ -2132,6 +2139,11 @@ int32_t stddevFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
int32_t type = pStddevRes->type;
double avg;
if (pStddevRes->count == 0) {
GET_RES_INFO(pCtx)->numOfRes = 0;
return functionFinalize(pCtx, pBlock);
}
if (IS_SIGNED_NUMERIC_TYPE(type)) {
avg = pStddevRes->isum / ((double)pStddevRes->count);
pStddevRes->result = sqrt(fabs(pStddevRes->quadraticISum / ((double)pStddevRes->count) - avg * avg));
@ -2912,6 +2924,8 @@ static void firstlastSaveTupleData(const SSDataBlock* pSrcBlock, int32_t rowInde
if (!pInfo->hasResult) {
pInfo->pos = saveTupleData(pCtx, rowIndex, pSrcBlock, NULL);
ASSERT(pCtx->subsidiaries.buf != NULL);
ASSERT(pCtx->subsidiaries.rowLen > 0);
} else {
updateTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos);
}
@ -3388,6 +3402,7 @@ bool diffFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo) {
SDiffInfo* pDiffInfo = GET_ROWCELL_INTERBUF(pResInfo);
pDiffInfo->hasPrev = false;
pDiffInfo->prev.i64 = 0;
pDiffInfo->prevTs = -1;
if (pCtx->numOfParams > 1) {
pDiffInfo->ignoreNegative = pCtx->param[1].param.i; // TODO set correct param
} else {
@ -3398,7 +3413,7 @@ bool diffFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo) {
return true;
}
static void doSetPrevVal(SDiffInfo* pDiffInfo, int32_t type, const char* pv) {
static void doSetPrevVal(SDiffInfo* pDiffInfo, int32_t type, const char* pv, int64_t ts) {
switch (type) {
case TSDB_DATA_TYPE_BOOL:
pDiffInfo->prev.i64 = *(bool*)pv ? 1 : 0;
@ -3425,11 +3440,13 @@ static void doSetPrevVal(SDiffInfo* pDiffInfo, int32_t type, const char* pv) {
default:
ASSERT(0);
}
pDiffInfo->prevTs = ts;
}
static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SColumnInfoData* pOutput, int32_t pos,
int32_t order) {
int32_t order, int64_t ts) {
int32_t factor = (order == TSDB_ORDER_ASC) ? 1 : -1;
pDiffInfo->prevTs = ts;
switch (type) {
case TSDB_DATA_TYPE_INT: {
int32_t v = *(int32_t*)pv;
@ -3513,6 +3530,8 @@ int32_t diffFunction(SqlFunctionCtx* pCtx) {
SColumnInfoData* pInputCol = pInput->pData[0];
TSKEY* tsList = (int64_t*)pInput->pPTS->pData;
int32_t numOfElems = 0;
int32_t startOffset = pCtx->offset;
@ -3534,7 +3553,10 @@ int32_t diffFunction(SqlFunctionCtx* pCtx) {
char* pv = colDataGetData(pInputCol, i);
if (pDiffInfo->hasPrev) {
doHandleDiff(pDiffInfo, pInputCol->info.type, pv, pOutput, pos, pCtx->order);
if (tsList[i] == pDiffInfo->prevTs) {
return TSDB_CODE_FUNC_DUP_TIMESTAMP;
}
doHandleDiff(pDiffInfo, pInputCol->info.type, pv, pOutput, pos, pCtx->order, tsList[i]);
// handle selectivity
if (pCtx->subsidiaries.num > 0) {
appendSelectivityValue(pCtx, i, pos);
@ -3542,7 +3564,7 @@ int32_t diffFunction(SqlFunctionCtx* pCtx) {
numOfElems++;
} else {
doSetPrevVal(pDiffInfo, pInputCol->info.type, pv);
doSetPrevVal(pDiffInfo, pInputCol->info.type, pv, tsList[i]);
}
pDiffInfo->hasPrev = true;
@ -3564,7 +3586,10 @@ int32_t diffFunction(SqlFunctionCtx* pCtx) {
// there is a row of previous data block to be handled in the first place.
if (pDiffInfo->hasPrev) {
doHandleDiff(pDiffInfo, pInputCol->info.type, pv, pOutput, pos, pCtx->order);
if (tsList[i] == pDiffInfo->prevTs) {
return TSDB_CODE_FUNC_DUP_TIMESTAMP;
}
doHandleDiff(pDiffInfo, pInputCol->info.type, pv, pOutput, pos, pCtx->order, tsList[i]);
// handle selectivity
if (pCtx->subsidiaries.num > 0) {
appendSelectivityValue(pCtx, i, pos);
@ -3572,15 +3597,15 @@ int32_t diffFunction(SqlFunctionCtx* pCtx) {
numOfElems++;
} else {
doSetPrevVal(pDiffInfo, pInputCol->info.type, pv);
doSetPrevVal(pDiffInfo, pInputCol->info.type, pv, tsList[i]);
}
pDiffInfo->hasPrev = true;
}
}
// initial value is not set yet
return numOfElems;
pResInfo->numOfRes = numOfElems;
return TSDB_CODE_SUCCESS;
}
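The duplicate-timestamp guard added above (and repeated below for csum, mavg, statecount and stateduration) remembers the previous primary timestamp and aborts when the same key is seen twice. A standalone sketch of that guard, with a stand-in error constant instead of TSDB_CODE_FUNC_DUP_TIMESTAMP:

#include <stdbool.h>
#include <stdint.h>

#define TOY_DUP_TIMESTAMP (-1)  /* stand-in for TSDB_CODE_FUNC_DUP_TIMESTAMP */

typedef struct {
  int64_t prevTs;
  bool    isPrevTsSet;
} TsGuard;

/* Returns TOY_DUP_TIMESTAMP when the same primary timestamp arrives twice in a row. */
static int32_t tsGuardCheck(TsGuard *g, int64_t ts) {
  if (g->isPrevTsSet && ts == g->prevTs) return TOY_DUP_TIMESTAMP;
  g->prevTs = ts;
  g->isPrevTsSet = true;
  return 0;
}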
int32_t getTopBotInfoSize(int64_t numOfItems) { return sizeof(STopBotRes) + numOfItems * sizeof(STopBotResItem); }
@ -4343,6 +4368,7 @@ static bool getHistogramBinDesc(SHistoFuncInfo* pInfo, char* binDescStr, int8_t
int32_t numOfParams = cJSON_GetArraySize(binDesc);
int32_t startIndex;
if (numOfParams != 4) {
cJSON_Delete(binDesc);
return false;
}
@ -4353,15 +4379,18 @@ static bool getHistogramBinDesc(SHistoFuncInfo* pInfo, char* binDescStr, int8_t
cJSON* infinity = cJSON_GetObjectItem(binDesc, "infinity");
if (!cJSON_IsNumber(start) || !cJSON_IsNumber(count) || !cJSON_IsBool(infinity)) {
cJSON_Delete(binDesc);
return false;
}
if (count->valueint <= 0 || count->valueint > 1000) { // limit count to 1000
cJSON_Delete(binDesc);
return false;
}
if (isinf(start->valuedouble) || (width != NULL && isinf(width->valuedouble)) ||
(factor != NULL && isinf(factor->valuedouble)) || (count != NULL && isinf(count->valuedouble))) {
cJSON_Delete(binDesc);
return false;
}
@ -4379,12 +4408,14 @@ static bool getHistogramBinDesc(SHistoFuncInfo* pInfo, char* binDescStr, int8_t
// linear bin process
if (width->valuedouble == 0) {
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
for (int i = 0; i < counter + 1; ++i) {
intervals[startIndex] = start->valuedouble + i * width->valuedouble;
if (isinf(intervals[startIndex])) {
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
startIndex++;
@ -4393,22 +4424,26 @@ static bool getHistogramBinDesc(SHistoFuncInfo* pInfo, char* binDescStr, int8_t
// log bin process
if (start->valuedouble == 0) {
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
if (factor->valuedouble < 0 || factor->valuedouble == 0 || factor->valuedouble == 1) {
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
for (int i = 0; i < counter + 1; ++i) {
intervals[startIndex] = start->valuedouble * pow(factor->valuedouble, i * 1.0);
if (isinf(intervals[startIndex])) {
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
startIndex++;
}
} else {
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
@ -4423,6 +4458,7 @@ static bool getHistogramBinDesc(SHistoFuncInfo* pInfo, char* binDescStr, int8_t
}
} else if (cJSON_IsArray(binDesc)) { /* user input bins */
if (binType != USER_INPUT_BIN) {
cJSON_Delete(binDesc);
return false;
}
numOfBins = cJSON_GetArraySize(binDesc);
@ -4430,6 +4466,7 @@ static bool getHistogramBinDesc(SHistoFuncInfo* pInfo, char* binDescStr, int8_t
cJSON* bin = binDesc->child;
if (bin == NULL) {
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
int i = 0;
@ -4437,16 +4474,19 @@ static bool getHistogramBinDesc(SHistoFuncInfo* pInfo, char* binDescStr, int8_t
intervals[i] = bin->valuedouble;
if (!cJSON_IsNumber(bin)) {
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
if (i != 0 && intervals[i] <= intervals[i - 1]) {
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
bin = bin->next;
i++;
}
} else {
cJSON_Delete(binDesc);
return false;
}
@ -4459,6 +4499,8 @@ static bool getHistogramBinDesc(SHistoFuncInfo* pInfo, char* binDescStr, int8_t
}
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return true;
}
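The hunks above plug leaks by adding a cJSON_Delete before every early return. A common alternative is a single cleanup label, shown as a hedged sketch below (parseUserBins is a hypothetical helper, not the TDengine routine), so the parsed tree and the scratch buffer are released on exactly one path.

#include <stdbool.h>
#include <stdlib.h>
#include "cJSON.h"

static bool parseUserBins(const char *desc, double **pIntervals, int *pCount) {
  bool    ok        = false;
  double *intervals = NULL;
  cJSON  *bin       = NULL;
  int     i         = 0;
  cJSON  *json      = cJSON_Parse(desc);

  if (json == NULL || !cJSON_IsArray(json)) goto _cleanup;
  *pCount = cJSON_GetArraySize(json);
  if (*pCount <= 0) goto _cleanup;

  intervals = calloc((size_t)*pCount, sizeof(double));
  if (intervals == NULL) goto _cleanup;

  cJSON_ArrayForEach(bin, json) {
    if (!cJSON_IsNumber(bin)) goto _cleanup;                           /* reject non-numeric bins */
    if (i > 0 && bin->valuedouble <= intervals[i - 1]) goto _cleanup;  /* bins must ascend */
    intervals[i++] = bin->valuedouble;
  }

  *pIntervals = intervals;
  intervals   = NULL;  /* ownership moves to the caller */
  ok          = true;

_cleanup:
  free(intervals);     /* no-op once ownership has moved */
  cJSON_Delete(json);  /* cJSON_Delete(NULL) is a no-op */
  return ok;
}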
@ -4472,18 +4514,23 @@ bool histogramFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultIn
pInfo->totalCount = 0;
pInfo->normalized = 0;
int8_t binType = getHistogramBinType(varDataVal(pCtx->param[1].param.pz));
char *binTypeStr = strndup(varDataVal(pCtx->param[1].param.pz), varDataLen(pCtx->param[1].param.pz));
int8_t binType = getHistogramBinType(binTypeStr);
taosMemoryFree(binTypeStr);
if (binType == UNKNOWN_BIN) {
return false;
}
char* binDesc = varDataVal(pCtx->param[2].param.pz);
char* binDesc = strndup(varDataVal(pCtx->param[2].param.pz), varDataLen(pCtx->param[2].param.pz));
int64_t normalized = pCtx->param[3].param.i;
if (normalized != 0 && normalized != 1) {
return false;
}
if (!getHistogramBinDesc(pInfo, binDesc, binType, (bool)normalized)) {
taosMemoryFree(binDesc);
return false;
}
taosMemoryFree(binDesc);
return true;
}
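The strndup calls introduced above exist because a length-prefixed VARSTR payload is not NUL-terminated; it has to be copied with an explicit length before being handed to C-string APIs such as cJSON_Parse(). A minimal sketch with a hypothetical helper name:

#define _POSIX_C_SOURCE 200809L
#include <stdlib.h>
#include <string.h>

/* Copy a counted (non NUL-terminated) buffer into a proper C string. */
static char *copyCounted(const char *buf, size_t len) {
  return strndup(buf, len);  /* caller must free() the copy */
}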
@ -4931,6 +4978,7 @@ int32_t stateCountFunction(SqlFunctionCtx* pCtx) {
SStateInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
SInputColumnInfoData* pInput = &pCtx->input;
TSKEY* tsList = (int64_t*)pInput->pPTS->pData;
SColumnInfoData* pInputCol = pInput->pData[0];
@ -4943,7 +4991,15 @@ int32_t stateCountFunction(SqlFunctionCtx* pCtx) {
}
for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; i += 1) {
if (pInfo->isPrevTsSet == true && tsList[i] == pInfo->prevTs) {
return TSDB_CODE_FUNC_DUP_TIMESTAMP;
} else {
pInfo->prevTs = tsList[i];
}
pInfo->isPrevTsSet = true;
numOfElems++;
if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
colDataAppendNULL(pOutput, i);
// handle selectivity
@ -4969,7 +5025,8 @@ int32_t stateCountFunction(SqlFunctionCtx* pCtx) {
}
}
return numOfElems;
pResInfo->numOfRes = numOfElems;
return TSDB_CODE_SUCCESS;
}
int32_t stateDurationFunction(SqlFunctionCtx* pCtx) {
@ -4992,11 +5049,19 @@ int32_t stateDurationFunction(SqlFunctionCtx* pCtx) {
int8_t op = getStateOpType(varDataVal(pCtx->param[1].param.pz));
if (STATE_OPER_INVALID == op) {
return 0;
return TSDB_CODE_INVALID_PARA;
}
for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; i += 1) {
if (pInfo->isPrevTsSet == true && tsList[i] == pInfo->prevTs) {
return TSDB_CODE_FUNC_DUP_TIMESTAMP;
} else {
pInfo->prevTs = tsList[i];
}
pInfo->isPrevTsSet = true;
numOfElems++;
if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
colDataAppendNULL(pOutput, i);
// handle selectivity
@ -5026,7 +5091,8 @@ int32_t stateDurationFunction(SqlFunctionCtx* pCtx) {
}
}
return numOfElems;
pResInfo->numOfRes = numOfElems;
return TSDB_CODE_SUCCESS;
}
bool getCsumFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) {
@ -5039,6 +5105,7 @@ int32_t csumFunction(SqlFunctionCtx* pCtx) {
SSumRes* pSumRes = GET_ROWCELL_INTERBUF(pResInfo);
SInputColumnInfoData* pInput = &pCtx->input;
TSKEY* tsList = (int64_t*)pInput->pPTS->pData;
SColumnInfoData* pInputCol = pInput->pData[0];
SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput;
@ -5047,6 +5114,13 @@ int32_t csumFunction(SqlFunctionCtx* pCtx) {
int32_t type = pInputCol->info.type;
int32_t startOffset = pCtx->offset;
for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; i += 1) {
if (pSumRes->isPrevTsSet == true && tsList[i] == pSumRes->prevTs) {
return TSDB_CODE_FUNC_DUP_TIMESTAMP;
} else {
pSumRes->prevTs = tsList[i];
}
pSumRes->isPrevTsSet = true;
int32_t pos = startOffset + numOfElems;
if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
// colDataAppendNULL(pOutput, i);
@ -5084,7 +5158,8 @@ int32_t csumFunction(SqlFunctionCtx* pCtx) {
numOfElems++;
}
return numOfElems;
pResInfo->numOfRes = numOfElems;
return TSDB_CODE_SUCCESS;
}
bool getMavgFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) {
@ -5100,6 +5175,8 @@ bool mavgFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo) {
SMavgInfo* pInfo = GET_ROWCELL_INTERBUF(pResultInfo);
pInfo->pos = 0;
pInfo->sum = 0;
pInfo->prevTs = -1;
pInfo->isPrevTsSet = false;
pInfo->numOfPoints = pCtx->param[1].param.i;
if (pInfo->numOfPoints < 1 || pInfo->numOfPoints > MAVG_MAX_POINTS_NUM) {
return false;
@ -5114,6 +5191,7 @@ int32_t mavgFunction(SqlFunctionCtx* pCtx) {
SMavgInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
SInputColumnInfoData* pInput = &pCtx->input;
TSKEY* tsList = (int64_t*)pInput->pPTS->pData;
SColumnInfoData* pInputCol = pInput->pData[0];
SColumnInfoData* pTsOutput = pCtx->pTsOutput;
@ -5123,6 +5201,13 @@ int32_t mavgFunction(SqlFunctionCtx* pCtx) {
int32_t type = pInputCol->info.type;
int32_t startOffset = pCtx->offset;
for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; i += 1) {
if (pInfo->isPrevTsSet == true && tsList[i] == pInfo->prevTs) {
return TSDB_CODE_FUNC_DUP_TIMESTAMP;
} else {
pInfo->prevTs = tsList[i];
}
pInfo->isPrevTsSet = true;
int32_t pos = startOffset + numOfElems;
if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
// colDataAppendNULL(pOutput, i);
@ -5167,7 +5252,8 @@ int32_t mavgFunction(SqlFunctionCtx* pCtx) {
}
}
return numOfElems;
pResInfo->numOfRes = numOfElems;
return TSDB_CODE_SUCCESS;
}
static SSampleInfo* getSampleOutputInfo(SqlFunctionCtx* pCtx) {
@ -5651,6 +5737,10 @@ bool twaFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo) {
}
static double twa_get_area(SPoint1 s, SPoint1 e) {
if (e.key == INT64_MAX || s.key == INT64_MIN) {
return 0;
}
if ((s.val >= 0 && e.val >= 0) || (s.val <= 0 && e.val <= 0)) {
return (s.val + e.val) * (e.key - s.key) / 2;
}
@ -5945,6 +6035,8 @@ int32_t twaFinalize(struct SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
} else {
if (pInfo->win.ekey == pInfo->win.skey) {
pInfo->dOutput = pInfo->p.val;
} else if (pInfo->win.ekey == INT64_MAX || pInfo->win.skey == INT64_MIN) { //no data in timewindow
pInfo->dOutput = 0;
} else {
pInfo->dOutput = pInfo->dOutput / (pInfo->win.ekey - pInfo->win.skey);
}
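A toy two-sample version of the time-weighted average being guarded above: the trapezoid area between the samples divided by the window length, with sentinel keys (an empty window) and zero-length windows handled before the division. The function name and the simplifications are the sketch's own, not the TDengine implementation.

#include <stdint.h>

static double twaTwoPoints(int64_t t0, double v0, int64_t t1, double v1) {
  if (t0 == INT64_MIN || t1 == INT64_MAX) return 0.0;  /* empty window: no valid endpoints */
  if (t1 == t0) return v0;                             /* zero-length window: keep the value */
  /* assumes v0 and v1 share a sign; the real code splits the segment at the zero crossing */
  double area = (v0 + v1) * (double)(t1 - t0) / 2.0;
  return area / (double)(t1 - t0);
}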
@ -6207,6 +6299,9 @@ int32_t derivativeFunction(SqlFunctionCtx* pCtx) {
if (!pDerivInfo->valueSet) { // initial value is not set yet
pDerivInfo->valueSet = true;
} else {
if (tsList[i] == pDerivInfo->prevTs) {
return TSDB_CODE_FUNC_DUP_TIMESTAMP;
}
double r = ((v - pDerivInfo->prevValue) * pDerivInfo->tsWindow) / (tsList[i] - pDerivInfo->prevTs);
if (pDerivInfo->ignoreNegative && r < 0) {
} else {
@ -6245,6 +6340,9 @@ int32_t derivativeFunction(SqlFunctionCtx* pCtx) {
if (!pDerivInfo->valueSet) { // initial value is not set yet
pDerivInfo->valueSet = true;
} else {
if (tsList[i] == pDerivInfo->prevTs) {
return TSDB_CODE_FUNC_DUP_TIMESTAMP;
}
double r = ((pDerivInfo->prevValue - v) * pDerivInfo->tsWindow) / (pDerivInfo->prevTs - tsList[i]);
if (pDerivInfo->ignoreNegative && r < 0) {
} else {
@ -6272,7 +6370,9 @@ int32_t derivativeFunction(SqlFunctionCtx* pCtx) {
}
}
return numOfElems;
pResInfo->numOfRes = numOfElems;
return TSDB_CODE_SUCCESS;
}
bool getIrateFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv) {
@ -6337,11 +6437,15 @@ int32_t irateFunction(SqlFunctionCtx* pCtx) {
pRateInfo->lastKey = tsList[i];
continue;
} else if (tsList[i] == pRateInfo->lastKey) {
return TSDB_CODE_FUNC_DUP_TIMESTAMP;
}
if ((INT64_MIN == pRateInfo->firstKey) || tsList[i] > pRateInfo->firstKey) {
pRateInfo->firstValue = v;
pRateInfo->firstKey = tsList[i];
} else if (tsList[i] == pRateInfo->firstKey) {
return TSDB_CODE_FUNC_DUP_TIMESTAMP;
}
}

View File

@ -5679,6 +5679,7 @@ static int32_t translateCreateFunction(STranslateContext* pCxt, SCreateFunctionS
if (TSDB_CODE_SUCCESS == code) {
code = buildCmdMsg(pCxt, TDMT_MND_CREATE_FUNC, (FSerializeFunc)tSerializeSCreateFuncReq, &req);
}
tFreeSCreateFuncReq(&req);
return code;
}

View File

@ -833,6 +833,7 @@ static int32_t partitionAggCondConj(SAggLogicNode* pAgg, SNode** ppAggFuncCond,
nodesDestroyNode(pTempAggFuncCond);
nodesDestroyNode(pTempGroupKeyCond);
}
nodesDestroyNode(pAgg->node.pConditions);
pAgg->node.pConditions = NULL;
return code;
}
@ -853,8 +854,7 @@ static int32_t partitionAggCond(SAggLogicNode* pAgg, SNode** ppAggFunCond, SNode
}
static int32_t pushCondToAggCond(SOptimizeContext* pCxt, SAggLogicNode* pAgg, SNode** pAggFuncCond) {
pushDownCondOptAppendCond(&pAgg->node.pConditions, pAggFuncCond);
return TSDB_CODE_SUCCESS;
return pushDownCondOptAppendCond(&pAgg->node.pConditions, pAggFuncCond);
}
typedef struct SRewriteAggGroupKeyCondContext {

View File

@ -596,6 +596,7 @@ static int32_t stbSplSplitSessionForStream(SSplitContext* pCxt, SStableSplitInfo
int32_t index = 0;
int32_t code = stbSplAppendWEnd(pPartWin, &index);
if (TSDB_CODE_SUCCESS == code) {
nodesDestroyNode(pMergeWin->pTsEnd);
pMergeWin->pTsEnd = nodesCloneNode(nodesListGetNode(pPartWin->node.pTargets, index));
if (NULL == pMergeWin->pTsEnd) {
code = TSDB_CODE_OUT_OF_MEMORY;

View File

@ -248,9 +248,9 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) {
}
}
// if (optr == OP_TYPE_JSON_CONTAINS && type == TSDB_DATA_TYPE_JSON) {
// return 28;
// }
// if (optr == OP_TYPE_JSON_CONTAINS && type == TSDB_DATA_TYPE_JSON) {
// return 28;
// }
switch (type) {
case TSDB_DATA_TYPE_BOOL:
@ -336,6 +336,10 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) {
__compar_fn_t filterGetCompFunc(int32_t type, int32_t optr) { return gDataCompare[filterGetCompFuncIdx(type, optr)]; }
__compar_fn_t filterGetCompFuncEx(int32_t lType, int32_t rType, int32_t optr) {
if (TSDB_DATA_TYPE_NULL == rType) {
return NULL;
}
switch (lType) {
case TSDB_DATA_TYPE_TINYINT: {
if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
@ -1090,7 +1094,7 @@ int32_t filterAddUnitImpl(SFilterInfo *info, uint8_t optr, SFilterFieldId *left,
if (tmp == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
info->units = (SFilterUnit*)tmp;
info->units = (SFilterUnit *)tmp;
memset(info->units + psize, 0, sizeof(*info->units) * FILTER_DEFAULT_UNIT_SIZE);
}
@ -1172,7 +1176,7 @@ int32_t fltAddGroupUnitFromNode(SFilterInfo *info, SNode *tree, SArray *group) {
SScalarParam out = {.columnData = taosMemoryCalloc(1, sizeof(SColumnInfoData))};
out.columnData->info.type = type;
out.columnData->info.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes; //reserved space for simple_copy
out.columnData->info.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes; // reserved space for simple_copy
for (int32_t i = 0; i < listNode->pNodeList->length; ++i) {
SValueNode *valueNode = (SValueNode *)cell->pNode;
@ -1194,7 +1198,7 @@ int32_t fltAddGroupUnitFromNode(SFilterInfo *info, SNode *tree, SArray *group) {
filterAddField(info, NULL, (void **)&out.columnData->pData, FLD_TYPE_VALUE, &right, len, true);
out.columnData->pData = NULL;
} else {
void *data = taosMemoryCalloc(1, tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes); //reserved space for simple_copy
void *data = taosMemoryCalloc(1, tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes); // reserved space for simple_copy
if (NULL == data) {
FLT_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
@ -1636,11 +1640,11 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options)
SValueNode *var = (SValueNode *)field->desc;
SDataType *dType = &var->node.resType;
//if (dType->type == TSDB_DATA_TYPE_VALUE_ARRAY) {
// qDebug("VAL%d => [type:TS][val:[%" PRIi64 "] - [%" PRId64 "]]", i, *(int64_t *)field->data,
// *(((int64_t *)field->data) + 1));
//} else {
qDebug("VAL%d => [type:%d][val:%" PRIx64 "]", i, dType->type, var->datum.i); // TODO
// if (dType->type == TSDB_DATA_TYPE_VALUE_ARRAY) {
// qDebug("VAL%d => [type:TS][val:[%" PRIi64 "] - [%" PRId64 "]]", i, *(int64_t *)field->data,
// *(((int64_t *)field->data) + 1));
// } else {
qDebug("VAL%d => [type:%d][val:%" PRIx64 "]", i, dType->type, var->datum.i); // TODO
//}
} else if (field->data) {
qDebug("VAL%d => [type:NIL][val:NIL]", i); // TODO
@ -1722,7 +1726,7 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options)
ctx->isrange);
if (ctx->isrange) {
SFilterRangeNode *r = ctx->rs;
int32_t tlen = 0;
int32_t tlen = 0;
while (r) {
char str[256] = {0};
if (FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_NULL)) {

View File

@ -23,12 +23,13 @@ int32_t scalarGetOperatorParamNum(EOperatorType type) {
int32_t sclConvertToTsValueNode(int8_t precision, SValueNode *valueNode) {
char *timeStr = valueNode->datum.p;
int32_t code =
convertStringToTimestamp(valueNode->node.resType.type, valueNode->datum.p, precision, &valueNode->datum.i);
int64_t value = 0;
int32_t code = convertStringToTimestamp(valueNode->node.resType.type, valueNode->datum.p, precision, &value);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
taosMemoryFree(timeStr);
valueNode->datum.i = value;
valueNode->typeData = valueNode->datum.i;
valueNode->node.resType.type = TSDB_DATA_TYPE_TIMESTAMP;
@ -61,7 +62,7 @@ int32_t sclCreateColumnInfoData(SDataType *pType, int32_t numOfRows, SScalarPara
return TSDB_CODE_SUCCESS;
}
int32_t sclConvertValueToSclParam(SValueNode* pValueNode, SScalarParam* out, int32_t* overflow) {
int32_t sclConvertValueToSclParam(SValueNode *pValueNode, SScalarParam *out, int32_t *overflow) {
SScalarParam in = {.numOfRows = 1};
int32_t code = sclCreateColumnInfoData(&pValueNode->node.resType, 1, &in);
if (code != TSDB_CODE_SUCCESS) {
@ -78,7 +79,7 @@ int32_t sclConvertValueToSclParam(SValueNode* pValueNode, SScalarParam* out, int
}
int32_t sclExtendResRows(SScalarParam *pDst, SScalarParam *pSrc, SArray *pBlockList) {
SSDataBlock* pb = taosArrayGetP(pBlockList, 0);
SSDataBlock *pb = taosArrayGetP(pBlockList, 0);
SScalarParam *pLeft = taosMemoryCalloc(1, sizeof(SScalarParam));
if (NULL == pLeft) {
sclError("calloc %d failed", (int32_t)sizeof(SScalarParam));
@ -90,7 +91,7 @@ int32_t sclExtendResRows(SScalarParam *pDst, SScalarParam *pSrc, SArray *pBlockL
if (pDst->numOfRows < pb->info.rows) {
colInfoDataEnsureCapacity(pDst->columnData, pb->info.rows, true);
}
_bin_scalar_fn_t OperatorFn = getBinScalarOperatorFn(OP_TYPE_ASSIGN);
OperatorFn(pLeft, pSrc, pDst, TSDB_ORDER_ASC);
@ -566,7 +567,7 @@ _return:
SCL_RET(code);
}
int32_t sclGetNodeRes(SNode* node, SScalarCtx *ctx, SScalarParam **res) {
int32_t sclGetNodeRes(SNode *node, SScalarCtx *ctx, SScalarParam **res) {
if (NULL == node) {
return TSDB_CODE_SUCCESS;
}
@ -576,48 +577,55 @@ int32_t sclGetNodeRes(SNode* node, SScalarCtx *ctx, SScalarParam **res) {
if (NULL == *res) {
SCL_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
SCL_ERR_RET(sclInitParam(node, *res, ctx, &rowNum));
return TSDB_CODE_SUCCESS;
}
int32_t sclWalkCaseWhenList(SScalarCtx *ctx, SNodeList* pList, struct SListCell* pCell, SScalarParam *pCase, SScalarParam *pElse, SScalarParam *pComp, SScalarParam *output, int32_t rowIdx, int32_t totalRows, bool *complete) {
SNode *node = NULL;
SWhenThenNode* pWhenThen = NULL;
SScalarParam *pWhen = NULL;
SScalarParam *pThen = NULL;
int32_t code = 0;
int32_t sclWalkCaseWhenList(SScalarCtx *ctx, SNodeList *pList, struct SListCell *pCell, SScalarParam *pCase,
SScalarParam *pElse, SScalarParam *pComp, SScalarParam *output, int32_t rowIdx,
int32_t totalRows, bool *complete) {
SNode *node = NULL;
SWhenThenNode *pWhenThen = NULL;
SScalarParam *pWhen = NULL;
SScalarParam *pThen = NULL;
int32_t code = 0;
for (SListCell *cell = pCell; (NULL != cell ? (node = cell->pNode, true) : (node = NULL, false));
cell = cell->pNext) {
pWhenThen = (SWhenThenNode *)node;
for (SListCell* cell = pCell; (NULL != cell ? (node = cell->pNode, true) : (node = NULL, false)); cell = cell->pNext) {
pWhenThen = (SWhenThenNode*)node;
SCL_ERR_RET(sclGetNodeRes(pWhenThen->pWhen, ctx, &pWhen));
SCL_ERR_RET(sclGetNodeRes(pWhenThen->pThen, ctx, &pThen));
vectorCompareImpl(pCase, pWhen, pComp, rowIdx, 1, TSDB_ORDER_ASC, OP_TYPE_EQUAL);
bool *equal = (bool*)colDataGetData(pComp->columnData, rowIdx);
bool *equal = (bool *)colDataGetData(pComp->columnData, rowIdx);
if (*equal) {
colDataAppend(output->columnData, rowIdx, colDataGetData(pThen->columnData, (pThen->numOfRows > 1 ? rowIdx : 0)), colDataIsNull_s(pThen->columnData, (pThen->numOfRows > 1 ? rowIdx : 0)));
bool isNull = colDataIsNull_s(pThen->columnData, (pThen->numOfRows > 1 ? rowIdx : 0));
char *pData = isNull ? NULL : colDataGetData(pThen->columnData, (pThen->numOfRows > 1 ? rowIdx : 0));
colDataAppend(output->columnData, rowIdx, pData, isNull);
if (0 == rowIdx && 1 == pCase->numOfRows && 1 == pWhen->numOfRows && 1 == pThen->numOfRows && totalRows > 1) {
SCL_ERR_JRET(sclExtendResRows(output, output, ctx->pBlockList));
*complete = true;
}
goto _return;
}
}
if (pElse) {
colDataAppend(output->columnData, rowIdx, colDataGetData(pElse->columnData, (pElse->numOfRows > 1 ? rowIdx : 0)), colDataIsNull_s(pElse->columnData, (pElse->numOfRows > 1 ? rowIdx : 0)));
bool isNull = colDataIsNull_s(pElse->columnData, (pElse->numOfRows > 1 ? rowIdx : 0));
char *pData = isNull ? NULL : colDataGetData(pElse->columnData, (pElse->numOfRows > 1 ? rowIdx : 0));
colDataAppend(output->columnData, rowIdx, pData, isNull);
if (0 == rowIdx && 1 == pCase->numOfRows && 1 == pElse->numOfRows && totalRows > 1) {
SCL_ERR_JRET(sclExtendResRows(output, output, ctx->pBlockList));
*complete = true;
}
goto _return;
}
@ -629,7 +637,7 @@ int32_t sclWalkCaseWhenList(SScalarCtx *ctx, SNodeList* pList, struct SListCell*
}
_return:
sclFreeParam(pWhen);
sclFreeParam(pThen);
taosMemoryFree(pWhen);
@ -638,32 +646,35 @@ _return:
SCL_RET(code);
}
int32_t sclWalkWhenList(SScalarCtx *ctx, SNodeList* pList, struct SListCell* pCell, SScalarParam *pElse, SScalarParam *output,
int32_t rowIdx, int32_t totalRows, bool *complete, bool preSingle) {
SNode *node = NULL;
SWhenThenNode* pWhenThen = NULL;
SScalarParam *pWhen = NULL;
SScalarParam *pThen = NULL;
int32_t code = 0;
int32_t sclWalkWhenList(SScalarCtx *ctx, SNodeList *pList, struct SListCell *pCell, SScalarParam *pElse,
SScalarParam *output, int32_t rowIdx, int32_t totalRows, bool *complete, bool preSingle) {
SNode *node = NULL;
SWhenThenNode *pWhenThen = NULL;
SScalarParam *pWhen = NULL;
SScalarParam *pThen = NULL;
int32_t code = 0;
for (SListCell* cell = pCell; (NULL != cell ? (node = cell->pNode, true) : (node = NULL, false)); cell = cell->pNext) {
pWhenThen = (SWhenThenNode*)node;
for (SListCell *cell = pCell; (NULL != cell ? (node = cell->pNode, true) : (node = NULL, false));
cell = cell->pNext) {
pWhenThen = (SWhenThenNode *)node;
pWhen = NULL;
pThen = NULL;
SCL_ERR_JRET(sclGetNodeRes(pWhenThen->pWhen, ctx, &pWhen));
SCL_ERR_JRET(sclGetNodeRes(pWhenThen->pThen, ctx, &pThen));
bool *whenValue = (bool*)colDataGetData(pWhen->columnData, (pWhen->numOfRows > 1 ? rowIdx : 0));
bool *whenValue = (bool *)colDataGetData(pWhen->columnData, (pWhen->numOfRows > 1 ? rowIdx : 0));
if (*whenValue) {
colDataAppend(output->columnData, rowIdx, colDataGetData(pThen->columnData, (pThen->numOfRows > 1 ? rowIdx : 0)), colDataIsNull_s(pThen->columnData, (pThen->numOfRows > 1 ? rowIdx : 0)));
bool isNull = colDataIsNull_s(pThen->columnData, (pThen->numOfRows > 1 ? rowIdx : 0));
char *pData = isNull ? NULL : colDataGetData(pThen->columnData, (pThen->numOfRows > 1 ? rowIdx : 0));
colDataAppend(output->columnData, rowIdx, pData, isNull);
if (preSingle && 0 == rowIdx && 1 == pWhen->numOfRows && 1 == pThen->numOfRows && totalRows > 1) {
SCL_ERR_JRET(sclExtendResRows(output, output, ctx->pBlockList));
*complete = true;
}
goto _return;
}
@ -674,13 +685,15 @@ int32_t sclWalkWhenList(SScalarCtx *ctx, SNodeList* pList, struct SListCell* pCe
}
if (pElse) {
colDataAppend(output->columnData, rowIdx, colDataGetData(pElse->columnData, (pElse->numOfRows > 1 ? rowIdx : 0)), colDataIsNull_s(pElse->columnData, (pElse->numOfRows > 1 ? rowIdx : 0)));
bool isNull = colDataIsNull_s(pElse->columnData, (pElse->numOfRows > 1 ? rowIdx : 0));
char *pData = isNull ? NULL : colDataGetData(pElse->columnData, (pElse->numOfRows > 1 ? rowIdx : 0));
colDataAppend(output->columnData, rowIdx, pData, isNull);
if (preSingle && 0 == rowIdx && 1 == pElse->numOfRows && totalRows > 1) {
SCL_ERR_JRET(sclExtendResRows(output, output, ctx->pBlockList));
*complete = true;
}
goto _return;
}
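The CASE/WHEN fixes above fetch the cell pointer only when the row is known to be non-NULL, and pass NULL together with the null flag instead of dereferencing an empty cell. A standalone sketch of that shape, using toy column types rather than the real SColumnInfoData:

#include <stdbool.h>
#include <stddef.h>

typedef struct {
  const char *buf;    /* fixed-width cell storage */
  const bool *nulls;  /* per-row null flags */
  size_t      width;
} ToyColumn;

static bool        toyIsNull(const ToyColumn *c, int row)  { return c->nulls[row]; }
static const char *toyGetData(const ToyColumn *c, int row) { return c->buf + (size_t)row * c->width; }

/* Only touch the cell data when the row is non-NULL. */
static void toyAppendCell(const ToyColumn *src, int row,
                          void (*append)(const char *data, bool isNull)) {
  bool        isNull = toyIsNull(src, row);
  const char *data   = isNull ? NULL : toyGetData(src, row);
  append(data, isNull);
}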
@ -860,14 +873,14 @@ _return:
}
int32_t sclExecCaseWhen(SCaseWhenNode *node, SScalarCtx *ctx, SScalarParam *output) {
int32_t code = 0;
int32_t code = 0;
SScalarParam *pCase = NULL;
SScalarParam *pElse = NULL;
SScalarParam *pWhen = NULL;
SScalarParam *pThen = NULL;
SScalarParam comp = {0};
int32_t rowNum = 1;
bool complete = false;
int32_t rowNum = 1;
bool complete = false;
if (NULL == node->pWhenThenList || node->pWhenThenList->length <= 0) {
sclError("invalid whenThen list");
@ -875,24 +888,24 @@ int32_t sclExecCaseWhen(SCaseWhenNode *node, SScalarCtx *ctx, SScalarParam *outp
}
if (ctx->pBlockList) {
SSDataBlock* pb = taosArrayGetP(ctx->pBlockList, 0);
SSDataBlock *pb = taosArrayGetP(ctx->pBlockList, 0);
rowNum = pb->info.rows;
output->numOfRows = pb->info.rows;
}
SCL_ERR_JRET(sclCreateColumnInfoData(&node->node.resType, rowNum, output));
SCL_ERR_JRET(sclGetNodeRes(node->pCase, ctx, &pCase));
SCL_ERR_JRET(sclGetNodeRes(node->pElse, ctx, &pElse));
SDataType compType = {0};
compType.type = TSDB_DATA_TYPE_BOOL;
compType.bytes = tDataTypes[compType.type].bytes;
SCL_ERR_JRET(sclCreateColumnInfoData(&compType, rowNum, &comp));
SNode* tnode = NULL;
SWhenThenNode* pWhenThen = (SWhenThenNode*)node->pWhenThenList->pHead->pNode;
SNode *tnode = NULL;
SWhenThenNode *pWhenThen = (SWhenThenNode *)node->pWhenThenList->pHead->pNode;
SCL_ERR_JRET(sclGetNodeRes(pWhenThen->pWhen, ctx, &pWhen));
SCL_ERR_JRET(sclGetNodeRes(pWhenThen->pThen, ctx, &pThen));
@ -903,17 +916,19 @@ int32_t sclExecCaseWhen(SCaseWhenNode *node, SScalarCtx *ctx, SScalarParam *outp
if (pCase) {
vectorCompare(pCase, pWhen, &comp, TSDB_ORDER_ASC, OP_TYPE_EQUAL);
for (int32_t i = 0; i < rowNum; ++i) {
bool *equal = (bool*)colDataGetData(comp.columnData, (comp.numOfRows > 1 ? i : 0));
bool *equal = (bool *)colDataGetData(comp.columnData, (comp.numOfRows > 1 ? i : 0));
if (*equal) {
colDataAppend(output->columnData, i, colDataGetData(pThen->columnData, (pThen->numOfRows > 1 ? i : 0)), colDataIsNull_s(pThen->columnData, (pThen->numOfRows > 1 ? i : 0)));
colDataAppend(output->columnData, i, colDataGetData(pThen->columnData, (pThen->numOfRows > 1 ? i : 0)),
colDataIsNull_s(pThen->columnData, (pThen->numOfRows > 1 ? i : 0)));
if (0 == i && 1 == pCase->numOfRows && 1 == pWhen->numOfRows && 1 == pThen->numOfRows && rowNum > 1) {
SCL_ERR_JRET(sclExtendResRows(output, output, ctx->pBlockList));
break;
}
} else {
SCL_ERR_JRET(sclWalkCaseWhenList(ctx, node->pWhenThenList, node->pWhenThenList->pHead->pNext, pCase, pElse, &comp, output, i, rowNum, &complete));
SCL_ERR_JRET(sclWalkCaseWhenList(ctx, node->pWhenThenList, node->pWhenThenList->pHead->pNext, pCase, pElse,
&comp, output, i, rowNum, &complete));
if (complete) {
break;
}
@ -921,15 +936,17 @@ int32_t sclExecCaseWhen(SCaseWhenNode *node, SScalarCtx *ctx, SScalarParam *outp
}
} else {
for (int32_t i = 0; i < rowNum; ++i) {
bool *whenValue = (bool*)colDataGetData(pWhen->columnData, (pWhen->numOfRows > 1 ? i : 0));
bool *whenValue = (bool *)colDataGetData(pWhen->columnData, (pWhen->numOfRows > 1 ? i : 0));
if (*whenValue) {
colDataAppend(output->columnData, i, colDataGetData(pThen->columnData, (pThen->numOfRows > 1 ? i : 0)), colDataIsNull_s(pThen->columnData, (pThen->numOfRows > 1 ? i : 0)));
colDataAppend(output->columnData, i, colDataGetData(pThen->columnData, (pThen->numOfRows > 1 ? i : 0)),
colDataIsNull_s(pThen->columnData, (pThen->numOfRows > 1 ? i : 0)));
if (0 == i && 1 == pWhen->numOfRows && 1 == pThen->numOfRows && rowNum > 1) {
SCL_ERR_JRET(sclExtendResRows(output, output, ctx->pBlockList));
break;
}
} else {
SCL_ERR_JRET(sclWalkWhenList(ctx, node->pWhenThenList, node->pWhenThenList->pHead->pNext, pElse, output, i, rowNum, &complete, (pWhen->numOfRows == 1 && pThen->numOfRows == 1)));
SCL_ERR_JRET(sclWalkWhenList(ctx, node->pWhenThenList, node->pWhenThenList->pHead->pNext, pElse, output, i,
rowNum, &complete, (pWhen->numOfRows == 1 && pThen->numOfRows == 1)));
if (complete) {
break;
}
@ -965,7 +982,6 @@ _return:
SCL_RET(code);
}
EDealRes sclRewriteNullInOptr(SNode **pNode, SScalarCtx *ctx, EOperatorType opType) {
if (opType <= OP_TYPE_CALC_MAX) {
SValueNode *res = (SValueNode *)nodesMakeNode(QUERY_NODE_VALUE);
@ -1089,8 +1105,7 @@ EDealRes sclRewriteNonConstOperator(SNode **pNode, SScalarCtx *ctx) {
EDealRes sclRewriteFunction(SNode **pNode, SScalarCtx *ctx) {
SFunctionNode *node = (SFunctionNode *)*pNode;
SNode *tnode = NULL;
if ((!fmIsScalarFunc(node->funcId) && (!ctx->dual)) ||
fmIsUserDefinedFunc(node->funcId)) {
if ((!fmIsScalarFunc(node->funcId) && (!ctx->dual)) || fmIsUserDefinedFunc(node->funcId)) {
return DEAL_RES_CONTINUE;
}
@ -1229,20 +1244,20 @@ EDealRes sclRewriteOperator(SNode **pNode, SScalarCtx *ctx) {
return DEAL_RES_CONTINUE;
}
EDealRes sclRewriteCaseWhen(SNode** pNode, SScalarCtx *ctx) {
EDealRes sclRewriteCaseWhen(SNode **pNode, SScalarCtx *ctx) {
SCaseWhenNode *node = (SCaseWhenNode *)*pNode;
if ((!SCL_IS_CONST_NODE(node->pCase)) || (!SCL_IS_CONST_NODE(node->pElse))) {
return DEAL_RES_CONTINUE;
}
SNode* tnode = NULL;
SNode *tnode = NULL;
FOREACH(tnode, node->pWhenThenList) {
SWhenThenNode* pWhenThen = (SWhenThenNode*)tnode;
SWhenThenNode *pWhenThen = (SWhenThenNode *)tnode;
if (!SCL_IS_CONST_NODE(pWhenThen->pWhen) || !SCL_IS_CONST_NODE(pWhenThen->pThen)) {
return DEAL_RES_CONTINUE;
}
}
}
SScalarParam output = {0};
ctx->code = sclExecCaseWhen(node, ctx, &output);
@ -1275,13 +1290,12 @@ EDealRes sclRewriteCaseWhen(SNode** pNode, SScalarCtx *ctx) {
}
nodesDestroyNode(*pNode);
*pNode = (SNode*)res;
*pNode = (SNode *)res;
sclFreeParam(&output);
return DEAL_RES_CONTINUE;
}
EDealRes sclConstantsRewriter(SNode **pNode, void *pContext) {
SScalarCtx *ctx = (SScalarCtx *)pContext;
@ -1408,9 +1422,9 @@ EDealRes sclWalkTarget(SNode *pNode, SScalarCtx *ctx) {
return DEAL_RES_CONTINUE;
}
EDealRes sclWalkCaseWhen(SNode* pNode, SScalarCtx *ctx) {
EDealRes sclWalkCaseWhen(SNode *pNode, SScalarCtx *ctx) {
SCaseWhenNode *node = (SCaseWhenNode *)pNode;
SScalarParam output = {0};
SScalarParam output = {0};
ctx->code = sclExecCaseWhen(node, ctx, &output);
if (ctx->code) {
@ -1425,11 +1439,10 @@ EDealRes sclWalkCaseWhen(SNode* pNode, SScalarCtx *ctx) {
return DEAL_RES_CONTINUE;
}
EDealRes sclCalcWalker(SNode *pNode, void *pContext) {
if (QUERY_NODE_VALUE == nodeType(pNode) || QUERY_NODE_NODE_LIST == nodeType(pNode)
|| QUERY_NODE_COLUMN == nodeType(pNode) || QUERY_NODE_LEFT_VALUE == nodeType(pNode)
|| QUERY_NODE_WHEN_THEN == nodeType(pNode)) {
if (QUERY_NODE_VALUE == nodeType(pNode) || QUERY_NODE_NODE_LIST == nodeType(pNode) ||
QUERY_NODE_COLUMN == nodeType(pNode) || QUERY_NODE_LEFT_VALUE == nodeType(pNode) ||
QUERY_NODE_WHEN_THEN == nodeType(pNode)) {
return DEAL_RES_CONTINUE;
}

View File

@ -2620,6 +2620,7 @@ static bool getHistogramBinDesc(SHistoFuncBin **bins, int32_t *binNum, char *bin
int32_t numOfParams = cJSON_GetArraySize(binDesc);
int32_t startIndex;
if (numOfParams != 4) {
cJSON_Delete(binDesc);
return false;
}
@ -2630,15 +2631,18 @@ static bool getHistogramBinDesc(SHistoFuncBin **bins, int32_t *binNum, char *bin
cJSON *infinity = cJSON_GetObjectItem(binDesc, "infinity");
if (!cJSON_IsNumber(start) || !cJSON_IsNumber(count) || !cJSON_IsBool(infinity)) {
cJSON_Delete(binDesc);
return false;
}
if (count->valueint <= 0 || count->valueint > 1000) { // limit count to 1000
cJSON_Delete(binDesc);
return false;
}
if (isinf(start->valuedouble) || (width != NULL && isinf(width->valuedouble)) ||
(factor != NULL && isinf(factor->valuedouble)) || (count != NULL && isinf(count->valuedouble))) {
cJSON_Delete(binDesc);
return false;
}
@ -2656,12 +2660,14 @@ static bool getHistogramBinDesc(SHistoFuncBin **bins, int32_t *binNum, char *bin
// linear bin process
if (width->valuedouble == 0) {
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
for (int i = 0; i < counter + 1; ++i) {
intervals[startIndex] = start->valuedouble + i * width->valuedouble;
if (isinf(intervals[startIndex])) {
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
startIndex++;
@ -2670,22 +2676,26 @@ static bool getHistogramBinDesc(SHistoFuncBin **bins, int32_t *binNum, char *bin
// log bin process
if (start->valuedouble == 0) {
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
if (factor->valuedouble < 0 || factor->valuedouble == 0 || factor->valuedouble == 1) {
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
for (int i = 0; i < counter + 1; ++i) {
intervals[startIndex] = start->valuedouble * pow(factor->valuedouble, i * 1.0);
if (isinf(intervals[startIndex])) {
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
startIndex++;
}
} else {
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
@ -2700,6 +2710,7 @@ static bool getHistogramBinDesc(SHistoFuncBin **bins, int32_t *binNum, char *bin
}
} else if (cJSON_IsArray(binDesc)) { /* user input bins */
if (binType != USER_INPUT_BIN) {
cJSON_Delete(binDesc);
return false;
}
numOfBins = cJSON_GetArraySize(binDesc);
@ -2707,6 +2718,7 @@ static bool getHistogramBinDesc(SHistoFuncBin **bins, int32_t *binNum, char *bin
cJSON *bin = binDesc->child;
if (bin == NULL) {
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
int i = 0;
@ -2714,16 +2726,19 @@ static bool getHistogramBinDesc(SHistoFuncBin **bins, int32_t *binNum, char *bin
intervals[i] = bin->valuedouble;
if (!cJSON_IsNumber(bin)) {
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
if (i != 0 && intervals[i] <= intervals[i - 1]) {
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
bin = bin->next;
i++;
}
} else {
cJSON_Delete(binDesc);
return false;
}
@ -2735,8 +2750,9 @@ static bool getHistogramBinDesc(SHistoFuncBin **bins, int32_t *binNum, char *bin
(*bins)[i].count = 0;
}
cJSON_Delete(binDesc);
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return true;
}
@ -2748,14 +2764,19 @@ int32_t histogramScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarP
int32_t numOfBins = 0;
int32_t totalCount = 0;
int8_t binType = getHistogramBinType(varDataVal(pInput[1].columnData->pData));
char *binDesc = varDataVal(pInput[2].columnData->pData);
char *binTypeStr = strndup(varDataVal(pInput[1].columnData->pData), varDataLen(pInput[1].columnData->pData));
int8_t binType = getHistogramBinType(binTypeStr);
taosMemoryFree(binTypeStr);
char *binDesc = strndup(varDataVal(pInput[2].columnData->pData), varDataLen(pInput[2].columnData->pData));
int64_t normalized = *(int64_t *)(pInput[3].columnData->pData);
int32_t type = GET_PARAM_TYPE(pInput);
if (!getHistogramBinDesc(&bins, &numOfBins, binDesc, binType, (bool)normalized)) {
taosMemoryFree(binDesc);
return TSDB_CODE_FAILED;
}
taosMemoryFree(binDesc);
for (int32_t i = 0; i < pInput->numOfRows; ++i) {
if (colDataIsNull_s(pInputData, i)) {
@ -2785,6 +2806,8 @@ int32_t histogramScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarP
}
}
colInfoDataEnsureCapacity(pOutputData, numOfBins, false);
for (int32_t k = 0; k < numOfBins; ++k) {
int32_t len;
char buf[512] = {0};

View File

@ -51,6 +51,7 @@ void streamSchedByTimer(void* param, void* tmrId) {
SStreamTask* pTask = (void*)param;
if (atomic_load_8(&pTask->taskStatus) == TASK_STATUS__DROPPING) {
streamMetaReleaseTask(NULL, pTask);
return;
}
@ -80,6 +81,8 @@ void streamSchedByTimer(void* param, void* tmrId) {
int32_t streamSetupTrigger(SStreamTask* pTask) {
if (pTask->triggerParam != 0) {
int32_t ref = atomic_add_fetch_32(&pTask->refCnt, 1);
ASSERT(ref == 2);
pTask->timer = taosTmrStart(streamSchedByTimer, (int32_t)pTask->triggerParam, pTask, streamEnv.timer);
pTask->triggerStatus = TASK_TRIGGER_STATUS__INACTIVE;
}

View File

@ -80,7 +80,12 @@ void streamMetaClose(SStreamMeta* pMeta) {
pIter = taosHashIterate(pMeta->pTasks, pIter);
if (pIter == NULL) break;
SStreamTask* pTask = *(SStreamTask**)pIter;
if (pTask->timer) {
taosTmrStop(pTask->timer);
pTask->timer = NULL;
}
tFreeSStreamTask(pTask);
/*streamMetaReleaseTask(pMeta, pTask);*/
}
taosHashCleanup(pMeta->pTasks);
taosMemoryFree(pMeta->path);
@ -169,6 +174,51 @@ SStreamTask* streamMetaGetTask(SStreamMeta* pMeta, int32_t taskId) {
}
}
SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int32_t taskId) {
taosRLockLatch(&pMeta->lock);
SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasks, &taskId, sizeof(int32_t));
if (ppTask) {
SStreamTask* pTask = *ppTask;
if (atomic_load_8(&pTask->taskStatus) != TASK_STATUS__DROPPING) {
atomic_add_fetch_32(&pTask->refCnt, 1);
taosRUnLockLatch(&pMeta->lock);
return pTask;
} else {
taosRUnLockLatch(&pMeta->lock);
return NULL;
}
}
taosRUnLockLatch(&pMeta->lock);
return NULL;
}
void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask) {
int32_t left = atomic_sub_fetch_32(&pTask->refCnt, 1);
ASSERT(left >= 0);
if (left == 0) {
ASSERT(atomic_load_8(&pTask->taskStatus) == TASK_STATUS__DROPPING);
tFreeSStreamTask(pTask);
}
}
void streamMetaRemoveTask1(SStreamMeta* pMeta, int32_t taskId) {
SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasks, &taskId, sizeof(int32_t));
if (ppTask) {
SStreamTask* pTask = *ppTask;
taosHashRemove(pMeta->pTasks, &taskId, sizeof(int32_t));
/*if (pTask->timer) {
* taosTmrStop(pTask->timer);*/
/*pTask->timer = NULL;*/
/*}*/
atomic_store_8(&pTask->taskStatus, TASK_STATUS__DROPPING);
taosWLockLatch(&pMeta->lock);
streamMetaReleaseTask(pMeta, pTask);
taosWUnLockLatch(&pMeta->lock);
}
}
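The acquire/release pair added above follows a plain reference-counting protocol: a task can only be acquired while it is not DROPPING, and the last release frees it. A simplified standalone sketch with C11 atomics instead of the taos atomic_* wrappers; the real code performs the acquire under pMeta->lock, which is omitted here.

#include <stdatomic.h>
#include <stdlib.h>

enum { TOY_TASK_NORMAL = 0, TOY_TASK_DROPPING = 1 };

typedef struct {
  atomic_int refCnt;  /* the meta map holds one reference */
  atomic_int status;
} ToyTask;

static ToyTask *toyTaskAcquire(ToyTask *t) {
  if (atomic_load(&t->status) == TOY_TASK_DROPPING) return NULL;  /* refuse dying tasks */
  atomic_fetch_add(&t->refCnt, 1);
  return t;
}

static void toyTaskRelease(ToyTask *t) {
  if (atomic_fetch_sub(&t->refCnt, 1) == 1) {  /* previous count 1 -> now 0 */
    free(t);                                   /* last reference frees the task */
  }
}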
int32_t streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId) {
SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasks, &taskId, sizeof(int32_t));
if (ppTask) {

View File

@ -70,14 +70,15 @@ typedef struct SSyncTimer {
uint64_t counter;
int32_t timerMS;
SRaftId destId;
void* pData;
SSyncHbTimerData hbData;
} SSyncTimer;
typedef struct SElectTimer {
typedef struct SElectTimerParam {
uint64_t logicClock;
SSyncNode* pSyncNode;
int64_t executeTime;
void* pData;
} SElectTimer;
} SElectTimerParam;
typedef struct SPeerState {
SyncIndex lastSendIndex;
@ -153,6 +154,7 @@ typedef struct SSyncNode {
uint64_t electTimerLogicClock;
TAOS_TMR_CALLBACK FpElectTimerCB; // Timer Fp
uint64_t electTimerCounter;
SElectTimerParam electTimerParam;
// heartbeat timer
tmr_h pHeartbeatTimer;
@ -225,6 +227,7 @@ int32_t syncNodeRestartHeartbeatTimer(SSyncNode* pSyncNode);
int32_t syncNodeSendMsgById(const SRaftId* destRaftId, SSyncNode* pSyncNode, SRpcMsg* pMsg);
int32_t syncNodeSendMsgByInfo(const SNodeInfo* nodeInfo, SSyncNode* pSyncNode, SRpcMsg* pMsg);
SyncIndex syncMinMatchIndex(SSyncNode* pSyncNode);
int32_t syncCacheEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry, LRUHandle** h);
// raft state change --------------
void syncNodeUpdateTerm(SSyncNode* pSyncNode, SyncTerm term);

View File

@ -192,13 +192,34 @@ int32_t syncNodeOnAppendEntries(SSyncNode* ths, const SRpcMsg* pRpcMsg) {
SSyncRaftEntry* pAppendEntry = syncEntryBuildFromAppendEntries(pMsg);
ASSERT(pAppendEntry != NULL);
SyncIndex appendIndex = pMsg->prevLogIndex + 1;
SyncIndex appendIndex = pMsg->prevLogIndex + 1;
LRUHandle* hLocal = NULL;
LRUHandle* hAppend = NULL;
int32_t code = 0;
SSyncRaftEntry* pLocalEntry = NULL;
int32_t code = ths->pLogStore->syncLogGetEntry(ths->pLogStore, appendIndex, &pLocalEntry);
SLRUCache* pCache = ths->pLogStore->pCache;
hLocal = taosLRUCacheLookup(pCache, &appendIndex, sizeof(appendIndex));
if (hLocal) {
pLocalEntry = (SSyncRaftEntry*)taosLRUCacheValue(pCache, hLocal);
code = 0;
sNTrace(ths, "hit cache index:%" PRId64 ", bytes:%u, %p", appendIndex, pLocalEntry->bytes, pLocalEntry);
} else {
sNTrace(ths, "miss cache index:%" PRId64, appendIndex);
code = ths->pLogStore->syncLogGetEntry(ths->pLogStore, appendIndex, &pLocalEntry);
}
if (code == 0) {
// get local entry success
if (pLocalEntry->term == pAppendEntry->term) {
// do nothing
sNTrace(ths, "log match, do nothing, index:%" PRId64, appendIndex);
} else {
// truncate
code = ths->pLogStore->syncLogTruncate(ths->pLogStore, appendIndex);
@ -207,8 +228,18 @@ int32_t syncNodeOnAppendEntries(SSyncNode* ths, const SRpcMsg* pRpcMsg) {
snprintf(logBuf, sizeof(logBuf), "ignore, truncate error, append-index:%" PRId64, appendIndex);
syncLogRecvAppendEntries(ths, pMsg, logBuf);
syncEntryDestory(pLocalEntry);
syncEntryDestory(pAppendEntry);
if (hLocal) {
taosLRUCacheRelease(ths->pLogStore->pCache, hLocal, false);
} else {
syncEntryDestory(pLocalEntry);
}
if (hAppend) {
taosLRUCacheRelease(ths->pLogStore->pCache, hAppend, false);
} else {
syncEntryDestory(pAppendEntry);
}
goto _IGNORE;
}
@ -219,10 +250,22 @@ int32_t syncNodeOnAppendEntries(SSyncNode* ths, const SRpcMsg* pRpcMsg) {
snprintf(logBuf, sizeof(logBuf), "ignore, append error, append-index:%" PRId64, appendIndex);
syncLogRecvAppendEntries(ths, pMsg, logBuf);
syncEntryDestory(pLocalEntry);
syncEntryDestory(pAppendEntry);
if (hLocal) {
taosLRUCacheRelease(ths->pLogStore->pCache, hLocal, false);
} else {
syncEntryDestory(pLocalEntry);
}
if (hAppend) {
taosLRUCacheRelease(ths->pLogStore->pCache, hAppend, false);
} else {
syncEntryDestory(pAppendEntry);
}
goto _IGNORE;
}
syncCacheEntry(ths->pLogStore, pAppendEntry, &hAppend);
}
} else {
@ -248,20 +291,42 @@ int32_t syncNodeOnAppendEntries(SSyncNode* ths, const SRpcMsg* pRpcMsg) {
snprintf(logBuf, sizeof(logBuf), "ignore, log not exist, append error, append-index:%" PRId64, appendIndex);
syncLogRecvAppendEntries(ths, pMsg, logBuf);
syncEntryDestory(pLocalEntry);
syncEntryDestory(pAppendEntry);
if (hLocal) {
taosLRUCacheRelease(ths->pLogStore->pCache, hLocal, false);
} else {
syncEntryDestory(pLocalEntry);
}
if (hAppend) {
taosLRUCacheRelease(ths->pLogStore->pCache, hAppend, false);
} else {
syncEntryDestory(pAppendEntry);
}
goto _IGNORE;
}
syncCacheEntry(ths->pLogStore, pAppendEntry, &hAppend);
} else {
// error
// get local entry error
char logBuf[128];
snprintf(logBuf, sizeof(logBuf), "ignore, get local entry error, append-index:%" PRId64 " err:%d", appendIndex,
terrno);
syncLogRecvAppendEntries(ths, pMsg, logBuf);
syncEntryDestory(pLocalEntry);
syncEntryDestory(pAppendEntry);
if (hLocal) {
taosLRUCacheRelease(ths->pLogStore->pCache, hLocal, false);
} else {
syncEntryDestory(pLocalEntry);
}
if (hAppend) {
taosLRUCacheRelease(ths->pLogStore->pCache, hAppend, false);
} else {
syncEntryDestory(pAppendEntry);
}
goto _IGNORE;
}
}
@ -269,8 +334,17 @@ int32_t syncNodeOnAppendEntries(SSyncNode* ths, const SRpcMsg* pRpcMsg) {
// update match index
pReply->matchIndex = pAppendEntry->index;
syncEntryDestory(pLocalEntry);
syncEntryDestory(pAppendEntry);
if (hLocal) {
taosLRUCacheRelease(ths->pLogStore->pCache, hLocal, false);
} else {
syncEntryDestory(pLocalEntry);
}
if (hAppend) {
taosLRUCacheRelease(ths->pLogStore->pCache, hAppend, false);
} else {
syncEntryDestory(pAppendEntry);
}
} else {
// no append entries, do nothing
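Every exit path in the hunks above now distinguishes a cache hit (release the pinned LRU handle, never destroy the entry) from a miss (destroy the privately read entry). A toy standalone sketch of that ownership rule, using stand-in functions rather than the taosLRUCache API:

#include <stdlib.h>

typedef struct { long term; } ToyEntry;
typedef struct { ToyEntry *value; } ToyHandle;

/* Stand-ins for the cache/WAL calls; only the ownership rule matters here. */
static ToyHandle *toyCacheLookup(long index)    { (void)index; return NULL; }  /* always a miss */
static void       toyCacheRelease(ToyHandle *h) { free(h); }
static ToyEntry  *toyWalRead(long index)        { (void)index; return calloc(1, sizeof(ToyEntry)); }
static void       toyEntryDestroy(ToyEntry *e)  { free(e); }

static long toyGetTerm(long index) {
  ToyHandle *h = toyCacheLookup(index);
  ToyEntry  *e = (h != NULL) ? h->value : toyWalRead(index);
  if (e == NULL) return -1;           /* read failed */
  long term = e->term;                /* use the entry while it is pinned or owned */
  if (h != NULL) toyCacheRelease(h);  /* borrowed from the cache: release the handle */
  else           toyEntryDestroy(e);  /* owned copy from the log: destroy it */
  return term;
}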

View File

@ -83,10 +83,14 @@ int32_t syncNodeOnAppendEntriesReply(SSyncNode* ths, const SRpcMsg* pRpcMsg) {
ASSERT(pState != NULL);
if (pMsg->lastSendIndex == pState->lastSendIndex) {
int64_t timeNow = taosGetTimestampMs();
int64_t elapsed = timeNow - pState->lastSendTime;
sNTrace(ths, "sync-append-entries rtt elapsed:%" PRId64 ", index:%" PRId64, elapsed, pState->lastSendIndex);
syncNodeReplicateOne(ths, &(pMsg->srcId), true);
}
}
syncLogRecvAppendEntriesReply(ths, pMsg, "process");
return 0;
}
}

View File

@ -116,7 +116,12 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) {
LRUHandle* h = taosLRUCacheLookup(pCache, &index, sizeof(index));
if (h) {
pEntry = (SSyncRaftEntry*)taosLRUCacheValue(pCache, h);
sNTrace(pSyncNode, "hit cache index:%" PRId64 ", bytes:%u, %p", index, pEntry->bytes, pEntry);
} else {
sNTrace(pSyncNode, "miss cache index:%" PRId64, index);
int32_t code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, index, &pEntry);
if (code != 0) {
sNError(pSyncNode, "advance commit index error, read wal index:%" PRId64, index);

View File

@ -383,15 +383,33 @@ bool syncIsReadyForRead(int64_t rid) {
} else {
if (!pSyncNode->pLogStore->syncLogIsEmpty(pSyncNode->pLogStore)) {
SyncIndex lastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore);
SSyncRaftEntry* pEntry = NULL;
int32_t code = pSyncNode->pLogStore->syncLogGetEntry(
pSyncNode->pLogStore, pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore), &pEntry);
SLRUCache* pCache = pSyncNode->pLogStore->pCache;
LRUHandle* h = taosLRUCacheLookup(pCache, &lastIndex, sizeof(lastIndex));
int32_t code = 0;
if (h) {
pEntry = (SSyncRaftEntry*)taosLRUCacheValue(pCache, h);
code = 0;
sNTrace(pSyncNode, "hit cache index:%" PRId64 ", bytes:%u, %p", lastIndex, pEntry->bytes, pEntry);
} else {
sNTrace(pSyncNode, "miss cache index:%" PRId64, lastIndex);
code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, lastIndex, &pEntry);
}
if (code == 0 && pEntry != NULL) {
if (pEntry->originalRpcType == TDMT_SYNC_NOOP && pEntry->term == pSyncNode->pRaftStore->currentTerm) {
ready = true;
}
syncEntryDestory(pEntry);
if (h) {
taosLRUCacheRelease(pCache, h, false);
} else {
syncEntryDestory(pEntry);
}
}
}
}
@ -411,7 +429,7 @@ bool syncIsReadyForRead(int64_t rid) {
int32_t syncNodeLeaderTransfer(SSyncNode* pSyncNode) {
if (pSyncNode->peersNum == 0) {
sDebug("only one replica, cannot leader transfer");
sDebug("vgId:%d, only one replica, cannot leader transfer", pSyncNode->vgId);
terrno = TSDB_CODE_SYN_ONE_REPLICA;
return -1;
}
@ -427,7 +445,7 @@ int32_t syncNodeLeaderTransfer(SSyncNode* pSyncNode) {
int32_t syncNodeLeaderTransferTo(SSyncNode* pSyncNode, SNodeInfo newLeader) {
if (pSyncNode->replicaNum == 1) {
sDebug("only one replica, cannot leader transfer");
sDebug("vgId:%d, only one replica, cannot leader transfer", pSyncNode->vgId);
terrno = TSDB_CODE_SYN_ONE_REPLICA;
return -1;
}
@ -442,7 +460,9 @@ int32_t syncNodeLeaderTransferTo(SSyncNode* pSyncNode, SNodeInfo newLeader) {
pMsg->newLeaderId.vgId = pSyncNode->vgId;
pMsg->newNodeInfo = newLeader;
return syncNodePropose(pSyncNode, &rpcMsg, false);
int32_t ret = syncNodePropose(pSyncNode, &rpcMsg, false);
rpcFreeCont(rpcMsg.pCont);
return ret;
}
SSyncState syncGetState(int64_t rid) {
@ -645,13 +665,12 @@ static int32_t syncHbTimerInit(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer, SRa
static int32_t syncHbTimerStart(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer) {
int32_t ret = 0;
if (syncIsInit()) {
SSyncHbTimerData* pData = taosMemoryMalloc(sizeof(SSyncHbTimerData));
SSyncHbTimerData* pData = &pSyncTimer->hbData;
pData->pSyncNode = pSyncNode;
pData->pTimer = pSyncTimer;
pData->destId = pSyncTimer->destId;
pData->logicClock = pSyncTimer->logicClock;
pSyncTimer->pData = pData;
taosTmrReset(pSyncTimer->timerCb, pSyncTimer->timerMS, pData, syncEnv()->pTimerManager, &pSyncTimer->pTimer);
} else {
sError("vgId:%d, start ctrl hb timer error, sync env is stop", pSyncNode->vgId);
@ -664,7 +683,6 @@ static int32_t syncHbTimerStop(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer) {
atomic_add_fetch_64(&pSyncTimer->logicClock, 1);
taosTmrStop(pSyncTimer->pTimer);
pSyncTimer->pTimer = NULL;
// taosMemoryFree(pSyncTimer->pData);
return ret;
}
@ -1087,12 +1105,13 @@ int32_t syncNodeStartElectTimer(SSyncNode* pSyncNode, int32_t ms) {
if (syncIsInit()) {
pSyncNode->electTimerMS = ms;
SElectTimer* pElectTimer = taosMemoryMalloc(sizeof(SElectTimer));
pElectTimer->logicClock = pSyncNode->electTimerLogicClock;
pElectTimer->pSyncNode = pSyncNode;
pElectTimer->pData = NULL;
int64_t execTime = taosGetTimestampMs() + ms;
atomic_store_64(&(pSyncNode->electTimerParam.executeTime), execTime);
atomic_store_64(&(pSyncNode->electTimerParam.logicClock), pSyncNode->electTimerLogicClock);
pSyncNode->electTimerParam.pSyncNode = pSyncNode;
pSyncNode->electTimerParam.pData = NULL;
taosTmrReset(pSyncNode->FpElectTimerCB, pSyncNode->electTimerMS, pElectTimer, syncEnv()->pTimerManager,
taosTmrReset(pSyncNode->FpElectTimerCB, pSyncNode->electTimerMS, pSyncNode, syncEnv()->pTimerManager,
&pSyncNode->pElectTimer);
} else {
@ -1761,10 +1780,24 @@ SyncTerm syncNodeGetPreTerm(SSyncNode* pSyncNode, SyncIndex index) {
return 0;
}
SyncTerm preTerm = 0;
SyncIndex preIndex = index - 1;
SyncTerm preTerm = 0;
SyncIndex preIndex = index - 1;
SSyncRaftEntry* pPreEntry = NULL;
int32_t code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, preIndex, &pPreEntry);
SLRUCache* pCache = pSyncNode->pLogStore->pCache;
LRUHandle* h = taosLRUCacheLookup(pCache, &preIndex, sizeof(preIndex));
int32_t code = 0;
if (h) {
pPreEntry = (SSyncRaftEntry*)taosLRUCacheValue(pCache, h);
code = 0;
sNTrace(pSyncNode, "hit cache index:%" PRId64 ", bytes:%u, %p", preIndex, pPreEntry->bytes, pPreEntry);
} else {
sNTrace(pSyncNode, "miss cache index:%" PRId64, preIndex);
code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, preIndex, &pPreEntry);
}
SSnapshot snapshot = {.data = NULL,
.lastApplyIndex = SYNC_INDEX_INVALID,
@ -1774,7 +1807,13 @@ SyncTerm syncNodeGetPreTerm(SSyncNode* pSyncNode, SyncIndex index) {
if (code == 0) {
ASSERT(pPreEntry != NULL);
preTerm = pPreEntry->term;
taosMemoryFree(pPreEntry);
if (h) {
taosLRUCacheRelease(pCache, h, false);
} else {
syncEntryDestory(pPreEntry);
}
return preTerm;
} else {
if (pSyncNode->pFsm->FpGetSnapshotInfo != NULL) {
@ -1811,7 +1850,7 @@ static void syncNodeEqPingTimer(void* param, void* tmrId) {
return;
}
sNTrace(pNode, "enqueue ping msg");
sTrace("enqueue ping msg");
code = pNode->syncEqMsg(pNode->msgcb, &rpcMsg);
if (code != 0) {
sError("failed to sync enqueue ping msg since %s", terrstr());
@ -1820,27 +1859,27 @@ static void syncNodeEqPingTimer(void* param, void* tmrId) {
}
taosTmrReset(syncNodeEqPingTimer, pNode->pingTimerMS, pNode, syncEnv()->pTimerManager, &pNode->pPingTimer);
} else {
sTrace("==syncNodeEqPingTimer== pingTimerLogicClock:%" PRId64 ", pingTimerLogicClockUser:%" PRId64,
pNode->pingTimerLogicClock, pNode->pingTimerLogicClockUser);
}
}
static void syncNodeEqElectTimer(void* param, void* tmrId) {
if (!syncIsInit()) return;
SElectTimer* pElectTimer = param;
SSyncNode* pNode = pElectTimer->pSyncNode;
SSyncNode* pNode = (SSyncNode*)param;
if (pNode == NULL) return;
if (pNode->syncEqMsg == NULL) return;
int64_t tsNow = taosGetTimestampMs();
if (tsNow < pNode->electTimerParam.executeTime) return;
SRpcMsg rpcMsg = {0};
int32_t code = syncBuildTimeout(&rpcMsg, SYNC_TIMEOUT_ELECTION, pElectTimer->logicClock, pNode->electTimerMS, pNode);
int32_t code =
syncBuildTimeout(&rpcMsg, SYNC_TIMEOUT_ELECTION, pNode->electTimerParam.logicClock, pNode->electTimerMS, pNode);
if (code != 0) {
sError("failed to build elect msg");
taosMemoryFree(pElectTimer);
return;
}
@ -1851,21 +1890,9 @@ static void syncNodeEqElectTimer(void* param, void* tmrId) {
if (code != 0) {
sError("failed to sync enqueue elect msg since %s", terrstr());
rpcFreeCont(rpcMsg.pCont);
taosMemoryFree(pElectTimer);
return;
}
taosMemoryFree(pElectTimer);
#if 0
// reset timer ms
if (syncIsInit() && pNode->electBaseLine > 0) {
pNode->electTimerMS = syncUtilElectRandomMS(pNode->electBaseLine, 2 * pNode->electBaseLine);
taosTmrReset(syncNodeEqElectTimer, pNode->electTimerMS, pNode, syncEnv()->pTimerManager, &pNode->pElectTimer);
} else {
sError("sync env is stop, syncNodeEqElectTimer");
}
#endif
}
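The election-timer change above drops the per-fire SElectTimer allocation: the arm site records executeTime and logicClock in pSyncNode->electTimerParam with atomic stores, and the callback only reads them back, discarding early or stale fires with nothing to free. A minimal sketch of that arm/fire contract, using only fields and helpers visible in this diff (the helper name syncNodeArmElectTimer is illustrative, not part of the change):

// Arm: publish when the timeout becomes due and which logical clock it belongs to.
static void syncNodeArmElectTimer(SSyncNode* pNode, int32_t ms) {
  atomic_store_64(&(pNode->electTimerParam.executeTime), taosGetTimestampMs() + ms);
  atomic_store_64(&(pNode->electTimerParam.logicClock), pNode->electTimerLogicClock);
  pNode->electTimerParam.pSyncNode = pNode;
  taosTmrReset(syncNodeEqElectTimer, ms, pNode, syncEnv()->pTimerManager, &pNode->pElectTimer);
}
// Fire: the callback receives only the node pointer; an early fire is ignored by comparing
// against electTimerParam.executeTime, a stale one by comparing electTimerParam.logicClock,
// so no per-fire state has to be freed on any exit path.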
static void syncNodeEqHeartbeatTimer(void* param, void* tmrId) {
@ -1883,7 +1910,7 @@ static void syncNodeEqHeartbeatTimer(void* param, void* tmrId) {
return;
}
sNTrace(pNode, "enqueue heartbeat timer");
sTrace("enqueue heartbeat timer");
code = pNode->syncEqMsg(pNode->msgcb, &rpcMsg);
if (code != 0) {
sError("failed to enqueue heartbeat msg since %s", terrstr());
@ -1979,7 +2006,10 @@ static int32_t syncNodeEqNoop(SSyncNode* pNode) {
static void deleteCacheEntry(const void* key, size_t keyLen, void* value) { taosMemoryFree(value); }
static int32_t syncCacheEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry, LRUHandle** h) {
int32_t syncCacheEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry, LRUHandle** h) {
SSyncLogStoreData* pData = pLogStore->data;
sNTrace(pData->pSyncNode, "in cache index:%" PRId64 ", bytes:%u, %p", pEntry->index, pEntry->bytes, pEntry);
int32_t code = 0;
int32_t entryLen = sizeof(*pEntry) + pEntry->dataLen;
LRUStatus status = taosLRUCacheInsert(pLogStore->pCache, &pEntry->index, sizeof(pEntry->index), pEntry, entryLen,
@ -2000,7 +2030,6 @@ static int32_t syncNodeAppendNoop(SSyncNode* ths) {
ASSERT(pEntry != NULL);
LRUHandle* h = NULL;
syncCacheEntry(ths->pLogStore, pEntry, &h);
if (ths->state == TAOS_SYNC_STATE_LEADER) {
int32_t code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pEntry);
@ -2008,6 +2037,8 @@ static int32_t syncNodeAppendNoop(SSyncNode* ths) {
sError("append noop error");
return -1;
}
syncCacheEntry(ths->pLogStore, pEntry, &h);
}
if (h) {
@ -2044,6 +2075,7 @@ int32_t syncNodeOnHeartbeat(SSyncNode* ths, const SRpcMsg* pRpcMsg) {
SyncLocalCmd* pSyncMsg = rpcMsgLocalCmd.pCont;
pSyncMsg->cmd = SYNC_LOCAL_CMD_FOLLOWER_CMT;
pSyncMsg->fcIndex = pMsg->commitIndex;
SyncIndex fcIndex = pSyncMsg->fcIndex;
if (ths->syncEqMsg != NULL && ths->msgcb != NULL) {
int32_t code = ths->syncEqMsg(ths->msgcb, &rpcMsgLocalCmd);
@ -2051,7 +2083,7 @@ int32_t syncNodeOnHeartbeat(SSyncNode* ths, const SRpcMsg* pRpcMsg) {
sError("vgId:%d, sync enqueue fc-commit msg error, code:%d", ths->vgId, code);
rpcFreeCont(rpcMsgLocalCmd.pCont);
} else {
sTrace("vgId:%d, sync enqueue fc-commit msg, fc-index:%" PRId64, ths->vgId, pSyncMsg->fcIndex);
sTrace("vgId:%d, sync enqueue fc-commit msg, fc-index:%" PRId64, ths->vgId, fcIndex);
}
}
}
@ -2143,7 +2175,6 @@ int32_t syncNodeOnClientRequest(SSyncNode* ths, SRpcMsg* pMsg, SyncIndex* pRetIn
}
LRUHandle* h = NULL;
syncCacheEntry(ths->pLogStore, pEntry, &h);
if (ths->state == TAOS_SYNC_STATE_LEADER) {
// append entry
@ -2183,6 +2214,8 @@ int32_t syncNodeOnClientRequest(SSyncNode* ths, SRpcMsg* pMsg, SyncIndex* pRetIn
}
}
syncCacheEntry(ths->pLogStore, pEntry, &h);
// if multiple replicas, start replicating right now
if (ths->replicaNum > 1) {
syncNodeReplicate(ths);
@ -2349,7 +2382,12 @@ int32_t syncNodeDoCommit(SSyncNode* ths, SyncIndex beginIndex, SyncIndex endInde
LRUHandle* h = taosLRUCacheLookup(pCache, &i, sizeof(i));
if (h) {
pEntry = (SSyncRaftEntry*)taosLRUCacheValue(pCache, h);
sNTrace(ths, "hit cache index:%" PRId64 ", bytes:%u, %p", i, pEntry->bytes, pEntry);
} else {
sNTrace(ths, "miss cache index:%" PRId64, i);
code = ths->pLogStore->syncLogGetEntry(ths->pLogStore, i, &pEntry);
// ASSERT(code == 0);
// ASSERT(pEntry != NULL);
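syncNodeGetPreTerm and syncNodeDoCommit above, and syncNodeReplicateOne later in this diff, now share one read path for raft log entries: look the index up in the log store's LRU cache, fall back to syncLogGetEntry on a miss, and afterwards release the cache handle or destroy the privately loaded entry. A condensed sketch of that pattern, assuming the taosLRUCache* helpers behave as they are used above (the helper name syncReadLogEntry is illustrative):

// Read one log entry, preferring the LRU cache; on a hit *ppHandle is set and must be
// released by the caller, on a miss the entry is loaded from the log store instead.
static int32_t syncReadLogEntry(SSyncNode* pNode, SyncIndex index, SSyncRaftEntry** ppEntry, LRUHandle** ppHandle) {
  SLRUCache* pCache = pNode->pLogStore->pCache;
  *ppHandle = taosLRUCacheLookup(pCache, &index, sizeof(index));
  if (*ppHandle != NULL) {
    *ppEntry = (SSyncRaftEntry*)taosLRUCacheValue(pCache, *ppHandle);  // cache hit
    return 0;
  }
  return pNode->pLogStore->syncLogGetEntry(pNode->pLogStore, index, ppEntry);  // cache miss
}
// Caller cleanup mirrors the sites above:
//   if (h) taosLRUCacheRelease(pCache, h, false); else syncEntryDestory(pEntry);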

View File

@ -92,6 +92,8 @@ SSyncRaftEntry* syncEntryBuildNoop(SyncTerm term, SyncIndex index, int32_t vgId)
void syncEntryDestory(SSyncRaftEntry* pEntry) {
if (pEntry != NULL) {
taosMemoryFree(pEntry);
sTrace("free entry: %p", pEntry);
}
}

View File

@ -37,7 +37,8 @@ SSyncLogStore* logStoreCreate(SSyncNode* pSyncNode) {
return NULL;
}
pLogStore->pCache = taosLRUCacheInit(10 * 1024 * 1024, 1, .5);
// pLogStore->pCache = taosLRUCacheInit(10 * 1024 * 1024, 1, .5);
pLogStore->pCache = taosLRUCacheInit(100 * 1024 * 1024, 1, .5);
if (pLogStore->pCache == NULL) {
taosMemoryFree(pLogStore);
terrno = TSDB_CODE_WAL_OUT_OF_MEMORY;
@ -321,6 +322,17 @@ static int32_t raftLogTruncate(struct SSyncLogStore* pLogStore, SyncIndex fromIn
return 0;
}
// delete from cache
for (SyncIndex index = fromIndex; index <= wallastVer; ++index) {
SLRUCache* pCache = pData->pSyncNode->pLogStore->pCache;
LRUHandle* h = taosLRUCacheLookup(pCache, &index, sizeof(index));
if (h) {
sNTrace(pData->pSyncNode, "cache delete index:%" PRId64, index);
taosLRUCacheRelease(pData->pSyncNode->pLogStore->pCache, h, true);
}
}
int32_t code = walRollback(pWal, fromIndex);
if (code != 0) {
int32_t err = terrno;

View File

@ -73,7 +73,20 @@ int32_t syncNodeReplicateOne(SSyncNode* pSyncNode, SRaftId* pDestId, bool snapsh
SyncAppendEntries* pMsg = NULL;
SSyncRaftEntry* pEntry = NULL;
int32_t code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, nextIndex, &pEntry);
SLRUCache* pCache = pSyncNode->pLogStore->pCache;
LRUHandle* h = taosLRUCacheLookup(pCache, &nextIndex, sizeof(nextIndex));
int32_t code = 0;
if (h) {
pEntry = (SSyncRaftEntry*)taosLRUCacheValue(pCache, h);
code = 0;
sNTrace(pSyncNode, "hit cache index:%" PRId64 ", bytes:%u, %p", nextIndex, pEntry->bytes, pEntry);
} else {
sNTrace(pSyncNode, "miss cache index:%" PRId64, nextIndex);
code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, nextIndex, &pEntry);
}
if (code == 0) {
ASSERT(pEntry != NULL);
@ -99,7 +112,11 @@ int32_t syncNodeReplicateOne(SSyncNode* pSyncNode, SRaftId* pDestId, bool snapsh
}
}
syncEntryDestory(pEntry);
if (h) {
taosLRUCacheRelease(pCache, h, false);
} else {
syncEntryDestory(pEntry);
}
// prepare msg
ASSERT(pMsg != NULL);

View File

@ -192,7 +192,9 @@ int32_t snapshotSend(SSyncSnapshotSender *pSender) {
// pMsg->privateTerm = pSender->privateTerm;
memcpy(pMsg->data, pSender->pCurrentBlock, pSender->blockLen);
if (pSender->pCurrentBlock != NULL) {
memcpy(pMsg->data, pSender->pCurrentBlock, pSender->blockLen);
}
// send msg
syncNodeSendMsgById(&pMsg->destId, pSender->pSyncNode, &rpcMsg);

View File

@ -193,7 +193,7 @@ static void syncPeerState2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen) {
}
void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNode* pNode, const char* format, ...) {
if (pNode == NULL || pNode->pRaftCfg != NULL && pNode->pRaftStore == NULL || pNode->pLogStore == NULL) return;
if (pNode == NULL || pNode->pRaftCfg == NULL || pNode->pRaftStore == NULL || pNode->pLogStore == NULL) return;
int64_t currentTerm = pNode->pRaftStore->currentTerm;
// save error code, otherwise it will be overwritten
@ -212,7 +212,11 @@ void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNo
}
char cfgStr[1024];
syncCfg2SimpleStr(&(pNode->pRaftCfg->cfg), cfgStr, sizeof(cfgStr));
if (pNode->pRaftCfg != NULL) {
syncCfg2SimpleStr(&(pNode->pRaftCfg->cfg), cfgStr, sizeof(cfgStr));
} else {
return;
}
char peerStr[1024] = "{";
syncPeerState2Str(pNode, peerStr, sizeof(peerStr));
@ -230,23 +234,25 @@ void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNo
// restore error code
terrno = errCode;
taosPrintLog(flags, level, dflag,
"vgId:%d, sync %s "
"%s"
", tm:%" PRIu64 ", cmt:%" PRId64 ", fst:%" PRId64 ", lst:%" PRId64 ", min:%" PRId64 ", snap:%" PRId64
", snap-tm:%" PRIu64 ", sby:%d, aq:%d, bch:%d, r-num:%d, lcfg:%" PRId64
", chging:%d, rsto:%d, dquorum:%d, elt:%" PRId64 ", hb:%" PRId64 ", %s, %s",
pNode->vgId, syncStr(pNode->state), eventLog, currentTerm, pNode->commitIndex, logBeginIndex,
logLastIndex, pNode->minMatchIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm,
pNode->pRaftCfg->isStandBy, aqItems, pNode->pRaftCfg->batchSize, pNode->replicaNum,
pNode->pRaftCfg->lastConfigIndex, pNode->changing, pNode->restoreFinish, quorum,
pNode->electTimerLogicClock, pNode->heartbeatTimerLogicClockUser, peerStr, cfgStr);
if (pNode != NULL && pNode->pRaftCfg != NULL) {
taosPrintLog(flags, level, dflag,
"vgId:%d, sync %s "
"%s"
", tm:%" PRIu64 ", cmt:%" PRId64 ", fst:%" PRId64 ", lst:%" PRId64 ", min:%" PRId64 ", snap:%" PRId64
", snap-tm:%" PRIu64 ", sby:%d, aq:%d, bch:%d, r-num:%d, lcfg:%" PRId64
", chging:%d, rsto:%d, dquorum:%d, elt:%" PRId64 ", hb:%" PRId64 ", %s, %s",
pNode->vgId, syncStr(pNode->state), eventLog, currentTerm, pNode->commitIndex, logBeginIndex,
logLastIndex, pNode->minMatchIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm,
pNode->pRaftCfg->isStandBy, aqItems, pNode->pRaftCfg->batchSize, pNode->replicaNum,
pNode->pRaftCfg->lastConfigIndex, pNode->changing, pNode->restoreFinish, quorum,
pNode->electTimerLogicClock, pNode->heartbeatTimerLogicClockUser, peerStr, cfgStr);
}
}
void syncPrintSnapshotSenderLog(const char* flags, ELogLevel level, int32_t dflag, SSyncSnapshotSender* pSender,
const char* format, ...) {
SSyncNode* pNode = pSender->pSyncNode;
if (pNode == NULL || pNode->pRaftCfg != NULL && pNode->pRaftStore == NULL || pNode->pLogStore == NULL) return;
if (pNode == NULL || pNode->pRaftCfg == NULL || pNode->pRaftStore == NULL || pNode->pLogStore == NULL) return;
SSnapshot snapshot = {.data = NULL, .lastApplyIndex = -1, .lastApplyTerm = 0};
if (pNode->pFsm != NULL && pNode->pFsm->FpGetSnapshotInfo != NULL) {
@ -298,7 +304,7 @@ void syncPrintSnapshotSenderLog(const char* flags, ELogLevel level, int32_t dfla
void syncPrintSnapshotReceiverLog(const char* flags, ELogLevel level, int32_t dflag, SSyncSnapshotReceiver* pReceiver,
const char* format, ...) {
SSyncNode* pNode = pReceiver->pSyncNode;
if (pNode == NULL || pNode->pRaftCfg != NULL && pNode->pRaftStore == NULL || pNode->pLogStore == NULL) return;
if (pNode == NULL || pNode->pRaftCfg == NULL || pNode->pRaftStore == NULL || pNode->pLogStore == NULL) return;
SSnapshot snapshot = {.data = NULL, .lastApplyIndex = -1, .lastApplyTerm = 0};
if (pNode->pFsm != NULL && pNode->pFsm->FpGetSnapshotInfo != NULL) {
@ -364,9 +370,9 @@ void syncLogSendAppendEntriesReply(SSyncNode* pSyncNode, const SyncAppendEntries
syncUtilU642Addr(pMsg->destId.addr, host, sizeof(host), &port);
sNTrace(pSyncNode,
"send sync-append-entries-reply to %s:%d, {term:%" PRId64 ", pterm:%" PRId64 ", success:%d, match:%" PRId64
"}, %s",
host, port, pMsg->term, pMsg->privateTerm, pMsg->success, pMsg->matchIndex, s);
"send sync-append-entries-reply to %s:%d, {term:%" PRId64 ", pterm:%" PRId64
", success:%d, lsend-index:%" PRId64 ", match:%" PRId64 "}, %s",
host, port, pMsg->term, pMsg->privateTerm, pMsg->success, pMsg->lastSendIndex, pMsg->matchIndex, s);
}
void syncLogRecvAppendEntriesReply(SSyncNode* pSyncNode, const SyncAppendEntriesReply* pMsg, const char* s) {
@ -548,4 +554,4 @@ void syncLogSendRequestVoteReply(SSyncNode* pSyncNode, const SyncRequestVoteRepl
syncUtilU642Addr(pMsg->destId.addr, host, sizeof(host), &port);
sNTrace(pSyncNode, "send sync-request-vote-reply to %s:%d {term:%" PRId64 ", grant:%d}, %s", host, port, pMsg->term,
pMsg->voteGranted, s);
}
}

View File

@ -203,6 +203,14 @@ void walClose(SWal *pWal) {
pWal->pIdxFile = NULL;
taosArrayDestroy(pWal->fileInfoSet);
pWal->fileInfoSet = NULL;
void *pIter = NULL;
while (1) {
pIter = taosHashIterate(pWal->pRefHash, pIter);
if (pIter == NULL) break;
SWalRef *pRef = *(SWalRef **)pIter;
taosMemoryFree(pRef);
}
taosHashCleanup(pWal->pRefHash);
taosThreadMutexUnlock(&pWal->mutex);
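Both the walClose cleanup above and walRestoreFromSnapshot below walk pWal->pRefHash, whose values are SWalRef* pointers; taosHashIterate returns a pointer to the stored value, so each element must be read as *(SWalRef **)pIter before use, which is the dereference the walRestoreFromSnapshot change below corrects. A minimal sketch of that iteration (assuming the value layout used in this diff):

void* pIter = NULL;
while ((pIter = taosHashIterate(pWal->pRefHash, pIter)) != NULL) {
  SWalRef* pRef = *(SWalRef**)pIter;  // the hash stores the pointer itself as its value
  // inspect pRef->refVer / pRef->refId here; call taosHashCancelIterate(pWal->pRefHash, pIter)
  // before breaking out early, as walRestoreFromSnapshot does below.
}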

View File

@ -32,7 +32,7 @@ SWalRef *walOpenRef(SWal *pWal) {
return pRef;
}
#if 0
#if 1
void walCloseRef(SWal *pWal, int64_t refId) {
SWalRef **ppRef = taosHashGet(pWal->pRefHash, &refId, sizeof(int64_t));
if (ppRef == NULL) return;
@ -67,7 +67,7 @@ int32_t walRefVer(SWalRef *pRef, int64_t ver) {
return 0;
}
#if 0
#if 1
void walUnrefVer(SWalRef *pRef) {
pRef->refId = -1;
pRef->refFile = -1;

View File

@ -22,11 +22,13 @@
int32_t walRestoreFromSnapshot(SWal *pWal, int64_t ver) {
taosThreadMutexLock(&pWal->mutex);
wInfo("vgId:%d, restore from snapshot, version %" PRId64, pWal->cfg.vgId, ver);
void *pIter = NULL;
while (1) {
pIter = taosHashIterate(pWal->pRefHash, pIter);
if (pIter == NULL) break;
SWalRef *pRef = (SWalRef *)pIter;
SWalRef *pRef = *(SWalRef **)pIter;
if (pRef->refVer != -1 && pRef->refVer <= ver) {
taosHashCancelIterate(pWal->pRefHash, pIter);
taosThreadMutexUnlock(&pWal->mutex);
@ -70,8 +72,8 @@ int32_t walRestoreFromSnapshot(SWal *pWal, int64_t ver) {
taosArrayClear(pWal->fileInfoSet);
pWal->vers.firstVer = -1;
pWal->vers.lastVer = ver;
pWal->vers.commitVer = ver - 1;
pWal->vers.snapshotVer = ver - 1;
pWal->vers.commitVer = ver;
pWal->vers.snapshotVer = ver;
pWal->vers.verInSnapshotting = -1;
taosThreadMutexUnlock(&pWal->mutex);
@ -100,6 +102,7 @@ int32_t walCommit(SWal *pWal, int64_t ver) {
int32_t walRollback(SWal *pWal, int64_t ver) {
taosThreadMutexLock(&pWal->mutex);
wInfo("vgId:%d, wal rollback for version %" PRId64, pWal->cfg.vgId, ver);
int64_t code;
char fnameStr[WAL_FILE_LEN];
if (ver > pWal->vers.lastVer || ver < pWal->vers.commitVer || ver <= pWal->vers.snapshotVer) {
@ -121,8 +124,10 @@ int32_t walRollback(SWal *pWal, int64_t ver) {
int fileSetSize = taosArrayGetSize(pWal->fileInfoSet);
for (int i = pWal->writeCur + 1; i < fileSetSize; i++) {
walBuildLogName(pWal, ((SWalFileInfo *)taosArrayGet(pWal->fileInfoSet, i))->firstVer, fnameStr);
wDebug("vgId:%d, wal remove file %s for rollback", pWal->cfg.vgId, fnameStr);
taosRemoveFile(fnameStr);
walBuildIdxName(pWal, ((SWalFileInfo *)taosArrayGet(pWal->fileInfoSet, i))->firstVer, fnameStr);
wDebug("vgId:%d, wal remove file %s for rollback", pWal->cfg.vgId, fnameStr);
taosRemoveFile(fnameStr);
}
// pop from fileInfoSet
@ -155,6 +160,7 @@ int32_t walRollback(SWal *pWal, int64_t ver) {
walBuildLogName(pWal, walGetCurFileFirstVer(pWal), fnameStr);
TdFilePtr pLogFile = taosOpenFile(fnameStr, TD_FILE_WRITE | TD_FILE_READ | TD_FILE_APPEND);
wDebug("vgId:%d, wal truncate file %s", pWal->cfg.vgId, fnameStr);
if (pLogFile == NULL) {
// TODO
terrno = TAOS_SYSTEM_ERROR(errno);
@ -319,12 +325,12 @@ int32_t walEndSnapshot(SWal *pWal) {
SWalFileInfo *pInfo = taosArraySearch(pWal->fileInfoSet, &tmp, compareWalFileInfo, TD_LE);
if (pInfo) {
if (ver >= pInfo->lastVer) {
pInfo++;
pInfo--;
}
if (POINTER_DISTANCE(pInfo, pWal->fileInfoSet->pData) > 0) {
wDebug("vgId:%d, begin remove from %" PRId64, pWal->cfg.vgId, pInfo->firstVer);
wDebug("vgId:%d, wal end remove for %" PRId64, pWal->cfg.vgId, pInfo->firstVer);
} else {
wDebug("vgId:%d, no remove", pWal->cfg.vgId);
wDebug("vgId:%d, wal no remove", pWal->cfg.vgId);
}
// iterate files, until the searched result
for (SWalFileInfo *iter = pWal->fileInfoSet->pData; iter < pInfo; iter++) {
@ -341,12 +347,12 @@ int32_t walEndSnapshot(SWal *pWal) {
for (int i = 0; i < deleteCnt; i++) {
pInfo = taosArrayGet(pWal->fileInfoSet, i);
walBuildLogName(pWal, pInfo->firstVer, fnameStr);
wDebug("vgId:%d, remove file %s", pWal->cfg.vgId, fnameStr);
wDebug("vgId:%d, wal remove file %s", pWal->cfg.vgId, fnameStr);
if (taosRemoveFile(fnameStr) < 0) {
goto UPDATE_META;
}
walBuildIdxName(pWal, pInfo->firstVer, fnameStr);
wDebug("vgId:%d, remove file %s", pWal->cfg.vgId, fnameStr);
wDebug("vgId:%d, wal remove file %s", pWal->cfg.vgId, fnameStr);
if (taosRemoveFile(fnameStr) < 0) {
ASSERT(0);
}

View File

@ -10,9 +10,9 @@
,,y,script,./test.sh -f tsim/user/password.sim
,,y,script,./test.sh -f tsim/user/privilege_db.sim
,,y,script,./test.sh -f tsim/user/privilege_sysinfo.sim
,,,script,./test.sh -f tsim/db/alter_option.sim
,,y,script,./test.sh -f tsim/db/alter_option.sim
,,,script,./test.sh -f tsim/db/alter_replica_13.sim
,,,script,./test.sh -f tsim/db/alter_replica_31.sim
,,y,script,./test.sh -f tsim/db/alter_replica_31.sim
,,y,script,./test.sh -f tsim/db/basic1.sim
,,y,script,./test.sh -f tsim/db/basic2.sim
,,y,script,./test.sh -f tsim/db/basic3.sim
@ -20,7 +20,7 @@
,,y,script,./test.sh -f tsim/db/basic5.sim
,,y,script,./test.sh -f tsim/db/basic6.sim
,,y,script,./test.sh -f tsim/db/commit.sim
,,,script,./test.sh -f tsim/db/create_all_options.sim
,,y,script,./test.sh -f tsim/db/create_all_options.sim
,,y,script,./test.sh -f tsim/db/delete_reuse1.sim
,,y,script,./test.sh -f tsim/db/delete_reuse2.sim
,,y,script,./test.sh -f tsim/db/delete_reusevnode.sim
@ -42,8 +42,8 @@
,,,script,./test.sh -f tsim/dnode/balance3.sim
,,,script,./test.sh -f tsim/dnode/balancex.sim
,,y,script,./test.sh -f tsim/dnode/create_dnode.sim
,,,script,./test.sh -f tsim/dnode/drop_dnode_has_mnode.sim
,,,script,./test.sh -f tsim/dnode/drop_dnode_has_qnode_snode.sim
,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_mnode.sim
,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_qnode_snode.sim
,,,script,./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica1.sim
,,,script,./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica3.sim
,,,script,./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica1.sim
@ -90,7 +90,7 @@
,,y,script,./test.sh -f tsim/parser/auto_create_tb.sim
,,y,script,./test.sh -f tsim/parser/between_and.sim
,,y,script,./test.sh -f tsim/parser/binary_escapeCharacter.sim
,,,script,./test.sh -f tsim/parser/col_arithmetic_operation.sim
,,y,script,./test.sh -f tsim/parser/col_arithmetic_operation.sim
,,y,script,./test.sh -f tsim/parser/columnValue_bigint.sim
,,y,script,./test.sh -f tsim/parser/columnValue_bool.sim
,,y,script,./test.sh -f tsim/parser/columnValue_double.sim
@ -98,7 +98,7 @@
,,y,script,./test.sh -f tsim/parser/columnValue_int.sim
,,y,script,./test.sh -f tsim/parser/columnValue_smallint.sim
,,y,script,./test.sh -f tsim/parser/columnValue_tinyint.sim
,,,script,./test.sh -f tsim/parser/columnValue_unsign.sim
,,y,script,./test.sh -f tsim/parser/columnValue_unsign.sim
,,y,script,./test.sh -f tsim/parser/commit.sim
,,y,script,./test.sh -f tsim/parser/condition.sim
,,y,script,./test.sh -f tsim/parser/constCol.sim
@ -111,12 +111,15 @@
,,y,script,./test.sh -f tsim/parser/fill_us.sim
,,y,script,./test.sh -f tsim/parser/fill.sim
,,y,script,./test.sh -f tsim/parser/first_last.sim
,,y,script,./test.sh -f tsim/parser/fill_stb.sim
,,y,script,./test.sh -f tsim/parser/interp.sim
#,,y,script,./test.sh -f tsim/parser/limit2.sim
,,y,script,./test.sh -f tsim/parser/fourArithmetic-basic.sim
,,,script,./test.sh -f tsim/parser/function.sim
,,y,script,./test.sh -f tsim/parser/function.sim
,,y,script,./test.sh -f tsim/parser/groupby-basic.sim
,,,script,./test.sh -f tsim/parser/groupby.sim
,,,script,./test.sh -f tsim/parser/having_child.sim
,,,script,./test.sh -f tsim/parser/having.sim
,,y,script,./test.sh -f tsim/parser/groupby.sim
,,y,script,./test.sh -f tsim/parser/having_child.sim
,,y,script,./test.sh -f tsim/parser/having.sim
,,y,script,./test.sh -f tsim/parser/import_commit1.sim
,,y,script,./test.sh -f tsim/parser/import_commit2.sim
,,y,script,./test.sh -f tsim/parser/import_commit3.sim
@ -124,7 +127,7 @@
,,y,script,./test.sh -f tsim/parser/import.sim
,,y,script,./test.sh -f tsim/parser/insert_multiTbl.sim
,,y,script,./test.sh -f tsim/parser/insert_tb.sim
,,,script,./test.sh -f tsim/parser/join_manyblocks.sim
,,y,script,./test.sh -f tsim/parser/join_manyblocks.sim
,,y,script,./test.sh -f tsim/parser/join_multitables.sim
,,y,script,./test.sh -f tsim/parser/join_multivnode.sim
,,y,script,./test.sh -f tsim/parser/join.sim
@ -137,10 +140,10 @@
,,,script,./test.sh -f tsim/parser/limit1.sim
,,y,script,./test.sh -f tsim/parser/mixed_blocks.sim
,,y,script,./test.sh -f tsim/parser/nchar.sim
,,,script,./test.sh -f tsim/parser/nestquery.sim
,,y,script,./test.sh -f tsim/parser/nestquery.sim
,,y,script,./test.sh -f tsim/parser/null_char.sim
,,y,script,./test.sh -f tsim/parser/precision_ns.sim
,,,script,./test.sh -f tsim/parser/projection_limit_offset.sim
,,y,script,./test.sh -f tsim/parser/projection_limit_offset.sim
,,y,script,./test.sh -f tsim/parser/regex.sim
,,y,script,./test.sh -f tsim/parser/select_across_vnodes.sim
,,y,script,./test.sh -f tsim/parser/select_distinct_tag.sim
@ -149,30 +152,30 @@
,,y,script,./test.sh -f tsim/parser/selectResNum.sim
,,y,script,./test.sh -f tsim/parser/set_tag_vals.sim
,,y,script,./test.sh -f tsim/parser/single_row_in_tb.sim
,,,script,./test.sh -f tsim/parser/sliding.sim
,,y,script,./test.sh -f tsim/parser/sliding.sim
,,y,script,./test.sh -f tsim/parser/slimit_alter_tags.sim
,,y,script,./test.sh -f tsim/parser/slimit.sim
,,y,script,./test.sh -f tsim/parser/slimit1.sim
,,y,script,./test.sh -f tsim/parser/stableOp.sim
,,y,script,./test.sh -f tsim/parser/tags_dynamically_specifiy.sim
,,,script,./test.sh -f tsim/parser/tags_filter.sim
,,,script,./test.sh -f tsim/parser/tbnameIn.sim
,,y,script,./test.sh -f tsim/parser/tags_filter.sim
,,y,script,./test.sh -f tsim/parser/tbnameIn.sim
,,y,script,./test.sh -f tsim/parser/timestamp.sim
,,y,script,./test.sh -f tsim/parser/top_groupby.sim
,,y,script,./test.sh -f tsim/parser/topbot.sim
,,,script,./test.sh -f tsim/parser/union.sim
,,y,script,./test.sh -f tsim/parser/union_sysinfo.sim
,,,script,./test.sh -f tsim/parser/where.sim
,,y,script,./test.sh -f tsim/parser/where.sim
,,y,script,./test.sh -f tsim/query/charScalarFunction.sim
,,y,script,./test.sh -f tsim/query/explain.sim
,,y,script,./test.sh -f tsim/query/interval-offset.sim
,,y,script,./test.sh -f tsim/query/interval.sim
,,y,script,./test.sh -f tsim/query/scalarFunction.sim
,,y,script,./test.sh -f tsim/query/scalarNull.sim
,,,script,./test.sh -f tsim/query/session.sim
,,,script,./test.sh -f tsim/query/udf.sim
,,y,script,./test.sh -f tsim/query/session.sim
,,y,script,./test.sh -f tsim/query/udf.sim
,,y,script,./test.sh -f tsim/qnode/basic1.sim
,,,script,./test.sh -f tsim/snode/basic1.sim
,,y,script,./test.sh -f tsim/snode/basic1.sim
,,,script,./test.sh -f tsim/mnode/basic1.sim
,,,script,./test.sh -f tsim/mnode/basic2.sim
,,,script,./test.sh -f tsim/mnode/basic3.sim
@ -207,53 +210,53 @@
,,y,script,./test.sh -f tsim/table/table.sim
,,y,script,./test.sh -f tsim/table/tinyint.sim
,,y,script,./test.sh -f tsim/table/vgroup.sim
,,,script,./test.sh -f tsim/stream/basic0.sim -g
,,,script,./test.sh -f tsim/stream/basic1.sim
,,n,script,./test.sh -f tsim/stream/basic0.sim -g
,,y,script,./test.sh -f tsim/stream/basic1.sim
,,y,script,./test.sh -f tsim/stream/basic2.sim
,,,script,./test.sh -f tsim/stream/drop_stream.sim
,,y,script,./test.sh -f tsim/stream/fillHistoryBasic1.sim
,,y,script,./test.sh -f tsim/stream/fillHistoryBasic2.sim
,,,script,./test.sh -f tsim/stream/fillHistoryBasic3.sim
,,y,script,./test.sh -f tsim/stream/fillHistoryBasic3.sim
,,y,script,./test.sh -f tsim/stream/distributeInterval0.sim
,,,script,./test.sh -f tsim/stream/distributeIntervalRetrive0.sim
,,,script,./test.sh -f tsim/stream/distributeSession0.sim
,,,script,./test.sh -f tsim/stream/session0.sim
,,,script,./test.sh -f tsim/stream/session1.sim
,,,script,./test.sh -f tsim/stream/state0.sim
,,,script,./test.sh -f tsim/stream/triggerInterval0.sim
,,,script,./test.sh -f tsim/stream/triggerSession0.sim
,,,script,./test.sh -f tsim/stream/partitionby.sim
,,,script,./test.sh -f tsim/stream/partitionby1.sim
,,y,script,./test.sh -f tsim/stream/distributeIntervalRetrive0.sim
,,y,script,./test.sh -f tsim/stream/distributeSession0.sim
,,y,script,./test.sh -f tsim/stream/session0.sim
,,y,script,./test.sh -f tsim/stream/session1.sim
,,y,script,./test.sh -f tsim/stream/state0.sim
,,y,script,./test.sh -f tsim/stream/triggerInterval0.sim
,,y,script,./test.sh -f tsim/stream/triggerSession0.sim
,,y,script,./test.sh -f tsim/stream/partitionby.sim
,,y,script,./test.sh -f tsim/stream/partitionby1.sim
,,y,script,./test.sh -f tsim/stream/schedSnode.sim
,,,script,./test.sh -f tsim/stream/windowClose.sim
,,,script,./test.sh -f tsim/stream/ignoreExpiredData.sim
,,y,script,./test.sh -f tsim/stream/ignoreExpiredData.sim
,,y,script,./test.sh -f tsim/stream/sliding.sim
,,,script,./test.sh -f tsim/stream/partitionbyColumnInterval.sim
,,,script,./test.sh -f tsim/stream/partitionbyColumnSession.sim
,,,script,./test.sh -f tsim/stream/partitionbyColumnState.sim
,,,script,./test.sh -f tsim/stream/deleteInterval.sim
,,,script,./test.sh -f tsim/stream/deleteSession.sim
,,,script,./test.sh -f tsim/stream/deleteState.sim
,,y,script,./test.sh -f tsim/stream/partitionbyColumnInterval.sim
,,y,script,./test.sh -f tsim/stream/partitionbyColumnSession.sim
,,y,script,./test.sh -f tsim/stream/partitionbyColumnState.sim
,,y,script,./test.sh -f tsim/stream/deleteInterval.sim
,,y,script,./test.sh -f tsim/stream/deleteSession.sim
,,y,script,./test.sh -f tsim/stream/deleteState.sim
,,y,script,./test.sh -f tsim/stream/fillIntervalDelete0.sim
,,,script,./test.sh -f tsim/stream/fillIntervalDelete1.sim
,,y,script,./test.sh -f tsim/stream/fillIntervalDelete1.sim
,,y,script,./test.sh -f tsim/stream/fillIntervalLinear.sim
,,y,script,./test.sh -f tsim/stream/fillIntervalPartitionBy.sim
,,y,script,./test.sh -f tsim/stream/fillIntervalPrevNext.sim
,,y,script,./test.sh -f tsim/stream/fillIntervalValue.sim
,,y,script,./test.sh -f tsim/trans/lossdata1.sim
,,y,script,./test.sh -f tsim/trans/create_db.sim
,,,script,./test.sh -f tsim/tmq/basic1.sim
,,,script,./test.sh -f tsim/tmq/basic2.sim
,,,script,./test.sh -f tsim/tmq/basic3.sim
,,,script,./test.sh -f tsim/tmq/basic4.sim
,,,script,./test.sh -f tsim/tmq/basic1Of2Cons.sim
,,,script,./test.sh -f tsim/tmq/basic2Of2Cons.sim
,,,script,./test.sh -f tsim/tmq/basic3Of2Cons.sim
,,,script,./test.sh -f tsim/tmq/basic4Of2Cons.sim
,,,script,./test.sh -f tsim/tmq/basic2Of2ConsOverlap.sim
,,,script,./test.sh -f tsim/tmq/topic.sim
,,,script,./test.sh -f tsim/tmq/snapshot.sim
,,,script,./test.sh -f tsim/tmq/snapshot1.sim
,,y,script,./test.sh -f tsim/tmq/basic1.sim
,,y,script,./test.sh -f tsim/tmq/basic2.sim
,,y,script,./test.sh -f tsim/tmq/basic3.sim
,,y,script,./test.sh -f tsim/tmq/basic4.sim
,,y,script,./test.sh -f tsim/tmq/basic1Of2Cons.sim
,,y,script,./test.sh -f tsim/tmq/basic2Of2Cons.sim
,,y,script,./test.sh -f tsim/tmq/basic3Of2Cons.sim
,,y,script,./test.sh -f tsim/tmq/basic4Of2Cons.sim
,,y,script,./test.sh -f tsim/tmq/basic2Of2ConsOverlap.sim
,,y,script,./test.sh -f tsim/tmq/topic.sim
,,y,script,./test.sh -f tsim/tmq/snapshot.sim
,,y,script,./test.sh -f tsim/tmq/snapshot1.sim
,,y,script,./test.sh -f tsim/stable/alter_comment.sim
,,y,script,./test.sh -f tsim/stable/alter_count.sim
,,y,script,./test.sh -f tsim/stable/alter_import.sim
@ -278,36 +281,36 @@
,,,script,./test.sh -f tsim/sma/drop_sma.sim
,,,script,./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim
,,y,script,./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim
,,,script,./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim
,,,script,./test.sh -f tsim/valgrind/checkError1.sim
,,,script,./test.sh -f tsim/valgrind/checkError2.sim
,,,script,./test.sh -f tsim/valgrind/checkError3.sim
,,,script,./test.sh -f tsim/valgrind/checkError4.sim
,,,script,./test.sh -f tsim/valgrind/checkError5.sim
,,,script,./test.sh -f tsim/valgrind/checkError6.sim
,,,script,./test.sh -f tsim/valgrind/checkError7.sim
,,,script,./test.sh -f tsim/valgrind/checkError8.sim
,,,script,./test.sh -f tsim/valgrind/checkUdf.sim
,,y,script,./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim
,,n,script,./test.sh -f tsim/valgrind/checkError1.sim
,,n,script,./test.sh -f tsim/valgrind/checkError2.sim
,,n,script,./test.sh -f tsim/valgrind/checkError3.sim
,,n,script,./test.sh -f tsim/valgrind/checkError4.sim
,,n,script,./test.sh -f tsim/valgrind/checkError5.sim
,,n,script,./test.sh -f tsim/valgrind/checkError6.sim
,,n,script,./test.sh -f tsim/valgrind/checkError7.sim
,,n,script,./test.sh -f tsim/valgrind/checkError8.sim
,,n,script,./test.sh -f tsim/valgrind/checkUdf.sim
,,,script,./test.sh -f tsim/vnode/replica3_basic.sim
,,,script,./test.sh -f tsim/vnode/replica3_repeat.sim
,,,script,./test.sh -f tsim/vnode/replica3_vgroup.sim
,,,script,./test.sh -f tsim/vnode/replica3_many.sim
,,,script,./test.sh -f tsim/vnode/replica3_import.sim
,,y,script,./test.sh -f tsim/vnode/replica3_repeat.sim
,,y,script,./test.sh -f tsim/vnode/replica3_vgroup.sim
,,y,script,./test.sh -f tsim/vnode/replica3_many.sim
,,y,script,./test.sh -f tsim/vnode/replica3_import.sim
,,,script,./test.sh -f tsim/vnode/stable_balance_replica1.sim
,,y,script,./test.sh -f tsim/vnode/stable_dnode2_stop.sim
,,y,script,./test.sh -f tsim/vnode/stable_dnode2.sim
,,y,script,./test.sh -f tsim/vnode/stable_dnode3.sim
,,,script,./test.sh -f tsim/vnode/stable_replica3_dnode6.sim
,,,script,./test.sh -f tsim/vnode/stable_replica3_vnode3.sim
,,,script,./test.sh -f tsim/sync/3Replica1VgElect.sim
,,,script,./test.sh -f tsim/sync/3Replica5VgElect.sim
,,,script,./test.sh -f tsim/sync/oneReplica1VgElect.sim
,,,script,./test.sh -f tsim/sync/oneReplica5VgElect.sim
,,y,script,./test.sh -f tsim/vnode/stable_replica3_dnode6.sim
,,y,script,./test.sh -f tsim/vnode/stable_replica3_vnode3.sim
,,y,script,./test.sh -f tsim/sync/3Replica1VgElect.sim
,,y,script,./test.sh -f tsim/sync/3Replica5VgElect.sim
,,y,script,./test.sh -f tsim/sync/oneReplica1VgElect.sim
,,y,script,./test.sh -f tsim/sync/oneReplica5VgElect.sim
,,y,script,./test.sh -f tsim/catalog/alterInCurrent.sim
,,y,script,./test.sh -f tsim/scalar/in.sim
,,y,script,./test.sh -f tsim/scalar/scalar.sim
,,y,script,./test.sh -f tsim/scalar/filter.sim
,,,script,./test.sh -f tsim/scalar/caseWhen.sim
,,y,script,./test.sh -f tsim/scalar/caseWhen.sim
,,y,script,./test.sh -f tsim/alter/cached_schema_after_alter.sim
,,y,script,./test.sh -f tsim/alter/dnode.sim
,,y,script,./test.sh -f tsim/alter/table.sim
@ -432,6 +435,7 @@
,,,system-test,python3 ./test.py -f 1-insert/update_data_muti_rows.py
,,,system-test,python3 ./test.py -f 1-insert/db_tb_name_check.py
,,,system-test,python3 ./test.py -f 1-insert/database_pre_suf.py
,,,system-test,python3 ./test.py -f 1-insert/InsertFuturets.py
,,,system-test,python3 ./test.py -f 0-others/show.py
,,,system-test,python3 ./test.py -f 2-query/abs.py
,,,system-test,python3 ./test.py -f 2-query/abs.py -R

View File

@ -7,6 +7,7 @@
##################################################
set +e
#set -x
FILE_NAME=
RELEASE=0
@ -137,8 +138,16 @@ if [ -n "$FILE_NAME" ]; then
$PROGRAM -c $CFG_DIR -f $FILE_NAME -v
else
echo "ExcuteCmd:" $PROGRAM -c $CFG_DIR -f $FILE_NAME
$PROGRAM -c $CFG_DIR -f $FILE_NAME 2> $ASAN_DIR/tsim.asan
$CODE_DIR/sh/checkAsan.sh
echo "AsanDir:" $ASAN_DIR/tsim.asan
eval $PROGRAM -c $CFG_DIR -f $FILE_NAME 2> $ASAN_DIR/tsim.asan
result=$?
echo "Execute result: " $result
if [ $result -eq 0 ]; then
$CODE_DIR/sh/checkAsan.sh
else
exit 1
fi
fi
else
echo "ExcuteCmd:" $PROGRAM -c $CFG_DIR -f basicSuite.sim

View File

@ -83,7 +83,7 @@ sql select diff(c1), diff(c2) from $tb
sql select 2+diff(c1) from $tb
sql select diff(c1+2) from $tb
sql_error select diff(c1) from $tb where ts > 0 and ts < now + 100m interval(10m)
sql select diff(c1) from $mt
#sql select diff(c1) from $mt
sql_error select diff(diff(c1)) from $tb
sql_error select diff(c1) from m_di_tb1 where c2 like '2%'

View File

@ -97,8 +97,8 @@ $tsu = $tsu + $ts0
#endi
# number of fill values exceeds number of selected columns
print select _wstart, count(ts), max(c1), max(c2), max(c3), max(c4), max(c5) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -2, -3, -4, -5, -6, -7, -8)
sql select _wstart, count(ts), max(c1), max(c2), max(c3), max(c4), max(c5) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -2, -3, -4, -5, -6, -7, -8)
print select _wstart, count(ts), max(c1), max(c2), max(c3), max(c4), max(c5) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -2, -3, -4, -5, -6)
sql select _wstart, count(ts), max(c1), max(c2), max(c3), max(c4), max(c5) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -2, -3, -4, -5, -6)
$val = $rowNum * 2
$val = $val - 1
print $rows $val
@ -136,8 +136,8 @@ if $data74 != -4.00000 then
endi
## fill(value) + group by
print select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $stb where ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(value, -1, -2, -3, -4, -5, -6, -7, -8)
sql select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $stb where ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(value, -1, -2, -3, -4, -5, -6, -7, -8)
print select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $stb where ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(value, -1, -2, -3, -4, -5)
sql select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $stb where ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(value, -1, -2, -3, -4, -5)
$val = $rowNum * 2
print $rowNum, $val
@ -154,8 +154,8 @@ if $data11 != -1 then
endi
# number of fill values is smaller than number of selected columns
print select _wstart, max(c1), max(c2), max(c3) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
sql select _wstart, max(c1), max(c2), max(c3) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
print select _wstart, max(c1), max(c2), max(c3) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6)
sql select _wstart, max(c1), max(c2), max(c3) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6)
if $data11 != 6 then
return -1
endi
@ -170,7 +170,7 @@ endi
sql_error select max(c1), max(c2), max(c3), max(c4), max(c5) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill (6, 6, 6, 6, 6)
# fill_char_values_to_arithmetic_fields
sql select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
sql_error select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
# fill_multiple_columns
sql_error select sum(c1), avg(c2), min(c3), max(c4), count(c6), first(c7), last(c8) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99, 99, abc, abc)
@ -199,7 +199,7 @@ sql select max(c4) from $stb where t1 > 4 and ts >= $ts0 and ts <= $tsu partitio
# return -1
#endi
sql select _wstart, min(c1), max(c4) from $stb where t1 > 4 and ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1)
sql select _wstart, min(c1), max(c4) from $stb where t1 > 4 and ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -1)
$val = $rowNum * 2
$val = $val - 1
if $rows != $val then
@ -222,52 +222,30 @@ if $data12 != -1.000000000 then
endi
# fill_into_nonarithmetic_fieds
print select _wstart, first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
sql select _wstart, first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
print select _wstart, first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '20000000', '20000000', '20000000')
sql select _wstart, first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '20000000', '20000000', '20000000')
#if $data11 != 20000000 then
#if $data11 != 1 then
# return -1
#endi
sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1)
sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1', '1', '1')
sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1.1', '1.1', '1.1')
sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1', '1e1', '1e1')
sql select first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1')
# filling quoted values into a bool column will throw an error unless the value is 'true' or 'false' Note:2018-10-24
# values filled into binary or nchar columns will be set to NULL automatically Note:2018-10-24
sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1','1e1')
sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true', 'true', 'true')
sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true', 'true','true')
# fill nonarithmetic values into arithmetic fields
sql_error select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, abc);
sql select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
sql_error select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'abc');
sql_error select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
sql select _wstart, count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '2e1');
$val = $rowNum * 2
$val = $val - 1
if $rows != $val then
return -1
endi
if $data01 != $rowNum then
return -1
endi
#if $data11 != 20 then
# return -1
#endi
sql_error select _wstart, count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '2e1');
sql select _wstart, count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 2e1);
if $rows != $val then
return -1
endi
if $data01 != $rowNum then
return -1
endi
if $data11 != 20 then
return -1
endi
sql select _wstart, count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '20');
sql select _wstart, count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20);
if $rows != $val then
return -1
endi
@ -377,23 +355,23 @@ endi
## NULL fill
print fill(NULL)
print select _wstart, max(c1), min(c2), avg(c3), sum(c4), count(c5), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 partition by t1 interval(5m) fill(value, NULL) limit 5
sql select _wstart, max(c1), min(c2), avg(c3), sum(c4), count(c5), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 partition by t1 interval(5m) fill(value, NULL) limit 5
sql select _wstart, max(c1), min(c2), avg(c3), sum(c4), count(c5), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 partition by t1 interval(5m) fill(NULL) limit 5
if $rows != 25 then
return -1
endi
if $data01 != 0 then
return -1
endi
if $data02 != 0 then
if $data02 != NULL then
return -1
endi
if $data06 != 1 then
return -1
endi
if $data11 != 0 then
if $data11 != NULL then
return -1
endi
if $data12 != 0 then
if $data12 != NULL then
return -1
endi
if $data18 != NULL then

View File

@ -932,10 +932,10 @@ sql insert into t0 values('2020-1-1 1:4:10', 10);
sql insert into t1 values('2020-1-1 1:1:2', 2);
print ===========================>td-4739
sql select diff(val) from (select derivative(k, 1s, 0) val from t1);
if $rows != 0 then
return -1
endi
#sql select diff(val) from (select derivative(k, 1s, 0) val from t1);
#if $rows != 0 then
# return -1
#endi
sql insert into t1 values('2020-1-1 1:1:4', 20);
sql insert into t1 values('2020-1-1 1:1:6', 200);

View File

@ -68,12 +68,8 @@ print ================= TD-5931
sql create stable st5931(ts timestamp, f int) tags(t int)
sql create table ct5931 using st5931 tags(1)
sql create table nt5931(ts timestamp, f int)
sql select interp(*) from nt5931 where ts=now
sql select interp(*) from st5931 where ts=now
sql select interp(*) from ct5931 where ts=now
if $rows != 0 then
return -1
endi
sql_error select interp(*) from nt5931 where ts=now
sql_error select interp(*) from st5931 where ts=now
sql_error select interp(*) from ct5931 where ts=now
system sh/exec.sh -n dnode1 -s stop -x SIGINT

File diff suppressed because it is too large

View File

@ -80,7 +80,7 @@ endi
sql_error select count(*) from $stb where t1 like 1
##### aggregation on tb + where + fill + limit offset
sql select _wstart, max(c1) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -2) limit 10 offset 1
sql select _wstart, max(c1) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1) limit 10 offset 1
if $rows != 10 then
return -1
endi
@ -101,32 +101,26 @@ if $data91 != 5 then
endi
$tb5 = $tbPrefix . 5
sql select max(c1), min(c2) from $tb5 where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -2, -3, -4) limit 10 offset 1
sql select max(c1), min(c2) from $tb5 where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -2) limit 10 offset 1
if $rows != 10 then
return -1
endi
if $data00 != @18-09-17 09:05:00.000@ then
if $data00 != -1 then
return -1
endi
if $data01 != -1 then
if $data01 != -2 then
return -1
endi
if $data02 != -2 then
if $data10 != 1 then
return -1
endi
if $data11 != 1 then
if $data11 != -2 then
return -1
endi
if $data12 != -2 then
if $data90 != 5 then
return -1
endi
if $data90 != @18-09-17 09:50:00.000@ then
return -1
endi
if $data91 != 5 then
return -1
endi
if $data92 != -2 then
if $data91 != -2 then
return -1
endi
@ -135,7 +129,7 @@ endi
$tb = $tbPrefix . 0
$limit = $rowNum
$offset = $limit / 2
sql select max(c1), min(c2), sum(c3), avg(c4), stddev(c5), spread(c6), first(c7), last(c8), first(c9) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -2) limit $limit offset $offset
sql select max(c1), min(c2), sum(c3), avg(c4), stddev(c5), spread(c6), first(c7), last(c8), first(c9) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -2 ,-3, -4 , -5, -6 ,-7 ,'-8', '-9') limit $limit offset $offset
if $rows != $limit then
print expect $limit, actual $rows
return -1
@ -143,11 +137,8 @@ endi
if $data01 != 0 then
return -1
endi
if $data11 != -1 then
return -1
endi
sql select max(c1) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(value, -1000, -2) limit 8200
sql select max(c1) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(value, -1000) limit 8200
if $rows != 8200 then
return -1
endi
@ -155,88 +146,56 @@ endi
sql select max(c1) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(value, -1000) limit 100000;
sql select max(c1) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(value, -1000, -2) limit 10 offset 8190;
sql select max(c1) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(value, -1000) limit 10 offset 8190;
if $rows != 10 then
return -1
endi
if $data00 != @18-10-15 19:30:00.000@ then
if $data00 != 5 then
return -1
endi
if $data01 != 5 then
if $data10 != -1000 then
return -1
endi
if $data10 != @18-10-15 19:35:00.000@ then
if $data20 != 6 then
return -1
endi
if $data11 != -1000 then
if $data30 != -1000 then
return -1
endi
if $data20 != @18-10-15 19:40:00.000@ then
return -1
endi
if $data21 != 6 then
return -1
endi
if $data30 != @18-10-15 19:45:00.000@ then
return -1
endi
if $data31 != -1000 then
return -1
endi
sql select max(c1) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(value, -1000, -2) limit 10 offset 10001;
sql select max(c1) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(value, -1000) limit 10 offset 10001;
if $rows != 10 then
return -1
endi
if $data00 != @18-10-22 02:25:00.000@ then
if $data00 != -1000 then
return -1
endi
if $data01 != -1000 then
if $data10 != 1 then
return -1
endi
if $data10 != @18-10-22 02:30:00.000@ then
if $data20 != -1000 then
return -1
endi
if $data11 != 1 then
if $data30 != 2 then
return -1
endi
if $data20 != @18-10-22 02:35:00.000@ then
return -1
endi
if $data21 != -1000 then
return -1
endi
if $data30 != @18-10-22 02:40:00.000@ then
return -1
endi
if $data31 != 2 then
return -1
endi
sql select max(c1) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(value, -1000, -2) limit 10000 offset 10001;
sql select max(c1) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(value, -1000) limit 10000 offset 10001;
print ====> needs to validate the last row result
if $rows != 9998 then
return -1
endi
sql select max(c1) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(value, -1000, -2) limit 100 offset 20001;
sql select max(c1) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(value, -1000) limit 100 offset 20001;
if $rows != 0 then
return -1
endi
@ -244,7 +203,7 @@ endi
# tb + interval + fill(linear) + limit offset
$limit = $rowNum
$offset = $limit / 2
sql select max(c1), min(c2), sum(c3), avg(c4), stddev(c5), spread(c6), first(c7), last(c8), first(c9) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(linear) limit $limit offset $offset
sql select _wstart,max(c1), min(c2), sum(c3), avg(c4), stddev(c5), spread(c6), first(c7), last(c8), first(c9) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(linear) limit $limit offset $offset
if $rows != $limit then
print expect $limit, actual $rows
return -1
@ -290,7 +249,7 @@ endi
$limit = $rowNum
$offset = $limit / 2
$offset = $offset + 10
sql select max(c1), min(c2), sum(c3), avg(c4), stddev(c5), spread(c6), first(c7), last(c8), first(c9) from $tb where ts >= $ts0 and ts <= $tsu and c1 = 5 interval(5m) fill(value, -1, -2) limit $limit offset $offset
sql select _wstart,max(c1), min(c2), sum(c3), avg(c4), stddev(c5), spread(c6), first(c7), last(c8), first(c9) from $tb where ts >= $ts0 and ts <= $tsu and c1 = 5 interval(5m) fill(value, -1, -2 ,-3, -4 , -5, -6 ,-7 ,'-8', '-9') limit $limit offset $offset
if $rows != $limit then
return -1
endi
@ -324,20 +283,11 @@ endi
if $data19 != NULL then
return -1
endi
if $data16 != -2.000000000 then
return -1
endi
if $data17 != 1 then
return -1
endi
if $data11 != -1 then
return -1
endi
$limit = $rowNum
$offset = $limit * 2
$offset = $offset - 11
sql select max(c1), min(c2), sum(c3), avg(c4), stddev(c5), spread(c6), first(c7), last(c8), first(c9) from $tb where ts >= $ts0 and ts <= $tsu and c1 = 5 interval(5m) fill(value, -1, -2) limit $limit offset $offset
sql select _wstart,max(c1), min(c2), sum(c3), avg(c4), stddev(c5), spread(c6), first(c7), last(c8), first(c9) from $tb where ts >= $ts0 and ts <= $tsu and c1 = 5 interval(5m) fill(value, -1, -2 ,-3, -4 , -5, -6 ,-7 ,'-8', '-9') limit $limit offset $offset
if $rows != 10 then
return -1
endi
@ -368,27 +318,21 @@ endi
if $data19 != nchar5 then
return -1
endi
if $data27 != 1 then
return -1
endi
if $data38 != NULL then
return -1
endi
if $data49 != NULL then
return -1
endi
### [TBASE-350]
## stb + interval + fill + group by + limit offset
sql select max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -2) group by t1 limit 2 offset 10
if $rows != 20 then
sql select max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(value, -1, -2, -3, -4 ,-7 ,'-8', '-9') limit 2 offset 10
if $rows != 2 then
return -1
endi
#add one more test case
sql select max(c1), last(c8) from lm2_db0.lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(linear) limit 10 offset 4089;
$limit = 5
$offset = $rowNum * 2
$offset = $offset - 2
sql select max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -2) group by t1 order by t1 limit $limit offset $offset
sql select max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(value, -1, -2, -3, -4 ,-7 ,'-8', '-9') order by t1 limit $limit offset $offset
if $rows != $tbNum then
return -1
endi

View File

@ -338,13 +338,13 @@ if $data03 != @20-09-15 00:00:00.000@ then
return -1
endi
sql select diff(val) from (select c1 val from nest_tb0);
if $rows != 9999 then
return -1
endi
if $data00 != 1 then
return -1
endi
#sql select diff(val) from (select c1 val from nest_tb0);
#if $rows != 9999 then
# return -1
#endi
#if $data00 != 1 then
# return -1
#endi
sql_error select last_row(*) from (select * from nest_tb0) having c1 > 0

View File

@ -751,5 +751,40 @@ if $rows != 0 then
endi
sql insert into t1 values(1648791223000,2,2,3,1.0);
sql insert into t1 values(1648791223000,10,2,3,1.0);
sql insert into t1 values(1648791233000,10,2,3,1.0);
$loop_count = 0
loop16:
sleep 200
sql select * from streamt4;
$loop_count = $loop_count + 1
if $loop_count == 10 then
return -1
endi
if $rows != 2 then
print =====rows=$rows
goto loop16
endi
sql insert into t1 values(1648791233000,2,2,3,1.0);
$loop_count = 0
loop17:
sleep 200
sql select * from streamt4;
$loop_count = $loop_count + 1
if $loop_count == 10 then
return -1
endi
if $rows != 1 then
print =====rows=$rows
goto loop17
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -92,7 +92,7 @@ sql select avg(tbcol) as b from stb where ts <= 1601481840000 partition by tgcol
sql show table distributed stb
sql select count(1) from stb
sql select count(tbcol) as b from stb where ts <= 1601481840000 partition by tgcol interval(1m)
sql select diff(tbcol) from stb where ts <= 1601481840000
#sql select diff(tbcol) from stb where ts <= 1601481840000
sql select first(tbcol), last(tbcol) as c from stb group by tgcol
sql select first(tbcol), last(tbcol) as b from stb where ts <= 1601481840000 and tbcol2 is null partition by tgcol interval(1m)
sql select first(tbcol), last(tbcol) as b from stb where ts <= 1601481840000 partition by tgcol interval(1m)
@ -176,7 +176,7 @@ sql select avg(tbcol) as c from stb group by tgcol
sql select avg(tbcol) as b from stb where ts <= 1601481840000 partition by tgcol interval(1m)
sql show table distributed stb
sql select count(tbcol) as b from stb where ts <= 1601481840000 partition by tgcol interval(1m)
sql select diff(tbcol) from stb where ts <= 1601481840000
#sql select diff(tbcol) from stb where ts <= 1601481840000
sql select first(tbcol), last(tbcol) as c from stb group by tgcol
sql select first(tbcol), last(tbcol) as b from stb where ts <= 1601481840000 and tbcol2 is null partition by tgcol interval(1m)
sql select first(tbcol), last(tbcol) as b from stb where ts <= 1601481840000 partition by tgcol interval(1m)

View File

@ -0,0 +1,106 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import math
import random
from util.log import *
from util.cases import *
from util.sql import *
from util.common import *
from util.sqlset import *
import time
import datetime
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.timestamp_ms = int(round(time.time()*1000))
self.timestamp_us = int(round(time.time()*1000000))
self.timestamp_ns = int(time.time_ns())
self.ms_boundary = 31556995200000
self.us_boundary = 31556995200000000
self.ns_boundary = 9214646400000000000
self.ntbname = 'ntb'
self.stbname = 'stb'
self.ctbname = 'ctb'
def insert_check(self,timestamp,tbname):
tdSql.execute(f'insert into {tbname} values({timestamp},1)')
tdSql.query(f'select * from {tbname} where ts = {timestamp}')
tdSql.checkEqual(tdSql.queryResult[0][1],1)
tdSql.execute('flush database db')
tdSql.query(f'select * from {tbname} where ts = {timestamp}')
tdSql.checkEqual(tdSql.queryResult[0][1],1)
tdSql.execute(f'insert into {tbname} values({timestamp},2)')
tdSql.query(f'select * from {tbname} where ts = {timestamp}')
tdSql.checkEqual(tdSql.queryResult[0][1],2)
tdSql.execute('flush database db')
tdSql.query(f'select * from {tbname} where ts = {timestamp}')
tdSql.checkEqual(tdSql.queryResult[0][1],2)
tdSql.execute(f'delete from {tbname} where ts = {timestamp}')
tdSql.query(f'select * from {tbname} where ts = {timestamp}')
tdSql.checkRows(0)
tdSql.execute('flush database db')
tdSql.query(f'select * from {tbname} where ts = {timestamp}')
tdSql.checkRows(0)
def insert_ms(self):
tdSql.prepare()
tdSql.execute('use db')
tdSql.execute(f'create table {self.ntbname} (ts timestamp,c0 int)')
timestamp = random.randint(self.timestamp_ms,self.ms_boundary-1)
self.insert_check(timestamp,self.ntbname)
self.insert_check(self.ms_boundary,self.ntbname)
tdSql.error(f'insert into {self.ntbname} values({self.ms_boundary+1},1)')
tdSql.execute(f'create table {self.stbname} (ts timestamp,c0 int) tags(t0 int)')
tdSql.execute(f'create table {self.ctbname} using {self.stbname} tags (1)')
self.insert_check(timestamp,self.ctbname)
self.insert_check(self.ms_boundary,self.ctbname)
tdSql.error(f'insert into {self.ctbname} values({self.ms_boundary+1},1)')
def insert_us(self):
tdSql.execute('create database db1 precision "us"')
tdSql.execute('use db1')
tdSql.execute(f'create table {self.ntbname} (ts timestamp,c0 int)')
timestamp = random.randint(self.timestamp_us,self.us_boundary-1)
self.insert_check(timestamp,self.ntbname)
self.insert_check(self.us_boundary,self.ntbname)
tdSql.error(f'insert into {self.ntbname} values({self.us_boundary+1},1)')
tdSql.execute(f'create table {self.stbname} (ts timestamp,c0 int) tags(t0 int)')
tdSql.execute(f'create table {self.ctbname} using {self.stbname} tags (1)')
self.insert_check(timestamp,self.ctbname)
self.insert_check(self.us_boundary,self.ctbname)
tdSql.error(f'insert into {self.ctbname} values({self.us_boundary+1},1)')
def insert_ns(self):
tdSql.execute('create database db2 precision "ns"')
tdSql.execute('use db2')
tdSql.execute(f'create table {self.ntbname} (ts timestamp,c0 int)')
timestamp = random.randint(self.timestamp_ns,self.ns_boundary-1)
self.insert_check(timestamp,self.ntbname)
self.insert_check(self.ns_boundary,self.ntbname)
tdSql.error(f'insert into {self.ntbname} values({self.ns_boundary+1},1)')
tdSql.execute(f'create table {self.stbname} (ts timestamp,c0 int) tags(t0 int)')
tdSql.execute(f'create table {self.ctbname} using {self.stbname} tags (1)')
self.insert_check(timestamp,self.ctbname)
self.insert_check(self.ns_boundary,self.ctbname)
tdSql.error(f'insert into {self.ctbname} values({self.ns_boundary+1},1)')
def run(self):
self.insert_ms()
self.insert_us()
self.insert_ns()
pass
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -24,7 +24,7 @@ from util.cases import *
from util.sql import *
from util.dnodes import *
msec_per_min=60 * 1000
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
@ -54,7 +54,7 @@ class TDTestCase:
if tdSql.queryRows == 0:
tdSql.query(self.csum_query_form(
col=col, alias=alias, table_expr=table_expr, condition=condition
col=col, alias=alias, table_expr=table_expr.replace("csum", "ts, csum"), condition=condition
))
print(f"case in {line}: ", end='')
tdSql.checkRows(0)
@ -132,7 +132,7 @@ class TDTestCase:
pre_result = np.array(pre_result, dtype = 'int64')
pre_csum = np.cumsum(pre_result)[offset_val:]
tdSql.query(self.csum_query_form(
col=col, alias=alias, table_expr=table_expr, condition=condition
col=col, alias=alias, table_expr=table_expr.replace("csum", "ts,csum"), condition=condition
))
for i in range(tdSql.queryRows):
@ -163,7 +163,7 @@ class TDTestCase:
self.checkcsum(**case6)
# case7~8: nested query
case7 = {"table_expr": "(select c1 from db.stb1 order by ts, tbname )"}
case7 = {"table_expr": "(select ts,c1 from db.stb1 order by ts, tbname )"}
self.checkcsum(**case7)
case8 = {"table_expr": "(select csum(c1) c1 from db.t1)"}
self.checkcsum(**case8)
@ -315,19 +315,19 @@ class TDTestCase:
for j in range(data_row):
tdSql.execute(
f"insert into t{i} values ("
f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
f"{basetime + (j+1)*10 + i * msec_per_min}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
)
tdSql.execute(
f"insert into t{i} values ("
f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
f"{basetime - (j+1) * 10 + i * msec_per_min}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, "
f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )"
)
tdSql.execute(
f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
f"insert into tt{i} values ( {basetime-(j+1) * 10 + i * msec_per_min}, {random.randint(1, 200)} )"
)
pass
@ -366,26 +366,26 @@ class TDTestCase:
tdLog.printNoPrefix("######## insert only NULL test:")
for i in range(tbnum):
tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime - 5})")
tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime + 5})")
tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime - 5 + i * msec_per_min})")
tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime + 5 + i * msec_per_min})")
self.csum_current_query()
self.csum_error_query()
tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):")
self.csum_test_table(tbnum)
tdSql.execute(f"insert into db.t1(ts, c1,c2,c5,c7) values "
f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
f"({nowtime - (per_table_rows + 1) * 10 + i * msec_per_min}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
tdSql.execute(f"insert into db.t1(ts, c1,c2,c5,c7) values "
f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
f"({nowtime - (per_table_rows + 2) * 10 + i * msec_per_min}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
self.csum_current_query()
self.csum_error_query()
tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):")
self.csum_test_table(tbnum)
tdSql.execute(f"insert into db.t1(ts, c1,c2,c5,c7) values "
f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})")
f"({nowtime - (per_table_rows + 1) * 10 + i * msec_per_min}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})")
tdSql.execute(f"insert into db.t1(ts, c1,c2,c5,c7) values "
f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})")
f"({nowtime - (per_table_rows + 2) * 10 + i * msec_per_min}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})")
self.csum_current_query()
self.csum_error_query()
@ -398,9 +398,9 @@ class TDTestCase:
tdLog.printNoPrefix("######## insert data mix with NULL test:")
for i in range(tbnum):
tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime})")
tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime-(per_table_rows+3)*10})")
tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime+(per_table_rows+3)*10})")
tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime + i * msec_per_min})")
tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime-(per_table_rows+3)*10 + i * msec_per_min})")
tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime+(per_table_rows+3)*10 + i * msec_per_min})")
self.csum_current_query()
self.csum_error_query()
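
The timestamp arithmetic repeated through this file's hunks follows a single pattern; a short sketch of it with a hypothetical helper name (the test inlines the expressions instead), which presumably keeps ordered results such as csum deterministic when rows from several child tables are merged:

    msec_per_min = 60 * 1000

    def per_table_ts(basetime, row, table_index):
        # rows within one table stay 10 ms apart, and every child table t{table_index}
        # is shifted by a full minute so no two tables produce the same timestamp
        return basetime + (row + 1) * 10 + table_index * msec_per_min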

View File

@ -24,7 +24,7 @@ from util.cases import *
from util.sql import *
from util.dnodes import *
msec_per_min=60*1000
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
@ -312,19 +312,19 @@ class TDTestCase:
for j in range(data_row):
tdSql.execute(
f"insert into db.t{i} values ("
f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
f"{basetime + (j+1)*10 + i* msec_per_min}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
)
tdSql.execute(
f"insert into db.t{i} values ("
f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
f"{basetime - (j+1) * 10 + i* msec_per_min}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, "
f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )"
)
tdSql.execute(
f"insert into db.tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
f"insert into db.tt{i} values ( {basetime-(j+1) * 10 + i* msec_per_min}, {random.randint(1, 200)} )"
)
pass
@ -394,26 +394,26 @@ class TDTestCase:
tdLog.printNoPrefix("######## insert only NULL test:")
for i in range(tbnum):
tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime - 5})")
tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime + 5})")
tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime - 5 + i* msec_per_min})")
tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime + 5 + i* msec_per_min})")
self.diff_current_query()
self.diff_error_query()
tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):")
self.diff_test_table(tbnum)
tdSql.execute(f"insert into db.t1(ts, c1,c2,c5,c7) values "
f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
f"({nowtime - (per_table_rows + 1) * 10 + i* msec_per_min}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
tdSql.execute(f"insert into db.t1(ts, c1,c2,c5,c7) values "
f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
f"({nowtime - (per_table_rows + 2) * 10 + i* msec_per_min}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
self.diff_current_query()
self.diff_error_query()
tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):")
self.diff_test_table(tbnum)
tdSql.execute(f"insert into db.t1(ts, c1,c2,c5,c7) values "
f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})")
f"({nowtime - (per_table_rows + 1) * 10 + i* msec_per_min}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})")
tdSql.execute(f"insert into db.t1(ts, c1,c2,c5,c7) values "
f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})")
f"({nowtime - (per_table_rows + 2) * 10 + i* msec_per_min}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})")
self.diff_current_query()
self.diff_error_query()
@ -426,9 +426,9 @@ class TDTestCase:
tdLog.printNoPrefix("######## insert data mix with NULL test:")
for i in range(tbnum):
tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime})")
tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime-(per_table_rows+3)*10})")
tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime+(per_table_rows+3)*10})")
tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime + i* msec_per_min})")
tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime-(per_table_rows+3)*10 + i* msec_per_min})")
tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime+(per_table_rows+3)*10 + i* msec_per_min})")
self.diff_current_query()
self.diff_error_query()

View File

@ -34,7 +34,7 @@ class TDTestCase:
ts = self.ts
for row in range(rownums):
ts = self.ts + time_step*row
ts = self.ts + (time_step) * row + tbnum * 60 * 1000
c1 = random.randint(0,1000)
c2 = random.randint(0,100000)
c3 = random.randint(0,125)

View File

@ -26,6 +26,7 @@ from util.sql import *
from util.dnodes import *
dbname = 'db'
msec_per_min = 60 * 1000
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
@ -327,7 +328,7 @@ class TDTestCase:
self.checkmavg(**case6)
# case7~8: nested query
case7 = {"table_expr": f"(select c1 from {dbname}.stb1)"}
case7 = {"table_expr": f"(select ts, c1 from {dbname}.stb1)"}
self.checkmavg(**case7)
# case8 = {"table_expr": f"(select _c0, mavg(c1, 1) c1 from {dbname}.stb1 group by tbname)"}
# self.checkmavg(**case8)
@ -568,19 +569,19 @@ class TDTestCase:
for j in range(data_row):
tdSql.execute(
f"insert into {dbname}.t{i} values ("
f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
f"{basetime + (j+1)*10 + i * msec_per_min}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
)
tdSql.execute(
f"insert into {dbname}.t{i} values ("
f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
f"{basetime - (j+1) * 10 + i * msec_per_min}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, "
f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )"
)
tdSql.execute(
f"insert into {dbname}.tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
f"insert into {dbname}.tt{i} values ( {basetime-(j+1) * 10 + i * msec_per_min}, {random.randint(1, 200)} )"
)
pass
@ -619,8 +620,8 @@ class TDTestCase:
tdLog.printNoPrefix("######## insert only NULL test:")
for i in range(tbnum):
tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime - 5})")
tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime + 5})")
tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime - 5 + i * msec_per_min})")
tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime + 5 + i * msec_per_min})")
self.mavg_current_query()
self.mavg_error_query()
@ -651,9 +652,9 @@ class TDTestCase:
tdLog.printNoPrefix("######## insert data mix with NULL test:")
for i in range(tbnum):
tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime})")
tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime-(per_table_rows+3)*10})")
tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime+(per_table_rows+3)*10})")
tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime + i * msec_per_min})")
tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime-(per_table_rows+3)*10 + i * msec_per_min})")
tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime+(per_table_rows+3)*10 + i * msec_per_min})")
self.mavg_current_query()
self.mavg_error_query()
@ -676,7 +677,7 @@ class TDTestCase:
tdSql.checkRows(4)
def mavg_support_stable(self):
tdSql.query(f" select mavg(1,3) from {dbname}.stb1 ")
tdSql.query(f"select mavg(1,3) from {dbname}.stb1 ")
tdSql.checkRows(68)
tdSql.checkData(0,0,1.000000000)
tdSql.query(f"select mavg(c1,3) from {dbname}.stb1 partition by tbname ")

View File

@ -20,15 +20,15 @@ class TDTestCase:
for i in range(tb_nums):
tbname = f"{dbname}.sub_{stb_name}_{i}"
ts = self.ts + i*10000
ts = self.ts + i*1000*120
tdSql.execute(f"create table {tbname} using {dbname}.{stb_name} tags ({ts} , {i} , {i}*10 ,{i}*1.0,{i}*1.0 , 1 , 2, 'true', 'binary_{i}' ,'nchar_{i}',{i},{i},10,20 )")
for row in range(row_nums):
ts = self.ts + row*1000
ts = ts + row*1000
tdSql.execute(f"insert into {tbname} values({ts} , {row} , {row} , {row} , {row} , 1 , 2 , 'true' , 'binary_{row}' , 'nchar_{row}' , {row} , {row} , 1 ,2 )")
for null in range(5):
ts = self.ts + row_nums*1000 + null*1000
ts = ts + row_nums*1000 + null*1000
tdSql.execute(f"insert into {tbname} values({ts} , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL )")
def basic_query(self, dbname="db"):
@ -160,13 +160,13 @@ class TDTestCase:
tdSql.query(f"select tbname , count(c1) from {dbname}.stb partition by tbname interval(10s) slimit 5 soffset 1 ")
tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname interval(10s)")
tdSql.checkRows(self.row_nums*2)
tdSql.checkRows(self.row_nums*10)
tdSql.query(f"select unique(c1) from {dbname}.stb partition by tbname order by tbname")
tdSql.query(f"select tbname , count(c1) from {dbname}.sub_stb_1 partition by tbname interval(10s)")
tdSql.checkData(0,0,'sub_stb_1')
tdSql.checkData(0,1,self.row_nums)
tdSql.checkData(0,1, 4)
tdSql.query(f"select c1 , mavg(c1 ,2 ) from {dbname}.stb partition by c1")
tdSql.checkRows(90)
@ -193,7 +193,7 @@ class TDTestCase:
tdSql.query(f"select c1 , DERIVATIVE(c1,2,1) from {dbname}.stb partition by c1 order by c1")
tdSql.checkRows(90)
# bug need fix
tdSql.checkData(0,1,None)
tdSql.checkData(0,1,0.0)
tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname order by tbname slimit 5 soffset 0 ")

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -17,52 +17,52 @@ class TDTestCase(TDTestCase):
def run(self):
tdSql.prepare()
startTime = time.time()
self.function_before_26()
self.dropandcreateDB_random("%s" %self.db_nest, 1)
startTime = time.time()
self.function_before_26()
self.dropandcreateDB_random("%s" %self.db_nest, 1)
# self.math_nest(['UNIQUE'])
# self.math_nest(['MODE'])
# self.math_nest(['MODE'])
# self.math_nest(['SAMPLE'])
# self.math_nest(['ABS','SQRT'])
# self.math_nest(['SIN','COS','TAN','ASIN','ACOS','ATAN'])
# self.math_nest(['POW','LOG'])
# self.math_nest(['FLOOR','CEIL','ROUND'])
# self.math_nest(['MAVG'])
# self.math_nest(['HYPERLOGLOG'])
# self.math_nest(['TAIL'])
# self.math_nest(['ABS','SQRT'])
# self.math_nest(['SIN','COS','TAN','ASIN','ACOS','ATAN'])
# self.math_nest(['POW','LOG'])
# self.math_nest(['FLOOR','CEIL','ROUND'])
# self.math_nest(['MAVG'])
# self.math_nest(['HYPERLOGLOG'])
# self.math_nest(['TAIL'])
# self.math_nest(['CSUM'])
# self.math_nest(['statecount','stateduration'])
# self.math_nest(['HISTOGRAM'])
self.str_nest(['LTRIM','RTRIM','LOWER','UPPER'])
self.str_nest(['LENGTH','CHAR_LENGTH'])
self.str_nest(['SUBSTR'])
self.str_nest(['CONCAT'])
self.str_nest(['CONCAT_WS'])
self.time_nest(['CAST'])
# self.math_nest(['HISTOGRAM'])
self.str_nest(['LTRIM','RTRIM','LOWER','UPPER'])
self.str_nest(['LENGTH','CHAR_LENGTH'])
self.str_nest(['SUBSTR'])
self.str_nest(['CONCAT'])
self.str_nest(['CONCAT_WS'])
self.time_nest(['CAST'])
self.time_nest(['CAST_1'])
self.time_nest(['CAST_2'])
self.time_nest(['CAST_3'])
self.time_nest(['CAST_4'])
self.time_nest(['NOW','TODAY'])
self.time_nest(['TIMEZONE'])
self.time_nest(['TIMETRUNCATE'])
self.time_nest(['NOW','TODAY'])
self.time_nest(['TIMEZONE'])
self.time_nest(['TIMETRUNCATE'])
self.time_nest(['TO_ISO8601'])
self.time_nest(['TO_UNIXTIMESTAMP'])
self.time_nest(['ELAPSED'])
self.time_nest(['TIMEDIFF_1'])
self.time_nest(['TIMEDIFF_2'])
self.time_nest(['TIMEDIFF_2'])
endTime = time.time()
print("total time %ds" % (endTime - startTime))
def stop(self):
tdSql.close()