diff --git a/cmake/cmake.define b/cmake/cmake.define
index 5639d212d7..989b69a89b 100644
--- a/cmake/cmake.define
+++ b/cmake/cmake.define
@@ -81,7 +81,7 @@ ENDIF ()
IF (TD_WINDOWS)
MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
- SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi")
+ SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO")
# IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
# SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
@@ -92,6 +92,12 @@ IF (TD_WINDOWS)
IF (CMAKE_DEPFILE_FLAGS_CXX)
SET(CMAKE_DEPFILE_FLAGS_CXX "")
ENDIF ()
+ IF (CMAKE_C_FLAGS_DEBUG)
+ SET(CMAKE_C_FLAGS_DEBUG "" CACHE STRING "" FORCE)
+ ENDIF ()
+ IF (CMAKE_CXX_FLAGS_DEBUG)
+ SET(CMAKE_CXX_FLAGS_DEBUG "" CACHE STRING "" FORCE)
+ ENDIF ()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMMON_FLAGS}")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMMON_FLAGS}")
diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in
index 3a6eb3c25a..1751549680 100644
--- a/cmake/taostools_CMakeLists.txt.in
+++ b/cmake/taostools_CMakeLists.txt.in
@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
- GIT_TAG 53a0103
+ GIT_TAG d237772
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt
index b4e8825431..294b59fe95 100644
--- a/contrib/CMakeLists.txt
+++ b/contrib/CMakeLists.txt
@@ -273,7 +273,7 @@ endif(${BUILD_WITH_NURAFT})
# pthread
if(${BUILD_PTHREAD})
- set(CMAKE_BUILD_TYPE release)
+ set(CMAKE_BUILD_TYPE debug)
add_definitions(-DPTW32_STATIC_LIB)
add_subdirectory(pthread EXCLUDE_FROM_ALL)
set_target_properties(libpthreadVC3 PROPERTIES OUTPUT_NAME pthread)
diff --git a/docs/en/07-develop/_sub_java.mdx b/docs/en/07-develop/_sub_java.mdx
index ae0ecd28e0..d14b5fd609 100644
--- a/docs/en/07-develop/_sub_java.mdx
+++ b/docs/en/07-develop/_sub_java.mdx
@@ -3,7 +3,9 @@
{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}}
```
-:::note
-For now Java connector doesn't provide asynchronous subscription, but `TimerTask` can be used to achieve similar purpose.
-
-:::
\ No newline at end of file
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
+```
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}}
+```
\ No newline at end of file
diff --git a/docs/en/07-develop/_sub_rust.mdx b/docs/en/07-develop/_sub_rust.mdx
index afb8d79daa..0021666a70 100644
--- a/docs/en/07-develop/_sub_rust.mdx
+++ b/docs/en/07-develop/_sub_rust.mdx
@@ -1,3 +1,3 @@
-```rs
+```rust
{{#include docs/examples/rust/nativeexample/examples/subscribe_demo.rs}}
-```
\ No newline at end of file
+```
diff --git a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java
index b1e675cdf6..50e8b35771 100644
--- a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java
+++ b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java
@@ -68,6 +68,7 @@ public class SubscribeDemo {
System.out.println(meter);
}
}
+ consumer.unsubscribe();
}
} catch (ClassNotFoundException | SQLException e) {
e.printStackTrace();
diff --git a/docs/examples/node/nativeexample/subscribe_demo.js b/docs/examples/node/nativeexample/subscribe_demo.js
index c4f7e6df84..5b65e1c907 100644
--- a/docs/examples/node/nativeexample/subscribe_demo.js
+++ b/docs/examples/node/nativeexample/subscribe_demo.js
@@ -28,7 +28,8 @@ function runConsumer() {
console.log(msg.topicPartition);
console.log(msg.block);
console.log(msg.fields)
- consumer.commit(msg);
+ // FIXME(@xiaolei): temporarily commented out; should be fixed.
+ //consumer.commit(msg);
console.log(`=======consumer ${i} done`)
}
@@ -48,4 +49,4 @@ try {
cursor.close();
conn.close();
}, 2000);
-}
\ No newline at end of file
+}
diff --git a/docs/examples/python/tmq_example.py b/docs/examples/python/tmq_example.py
index 1f6da3d1b6..cee036454e 100644
--- a/docs/examples/python/tmq_example.py
+++ b/docs/examples/python/tmq_example.py
@@ -1,59 +1,6 @@
import taos
-from taos.tmq import *
-
-conn = taos.connect()
-
-# create database
-conn.execute("drop database if exists py_tmq")
-conn.execute("create database if not exists py_tmq vgroups 2")
-
-# create table and stables
-conn.select_db("py_tmq")
-conn.execute("create stable if not exists stb1 (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)")
-conn.execute("create table if not exists tb1 using stb1 tags(1)")
-conn.execute("create table if not exists tb2 using stb1 tags(2)")
-conn.execute("create table if not exists tb3 using stb1 tags(3)")
-
-# create topic
-conn.execute("drop topic if exists topic_ctb_column")
-conn.execute("create topic if not exists topic_ctb_column as select ts, c1, c2, c3 from stb1")
-
-# set consumer configure options
-conf = TaosTmqConf()
-conf.set("group.id", "tg2")
-conf.set("td.connect.user", "root")
-conf.set("td.connect.pass", "taosdata")
-conf.set("enable.auto.commit", "true")
-conf.set("msg.with.table.name", "true")
-
-def tmq_commit_cb_print(tmq, resp, offset, param=None):
- print(f"commit: {resp}, tmq: {tmq}, offset: {offset}, param: {param}")
-
-conf.set_auto_commit_cb(tmq_commit_cb_print, None)
-
-# build consumer
-tmq = conf.new_consumer()
-
-# build topic list
-topic_list = TaosTmqList()
-topic_list.append("topic_ctb_column")
-
-# subscribe consumer
-tmq.subscribe(topic_list)
-
-# check subscriptions
-sub_list = tmq.subscription()
-print("subscribed topics: ",sub_list)
-
-# start subscribe
-while 1:
- res = tmq.poll(1000)
- if res:
- topic = res.get_topic_name()
- vg = res.get_vgroup_id()
- db = res.get_db_name()
- print(f"topic: {topic}\nvgroup id: {vg}\ndb: {db}")
- for row in res:
- print(row)
- tb = res.get_table_name()
- print(f"from table: {tb}")
+from taos.tmq import TaosConsumer
+consumer = TaosConsumer('topic_ctb_column', group_id='vg2')
+for msg in consumer:
+ for row in msg:
+ print(row)
diff --git a/docs/zh/01-index.md b/docs/zh/01-index.md
index 64a7d419e1..79d5424ac2 100644
--- a/docs/zh/01-index.md
+++ b/docs/zh/01-index.md
@@ -4,7 +4,7 @@ sidebar_label: 文档首页
slug: /
---
-TDengine是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库(Time-Series Database, TSDB), 它专为物联网、工业互联网、金融等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一极简的时序数据处理平台。本文档是 TDengine 用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发者与系统管理员的。
+TDengine是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库(Time Series Database, TSDB), 它专为物联网、工业互联网、金融等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一极简的时序数据处理平台。本文档是 TDengine 用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发者与系统管理员的。
TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用TDengine, 无论如何,请您仔细阅读[基本概念](./concept)一章。
diff --git a/docs/zh/02-intro.md b/docs/zh/02-intro.md
index 97322c68a2..a6ef2b94b6 100644
--- a/docs/zh/02-intro.md
+++ b/docs/zh/02-intro.md
@@ -3,7 +3,7 @@ title: 产品简介
toc_max_heading_level: 2
---
-TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/tdengine/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库 (Time-Series Database, TSDB)。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库功能外,TDengine 还提供[缓存](../develop/cache/)、[数据订阅](../develop/tmq)、[流式计算](../develop/stream)等功能,是一极简的时序数据处理平台,最大程度的减小系统设计的复杂度,降低研发和运营成本。
+TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/tdengine/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库(Time Series Database, TSDB)。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库功能外,TDengine 还提供[缓存](../develop/cache/)、[数据订阅](../develop/tmq)、[流式计算](../develop/stream)等功能,是一极简的时序数据处理平台,最大程度的减小系统设计的复杂度,降低研发和运营成本。
本章节介绍TDengine的主要功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对TDengine有个整体的了解。
diff --git a/docs/zh/05-get-started/01-docker.md b/docs/zh/05-get-started/01-docker.md
index 814784b649..f0f09d4c7e 100644
--- a/docs/zh/05-get-started/01-docker.md
+++ b/docs/zh/05-get-started/01-docker.md
@@ -2,18 +2,15 @@
sidebar_label: Docker
title: 通过 Docker 快速体验 TDengine
---
-:::info
-如果您希望为 TDengine 贡献代码或对内部技术实现感兴趣,请参考[TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装.
-:::
-本节首先介绍如何通过 Docker 快速体验 TDengine,然后介绍如何在 Docker 环境下体验 TDengine 的写入和查询功能。
+本节首先介绍如何通过 Docker 快速体验 TDengine,然后介绍如何在 Docker 环境下体验 TDengine 的写入和查询功能。如果你不熟悉 Docker,请使用[安装包的方式快速体验](../../get-started/package/)。如果您希望为 TDengine 贡献代码或对内部技术实现感兴趣,请参考 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装.
## 启动 TDengine
如果已经安装了 docker, 只需执行下面的命令。
```shell
-docker run -d -p 6030:6030 -p 6041/6041 -p 6043-6049/6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
+docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
```
注意:TDengine 3.0 服务端仅使用 6030 TCP 端口。6041 为 taosAdapter 所使用提供 REST 服务端口。6043-6049 为 taosAdapter 提供第三方应用接入所使用端口,可根据需要选择是否打开。
diff --git a/docs/zh/05-get-started/03-package.md b/docs/zh/05-get-started/03-package.md
index 63698aab50..4c6757b930 100644
--- a/docs/zh/05-get-started/03-package.md
+++ b/docs/zh/05-get-started/03-package.md
@@ -5,17 +5,74 @@ title: 使用安装包立即开始
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
+import PkgListV3 from "/components/PkgListV3";
-:::info
-如果您希望对 TDengine 贡献代码或对内部实现感兴趣,请参考我们的 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装.
+TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件,目前服务端 taosd 和 taosAdapter 仅在 Linux 系统上安装和运行,后续将支持 Windows、macOS 等系统。应用驱动 taosc 与 TDengine CLI 可以在 Windows 或 Linux 上安装和运行。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../../reference/taosadapter/) 提供 [RESTful 接口](../../reference/rest-api/)。
-:::
+为方便使用,标准的服务端安装包包含了 taos、taosd、taosAdapter、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 lite 版本的安装包。
+
+在 Linux 系统上,TDengine 开源版本提供 deb 和 rpm 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 deb 支持 Debian/Ubuntu 及衍生系统,rpm 支持 CentOS/RHEL/SUSE 及衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包,也支持通过 `apt-get` 工具从线上进行安装。TDengine 也提供 Windows x64 平台的安装包。您也可以[用 Docker 立即体验](../../get-started/docker/)。需要注意的是,rpm 和 deb 包不含 taosdump 和 TDinsight 安装脚本,这些工具需要通过安装 taosTool 包获得。如果您希望对 TDengine 贡献代码或对内部实现感兴趣,请参考我们的 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装.
-在 Linux 系统上,TDengine 开源版本提供 deb 和 rpm 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 deb 支持 Debian/Ubuntu 及衍生系统,rpm 支持 CentOS/RHEL/SUSE 及衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包,也支持通过 `apt-get` 工具从线上进行安装。TDengine 也提供 Windows x64 平台的安装包。
## 安装
+
+
+1. 从列表中下载获得 deb 安装包;
+
+2. 进入到安装包所在目录,执行如下的安装命令:
+
+```bash
+# 替换为下载的安装包版本
+sudo dpkg -i TDengine-server--Linux-x64.deb
+```
+
+
+
+
+
+1. 从列表中下载获得 rpm 安装包;
+
+2. 进入到安装包所在目录,执行如下的安装命令:
+
+```bash
+# 替换为下载的安装包版本
+sudo rpm -ivh TDengine-server--Linux-x64.rpm
+```
+
+
+
+
+
+1. 从列表中下载获得 tar.gz 安装包;
+
+2. 进入到安装包所在目录,先解压文件后,进入子目录,执行其中的 install.sh 安装脚本:
+
+```bash
+# 替换为下载的安装包版本
+tar -zxvf TDengine-server--Linux-x64.tar.gz
+```
+
+解压后进入相应路径,执行
+
+```bash
+sudo ./install.sh
+```
+
+:::info
+install.sh 安装脚本在执行过程中,会通过命令行交互界面询问一些配置信息。如果希望采取无交互安装方式,那么可以用 -e no 参数来执行 install.sh 脚本。运行 `./install.sh -h` 指令可以查看所有参数的详细说明信息。
+:::
+
+
+
+
+
+1. 从列表中下载获得 exe 安装程序;
+
+2. 运行可执行程序来安装 TDengine。
+
+
可以使用 apt-get 工具从官方仓库安装。
@@ -29,6 +86,7 @@ echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" |
如果安装 Beta 版需要安装包仓库
```bash
+wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add -
echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list
```
@@ -44,58 +102,12 @@ sudo apt-get install tdengine
apt-get 方式只适用于 Debian 或 Ubuntu 系统
::::
-
-
-1. 从 [发布历史页面](../../releases) 下载获得 deb 安装包,例如 TDengine-server-3.0.0.0-Linux-x64.deb;
-2. 进入到 TDengine-server-3.0.0.0-Linux-x64.deb 安装包所在目录,执行如下的安装命令:
-
-```bash
-sudo dpkg -i TDengine-server-3.0.0.0-Linux-x64.deb
-```
-
-
-
-
-
-1. 从 [发布历史页面](../../releases) 下载获得 rpm 安装包,例如 TDengine-server-3.0.0.0-Linux-x64.rpm;
-2. 进入到 TDengine-server-3.0.0.0-Linux-x64.rpm 安装包所在目录,执行如下的安装命令:
-
-```bash
-sudo rpm -ivh TDengine-server-3.0.0.0-Linux-x64.rpm
-```
-
-
-
-
-
-1. 从 [发布历史页面](../../releases) 下载获得 tar.gz 安装包,例如 TDengine-server-3.0.0.0-Linux-x64.tar.gz;
-2. 进入到 TDengine-server-3.0.0.0-Linux-x64.tar.gz 安装包所在目录,先解压文件后,进入子目录,执行其中的 install.sh 安装脚本:
-
-```bash
-tar -zxvf TDengine-server-3.0.0.0-Linux-x64.tar.gz
-```
-
-解压后进入相应路径,执行
-
-```bash
-sudo ./install.sh
-```
+
:::info
-install.sh 安装脚本在执行过程中,会通过命令行交互界面询问一些配置信息。如果希望采取无交互安装方式,那么可以用 -e no 参数来执行 install.sh 脚本。运行 `./install.sh -h` 指令可以查看所有参数的详细说明信息。
-
+下载其他组件、最新 Beta 版及之前版本的安装包,请点击[发布历史页面](../../releases)
:::
-
-
-
-
-1. 从 [发布历史页面](../../releases) 下载获得 exe 安装程序,例如 TDengine-server-3.0.0.0-Windows-x64.exe;
-2. 运行 TDengine-server-3.0.0.0-Windows-x64.exe 来安装 TDengine。
-
-
-
-
:::note
当安装第一个节点时,出现 Enter FQDN:提示的时候,不需要输入任何内容。只有当安装第二个或以后更多的节点时,才需要输入已有集群中任何一个可用节点的 FQDN,支持该新节点加入集群。当然也可以不输入,而是在新节点启动前,配置到新节点的配置文件中。
diff --git a/docs/zh/07-develop/06-stream.md b/docs/zh/07-develop/06-stream.md
index ab4fdf9004..d5296582d5 100644
--- a/docs/zh/07-develop/06-stream.md
+++ b/docs/zh/07-develop/06-stream.md
@@ -4,8 +4,16 @@ description: "TDengine 流式计算将数据的写入、预处理、复杂分析
title: 流式计算
---
-在时序数据的处理中,经常要对原始数据进行清洗、预处理,再使用时序数据库进行长久的储存。用户通常需要在时序数据库之外再搭建 Kafka、Flink、Spark 等流计算处理引擎,增加了用户的开发成本和维护成本。
-使用 TDengine 3.0 的流式计算引擎能够最大限度的减少对这些额外中间件的依赖,真正将数据的写入、预处理、长期存储、复杂分析、实时计算、实时报警触发等功能融为一体,并且,所有这些任务只需要使用 SQL 完成,极大降低了用户的学习成本、使用成本。
+在时序数据的处理中,经常要对原始数据进行清洗、预处理,再使用时序数据库进行长久的储存。在传统的时序数据解决方案中,常常需要部署 Kafka、Flink 等流处理系统。而流处理系统的复杂性,带来了高昂的开发与运维成本。
+
+TDengine 3.0 的流式计算引擎提供了实时处理写入的数据流的能力,使用 SQL 定义实时流变换,当数据被写入流的源表后,数据会被以定义的方式自动处理,并根据定义的触发模式向目的表推送结果。它提供了替代复杂流处理系统的轻量级解决方案,并能够在高吞吐的数据写入的情况下,提供毫秒级的计算结果延迟。
+
+流式计算可以包含数据过滤,标量函数计算(含UDF),以及窗口聚合(支持滑动窗口、会话窗口与状态窗口),可以以超级表、子表、普通表为源表,写入到目的超级表。在创建流时,目的超级表将被自动创建,随后新插入的数据会被流定义的方式处理并写入其中,通过 partition by 子句,可以以表名或标签划分 partition,不同的 partition 将写入到目的超级表的不同子表。
+
+TDengine 的流式计算能够支持分布在多个 vnode 中的超级表聚合;还能够处理乱序数据的写入:它提供了 watermark 机制以度量容忍数据乱序的程度,并提供了 ignore expired 配置项以决定乱序数据的处理策略——丢弃或者重新计算。
+
+详见 [流式计算](../../taos-sql/stream)
+
## 流式计算的创建
@@ -14,7 +22,7 @@ CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name AS subq
stream_options: {
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time]
WATERMARK time
- IGNORE EXPIRED
+ IGNORE EXPIRED [0 | 1]
}
```
@@ -59,7 +67,7 @@ insert into d1004 values("2018-10-03 14:38:05.000", 10.80000, 223, 0.29000);
insert into d1004 values("2018-10-03 14:38:06.500", 11.50000, 221, 0.35000);
```
-### 查询以观查结果
+### 查询以观察结果
```sql
taos> select start, end, max_current from current_stream_output_stb;
@@ -88,7 +96,7 @@ create stream power_stream into power_stream_output_stb as select ts, concat_ws(
参考示例一 [写入数据](#写入数据)
-### 查询以观查结果
+### 查询以观察结果
```sql
taos> select ts, meter_location, active_power, reactive_power from power_stream_output_stb;
ts | meter_location | active_power | reactive_power |
@@ -102,4 +110,4 @@ taos> select ts, meter_location, active_power, reactive_power from power_stream_
2018-10-03 14:38:16.800 | California.SanFrancisco.d1001 | 2588.728381186 | 829.240910475 |
2018-10-03 14:38:16.650 | California.SanFrancisco.d1002 | 2175.595991997 | 555.520860397 |
Query OK, 8 rows in database (0.014753s)
-```
\ No newline at end of file
+```
diff --git a/docs/zh/07-develop/07-tmq.md b/docs/zh/07-develop/07-tmq.md
deleted file mode 100644
index 25d468cad3..0000000000
--- a/docs/zh/07-develop/07-tmq.md
+++ /dev/null
@@ -1,249 +0,0 @@
----
-sidebar_label: 数据订阅
-description: "数据订阅与推送服务。写入到 TDengine 中的时序数据能够被自动推送到订阅客户端。"
-title: 数据订阅
----
-
-import Tabs from "@theme/Tabs";
-import TabItem from "@theme/TabItem";
-import Java from "./_sub_java.mdx";
-import Python from "./_sub_python.mdx";
-import Go from "./_sub_go.mdx";
-import Rust from "./_sub_rust.mdx";
-import Node from "./_sub_node.mdx";
-import CSharp from "./_sub_cs.mdx";
-import CDemo from "./_sub_c.mdx";
-
-
-为了帮助应用实时获取写入 TDengine 的数据,或者以事件到达顺序处理数据,TDengine提供了类似消息队列产品的数据订阅、消费接口。这样在很多场景下,采用 TDengine 的时序数据处理系统不再需要集成消息队列产品,比如 kafka, 从而简化系统设计的复杂度,降低运营维护成本。
-
-与 kafka 一样,你需要定义 topic, 但 TDengine 的 topic 是基于一个已经存在的超级表、子表或普通表的查询条件,即一个 SELECT 语句。你可以使用 SQL 对标签、表名、列、表达式等条件进行过滤,以及对数据进行标量函数与 UDF 计算(不包括数据聚合)。与其他消息队列软件相比,这是 TDengine 数据订阅功能的最大的优势,它提供了更大的灵活性,数据的颗粒度可以由应用随时调整,而且数据的过滤与预处理交给 TDengine,而不是应用完成,有效的减少传输的数据量与应用的复杂度。
-
-消费者订阅 topic 后,可以实时获得最新的数据。多个消费者可以组成一个消费者组 (consumer group), 一个消费者组里的多个消费者共享消费进度,便于多线程、分布式地消费数据,提高消费速度。但不同消费者组中的消费者即使消费同一个topic, 并不共享消费进度。一个消费者可以订阅多个 topic。如果订阅的是超级表,数据可能会分布在多个不同的 vnode 上,也就是多个 shard 上,这样一个消费组里有多个消费者可以提高消费效率。TDengine 的消息队列提供了消息的ACK机制,在宕机、重启等复杂环境下确保 at least once 消费。
-
-为了实现上述功能,TDengine 会为 WAL (Write-Ahead-Log) 文件自动创建索引以支持快速随机访问,并提供了灵活可配置的文件切换与保留机制:用户可以按需指定 WAL 文件保留的时间以及大小(详见 create database 语句)。通过以上方式将 WAL 改造成了一个保留事件到达顺序的、可持久化的存储引擎(但由于 TSDB 具有远比 WAL 更高的压缩率,我们不推荐保留太长时间,一般来说,不超过几天)。 对于以 topic 形式创建的查询,TDengine 将对接 WAL 而不是 TSDB 作为其存储引擎。在消费时,TDengine 根据当前消费进度从 WAL 直接读取数据,并使用统一的查询引擎实现过滤、变换等操作,将数据推送给消费者。
-
-本文档不对消息队列本身的基础知识做介绍,如果需要了解,请自行搜索。
-
-## 主要数据结构和API
-
-TMQ 的 API 中,与订阅相关的主要数据结构和API如下:
-
-```c
-typedef struct tmq_t tmq_t;
-typedef struct tmq_conf_t tmq_conf_t;
-typedef struct tmq_list_t tmq_list_t;
-
-typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param));
-
-DLL_EXPORT tmq_list_t *tmq_list_new();
-DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *);
-DLL_EXPORT void tmq_list_destroy(tmq_list_t *);
-DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen);
-DLL_EXPORT const char *tmq_err2str(int32_t code);
-
-DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list);
-DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq);
-DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
-DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
-DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
-DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
-
-enum tmq_conf_res_t {
- TMQ_CONF_UNKNOWN = -2,
- TMQ_CONF_INVALID = -1,
- TMQ_CONF_OK = 0,
-};
-typedef enum tmq_conf_res_t tmq_conf_res_t;
-
-DLL_EXPORT tmq_conf_t *tmq_conf_new();
-DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
-DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf);
-DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
-```
-
-这些 API 的文档请见 [C/C++ Connector](/reference/connector/cpp),下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面C语言的示例代码。
-
-## 写入数据
-
-首先完成建库、建一张超级表和多张子表操作,然后就可以写入数据了,比如:
-
-```sql
-drop database if exists tmqdb;
-create database tmqdb;
-create table tmqdb.stb (ts timestamp, c1 int, c2 float, c3 varchar(16) tags(t1 int, t3 varchar(16));
-create table tmqdb.ctb0 using tmqdb.stb tags(0, "subtable0");
-create table tmqdb.ctb1 using tmqdb.stb tags(1, "subtable1");
-insert into tmqdb.ctb0 values(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00');
-insert into tmqdb.ctb1 values(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11');
-```
-
-## 创建topic:
-
-```sql
-create topic topicName as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1;
-```
-
-TMQ支持多种订阅类型:
-
-### 列订阅
-
-语法:CREATE TOPIC topic_name as subquery
-通过select语句订阅(包括select *,或select ts, c1等指定列描述订阅,可以带条件过滤、标量函数计算,但不支持聚合函数、不支持时间窗口聚合)
-
-- TOPIC一旦创建则schema确定
-- 被订阅或用于计算的column和tag不可被删除、修改
-- 若发生schema变更,新增的column不出现在结果中
-
-### 超级表订阅
-语法:CREATE TOPIC topic_name AS STABLE stbName
-
-与select * from stbName订阅的区别是:
-- 不会限制用户的schema变更
-- 返回的是非结构化的数据:返回数据的schema会随之超级表的schema变化而变化
-- 用户对于要处理的每一个数据块都可能有不同的schema,因此,必须重新获取schema
-- 返回数据不带有tag
-
-## 创建 consumer 以及consumer group
-
-对于consumer, 目前支持的config包括:
-
-| 参数名称 | 参数值 | 备注 |
-| ---------------------------- | ------------------------------ | ------------------------------------------------------ |
-| group.id | 最大长度:192 | |
-| enable.auto.commit | 合法值:true, false | |
-| auto.commit.interval.ms | | |
-| auto.offset.reset | 合法值:earliest, latest, none | |
-| td.connect.ip | 用于连接,同taos_connect的参数 | |
-| td.connect.user | 用于连接,同taos_connect的参数 | |
-| td.connect.pass | 用于连接,同taos_connect的参数 | |
-| td.connect.port | 用于连接,同taos_connect的参数 | |
-| enable.heartbeat.background | 合法值:true, false | 开启后台心跳,即consumer不会因为长时间不poll而认为离线 |
-| experimental.snapshot.enable | 合法值:true, false | 从wal开始消费,还是从tsbs开始消费 |
-| msg.with.table.name | 合法值:true, false | 从消息中能否解析表名 |
-
-```sql
-/* 根据需要,设置消费组(group.id)、自动提交(enable.auto.commit)、自动提交时间间隔(auto.commit.interval.ms)、用户名(td.connect.user)、密码(td.connect.pass)等参数 */
- tmq_conf_t* conf = tmq_conf_new();
- tmq_conf_set(conf, "enable.auto.commit", "true");
- tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
- tmq_conf_set(conf, "group.id", "cgrpName");
- tmq_conf_set(conf, "td.connect.user", "root");
- tmq_conf_set(conf, "td.connect.pass", "taosdata");
- tmq_conf_set(conf, "auto.offset.reset", "earliest");
- tmq_conf_set(conf, "experimental.snapshot.enable", "true");
- tmq_conf_set(conf, "msg.with.table.name", "true");
- tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
-
- tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
- tmq_conf_destroy(conf);
-```
-
-上述配置中包括consumer group ID,如果多个 consumer 指定的 consumer group ID一样,则自动形成一个consumer group,共享消费进度。
-
-
-## 创建 topic 列表
-
-单个consumer支持同时订阅多个topic。
-
-```sql
- tmq_list_t* topicList = tmq_list_new();
- tmq_list_append(topicList, "topicName");
-```
-
-## 启动订阅并开始消费
-
-```
- /* 启动订阅 */
- tmq_subscribe(tmq, topicList);
- tmq_list_destroy(topicList);
-
- /* 循环poll消息 */
- while (running) {
- TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, timeOut);
- msg_process(tmqmsg);
- }
-```
-
-这里是一个 **while** 循环,每调用一次tmq_consumer_poll(),获取一个消息,该消息与普通查询返回的结果集完全相同,可以使用相同的解析API完成消息内容的解析。
-
-## 结束消费
-
-```sql
- /* 取消订阅 */
- tmq_unsubscribe(tmq);
-
- /* 关闭消费 */
- tmq_consumer_close(tmq);
-```
-
-## 删除topic
-
-如果不再需要,可以删除创建topic,但注意:只有没有被订阅的topic才能别删除。
-
-```sql
- /* 删除topic */
- drop topic topicName;
-```
-
-## 状态查看
-
-1、topics:查询已经创建的topic
-
-```sql
- show topics;
-```
-
-2、consumers:查询consumer的状态及其订阅的topic
-
-```sql
- show consumers;
-```
-
-3、subscriptions:查询consumer与vgroup之间的分配关系
-
-```sql
- show subscriptions;
-```
-
-## 示例代码
-
-本节展示各种语言的示例代码。
-
-
-
-
-```c
-{{#include examples/c/tmq.c}}
-```
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-```python
-{{#include docs/examples/python/tmq_example.py}}
-```
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/docs/zh/07-develop/07-tmq.mdx b/docs/zh/07-develop/07-tmq.mdx
new file mode 100644
index 0000000000..c9ac178081
--- /dev/null
+++ b/docs/zh/07-develop/07-tmq.mdx
@@ -0,0 +1,1046 @@
+---
+sidebar_label: 数据订阅
+description: "数据订阅与推送服务。写入到 TDengine 中的时序数据能够被自动推送到订阅客户端。"
+title: 数据订阅
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+import Java from "./_sub_java.mdx";
+import Python from "./_sub_python.mdx";
+import Go from "./_sub_go.mdx";
+import Rust from "./_sub_rust.mdx";
+import Node from "./_sub_node.mdx";
+import CSharp from "./_sub_cs.mdx";
+import CDemo from "./_sub_c.mdx";
+
+为了帮助应用实时获取写入 TDengine 的数据,或者以事件到达顺序处理数据,TDengine 提供了类似消息队列产品的数据订阅、消费接口。这样在很多场景下,采用 TDengine 的时序数据处理系统不再需要集成消息队列产品,比如 kafka, 从而简化系统设计的复杂度,降低运营维护成本。
+
+与 kafka 一样,你需要定义 *topic*, 但 TDengine 的 *topic* 是基于一个已经存在的超级表、子表或普通表的查询条件,即一个 `SELECT` 语句。你可以使用 SQL 对标签、表名、列、表达式等条件进行过滤,以及对数据进行标量函数与 UDF 计算(不包括数据聚合)。与其他消息队列软件相比,这是 TDengine 数据订阅功能的最大的优势,它提供了更大的灵活性,数据的颗粒度可以由应用随时调整,而且数据的过滤与预处理交给 TDengine,而不是应用完成,有效的减少传输的数据量与应用的复杂度。
+
+消费者订阅 *topic* 后,可以实时获得最新的数据。多个消费者可以组成一个消费者组 (consumer group), 一个消费者组里的多个消费者共享消费进度,便于多线程、分布式地消费数据,提高消费速度。但不同消费者组中的消费者即使消费同一个 topic, 并不共享消费进度。一个消费者可以订阅多个 topic。如果订阅的是超级表,数据可能会分布在多个不同的 vnode 上,也就是多个 shard 上,这样一个消费组里有多个消费者可以提高消费效率。TDengine 的消息队列提供了消息的 ACK 机制,在宕机、重启等复杂环境下确保 at least once 消费。
+
+为了实现上述功能,TDengine 会为 WAL (Write-Ahead-Log) 文件自动创建索引以支持快速随机访问,并提供了灵活可配置的文件切换与保留机制:用户可以按需指定 WAL 文件保留的时间以及大小(详见 create database 语句)。通过以上方式将 WAL 改造成了一个保留事件到达顺序的、可持久化的存储引擎(但由于 TSDB 具有远比 WAL 更高的压缩率,我们不推荐保留太长时间,一般来说,不超过几天)。 对于以 topic 形式创建的查询,TDengine 将对接 WAL 而不是 TSDB 作为其存储引擎。在消费时,TDengine 根据当前消费进度从 WAL 直接读取数据,并使用统一的查询引擎实现过滤、变换等操作,将数据推送给消费者。
+
+本文档不对消息队列本身的基础知识做介绍,如果需要了解,请自行搜索。
+
+## 主要数据结构和 API
+
+不同语言下, TMQ 订阅相关的 API 及数据结构如下:
+
+
+
+
+```c
+typedef struct tmq_t tmq_t;
+typedef struct tmq_conf_t tmq_conf_t;
+typedef struct tmq_list_t tmq_list_t;
+
+typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param));
+
+DLL_EXPORT tmq_list_t *tmq_list_new();
+DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *);
+DLL_EXPORT void tmq_list_destroy(tmq_list_t *);
+DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen);
+DLL_EXPORT const char *tmq_err2str(int32_t code);
+
+DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list);
+DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq);
+DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
+DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
+DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
+DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
+
+enum tmq_conf_res_t {
+ TMQ_CONF_UNKNOWN = -2,
+ TMQ_CONF_INVALID = -1,
+ TMQ_CONF_OK = 0,
+};
+typedef enum tmq_conf_res_t tmq_conf_res_t;
+
+DLL_EXPORT tmq_conf_t *tmq_conf_new();
+DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
+DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf);
+DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
+```
+
+这些 API 的文档请见 [C/C++ Connector](/reference/connector/cpp),下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。
+
+
+
+
+```java
+void subscribe(Collection topics) throws SQLException;
+
+void unsubscribe() throws SQLException;
+
+Set subscription() throws SQLException;
+
+ConsumerRecords poll(Duration timeout) throws SQLException;
+
+void commitAsync();
+
+void commitAsync(OffsetCommitCallback callback);
+
+void commitSync() throws SQLException;
+
+void close() throws SQLException;
+```
+
+
+
+
+
+```python
+class TaosConsumer():
+ def __init__(self, *topics, **configs)
+
+ def __iter__(self)
+
+ def __next__(self)
+
+ def sync_next(self)
+
+ def subscription(self)
+
+ def unsubscribe(self)
+
+ def close(self)
+
+ def __del__(self)
+```
+
+
+
+
+
+```go
+func NewConsumer(conf *Config) (*Consumer, error)
+
+func (c *Consumer) Close() error
+
+func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error
+
+func (c *Consumer) FreeMessage(message unsafe.Pointer)
+
+func (c *Consumer) Poll(timeout time.Duration) (*Result, error)
+
+func (c *Consumer) Subscribe(topics []string) error
+
+func (c *Consumer) Unsubscribe() error
+```
+
+
+
+
+
+```csharp
+ConsumerBuilder(IEnumerable> config)
+
+virtual IConsumer Build()
+
+Consumer(ConsumerBuilder builder)
+
+void Subscribe(IEnumerable topics)
+
+void Subscribe(string topic)
+
+ConsumeResult Consume(int millisecondsTimeout)
+
+List Subscription()
+
+void Unsubscribe()
+
+void Commit(ConsumeResult consumerResult)
+
+void Close()
+```
+
+
+
+
+
+```node
+function TMQConsumer(config)
+
+function subscribe(topic)
+
+function consume(timeout)
+
+function subscription()
+
+function unsubscribe()
+
+function commit(msg)
+
+function close()
+```
+
+
+
+
+
+## 写入数据
+
+首先完成建库、建一张超级表和多张子表操作,然后就可以写入数据了,比如:
+
+```sql
+DROP DATABASE IF EXISTS tmqdb;
+CREATE DATABASE tmqdb;
+CREATE TABLE tmqdb.stb (ts TIMESTAMP, c1 INT, c2 FLOAT, c3 VARCHAR(16) TAGS(t1 INT, t3 VARCHAR(16));
+CREATE TABLE tmqdb.ctb0 USING tmqdb.stb TAGS(0, "subtable0");
+CREATE TABLE tmqdb.ctb1 USING tmqdb.stb TAGS(1, "subtable1");
+INSERT INTO tmqdb.ctb0 VALUES(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00');
+INSERT INTO tmqdb.ctb1 VALUES(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11');
+```
+
+## 创建 *topic*
+
+TDengine 使用 SQL 创建一个 topic:
+
+```sql
+CREATE TOPIC topic_name AS SELECT ts, c1, c2, c3 FROM tmqdb.stb WHERE c1 > 1;
+```
+
+TMQ 支持多种订阅类型:
+
+### 列订阅
+
+语法:
+
+```sql
+CREATE TOPIC topic_name as subquery
+```
+
+通过 `SELECT` 语句订阅(包括 `SELECT *`,或 `SELECT ts, c1` 等指定列订阅,可以带条件过滤、标量函数计算,但不支持聚合函数、不支持时间窗口聚合)。需要注意的是:
+
+- 该类型 TOPIC 一旦创建则订阅数据的结构确定。
+- 被订阅或用于计算的列或标签不可被删除(`ALTER table DROP`)、修改(`ALTER table MODIFY`)。
+- 若发生表结构变更,新增的列不出现在结果中,若发生列删除则会报错。
+
+### 超级表订阅
+
+语法:
+
+```sql
+CREATE TOPIC topic_name AS STABLE stb_name
+```
+
+与 `SELECT * from stbName` 订阅的区别是:
+
+- 不会限制用户的表结构变更。
+- 返回的是非结构化的数据:返回数据的结构会随着超级表的表结构变化而变化。
+- 用户对于要处理的每一个数据块都可能有不同的表结构。
+- 返回数据不包含标签。
+
+### 数据库订阅
+
+语法:
+
+```sql
+CREATE TOPIC topic_name [WITH META] AS DATABASE db_name;
+```
+
+通过该语句可创建一个包含数据库所有表数据的订阅,`WITH META` 可选择将数据库结构变动信息加入到订阅消息流,TMQ 将消费当前数据库下所有表结构的变动,包括超级表的创建与删除,列添加、删除或修改,子表的创建、删除及 TAG 变动信息等等。消费者可通过 API 来判断具体的消息类型。这一点也是与 Kafka 不同的地方。
+
+## 创建消费者 *consumer*
+
+消费者需要通过一系列配置选项创建,基础配置项如下表所示:
+
+| 参数名称 | 类型 | 参数说明 | 备注 |
+| :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- |
+| `td.connect.ip` | string | 用于创建连接,同 `taos_connect` | |
+| `td.connect.user` | string | 用于创建连接,同 `taos_connect` | |
+| `td.connect.pass` | string | 用于创建连接,同 `taos_connect` |
+| `td.connect.port` | integer | 用于创建连接,同 `taos_connect` |
+| `group.id` | string | 消费组 ID,同一消费组共享消费进度 | **必填项**。最大长度:192。 |
+| `client.id` | string | 客户端 ID | 最大长度:192。 |
+| `auto.offset.reset` | enum | 消费组订阅的初始位置 | 可选:`earliest`, `latest`, `none`(default) |
+| `enable.auto.commit` | boolean | 启用自动提交 | 合法值:`true`, `false`。 |
+| `auto.commit.interval.ms` | integer | 以毫秒为单位的自动提交时间间隔 |
+| `enable.heartbeat.background` | boolean | 启用后台心跳,启用后即使长时间不 poll 消息也不会造成离线 | |
+| `experimental.snapshot.enable` | boolean | 从 WAL 开始消费,还是从 TSDB 开始消费 | |
+| `msg.with.table.name` | boolean | 是否允许从消息中解析表名 |
+
+对于不同编程语言,其设置方式如下:
+
+
+
+
+```c
+/* 根据需要,设置消费组 (group.id)、自动提交 (enable.auto.commit)、
+ 自动提交时间间隔 (auto.commit.interval.ms)、用户名 (td.connect.user)、密码 (td.connect.pass) 等参数 */
+tmq_conf_t* conf = tmq_conf_new();
+tmq_conf_set(conf, "enable.auto.commit", "true");
+tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
+tmq_conf_set(conf, "group.id", "cgrpName");
+tmq_conf_set(conf, "td.connect.user", "root");
+tmq_conf_set(conf, "td.connect.pass", "taosdata");
+tmq_conf_set(conf, "auto.offset.reset", "earliest");
+tmq_conf_set(conf, "experimental.snapshot.enable", "true");
+tmq_conf_set(conf, "msg.with.table.name", "true");
+tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
+
+tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
+tmq_conf_destroy(conf);
+```
+
+
+
+
+对于 Java 程序,使用如下配置项:
+
+| 参数名称 | 类型 | 参数说明 |
+| ----------------------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------- |
+| `bootstrap.servers` | string | 连接地址,如 `localhost:6030` |
+| `value.deserializer` | string | 值解析方法,使用此方法应实现 `com.taosdata.jdbc.tmq.Deserializer` 接口或继承 `com.taosdata.jdbc.tmq.ReferenceDeserializer` 类 |
+| `value.deserializer.encoding` | string | 指定字符串解析的字符集 | |
+
+需要注意:此处使用 `bootstrap.servers` 替代 `td.connect.ip` 和 `td.connect.port`,以提供与 Kafka 一致的接口。
+
+```java
+Properties properties = new Properties();
+properties.setProperty("enable.auto.commit", "true");
+properties.setProperty("auto.commit.interval.ms", "1000");
+properties.setProperty("group.id", "cgrpName");
+properties.setProperty("bootstrap.servers", "127.0.0.1:6030");
+properties.setProperty("td.connect.user", "root");
+properties.setProperty("td.connect.pass", "taosdata");
+properties.setProperty("auto.offset.reset", "earliest");
+properties.setProperty("msg.with.table.name", "true");
+properties.setProperty("value.deserializer", "com.taos.example.MetersDeserializer");
+
+TaosConsumer<Meters> consumer = new TaosConsumer<>(properties);
+
+/* value deserializer definition. */
+import com.taosdata.jdbc.tmq.ReferenceDeserializer;
+
+public class MetersDeserializer extends ReferenceDeserializer<Meters> {
+}
+```
+
+
+
+
+
+Python 使用以下配置项创建一个 Consumer 实例。
+
+| 参数名称 | 类型 | 参数说明 | 备注 |
+| :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- |
+| `td_connect_ip` | string | 用于创建连接,同 `taos_connect` | |
+| `td_connect_user` | string | 用于创建连接,同 `taos_connect` | |
+| `td_connect_pass` | string | 用于创建连接,同 `taos_connect` | |
+| `td_connect_port` | string | 用于创建连接,同 `taos_connect` | |
+| `group_id` | string | 消费组 ID,同一消费组共享消费进度 | **必填项**。最大长度:192。 |
+| `client_id` | string | 客户端 ID | 最大长度:192。 |
+| `auto_offset_reset` | string | 消费组订阅的初始位置 | 可选:`earliest`, `latest`, `none`(default) |
+| `enable_auto_commit` | string | 启用自动提交 | 合法值:`true`, `false`。 |
+| `auto_commit_interval_ms` | string | 以毫秒为单位的自动提交时间间隔 | |
+| `enable_heartbeat_background` | string | 启用后台心跳,启用后即使长时间不 poll 消息也不会造成离线 | 合法值:`true`, `false` |
+| `experimental_snapshot_enable` | string | 从 WAL 开始消费,还是从 TSDB 开始消费 | 合法值:`true`, `false` |
+| `msg_with_table_name` | string | 是否允许从消息中解析表名 | 合法值:`true`, `false` |
+| `timeout` | int | 消费者拉取的超时时间 | |
+
+
+
+
+
+```go
+config := tmq.NewConfig()
+defer config.Destroy()
+err = config.SetGroupID("test")
+if err != nil {
+ panic(err)
+}
+err = config.SetAutoOffsetReset("earliest")
+if err != nil {
+ panic(err)
+}
+err = config.SetConnectIP("127.0.0.1")
+if err != nil {
+ panic(err)
+}
+err = config.SetConnectUser("root")
+if err != nil {
+ panic(err)
+}
+err = config.SetConnectPass("taosdata")
+if err != nil {
+ panic(err)
+}
+err = config.SetConnectPort("6030")
+if err != nil {
+ panic(err)
+}
+err = config.SetMsgWithTableName(true)
+if err != nil {
+ panic(err)
+}
+err = config.EnableHeartBeat()
+if err != nil {
+ panic(err)
+}
+err = config.EnableAutoCommit(func(result *wrapper.TMQCommitCallbackResult) {
+ if result.ErrCode != 0 {
+ errStr := wrapper.TMQErr2Str(result.ErrCode)
+ err := errors.NewError(int(result.ErrCode), errStr)
+ panic(err)
+ }
+})
+if err != nil {
+ panic(err)
+}
+```
+
+
+
+
+
+```csharp
+using TDengineTMQ;
+
+// 根据需要,设置消费组 (GourpId)、自动提交 (EnableAutoCommit)、
+// 自动提交时间间隔 (AutoCommitIntervalMs)、用户名 (TDConnectUser)、密码 (TDConnectPasswd) 等参数
+var cfg = new ConsumerConfig
+ {
+ EnableAutoCommit = "true",
+ AutoCommitIntervalMs = "1000",
+ GourpId = "TDengine-TMQ-C#",
+ TDConnectUser = "root",
+ TDConnectPasswd = "taosdata",
+ AutoOffsetReset = "earliest",
+ MsgWithTableName = "true",
+ TDConnectIp = "127.0.0.1",
+ TDConnectPort = "6030"
+ };
+
+var consumer = new ConsumerBuilder(cfg).Build();
+
+```
+
+
+
+
+
+``` node
+// 根据需要,设置消费组 (group.id)、自动提交 (enable.auto.commit)、
+// 自动提交时间间隔 (auto.commit.interval.ms)、用户名 (td.connect.user)、密码 (td.connect.pass) 等参数
+
+let consumer = taos.consumer({
+ 'enable.auto.commit': 'true',
+ 'auto.commit.interval.ms': '1000',
+ 'group.id': 'tg2',
+ 'td.connect.user': 'root',
+ 'td.connect.pass': 'taosdata',
+ 'auto.offset.reset': 'earliest',
+ 'msg.with.table.name': 'true',
+ 'td.connect.ip': '127.0.0.1',
+ 'td.connect.port': '6030'
+ });
+
+```
+
+
+
+
+
+上述配置中包括 consumer group ID,如果多个 consumer 指定的 consumer group ID 一样,则自动形成一个 consumer group,共享消费进度。
+
+## 订阅 *topics*
+
+一个 consumer 支持同时订阅多个 topic。
+
+
+
+
+```c
+// 创建订阅 topics 列表
+tmq_list_t* topicList = tmq_list_new();
+tmq_list_append(topicList, "topicName");
+// 启动订阅
+tmq_subscribe(tmq, topicList);
+tmq_list_destroy(topicList);
+
+```
+
+
+
+
+```java
+List<String> topics = new ArrayList<>();
+topics.add("tmq_topic");
+consumer.subscribe(topics);
+```
+
+
+
+
+```go
+consumer, err := tmq.NewConsumer(config)
+if err != nil {
+ panic(err)
+}
+err = consumer.Subscribe([]string{"example_tmq_topic"})
+if err != nil {
+ panic(err)
+}
+```
+
+
+
+
+
+```csharp
+// 创建订阅 topics 列表
+List<string> topics = new List<string>();
+topics.add("tmq_topic");
+// 启动订阅
+consumer.Subscribe(topics);
+```
+
+
+
+
+```python
+consumer = TaosConsumer('topic_ctb_column', group_id='vg2')
+```
+
+
+
+
+```node
+// 创建订阅 topics 列表
+let topics = ['topic_test']
+
+// 启动订阅
+consumer.subscribe(topics);
+```
+
+
+
+
+
+## 消费
+
+以下代码展示了不同语言下如何对 TMQ 消息进行消费。
+
+
+
+
+```c
+// 消费数据
+while (running) {
+ TAOS_RES* msg = tmq_consumer_poll(tmq, timeOut);
+ msg_process(msg);
+}
+```
+
+这里是一个 **while** 循环,每调用一次 tmq_consumer_poll(),获取一个消息,该消息与普通查询返回的结果集完全相同,可以使用相同的解析 API 完成消息内容的解析。
+
+
+
+
+```java
+while(running){
+ ConsumerRecords<Meters> meters = consumer.poll(Duration.ofMillis(100));
+ for (Meters meter : meters) {
+ processMsg(meter);
+ }
+}
+```
+
+
+
+
+```python
+for msg in consumer:
+ for row in msg:
+ print(row)
+```
+
+
+
+
+```go
+for {
+ result, err := consumer.Poll(time.Second)
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println(result)
+ consumer.Commit(context.Background(), result.Message)
+ consumer.FreeMessage(result.Message)
+}
+```
+
+
+
+
+
+```csharp
+// 消费数据
+while (true)
+{
+ var consumerRes = consumer.Consume(100);
+ // process ConsumeResult
+ ProcessMsg(consumerRes);
+ consumer.Commit(consumerRes);
+}
+```
+
+
+
+
+
+```node
+while(true){
+ msg = consumer.consume(200);
+ // process message(consumeResult)
+ console.log(msg.topicPartition);
+ console.log(msg.block);
+ console.log(msg.fields)
+ }
+```
+
+
+
+
+
+## 结束消费
+
+消费结束后,应当取消订阅。
+
+
+
+
+```c
+/* 取消订阅 */
+tmq_unsubscribe(tmq);
+
+/* 关闭消费者对象 */
+tmq_consumer_close(tmq);
+```
+
+
+
+
+```java
+/* 取消订阅 */
+consumer.unsubscribe();
+
+/* 关闭消费 */
+consumer.close();
+```
+
+
+
+
+
+```python
+# 取消订阅
+consumer.unsubscribe()
+
+# 关闭消费
+consumer.close()
+```
+
+
+
+
+```go
+consumer.Close()
+```
+
+
+
+
+```csharp
+// 取消订阅
+consumer.Unsubscribe();
+
+// 关闭消费
+consumer.Close();
+```
+
+
+
+
+```node
+consumer.unsubscribe();
+consumer.close();
+```
+
+
+
+
+
+## 删除 *topic*
+
+如果不再需要订阅数据,可以删除 topic,需要注意:只有当前未在订阅中的 TOPIC 才能被删除。
+
+```sql
+/* 删除 topic */
+DROP TOPIC topic_name;
+```
+
+## 状态查看
+
+1、*topics*:查询已经创建的 topic
+
+```sql
+SHOW TOPICS;
+```
+
+2、consumers:查询 consumer 的状态及其订阅的 topic
+
+```sql
+SHOW CONSUMERS;
+```
+
+3、subscriptions:查询 consumer 与 vgroup 之间的分配关系
+
+```sql
+SHOW SUBSCRIPTIONS;
+```
+
+## 示例代码
+
+以下是各语言的完整示例代码。
+
+
+
+
+```c
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include "taos.h"
+
+static int running = 1;
+static char dbName[64] = "tmqdb";
+static char stbName[64] = "stb";
+static char topicName[64] = "topicname";
+
+static int32_t msg_process(TAOS_RES* msg) {
+ char buf[1024];
+ int32_t rows = 0;
+
+ const char* topicName = tmq_get_topic_name(msg);
+ const char* dbName = tmq_get_db_name(msg);
+ int32_t vgroupId = tmq_get_vgroup_id(msg);
+
+ printf("topic: %s\n", topicName);
+ printf("db: %s\n", dbName);
+ printf("vgroup id: %d\n", vgroupId);
+
+ while (1) {
+ TAOS_ROW row = taos_fetch_row(msg);
+ if (row == NULL) break;
+
+ TAOS_FIELD* fields = taos_fetch_fields(msg);
+ int32_t numOfFields = taos_field_count(msg);
+ int32_t* length = taos_fetch_lengths(msg);
+ int32_t precision = taos_result_precision(msg);
+ const char* tbName = tmq_get_table_name(msg);
+ rows++;
+ taos_print_row(buf, row, fields, numOfFields);
+ printf("row content from %s: %s\n", (tbName != NULL ? tbName : "table null"), buf);
+ }
+
+ return rows;
+}
+
+static int32_t init_env() {
+ TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
+ if (pConn == NULL) {
+ return -1;
+ }
+
+ TAOS_RES* pRes;
+ // drop database if exists
+ printf("create database\n");
+ pRes = taos_query(pConn, "drop database if exists tmqdb");
+ if (taos_errno(pRes) != 0) {
+ printf("error in drop tmqdb, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ // create database
+ pRes = taos_query(pConn, "create database tmqdb");
+ if (taos_errno(pRes) != 0) {
+ printf("error in create tmqdb, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ // create super table
+ printf("create super table\n");
+ pRes = taos_query(
+ pConn, "create table tmqdb.stb (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create super table stb, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ // create sub tables
+ printf("create sub tables\n");
+ pRes = taos_query(pConn, "create table tmqdb.ctb0 using tmqdb.stb tags(0, 'subtable0')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create super table ctb0, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create table tmqdb.ctb1 using tmqdb.stb tags(1, 'subtable1')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create super table ctb1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create table tmqdb.ctb2 using tmqdb.stb tags(2, 'subtable2')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create super table ctb2, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create table tmqdb.ctb3 using tmqdb.stb tags(3, 'subtable3')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create super table ctb3, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ // insert data
+ printf("insert data into sub tables\n");
+ pRes = taos_query(pConn, "insert into tmqdb.ctb0 values(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "insert into tmqdb.ctb1 values(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "insert into tmqdb.ctb2 values(now, 2, 2, 'a1')(now+1s, 22, 22, 'a22')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "insert into tmqdb.ctb3 values(now, 3, 3, 'a1')(now+1s, 33, 33, 'a33')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ taos_close(pConn);
+ return 0;
+}
+
+int32_t create_topic() {
+ printf("create topic\n");
+ TAOS_RES* pRes;
+ TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
+ if (pConn == NULL) {
+ return -1;
+ }
+
+ pRes = taos_query(pConn, "use tmqdb");
+ if (taos_errno(pRes) != 0) {
+ printf("error in use tmqdb, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create topic topicname as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create topic topicname, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ taos_close(pConn);
+ return 0;
+}
+
+void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
+ printf("tmq_commit_cb_print() code: %d, tmq: %p, param: %p\n", code, tmq, param);
+}
+
+tmq_t* build_consumer() {
+ tmq_conf_res_t code;
+ tmq_conf_t* conf = tmq_conf_new();
+ code = tmq_conf_set(conf, "enable.auto.commit", "true");
+ if (TMQ_CONF_OK != code) return NULL;
+ code = tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
+ if (TMQ_CONF_OK != code) return NULL;
+ code = tmq_conf_set(conf, "group.id", "cgrpName");
+ if (TMQ_CONF_OK != code) return NULL;
+ code = tmq_conf_set(conf, "client.id", "user defined name");
+ if (TMQ_CONF_OK != code) return NULL;
+ code = tmq_conf_set(conf, "td.connect.user", "root");
+ if (TMQ_CONF_OK != code) return NULL;
+ code = tmq_conf_set(conf, "td.connect.pass", "taosdata");
+ if (TMQ_CONF_OK != code) return NULL;
+ code = tmq_conf_set(conf, "auto.offset.reset", "earliest");
+ if (TMQ_CONF_OK != code) return NULL;
+ code = tmq_conf_set(conf, "experimental.snapshot.enable", "true");
+ if (TMQ_CONF_OK != code) return NULL;
+ code = tmq_conf_set(conf, "msg.with.table.name", "true");
+ if (TMQ_CONF_OK != code) return NULL;
+
+ tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
+
+ tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
+ tmq_conf_destroy(conf);
+ return tmq;
+}
+
+tmq_list_t* build_topic_list() {
+ tmq_list_t* topicList = tmq_list_new();
+ int32_t code = tmq_list_append(topicList, "topicname");
+ if (code) {
+ return NULL;
+ }
+ return topicList;
+}
+
+void basic_consume_loop(tmq_t* tmq, tmq_list_t* topicList) {
+ int32_t code;
+
+ if ((code = tmq_subscribe(tmq, topicList))) {
+ fprintf(stderr, "%% Failed to tmq_subscribe(): %s\n", tmq_err2str(code));
+ return;
+ }
+
+ int32_t totalRows = 0;
+ int32_t msgCnt = 0;
+ int32_t timeout = 5000;
+ while (running) {
+ TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, timeout);
+ if (tmqmsg) {
+ msgCnt++;
+ totalRows += msg_process(tmqmsg);
+ taos_free_result(tmqmsg);
+ /*} else {*/
+ /*break;*/
+ }
+ }
+
+ fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
+}
+
+int main(int argc, char* argv[]) {
+ int32_t code;
+
+ if (init_env() < 0) {
+ return -1;
+ }
+
+ if (create_topic() < 0) {
+ return -1;
+ }
+
+ tmq_t* tmq = build_consumer();
+ if (NULL == tmq) {
+ fprintf(stderr, "%% build_consumer() fail!\n");
+ return -1;
+ }
+
+ tmq_list_t* topic_list = build_topic_list();
+ if (NULL == topic_list) {
+ return -1;
+ }
+
+ basic_consume_loop(tmq, topic_list);
+
+ code = tmq_unsubscribe(tmq);
+ if (code) {
+ fprintf(stderr, "%% Failed to unsubscribe: %s\n", tmq_err2str(code));
+ } else {
+ fprintf(stderr, "%% unsubscribe\n");
+ }
+
+ code = tmq_consumer_close(tmq);
+ if (code) {
+ fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
+ } else {
+ fprintf(stderr, "%% Consumer closed\n");
+ }
+
+ return 0;
+}
+
+```
+
+[查看源码](https://github.com/taosdata/TDengine/blob/develop/examples/c/tmq.c)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+```python
+import taos
+from taos.tmq import TaosConsumer
+
+consumer = TaosConsumer('topic_ctb_column', group_id='vg2')
+for msg in consumer:
+ for row in msg:
+ print(row)
+
+```
+
+[查看源码](https://github.com/taosdata/TDengine/blob/develop/docs/examples/python/tmq_example.py)
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/zh/07-develop/_sub_java.mdx b/docs/zh/07-develop/_sub_java.mdx
index 9365941679..d14b5fd609 100644
--- a/docs/zh/07-develop/_sub_java.mdx
+++ b/docs/zh/07-develop/_sub_java.mdx
@@ -3,7 +3,9 @@
{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}}
```
-:::note
-目前 Java 接口没有提供异步订阅模式,但用户程序可以通过创建 `TimerTask` 等方式达到同样的效果。
-
-:::
\ No newline at end of file
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
+```
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}}
+```
\ No newline at end of file
diff --git a/docs/zh/07-develop/_sub_rust.mdx b/docs/zh/07-develop/_sub_rust.mdx
index afb8d79daa..0021666a70 100644
--- a/docs/zh/07-develop/_sub_rust.mdx
+++ b/docs/zh/07-develop/_sub_rust.mdx
@@ -1,3 +1,3 @@
-```rs
+```rust
{{#include docs/examples/rust/nativeexample/examples/subscribe_demo.rs}}
-```
\ No newline at end of file
+```
diff --git a/docs/zh/12-taos-sql/01-data-type.md b/docs/zh/12-taos-sql/01-data-type.md
index 8ac6ee3b87..628086f5a9 100644
--- a/docs/zh/12-taos-sql/01-data-type.md
+++ b/docs/zh/12-taos-sql/01-data-type.md
@@ -34,7 +34,7 @@ CREATE DATABASE db_name PRECISION 'ns';
| 7 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] |
| 8 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。 |
| 9 | SMALLINT | 2 | 短整型, 范围 [-32768, 32767] |
-| 10 | SMALLINT UNSIGNED | 2| 无符号短整型,范围 [0, 655357] |
+| 10 | SMALLINT UNSIGNED | 2| 无符号短整型,范围 [0, 65535] |
| 11 | TINYINT | 1 | 单字节整型,范围 [-128, 127] |
| 12 | TINYINT UNSIGNED | 1 | 无符号单字节整型,范围 [0, 255] |
| 13 | BOOL | 1 | 布尔型,{true, false} |
diff --git a/docs/zh/12-taos-sql/06-select.md b/docs/zh/12-taos-sql/06-select.md
index 8aa6c43747..5312d7d2f3 100644
--- a/docs/zh/12-taos-sql/06-select.md
+++ b/docs/zh/12-taos-sql/06-select.md
@@ -103,7 +103,7 @@ SELECT d1001.* FROM d1001,d1003 WHERE d1001.ts = d1003.ts;
在超级表和子表的查询中可以指定 _标签列_,且标签列的值会与普通列的数据一起返回。
```sql
-ELECT location, groupid, current FROM d1001 LIMIT 2;
+SELECT location, groupid, current FROM d1001 LIMIT 2;
```
### 结果去重
diff --git a/docs/zh/12-taos-sql/13-tmq.md b/docs/zh/12-taos-sql/13-tmq.md
index 4d9c475a38..b05d2bf680 100644
--- a/docs/zh/12-taos-sql/13-tmq.md
+++ b/docs/zh/12-taos-sql/13-tmq.md
@@ -1,6 +1,6 @@
---
-sidebar_label: 消息队列
-title: 消息队列
+sidebar_label: 数据订阅
+title: 数据订阅
---
TDengine 3.0.0.0 开始对消息队列做了大幅的优化和增强以简化用户的解决方案。
@@ -8,24 +8,17 @@ TDengine 3.0.0.0 开始对消息队列做了大幅的优化和增强以简化用
## 创建订阅主题
```sql
-CREATE TOPIC [IF NOT EXISTS] topic_name AS {subquery | DATABASE db_name | STABLE stb_name };
+CREATE TOPIC [IF NOT EXISTS] topic_name AS subquery;
```
-订阅主题包括三种:列订阅、超级表订阅和数据库订阅。
-**列订阅是**用 subquery 描述,支持过滤和标量函数和 UDF 标量函数,不支持 JOIN、GROUP BY、窗口切分子句、聚合函数和 UDF 聚合函数。列订阅规则如下:
+TOPIC 支持过滤和标量函数和 UDF 标量函数,不支持 JOIN、GROUP BY、窗口切分子句、聚合函数和 UDF 聚合函数。列订阅规则如下:
1. TOPIC 一旦创建则返回结果的字段确定
2. 被订阅或用于计算的列不可被删除、修改
3. 列可以新增,但新增的列不出现在订阅结果字段中
4. 对于 select \*,则订阅展开为创建时所有的列(子表、普通表为数据列,超级表为数据列加标签列)
-**超级表订阅和数据库订阅**规则如下:
-
-1. 被订阅主体的 schema 变更不受限
-2. 返回消息中 schema 是块级别的,每块的 schema 可能不一样
-3. 列变更后写入的数据若未落盘,将以写入时的 schema 返回
-4. 列变更后写入的数据若未已落盘,将以落盘时的 schema 返回
## 删除订阅主题
diff --git a/docs/zh/12-taos-sql/29-changes.md b/docs/zh/12-taos-sql/29-changes.md
new file mode 100644
index 0000000000..d653c59a5c
--- /dev/null
+++ b/docs/zh/12-taos-sql/29-changes.md
@@ -0,0 +1,95 @@
+---
+sidebar_label: 3.0 版本语法变更
+title: 3.0 版本语法变更
+description: "TDengine 3.0 版本的语法变更说明"
+---
+
+## SQL 基本元素变更
+
+| # | **元素** | **差异性** | **说明** |
+| - | :------- | :-------- | :------- |
+| 1 | VARCHAR | 新增 | BINARY类型的别名。
+| 2 | TIMESTAMP字面量 | 新增 | 新增支持 TIMESTAMP 'timestamp format' 语法。
+| 3 | _ROWTS伪列 | 新增 | 表示时间戳主键。是_C0伪列的别名。
+| 4 | INFORMATION_SCHEMA | 新增 | 包含各种SCHEMA定义的系统数据库。
+| 5 | PERFORMANCE_SCHEMA | 新增 | 包含运行信息的系统数据库。
+| 6 | 连续查询 | 废除 | 不再支持连续查询。相关的各种语法和接口废除。
+| 7 | 混合运算 | 增强 | 查询中的混合运算(标量运算和矢量运算混合)全面增强,SELECT的各个子句均全面支持符合语法语义的混合运算。
+| 8 | 标签运算 | 新增 |在查询中,标签列可以像普通列一样参与各种运算,用于各种子句。
+| 9 | 时间线子句和时间函数用于超级表查询 | 增强 |没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+
+## SQL 语句变更
+
+在 TDengine 中,普通表的数据模型中可使用以下数据类型。
+
+| # | **语句** | **差异性** | **说明** |
+| - | :------- | :-------- | :------- |
+| 1 | ALTER ACCOUNT | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
+| 2 | ALTER ALL DNODES | 新增 | 修改所有DNODE的参数。
+| 3 | ALTER DATABASE | 调整 | 废除<br/>- QUORUM:写入需要的副本确认数。3.0版本使用STRICT来指定强一致还是弱一致。3.0.0版本STRICT暂不支持修改。<br/>- BLOCKS:VNODE使用的内存块数。3.0版本使用BUFFER来表示VNODE写入内存池的大小。<br/>- UPDATE:更新操作的支持模式。3.0版本所有数据库都支持部分列更新。<br/>- CACHELAST:缓存最新一行数据的模式。3.0版本用CACHEMODEL代替。<br/>- COMP:3.0版本暂不支持修改。<br/>新增<br/>- CACHEMODEL:表示是否在内存中缓存子表的最近数据。<br/>- CACHESIZE:表示缓存子表最近数据的内存大小。<br/>- WAL_FSYNC_PERIOD:代替原FSYNC参数。<br/>- WAL_LEVEL:代替原WAL参数。<br/>调整<br/>- REPLICA:3.0.0版本暂不支持修改。<br/>- KEEP:3.0版本新增支持带单位的设置方式。
+| 4 | ALTER STABLE | 调整 | 废除<br/>- CHANGE TAG:修改标签列的名称。3.0版本使用RENAME TAG代替。<br/>新增<br/>- RENAME TAG:代替原CHANGE TAG子句。<br/>- COMMENT:修改超级表的注释。
+| 5 | ALTER TABLE | 调整 | 废除<br/>- CHANGE TAG:修改标签列的名称。3.0版本使用RENAME TAG代替。<br/>新增<br/>- RENAME TAG:代替原CHANGE TAG子句。<br/>- COMMENT:修改表的注释。<br/>- TTL:修改表的生命周期。
+| 6 | ALTER USER | 调整 | 废除<br/>- PRIVILEGE:修改用户权限。3.0版本使用GRANT和REVOKE来授予和回收权限。<br/>新增<br/>- ENABLE:启用或停用此用户。<br/>- SYSINFO:修改用户是否可查看系统信息。
+| 7 | COMPACT VNODES | 暂不支持 | 整理指定VNODE的数据。3.0.0版本暂不支持。
+| 8 | CREATE ACCOUNT | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
+| 9 | CREATE DATABASE | 调整 | 废除<br/>- BLOCKS:VNODE使用的内存块数。3.0版本使用BUFFER来表示VNODE写入内存池的大小。<br/>- CACHE:VNODE使用的内存块的大小。3.0版本使用BUFFER来表示VNODE写入内存池的大小。<br/>- CACHELAST:缓存最新一行数据的模式。3.0版本用CACHEMODEL代替。<br/>- DAYS:数据文件存储数据的时间跨度。3.0版本使用DURATION代替。<br/>- FSYNC:当 WAL 设置为 2 时,执行 fsync 的周期。3.0版本使用WAL_FSYNC_PERIOD代替。<br/>- QUORUM:写入需要的副本确认数。3.0版本使用STRICT来指定强一致还是弱一致。<br/>- UPDATE:更新操作的支持模式。3.0版本所有数据库都支持部分列更新。<br/>- WAL:WAL 级别。3.0版本使用WAL_LEVEL代替。<br/>新增<br/>- BUFFER:一个 VNODE 写入内存池大小。<br/>- CACHEMODEL:表示是否在内存中缓存子表的最近数据。<br/>- CACHESIZE:表示缓存子表最近数据的内存大小。<br/>- DURATION:代替原DAYS参数。新增支持带单位的设置方式。<br/>- PAGES:一个 VNODE 中元数据存储引擎的缓存页个数。<br/>- PAGESIZE:一个 VNODE 中元数据存储引擎的页大小。<br/>- RETENTIONS:表示数据的聚合周期和保存时长。<br/>- STRICT:表示数据同步的一致性要求。<br/>- SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表。<br/>- VGROUPS:数据库中初始VGROUP的数目。<br/>- WAL_FSYNC_PERIOD:代替原FSYNC参数。<br/>- WAL_LEVEL:代替原WAL参数。<br/>- WAL_RETENTION_PERIOD:wal文件的额外保留策略,用于数据订阅。<br/>- WAL_RETENTION_SIZE:wal文件的额外保留策略,用于数据订阅。<br/>- WAL_ROLL_PERIOD:wal文件切换时长。<br/>- WAL_SEGMENT_SIZE:wal单个文件大小。<br/>调整<br/>- KEEP:3.0版本新增支持带单位的设置方式。
+| 10 | CREATE DNODE | 调整 | 新增主机名和端口号分开指定语法- CREATE DNODE dnode_host_name PORT port_val
+| 11 | CREATE INDEX | 新增 | 创建SMA索引。
+| 12 | CREATE MNODE | 新增 | 创建管理节点。
+| 13 | CREATE QNODE | 新增 | 创建查询节点。
+| 14 | CREATE STABLE | 调整 | 新增表参数语法COMMENT:表注释。
+| 15 | CREATE STREAM | 新增 | 创建流。
+| 16 | CREATE TABLE | 调整 | 新增表参数语法<br/>- COMMENT:表注释。<br/>- WATERMARK:指定窗口的关闭时间。<br/>- MAX_DELAY:用于控制推送计算结果的最大延迟。<br/>- ROLLUP:指定的聚合函数,提供基于多层级的降采样聚合结果。<br/>- SMA:提供基于数据块的自定义预计算功能。<br/>- TTL:用来指定表的生命周期的参数。
+| 17 | CREATE TOPIC | 新增 | 创建订阅主题。
+| 18 | DROP ACCOUNT | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
+| 19 | DROP CONSUMER GROUP | 新增 | 删除消费组。
+| 20 | DROP INDEX | 新增 | 删除索引。
+| 21 | DROP MNODE | 新增 | 创建管理节点。
+| 22 | DROP QNODE | 新增 | 创建查询节点。
+| 23 | DROP STREAM | 新增 | 删除流。
+| 24 | DROP TABLE | 调整 | 新增批量删除语法
+| 25 | DROP TOPIC | 新增 | 删除订阅主题。
+| 26 | EXPLAIN | 新增 | 查看查询语句的执行计划。
+| 27 | GRANT | 新增 | 授予用户权限。
+| 28 | KILL TRANSACTION | 新增 | 终止管理节点的事务。
+| 29 | KILL STREAM | 废除 | 终止连续查询。3.0版本不再支持连续查询,而是用更通用的流计算来代替。
+| 30 | MERGE VGROUP | 新增 | 合并VGROUP。
+| 31 | REVOKE | 新增 | 回收用户权限。
+| 32 | SELECT | 调整 | - SELECT关闭隐式结果列,输出列均需要由SELECT子句来指定。<br/>- DISTINCT功能全面支持。2.x版本只支持对标签列去重,并且不可以和JOIN、GROUP BY等子句混用。<br/>- JOIN功能增强。增加支持:JOIN后WHERE条件中有OR条件;JOIN后的多表运算;JOIN后的多表GROUP BY。<br/>- FROM后子查询功能大幅增强。不限制子查询嵌套层数;支持子查询和UNION ALL混合使用;移除其他一些之前版本的语法限制。<br/>- WHERE后可以使用任意的标量表达式。<br/>- GROUP BY功能增强。支持任意标量表达式及其组合的分组。<br/>- SESSION可以用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。<br/>- STATE_WINDOW可以用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。<br/>- ORDER BY功能大幅增强。不再必须和GROUP BY子句一起使用;不再有排序表达式个数的限制;增加支持NULLS FIRST/LAST语法功能;支持符合语法语义的任意表达式。<br/>- 新增PARTITION BY语法。替代原来的GROUP BY tags。
+| 33 | SHOW ACCOUNTS | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
+| 34 | SHOW APPS |新增 | 显示接入集群的应用(客户端)信息。
+| 35 | SHOW CONSUMERS | 新增 | 显示当前数据库下所有活跃的消费者的信息。
+| 36 | SHOW DATABASES | 调整 | 3.0版本只显示数据库名。
+| 37 | SHOW FUNCTIONS | 调整 | 3.0版本只显示自定义函数名。
+| 38 | SHOW LICENCE | 新增 | 和SHOW GRANTS 命令等效。
+| 39 | SHOW INDEXES | 新增 | 显示已创建的索引。
+| 40 | SHOW LOCAL VARIABLES | 新增 | 显示当前客户端配置参数的运行值。
+| 41 | SHOW MODULES | 废除 | 显示当前系统中所安装的组件的信息。
+| 42 | SHOW QNODES | 新增 | 显示当前系统中QNODE的信息。
+| 43 | SHOW STABLES | 调整 | 3.0版本只显示超级表名。
+| 44 | SHOW STREAMS | 调整 | 2.x版本此命令显示系统中已创建的连续查询的信息。3.0版本废除了连续查询,用流代替。此命令显示已创建的流。
+| 45 | SHOW SUBSCRIPTIONS | 新增 | 显示当前数据库下的所有的订阅关系
+| 46 | SHOW TABLES | 调整 | 3.0版本只显示表名。
+| 47 | SHOW TABLE DISTRIBUTED | 新增 | 显示表的数据分布信息。代替2.x版本中的SELECT _block_dist() FROM { tb_name | stb_name }方式。
+| 48 | SHOW TOPICS | 新增 | 显示当前数据库下的所有订阅主题。
+| 49 | SHOW TRANSACTIONS | 新增 | 显示当前系统中正在执行的事务的信息。
+| 50 | SHOW DNODE VARIABLES | 新增 |显示指定DNODE的配置参数。
+| 51 | SHOW VNODES | 暂不支持 | 显示当前系统中VNODE的信息。3.0.0版本暂不支持。
+| 52 | SPLIT VGROUP | 新增 | 拆分VGROUP。
+| 53 | TRIM DATABASE | 新增 | 删除过期数据,并根据多级存储的配置归整数据。
+
+## SQL 函数变更
+
+| # | **函数** | **差异性** | **说明** |
+| - | :------- | :-------- | :------- |
+| 1 | TWA | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 2 | IRATE | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 3 | LEASTSQUARES | 增强 | 可以用于超级表了。
+| 4 | ELAPSED | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 5 | DIFF | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 6 | DERIVATIVE | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 7 | CSUM | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 8 | MAVG | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 9 | SAMPLE | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 10 | STATECOUNT | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 11 | STATEDURATION | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
diff --git a/docs/zh/12-taos-sql/index.md b/docs/zh/12-taos-sql/index.md
index 900fff1ba2..821679551c 100644
--- a/docs/zh/12-taos-sql/index.md
+++ b/docs/zh/12-taos-sql/index.md
@@ -3,7 +3,7 @@ title: TAOS SQL
description: "TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容"
---
-本文档说明 TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容。阅读本文档需要读者具有基本的 SQL 语言的基础。
+本文档说明 TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容。阅读本文档需要读者具有基本的 SQL 语言的基础。TDengine 3.0 版本相比 2.x 版本做了大量改进和优化,特别是查询引擎进行了彻底的重构,因此 SQL 语法相比 2.x 版本有很多变更。详细的变更内容请见 [3.0 版本语法变更](/taos-sql/changes) 章节
TAOS SQL 是用户对 TDengine 进行数据写入和查询的主要工具。TAOS SQL 提供标准的 SQL 语法,并针对时序数据和业务的特点优化和新增了许多语法和功能。TAOS SQL 语句的最大长度为 1M。TAOS SQL 不支持关键字的缩写,例如 DELETE 不能缩写为 DEL。
diff --git a/docs/zh/14-reference/02-rest-api/02-rest-api.mdx b/docs/zh/14-reference/02-rest-api/02-rest-api.mdx
index ba43aa30fd..4b9171c07d 100644
--- a/docs/zh/14-reference/02-rest-api/02-rest-api.mdx
+++ b/docs/zh/14-reference/02-rest-api/02-rest-api.mdx
@@ -2,7 +2,7 @@
title: REST API
---
-为支持各种不同类型平台的开发,TDengine 提供符合 REST 设计标准的 API,即 REST API。为最大程度降低学习成本,不同于其他数据库 REST API 的设计方法,TDengine 直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库,仅需要一个 URL。REST 连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。
+为支持各种不同类型平台的开发,TDengine 提供符合 REST 设计标准的 API,即 REST API。为最大程度降低学习成本,不同于其他数据库 REST API 的设计方法,TDengine 直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库,仅需要一个 URL。REST 连接器的使用参见 [视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。
:::note
与原生连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。支持在 RESTful URL 中指定 db_name,这时如果 SQL 语句中没有指定数据库名前缀的话,会使用 URL 中指定的这个 db_name。
@@ -20,8 +20,10 @@ RESTful 接口不依赖于任何 TDengine 的库,因此客户端不需要安
下面示例是列出所有的数据库,请把 h1.taosdata.com 和 6041(缺省值)替换为实际运行的 TDengine 服务 FQDN 和端口号:
-```html
-curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" h1.taosdata.com:6041/rest/sql
+```bash
+curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" \
+ -d "select name, ntables, status from information_schema.ins_databases;" \
+ h1.taosdata.com:6041/rest/sql
```
返回值结果如下表示验证通过:
@@ -35,188 +37,27 @@ curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" h1.t
"VARCHAR",
64
],
- [
- "create_time",
- "TIMESTAMP",
- 8
- ],
- [
- "vgroups",
- "SMALLINT",
- 2
- ],
[
"ntables",
"BIGINT",
8
],
- [
- "replica",
- "TINYINT",
- 1
- ],
- [
- "strict",
- "VARCHAR",
- 4
- ],
- [
- "duration",
- "VARCHAR",
- 10
- ],
- [
- "keep",
- "VARCHAR",
- 32
- ],
- [
- "buffer",
- "INT",
- 4
- ],
- [
- "pagesize",
- "INT",
- 4
- ],
- [
- "pages",
- "INT",
- 4
- ],
- [
- "minrows",
- "INT",
- 4
- ],
- [
- "maxrows",
- "INT",
- 4
- ],
- [
- "comp",
- "TINYINT",
- 1
- ],
- [
- "precision",
- "VARCHAR",
- 2
- ],
[
"status",
"VARCHAR",
10
- ],
- [
- "retention",
- "VARCHAR",
- 60
- ],
- [
- "single_stable",
- "BOOL",
- 1
- ],
- [
- "cachemodel",
- "VARCHAR",
- 11
- ],
- [
- "cachesize",
- "INT",
- 4
- ],
- [
- "wal_level",
- "TINYINT",
- 1
- ],
- [
- "wal_fsync_period",
- "INT",
- 4
- ],
- [
- "wal_retention_period",
- "INT",
- 4
- ],
- [
- "wal_retention_size",
- "BIGINT",
- 8
- ],
- [
- "wal_roll_period",
- "INT",
- 4
- ],
- [
- "wal_seg_size",
- "BIGINT",
- 8
]
],
"data": [
[
"information_schema",
- null,
- null,
- 14,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- "ready",
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null
+ 16,
+ "ready"
],
[
"performance_schema",
- null,
- null,
- 3,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- "ready",
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null
+ 9,
+ "ready"
]
],
"rows": 2
@@ -231,21 +72,21 @@ http://:/rest/sql/[db_name]
参数说明:
-- fqnd: 集群中的任一台主机 FQDN 或 IP 地址
-- port: 配置文件中 httpPort 配置项,缺省为 6041
+- fqdn: 集群中的任一台主机 FQDN 或 IP 地址。
+- port: 配置文件中 httpPort 配置项,缺省为 6041。
- db_name: 可选参数,指定本次所执行的 SQL 语句的默认数据库库名。
例如:`http://h1.taos.com:6041/rest/sql/test` 是指向地址为 `h1.taos.com:6041` 的 URL,并将默认使用的数据库库名设置为 `test`。
HTTP 请求的 Header 里需带有身份认证信息,TDengine 支持 Basic 认证与自定义认证两种机制,后续版本将提供标准安全的数字签名机制来做身份验证。
-- [自定义身份认证信息](#自定义授权码)如下所示
+- [自定义身份认证信息](#自定义授权码)如下所示:
```text
Authorization: Taosd
```
-- Basic 身份认证信息如下所示
+- Basic 身份认证信息如下所示:
```text
Authorization: Basic
@@ -259,13 +100,13 @@ HTTP 请求的 BODY 里就是一个完整的 SQL 语句,SQL 语句中的数据
curl -L -H "Authorization: Basic " -d "" :/rest/sql/[db_name]
```
-或者
+或者,
```bash
curl -L -u username:password -d "" :/rest/sql/[db_name]
```
-其中,`TOKEN` 为 `{username}:{password}` 经过 Base64 编码之后的字符串,例如 `root:taosdata` 编码后为 `cm9vdDp0YW9zZGF0YQ==`
+其中,`TOKEN` 为 `{username}:{password}` 经过 Base64 编码之后的字符串,例如 `root:taosdata` 编码后为 `cm9vdDp0YW9zZGF0YQ==`。
## HTTP 返回格式
@@ -282,27 +123,9 @@ curl -L -u username:password -d "" :/rest/sql/[db_name]
### HTTP body 结构
-
-
- 执行结果 |
- 说明 |
- 样例 |
-
-
- 正确执行 |
-
- code:(int)0 代表成功
-
-
- column_meta:([][3]any)列信息,每个列会用三个值来说明,分别为:列名(string)、列类型(string)、类型长度(int)
-
-
- rows:(int)数据返回行数
-
-
- data:([][]any)具体数据内容
- |
-
+#### 正确执行
+
+样例:
```json
{
@@ -313,23 +136,16 @@ curl -L -u username:password -d "" :/rest/sql/[db_name]
}
```
- |
-
-
- 正确查询 |
-
- code:(int)0 代表成功
-
-
- column_meta:([][3]any) 列信息,每个列会用三个值来说明,分别为:列名(string)、列类型(string)、类型长度(int)
-
-
- rows:(int)数据返回行数
-
-
- data:([][]any)具体数据内容
- |
-
+说明:
+
+- code:(`int`)0 代表成功。
+- column_meta:(`[1][3]any`)只返回 `[["affected_rows", "INT", 4]]`。
+- rows:(`int`)只返回 `1`。
+- data:(`[][]any`)返回受影响行数。
+
+#### 正确查询
+
+样例:
```json
{
@@ -385,17 +201,35 @@ curl -L -u username:password -d "" :/rest/sql/[db_name]
}
```
- |
-
-
- 错误 |
-
- code:(int)错误码
-
-
- desc:(string)错误描述
- |
-
+说明:
+
+- code:(`int`)0 代表成功。
+- column_meta:(`[][3]any`) 列信息,每个列会用三个值来说明,分别为:列名(string)、列类型(string)、类型长度(int)。
+- rows:(`int`)数据返回行数。
+- data:(`[][]any`)具体数据内容(时间格式仅支持 RFC3339,结果集为 0 时区)。
+
+列类型使用如下字符串:
+
+- "NULL"
+- "BOOL"
+- "TINYINT"
+- "SMALLINT"
+- "INT"
+- "BIGINT"
+- "FLOAT"
+- "DOUBLE"
+- "VARCHAR"
+- "TIMESTAMP"
+- "NCHAR"
+- "TINYINT UNSIGNED"
+- "SMALLINT UNSIGNED"
+- "INT UNSIGNED"
+- "BIGINT UNSIGNED"
+- "JSON"
+
+#### 错误
+
+样例:
```json
{
@@ -404,30 +238,10 @@ curl -L -u username:password -d "" :/rest/sql/[db_name]
}
```
- |
-
-
+说明:
-### 说明
-
-- 时间格式仅支持 RFC3339,结果集为 0 时区
-- 列类型使用如下字符串:
- > "NULL"
- > "BOOL"
- > "TINYINT"
- > "SMALLINT"
- > "INT"
- > "BIGINT"
- > "FLOAT"
- > "DOUBLE"
- > "VARCHAR"
- > "TIMESTAMP"
- > "NCHAR"
- > "TINYINT UNSIGNED"
- > "SMALLINT UNSIGNED"
- > "INT UNSIGNED"
- > "BIGINT UNSIGNED"
- > "JSON"
+- code:(`int`)错误码。
+- desc:(`string`)错误描述。
## 自定义授权码
@@ -439,11 +253,9 @@ curl http://:/rest/login//
其中,`fqdn` 是 TDengine 数据库的 FQDN 或 IP 地址,`port` 是 TDengine 服务的端口号,`username` 为数据库用户名,`password` 为数据库密码,返回值为 JSON 格式,各字段含义如下:
-- status:请求结果的标志位
-
-- code:返回值代码
-
-- desc:授权码
+- status:请求结果的标志位。
+- code:返回值代码。
+- desc:授权码。
获取授权码示例:
diff --git a/docs/zh/14-reference/03-connector/_linux_install.mdx b/docs/zh/14-reference/03-connector/_linux_install.mdx
index eb7f683288..c3ddff53cd 100644
--- a/docs/zh/14-reference/03-connector/_linux_install.mdx
+++ b/docs/zh/14-reference/03-connector/_linux_install.mdx
@@ -1,10 +1,10 @@
-import PkgList from "/components/PkgList";
+import PkgListV3 from "/components/PkgListV3";
1. 下载客户端安装包
-
+
- [所有下载](https://www.taosdata.com/cn/all-downloads/)
+ [所有下载](../../releases)
2. 解压缩软件包
diff --git a/docs/zh/14-reference/03-connector/_windows_install.mdx b/docs/zh/14-reference/03-connector/_windows_install.mdx
index 755f96b2d7..9fdefa04c0 100644
--- a/docs/zh/14-reference/03-connector/_windows_install.mdx
+++ b/docs/zh/14-reference/03-connector/_windows_install.mdx
@@ -1,11 +1,10 @@
-import PkgList from "/components/PkgList";
+import PkgListV3 from "/components/PkgListV3";
1. 下载客户端安装包
-
-
- [所有下载](https://www.taosdata.com/cn/all-downloads/)
+
+ [所有下载](../../releases)
2. 执行安装程序,按提示选择默认值,完成安装
3. 安装路径
diff --git a/docs/zh/14-reference/03-connector/cpp.mdx b/docs/zh/14-reference/03-connector/cpp.mdx
index 3a8367ef33..bd5776d035 100644
--- a/docs/zh/14-reference/03-connector/cpp.mdx
+++ b/docs/zh/14-reference/03-connector/cpp.mdx
@@ -404,47 +404,3 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多
**支持版本**
该功能接口从 2.3.0.0 版本开始支持。
-
-### 订阅和消费 API
-
-订阅 API 目前支持订阅一张或多张表,并通过定期轮询的方式不断获取写入表中的最新数据。
-
-- `TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)`
-
- 该函数负责启动订阅服务,成功时返回订阅对象,失败时返回 `NULL`,其参数为:
-
- - taos:已经建立好的数据库连接
- - restart:如果订阅已经存在,是重新开始,还是继续之前的订阅
- - topic:订阅的主题(即名称),此参数是订阅的唯一标识
- - sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据
- - fp:收到查询结果时的回调函数(稍后介绍函数原型),只在异步调用时使用,同步调用时此参数应该传 `NULL`
- - param:调用回调函数时的附加参数,系统 API 将其原样传递到回调函数,不进行任何处理
- - interval:轮询周期,单位为毫秒。异步调用时,将根据此参数周期性的调用回调函数,为避免对系统性能造成影响,不建议将此参数设置的过小;同步调用时,如两次调用 `taos_consume()` 的间隔小于此周期,API 将会阻塞,直到时间间隔超过此周期。
-
-- `typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code)`
-
- 异步模式下,回调函数的原型,其参数为:
-
- - tsub:订阅对象
- - res:查询结果集,注意结果集中可能没有记录
- - param:调用 `taos_subscribe()` 时客户程序提供的附加参数
- - code:错误码
-
- :::note
- 在这个回调函数里不可以做耗时过长的处理,尤其是对于返回的结果集中数据较多的情况,否则有可能导致客户端阻塞等异常状态。如果必须进行复杂计算,则建议在另外的线程中进行处理。
-
- :::
-
-- `TAOS_RES *taos_consume(TAOS_SUB *tsub)`
-
- 同步模式下,该函数用来获取订阅的结果。 用户应用程序将其置于一个循环之中。 如两次调用 `taos_consume()` 的间隔小于订阅的轮询周期,API 将会阻塞,直到时间间隔超过此周期。如果数据库有新记录到达,该 API 将返回该最新的记录,否则返回一个没有记录的空结果集。 如果返回值为 `NULL`,说明系统出错。 异步模式下,用户程序不应调用此 API。
-
- :::note
- 在调用 `taos_consume()` 之后,用户应用应确保尽快调用 `taos_fetch_row()` 或 `taos_fetch_block()` 来处理订阅结果,否则服务端会持续缓存查询结果数据等待客户端读取,极端情况下会导致服务端内存消耗殆尽,影响服务稳定性。
-
- :::
-
-- `void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress)`
-
- 取消订阅。 如参数 `keepProgress` 不为 0,API 会保留订阅的进度信息,后续调用 `taos_subscribe()` 时可以基于此进度继续;否则将删除进度信息,后续只能重新开始读取数据。
-
diff --git a/docs/zh/14-reference/03-connector/java.mdx b/docs/zh/14-reference/03-connector/java.mdx
index c9d74dcaeb..6a78902b1e 100644
--- a/docs/zh/14-reference/03-connector/java.mdx
+++ b/docs/zh/14-reference/03-connector/java.mdx
@@ -93,12 +93,12 @@ Maven 项目中,在 pom.xml 中添加以下依赖:
可以通过下载 TDengine 的源码,自己编译最新版本的 Java connector
```shell
-git clone https://github.com/taosdata/taos-connector-jdbc.git --branch 2.0
+git clone https://github.com/taosdata/taos-connector-jdbc.git
cd taos-connector-jdbc
mvn clean install -Dmaven.test.skip=true
```
-编译后,在 target 目录下会产生 taos-jdbcdriver-2.0.XX-dist.jar 的 jar 包,并自动将编译的 jar 文件放在本地的 Maven 仓库中。
+编译后,在 target 目录下会产生 taos-jdbcdriver-3.0.*-dist.jar 的 jar 包,并自动将编译的 jar 文件放在本地的 Maven 仓库中。
@@ -198,7 +198,7 @@ url 中的配置参数如下:
- user:登录 TDengine 用户名,默认值 'root'。
- password:用户登录密码,默认值 'taosdata'。
-- batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。逐行拉取结果集使用 HTTP 方式进行数据传输。从 taos-jdbcdriver-2.0.38 开始,JDBC REST 连接增加批量拉取数据功能。taos-jdbcdriver 与 TDengine 之间通过 WebSocket 连接进行数据传输。相较于 HTTP,WebSocket 可以使 JDBC REST 连接支持大数据量查询,并提升查询性能。
+- batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。逐行拉取结果集使用 HTTP 方式进行数据传输。JDBC REST 连接支持批量拉取数据功能。taos-jdbcdriver 与 TDengine 之间通过 WebSocket 连接进行数据传输。相较于 HTTP,WebSocket 可以使 JDBC REST 连接支持大数据量查询,并提升查询性能。
- charset: 当开启批量拉取数据时,指定解析字符串数据的字符集。
- batchErrorIgnore:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败,继续执行下面的 SQL 了。false:不再执行失败 SQL 后的任何语句。默认值为:false。
- httpConnectTimeout: 连接超时时间,单位 ms, 默认值为 5000。
@@ -216,7 +216,7 @@ url 中的配置参数如下:
INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6);
```
-- 从 taos-jdbcdriver-2.0.36 开始,如果在 url 中指定了 dbname,那么,JDBC REST 连接会默认使用/rest/sql/dbname 作为 restful 请求的 url,在 SQL 中不需要指定 dbname。例如:url 为 jdbc:TAOS-RS://127.0.0.1:6041/test,那么,可以执行 sql:insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6);
+- 如果在 url 中指定了 dbname,那么,JDBC REST 连接会默认使用/rest/sql/dbname 作为 restful 请求的 url,在 SQL 中不需要指定 dbname。例如:url 为 jdbc:TAOS-RS://127.0.0.1:6041/test,那么,可以执行 sql:insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6);
:::
@@ -230,7 +230,7 @@ INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFra
**注意**:
- 应用中设置的 client parameter 为进程级别的,即如果要更新 client 的参数,需要重启应用。这是因为 client parameter 是全局参数,仅在应用程序的第一次设置生效。
-- 以下示例代码基于 taos-jdbcdriver-2.0.36。
+- 以下示例代码基于 taos-jdbcdriver-3.0.0。
```java
public Connection getConn() throws Exception{
@@ -367,7 +367,7 @@ TDengine 的 JDBC 原生连接实现大幅改进了参数绑定方式对数据
**注意**:
- JDBC REST 连接目前不支持参数绑定
-- 以下示例代码基于 taos-jdbcdriver-2.0.36
+- 以下示例代码基于 taos-jdbcdriver-3.0.0
- binary 类型数据需要调用 setString 方法,nchar 类型数据需要调用 setNString 方法
- setString 和 setNString 都要求用户在 size 参数里声明表定义中对应列的列宽
@@ -635,7 +635,7 @@ TDengine 支持无模式写入功能。无模式写入兼容 InfluxDB 的 行协
**注意**:
- JDBC REST 连接目前不支持无模式写入
-- 以下示例代码基于 taos-jdbcdriver-2.0.36
+- 以下示例代码基于 taos-jdbcdriver-3.0.0
```java
public class SchemalessInsertTest {
@@ -666,7 +666,7 @@ public class SchemalessInsertTest {
}
```
-### 订阅
+### 数据订阅
TDengine Java 连接器支持订阅功能,应用 API 如下:
@@ -712,14 +712,19 @@ while(true) {
}
```
-`poll` 每次调用获取一个消息。请按需选择合理的调用 `poll` 的频率(如例子中的 `Duration.ofMillis(100)`),否则会给服务端造成不必要的压力。
+`poll` 每次调用获取一个消息。
#### 关闭订阅
```java
+// 取消订阅
+consumer.unsubscribe();
+// 关闭消费
consumer.close()
```
+详情请参考:[数据订阅](../../../develop/tmq)
+
### 使用示例如下:
```java
@@ -734,7 +739,7 @@ public abstract class ConsumerLoop {
config.setProperty("msg.with.table.name", "true");
config.setProperty("enable.auto.commit", "true");
config.setProperty("group.id", "group1");
- config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ResultDeserializer");
+ config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
this.consumer = new TaosConsumer<>(config);
this.topics = Collections.singletonList("topic_speed");
@@ -754,8 +759,9 @@ public abstract class ConsumerLoop {
process(record);
}
}
+ consumer.unsubscribe();
} finally {
- consumer.close();
+ consumer.close();
shutdownLatch.countDown();
}
}
@@ -765,11 +771,11 @@ public abstract class ConsumerLoop {
shutdownLatch.await();
}
- static class ResultDeserializer extends ReferenceDeserializer {
+ public static class ResultDeserializer extends ReferenceDeserializer {
}
- static class ResultBean {
+ public static class ResultBean {
private Timestamp ts;
private int speed;
@@ -875,6 +881,7 @@ public static void main(String[] args) throws Exception {
| taos-jdbcdriver 版本 | 主要变化 |
| :------------------: | :----------------------------: |
+| 3.0.0 | 支持 TDengine 3.0 |
| 2.0.39 - 2.0.40 | 增加 REST 连接/请求 超时设置 |
| 2.0.38 | JDBC REST 连接增加批量拉取功能 |
| 2.0.37 | 增加对 json tag 支持 |
diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md
index 845693a98e..d2efc5baf3 100644
--- a/docs/zh/14-reference/12-config/index.md
+++ b/docs/zh/14-reference/12-config/index.md
@@ -647,3 +647,173 @@ charset 的有效值是 UTF-8。
| 含义 | 是否启动 udf 服务 |
| 取值范围 | 0: 不启动;1:启动 |
| 缺省值 | 1 |
+
+## 2.X 与 3.0 配置参数对比
+| # | **参数** | **适用于 2.X 版本** | **适用于 3.0 版本** |
+| --- | :-----------------: | --------------- | --------------- |
+| 1 | firstEp | 是 | 是 |
+| 2 | secondEp | 是 | 是 |
+| 3 | fqdn | 是 | 是 |
+| 4 | serverPort | 是 | 是 |
+| 5 | maxShellConns | 是 | 是 |
+| 6 | monitor | 是 | 是 |
+| 7 | monitorFqdn | 否 | 是 |
+| 8 | monitorPort | 否 | 是 |
+| 9 | monitorInterval | 是 | 是 |
+| 10 | monitorMaxLogs | 否 | 是 |
+| 11 | monitorComp | 否 | 是 |
+| 12 | telemetryReporting | 是 | 是 |
+| 13 | telemetryInterval | 否 | 是 |
+| 14 | telemetryServer | 否 | 是 |
+| 15 | telemetryPort | 否 | 是 |
+| 16 | queryPolicy | 否 | 是 |
+| 17 | querySmaOptimize | 否 | 是 |
+| 18 | queryBufferSize | 是 | 是 |
+| 19 | maxNumOfDistinctRes | 是 | 是 |
+| 20 | minSlidingTime | 是 | 是 |
+| 21 | minIntervalTime | 是 | 是 |
+| 22 | countAlwaysReturnValue | 是 | 是 |
+| 23 | dataDir | 是 | 是 |
+| 24 | minimalDataDirGB | 是 | 是 |
+| 25 | supportVnodes | 否 | 是 |
+| 26 | tempDir | 是 | 是 |
+| 27 | minimalTmpDirGB | 是 | 是 |
+| 28 | compressMsgSize | 是 | 是 |
+| 29 | compressColData | 是 | 是 |
+| 30 | smlChildTableName | 是 | 是 |
+| 31 | smlTagName | 是 | 是 |
+| 32 | smlDataFormat | 否 | 是 |
+| 33 | statusInterval | 是 | 是 |
+| 34 | shellActivityTimer | 是 | 是 |
+| 35 | transPullupInterval | 否 | 是 |
+| 36 | mqRebalanceInterval | 否 | 是 |
+| 37 | ttlUnit | 否 | 是 |
+| 38 | ttlPushInterval | 否 | 是 |
+| 39 | numOfTaskQueueThreads | 否 | 是 |
+| 40 | numOfRpcThreads | 否 | 是 |
+| 41 | numOfCommitThreads | 是 | 是 |
+| 42 | numOfMnodeReadThreads | 否 | 是 |
+| 43 | numOfVnodeQueryThreads | 否 | 是 |
+| 44 | numOfVnodeStreamThreads | 否 | 是 |
+| 45 | numOfVnodeFetchThreads | 否 | 是 |
+| 46 | numOfVnodeWriteThreads | 否 | 是 |
+| 47 | numOfVnodeSyncThreads | 否 | 是 |
+| 48 | numOfQnodeQueryThreads | 否 | 是 |
+| 49 | numOfQnodeFetchThreads | 否 | 是 |
+| 50 | numOfSnodeSharedThreads | 否 | 是 |
+| 51 | numOfSnodeUniqueThreads | 否 | 是 |
+| 52 | rpcQueueMemoryAllowed | 否 | 是 |
+| 53 | logDir | 是 | 是 |
+| 54 | minimalLogDirGB | 是 | 是 |
+| 55 | numOfLogLines | 是 | 是 |
+| 56 | asyncLog | 是 | 是 |
+| 57 | logKeepDays | 是 | 是 |
+| 58 | debugFlag | 是 | 是 |
+| 59 | tmrDebugFlag | 是 | 是 |
+| 60 | uDebugFlag | 是 | 是 |
+| 61 | rpcDebugFlag | 是 | 是 |
+| 62 | jniDebugFlag | 是 | 是 |
+| 63 | qDebugFlag | 是 | 是 |
+| 64 | cDebugFlag | 是 | 是 |
+| 65 | dDebugFlag | 是 | 是 |
+| 66 | vDebugFlag | 是 | 是 |
+| 67 | mDebugFlag | 是 | 是 |
+| 68 | wDebugFlag | 是 | 是 |
+| 69 | sDebugFlag | 是 | 是 |
+| 70 | tsdbDebugFlag | 是 | 是 |
+| 71 | tqDebugFlag | 否 | 是 |
+| 72 | fsDebugFlag | 是 | 是 |
+| 73 | udfDebugFlag | 否 | 是 |
+| 74 | smaDebugFlag | 否 | 是 |
+| 75 | idxDebugFlag | 否 | 是 |
+| 76 | tdbDebugFlag | 否 | 是 |
+| 77 | metaDebugFlag | 否 | 是 |
+| 78 | timezone | 是 | 是 |
+| 79 | locale | 是 | 是 |
+| 80 | charset | 是 | 是 |
+| 81 | udf | 是 | 是 |
+| 82 | enableCoreFile | 是 | 是 |
+| 83 | arbitrator | 是 | 否 |
+| 84 | numOfThreadsPerCore | 是 | 否 |
+| 85 | numOfMnodes | 是 | 否 |
+| 86 | vnodeBak | 是 | 否 |
+| 87 | balance | 是 | 否 |
+| 88 | balanceInterval | 是 | 否 |
+| 89 | offlineThreshold | 是 | 否 |
+| 90 | role | 是 | 否 |
+| 91 | dnodeNopLoop | 是 | 否 |
+| 92 | keepTimeOffset | 是 | 否 |
+| 93 | rpcTimer | 是 | 否 |
+| 94 | rpcMaxTime | 是 | 否 |
+| 95 | rpcForceTcp | 是 | 否 |
+| 96 | tcpConnTimeout | 是 | 否 |
+| 97 | syncCheckInterval | 是 | 否 |
+| 98 | maxTmrCtrl | 是 | 否 |
+| 99 | monitorReplica | 是 | 否 |
+| 100 | smlTagNullName | 是 | 否 |
+| 101 | keepColumnName | 是 | 否 |
+| 102 | ratioOfQueryCores | 是 | 否 |
+| 103 | maxStreamCompDelay | 是 | 否 |
+| 104 | maxFirstStreamCompDelay | 是 | 否 |
+| 105 | retryStreamCompDelay | 是 | 否 |
+| 106 | streamCompDelayRatio | 是 | 否 |
+| 107 | maxVgroupsPerDb | 是 | 否 |
+| 108 | maxTablesPerVnode | 是 | 否 |
+| 109 | minTablesPerVnode | 是 | 否 |
+| 110 | tableIncStepPerVnode | 是 | 否 |
+| 111 | cache | 是 | 否 |
+| 112 | blocks | 是 | 否 |
+| 113 | days | 是 | 否 |
+| 114 | keep | 是 | 否 |
+| 115 | minRows | 是 | 否 |
+| 116 | maxRows | 是 | 否 |
+| 117 | quorum | 是 | 否 |
+| 118 | comp | 是 | 否 |
+| 119 | walLevel | 是 | 否 |
+| 120 | fsync | 是 | 否 |
+| 121 | replica | 是 | 否 |
+| 122 | partitions | 是 | 否 |
+| 123 | quorum | 是 | 否 |
+| 124 | update | 是 | 否 |
+| 125 | cachelast | 是 | 否 |
+| 126 | maxSQLLength | 是 | 否 |
+| 127 | maxWildCardsLength | 是 | 否 |
+| 128 | maxRegexStringLen | 是 | 否 |
+| 129 | maxNumOfOrderedRes | 是 | 否 |
+| 130 | maxConnections | 是 | 否 |
+| 131 | mnodeEqualVnodeNum | 是 | 否 |
+| 132 | http | 是 | 否 |
+| 133 | httpEnableRecordSql | 是 | 否 |
+| 134 | httpMaxThreads | 是 | 否 |
+| 135 | restfulRowLimit | 是 | 否 |
+| 136 | httpDbNameMandatory | 是 | 否 |
+| 137 | httpKeepAlive | 是 | 否 |
+| 138 | enableRecordSql | 是 | 否 |
+| 139 | maxBinaryDisplayWidth | 是 | 否 |
+| 140 | stream | 是 | 否 |
+| 141 | retrieveBlockingModel | 是 | 否 |
+| 142 | tsdbMetaCompactRatio | 是 | 否 |
+| 143 | defaultJSONStrType | 是 | 否 |
+| 144 | walFlushSize | 是 | 否 |
+| 145 | keepTimeOffset | 是 | 否 |
+| 146 | flowctrl | 是 | 否 |
+| 147 | slaveQuery | 是 | 否 |
+| 148 | adjustMaster | 是 | 否 |
+| 149 | topicBinaryLen | 是 | 否 |
+| 150 | telegrafUseFieldNum | 是 | 否 |
+| 151 | deadLockKillQuery | 是 | 否 |
+| 152 | clientMerge | 是 | 否 |
+| 153 | sdbDebugFlag | 是 | 否 |
+| 154 | odbcDebugFlag | 是 | 否 |
+| 155 | httpDebugFlag | 是 | 否 |
+| 156 | monDebugFlag | 是 | 否 |
+| 157 | cqDebugFlag | 是 | 否 |
+| 158 | shortcutFlag | 是 | 否 |
+| 159 | probeSeconds | 是 | 否 |
+| 160 | probeKillSeconds | 是 | 否 |
+| 161 | probeInterval | 是 | 否 |
+| 162 | lossyColumns | 是 | 否 |
+| 163 | fPrecision | 是 | 否 |
+| 164 | dPrecision | 是 | 否 |
+| 165 | maxRange | 是 | 否 |
+| 166 | range | 是 | 否 |
diff --git a/docs/zh/14-reference/15-taosKeeper.md b/docs/zh/14-reference/15-taosKeeper.md
new file mode 100644
index 0000000000..d3f96bc5a9
--- /dev/null
+++ b/docs/zh/14-reference/15-taosKeeper.md
@@ -0,0 +1,134 @@
+---
+sidebar_label: taosKeeper
+title: taosKeeper
+description: TDengine taosKeeper 使用说明
+---
+
+## 简介
+
+TaosKeeper 是 TDengine 3.0 版本监控指标的导出工具,通过简单的几项配置即可获取 TDengine 的运行状态。taosKeeper 使用 TDengine RESTful 接口,所以不需要安装 TDengine 客户端即可使用。
+
+## 安装
+
+
+taosKeeper 安装方式:
+
+
+
+
+- 单独编译 taosKeeper 并安装,详情请参考 [taosKeeper](https://github.com/taosdata/taoskeeper) 仓库。
+
+## 运行
+
+### 配置和运行方式
+
+
+taosKeeper 需要在操作系统终端执行,该工具支持 [配置文件启动](#配置文件启动)。
+
+**在运行 taosKeeper 之前要确保 TDengine 集群与 taosAdapter 已经在正确运行。**
+
+
+### 配置文件启动
+
+执行以下命令即可快速体验 taosKeeper。当不指定 taosKeeper 配置文件时,优先使用 `/etc/taos/keeper.toml` 配置,否则将使用默认配置。
+
+```shell
+taoskeeper -c
+```
+
+**下面是配置文件的示例:**
+```toml
+# gin 框架是否启用 debug
+debug = false
+
+# 服务监听端口, 默认为 6043
+port = 6043
+
+# 日志级别,包含 panic、error、info、debug、trace等
+loglevel = "info"
+
+# 程序中使用协程池的大小
+gopoolsize = 50000
+
+# 查询 TDengine 监控数据轮询间隔
+RotationInterval = "15s"
+
+[tdengine]
+host = "127.0.0.1"
+port = 6041
+username = "root"
+password = "taosdata"
+
+# 需要被监控的 taosAdapter
+[taosAdapter]
+address = ["127.0.0.1:6041","192.168.1.95:6041"]
+
+[metrics]
+# 监控指标前缀
+prefix = "taos"
+
+# 集群数据的标识符
+cluster = "production"
+
+# 存放监控数据的数据库
+database = "log"
+
+# 指定需要监控的普通表
+tables = ["normal_table"]
+```
+
+### 获取监控指标
+
+taosKeeper 作为 TDengine 监控指标的导出工具,可以将 TDengine 产生的监控数据记录在指定数据库中,并提供导出接口。
+
+#### 查看监控结果集
+
+```shell
+$ taos
+#
+> use log;
+> select * from cluster_info limit 1;
+```
+
+结果示例:
+
+```shell
+ ts | first_ep | first_ep_dnode_id | version | master_uptime | monitor_interval | dbs_total | tbs_total | stbs_total | dnodes_total | dnodes_alive | mnodes_total | mnodes_alive | vgroups_total | vgroups_alive | vnodes_total | vnodes_alive | connections_total | protocol | cluster_id |
+===============================================================================================================================================================================================================================================================================================================================================================================
+ 2022-08-16 17:37:01.629 | hlb:6030 | 1 | 3.0.0.0 | 0.27250 | 15 | 2 | 27 | 38 | 1 | 1 | 1 | 1 | 4 | 4 | 4 | 4 | 14 | 1 | 5981392874047724755 |
+Query OK, 1 rows in database (0.036162s)
+```
+
+#### 导出监控指标
+
+```shell
+curl http://127.0.0.1:6043/metrics
+```
+
+部分结果集:
+
+```shell
+# HELP taos_cluster_info_connections_total
+# TYPE taos_cluster_info_connections_total counter
+taos_cluster_info_connections_total{cluster_id="5981392874047724755"} 16
+# HELP taos_cluster_info_dbs_total
+# TYPE taos_cluster_info_dbs_total counter
+taos_cluster_info_dbs_total{cluster_id="5981392874047724755"} 2
+# HELP taos_cluster_info_dnodes_alive
+# TYPE taos_cluster_info_dnodes_alive counter
+taos_cluster_info_dnodes_alive{cluster_id="5981392874047724755"} 1
+# HELP taos_cluster_info_dnodes_total
+# TYPE taos_cluster_info_dnodes_total counter
+taos_cluster_info_dnodes_total{cluster_id="5981392874047724755"} 1
+# HELP taos_cluster_info_first_ep
+# TYPE taos_cluster_info_first_ep gauge
+taos_cluster_info_first_ep{cluster_id="5981392874047724755",value="hlb:6030"} 1
+```
\ No newline at end of file
diff --git a/docs/zh/28-releases.md b/docs/zh/28-releases.md
index 5f30325829..311d69ac1b 100644
--- a/docs/zh/28-releases.md
+++ b/docs/zh/28-releases.md
@@ -3,7 +3,7 @@ sidebar_label: 发布历史
title: 发布历史
---
-import Release from "/components/Release";
+import Release from "/components/ReleaseV3";
diff --git a/examples/c/tmq.c b/examples/c/tmq.c
index fc34915fe7..19adaad116 100644
--- a/examples/c/tmq.c
+++ b/examples/c/tmq.c
@@ -45,10 +45,9 @@ static int32_t msg_process(TAOS_RES* msg) {
int32_t numOfFields = taos_field_count(msg);
int32_t* length = taos_fetch_lengths(msg);
int32_t precision = taos_result_precision(msg);
- const char* tbName = tmq_get_table_name(msg);
rows++;
taos_print_row(buf, row, fields, numOfFields);
- printf("row content from %s: %s\n", (tbName != NULL ? tbName : "table null"), buf);
+ printf("row content: %s\n", buf);
}
return rows;
@@ -167,7 +166,7 @@ int32_t create_topic() {
}
taos_free_result(pRes);
- pRes = taos_query(pConn, "create topic topicname as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1");
+ pRes = taos_query(pConn, "create topic topicname as select ts, c1, c2, c3, tbname from tmqdb.stb where c1 > 1");
if (taos_errno(pRes) != 0) {
printf("failed to create topic topicname, reason:%s\n", taos_errstr(pRes));
return -1;
@@ -199,9 +198,7 @@ tmq_t* build_consumer() {
if (TMQ_CONF_OK != code) return NULL;
code = tmq_conf_set(conf, "auto.offset.reset", "earliest");
if (TMQ_CONF_OK != code) return NULL;
- code = tmq_conf_set(conf, "experimental.snapshot.enable", "true");
- if (TMQ_CONF_OK != code) return NULL;
- code = tmq_conf_set(conf, "msg.with.table.name", "true");
+ code = tmq_conf_set(conf, "experimental.snapshot.enable", "false");
if (TMQ_CONF_OK != code) return NULL;
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
@@ -220,14 +217,7 @@ tmq_list_t* build_topic_list() {
return topicList;
}
-void basic_consume_loop(tmq_t* tmq, tmq_list_t* topicList) {
- int32_t code;
-
- if ((code = tmq_subscribe(tmq, topicList))) {
- fprintf(stderr, "%% Failed to tmq_subscribe(): %s\n", tmq_err2str(code));
- return;
- }
-
+void basic_consume_loop(tmq_t* tmq) {
int32_t totalRows = 0;
int32_t msgCnt = 0;
int32_t timeout = 5000;
@@ -237,8 +227,8 @@ void basic_consume_loop(tmq_t* tmq, tmq_list_t* topicList) {
msgCnt++;
totalRows += msg_process(tmqmsg);
taos_free_result(tmqmsg);
- /*} else {*/
- /*break;*/
+ } else {
+ break;
}
}
@@ -267,14 +257,12 @@ int main(int argc, char* argv[]) {
return -1;
}
- basic_consume_loop(tmq, topic_list);
-
- code = tmq_unsubscribe(tmq);
- if (code) {
- fprintf(stderr, "%% Failed to unsubscribe: %s\n", tmq_err2str(code));
- } else {
- fprintf(stderr, "%% unsubscribe\n");
+ if ((code = tmq_subscribe(tmq, topic_list))) {
+ fprintf(stderr, "%% Failed to tmq_subscribe(): %s\n", tmq_err2str(code));
}
+ tmq_list_destroy(topic_list);
+
+ basic_consume_loop(tmq);
code = tmq_consumer_close(tmq);
if (code) {
diff --git a/include/client/taos.h b/include/client/taos.h
index dd7266bd96..f260b84f4a 100644
--- a/include/client/taos.h
+++ b/include/client/taos.h
@@ -131,10 +131,10 @@ DLL_EXPORT int taos_options(TSDB_OPTION option, const void *arg, ...);
DLL_EXPORT setConfRet taos_set_config(const char *config);
DLL_EXPORT int taos_init(void);
DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port);
-DLL_EXPORT TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port);
-DLL_EXPORT void taos_close(TAOS *taos);
+DLL_EXPORT TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port);
+DLL_EXPORT void taos_close(TAOS *taos);
-const char *taos_data_type(int type);
+const char *taos_data_type(int type);
DLL_EXPORT TAOS_STMT *taos_stmt_init(TAOS *taos);
DLL_EXPORT int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length);
@@ -244,33 +244,37 @@ DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_comm
/* -------------------------TMQ MSG HANDLE INTERFACE---------------------- */
+DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
+DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
+DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
+
+/* ------------------------------ TAOSX -----------------------------------*/
+// note: following apis are unstable
enum tmq_res_t {
TMQ_RES_INVALID = -1,
TMQ_RES_DATA = 1,
TMQ_RES_TABLE_META = 2,
};
-typedef struct tmq_raw_data{
- void* raw;
+typedef struct tmq_raw_data {
+ void *raw;
uint32_t raw_len;
uint16_t raw_type;
} tmq_raw_data;
typedef enum tmq_res_t tmq_res_t;
-DLL_EXPORT tmq_res_t tmq_get_res_type(TAOS_RES *res);
-DLL_EXPORT int32_t tmq_get_raw(TAOS_RES *res, tmq_raw_data *raw);
-DLL_EXPORT int32_t tmq_write_raw(TAOS *taos, tmq_raw_data raw);
-DLL_EXPORT int taos_write_raw_block(TAOS *taos, int numOfRows, char *pData, const char* tbname);
-DLL_EXPORT void tmq_free_raw(tmq_raw_data raw);
-DLL_EXPORT char *tmq_get_json_meta(TAOS_RES *res); // Returning null means error. Returned result need to be freed by tmq_free_json_meta
-DLL_EXPORT void tmq_free_json_meta(char* jsonMeta);
-DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
-DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
-DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
-DLL_EXPORT const char *tmq_get_table_name(TAOS_RES *res);
+DLL_EXPORT const char *tmq_get_table_name(TAOS_RES *res);
+DLL_EXPORT tmq_res_t tmq_get_res_type(TAOS_RES *res);
+DLL_EXPORT int32_t tmq_get_raw(TAOS_RES *res, tmq_raw_data *raw);
+DLL_EXPORT int32_t tmq_write_raw(TAOS *taos, tmq_raw_data raw);
+DLL_EXPORT int taos_write_raw_block(TAOS *taos, int numOfRows, char *pData, const char *tbname);
+DLL_EXPORT void tmq_free_raw(tmq_raw_data raw);
+// Returning null means error. Returned result need to be freed by tmq_free_json_meta
+DLL_EXPORT char *tmq_get_json_meta(TAOS_RES *res);
+DLL_EXPORT void tmq_free_json_meta(char *jsonMeta);
-/* ------------------------------ TMQ END -------------------------------- */
+/* ---------------------------- TAOSX END -------------------------------- */
typedef enum {
TSDB_SRV_STATUS_UNAVAILABLE = 0,
diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h
index a3de9164a2..717278d51d 100644
--- a/include/libs/parser/parser.h
+++ b/include/libs/parser/parser.h
@@ -53,6 +53,8 @@ typedef struct SParseContext {
int8_t schemalessType;
const char* svrVer;
bool nodeOffline;
+ SArray* pTableMetaPos; // sql table pos => catalog data pos
+ SArray* pTableVgroupPos; // sql table pos => catalog data pos
} SParseContext;
int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery);
@@ -84,8 +86,8 @@ int32_t qBindStmtSingleColValue(void* pBlock, TAOS_MULTI_BIND* bind, char* msgBu
int32_t rowNum);
int32_t qBuildStmtColFields(void* pDataBlock, int32_t* fieldNum, TAOS_FIELD_E** fields);
int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TAOS_FIELD_E** fields);
-int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const char* sTableName, char* tName, TAOS_MULTI_BIND* bind,
- char* msgBuf, int32_t msgBufLen);
+int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const char* sTableName, char* tName,
+ TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen);
void destroyBoundColumnInfo(void* pBoundInfo);
int32_t qCreateSName(SName* pName, const char* pTableName, int32_t acctId, char* dbName, char* msgBuf,
int32_t msgBufLen);
diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h
index f51c37ed47..eac92d76ba 100644
--- a/include/libs/stream/tstream.h
+++ b/include/libs/stream/tstream.h
@@ -275,12 +275,8 @@ typedef struct SStreamTask {
int32_t nodeId;
SEpSet epSet;
- // used for task source and sink,
- // while task agg should have processedVer for each child
int64_t recoverSnapVer;
int64_t startVer;
- int64_t checkpointVer;
- int64_t processedVer;
// children info
SArray* childEpInfo; // SArray
diff --git a/include/libs/stream/tstreamUpdate.h b/include/libs/stream/tstreamUpdate.h
index 78543118da..c186430f3f 100644
--- a/include/libs/stream/tstreamUpdate.h
+++ b/include/libs/stream/tstreamUpdate.h
@@ -25,33 +25,34 @@ extern "C" {
#endif
typedef struct SUpdateInfo {
- SArray *pTsBuckets;
- uint64_t numBuckets;
- SArray *pTsSBFs;
- uint64_t numSBFs;
- int64_t interval;
- int64_t watermark;
- TSKEY minTS;
- SScalableBf* pCloseWinSBF;
- SHashObj* pMap;
- STimeWindow scanWindow;
- uint64_t scanGroupId;
- uint64_t maxVersion;
+ SArray *pTsBuckets;
+ uint64_t numBuckets;
+ SArray *pTsSBFs;
+ uint64_t numSBFs;
+ int64_t interval;
+ int64_t watermark;
+ TSKEY minTS;
+ SScalableBf *pCloseWinSBF;
+ SHashObj *pMap;
+ STimeWindow scanWindow;
+ uint64_t scanGroupId;
+ uint64_t maxVersion;
} SUpdateInfo;
-SUpdateInfo *updateInfoInitP(SInterval* pInterval, int64_t watermark);
+SUpdateInfo *updateInfoInitP(SInterval *pInterval, int64_t watermark);
SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t watermark);
-bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts);
-void updateInfoSetScanRange(SUpdateInfo *pInfo, STimeWindow* pWin, uint64_t groupId, uint64_t version);
-bool updateInfoIgnore(SUpdateInfo *pInfo, STimeWindow* pWin, uint64_t groupId, uint64_t version);
-void updateInfoDestroy(SUpdateInfo *pInfo);
-void updateInfoAddCloseWindowSBF(SUpdateInfo *pInfo);
-void updateInfoDestoryColseWinSBF(SUpdateInfo *pInfo);
-int32_t updateInfoSerialize(void *buf, int32_t bufLen, const SUpdateInfo *pInfo);
-int32_t updateInfoDeserialize(void *buf, int32_t bufLen, SUpdateInfo *pInfo);
+bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts);
+bool updateInfoIsTableInserted(SUpdateInfo *pInfo, int64_t tbUid);
+void updateInfoSetScanRange(SUpdateInfo *pInfo, STimeWindow *pWin, uint64_t groupId, uint64_t version);
+bool updateInfoIgnore(SUpdateInfo *pInfo, STimeWindow *pWin, uint64_t groupId, uint64_t version);
+void updateInfoDestroy(SUpdateInfo *pInfo);
+void updateInfoAddCloseWindowSBF(SUpdateInfo *pInfo);
+void updateInfoDestoryColseWinSBF(SUpdateInfo *pInfo);
+int32_t updateInfoSerialize(void *buf, int32_t bufLen, const SUpdateInfo *pInfo);
+int32_t updateInfoDeserialize(void *buf, int32_t bufLen, SUpdateInfo *pInfo);
#ifdef __cplusplus
}
#endif
-#endif /* ifndef _TSTREAMUPDATE_H_ */
\ No newline at end of file
+#endif /* ifndef _TSTREAMUPDATE_H_ */
diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h
index aa563343f8..6d8895eb96 100644
--- a/include/libs/sync/sync.h
+++ b/include/libs/sync/sync.h
@@ -30,6 +30,7 @@ extern bool gRaftDetailLog;
#define SYNC_SPEED_UP_HB_TIMER 400
#define SYNC_SPEED_UP_AFTER_MS (1000 * 20)
#define SYNC_SLOW_DOWN_RANGE 100
+#define SYNC_MAX_READ_RANGE 10
#define SYNC_MAX_BATCH_SIZE 1
#define SYNC_INDEX_BEGIN 0
@@ -210,9 +211,12 @@ void syncStop(int64_t rid);
int32_t syncSetStandby(int64_t rid);
ESyncState syncGetMyRole(int64_t rid);
bool syncIsReady(int64_t rid);
+bool syncIsReadyForRead(int64_t rid);
const char* syncGetMyRoleStr(int64_t rid);
bool syncRestoreFinish(int64_t rid);
SyncTerm syncGetMyTerm(int64_t rid);
+SyncIndex syncGetLastIndex(int64_t rid);
+SyncIndex syncGetCommitIndex(int64_t rid);
SyncGroupId syncGetVgId(int64_t rid);
void syncGetEpSet(int64_t rid, SEpSet* pEpSet);
void syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet);
diff --git a/include/util/taoserror.h b/include/util/taoserror.h
index 3ca6978156..d7ec3697af 100644
--- a/include/util/taoserror.h
+++ b/include/util/taoserror.h
@@ -622,6 +622,7 @@ int32_t* taosGetErrno();
//tmq
#define TSDB_CODE_TMQ_INVALID_MSG TAOS_DEF_ERROR_CODE(0, 0x4000)
#define TSDB_CODE_TMQ_CONSUMER_MISMATCH TAOS_DEF_ERROR_CODE(0, 0x4001)
+#define TSDB_CODE_TMQ_CONSUMER_CLOSED TAOS_DEF_ERROR_CODE(0, 0x4002)
#ifdef __cplusplus
}
diff --git a/include/util/tref.h b/include/util/tref.h
index 7e08bb045b..c2cc54cb07 100644
--- a/include/util/tref.h
+++ b/include/util/tref.h
@@ -29,11 +29,11 @@ int32_t taosOpenRef(int32_t max, void (*fp)(void *));
// close the reference set, refId is the return value by taosOpenRef
// return 0 if success. On error, -1 is returned, and terrno is set appropriately
-int32_t taosCloseRef(int32_t refId);
+int32_t taosCloseRef(int32_t rsetId);
// add ref, p is the pointer to resource or pointer ID
// return Reference ID(rid) allocated. On error, -1 is returned, and terrno is set appropriately
-int64_t taosAddRef(int32_t refId, void *p);
+int64_t taosAddRef(int32_t rsetId, void *p);
// remove ref, rid is the reference ID returned by taosAddRef
// return 0 if success. On error, -1 is returned, and terrno is set appropriately
diff --git a/packaging/deb/DEBIAN/prerm b/packaging/deb/DEBIAN/prerm
index 5676bf5c43..4953102842 100644
--- a/packaging/deb/DEBIAN/prerm
+++ b/packaging/deb/DEBIAN/prerm
@@ -29,6 +29,7 @@ else
# Remove all links
${csudo}rm -f ${bin_link_dir}/taos || :
${csudo}rm -f ${bin_link_dir}/taosd || :
+ ${csudo}rm -f ${bin_link_dir}/udfd || :
${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${bin_link_dir}/taosdemo || :
${csudo}rm -f ${cfg_link_dir}/* || :
diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh
index 6de475a4c0..3db9005f95 100755
--- a/packaging/deb/makedeb.sh
+++ b/packaging/deb/makedeb.sh
@@ -60,6 +60,7 @@ cp ${compile_dir}/../packaging/tools/set_core.sh ${pkg_dir}${install_home_pat
cp ${compile_dir}/../packaging/tools/taosd-dump-cfg.gdb ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
+cp ${compile_dir}/build/bin/udfd ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosBenchmark ${pkg_dir}${install_home_path}/bin
if [ -f "${compile_dir}/build/bin/taosadapter" ]; then
diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec
index 7a34f7a222..637d2d425a 100644
--- a/packaging/rpm/tdengine.spec
+++ b/packaging/rpm/tdengine.spec
@@ -69,6 +69,7 @@ cp %{_compiledir}/../packaging/tools/set_core.sh %{buildroot}%{homepath}/bin
cp %{_compiledir}/../packaging/tools/taosd-dump-cfg.gdb %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin
+cp %{_compiledir}/build/bin/udfd %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosBenchmark %{buildroot}%{homepath}/bin
if [ -f %{_compiledir}/build/bin/taosadapter ]; then
@@ -204,6 +205,7 @@ if [ $1 -eq 0 ];then
# Remove all links
${csudo}rm -f ${bin_link_dir}/taos || :
${csudo}rm -f ${bin_link_dir}/taosd || :
+ ${csudo}rm -f ${bin_link_dir}/udfd || :
${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${cfg_link_dir}/* || :
${csudo}rm -f ${inc_link_dir}/taos.h || :
diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh
index eda2b052d1..39606ead30 100755
--- a/packaging/tools/install.sh
+++ b/packaging/tools/install.sh
@@ -18,6 +18,7 @@ script_dir=$(dirname $(readlink -f "$0"))
clientName="taos"
serverName="taosd"
+udfdName="udfd"
configFile="taos.cfg"
productName="TDengine"
emailName="taosdata.com"
@@ -192,6 +193,7 @@ function install_bin() {
# Remove links
${csudo}rm -f ${bin_link_dir}/${clientName} || :
${csudo}rm -f ${bin_link_dir}/${serverName} || :
+ ${csudo}rm -f ${bin_link_dir}/${udfdName} || :
${csudo}rm -f ${bin_link_dir}/${adapterName} || :
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
${csudo}rm -f ${bin_link_dir}/${demoName} || :
@@ -205,6 +207,7 @@ function install_bin() {
#Make link
[ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} || :
[ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} || :
+ [ -x ${install_main_dir}/bin/${udfdName} ] && ${csudo}ln -s ${install_main_dir}/bin/${udfdName} ${bin_link_dir}/${udfdName} || :
[ -x ${install_main_dir}/bin/${adapterName} ] && ${csudo}ln -s ${install_main_dir}/bin/${adapterName} ${bin_link_dir}/${adapterName} || :
[ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${demoName} || :
[ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${benchmarkName} || :
@@ -742,7 +745,7 @@ function is_version_compatible() {
fi
exist_version=$(${installDir}/bin/${serverName} -V | head -1 | cut -d ' ' -f 3)
- vercomp $exist_version "2.0.16.0"
+ vercomp $exist_version "3.0.0.0"
case $? in
2)
prompt_force=1
diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh
index 6103ce170c..f5e3bf1882 100755
--- a/packaging/tools/makepkg.sh
+++ b/packaging/tools/makepkg.sh
@@ -85,6 +85,7 @@ else
${build_dir}/bin/${clientName} \
${taostools_bin_files} \
${build_dir}/bin/taosadapter \
+ ${build_dir}/bin/udfd \
${script_dir}/remove.sh \
${script_dir}/set_core.sh \
${script_dir}/startPre.sh \
@@ -318,7 +319,7 @@ if [ "$verMode" == "cluster" ]; then
fi
# Copy release note
-cp ${script_dir}/release_note ${install_dir}
+# cp ${script_dir}/release_note ${install_dir}
# exit 1
diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh
index aa80cfb86c..fcc8a2a942 100755
--- a/packaging/tools/post.sh
+++ b/packaging/tools/post.sh
@@ -118,6 +118,7 @@ function install_bin() {
# Remove links
${csudo}rm -f ${bin_link_dir}/taos || :
${csudo}rm -f ${bin_link_dir}/taosd || :
+ ${csudo}rm -f ${bin_link_dir}/udfd || :
${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${bin_link_dir}/taosBenchmark || :
${csudo}rm -f ${bin_link_dir}/taosdemo || :
@@ -130,6 +131,7 @@ function install_bin() {
#Make link
[ -x ${bin_dir}/taos ] && ${csudo}ln -s ${bin_dir}/taos ${bin_link_dir}/taos || :
[ -x ${bin_dir}/taosd ] && ${csudo}ln -s ${bin_dir}/taosd ${bin_link_dir}/taosd || :
+ [ -x ${bin_dir}/udfd ] && ${csudo}ln -s ${bin_dir}/udfd ${bin_link_dir}/udfd || :
[ -x ${bin_dir}/taosadapter ] && ${csudo}ln -s ${bin_dir}/taosadapter ${bin_link_dir}/taosadapter || :
[ -x ${bin_dir}/taosBenchmark ] && ${csudo}ln -sf ${bin_dir}/taosBenchmark ${bin_link_dir}/taosdemo || :
[ -x ${bin_dir}/taosBenchmark ] && ${csudo}ln -sf ${bin_dir}/taosBenchmark ${bin_link_dir}/taosBenchmark || :
diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c
index acdb3b68b0..9c086fc83e 100644
--- a/source/client/src/clientImpl.c
+++ b/source/client/src/clientImpl.c
@@ -689,11 +689,11 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList
TDMT_VND_CREATE_TABLE == pRequest->type) {
pRequest->body.resInfo.numOfRows = res.numOfRows;
if (TDMT_VND_SUBMIT == pRequest->type) {
- STscObj *pTscObj = pRequest->pTscObj;
- SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
- atomic_add_fetch_64((int64_t *)&pActivity->numOfInsertRows, res.numOfRows);
+ STscObj* pTscObj = pRequest->pTscObj;
+ SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary;
+ atomic_add_fetch_64((int64_t*)&pActivity->numOfInsertRows, res.numOfRows);
}
-
+
schedulerFreeJob(&pRequest->body.queryJob, 0);
}
@@ -800,8 +800,8 @@ int32_t handleQueryExecRsp(SRequestObj* pRequest) {
break;
}
case TDMT_VND_SUBMIT: {
- atomic_add_fetch_64((int64_t *)&pAppInfo->summary.insertBytes, pRes->numOfBytes);
-
+ atomic_add_fetch_64((int64_t*)&pAppInfo->summary.insertBytes, pRes->numOfBytes);
+
code = handleSubmitExecRes(pRequest, pRes->res, pCatalog, &epset);
break;
}
@@ -832,9 +832,9 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
if (pResult) {
pRequest->body.resInfo.numOfRows = pResult->numOfRows;
if (TDMT_VND_SUBMIT == pRequest->type) {
- STscObj *pTscObj = pRequest->pTscObj;
- SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
- atomic_add_fetch_64((int64_t *)&pActivity->numOfInsertRows, pResult->numOfRows);
+ STscObj* pTscObj = pRequest->pTscObj;
+ SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary;
+ atomic_add_fetch_64((int64_t*)&pActivity->numOfInsertRows, pResult->numOfRows);
}
}
@@ -877,14 +877,14 @@ SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQue
if (pQuery->pRoot) {
pRequest->stmtType = pQuery->pRoot->type;
}
-
+
if (pQuery->pRoot && !pRequest->inRetry) {
- STscObj *pTscObj = pRequest->pTscObj;
- SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
+ STscObj* pTscObj = pRequest->pTscObj;
+ SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary;
if (QUERY_NODE_VNODE_MODIF_STMT == pQuery->pRoot->type) {
- atomic_add_fetch_64((int64_t *)&pActivity->numOfInsertsReq, 1);
+ atomic_add_fetch_64((int64_t*)&pActivity->numOfInsertsReq, 1);
} else if (QUERY_NODE_SELECT_STMT == pQuery->pRoot->type) {
- atomic_add_fetch_64((int64_t *)&pActivity->numOfQueryReq, 1);
+ atomic_add_fetch_64((int64_t*)&pActivity->numOfQueryReq, 1);
}
}
@@ -1467,9 +1467,9 @@ void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4)
tscDebug("0x%" PRIx64 " fetch results, numOfRows:%d total Rows:%" PRId64 ", complete:%d, reqId:0x%" PRIx64,
pRequest->self, pResInfo->numOfRows, pResInfo->totalRows, pResInfo->completed, pRequest->requestId);
- STscObj *pTscObj = pRequest->pTscObj;
- SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
- atomic_add_fetch_64((int64_t *)&pActivity->fetchBytes, pRequest->body.resInfo.payloadLen);
+ STscObj* pTscObj = pRequest->pTscObj;
+ SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary;
+ atomic_add_fetch_64((int64_t*)&pActivity->fetchBytes, pRequest->body.resInfo.payloadLen);
if (pResultInfo->numOfRows == 0) {
return NULL;
@@ -2006,7 +2006,7 @@ int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName,
bool inEscape = false;
int32_t code = 0;
- void *pIter = NULL;
+ void* pIter = NULL;
int32_t vIdx = 0;
int32_t vPos[2];
diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c
index 0ec724c6d0..0e95cd4d99 100644
--- a/source/client/src/clientMain.c
+++ b/source/client/src/clientMain.c
@@ -192,6 +192,7 @@ void taos_free_result(TAOS_RES *res) {
if (pRsp->rsp.withSchema) taosArrayDestroyP(pRsp->rsp.blockSchema, (FDelete)tDeleteSSchemaWrapper);
pRsp->resInfo.pRspMsg = NULL;
doFreeReqResultInfo(&pRsp->resInfo);
+ taosMemoryFree(pRsp);
} else if (TD_RES_TMQ_META(res)) {
SMqMetaRspObj *pRspObj = (SMqMetaRspObj *)res;
taosMemoryFree(pRspObj->metaRsp.metaRsp);
diff --git a/source/client/src/taosx.c b/source/client/src/taosx.c
new file mode 100644
index 0000000000..677567e38f
--- /dev/null
+++ b/source/client/src/taosx.c
@@ -0,0 +1,1628 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "cJSON.h"
+#include "clientInt.h"
+#include "clientLog.h"
+#include "parser.h"
+#include "tdatablock.h"
+#include "tdef.h"
+#include "tglobal.h"
+#include "tmsgtype.h"
+#include "tqueue.h"
+#include "tref.h"
+#include "ttimer.h"
+
+static char* buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* schemaTag, char* name, int64_t id,
+ int8_t t) {
+ char* string = NULL;
+ cJSON* json = cJSON_CreateObject();
+ if (json == NULL) {
+ return string;
+ }
+ cJSON* type = cJSON_CreateString("create");
+ cJSON_AddItemToObject(json, "type", type);
+
+ // char uid[32] = {0};
+ // sprintf(uid, "%"PRIi64, id);
+ // cJSON* id_ = cJSON_CreateString(uid);
+ // cJSON_AddItemToObject(json, "id", id_);
+ cJSON* tableName = cJSON_CreateString(name);
+ cJSON_AddItemToObject(json, "tableName", tableName);
+ cJSON* tableType = cJSON_CreateString(t == TSDB_NORMAL_TABLE ? "normal" : "super");
+ cJSON_AddItemToObject(json, "tableType", tableType);
+ // cJSON* version = cJSON_CreateNumber(1);
+ // cJSON_AddItemToObject(json, "version", version);
+
+ cJSON* columns = cJSON_CreateArray();
+ for (int i = 0; i < schemaRow->nCols; i++) {
+ cJSON* column = cJSON_CreateObject();
+ SSchema* s = schemaRow->pSchema + i;
+ cJSON* cname = cJSON_CreateString(s->name);
+ cJSON_AddItemToObject(column, "name", cname);
+ cJSON* ctype = cJSON_CreateNumber(s->type);
+ cJSON_AddItemToObject(column, "type", ctype);
+ if (s->type == TSDB_DATA_TYPE_BINARY) {
+ int32_t length = s->bytes - VARSTR_HEADER_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(column, "length", cbytes);
+ } else if (s->type == TSDB_DATA_TYPE_NCHAR) {
+ int32_t length = (s->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(column, "length", cbytes);
+ }
+ cJSON_AddItemToArray(columns, column);
+ }
+ cJSON_AddItemToObject(json, "columns", columns);
+
+ cJSON* tags = cJSON_CreateArray();
+ for (int i = 0; schemaTag && i < schemaTag->nCols; i++) {
+ cJSON* tag = cJSON_CreateObject();
+ SSchema* s = schemaTag->pSchema + i;
+ cJSON* tname = cJSON_CreateString(s->name);
+ cJSON_AddItemToObject(tag, "name", tname);
+ cJSON* ttype = cJSON_CreateNumber(s->type);
+ cJSON_AddItemToObject(tag, "type", ttype);
+ if (s->type == TSDB_DATA_TYPE_BINARY) {
+ int32_t length = s->bytes - VARSTR_HEADER_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(tag, "length", cbytes);
+ } else if (s->type == TSDB_DATA_TYPE_NCHAR) {
+ int32_t length = (s->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(tag, "length", cbytes);
+ }
+ cJSON_AddItemToArray(tags, tag);
+ }
+ cJSON_AddItemToObject(json, "tags", tags);
+
+ string = cJSON_PrintUnformatted(json);
+ cJSON_Delete(json);
+ return string;
+}
+
+static char* buildAlterSTableJson(void* alterData, int32_t alterDataLen) {
+ SMAlterStbReq req = {0};
+ cJSON* json = NULL;
+ char* string = NULL;
+
+ if (tDeserializeSMAlterStbReq(alterData, alterDataLen, &req) != 0) {
+ goto end;
+ }
+
+ json = cJSON_CreateObject();
+ if (json == NULL) {
+ goto end;
+ }
+ cJSON* type = cJSON_CreateString("alter");
+ cJSON_AddItemToObject(json, "type", type);
+ // cJSON* uid = cJSON_CreateNumber(id);
+ // cJSON_AddItemToObject(json, "uid", uid);
+ SName name = {0};
+ tNameFromString(&name, req.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
+ cJSON* tableName = cJSON_CreateString(name.tname);
+ cJSON_AddItemToObject(json, "tableName", tableName);
+ cJSON* tableType = cJSON_CreateString("super");
+ cJSON_AddItemToObject(json, "tableType", tableType);
+
+ cJSON* alterType = cJSON_CreateNumber(req.alterType);
+ cJSON_AddItemToObject(json, "alterType", alterType);
+ switch (req.alterType) {
+ case TSDB_ALTER_TABLE_ADD_TAG:
+ case TSDB_ALTER_TABLE_ADD_COLUMN: {
+ TAOS_FIELD* field = taosArrayGet(req.pFields, 0);
+ cJSON* colName = cJSON_CreateString(field->name);
+ cJSON_AddItemToObject(json, "colName", colName);
+ cJSON* colType = cJSON_CreateNumber(field->type);
+ cJSON_AddItemToObject(json, "colType", colType);
+
+ if (field->type == TSDB_DATA_TYPE_BINARY) {
+ int32_t length = field->bytes - VARSTR_HEADER_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(json, "colLength", cbytes);
+ } else if (field->type == TSDB_DATA_TYPE_NCHAR) {
+ int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(json, "colLength", cbytes);
+ }
+ break;
+ }
+ case TSDB_ALTER_TABLE_DROP_TAG:
+ case TSDB_ALTER_TABLE_DROP_COLUMN: {
+ TAOS_FIELD* field = taosArrayGet(req.pFields, 0);
+ cJSON* colName = cJSON_CreateString(field->name);
+ cJSON_AddItemToObject(json, "colName", colName);
+ break;
+ }
+ case TSDB_ALTER_TABLE_UPDATE_TAG_BYTES:
+ case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES: {
+ TAOS_FIELD* field = taosArrayGet(req.pFields, 0);
+ cJSON* colName = cJSON_CreateString(field->name);
+ cJSON_AddItemToObject(json, "colName", colName);
+ cJSON* colType = cJSON_CreateNumber(field->type);
+ cJSON_AddItemToObject(json, "colType", colType);
+ if (field->type == TSDB_DATA_TYPE_BINARY) {
+ int32_t length = field->bytes - VARSTR_HEADER_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(json, "colLength", cbytes);
+ } else if (field->type == TSDB_DATA_TYPE_NCHAR) {
+ int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(json, "colLength", cbytes);
+ }
+ break;
+ }
+ case TSDB_ALTER_TABLE_UPDATE_TAG_NAME:
+ case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME: {
+ TAOS_FIELD* oldField = taosArrayGet(req.pFields, 0);
+ TAOS_FIELD* newField = taosArrayGet(req.pFields, 1);
+ cJSON* colName = cJSON_CreateString(oldField->name);
+ cJSON_AddItemToObject(json, "colName", colName);
+ cJSON* colNewName = cJSON_CreateString(newField->name);
+ cJSON_AddItemToObject(json, "colNewName", colNewName);
+ break;
+ }
+ default:
+ break;
+ }
+ string = cJSON_PrintUnformatted(json);
+
+end:
+ cJSON_Delete(json);
+ tFreeSMAltertbReq(&req);
+ return string;
+}
+
+static char* processCreateStb(SMqMetaRsp* metaRsp) {
+ SVCreateStbReq req = {0};
+ SDecoder coder;
+ char* string = NULL;
+
+ // decode and process req
+ void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
+ int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+
+ if (tDecodeSVCreateStbReq(&coder, &req) < 0) {
+ goto _err;
+ }
+ string = buildCreateTableJson(&req.schemaRow, &req.schemaTag, req.name, req.suid, TSDB_SUPER_TABLE);
+ tDecoderClear(&coder);
+ return string;
+
+_err:
+ tDecoderClear(&coder);
+ return string;
+}
+
+static char* processAlterStb(SMqMetaRsp* metaRsp) {
+ SVCreateStbReq req = {0};
+ SDecoder coder;
+ char* string = NULL;
+
+ // decode and process req
+ void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
+ int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+
+ if (tDecodeSVCreateStbReq(&coder, &req) < 0) {
+ goto _err;
+ }
+ string = buildAlterSTableJson(req.alterOriData, req.alterOriDataLen);
+ tDecoderClear(&coder);
+ return string;
+
+_err:
+ tDecoderClear(&coder);
+ return string;
+}
+
+static char* buildCreateCTableJson(STag* pTag, char* sname, char* name, SArray* tagName, int64_t id, uint8_t tagNum) {
+ char* string = NULL;
+ SArray* pTagVals = NULL;
+ cJSON* json = cJSON_CreateObject();
+ if (json == NULL) {
+ return string;
+ }
+ cJSON* type = cJSON_CreateString("create");
+ cJSON_AddItemToObject(json, "type", type);
+ // char cid[32] = {0};
+ // sprintf(cid, "%"PRIi64, id);
+ // cJSON* cid_ = cJSON_CreateString(cid);
+ // cJSON_AddItemToObject(json, "id", cid_);
+
+ cJSON* tableName = cJSON_CreateString(name);
+ cJSON_AddItemToObject(json, "tableName", tableName);
+ cJSON* tableType = cJSON_CreateString("child");
+ cJSON_AddItemToObject(json, "tableType", tableType);
+ cJSON* using = cJSON_CreateString(sname);
+ cJSON_AddItemToObject(json, "using", using);
+ cJSON* tagNumJson = cJSON_CreateNumber(tagNum);
+ cJSON_AddItemToObject(json, "tagNum", tagNumJson);
+ // cJSON* version = cJSON_CreateNumber(1);
+ // cJSON_AddItemToObject(json, "version", version);
+
+ cJSON* tags = cJSON_CreateArray();
+ int32_t code = tTagToValArray(pTag, &pTagVals);
+ if (code) {
+ goto end;
+ }
+
+ if (tTagIsJson(pTag)) {
+ STag* p = (STag*)pTag;
+ if (p->nTag == 0) {
+ goto end;
+ }
+ char* pJson = parseTagDatatoJson(pTag);
+ cJSON* tag = cJSON_CreateObject();
+ STagVal* pTagVal = taosArrayGet(pTagVals, 0);
+
+ char* ptname = taosArrayGet(tagName, 0);
+ cJSON* tname = cJSON_CreateString(ptname);
+ cJSON_AddItemToObject(tag, "name", tname);
+ // cJSON* cid_ = cJSON_CreateString("");
+ // cJSON_AddItemToObject(tag, "cid", cid_);
+ cJSON* ttype = cJSON_CreateNumber(TSDB_DATA_TYPE_JSON);
+ cJSON_AddItemToObject(tag, "type", ttype);
+ cJSON* tvalue = cJSON_CreateString(pJson);
+ cJSON_AddItemToObject(tag, "value", tvalue);
+ cJSON_AddItemToArray(tags, tag);
+ taosMemoryFree(pJson);
+ goto end;
+ }
+
+ for (int i = 0; i < taosArrayGetSize(pTagVals); i++) {
+ STagVal* pTagVal = (STagVal*)taosArrayGet(pTagVals, i);
+
+ cJSON* tag = cJSON_CreateObject();
+
+ char* ptname = taosArrayGet(tagName, i);
+ cJSON* tname = cJSON_CreateString(ptname);
+ cJSON_AddItemToObject(tag, "name", tname);
+ // cJSON* cid = cJSON_CreateNumber(pTagVal->cid);
+ // cJSON_AddItemToObject(tag, "cid", cid);
+ cJSON* ttype = cJSON_CreateNumber(pTagVal->type);
+ cJSON_AddItemToObject(tag, "type", ttype);
+
+ cJSON* tvalue = NULL;
+ if (IS_VAR_DATA_TYPE(pTagVal->type)) {
+ char* buf = taosMemoryCalloc(pTagVal->nData + 3, 1);
+ if (!buf) goto end;
+ dataConverToStr(buf, pTagVal->type, pTagVal->pData, pTagVal->nData, NULL);
+ tvalue = cJSON_CreateString(buf);
+ taosMemoryFree(buf);
+ } else {
+ double val = 0;
+ GET_TYPED_DATA(val, double, pTagVal->type, &pTagVal->i64);
+ tvalue = cJSON_CreateNumber(val);
+ }
+
+ cJSON_AddItemToObject(tag, "value", tvalue);
+ cJSON_AddItemToArray(tags, tag);
+ }
+
+end:
+ cJSON_AddItemToObject(json, "tags", tags);
+ string = cJSON_PrintUnformatted(json);
+ cJSON_Delete(json);
+ taosArrayDestroy(pTagVals);
+ return string;
+}
+
+static char* processCreateTable(SMqMetaRsp* metaRsp) {
+ SDecoder decoder = {0};
+ SVCreateTbBatchReq req = {0};
+ SVCreateTbReq* pCreateReq;
+ char* string = NULL;
+ // decode
+ void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
+ int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
+ tDecoderInit(&decoder, data, len);
+ if (tDecodeSVCreateTbBatchReq(&decoder, &req) < 0) {
+ goto _exit;
+ }
+
+ // loop to create table
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ pCreateReq = req.pReqs + iReq;
+ if (pCreateReq->type == TSDB_CHILD_TABLE) {
+ string = buildCreateCTableJson((STag*)pCreateReq->ctb.pTag, pCreateReq->ctb.name, pCreateReq->name,
+ pCreateReq->ctb.tagName, pCreateReq->uid, pCreateReq->ctb.tagNum);
+ } else if (pCreateReq->type == TSDB_NORMAL_TABLE) {
+ string =
+ buildCreateTableJson(&pCreateReq->ntb.schemaRow, NULL, pCreateReq->name, pCreateReq->uid, TSDB_NORMAL_TABLE);
+ }
+ }
+
+ tDecoderClear(&decoder);
+
+_exit:
+ tDecoderClear(&decoder);
+ return string;
+}
+
+static char* processAlterTable(SMqMetaRsp* metaRsp) {
+ SDecoder decoder = {0};
+ SVAlterTbReq vAlterTbReq = {0};
+ char* string = NULL;
+
+ // decode
+ void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
+ int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
+ tDecoderInit(&decoder, data, len);
+ if (tDecodeSVAlterTbReq(&decoder, &vAlterTbReq) < 0) {
+ goto _exit;
+ }
+
+ cJSON* json = cJSON_CreateObject();
+ if (json == NULL) {
+ goto _exit;
+ }
+ cJSON* type = cJSON_CreateString("alter");
+ cJSON_AddItemToObject(json, "type", type);
+ // cJSON* uid = cJSON_CreateNumber(id);
+ // cJSON_AddItemToObject(json, "uid", uid);
+ cJSON* tableName = cJSON_CreateString(vAlterTbReq.tbName);
+ cJSON_AddItemToObject(json, "tableName", tableName);
+ cJSON* tableType = cJSON_CreateString(vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_TAG_VAL ? "child" : "normal");
+ cJSON_AddItemToObject(json, "tableType", tableType);
+ cJSON* alterType = cJSON_CreateNumber(vAlterTbReq.action);
+ cJSON_AddItemToObject(json, "alterType", alterType);
+
+ switch (vAlterTbReq.action) {
+ case TSDB_ALTER_TABLE_ADD_COLUMN: {
+ cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
+ cJSON_AddItemToObject(json, "colName", colName);
+ cJSON* colType = cJSON_CreateNumber(vAlterTbReq.type);
+ cJSON_AddItemToObject(json, "colType", colType);
+
+ if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY) {
+ int32_t length = vAlterTbReq.bytes - VARSTR_HEADER_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(json, "colLength", cbytes);
+ } else if (vAlterTbReq.type == TSDB_DATA_TYPE_NCHAR) {
+ int32_t length = (vAlterTbReq.bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(json, "colLength", cbytes);
+ }
+ break;
+ }
+ case TSDB_ALTER_TABLE_DROP_COLUMN: {
+ cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
+ cJSON_AddItemToObject(json, "colName", colName);
+ break;
+ }
+ case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES: {
+ cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
+ cJSON_AddItemToObject(json, "colName", colName);
+ cJSON* colType = cJSON_CreateNumber(vAlterTbReq.colModType);
+ cJSON_AddItemToObject(json, "colType", colType);
+ if (vAlterTbReq.colModType == TSDB_DATA_TYPE_BINARY) {
+ int32_t length = vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(json, "colLength", cbytes);
+ } else if (vAlterTbReq.colModType == TSDB_DATA_TYPE_NCHAR) {
+ int32_t length = (vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(json, "colLength", cbytes);
+ }
+ break;
+ }
+ case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME: {
+ cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
+ cJSON_AddItemToObject(json, "colName", colName);
+ cJSON* colNewName = cJSON_CreateString(vAlterTbReq.colNewName);
+ cJSON_AddItemToObject(json, "colNewName", colNewName);
+ break;
+ }
+ case TSDB_ALTER_TABLE_UPDATE_TAG_VAL: {
+ cJSON* tagName = cJSON_CreateString(vAlterTbReq.tagName);
+ cJSON_AddItemToObject(json, "colName", tagName);
+
+ bool isNull = vAlterTbReq.isNull;
+ if (vAlterTbReq.tagType == TSDB_DATA_TYPE_JSON) {
+ STag* jsonTag = (STag*)vAlterTbReq.pTagVal;
+ if (jsonTag->nTag == 0) isNull = true;
+ }
+ if (!isNull) {
+ char* buf = NULL;
+
+ if (vAlterTbReq.tagType == TSDB_DATA_TYPE_JSON) {
+ ASSERT(tTagIsJson(vAlterTbReq.pTagVal) == true);
+ buf = parseTagDatatoJson(vAlterTbReq.pTagVal);
+ } else {
+ buf = taosMemoryCalloc(vAlterTbReq.nTagVal + 1, 1);
+ dataConverToStr(buf, vAlterTbReq.tagType, vAlterTbReq.pTagVal, vAlterTbReq.nTagVal, NULL);
+ }
+
+ cJSON* colValue = cJSON_CreateString(buf);
+ cJSON_AddItemToObject(json, "colValue", colValue);
+ taosMemoryFree(buf);
+ }
+
+ cJSON* isNullCJson = cJSON_CreateBool(isNull);
+ cJSON_AddItemToObject(json, "colValueNull", isNullCJson);
+ break;
+ }
+ default:
+ break;
+ }
+ string = cJSON_PrintUnformatted(json);
+
+_exit:
+ tDecoderClear(&decoder);
+ return string;
+}
+
+static char* processDropSTable(SMqMetaRsp* metaRsp) {
+ SDecoder decoder = {0};
+ SVDropStbReq req = {0};
+ char* string = NULL;
+
+ // decode
+ void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
+ int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
+ tDecoderInit(&decoder, data, len);
+ if (tDecodeSVDropStbReq(&decoder, &req) < 0) {
+ goto _exit;
+ }
+
+ cJSON* json = cJSON_CreateObject();
+ if (json == NULL) {
+ goto _exit;
+ }
+ cJSON* type = cJSON_CreateString("drop");
+ cJSON_AddItemToObject(json, "type", type);
+ cJSON* tableName = cJSON_CreateString(req.name);
+ cJSON_AddItemToObject(json, "tableName", tableName);
+ cJSON* tableType = cJSON_CreateString("super");
+ cJSON_AddItemToObject(json, "tableType", tableType);
+
+ string = cJSON_PrintUnformatted(json);
+
+_exit:
+ tDecoderClear(&decoder);
+ return string;
+}
+
+static char* processDropTable(SMqMetaRsp* metaRsp) {
+ SDecoder decoder = {0};
+ SVDropTbBatchReq req = {0};
+ char* string = NULL;
+
+ // decode
+ void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
+ int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
+ tDecoderInit(&decoder, data, len);
+ if (tDecodeSVDropTbBatchReq(&decoder, &req) < 0) {
+ goto _exit;
+ }
+
+ cJSON* json = cJSON_CreateObject();
+ if (json == NULL) {
+ goto _exit;
+ }
+ cJSON* type = cJSON_CreateString("drop");
+ cJSON_AddItemToObject(json, "type", type);
+ // cJSON* uid = cJSON_CreateNumber(id);
+ // cJSON_AddItemToObject(json, "uid", uid);
+ // cJSON* tableType = cJSON_CreateString("normal");
+ // cJSON_AddItemToObject(json, "tableType", tableType);
+
+ cJSON* tableNameList = cJSON_CreateArray();
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ SVDropTbReq* pDropTbReq = req.pReqs + iReq;
+
+ cJSON* tableName = cJSON_CreateString(pDropTbReq->name);
+ cJSON_AddItemToArray(tableNameList, tableName);
+ }
+ cJSON_AddItemToObject(json, "tableNameList", tableNameList);
+
+ string = cJSON_PrintUnformatted(json);
+
+_exit:
+ tDecoderClear(&decoder);
+ return string;
+}
+
+static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) {
+ SVCreateStbReq req = {0};
+ SDecoder coder;
+ SMCreateStbReq pReq = {0};
+ int32_t code = TSDB_CODE_SUCCESS;
+ SRequestObj* pRequest = NULL;
+
+ code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ if (!pRequest->pDb) {
+ code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
+ goto end;
+ }
+ // decode and process req
+ void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
+ int32_t len = metaLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+ if (tDecodeSVCreateStbReq(&coder, &req) < 0) {
+ code = TSDB_CODE_INVALID_PARA;
+ goto end;
+ }
+ // build create stable
+ pReq.pColumns = taosArrayInit(req.schemaRow.nCols, sizeof(SField));
+ for (int32_t i = 0; i < req.schemaRow.nCols; i++) {
+ SSchema* pSchema = req.schemaRow.pSchema + i;
+ SField field = {.type = pSchema->type, .bytes = pSchema->bytes};
+ strcpy(field.name, pSchema->name);
+ taosArrayPush(pReq.pColumns, &field);
+ }
+ pReq.pTags = taosArrayInit(req.schemaTag.nCols, sizeof(SField));
+ for (int32_t i = 0; i < req.schemaTag.nCols; i++) {
+ SSchema* pSchema = req.schemaTag.pSchema + i;
+ SField field = {.type = pSchema->type, .bytes = pSchema->bytes};
+ strcpy(field.name, pSchema->name);
+ taosArrayPush(pReq.pTags, &field);
+ }
+
+ pReq.colVer = req.schemaRow.version;
+ pReq.tagVer = req.schemaTag.version;
+ pReq.numOfColumns = req.schemaRow.nCols;
+ pReq.numOfTags = req.schemaTag.nCols;
+ pReq.commentLen = -1;
+ pReq.suid = req.suid;
+ pReq.source = TD_REQ_FROM_TAOX;
+ pReq.igExists = true;
+
+ STscObj* pTscObj = pRequest->pTscObj;
+ SName tableName;
+ tNameExtractFullName(toName(pTscObj->acctId, pRequest->pDb, req.name, &tableName), pReq.name);
+
+ SCmdMsgInfo pCmdMsg = {0};
+ pCmdMsg.epSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
+ pCmdMsg.msgType = TDMT_MND_CREATE_STB;
+ pCmdMsg.msgLen = tSerializeSMCreateStbReq(NULL, 0, &pReq);
+ pCmdMsg.pMsg = taosMemoryMalloc(pCmdMsg.msgLen);
+ if (NULL == pCmdMsg.pMsg) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ tSerializeSMCreateStbReq(pCmdMsg.pMsg, pCmdMsg.msgLen, &pReq);
+
+ SQuery pQuery = {0};
+ pQuery.execMode = QUERY_EXEC_MODE_RPC;
+ pQuery.pCmdMsg = &pCmdMsg;
+ pQuery.msgType = pQuery.pCmdMsg->msgType;
+ pQuery.stableQuery = true;
+
+ launchQueryImpl(pRequest, &pQuery, true, NULL);
+
+ if (pRequest->code == TSDB_CODE_SUCCESS) {
+ SCatalog* pCatalog = NULL;
+ catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
+ catalogRemoveTableMeta(pCatalog, &tableName);
+ }
+
+ code = pRequest->code;
+ taosMemoryFree(pCmdMsg.pMsg);
+
+end:
+ destroyRequest(pRequest);
+ tFreeSMCreateStbReq(&pReq);
+ tDecoderClear(&coder);
+ return code;
+}
+
+static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) {
+ SVDropStbReq req = {0};
+ SDecoder coder;
+ SMDropStbReq pReq = {0};
+ int32_t code = TSDB_CODE_SUCCESS;
+ SRequestObj* pRequest = NULL;
+
+ code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ if (!pRequest->pDb) {
+ code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
+ goto end;
+ }
+ // decode and process req
+ void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
+ int32_t len = metaLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+ if (tDecodeSVDropStbReq(&coder, &req) < 0) {
+ code = TSDB_CODE_INVALID_PARA;
+ goto end;
+ }
+
+ // build drop stable
+ pReq.igNotExists = true;
+ pReq.source = TD_REQ_FROM_TAOX;
+ pReq.suid = req.suid;
+
+ STscObj* pTscObj = pRequest->pTscObj;
+ SName tableName = {0};
+ tNameExtractFullName(toName(pTscObj->acctId, pRequest->pDb, req.name, &tableName), pReq.name);
+
+ SCmdMsgInfo pCmdMsg = {0};
+ pCmdMsg.epSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
+ pCmdMsg.msgType = TDMT_MND_DROP_STB;
+ pCmdMsg.msgLen = tSerializeSMDropStbReq(NULL, 0, &pReq);
+ pCmdMsg.pMsg = taosMemoryMalloc(pCmdMsg.msgLen);
+ if (NULL == pCmdMsg.pMsg) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ tSerializeSMDropStbReq(pCmdMsg.pMsg, pCmdMsg.msgLen, &pReq);
+
+ SQuery pQuery = {0};
+ pQuery.execMode = QUERY_EXEC_MODE_RPC;
+ pQuery.pCmdMsg = &pCmdMsg;
+ pQuery.msgType = pQuery.pCmdMsg->msgType;
+ pQuery.stableQuery = true;
+
+ launchQueryImpl(pRequest, &pQuery, true, NULL);
+
+ if (pRequest->code == TSDB_CODE_SUCCESS) {
+ SCatalog* pCatalog = NULL;
+ catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
+ catalogRemoveTableMeta(pCatalog, &tableName);
+ }
+
+ code = pRequest->code;
+ taosMemoryFree(pCmdMsg.pMsg);
+
+end:
+ destroyRequest(pRequest);
+ tDecoderClear(&coder);
+ return code;
+}
+
+typedef struct SVgroupCreateTableBatch {
+ SVCreateTbBatchReq req;
+ SVgroupInfo info;
+ char dbName[TSDB_DB_NAME_LEN];
+} SVgroupCreateTableBatch;
+
+static void destroyCreateTbReqBatch(void* data) {
+ SVgroupCreateTableBatch* pTbBatch = (SVgroupCreateTableBatch*)data;
+ taosArrayDestroy(pTbBatch->req.pArray);
+}
+
+static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
+ SVCreateTbBatchReq req = {0};
+ SDecoder coder = {0};
+ int32_t code = TSDB_CODE_SUCCESS;
+ SRequestObj* pRequest = NULL;
+ SQuery* pQuery = NULL;
+ SHashObj* pVgroupHashmap = NULL;
+
+ code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ if (!pRequest->pDb) {
+ code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
+ goto end;
+ }
+ // decode and process req
+ void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
+ int32_t len = metaLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+ if (tDecodeSVCreateTbBatchReq(&coder, &req) < 0) {
+ code = TSDB_CODE_INVALID_PARA;
+ goto end;
+ }
+
+ STscObj* pTscObj = pRequest->pTscObj;
+
+ SVCreateTbReq* pCreateReq = NULL;
+ SCatalog* pCatalog = NULL;
+ code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ pVgroupHashmap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
+ if (NULL == pVgroupHashmap) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ taosHashSetFreeFp(pVgroupHashmap, destroyCreateTbReqBatch);
+
+ SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
+ .requestId = pRequest->requestId,
+ .requestObjRefId = pRequest->self,
+ .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
+
+ pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName));
+ // loop to create table
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ pCreateReq = req.pReqs + iReq;
+
+ SVgroupInfo pInfo = {0};
+ SName pName = {0};
+ toName(pTscObj->acctId, pRequest->pDb, pCreateReq->name, &pName);
+ code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+ taosArrayPush(pRequest->tableList, &pName);
+
+ SVgroupCreateTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId));
+ if (pTableBatch == NULL) {
+ SVgroupCreateTableBatch tBatch = {0};
+ tBatch.info = pInfo;
+ strcpy(tBatch.dbName, pRequest->pDb);
+
+ tBatch.req.pArray = taosArrayInit(4, sizeof(struct SVCreateTbReq));
+ taosArrayPush(tBatch.req.pArray, pCreateReq);
+
+ taosHashPut(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId), &tBatch, sizeof(tBatch));
+ } else { // add to the correct vgroup
+ taosArrayPush(pTableBatch->req.pArray, pCreateReq);
+ }
+ }
+
+ SArray* pBufArray = serializeVgroupsCreateTableBatch(pVgroupHashmap);
+ if (NULL == pBufArray) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+
+ pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
+ pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
+ pQuery->msgType = TDMT_VND_CREATE_TABLE;
+ pQuery->stableQuery = false;
+ pQuery->pRoot = nodesMakeNode(QUERY_NODE_CREATE_TABLE_STMT);
+
+ code = rewriteToVnodeModifyOpStmt(pQuery, pBufArray);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ launchQueryImpl(pRequest, pQuery, true, NULL);
+ if (pRequest->code == TSDB_CODE_SUCCESS) {
+ removeMeta(pTscObj, pRequest->tableList);
+ }
+
+ code = pRequest->code;
+
+end:
+ taosHashCleanup(pVgroupHashmap);
+ destroyRequest(pRequest);
+ tDecoderClear(&coder);
+ qDestroyQuery(pQuery);
+ return code;
+}
+
+typedef struct SVgroupDropTableBatch {
+ SVDropTbBatchReq req;
+ SVgroupInfo info;
+ char dbName[TSDB_DB_NAME_LEN];
+} SVgroupDropTableBatch;
+
+static void destroyDropTbReqBatch(void* data) {
+ SVgroupDropTableBatch* pTbBatch = (SVgroupDropTableBatch*)data;
+ taosArrayDestroy(pTbBatch->req.pArray);
+}
+
+static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
+ SVDropTbBatchReq req = {0};
+ SDecoder coder = {0};
+ int32_t code = TSDB_CODE_SUCCESS;
+ SRequestObj* pRequest = NULL;
+ SQuery* pQuery = NULL;
+ SHashObj* pVgroupHashmap = NULL;
+
+ code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ if (!pRequest->pDb) {
+ code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
+ goto end;
+ }
+ // decode and process req
+ void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
+ int32_t len = metaLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+ if (tDecodeSVDropTbBatchReq(&coder, &req) < 0) {
+ code = TSDB_CODE_INVALID_PARA;
+ goto end;
+ }
+
+ STscObj* pTscObj = pRequest->pTscObj;
+
+ SVDropTbReq* pDropReq = NULL;
+ SCatalog* pCatalog = NULL;
+ code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ pVgroupHashmap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
+ if (NULL == pVgroupHashmap) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ taosHashSetFreeFp(pVgroupHashmap, destroyDropTbReqBatch);
+
+ SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
+ .requestId = pRequest->requestId,
+ .requestObjRefId = pRequest->self,
+ .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
+ pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName));
+  // loop to drop table
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ pDropReq = req.pReqs + iReq;
+ pDropReq->igNotExists = true;
+
+ SVgroupInfo pInfo = {0};
+ SName pName = {0};
+ toName(pTscObj->acctId, pRequest->pDb, pDropReq->name, &pName);
+ code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ taosArrayPush(pRequest->tableList, &pName);
+ SVgroupDropTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId));
+ if (pTableBatch == NULL) {
+ SVgroupDropTableBatch tBatch = {0};
+ tBatch.info = pInfo;
+ tBatch.req.pArray = taosArrayInit(TARRAY_MIN_SIZE, sizeof(SVDropTbReq));
+ taosArrayPush(tBatch.req.pArray, pDropReq);
+
+ taosHashPut(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId), &tBatch, sizeof(tBatch));
+ } else { // add to the correct vgroup
+ taosArrayPush(pTableBatch->req.pArray, pDropReq);
+ }
+ }
+
+ SArray* pBufArray = serializeVgroupsDropTableBatch(pVgroupHashmap);
+ if (NULL == pBufArray) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+
+ pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
+ pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
+ pQuery->msgType = TDMT_VND_DROP_TABLE;
+ pQuery->stableQuery = false;
+ pQuery->pRoot = nodesMakeNode(QUERY_NODE_DROP_TABLE_STMT);
+
+ code = rewriteToVnodeModifyOpStmt(pQuery, pBufArray);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ launchQueryImpl(pRequest, pQuery, true, NULL);
+ if (pRequest->code == TSDB_CODE_SUCCESS) {
+ removeMeta(pTscObj, pRequest->tableList);
+ }
+ code = pRequest->code;
+
+end:
+ taosHashCleanup(pVgroupHashmap);
+ destroyRequest(pRequest);
+ tDecoderClear(&coder);
+ qDestroyQuery(pQuery);
+ return code;
+}
+
+// delete from db.tabl where .. -> delete from tabl where ..
+// delete from db .tabl where .. -> delete from tabl where ..
+// static void getTbName(char *sql){
+// char *ch = sql;
+//
+// bool inBackQuote = false;
+// int8_t dotIndex = 0;
+// while(*ch != '\0'){
+// if(!inBackQuote && *ch == '`'){
+// inBackQuote = true;
+// ch++;
+// continue;
+// }
+//
+// if(inBackQuote && *ch == '`'){
+// inBackQuote = false;
+// ch++;
+//
+// continue;
+// }
+//
+// if(!inBackQuote && *ch == '.'){
+// dotIndex ++;
+// if(dotIndex == 2){
+// memmove(sql, ch + 1, strlen(ch + 1) + 1);
+// break;
+// }
+// }
+// ch++;
+// }
+//}
+
+static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) {
+ SDeleteRes req = {0};
+ SDecoder coder = {0};
+ int32_t code = TSDB_CODE_SUCCESS;
+
+ // decode and process req
+ void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
+ int32_t len = metaLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+ if (tDecodeDeleteRes(&coder, &req) < 0) {
+ code = TSDB_CODE_INVALID_PARA;
+ goto end;
+ }
+
+ // getTbName(req.tableFName);
+ char sql[256] = {0};
+ sprintf(sql, "delete from `%s` where `%s` >= %" PRId64 " and `%s` <= %" PRId64, req.tableFName, req.tsColName,
+ req.skey, req.tsColName, req.ekey);
+ printf("delete sql:%s\n", sql);
+
+ TAOS_RES* res = taos_query(taos, sql);
+ SRequestObj* pRequest = (SRequestObj*)res;
+ code = pRequest->code;
+ if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
+ code = TSDB_CODE_SUCCESS;
+ }
+ taos_free_result(res);
+
+end:
+ tDecoderClear(&coder);
+ return code;
+}
+
+static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) {
+ SVAlterTbReq req = {0};
+ SDecoder coder = {0};
+ int32_t code = TSDB_CODE_SUCCESS;
+ SRequestObj* pRequest = NULL;
+ SQuery* pQuery = NULL;
+ SArray* pArray = NULL;
+ SVgDataBlocks* pVgData = NULL;
+
+ code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
+
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ if (!pRequest->pDb) {
+ code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
+ goto end;
+ }
+ // decode and process req
+ void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
+ int32_t len = metaLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+ if (tDecodeSVAlterTbReq(&coder, &req) < 0) {
+ code = TSDB_CODE_INVALID_PARA;
+ goto end;
+ }
+
+ // do not deal TSDB_ALTER_TABLE_UPDATE_OPTIONS
+ if (req.action == TSDB_ALTER_TABLE_UPDATE_OPTIONS) {
+ goto end;
+ }
+
+ STscObj* pTscObj = pRequest->pTscObj;
+ SCatalog* pCatalog = NULL;
+ code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
+ .requestId = pRequest->requestId,
+ .requestObjRefId = pRequest->self,
+ .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
+
+ SVgroupInfo pInfo = {0};
+ SName pName = {0};
+ toName(pTscObj->acctId, pRequest->pDb, req.tbName, &pName);
+ code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ pArray = taosArrayInit(1, sizeof(void*));
+ if (NULL == pArray) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+
+ pVgData = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
+ if (NULL == pVgData) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ pVgData->vg = pInfo;
+ pVgData->pData = taosMemoryMalloc(metaLen);
+ if (NULL == pVgData->pData) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ memcpy(pVgData->pData, meta, metaLen);
+ ((SMsgHead*)pVgData->pData)->vgId = htonl(pInfo.vgId);
+ pVgData->size = metaLen;
+ pVgData->numOfTables = 1;
+ taosArrayPush(pArray, &pVgData);
+
+ pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
+ pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
+ pQuery->msgType = TDMT_VND_ALTER_TABLE;
+ pQuery->stableQuery = false;
+ pQuery->pRoot = nodesMakeNode(QUERY_NODE_ALTER_TABLE_STMT);
+
+ code = rewriteToVnodeModifyOpStmt(pQuery, pArray);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ launchQueryImpl(pRequest, pQuery, true, NULL);
+
+ pVgData = NULL;
+ pArray = NULL;
+ code = pRequest->code;
+ if (code == TSDB_CODE_VND_TABLE_NOT_EXIST) {
+ code = TSDB_CODE_SUCCESS;
+ }
+
+ if (pRequest->code == TSDB_CODE_SUCCESS) {
+ SExecResult* pRes = &pRequest->body.resInfo.execRes;
+ if (pRes->res != NULL) {
+ code = handleAlterTbExecRes(pRes->res, pCatalog);
+ }
+ }
+end:
+ taosArrayDestroy(pArray);
+ if (pVgData) taosMemoryFreeClear(pVgData->pData);
+ taosMemoryFreeClear(pVgData);
+ destroyRequest(pRequest);
+ tDecoderClear(&coder);
+ qDestroyQuery(pQuery);
+ return code;
+}
+
+typedef struct {
+ SVgroupInfo vg;
+ void* data;
+} VgData;
+
+static void destroyVgHash(void* data) {
+ VgData* vgData = (VgData*)data;
+ taosMemoryFreeClear(vgData->data);
+}
+
+int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ STableMeta* pTableMeta = NULL;
+ SQuery* pQuery = NULL;
+
+ SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT);
+ if (!pRequest) {
+ uError("WriteRaw:createRequest error request is null");
+ code = terrno;
+ goto end;
+ }
+
+ if (!pRequest->pDb) {
+ uError("WriteRaw:not use db");
+ code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
+ goto end;
+ }
+
+ SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
+ strcpy(pName.dbname, pRequest->pDb);
+ strcpy(pName.tname, tbname);
+
+ struct SCatalog* pCatalog = NULL;
+ code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
+ if (code != TSDB_CODE_SUCCESS) {
+    uError("WriteRaw: get catalog error");
+ goto end;
+ }
+
+ SRequestConnInfo conn = {0};
+ conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter;
+ conn.requestId = pRequest->requestId;
+ conn.requestObjRefId = pRequest->self;
+ conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
+
+ SVgroupInfo vgData = {0};
+ code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vgData);
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbname);
+ goto end;
+ }
+
+ code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbname);
+ goto end;
+ }
+ uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
+ uint64_t uid = pTableMeta->uid;
+ int32_t numOfCols = pTableMeta->tableInfo.numOfColumns;
+
+ uint16_t fLen = 0;
+ int32_t rowSize = 0;
+ int16_t nVar = 0;
+ for (int i = 0; i < numOfCols; i++) {
+ SSchema* schema = pTableMeta->schema + i;
+ fLen += TYPE_BYTES[schema->type];
+ rowSize += schema->bytes;
+ if (IS_VAR_DATA_TYPE(schema->type)) {
+ nVar++;
+ }
+ }
+
+ int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
+ (int32_t)TD_BITMAP_BYTES(numOfCols - 1);
+ int32_t schemaLen = 0;
+ int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
+
+ int32_t totalLen = sizeof(SSubmitReq) + submitLen;
+ SSubmitReq* subReq = taosMemoryCalloc(1, totalLen);
+ SSubmitBlk* blk = POINTER_SHIFT(subReq, sizeof(SSubmitReq));
+ void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk));
+ STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
+
+ SRowBuilder rb = {0};
+ tdSRowInit(&rb, pTableMeta->sversion);
+ tdSRowSetTpInfo(&rb, numOfCols, fLen);
+ int32_t dataLen = 0;
+
+ char* pStart = pData + getVersion1BlockMetaSize(pData, numOfCols);
+ int32_t* colLength = (int32_t*)pStart;
+ pStart += sizeof(int32_t) * numOfCols;
+
+ SResultColumn* pCol = taosMemoryCalloc(numOfCols, sizeof(SResultColumn));
+
+ for (int32_t i = 0; i < numOfCols; ++i) {
+ if (IS_VAR_DATA_TYPE(pTableMeta->schema[i].type)) {
+ pCol[i].offset = (int32_t*)pStart;
+ pStart += rows * sizeof(int32_t);
+ } else {
+ pCol[i].nullbitmap = pStart;
+ pStart += BitmapLen(rows);
+ }
+
+ pCol[i].pData = pStart;
+ pStart += colLength[i];
+ }
+
+ for (int32_t j = 0; j < rows; j++) {
+ tdSRowResetBuf(&rb, rowData);
+ int32_t offset = 0;
+ for (int32_t k = 0; k < numOfCols; k++) {
+ const SSchema* pColumn = &pTableMeta->schema[k];
+
+ if (IS_VAR_DATA_TYPE(pColumn->type)) {
+ if (pCol[k].offset[j] != -1) {
+ char* data = pCol[k].pData + pCol[k].offset[j];
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
+ } else {
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
+ }
+ } else {
+ if (!colDataIsNull_f(pCol[k].nullbitmap, j)) {
+ char* data = pCol[k].pData + pColumn->bytes * j;
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
+ } else {
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
+ }
+ }
+
+ offset += TYPE_BYTES[pColumn->type];
+ }
+ tdSRowEnd(&rb);
+ int32_t rowLen = TD_ROW_LEN(rowData);
+ rowData = POINTER_SHIFT(rowData, rowLen);
+ dataLen += rowLen;
+ }
+
+ taosMemoryFree(pCol);
+
+ blk->uid = htobe64(uid);
+ blk->suid = htobe64(suid);
+ blk->sversion = htonl(pTableMeta->sversion);
+ blk->schemaLen = htonl(schemaLen);
+ blk->numOfRows = htonl(rows);
+ blk->dataLen = htonl(dataLen);
+ subReq->length = sizeof(SSubmitReq) + sizeof(SSubmitBlk) + schemaLen + dataLen;
+ subReq->numOfBlocks = 1;
+
+ pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
+ if (NULL == pQuery) {
+ uError("create SQuery error");
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
+ pQuery->haveResultSet = false;
+ pQuery->msgType = TDMT_VND_SUBMIT;
+ pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
+ if (NULL == pQuery->pRoot) {
+ uError("create pQuery->pRoot error");
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ SVnodeModifOpStmt* nodeStmt = (SVnodeModifOpStmt*)(pQuery->pRoot);
+ nodeStmt->payloadType = PAYLOAD_TYPE_KV;
+ nodeStmt->pDataBlocks = taosArrayInit(1, POINTER_BYTES);
+
+ SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
+ if (NULL == dst) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto end;
+ }
+ dst->vg = vgData;
+ dst->numOfTables = subReq->numOfBlocks;
+ dst->size = subReq->length;
+ dst->pData = (char*)subReq;
+ subReq->header.vgId = htonl(dst->vg.vgId);
+ subReq->version = htonl(1);
+ subReq->header.contLen = htonl(subReq->length);
+ subReq->length = htonl(subReq->length);
+ subReq->numOfBlocks = htonl(subReq->numOfBlocks);
+ subReq = NULL; // no need free
+ taosArrayPush(nodeStmt->pDataBlocks, &dst);
+
+ launchQueryImpl(pRequest, pQuery, true, NULL);
+ code = pRequest->code;
+
+end:
+ taosMemoryFreeClear(pTableMeta);
+ qDestroyQuery(pQuery);
+ return code;
+}
+
+static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ SHashObj* pVgHash = NULL;
+ SQuery* pQuery = NULL;
+ SMqRspObj rspObj = {0};
+ SDecoder decoder = {0};
+
+ terrno = TSDB_CODE_SUCCESS;
+ SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT);
+ if (!pRequest) {
+ uError("WriteRaw:createRequest error request is null");
+ return terrno;
+ }
+
+ rspObj.resIter = -1;
+ rspObj.resType = RES_TYPE__TMQ;
+
+ tDecoderInit(&decoder, data, dataLen);
+ code = tDecodeSMqDataRsp(&decoder, &rspObj.rsp);
+ if (code != 0) {
+ uError("WriteRaw:decode smqDataRsp error");
+ code = TSDB_CODE_INVALID_MSG;
+ goto end;
+ }
+
+ if (!pRequest->pDb) {
+ uError("WriteRaw:not use db");
+ code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
+ goto end;
+ }
+
+ pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
+ taosHashSetFreeFp(pVgHash, destroyVgHash);
+ struct SCatalog* pCatalog = NULL;
+ code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
+ if (code != TSDB_CODE_SUCCESS) {
+    uError("WriteRaw: get catalog error");
+ goto end;
+ }
+
+ SRequestConnInfo conn = {0};
+ conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter;
+ conn.requestId = pRequest->requestId;
+ conn.requestObjRefId = pRequest->self;
+ conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
+
+ printf("raw data block num:%d\n", rspObj.rsp.blockNum);
+ while (++rspObj.resIter < rspObj.rsp.blockNum) {
+ SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
+ if (!rspObj.rsp.withSchema) {
+ uError("WriteRaw:no schema, iter:%d", rspObj.resIter);
+ goto end;
+ }
+ SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.rsp.blockSchema, rspObj.resIter);
+ setResSchemaInfo(&rspObj.resInfo, pSW->pSchema, pSW->nCols);
+
+ code = setQueryResultFromRsp(&rspObj.resInfo, pRetrieve, false, false);
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw: setQueryResultFromRsp error");
+ goto end;
+ }
+
+ uint16_t fLen = 0;
+ int32_t rowSize = 0;
+ int16_t nVar = 0;
+ for (int i = 0; i < pSW->nCols; i++) {
+ SSchema* schema = pSW->pSchema + i;
+ fLen += TYPE_BYTES[schema->type];
+ rowSize += schema->bytes;
+ if (IS_VAR_DATA_TYPE(schema->type)) {
+ nVar++;
+ }
+ }
+
+ int32_t rows = rspObj.resInfo.numOfRows;
+ int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
+ (int32_t)TD_BITMAP_BYTES(pSW->nCols - 1);
+ int32_t schemaLen = 0;
+ int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
+
+ const char* tbName = (const char*)taosArrayGetP(rspObj.rsp.blockTbName, rspObj.resIter);
+ if (!tbName) {
+ uError("WriteRaw: tbname is null");
+ code = TSDB_CODE_TMQ_INVALID_MSG;
+ goto end;
+ }
+
+ printf("raw data tbname:%s\n", tbName);
+ SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
+ strcpy(pName.dbname, pRequest->pDb);
+ strcpy(pName.tname, tbName);
+
+ VgData vgData = {0};
+ code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &(vgData.vg));
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbName);
+ goto end;
+ }
+
+ SSubmitReq* subReq = NULL;
+ SSubmitBlk* blk = NULL;
+ void* hData = taosHashGet(pVgHash, &vgData.vg.vgId, sizeof(vgData.vg.vgId));
+ if (hData) {
+ vgData = *(VgData*)hData;
+
+ int32_t totalLen = ((SSubmitReq*)(vgData.data))->length + submitLen;
+ void* tmp = taosMemoryRealloc(vgData.data, totalLen);
+ if (tmp == NULL) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto end;
+ }
+ vgData.data = tmp;
+ ((VgData*)hData)->data = tmp;
+ subReq = (SSubmitReq*)(vgData.data);
+ blk = POINTER_SHIFT(vgData.data, subReq->length);
+ } else {
+ int32_t totalLen = sizeof(SSubmitReq) + submitLen;
+ void* tmp = taosMemoryCalloc(1, totalLen);
+ if (tmp == NULL) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto end;
+ }
+ vgData.data = tmp;
+ taosHashPut(pVgHash, (const char*)&vgData.vg.vgId, sizeof(vgData.vg.vgId), (char*)&vgData, sizeof(vgData));
+ subReq = (SSubmitReq*)(vgData.data);
+ subReq->length = sizeof(SSubmitReq);
+ subReq->numOfBlocks = 0;
+
+ blk = POINTER_SHIFT(vgData.data, sizeof(SSubmitReq));
+ }
+
+ STableMeta* pTableMeta = NULL;
+ code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbName);
+ goto end;
+ }
+ uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
+ uint64_t uid = pTableMeta->uid;
+ taosMemoryFreeClear(pTableMeta);
+
+ void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk));
+ STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
+
+ SRowBuilder rb = {0};
+ tdSRowInit(&rb, pSW->version);
+ tdSRowSetTpInfo(&rb, pSW->nCols, fLen);
+ int32_t dataLen = 0;
+
+ for (int32_t j = 0; j < rows; j++) {
+ tdSRowResetBuf(&rb, rowData);
+
+ doSetOneRowPtr(&rspObj.resInfo);
+ rspObj.resInfo.current += 1;
+
+ int32_t offset = 0;
+ for (int32_t k = 0; k < pSW->nCols; k++) {
+ const SSchema* pColumn = &pSW->pSchema[k];
+ char* data = rspObj.resInfo.row[k];
+ if (!data) {
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
+ } else {
+ if (IS_VAR_DATA_TYPE(pColumn->type)) {
+ data -= VARSTR_HEADER_SIZE;
+ }
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
+ }
+ offset += TYPE_BYTES[pColumn->type];
+ }
+ tdSRowEnd(&rb);
+ int32_t rowLen = TD_ROW_LEN(rowData);
+ rowData = POINTER_SHIFT(rowData, rowLen);
+ dataLen += rowLen;
+ }
+
+ blk->uid = htobe64(uid);
+ blk->suid = htobe64(suid);
+ blk->sversion = htonl(pSW->version);
+ blk->schemaLen = htonl(schemaLen);
+ blk->numOfRows = htonl(rows);
+ blk->dataLen = htonl(dataLen);
+ subReq->length += sizeof(SSubmitBlk) + schemaLen + dataLen;
+ subReq->numOfBlocks++;
+ }
+
+ pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
+ if (NULL == pQuery) {
+ uError("create SQuery error");
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
+ pQuery->haveResultSet = false;
+ pQuery->msgType = TDMT_VND_SUBMIT;
+ pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
+ if (NULL == pQuery->pRoot) {
+ uError("create pQuery->pRoot error");
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ SVnodeModifOpStmt* nodeStmt = (SVnodeModifOpStmt*)(pQuery->pRoot);
+ nodeStmt->payloadType = PAYLOAD_TYPE_KV;
+
+ int32_t numOfVg = taosHashGetSize(pVgHash);
+ nodeStmt->pDataBlocks = taosArrayInit(numOfVg, POINTER_BYTES);
+
+ VgData* vData = (VgData*)taosHashIterate(pVgHash, NULL);
+ while (vData) {
+ SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
+ if (NULL == dst) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto end;
+ }
+ dst->vg = vData->vg;
+ SSubmitReq* subReq = (SSubmitReq*)(vData->data);
+ dst->numOfTables = subReq->numOfBlocks;
+ dst->size = subReq->length;
+ dst->pData = (char*)subReq;
+ vData->data = NULL; // no need free
+ subReq->header.vgId = htonl(dst->vg.vgId);
+ subReq->version = htonl(1);
+ subReq->header.contLen = htonl(subReq->length);
+ subReq->length = htonl(subReq->length);
+ subReq->numOfBlocks = htonl(subReq->numOfBlocks);
+ taosArrayPush(nodeStmt->pDataBlocks, &dst);
+ vData = (VgData*)taosHashIterate(pVgHash, vData);
+ }
+
+ launchQueryImpl(pRequest, pQuery, true, NULL);
+ code = pRequest->code;
+
+end:
+ tDecoderClear(&decoder);
+ qDestroyQuery(pQuery);
+ destroyRequest(pRequest);
+ taosHashCleanup(pVgHash);
+ return code;
+}
+
+char* tmq_get_json_meta(TAOS_RES* res) {
+ if (!TD_RES_TMQ_META(res)) {
+ return NULL;
+ }
+
+ SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
+ if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_CREATE_STB) {
+ return processCreateStb(&pMetaRspObj->metaRsp);
+ } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_ALTER_STB) {
+ return processAlterStb(&pMetaRspObj->metaRsp);
+ } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DROP_STB) {
+ return processDropSTable(&pMetaRspObj->metaRsp);
+ } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_CREATE_TABLE) {
+ return processCreateTable(&pMetaRspObj->metaRsp);
+ } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_ALTER_TABLE) {
+ return processAlterTable(&pMetaRspObj->metaRsp);
+ } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DROP_TABLE) {
+ return processDropTable(&pMetaRspObj->metaRsp);
+ }
+ return NULL;
+}
+
+void tmq_free_json_meta(char* jsonMeta) { taosMemoryFreeClear(jsonMeta); }
+
+int32_t tmq_get_raw(TAOS_RES* res, tmq_raw_data* raw) {
+ if (!raw || !res) {
+ return TSDB_CODE_INVALID_PARA;
+ }
+ if (TD_RES_TMQ_META(res)) {
+ SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
+ raw->raw = pMetaRspObj->metaRsp.metaRsp;
+ raw->raw_len = pMetaRspObj->metaRsp.metaRspLen;
+ raw->raw_type = pMetaRspObj->metaRsp.resMsgType;
+ } else if (TD_RES_TMQ(res)) {
+ SMqRspObj* rspObj = ((SMqRspObj*)res);
+
+ int32_t len = 0;
+ int32_t code = 0;
+ tEncodeSize(tEncodeSMqDataRsp, &rspObj->rsp, len, code);
+ if (code < 0) {
+ return -1;
+ }
+
+ void* buf = taosMemoryCalloc(1, len);
+ SEncoder encoder = {0};
+ tEncoderInit(&encoder, buf, len);
+ tEncodeSMqDataRsp(&encoder, &rspObj->rsp);
+ tEncoderClear(&encoder);
+
+ raw->raw = buf;
+ raw->raw_len = len;
+ raw->raw_type = RES_TYPE__TMQ;
+ } else {
+ return TSDB_CODE_TMQ_INVALID_MSG;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+void tmq_free_raw(tmq_raw_data raw) {
+ if (raw.raw_type == RES_TYPE__TMQ) {
+ taosMemoryFree(raw.raw);
+ }
+}
+
+int32_t tmq_write_raw(TAOS* taos, tmq_raw_data raw) {
+ if (!taos) {
+ return TSDB_CODE_INVALID_PARA;
+ }
+
+ if (raw.raw_type == TDMT_VND_CREATE_STB) {
+ return taosCreateStb(taos, raw.raw, raw.raw_len);
+ } else if (raw.raw_type == TDMT_VND_ALTER_STB) {
+ return taosCreateStb(taos, raw.raw, raw.raw_len);
+ } else if (raw.raw_type == TDMT_VND_DROP_STB) {
+ return taosDropStb(taos, raw.raw, raw.raw_len);
+ } else if (raw.raw_type == TDMT_VND_CREATE_TABLE) {
+ return taosCreateTable(taos, raw.raw, raw.raw_len);
+ } else if (raw.raw_type == TDMT_VND_ALTER_TABLE) {
+ return taosAlterTable(taos, raw.raw, raw.raw_len);
+ } else if (raw.raw_type == TDMT_VND_DROP_TABLE) {
+ return taosDropTable(taos, raw.raw, raw.raw_len);
+ } else if (raw.raw_type == TDMT_VND_DELETE) {
+ return taosDeleteData(taos, raw.raw, raw.raw_len);
+ } else if (raw.raw_type == RES_TYPE__TMQ) {
+ return tmqWriteRaw(taos, raw.raw, raw.raw_len);
+ }
+ return TSDB_CODE_INVALID_PARA;
+}
diff --git a/source/client/src/tmq.c b/source/client/src/tmq.c
index ea7f03a416..7637ffbc80 100644
--- a/source/client/src/tmq.c
+++ b/source/client/src/tmq.c
@@ -28,8 +28,9 @@
int32_t tmqAskEp(tmq_t* tmq, bool async);
typedef struct {
- int8_t inited;
- tmr_h timer;
+ int8_t inited;
+ tmr_h timer;
+ int32_t rsetId;
} SMqMgmt;
static SMqMgmt tmqMgmt = {0};
@@ -55,8 +56,8 @@ struct tmq_conf_t {
int8_t autoCommit;
int8_t resetOffset;
int8_t withTbName;
- int8_t ssEnable;
- int32_t ssBatchSize;
+ int8_t snapEnable;
+ int32_t snapBatchSize;
bool hbBgEnable;
@@ -70,6 +71,7 @@ struct tmq_conf_t {
};
struct tmq_t {
+ int64_t refId;
// conf
char groupId[TSDB_CGROUP_LEN];
char clientId[256];
@@ -146,8 +148,8 @@ typedef struct {
typedef struct {
// subscribe info
- char* topicName;
- char db[TSDB_DB_FNAME_LEN];
+ char topicName[TSDB_TOPIC_FNAME_LEN];
+ char db[TSDB_DB_FNAME_LEN];
SArray* vgs; // SArray
@@ -166,29 +168,32 @@ typedef struct {
} SMqPollRspWrapper;
typedef struct {
- tmq_t* tmq;
+ int64_t refId;
+ int32_t epoch;
tsem_t rspSem;
int32_t rspErr;
} SMqSubscribeCbParam;
typedef struct {
- tmq_t* tmq;
+ int64_t refId;
+ int32_t epoch;
int32_t code;
int32_t async;
tsem_t rspSem;
} SMqAskEpCbParam;
typedef struct {
- tmq_t* tmq;
+ int64_t refId;
+ int32_t epoch;
SMqClientVg* pVg;
SMqClientTopic* pTopic;
- int32_t epoch;
int32_t vgId;
tsem_t rspSem;
} SMqPollCbParam;
typedef struct {
- tmq_t* tmq;
+ int64_t refId;
+ int32_t epoch;
int8_t automatic;
int8_t async;
int32_t waitingRspNum;
@@ -282,16 +287,21 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
if (strcmp(key, "experimental.snapshot.enable") == 0) {
if (strcmp(value, "true") == 0) {
- conf->ssEnable = true;
+ conf->snapEnable = true;
return TMQ_CONF_OK;
} else if (strcmp(value, "false") == 0) {
- conf->ssEnable = false;
+ conf->snapEnable = false;
return TMQ_CONF_OK;
} else {
return TMQ_CONF_INVALID;
}
}
+ if (strcmp(key, "experimental.snapshot.batch.size") == 0) {
+ conf->snapBatchSize = atoi(value);
+ return TMQ_CONF_OK;
+ }
+
if (strcmp(key, "enable.heartbeat.background") == 0) {
if (strcmp(value, "true") == 0) {
conf->hbBgEnable = true;
@@ -305,11 +315,6 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
return TMQ_CONF_OK;
}
- if (strcmp(key, "experimental.snapshot.batch.size") == 0) {
- conf->ssBatchSize = atoi(value);
- return TMQ_CONF_OK;
- }
-
if (strcmp(key, "td.connect.ip") == 0) {
conf->ip = strdup(value);
return TMQ_CONF_OK;
@@ -369,6 +374,38 @@ static int32_t tmqMakeTopicVgKey(char* dst, const char* topicName, int32_t vg) {
return sprintf(dst, "%s:%d", topicName, vg);
}
+int32_t tmqCommitDone(SMqCommitCbParamSet* pParamSet) {
+ tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, pParamSet->refId);
+ if (tmq == NULL) {
+ if (!pParamSet->async) {
+ tsem_destroy(&pParamSet->rspSem);
+ }
+ taosMemoryFree(pParamSet);
+ terrno = TSDB_CODE_TMQ_CONSUMER_CLOSED;
+ return -1;
+ }
+
+ // if no more waiting rsp
+ if (pParamSet->async) {
+ // call async cb func
+ if (pParamSet->automatic && tmq->commitCb) {
+ tmq->commitCb(tmq, pParamSet->rspErr, tmq->commitCbUserParam);
+ } else if (!pParamSet->automatic && pParamSet->userCb) {
+ // sem post
+ pParamSet->userCb(tmq, pParamSet->rspErr, pParamSet->userParam);
+ }
+ taosMemoryFree(pParamSet);
+ } else {
+ tsem_post(&pParamSet->rspSem);
+ }
+
+#if 0
+ taosArrayDestroyP(pParamSet->successfulOffsets, taosMemoryFree);
+ taosArrayDestroyP(pParamSet->failedOffsets, taosMemoryFree);
+#endif
+ return 0;
+}
+
int32_t tmqCommitCb(void* param, SDataBuf* pBuf, int32_t code) {
SMqCommitCbParam* pParam = (SMqCommitCbParam*)param;
SMqCommitCbParamSet* pParamSet = (SMqCommitCbParamSet*)pParam->params;
@@ -381,6 +418,9 @@ int32_t tmqCommitCb(void* param, SDataBuf* pBuf, int32_t code) {
}
#endif
+ taosMemoryFree(pParam->pOffset);
+ if (pBuf->pData) taosMemoryFree(pBuf->pData);
+
/*tscDebug("receive offset commit cb of %s on vgId:%d, offset is %" PRId64, pParam->pOffset->subKey, pParam->->vgId,
* pOffset->version);*/
@@ -389,23 +429,7 @@ int32_t tmqCommitCb(void* param, SDataBuf* pBuf, int32_t code) {
ASSERT(waitingRspNum >= 0);
if (waitingRspNum == 0) {
- // if no more waiting rsp
- if (pParamSet->async) {
- // call async cb func
- if (pParamSet->automatic && pParamSet->tmq->commitCb) {
- pParamSet->tmq->commitCb(pParamSet->tmq, pParamSet->rspErr, pParamSet->tmq->commitCbUserParam);
- } else if (!pParamSet->automatic && pParamSet->userCb) {
- // sem post
- pParamSet->userCb(pParamSet->tmq, pParamSet->rspErr, pParamSet->userParam);
- }
- } else {
- tsem_post(&pParamSet->rspSem);
- }
-
-#if 0
- taosArrayDestroyP(pParamSet->successfulOffsets, taosMemoryFree);
- taosArrayDestroyP(pParamSet->failedOffsets, taosMemoryFree);
-#endif
+ tmqCommitDone(pParamSet);
}
return 0;
}
@@ -499,7 +523,8 @@ int32_t tmqCommitMsgImpl(tmq_t* tmq, const TAOS_RES* msg, int8_t async, tmq_comm
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
- pParamSet->tmq = tmq;
+ pParamSet->refId = tmq->refId;
+ pParamSet->epoch = tmq->epoch;
pParamSet->automatic = 0;
pParamSet->async = async;
pParamSet->userCb = userCb;
@@ -560,13 +585,19 @@ int32_t tmqCommitInner(tmq_t* tmq, const TAOS_RES* msg, int8_t automatic, int8_t
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
- pParamSet->tmq = tmq;
+
+ pParamSet->refId = tmq->refId;
+ pParamSet->epoch = tmq->epoch;
+
pParamSet->automatic = automatic;
pParamSet->async = async;
pParamSet->userCb = userCb;
pParamSet->userParam = userParam;
tsem_init(&pParamSet->rspSem, 0, 0);
+ // init as 1 to prevent concurrency issue
+ pParamSet->waitingRspNum = 1;
+
for (int32_t i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) {
SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
@@ -595,10 +626,17 @@ int32_t tmqCommitInner(tmq_t* tmq, const TAOS_RES* msg, int8_t automatic, int8_t
return 0;
}
+ int32_t waitingRspNum = atomic_sub_fetch_32(&pParamSet->waitingRspNum, 1);
+ ASSERT(waitingRspNum >= 0);
+ if (waitingRspNum == 0) {
+ tmqCommitDone(pParamSet);
+ }
+
if (!async) {
tsem_wait(&pParamSet->rspSem);
code = pParamSet->rspErr;
tsem_destroy(&pParamSet->rspSem);
+ taosMemoryFree(pParamSet);
} else {
code = 0;
}
@@ -622,27 +660,39 @@ int32_t tmqCommitInner(tmq_t* tmq, const TAOS_RES* msg, int8_t automatic, int8_t
}
void tmqAssignAskEpTask(void* param, void* tmrId) {
- tmq_t* tmq = (tmq_t*)param;
- int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
- *pTaskType = TMQ_DELAYED_TASK__ASK_EP;
- taosWriteQitem(tmq->delayedTask, pTaskType);
- tsem_post(&tmq->rspSem);
+ int64_t refId = *(int64_t*)param;
+ tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId);
+ if (tmq != NULL) {
+ int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
+ *pTaskType = TMQ_DELAYED_TASK__ASK_EP;
+ taosWriteQitem(tmq->delayedTask, pTaskType);
+ tsem_post(&tmq->rspSem);
+ }
+ taosMemoryFree(param);
}
void tmqAssignDelayedCommitTask(void* param, void* tmrId) {
- tmq_t* tmq = (tmq_t*)param;
- int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
- *pTaskType = TMQ_DELAYED_TASK__COMMIT;
- taosWriteQitem(tmq->delayedTask, pTaskType);
- tsem_post(&tmq->rspSem);
+ int64_t refId = *(int64_t*)param;
+ tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId);
+ if (tmq != NULL) {
+ int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
+ *pTaskType = TMQ_DELAYED_TASK__COMMIT;
+ taosWriteQitem(tmq->delayedTask, pTaskType);
+ tsem_post(&tmq->rspSem);
+ }
+ taosMemoryFree(param);
}
void tmqAssignDelayedReportTask(void* param, void* tmrId) {
- tmq_t* tmq = (tmq_t*)param;
- int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
- *pTaskType = TMQ_DELAYED_TASK__REPORT;
- taosWriteQitem(tmq->delayedTask, pTaskType);
- tsem_post(&tmq->rspSem);
+ int64_t refId = *(int64_t*)param;
+ tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId);
+ if (tmq != NULL) {
+ int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
+ *pTaskType = TMQ_DELAYED_TASK__REPORT;
+ taosWriteQitem(tmq->delayedTask, pTaskType);
+ tsem_post(&tmq->rspSem);
+ }
+ taosMemoryFree(param);
}
int32_t tmqHbCb(void* param, SDataBuf* pMsg, int32_t code) {
@@ -651,8 +701,11 @@ int32_t tmqHbCb(void* param, SDataBuf* pMsg, int32_t code) {
}
void tmqSendHbReq(void* param, void* tmrId) {
- // TODO replace with ref
- tmq_t* tmq = (tmq_t*)param;
+ int64_t refId = *(int64_t*)param;
+ tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId);
+ if (tmq == NULL) {
+ return;
+ }
int64_t consumerId = tmq->consumerId;
int32_t epoch = tmq->epoch;
SMqHbReq* pReq = taosMemoryMalloc(sizeof(SMqHbReq));
@@ -682,7 +735,7 @@ void tmqSendHbReq(void* param, void* tmrId) {
asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo);
OVER:
- taosTmrReset(tmqSendHbReq, 1000, tmq, tmqMgmt.timer, &tmq->hbLiveTimer);
+ taosTmrReset(tmqSendHbReq, 1000, param, tmqMgmt.timer, &tmq->hbLiveTimer);
}
int32_t tmqHandleAllDelayedTask(tmq_t* tmq) {
@@ -695,10 +748,18 @@ int32_t tmqHandleAllDelayedTask(tmq_t* tmq) {
if (*pTaskType == TMQ_DELAYED_TASK__ASK_EP) {
tmqAskEp(tmq, true);
- taosTmrReset(tmqAssignAskEpTask, 1000, tmq, tmqMgmt.timer, &tmq->epTimer);
+
+ int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t));
+ *pRefId = tmq->refId;
+
+ taosTmrReset(tmqAssignAskEpTask, 1000, pRefId, tmqMgmt.timer, &tmq->epTimer);
} else if (*pTaskType == TMQ_DELAYED_TASK__COMMIT) {
tmqCommitInner(tmq, NULL, 1, 1, tmq->commitCb, tmq->commitCbUserParam);
- taosTmrReset(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, tmq, tmqMgmt.timer, &tmq->commitTimer);
+
+ int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t));
+ *pRefId = tmq->refId;
+
+ taosTmrReset(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, pRefId, tmqMgmt.timer, &tmq->commitTimer);
} else if (*pTaskType == TMQ_DELAYED_TASK__REPORT) {
} else {
ASSERT(0);
@@ -733,7 +794,6 @@ void tmqClearUnhandleMsg(tmq_t* tmq) {
int32_t tmqSubscribeCb(void* param, SDataBuf* pMsg, int32_t code) {
SMqSubscribeCbParam* pParam = (SMqSubscribeCbParam*)param;
pParam->rspErr = code;
- /*tmq_t* tmq = pParam->tmq;*/
tsem_post(&pParam->rspSem);
return 0;
}
@@ -756,40 +816,27 @@ int32_t tmq_unsubscribe(tmq_t* tmq) {
return rsp;
}
-#if 0
-tmq_t* tmq_consumer_new(void* conn, tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
- tmq_t* pTmq = taosMemoryCalloc(sizeof(tmq_t), 1);
- if (pTmq == NULL) {
- return NULL;
+void tmqFreeImpl(void* handle) {
+ tmq_t* tmq = (tmq_t*)handle;
+
+ // TODO stop timer
+ if (tmq->mqueue) taosCloseQueue(tmq->mqueue);
+ if (tmq->delayedTask) taosCloseQueue(tmq->delayedTask);
+ if (tmq->qall) taosFreeQall(tmq->qall);
+
+ tsem_destroy(&tmq->rspSem);
+
+ int32_t sz = taosArrayGetSize(tmq->clientTopics);
+ for (int32_t i = 0; i < sz; i++) {
+ SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
+ if (pTopic->schema.nCols) taosMemoryFree(pTopic->schema.pSchema);
+ int32_t vgSz = taosArrayGetSize(pTopic->vgs);
+ taosArrayDestroy(pTopic->vgs);
}
- pTmq->pTscObj = (STscObj*)conn;
- pTmq->status = 0;
- pTmq->pollCnt = 0;
- pTmq->epoch = 0;
- pTmq->epStatus = 0;
- pTmq->epSkipCnt = 0;
- // set conf
- strcpy(pTmq->clientId, conf->clientId);
- strcpy(pTmq->groupId, conf->groupId);
- pTmq->autoCommit = conf->autoCommit;
- pTmq->commit_cb = conf->commit_cb;
- pTmq->resetOffsetCfg = conf->resetOffset;
-
- pTmq->consumerId = generateRequestId() & (((uint64_t)-1) >> 1);
- pTmq->clientTopics = taosArrayInit(0, sizeof(SMqClientTopic));
- if (pTmq->clientTopics == NULL) {
- taosMemoryFree(pTmq);
- return NULL;
- }
-
- pTmq->mqueue = taosOpenQueue();
- pTmq->qall = taosAllocateQall();
-
- tsem_init(&pTmq->rspSem, 0, 0);
-
- return pTmq;
+ taosArrayDestroy(tmq->clientTopics);
+ taos_close_internal(tmq->pTscObj);
+ taosMemoryFree(tmq);
}
-#endif
tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
// init timer
@@ -801,6 +848,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
+ tmqMgmt.rsetId = taosOpenRef(10000, tmqFreeImpl);
}
tmq_t* pTmq = taosMemoryCalloc(1, sizeof(tmq_t));
@@ -841,7 +889,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
strcpy(pTmq->clientId, conf->clientId);
strcpy(pTmq->groupId, conf->groupId);
pTmq->withTbName = conf->withTbName;
- pTmq->useSnapshot = conf->ssEnable;
+ pTmq->useSnapshot = conf->snapEnable;
pTmq->autoCommit = conf->autoCommit;
pTmq->autoCommitInterval = conf->autoCommitInterval;
pTmq->commitCb = conf->commitCb;
@@ -869,8 +917,17 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
goto FAIL;
}
+ pTmq->refId = taosAddRef(tmqMgmt.rsetId, pTmq);
+ if (pTmq->refId < 0) {
+ tmqFreeImpl(pTmq);
+ return NULL;
+ }
+
+ int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t));
+ *pRefId = pTmq->refId;
+
if (pTmq->hbBgEnable) {
- pTmq->hbLiveTimer = taosTmrStart(tmqSendHbReq, 1000, pTmq, tmqMgmt.timer);
+ pTmq->hbLiveTimer = taosTmrStart(tmqSendHbReq, 1000, pRefId, tmqMgmt.timer);
}
tscInfo("consumer %" PRId64 " is setup, consumer group %s", pTmq->consumerId, pTmq->groupId);
@@ -928,7 +985,8 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
SMqSubscribeCbParam param = {
.rspErr = 0,
- .tmq = tmq,
+ .refId = tmq->refId,
+ .epoch = tmq->epoch,
};
if (tsem_init(¶m.rspSem, 0, 0) != 0) goto FAIL;
@@ -970,12 +1028,16 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
// init ep timer
if (tmq->epTimer == NULL) {
- tmq->epTimer = taosTmrStart(tmqAssignAskEpTask, 1000, tmq, tmqMgmt.timer);
+ int64_t* pRefId1 = taosMemoryMalloc(sizeof(int64_t));
+ *pRefId1 = tmq->refId;
+ tmq->epTimer = taosTmrStart(tmqAssignAskEpTask, 1000, pRefId1, tmqMgmt.timer);
}
// init auto commit timer
if (tmq->autoCommit && tmq->commitTimer == NULL) {
- tmq->commitTimer = taosTmrStart(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, tmq, tmqMgmt.timer);
+ int64_t* pRefId2 = taosMemoryMalloc(sizeof(int64_t));
+ *pRefId2 = tmq->refId;
+ tmq->commitTimer = taosTmrStart(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, pRefId2, tmqMgmt.timer);
}
code = 0;
@@ -997,9 +1059,18 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
SMqPollCbParam* pParam = (SMqPollCbParam*)param;
SMqClientVg* pVg = pParam->pVg;
SMqClientTopic* pTopic = pParam->pTopic;
- tmq_t* tmq = pParam->tmq;
- int32_t vgId = pParam->vgId;
- int32_t epoch = pParam->epoch;
+
+ tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, pParam->refId);
+ if (tmq == NULL) {
+ tsem_destroy(&pParam->rspSem);
+ taosMemoryFree(pParam);
+ taosMemoryFree(pMsg->pData);
+ terrno = TSDB_CODE_TMQ_CONSUMER_CLOSED;
+ return -1;
+ }
+
+ int32_t epoch = pParam->epoch;
+ int32_t vgId = pParam->vgId;
taosMemoryFree(pParam);
if (code != 0) {
tscWarn("msg discard from vgId:%d, epoch %d, since %s", vgId, epoch, terrstr());
@@ -1124,7 +1195,7 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
SMqClientTopic topic = {0};
SMqSubTopicEp* pTopicEp = taosArrayGet(pRsp->topics, i);
topic.schema = pTopicEp->schema;
- topic.topicName = strdup(pTopicEp->topic);
+ tstrncpy(topic.topicName, pTopicEp->topic, TSDB_TOPIC_FNAME_LEN);
tstrncpy(topic.db, pTopicEp->db, TSDB_DB_FNAME_LEN);
tscDebug("consumer:%" PRId64 ", update topic: %s", tmq->consumerId, topic.topicName);
@@ -1153,7 +1224,16 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
}
taosArrayPush(newTopics, &topic);
}
- if (tmq->clientTopics) taosArrayDestroy(tmq->clientTopics);
+ if (tmq->clientTopics) {
+ int32_t sz = taosArrayGetSize(tmq->clientTopics);
+ for (int32_t i = 0; i < sz; i++) {
+ SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
+ if (pTopic->schema.nCols) taosMemoryFree(pTopic->schema.pSchema);
+ int32_t vgSz = taosArrayGetSize(pTopic->vgs);
+ taosArrayDestroy(pTopic->vgs);
+ }
+ taosArrayDestroy(tmq->clientTopics);
+ }
taosHashCleanup(pHash);
tmq->clientTopics = newTopics;
@@ -1168,8 +1248,20 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
int32_t tmqAskEpCb(void* param, SDataBuf* pMsg, int32_t code) {
SMqAskEpCbParam* pParam = (SMqAskEpCbParam*)param;
- tmq_t* tmq = pParam->tmq;
int8_t async = pParam->async;
+ tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, pParam->refId);
+
+ if (tmq == NULL) {
+ if (!async) {
+ tsem_destroy(&pParam->rspSem);
+ } else {
+ taosMemoryFree(pParam);
+ }
+ taosMemoryFree(pMsg->pData);
+ terrno = TSDB_CODE_TMQ_CONSUMER_CLOSED;
+ return -1;
+ }
+
pParam->code = code;
if (code != 0) {
tscError("consumer:%" PRId64 ", get topic endpoint error, not ready, wait:%d", tmq->consumerId, pParam->async);
@@ -1216,6 +1308,7 @@ END:
} else {
taosMemoryFree(pParam);
}
+ taosMemoryFree(pMsg->pData);
return code;
}
@@ -1248,7 +1341,8 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) {
/*atomic_store_8(&tmq->epStatus, 0);*/
return -1;
}
- pParam->tmq = tmq;
+ pParam->refId = tmq->refId;
+ pParam->epoch = tmq->epoch;
pParam->async = async;
tsem_init(&pParam->rspSem, 0, 0);
@@ -1288,31 +1382,6 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) {
return code;
}
-#if 0
-int32_t tmq_seek(tmq_t* tmq, const tmq_topic_vgroup_t* offset) {
- const SMqOffset* pOffset = &offset->offset;
- if (strcmp(pOffset->cgroup, tmq->groupId) != 0) {
- return TMQ_RESP_ERR__FAIL;
- }
- int32_t sz = taosArrayGetSize(tmq->clientTopics);
- for (int32_t i = 0; i < sz; i++) {
- SMqClientTopic* clientTopic = taosArrayGet(tmq->clientTopics, i);
- if (strcmp(clientTopic->topicName, pOffset->topicName) == 0) {
- int32_t vgSz = taosArrayGetSize(clientTopic->vgs);
- for (int32_t j = 0; j < vgSz; j++) {
- SMqClientVg* pVg = taosArrayGet(clientTopic->vgs, j);
- if (pVg->vgId == pOffset->vgId) {
- pVg->currentOffset = pOffset->offset;
- tmqClearUnhandleMsg(tmq);
- return TMQ_RESP_ERR__SUCCESS;
- }
- }
- }
- }
- return TMQ_RESP_ERR__FAIL;
-}
-#endif
-
SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t timeout, SMqClientTopic* pTopic, SMqClientVg* pVg) {
SMqPollReq* pReq = taosMemoryCalloc(1, sizeof(SMqPollReq));
if (pReq == NULL) {
@@ -1406,11 +1475,12 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
tsem_post(&tmq->rspSem);
return -1;
}
- pParam->tmq = tmq;
+ pParam->refId = tmq->refId;
+ pParam->epoch = tmq->epoch;
+
pParam->pVg = pVg;
pParam->pTopic = pTopic;
pParam->vgId = pVg->vgId;
- pParam->epoch = tmq->epoch;
SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
if (sendInfo == NULL) {
@@ -1550,7 +1620,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
}
#endif
- // in no topic status also need process delayed task
+ // in no topic status, delayed task also need to be processed
if (atomic_load_8(&tmq->status) == TMQ_CONSUMER_STATUS__INIT) {
return NULL;
}
@@ -1615,7 +1685,7 @@ int32_t tmq_consumer_close(tmq_t* tmq) {
/*return rsp;*/
return 0;
}
- // TODO: free resources
+ taosRemoveRef(tmqMgmt.rsetId, tmq->refId);
return 0;
}
@@ -1691,1610 +1761,6 @@ const char* tmq_get_table_name(TAOS_RES* res) {
return NULL;
}
-static char* buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* schemaTag, char* name, int64_t id,
- int8_t t) {
- char* string = NULL;
- cJSON* json = cJSON_CreateObject();
- if (json == NULL) {
- return string;
- }
- cJSON* type = cJSON_CreateString("create");
- cJSON_AddItemToObject(json, "type", type);
-
- // char uid[32] = {0};
- // sprintf(uid, "%"PRIi64, id);
- // cJSON* id_ = cJSON_CreateString(uid);
- // cJSON_AddItemToObject(json, "id", id_);
- cJSON* tableName = cJSON_CreateString(name);
- cJSON_AddItemToObject(json, "tableName", tableName);
- cJSON* tableType = cJSON_CreateString(t == TSDB_NORMAL_TABLE ? "normal" : "super");
- cJSON_AddItemToObject(json, "tableType", tableType);
- // cJSON* version = cJSON_CreateNumber(1);
- // cJSON_AddItemToObject(json, "version", version);
-
- cJSON* columns = cJSON_CreateArray();
- for (int i = 0; i < schemaRow->nCols; i++) {
- cJSON* column = cJSON_CreateObject();
- SSchema* s = schemaRow->pSchema + i;
- cJSON* cname = cJSON_CreateString(s->name);
- cJSON_AddItemToObject(column, "name", cname);
- cJSON* ctype = cJSON_CreateNumber(s->type);
- cJSON_AddItemToObject(column, "type", ctype);
- if (s->type == TSDB_DATA_TYPE_BINARY) {
- int32_t length = s->bytes - VARSTR_HEADER_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(column, "length", cbytes);
- } else if (s->type == TSDB_DATA_TYPE_NCHAR) {
- int32_t length = (s->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(column, "length", cbytes);
- }
- cJSON_AddItemToArray(columns, column);
- }
- cJSON_AddItemToObject(json, "columns", columns);
-
- cJSON* tags = cJSON_CreateArray();
- for (int i = 0; schemaTag && i < schemaTag->nCols; i++) {
- cJSON* tag = cJSON_CreateObject();
- SSchema* s = schemaTag->pSchema + i;
- cJSON* tname = cJSON_CreateString(s->name);
- cJSON_AddItemToObject(tag, "name", tname);
- cJSON* ttype = cJSON_CreateNumber(s->type);
- cJSON_AddItemToObject(tag, "type", ttype);
- if (s->type == TSDB_DATA_TYPE_BINARY) {
- int32_t length = s->bytes - VARSTR_HEADER_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(tag, "length", cbytes);
- } else if (s->type == TSDB_DATA_TYPE_NCHAR) {
- int32_t length = (s->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(tag, "length", cbytes);
- }
- cJSON_AddItemToArray(tags, tag);
- }
- cJSON_AddItemToObject(json, "tags", tags);
-
- string = cJSON_PrintUnformatted(json);
- cJSON_Delete(json);
- return string;
-}
-
-static char* buildAlterSTableJson(void* alterData, int32_t alterDataLen) {
- SMAlterStbReq req = {0};
- cJSON* json = NULL;
- char* string = NULL;
-
- if (tDeserializeSMAlterStbReq(alterData, alterDataLen, &req) != 0) {
- goto end;
- }
-
- json = cJSON_CreateObject();
- if (json == NULL) {
- goto end;
- }
- cJSON* type = cJSON_CreateString("alter");
- cJSON_AddItemToObject(json, "type", type);
- // cJSON* uid = cJSON_CreateNumber(id);
- // cJSON_AddItemToObject(json, "uid", uid);
- SName name = {0};
- tNameFromString(&name, req.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
- cJSON* tableName = cJSON_CreateString(name.tname);
- cJSON_AddItemToObject(json, "tableName", tableName);
- cJSON* tableType = cJSON_CreateString("super");
- cJSON_AddItemToObject(json, "tableType", tableType);
-
- cJSON* alterType = cJSON_CreateNumber(req.alterType);
- cJSON_AddItemToObject(json, "alterType", alterType);
- switch (req.alterType) {
- case TSDB_ALTER_TABLE_ADD_TAG:
- case TSDB_ALTER_TABLE_ADD_COLUMN: {
- TAOS_FIELD* field = taosArrayGet(req.pFields, 0);
- cJSON* colName = cJSON_CreateString(field->name);
- cJSON_AddItemToObject(json, "colName", colName);
- cJSON* colType = cJSON_CreateNumber(field->type);
- cJSON_AddItemToObject(json, "colType", colType);
-
- if (field->type == TSDB_DATA_TYPE_BINARY) {
- int32_t length = field->bytes - VARSTR_HEADER_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(json, "colLength", cbytes);
- } else if (field->type == TSDB_DATA_TYPE_NCHAR) {
- int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(json, "colLength", cbytes);
- }
- break;
- }
- case TSDB_ALTER_TABLE_DROP_TAG:
- case TSDB_ALTER_TABLE_DROP_COLUMN: {
- TAOS_FIELD* field = taosArrayGet(req.pFields, 0);
- cJSON* colName = cJSON_CreateString(field->name);
- cJSON_AddItemToObject(json, "colName", colName);
- break;
- }
- case TSDB_ALTER_TABLE_UPDATE_TAG_BYTES:
- case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES: {
- TAOS_FIELD* field = taosArrayGet(req.pFields, 0);
- cJSON* colName = cJSON_CreateString(field->name);
- cJSON_AddItemToObject(json, "colName", colName);
- cJSON* colType = cJSON_CreateNumber(field->type);
- cJSON_AddItemToObject(json, "colType", colType);
- if (field->type == TSDB_DATA_TYPE_BINARY) {
- int32_t length = field->bytes - VARSTR_HEADER_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(json, "colLength", cbytes);
- } else if (field->type == TSDB_DATA_TYPE_NCHAR) {
- int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(json, "colLength", cbytes);
- }
- break;
- }
- case TSDB_ALTER_TABLE_UPDATE_TAG_NAME:
- case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME: {
- TAOS_FIELD* oldField = taosArrayGet(req.pFields, 0);
- TAOS_FIELD* newField = taosArrayGet(req.pFields, 1);
- cJSON* colName = cJSON_CreateString(oldField->name);
- cJSON_AddItemToObject(json, "colName", colName);
- cJSON* colNewName = cJSON_CreateString(newField->name);
- cJSON_AddItemToObject(json, "colNewName", colNewName);
- break;
- }
- default:
- break;
- }
- string = cJSON_PrintUnformatted(json);
-
-end:
- cJSON_Delete(json);
- tFreeSMAltertbReq(&req);
- return string;
-}
-
-static char* processCreateStb(SMqMetaRsp* metaRsp) {
- SVCreateStbReq req = {0};
- SDecoder coder;
- char* string = NULL;
-
- // decode and process req
- void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
- int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
- tDecoderInit(&coder, data, len);
-
- if (tDecodeSVCreateStbReq(&coder, &req) < 0) {
- goto _err;
- }
- string = buildCreateTableJson(&req.schemaRow, &req.schemaTag, req.name, req.suid, TSDB_SUPER_TABLE);
- tDecoderClear(&coder);
- return string;
-
-_err:
- tDecoderClear(&coder);
- return string;
-}
-
-static char* processAlterStb(SMqMetaRsp* metaRsp) {
- SVCreateStbReq req = {0};
- SDecoder coder;
- char* string = NULL;
-
- // decode and process req
- void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
- int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
- tDecoderInit(&coder, data, len);
-
- if (tDecodeSVCreateStbReq(&coder, &req) < 0) {
- goto _err;
- }
- string = buildAlterSTableJson(req.alterOriData, req.alterOriDataLen);
- tDecoderClear(&coder);
- return string;
-
-_err:
- tDecoderClear(&coder);
- return string;
-}
-
-static char* buildCreateCTableJson(STag* pTag, char* sname, char* name, SArray* tagName, int64_t id, uint8_t tagNum) {
- char* string = NULL;
- SArray* pTagVals = NULL;
- cJSON* json = cJSON_CreateObject();
- if (json == NULL) {
- return string;
- }
- cJSON* type = cJSON_CreateString("create");
- cJSON_AddItemToObject(json, "type", type);
- // char cid[32] = {0};
- // sprintf(cid, "%"PRIi64, id);
- // cJSON* cid_ = cJSON_CreateString(cid);
- // cJSON_AddItemToObject(json, "id", cid_);
-
- cJSON* tableName = cJSON_CreateString(name);
- cJSON_AddItemToObject(json, "tableName", tableName);
- cJSON* tableType = cJSON_CreateString("child");
- cJSON_AddItemToObject(json, "tableType", tableType);
- cJSON* using = cJSON_CreateString(sname);
- cJSON_AddItemToObject(json, "using", using);
- cJSON* tagNumJson = cJSON_CreateNumber(tagNum);
- cJSON_AddItemToObject(json, "tagNum", tagNumJson);
- // cJSON* version = cJSON_CreateNumber(1);
- // cJSON_AddItemToObject(json, "version", version);
-
- cJSON* tags = cJSON_CreateArray();
- int32_t code = tTagToValArray(pTag, &pTagVals);
- if (code) {
- goto end;
- }
-
- if (tTagIsJson(pTag)) {
- STag* p = (STag*)pTag;
- if (p->nTag == 0) {
- goto end;
- }
- char* pJson = parseTagDatatoJson(pTag);
- cJSON* tag = cJSON_CreateObject();
- STagVal* pTagVal = taosArrayGet(pTagVals, 0);
-
- char* ptname = taosArrayGet(tagName, 0);
- cJSON* tname = cJSON_CreateString(ptname);
- cJSON_AddItemToObject(tag, "name", tname);
- // cJSON* cid_ = cJSON_CreateString("");
- // cJSON_AddItemToObject(tag, "cid", cid_);
- cJSON* ttype = cJSON_CreateNumber(TSDB_DATA_TYPE_JSON);
- cJSON_AddItemToObject(tag, "type", ttype);
- cJSON* tvalue = cJSON_CreateString(pJson);
- cJSON_AddItemToObject(tag, "value", tvalue);
- cJSON_AddItemToArray(tags, tag);
- taosMemoryFree(pJson);
- goto end;
- }
-
- for (int i = 0; i < taosArrayGetSize(pTagVals); i++) {
- STagVal* pTagVal = (STagVal*)taosArrayGet(pTagVals, i);
-
- cJSON* tag = cJSON_CreateObject();
-
- char* ptname = taosArrayGet(tagName, i);
- cJSON* tname = cJSON_CreateString(ptname);
- cJSON_AddItemToObject(tag, "name", tname);
- // cJSON* cid = cJSON_CreateNumber(pTagVal->cid);
- // cJSON_AddItemToObject(tag, "cid", cid);
- cJSON* ttype = cJSON_CreateNumber(pTagVal->type);
- cJSON_AddItemToObject(tag, "type", ttype);
-
- cJSON* tvalue = NULL;
- if (IS_VAR_DATA_TYPE(pTagVal->type)) {
- char* buf = taosMemoryCalloc(pTagVal->nData + 3, 1);
- if (!buf) goto end;
- dataConverToStr(buf, pTagVal->type, pTagVal->pData, pTagVal->nData, NULL);
- tvalue = cJSON_CreateString(buf);
- taosMemoryFree(buf);
- } else {
- double val = 0;
- GET_TYPED_DATA(val, double, pTagVal->type, &pTagVal->i64);
- tvalue = cJSON_CreateNumber(val);
- }
-
- cJSON_AddItemToObject(tag, "value", tvalue);
- cJSON_AddItemToArray(tags, tag);
- }
-
-end:
- cJSON_AddItemToObject(json, "tags", tags);
- string = cJSON_PrintUnformatted(json);
- cJSON_Delete(json);
- taosArrayDestroy(pTagVals);
- return string;
-}
-
-static char* processCreateTable(SMqMetaRsp* metaRsp) {
- SDecoder decoder = {0};
- SVCreateTbBatchReq req = {0};
- SVCreateTbReq* pCreateReq;
- char* string = NULL;
- // decode
- void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
- int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
- tDecoderInit(&decoder, data, len);
- if (tDecodeSVCreateTbBatchReq(&decoder, &req) < 0) {
- goto _exit;
- }
-
- // loop to create table
- for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
- pCreateReq = req.pReqs + iReq;
- if (pCreateReq->type == TSDB_CHILD_TABLE) {
- string = buildCreateCTableJson((STag*)pCreateReq->ctb.pTag, pCreateReq->ctb.name, pCreateReq->name,
- pCreateReq->ctb.tagName, pCreateReq->uid, pCreateReq->ctb.tagNum);
- } else if (pCreateReq->type == TSDB_NORMAL_TABLE) {
- string =
- buildCreateTableJson(&pCreateReq->ntb.schemaRow, NULL, pCreateReq->name, pCreateReq->uid, TSDB_NORMAL_TABLE);
- }
- }
-
- tDecoderClear(&decoder);
-
-_exit:
- tDecoderClear(&decoder);
- return string;
-}
-
-static char* processAlterTable(SMqMetaRsp* metaRsp) {
- SDecoder decoder = {0};
- SVAlterTbReq vAlterTbReq = {0};
- char* string = NULL;
-
- // decode
- void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
- int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
- tDecoderInit(&decoder, data, len);
- if (tDecodeSVAlterTbReq(&decoder, &vAlterTbReq) < 0) {
- goto _exit;
- }
-
- cJSON* json = cJSON_CreateObject();
- if (json == NULL) {
- goto _exit;
- }
- cJSON* type = cJSON_CreateString("alter");
- cJSON_AddItemToObject(json, "type", type);
- // cJSON* uid = cJSON_CreateNumber(id);
- // cJSON_AddItemToObject(json, "uid", uid);
- cJSON* tableName = cJSON_CreateString(vAlterTbReq.tbName);
- cJSON_AddItemToObject(json, "tableName", tableName);
- cJSON* tableType = cJSON_CreateString(vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_TAG_VAL ? "child" : "normal");
- cJSON_AddItemToObject(json, "tableType", tableType);
- cJSON* alterType = cJSON_CreateNumber(vAlterTbReq.action);
- cJSON_AddItemToObject(json, "alterType", alterType);
-
- switch (vAlterTbReq.action) {
- case TSDB_ALTER_TABLE_ADD_COLUMN: {
- cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
- cJSON_AddItemToObject(json, "colName", colName);
- cJSON* colType = cJSON_CreateNumber(vAlterTbReq.type);
- cJSON_AddItemToObject(json, "colType", colType);
-
- if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY) {
- int32_t length = vAlterTbReq.bytes - VARSTR_HEADER_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(json, "colLength", cbytes);
- } else if (vAlterTbReq.type == TSDB_DATA_TYPE_NCHAR) {
- int32_t length = (vAlterTbReq.bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(json, "colLength", cbytes);
- }
- break;
- }
- case TSDB_ALTER_TABLE_DROP_COLUMN: {
- cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
- cJSON_AddItemToObject(json, "colName", colName);
- break;
- }
- case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES: {
- cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
- cJSON_AddItemToObject(json, "colName", colName);
- cJSON* colType = cJSON_CreateNumber(vAlterTbReq.colModType);
- cJSON_AddItemToObject(json, "colType", colType);
- if (vAlterTbReq.colModType == TSDB_DATA_TYPE_BINARY) {
- int32_t length = vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(json, "colLength", cbytes);
- } else if (vAlterTbReq.colModType == TSDB_DATA_TYPE_NCHAR) {
- int32_t length = (vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(json, "colLength", cbytes);
- }
- break;
- }
- case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME: {
- cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
- cJSON_AddItemToObject(json, "colName", colName);
- cJSON* colNewName = cJSON_CreateString(vAlterTbReq.colNewName);
- cJSON_AddItemToObject(json, "colNewName", colNewName);
- break;
- }
- case TSDB_ALTER_TABLE_UPDATE_TAG_VAL: {
- cJSON* tagName = cJSON_CreateString(vAlterTbReq.tagName);
- cJSON_AddItemToObject(json, "colName", tagName);
-
- bool isNull = vAlterTbReq.isNull;
- if (vAlterTbReq.tagType == TSDB_DATA_TYPE_JSON) {
- STag* jsonTag = (STag*)vAlterTbReq.pTagVal;
- if (jsonTag->nTag == 0) isNull = true;
- }
- if (!isNull) {
- char* buf = NULL;
-
- if (vAlterTbReq.tagType == TSDB_DATA_TYPE_JSON) {
- ASSERT(tTagIsJson(vAlterTbReq.pTagVal) == true);
- buf = parseTagDatatoJson(vAlterTbReq.pTagVal);
- } else {
- buf = taosMemoryCalloc(vAlterTbReq.nTagVal + 1, 1);
- dataConverToStr(buf, vAlterTbReq.tagType, vAlterTbReq.pTagVal, vAlterTbReq.nTagVal, NULL);
- }
-
- cJSON* colValue = cJSON_CreateString(buf);
- cJSON_AddItemToObject(json, "colValue", colValue);
- taosMemoryFree(buf);
- }
-
- cJSON* isNullCJson = cJSON_CreateBool(isNull);
- cJSON_AddItemToObject(json, "colValueNull", isNullCJson);
- break;
- }
- default:
- break;
- }
- string = cJSON_PrintUnformatted(json);
-
-_exit:
- tDecoderClear(&decoder);
- return string;
-}
-
-static char* processDropSTable(SMqMetaRsp* metaRsp) {
- SDecoder decoder = {0};
- SVDropStbReq req = {0};
- char* string = NULL;
-
- // decode
- void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
- int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
- tDecoderInit(&decoder, data, len);
- if (tDecodeSVDropStbReq(&decoder, &req) < 0) {
- goto _exit;
- }
-
- cJSON* json = cJSON_CreateObject();
- if (json == NULL) {
- goto _exit;
- }
- cJSON* type = cJSON_CreateString("drop");
- cJSON_AddItemToObject(json, "type", type);
- cJSON* tableName = cJSON_CreateString(req.name);
- cJSON_AddItemToObject(json, "tableName", tableName);
- cJSON* tableType = cJSON_CreateString("super");
- cJSON_AddItemToObject(json, "tableType", tableType);
-
- string = cJSON_PrintUnformatted(json);
-
-_exit:
- tDecoderClear(&decoder);
- return string;
-}
-
-static char* processDropTable(SMqMetaRsp* metaRsp) {
- SDecoder decoder = {0};
- SVDropTbBatchReq req = {0};
- char* string = NULL;
-
- // decode
- void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
- int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
- tDecoderInit(&decoder, data, len);
- if (tDecodeSVDropTbBatchReq(&decoder, &req) < 0) {
- goto _exit;
- }
-
- cJSON* json = cJSON_CreateObject();
- if (json == NULL) {
- goto _exit;
- }
- cJSON* type = cJSON_CreateString("drop");
- cJSON_AddItemToObject(json, "type", type);
- // cJSON* uid = cJSON_CreateNumber(id);
- // cJSON_AddItemToObject(json, "uid", uid);
- // cJSON* tableType = cJSON_CreateString("normal");
- // cJSON_AddItemToObject(json, "tableType", tableType);
-
- cJSON* tableNameList = cJSON_CreateArray();
- for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
- SVDropTbReq* pDropTbReq = req.pReqs + iReq;
-
- cJSON* tableName = cJSON_CreateString(pDropTbReq->name);
- cJSON_AddItemToArray(tableNameList, tableName);
- }
- cJSON_AddItemToObject(json, "tableNameList", tableNameList);
-
- string = cJSON_PrintUnformatted(json);
-
-_exit:
- tDecoderClear(&decoder);
- return string;
-}
-
-static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) {
- SVCreateStbReq req = {0};
- SDecoder coder;
- SMCreateStbReq pReq = {0};
- int32_t code = TSDB_CODE_SUCCESS;
- SRequestObj* pRequest = NULL;
-
- code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- if (!pRequest->pDb) {
- code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
- goto end;
- }
- // decode and process req
- void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
- int32_t len = metaLen - sizeof(SMsgHead);
- tDecoderInit(&coder, data, len);
- if (tDecodeSVCreateStbReq(&coder, &req) < 0) {
- code = TSDB_CODE_INVALID_PARA;
- goto end;
- }
- // build create stable
- pReq.pColumns = taosArrayInit(req.schemaRow.nCols, sizeof(SField));
- for (int32_t i = 0; i < req.schemaRow.nCols; i++) {
- SSchema* pSchema = req.schemaRow.pSchema + i;
- SField field = {.type = pSchema->type, .bytes = pSchema->bytes};
- strcpy(field.name, pSchema->name);
- taosArrayPush(pReq.pColumns, &field);
- }
- pReq.pTags = taosArrayInit(req.schemaTag.nCols, sizeof(SField));
- for (int32_t i = 0; i < req.schemaTag.nCols; i++) {
- SSchema* pSchema = req.schemaTag.pSchema + i;
- SField field = {.type = pSchema->type, .bytes = pSchema->bytes};
- strcpy(field.name, pSchema->name);
- taosArrayPush(pReq.pTags, &field);
- }
-
- pReq.colVer = req.schemaRow.version;
- pReq.tagVer = req.schemaTag.version;
- pReq.numOfColumns = req.schemaRow.nCols;
- pReq.numOfTags = req.schemaTag.nCols;
- pReq.commentLen = -1;
- pReq.suid = req.suid;
- pReq.source = TD_REQ_FROM_TAOX;
- pReq.igExists = true;
-
- STscObj* pTscObj = pRequest->pTscObj;
- SName tableName;
- tNameExtractFullName(toName(pTscObj->acctId, pRequest->pDb, req.name, &tableName), pReq.name);
-
- SCmdMsgInfo pCmdMsg = {0};
- pCmdMsg.epSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
- pCmdMsg.msgType = TDMT_MND_CREATE_STB;
- pCmdMsg.msgLen = tSerializeSMCreateStbReq(NULL, 0, &pReq);
- pCmdMsg.pMsg = taosMemoryMalloc(pCmdMsg.msgLen);
- if (NULL == pCmdMsg.pMsg) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- tSerializeSMCreateStbReq(pCmdMsg.pMsg, pCmdMsg.msgLen, &pReq);
-
- SQuery pQuery = {0};
- pQuery.execMode = QUERY_EXEC_MODE_RPC;
- pQuery.pCmdMsg = &pCmdMsg;
- pQuery.msgType = pQuery.pCmdMsg->msgType;
- pQuery.stableQuery = true;
-
- launchQueryImpl(pRequest, &pQuery, true, NULL);
-
- if (pRequest->code == TSDB_CODE_SUCCESS) {
- SCatalog* pCatalog = NULL;
- catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
- catalogRemoveTableMeta(pCatalog, &tableName);
- }
-
- code = pRequest->code;
- taosMemoryFree(pCmdMsg.pMsg);
-
-end:
- destroyRequest(pRequest);
- tFreeSMCreateStbReq(&pReq);
- tDecoderClear(&coder);
- return code;
-}
-
-static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) {
- SVDropStbReq req = {0};
- SDecoder coder;
- SMDropStbReq pReq = {0};
- int32_t code = TSDB_CODE_SUCCESS;
- SRequestObj* pRequest = NULL;
-
- code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- if (!pRequest->pDb) {
- code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
- goto end;
- }
- // decode and process req
- void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
- int32_t len = metaLen - sizeof(SMsgHead);
- tDecoderInit(&coder, data, len);
- if (tDecodeSVDropStbReq(&coder, &req) < 0) {
- code = TSDB_CODE_INVALID_PARA;
- goto end;
- }
-
- // build drop stable
- pReq.igNotExists = true;
- pReq.source = TD_REQ_FROM_TAOX;
- pReq.suid = req.suid;
-
- STscObj* pTscObj = pRequest->pTscObj;
- SName tableName = {0};
- tNameExtractFullName(toName(pTscObj->acctId, pRequest->pDb, req.name, &tableName), pReq.name);
-
- SCmdMsgInfo pCmdMsg = {0};
- pCmdMsg.epSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
- pCmdMsg.msgType = TDMT_MND_DROP_STB;
- pCmdMsg.msgLen = tSerializeSMDropStbReq(NULL, 0, &pReq);
- pCmdMsg.pMsg = taosMemoryMalloc(pCmdMsg.msgLen);
- if (NULL == pCmdMsg.pMsg) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- tSerializeSMDropStbReq(pCmdMsg.pMsg, pCmdMsg.msgLen, &pReq);
-
- SQuery pQuery = {0};
- pQuery.execMode = QUERY_EXEC_MODE_RPC;
- pQuery.pCmdMsg = &pCmdMsg;
- pQuery.msgType = pQuery.pCmdMsg->msgType;
- pQuery.stableQuery = true;
-
- launchQueryImpl(pRequest, &pQuery, true, NULL);
-
- if (pRequest->code == TSDB_CODE_SUCCESS) {
- SCatalog* pCatalog = NULL;
- catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
- catalogRemoveTableMeta(pCatalog, &tableName);
- }
-
- code = pRequest->code;
- taosMemoryFree(pCmdMsg.pMsg);
-
-end:
- destroyRequest(pRequest);
- tDecoderClear(&coder);
- return code;
-}
-
-typedef struct SVgroupCreateTableBatch {
- SVCreateTbBatchReq req;
- SVgroupInfo info;
- char dbName[TSDB_DB_NAME_LEN];
-} SVgroupCreateTableBatch;
-
-static void destroyCreateTbReqBatch(void* data) {
- SVgroupCreateTableBatch* pTbBatch = (SVgroupCreateTableBatch*)data;
- taosArrayDestroy(pTbBatch->req.pArray);
-}
-
-static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
- SVCreateTbBatchReq req = {0};
- SDecoder coder = {0};
- int32_t code = TSDB_CODE_SUCCESS;
- SRequestObj* pRequest = NULL;
- SQuery* pQuery = NULL;
- SHashObj* pVgroupHashmap = NULL;
-
- code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- if (!pRequest->pDb) {
- code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
- goto end;
- }
- // decode and process req
- void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
- int32_t len = metaLen - sizeof(SMsgHead);
- tDecoderInit(&coder, data, len);
- if (tDecodeSVCreateTbBatchReq(&coder, &req) < 0) {
- code = TSDB_CODE_INVALID_PARA;
- goto end;
- }
-
- STscObj* pTscObj = pRequest->pTscObj;
-
- SVCreateTbReq* pCreateReq = NULL;
- SCatalog* pCatalog = NULL;
- code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- pVgroupHashmap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
- if (NULL == pVgroupHashmap) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- taosHashSetFreeFp(pVgroupHashmap, destroyCreateTbReqBatch);
-
- SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
- .requestId = pRequest->requestId,
- .requestObjRefId = pRequest->self,
- .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
-
- pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName));
- // loop to create table
- for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
- pCreateReq = req.pReqs + iReq;
-
- SVgroupInfo pInfo = {0};
- SName pName = {0};
- toName(pTscObj->acctId, pRequest->pDb, pCreateReq->name, &pName);
- code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
- taosArrayPush(pRequest->tableList, &pName);
-
- SVgroupCreateTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId));
- if (pTableBatch == NULL) {
- SVgroupCreateTableBatch tBatch = {0};
- tBatch.info = pInfo;
- strcpy(tBatch.dbName, pRequest->pDb);
-
- tBatch.req.pArray = taosArrayInit(4, sizeof(struct SVCreateTbReq));
- taosArrayPush(tBatch.req.pArray, pCreateReq);
-
- taosHashPut(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId), &tBatch, sizeof(tBatch));
- } else { // add to the correct vgroup
- taosArrayPush(pTableBatch->req.pArray, pCreateReq);
- }
- }
-
- SArray* pBufArray = serializeVgroupsCreateTableBatch(pVgroupHashmap);
- if (NULL == pBufArray) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
-
- pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
- pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
- pQuery->msgType = TDMT_VND_CREATE_TABLE;
- pQuery->stableQuery = false;
- pQuery->pRoot = nodesMakeNode(QUERY_NODE_CREATE_TABLE_STMT);
-
- code = rewriteToVnodeModifyOpStmt(pQuery, pBufArray);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- launchQueryImpl(pRequest, pQuery, true, NULL);
- if (pRequest->code == TSDB_CODE_SUCCESS) {
- removeMeta(pTscObj, pRequest->tableList);
- }
-
- code = pRequest->code;
-
-end:
- taosHashCleanup(pVgroupHashmap);
- destroyRequest(pRequest);
- tDecoderClear(&coder);
- qDestroyQuery(pQuery);
- return code;
-}
-
-typedef struct SVgroupDropTableBatch {
- SVDropTbBatchReq req;
- SVgroupInfo info;
- char dbName[TSDB_DB_NAME_LEN];
-} SVgroupDropTableBatch;
-
-static void destroyDropTbReqBatch(void* data) {
- SVgroupDropTableBatch* pTbBatch = (SVgroupDropTableBatch*)data;
- taosArrayDestroy(pTbBatch->req.pArray);
-}
-
-static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
- SVDropTbBatchReq req = {0};
- SDecoder coder = {0};
- int32_t code = TSDB_CODE_SUCCESS;
- SRequestObj* pRequest = NULL;
- SQuery* pQuery = NULL;
- SHashObj* pVgroupHashmap = NULL;
-
- code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- if (!pRequest->pDb) {
- code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
- goto end;
- }
- // decode and process req
- void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
- int32_t len = metaLen - sizeof(SMsgHead);
- tDecoderInit(&coder, data, len);
- if (tDecodeSVDropTbBatchReq(&coder, &req) < 0) {
- code = TSDB_CODE_INVALID_PARA;
- goto end;
- }
-
- STscObj* pTscObj = pRequest->pTscObj;
-
- SVDropTbReq* pDropReq = NULL;
- SCatalog* pCatalog = NULL;
- code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- pVgroupHashmap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
- if (NULL == pVgroupHashmap) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- taosHashSetFreeFp(pVgroupHashmap, destroyDropTbReqBatch);
-
- SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
- .requestId = pRequest->requestId,
- .requestObjRefId = pRequest->self,
- .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
- pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName));
- // loop to create table
- for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
- pDropReq = req.pReqs + iReq;
- pDropReq->igNotExists = true;
-
- SVgroupInfo pInfo = {0};
- SName pName = {0};
- toName(pTscObj->acctId, pRequest->pDb, pDropReq->name, &pName);
- code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- taosArrayPush(pRequest->tableList, &pName);
- SVgroupDropTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId));
- if (pTableBatch == NULL) {
- SVgroupDropTableBatch tBatch = {0};
- tBatch.info = pInfo;
- tBatch.req.pArray = taosArrayInit(TARRAY_MIN_SIZE, sizeof(SVDropTbReq));
- taosArrayPush(tBatch.req.pArray, pDropReq);
-
- taosHashPut(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId), &tBatch, sizeof(tBatch));
- } else { // add to the correct vgroup
- taosArrayPush(pTableBatch->req.pArray, pDropReq);
- }
- }
-
- SArray* pBufArray = serializeVgroupsDropTableBatch(pVgroupHashmap);
- if (NULL == pBufArray) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
-
- pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
- pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
- pQuery->msgType = TDMT_VND_DROP_TABLE;
- pQuery->stableQuery = false;
- pQuery->pRoot = nodesMakeNode(QUERY_NODE_DROP_TABLE_STMT);
-
- code = rewriteToVnodeModifyOpStmt(pQuery, pBufArray);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- launchQueryImpl(pRequest, pQuery, true, NULL);
- if (pRequest->code == TSDB_CODE_SUCCESS) {
- removeMeta(pTscObj, pRequest->tableList);
- }
- code = pRequest->code;
-
-end:
- taosHashCleanup(pVgroupHashmap);
- destroyRequest(pRequest);
- tDecoderClear(&coder);
- qDestroyQuery(pQuery);
- return code;
-}
-
-// delete from db.tabl where .. -> delete from tabl where ..
-// delete from db .tabl where .. -> delete from tabl where ..
-// static void getTbName(char *sql){
-// char *ch = sql;
-//
-// bool inBackQuote = false;
-// int8_t dotIndex = 0;
-// while(*ch != '\0'){
-// if(!inBackQuote && *ch == '`'){
-// inBackQuote = true;
-// ch++;
-// continue;
-// }
-//
-// if(inBackQuote && *ch == '`'){
-// inBackQuote = false;
-// ch++;
-//
-// continue;
-// }
-//
-// if(!inBackQuote && *ch == '.'){
-// dotIndex ++;
-// if(dotIndex == 2){
-// memmove(sql, ch + 1, strlen(ch + 1) + 1);
-// break;
-// }
-// }
-// ch++;
-// }
-//}
-
-static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) {
- SDeleteRes req = {0};
- SDecoder coder = {0};
- int32_t code = TSDB_CODE_SUCCESS;
-
- // decode and process req
- void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
- int32_t len = metaLen - sizeof(SMsgHead);
- tDecoderInit(&coder, data, len);
- if (tDecodeDeleteRes(&coder, &req) < 0) {
- code = TSDB_CODE_INVALID_PARA;
- goto end;
- }
-
- // getTbName(req.tableFName);
- char sql[256] = {0};
- sprintf(sql, "delete from `%s` where `%s` >= %" PRId64 " and `%s` <= %" PRId64, req.tableFName, req.tsColName,
- req.skey, req.tsColName, req.ekey);
- printf("delete sql:%s\n", sql);
-
- TAOS_RES* res = taos_query(taos, sql);
- SRequestObj* pRequest = (SRequestObj*)res;
- code = pRequest->code;
- if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
- code = TSDB_CODE_SUCCESS;
- }
- taos_free_result(res);
-
-end:
- tDecoderClear(&coder);
- return code;
-}
-
-static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) {
- SVAlterTbReq req = {0};
- SDecoder coder = {0};
- int32_t code = TSDB_CODE_SUCCESS;
- SRequestObj* pRequest = NULL;
- SQuery* pQuery = NULL;
- SArray* pArray = NULL;
- SVgDataBlocks* pVgData = NULL;
-
- code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
-
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- if (!pRequest->pDb) {
- code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
- goto end;
- }
- // decode and process req
- void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
- int32_t len = metaLen - sizeof(SMsgHead);
- tDecoderInit(&coder, data, len);
- if (tDecodeSVAlterTbReq(&coder, &req) < 0) {
- code = TSDB_CODE_INVALID_PARA;
- goto end;
- }
-
- // do not deal TSDB_ALTER_TABLE_UPDATE_OPTIONS
- if (req.action == TSDB_ALTER_TABLE_UPDATE_OPTIONS) {
- goto end;
- }
-
- STscObj* pTscObj = pRequest->pTscObj;
- SCatalog* pCatalog = NULL;
- code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
- .requestId = pRequest->requestId,
- .requestObjRefId = pRequest->self,
- .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
-
- SVgroupInfo pInfo = {0};
- SName pName = {0};
- toName(pTscObj->acctId, pRequest->pDb, req.tbName, &pName);
- code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- pArray = taosArrayInit(1, sizeof(void*));
- if (NULL == pArray) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
-
- pVgData = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
- if (NULL == pVgData) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- pVgData->vg = pInfo;
- pVgData->pData = taosMemoryMalloc(metaLen);
- if (NULL == pVgData->pData) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- memcpy(pVgData->pData, meta, metaLen);
- ((SMsgHead*)pVgData->pData)->vgId = htonl(pInfo.vgId);
- pVgData->size = metaLen;
- pVgData->numOfTables = 1;
- taosArrayPush(pArray, &pVgData);
-
- pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
- pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
- pQuery->msgType = TDMT_VND_ALTER_TABLE;
- pQuery->stableQuery = false;
- pQuery->pRoot = nodesMakeNode(QUERY_NODE_ALTER_TABLE_STMT);
-
- code = rewriteToVnodeModifyOpStmt(pQuery, pArray);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- launchQueryImpl(pRequest, pQuery, true, NULL);
-
- pVgData = NULL;
- pArray = NULL;
- code = pRequest->code;
- if (code == TSDB_CODE_VND_TABLE_NOT_EXIST) {
- code = TSDB_CODE_SUCCESS;
- }
-
- if (pRequest->code == TSDB_CODE_SUCCESS) {
- SExecResult* pRes = &pRequest->body.resInfo.execRes;
- if (pRes->res != NULL) {
- code = handleAlterTbExecRes(pRes->res, pCatalog);
- }
- }
-end:
- taosArrayDestroy(pArray);
- if (pVgData) taosMemoryFreeClear(pVgData->pData);
- taosMemoryFreeClear(pVgData);
- destroyRequest(pRequest);
- tDecoderClear(&coder);
- qDestroyQuery(pQuery);
- return code;
-}
-
-typedef struct {
- SVgroupInfo vg;
- void* data;
-} VgData;
-
-static void destroyVgHash(void* data) {
- VgData* vgData = (VgData*)data;
- taosMemoryFreeClear(vgData->data);
-}
-
-int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname) {
- int32_t code = TSDB_CODE_SUCCESS;
- STableMeta* pTableMeta = NULL;
- SQuery* pQuery = NULL;
-
- SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT);
- if (!pRequest) {
- uError("WriteRaw:createRequest error request is null");
- code = terrno;
- goto end;
- }
-
- if (!pRequest->pDb) {
- uError("WriteRaw:not use db");
- code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
- goto end;
- }
-
- SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
- strcpy(pName.dbname, pRequest->pDb);
- strcpy(pName.tname, tbname);
-
- struct SCatalog* pCatalog = NULL;
- code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
- if (code != TSDB_CODE_SUCCESS) {
- uError("WriteRaw: get gatlog error");
- goto end;
- }
-
- SRequestConnInfo conn = {0};
- conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter;
- conn.requestId = pRequest->requestId;
- conn.requestObjRefId = pRequest->self;
- conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
-
- SVgroupInfo vgData = {0};
- code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vgData);
- if (code != TSDB_CODE_SUCCESS) {
- uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbname);
- goto end;
- }
-
- code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
- if (code != TSDB_CODE_SUCCESS) {
- uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbname);
- goto end;
- }
- uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
- uint64_t uid = pTableMeta->uid;
- int32_t numOfCols = pTableMeta->tableInfo.numOfColumns;
-
- uint16_t fLen = 0;
- int32_t rowSize = 0;
- int16_t nVar = 0;
- for (int i = 0; i < numOfCols; i++) {
- SSchema* schema = pTableMeta->schema + i;
- fLen += TYPE_BYTES[schema->type];
- rowSize += schema->bytes;
- if (IS_VAR_DATA_TYPE(schema->type)) {
- nVar++;
- }
- }
-
- int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
- (int32_t)TD_BITMAP_BYTES(numOfCols - 1);
- int32_t schemaLen = 0;
- int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
-
- int32_t totalLen = sizeof(SSubmitReq) + submitLen;
- SSubmitReq* subReq = taosMemoryCalloc(1, totalLen);
- SSubmitBlk* blk = POINTER_SHIFT(subReq, sizeof(SSubmitReq));
- void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk));
- STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
-
- SRowBuilder rb = {0};
- tdSRowInit(&rb, pTableMeta->sversion);
- tdSRowSetTpInfo(&rb, numOfCols, fLen);
- int32_t dataLen = 0;
-
- char* pStart = pData + getVersion1BlockMetaSize(pData, numOfCols);
- int32_t* colLength = (int32_t*)pStart;
- pStart += sizeof(int32_t) * numOfCols;
-
- SResultColumn* pCol = taosMemoryCalloc(numOfCols, sizeof(SResultColumn));
-
- for (int32_t i = 0; i < numOfCols; ++i) {
- if (IS_VAR_DATA_TYPE(pTableMeta->schema[i].type)) {
- pCol[i].offset = (int32_t*)pStart;
- pStart += rows * sizeof(int32_t);
- } else {
- pCol[i].nullbitmap = pStart;
- pStart += BitmapLen(rows);
- }
-
- pCol[i].pData = pStart;
- pStart += colLength[i];
- }
-
- for (int32_t j = 0; j < rows; j++) {
- tdSRowResetBuf(&rb, rowData);
- int32_t offset = 0;
- for (int32_t k = 0; k < numOfCols; k++) {
- const SSchema* pColumn = &pTableMeta->schema[k];
-
- if (IS_VAR_DATA_TYPE(pColumn->type)) {
- if (pCol[k].offset[j] != -1) {
- char* data = pCol[k].pData + pCol[k].offset[j];
- tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
- } else {
-
- tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
- }
- } else {
- if (!colDataIsNull_f(pCol[k].nullbitmap, j)) {
- char* data = pCol[k].pData + pColumn->bytes * j;
- tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
- } else {
- tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
- }
- }
-
- offset += TYPE_BYTES[pColumn->type];
- }
- tdSRowEnd(&rb);
- int32_t rowLen = TD_ROW_LEN(rowData);
- rowData = POINTER_SHIFT(rowData, rowLen);
- dataLen += rowLen;
- }
-
- taosMemoryFree(pCol);
-
- blk->uid = htobe64(uid);
- blk->suid = htobe64(suid);
- blk->sversion = htonl(pTableMeta->sversion);
- blk->schemaLen = htonl(schemaLen);
- blk->numOfRows = htonl(rows);
- blk->dataLen = htonl(dataLen);
- subReq->length = sizeof(SSubmitReq) + sizeof(SSubmitBlk) + schemaLen + dataLen;
- subReq->numOfBlocks = 1;
-
- pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
- if (NULL == pQuery) {
- uError("create SQuery error");
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
- pQuery->haveResultSet = false;
- pQuery->msgType = TDMT_VND_SUBMIT;
- pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
- if (NULL == pQuery->pRoot) {
- uError("create pQuery->pRoot error");
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- SVnodeModifOpStmt* nodeStmt = (SVnodeModifOpStmt*)(pQuery->pRoot);
- nodeStmt->payloadType = PAYLOAD_TYPE_KV;
- nodeStmt->pDataBlocks = taosArrayInit(1, POINTER_BYTES);
-
- SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
- if (NULL == dst) {
- code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- goto end;
- }
- dst->vg = vgData;
- dst->numOfTables = subReq->numOfBlocks;
- dst->size = subReq->length;
- dst->pData = (char*)subReq;
- subReq->header.vgId = htonl(dst->vg.vgId);
- subReq->version = htonl(1);
- subReq->header.contLen = htonl(subReq->length);
- subReq->length = htonl(subReq->length);
- subReq->numOfBlocks = htonl(subReq->numOfBlocks);
- subReq = NULL; // no need free
- taosArrayPush(nodeStmt->pDataBlocks, &dst);
-
- launchQueryImpl(pRequest, pQuery, true, NULL);
- code = pRequest->code;
-
-end:
- taosMemoryFreeClear(pTableMeta);
- qDestroyQuery(pQuery);
- return code;
-}
-
-static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
- int32_t code = TSDB_CODE_SUCCESS;
- SHashObj* pVgHash = NULL;
- SQuery* pQuery = NULL;
- SMqRspObj rspObj = {0};
- SDecoder decoder = {0};
-
- terrno = TSDB_CODE_SUCCESS;
- SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT);
- if (!pRequest) {
- uError("WriteRaw:createRequest error request is null");
- return terrno;
- }
-
- rspObj.resIter = -1;
- rspObj.resType = RES_TYPE__TMQ;
-
- tDecoderInit(&decoder, data, dataLen);
- code = tDecodeSMqDataRsp(&decoder, &rspObj.rsp);
- if (code != 0) {
- uError("WriteRaw:decode smqDataRsp error");
- code = TSDB_CODE_INVALID_MSG;
- goto end;
- }
-
- if (!pRequest->pDb) {
- uError("WriteRaw:not use db");
- code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
- goto end;
- }
-
- pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
- taosHashSetFreeFp(pVgHash, destroyVgHash);
- struct SCatalog* pCatalog = NULL;
- code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
- if (code != TSDB_CODE_SUCCESS) {
- uError("WriteRaw: get gatlog error");
- goto end;
- }
-
- SRequestConnInfo conn = {0};
- conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter;
- conn.requestId = pRequest->requestId;
- conn.requestObjRefId = pRequest->self;
- conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
-
- printf("raw data block num:%d\n", rspObj.rsp.blockNum);
- while (++rspObj.resIter < rspObj.rsp.blockNum) {
- SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
- if (!rspObj.rsp.withSchema) {
- uError("WriteRaw:no schema, iter:%d", rspObj.resIter);
- goto end;
- }
- SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.rsp.blockSchema, rspObj.resIter);
- setResSchemaInfo(&rspObj.resInfo, pSW->pSchema, pSW->nCols);
-
- code = setQueryResultFromRsp(&rspObj.resInfo, pRetrieve, false, false);
- if (code != TSDB_CODE_SUCCESS) {
- uError("WriteRaw: setQueryResultFromRsp error");
- goto end;
- }
-
- uint16_t fLen = 0;
- int32_t rowSize = 0;
- int16_t nVar = 0;
- for (int i = 0; i < pSW->nCols; i++) {
- SSchema* schema = pSW->pSchema + i;
- fLen += TYPE_BYTES[schema->type];
- rowSize += schema->bytes;
- if (IS_VAR_DATA_TYPE(schema->type)) {
- nVar++;
- }
- }
-
- int32_t rows = rspObj.resInfo.numOfRows;
- int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
- (int32_t)TD_BITMAP_BYTES(pSW->nCols - 1);
- int32_t schemaLen = 0;
- int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
-
- const char* tbName = (const char*)taosArrayGetP(rspObj.rsp.blockTbName, rspObj.resIter);
- if (!tbName) {
- uError("WriteRaw: tbname is null");
- code = TSDB_CODE_TMQ_INVALID_MSG;
- goto end;
- }
-
- printf("raw data tbname:%s\n", tbName);
- SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
- strcpy(pName.dbname, pRequest->pDb);
- strcpy(pName.tname, tbName);
-
- VgData vgData = {0};
- code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &(vgData.vg));
- if (code != TSDB_CODE_SUCCESS) {
- uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbName);
- goto end;
- }
-
- SSubmitReq* subReq = NULL;
- SSubmitBlk* blk = NULL;
- void* hData = taosHashGet(pVgHash, &vgData.vg.vgId, sizeof(vgData.vg.vgId));
- if (hData) {
- vgData = *(VgData*)hData;
-
- int32_t totalLen = ((SSubmitReq*)(vgData.data))->length + submitLen;
- void* tmp = taosMemoryRealloc(vgData.data, totalLen);
- if (tmp == NULL) {
- code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- goto end;
- }
- vgData.data = tmp;
- ((VgData*)hData)->data = tmp;
- subReq = (SSubmitReq*)(vgData.data);
- blk = POINTER_SHIFT(vgData.data, subReq->length);
- } else {
- int32_t totalLen = sizeof(SSubmitReq) + submitLen;
- void* tmp = taosMemoryCalloc(1, totalLen);
- if (tmp == NULL) {
- code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- goto end;
- }
- vgData.data = tmp;
- taosHashPut(pVgHash, (const char*)&vgData.vg.vgId, sizeof(vgData.vg.vgId), (char*)&vgData, sizeof(vgData));
- subReq = (SSubmitReq*)(vgData.data);
- subReq->length = sizeof(SSubmitReq);
- subReq->numOfBlocks = 0;
-
- blk = POINTER_SHIFT(vgData.data, sizeof(SSubmitReq));
- }
-
- STableMeta* pTableMeta = NULL;
- code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
- if (code != TSDB_CODE_SUCCESS) {
- uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbName);
- goto end;
- }
- uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
- uint64_t uid = pTableMeta->uid;
- taosMemoryFreeClear(pTableMeta);
-
- void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk));
- STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
-
- SRowBuilder rb = {0};
- tdSRowInit(&rb, pSW->version);
- tdSRowSetTpInfo(&rb, pSW->nCols, fLen);
- int32_t dataLen = 0;
-
- for (int32_t j = 0; j < rows; j++) {
- tdSRowResetBuf(&rb, rowData);
-
- doSetOneRowPtr(&rspObj.resInfo);
- rspObj.resInfo.current += 1;
-
- int32_t offset = 0;
- for (int32_t k = 0; k < pSW->nCols; k++) {
- const SSchema* pColumn = &pSW->pSchema[k];
- char* data = rspObj.resInfo.row[k];
- if (!data) {
- tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
- } else {
- if (IS_VAR_DATA_TYPE(pColumn->type)) {
- data -= VARSTR_HEADER_SIZE;
- }
- tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
- }
- offset += TYPE_BYTES[pColumn->type];
- }
- tdSRowEnd(&rb);
- int32_t rowLen = TD_ROW_LEN(rowData);
- rowData = POINTER_SHIFT(rowData, rowLen);
- dataLen += rowLen;
- }
-
- blk->uid = htobe64(uid);
- blk->suid = htobe64(suid);
- blk->sversion = htonl(pSW->version);
- blk->schemaLen = htonl(schemaLen);
- blk->numOfRows = htonl(rows);
- blk->dataLen = htonl(dataLen);
- subReq->length += sizeof(SSubmitBlk) + schemaLen + dataLen;
- subReq->numOfBlocks++;
- }
-
- pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
- if (NULL == pQuery) {
- uError("create SQuery error");
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
- pQuery->haveResultSet = false;
- pQuery->msgType = TDMT_VND_SUBMIT;
- pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
- if (NULL == pQuery->pRoot) {
- uError("create pQuery->pRoot error");
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- SVnodeModifOpStmt* nodeStmt = (SVnodeModifOpStmt*)(pQuery->pRoot);
- nodeStmt->payloadType = PAYLOAD_TYPE_KV;
-
- int32_t numOfVg = taosHashGetSize(pVgHash);
- nodeStmt->pDataBlocks = taosArrayInit(numOfVg, POINTER_BYTES);
-
- VgData* vData = (VgData*)taosHashIterate(pVgHash, NULL);
- while (vData) {
- SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
- if (NULL == dst) {
- code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- goto end;
- }
- dst->vg = vData->vg;
- SSubmitReq* subReq = (SSubmitReq*)(vData->data);
- dst->numOfTables = subReq->numOfBlocks;
- dst->size = subReq->length;
- dst->pData = (char*)subReq;
- vData->data = NULL; // no need free
- subReq->header.vgId = htonl(dst->vg.vgId);
- subReq->version = htonl(1);
- subReq->header.contLen = htonl(subReq->length);
- subReq->length = htonl(subReq->length);
- subReq->numOfBlocks = htonl(subReq->numOfBlocks);
- taosArrayPush(nodeStmt->pDataBlocks, &dst);
- vData = (VgData*)taosHashIterate(pVgHash, vData);
- }
-
- launchQueryImpl(pRequest, pQuery, true, NULL);
- code = pRequest->code;
-
-end:
- tDecoderClear(&decoder);
- taos_free_result(&rspObj);
- qDestroyQuery(pQuery);
- destroyRequest(pRequest);
- taosHashCleanup(pVgHash);
- return code;
-}
-
-char* tmq_get_json_meta(TAOS_RES* res) {
- if (!TD_RES_TMQ_META(res)) {
- return NULL;
- }
-
- SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
- if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_CREATE_STB) {
- return processCreateStb(&pMetaRspObj->metaRsp);
- } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_ALTER_STB) {
- return processAlterStb(&pMetaRspObj->metaRsp);
- } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DROP_STB) {
- return processDropSTable(&pMetaRspObj->metaRsp);
- } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_CREATE_TABLE) {
- return processCreateTable(&pMetaRspObj->metaRsp);
- } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_ALTER_TABLE) {
- return processAlterTable(&pMetaRspObj->metaRsp);
- } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DROP_TABLE) {
- return processDropTable(&pMetaRspObj->metaRsp);
- }
- return NULL;
-}
-
-void tmq_free_json_meta(char* jsonMeta) { taosMemoryFreeClear(jsonMeta); }
-
-int32_t tmq_get_raw(TAOS_RES* res, tmq_raw_data* raw) {
- if (!raw || !res) {
- return TSDB_CODE_INVALID_PARA;
- }
- if (TD_RES_TMQ_META(res)) {
- SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
- raw->raw = pMetaRspObj->metaRsp.metaRsp;
- raw->raw_len = pMetaRspObj->metaRsp.metaRspLen;
- raw->raw_type = pMetaRspObj->metaRsp.resMsgType;
- } else if (TD_RES_TMQ(res)) {
- SMqRspObj* rspObj = ((SMqRspObj*)res);
-
- int32_t len = 0;
- int32_t code = 0;
- tEncodeSize(tEncodeSMqDataRsp, &rspObj->rsp, len, code);
- if (code < 0) {
- return -1;
- }
-
- void* buf = taosMemoryCalloc(1, len);
- SEncoder encoder = {0};
- tEncoderInit(&encoder, buf, len);
- tEncodeSMqDataRsp(&encoder, &rspObj->rsp);
- tEncoderClear(&encoder);
-
- raw->raw = buf;
- raw->raw_len = len;
- raw->raw_type = RES_TYPE__TMQ;
- } else {
- return TSDB_CODE_TMQ_INVALID_MSG;
- }
- return TSDB_CODE_SUCCESS;
-}
-
-void tmq_free_raw(tmq_raw_data raw) {
- if (raw.raw_type == RES_TYPE__TMQ) {
- taosMemoryFree(raw.raw);
- }
-}
-
-int32_t tmq_write_raw(TAOS* taos, tmq_raw_data raw) {
- if (!taos) {
- return TSDB_CODE_INVALID_PARA;
- }
-
- if (raw.raw_type == TDMT_VND_CREATE_STB) {
- return taosCreateStb(taos, raw.raw, raw.raw_len);
- } else if (raw.raw_type == TDMT_VND_ALTER_STB) {
- return taosCreateStb(taos, raw.raw, raw.raw_len);
- } else if (raw.raw_type == TDMT_VND_DROP_STB) {
- return taosDropStb(taos, raw.raw, raw.raw_len);
- } else if (raw.raw_type == TDMT_VND_CREATE_TABLE) {
- return taosCreateTable(taos, raw.raw, raw.raw_len);
- } else if (raw.raw_type == TDMT_VND_ALTER_TABLE) {
- return taosAlterTable(taos, raw.raw, raw.raw_len);
- } else if (raw.raw_type == TDMT_VND_DROP_TABLE) {
- return taosDropTable(taos, raw.raw, raw.raw_len);
- } else if (raw.raw_type == TDMT_VND_DELETE) {
- return taosDeleteData(taos, raw.raw, raw.raw_len);
- } else if (raw.raw_type == RES_TYPE__TMQ) {
- return tmqWriteRaw(taos, raw.raw, raw.raw_len);
- }
- return TSDB_CODE_INVALID_PARA;
-}
-
void tmq_commit_async(tmq_t* tmq, const TAOS_RES* msg, tmq_commit_cb* cb, void* param) {
//
tmqCommitInner(tmq, msg, 0, 1, cb, param);
diff --git a/source/common/CMakeLists.txt b/source/common/CMakeLists.txt
index 1c11ee7085..9c6d941172 100644
--- a/source/common/CMakeLists.txt
+++ b/source/common/CMakeLists.txt
@@ -9,6 +9,11 @@ IF (TD_GRANT)
ADD_DEFINITIONS(-D_GRANT)
ENDIF ()
+IF (TD_STORAGE)
+ ADD_DEFINITIONS(-D_STORAGE)
+ TARGET_LINK_LIBRARIES(common PRIVATE storage)
+ENDIF ()
+
target_include_directories(
common
PUBLIC "${TD_SOURCE_DIR}/include/common"
diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index 8823e63db4..3956b99fdb 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -165,58 +165,11 @@ int32_t tsTtlUnit = 86400;
int32_t tsTtlPushInterval = 86400;
int32_t tsGrantHBInterval = 60;
-void taosAddDataDir(int32_t index, char *v1, int32_t level, int32_t primary) {
- tstrncpy(tsDiskCfg[index].dir, v1, TSDB_FILENAME_LEN);
- tsDiskCfg[index].level = level;
- tsDiskCfg[index].primary = primary;
- uTrace("dataDir:%s, level:%d primary:%d is configured", v1, level, primary);
-}
-
-static int32_t taosSetTfsCfg(SConfig *pCfg) {
- SConfigItem *pItem = cfgGetItem(pCfg, "dataDir");
- memset(tsDataDir, 0, PATH_MAX);
-
- int32_t size = taosArrayGetSize(pItem->array);
- if (size <= 0) {
- tsDiskCfgNum = 1;
- taosAddDataDir(0, pItem->str, 0, 1);
- tstrncpy(tsDataDir, pItem->str, PATH_MAX);
- if (taosMulMkDir(tsDataDir) != 0) {
- uError("failed to create dataDir:%s since %s", tsDataDir, terrstr());
- return -1;
- }
- } else {
- tsDiskCfgNum = size < TFS_MAX_DISKS ? size : TFS_MAX_DISKS;
- for (int32_t index = 0; index < tsDiskCfgNum; ++index) {
- SDiskCfg *pCfg = taosArrayGet(pItem->array, index);
- memcpy(&tsDiskCfg[index], pCfg, sizeof(SDiskCfg));
- if (pCfg->level == 0 && pCfg->primary == 1) {
- tstrncpy(tsDataDir, pCfg->dir, PATH_MAX);
- }
- if (taosMulMkDir(pCfg->dir) != 0) {
- uError("failed to create tfsDir:%s since %s", tsDataDir, terrstr());
- return -1;
- }
- }
- }
-
- if (tsDataDir[0] == 0) {
- if (pItem->str != NULL) {
- taosAddDataDir(tsDiskCfgNum, pItem->str, 0, 1);
- tstrncpy(tsDataDir, pItem->str, PATH_MAX);
- if (taosMulMkDir(tsDataDir) != 0) {
- uError("failed to create tfsDir:%s since %s", tsDataDir, terrstr());
- return -1;
- }
- tsDiskCfgNum++;
- } else {
- uError("datadir not set");
- return -1;
- }
- }
-
- return 0;
-}
+#ifndef _STORAGE
+int32_t taosSetTfsCfg(SConfig *pCfg) { return 0; }
+#else
+int32_t taosSetTfsCfg(SConfig *pCfg);
+#endif
struct SConfig *taosGetCfg() {
return tsCfg;
diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h
index 18a7583f4c..19cafcbbbb 100644
--- a/source/dnode/vnode/inc/vnode.h
+++ b/source/dnode/vnode/inc/vnode.h
@@ -128,19 +128,19 @@ typedef struct STsdbReader STsdbReader;
#define LASTROW_RETRIEVE_TYPE_ALL 0x1
#define LASTROW_RETRIEVE_TYPE_SINGLE 0x2
-int32_t tsdbSetTableId(STsdbReader *pReader, int64_t uid);
-int32_t tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, SArray *pTableList, STsdbReader **ppReader,
- const char *idstr);
-void tsdbReaderClose(STsdbReader *pReader);
-bool tsdbNextDataBlock(STsdbReader *pReader);
-void tsdbRetrieveDataBlockInfo(STsdbReader *pReader, SDataBlockInfo *pDataBlockInfo);
-int32_t tsdbRetrieveDatablockSMA(STsdbReader *pReader, SColumnDataAgg ***pBlockStatis, bool *allHave);
-SArray *tsdbRetrieveDataBlock(STsdbReader *pTsdbReadHandle, SArray *pColumnIdList);
-int32_t tsdbReaderReset(STsdbReader *pReader, SQueryTableDataCond *pCond);
-int32_t tsdbGetFileBlocksDistInfo(STsdbReader *pReader, STableBlockDistInfo *pTableBlockInfo);
-int64_t tsdbGetNumOfRowsInMemTable(STsdbReader *pHandle);
-void *tsdbGetIdx(SMeta *pMeta);
-void *tsdbGetIvtIdx(SMeta *pMeta);
+int32_t tsdbSetTableId(STsdbReader *pReader, int64_t uid);
+int32_t tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, SArray *pTableList, STsdbReader **ppReader,
+ const char *idstr);
+void tsdbReaderClose(STsdbReader *pReader);
+bool tsdbNextDataBlock(STsdbReader *pReader);
+void tsdbRetrieveDataBlockInfo(STsdbReader *pReader, SDataBlockInfo *pDataBlockInfo);
+int32_t tsdbRetrieveDatablockSMA(STsdbReader *pReader, SColumnDataAgg ***pBlockStatis, bool *allHave);
+SArray *tsdbRetrieveDataBlock(STsdbReader *pTsdbReadHandle, SArray *pColumnIdList);
+int32_t tsdbReaderReset(STsdbReader *pReader, SQueryTableDataCond *pCond);
+int32_t tsdbGetFileBlocksDistInfo(STsdbReader *pReader, STableBlockDistInfo *pTableBlockInfo);
+int64_t tsdbGetNumOfRowsInMemTable(STsdbReader *pHandle);
+void *tsdbGetIdx(SMeta *pMeta);
+void *tsdbGetIvtIdx(SMeta *pMeta);
uint64_t getReaderMaxVersion(STsdbReader *pReader);
int32_t tsdbLastRowReaderOpen(void *pVnode, int32_t type, SArray *pTableIdList, int32_t numOfCols, void **pReader);
diff --git a/source/dnode/vnode/src/inc/vnd.h b/source/dnode/vnode/src/inc/vnd.h
index dd1facb462..5164e22474 100644
--- a/source/dnode/vnode/src/inc/vnd.h
+++ b/source/dnode/vnode/src/inc/vnd.h
@@ -80,7 +80,7 @@ int32_t vnodeQueryOpen(SVnode* pVnode);
void vnodeQueryClose(SVnode* pVnode);
int32_t vnodeGetTableMeta(SVnode* pVnode, SRpcMsg* pMsg, bool direct);
int vnodeGetTableCfg(SVnode* pVnode, SRpcMsg* pMsg, bool direct);
-int32_t vnodeGetBatchMeta(SVnode *pVnode, SRpcMsg *pMsg);
+int32_t vnodeGetBatchMeta(SVnode* pVnode, SRpcMsg* pMsg);
// vnodeCommit.c
int32_t vnodeBegin(SVnode* pVnode);
@@ -98,6 +98,8 @@ void vnodeSyncStart(SVnode* pVnode);
void vnodeSyncClose(SVnode* pVnode);
void vnodeRedirectRpcMsg(SVnode* pVnode, SRpcMsg* pMsg);
bool vnodeIsLeader(SVnode* pVnode);
+bool vnodeIsReadyForRead(SVnode* pVnode);
+bool vnodeIsRoleLeader(SVnode* pVnode);
#ifdef __cplusplus
}
diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h
index 02c4129d6f..35c26eac44 100644
--- a/source/dnode/vnode/src/inc/vnodeInt.h
+++ b/source/dnode/vnode/src/inc/vnodeInt.h
@@ -144,6 +144,7 @@ int32_t tsdbDeleteTableData(STsdb* pTsdb, int64_t version, tb_uid_t suid, tb
STsdbReader tsdbQueryCacheLastT(STsdb* tsdb, SQueryTableDataCond* pCond, STableListInfo* tableList, uint64_t qId,
void* pMemRef);
int32_t tsdbSetKeepCfg(STsdb* pTsdb, STsdbCfg* pCfg);
+int32_t tsdbGetStbIdList(SMeta* pMeta, int64_t suid, SArray* list);
// tq
int tqInit();
@@ -169,10 +170,9 @@ int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRecoverRsp(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg);
-int32_t tsdbGetStbIdList(SMeta* pMeta, int64_t suid, SArray* list);
-SSubmitReq* tdBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchema* pSchema, bool createTb, int64_t suid,
- const char* stbFullName, int32_t vgId, SBatchDeleteReq* pDeleteReq);
+SSubmitReq* tqBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchema* pSchema, bool createTb, int64_t suid,
+ const char* stbFullName, SBatchDeleteReq* pDeleteReq);
// sma
int32_t smaInit();
diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c
index 3e7fd9df2b..e56b8ad939 100644
--- a/source/dnode/vnode/src/meta/metaTable.c
+++ b/source/dnode/vnode/src/meta/metaTable.c
@@ -298,14 +298,14 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
tdbTbcClose(pUidIdxc);
terrno = TSDB_CODE_TDB_STB_NOT_EXIST;
- // ASSERT(0);
return -1;
}
ret = tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData);
if (ret < 0) {
+ tdbTbcClose(pUidIdxc);
+
terrno = TSDB_CODE_TDB_STB_NOT_EXIST;
- // ASSERT(0);
return -1;
}
diff --git a/source/dnode/vnode/src/sma/smaTimeRange.c b/source/dnode/vnode/src/sma/smaTimeRange.c
index f46d9dc29c..b09d7e3c23 100644
--- a/source/dnode/vnode/src/sma/smaTimeRange.c
+++ b/source/dnode/vnode/src/sma/smaTimeRange.c
@@ -201,9 +201,8 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
}
SBatchDeleteReq deleteReq;
- SSubmitReq *pSubmitReq =
- tdBlockToSubmit(pSma->pVnode, (const SArray *)msg, pTsmaStat->pTSchema, true, pTsmaStat->pTSma->dstTbUid,
- pTsmaStat->pTSma->dstTbName, pTsmaStat->pTSma->dstVgId, &deleteReq);
+ SSubmitReq *pSubmitReq = tqBlockToSubmit(pSma->pVnode, (const SArray *)msg, pTsmaStat->pTSchema, true,
+ pTsmaStat->pTSma->dstTbUid, pTsmaStat->pTSma->dstTbName, &deleteReq);
if (!pSubmitReq) {
smaError("vgId:%d, failed to gen submit blk while tsma insert for smaIndex %" PRIi64 " since %s", SMA_VID(pSma),
diff --git a/source/dnode/vnode/src/tq/tqPush.c b/source/dnode/vnode/src/tq/tqPush.c
index ae3fef9b4b..ed7fa80c47 100644
--- a/source/dnode/vnode/src/tq/tqPush.c
+++ b/source/dnode/vnode/src/tq/tqPush.c
@@ -14,6 +14,7 @@
*/
#include "tq.h"
+#include "vnd.h"
#if 0
void tqTmrRspFunc(void* param, void* tmrId) {
@@ -212,9 +213,7 @@ int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_
#endif
int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) {
- walApplyVer(pTq->pVnode->pWal, ver);
-
- if (msgType == TDMT_VND_SUBMIT) {
+ if (vnodeIsRoleLeader(pTq->pVnode) && msgType == TDMT_VND_SUBMIT) {
if (taosHashGetSize(pTq->pStreamMeta->pTasks) == 0) return 0;
void* data = taosMemoryMalloc(msgLen);
diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c
index 42fb5c329d..55630511bf 100644
--- a/source/dnode/vnode/src/tq/tqSink.c
+++ b/source/dnode/vnode/src/tq/tqSink.c
@@ -17,7 +17,7 @@
#include "tmsg.h"
#include "tq.h"
-int32_t tdBuildDeleteReq(SVnode* pVnode, const char* stbFullName, const SSDataBlock* pDataBlock,
+int32_t tqBuildDeleteReq(SVnode* pVnode, const char* stbFullName, const SSDataBlock* pDataBlock,
SBatchDeleteReq* deleteReq) {
ASSERT(pDataBlock->info.type == STREAM_DELETE_RESULT);
int32_t totRow = pDataBlock->info.rows;
@@ -25,8 +25,7 @@ int32_t tdBuildDeleteReq(SVnode* pVnode, const char* stbFullName, const SSDataBl
SColumnInfoData* pGidCol = taosArrayGet(pDataBlock->pDataBlock, GROUPID_COLUMN_INDEX);
for (int32_t row = 0; row < totRow; row++) {
int64_t ts = *(int64_t*)colDataGetData(pTsCol, row);
- /*int64_t groupId = *(int64_t*)colDataGetData(pGidCol, row);*/
- int64_t groupId = 0;
+ int64_t groupId = *(int64_t*)colDataGetData(pGidCol, row);
char* name = buildCtbNameByGroupId(stbFullName, groupId);
tqDebug("stream delete msg: groupId :%ld, name: %s", groupId, name);
SMetaReader mr = {0};
@@ -49,8 +48,8 @@ int32_t tdBuildDeleteReq(SVnode* pVnode, const char* stbFullName, const SSDataBl
return 0;
}
-SSubmitReq* tdBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchema* pTSchema, bool createTb,
- int64_t suid, const char* stbFullName, int32_t vgId, SBatchDeleteReq* pDeleteReq) {
+SSubmitReq* tqBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchema* pTSchema, bool createTb,
+ int64_t suid, const char* stbFullName, SBatchDeleteReq* pDeleteReq) {
SSubmitReq* ret = NULL;
SArray* schemaReqs = NULL;
SArray* schemaReqSz = NULL;
@@ -69,9 +68,10 @@ SSubmitReq* tdBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchem
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
if (pDataBlock->info.type == STREAM_DELETE_RESULT) {
int32_t padding1 = 0;
- void* padding2 = taosMemoryMalloc(1);
+ void* padding2 = NULL;
taosArrayPush(schemaReqSz, &padding1);
taosArrayPush(schemaReqs, &padding2);
+ continue;
}
STagVal tagVal = {
@@ -139,8 +139,7 @@ SSubmitReq* tdBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchem
continue;
}
int32_t rows = pDataBlock->info.rows;
- // TODO min
- int32_t rowSize = pDataBlock->info.rowSize;
+ /*int32_t rowSize = pDataBlock->info.rowSize;*/
int32_t maxLen = TD_ROW_MAX_BYTES_FROM_SCHEMA(pTSchema);
int32_t schemaLen = 0;
@@ -151,9 +150,8 @@ SSubmitReq* tdBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchem
}
// assign data
- // TODO
ret = rpcMallocCont(cap);
- ret->header.vgId = vgId;
+ ret->header.vgId = pVnode->config.vgId;
ret->length = sizeof(SSubmitReq);
ret->numOfBlocks = htonl(sz);
@@ -162,13 +160,12 @@ SSubmitReq* tdBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchem
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
if (pDataBlock->info.type == STREAM_DELETE_RESULT) {
pDeleteReq->suid = suid;
- tdBuildDeleteReq(pVnode, stbFullName, pDataBlock, pDeleteReq);
+ tqBuildDeleteReq(pVnode, stbFullName, pDataBlock, pDeleteReq);
continue;
}
blkHead->numOfRows = htonl(pDataBlock->info.rows);
blkHead->sversion = htonl(pTSchema->version);
- // TODO
blkHead->suid = htobe64(suid);
// uid is assigned by vnode
blkHead->uid = 0;
@@ -234,8 +231,8 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {
ASSERT(pTask->tbSink.pTSchema);
deleteReq.deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq));
- SSubmitReq* pReq = tdBlockToSubmit(pVnode, pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid,
- pTask->tbSink.stbFullName, pVnode->config.vgId, &deleteReq);
+ SSubmitReq* pReq = tqBlockToSubmit(pVnode, pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid,
+ pTask->tbSink.stbFullName, &deleteReq);
tqDebug("vgId:%d, task %d convert blocks over, put into write-queue", TD_VID(pVnode), pTask->taskId);
diff --git a/source/dnode/vnode/src/tsdb/tsdbMemTable.c b/source/dnode/vnode/src/tsdb/tsdbMemTable.c
index 8ae0e824cf..6fc6636623 100644
--- a/source/dnode/vnode/src/tsdb/tsdbMemTable.c
+++ b/source/dnode/vnode/src/tsdb/tsdbMemTable.c
@@ -196,9 +196,9 @@ int32_t tsdbDeleteTableData(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid
tsdbCacheDeleteLast(pTsdb->lruCache, pTbData->uid, eKey);
}
- tsdbError("vgId:%d, delete data from table suid:%" PRId64 " uid:%" PRId64 " skey:%" PRId64 " eKey:%" PRId64
- " since %s",
- TD_VID(pTsdb->pVnode), suid, uid, sKey, eKey, tstrerror(code));
+ tsdbInfo("vgId:%d, delete data from table suid:%" PRId64 " uid:%" PRId64 " skey:%" PRId64 " eKey:%" PRId64
+ " since %s",
+ TD_VID(pTsdb->pVnode), suid, uid, sKey, eKey, tstrerror(code));
return code;
_err:
diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c
index d5c5e18668..43c9b4c09f 100644
--- a/source/dnode/vnode/src/vnd/vnodeSvr.c
+++ b/source/dnode/vnode/src/vnd/vnodeSvr.c
@@ -247,6 +247,8 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp
vTrace("vgId:%d, process %s request success, index:%" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType), version);
+ walApplyVer(pVnode->pWal, version);
+
if (tqPushMsg(pVnode->pTq, pMsg->pCont, pMsg->contLen, pMsg->msgType, version) < 0) {
vError("vgId:%d, failed to push msg to TQ since %s", TD_VID(pVnode), tstrerror(terrno));
return -1;
@@ -281,7 +283,7 @@ int32_t vnodePreprocessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
vTrace("message in vnode query queue is processing");
- if ((pMsg->msgType == TDMT_SCH_QUERY) && !vnodeIsLeader(pVnode)) {
+ if ((pMsg->msgType == TDMT_SCH_QUERY) && !vnodeIsReadyForRead(pVnode)) {
vnodeRedirectRpcMsg(pVnode, pMsg);
return 0;
}
@@ -305,7 +307,7 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
vTrace("vgId:%d, msg:%p in fetch queue is processing", pVnode->config.vgId, pMsg);
if ((pMsg->msgType == TDMT_SCH_FETCH || pMsg->msgType == TDMT_VND_TABLE_META || pMsg->msgType == TDMT_VND_TABLE_CFG ||
pMsg->msgType == TDMT_VND_BATCH_META) &&
- !vnodeIsLeader(pVnode)) {
+ !vnodeIsReadyForRead(pVnode)) {
vnodeRedirectRpcMsg(pVnode, pMsg);
return 0;
}
diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c
index 9703ed27ae..65d4e9aaf1 100644
--- a/source/dnode/vnode/src/vnd/vnodeSync.c
+++ b/source/dnode/vnode/src/vnd/vnodeSync.c
@@ -764,6 +764,8 @@ void vnodeSyncStart(SVnode *pVnode) {
void vnodeSyncClose(SVnode *pVnode) { syncStop(pVnode->sync); }
+bool vnodeIsRoleLeader(SVnode *pVnode) { return syncGetMyRole(pVnode->sync) == TAOS_SYNC_STATE_LEADER; }
+
bool vnodeIsLeader(SVnode *pVnode) {
if (!syncIsReady(pVnode->sync)) {
vDebug("vgId:%d, vnode not ready, state:%s, restore:%d", pVnode->config.vgId, syncGetMyRoleStr(pVnode->sync),
@@ -779,3 +781,17 @@ bool vnodeIsLeader(SVnode *pVnode) {
return true;
}
+
+bool vnodeIsReadyForRead(SVnode *pVnode) {
+ if (syncIsReady(pVnode->sync)) {
+ return true;
+ }
+
+ if (syncIsReadyForRead(pVnode->sync)) {
+ return true;
+ }
+
+ vDebug("vgId:%d, vnode not ready for read, state:%s, last:%ld, cmt:%ld", pVnode->config.vgId,
+ syncGetMyRoleStr(pVnode->sync), syncGetLastIndex(pVnode->sync), syncGetCommitIndex(pVnode->sync));
+ return false;
+}
diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index 77c5d073a3..02089d9fec 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -13,11 +13,11 @@
* along with this program. If not, see .
*/
-#include "os.h"
#include "executorimpl.h"
#include "filter.h"
#include "function.h"
#include "functionMgt.h"
+#include "os.h"
#include "querynodes.h"
#include "systable.h"
#include "tname.h"
@@ -128,7 +128,7 @@ static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockIn
w = getAlignQueryTimeWindow(pInterval, pInterval->precision, pBlockInfo->window.skey);
assert(w.ekey >= pBlockInfo->window.skey);
- if (w.ekey < pBlockInfo->window.ekey) {
+ if (TMAX(w.skey, pBlockInfo->window.skey) <= TMIN(w.ekey, pBlockInfo->window.ekey)) {
return true;
}
@@ -178,8 +178,8 @@ static SResultRow* getTableGroupOutputBuf(SOperatorInfo* pOperator, uint64_t gro
STableScanInfo* pTableScanInfo = pOperator->info;
- SResultRowPosition* p1 =
- (SResultRowPosition*)taosHashGet(pTableScanInfo->pdInfo.pAggSup->pResultRowHashTable, buf, GET_RES_WINDOW_KEY_LEN(sizeof(groupId)));
+ SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(pTableScanInfo->pdInfo.pAggSup->pResultRowHashTable, buf,
+ GET_RES_WINDOW_KEY_LEN(sizeof(groupId)));
if (p1 == NULL) {
return NULL;
@@ -238,7 +238,7 @@ static FORCE_INLINE bool doFilterByBlockSMA(const SNode* pFilterNode, SColumnDat
// todo move to the initialization function
int32_t code = filterInitFromNode((SNode*)pFilterNode, &filter, 0);
- bool keep = filterRangeExecute(filter, pColsAgg, numOfCols, numOfRows);
+ bool keep = filterRangeExecute(filter, pColsAgg, numOfCols, numOfRows);
filterFreeInfo(filter);
return keep;
@@ -312,9 +312,9 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca
return TSDB_CODE_SUCCESS;
} else if (*status == FUNC_DATA_REQUIRED_STATIS_LOAD) {
pCost->loadBlockStatis += 1;
- loadSMA = true; // mark the operation of load sma;
+ loadSMA = true; // mark the operation of load sma;
bool success = doLoadBlockSMA(pTableScanInfo, pBlock, pTaskInfo);
- if (success) { // failed to load the block sma data, data block statistics does not exist, load data block instead
+ if (success) { // failed to load the block sma data, data block statistics does not exist, load data block instead
qDebug("%s data block SMA loaded, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
return TSDB_CODE_SUCCESS;
@@ -454,7 +454,7 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, SExprInfo* pPseudoExpr, int
colDataAppendNNULL(pColInfoData, 0, pBlock->info.rows);
} else if (pColInfoData->info.type != TSDB_DATA_TYPE_JSON) {
colDataAppendNItems(pColInfoData, 0, data, pBlock->info.rows);
- } else { // todo opt for json tag
+ } else { // todo opt for json tag
for (int32_t i = 0; i < pBlock->info.rows; ++i) {
colDataAppend(pColInfoData, i, data, false);
}
@@ -571,7 +571,10 @@ static SSDataBlock* doTableScanGroup(SOperatorInfo* pOperator) {
if (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) {
setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED);
pTableScanInfo->scanFlag = REPEAT_SCAN;
- qDebug("%s start to repeat ascending order scan data SELECT last_row(*),hostname from cpu group by hostname;blocks due to query func required", GET_TASKID(pTaskInfo));
+ qDebug(
+ "%s start to repeat ascending order scan data SELECT last_row(*),hostname from cpu group by hostname;blocks "
+ "due to query func required",
+ GET_TASKID(pTaskInfo));
// do prepare for the next round table scan operation
tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
@@ -1175,16 +1178,18 @@ static void checkUpdateData(SStreamScanInfo* pInfo, bool invertible, SSDataBlock
for (int32_t rowId = 0; rowId < pBlock->info.rows; rowId++) {
SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
- bool isClosed = false;
+ bool isClosed = false;
STimeWindow win = {.skey = INT64_MIN, .ekey = INT64_MAX};
if (isOverdue(tsCol[rowId], &pInfo->twAggSup)) {
win = getActiveTimeWindow(NULL, &dumyInfo, tsCol[rowId], &pInfo->interval, TSDB_ORDER_ASC);
isClosed = isCloseWindow(&win, &pInfo->twAggSup);
}
+ bool inserted = updateInfoIsTableInserted(pInfo->pUpdateInfo, pBlock->info.uid);
// must check update info first.
bool update = updateInfoIsUpdated(pInfo->pUpdateInfo, pBlock->info.uid, tsCol[rowId]);
- if ((update || (isSignleIntervalWindow(pInfo) && isClosed &&
- isDeletedWindow(&win, pBlock->info.groupId, pInfo->sessionSup.pIntervalAggSup))) && out) {
+ bool closedWin = isClosed && inserted && isSignleIntervalWindow(pInfo) &&
+ isDeletedWindow(&win, pBlock->info.groupId, pInfo->sessionSup.pIntervalAggSup);
+ if ((update || closedWin) && out) {
appendOneRow(pInfo->pUpdateDataRes, tsCol + rowId, tsCol + rowId, &pBlock->info.uid);
}
}
@@ -1391,8 +1396,8 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
SSDataBlock* pSDB = doRangeScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex);
if (pSDB) {
STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
- uint64_t version = getReaderMaxVersion(pTableScanInfo->dataReader);
- updateInfoSetScanRange(pInfo->pUpdateInfo, &pTableScanInfo->cond.twindows, pInfo->groupId,version);
+ uint64_t version = getReaderMaxVersion(pTableScanInfo->dataReader);
+ updateInfoSetScanRange(pInfo->pUpdateInfo, &pTableScanInfo->cond.twindows, pInfo->groupId, version);
pSDB->info.type = pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER_RANGE ? STREAM_NORMAL : STREAM_PULL_DATA;
checkUpdateData(pInfo, true, pSDB, false);
return pSDB;
@@ -1446,7 +1451,8 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
setBlockIntoRes(pInfo, &block);
- if (updateInfoIgnore(pInfo->pUpdateInfo, &pInfo->pRes->info.window, pInfo->pRes->info.groupId, pInfo->pRes->info.version)) {
+ if (updateInfoIgnore(pInfo->pUpdateInfo, &pInfo->pRes->info.window, pInfo->pRes->info.groupId,
+ pInfo->pRes->info.version)) {
printDataBlock(pInfo->pRes, "stream scan ignore");
blockDataCleanup(pInfo->pRes);
continue;
@@ -2249,7 +2255,7 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
// build message and send to mnode to fetch the content of system tables.
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SSysTableScanInfo* pInfo = pOperator->info;
- char dbName[TSDB_DB_NAME_LEN] = {0};
+ char dbName[TSDB_DB_NAME_LEN] = {0};
const char* name = tNameGetTableName(&pInfo->name);
if (pInfo->showRewrite) {
@@ -2261,8 +2267,8 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
return sysTableScanUserTables(pOperator);
} else if (strncasecmp(name, TSDB_INS_TABLE_TAGS, TSDB_TABLE_FNAME_LEN) == 0) {
return sysTableScanUserTags(pOperator);
- } else if (strncasecmp(name, TSDB_INS_TABLE_STABLES, TSDB_TABLE_FNAME_LEN) == 0 &&
- pInfo->showRewrite && IS_SYS_DBNAME(dbName)) {
+ } else if (strncasecmp(name, TSDB_INS_TABLE_STABLES, TSDB_TABLE_FNAME_LEN) == 0 && pInfo->showRewrite &&
+ IS_SYS_DBNAME(dbName)) {
return sysTableScanUserSTables(pOperator);
} else { // load the meta from mnode of the given epset
if (pOperator->status == OP_EXEC_DONE) {
@@ -2542,7 +2548,7 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) {
pInfo->pRes = blockDataDestroy(pInfo->pRes);
taosArrayDestroy(pInfo->pColMatchInfo);
-
+
taosMemoryFreeClear(param);
}
@@ -2598,7 +2604,6 @@ _error:
int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags, bool groupSort, SReadHandle* pHandle,
STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond,
const char* idStr) {
-
int64_t st = taosGetTimestampUs();
int32_t code = getTableList(pHandle->meta, pHandle->vnode, pScanNode, pTagCond, pTagIndexCond, pTableListInfo);
@@ -2607,7 +2612,7 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags
}
int64_t st1 = taosGetTimestampUs();
- qDebug("generate queried table list completed, elapsed time:%.2f ms %s", (st1-st)/1000.0, idStr);
+ qDebug("generate queried table list completed, elapsed time:%.2f ms %s", (st1 - st) / 1000.0, idStr);
if (taosArrayGetSize(pTableListInfo->pTableList) == 0) {
qDebug("no table qualified for query, %s" PRIx64, idStr);
@@ -2621,7 +2626,7 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags
}
int64_t st2 = taosGetTimestampUs();
- qDebug("generate group id map completed, elapsed time:%.2f ms %s", (st2-st1)/1000.0, idStr);
+ qDebug("generate group id map completed, elapsed time:%.2f ms %s", (st2 - st1) / 1000.0, idStr);
return TSDB_CODE_SUCCESS;
}
diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c
index de72c32fa1..cbf81f1d0d 100644
--- a/source/libs/function/src/builtinsimpl.c
+++ b/source/libs/function/src/builtinsimpl.c
@@ -4918,6 +4918,16 @@ int32_t mavgFunction(SqlFunctionCtx* pCtx) {
return numOfElems;
}
+static SSampleInfo* getSampleOutputInfo(SqlFunctionCtx* pCtx) {
+ SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
+ SSampleInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
+
+ pInfo->data = (char*)pInfo + sizeof(SSampleInfo);
+ pInfo->tuplePos = (STuplePos*)((char*)pInfo + sizeof(SSampleInfo) + pInfo->samples * pInfo->colBytes);
+
+ return pInfo;
+}
+
bool getSampleFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) {
SColumnNode* pCol = (SColumnNode*)nodesListGetNode(pFunc->pParameterList, 0);
SValueNode* pVal = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1);
@@ -4972,7 +4982,7 @@ static void doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* da
int32_t sampleFunction(SqlFunctionCtx* pCtx) {
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
- SSampleInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
+ SSampleInfo* pInfo = getSampleOutputInfo(pCtx);
SInputColumnInfoData* pInput = &pCtx->input;
@@ -4998,7 +5008,7 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx) {
int32_t sampleFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx);
- SSampleInfo* pInfo = GET_ROWCELL_INTERBUF(pEntryInfo);
+ SSampleInfo* pInfo = getSampleOutputInfo(pCtx);
pEntryInfo->complete = true;
int32_t slotId = pCtx->pExpr->base.resSchema.slotId;
diff --git a/source/libs/parser/inc/parUtil.h b/source/libs/parser/inc/parUtil.h
index 2249bc7823..308afd467f 100644
--- a/source/libs/parser/inc/parUtil.h
+++ b/source/libs/parser/inc/parUtil.h
@@ -22,6 +22,7 @@ extern "C" {
#include "catalog.h"
#include "os.h"
+#include "parser.h"
#include "query.h"
#define parserFatal(param, ...) qFatal("PARSER: " param, ##__VA_ARGS__)
@@ -44,18 +45,37 @@ typedef struct SParseTablesMetaReq {
SHashObj* pTables;
} SParseTablesMetaReq;
+typedef enum ECatalogReqType {
+ CATALOG_REQ_TYPE_META = 1,
+ CATALOG_REQ_TYPE_VGROUP,
+ CATALOG_REQ_TYPE_BOTH
+} ECatalogReqType;
+
+typedef struct SInsertTablesMetaReq {
+ char dbFName[TSDB_DB_FNAME_LEN];
+ SArray* pTableMetaPos;
+ SArray* pTableMetaReq; // element is SName
+ SArray* pTableVgroupPos;
+ SArray* pTableVgroupReq; // element is SName
+} SInsertTablesMetaReq;
+
typedef struct SParseMetaCache {
- SHashObj* pTableMeta; // key is tbFName, element is STableMeta*
- SHashObj* pDbVgroup; // key is dbFName, element is SArray*
- SHashObj* pTableVgroup; // key is tbFName, element is SVgroupInfo*
- SHashObj* pDbCfg; // key is tbFName, element is SDbCfgInfo*
- SHashObj* pDbInfo; // key is tbFName, element is SDbInfo*
- SHashObj* pUserAuth; // key is SUserAuthInfo serialized string, element is bool indicating whether or not to pass
- SHashObj* pUdf; // key is funcName, element is SFuncInfo*
- SHashObj* pTableIndex; // key is tbFName, element is SArray*
- SHashObj* pTableCfg; // key is tbFName, element is STableCfg*
- SArray* pDnodes; // element is SEpSet
- bool dnodeRequired;
+ SHashObj* pTableMeta; // key is tbFName, element is STableMeta*
+ SHashObj* pDbVgroup; // key is dbFName, element is SArray*
+ SHashObj* pTableVgroup; // key is tbFName, element is SVgroupInfo*
+ SHashObj* pDbCfg; // key is tbFName, element is SDbCfgInfo*
+ SHashObj* pDbInfo; // key is tbFName, element is SDbInfo*
+ SHashObj* pUserAuth; // key is SUserAuthInfo serialized string, element is bool indicating whether or not to pass
+ SHashObj* pUdf; // key is funcName, element is SFuncInfo*
+ SHashObj* pTableIndex; // key is tbFName, element is SArray*
+ SHashObj* pTableCfg; // key is tbFName, element is STableCfg*
+ SArray* pDnodes; // element is SEpSet
+ bool dnodeRequired;
+ SHashObj* pInsertTables; // key is dbName, element is SInsertTablesMetaReq*, for insert
+ const char* pUser;
+ const SArray* pTableMetaData; // pRes = STableMeta*
+ const SArray* pTableVgroupData; // pRes = SVgroupInfo*
+ int32_t sqlTableNum;
} SParseMetaCache;
int32_t generateSyntaxErrMsg(SMsgBuf* pBuf, int32_t errCode, ...);
@@ -72,8 +92,9 @@ STableMeta* tableMetaDup(const STableMeta* pTableMeta);
int32_t trimString(const char* src, int32_t len, char* dst, int32_t dlen);
-int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq);
-int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache);
+int32_t buildCatalogReq(SParseContext* pCxt, const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq);
+int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache,
+ bool insertValuesStmt);
int32_t reserveTableMetaInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache);
int32_t reserveTableMetaInCacheExt(const SName* pName, SParseMetaCache* pMetaCache);
int32_t reserveDbVgInfoInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache);
@@ -100,6 +121,12 @@ int32_t getUdfInfoFromCache(SParseMetaCache* pMetaCache, const char* pFunc, SFun
int32_t getTableIndexFromCache(SParseMetaCache* pMetaCache, const SName* pName, SArray** pIndexes);
int32_t getTableCfgFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableCfg** pOutput);
int32_t getDnodeListFromCache(SParseMetaCache* pMetaCache, SArray** pDnodes);
+int32_t reserveTableMetaInCacheForInsert(const SName* pName, ECatalogReqType reqType, int32_t tableNo,
+ SParseMetaCache* pMetaCache);
+int32_t getTableMetaFromCacheForInsert(SArray* pTableMetaPos, SParseMetaCache* pMetaCache, int32_t tableNo,
+ STableMeta** pMeta);
+int32_t getTableVgroupFromCacheForInsert(SArray* pTableVgroupPos, SParseMetaCache* pMetaCache, int32_t tableNo,
+ SVgroupInfo* pVgroup);
void destoryParseMetaCache(SParseMetaCache* pMetaCache, bool request);
#ifdef __cplusplus
diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c
index b7532173c8..31ae35e717 100644
--- a/source/libs/parser/src/parInsert.c
+++ b/source/libs/parser/src/parInsert.c
@@ -73,6 +73,9 @@ typedef struct SInsertParseContext {
SStmtCallback* pStmtCb;
SParseMetaCache* pMetaCache;
char sTableName[TSDB_TABLE_NAME_LEN];
+ char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW];
+ int64_t memElapsed;
+ int64_t parRowElapsed;
} SInsertParseContext;
typedef struct SInsertParseSyntaxCxt {
@@ -203,10 +206,11 @@ static int32_t checkAuth(SInsertParseContext* pCxt, char* pDbFname, bool* pPass)
return catalogChkAuth(pBasicCtx->pCatalog, &conn, pBasicCtx->pUser, pDbFname, AUTH_TYPE_WRITE, pPass);
}
-static int32_t getTableSchema(SInsertParseContext* pCxt, SName* pTbName, bool isStb, STableMeta** pTableMeta) {
+static int32_t getTableSchema(SInsertParseContext* pCxt, int32_t tbNo, SName* pTbName, bool isStb,
+ STableMeta** pTableMeta) {
SParseContext* pBasicCtx = pCxt->pComCxt;
if (pBasicCtx->async) {
- return getTableMetaFromCache(pCxt->pMetaCache, pTbName, pTableMeta);
+ return getTableMetaFromCacheForInsert(pBasicCtx->pTableMetaPos, pCxt->pMetaCache, tbNo, pTableMeta);
}
SRequestConnInfo conn = {.pTrans = pBasicCtx->pTransporter,
.requestId = pBasicCtx->requestId,
@@ -219,10 +223,10 @@ static int32_t getTableSchema(SInsertParseContext* pCxt, SName* pTbName, bool is
return catalogGetTableMeta(pBasicCtx->pCatalog, &conn, pTbName, pTableMeta);
}
-static int32_t getTableVgroup(SInsertParseContext* pCxt, SName* pTbName, SVgroupInfo* pVg) {
+static int32_t getTableVgroup(SInsertParseContext* pCxt, int32_t tbNo, SName* pTbName, SVgroupInfo* pVg) {
SParseContext* pBasicCtx = pCxt->pComCxt;
if (pBasicCtx->async) {
- return getTableVgroupFromCache(pCxt->pMetaCache, pTbName, pVg);
+ return getTableVgroupFromCacheForInsert(pBasicCtx->pTableVgroupPos, pCxt->pMetaCache, tbNo, pVg);
}
SRequestConnInfo conn = {.pTrans = pBasicCtx->pTransporter,
.requestId = pBasicCtx->requestId,
@@ -231,28 +235,22 @@ static int32_t getTableVgroup(SInsertParseContext* pCxt, SName* pTbName, SVgroup
return catalogGetTableHashVgroup(pBasicCtx->pCatalog, &conn, pTbName, pVg);
}
-static int32_t getTableMetaImpl(SInsertParseContext* pCxt, SName* name, char* dbFname, bool isStb) {
- bool pass = false;
- CHECK_CODE(checkAuth(pCxt, dbFname, &pass));
- if (!pass) {
- return TSDB_CODE_PAR_PERMISSION_DENIED;
- }
-
- CHECK_CODE(getTableSchema(pCxt, name, isStb, &pCxt->pTableMeta));
+static int32_t getTableMetaImpl(SInsertParseContext* pCxt, int32_t tbNo, SName* name, char* dbFname, bool isStb) {
+ CHECK_CODE(getTableSchema(pCxt, tbNo, name, isStb, &pCxt->pTableMeta));
if (!isStb) {
SVgroupInfo vg;
- CHECK_CODE(getTableVgroup(pCxt, name, &vg));
+ CHECK_CODE(getTableVgroup(pCxt, tbNo, name, &vg));
CHECK_CODE(taosHashPut(pCxt->pVgroupsHashObj, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg)));
}
return TSDB_CODE_SUCCESS;
}
-static int32_t getTableMeta(SInsertParseContext* pCxt, SName* name, char* dbFname) {
- return getTableMetaImpl(pCxt, name, dbFname, false);
+static int32_t getTableMeta(SInsertParseContext* pCxt, int32_t tbNo, SName* name, char* dbFname) {
+ return getTableMetaImpl(pCxt, tbNo, name, dbFname, false);
}
-static int32_t getSTableMeta(SInsertParseContext* pCxt, SName* name, char* dbFname) {
- return getTableMetaImpl(pCxt, name, dbFname, true);
+static int32_t getSTableMeta(SInsertParseContext* pCxt, int32_t tbNo, SName* name, char* dbFname) {
+ return getTableMetaImpl(pCxt, tbNo, name, dbFname, true);
}
static int32_t getDBCfg(SInsertParseContext* pCxt, const char* pDbFName, SDbCfgInfo* pInfo) {
@@ -1028,13 +1026,13 @@ end:
return code;
}
-static int32_t storeTableMeta(SInsertParseContext* pCxt, SHashObj* pHash, SName* pTableName, const char* pName,
- int32_t len, STableMeta* pMeta) {
+static int32_t storeTableMeta(SInsertParseContext* pCxt, SHashObj* pHash, int32_t tbNo, SName* pTableName,
+ const char* pName, int32_t len, STableMeta* pMeta) {
SVgroupInfo vg;
- CHECK_CODE(getTableVgroup(pCxt, pTableName, &vg));
+ CHECK_CODE(getTableVgroup(pCxt, tbNo, pTableName, &vg));
CHECK_CODE(taosHashPut(pCxt->pVgroupsHashObj, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg)));
- pMeta->uid = 0;
+ pMeta->uid = tbNo;
pMeta->vgId = vg.vgId;
pMeta->tableType = TSDB_CHILD_TABLE;
@@ -1084,7 +1082,7 @@ static int32_t ignoreAutoCreateTableClause(SInsertParseContext* pCxt) {
}
// pSql -> stb_name [(tag1_name, ...)] TAGS (tag1_value, ...)
-static int32_t parseUsingClause(SInsertParseContext* pCxt, SName* name, char* tbFName) {
+static int32_t parseUsingClause(SInsertParseContext* pCxt, int32_t tbNo, SName* name, char* tbFName) {
int32_t len = strlen(tbFName);
STableMeta** pMeta = taosHashGet(pCxt->pSubTableHashObj, tbFName, len);
if (NULL != pMeta) {
@@ -1102,11 +1100,11 @@ static int32_t parseUsingClause(SInsertParseContext* pCxt, SName* name, char* tb
tNameGetFullDbName(&sname, dbFName);
strcpy(pCxt->sTableName, sname.tname);
- CHECK_CODE(getSTableMeta(pCxt, &sname, dbFName));
+ CHECK_CODE(getSTableMeta(pCxt, tbNo, &sname, dbFName));
if (TSDB_SUPER_TABLE != pCxt->pTableMeta->tableType) {
return buildInvalidOperationMsg(&pCxt->msg, "create table only from super table is allowed");
}
- CHECK_CODE(storeTableMeta(pCxt, pCxt->pSubTableHashObj, name, tbFName, len, pCxt->pTableMeta));
+ CHECK_CODE(storeTableMeta(pCxt, pCxt->pSubTableHashObj, tbNo, name, tbFName, len, pCxt->pTableMeta));
SSchema* pTagsSchema = getTableTagSchema(pCxt->pTableMeta);
setBoundColumnInfo(&pCxt->tags, pTagsSchema, getNumOfTags(pCxt->pTableMeta));
@@ -1195,7 +1193,7 @@ static int parseOneRow(SInsertParseContext* pCxt, STableDataBlocks* pDataBlocks,
tdSRowEnd(pBuilder);
*gotRow = true;
-
+
#ifdef TD_DEBUG_PRINT_ROW
STSchema* pSTSchema = tdGetSTSChemaFromSSChema(schema, spd->numOfCols, 1);
tdSRowPrint(row, pSTSchema, __func__);
@@ -1214,7 +1212,7 @@ static int32_t parseValues(SInsertParseContext* pCxt, STableDataBlocks* pDataBlo
CHECK_CODE(initRowBuilder(&pDataBlock->rowBuilder, pDataBlock->pTableMeta->sversion, &pDataBlock->boundColumnInfo));
(*numOfRows) = 0;
- char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW] = {0}; // used for deleting Escape character: \\, \', \"
+ // char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW] = {0}; // used for deleting Escape character: \\, \', \"
SToken sToken;
while (1) {
int32_t index = 0;
@@ -1232,7 +1230,7 @@ static int32_t parseValues(SInsertParseContext* pCxt, STableDataBlocks* pDataBlo
}
bool gotRow = false;
- CHECK_CODE(parseOneRow(pCxt, pDataBlock, tinfo.precision, &gotRow, tmpTokenBuf));
+ CHECK_CODE(parseOneRow(pCxt, pDataBlock, tinfo.precision, &gotRow, pCxt->tmpTokenBuf));
if (gotRow) {
pDataBlock->size += extendedRowSize; // len;
}
@@ -1347,7 +1345,9 @@ static int32_t parseDataFromFile(SInsertParseContext* pCxt, SToken filePath, STa
}
static void destroyInsertParseContextForTable(SInsertParseContext* pCxt) {
- taosMemoryFreeClear(pCxt->pTableMeta);
+ if (!pCxt->pComCxt->async) {
+ taosMemoryFreeClear(pCxt->pTableMeta);
+ }
destroyBoundColumnInfo(&pCxt->tags);
tdDestroySVCreateTbReq(&pCxt->createTblReq);
}
@@ -1365,6 +1365,20 @@ static void destroyInsertParseContext(SInsertParseContext* pCxt) {
destroyBlockArrayList(pCxt->pVgDataBlocks);
}
+static int32_t parseTableName(SInsertParseContext* pCxt, SToken* pTbnameToken, SName* pName, char* pDbFName,
+ char* pTbFName) {
+ int32_t code = createSName(pName, pTbnameToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg);
+ if (TSDB_CODE_SUCCESS == code) {
+ tNameExtractFullName(pName, pTbFName);
+ code = taosHashPut(pCxt->pTableNameHashObj, pTbFName, strlen(pTbFName), pName, sizeof(SName));
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ tNameGetFullDbName(pName, pDbFName);
+ code = taosHashPut(pCxt->pDbFNameHashObj, pDbFName, strlen(pDbFName), pDbFName, TSDB_DB_FNAME_LEN);
+ }
+ return code;
+}
+
// tb_name
// [USING stb_name [(tag1_name, ...)] TAGS (tag1_value, ...)]
// [(field1_name, ...)]
@@ -1372,7 +1386,9 @@ static void destroyInsertParseContext(SInsertParseContext* pCxt) {
// [...];
static int32_t parseInsertBody(SInsertParseContext* pCxt) {
int32_t tbNum = 0;
+ SName name;
char tbFName[TSDB_TABLE_FNAME_LEN];
+ char dbFName[TSDB_DB_FNAME_LEN];
bool autoCreateTbl = false;
// for each table
@@ -1415,20 +1431,15 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
SToken tbnameToken = sToken;
NEXT_TOKEN(pCxt->pSql, sToken);
- SName name;
- CHECK_CODE(createSName(&name, &tbnameToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg));
-
- tNameExtractFullName(&name, tbFName);
- CHECK_CODE(taosHashPut(pCxt->pTableNameHashObj, tbFName, strlen(tbFName), &name, sizeof(SName)));
- char dbFName[TSDB_DB_FNAME_LEN];
- tNameGetFullDbName(&name, dbFName);
- CHECK_CODE(taosHashPut(pCxt->pDbFNameHashObj, dbFName, strlen(dbFName), dbFName, sizeof(dbFName)));
+ if (!pCxt->pComCxt->async || TK_USING == sToken.type) {
+ CHECK_CODE(parseTableName(pCxt, &tbnameToken, &name, dbFName, tbFName));
+ }
bool existedUsing = false;
// USING clause
if (TK_USING == sToken.type) {
existedUsing = true;
- CHECK_CODE(parseUsingClause(pCxt, &name, tbFName));
+ CHECK_CODE(parseUsingClause(pCxt, tbNum, &name, tbFName));
NEXT_TOKEN(pCxt->pSql, sToken);
autoCreateTbl = true;
}
@@ -1438,22 +1449,31 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
// pSql -> field1_name, ...)
pBoundColsStart = pCxt->pSql;
CHECK_CODE(ignoreBoundColumns(pCxt));
- // CHECK_CODE(parseBoundColumns(pCxt, &dataBuf->boundColumnInfo, getTableColumnSchema(pCxt->pTableMeta)));
NEXT_TOKEN(pCxt->pSql, sToken);
}
if (TK_USING == sToken.type) {
- CHECK_CODE(parseUsingClause(pCxt, &name, tbFName));
+ if (pCxt->pComCxt->async) {
+ CHECK_CODE(parseTableName(pCxt, &tbnameToken, &name, dbFName, tbFName));
+ }
+ CHECK_CODE(parseUsingClause(pCxt, tbNum, &name, tbFName));
NEXT_TOKEN(pCxt->pSql, sToken);
autoCreateTbl = true;
} else if (!existedUsing) {
- CHECK_CODE(getTableMeta(pCxt, &name, dbFName));
+ CHECK_CODE(getTableMeta(pCxt, tbNum, &name, dbFName));
}
STableDataBlocks* dataBuf = NULL;
- CHECK_CODE(getDataBlockFromList(pCxt->pTableBlockHashObj, tbFName, strlen(tbFName), TSDB_DEFAULT_PAYLOAD_SIZE,
- sizeof(SSubmitBlk), getTableInfo(pCxt->pTableMeta).rowSize, pCxt->pTableMeta,
- &dataBuf, NULL, &pCxt->createTblReq));
+ if (pCxt->pComCxt->async) {
+ CHECK_CODE(getDataBlockFromList(pCxt->pTableBlockHashObj, &pCxt->pTableMeta->uid, sizeof(pCxt->pTableMeta->uid),
+ TSDB_DEFAULT_PAYLOAD_SIZE, sizeof(SSubmitBlk),
+ getTableInfo(pCxt->pTableMeta).rowSize, pCxt->pTableMeta, &dataBuf, NULL,
+ &pCxt->createTblReq));
+ } else {
+ CHECK_CODE(getDataBlockFromList(pCxt->pTableBlockHashObj, tbFName, strlen(tbFName), TSDB_DEFAULT_PAYLOAD_SIZE,
+ sizeof(SSubmitBlk), getTableInfo(pCxt->pTableMeta).rowSize, pCxt->pTableMeta,
+ &dataBuf, NULL, &pCxt->createTblReq));
+ }
if (NULL != pBoundColsStart) {
char* pCurrPos = pCxt->pSql;
@@ -1532,7 +1552,9 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery, SParseMetaCache
.totalNum = 0,
.pOutput = (SVnodeModifOpStmt*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT),
.pStmtCb = pContext->pStmtCb,
- .pMetaCache = pMetaCache};
+ .pMetaCache = pMetaCache,
+ .memElapsed = 0,
+ .parRowElapsed = 0};
if (pContext->pStmtCb && *pQuery) {
(*pContext->pStmtCb->getExecInfoFn)(pContext->pStmtCb->pStmt, &context.pVgroupsHashObj,
@@ -1547,7 +1569,7 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery, SParseMetaCache
} else {
context.pVgroupsHashObj = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
context.pTableBlockHashObj =
- taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
}
if (NULL == context.pVgroupsHashObj || NULL == context.pTableBlockHashObj || NULL == context.pSubTableHashObj ||
@@ -1656,24 +1678,24 @@ static int32_t skipUsingClause(SInsertParseSyntaxCxt* pCxt) {
return TSDB_CODE_SUCCESS;
}
-static int32_t collectTableMetaKey(SInsertParseSyntaxCxt* pCxt, SToken* pTbToken) {
+static int32_t collectTableMetaKey(SInsertParseSyntaxCxt* pCxt, bool isStable, int32_t tableNo, SToken* pTbToken) {
SName name;
CHECK_CODE(createSName(&name, pTbToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg));
- CHECK_CODE(reserveUserAuthInCacheExt(pCxt->pComCxt->pUser, &name, AUTH_TYPE_WRITE, pCxt->pMetaCache));
- CHECK_CODE(reserveTableMetaInCacheExt(&name, pCxt->pMetaCache));
- CHECK_CODE(reserveTableVgroupInCacheExt(&name, pCxt->pMetaCache));
+ CHECK_CODE(reserveTableMetaInCacheForInsert(&name, isStable ? CATALOG_REQ_TYPE_META : CATALOG_REQ_TYPE_BOTH, tableNo,
+ pCxt->pMetaCache));
return TSDB_CODE_SUCCESS;
}
-static int32_t collectAutoCreateTableMetaKey(SInsertParseSyntaxCxt* pCxt, SToken* pTbToken) {
+static int32_t collectAutoCreateTableMetaKey(SInsertParseSyntaxCxt* pCxt, int32_t tableNo, SToken* pTbToken) {
SName name;
CHECK_CODE(createSName(&name, pTbToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg));
- CHECK_CODE(reserveTableVgroupInCacheExt(&name, pCxt->pMetaCache));
+ CHECK_CODE(reserveTableMetaInCacheForInsert(&name, CATALOG_REQ_TYPE_VGROUP, tableNo, pCxt->pMetaCache));
return TSDB_CODE_SUCCESS;
}
static int32_t parseInsertBodySyntax(SInsertParseSyntaxCxt* pCxt) {
- bool hasData = false;
+ bool hasData = false;
+ int32_t tableNo = 0;
// for each table
while (1) {
SToken sToken;
@@ -1702,9 +1724,9 @@ static int32_t parseInsertBodySyntax(SInsertParseSyntaxCxt* pCxt) {
// USING clause
if (TK_USING == sToken.type) {
existedUsing = true;
- CHECK_CODE(collectAutoCreateTableMetaKey(pCxt, &tbnameToken));
+ CHECK_CODE(collectAutoCreateTableMetaKey(pCxt, tableNo, &tbnameToken));
NEXT_TOKEN(pCxt->pSql, sToken);
- CHECK_CODE(collectTableMetaKey(pCxt, &sToken));
+ CHECK_CODE(collectTableMetaKey(pCxt, true, tableNo, &sToken));
CHECK_CODE(skipUsingClause(pCxt));
NEXT_TOKEN(pCxt->pSql, sToken);
}
@@ -1717,15 +1739,17 @@ static int32_t parseInsertBodySyntax(SInsertParseSyntaxCxt* pCxt) {
if (TK_USING == sToken.type && !existedUsing) {
existedUsing = true;
- CHECK_CODE(collectAutoCreateTableMetaKey(pCxt, &tbnameToken));
+ CHECK_CODE(collectAutoCreateTableMetaKey(pCxt, tableNo, &tbnameToken));
NEXT_TOKEN(pCxt->pSql, sToken);
- CHECK_CODE(collectTableMetaKey(pCxt, &sToken));
+ CHECK_CODE(collectTableMetaKey(pCxt, true, tableNo, &sToken));
CHECK_CODE(skipUsingClause(pCxt));
NEXT_TOKEN(pCxt->pSql, sToken);
- } else {
- CHECK_CODE(collectTableMetaKey(pCxt, &tbnameToken));
+ } else if (!existedUsing) {
+ CHECK_CODE(collectTableMetaKey(pCxt, false, tableNo, &tbnameToken));
}
+ ++tableNo;
+
if (TK_VALUES == sToken.type) {
// pSql -> (field1_value, ...) [(field1_value2, ...) ...]
CHECK_CODE(skipValuesClause(pCxt));
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index ef985a3894..1c7446ad6f 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -1399,7 +1399,7 @@ static int32_t translateTimelineFunc(STranslateContext* pCxt, SFunctionNode* pFu
"%s function must be used in select statements", pFunc->functionName);
}
SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt;
- if (QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) &&
+ if (NULL != pSelect->pFromTable && QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) &&
!isTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC,
"%s function requires valid time series input", pFunc->functionName);
@@ -2037,16 +2037,13 @@ static int32_t setVnodeSysTableVgroupList(STranslateContext* pCxt, SName* pName,
code = getDBVgInfoImpl(pCxt, pName, &vgroupList);
}
- if (TSDB_CODE_SUCCESS == code &&
- 0 == strcmp(pRealTable->table.dbName, TSDB_INFORMATION_SCHEMA_DB) &&
- 0 == strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_TAGS) &&
- isSelectStmt(pCxt->pCurrStmt) &&
+ if (TSDB_CODE_SUCCESS == code && 0 == strcmp(pRealTable->table.dbName, TSDB_INFORMATION_SCHEMA_DB) &&
+ 0 == strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_TAGS) && isSelectStmt(pCxt->pCurrStmt) &&
0 == taosArrayGetSize(vgroupList)) {
((SSelectStmt*)pCxt->pCurrStmt)->isEmptyResult = true;
}
- if (TSDB_CODE_SUCCESS == code &&
- 0 == strcmp(pRealTable->table.dbName, TSDB_INFORMATION_SCHEMA_DB) &&
+ if (TSDB_CODE_SUCCESS == code && 0 == strcmp(pRealTable->table.dbName, TSDB_INFORMATION_SCHEMA_DB) &&
0 == strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_TABLES)) {
code = addMnodeToVgroupList(&pCxt->pParseCxt->mgmtEpSet, &vgroupList);
}
diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c
index ae5a281aab..17e78e7806 100644
--- a/source/libs/parser/src/parUtil.c
+++ b/source/libs/parser/src/parUtil.c
@@ -476,9 +476,11 @@ static int32_t buildDbReq(SHashObj* pDbsHash, SArray** pDbs) {
static int32_t buildTableReqFromDb(SHashObj* pDbsHash, SArray** pDbs) {
if (NULL != pDbsHash) {
- *pDbs = taosArrayInit(taosHashGetSize(pDbsHash), sizeof(STablesReq));
if (NULL == *pDbs) {
- return TSDB_CODE_OUT_OF_MEMORY;
+ *pDbs = taosArrayInit(taosHashGetSize(pDbsHash), sizeof(STablesReq));
+ if (NULL == *pDbs) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
}
SParseTablesMetaReq* p = taosHashIterate(pDbsHash, NULL);
while (NULL != p) {
@@ -530,7 +532,62 @@ static int32_t buildUdfReq(SHashObj* pUdfHash, SArray** pUdf) {
return TSDB_CODE_SUCCESS;
}
-int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) {
+static int32_t buildCatalogReqForInsert(SParseContext* pCxt, const SParseMetaCache* pMetaCache,
+ SCatalogReq* pCatalogReq) {
+ int32_t ndbs = taosHashGetSize(pMetaCache->pInsertTables);
+ pCatalogReq->pTableMeta = taosArrayInit(ndbs, sizeof(STablesReq));
+ if (NULL == pCatalogReq->pTableMeta) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ pCatalogReq->pTableHash = taosArrayInit(ndbs, sizeof(STablesReq));
+ if (NULL == pCatalogReq->pTableHash) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ pCatalogReq->pUser = taosArrayInit(ndbs, sizeof(SUserAuthInfo));
+ if (NULL == pCatalogReq->pUser) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ pCxt->pTableMetaPos = taosArrayInit(pMetaCache->sqlTableNum, sizeof(int32_t));
+ pCxt->pTableVgroupPos = taosArrayInit(pMetaCache->sqlTableNum, sizeof(int32_t));
+
+ int32_t metaReqNo = 0;
+ int32_t vgroupReqNo = 0;
+ SInsertTablesMetaReq* p = taosHashIterate(pMetaCache->pInsertTables, NULL);
+ while (NULL != p) {
+ STablesReq req = {0};
+ strcpy(req.dbFName, p->dbFName);
+ TSWAP(req.pTables, p->pTableMetaReq);
+ taosArrayPush(pCatalogReq->pTableMeta, &req);
+
+ req.pTables = NULL;
+ TSWAP(req.pTables, p->pTableVgroupReq);
+ taosArrayPush(pCatalogReq->pTableHash, &req);
+
+ int32_t ntables = taosArrayGetSize(p->pTableMetaPos);
+ for (int32_t i = 0; i < ntables; ++i) {
+ taosArrayInsert(pCxt->pTableMetaPos, *(int32_t*)taosArrayGet(p->pTableMetaPos, i), &metaReqNo);
+ ++metaReqNo;
+ }
+
+ ntables = taosArrayGetSize(p->pTableVgroupPos);
+ for (int32_t i = 0; i < ntables; ++i) {
+ taosArrayInsert(pCxt->pTableVgroupPos, *(int32_t*)taosArrayGet(p->pTableVgroupPos, i), &vgroupReqNo);
+ ++vgroupReqNo;
+ }
+
+ SUserAuthInfo auth = {0};
+ strcpy(auth.user, pCxt->pUser);
+ strcpy(auth.dbFName, p->dbFName);
+ auth.type = AUTH_TYPE_WRITE;
+ taosArrayPush(pCatalogReq->pUser, &auth);
+
+ p = taosHashIterate(pMetaCache->pInsertTables, p);
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t buildCatalogReqForQuery(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) {
int32_t code = buildTableReqFromDb(pMetaCache->pTableMeta, &pCatalogReq->pTableMeta);
if (TSDB_CODE_SUCCESS == code) {
code = buildDbReq(pMetaCache->pDbVgroup, &pCatalogReq->pDbVgroup);
@@ -560,6 +617,13 @@ int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalog
return code;
}
+int32_t buildCatalogReq(SParseContext* pCxt, const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) {
+ if (NULL != pMetaCache->pInsertTables) {
+ return buildCatalogReqForInsert(pCxt, pMetaCache, pCatalogReq);
+ }
+ return buildCatalogReqForQuery(pMetaCache, pCatalogReq);
+}
+
static int32_t putMetaDataToHash(const char* pKey, int32_t len, const SArray* pData, int32_t index, SHashObj** pHash) {
if (NULL == *pHash) {
*pHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
@@ -647,7 +711,8 @@ static int32_t putUdfToCache(const SArray* pUdfReq, const SArray* pUdfData, SHas
return TSDB_CODE_SUCCESS;
}
-int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache) {
+int32_t putMetaDataToCacheForQuery(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData,
+ SParseMetaCache* pMetaCache) {
int32_t code = putDbTableDataToCache(pCatalogReq->pTableMeta, pMetaData->pTableMeta, &pMetaCache->pTableMeta);
if (TSDB_CODE_SUCCESS == code) {
code = putDbDataToCache(pCatalogReq->pDbVgroup, pMetaData->pDbVgroup, &pMetaCache->pDbVgroup);
@@ -677,6 +742,30 @@ int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMet
return code;
}
+int32_t putMetaDataToCacheForInsert(const SMetaData* pMetaData, SParseMetaCache* pMetaCache) {
+ int32_t ndbs = taosArrayGetSize(pMetaData->pUser);
+ for (int32_t i = 0; i < ndbs; ++i) {
+ SMetaRes* pRes = taosArrayGet(pMetaData->pUser, i);
+ if (TSDB_CODE_SUCCESS != pRes->code) {
+ return pRes->code;
+ }
+ if (!(*(bool*)pRes->pRes)) {
+ return TSDB_CODE_PAR_PERMISSION_DENIED;
+ }
+ }
+ pMetaCache->pTableMetaData = pMetaData->pTableMeta;
+ pMetaCache->pTableVgroupData = pMetaData->pTableHash;
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache,
+ bool insertValuesStmt) {
+ if (insertValuesStmt) {
+ return putMetaDataToCacheForInsert(pMetaData, pMetaCache);
+ }
+ return putMetaDataToCacheForQuery(pCatalogReq, pMetaData, pMetaCache);
+}
+
static int32_t reserveTableReqInCacheImpl(const char* pTbFName, int32_t len, SHashObj** pTables) {
if (NULL == *pTables) {
*pTables = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
@@ -977,6 +1066,82 @@ int32_t getDnodeListFromCache(SParseMetaCache* pMetaCache, SArray** pDnodes) {
return TSDB_CODE_SUCCESS;
}
+static int32_t reserveTableReqInCacheForInsert(const SName* pName, ECatalogReqType reqType, int32_t tableNo,
+ SInsertTablesMetaReq* pReq) {
+ switch (reqType) {
+ case CATALOG_REQ_TYPE_META:
+ taosArrayPush(pReq->pTableMetaReq, pName);
+ taosArrayPush(pReq->pTableMetaPos, &tableNo);
+ break;
+ case CATALOG_REQ_TYPE_VGROUP:
+ taosArrayPush(pReq->pTableVgroupReq, pName);
+ taosArrayPush(pReq->pTableVgroupPos, &tableNo);
+ break;
+ case CATALOG_REQ_TYPE_BOTH:
+ taosArrayPush(pReq->pTableMetaReq, pName);
+ taosArrayPush(pReq->pTableMetaPos, &tableNo);
+ taosArrayPush(pReq->pTableVgroupReq, pName);
+ taosArrayPush(pReq->pTableVgroupPos, &tableNo);
+ break;
+ default:
+ break;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t reserveTableReqInDbCacheForInsert(const SName* pName, ECatalogReqType reqType, int32_t tableNo,
+ SHashObj* pDbs) {
+ SInsertTablesMetaReq req = {.pTableMetaReq = taosArrayInit(4, sizeof(SName)),
+ .pTableMetaPos = taosArrayInit(4, sizeof(int32_t)),
+ .pTableVgroupReq = taosArrayInit(4, sizeof(SName)),
+ .pTableVgroupPos = taosArrayInit(4, sizeof(int32_t))};
+ tNameGetFullDbName(pName, req.dbFName);
+ int32_t code = reserveTableReqInCacheForInsert(pName, reqType, tableNo, &req);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = taosHashPut(pDbs, pName->dbname, strlen(pName->dbname), &req, sizeof(SInsertTablesMetaReq));
+ }
+ return code;
+}
+
+int32_t reserveTableMetaInCacheForInsert(const SName* pName, ECatalogReqType reqType, int32_t tableNo,
+ SParseMetaCache* pMetaCache) {
+ if (NULL == pMetaCache->pInsertTables) {
+ pMetaCache->pInsertTables = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ if (NULL == pMetaCache->pInsertTables) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ pMetaCache->sqlTableNum = tableNo;
+ SInsertTablesMetaReq* pReq = taosHashGet(pMetaCache->pInsertTables, pName->dbname, strlen(pName->dbname));
+ if (NULL == pReq) {
+ return reserveTableReqInDbCacheForInsert(pName, reqType, tableNo, pMetaCache->pInsertTables);
+ }
+ return reserveTableReqInCacheForInsert(pName, reqType, tableNo, pReq);
+}
+
+int32_t getTableMetaFromCacheForInsert(SArray* pTableMetaPos, SParseMetaCache* pMetaCache, int32_t tableNo,
+ STableMeta** pMeta) {
+ int32_t reqIndex = *(int32_t*)taosArrayGet(pTableMetaPos, tableNo);
+ SMetaRes* pRes = taosArrayGet(pMetaCache->pTableMetaData, reqIndex);
+ if (TSDB_CODE_SUCCESS == pRes->code) {
+ *pMeta = pRes->pRes;
+ if (NULL == *pMeta) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ return pRes->code;
+}
+
+int32_t getTableVgroupFromCacheForInsert(SArray* pTableVgroupPos, SParseMetaCache* pMetaCache, int32_t tableNo,
+ SVgroupInfo* pVgroup) {
+ int32_t reqIndex = *(int32_t*)taosArrayGet(pTableVgroupPos, tableNo);
+ SMetaRes* pRes = taosArrayGet(pMetaCache->pTableVgroupData, reqIndex);
+ if (TSDB_CODE_SUCCESS == pRes->code) {
+ memcpy(pVgroup, pRes->pRes, sizeof(SVgroupInfo));
+ }
+ return pRes->code;
+}
+
void destoryParseTablesMetaReqHash(SHashObj* pHash) {
SParseTablesMetaReq* p = taosHashIterate(pHash, NULL);
while (NULL != p) {
diff --git a/source/libs/parser/src/parser.c b/source/libs/parser/src/parser.c
index 34cd783ace..7e27132f3c 100644
--- a/source/libs/parser/src/parser.c
+++ b/source/libs/parser/src/parser.c
@@ -185,7 +185,7 @@ int32_t qParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq
code = parseSqlSyntax(pCxt, pQuery, &metaCache);
}
if (TSDB_CODE_SUCCESS == code) {
- code = buildCatalogReq(&metaCache, pCatalogReq);
+ code = buildCatalogReq(pCxt, &metaCache, pCatalogReq);
}
destoryParseMetaCache(&metaCache, true);
terrno = code;
@@ -195,7 +195,7 @@ int32_t qParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq
int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCatalogReq,
const struct SMetaData* pMetaData, SQuery* pQuery) {
SParseMetaCache metaCache = {0};
- int32_t code = putMetaDataToCache(pCatalogReq, pMetaData, &metaCache);
+ int32_t code = putMetaDataToCache(pCatalogReq, pMetaData, &metaCache, NULL == pQuery->pRoot);
if (TSDB_CODE_SUCCESS == code) {
if (NULL == pQuery->pRoot) {
code = parseInsertSql(pCxt, &pQuery, &metaCache);
diff --git a/source/libs/parser/test/parInsertTest.cpp b/source/libs/parser/test/parInsertTest.cpp
index 7302491ba7..ddf15ec67b 100644
--- a/source/libs/parser/test/parInsertTest.cpp
+++ b/source/libs/parser/test/parInsertTest.cpp
@@ -13,21 +13,13 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <functional>
-
#include <gtest/gtest.h>
-#include "mockCatalogService.h"
-#include "os.h"
-#include "parInt.h"
+#include "parTestUtil.h"
using namespace std;
-using namespace std::placeholders;
-using namespace testing;
-namespace {
-string toString(int32_t code) { return tstrerror(code); }
-} // namespace
+namespace ParserTest {
// syntax:
// INSERT INTO
@@ -36,259 +28,60 @@ string toString(int32_t code) { return tstrerror(code); }
// [(field1_name, ...)]
// VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
// [...];
-class InsertTest : public Test {
- protected:
- InsertTest() : res_(nullptr) {}
- ~InsertTest() { reset(); }
-
- void setDatabase(const string& acctId, const string& db) {
- acctId_ = acctId;
- db_ = db;
- }
-
- void bind(const char* sql) {
- reset();
- cxt_.acctId = atoi(acctId_.c_str());
- cxt_.db = (char*)db_.c_str();
- strcpy(sqlBuf_, sql);
- cxt_.sqlLen = strlen(sql);
- sqlBuf_[cxt_.sqlLen] = '\0';
- cxt_.pSql = sqlBuf_;
- }
-
- int32_t run() {
- code_ = parseInsertSql(&cxt_, &res_, nullptr);
- if (code_ != TSDB_CODE_SUCCESS) {
- cout << "code:" << toString(code_) << ", msg:" << errMagBuf_ << endl;
- }
- return code_;
- }
-
- int32_t runAsync() {
- cxt_.async = true;
- bool request = true;
-    unique_ptr<SParseMetaCache, function<void (SParseMetaCache*)>> metaCache(
-        new SParseMetaCache(), std::bind(_destoryParseMetaCache, _1, cref(request)));
- code_ = parseInsertSyntax(&cxt_, &res_, metaCache.get());
- if (code_ != TSDB_CODE_SUCCESS) {
- cout << "parseInsertSyntax code:" << toString(code_) << ", msg:" << errMagBuf_ << endl;
- return code_;
- }
-
-    unique_ptr<SCatalogReq, void (*)(SCatalogReq*)> catalogReq(new SCatalogReq(),
-                                                               MockCatalogService::destoryCatalogReq);
- code_ = buildCatalogReq(metaCache.get(), catalogReq.get());
- if (code_ != TSDB_CODE_SUCCESS) {
- cout << "buildCatalogReq code:" << toString(code_) << ", msg:" << errMagBuf_ << endl;
- return code_;
- }
-
-    unique_ptr<SMetaData, void (*)(SMetaData*)> metaData(new SMetaData(), MockCatalogService::destoryMetaData);
- g_mockCatalogService->catalogGetAllMeta(catalogReq.get(), metaData.get());
-
- metaCache.reset(new SParseMetaCache());
- request = false;
- code_ = putMetaDataToCache(catalogReq.get(), metaData.get(), metaCache.get());
- if (code_ != TSDB_CODE_SUCCESS) {
- cout << "putMetaDataToCache code:" << toString(code_) << ", msg:" << errMagBuf_ << endl;
- return code_;
- }
-
- code_ = parseInsertSql(&cxt_, &res_, metaCache.get());
- if (code_ != TSDB_CODE_SUCCESS) {
- cout << "parseInsertSql code:" << toString(code_) << ", msg:" << errMagBuf_ << endl;
- return code_;
- }
-
- return code_;
- }
-
- void dumpReslut() {
- SVnodeModifOpStmt* pStmt = getVnodeModifStmt(res_);
- size_t num = taosArrayGetSize(pStmt->pDataBlocks);
- cout << "payloadType:" << (int32_t)pStmt->payloadType << ", insertType:" << pStmt->insertType
- << ", numOfVgs:" << num << endl;
- for (size_t i = 0; i < num; ++i) {
- SVgDataBlocks* vg = (SVgDataBlocks*)taosArrayGetP(pStmt->pDataBlocks, i);
- cout << "vgId:" << vg->vg.vgId << ", numOfTables:" << vg->numOfTables << ", dataSize:" << vg->size << endl;
- SSubmitReq* submit = (SSubmitReq*)vg->pData;
- cout << "length:" << ntohl(submit->length) << ", numOfBlocks:" << ntohl(submit->numOfBlocks) << endl;
- int32_t numOfBlocks = ntohl(submit->numOfBlocks);
- SSubmitBlk* blk = (SSubmitBlk*)(submit + 1);
- for (int32_t i = 0; i < numOfBlocks; ++i) {
- cout << "Block:" << i << endl;
- cout << "\tuid:" << be64toh(blk->uid) << ", tid:" << be64toh(blk->suid) << ", sversion:" << ntohl(blk->sversion)
- << ", dataLen:" << ntohl(blk->dataLen) << ", schemaLen:" << ntohl(blk->schemaLen)
- << ", numOfRows:" << ntohl(blk->numOfRows) << endl;
- blk = (SSubmitBlk*)(blk->data + ntohl(blk->dataLen));
- }
- }
- }
-
- void checkReslut(int32_t numOfTables, int32_t numOfRows1, int32_t numOfRows2 = -1) {
- SVnodeModifOpStmt* pStmt = getVnodeModifStmt(res_);
- ASSERT_EQ(pStmt->payloadType, PAYLOAD_TYPE_KV);
- ASSERT_EQ(pStmt->insertType, TSDB_QUERY_TYPE_INSERT);
- size_t num = taosArrayGetSize(pStmt->pDataBlocks);
- ASSERT_GE(num, 0);
- for (size_t i = 0; i < num; ++i) {
- SVgDataBlocks* vg = (SVgDataBlocks*)taosArrayGetP(pStmt->pDataBlocks, i);
- ASSERT_EQ(vg->numOfTables, numOfTables);
- ASSERT_GE(vg->size, 0);
- SSubmitReq* submit = (SSubmitReq*)vg->pData;
- ASSERT_GE(ntohl(submit->length), 0);
- ASSERT_GE(ntohl(submit->numOfBlocks), 0);
- int32_t numOfBlocks = ntohl(submit->numOfBlocks);
- SSubmitBlk* blk = (SSubmitBlk*)(submit + 1);
- for (int32_t i = 0; i < numOfBlocks; ++i) {
- ASSERT_EQ(ntohl(blk->numOfRows), (0 == i ? numOfRows1 : (numOfRows2 > 0 ? numOfRows2 : numOfRows1)));
- blk = (SSubmitBlk*)(blk->data + ntohl(blk->dataLen));
- }
- }
- }
-
- private:
- static const int max_err_len = 1024;
- static const int max_sql_len = 1024 * 1024;
-
- static void _destoryParseMetaCache(SParseMetaCache* pMetaCache, bool request) {
- destoryParseMetaCache(pMetaCache, request);
- delete pMetaCache;
- }
-
- void reset() {
- memset(&cxt_, 0, sizeof(cxt_));
- memset(errMagBuf_, 0, max_err_len);
- cxt_.pMsg = errMagBuf_;
- cxt_.msgLen = max_err_len;
- code_ = TSDB_CODE_SUCCESS;
- qDestroyQuery(res_);
- res_ = nullptr;
- }
-
- SVnodeModifOpStmt* getVnodeModifStmt(SQuery* pQuery) { return (SVnodeModifOpStmt*)pQuery->pRoot; }
-
- string acctId_;
- string db_;
- char errMagBuf_[max_err_len];
- char sqlBuf_[max_sql_len];
- SParseContext cxt_;
- int32_t code_;
- SQuery* res_;
-};
+class ParserInsertTest : public ParserTestBase {};
// INSERT INTO tb_name [(field1_name, ...)] VALUES (field1_value, ...)
-TEST_F(InsertTest, singleTableSingleRowTest) {
- setDatabase("root", "test");
+TEST_F(ParserInsertTest, singleTableSingleRowTest) {
+ useDb("root", "test");
- bind("insert into t1 values (now, 1, 'beijing', 3, 4, 5)");
- ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
- dumpReslut();
- checkReslut(1, 1);
+ run("INSERT INTO t1 VALUES (now, 1, 'beijing', 3, 4, 5)");
- bind("insert into t1 (ts, c1, c2, c3, c4, c5) values (now, 1, 'beijing', 3, 4, 5)");
- ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
-
- bind("insert into t1 values (now, 1, 'beijing', 3, 4, 5)");
- ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
- dumpReslut();
- checkReslut(1, 1);
-
- bind("insert into t1 (ts, c1, c2, c3, c4, c5) values (now, 1, 'beijing', 3, 4, 5)");
- ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
+ run("INSERT INTO t1 (ts, c1, c2, c3, c4, c5) VALUES (now, 1, 'beijing', 3, 4, 5)");
}
// INSERT INTO tb_name VALUES (field1_value, ...)(field1_value, ...)
-TEST_F(InsertTest, singleTableMultiRowTest) {
- setDatabase("root", "test");
+TEST_F(ParserInsertTest, singleTableMultiRowTest) {
+ useDb("root", "test");
- bind(
- "insert into t1 values (now, 1, 'beijing', 3, 4, 5)(now+1s, 2, 'shanghai', 6, 7, 8)"
+ run("INSERT INTO t1 VALUES (now, 1, 'beijing', 3, 4, 5)"
+ "(now+1s, 2, 'shanghai', 6, 7, 8)"
"(now+2s, 3, 'guangzhou', 9, 10, 11)");
- ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
- dumpReslut();
- checkReslut(1, 3);
-
- bind(
- "insert into t1 values (now, 1, 'beijing', 3, 4, 5)(now+1s, 2, 'shanghai', 6, 7, 8)"
- "(now+2s, 3, 'guangzhou', 9, 10, 11)");
- ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
}
// INSERT INTO tb1_name VALUES (field1_value, ...) tb2_name VALUES (field1_value, ...)
-TEST_F(InsertTest, multiTableSingleRowTest) {
- setDatabase("root", "test");
+TEST_F(ParserInsertTest, multiTableSingleRowTest) {
+ useDb("root", "test");
- bind("insert into st1s1 values (now, 1, \"beijing\") st1s2 values (now, 10, \"131028\")");
- ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
- dumpReslut();
- checkReslut(2, 1);
-
- bind("insert into st1s1 values (now, 1, \"beijing\") st1s2 values (now, 10, \"131028\")");
- ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
+ run("INSERT INTO st1s1 VALUES (now, 1, 'beijing') st1s2 VALUES (now, 10, '131028')");
}
// INSERT INTO tb1_name VALUES (field1_value, ...) tb2_name VALUES (field1_value, ...)
-TEST_F(InsertTest, multiTableMultiRowTest) {
- setDatabase("root", "test");
+TEST_F(ParserInsertTest, multiTableMultiRowTest) {
+ useDb("root", "test");
- bind(
- "insert into st1s1 values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")"
- " st1s2 values (now, 10, \"131028\")(now+1s, 20, \"132028\")");
- ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
- dumpReslut();
- checkReslut(2, 3, 2);
-
- bind(
- "insert into st1s1 values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")"
- " st1s2 values (now, 10, \"131028\")(now+1s, 20, \"132028\")");
- ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
+ run("INSERT INTO "
+ "st1s1 VALUES (now, 1, 'beijing')(now+1s, 2, 'shanghai')(now+2s, 3, 'guangzhou') "
+ "st1s2 VALUES (now, 10, '131028')(now+1s, 20, '132028')");
}
// INSERT INTO
// tb1_name USING st1_name [(tag1_name, ...)] TAGS (tag1_value, ...) VALUES (field1_value, ...)
// tb2_name USING st2_name [(tag1_name, ...)] TAGS (tag1_value, ...) VALUES (field1_value, ...)
-TEST_F(InsertTest, autoCreateTableTest) {
- setDatabase("root", "test");
+TEST_F(ParserInsertTest, autoCreateTableTest) {
+ useDb("root", "test");
- bind(
- "insert into st1s1 using st1 tags(1, 'wxy', now) "
- "values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")");
- ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
- dumpReslut();
- checkReslut(1, 3);
+ run("INSERT INTO st1s1 USING st1 TAGS(1, 'wxy', now) "
+ "VALUES (now, 1, 'beijing')(now+1s, 2, 'shanghai')(now+2s, 3, 'guangzhou')");
- bind(
- "insert into st1s1 using st1 (tag1, tag2) tags(1, 'wxy') values (now, 1, \"beijing\")"
- "(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")");
- ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
+ run("INSERT INTO st1s1 USING st1 (tag1, tag2) TAGS(1, 'wxy') (ts, c1, c2) "
+ "VALUES (now, 1, 'beijing')(now+1s, 2, 'shanghai')(now+2s, 3, 'guangzhou')");
- bind(
- "insert into st1s1 using st1 tags(1, 'wxy', now) "
- "values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")");
- ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
+ run("INSERT INTO st1s1 (ts, c1, c2) USING st1 (tag1, tag2) TAGS(1, 'wxy') "
+ "VALUES (now, 1, 'beijing')(now+1s, 2, 'shanghai')(now+2s, 3, 'guangzhou')");
- bind(
- "insert into st1s1 using st1 (tag1, tag2) tags(1, 'wxy') values (now, 1, \"beijing\")"
- "(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")");
- ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
-
- bind(
- "insert into st1s1 using st1 tags(1, 'wxy', now) values (now, 1, \"beijing\")"
- "st1s1 using st1 tags(1, 'wxy', now) values (now+1s, 2, \"shanghai\")");
- ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
+ run("INSERT INTO "
+ "st1s1 USING st1 (tag1, tag2) TAGS(1, 'wxy') (ts, c1, c2) VALUES (now, 1, 'beijing') "
+ "st1s2 (ts, c1, c2) USING st1 TAGS(2, 'abc', now) VALUES (now+1s, 2, 'shanghai')");
}
-TEST_F(InsertTest, toleranceTest) {
- setDatabase("root", "test");
-
- bind("insert into");
- ASSERT_NE(run(), TSDB_CODE_SUCCESS);
- bind("insert into t");
- ASSERT_NE(run(), TSDB_CODE_SUCCESS);
-
- bind("insert into");
- ASSERT_NE(runAsync(), TSDB_CODE_SUCCESS);
- bind("insert into t");
- ASSERT_NE(runAsync(), TSDB_CODE_SUCCESS);
-}
+} // namespace ParserTest
diff --git a/source/libs/parser/test/parTestUtil.cpp b/source/libs/parser/test/parTestUtil.cpp
index 3fe4b533e4..98281b7bf0 100644
--- a/source/libs/parser/test/parTestUtil.cpp
+++ b/source/libs/parser/test/parTestUtil.cpp
@@ -225,16 +225,17 @@ class ParserTestBaseImpl {
DO_WITH_THROW(collectMetaKey, pCxt, pQuery, pMetaCache);
}
- void doBuildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) {
- DO_WITH_THROW(buildCatalogReq, pMetaCache, pCatalogReq);
+ void doBuildCatalogReq(SParseContext* pCxt, const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) {
+ DO_WITH_THROW(buildCatalogReq, pCxt, pMetaCache, pCatalogReq);
}
void doGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) {
DO_WITH_THROW(g_mockCatalogService->catalogGetAllMeta, pCatalogReq, pMetaData);
}
- void doPutMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache) {
- DO_WITH_THROW(putMetaDataToCache, pCatalogReq, pMetaData, pMetaCache);
+ void doPutMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache,
+ bool isInsertValues) {
+ DO_WITH_THROW(putMetaDataToCache, pCatalogReq, pMetaData, pMetaCache, isInsertValues);
}
void doAuthenticate(SParseContext* pCxt, SQuery* pQuery, SParseMetaCache* pMetaCache) {
@@ -261,7 +262,9 @@ class ParserTestBaseImpl {
void doParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, SCatalogReq* pCatalogReq) {
DO_WITH_THROW(qParseSqlSyntax, pCxt, pQuery, pCatalogReq);
ASSERT_NE(*pQuery, nullptr);
- res_.parsedAst_ = toString((*pQuery)->pRoot);
+ if (nullptr != (*pQuery)->pRoot) {
+ res_.parsedAst_ = toString((*pQuery)->pRoot);
+ }
}
void doAnalyseSqlSemantic(SParseContext* pCxt, const SCatalogReq* pCatalogReq, const SMetaData* pMetaData,
@@ -270,6 +273,17 @@ class ParserTestBaseImpl {
res_.calcConstAst_ = toString(pQuery->pRoot);
}
+ void doParseInsertSql(SParseContext* pCxt, SQuery** pQuery, SParseMetaCache* pMetaCache) {
+ DO_WITH_THROW(parseInsertSql, pCxt, pQuery, pMetaCache);
+ ASSERT_NE(*pQuery, nullptr);
+ res_.parsedAst_ = toString((*pQuery)->pRoot);
+ }
+
+ void doParseInsertSyntax(SParseContext* pCxt, SQuery** pQuery, SParseMetaCache* pMetaCache) {
+ DO_WITH_THROW(parseInsertSyntax, pCxt, pQuery, pMetaCache);
+ ASSERT_NE(*pQuery, nullptr);
+ }
+
string toString(const SNode* pRoot) {
char* pStr = NULL;
int32_t len = 0;
@@ -287,15 +301,20 @@ class ParserTestBaseImpl {
SParseContext cxt = {0};
setParseContext(sql, &cxt);
- unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
- doParse(&cxt, query.get());
- SQuery* pQuery = *(query.get());
+ if (qIsInsertValuesSql(cxt.pSql, cxt.sqlLen)) {
+ unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
+ doParseInsertSql(&cxt, query.get(), nullptr);
+ } else {
+ unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
+ doParse(&cxt, query.get());
+ SQuery* pQuery = *(query.get());
- doAuthenticate(&cxt, pQuery, nullptr);
+ doAuthenticate(&cxt, pQuery, nullptr);
- doTranslate(&cxt, pQuery, nullptr);
+ doTranslate(&cxt, pQuery, nullptr);
- doCalculateConstant(&cxt, pQuery);
+ doCalculateConstant(&cxt, pQuery);
+ }
if (g_dump) {
dump();
@@ -338,17 +357,22 @@ class ParserTestBaseImpl {
setParseContext(sql, &cxt, true);
unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
- doParse(&cxt, query.get());
- SQuery* pQuery = *(query.get());
-
- bool request = true;
+ bool request = true;
unique_ptr > metaCache(
new SParseMetaCache(), bind(_destoryParseMetaCache, _1, cref(request)));
- doCollectMetaKey(&cxt, pQuery, metaCache.get());
+ bool isInsertValues = qIsInsertValuesSql(cxt.pSql, cxt.sqlLen);
+ if (isInsertValues) {
+ doParseInsertSyntax(&cxt, query.get(), metaCache.get());
+ } else {
+ doParse(&cxt, query.get());
+ doCollectMetaKey(&cxt, *(query.get()), metaCache.get());
+ }
+
+ SQuery* pQuery = *(query.get());
unique_ptr catalogReq(new SCatalogReq(),
MockCatalogService::destoryCatalogReq);
- doBuildCatalogReq(metaCache.get(), catalogReq.get());
+ doBuildCatalogReq(&cxt, metaCache.get(), catalogReq.get());
string err;
thread t1([&]() {
@@ -358,13 +382,17 @@ class ParserTestBaseImpl {
metaCache.reset(new SParseMetaCache());
request = false;
- doPutMetaDataToCache(catalogReq.get(), metaData.get(), metaCache.get());
+ doPutMetaDataToCache(catalogReq.get(), metaData.get(), metaCache.get(), isInsertValues);
- doAuthenticate(&cxt, pQuery, metaCache.get());
+ if (isInsertValues) {
+ doParseInsertSql(&cxt, query.get(), metaCache.get());
+ } else {
+ doAuthenticate(&cxt, pQuery, metaCache.get());
- doTranslate(&cxt, pQuery, metaCache.get());
+ doTranslate(&cxt, pQuery, metaCache.get());
- doCalculateConstant(&cxt, pQuery);
+ doCalculateConstant(&cxt, pQuery);
+ }
} catch (const TerminateFlag& e) {
// success and terminate
} catch (const runtime_error& e) {
diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c
index c843dd0a67..71f084d412 100644
--- a/source/libs/planner/src/planLogicCreater.c
+++ b/source/libs/planner/src/planLogicCreater.c
@@ -1002,7 +1002,7 @@ static int32_t createPartitionLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pS
int32_t code =
nodesCollectColumns(pSelect, SQL_CLAUSE_PARTITION_BY, NULL, COLLECT_COL_TYPE_ALL, &pPartition->node.pTargets);
if (TSDB_CODE_SUCCESS == code && NULL == pPartition->node.pTargets) {
- code = nodesListMakeStrictAppend(&pPartition->node.pTargets, nodesListGetNode(pCxt->pCurrRoot->pTargets, 0));
+ code = nodesListMakeStrictAppend(&pPartition->node.pTargets, nodesCloneNode(nodesListGetNode(pCxt->pCurrRoot->pTargets, 0)));
}
if (TSDB_CODE_SUCCESS == code) {
diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c
index e06b752862..862d142100 100644
--- a/source/libs/qworker/src/qworker.c
+++ b/source/libs/qworker/src/qworker.c
@@ -1,3 +1,5 @@
+#include "qworker.h"
+
#include "dataSinkMgt.h"
#include "executor.h"
#include "planner.h"
@@ -7,7 +9,6 @@
#include "tcommon.h"
#include "tmsg.h"
#include "tname.h"
-#include "qworker.h"
SQWorkerMgmt gQwMgmt = {
.lock = 0,
@@ -15,7 +16,6 @@ SQWorkerMgmt gQwMgmt = {
.qwNum = 0,
};
-
int32_t qwProcessHbLinkBroken(SQWorker *mgmt, SQWMsg *qwMsg, SSchedulerHbReq *req) {
int32_t code = 0;
SSchedulerHbRsp rsp = {0};
@@ -26,7 +26,7 @@ int32_t qwProcessHbLinkBroken(SQWorker *mgmt, SQWMsg *qwMsg, SSchedulerHbReq *re
QW_LOCK(QW_WRITE, &sch->hbConnLock);
sch->hbBrokenTs = taosGetTimestampMs();
-
+
if (qwMsg->connInfo.handle == sch->hbConnInfo.handle) {
tmsgReleaseHandle(&sch->hbConnInfo, TAOS_CONN_SERVER);
sch->hbConnInfo.handle = NULL;
@@ -44,8 +44,8 @@ int32_t qwProcessHbLinkBroken(SQWorker *mgmt, SQWMsg *qwMsg, SSchedulerHbReq *re
QW_RET(TSDB_CODE_SUCCESS);
}
-static void freeItem(void* param) {
- SExplainExecInfo* pInfo = param;
+static void freeItem(void *param) {
+ SExplainExecInfo *pInfo = param;
taosMemoryFree(pInfo->verboseInfo);
}
@@ -54,7 +54,7 @@ int32_t qwHandleTaskComplete(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
if (TASK_TYPE_TEMP == ctx->taskType && taskHandle) {
if (ctx->explain) {
- SArray* execInfoList = taosArrayInit(4, sizeof(SExplainExecInfo));
+ SArray *execInfoList = taosArrayInit(4, sizeof(SExplainExecInfo));
QW_ERR_RET(qGetExplainExecInfo(taskHandle, execInfoList));
SRpcHandleInfo connInfo = ctx->ctrlConnInfo;
@@ -81,7 +81,7 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) {
qTaskInfo_t taskHandle = ctx->taskHandle;
DataSinkHandle sinkHandle = ctx->sinkHandle;
- SArray* pResList = taosArrayInit(4, POINTER_BYTES);
+ SArray *pResList = taosArrayInit(4, POINTER_BYTES);
while (true) {
QW_TASK_DLOG("start to execTask, loopIdx:%d", i++);
@@ -95,7 +95,7 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) {
} else {
QW_TASK_DLOG("qExecTask failed, code:%x - %s", code, tstrerror(code));
}
- QW_ERR_RET(code);
+ QW_ERR_JRET(code);
}
}
@@ -105,7 +105,7 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) {
QW_TASK_DLOG("qExecTask end with empty res, useconds:%" PRIu64, useconds);
dsEndPut(sinkHandle, useconds);
- QW_ERR_RET(qwHandleTaskComplete(QW_FPARAMS(), ctx));
+ QW_ERR_JRET(qwHandleTaskComplete(QW_FPARAMS(), ctx));
if (queryStop) {
*queryStop = true;
@@ -114,7 +114,7 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) {
break;
}
- for(int32_t j = 0; j < taosArrayGetSize(pResList); ++j) {
+ for (int32_t j = 0; j < taosArrayGetSize(pResList); ++j) {
SSDataBlock *pRes = taosArrayGetP(pResList, j);
ASSERT(pRes->info.rows > 0);
@@ -122,7 +122,7 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) {
code = dsPutDataBlock(sinkHandle, &inputData, &qcontinue);
if (code) {
QW_TASK_ELOG("dsPutDataBlock failed, code:%x - %s", code, tstrerror(code));
- QW_ERR_RET(code);
+ QW_ERR_JRET(code);
}
QW_TASK_DLOG("data put into sink, rows:%d, continueExecTask:%d", pRes->info.rows, qcontinue);
@@ -132,7 +132,7 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) {
if (queryStop) {
*queryStop = true;
}
-
+
break;
}
@@ -151,6 +151,11 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) {
taosArrayDestroy(pResList);
QW_RET(code);
+
+_return:
+ taosArrayDestroy(pResList);
+
+ return code;
}
int32_t qwGenerateSchHbRsp(SQWorker *mgmt, SQWSchStatus *sch, SQWHbInfo *hbInfo) {
@@ -222,7 +227,8 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen,
QW_ERR_RET(code);
}
- QW_TASK_DLOG("no more data in sink and query end, fetched blocks %d rows %d", pOutput->numOfBlocks, pOutput->numOfRows);
+ QW_TASK_DLOG("no more data in sink and query end, fetched blocks %d rows %d", pOutput->numOfBlocks,
+ pOutput->numOfRows);
qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_SUCC);
if (NULL == rsp) {
@@ -266,7 +272,8 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen,
pOutput->numOfBlocks++;
if (DS_BUF_EMPTY == pOutput->bufStatus && pOutput->queryEnd) {
- QW_TASK_DLOG("task all data fetched and done, fetched blocks %d rows %d", pOutput->numOfBlocks, pOutput->numOfRows);
+ QW_TASK_DLOG("task all data fetched and done, fetched blocks %d rows %d", pOutput->numOfBlocks,
+ pOutput->numOfRows);
qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_SUCC);
break;
}
@@ -288,10 +295,10 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen,
}
int32_t qwGetDeleteResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, SDeleteRes *pRes) {
- int64_t len = 0;
- bool queryEnd = false;
- int32_t code = 0;
- SOutputData output = {0};
+ int64_t len = 0;
+ bool queryEnd = false;
+ int32_t code = 0;
+ SOutputData output = {0};
dsGetDataLength(ctx->sinkHandle, &len, &queryEnd);
@@ -304,7 +311,7 @@ int32_t qwGetDeleteResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, SDeleteRes *pRes
if (NULL == output.pData) {
QW_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
-
+
code = dsGetDataBlock(ctx->sinkHandle, &output);
if (code) {
QW_TASK_ELOG("dsGetDataBlock failed, code:%x - %s", code, tstrerror(code));
@@ -312,8 +319,8 @@ int32_t qwGetDeleteResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, SDeleteRes *pRes
QW_ERR_RET(code);
}
- SDeleterRes* pDelRes = (SDeleterRes*)output.pData;
-
+ SDeleterRes *pDelRes = (SDeleterRes *)output.pData;
+
pRes->suid = pDelRes->suid;
pRes->uidList = pDelRes->uidList;
pRes->skey = pDelRes->skey;
@@ -322,14 +329,13 @@ int32_t qwGetDeleteResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, SDeleteRes *pRes
strcpy(pRes->tableFName, pDelRes->tableName);
strcpy(pRes->tsColName, pDelRes->tsColName);
taosMemoryFree(output.pData);
-
+
return TSDB_CODE_SUCCESS;
}
-
int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *input, SQWPhaseOutput *output) {
- int32_t code = 0;
- SQWTaskCtx *ctx = NULL;
+ int32_t code = 0;
+ SQWTaskCtx *ctx = NULL;
QW_TASK_DLOG("start to handle event at phase %s", qwPhaseStr(phase));
@@ -355,8 +361,8 @@ int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inpu
if (QW_EVENT_RECEIVED(ctx, QW_EVENT_DROP)) {
QW_ERR_JRET(qwDropTask(QW_FPARAMS()));
- //qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code);
- //QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
+ // qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code);
+ // QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED);
break;
@@ -391,8 +397,8 @@ int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inpu
if (QW_EVENT_RECEIVED(ctx, QW_EVENT_DROP)) {
QW_ERR_JRET(qwDropTask(QW_FPARAMS()));
- //qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code);
- //QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
+ // qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code);
+ // QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED);
}
@@ -428,9 +434,9 @@ _return:
}
int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *input, SQWPhaseOutput *output) {
- int32_t code = 0;
- SQWTaskCtx *ctx = NULL;
- SRpcHandleInfo connInfo = {0};
+ int32_t code = 0;
+ SQWTaskCtx *ctx = NULL;
+ SRpcHandleInfo connInfo = {0};
QW_TASK_DLOG("start to handle event at phase %s", qwPhaseStr(phase));
@@ -449,8 +455,8 @@ int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inp
QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
}
- //qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code);
- //QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
+ // qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code);
+ // QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
QW_ERR_JRET(qwDropTask(QW_FPARAMS()));
QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED);
@@ -473,14 +479,14 @@ _return:
if (QW_PHASE_POST_QUERY == phase && ctx) {
ctx->queryRsped = true;
- bool rsped = false;
+ bool rsped = false;
SQWMsg qwMsg = {.msgType = ctx->msgType, .connInfo = ctx->ctrlConnInfo};
qwDbgSimulateRedirect(&qwMsg, ctx, &rsped);
qwDbgSimulateDead(QW_FPARAMS(), ctx, &rsped);
if (!rsped) {
qwBuildAndSendQueryRsp(input->msgType + 1, &ctx->ctrlConnInfo, code, ctx);
QW_TASK_DLOG("query msg rsped, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
- }
+ }
}
if (ctx) {
@@ -507,7 +513,6 @@ int32_t qwAbortPrerocessQuery(QW_FPARAMS_DEF) {
QW_RET(TSDB_CODE_SUCCESS);
}
-
int32_t qwPreprocessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
int32_t code = 0;
bool queryRsped = false;
@@ -537,8 +542,7 @@ _return:
QW_RET(TSDB_CODE_SUCCESS);
}
-
-int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, char* sql) {
+int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, char *sql) {
int32_t code = 0;
bool queryRsped = false;
SSubplan *plan = NULL;
@@ -556,7 +560,7 @@ int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, char* sql) {
ctx->needFetch = qwMsg->msgInfo.needFetch;
ctx->msgType = qwMsg->msgType;
- //QW_TASK_DLOGL("subplan json string, len:%d, %s", qwMsg->msgLen, qwMsg->msg);
+ // QW_TASK_DLOGL("subplan json string, len:%d, %s", qwMsg->msgLen, qwMsg->msg);
code = qStringToSubplan(qwMsg->msg, &plan);
if (TSDB_CODE_SUCCESS != code) {
@@ -594,7 +598,7 @@ int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, char* sql) {
_return:
taosMemoryFree(sql);
-
+
input.code = code;
input.msgType = qwMsg->msgType;
code = qwHandlePostPhaseEvents(QW_FPARAMS(), QW_PHASE_POST_QUERY, &input, NULL);
@@ -648,7 +652,7 @@ int32_t qwProcessCQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
qwBuildAndSendFetchRsp(ctx->fetchType, &qwMsg->connInfo, rsp, dataLen, code);
rsp = NULL;
-
+
QW_TASK_DLOG("fetch rsp send, handle:%p, code:%x - %s, dataLen:%d", qwMsg->connInfo.handle, code,
tstrerror(code), dataLen);
} else {
@@ -754,13 +758,13 @@ _return:
if (code || rsp) {
bool rsped = false;
if (ctx) {
- qwDbgSimulateRedirect(qwMsg, ctx, &rsped);
+ qwDbgSimulateRedirect(qwMsg, ctx, &rsped);
qwDbgSimulateDead(QW_FPARAMS(), ctx, &rsped);
}
if (!rsped) {
qwBuildAndSendFetchRsp(qwMsg->msgType + 1, &qwMsg->connInfo, rsp, dataLen, code);
- QW_TASK_DLOG("%s send, handle:%p, code:%x - %s, dataLen:%d", TMSG_INFO(qwMsg->msgType + 1), qwMsg->connInfo.handle, code, tstrerror(code),
- dataLen);
+ QW_TASK_DLOG("%s send, handle:%p, code:%x - %s, dataLen:%d", TMSG_INFO(qwMsg->msgType + 1),
+ qwMsg->connInfo.handle, code, tstrerror(code), dataLen);
}
}
@@ -919,10 +923,11 @@ void qwProcessHbTimerEvent(void *param, void *tmrId) {
uint64_t *sId = taosHashGetKey(pIter, NULL);
QW_TLOG("cancel send hb to sch %" PRIx64 " cause of no connection handle", *sId);
- if (sch->hbBrokenTs > 0 && ((currentMs - sch->hbBrokenTs) > QW_SCH_TIMEOUT_MSEC) && taosHashGetSize(sch->tasksHash) <= 0) {
+ if (sch->hbBrokenTs > 0 && ((currentMs - sch->hbBrokenTs) > QW_SCH_TIMEOUT_MSEC) &&
+ taosHashGetSize(sch->tasksHash) <= 0) {
taosArrayPush(pExpiredSch, sId);
}
-
+
pIter = taosHashIterate(mgmt->schHash, pIter);
continue;
}
@@ -998,7 +1003,6 @@ _return:
QW_RET(TSDB_CODE_SUCCESS);
}
-
int32_t qWorkerInit(int8_t nodeType, int32_t nodeId, SQWorkerCfg *cfg, void **qWorkerMgmt, const SMsgCb *pMsgCb) {
if (NULL == qWorkerMgmt || pMsgCb->mgmt == NULL) {
qError("invalid param to init qworker");
@@ -1119,12 +1123,12 @@ int32_t qWorkerGetStat(SReadHandle *handle, void *qWorkerMgmt, SQWorkerStat *pSt
QW_RET(TSDB_CODE_QRY_INVALID_INPUT);
}
- SQWorker *mgmt = (SQWorker *)qWorkerMgmt;
+ SQWorker *mgmt = (SQWorker *)qWorkerMgmt;
SDataSinkStat sinkStat = {0};
-
+
dsDataSinkGetCacheSize(&sinkStat);
pStat->cacheDataSize = sinkStat.cachedSize;
-
+
pStat->queryProcessed = QW_STAT_GET(mgmt->stat.msgStat.queryProcessed);
pStat->cqueryProcessed = QW_STAT_GET(mgmt->stat.msgStat.cqueryProcessed);
pStat->fetchProcessed = QW_STAT_GET(mgmt->stat.msgStat.fetchProcessed);
@@ -1139,6 +1143,3 @@ int32_t qWorkerGetStat(SReadHandle *handle, void *qWorkerMgmt, SQWorkerStat *pSt
return TSDB_CODE_SUCCESS;
}
-
-
-
diff --git a/source/libs/stream/src/streamUpdate.c b/source/libs/stream/src/streamUpdate.c
index 0b1ce27b77..d053662bd3 100644
--- a/source/libs/stream/src/streamUpdate.c
+++ b/source/libs/stream/src/streamUpdate.c
@@ -13,33 +13,31 @@
* along with this program. If not, see .
*/
-#include "tstreamUpdate.h"
-#include "tencode.h"
-#include "ttime.h"
#include "query.h"
+#include "tencode.h"
+#include "tstreamUpdate.h"
+#include "ttime.h"
-#define DEFAULT_FALSE_POSITIVE 0.01
-#define DEFAULT_BUCKET_SIZE 1310720
-#define DEFAULT_MAP_CAPACITY 1310720
-#define DEFAULT_MAP_SIZE (DEFAULT_MAP_CAPACITY * 10)
-#define ROWS_PER_MILLISECOND 1
-#define MAX_NUM_SCALABLE_BF 100000
-#define MIN_NUM_SCALABLE_BF 10
-#define DEFAULT_PREADD_BUCKET 1
-#define MAX_INTERVAL MILLISECOND_PER_MINUTE
-#define MIN_INTERVAL (MILLISECOND_PER_SECOND * 10)
-#define DEFAULT_EXPECTED_ENTRIES 10000
+#define DEFAULT_FALSE_POSITIVE 0.01
+#define DEFAULT_BUCKET_SIZE 1310720
+#define DEFAULT_MAP_CAPACITY 1310720
+#define DEFAULT_MAP_SIZE (DEFAULT_MAP_CAPACITY * 10)
+#define ROWS_PER_MILLISECOND 1
+#define MAX_NUM_SCALABLE_BF 100000
+#define MIN_NUM_SCALABLE_BF 10
+#define DEFAULT_PREADD_BUCKET 1
+#define MAX_INTERVAL MILLISECOND_PER_MINUTE
+#define MIN_INTERVAL (MILLISECOND_PER_SECOND * 10)
+#define DEFAULT_EXPECTED_ENTRIES 10000
-static int64_t adjustExpEntries(int64_t entries) {
- return TMIN(DEFAULT_EXPECTED_ENTRIES, entries);
-}
+static int64_t adjustExpEntries(int64_t entries) { return TMIN(DEFAULT_EXPECTED_ENTRIES, entries); }
static void windowSBfAdd(SUpdateInfo *pInfo, uint64_t count) {
if (pInfo->numSBFs < count) {
count = pInfo->numSBFs;
}
for (uint64_t i = 0; i < count; ++i) {
- int64_t rows = adjustExpEntries(pInfo->interval * ROWS_PER_MILLISECOND);
+ int64_t rows = adjustExpEntries(pInfo->interval * ROWS_PER_MILLISECOND);
SScalableBf *tsSBF = tScalableBfInit(rows, DEFAULT_FALSE_POSITIVE);
taosArrayPush(pInfo->pTsSBFs, &tsSBF);
}
@@ -78,7 +76,7 @@ static int64_t adjustInterval(int64_t interval, int32_t precision) {
static int64_t adjustWatermark(int64_t adjInterval, int64_t originInt, int64_t watermark) {
if (watermark <= adjInterval) {
- watermark = TMAX(originInt/adjInterval, 1) * adjInterval;
+ watermark = TMAX(originInt / adjInterval, 1) * adjInterval;
} else if (watermark > MAX_NUM_SCALABLE_BF * adjInterval) {
watermark = MAX_NUM_SCALABLE_BF * adjInterval;
}/* else if (watermark < MIN_NUM_SCALABLE_BF * adjInterval) {
@@ -158,11 +156,17 @@ static SScalableBf *getSBf(SUpdateInfo *pInfo, TSKEY ts) {
return res;
}
+bool updateInfoIsTableInserted(SUpdateInfo *pInfo, int64_t tbUid) {
+ void *pVal = taosHashGet(pInfo->pMap, &tbUid, sizeof(int64_t));
+ if (pVal || taosHashGetSize(pInfo->pMap) >= DEFAULT_MAP_SIZE) return true;
+ return false;
+}
+
bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts) {
- int32_t res = TSDB_CODE_FAILED;
- TSKEY* pMapMaxTs = taosHashGet(pInfo->pMap, &tableId, sizeof(uint64_t));
- uint64_t index = ((uint64_t)tableId) % pInfo->numBuckets;
- TSKEY maxTs = *(TSKEY *)taosArrayGet(pInfo->pTsBuckets, index);
+ int32_t res = TSDB_CODE_FAILED;
+ TSKEY *pMapMaxTs = taosHashGet(pInfo->pMap, &tableId, sizeof(uint64_t));
+ uint64_t index = ((uint64_t)tableId) % pInfo->numBuckets;
+ TSKEY maxTs = *(TSKEY *)taosArrayGet(pInfo->pTsBuckets, index);
if (ts < maxTs - pInfo->watermark) {
// this window has been closed.
if (pInfo->pCloseWinSBF) {
@@ -178,42 +182,47 @@ bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts) {
}
int32_t size = taosHashGetSize(pInfo->pMap);
- if ( (!pMapMaxTs && size < DEFAULT_MAP_SIZE) || (pMapMaxTs && *pMapMaxTs < ts)) {
+ if ((!pMapMaxTs && size < DEFAULT_MAP_SIZE) || (pMapMaxTs && *pMapMaxTs < ts)) {
taosHashPut(pInfo->pMap, &tableId, sizeof(uint64_t), &ts, sizeof(TSKEY));
return false;
}
- if ( !pMapMaxTs && maxTs < ts ) {
+ if (!pMapMaxTs && maxTs < ts) {
taosArraySet(pInfo->pTsBuckets, index, &ts);
return false;
}
if (ts < pInfo->minTS) {
- qDebug("===stream===Update. tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64 , tableId, maxTs, *pMapMaxTs, ts);
+ qDebug("===stream===Update. tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64, tableId,
+ maxTs, *pMapMaxTs, ts);
return true;
} else if (res == TSDB_CODE_SUCCESS) {
return false;
}
- qDebug("===stream===Update. tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64 , tableId, maxTs, *pMapMaxTs, ts);
+ qDebug("===stream===Update. tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64, tableId,
+ maxTs, *pMapMaxTs, ts);
// check from tsdb api
return true;
}
-void updateInfoSetScanRange(SUpdateInfo *pInfo, STimeWindow* pWin, uint64_t groupId, uint64_t version) {
- qDebug("===stream===groupId:%" PRIu64 ", startTs:%" PRIu64 ", endTs:%" PRIu64 ", version:%" PRIu64 , groupId, pWin->skey, pWin->ekey, version);
+void updateInfoSetScanRange(SUpdateInfo *pInfo, STimeWindow *pWin, uint64_t groupId, uint64_t version) {
+ qDebug("===stream===groupId:%" PRIu64 ", startTs:%" PRIu64 ", endTs:%" PRIu64 ", version:%" PRIu64, groupId,
+ pWin->skey, pWin->ekey, version);
pInfo->scanWindow = *pWin;
pInfo->scanGroupId = groupId;
pInfo->maxVersion = version;
}
-bool updateInfoIgnore(SUpdateInfo *pInfo, STimeWindow* pWin, uint64_t groupId, uint64_t version) {
+bool updateInfoIgnore(SUpdateInfo *pInfo, STimeWindow *pWin, uint64_t groupId, uint64_t version) {
if (!pInfo) {
return false;
}
- qDebug("===stream===check groupId:%" PRIu64 ", startTs:%" PRIu64 ", endTs:%" PRIu64 ", version:%" PRIu64 , groupId, pWin->skey, pWin->ekey, version);
- if (pInfo->scanGroupId == groupId && pInfo->scanWindow.skey <= pWin->skey &&
- pWin->ekey <= pInfo->scanWindow.ekey && version <= pInfo->maxVersion ) {
- qDebug("===stream===ignore groupId:%" PRIu64 ", startTs:%" PRIu64 ", endTs:%" PRIu64 ", version:%" PRIu64 , groupId, pWin->skey, pWin->ekey, version);
+ qDebug("===stream===check groupId:%" PRIu64 ", startTs:%" PRIu64 ", endTs:%" PRIu64 ", version:%" PRIu64, groupId,
+ pWin->skey, pWin->ekey, version);
+ if (pInfo->scanGroupId == groupId && pInfo->scanWindow.skey <= pWin->skey && pWin->ekey <= pInfo->scanWindow.ekey &&
+ version <= pInfo->maxVersion) {
+ qDebug("===stream===ignore groupId:%" PRIu64 ", startTs:%" PRIu64 ", endTs:%" PRIu64 ", version:%" PRIu64, groupId,
+ pWin->skey, pWin->ekey, version);
return true;
}
return false;
@@ -261,7 +270,7 @@ int32_t updateInfoSerialize(void *buf, int32_t bufLen, const SUpdateInfo *pInfo)
int32_t size = taosArrayGetSize(pInfo->pTsBuckets);
if (tEncodeI32(&encoder, size) < 0) return -1;
for (int32_t i = 0; i < size; i++) {
- TSKEY* pTs = (TSKEY*)taosArrayGet(pInfo->pTsBuckets, i);
+ TSKEY *pTs = (TSKEY *)taosArrayGet(pInfo->pTsBuckets, i);
if (tEncodeI64(&encoder, *pTs) < 0) return -1;
}
@@ -270,7 +279,7 @@ int32_t updateInfoSerialize(void *buf, int32_t bufLen, const SUpdateInfo *pInfo)
int32_t sBfSize = taosArrayGetSize(pInfo->pTsSBFs);
if (tEncodeI32(&encoder, sBfSize) < 0) return -1;
for (int32_t i = 0; i < sBfSize; i++) {
- SScalableBf* pSBf = taosArrayGetP(pInfo->pTsSBFs, i);
+ SScalableBf *pSBf = taosArrayGetP(pInfo->pTsSBFs, i);
if (tScalableBfEncode(pSBf, &encoder) < 0) return -1;
}
@@ -278,17 +287,17 @@ int32_t updateInfoSerialize(void *buf, int32_t bufLen, const SUpdateInfo *pInfo)
if (tEncodeI64(&encoder, pInfo->interval) < 0) return -1;
if (tEncodeI64(&encoder, pInfo->watermark) < 0) return -1;
if (tEncodeI64(&encoder, pInfo->minTS) < 0) return -1;
-
+
if (tScalableBfEncode(pInfo->pCloseWinSBF, &encoder) < 0) return -1;
int32_t mapSize = taosHashGetSize(pInfo->pMap);
if (tEncodeI32(&encoder, mapSize) < 0) return -1;
- void* pIte = NULL;
+ void *pIte = NULL;
size_t keyLen = 0;
while ((pIte = taosHashIterate(pInfo->pMap, pIte)) != NULL) {
- void* key = taosHashGetKey(pIte, &keyLen);
- if (tEncodeU64(&encoder, *(uint64_t*)key) < 0) return -1;
- if (tEncodeI64(&encoder, *(TSKEY*)pIte) < 0) return -1;
+ void *key = taosHashGetKey(pIte, &keyLen);
+ if (tEncodeU64(&encoder, *(uint64_t *)key) < 0) return -1;
+ if (tEncodeI64(&encoder, *(TSKEY *)pIte) < 0) return -1;
}
if (tEncodeI64(&encoder, pInfo->scanWindow.skey) < 0) return -1;
@@ -311,7 +320,7 @@ int32_t updateInfoDeserialize(void *buf, int32_t bufLen, SUpdateInfo *pInfo) {
int32_t size = 0;
if (tDecodeI32(&decoder, &size) < 0) return -1;
- pInfo->pTsBuckets = taosArrayInit(size, sizeof(TSKEY));
+ pInfo->pTsBuckets = taosArrayInit(size, sizeof(TSKEY));
TSKEY ts = INT64_MIN;
for (int32_t i = 0; i < size; i++) {
if (tDecodeI64(&decoder, &ts) < 0) return -1;
@@ -324,7 +333,7 @@ int32_t updateInfoDeserialize(void *buf, int32_t bufLen, SUpdateInfo *pInfo) {
if (tDecodeI32(&decoder, &sBfSize) < 0) return -1;
pInfo->pTsSBFs = taosArrayInit(sBfSize, sizeof(void *));
for (int32_t i = 0; i < sBfSize; i++) {
- SScalableBf* pSBf = tScalableBfDecode(&decoder);
+ SScalableBf *pSBf = tScalableBfDecode(&decoder);
if (!pSBf) return -1;
taosArrayPush(pInfo->pTsSBFs, &pSBf);
}
@@ -337,11 +346,11 @@ int32_t updateInfoDeserialize(void *buf, int32_t bufLen, SUpdateInfo *pInfo) {
int32_t mapSize = 0;
if (tDecodeI32(&decoder, &mapSize) < 0) return -1;
- _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
+ _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT);
pInfo->pMap = taosHashInit(mapSize, hashFn, true, HASH_NO_LOCK);
uint64_t uid = 0;
ts = INT64_MIN;
- for(int32_t i = 0; i < mapSize; i++) {
+ for (int32_t i = 0; i < mapSize; i++) {
if (tDecodeU64(&decoder, &uid) < 0) return -1;
if (tDecodeI64(&decoder, &ts) < 0) return -1;
taosHashPut(pInfo->pMap, &uid, sizeof(uint64_t), &ts, sizeof(TSKEY));
diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c
index c7784cd62e..1991560d42 100644
--- a/source/libs/sync/src/syncMain.c
+++ b/source/libs/sync/src/syncMain.c
@@ -392,6 +392,29 @@ bool syncIsReady(int64_t rid) {
return b;
}
+bool syncIsReadyForRead(int64_t rid) {
+ SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
+ if (pSyncNode == NULL) {
+ return false;
+ }
+ ASSERT(rid == pSyncNode->rid);
+
+ // TODO: last not noop?
+ SyncIndex lastIndex = syncNodeGetLastIndex(pSyncNode);
+ bool b = (pSyncNode->state == TAOS_SYNC_STATE_LEADER) && (pSyncNode->commitIndex >= lastIndex - SYNC_MAX_READ_RANGE);
+ taosReleaseRef(tsNodeRefId, pSyncNode->rid);
+
+ // if false, set error code
+ if (false == b) {
+ if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) {
+ terrno = TSDB_CODE_SYN_NOT_LEADER;
+ } else {
+ terrno = TSDB_CODE_APP_NOT_READY;
+ }
+ }
+ return b;
+}
+
bool syncIsRestoreFinish(int64_t rid) {
SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
if (pSyncNode == NULL) {
@@ -519,6 +542,30 @@ SyncTerm syncGetMyTerm(int64_t rid) {
return term;
}
+SyncIndex syncGetLastIndex(int64_t rid) {
+ SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
+ if (pSyncNode == NULL) {
+ return SYNC_INDEX_INVALID;
+ }
+ ASSERT(rid == pSyncNode->rid);
+ SyncIndex lastIndex = syncNodeGetLastIndex(pSyncNode);
+
+ taosReleaseRef(tsNodeRefId, pSyncNode->rid);
+ return lastIndex;
+}
+
+SyncIndex syncGetCommitIndex(int64_t rid) {
+ SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
+ if (pSyncNode == NULL) {
+ return SYNC_INDEX_INVALID;
+ }
+ ASSERT(rid == pSyncNode->rid);
+ SyncIndex cmtIndex = pSyncNode->commitIndex;
+
+ taosReleaseRef(tsNodeRefId, pSyncNode->rid);
+ return cmtIndex;
+}
+
SyncGroupId syncGetVgId(int64_t rid) {
SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
if (pSyncNode == NULL) {
@@ -828,6 +875,15 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak) {
pSyncNode->changing = true;
}
+ // not restored, vnode enable
+ if (!pSyncNode->restoreFinish && pSyncNode->vgId != 1) {
+ ret = -1;
+ terrno = TSDB_CODE_SYN_PROPOSE_NOT_READY;
+ sError("vgId:%d, failed to sync propose since not ready, type:%s, last:%ld, cmt:%ld", pSyncNode->vgId,
+ TMSG_INFO(pMsg->msgType), syncNodeGetLastIndex(pSyncNode), pSyncNode->commitIndex);
+ goto _END;
+ }
+
SRespStub stub;
stub.createTime = taosGetTimestampMs();
stub.rpcMsg = *pMsg;
diff --git a/source/libs/transport/inc/transComm.h b/source/libs/transport/inc/transComm.h
index 04b58da570..6b52c74271 100644
--- a/source/libs/transport/inc/transComm.h
+++ b/source/libs/transport/inc/transComm.h
@@ -293,7 +293,7 @@ int transSendResponse(const STransMsg* msg);
int transRegisterMsg(const STransMsg* msg);
int transSetDefaultAddr(void* shandle, const char* ip, const char* fqdn);
-int transGetSockDebugInfo(struct sockaddr* sockname, char* dst);
+int transSockInfo2Str(struct sockaddr* sockname, char* dst);
int64_t transAllocHandle();
diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c
index 9eea43be23..ebad365ce0 100644
--- a/source/libs/transport/src/transCli.c
+++ b/source/libs/transport/src/transCli.c
@@ -103,14 +103,6 @@ static SCliConn* getConnFromPool(void* pool, char* ip, uint32_t port);
static void addConnToPool(void* pool, SCliConn* conn);
static void doCloseIdleConn(void* param);
-static int sockDebugInfo(struct sockaddr* sockname, char* dst) {
- struct sockaddr_in addr = *(struct sockaddr_in*)sockname;
-
- char buf[16] = {0};
- int r = uv_ip4_name(&addr, (char*)buf, sizeof(buf));
- sprintf(dst, "%s:%d", buf, ntohs(addr.sin_port));
- return r;
-}
// register timer for read
static void cliReadTimeoutCb(uv_timer_t* handle);
// register timer in each thread to clear expire conn
@@ -121,12 +113,14 @@ static void cliAllocRecvBufferCb(uv_handle_t* handle, size_t suggested_size, uv_
static void cliRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf);
// callback after write data to socket
static void cliSendCb(uv_write_t* req, int status);
-// callback after conn to server
+// callback after conn to server
static void cliConnCb(uv_connect_t* req, int status);
static void cliAsyncCb(uv_async_t* handle);
static void cliIdleCb(uv_idle_t* handle);
static void cliPrepareCb(uv_prepare_t* handle);
+static bool cliRecvReleaseReq(SCliConn* conn, STransMsgHead* pHead);
+
static int32_t allocConnRef(SCliConn* conn, bool update);
static int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg);
@@ -361,6 +355,9 @@ void cliHandleResp(SCliConn* conn) {
SCliMsg* pMsg = NULL;
STransConnCtx* pCtx = NULL;
+ if (cliRecvReleaseReq(conn, pHead)) {
+ return;
+ }
CONN_SHOULD_RELEASE(conn, pHead);
if (CONN_NO_PERSIST_BY_APP(conn)) {
@@ -383,7 +380,7 @@ void cliHandleResp(SCliConn* conn) {
transMsg.info.ahandle);
}
} else {
- pCtx = pMsg ? pMsg->ctx : NULL;
+ pCtx = pMsg->ctx;
transMsg.info.ahandle = pCtx ? pCtx->ahandle : NULL;
tDebug("%s conn %p get ahandle %p, persist: 1", CONN_GET_INST_LABEL(conn), conn, transMsg.info.ahandle);
}
@@ -395,7 +392,6 @@ void cliHandleResp(SCliConn* conn) {
}
STraceId* trace = &transMsg.info.traceId;
-
tGDebug("%s conn %p %s received from %s, local info:%s, len:%d, code str:%s", CONN_GET_INST_LABEL(conn), conn,
TMSG_INFO(pHead->msgType), conn->dst, conn->src, transMsg.contLen, tstrerror(transMsg.code));
@@ -830,11 +826,11 @@ void cliConnCb(uv_connect_t* req, int status) {
int addrlen = sizeof(peername);
uv_tcp_getpeername((uv_tcp_t*)pConn->stream, &peername, &addrlen);
- transGetSockDebugInfo(&peername, pConn->dst);
+ transSockInfo2Str(&peername, pConn->dst);
addrlen = sizeof(sockname);
uv_tcp_getsockname((uv_tcp_t*)pConn->stream, &sockname, &addrlen);
- transGetSockDebugInfo(&sockname, pConn->src);
+ transSockInfo2Str(&sockname, pConn->src);
tTrace("%s conn %p connect to server successfully", CONN_GET_INST_LABEL(pConn), pConn);
assert(pConn->stream == req->handle);
@@ -1053,6 +1049,30 @@ static void cliPrepareCb(uv_prepare_t* handle) {
if (thrd->stopMsg != NULL) cliHandleQuit(thrd->stopMsg, thrd);
}
+bool cliRecvReleaseReq(SCliConn* conn, STransMsgHead* pHead) {
+ if (pHead->release == 1 && (pHead->msgLen) == sizeof(*pHead)) {
+ uint64_t ahandle = pHead->ahandle;
+ SCliMsg* pMsg = NULL;
+ CONN_GET_MSGCTX_BY_AHANDLE(conn, ahandle);
+ transClearBuffer(&conn->readBuf);
+ transFreeMsg(transContFromHead((char*)pHead));
+ if (transQueueSize(&conn->cliMsgs) > 0 && ahandle == 0) {
+ SCliMsg* cliMsg = transQueueGet(&conn->cliMsgs, 0);
+ if (cliMsg->type == Release) return true;
+ }
+ tDebug("%s conn %p receive release request, refId:%" PRId64 "", CONN_GET_INST_LABEL(conn), conn, conn->refId);
+ if (T_REF_VAL_GET(conn) > 1) {
+ transUnrefCliHandle(conn);
+ }
+ destroyCmsg(pMsg);
+ cliReleaseUnfinishedMsg(conn);
+ transQueueClear(&conn->cliMsgs);
+ addConnToPool(((SCliThrd*)conn->hostThrd)->pool, conn);
+ return true;
+ }
+ return false;
+}
+
static void* cliWorkThread(void* arg) {
SCliThrd* pThrd = (SCliThrd*)arg;
pThrd->pid = taosGetSelfPthreadId();
diff --git a/source/libs/transport/src/transComm.c b/source/libs/transport/src/transComm.c
index b568163e23..4272ec0b1c 100644
--- a/source/libs/transport/src/transComm.c
+++ b/source/libs/transport/src/transComm.c
@@ -77,7 +77,7 @@ void transFreeMsg(void* msg) {
}
taosMemoryFree((char*)msg - sizeof(STransMsgHead));
}
-int transGetSockDebugInfo(struct sockaddr* sockname, char* dst) {
+int transSockInfo2Str(struct sockaddr* sockname, char* dst) {
struct sockaddr_in addr = *(struct sockaddr_in*)sockname;
char buf[20] = {0};
diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c
index 4d35e346b1..3512b27bf8 100644
--- a/source/libs/transport/src/transSvr.c
+++ b/source/libs/transport/src/transSvr.c
@@ -114,6 +114,8 @@ static void uvAcceptAsyncCb(uv_async_t* handle);
static void uvShutDownCb(uv_shutdown_t* req, int status);
static void uvPrepareCb(uv_prepare_t* handle);
+static bool uvRecvReleaseReq(SSvrConn* conn, STransMsgHead* pHead);
+
/*
* time-consuming task throwed into BG work thread
*/
@@ -123,7 +125,7 @@ static void uvWorkAfterTask(uv_work_t* req, int status);
static void uvWalkCb(uv_handle_t* handle, void* arg);
static void uvFreeCb(uv_handle_t* handle);
-static void uvStartSendRespInternal(SSvrMsg* smsg);
+static void uvStartSendRespImpl(SSvrMsg* smsg);
static void uvPrepareSendData(SSvrMsg* msg, uv_buf_t* wb);
static void uvStartSendResp(SSvrMsg* msg);
@@ -154,37 +156,6 @@ static void* transAcceptThread(void* arg);
static bool addHandleToWorkloop(SWorkThrd* pThrd, char* pipeName);
static bool addHandleToAcceptloop(void* arg);
-#define CONN_SHOULD_RELEASE(conn, head) \
- do { \
- if ((head)->release == 1 && (head->msgLen) == sizeof(*head)) { \
- reallocConnRef(conn); \
- tTrace("conn %p received release request", conn); \
- \
- STraceId traceId = head->traceId; \
- conn->status = ConnRelease; \
- transClearBuffer(&conn->readBuf); \
- transFreeMsg(transContFromHead((char*)head)); \
- \
- STransMsg tmsg = { \
- .code = 0, .info.handle = (void*)conn, .info.traceId = traceId, .info.ahandle = (void*)0x9527}; \
- SSvrMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSvrMsg)); \
- srvMsg->msg = tmsg; \
- srvMsg->type = Release; \
- srvMsg->pConn = conn; \
- if (!transQueuePush(&conn->srvMsgs, srvMsg)) { \
- return; \
- } \
- if (conn->regArg.init) { \
- tTrace("conn %p release, notify server app", conn); \
- STrans* pTransInst = conn->pTransInst; \
- (*pTransInst->cfp)(pTransInst->parent, &(conn->regArg.msg), NULL); \
- memset(&conn->regArg, 0, sizeof(conn->regArg)); \
- } \
- uvStartSendRespInternal(srvMsg); \
- return; \
- } \
- } while (0)
-
#define SRV_RELEASE_UV(loop) \
do { \
uv_walk(loop, uvWalkCb, NULL); \
@@ -230,7 +201,9 @@ static void uvHandleReq(SSvrConn* pConn) {
// transRefSrvHandle(pConn);
// uv_queue_work(((SWorkThrd*)pConn->hostThrd)->loop, wreq, uvWorkDoTask, uvWorkAfterTask);
- CONN_SHOULD_RELEASE(pConn, pHead);
+ if (uvRecvReleaseReq(pConn, pHead)) {
+ return;
+ }
STransMsg transMsg;
memset(&transMsg, 0, sizeof(transMsg));
@@ -356,10 +329,10 @@ void uvOnSendCb(uv_write_t* req, int status) {
msg = (SSvrMsg*)transQueueGet(&conn->srvMsgs, 0);
if (msg != NULL) {
- uvStartSendRespInternal(msg);
+ uvStartSendRespImpl(msg);
}
} else {
- uvStartSendRespInternal(msg);
+ uvStartSendRespImpl(msg);
}
}
}
@@ -423,7 +396,7 @@ static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) {
wb->len = len;
}
-static void uvStartSendRespInternal(SSvrMsg* smsg) {
+static void uvStartSendRespImpl(SSvrMsg* smsg) {
SSvrConn* pConn = smsg->pConn;
if (pConn->broken) {
return;
@@ -453,7 +426,7 @@ static void uvStartSendResp(SSvrMsg* smsg) {
if (!transQueuePush(&pConn->srvMsgs, smsg)) {
return;
}
- uvStartSendRespInternal(smsg);
+ uvStartSendRespImpl(smsg);
return;
}
@@ -544,6 +517,35 @@ static void uvShutDownCb(uv_shutdown_t* req, int status) {
uv_close((uv_handle_t*)req->handle, uvDestroyConn);
taosMemoryFree(req);
}
+static bool uvRecvReleaseReq(SSvrConn* pConn, STransMsgHead* pHead) {
+ if ((pHead)->release == 1 && (pHead->msgLen) == sizeof(*pHead)) {
+ reallocConnRef(pConn);
+ tTrace("conn %p received release request", pConn);
+
+ STraceId traceId = pHead->traceId;
+ pConn->status = ConnRelease;
+ transClearBuffer(&pConn->readBuf);
+ transFreeMsg(transContFromHead((char*)pHead));
+
+ STransMsg tmsg = {.code = 0, .info.handle = (void*)pConn, .info.traceId = traceId, .info.ahandle = (void*)0x9527};
+ SSvrMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSvrMsg));
+ srvMsg->msg = tmsg;
+ srvMsg->type = Release;
+ srvMsg->pConn = pConn;
+ if (!transQueuePush(&pConn->srvMsgs, srvMsg)) {
+ return true;
+ }
+ if (pConn->regArg.init) {
+ tTrace("conn %p release, notify server app", pConn);
+ STrans* pTransInst = pConn->pTransInst;
+ (*pTransInst->cfp)(pTransInst->parent, &(pConn->regArg.msg), NULL);
+ memset(&pConn->regArg, 0, sizeof(pConn->regArg));
+ }
+ uvStartSendRespImpl(srvMsg);
+ return true;
+ }
+ return false;
+}
static void uvPrepareCb(uv_prepare_t* handle) {
// prepare callback
SWorkThrd* pThrd = handle->data;
@@ -696,7 +698,7 @@ void uvOnConnectionCb(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf) {
transUnrefSrvHandle(pConn);
return;
}
- transGetSockDebugInfo(&peername, pConn->dst);
+ transSockInfo2Str(&peername, pConn->dst);
addrlen = sizeof(sockname);
if (0 != uv_tcp_getsockname(pConn->pTcp, (struct sockaddr*)&sockname, &addrlen)) {
@@ -704,7 +706,7 @@ void uvOnConnectionCb(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf) {
transUnrefSrvHandle(pConn);
return;
}
- transGetSockDebugInfo(&sockname, pConn->src);
+ transSockInfo2Str(&sockname, pConn->src);
struct sockaddr_in addr = *(struct sockaddr_in*)&sockname;
pConn->clientIp = addr.sin_addr.s_addr;
@@ -992,7 +994,7 @@ void uvHandleRelease(SSvrMsg* msg, SWorkThrd* thrd) {
if (!transQueuePush(&conn->srvMsgs, msg)) {
return;
}
- uvStartSendRespInternal(msg);
+ uvStartSendRespImpl(msg);
return;
} else if (conn->status == ConnRelease || conn->status == ConnNormal) {
tDebug("%s conn %p already released, ignore release-msg", transLabel(thrd->pTransInst), conn);
diff --git a/source/os/src/osRand.c b/source/os/src/osRand.c
index 461a72e962..bd2bfa486e 100644
--- a/source/os/src/osRand.c
+++ b/source/os/src/osRand.c
@@ -37,9 +37,13 @@ uint32_t taosRandR(uint32_t *pSeed) {
uint32_t taosSafeRand(void) {
#ifdef WINDOWS
- uint32_t seed;
+ uint32_t seed = taosRand();
HCRYPTPROV hCryptProv;
- if (!CryptAcquireContext(&hCryptProv, NULL, NULL, PROV_RSA_FULL, 0)) return seed;
+ if (!CryptAcquireContext(&hCryptProv, NULL, NULL, PROV_RSA_FULL, 0)) {
+ if (!CryptAcquireContext(&hCryptProv, NULL, NULL, PROV_RSA_FULL, CRYPT_NEWKEYSET)) {
+ return seed;
+ }
+ }
if (hCryptProv != NULL) {
if (!CryptGenRandom(hCryptProv, 4, &seed)) return seed;
}
diff --git a/source/util/src/terror.c b/source/util/src/terror.c
index 6e3067d44e..7b06967940 100644
--- a/source/util/src/terror.c
+++ b/source/util/src/terror.c
@@ -624,6 +624,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_REBUILDING, "Invalid index file"
//tmq
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_INVALID_MSG, "Invalid message")
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_MISMATCH, "Consumer mismatch")
+TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_CLOSED, "Consumer closed")
#ifdef TAOS_ERROR_C
};
diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py
index 203541f14a..600c64b8e6 100755
--- a/tests/pytest/crash_gen/crash_gen_main.py
+++ b/tests/pytest/crash_gen/crash_gen_main.py
@@ -1120,7 +1120,7 @@ class Database:
@classmethod
def setupLastTick(cls):
# start time will be auto generated , start at 10 years ago local time
- local_time = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-16]
+ local_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-16]
local_epoch_time = [int(i) for i in local_time.split("-")]
#local_epoch_time will be such as : [2022, 7, 18]
diff --git a/tests/pytest/crash_gen/shared/misc.py b/tests/pytest/crash_gen/shared/misc.py
index fd73f97fcb..6a8a59a027 100644
--- a/tests/pytest/crash_gen/shared/misc.py
+++ b/tests/pytest/crash_gen/shared/misc.py
@@ -46,7 +46,7 @@ class Logging:
@classmethod
def _get_datetime(cls):
- return datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-1]
+ return datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-1]
@classmethod
def getLogger(cls):
diff --git a/tests/pytest/util/taosadapter.py b/tests/pytest/util/taosadapter.py
index 614eb95d6b..0307675dfb 100644
--- a/tests/pytest/util/taosadapter.py
+++ b/tests/pytest/util/taosadapter.py
@@ -238,19 +238,23 @@ class TAdapter:
if self.running != 0:
psCmd = f"ps -ef|grep -w {toBeKilled}| grep -v grep | awk '{{print $2}}'"
+ # psCmd = f"pgrep {toBeKilled}"
processID = subprocess.check_output(
- psCmd, shell=True).decode("utf-8")
+ psCmd, shell=True)
while(processID):
- killCmd = f"kill {signal} {processID} > /dev/null 2>&1"
+ killCmd = f"pkill {signal} {processID} > /dev/null 2>&1"
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
if not platform.system().lower() == 'windows':
- for port in range(6030, 6041):
- fuserCmd = f"fuser -k -n tcp {port} > /dev/null"
- os.system(fuserCmd)
+ port = 6041
+ fuserCmd = f"fuser -k -n tcp {port} > /dev/null"
+ os.system(fuserCmd)
+ # for port in range(6030, 6041):
+ # fuserCmd = f"fuser -k -n tcp {port} > /dev/null"
+ # os.system(fuserCmd)
self.running = 0
tdLog.debug(f"taosadapter is stopped by kill {signal}")
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index fda5e5cb6e..97295d75e0 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -237,8 +237,8 @@
./test.sh -f tsim/stream/distributeInterval0.sim
./test.sh -f tsim/stream/distributeIntervalRetrive0.sim
./test.sh -f tsim/stream/distributeSession0.sim
-#./test.sh -f tsim/stream/session0.sim
-#./test.sh -f tsim/stream/session1.sim
+./test.sh -f tsim/stream/session0.sim
+./test.sh -f tsim/stream/session1.sim
./test.sh -f tsim/stream/state0.sim
./test.sh -f tsim/stream/triggerInterval0.sim
./test.sh -f tsim/stream/triggerSession0.sim
diff --git a/tests/script/tsim/stream/session0.sim b/tests/script/tsim/stream/session0.sim
index fee8c98cce..3e0af354d8 100644
--- a/tests/script/tsim/stream/session0.sim
+++ b/tests/script/tsim/stream/session0.sim
@@ -83,22 +83,22 @@ if $data11 != 3 then
goto loop0
endi
-if $data12 != NULL then
+if $data12 != 10 then
print ======data12=$data12
goto loop0
endi
-if $data13 != NULL then
+if $data13 != 10 then
print ======data13=$data13
goto loop0
endi
-if $data14 != NULL then
+if $data14 != 1.100000000 then
print ======data14=$data14
return -1
endi
-if $data15 != NULL then
+if $data15 != 0.000000000 then
print ======data15=$data15
return -1
endi
@@ -141,38 +141,38 @@ if $data01 != 7 then
goto loop1
endi
-if $data02 != NULL then
+if $data02 != 18 then
print =====data02=$data02
goto loop1
endi
-if $data03 != NULL then
+if $data03 != 4 then
print =====data03=$data03
goto loop1
endi
-if $data04 != NULL then
- print ======$data04
+if $data04 != 1.000000000 then
+ print ======data04=$data04
return -1
endi
-if $data05 != NULL then
- print ======$data05
+if $data05 != 1.154700538 then
+ print ======data05=$data05
return -1
endi
if $data06 != 4 then
- print ======$data06
+ print ======data06=$data06
return -1
endi
if $data07 != 1.000000000 then
- print ======$data07
+ print ======data07=$data07
return -1
endi
if $data08 != 13 then
- print ======$data08
+ print ======data08=$data08
return -1
endi
diff --git a/tests/system-test/2-query/json_tag.py b/tests/system-test/2-query/json_tag.py
index d9d7ef2300..698695a228 100644
--- a/tests/system-test/2-query/json_tag.py
+++ b/tests/system-test/2-query/json_tag.py
@@ -1,25 +1,8 @@
-###################################################################
-# Copyright (c) 2016 by TAOS Technologies, Inc.
-# All rights reserved.
-#
-# This file is proprietary and confidential to TAOS Technologies.
-# No part of this file may be reproduced, db_test.stored, transmitted,
-# disclosed or used in any form or by any means other than as
-# expressly provided by the written permission from Jianhui Tao
-#
-###################################################################
-
# -*- coding: utf-8 -*-
-import imp
-import sys
-import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
-import json
-import os
-
class TDTestCase:
def caseDescription(self):
@@ -31,35 +14,33 @@ class TDTestCase:
return
def init(self, conn, logSql):
- self.testcasePath = os.path.split(__file__)[0]
- self.testcaseFilename = os.path.split(__file__)[-1]
- # os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), True)
+ tdSql.init(conn.cursor(), False)
def run(self):
# tdSql.prepare()
- tdSql.execute('drop database if exists db')
- tdSql.execute('create database db vgroups 1')
- tdSql.execute('use db')
+ dbname = "db"
+ tdSql.execute(f'drop database if exists {dbname}')
+ tdSql.execute(f'create database {dbname} vgroups 1')
+ tdSql.execute(f'use {dbname}')
print("============== STEP 1 ===== prepare data & validate json string")
- tdSql.error("create table if not exists jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json, tagint int)")
- tdSql.error("create table if not exists jsons1(ts timestamp, data json) tags(tagint int)")
- tdSql.execute("create table if not exists jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)")
- tdSql.execute("insert into jsons1_1 using jsons1 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 1, false, 'json1', '你是') (1591060608000, 23, true, '等等', 'json')")
- tdSql.execute("insert into jsons1_2 using jsons1 tags('{\"tag1\":5,\"tag2\":\"beijing\"}') values (1591060628000, 2, true, 'json2', 'sss')")
- tdSql.execute("insert into jsons1_3 using jsons1 tags('{\"tag1\":false,\"tag2\":\"beijing\"}') values (1591060668000, 3, false, 'json3', 'efwe')")
- tdSql.execute("insert into jsons1_4 using jsons1 tags('{\"tag1\":null,\"tag2\":\"shanghai\",\"tag3\":\"hello\"}') values (1591060728000, 4, true, 'json4', '323sd')")
- tdSql.execute("insert into jsons1_5 using jsons1 tags('{\"tag1\":1.232, \"tag2\":null}') values(1591060928000, 1, false, '你就会', 'ewe')")
- tdSql.execute("insert into jsons1_6 using jsons1 tags('{\"tag1\":11,\"tag2\":\"\",\"tag2\":null}') values(1591061628000, 11, false, '你就会','')")
- tdSql.execute("insert into jsons1_7 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}') values(1591062628000, 2, NULL, '你就会', 'dws')")
+ tdSql.error(f"create table if not exists {dbname}.jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json, tagint int)")
+ tdSql.error(f"create table if not exists {dbname}.jsons1(ts timestamp, data json) tags(tagint int)")
+ tdSql.execute(f"create table if not exists {dbname}.jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)")
+ tdSql.execute(f"insert into {dbname}.jsons1_1 using {dbname}.jsons1 tags('{{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}}') values(1591060618000, 1, false, 'json1', '你是') (1591060608000, 23, true, '等等', 'json')")
+ tdSql.execute(f"insert into {dbname}.jsons1_2 using {dbname}.jsons1 tags('{{\"tag1\":5,\"tag2\":\"beijing\"}}') values (1591060628000, 2, true, 'json2', 'sss')")
+ tdSql.execute(f"insert into {dbname}.jsons1_3 using {dbname}.jsons1 tags('{{\"tag1\":false,\"tag2\":\"beijing\"}}') values (1591060668000, 3, false, 'json3', 'efwe')")
+ tdSql.execute(f"insert into {dbname}.jsons1_4 using {dbname}.jsons1 tags('{{\"tag1\":null,\"tag2\":\"shanghai\",\"tag3\":\"hello\"}}') values (1591060728000, 4, true, 'json4', '323sd')")
+ tdSql.execute(f"insert into {dbname}.jsons1_5 using {dbname}.jsons1 tags('{{\"tag1\":1.232, \"tag2\":null}}') values(1591060928000, 1, false, '你就会', 'ewe')")
+ tdSql.execute(f"insert into {dbname}.jsons1_6 using {dbname}.jsons1 tags('{{\"tag1\":11,\"tag2\":\"\",\"tag2\":null}}') values(1591061628000, 11, false, '你就会','')")
+ tdSql.execute(f"insert into {dbname}.jsons1_7 using {dbname}.jsons1 tags('{{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}}') values(1591062628000, 2, NULL, '你就会', 'dws')")
# test duplicate key using the first one. elimate empty key
- tdSql.execute("CREATE TABLE if not exists jsons1_8 using jsons1 tags('{\"tag1\":null, \"tag1\":true, \"tag1\":45, \"1tag$\":2, \" \":90, \"\":32}')")
- tdSql.query("select jtag from jsons1_8")
- tdSql.checkRows(0);
+ tdSql.execute(f"create TABLE if not exists {dbname}.jsons1_8 using {dbname}.jsons1 tags('{{\"tag1\":null, \"tag1\":true, \"tag1\":45, \"1tag$\":2, \" \":90, \"\":32}}')")
+ tdSql.query(f"select jtag from {dbname}.jsons1_8")
+ tdSql.checkRows(0)
- tdSql.query("select ts,jtag from jsons1 order by ts limit 2,3")
+ tdSql.query(f"select ts,jtag from {dbname}.jsons1 order by ts limit 2,3")
tdSql.checkData(0, 0, '2020-06-02 09:17:08.000')
tdSql.checkData(0, 1, '{"tag1":5,"tag2":"beijing"}')
tdSql.checkData(1, 0, '2020-06-02 09:17:48.000')
@@ -67,7 +48,7 @@ class TDTestCase:
tdSql.checkData(2, 0, '2020-06-02 09:18:48.000')
tdSql.checkData(2, 1, '{"tag1":null,"tag2":"shanghai","tag3":"hello"}')
- tdSql.query("select ts,jtag->'tag1' from jsons1 order by ts limit 2,3")
+ tdSql.query(f"select ts,jtag->'tag1' from {dbname}.jsons1 order by ts limit 2,3")
tdSql.checkData(0, 0, '2020-06-02 09:17:08.000')
tdSql.checkData(0, 1, '5.000000000')
tdSql.checkData(1, 0, '2020-06-02 09:17:48.000')
@@ -76,163 +57,163 @@ class TDTestCase:
tdSql.checkData(2, 1, 'null')
# test empty json string, save as jtag is NULL
- tdSql.execute("insert into jsons1_9 using jsons1 tags('\t') values (1591062328000, 24, NULL, '你就会', '2sdw')")
- tdSql.execute("CREATE TABLE if not exists jsons1_10 using jsons1 tags('')")
- tdSql.execute("CREATE TABLE if not exists jsons1_11 using jsons1 tags(' ')")
- tdSql.execute("CREATE TABLE if not exists jsons1_12 using jsons1 tags('{}')")
- tdSql.execute("CREATE TABLE if not exists jsons1_13 using jsons1 tags('null')")
+ tdSql.execute(f"insert into {dbname}.jsons1_9 using {dbname}.jsons1 tags('\t') values (1591062328000, 24, NULL, '你就会', '2sdw')")
+ tdSql.execute(f"create TABLE if not exists {dbname}.jsons1_10 using {dbname}.jsons1 tags('')")
+ tdSql.execute(f"create TABLE if not exists {dbname}.jsons1_11 using {dbname}.jsons1 tags(' ')")
+ tdSql.execute(f"create TABLE if not exists {dbname}.jsons1_12 using {dbname}.jsons1 tags('{{}}')")
+ tdSql.execute(f"create TABLE if not exists {dbname}.jsons1_13 using {dbname}.jsons1 tags('null')")
# test invalidate json
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('\"efwewf\"')")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('3333')")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags(76)")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags(hell)")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('33.33')")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('false')")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('[1,true]')")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{222}')")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"fe\"}')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('\"efwewf\"')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('3333')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags(76)")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags(hell)")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('33.33')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('false')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('[1,true]')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{222}}')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"fe\"}}')")
# test invalidate json key, key must can be printed assic char
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"tag1\":[1,true]}')")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"tag1\":{}}')")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"。loc\":\"fff\"}')")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"\t\":\"fff\"}')")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"试试\":\"fff\"}')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"tag1\":[1,true]}}')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"tag1\":{{}}}}')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"。loc\":\"fff\"}}')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"\t\":\"fff\"}}')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"试试\":\"fff\"}}')")
# test invalidate json value, value number can not be inf,nan TD-12166
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"k\":1.8e308}')")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"k\":-1.8e308}')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"k\":1.8e308}}')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"k\":-1.8e308}}')")
#test length limit
char1= ''.join(['abcd']*64)
char3= ''.join(['abcd']*1021)
print(len(char3)) # 4084
- tdSql.error("CREATE TABLE if not exists jsons1_15 using jsons1 tags('{\"%s1\":5}')" % char1) # len(key)=257
- tdSql.execute("CREATE TABLE if not exists jsons1_15 using jsons1 tags('{\"%s\":5}')" % char1) # len(key)=256
- tdSql.error("CREATE TABLE if not exists jsons1_16 using jsons1 tags('{\"TSSSS\":\"%s\"}')" % char3) # len(object)=4096
- tdSql.execute("CREATE TABLE if not exists jsons1_16 using jsons1 tags('{\"TSSS\":\"%s\"}')" % char3) # len(object)=4095
- tdSql.execute("drop table if exists jsons1_15")
- tdSql.execute("drop table if exists jsons1_16")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_15 using {dbname}.jsons1 tags('{{\"%s1\":5}}')" % char1) # len(key)=257
+ tdSql.execute(f"create TABLE if not exists {dbname}.jsons1_15 using {dbname}.jsons1 tags('{{\"%s\":5}}')" % char1) # len(key)=256
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_16 using {dbname}.jsons1 tags('{{\"TSSSS\":\"%s\"}}')" % char3) # len(object)=4096
+ tdSql.execute(f"create TABLE if not exists {dbname}.jsons1_16 using {dbname}.jsons1 tags('{{\"TSSS\":\"%s\"}}')" % char3) # len(object)=4095
+ tdSql.execute(f"drop table if exists {dbname}.jsons1_15")
+ tdSql.execute(f"drop table if exists {dbname}.jsons1_16")
print("============== STEP 2 ===== alter table json tag")
- tdSql.error("ALTER STABLE jsons1 add tag tag2 nchar(20)")
- tdSql.error("ALTER STABLE jsons1 drop tag jtag")
- tdSql.error("ALTER TABLE jsons1 MODIFY TAG jtag nchar(128)")
+ tdSql.error(f"ALTER stable {dbname}.jsons1 add tag tag2 nchar(20)")
+ tdSql.error(f"ALTER stable {dbname}.jsons1 drop tag jtag")
+ tdSql.error(f"ALTER table {dbname}.jsons1 MODIFY TAG jtag nchar(128)")
- tdSql.execute("ALTER TABLE jsons1_1 SET TAG jtag='{\"tag1\":\"femail\",\"tag2\":35,\"tag3\":true}'")
- tdSql.query("select jtag from jsons1_1")
+ tdSql.execute(f"ALTER table {dbname}.jsons1_1 SET TAG jtag='{{\"tag1\":\"femail\",\"tag2\":35,\"tag3\":true}}'")
+ tdSql.query(f"select jtag from {dbname}.jsons1_1")
tdSql.checkData(0, 0, '{"tag1":"femail","tag2":35,"tag3":true}')
- tdSql.execute("ALTER TABLE jsons1 rename TAG jtag jtag_new")
- tdSql.execute("ALTER TABLE jsons1 rename TAG jtag_new jtag")
+ tdSql.execute(f"ALTER table {dbname}.jsons1 rename TAG jtag jtag_new")
+ tdSql.execute(f"ALTER table {dbname}.jsons1 rename TAG jtag_new jtag")
- tdSql.execute("create table st(ts timestamp, i int) tags(t int)")
- tdSql.error("ALTER STABLE st add tag jtag json")
- tdSql.error("ALTER STABLE st add column jtag json")
+ tdSql.execute(f"create table {dbname}.st(ts timestamp, i int) tags(t int)")
+ tdSql.error(f"ALTER stable {dbname}.st add tag jtag json")
+ tdSql.error(f"ALTER stable {dbname}.st add column jtag json")
print("============== STEP 3 ===== query table")
# test error syntax
- tdSql.error("select * from jsons1 where jtag->tag1='beijing'")
- tdSql.error("select -> from jsons1")
- tdSql.error("select * from jsons1 where contains")
- tdSql.error("select * from jsons1 where jtag->")
- tdSql.error("select jtag->location from jsons1")
- tdSql.error("select jtag contains location from jsons1")
- tdSql.error("select * from jsons1 where jtag contains location")
- tdSql.query("select * from jsons1 where jtag contains''")
- tdSql.error("select * from jsons1 where jtag contains 'location'='beijing'")
+ tdSql.error(f"select * from {dbname}.jsons1 where jtag->tag1='beijing'")
+ tdSql.error(f"select -> from {dbname}.jsons1")
+ tdSql.error(f"select * from {dbname}.jsons1 where contains")
+ tdSql.error(f"select * from {dbname}.jsons1 where jtag->")
+ tdSql.error(f"select jtag->location from {dbname}.jsons1")
+ tdSql.error(f"select jtag contains location from {dbname}.jsons1")
+ tdSql.error(f"select * from {dbname}.jsons1 where jtag contains location")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag contains''")
+ tdSql.error(f"select * from {dbname}.jsons1 where jtag contains 'location'='beijing'")
# test function error
- tdSql.error("select avg(jtag->'tag1') from jsons1")
- tdSql.error("select avg(jtag) from jsons1")
- tdSql.error("select min(jtag->'tag1') from jsons1")
- tdSql.error("select min(jtag) from jsons1")
- tdSql.error("select ceil(jtag->'tag1') from jsons1")
- tdSql.error("select ceil(jtag) from jsons1")
+ tdSql.error(f"select avg(jtag->'tag1') from {dbname}.jsons1")
+ tdSql.error(f"select avg(jtag) from {dbname}.jsons1")
+ tdSql.error(f"select min(jtag->'tag1') from {dbname}.jsons1")
+ tdSql.error(f"select min(jtag) from {dbname}.jsons1")
+ tdSql.error(f"select ceil(jtag->'tag1') from {dbname}.jsons1")
+ tdSql.error(f"select ceil(jtag) from {dbname}.jsons1")
#test scalar operation
- tdSql.query("select jtag contains 'tag1',jtag->'tag1' from jsons1 order by jtag->'tag1'")
+ tdSql.query(f"select jtag contains 'tag1',jtag->'tag1' from {dbname}.jsons1 order by jtag->'tag1'")
tdSql.checkRows(9)
- tdSql.query("select jtag->'tag1' like 'fe%',jtag->'tag1' from jsons1 order by jtag->'tag1'")
+ tdSql.query(f"select jtag->'tag1' like 'fe%',jtag->'tag1' from {dbname}.jsons1 order by jtag->'tag1'")
tdSql.checkRows(9)
- tdSql.query("select jtag->'tag1' not like 'fe%',jtag->'tag1' from jsons1 order by jtag->'tag1'")
+ tdSql.query(f"select jtag->'tag1' not like 'fe%',jtag->'tag1' from {dbname}.jsons1 order by jtag->'tag1'")
tdSql.checkRows(9)
- tdSql.query("select jtag->'tag1' match 'fe',jtag->'tag1' from jsons1 order by jtag->'tag1'")
+ tdSql.query(f"select jtag->'tag1' match 'fe',jtag->'tag1' from {dbname}.jsons1 order by jtag->'tag1'")
tdSql.checkRows(9)
- tdSql.query("select jtag->'tag1' nmatch 'fe',jtag->'tag1' from jsons1 order by jtag->'tag1'")
+ tdSql.query(f"select jtag->'tag1' nmatch 'fe',jtag->'tag1' from {dbname}.jsons1 order by jtag->'tag1'")
tdSql.checkRows(9)
- tdSql.query("select jtag->'tag1',jtag->'tag1'>='a' from jsons1 order by jtag->'tag1'")
+ tdSql.query(f"select jtag->'tag1',jtag->'tag1'>='a' from {dbname}.jsons1 order by jtag->'tag1'")
tdSql.checkRows(9)
# test select normal column
- tdSql.query("select dataint from jsons1 order by dataint")
+ tdSql.query(f"select dataint from {dbname}.jsons1 order by dataint")
tdSql.checkRows(9)
tdSql.checkData(1, 0, 1)
# test select json tag
- tdSql.query("select * from jsons1")
+ tdSql.query(f"select * from {dbname}.jsons1")
tdSql.checkRows(9)
- tdSql.query("select jtag from jsons1")
+ tdSql.query(f"select jtag from {dbname}.jsons1")
tdSql.checkRows(9)
- tdSql.query("select * from jsons1 where jtag is null")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag is null")
tdSql.checkRows(1)
- tdSql.query("select * from jsons1 where jtag is not null")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag is not null")
tdSql.checkRows(8)
# test jtag is NULL
- tdSql.query("select jtag from jsons1_9")
+ tdSql.query(f"select jtag from {dbname}.jsons1_9")
tdSql.checkData(0, 0, None)
# test select json tag->'key', value is string
- tdSql.query("select jtag->'tag1' from jsons1_1")
+ tdSql.query(f"select jtag->'tag1' from {dbname}.jsons1_1")
tdSql.checkData(0, 0, '"femail"')
- tdSql.query("select jtag->'tag2' from jsons1_6")
+ tdSql.query(f"select jtag->'tag2' from {dbname}.jsons1_6")
tdSql.checkData(0, 0, '""')
# test select json tag->'key', value is int
- tdSql.query("select jtag->'tag2' from jsons1_1")
+ tdSql.query(f"select jtag->'tag2' from {dbname}.jsons1_1")
tdSql.checkData(0, 0, "35.000000000")
# test select json tag->'key', value is bool
- tdSql.query("select jtag->'tag3' from jsons1_1")
+ tdSql.query(f"select jtag->'tag3' from {dbname}.jsons1_1")
tdSql.checkData(0, 0, "true")
# test select json tag->'key', value is null
- tdSql.query("select jtag->'tag1' from jsons1_4")
+ tdSql.query(f"select jtag->'tag1' from {dbname}.jsons1_4")
tdSql.checkData(0, 0, "null")
# test select json tag->'key', value is double
- tdSql.query("select jtag->'tag1' from jsons1_5")
+ tdSql.query(f"select jtag->'tag1' from {dbname}.jsons1_5")
tdSql.checkData(0, 0, "1.232000000")
# test select json tag->'key', key is not exist
- tdSql.query("select jtag->'tag10' from jsons1_4")
+ tdSql.query(f"select jtag->'tag10' from {dbname}.jsons1_4")
tdSql.checkData(0, 0, None)
- tdSql.query("select jtag->'tag1' from jsons1")
+ tdSql.query(f"select jtag->'tag1' from {dbname}.jsons1")
tdSql.checkRows(9)
# test header name
- res = tdSql.getColNameList("select jtag->'tag1' from jsons1")
+ res = tdSql.getColNameList(f"select jtag->'tag1' from {dbname}.jsons1")
cname_list = []
cname_list.append("jtag->'tag1'")
tdSql.checkColNameList(res, cname_list)
# test where with json tag
- tdSql.query("select * from jsons1_1 where jtag is not null")
- tdSql.query("select * from jsons1 where jtag='{\"tag1\":11,\"tag2\":\"\"}'")
- tdSql.error("select * from jsons1 where jtag->'tag1'={}")
+ tdSql.query(f"select * from {dbname}.jsons1_1 where jtag is not null")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag='{{\"tag1\":11,\"tag2\":\"\"}}'")
+ tdSql.error(f"select * from {dbname}.jsons1 where jtag->'tag1'={{}}")
# test json error
- tdSql.error("select jtag + 1 from jsons1")
- tdSql.error("select jtag > 1 from jsons1")
- tdSql.error("select jtag like \"1\" from jsons1")
- tdSql.error("select jtag in (\"1\") from jsons1")
- #tdSql.error("select jtag from jsons1 where jtag > 1")
- #tdSql.error("select jtag from jsons1 where jtag like 'fsss'")
- #tdSql.error("select jtag from jsons1 where jtag in (1)")
+ tdSql.error(f"select jtag + 1 from {dbname}.jsons1")
+ tdSql.error(f"select jtag > 1 from {dbname}.jsons1")
+ tdSql.error(f"select jtag like \"1\" from {dbname}.jsons1")
+ tdSql.error(f"select jtag in (\"1\") from {dbname}.jsons1")
+ #tdSql.error(f"select jtag from {dbname}.jsons1 where jtag > 1")
+ #tdSql.error(f"select jtag from {dbname}.jsons1 where jtag like 'fsss'")
+ #tdSql.error(f"select jtag from {dbname}.jsons1 where jtag in (1)")
# where json value is string
- tdSql.query("select * from jsons1 where jtag->'tag2'='beijing'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2'='beijing'")
tdSql.checkRows(2)
- tdSql.query("select dataint,tbname,jtag->'tag1',jtag from jsons1 where jtag->'tag2'='beijing' order by dataint")
+ tdSql.query(f"select dataint,tbname,jtag->'tag1',jtag from {dbname}.jsons1 where jtag->'tag2'='beijing' order by dataint")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 2)
tdSql.checkData(0, 1, 'jsons1_2')
@@ -243,180 +224,180 @@ class TDTestCase:
tdSql.checkData(1, 2, 'false')
- tdSql.query("select * from jsons1 where jtag->'tag1'='beijing'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'='beijing'")
tdSql.checkRows(0)
- tdSql.query("select * from jsons1 where jtag->'tag1'='收到货'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'='收到货'")
tdSql.checkRows(1)
- tdSql.query("select * from jsons1 where jtag->'tag2'>'beijing'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2'>'beijing'")
tdSql.checkRows(1)
- tdSql.query("select * from jsons1 where jtag->'tag2'>='beijing'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2'>='beijing'")
tdSql.checkRows(3)
- tdSql.query("select * from jsons1 where jtag->'tag2'<'beijing'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2'<'beijing'")
tdSql.checkRows(2)
- tdSql.query("select * from jsons1 where jtag->'tag2'<='beijing'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2'<='beijing'")
tdSql.checkRows(4)
- tdSql.query("select * from jsons1 where jtag->'tag2'!='beijing'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2'!='beijing'")
tdSql.checkRows(3)
- tdSql.query("select * from jsons1 where jtag->'tag2'=''")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2'=''")
tdSql.checkRows(2)
# where json value is int
- tdSql.query("select * from jsons1 where jtag->'tag1'=5")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=5")
tdSql.checkRows(1)
tdSql.checkData(0, 1, 2)
- tdSql.query("select * from jsons1 where jtag->'tag1'=10")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=10")
tdSql.checkRows(0)
- tdSql.query("select * from jsons1 where jtag->'tag1'<54")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'<54")
tdSql.checkRows(3)
- tdSql.query("select * from jsons1 where jtag->'tag1'<=11")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'<=11")
tdSql.checkRows(3)
- tdSql.query("select * from jsons1 where jtag->'tag1'>4")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'>4")
tdSql.checkRows(2)
- tdSql.query("select * from jsons1 where jtag->'tag1'>=5")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'>=5")
tdSql.checkRows(2)
- tdSql.query("select * from jsons1 where jtag->'tag1'!=5")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'!=5")
tdSql.checkRows(2)
- tdSql.query("select * from jsons1 where jtag->'tag1'!=55")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'!=55")
tdSql.checkRows(3)
# where json value is double
- tdSql.query("select * from jsons1 where jtag->'tag1'=1.232")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=1.232")
tdSql.checkRows(1)
- tdSql.query("select * from jsons1 where jtag->'tag1'<1.232")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'<1.232")
tdSql.checkRows(0)
- tdSql.query("select * from jsons1 where jtag->'tag1'<=1.232")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'<=1.232")
tdSql.checkRows(1)
- tdSql.query("select * from jsons1 where jtag->'tag1'>1.23")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'>1.23")
tdSql.checkRows(3)
- tdSql.query("select * from jsons1 where jtag->'tag1'>=1.232")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'>=1.232")
tdSql.checkRows(3)
- tdSql.query("select * from jsons1 where jtag->'tag1'!=1.232")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'!=1.232")
tdSql.checkRows(2)
- tdSql.query("select * from jsons1 where jtag->'tag1'!=3.232")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'!=3.232")
tdSql.checkRows(3)
- tdSql.query("select * from jsons1 where jtag->'tag1'/0=3")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'/0=3")
tdSql.checkRows(0)
- tdSql.query("select * from jsons1 where jtag->'tag1'/5=1")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'/5=1")
tdSql.checkRows(1)
# where json value is bool
- tdSql.query("select * from jsons1 where jtag->'tag1'=true")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=true")
tdSql.checkRows(0)
- tdSql.query("select * from jsons1 where jtag->'tag1'=false")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=false")
tdSql.checkRows(1)
- tdSql.query("select * from jsons1 where jtag->'tag1'!=false")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'!=false")
tdSql.checkRows(0)
- tdSql.query("select * from jsons1 where jtag->'tag1'>false")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'>false")
tdSql.checkRows(0)
# where json value is null
- tdSql.query("select * from jsons1 where jtag->'tag1'=null")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=null")
tdSql.checkRows(0)
# where json key is null
- tdSql.query("select * from jsons1 where jtag->'tag_no_exist'=3")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag_no_exist'=3")
tdSql.checkRows(0)
# where json value is not exist
- tdSql.query("select * from jsons1 where jtag->'tag1' is null")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' is null")
tdSql.checkData(0, 0, 'jsons1_9')
tdSql.checkRows(2)
- tdSql.query("select * from jsons1 where jtag->'tag4' is null")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag4' is null")
tdSql.checkRows(9)
- tdSql.query("select * from jsons1 where jtag->'tag3' is not null")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag3' is not null")
tdSql.checkRows(3)
# test contains
- tdSql.query("select * from jsons1 where jtag contains 'tag1'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag contains 'tag1'")
tdSql.checkRows(8)
- tdSql.query("select * from jsons1 where jtag contains 'tag3'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag contains 'tag3'")
tdSql.checkRows(4)
- tdSql.query("select * from jsons1 where jtag contains 'tag_no_exist'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag contains 'tag_no_exist'")
tdSql.checkRows(0)
# test json tag in where condition with and/or
- tdSql.query("select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='beijing'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=false and jtag->'tag2'='beijing'")
tdSql.checkRows(1)
- tdSql.query("select * from jsons1 where jtag->'tag1'=false or jtag->'tag2'='beijing'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=false or jtag->'tag2'='beijing'")
tdSql.checkRows(2)
- tdSql.query("select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='shanghai'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=false and jtag->'tag2'='shanghai'")
tdSql.checkRows(0)
- tdSql.query("select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='shanghai'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=false and jtag->'tag2'='shanghai'")
tdSql.checkRows(0)
- tdSql.query("select * from jsons1 where jtag->'tag1'=13 or jtag->'tag2'>35")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=13 or jtag->'tag2'>35")
tdSql.checkRows(0)
- tdSql.query("select * from jsons1 where jtag->'tag1'=13 or jtag->'tag2'>35")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=13 or jtag->'tag2'>35")
tdSql.checkRows(0)
- tdSql.query("select * from jsons1 where jtag->'tag1' is not null and jtag contains 'tag3'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' is not null and jtag contains 'tag3'")
tdSql.checkRows(3)
- tdSql.query("select * from jsons1 where jtag->'tag1'='femail' and jtag contains 'tag3'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'='femail' and jtag contains 'tag3'")
tdSql.checkRows(2)
# test with between and
- tdSql.query("select * from jsons1 where jtag->'tag1' between 1 and 30")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' between 1 and 30")
tdSql.checkRows(3)
- tdSql.query("select * from jsons1 where jtag->'tag1' between 'femail' and 'beijing'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' between 'femail' and 'beijing'")
tdSql.checkRows(2)
# test with tbname/normal column
- tdSql.query("select * from jsons1 where tbname = 'jsons1_1'")
+ tdSql.query(f"select * from {dbname}.jsons1 where tbname = 'jsons1_1'")
tdSql.checkRows(2)
- tdSql.query("select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3'")
+ tdSql.query(f"select * from {dbname}.jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3'")
tdSql.checkRows(2)
- tdSql.query("select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3' and dataint=3")
+ tdSql.query(f"select * from {dbname}.jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3' and dataint=3")
tdSql.checkRows(0)
- tdSql.query("select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3' and dataint=23")
+ tdSql.query(f"select * from {dbname}.jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3' and dataint=23")
tdSql.checkRows(1)
# test where condition like
- tdSql.query("select * from jsons1 where jtag->'tag2' like 'bei%'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2' like 'bei%'")
tdSql.checkRows(2)
- tdSql.query("select * from jsons1 where jtag->'tag1' like 'fe%' and jtag->'tag2' is not null")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' like 'fe%' and jtag->'tag2' is not null")
tdSql.checkRows(2)
# test where condition in no support in
- tdSql.error("select * from jsons1 where jtag->'tag1' in ('beijing')")
+ tdSql.error(f"select * from {dbname}.jsons1 where jtag->'tag1' in ('beijing')")
# test where condition match/nmath
- tdSql.query("select * from jsons1 where jtag->'tag1' match 'ma'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' match 'ma'")
tdSql.checkRows(2)
- tdSql.query("select * from jsons1 where jtag->'tag1' match 'ma$'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' match 'ma$'")
tdSql.checkRows(0)
- tdSql.query("select * from jsons1 where jtag->'tag2' match 'jing$'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2' match 'jing$'")
tdSql.checkRows(2)
- tdSql.query("select * from jsons1 where jtag->'tag1' match '收到'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' match '收到'")
tdSql.checkRows(1)
- tdSql.query("select * from jsons1 where jtag->'tag1' nmatch 'ma'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' nmatch 'ma'")
tdSql.checkRows(1)
# test distinct
- tdSql.execute("insert into jsons1_14 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}') values(1591062628000, 2, NULL, '你就会', 'dws')")
- tdSql.query("select distinct jtag->'tag1' from jsons1")
+ tdSql.execute(f"insert into {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}}') values(1591062628000, 2, NULL, '你就会', 'dws')")
+ tdSql.query(f"select distinct jtag->'tag1' from {dbname}.jsons1")
tdSql.checkRows(8)
- tdSql.error("select distinct jtag from jsons1")
+ tdSql.error(f"select distinct jtag from {dbname}.jsons1")
#test dumplicate key with normal colomn
- tdSql.execute("INSERT INTO jsons1_15 using jsons1 tags('{\"tbname\":\"tt\",\"databool\":true,\"datastr\":\"是是是\"}') values(1591060828000, 4, false, 'jjsf', \"你就会\")")
- tdSql.query("select * from jsons1 where jtag->'datastr' match '是' and datastr match 'js'")
+ tdSql.execute(f"insert into {dbname}.jsons1_15 using {dbname}.jsons1 tags('{{\"tbname\":\"tt\",\"databool\":true,\"datastr\":\"是是是\"}}') values(1591060828000, 4, false, 'jjsf', \"你就会\")")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'datastr' match '是' and datastr match 'js'")
tdSql.checkRows(1)
- tdSql.query("select tbname,jtag->'tbname' from jsons1 where jtag->'tbname'='tt' and tbname='jsons1_15'")
+ tdSql.query(f"select tbname,jtag->'tbname' from {dbname}.jsons1 where jtag->'tbname'='tt' and tbname='jsons1_15'")
tdSql.checkRows(1)
# test join
- tdSql.execute("create table if not exists jsons2(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)")
- tdSql.execute("insert into jsons2_1 using jsons2 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 2, false, 'json2', '你是2')")
- tdSql.execute("insert into jsons2_2 using jsons2 tags('{\"tag1\":5,\"tag2\":null}') values (1591060628000, 2, true, 'json2', 'sss')")
+ tdSql.execute(f"create table if not exists {dbname}.jsons2(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)")
+ tdSql.execute(f"insert into {dbname}.jsons2_1 using {dbname}.jsons2 tags('{{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}}') values(1591060618000, 2, false, 'json2', '你是2')")
+ tdSql.execute(f"insert into {dbname}.jsons2_2 using {dbname}.jsons2 tags('{{\"tag1\":5,\"tag2\":null}}') values (1591060628000, 2, true, 'json2', 'sss')")
- tdSql.execute("create table if not exists jsons3(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)")
- tdSql.execute("insert into jsons3_1 using jsons3 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 3, false, 'json3', '你是3')")
- tdSql.execute("insert into jsons3_2 using jsons3 tags('{\"tag1\":5,\"tag2\":\"beijing\"}') values (1591060638000, 2, true, 'json3', 'sss')")
- tdSql.query("select 'sss',33,a.jtag->'tag3' from jsons2 a,jsons3 b where a.ts=b.ts and a.jtag->'tag1'=b.jtag->'tag1'")
+ tdSql.execute(f"create table if not exists {dbname}.jsons3(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)")
+ tdSql.execute(f"insert into {dbname}.jsons3_1 using {dbname}.jsons3 tags('{{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}}') values(1591060618000, 3, false, 'json3', '你是3')")
+ tdSql.execute(f"insert into {dbname}.jsons3_2 using {dbname}.jsons3 tags('{{\"tag1\":5,\"tag2\":\"beijing\"}}') values (1591060638000, 2, true, 'json3', 'sss')")
+ tdSql.query(f"select 'sss',33,a.jtag->'tag3' from {dbname}.jsons2 a,{dbname}.jsons3 b where a.ts=b.ts and a.jtag->'tag1'=b.jtag->'tag1'")
tdSql.checkData(0, 0, "sss")
tdSql.checkData(0, 2, "true")
- res = tdSql.getColNameList("select 'sss',33,a.jtag->'tag3' from jsons2 a,jsons3 b where a.ts=b.ts and a.jtag->'tag1'=b.jtag->'tag1'")
+ res = tdSql.getColNameList(f"select 'sss',33,a.jtag->'tag3' from {dbname}.jsons2 a,{dbname}.jsons3 b where a.ts=b.ts and a.jtag->'tag1'=b.jtag->'tag1'")
cname_list = []
cname_list.append("'sss'")
cname_list.append("33")
@@ -424,19 +405,19 @@ class TDTestCase:
tdSql.checkColNameList(res, cname_list)
#
# test group by & order by json tag
- tdSql.query("select ts,jtag->'tag1' from jsons1 partition by jtag->'tag1' order by jtag->'tag1' desc")
+ tdSql.query(f"select ts,jtag->'tag1' from {dbname}.jsons1 partition by jtag->'tag1' order by jtag->'tag1' desc")
tdSql.checkRows(11)
tdSql.checkData(0, 1, '"femail"')
tdSql.checkData(2, 1, '"收到货"')
tdSql.checkData(7, 1, "false")
- tdSql.error("select count(*) from jsons1 group by jtag")
- tdSql.error("select count(*) from jsons1 partition by jtag")
- tdSql.error("select count(*) from jsons1 group by jtag order by jtag")
- tdSql.error("select count(*) from jsons1 group by jtag->'tag1' order by jtag->'tag2'")
- tdSql.error("select count(*) from jsons1 group by jtag->'tag1' order by jtag")
- tdSql.query("select count(*),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1' desc")
+ tdSql.error(f"select count(*) from {dbname}.jsons1 group by jtag")
+ tdSql.error(f"select count(*) from {dbname}.jsons1 partition by jtag")
+ tdSql.error(f"select count(*) from {dbname}.jsons1 group by jtag order by jtag")
+ tdSql.error(f"select count(*) from {dbname}.jsons1 group by jtag->'tag1' order by jtag->'tag2'")
+ tdSql.error(f"select count(*) from {dbname}.jsons1 group by jtag->'tag1' order by jtag")
+ tdSql.query(f"select count(*),jtag->'tag1' from {dbname}.jsons1 group by jtag->'tag1' order by jtag->'tag1' desc")
tdSql.checkRows(8)
tdSql.checkData(0, 0, 2)
tdSql.checkData(0, 1, '"femail"')
@@ -447,7 +428,7 @@ class TDTestCase:
tdSql.checkData(5, 0, 1)
tdSql.checkData(5, 1, "false")
- tdSql.query("select count(*),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1' asc")
+ tdSql.query(f"select count(*),jtag->'tag1' from {dbname}.jsons1 group by jtag->'tag1' order by jtag->'tag1' asc")
tdSql.checkRows(8)
tdSql.checkData(0, 1, None)
tdSql.checkData(2, 0, 1)
@@ -458,7 +439,7 @@ class TDTestCase:
tdSql.checkData(7, 1, '"femail"')
# test stddev with group by json tag
- tdSql.query("select stddev(dataint),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1'")
+ tdSql.query(f"select stddev(dataint),jtag->'tag1' from {dbname}.jsons1 group by jtag->'tag1' order by jtag->'tag1'")
tdSql.checkRows(8)
tdSql.checkData(0, 1, None)
tdSql.checkData(4, 0, 0)
@@ -466,222 +447,222 @@ class TDTestCase:
tdSql.checkData(7, 0, 11)
tdSql.checkData(7, 1, '"femail"')
- res = tdSql.getColNameList("select stddev(dataint),jsons1.jtag->'tag1' from jsons1 group by jsons1.jtag->'tag1' order by jtag->'tag1'")
+ res = tdSql.getColNameList(f"select stddev(dataint),jsons1.jtag->'tag1' from {dbname}.jsons1 group by jsons1.jtag->'tag1' order by jtag->'tag1'")
cname_list = []
cname_list.append("stddev(dataint)")
cname_list.append("jsons1.jtag->'tag1'")
tdSql.checkColNameList(res, cname_list)
# test top/bottom with group by json tag
- tdSql.query("select top(dataint,2),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1'")
+ tdSql.query(f"select top(dataint,2),jtag->'tag1' from {dbname}.jsons1 group by jtag->'tag1' order by jtag->'tag1'")
tdSql.checkRows(11)
tdSql.checkData(0, 1, None)
# test having
- tdSql.query("select count(*),jtag->'tag1' from jsons1 group by jtag->'tag1' having count(*) > 1")
+ tdSql.query(f"select count(*),jtag->'tag1' from {dbname}.jsons1 group by jtag->'tag1' having count(*) > 1")
tdSql.checkRows(3)
# subquery with json tag
- tdSql.query("select * from (select jtag, dataint from jsons1) order by dataint")
+ tdSql.query(f"select * from (select jtag, dataint from {dbname}.jsons1) order by dataint")
tdSql.checkRows(11)
tdSql.checkData(1, 1, 1)
tdSql.checkData(5, 0, '{"tag1":false,"tag2":"beijing"}')
- tdSql.error("select jtag->'tag1' from (select jtag->'tag1', dataint from jsons1)")
- tdSql.error("select t->'tag1' from (select jtag->'tag1' as t, dataint from jsons1)")
- tdSql.error("select ts,jtag->'tag1' from (select jtag->'tag1',tbname,ts from jsons1 order by ts)")
+ tdSql.error(f"select jtag->'tag1' from (select jtag->'tag1', dataint from {dbname}.jsons1)")
+ tdSql.error(f"select t->'tag1' from (select jtag->'tag1' as t, dataint from {dbname}.jsons1)")
+ tdSql.error(f"select ts,jtag->'tag1' from (select jtag->'tag1',tbname,ts from {dbname}.jsons1 order by ts)")
# union all
- tdSql.query("select jtag->'tag1' from jsons1 union all select jtag->'tag2' from jsons2")
+ tdSql.query(f"select jtag->'tag1' from {dbname}.jsons1 union all select jtag->'tag2' from {dbname}.jsons2")
tdSql.checkRows(13)
- tdSql.query("select jtag->'tag1' from jsons1_1 union all select jtag->'tag2' from jsons2_1")
+ tdSql.query(f"select jtag->'tag1' from {dbname}.jsons1_1 union all select jtag->'tag2' from {dbname}.jsons2_1")
tdSql.checkRows(3)
- tdSql.query("select jtag->'tag1' from jsons1_1 union all select jtag->'tag1' from jsons2_1")
+ tdSql.query(f"select jtag->'tag1' from {dbname}.jsons1_1 union all select jtag->'tag1' from {dbname}.jsons2_1")
tdSql.checkRows(3)
- tdSql.query("select dataint,jtag->'tag1',tbname from jsons1 union all select dataint,jtag->'tag1',tbname from jsons2")
+ tdSql.query(f"select dataint,jtag->'tag1',tbname from {dbname}.jsons1 union all select dataint,jtag->'tag1',tbname from {dbname}.jsons2")
tdSql.checkRows(13)
- tdSql.query("select dataint,jtag,tbname from jsons1 union all select dataint,jtag,tbname from jsons2")
+ tdSql.query(f"select dataint,jtag,tbname from {dbname}.jsons1 union all select dataint,jtag,tbname from {dbname}.jsons2")
tdSql.checkRows(13)
#show create table
- tdSql.query("show create table jsons1")
+ tdSql.query(f"show create table {dbname}.jsons1")
tdSql.checkData(0, 1, 'CREATE STABLE `jsons1` (`ts` TIMESTAMP, `dataint` INT, `databool` BOOL, `datastr` NCHAR(50), `datastrbin` VARCHAR(150)) TAGS (`jtag` JSON)')
#test aggregate function:count/avg/twa/irate/sum/stddev/leastsquares
- tdSql.query("select count(*) from jsons1 where jtag is not null")
+ tdSql.query(f"select count(*) from {dbname}.jsons1 where jtag is not null")
tdSql.checkData(0, 0, 10)
- tdSql.query("select avg(dataint) from jsons1 where jtag is not null")
+ tdSql.query(f"select avg(dataint) from {dbname}.jsons1 where jtag is not null")
tdSql.checkData(0, 0, 5.3)
- # tdSql.query("select twa(dataint) from jsons1 where jtag is not null")
+ # tdSql.query(f"select twa(dataint) from {dbname}.jsons1 where jtag is not null")
# tdSql.checkData(0, 0, 28.386363636363637)
- # tdSql.query("select irate(dataint) from jsons1 where jtag is not null")
+ # tdSql.query(f"select irate(dataint) from {dbname}.jsons1 where jtag is not null")
- tdSql.query("select sum(dataint) from jsons1 where jtag->'tag1' is not null")
+ tdSql.query(f"select sum(dataint) from {dbname}.jsons1 where jtag->'tag1' is not null")
tdSql.checkData(0, 0, 45)
- tdSql.query("select stddev(dataint) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select stddev(dataint) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkData(0, 0, 4.496912521)
- tdSql.query("SELECT LEASTSQUARES(dataint, 1, 1) from jsons1 where jtag is not null")
+ tdSql.query(f"select LEASTSQUARES(dataint, 1, 1) from {dbname}.jsons1 where jtag is not null")
#test selection function:min/max/first/last/top/bottom/percentile/apercentile/last_row/interp
- tdSql.query("select min(dataint) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select min(dataint) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkData(0, 0, 1)
- tdSql.query("select max(dataint) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select max(dataint) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkData(0, 0, 11)
- tdSql.query("select first(dataint) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select first(dataint) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkData(0, 0, 2)
- tdSql.query("select last(dataint) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select last(dataint) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkData(0, 0, 11)
- tdSql.query("select top(dataint,100) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select top(dataint,100) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkRows(3)
- tdSql.query("select bottom(dataint,100) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select bottom(dataint,100) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkRows(3)
- #tdSql.query("select percentile(dataint,20) from jsons1 where jtag->'tag1'>1")
- tdSql.query("select apercentile(dataint, 50) from jsons1 where jtag->'tag1'>1")
+ #tdSql.query(f"select percentile(dataint,20) from {dbname}.jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select apercentile(dataint, 50) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkData(0, 0, 1.5)
- # tdSql.query("select last_row(dataint) from jsons1 where jtag->'tag1'>1")
- # tdSql.query("select interp(dataint) from jsons1 where ts = '2020-06-02 09:17:08.000' and jtag->'tag1'>1")
+ # tdSql.query(f"select last_row(dataint) from {dbname}.jsons1 where jtag->'tag1'>1")
+ # tdSql.query(f"select interp(dataint) from {dbname}.jsons1 where ts = '2020-06-02 09:17:08.000' and jtag->'tag1'>1")
#test calculation function:diff/derivative/spread/ceil/floor/round/
- tdSql.query("select diff(dataint) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select diff(dataint) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkRows(2)
# tdSql.checkData(0, 0, -1)
# tdSql.checkData(1, 0, 10)
- tdSql.query("select derivative(dataint, 10m, 0) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select derivative(dataint, 10m, 0) from {dbname}.jsons1 where jtag->'tag1'>1")
# tdSql.checkData(0, 0, -2)
- tdSql.query("select spread(dataint) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select spread(dataint) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkData(0, 0, 10)
- tdSql.query("select ceil(dataint) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select ceil(dataint) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkRows(3)
- tdSql.query("select floor(dataint) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select floor(dataint) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkRows(3)
- tdSql.query("select round(dataint) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select round(dataint) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkRows(3)
#math function
- tdSql.query("select sin(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select sin(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select cos(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select cos(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select tan(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select tan(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select asin(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select asin(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select acos(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select acos(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select atan(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select atan(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select ceil(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select ceil(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select floor(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select floor(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select round(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select round(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select abs(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select abs(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select pow(dataint,5) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select pow(dataint,5) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select log(dataint,10) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select log(dataint,10) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select sqrt(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select sqrt(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select HISTOGRAM(dataint,'user_input','[1, 33, 555, 7777]',1) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select HISTOGRAM(dataint,'user_input','[1, 33, 555, 7777]',1) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select csum(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select csum(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select mavg(dataint,1) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select mavg(dataint,1) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select statecount(dataint,'GE',10) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select statecount(dataint,'GE',10) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select stateduration(dataint,'GE',0) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select stateduration(dataint,'GE',0) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select sample(dataint,3) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select sample(dataint,3) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select HYPERLOGLOG(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select HYPERLOGLOG(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(1)
- tdSql.query("select twa(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select twa(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(1)
# function not ready
- tdSql.query("select tail(dataint,1) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select tail(dataint,1) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(1)
- tdSql.query("select unique(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select unique(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select mode(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select mode(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(1)
- tdSql.query("select irate(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select irate(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(1)
#str function
- tdSql.query("select upper(dataStr) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select upper(dataStr) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select ltrim(dataStr) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select ltrim(dataStr) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select lower(dataStr) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select lower(dataStr) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select rtrim(dataStr) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select rtrim(dataStr) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select LENGTH(dataStr) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select LENGTH(dataStr) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select CHAR_LENGTH(dataStr) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select CHAR_LENGTH(dataStr) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select SUBSTR(dataStr,5) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select SUBSTR(dataStr,5) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select CONCAT(dataStr,dataStrBin) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select CONCAT(dataStr,dataStrBin) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select CONCAT_ws('adad!@!@%$^$%$^$%^a',dataStr,dataStrBin) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select CONCAT_ws('adad!@!@%$^$%$^$%^a',dataStr,dataStrBin) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select CAST(dataStr as bigint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select CAST(dataStr as bigint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
#time function
- tdSql.query("select now() from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select now() from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select today() from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select today() from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select TIMEZONE() from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select TIMEZONE() from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select TO_ISO8601(ts) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select TO_ISO8601(ts) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select TO_UNIXTIMESTAMP(datastr) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select TO_UNIXTIMESTAMP(datastr) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select TIMETRUNCATE(ts,1s) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select TIMETRUNCATE(ts,1s) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select TIMEDIFF(ts,_c0) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select TIMEDIFF(ts,_c0) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select TIMEDIFF(ts,1u) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select TIMEDIFF(ts,1u) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select ELAPSED(ts,1h) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select ELAPSED(ts,1h) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(1)
# to_json()
- tdSql.query("select to_json('{\"abc\":123}') from jsons1_1")
+ tdSql.query(f"select to_json('{{\"abc\":123}}') from {dbname}.jsons1_1")
tdSql.checkRows(2)
tdSql.checkData(0, 0, '{"abc":123}')
tdSql.checkData(1, 0, '{"abc":123}')
- tdSql.query("select to_json('null') from jsons1_1")
+ tdSql.query(f"select to_json('null') from {dbname}.jsons1_1")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 'null')
tdSql.checkData(1, 0, 'null')
- tdSql.query("select to_json('{\"key\"}') from jsons1_1")
+ tdSql.query(f"select to_json('{{\"key\"}}') from {dbname}.jsons1_1")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 'null')
tdSql.checkData(1, 0, 'null')
#test TD-12077
- tdSql.execute("insert into jsons1_16 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":-2.111}') values(1591062628000, 2, NULL, '你就会', 'dws')")
- tdSql.query("select jtag->'tag3' from jsons1_16")
+ tdSql.execute(f"insert into {dbname}.jsons1_16 using {dbname}.jsons1 tags('{{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":-2.111}}') values(1591062628000, 2, NULL, '你就会', 'dws')")
+ tdSql.query(f"select jtag->'tag3' from {dbname}.jsons1_16")
tdSql.checkData(0, 0, '-2.111000000')
# test TD-12452
- tdSql.execute("ALTER TABLE jsons1_1 SET TAG jtag=NULL")
- tdSql.query("select jtag from jsons1_1")
+ tdSql.execute(f"ALTER table {dbname}.jsons1_1 SET TAG jtag=NULL")
+ tdSql.query(f"select jtag from {dbname}.jsons1_1")
tdSql.checkData(0, 0, None)
- tdSql.execute("CREATE TABLE if not exists jsons1_20 using jsons1 tags(NULL)")
- tdSql.query("select jtag from jsons1_20")
+ tdSql.execute(f"create TABLE if not exists {dbname}.jsons1_20 using {dbname}.jsons1 tags(NULL)")
+ tdSql.query(f"select jtag from {dbname}.jsons1_20")
tdSql.checkRows(0)
- tdSql.execute("insert into jsons1_21 using jsons1 tags(NULL) values(1591061628000, 11, false, '你就会','')")
- tdSql.query("select jtag from jsons1_21")
+ tdSql.execute(f"insert into {dbname}.jsons1_21 using {dbname}.jsons1 tags(NULL) values(1591061628000, 11, false, '你就会','')")
+ tdSql.query(f"select jtag from {dbname}.jsons1_21")
tdSql.checkData(0, 0, None)
#
# #test TD-12389
@@ -691,23 +672,23 @@ class TDTestCase:
tdSql.checkData(5, 2, 4095)
#
# #test TD-13918
- tdSql.execute("drop table if exists jsons_13918_1")
- tdSql.execute("drop table if exists jsons_13918_2")
- tdSql.execute("drop table if exists jsons_13918_3")
- tdSql.execute("drop table if exists jsons_13918_4")
- tdSql.execute("drop table if exists jsons_stb")
- tdSql.execute("create table jsons_stb (ts timestamp, dataInt int) tags (jtag json)")
- tdSql.error("create table jsons_13918_1 using jsons_stb tags ('nullx')")
- tdSql.error("create table jsons_13918_2 using jsons_stb tags (nullx)")
- tdSql.error("insert into jsons_13918_3 using jsons_stb tags('NULLx') values(1591061628001, 11)")
- tdSql.error("insert into jsons_13918_4 using jsons_stb tags(NULLx) values(1591061628002, 11)")
- tdSql.execute("create table jsons_13918_1 using jsons_stb tags ('null')")
- tdSql.execute("create table jsons_13918_2 using jsons_stb tags (null)")
- tdSql.execute("insert into jsons_13918_1 values(1591061628003, 11)")
- tdSql.execute("insert into jsons_13918_2 values(1591061628004, 11)")
- tdSql.execute("insert into jsons_13918_3 using jsons_stb tags('NULL') values(1591061628005, 11)")
- tdSql.execute("insert into jsons_13918_4 using jsons_stb tags(\"NULL\") values(1591061628006, 11)")
- tdSql.query("select * from jsons_stb")
+ tdSql.execute(f"drop table if exists {dbname}.jsons_13918_1")
+ tdSql.execute(f"drop table if exists {dbname}.jsons_13918_2")
+ tdSql.execute(f"drop table if exists {dbname}.jsons_13918_3")
+ tdSql.execute(f"drop table if exists {dbname}.jsons_13918_4")
+ tdSql.execute(f"drop table if exists {dbname}.jsons_stb")
+ tdSql.execute(f"create table {dbname}.jsons_stb (ts timestamp, dataInt int) tags (jtag json)")
+ tdSql.error(f"create table {dbname}.jsons_13918_1 using {dbname}.jsons_stb tags ('nullx')")
+ tdSql.error(f"create table {dbname}.jsons_13918_2 using {dbname}.jsons_stb tags (nullx)")
+ tdSql.error(f"insert into {dbname}.jsons_13918_3 using {dbname}.jsons_stb tags('NULLx') values(1591061628001, 11)")
+ tdSql.error(f"insert into {dbname}.jsons_13918_4 using {dbname}.jsons_stb tags(NULLx) values(1591061628002, 11)")
+ tdSql.execute(f"create table {dbname}.jsons_13918_1 using {dbname}.jsons_stb tags ('null')")
+ tdSql.execute(f"create table {dbname}.jsons_13918_2 using {dbname}.jsons_stb tags (null)")
+ tdSql.execute(f"insert into {dbname}.jsons_13918_1 values(1591061628003, 11)")
+ tdSql.execute(f"insert into {dbname}.jsons_13918_2 values(1591061628004, 11)")
+ tdSql.execute(f"insert into {dbname}.jsons_13918_3 using {dbname}.jsons_stb tags('NULL') values(1591061628005, 11)")
+ tdSql.execute(f"insert into {dbname}.jsons_13918_4 using {dbname}.jsons_stb tags(\"NULL\") values(1591061628006, 11)")
+ tdSql.query(f"select * from {dbname}.jsons_stb")
tdSql.checkRows(4)
def stop(self):
@@ -717,4 +698,3 @@ class TDTestCase:
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
-
diff --git a/tests/system-test/2-query/json_tag_large_tables.py b/tests/system-test/2-query/json_tag_large_tables.py
index 5d7df6ceb8..9164c108f9 100644
--- a/tests/system-test/2-query/json_tag_large_tables.py
+++ b/tests/system-test/2-query/json_tag_large_tables.py
@@ -35,7 +35,7 @@ class TDTestCase:
self.testcaseFilename = os.path.split(__file__)[-1]
# os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), True)
+ tdSql.init(conn.cursor(), False)
def run(self):
# tdSql.prepare()
@@ -47,24 +47,24 @@ class TDTestCase:
i = 0
# add 100000 table
tdSql.execute("create table if not exists jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)")
- while i <= 10 0000:
+ while i <= 100000:
sql = """insert into jsons1_{%d} using jsons1 tags('{"tag1":{%d}}') values(1591060618000, 1, false, 'json1', '你是') (1591060608000, 23, true, '等等', 'json')"""%(i, i)
tdSql.execute(sql)
i = i + 1
- // do query
- i = 0
- while i <= 10 0000:
+ # do query
+ i = 0
+ while i <= 100000:
sql = """select count(*) from jsons1 where jtag->'tag1' = %d"""%(i)
tdSql.query(sql)
if 1 != tdSql.getRows():
print("err: %s"%(sql))
-
- while i <= 10000000
+
+ while i <= 10000000:
sql = """insert into jsons1_{%d} using jsons1 tags('{"tag1":{%d}}') values(1591060618000, 1, false, 'json1', '你是') (1591060608000, 23, true, '等等', 'json')"""%(i, i)
tdSql.execute(sql)
i = i + 1
-
+
i = 0
# drop super table
tdSql.execute("create table if not exists jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)")
@@ -76,7 +76,7 @@ class TDTestCase:
tdSql.execute('drop stable jsons1')
- # drop database
+ # drop database
i = 0
tdSql.execute("create table if not exists jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)")
while i <= 100000:
@@ -84,10 +84,10 @@ class TDTestCase:
tdSql.execute(sql)
i = i + 1
tdSql.execute('drop database db')
-
+
# test duplicate key using the first one. elimate empty key
- #tdSql.execute("CREATE TABLE if not exists jsons1_8 using jsons1 tags('{\"tag1\":null, \"tag1\":true, \"tag1\":45, \"1tag$\":2, \" \":90, \"\":32}')") tdSql.query("select jtag from jsons1_8") tdSql.checkRows(0);
+ #tdSql.execute("CREATE TABLE if not exists jsons1_8 using jsons1 tags('{\"tag1\":null, \"tag1\":true, \"tag1\":45, \"1tag$\":2, \" \":90, \"\":32}')") tdSql.query("select jtag from jsons1_8") tdSql.checkRows(0);
#tdSql.query("select ts,jtag from jsons1 order by ts limit 2,3")
#tdSql.checkData(0, 0, '2020-06-02 09:17:08.000')
#tdSql.checkData(0, 1, '{"tag1":5,"tag2":"beijing"}')
@@ -704,4 +704,3 @@ class TDTestCase:
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
-
diff --git a/tests/system-test/2-query/last_row.py b/tests/system-test/2-query/last_row.py
index 105dc883c7..f65744a0b7 100644
--- a/tests/system-test/2-query/last_row.py
+++ b/tests/system-test/2-query/last_row.py
@@ -10,29 +10,26 @@ import random
class TDTestCase:
- updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
- "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
- "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143 ,"udf":0}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
- tdSql.init(conn.cursor(), True)
+ tdSql.init(conn.cursor(), False)
self.tb_nums = 10
self.row_nums = 20
self.ts = 1434938400000
self.time_step = 1000
- def insert_datas_and_check_abs(self ,tbnums , rownums , time_step ,cache_value ):
- tdSql.execute("drop database if exists test ")
- tdLog.info(" prepare datas for auto check abs function ")
+ def insert_datas_and_check_abs(self, tbnums, rownums, time_step, cache_value, dbname="test"):
+ tdSql.execute(f"drop database if exists {dbname} ")
+ tdLog.info("prepare datas for auto check abs function ")
- tdSql.execute(f" create database test cachemodel {cache_value} ")
- tdSql.execute(" use test ")
- tdSql.execute(" create stable test.stb (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint,\
+ tdSql.execute(f"create database {dbname} cachemodel {cache_value} ")
+ tdSql.execute(f"use {dbname} ")
+ tdSql.execute(f"create stable {dbname}.stb (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint,\
c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int)")
for tbnum in range(tbnums):
- tbname = "test.sub_tb_%d"%tbnum
- tdSql.execute(" create table %s using stb tags(%d) "%(tbname , tbnum))
+ tbname = f"{dbname}.sub_tb_{tbnum}"
+ tdSql.execute(f"create table {tbname} using {dbname}.stb tags({tbnum}) ")
ts = self.ts
for row in range(rownums):
@@ -49,66 +46,65 @@ class TDTestCase:
c10 = ts
tdSql.execute(f" insert into {tbname} values ({ts},{c1},{c2},{c3},{c4},{c5},{c6},{c7},{c8},{c9},{c10})")
- tdSql.execute("use test")
tbnames = ["stb", "sub_tb_1"]
support_types = ["BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE", "INT"]
for tbname in tbnames:
- tdSql.query("desc {}".format(tbname))
+ tdSql.query(f"desc {dbname}.{tbname}")
coltypes = tdSql.queryResult
for coltype in coltypes:
colname = coltype[0]
- abs_sql = "select abs({}) from {} order by tbname ".format(colname, 'test.'+tbname)
- origin_sql = "select {} from {} order by tbname".format(colname, 'test.'+tbname)
+ abs_sql = f"select abs({colname}) from {dbname}.{tbname} order by tbname "
+ origin_sql = f"select {colname} from {dbname}.{tbname} order by tbname"
if coltype[1] in support_types:
self.check_result_auto(origin_sql , abs_sql)
- def prepare_datas(self ,cache_value):
- tdSql.execute("drop database if exists db ")
- create_db_sql = f"create database if not exists db keep 3650 duration 1000 cachemodel {cache_value}"
+ def prepare_datas(self ,cache_value, dbname="db"):
+ tdSql.execute(f"drop database if exists {dbname} ")
+ create_db_sql = f"create database if not exists {dbname} keep 3650 duration 1000 cachemodel {cache_value}"
tdSql.execute(create_db_sql)
- tdSql.execute("use db")
+ tdSql.execute(f"use {dbname}")
tdSql.execute(
- '''create table db.stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table db.t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table db.ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into db.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into db.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- "insert into db.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute(
- "insert into db.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
- "insert into db.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
- "insert into db.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
- "insert into db.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- "insert into db.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- "insert into db.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into db.t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -124,53 +120,53 @@ class TDTestCase:
'''
)
- def prepare_tag_datas(self,cache_value):
+ def prepare_tag_datas(self,cache_value, dbname="testdb"):
- tdSql.execute("drop database if exists testdb ")
+ tdSql.execute(f"drop database if exists {dbname} ")
# prepare datas
- tdSql.execute(f"create database if not exists testdb keep 3650 duration 1000 cachemodel {cache_value}")
+ tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 cachemodel {cache_value}")
- tdSql.execute(" use testdb ")
+ tdSql.execute(f"use {dbname} ")
- tdSql.execute(f" create stable testdb.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp , uc1 int unsigned,\
+ tdSql.execute(f"create stable {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp , uc1 int unsigned,\
uc2 bigint unsigned ,uc3 smallint unsigned , uc4 tinyint unsigned ) tags( t1 int , t2 bigint , t3 smallint , t4 tinyint , t5 float , t6 double , t7 bool , t8 binary(36)\
, t9 nchar(36) , t10 int unsigned , t11 bigint unsigned ,t12 smallint unsigned , t13 tinyint unsigned ,t14 timestamp ) ")
tdSql.execute(
- '''
- create table testdb.t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
tdSql.execute(
- f'create table testdb.ct{i+1} using stb1 tags ( {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" ,{111*i}, {1*i},{1*i},{1*i},now())')
+ f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" ,{111*i}, {1*i},{1*i},{1*i},now())')
for i in range(9):
tdSql.execute(
- f"insert into testdb.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a ,{111*i},{1111*i},{i},{i} )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a ,{111*i},{1111*i},{i},{i} )"
)
tdSql.execute(
- f"insert into testdb.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a ,{111*i},{1111*i},{i},{i})"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a ,{111*i},{1111*i},{i},{i})"
)
tdSql.execute(
- "insert into testdb.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a ,0,0,0,0)")
+ f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a ,0,0,0,0)")
tdSql.execute(
- "insert into testdb.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a , 999 , 9999 , 9 , 9)")
+ f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a , 999 , 9999 , 9 , 9)")
tdSql.execute(
- "insert into testdb.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a , 999 , 99999 , 9 , 9)")
+ f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a , 999 , 99999 , 9 , 9)")
tdSql.execute(
- "insert into testdb.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a ,999 , 99999 , 9 , 9)")
+ f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a ,999 , 99999 , 9 , 9)")
tdSql.execute(
- "insert into testdb.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL, NULL, NULL) ")
+ f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL, NULL, NULL) ")
tdSql.execute(
- "insert into testdb.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL, NULL, NULL) ")
+ f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL, NULL, NULL) ")
tdSql.execute(
- "insert into testdb.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL, NULL, NULL ) ")
+ f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into testdb.t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -217,118 +213,116 @@ class TDTestCase:
tdLog.info(
"abs value check pass , it work as expected ,sql is \"%s\" " % abs_query)
- def test_errors(self):
- tdSql.execute("use testdb")
-
+ def test_errors(self, dbname="testdb"):
# bug need fix
- tdSql.error("select last_row(c1 ,NULL) from testdb.t1")
+ tdSql.error(f"select last_row(c1 ,NULL) from {dbname}.t1")
error_sql_lists = [
- "select last_row from testdb.t1",
- "select last_row(-+--+c1) from testdb.t1",
- "select last_row(123--123)==1 from testdb.t1",
- "select last_row(c1) as 'd1' from testdb.t1",
- #"select last_row(c1 ,NULL) from testdb.t1",
- "select last_row(,) from testdb.t1;",
- "select last_row(abs(c1) ab from testdb.t1)",
- "select last_row(c1) as int from testdb.t1",
- "select last_row from testdb.stb1",
- "select last_row(123--123)==1 from testdb.stb1",
- "select last_row(c1) as 'd1' from testdb.stb1",
- #"select last_row(c1 ,NULL) from testdb.stb1",
- "select last_row(,) from testdb.stb1;",
- "select last_row(abs(c1) ab from testdb.stb1)",
- "select last_row(c1) as int from testdb.stb1"
+ f"select last_row from {dbname}.t1",
+ f"select last_row(-+--+c1) from {dbname}.t1",
+ f"select last_row(123--123)==1 from {dbname}.t1",
+ f"select last_row(c1) as 'd1' from {dbname}.t1",
+ #f"select last_row(c1 ,NULL) from {dbname}.t1",
+ f"select last_row(,) from {dbname}.t1;",
+ f"select last_row(abs(c1) ab from {dbname}.t1)",
+ f"select last_row(c1) as int from {dbname}.t1",
+ f"select last_row from {dbname}.stb1",
+ f"select last_row(123--123)==1 from {dbname}.stb1",
+ f"select last_row(c1) as 'd1' from {dbname}.stb1",
+ #f"select last_row(c1 ,NULL) from {dbname}.stb1",
+ f"select last_row(,) from {dbname}.stb1;",
+ f"select last_row(abs(c1) ab from {dbname}.stb1)",
+ f"select last_row(c1) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
- def support_types(self):
- tdSql.execute("use testdb")
+ def support_types(self, dbname="testdb"):
+ tdSql.execute(f"use {dbname}")
tbnames = ["stb1", "t1", "ct1", "ct2"]
for tbname in tbnames:
- tdSql.query("desc {}".format(tbname))
+ tdSql.query(f"desc {dbname}.{tbname}")
coltypes = tdSql.queryResult
for coltype in coltypes:
colname = coltype[0]
col_note = coltype[-1]
if col_note != "TAG":
- abs_sql = "select last_row({}) from {}".format(colname, "testdb."+tbname)
+ abs_sql = f"select last_row({colname}) from {dbname}.{tbname}"
tdSql.query(abs_sql)
- def basic_abs_function(self):
+ def basic_abs_function(self, dbname="testdb"):
# basic query
- tdSql.query("select c1 from testdb.ct3")
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from testdb.t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from testdb.stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select last_row(c1) from testdb.ct3")
+ tdSql.query(f"select last_row(c1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select last_row(c2) from testdb.ct3")
+ tdSql.query(f"select last_row(c2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select last_row(c3) from testdb.ct3")
+ tdSql.query(f"select last_row(c3) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select last_row(c4) from testdb.ct3")
+ tdSql.query(f"select last_row(c4) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select last_row(c5) from testdb.ct3")
+ tdSql.query(f"select last_row(c5) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select last_row(c6) from testdb.ct3")
+ tdSql.query(f"select last_row(c6) from {dbname}.ct3")
# used for regular table
# bug need fix
- tdSql.query("select last_row(c1) from testdb.t1")
+ tdSql.query(f"select last_row(c1) from {dbname}.t1")
tdSql.checkData(0, 0, None)
- tdSql.query("select last_row(c1) from testdb.ct4")
+ tdSql.query(f"select last_row(c1) from {dbname}.ct4")
tdSql.checkData(0, 0, None)
- tdSql.query("select last_row(c1) from testdb.stb1")
+ tdSql.query(f"select last_row(c1) from {dbname}.stb1")
tdSql.checkData(0, 0, None)
-
- # support regular query about last ,first ,last_row
- tdSql.error("select last_row(c1,NULL) from testdb.t1")
- tdSql.error("select last_row(NULL) from testdb.t1")
- tdSql.error("select last(NULL) from testdb.t1")
- tdSql.error("select first(NULL) from testdb.t1")
- tdSql.query("select last_row(c1,123) from testdb.t1")
+ # support regular query about last ,first ,last_row
+ tdSql.error(f"select last_row(c1,NULL) from {dbname}.t1")
+ tdSql.error(f"select last_row(NULL) from {dbname}.t1")
+ tdSql.error(f"select last(NULL) from {dbname}.t1")
+ tdSql.error(f"select first(NULL) from {dbname}.t1")
+
+ tdSql.query(f"select last_row(c1,123) from {dbname}.t1")
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,123)
- tdSql.query("select last_row(123) from testdb.t1")
+ tdSql.query(f"select last_row(123) from {dbname}.t1")
tdSql.checkData(0,0,123)
- tdSql.error("select last(c1,NULL) from testdb.t1")
+ tdSql.error(f"select last(c1,NULL) from {dbname}.t1")
- tdSql.query("select last(c1,123) from testdb.t1")
+ tdSql.query(f"select last(c1,123) from {dbname}.t1")
tdSql.checkData(0,0,9)
tdSql.checkData(0,1,123)
- tdSql.error("select first(c1,NULL) from testdb.t1")
+ tdSql.error(f"select first(c1,NULL) from {dbname}.t1")
- tdSql.query("select first(c1,123) from testdb.t1")
+ tdSql.query(f"select first(c1,123) from {dbname}.t1")
tdSql.checkData(0,0,1)
tdSql.checkData(0,1,123)
- tdSql.error("select last_row(c1,c2,c3,NULL,c4) from testdb.t1")
+ tdSql.error(f"select last_row(c1,c2,c3,NULL,c4) from {dbname}.t1")
- tdSql.query("select last_row(c1,c2,c3,123,c4) from testdb.t1")
+ tdSql.query(f"select last_row(c1,c2,c3,123,c4) from {dbname}.t1")
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,None)
tdSql.checkData(0,2,None)
tdSql.checkData(0,3,123)
tdSql.checkData(0,4,None)
-
- tdSql.error("select last_row(c1,c2,c3,NULL,c4,t1,t2) from testdb.ct1")
- tdSql.query("select last_row(c1,c2,c3,123,c4,t1,t2) from testdb.ct1")
+ tdSql.error(f"select last_row(c1,c2,c3,NULL,c4,t1,t2) from {dbname}.ct1")
+
+ tdSql.query(f"select last_row(c1,c2,c3,123,c4,t1,t2) from {dbname}.ct1")
tdSql.checkData(0,0,9)
tdSql.checkData(0,1,-99999)
tdSql.checkData(0,2,-999)
@@ -338,13 +332,13 @@ class TDTestCase:
tdSql.checkData(0,5,0)
# # bug need fix
- tdSql.query("select last_row(c1), c2, c3 , c4, c5 from testdb.t1")
+ tdSql.query(f"select last_row(c1), c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
# # bug need fix
- tdSql.query("select last_row(c1), c2, c3 , c4, c5 from testdb.ct1")
+ tdSql.query(f"select last_row(c1), c2, c3 , c4, c5 from {dbname}.ct1")
tdSql.checkData(0, 0, 9)
tdSql.checkData(0, 1, -99999)
tdSql.checkData(0, 2, -999)
@@ -352,7 +346,7 @@ class TDTestCase:
tdSql.checkData(0, 4,-9.99000)
# bug need fix
- tdSql.query("select last_row(c1), c2, c3 , c4, c5 from testdb.stb1 where tbname='ct1'")
+ tdSql.query(f"select last_row(c1), c2, c3 , c4, c5 from {dbname}.stb1 where tbname='ct1'")
tdSql.checkData(0, 0, 9)
tdSql.checkData(0, 1, -99999)
tdSql.checkData(0, 2, -999)
@@ -360,124 +354,124 @@ class TDTestCase:
tdSql.checkData(0, 4,-9.99000)
# bug fix
- tdSql.query("select last_row(abs(c1)) from testdb.ct1")
+ tdSql.query(f"select last_row(abs(c1)) from {dbname}.ct1")
tdSql.checkData(0,0,9)
# # bug fix
- tdSql.query("select last_row(c1+1) from testdb.ct1")
- tdSql.query("select last_row(c1+1) from testdb.stb1")
- tdSql.query("select last_row(c1+1) from testdb.t1")
+ tdSql.query(f"select last_row(c1+1) from {dbname}.ct1")
+ tdSql.query(f"select last_row(c1+1) from {dbname}.stb1")
+ tdSql.query(f"select last_row(c1+1) from {dbname}.t1")
# used for stable table
- tdSql.query("select last_row(c1 ,c2 ,c3) ,last_row(c4) from testdb.ct1")
+ tdSql.query(f"select last_row(c1 ,c2 ,c3) ,last_row(c4) from {dbname}.ct1")
tdSql.checkData(0,0,9)
tdSql.checkData(0,1,-99999)
tdSql.checkData(0,2,-999)
tdSql.checkData(0,3,None)
# bug need fix
- tdSql.query("select last_row(c1 ,c2 ,c3) from testdb.stb1 ")
+ tdSql.query(f"select last_row(c1 ,c2 ,c3) from {dbname}.stb1 ")
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,None)
tdSql.checkData(0,2,None)
- tdSql.query('select last_row(c1) from testdb.t1 where ts <"2022-12-31 01:01:36.000"')
+ tdSql.query(f'select last_row(c1) from {dbname}.t1 where ts <"2022-12-31 01:01:36.000"')
tdSql.checkData(0,0,8)
# bug need fix
- tdSql.query("select abs(last_row(c1)-2)+max(c1),ceil(last_row(c4)-2) from testdb.stb1 where c4 is not null")
+ tdSql.query(f"select abs(last_row(c1)-2)+max(c1),ceil(last_row(c4)-2) from {dbname}.stb1 where c4 is not null")
tdSql.checkData(0,0,16.000000000)
tdSql.checkData(0,1,-101.000000000)
- tdSql.query("select abs(last_row(c1)-2)+max(c1),ceil(last_row(c4)-2) from testdb.ct1 where c4<0")
+ tdSql.query(f"select abs(last_row(c1)-2)+max(c1),ceil(last_row(c4)-2) from {dbname}.ct1 where c4<0")
tdSql.checkData(0,0,16.000000000)
tdSql.checkData(0,1,-101.000000000)
- tdSql.query("select last_row(ceil(c1+2)+floor(c1)-10) from testdb.stb1")
+ tdSql.query(f"select last_row(ceil(c1+2)+floor(c1)-10) from {dbname}.stb1")
tdSql.checkData(0,0,None)
- tdSql.query("select last_row(ceil(c1+2)+floor(c1)-10) from testdb.ct1")
+ tdSql.query(f"select last_row(ceil(c1+2)+floor(c1)-10) from {dbname}.ct1")
tdSql.checkData(0,0,10.000000000)
# filter for last_row
# bug need fix for all function
- tdSql.query("select last_row(ts ,c1 ) from testdb.ct4 where t1 = 1 ")
+ tdSql.query(f"select last_row(ts ,c1 ) from {dbname}.ct4 where t1 = 1 ")
tdSql.checkRows(0)
- tdSql.query("select count(c1) from testdb.ct4 where t1 = 1 ")
+ tdSql.query(f"select count(c1) from {dbname}.ct4 where t1 = 1 ")
tdSql.checkRows(0)
- tdSql.query("select last_row(c1) ,last(c1) from testdb.stb1 where c1 is null")
+ tdSql.query(f"select last_row(c1) ,last(c1) from {dbname}.stb1 where c1 is null")
tdSql.checkRows(1)
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,None)
- tdSql.query("select last_row(c1) ,count(*) from testdb.stb1 where c1 is null")
+ tdSql.query(f"select last_row(c1) ,count(*) from {dbname}.stb1 where c1 is null")
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,3)
- tdSql.query("select last_row(c1) ,count(c1) from testdb.stb1 where c1 is null")
+ tdSql.query(f"select last_row(c1) ,count(c1) from {dbname}.stb1 where c1 is null")
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,0)
# bug need fix
- tdSql.query("select tbname ,last_row(c1) from testdb.stb1")
+ tdSql.query(f"select tbname ,last_row(c1) from {dbname}.stb1")
tdSql.checkData(0,0,'ct4')
tdSql.checkData(0,1,None)
- tdSql.query(" select tbname ,last_row(c1) from testdb.stb1 partition by tbname order by tbname ")
+ tdSql.query(f"select tbname ,last_row(c1) from {dbname}.stb1 partition by tbname order by tbname ")
tdSql.checkData(0,0,'ct1')
tdSql.checkData(0,1,9)
tdSql.checkData(1,0,'ct4')
tdSql.checkData(1,1,None)
- tdSql.query(" select tbname ,last_row(c1) from testdb.stb1 group by tbname order by tbname ")
+ tdSql.query(f"select tbname ,last_row(c1) from {dbname}.stb1 group by tbname order by tbname ")
tdSql.checkData(0,0,'ct1')
tdSql.checkData(0,1,9)
tdSql.checkData(1,0,'ct4')
tdSql.checkData(1,1,None)
- tdSql.query(" select t1 ,count(c1) from testdb.stb1 partition by t1 ")
+ tdSql.query(f"select t1 ,count(c1) from {dbname}.stb1 partition by t1 ")
tdSql.checkRows(2)
# filter by tbname
- tdSql.query("select last_row(c1) from testdb.stb1 where tbname = 'ct1' ")
+ tdSql.query(f"select last_row(c1) from {dbname}.stb1 where tbname = 'ct1' ")
tdSql.checkData(0,0,9)
# bug need fix
- tdSql.query("select tbname ,last_row(c1) from testdb.stb1 where tbname = 'ct1' ")
+ tdSql.query(f"select tbname ,last_row(c1) from {dbname}.stb1 where tbname = 'ct1' ")
tdSql.checkData(0,1,9)
- tdSql.query("select tbname ,last_row(c1) from testdb.stb1 partition by tbname order by tbname")
+ tdSql.query(f"select tbname ,last_row(c1) from {dbname}.stb1 partition by tbname order by tbname")
tdSql.checkData(0, 0, 'ct1')
tdSql.checkData(0, 1, 9)
tdSql.checkData(1, 0, 'ct4')
tdSql.checkData(1, 1, None)
- tdSql.query("select tbname ,last_row(c1) from testdb.stb1 group by tbname order by tbname")
+ tdSql.query(f"select tbname ,last_row(c1) from {dbname}.stb1 group by tbname order by tbname")
tdSql.checkData(0, 0, 'ct1')
tdSql.checkData(0, 1, 9)
tdSql.checkData(1, 0, 'ct4')
tdSql.checkData(1, 1, None)
# last_row for only tag
- tdSql.query("select last_row(t1 ,t2 ,t3 , t4 ) from testdb.stb1")
+ tdSql.query(f"select last_row(t1 ,t2 ,t3 , t4 ) from {dbname}.stb1")
tdSql.checkData(0,0,3)
tdSql.checkData(0,1,33333)
tdSql.checkData(0,2,333)
tdSql.checkData(0,3,3)
- tdSql.query("select last_row(abs(floor(t1)) ,t2 ,ceil(abs(t3)) , abs(ceil(t4)) ) from testdb.stb1")
+ tdSql.query(f"select last_row(abs(floor(t1)) ,t2 ,ceil(abs(t3)) , abs(ceil(t4)) ) from {dbname}.stb1")
tdSql.checkData(0,0,3)
tdSql.checkData(0,1,33333)
tdSql.checkData(0,2,333)
tdSql.checkData(0,3,3)
# filter by tag
- tdSql.query("select tbname ,last_row(c1) from testdb.stb1 where t1 =0 ")
+ tdSql.query(f"select tbname ,last_row(c1) from {dbname}.stb1 where t1 =0 ")
tdSql.checkData(0,1,9)
- tdSql.query("select tbname ,last_row(c1) ,t1 from testdb.stb1 partition by t1 order by t1")
+ tdSql.query(f"select tbname ,last_row(c1) ,t1 from {dbname}.stb1 partition by t1 order by t1")
tdSql.checkData(0, 0, 'ct1')
tdSql.checkData(0, 1, 9)
tdSql.checkData(1, 0, 'ct4')
@@ -485,56 +479,55 @@ class TDTestCase:
# filter by col
- tdSql.query("select tbname ,last_row(c1),abs(c1)from testdb.stb1 where c1 =1;")
+ tdSql.query(f"select tbname ,last_row(c1),abs(c1)from {dbname}.stb1 where c1 =1;")
tdSql.checkData(0, 0, 'ct1')
tdSql.checkData(0, 1, 1)
tdSql.checkData(0, 2, 1)
- tdSql.query("select last_row(c1) from testdb.stb1 where abs(ceil(c1))*c1==1")
+ tdSql.query(f"select last_row(c1) from {dbname}.stb1 where abs(ceil(c1))*c1==1")
tdSql.checkData(0,0,1)
# mix with common functions
- tdSql.query("select last_row(*) ,last(*) from testdb.stb1 ")
+ tdSql.query(f"select last_row(*) ,last(*) from {dbname}.stb1 ")
tdSql.checkRows(1)
- tdSql.query("select last_row(*) ,last(*) from testdb.stb1 ")
+ tdSql.query(f"select last_row(*) ,last(*) from {dbname}.stb1 ")
tdSql.checkRows(1)
- tdSql.query("select last_row(c1+abs(c1)) from testdb.stb1 partition by tbname order by tbname")
- tdSql.query("select last(c1), max(c1+abs(c1)),last_row(c1+abs(c1)) from testdb.stb1 partition by tbname order by tbname")
+ tdSql.query(f"select last_row(c1+abs(c1)) from {dbname}.stb1 partition by tbname order by tbname")
+ tdSql.query(f"select last(c1), max(c1+abs(c1)),last_row(c1+abs(c1)) from {dbname}.stb1 partition by tbname order by tbname")
# # bug need fix ,taosd crash
- tdSql.error("select last_row(*) ,last(*) from testdb.stb1 partition by tbname order by last(*)")
- tdSql.error("select last_row(*) ,last(*) from testdb.stb1 partition by tbname order by last_row(*)")
+ tdSql.error(f"select last_row(*) ,last(*) from {dbname}.stb1 partition by tbname order by last(*)")
+ tdSql.error(f"select last_row(*) ,last(*) from {dbname}.stb1 partition by tbname order by last_row(*)")
# mix with agg functions
- tdSql.query("select last(*), last_row(*),last(c1), last_row(c1) from testdb.stb1 ")
- tdSql.query("select last(*), last_row(*),last(c1), last_row(c1) from testdb.ct1 ")
- tdSql.query("select last(*), last_row(*),last(c1+1)*max(c1), last_row(c1+2)/2 from testdb.t1 ")
- tdSql.query("select last_row(*) ,abs(c1/2)+100 from testdb.stb1 where tbname =\"ct1\" ")
- tdSql.query("select c1, last_row(c5) from testdb.ct1 ")
- tdSql.error("select c1, last_row(c5) ,last(c1) from testdb.stb1 ")
+ tdSql.query(f"select last(*), last_row(*),last(c1), last_row(c1) from {dbname}.stb1 ")
+ tdSql.query(f"select last(*), last_row(*),last(c1), last_row(c1) from {dbname}.ct1 ")
+ tdSql.query(f"select last(*), last_row(*),last(c1+1)*max(c1), last_row(c1+2)/2 from {dbname}.t1 ")
+ tdSql.query(f"select last_row(*) ,abs(c1/2)+100 from {dbname}.stb1 where tbname =\"ct1\" ")
+ tdSql.query(f"select c1, last_row(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, last_row(c5) ,last(c1) from {dbname}.stb1 ")
# agg functions mix with agg functions
- tdSql.query("select last(c1) , max(c5), count(c5) from testdb.stb1")
- tdSql.query("select last_row(c1) , max(c5), count(c5) from testdb.ct1")
+ tdSql.query(f"select last(c1) , max(c5), count(c5) from {dbname}.stb1")
+ tdSql.query(f"select last_row(c1) , max(c5), count(c5) from {dbname}.ct1")
# bug fix for compute
- tdSql.query("select last_row(c1) -0 ,last(c1)-0 ,last(c1)+last_row(c1) from testdb.ct4 ")
+ tdSql.query(f"select last_row(c1) -0 ,last(c1)-0 ,last(c1)+last_row(c1) from {dbname}.ct4 ")
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,0.000000000)
tdSql.checkData(0,2,None)
- tdSql.query(" select c1, abs(c1) -0 ,last_row(c1-0.1)-0.1 from testdb.ct1")
+ tdSql.query(f"select c1, abs(c1) -0 ,last_row(c1-0.1)-0.1 from {dbname}.ct1")
tdSql.checkData(0,0,9)
tdSql.checkData(0,1,9.000000000)
tdSql.checkData(0,2,8.800000000)
- def abs_func_filter(self):
- tdSql.execute("use db")
+ def abs_func_filter(self, dbname="db"):
tdSql.query(
- "select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,last_row(log(c1,2)-0.5) from db.ct4 where c1>5 ")
+ f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,last_row(log(c1,2)-0.5) from {dbname}.ct4 where c1>5 ")
tdSql.checkData(0, 0, 6)
tdSql.checkData(0, 1, 6.000000000)
tdSql.checkData(0, 2, 6.000000000)
@@ -542,19 +535,19 @@ class TDTestCase:
tdSql.checkData(0, 4, 2.084962501)
tdSql.query(
- "select last_row(c1,c2,c1+5) from db.ct4 where c1=5 ")
+ f"select last_row(c1,c2,c1+5) from {dbname}.ct4 where c1=5 ")
tdSql.checkData(0, 0, 5)
tdSql.checkData(0, 1, 55555)
tdSql.checkData(0, 2, 10.000000000)
tdSql.query(
- "select last(c1,c2,c1+5) from db.ct4 where c1=5 ")
+ f"select last(c1,c2,c1+5) from {dbname}.ct4 where c1=5 ")
tdSql.checkData(0, 0, 5)
tdSql.checkData(0, 1, 55555)
tdSql.checkData(0, 2, 10.000000000)
tdSql.query(
- "select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from db.ct4 where c1>log(c1,2) limit 1 ")
+ f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1>log(c1,2) limit 1 ")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 8)
tdSql.checkData(0, 1, 88888)
@@ -566,166 +559,162 @@ class TDTestCase:
def abs_Arithmetic(self):
pass
- def check_boundary_values(self):
+ def check_boundary_values(self, dbname="bound_test"):
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test cachemodel 'LAST_ROW' ")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname} cachemodel 'LAST_ROW' ")
time.sleep(3)
- tdSql.execute("use bound_test")
+ tdSql.execute(f"use {dbname}")
tdSql.execute(
- "create table bound_test.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table bound_test.sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into bound_test.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into bound_test.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into bound_test.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into bound_test.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into bound_test.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
# check basic elem for table per row
tdSql.query(
- "select last(c1) ,last_row(c2), last_row(c3)+1 , last(c4)+1 from bound_test.sub1_bound ")
+ f"select last(c1) ,last_row(c2), last_row(c3)+1 , last(c4)+1 from {dbname}.sub1_bound ")
tdSql.checkData(0, 0, -2147483646)
tdSql.checkData(0, 1, -9223372036854775806)
tdSql.checkData(0, 2, -32765.000000000)
tdSql.checkData(0, 3, -125.000000000)
# check + - * / in functions
tdSql.query(
- "select last_row(c1+1) ,last_row(c2) , last(c3*1) , last(c4/2) from bound_test.sub1_bound ")
-
- def test_tag_compute_for_scalar_function(self):
-
- tdSql.execute("use testdb")
+ f"select last_row(c1+1) ,last_row(c2) , last(c3*1) , last(c4/2) from {dbname}.sub1_bound ")
+ def test_tag_compute_for_scalar_function(self, dbname="testdb"):
# bug need fix
- tdSql.query(" select sum(c1) from testdb.stb1 where t1+10 >1; ")
- tdSql.query("select c1 ,t1 from testdb.stb1 where t1 =0 ")
+ tdSql.query(f"select sum(c1) from {dbname}.stb1 where t1+10 >1; ")
+ tdSql.query(f"select c1 ,t1 from {dbname}.stb1 where t1 =0 ")
tdSql.checkRows(13)
- tdSql.query("select last_row(c1,t1) from testdb.stb1 ")
+ tdSql.query(f"select last_row(c1,t1) from {dbname}.stb1 ")
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,3)
- tdSql.query("select last_row(c1),t1 from testdb.stb1 ")
+ tdSql.query(f"select last_row(c1),t1 from {dbname}.stb1 ")
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,3)
- tdSql.query("select last_row(c1,t1),last(t1) from testdb.stb1 ")
+ tdSql.query(f"select last_row(c1,t1),last(t1) from {dbname}.stb1 ")
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,3)
tdSql.checkData(0,2,3)
- tdSql.query("select last_row(t1) from testdb.stb1 where t1 >0 ")
+ tdSql.query(f"select last_row(t1) from {dbname}.stb1 where t1 >0 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,3)
- tdSql.query("select last_row(t1) from testdb.stb1 where t1 =3 ")
+ tdSql.query(f"select last_row(t1) from {dbname}.stb1 where t1 =3 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,3)
- tdSql.query("select last_row(t1) from testdb.stb1 where t1 =2")
+ tdSql.query(f"select last_row(t1) from {dbname}.stb1 where t1 =2")
tdSql.checkRows(0)
# nest query for last_row
- tdSql.query("select last_row(t1) from (select ts , c1 ,t1 from testdb.stb1)")
+ tdSql.query(f"select last_row(t1) from (select ts , c1 ,t1 from {dbname}.stb1)")
tdSql.checkData(0,0,3)
- tdSql.query("select distinct(c1) ,t1 from testdb.stb1")
+ tdSql.query(f"select distinct(c1) ,t1 from {dbname}.stb1")
tdSql.checkRows(20)
- tdSql.query("select last_row(c1) from (select _rowts , c1 ,t1 from testdb.stb1)")
+ tdSql.query(f"select last_row(c1) from (select _rowts , c1 ,t1 from {dbname}.stb1)")
tdSql.checkData(0,0,None)
- tdSql.query("select last_row(c1) from (select ts , c1 ,t1 from testdb.stb1)")
+ tdSql.query(f"select last_row(c1) from (select ts , c1 ,t1 from {dbname}.stb1)")
tdSql.checkData(0,0,None)
- tdSql.query("select ts , last_row(c1) ,c1 from (select ts , c1 ,t1 from testdb.stb1)")
+ tdSql.query(f"select ts , last_row(c1) ,c1 from (select ts , c1 ,t1 from {dbname}.stb1)")
tdSql.checkData(0,1,None)
- tdSql.query("select ts , last_row(c1) ,c1 from (select ts , max(c1) c1 ,t1 from testdb.stb1 where ts >now -1h and ts now -1h and ts now -1h and ts now -1h and ts now -1h and ts now -1h and ts ="2021-01-01 01:01:06.000" and ts < "2021-07-21 01:01:01.000" interval(50d) sliding(30d) fill(NULL)')
+ tdSql.query(f'select max(c1) from {dbname}.t1 where ts>="2021-01-01 01:01:06.000" and ts < "2021-07-21 01:01:01.000" interval(50d) sliding(30d) fill(NULL)')
tdSql.checkRows(8)
tdSql.checkData(7,0,None)
- tdSql.query('select last_row(c1) from testdb.t1 where ts>="2021-01-01 01:01:06.000" and ts < "2021-07-21 01:01:01.000" interval(50d) sliding(30d) fill(value ,2 )')
+ tdSql.query(f'select last_row(c1) from {dbname}.t1 where ts>="2021-01-01 01:01:06.000" and ts < "2021-07-21 01:01:01.000" interval(50d) sliding(30d) fill(value ,2 )')
tdSql.checkRows(8)
tdSql.checkData(7,0,2)
- tdSql.query('select last_row(c1) from testdb.stb1 where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s)')
- tdSql.query('select last_row(c1) from (select ts , c1 from testdb.t1 where ts>="2021-01-01 01:01:06.000" and ts < "2021-07-21 01:01:01.000" ) interval(10s) sliding(5s)')
+ tdSql.query(f'select last_row(c1) from {dbname}.stb1 where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s)')
+ tdSql.query(f'select last_row(c1) from (select ts , c1 from {dbname}.t1 where ts>="2021-01-01 01:01:06.000" and ts < "2021-07-21 01:01:01.000" ) interval(10s) sliding(5s)')
# join
- tdSql.query("use test")
- tdSql.query("select last(sub_tb_1.c1), last(sub_tb_2.c2) from sub_tb_1, sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts")
+ db1 = "test"
+ tdSql.query(f"use {db1}")
+ tdSql.query(f"select last(sub_tb_1.c1), last(sub_tb_2.c2) from {db1}.sub_tb_1 sub_tb_1, {db1}.sub_tb_2 sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts")
tdSql.checkCols(2)
last_row_result = tdSql.queryResult
- tdSql.query("select last_row(sub_tb_1.c1), last_row(sub_tb_2.c2) from sub_tb_1, sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts")
+ tdSql.query(f"select last_row(sub_tb_1.c1), last_row(sub_tb_2.c2) from {db1}.sub_tb_1 sub_tb_1, {db1}.sub_tb_2 sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts")
for ind , row in enumerate(last_row_result):
tdSql.checkData(ind , 0 , row[0])
- tdSql.query("select last(*), last(*) from sub_tb_1, sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts")
+ tdSql.query(f"select last(*), last(*) from {db1}.sub_tb_1 sub_tb_1, {db1}.sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts")
last_row_result = tdSql.queryResult
- tdSql.query("select last_row(*), last_row(*) from sub_tb_1, sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts")
+ tdSql.query(f"select last_row(*), last_row(*) from {db1}.sub_tb_1 sub_tb_1, {db1}.sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts")
for ind , row in enumerate(last_row_result):
tdSql.checkData(ind , 0 , row[0])
- tdSql.query("select last(*), last_row(*) from sub_tb_1, sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts")
+ tdSql.query(f"select last(*), last_row(*) from {db1}.sub_tb_1 sub_tb_1, {db1}.sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts")
for ind , row in enumerate(last_row_result):
tdSql.checkData(ind , 0 , row[0])
- tdSql.query("select last_row(*), last(*) from sub_tb_1, sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts")
+ tdSql.query(f"select last_row(*), last(*) from {db1}.sub_tb_1 sub_tb_1, {db1}.sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts")
for ind , row in enumerate(last_row_result):
tdSql.checkData(ind , 0 , row[0])
- def support_super_table_test(self):
- tdSql.execute(" use testdb ")
- self.check_result_auto( " select c1 from testdb.stb1 order by ts " , "select abs(c1) from testdb.stb1 order by ts" )
- self.check_result_auto( " select c1 from testdb.stb1 order by tbname " , "select abs(c1) from testdb.stb1 order by tbname" )
- self.check_result_auto( " select c1 from testdb.stb1 where c1 > 0 order by tbname " , "select abs(c1) from testdb.stb1 where c1 > 0 order by tbname" )
- self.check_result_auto( " select c1 from testdb.stb1 where c1 > 0 order by tbname " , "select abs(c1) from testdb.stb1 where c1 > 0 order by tbname" )
+ def support_super_table_test(self, dbname="testdb"):
+ self.check_result_auto( f"select c1 from {dbname}.stb1 order by ts " , f"select abs(c1) from {dbname}.stb1 order by ts" )
+ self.check_result_auto( f"select c1 from {dbname}.stb1 order by tbname " , f"select abs(c1) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto( f"select c1 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select abs(c1) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto( f"select c1 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select abs(c1) from {dbname}.stb1 where c1 > 0 order by tbname" )
- self.check_result_auto( " select t1,c1 from testdb.stb1 order by ts " , "select t1, abs(c1) from testdb.stb1 order by ts" )
- self.check_result_auto( " select t2,c1 from testdb.stb1 order by tbname " , "select t2 ,abs(c1) from testdb.stb1 order by tbname" )
- self.check_result_auto( " select t3,c1 from testdb.stb1 where c1 > 0 order by tbname " , "select t3 ,abs(c1) from testdb.stb1 where c1 > 0 order by tbname" )
- self.check_result_auto( " select t4,c1 from testdb.stb1 where c1 > 0 order by tbname " , "select t4 , abs(c1) from testdb.stb1 where c1 > 0 order by tbname" )
- pass
+ self.check_result_auto( f"select t1,c1 from {dbname}.stb1 order by ts " , f"select t1, abs(c1) from {dbname}.stb1 order by ts" )
+ self.check_result_auto( f"select t2,c1 from {dbname}.stb1 order by tbname " , f"select t2 ,abs(c1) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto( f"select t3,c1 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select t3 ,abs(c1) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto( f"select t4,c1 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select t4 , abs(c1) from {dbname}.stb1 where c1 > 0 order by tbname" )
def basic_query(self):
diff --git a/tests/system-test/2-query/leastsquares.py b/tests/system-test/2-query/leastsquares.py
index 3bab509942..fe7188a545 100644
--- a/tests/system-test/2-query/leastsquares.py
+++ b/tests/system-test/2-query/leastsquares.py
@@ -26,6 +26,7 @@ TS_TYPE_COL = [ TS_COL, ]
ALL_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, BINARY_COL, NCHAR_COL, TS_COL ]
+DBNAME = "db"
class TDTestCase:
def init(self, conn, logSql):
@@ -133,13 +134,13 @@ class TDTestCase:
return f"select leastsquares({select_clause}, {start_val}, {step_val}) from {from_clause} {where_condition} {group_condition}"
@property
- def __tb_list(self):
+ def __tb_list(self, dbname=DBNAME):
return [
- "ct1",
- "ct4",
- "t1",
- "ct2",
- "stb1",
+ f"{dbname}.ct1",
+ f"{dbname}.ct4",
+ f"{dbname}.nt1",
+ f"{dbname}.ct2",
+ f"{dbname}.stb1",
]
@property
@@ -161,36 +162,37 @@ class TDTestCase:
err_sqls = []
__no_join_tblist = self.__tb_list
for tb in __no_join_tblist:
- select_claus_list = self.__query_condition(tb)
- for select_claus in select_claus_list:
- group_claus = self.__group_condition(col=select_claus)
- where_claus = self.__where_condition(query_conditon=select_claus)
- having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null")
- for arg in self.start_step_val:
- if not isinstance(arg,int) or isinstance(arg, bool) :
- err_sqls.extend(
- (
- self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg),
- self.__single_sql(select_clause=select_claus, from_clause=tb, step_val=arg, group_condition=group_claus),
- self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, where_condition=where_claus, group_condition=having_claus),
- )
+ tbname = tb.split(".")[-1]
+ select_claus_list = self.__query_condition(tbname)
+ for select_claus in select_claus_list:
+ group_claus = self.__group_condition(col=select_claus)
+ where_claus = self.__where_condition(query_conditon=select_claus)
+ having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null")
+ for arg in self.start_step_val:
+ if not isinstance(arg,int) or isinstance(arg, bool) :
+ err_sqls.extend(
+ (
+ self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg),
+ self.__single_sql(select_clause=select_claus, from_clause=tb, step_val=arg, group_condition=group_claus),
+ self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, where_condition=where_claus, group_condition=having_claus),
)
- elif isinstance(select_claus, str) and any([BOOL_COL in select_claus, BINARY_COL in select_claus, NCHAR_COL in select_claus, TS_COL in select_claus]):
- err_sqls.extend(
- (
- self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg),
- self.__single_sql(select_clause=select_claus, from_clause=tb, step_val=arg, group_condition=group_claus),
- self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, where_condition=where_claus, group_condition=having_claus),
- )
+ )
+ elif isinstance(select_claus, str) and any([BOOL_COL in select_claus, BINARY_COL in select_claus, NCHAR_COL in select_claus, TS_COL in select_claus]):
+ err_sqls.extend(
+ (
+ self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg),
+ self.__single_sql(select_clause=select_claus, from_clause=tb, step_val=arg, group_condition=group_claus),
+ self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, where_condition=where_claus, group_condition=having_claus),
)
- else:
- current_sqls.extend(
- (
- self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, step_val=0),
- self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=0, step_val=arg, group_condition=group_claus),
- self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, step_val=arg, where_condition=where_claus, group_condition=having_claus),
- )
+ )
+ else:
+ current_sqls.extend(
+ (
+ self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, step_val=0),
+ self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=0, step_val=arg, group_condition=group_claus),
+ self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, step_val=arg, where_condition=where_claus, group_condition=having_claus),
)
+ )
# return filter(None, sqls)
return list(filter(None, current_sqls)), list(filter(None, err_sqls))
@@ -207,25 +209,25 @@ class TDTestCase:
def __test_current(self):
- # tdSql.query("explain select c1 from ct1")
- # tdSql.query("explain select 1 from ct2")
- # tdSql.query("explain select cast(ceil(c6) as bigint) from ct4 group by c6")
- # tdSql.query("explain select count(c3) from ct4 group by c7 having count(c3) > 0")
- # tdSql.query("explain select ct2.c3 from ct4 join ct2 on ct4.ts=ct2.ts")
+ # tdSql.query("explain select c1 from {dbname}.ct1")
+ # tdSql.query("explain select 1 from {dbname}.ct2")
+ # tdSql.query("explain select cast(ceil(c6) as bigint) from {dbname}.ct4 group by c6")
+ # tdSql.query("explain select count(c3) from {dbname}.ct4 group by c7 having count(c3) > 0")
+ # tdSql.query("explain select ct2.c3 from {dbname}.ct4 join ct2 on ct4.ts=ct2.ts")
# tdSql.query("explain select c1 from stb1 where c1 is not null and c1 in (0, 1, 2) or c1 between 2 and 100 ")
self.leastsquares_check()
- def __test_error(self):
+ def __test_error(self, dbname=DBNAME):
tdLog.printNoPrefix("===step 0: err case, must return err")
- tdSql.error( "select leastsquares(c1) from ct8" )
- tdSql.error( "select leastsquares(c1, 1) from ct1 " )
- tdSql.error( "select leastsquares(c1, null, 1) from ct1 " )
- tdSql.error( "select leastsquares(c1, 1, null) from ct1 " )
- tdSql.error( "select leastsquares(null, 1, 1) from ct1 " )
- tdSql.error( '''select leastsquares(['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'])
- from ct1
+ tdSql.error( f"select leastsquares(c1) from {dbname}.ct8" )
+ tdSql.error( f"select leastsquares(c1, 1) from {dbname}.ct1 " )
+ tdSql.error( f"select leastsquares(c1, null, 1) from {dbname}.ct1 " )
+ tdSql.error( f"select leastsquares(c1, 1, null) from {dbname}.ct1 " )
+ tdSql.error( f"select leastsquares(null, 1, 1) from {dbname}.ct1 " )
+ tdSql.error( f'''select leastsquares(['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'])
+ from {dbname}.ct1
where ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null
group by ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10']
having ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null ''' )
@@ -234,16 +236,16 @@ class TDTestCase:
self.__test_error()
self.__test_current()
- def __create_tb(self):
+ def __create_tb(self, dbname=DBNAME):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.nt1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -253,30 +255,29 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
- { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname=DBNAME):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
+ f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -292,7 +293,7 @@ class TDTestCase:
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -308,13 +309,13 @@ class TDTestCase:
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.nt1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.nt1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -332,7 +333,7 @@ class TDTestCase:
def run(self):
- tdSql.prepare()
+ tdSql.prepare(DBNAME)
tdLog.printNoPrefix("==========step1:create table")
self.__create_tb()
@@ -344,10 +345,9 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ tdSql.execute(f"flush database {DBNAME}")
- tdSql.execute("use db")
+ tdSql.execute(f"use {DBNAME}")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
diff --git a/tests/system-test/2-query/length.py b/tests/system-test/2-query/length.py
index ed604c41ae..1761572245 100644
--- a/tests/system-test/2-query/length.py
+++ b/tests/system-test/2-query/length.py
@@ -19,6 +19,7 @@ TS_COL = "c10"
UN_CHAR_COL = [INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, ]
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
TS_TYPE_COL = [TS_COL]
+DBNAME = "db"
class TDTestCase:
@@ -102,16 +103,16 @@ class TDTestCase:
return sqls
- def __test_current(self):
+ def __test_current(self, dbname=DBNAME):
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.nt1", f"{dbname}.stb1"]
for tb in tbname:
self.__length_current_check(tb)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
- def __test_error(self):
+ def __test_error(self, dbname=DBNAME):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.nt1", f"{dbname}.stb1"]
for tb in tbname:
for errsql in self.__length_err_check(tb):
@@ -124,17 +125,16 @@ class TDTestCase:
self.__test_error()
- def __create_tb(self):
- tdSql.prepare()
+ def __create_tb(self, dbname=DBNAME):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.nt1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -144,29 +144,29 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname=DBNAME):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
+ f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -182,7 +182,7 @@ class TDTestCase:
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -198,13 +198,13 @@ class TDTestCase:
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.nt1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.nt1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -233,8 +233,7 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ tdSql.execute("flush database db")
tdSql.execute("use db")
diff --git a/tests/system-test/2-query/log.py b/tests/system-test/2-query/log.py
index b8e0aaf52e..e304284bf9 100644
--- a/tests/system-test/2-query/log.py
+++ b/tests/system-test/2-query/log.py
@@ -10,48 +10,46 @@ from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
- def prepare_datas(self):
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -185,68 +183,68 @@ class TDTestCase:
else:
tdLog.info("log value check pass , it work as expected ,sql is \"%s\" "%log_query )
- def test_errors(self):
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select log from t1",
- # "select log(-+--+c1 ,2) from t1",
- # "select +-log(c1,2) from t1",
- # "select ++-log(c1,2) from t1",
- # "select ++--log(c1,2) from t1",
- # "select - -log(c1,2)*0 from t1",
- # "select log(tbname+1,2) from t1 ",
- "select log(123--123,2)==1 from t1",
- "select log(c1,2) as 'd1' from t1",
- "select log(c1 ,c2 ,2) from t1",
- "select log(c1 ,NULL ,2) from t1",
- "select log(, 2) from t1;",
- "select log(log(c1, 2) ab from t1)",
- "select log(c1 ,2 ) as int from t1",
- "select log from stb1",
- # "select log(-+--+c1) from stb1",
- # "select +-log(c1) from stb1",
- # "select ++-log(c1) from stb1",
- # "select ++--log(c1) from stb1",
- # "select - -log(c1)*0 from stb1",
- # "select log(tbname+1) from stb1 ",
- "select log(123--123 ,2)==1 from stb1",
- "select log(c1 ,2) as 'd1' from stb1",
- "select log(c1 ,c2 ,2 ) from stb1",
- "select log(c1 ,NULL,2) from stb1",
- "select log(,) from stb1;",
- "select log(log(c1 , 2) ab from stb1)",
- "select log(c1 , 2) as int from stb1"
+ f"select log from {dbname}.t1",
+ # f"select log(-+--+c1 ,2) from {dbname}.t1",
+ # f"select +-log(c1,2) from {dbname}.t1",
+ # f"select ++-log(c1,2) from {dbname}.t1",
+ # f"select ++--log(c1,2) from {dbname}.t1",
+ # f"select - -log(c1,2)*0 from {dbname}.t1",
+ # f"select log(tbname+1,2) from {dbname}.t1 ",
+ f"select log(123--123,2)==1 from {dbname}.t1",
+ f"select log(c1,2) as 'd1' from {dbname}.t1",
+ f"select log(c1 ,c2 ,2) from {dbname}.t1",
+ f"select log(c1 ,NULL ,2) from {dbname}.t1",
+ f"select log(, 2) from {dbname}.t1;",
+ f"select log(log(c1, 2) ab from {dbname}.t1)",
+ f"select log(c1 ,2 ) as int from {dbname}.t1",
+ f"select log from {dbname}.stb1",
+ # f"select log(-+--+c1) from {dbname}.stb1",
+ # f"select +-log(c1) from {dbname}.stb1",
+ # f"select ++-log(c1) from {dbname}.stb1",
+ # f"select ++--log(c1) from {dbname}.stb1",
+ # f"select - -log(c1)*0 from {dbname}.stb1",
+ # f"select log(tbname+1) from {dbname}.stb1 ",
+ f"select log(123--123 ,2)==1 from {dbname}.stb1",
+ f"select log(c1 ,2) as 'd1' from {dbname}.stb1",
+ f"select log(c1 ,c2 ,2 ) from {dbname}.stb1",
+ f"select log(c1 ,NULL,2) from {dbname}.stb1",
+ f"select log(,) from {dbname}.stb1;",
+ f"select log(log(c1 , 2) ab from {dbname}.stb1)",
+ f"select log(c1 , 2) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
- def support_types(self):
+ def support_types(self, dbname="db"):
type_error_sql_lists = [
- "select log(ts ,2 ) from t1" ,
- "select log(c7,c2 ) from t1",
- "select log(c8,c1 ) from t1",
- "select log(c9,c2 ) from t1",
- "select log(ts,c7 ) from ct1" ,
- "select log(c7,c9 ) from ct1",
- "select log(c8,c2 ) from ct1",
- "select log(c9,c1 ) from ct1",
- "select log(ts,2 ) from ct3" ,
- "select log(c7,2 ) from ct3",
- "select log(c8,2 ) from ct3",
- "select log(c9,2 ) from ct3",
- "select log(ts,2 ) from ct4" ,
- "select log(c7,2 ) from ct4",
- "select log(c8,2 ) from ct4",
- "select log(c9,2 ) from ct4",
- "select log(ts,2 ) from stb1" ,
- "select log(c7,2 ) from stb1",
- "select log(c8,2 ) from stb1",
- "select log(c9,2 ) from stb1" ,
+ f"select log(ts ,2 ) from {dbname}.t1" ,
+ f"select log(c7,c2 ) from {dbname}.t1",
+ f"select log(c8,c1 ) from {dbname}.t1",
+ f"select log(c9,c2 ) from {dbname}.t1",
+ f"select log(ts,c7 ) from {dbname}.ct1" ,
+ f"select log(c7,c9 ) from {dbname}.ct1",
+ f"select log(c8,c2 ) from {dbname}.ct1",
+ f"select log(c9,c1 ) from {dbname}.ct1",
+ f"select log(ts,2 ) from {dbname}.ct3" ,
+ f"select log(c7,2 ) from {dbname}.ct3",
+ f"select log(c8,2 ) from {dbname}.ct3",
+ f"select log(c9,2 ) from {dbname}.ct3",
+ f"select log(ts,2 ) from {dbname}.ct4" ,
+ f"select log(c7,2 ) from {dbname}.ct4",
+ f"select log(c8,2 ) from {dbname}.ct4",
+ f"select log(c9,2 ) from {dbname}.ct4",
+ f"select log(ts,2 ) from {dbname}.stb1" ,
+ f"select log(c7,2 ) from {dbname}.stb1",
+ f"select log(c8,2 ) from {dbname}.stb1",
+ f"select log(c9,2 ) from {dbname}.stb1" ,
- "select log(ts,2 ) from stbbb1" ,
- "select log(c7,2 ) from stbbb1",
+ f"select log(ts,2 ) from {dbname}.stbbb1" ,
+ f"select log(c7,2 ) from {dbname}.stbbb1",
- "select log(ts,2 ) from tbname",
- "select log(c9,2 ) from tbname"
+ f"select log(ts,2 ) from {dbname}.tbname",
+ f"select log(c9,2 ) from {dbname}.tbname"
]
@@ -255,98 +253,88 @@ class TDTestCase:
type_sql_lists = [
- "select log(c1,2 ) from t1",
- "select log(c2,2 ) from t1",
- "select log(c3,2 ) from t1",
- "select log(c4,2 ) from t1",
- "select log(c5,2 ) from t1",
- "select log(c6,2 ) from t1",
+ f"select log(c1,2 ) from {dbname}.t1",
+ f"select log(c2,2 ) from {dbname}.t1",
+ f"select log(c3,2 ) from {dbname}.t1",
+ f"select log(c4,2 ) from {dbname}.t1",
+ f"select log(c5,2 ) from {dbname}.t1",
+ f"select log(c6,2 ) from {dbname}.t1",
- "select log(c1,2 ) from ct1",
- "select log(c2,2 ) from ct1",
- "select log(c3,2 ) from ct1",
- "select log(c4,2 ) from ct1",
- "select log(c5,2 ) from ct1",
- "select log(c6,2 ) from ct1",
+ f"select log(c1,2 ) from {dbname}.ct1",
+ f"select log(c2,2 ) from {dbname}.ct1",
+ f"select log(c3,2 ) from {dbname}.ct1",
+ f"select log(c4,2 ) from {dbname}.ct1",
+ f"select log(c5,2 ) from {dbname}.ct1",
+ f"select log(c6,2 ) from {dbname}.ct1",
- "select log(c1,2 ) from ct3",
- "select log(c2,2 ) from ct3",
- "select log(c3,2 ) from ct3",
- "select log(c4,2 ) from ct3",
- "select log(c5,2 ) from ct3",
- "select log(c6,2 ) from ct3",
+ f"select log(c1,2 ) from {dbname}.ct3",
+ f"select log(c2,2 ) from {dbname}.ct3",
+ f"select log(c3,2 ) from {dbname}.ct3",
+ f"select log(c4,2 ) from {dbname}.ct3",
+ f"select log(c5,2 ) from {dbname}.ct3",
+ f"select log(c6,2 ) from {dbname}.ct3",
- "select log(c1,2 ) from stb1",
- "select log(c2,2 ) from stb1",
- "select log(c3,2 ) from stb1",
- "select log(c4,2 ) from stb1",
- "select log(c5,2 ) from stb1",
- "select log(c6,2 ) from stb1",
+ f"select log(c1,2 ) from {dbname}.stb1",
+ f"select log(c2,2 ) from {dbname}.stb1",
+ f"select log(c3,2 ) from {dbname}.stb1",
+ f"select log(c4,2 ) from {dbname}.stb1",
+ f"select log(c5,2 ) from {dbname}.stb1",
+ f"select log(c6,2 ) from {dbname}.stb1",
- "select log(c6,2) as alisb from stb1",
- "select log(c6,2) alisb from stb1",
+ f"select log(c6,2) as alisb from {dbname}.stb1",
+ f"select log(c6,2) alisb from {dbname}.stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
- def basic_log_function(self):
+ def basic_log_function(self, dbname="db"):
# basic query
- tdSql.query("select c1 from ct3")
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select log(c1 ,2) from ct3")
+ tdSql.query(f"select log(c1 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select log(c2 ,2) from ct3")
+ tdSql.query(f"select log(c2 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select log(c3 ,2) from ct3")
+ tdSql.query(f"select log(c3 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select log(c4 ,2) from ct3")
+ tdSql.query(f"select log(c4 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select log(c5 ,2) from ct3")
+ tdSql.query(f"select log(c5 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select log(c6 ,2) from ct3")
+ tdSql.query(f"select log(c6 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
# # used for regular table
- tdSql.query("select log(c1 ,2) from t1")
+ tdSql.query(f"select log(c1 ,2) from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1 , 0, 0.000000000)
tdSql.checkData(3 , 0, 1.584962501)
tdSql.checkData(5 , 0, None)
- tdSql.query("select log(c1) from t1")
+ tdSql.query(f"select log(c1) from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1 , 0, 0.000000000)
tdSql.checkData(2 , 0, 0.693147181)
tdSql.checkData(3 , 0, 1.098612289)
tdSql.checkData(4 , 0, 1.386294361)
- tdSql.query("select c1, c2, c3 , c4, c5 from t1")
- tdSql.checkData(1, 4, 1.11000)
- tdSql.checkData(3, 3, 33)
- tdSql.checkData(5, 4, None)
-
- tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
- tdSql.checkData(1, 5, 1.11000)
- tdSql.checkData(3, 4, 33)
- tdSql.checkData(5, 5, None)
-
- self.check_result_auto_log( "select c1, c2, c3 , c4, c5 from t1", "select log(c1), log(c2) ,log(c3), log(c4), log(c5) from t1")
- self.check_result_auto_log2( "select c1, c2, c3 , c4, c5 from t1", "select log(c1 ,2), log(c2 ,2) ,log(c3, 2), log(c4 ,2), log(c5 ,2) from t1")
- self.check_result_auto_log1( "select c1, c2, c3 , c4, c5 from t1", "select log(c1 ,1), log(c2 ,1) ,log(c3, 1), log(c4 ,1), log(c5 ,1) from t1")
- self.check_result_auto_log__10( "select c1, c2, c3 , c4, c5 from t1", "select log(c1 ,-10), log(c2 ,-10) ,log(c3, -10), log(c4 ,-10), log(c5 ,-10) from t1")
+ self.check_result_auto_log( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) from {dbname}.t1")
+ self.check_result_auto_log2( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,2), log(c2 ,2) ,log(c3, 2), log(c4 ,2), log(c5 ,2) from {dbname}.t1")
+ self.check_result_auto_log__10( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,1), log(c2 ,1) ,log(c3, 1), log(c4 ,1), log(c5 ,1) from {dbname}.t1")
+ self.check_result_auto_log__10( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,-10), log(c2 ,-10) ,log(c3, -10), log(c4 ,-10), log(c5 ,-10) from {dbname}.t1")
# used for sub table
- tdSql.query("select c1 ,log(c1 ,3) from ct1")
+ tdSql.query(f"select c1 ,log(c1 ,3) from {dbname}.ct1")
tdSql.checkData(0, 1, 1.892789261)
tdSql.checkData(1 , 1, 1.771243749)
tdSql.checkData(3 , 1, 1.464973521)
@@ -354,19 +342,19 @@ class TDTestCase:
# test bug fix for log(c1,c2)
- tdSql.query("select c1, c2 ,log(c1,c2) from ct1")
+ tdSql.query(f"select c1, c2 ,log(c1,c2) from {dbname}.ct1")
tdSql.checkData(0 , 2, 0.182485070)
tdSql.checkData(1 , 2, 0.172791608)
tdSql.checkData(2 , 2, 0.161311499)
tdSql.checkData(3 , 2, 0.147315235)
tdSql.checkData(4 , 2, None)
- self.check_result_auto_log( "select c1, c2, c3 , c4, c5 from ct1", "select log(c1), log(c2) ,log(c3), log(c4), log(c5) from ct1")
- self.check_result_auto_log2( "select c1, c2, c3 , c4, c5 from ct1", "select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) from ct1")
- self.check_result_auto_log__10( "select c1, c2, c3 , c4, c5 from ct1", "select log(c1,-10), log(c2,-10) ,log(c3,-10), log(c4,-10), log(c5,-10) from ct1")
+ self.check_result_auto_log( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) from {dbname}.ct1")
+ self.check_result_auto_log2( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) from {dbname}.ct1")
+ self.check_result_auto_log__10( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1,-10), log(c2,-10) ,log(c3,-10), log(c4,-10), log(c5,-10) from {dbname}.ct1")
# nest query for log functions
- tdSql.query("select c1 , log(c1,3) ,log(log(c1,3),3) , log(log(log(c1,3),3),3) from ct1;")
+ tdSql.query(f"select c1 , log(c1,3) ,log(log(c1,3),3) , log(log(log(c1,3),3),3) from {dbname}.ct1;")
tdSql.checkData(0 , 0 , 8)
tdSql.checkData(0 , 1 , 1.892789261)
tdSql.checkData(0 , 2 , 0.580779541)
@@ -384,36 +372,36 @@ class TDTestCase:
# # used for stable table
- tdSql.query("select log(c1, 2) from stb1")
+ tdSql.query(f"select log(c1, 2) from {dbname}.stb1")
tdSql.checkRows(25)
# used for not exists table
- tdSql.error("select log(c1, 2) from stbbb1")
- tdSql.error("select log(c1, 2) from tbname")
- tdSql.error("select log(c1, 2) from ct5")
+ tdSql.error(f"select log(c1, 2) from {dbname}.stbbb1")
+ tdSql.error(f"select log(c1, 2) from {dbname}.tbname")
+ tdSql.error(f"select log(c1, 2) from {dbname}.ct5")
# mix with common col
- tdSql.query("select c1, log(c1 ,2) from ct1")
+ tdSql.query(f"select c1, log(c1 ,2) from {dbname}.ct1")
tdSql.checkData(0 , 0 ,8)
tdSql.checkData(0 , 1 ,3.000000000)
tdSql.checkData(4 , 0 ,0)
tdSql.checkData(4 , 1 ,None)
- tdSql.query("select c1, log(c1,2) from ct4")
+ tdSql.query(f"select c1, log(c1,2) from {dbname}.ct4")
tdSql.checkData(0 , 0 , None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(4 , 0 ,5)
tdSql.checkData(4 , 1 ,2.321928095)
tdSql.checkData(5 , 0 ,None)
tdSql.checkData(5 , 1 ,None)
- tdSql.query("select c1, log(c1 ,2 ) from ct4 ")
+ tdSql.query(f"select c1, log(c1 ,2 ) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(4 , 0 ,5)
tdSql.checkData(4 , 1 ,2.321928095)
# mix with common functions
- tdSql.query("select c1, log(c1 ,2),c5, log(c5 ,2) from ct4 ")
+ tdSql.query(f"select c1, log(c1 ,2),c5, log(c5 ,2) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
@@ -424,34 +412,34 @@ class TDTestCase:
tdSql.checkData(3 , 2 ,6.66000)
tdSql.checkData(3 , 3 ,2.735522144)
- tdSql.query("select c1, log(c1,1),c5, floor(c5 ) from stb1 ")
+ tdSql.query(f"select c1, log(c1,1),c5, floor(c5 ) from {dbname}.stb1 ")
# # mix with agg functions , not support
- tdSql.error("select c1, log(c1 ,2),c5, count(c5) from stb1 ")
- tdSql.error("select c1, log(c1 ,2),c5, count(c5) from ct1 ")
- tdSql.error("select log(c1 ,2), count(c5) from stb1 ")
- tdSql.error("select log(c1 ,2), count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from stb1 ")
+ tdSql.error(f"select c1, log(c1 ,2),c5, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1, log(c1 ,2),c5, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select log(c1 ,2), count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select log(c1 ,2), count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ")
# agg functions mix with agg functions
- tdSql.query("select max(c5), count(c5) from stb1")
- tdSql.query("select max(c5), count(c5) from ct1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1")
# bug fix for count
- tdSql.query("select count(c1) from ct4 ")
+ tdSql.query(f"select count(c1) from {dbname}.ct4 ")
tdSql.checkData(0,0,9)
- tdSql.query("select count(*) from ct4 ")
+ tdSql.query(f"select count(*) from {dbname}.ct4 ")
tdSql.checkData(0,0,12)
- tdSql.query("select count(c1) from stb1 ")
+ tdSql.query(f"select count(c1) from {dbname}.stb1 ")
tdSql.checkData(0,0,22)
- tdSql.query("select count(*) from stb1 ")
+ tdSql.query(f"select count(*) from {dbname}.stb1 ")
tdSql.checkData(0,0,25)
# # bug fix for compute
- tdSql.query("select c1, log(c1 ,2) -0 ,log(c1-4 ,2)-0 from ct4 ")
+ tdSql.query(f"select c1, log(c1 ,2) -0 ,log(c1-4 ,2)-0 from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -459,7 +447,7 @@ class TDTestCase:
tdSql.checkData(1, 1, 3.000000000)
tdSql.checkData(1, 2, 2.000000000)
- tdSql.query(" select c1, log(c1 ,2) -0 ,log(c1-0.1 ,2)-0.1 from ct4")
+ tdSql.query(f"select c1, log(c1 ,2) -0 ,log(c1-0.1 ,2)-0.1 from {dbname}.ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -467,88 +455,87 @@ class TDTestCase:
tdSql.checkData(1, 1, 3.000000000)
tdSql.checkData(1, 2, 2.881852653)
- tdSql.query("select c1, log(c1, -10), c2, log(c2, -10), c3, log(c3, -10) from ct1")
+ tdSql.query(f"select c1, log(c1, -10), c2, log(c2, -10), c3, log(c3, -10) from {dbname}.ct1")
- def test_big_number(self):
+ def test_big_number(self, dbname="db"):
- tdSql.query("select c1, log(c1, 100000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, log(c1, 100000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(0, 1, 0.112886248)
tdSql.checkData(1, 1, 0.105637255)
tdSql.checkData(4, 1, None)
- tdSql.query("select c1, log(c1, 10000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, log(c1, 10000000000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(0, 1, 0.069468461)
tdSql.checkData(1, 1, 0.065007542)
tdSql.checkData(4, 1, None)
- tdSql.query("select c1, log(c1, 10000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, log(c1, 10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, log(c1, 10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, log(c1, 10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(0, 1, 0.036123599)
tdSql.checkData(1, 1, 0.033803922)
tdSql.checkData(4, 1, None)
- tdSql.query("select c1, log(c1, 10000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, log(c1, 10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, log(c1, 10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, log(c1, 10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000000000000.0 is a double value
tdSql.checkData(0, 1, 0.026561470)
tdSql.checkData(1, 1, 0.024855825)
tdSql.checkData(4, 1, None)
- tdSql.query("select c1, log(c1, 10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, log(c1, 10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, log(c1, 10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, log(c1, 10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000000000000000000.0 is a double value
tdSql.checkData(0, 1, 0.022577250)
tdSql.checkData(1, 1, 0.021127451)
tdSql.checkData(4, 1, None)
- tdSql.query("select c1, log(c1, 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, log(c1, 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
- def log_base_test(self):
+ def log_base_test(self, dbname="db"):
# base is an regular number ,int or double
- tdSql.query("select c1, log(c1, 2) from ct1")
+ tdSql.query(f"select c1, log(c1, 2) from {dbname}.ct1")
tdSql.checkData(0, 1,3.000000000)
- tdSql.query("select c1, log(c1, 2.0) from ct1")
+ tdSql.query(f"select c1, log(c1, 2.0) from {dbname}.ct1")
tdSql.checkData(0, 1, 3.000000000)
- tdSql.query("select c1, log(1, 2.0) from ct1")
+ tdSql.query(f"select c1, log(1, 2.0) from {dbname}.ct1")
tdSql.checkData(0, 1, 0.000000000)
tdSql.checkRows(13)
# # bug for compute in functions
- # tdSql.query("select c1, abs(1/0) from ct1")
+ # tdSql.query(f"select c1, abs(1/0) from {dbname}.ct1")
# tdSql.checkData(0, 0, 8)
# tdSql.checkData(0, 1, 1)
- tdSql.query("select c1, log(1, 2.0) from ct1")
+ tdSql.query(f"select c1, log(1, 2.0) from {dbname}.ct1")
tdSql.checkData(0, 1, 0.000000000)
tdSql.checkRows(13)
# two cols start log(x,y)
- tdSql.query("select c1,c2, log(c1,c2) from ct1")
+ tdSql.query(f"select c1,c2, log(c1,c2) from {dbname}.ct1")
tdSql.checkData(0, 2, 0.182485070)
tdSql.checkData(1, 2, 0.172791608)
tdSql.checkData(4, 2, None)
- tdSql.query("select c1,c2, log(c2,c1) from ct1")
+ tdSql.query(f"select c1,c2, log(c2,c1) from {dbname}.ct1")
tdSql.checkData(0, 2, 5.479900349)
tdSql.checkData(1, 2, 5.787318105)
tdSql.checkData(4, 2, None)
- tdSql.query("select c1, log(2.0 , c1) from ct1")
+ tdSql.query(f"select c1, log(2.0 , c1) from {dbname}.ct1")
tdSql.checkData(0, 1, 0.333333333)
tdSql.checkData(1, 1, 0.356207187)
tdSql.checkData(4, 1, None)
- tdSql.query("select c1, log(2.0 , ceil(abs(c1))) from ct1")
+ tdSql.query(f"select c1, log(2.0 , ceil(abs(c1))) from {dbname}.ct1")
tdSql.checkData(0, 1, 0.333333333)
tdSql.checkData(1, 1, 0.356207187)
tdSql.checkData(4, 1, None)
- def abs_func_filter(self):
- tdSql.execute("use db")
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1>5 ")
+ def abs_func_filter(self, dbname="db"):
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1>5 ")
tdSql.checkRows(3)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,8.000000000)
@@ -556,7 +543,7 @@ class TDTestCase:
tdSql.checkData(0,3,7.900000000)
tdSql.checkData(0,4,3.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -564,15 +551,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,2.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1=5 ")
- tdSql.checkRows(1)
- tdSql.checkData(0,0,5)
- tdSql.checkData(0,1,5.000000000)
- tdSql.checkData(0,2,5.000000000)
- tdSql.checkData(0,3,4.900000000)
- tdSql.checkData(0,4,2.000000000)
-
- tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1>log(c1,2) limit 1 ")
+ tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1>log(c1,2) limit 1 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,88888)
@@ -581,46 +560,43 @@ class TDTestCase:
tdSql.checkData(0,4,7.900000000)
tdSql.checkData(0,5,3.000000000)
- def log_Arithmetic(self):
- pass
- def check_boundary_values(self):
+ def check_boundary_values(self, dbname="bound_test"):
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
time.sleep(3)
- tdSql.execute("use bound_test")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- self.check_result_auto_log( "select c1, c2, c3 , c4, c5 ,c6 from sub1_bound ", "select log(c1), log(c2) ,log(c3), log(c4), log(c5) ,log(c6) from sub1_bound")
- self.check_result_auto_log2( "select c1, c2, c3 , c4, c5 ,c6 from sub1_bound ", "select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) ,log(c6,2) from sub1_bound")
- self.check_result_auto_log__10( "select c1, c2, c3 , c4, c5 ,c6 from sub1_bound ", "select log(c1,-10), log(c2,-10) ,log(c3,-10), log(c4,-10), log(c5,-10) ,log(c6,-10) from sub1_bound")
+ self.check_result_auto_log( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) ,log(c6) from {dbname}.sub1_bound")
+ self.check_result_auto_log2( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) ,log(c6,2) from {dbname}.sub1_bound")
+ self.check_result_auto_log__10( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1,-10), log(c2,-10) ,log(c3,-10), log(c4,-10), log(c5,-10) ,log(c6,-10) from {dbname}.sub1_bound")
- self.check_result_auto_log2( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select log(c1,2), log(c2,2) ,log(c3,2), log(c3,2), log(c2,2) ,log(c1,2) from sub1_bound")
- self.check_result_auto_log( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select log(c1), log(c2) ,log(c3), log(c3), log(c2) ,log(c1) from sub1_bound")
+ self.check_result_auto_log2( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c3,2), log(c2,2) ,log(c1,2) from {dbname}.sub1_bound")
+ self.check_result_auto_log( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select log(c1), log(c2) ,log(c3), log(c3), log(c2) ,log(c1) from {dbname}.sub1_bound")
- self.check_result_auto_log2("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select log(abs(c1) ,2) from sub1_bound" )
+ self.check_result_auto_log2(f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select log(abs(c1) ,2) from {dbname}.sub1_bound" )
# check basic elem for table per row
- tdSql.query("select log(abs(c1),2) ,log(abs(c2),2) , log(abs(c3),2) , log(abs(c4),2), log(abs(c5),2), log(abs(c6),2) from sub1_bound ")
+ tdSql.query(f"select log(abs(c1),2) ,log(abs(c2),2) , log(abs(c3),2) , log(abs(c4),2), log(abs(c5),2), log(abs(c6),2) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.log(2147483647,2))
tdSql.checkData(0,1,math.log(9223372036854775807 ,2))
tdSql.checkData(0,2,math.log(32767,2))
@@ -641,7 +617,7 @@ class TDTestCase:
tdSql.checkData(3,5,math.log(169999999999999993883079578865998174333346074304075874502773119193537729178160565864330091787584707988572262467983188919169916105593357174268369962062473635296474636515660464935663040684957844303524367815028553272712298986386310828644513212353921123253311675499856875650512437415429217994623324794855339589632.000000000,2))
# check basic elem for table per row
- tdSql.query("select log(abs(c1)) ,log(abs(c2)) , log(abs(c3)) , log(abs(c4)), log(abs(c5)), log(abs(c6)) from sub1_bound ")
+ tdSql.query(f"select log(abs(c1)) ,log(abs(c2)) , log(abs(c3)) , log(abs(c4)), log(abs(c5)), log(abs(c6)) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.log(2147483647))
tdSql.checkData(0,1,math.log(9223372036854775807))
tdSql.checkData(0,2,math.log(32767))
@@ -661,28 +637,25 @@ class TDTestCase:
tdSql.checkData(3,4,math.log(339999995214436424907732413799364296704.00000))
tdSql.checkData(3,5,math.log(169999999999999993883079578865998174333346074304075874502773119193537729178160565864330091787584707988572262467983188919169916105593357174268369962062473635296474636515660464935663040684957844303524367815028553272712298986386310828644513212353921123253311675499856875650512437415429217994623324794855339589632.000000000))
-
-
# check + - * / in functions
- tdSql.query("select log(abs(c1+1) ,2) ,log(abs(c2),2) , log(abs(c3*1),2) , log(abs(c4/2),2), log(abs(c5) ,2)/2, log(abs(c6) ,2) from sub1_bound ")
+ tdSql.query(f"select log(abs(c1+1) ,2) ,log(abs(c2),2) , log(abs(c3*1),2) , log(abs(c4/2),2), log(abs(c5) ,2)/2, log(abs(c6) ,2) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.log(2147483648.000000000,2))
tdSql.checkData(0,1,math.log(9223372036854775807,2))
tdSql.checkData(0,2,math.log(32767.000000000,2))
tdSql.checkData(0,3,math.log(63.500000000,2))
tdSql.checkData(0,4,63.999401166)
- def support_super_table_test(self):
- tdSql.execute(" use db ")
- self.check_result_auto_log2( " select c5 from stb1 order by ts " , "select log(c5,2) from stb1 order by ts" )
- self.check_result_auto_log2( " select c5 from stb1 order by tbname " , "select log(c5,2) from stb1 order by tbname" )
- self.check_result_auto_log2( " select c5 from stb1 where c1 > 0 order by tbname " , "select log(c5,2) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_log2( " select c5 from stb1 where c1 > 0 order by tbname " , "select log(c5,2) from stb1 where c1 > 0 order by tbname" )
+ def support_super_table_test(self, dbname="db"):
- self.check_result_auto_log2( " select t1,c5 from stb1 order by ts " , "select log(t1,2), log(c5,2) from stb1 order by ts" )
- self.check_result_auto_log2( " select t1,c5 from stb1 order by tbname " , "select log(t1,2) ,log(c5,2) from stb1 order by tbname" )
- self.check_result_auto_log2( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select log(t1,2) ,log(c5,2) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_log2( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select log(t1,2) , log(c5,2) from stb1 where c1 > 0 order by tbname" )
- pass
+ self.check_result_auto_log2( f"select c5 from {dbname}.stb1 order by ts " , f"select log(c5,2) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_log2( f"select c5 from {dbname}.stb1 order by tbname " , f"select log(c5,2) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_log2( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_log2( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+
+ self.check_result_auto_log2( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select log(t1,2), log(c5,2) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_log2( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select log(t1,2) ,log(c5,2) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_log2( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(t1,2) ,log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_log2( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(t1,2) , log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh
index e9331de6bf..e9fbba86f9 100755
--- a/tests/system-test/fulltest.sh
+++ b/tests/system-test/fulltest.sh
@@ -110,15 +110,20 @@ python3 ./test.py -f 2-query/histogram.py
python3 ./test.py -f 2-query/histogram.py -R
python3 ./test.py -f 2-query/hyperloglog.py
python3 ./test.py -f 2-query/hyperloglog.py -R
+python3 ./test.py -f 2-query/interp.py
+python3 ./test.py -f 2-query/interp.py -R
python3 ./test.py -f 2-query/irate.py
# python3 ./test.py -f 2-query/irate.py -R
python3 ./test.py -f 2-query/join.py
python3 ./test.py -f 2-query/join.py -R
-
-python3 ./test.py -f 2-query/interp.py
-python3 ./test.py -f 2-query/interp.py -R
-
-
+python3 ./test.py -f 2-query/last_row.py
+python3 ./test.py -f 2-query/last_row.py -R
+python3 ./test.py -f 2-query/last.py
+python3 ./test.py -f 2-query/last.py -R
+python3 ./test.py -f 2-query/leastsquares.py
+python3 ./test.py -f 2-query/leastsquares.py -R
+python3 ./test.py -f 2-query/length.py
+python3 ./test.py -f 2-query/length.py -R
python3 ./test.py -f 1-insert/update_data.py
@@ -127,7 +132,6 @@ python3 ./test.py -f 1-insert/delete_data.py
python3 ./test.py -f 2-query/varchar.py
python3 ./test.py -f 2-query/ltrim.py
python3 ./test.py -f 2-query/rtrim.py
-python3 ./test.py -f 2-query/length.py
python3 ./test.py -f 2-query/upper.py
python3 ./test.py -f 2-query/lower.py
python3 ./test.py -f 2-query/join2.py
@@ -136,7 +140,6 @@ python3 ./test.py -f 2-query/union.py
python3 ./test.py -f 2-query/union1.py
python3 ./test.py -f 2-query/concat2.py
python3 ./test.py -f 2-query/spread.py
-python3 ./test.py -f 2-query/leastsquares.py
python3 ./test.py -f 2-query/timezone.py
@@ -144,7 +147,6 @@ python3 ./test.py -f 2-query/Now.py
python3 ./test.py -f 2-query/Today.py
python3 ./test.py -f 2-query/max.py
python3 ./test.py -f 2-query/min.py
-python3 ./test.py -f 2-query/last.py
python3 ./test.py -f 2-query/To_iso8601.py
python3 ./test.py -f 2-query/To_unixtimestamp.py
python3 ./test.py -f 2-query/timetruncate.py
@@ -178,7 +180,6 @@ python3 ./test.py -f 2-query/ttl_comment.py
python3 ./test.py -f 2-query/twa.py
python3 ./test.py -f 2-query/queryQnode.py
python3 ./test.py -f 2-query/max_partition.py
-python3 ./test.py -f 2-query/last_row.py
python3 ./test.py -f 2-query/tsbsQuery.py
python3 ./test.py -f 6-cluster/5dnode1mnode.py
diff --git a/tests/system-test/test.py b/tests/system-test/test.py
index 5dc6139410..2f482e4277 100644
--- a/tests/system-test/test.py
+++ b/tests/system-test/test.py
@@ -194,7 +194,7 @@ if __name__ == "__main__":
processID = subprocess.check_output(psCmd, shell=True)
for port in range(6030, 6041):
- usePortPID = "lsof -i tcp:%d | grep LISTEn | awk '{print $2}'" % port
+ usePortPID = "lsof -i tcp:%d | grep LISTEN | awk '{print $2}'" % port
processID = subprocess.check_output(usePortPID, shell=True)
if processID:
@@ -206,11 +206,13 @@ if __name__ == "__main__":
time.sleep(2)
if restful:
- toBeKilled = "taosadapter"
+ toBeKilled = "taosadapt"
- killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled
+ # killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled
+ killCmd = f"pkill {toBeKilled}"
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
+ # psCmd = f"pgrep {toBeKilled}"
processID = subprocess.check_output(psCmd, shell=True)
while(processID):
@@ -218,15 +220,15 @@ if __name__ == "__main__":
time.sleep(1)
processID = subprocess.check_output(psCmd, shell=True)
- for port in range(6030, 6041):
- usePortPID = "lsof -i tcp:%d | grep LISTEn | awk '{print $2}'" % port
- processID = subprocess.check_output(usePortPID, shell=True)
+ port = 6041
+ usePortPID = f"lsof -i tcp:{port} | grep LISTEN | awk '{{print $2}}'"
+ processID = subprocess.check_output(usePortPID, shell=True)
- if processID:
- killCmd = "kill -TERM %s" % processID
- os.system(killCmd)
- fuserCmd = "fuser -k -n tcp %d" % port
- os.system(fuserCmd)
+ if processID:
+ killCmd = f"kill -TERM {processID}"
+ os.system(killCmd)
+ fuserCmd = f"fuser -k -n tcp {port}"
+ os.system(fuserCmd)
tdLog.info('stop taosadapter')
diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt
index 5751c347e3..25d6e33175 100644
--- a/tools/CMakeLists.txt
+++ b/tools/CMakeLists.txt
@@ -99,11 +99,20 @@ ELSE ()
MESSAGE("CURRENT SOURCE DIR ${CMAKE_CURRENT_SOURCE_DIR}")
IF (TD_LINUX)
include(ExternalProject)
+ set(_upx_prefix "$ENV{HOME}/.taos/externals/upx")
+ ExternalProject_Add(upx
+ PREFIX "${_upx_prefix}"
+ URL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-${PLATFORM_ARCH_STR}_linux.tar.xz
+ CONFIGURE_COMMAND cmake -E true
+ BUILD_COMMAND cmake -E true
+ INSTALL_COMMAND cmake -E true
+ )
+
ExternalProject_Add(taosadapter
PREFIX "taosadapter"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter
BUILD_ALWAYS off
- DEPENDS taos
+ DEPENDS taos upx
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND cmake -E echo "taosadapter no need cmake to config"
PATCH_COMMAND
@@ -112,7 +121,7 @@ ELSE ()
COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
INSTALL_COMMAND
- COMMAND wget -nc https://github.com/upx/upx/releases/download/v3.96/upx-3.96-${PLATFORM_ARCH_STR}_linux.tar.xz -O $ENV{HOME}/upx.tar.xz && tar -xvJf $ENV{HOME}/upx.tar.xz -C $ENV{HOME}/ --strip-components 1 > /dev/null && $ENV{HOME}/upx taosadapter || :
+ COMMAND ${_upx_prefix}/src/upx/upx taosadapter
COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin
COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/
COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/
@@ -140,6 +149,7 @@ ELSE ()
COMMAND cmake -E copy ./taosadapter.service ${CMAKE_BINARY_DIR}/test/cfg/
COMMAND cmake -E copy taosadapter-debug ${CMAKE_BINARY_DIR}/build/bin
)
+ unset(_upx_prefix)
ELSE ()
MESSAGE("${Yellow} Windows system still use original embedded httpd ${ColourReset}")
ENDIF ()