Merge remote-tracking branch 'origin/3.0' into feat/caseWhen

commit a13f553868

README-CN.md

@@ -39,9 +39,9 @@ TDengine is an open source, high-performance, cloud native time-series database (Time-Series

# Building

-TDengine can currently be installed and run on Linux, Windows and other platforms. Applications on any OS can also use the RESTful interface of taosAdapter to connect to the taosd server. X64/ARM64 CPUs are supported, and MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures will be supported later.
+TDengine can currently be installed and run on Linux, Windows, macOS and other platforms. Applications on any OS can also use the RESTful interface of taosAdapter to connect to the taosd server. X64/ARM64 CPUs are supported, and MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures will be supported later.

-You can choose to install from source code, from a [container](https://docs.taosdata.com/get-started/docker/), from an [installation package](https://docs.taosdata.com/get-started/package/) or via [Kubenetes](https://docs.taosdata.com/deployment/k8s/). This quick guide only covers installing from source.
+You can choose to install from source code, from a [container](https://docs.taosdata.com/get-started/docker/), from an [installation package](https://docs.taosdata.com/get-started/package/) or via [Kubernetes](https://docs.taosdata.com/deployment/k8s/). This quick guide only covers installing from source.

TDengine also provides a set of auxiliary tools, taosTools, which currently contains taosBenchmark (formerly named taosdemo) and taosdump. By default TDengine is built without taosTools; you can pass `cmake .. -DBUILD_TOOLS=true` when building TDengine to build taosTools at the same time.
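A minimal sketch of that build, assuming the in-tree `debug` build directory used later in this README:

```bash
# Clone and build TDengine together with taosTools (taosBenchmark, taosdump)
git clone https://github.com/taosdata/TDengine.git
cd TDengine && mkdir -p debug && cd debug
cmake .. -DBUILD_TOOLS=true
make
```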
@@ -104,6 +104,12 @@ sudo yum install -y zlib-devel xz-devel snappy-devel jansson jansson-devel pkgco
sudo yum config-manager --set-enabled Powertools
```

+### macOS
+
+```
+sudo brew install argp-standalone pkgconfig
+```
+
### Setting up the golang development environment

TDengine contains several components developed in Go, such as taosAdapter. Please refer to the official documentation at golang.org to set up the Go development environment.
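As a sketch of such a setup (the module proxy is an optional assumption, not something this README mandates):

```bash
# Enable Go modules and, optionally, a module proxy before building taosAdapter
go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.cn,direct
go version
```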
@@ -210,14 +216,14 @@ cmake .. -G "NMake Makefiles"
nmake
```

-<!-- ### macOS
+### macOS

-Install the Xcode command line tools and cmake. XCode 11.4+ is required on Catalina and Big Sur.
+Install the XCode command line tools and cmake. XCode 11.4+ is required on Catalina and Big Sur.

```bash
mkdir debug && cd debug
cmake .. && cmake --build .
-``` -->
+```

# Installing

@@ -263,6 +269,24 @@ nmake install
sudo make install
```

You can find more information about the directories and files generated on your system in [Directory and Files](https://docs.taosdata.com/reference/directory/).

+Installing from source code also configures service management for TDengine. You can also choose to [install from packages](https://docs.taosdata.com/get-started/package/) instead.
+
+After a successful installation, you can double-click the TDengine icon in Applications to start the service, or start the TDengine service from a terminal:
+
+```bash
+launchctl start taosd
+```
+
+You can use the TDengine CLI to connect to the TDengine service. In a terminal, enter:
+
+```bash
+taos
+```
+
+If the TDengine CLI connects to the service successfully, a welcome message and version information are printed. Otherwise, an error message is shown.

## Quick Run

If you do not want to run TDengine as a service, you can run it directly in a terminal: after the build completes, execute the following command (on Windows the generated executable has an .exe suffix, e.g. it is named taosd.exe):
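A sketch of that quick-run path, assuming the `debug` build directory and the bundled `test/cfg` configuration used in TDengine's own examples (adjust paths to your tree):

```bash
# Run the freshly built server directly instead of installing it as a service
cd debug
./build/bin/taosd -c test/cfg
```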
README.md

@@ -15,11 +15,11 @@
[](https://coveralls.io/github/taosdata/TDengine?branch=develop)
[](https://bestpractices.coreinfrastructure.org/projects/4201)

-English | [简体中文](README-CN.md) | [Learn more about TSDB](https://tdengine.com/tsdb/)
+English | [简体中文](README-CN.md) | [TDengine Cloud](https://cloud.tdengine.com) | [Learn more about TSDB](https://tdengine.com/tsdb/)

# What is TDengine?

-TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other time-seires databases with the following advantages:
+TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other time-series databases with the following advantages:

- **[High Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.

@@ -33,7 +33,7 @@ TDengine is an open source, high-performance, cloud native [time-series database
- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered 18.8k stars on GitHub. There is an active developer community, and over 139k running instances worldwide.

-For a full list of TDengine competitive advantages, please [check here](https://tdengine.com/tdengine/)
+For a full list of TDengine competitive advantages, please [check here](https://tdengine.com/tdengine/). The easiest way to experience TDengine is through [TDengine Cloud](https://cloud.tdengine.com).

# Documentation

@@ -41,7 +41,7 @@ For user manual, system design and architecture, please refer to [TDengine Docum
# Building

-At the moment, TDengine server supports running on Linux and Windows systems. Any application can also choose the RESTful interface provided by taosAdapter to connect the taosd service . TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future.
+At the moment, TDengine server supports running on Linux/Windows/macOS systems. Any application can also choose the RESTful interface provided by taosAdapter to connect the taosd service . TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future.

You can choose to install through source code, [container](https://docs.tdengine.com/get-started/docker/), [installation package](https://docs.tdengine.com/get-started/package/) or [Kubernetes](https://docs.tdengine.com/deployment/k8s/). This quick guide only applies to installing from source.

@@ -105,6 +105,12 @@ If the PowerTools installation fails, you can try to use:
sudo yum config-manager --set-enabled powertools
```

+### macOS
+
+```
+sudo brew install argp-standalone pkgconfig
+```
+
### Setup golang environment

TDengine includes a few components like taosAdapter developed by Go language. Please refer to golang.org official documentation for golang environment setup.
@@ -213,14 +219,14 @@ cmake .. -G "NMake Makefiles"
nmake
```

-<!-- ### On macOS platform
+### On macOS platform

Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur.

```shell
mkdir debug && cd debug
cmake .. && cmake --build .
-``` -->
+```

# Installing

@@ -258,7 +264,7 @@ After building successfully, TDengine can be installed by:
nmake install
```

<!--

## On macOS platform

After building successfully, TDengine can be installed by:

@@ -266,7 +272,24 @@ After building successfully, TDengine can be installed by:
```bash
sudo make install
```
-->

Users can find more information about directories installed on the system in the [directory and files](https://docs.tdengine.com/reference/directory/) section.

+Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.tdengine.com/get-started/package/) for it.
+
+To start the service after installation, double-click the /applications/TDengine to start the program, or in a terminal, use:
+
+```bash
+launchctl start taosd
+```
+
+Then users can use the TDengine CLI to connect the TDengine server. In a terminal, use:
+
+```bash
+taos
+```
+
+If TDengine CLI connects the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown.

## Quick Run
@@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
  GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
- GIT_TAG be729ab
+ GIT_TAG cc43ef0
  SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
  BINARY_DIR ""
  #BUILD_IN_SOURCE TRUE
@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
  GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
- GIT_TAG 70f5a1c
+ GIT_TAG d58230c
  SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
  BINARY_DIR ""
  #BUILD_IN_SOURCE TRUE
@@ -37,6 +37,11 @@ if(${BUILD_WITH_ICONV})
  cat("${TD_SUPPORT_DIR}/iconv_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif()

+# jemalloc
+if(${JEMALLOC_ENABLED})
+  cat("${TD_SUPPORT_DIR}/jemalloc_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
+endif()
+
# msvc regex
if(${BUILD_MSVCREGEX})
  cat("${TD_SUPPORT_DIR}/msvcregex_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@@ -258,6 +263,19 @@ if(${BUILD_PTHREAD})
  target_link_libraries(pthread INTERFACE libpthreadVC3)
endif()

+# jemalloc
+if(${JEMALLOC_ENABLED})
+  include(ExternalProject)
+  ExternalProject_Add(jemalloc
+    PREFIX "jemalloc"
+    SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc
+    BUILD_IN_SOURCE 1
+    CONFIGURE_COMMAND ./autogen.sh COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --disable-initial-exec-tls
+    BUILD_COMMAND ${MAKE}
+  )
+  INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include)
+endif()
+
# crashdump
if(${BUILD_CRASHDUMP})
  add_executable(dumper "crashdump/dumper/dumper.c")
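A sketch of how the new `JEMALLOC_ENABLED` switch above would be exercised at configure time (whether the option is meant to be passed exactly like this on the cmake command line is an assumption):

```bash
# Configure and build TDengine with the optional jemalloc contrib enabled
mkdir -p debug && cd debug
cmake .. -DJEMALLOC_ENABLED=true
make
```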
@@ -23,8 +23,8 @@ The major features are listed below:
4. [Stream Processing](../develop/stream/): Not only is the continuous query is supported, but TDengine also supports event driven stream processing, so Flink or Spark is not needed for time-series data processing.
5. [Data Subscription](../develop/tmq/): Application can subscribe a table or a set of tables. API is the same as Kafka, but you can specify filter conditions.
6. Visualization
-   - Supports seamless integration with [Grafana](../third-party/grafana/) for visualization.
-   - Supports seamless integration with Google Data Studio.
+   - Supports seamless integration with [Grafana](../third-party/grafana/).
+   - Supports seamless integration with [Google Data Studio](../third-party/google-data-studio/).
7. Cluster
   - Supports [cluster](../deployment/) with the capability of increasing processing power by adding more nodes.
   - Supports [deployment on Kubernetes](../deployment/k8s/).

@@ -33,7 +33,7 @@ The major features are listed below:
   - Provides [monitoring](../operation/monitor) on running instances of TDengine.
   - Provides many ways to [import](../operation/import) and [export](../operation/export) data.
9. Tools
-   - Provides an interactive [Command-line Interface (CLI)](../reference/taos-shell) for management, maintenance and ad-hoc queries.
+   - Provides an interactive [Command Line Interface (CLI)](../reference/taos-shell) for management, maintenance and ad-hoc queries.
   - Provides a tool [taosBenchmark](../reference/taosbenchmark/) for testing the performance of TDengine.
10. Programming
   - Provides [connectors](../reference/connector/) for [C/C++](../reference/connector/cpp), [Java](../reference/connector/java), [Python](../reference/connector/python), [Go](../reference/connector/go), [Rust](../reference/connector/rust), [Node.js](../reference/connector/node) and other programming languages.
@@ -11,7 +11,19 @@ This document describes how to install TDengine in a Docker container and perfor
## Run TDengine

-If Docker is already installed on your computer, run the following command:
+If Docker is already installed on your computer, pull the latest TDengine Docker container image:
+
+```shell
+docker pull tdengine/tdengine:latest
+```
+
+Or the container image of specific version:
+
+```shell
+docker pull tdengine/tdengine:3.0.1.4
+```
+
+And then run the following command:

```shell
docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
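# A quick sanity check, assuming the container started by the `docker run` above:
# confirm it is up and open the TDengine CLI inside it (substitute the real
# container ID printed by `docker run` for <container-id>).
docker ps | grep tdengine
docker exec -it <container-id> taos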
@@ -50,7 +62,7 @@ taos>

After your TDengine Server is running normally, you can run the taosBenchmark utility to test its performance:

-Start TDengine service and execute `taosBenchmark` (formerly named `taosdemo`) in a Linux or Windows terminal.
+Start TDengine service and execute `taosBenchmark` (formerly named `taosdemo`) in a terminal.

```bash
taosBenchmark
@@ -7,7 +7,7 @@ import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
import PkgListV3 from "/components/PkgListV3";

-This document describes how to install TDengine on Linux and Windows and perform queries and inserts.
+This document describes how to install TDengine on Linux/Windows/macOS and perform queries and inserts.

- The easiest way to explore TDengine is through [TDengine Cloud](http://cloud.tdengine.com).
- To get started with TDengine on Docker, see [Quick Install on Docker](../../get-started/docker).

@@ -17,7 +17,7 @@ The full package of TDengine includes the TDengine Server (`taosd`), TDengine Cl
The standard server installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark`, and sample code. You can also download the Lite package that includes only `taosd` and the C/C++ connector.

-The TDengine Community Edition is released as Deb and RPM packages. The Deb package can be installed on Debian, Ubuntu, and derivative systems. The RPM package can be installed on CentOS, RHEL, SUSE, and derivative systems. A .tar.gz package is also provided for enterprise customers, and you can install TDengine over `apt-get` as well. The .tar.tz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the Deb or RPM package, download and install taosTools separately. TDengine can also be installed on 64-bit Windows.
+The TDengine Community Edition is released as Deb and RPM packages. The Deb package can be installed on Debian, Ubuntu, and derivative systems. The RPM package can be installed on CentOS, RHEL, SUSE, and derivative systems. A .tar.gz package is also provided for enterprise customers, and you can install TDengine over `apt-get` as well. The .tar.tz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the Deb or RPM package, download and install taosTools separately. TDengine can also be installed on x64 Windows and x64/m1 macOS.

## Installation
@@ -111,11 +111,18 @@ Note: TDengine only supports Windows Server 2016/2019 and Windows 10/11 on the W
<PkgListV3 type={3}/>
2. Run the downloaded package to install TDengine.

</TabItem>
+<TabItem label="macOS" value="macos">
+
+1. Download the macOS installation package.
+<PkgListV3 type={7}/>
+2. Run the downloaded package to install TDengine.
+
+</TabItem>
</Tabs>

:::info
-For information about TDengine releases, see [Release History](../../releases/tdengine).
+For information about TDengine other releases, check [Release History](../../releases/tdengine).
:::

:::note
@@ -178,12 +185,33 @@ The following `systemctl` commands can help you manage TDengine service:
After the installation is complete, run `C:\TDengine\taosd.exe` to start TDengine Server.

</TabItem>

+<TabItem label="macOS" value="macos">
+
+After the installation is complete, double-click the /applications/TDengine to start the program, or run `launchctl start taosd` to start TDengine Server.
+
+The following `launchctl` commands can help you manage TDengine service:
+
+- Start TDengine Server: `launchctl start taosd`
+
+- Stop TDengine Server: `launchctl stop taosd`
+
+- Check TDengine Server status: `launchctl list | grep taosd`
+
+:::info
+
+- The `launchctl` command does not require _root_ privileges. You don't need to use the `sudo` command.
+- The first content returned by the `launchctl list | grep taosd` command is the PID of the program, if '-' indicates that the TDengine service is not running.
+
+:::
+
+</TabItem>
</Tabs>

## Command Line Interface (CLI)

-You can use the TDengine CLI to monitor your TDengine deployment and execute ad hoc queries. To open the CLI, you can execute `taos` in the Linux terminal where TDengine is installed, or you can run `taos.exe` in the `C:\TDengine` directory of the Windows terminal where TDengine is installed to start the TDengine command line.
+You can use the TDengine CLI to monitor your TDengine deployment and execute ad hoc queries. To open the CLI, you can execute `taos` in the Linux/macOS terminal where TDengine is installed, or you can run `taos.exe` in the `C:\TDengine` directory of the Windows terminal where TDengine is installed to start the TDengine command line.

```bash
taos
@@ -213,13 +241,13 @@ SELECT * FROM t;
Query OK, 2 row(s) in set (0.003128s)
```

-You can also can monitor the deployment status, add and remove user accounts, and manage running instances. You can run the TDengine CLI on either Linux or Windows machines. For more information, see [TDengine CLI](../../reference/taos-shell/).
+You can also can monitor the deployment status, add and remove user accounts, and manage running instances. You can run the TDengine CLI on either machines. For more information, see [TDengine CLI](../../reference/taos-shell/).

## Test data insert performance

After your TDengine Server is running normally, you can run the taosBenchmark utility to test its performance:

-Start TDengine service and execute `taosBenchmark` (formerly named `taosdemo`) in a Linux or Windows terminal.
+Start TDengine service and execute `taosBenchmark` (formerly named `taosdemo`) in a terminal.

```bash
taosBenchmark
@@ -3,7 +3,7 @@ title: Get Started
description: This article describes how to install TDengine and test its performance.
---

-You can install and run TDengine on Linux and Windows machines as well as Docker containers. You can also deploy TDengine as a managed service with TDengine Cloud.
+You can install and run TDengine on Linux/Windows/macOS machines as well as Docker containers. You can also deploy TDengine as a managed service with TDengine Cloud.

The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface, and some tools. In addition to connectors for multiple languages, TDengine also provides a [RESTful interface](/reference/rest-api) through [taosAdapter](/reference/taosadapter).
@@ -49,6 +49,55 @@ The preceding SQL statement can be used in migration scenarios. It returns the C
DESCRIBE [db_name.]stb_name;
```

+### View tag information for all child tables in the supertable
+
+```
+taos> SHOW TABLE TAGS FROM st1;
+ tbname | id | loc |
+======================================================================
+ st1s1 | 1 | beijing |
+ st1s2 | 2 | shanghai |
+ st1s3 | 3 | guangzhou |
+Query OK, 3 rows in database (0.004455s)
+```
+
+The first column of the returned result set is the subtable name, and the subsequent columns are the tag columns.
+
+If you already know the name of the tag column, you can use the following statement to get the value of the specified tag column.
+
+```
+taos> SELECT DISTINCT TBNAME, id FROM st1;
+ tbname | id |
+===============================================
+ st1s1 | 1 |
+ st1s2 | 2 |
+ st1s3 | 3 |
+Query OK, 3 rows in database (0.002891s)
+```
+
+It should be noted that DISTINCT and TBNAME in the SELECT statement are essential, and TDengine will optimize the statement according to them, so that it can return the tag value correctly and quickly even when there is no data or a lot of data.
+
+### View the tag information of a subtable
+
+```
+taos> SHOW TAGS FROM st1s1;
+ table_name | db_name | stable_name | tag_name | tag_type | tag_value |
+============================================================================================================
+ st1s1 | test | st1 | id | INT | 1 |
+ st1s1 | test | st1 | loc | VARCHAR(20) | beijing |
+Query OK, 2 rows in database (0.003684s)
+```
+
+Similarly, you can also use the SELECT statement to query the value of the specified tag column.
+
+```
+taos> SELECT DISTINCT TBNAME, id, loc FROM st1s1;
+ tbname | id | loc |
+==================================================
+ st1s1 | 1 | beijing |
+Query OK, 1 rows in database (0.001884s)
+```

## Drop STable

```
@@ -11,7 +11,7 @@ SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW
SELECT [DISTINCT] select_list
    from_clause
    [WHERE condition]
-   [PARTITION BY tag_list]
+   [partition_by_clause]
    [window_clause]
    [group_by_clause]
    [order_by_clasue]

@@ -52,6 +52,9 @@ window_clause: {
  | STATE_WINDOW(col)
  | INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]

+partition_by_clause:
+    PARTITION BY expr [, expr] ...
+
group_by_clause:
    GROUP BY expr [, expr] ... HAVING condition
@@ -864,7 +864,7 @@ INTERP(expr)
- `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter.
- The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input.
- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1<=timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
-- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified `EVERY` parameter.
+- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified `EVERY` parameter. The parameter `EVERY` must be an integer, with no quotes, with a time unit of: b(nanosecond), u(microsecond), a(millisecond)), s(second), m(minute), h(hour), d(day), or w(week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
- Interpolation is performed based on `FILL` parameter.
- `INTERP` can only be used to interpolate in single timeline. So it must be used with `partition by tbname` when it's used on a STable.
- Pseudo column `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points(support after version 3.0.1.4).
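To make the `RANGE`/`EVERY`/`FILL` rules above concrete, here is a sketch run from a shell, assuming the `test.meters` demo supertable created by taosBenchmark (table name and time range are illustrative only):

```bash
# Interpolate the current column every 500 ms over a one-minute window,
# one timeline per subtable as INTERP requires for supertables
taos -s "SELECT _irowts, INTERP(current) FROM test.meters PARTITION BY tbname RANGE('2022-10-01 00:00:00', '2022-10-01 00:01:00') EVERY(500a) FILL(LINEAR);"
```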
@@ -136,19 +136,3 @@ The parameters that you can modify through this statement are the same as those
```sql
SHOW LOCAL VARIABLES;
```

-## Combine Vgroups
-
-```sql
-MERGE VGROUP vgroup_no1 vgroup_no2;
-```
-
-If load and data are not properly balanced among vgroups due to the data in different tim lines having different characteristics, you can combine or separate vgroups.
-
-## Separate Vgroups
-
-```sql
-SPLIT VGROUP vgroup_no;
-```
-
-This statement creates a new vgroup and migrates part of the data from the original vgroup to the new vgroup with consistent hashing. During this process, the original vgroup can continue to provide services normally.
@@ -29,8 +29,8 @@ Provides information about dnodes. Similar to SHOW DNODES.
| # | **Column** | **Data Type** | **Description** |
| --- | :------------: | ------------ | ------------------------- |
-| 1 | vnodes | SMALLINT | Current number of vnodes on the dnode |
-| 2 | vnodes | SMALLINT | Maximum number of vnodes on the dnode |
+| 1 | vnodes | SMALLINT | Current number of vnodes on the dnode. It should be noted that `vnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 2 | support_vnodes | SMALLINT | Maximum number of vnodes on the dnode |
| 3 | status | BINARY(10) | Current status |
| 4 | note | BINARY(256) | Reason for going offline or other information |
| 5 | id | SMALLINT | Dnode ID |
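A sketch of the escaping rule called out in the new description above, issued through the CLI from a shell (the single quotes keep the shell from interpreting the backticks):

```bash
# `vnodes` is a TDengine keyword, so it is backquoted when used as a column name
taos -s 'SELECT id, `vnodes`, support_vnodes, status FROM information_schema.ins_dnodes;'
```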
@@ -49,16 +49,6 @@ Provides information about mnodes. Similar to SHOW MNODES.
| 4 | role_time | TIMESTAMP | Time at which the current role was assumed |
| 5 | create_time | TIMESTAMP | Creation time |

-## INS_MODULES
-
-Provides information about modules. Similar to SHOW MODULES.
-
-| # | **Column** | **Data Type** | **Description** |
-| --- | :------: | ------------ | ---------- |
-| 1 | id | SMALLINT | Module ID |
-| 2 | endpoint | BINARY(134) | Module endpoint |
-| 3 | module | BINARY(10) | Module status |
-
## INS_QNODES

Provides information about qnodes. Similar to SHOW QNODES.
@@ -88,33 +78,33 @@ Provides information about user-created databases. Similar to SHOW DATABASES.
| 1| name| BINARY(32)| Database name |
| 2 | create_time | TIMESTAMP | Creation time |
| 3 | ntables | INT | Number of standard tables and subtables (not including supertables) |
-| 4 | vgroups | INT | Number of vgroups |
-| 6 | replica | INT | Number of replicas |
-| 7 | quorum | BINARY(3) | Strong consistency |
-| 8 | duration | INT | Duration for storage of single files |
-| 9 | keep | INT | Data retention period |
-| 10 | buffer | INT | Write cache size per vnode, in MB |
-| 11 | pagesize | INT | Page size for vnode metadata storage engine, in KB |
-| 12 | pages | INT | Number of pages per vnode metadata storage engine |
-| 13 | minrows | INT | Maximum number of records per file block |
-| 14 | maxrows | INT | Minimum number of records per file block |
-| 15 | comp | INT | Compression method |
-| 16 | precision | BINARY(2) | Time precision |
+| 4 | vgroups | INT | Number of vgroups. It should be noted that `vnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 6 | replica | INT | Number of replicas. It should be noted that `replica` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 7 | strict | BINARY(3) | Strong consistency. It should be noted that `strict` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 8 | duration | INT | Duration for storage of single files. It should be noted that `duration` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 9 | keep | INT | Data retention period. It should be noted that `keep` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 10 | buffer | INT | Write cache size per vnode, in MB. It should be noted that `buffer` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 11 | pagesize | INT | Page size for vnode metadata storage engine, in KB. It should be noted that `pagesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 12 | pages | INT | Number of pages per vnode metadata storage engine. It should be noted that `pages` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 13 | minrows | INT | Maximum number of records per file block. It should be noted that `minrows` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 14 | maxrows | INT | Minimum number of records per file block. It should be noted that `maxrows` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 15 | comp | INT | Compression method. It should be noted that `comp` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 16 | precision | BINARY(2) | Time precision. It should be noted that `precision` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 17 | status | BINARY(10) | Current database status |
-| 18 | retention | BINARY (60) | Aggregation interval and retention period |
-| 19 | single_stable | BOOL | Whether the database can contain multiple supertables |
-| 20 | cachemodel | BINARY(60) | Caching method for the newest data |
-| 21 | cachesize | INT | Memory per vnode used for caching the newest data |
-| 22 | wal_level | INT | WAL level |
-| 23 | wal_fsync_period | INT | Interval at which WAL is written to disk |
-| 24 | wal_retention_period | INT | WAL retention period |
-| 25 | wal_retention_size | INT | Maximum WAL size |
-| 26 | wal_roll_period | INT | WAL rotation period |
-| 27 | wal_segment_size | BIGINT | WAL file size |
-| 28 | stt_trigger | SMALLINT | The threshold for number of files to trigger file merging |
-| 29 | table_prefix | SMALLINT | The prefix length in the table name that is ignored when distributing table to vnode based on table name |
-| 30 | table_suffix | SMALLINT | The suffix length in the table name that is ignored when distributing table to vnode based on table name |
-| 31 | tsdb_pagesize | INT | The page size for internal storage engine, its unit is KB |
+| 18 | retentions | BINARY (60) | Aggregation interval and retention period. It should be noted that `retentions` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 19 | single_stable | BOOL | Whether the database can contain multiple supertables. It should be noted that `single_stable` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 20 | cachemodel | BINARY(60) | Caching method for the newest data. It should be noted that `cachemodel` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 21 | cachesize | INT | Memory per vnode used for caching the newest data. It should be noted that `cachesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 22 | wal_level | INT | WAL level. It should be noted that `wal_level` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 23 | wal_fsync_period | INT | Interval at which WAL is written to disk. It should be noted that `wal_fsync_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 24 | wal_retention_period | INT | WAL retention period. It should be noted that `wal_retention_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 25 | wal_retention_size | INT | Maximum WAL size. It should be noted that `wal_retention_size` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 26 | wal_roll_period | INT | WAL rotation period. It should be noted that `wal_roll_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 27 | wal_segment_size | BIGINT | WAL file size. It should be noted that `wal_segment_size` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 28 | stt_trigger | SMALLINT | The threshold for number of files to trigger file merging. It should be noted that `stt_trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 29 | table_prefix | SMALLINT | The prefix length in the table name that is ignored when distributing table to vnode based on table name. It should be noted that `table_prefix` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 30 | table_suffix | SMALLINT | The suffix length in the table name that is ignored when distributing table to vnode based on table name. It should be noted that `table_suffix` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 31 | tsdb_pagesize | INT | The page size for internal storage engine, its unit is KB. It should be noted that `tsdb_pagesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |

## INS_FUNCTIONS
@@ -123,8 +113,8 @@ Provides information about user-defined functions.
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------ | -------------- |
| 1 | name | BINARY(64) | Function name |
-| 2 | comment | BINARY(255) | Function description |
-| 3 | aggregate | INT | Whether the UDF is an aggregate function |
+| 2 | comment | BINARY(255) | Function description. It should be noted that `comment` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 3 | aggregate | INT | Whether the UDF is an aggregate function. It should be noted that `aggregate` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 4 | output_type | BINARY(31) | Output data type |
| 5 | create_time | TIMESTAMP | Creation time |
| 6 | code_len | INT | Length of the source code |
@@ -153,12 +143,12 @@ Provides information about supertables.
| 2 | db_name | BINARY(64) | All databases in the supertable |
| 3 | create_time | TIMESTAMP | Creation time |
| 4 | columns | INT | Number of columns |
-| 5 | tags | INT | Number of tags |
+| 5 | tags | INT | Number of tags. It should be noted that `tags` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6 | last_update | TIMESTAMP | Last updated time |
| 7 | table_comment | BINARY(1024) | Table description |
-| 8 | watermark | BINARY(64) | Window closing time |
-| 9 | max_delay | BINARY(64) | Maximum delay for pushing stream processing results |
-| 10 | rollup | BINARY(128) | Rollup aggregate function |
+| 8 | watermark | BINARY(64) | Window closing time. It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 9 | max_delay | BINARY(64) | Maximum delay for pushing stream processing results. It should be noted that `max_delay` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 10 | rollup | BINARY(128) | Rollup aggregate function. It should be noted that `rollup` is a TDengine keyword and needs to be escaped with ` when used as a column name. |

## INS_TABLES
@@ -173,7 +163,7 @@ Provides information about standard tables and subtables.
| 5 | stable_name | BINARY(192) | Supertable name |
| 6 | uid | BIGINT | Table ID |
| 7 | vgroup_id | INT | Vgroup ID |
-| 8 | ttl | INT | Table time-to-live |
+| 8 | ttl | INT | Table time-to-live. It should be noted that `ttl` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | table_comment | BINARY(1024) | Table description |
| 10 | type | BINARY(20) | Table type |
@@ -206,13 +196,13 @@ Provides information about TDengine Enterprise Edition permissions.
| --- | :---------: | ------------ | -------------------------------------------------- |
| 1 | version | BINARY(9) | Whether the deployment is a licensed or trial version |
| 2 | cpu_cores | BINARY(9) | CPU cores included in license |
-| 3 | dnodes | BINARY(10) | Dnodes included in license |
-| 4 | streams | BINARY(10) | Streams included in license |
-| 5 | users | BINARY(10) | Users included in license |
-| 6 | streams | BINARY(10) | Accounts included in license |
-| 7 | storage | BINARY(21) | Storage space included in license |
-| 8 | connections | BINARY(21) | Client connections included in license |
-| 9 | databases | BINARY(11) | Databases included in license |
+| 3 | dnodes | BINARY(10) | Dnodes included in license. It should be noted that `dnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 4 | streams | BINARY(10) | Streams included in license. It should be noted that `streams` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 5 | users | BINARY(10) | Users included in license. It should be noted that `users` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 6 | accounts | BINARY(10) | Accounts included in license. It should be noted that `accounts` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 7 | storage | BINARY(21) | Storage space included in license. It should be noted that `storage` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 8 | connections | BINARY(21) | Client connections included in license. It should be noted that `connections` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 9 | databases | BINARY(11) | Databases included in license. It should be noted that `databases` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10 | speed | BINARY(9) | Write speed specified in license (data points per second) |
| 11 | querytime | BINARY(9) | Total query time specified in license |
| 12 | timeseries | BINARY(21) | Number of metrics included in license |
@@ -227,7 +217,7 @@ Provides information about vgroups.
| --- | :-------: | ------------ | ------------------------------------------------------ |
| 1 | vgroup_id | INT | Vgroup ID |
| 2 | db_name | BINARY(32) | Database name |
-| 3 | tables | INT | Tables in vgroup |
+| 3 | tables | INT | Tables in vgroup. It should be noted that `tables` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 4 | status | BINARY(10) | Vgroup status |
| 5 | v1_dnode | INT | Dnode ID of first vgroup member |
| 6 | v1_status | BINARY(10) | Status of first vgroup member |
@@ -246,7 +236,7 @@ Provides system configuration information.
| # | **Column** | **Data Type** | **Description** |
| --- | :------: | ------------ | ------------ |
| 1 | name | BINARY(32) | Parameter |
-| 2 | value | BINARY(64) | Value |
+| 2 | value | BINARY(64) | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |

## INS_DNODE_VARIABLES
@@ -256,7 +246,7 @@ Provides dnode configuration information.
| --- | :------: | ------------ | ------------ |
| 1 | dnode_id | INT | Dnode ID |
| 2 | name | BINARY(32) | Parameter |
-| 3 | value | BINARY(64) | Value |
+| 3 | value | BINARY(64) | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |

## INS_TOPICS
@@ -287,5 +277,5 @@ Provides dnode configuration information.
| 5 | source_db | BINARY(64) | Source database |
| 6 | target_db | BIANRY(64) | Target database |
| 7 | target_table | BINARY(192) | Target table |
-| 8 | watermark | BIGINT | Watermark (see stream processing documentation) |
-| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation) |
+| 8 | watermark | BIGINT | Watermark (see stream processing documentation). It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation). It should be noted that `trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
@@ -13,14 +13,6 @@ SHOW APPS;
Shows all clients (such as applications) that connect to the cluster.

-## SHOW BNODES
-
-```sql
-SHOW BNODES;
-```
-
-Shows information about backup nodes (bnodes) in the system.
-
## SHOW CLUSTER

```sql
@@ -128,14 +120,6 @@ SHOW MNODES;
Shows information about mnodes in the system.

-## SHOW MODULES
-
-```sql
-SHOW MODULES;
-```
-
-Shows information about modules installed in the system.
-
## SHOW QNODES

```sql
@@ -154,14 +138,6 @@ Shows information about the storage space allowed by the license.
Note: TDengine Enterprise Edition only.

-## SHOW SNODES
-
-```sql
-SHOW SNODES;
-```
-
-Shows information about stream processing nodes (snodes) in the system.
-
## SHOW STABLES

```sql
@@ -6,7 +6,7 @@ title: Problem Diagnostics
When a TDengine client is unable to access a TDengine server, the network connection between the client side and the server side must be checked to find the root cause and resolve problems.

-Diagnostics for network connections can be executed between Linux and Linux or between Linux and Windows.
+Diagnostics for network connections can be executed between Linux/Windows/macOS.

Diagnostic steps:
@@ -13,11 +13,13 @@ After TDengine server or client installation, `taos.h` is located at
- Linux:`/usr/local/taos/include`
- Windows:`C:\TDengine\include`
+- macOS:`/usr/local/include`

The dynamic libraries for the TDengine client driver are located in.

- Linux: `/usr/local/taos/driver/libtaos.so`
- Windows: `C:\TDengine\taos.dll`
+- macOS: `/usr/local/lib/libtaos.dylib`

## Supported platforms
@@ -119,7 +121,7 @@ This section shows sample code for standard access methods to TDengine clusters
:::info
More example code and downloads are available at [GitHub](https://github.com/taosdata/TDengine/tree/develop/examples/c).
-You can find it in the installation directory under the `examples/c` path. This directory has a makefile and can be compiled under Linux by executing `make` directly.
+You can find it in the installation directory under the `examples/c` path. This directory has a makefile and can be compiled under Linux/macOS by executing `make` directly.
**Hint:** When compiling in an ARM environment, please remove `-msse4.2` from the makefile. This option is only supported on the x64/x86 hardware platforms.

:::
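A sketch of that compile step, assuming a default Linux install prefix of `/usr/local/taos` (use your own installation directory):

```bash
# Build the bundled C examples with the provided makefile
cd /usr/local/taos/examples/c
make
# On ARM, edit the makefile first and drop the -msse4.2 flag, as the hint above notes
```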
@@ -120,13 +120,13 @@ Connection conn = DriverManager.getConnection(jdbcUrl);
In the above example, TSDBDriver, which uses a JDBC native connection, establishes a connection to a hostname `taosdemo.com`, port `6030` (the default port for TDengine), and a database named `test`. In this URL, the user name `user` is specified as `root`, and the `password` is `taosdata`.

-Note: With JDBC native connections, taos-jdbcdriver relies on the client driver (`libtaos.so` on Linux; `taos.dll` on Windows).
+Note: With JDBC native connections, taos-jdbcdriver relies on the client driver (`libtaos.so` on Linux; `taos.dll` on Windows; `libtaos.dylib` on macOS).

The configuration parameters in the URL are as follows:

- user: Log in to the TDengine username. The default value is 'root'.
- password: User login password, the default value is 'taosdata'.
-- cfgdir: client configuration file directory path, default '/etc/taos' on Linux OS, 'C:/TDengine/cfg' on Windows OS.
+- cfgdir: client configuration file directory path, default '/etc/taos' on Linux OS, 'C:/TDengine/cfg' on Windows OS, '/etc/taos' on macOS.
- charset: The character set used by the client, the default value is the system character set.
- locale: Client locale, by default, use the system's current locale.
- timezone: The time zone used by the client, the default value is the system's current time zone.

@@ -172,7 +172,7 @@ In the above example, JDBC uses the client's configuration file to establish a c
In TDengine, as long as one node in firstEp and secondEp is valid, the connection to the cluster can be established normally.

-The configuration file here refers to the configuration file on the machine where the application that calls the JDBC Connector is located, the default path is `/etc/taos/taos.cfg` on Linux, and the default path is `C://TDengine/cfg/taos.cfg` on Windows.
+The configuration file here refers to the configuration file on the machine where the application that calls the JDBC Connector is located, the default path is `/etc/taos/taos.cfg` on Linux, the default path is `C://TDengine/cfg/taos.cfg` on Windows, and the default path is `/etc/taos/taos.cfg` on macOS.

</TabItem>
<TabItem value="rest" label="REST connection">

@@ -261,7 +261,7 @@ The configuration parameters in properties are as follows.
- TSDBDriver.PROPERTY_KEY_PASSWORD: user login password, default value 'taosdata'.
- TSDBDriver.PROPERTY_KEY_BATCH_LOAD: true: pull the result set in batch when executing query; false: pull the result set row by row. The default value is: false.
- TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE: true: when executing executeBatch of Statement, if there is a SQL execution failure in the middle, continue to execute the following sql. false: no longer execute any statement after the failed SQL. The default value is: false.
-- TSDBDriver.PROPERTY_KEY_CONFIG_DIR: only works when using JDBC native connection. Client configuration file directory path, default value `/etc/taos` on Linux OS, default value `C:/TDengine/cfg` on Windows OS.
+- TSDBDriver.PROPERTY_KEY_CONFIG_DIR: only works when using JDBC native connection. Client configuration file directory path, default value `/etc/taos` on Linux OS, default value `C:/TDengine/cfg` on Windows OS, default value `/etc/taos` on macOS.
- TSDBDriver.PROPERTY_KEY_CHARSET: In the character set used by the client, the default value is the system character set.
- TSDBDriver.PROPERTY_KEY_LOCALE: this only takes effect when using JDBC native connection. Client language environment, the default value is system current locale.
- TSDBDriver.PROPERTY_KEY_TIME_ZONE: only takes effect when using JDBC native connection. In the time zone used by the client, the default value is the system's current time zone.

@@ -896,7 +896,7 @@ The source code of the sample application is under `TDengine/examples/JDBC`:
**Cause**: The program did not find the dependent native library `taos`.

-**Solution**: On Windows you can copy `C:\TDengine\driver\taos.dll` to the `C:\Windows\System32` directory, on Linux the following soft link will be created `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` will work.
+**Solution**: On Windows you can copy `C:\TDengine\driver\taos.dll` to the `C:\Windows\System32` directory, on Linux the following soft link will be created `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` will work, on macOS the lib soft link will be `/usr/local/lib/libtaos.dylib`.

3. java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
@@ -32,7 +32,7 @@ We recommend using the latest version of `taospy`, regardless of the version of
### Preparation

-1. Install Python. Python >= 3.6 is recommended. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
+1. Install Python. Python >= 3.7 is recommended. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
2. Install [pip](https://pypi.org/project/pip/). In most cases, the Python installer comes with the pip utility. If not, please refer to [pip documentation](https://pip.pypa.io/en/stable/installation/) to install it.
If you use a native connection, you will also need to [Install Client Driver](/reference/connector#Install-Client-Driver). The client install package includes the TDengine client dynamic link library (`libtaos.so` or `taos.dll`) and the TDengine CLI.

@@ -121,7 +121,7 @@ Before establishing a connection with the connector, we recommend testing the co
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">

-Ensure that the TDengine instance is up and that the FQDN of the machines in the cluster (the FQDN defaults to hostname if you are starting a standalone version) can be resolved locally, by testing with the `ping` command.
+Ensure that the TDengine instance is up and that the FQDN of the machines in the cluster (the FQDN defaults to hostname if you are starting a stand-alone version) can be resolved locally, by testing with the `ping` command.

```
ping <FQDN>

@@ -186,7 +186,7 @@ All arguments of the `connect()` function are optional keyword arguments. The fo
- `user` : The TDengine user name. The default value is `root`.
- `password` : TDengine user password. The default value is `taosdata`.
- `port` : The starting port of the data node to connect to, i.e., the serverPort configuration. The default value is 6030, which will only take effect if the host parameter is provided.
-- `config` : The path to the client configuration file. On Windows systems, the default is `C:\TDengine\cfg`. The default is `/etc/taos/` on Linux systems.
+- `config` : The path to the client configuration file. On Windows systems, the default is `C:\TDengine\cfg`. The default is `/etc/taos/` on Linux/macOS.
- `timezone` : The timezone used to convert the TIMESTAMP data in the query results to python `datetime` objects. The default is the local timezone.

:::warning
@@ -13,11 +13,13 @@ After TDengine client or server is installed, `taos.h` is located at:
- Linux:`/usr/local/taos/include`
- Windows:`C:\TDengine\include`
+- macOS:`/usr/local/include`

TDengine client driver is located at:

- Linux: `/usr/local/taos/driver/libtaos.so`
- Windows: `C:\TDengine\taos.dll`
+- macOS:`/usr/local/lib/libtaos.dylib`

## Supported Platforms
@@ -6,5 +6,6 @@ Since the TDengine client driver is written in C, using the native connection re
- libtaos.so: After successful installation of TDengine on a Linux system, the dependent Linux version of the client driver `libtaos.so` file will be automatically linked to `/usr/lib/libtaos.so`, which is included in the Linux scannable path and does not need to be specified separately.
- taos.dll: After installing the client on Windows, the dependent Windows version of the client driver taos.dll file will be automatically copied to the system default search path C:/Windows/System32, again without the need to specify it separately.
+- libtaos.dylib: After successful installation of TDengine on a mac system, the dependent macOS version of the client driver `libtaos.dylib` file will be automatically linked to `/usr/local/lib/libtaos.dylib`, which is included in the macOS scannable path and does not need to be specified separately.

:::
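A quick way to confirm that the driver described above is actually resolvable (paths follow the list above; run whichever line matches your platform):

```bash
# Linux: the driver should be visible to the dynamic linker
ldconfig -p | grep libtaos
# macOS: the dylib is linked into /usr/local/lib
ls -l /usr/local/lib/libtaos.dylib
```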
@@ -8,13 +8,15 @@ TDengine provides a rich set of APIs (application development interface). To fac
## Supported platforms

-Currently, TDengine's native interface connectors can support platforms such as x64 and ARM hardware platforms and Linux and Windows development environments. The comparison matrix is as follows.
+Currently, TDengine's native interface connectors can support platforms such as x64 and ARM hardware platforms and Linux/Windows/macOS development environments. The comparison matrix is as follows.

| **CPU** | **OS** | **Java** | **Python** | **Go** | **Node.js** | **C#** | **Rust** | C/C++ |
| -------------- | --------- | -------- | ---------- | ------ | ----------- | ------ | -------- | ----- |
| **X86 64bit** | **Linux** | ● | ● | ● | ● | ● | ● | ● |
| **X86 64bit** | **Win64** | ● | ● | ● | ● | ● | ● | ● |
+| **X86 64bit** | **macOS** | ○ | ● | ● | ○ | ○ | ● | ● |
| **ARM64** | **Linux** | ● | ● | ● | ● | ○ | ○ | ● |
+| **ARM64** | **macOS** | ○ | ● | ● | ○ | ○ | ● | ● |

Where ● means the official test verification passed, ○ means the unofficial test verification passed, -- means no assurance.
@@ -197,6 +197,7 @@ Support InfluxDB query parameters as follows.
- `p` TDengine password

Note: InfluxDB token authorization is not supported at present. Only Basic authorization and query parameter validation are supported.
+Example: curl --request POST http://127.0.0.1:6041/influxdb/v1/write?db=test --user "root:taosdata" --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"

### OpenTSDB
@ -23,7 +23,7 @@ There are two ways to install taosBenchmark:
|
|||
|
||||
TaosBenchmark needs to be executed on the terminal of the operating system, it supports two configuration methods: [Command-line arguments](#command-line-arguments-in-detail) and [JSON configuration file](#configuration-file-parameters-in-detail). These two methods are mutually exclusive. Users can use `-f <json file>` to specify a configuration file. When running taosBenchmark with command-line arguments to control its behavior, users should use other parameters for configuration, but not the `-f` parameter. In addition, taosBenchmark offers a special way of running without parameters.
|
||||
|
||||
taosBenchmark supports the complete performance testing of TDengine by providing functionally to write, query, and subscribe. These three functions are mutually exclusive, users can only select one of them each time taosBenchmark runs. The query and subscribe functionalities are only configurable using a json configuration file by specifying the parameter `filetype`, while write can be performed through both the command-line and a configuration file. If you want to test the performance of queries or data subscriptionm configure taosBenchmark with the configuration file. You can modify the value of the `filetype` parameter to specify the function that you want to test.
|
||||
taosBenchmark supports complete performance testing of TDengine by providing the functionality to write, query, and subscribe. These three functions are mutually exclusive; users can only select one of them each time taosBenchmark runs. The query and subscribe functionalities are only configurable using a JSON configuration file by specifying the parameter `filetype`, while writing can be performed through both the command line and a configuration file. If you want to test the performance of queries or data subscription, configure taosBenchmark with the configuration file. You can modify the value of the `filetype` parameter to specify the function that you want to test.
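For example, the two modes could be invoked as shown below. This is only a sketch: the JSON file name is a placeholder, and `-t` (number of child tables) and `-n` (records per table) are assumed to be available command-line options.

```bash
# Insert test driven entirely by command-line arguments (no -f)
taosBenchmark -t 100 -n 1000

# Query or subscribe test: all settings, including "filetype", go into a JSON file
taosBenchmark -f query-test.json
```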
|
||||
|
||||
**Make sure that the TDengine cluster is running correctly before running taosBenchmark.**
|
||||
|
||||
|
@ -340,7 +340,7 @@ The configuration parameters for specifying super table tag columns and data col
|
|||
|
||||
- **values**: The value field of the nchar/binary column/label, which will be chosen randomly from the values.
|
||||
|
||||
- **sma**: Insert the column into the BSMA. Enter `yes` or `no`. The default is `no`.
|
||||
- **sma**: Insert the column into the SMA. Enter `yes` or `no`. The default is `no`.
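As an illustration only, a data column definition in the JSON configuration might enable SMA like this; the surrounding `columns` array and the `type`/`name`/`count` fields follow the column configuration parameters described above, and the exact schema may vary between versions.

```json
"columns": [
  { "type": "FLOAT", "name": "current", "count": 1, "sma": "yes" },
  { "type": "INT",   "name": "voltage", "sma": "no" }
]
```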
|
||||
|
||||
#### insertion behavior configuration parameters
|
||||
|
||||
|
|
|
@ -12,7 +12,7 @@ If executed on the TDengine server-side, there is no need for additional install
|
|||
|
||||
## Execution
|
||||
|
||||
To access the TDengine CLI, you can execute `taos` command-line utility from a Linux terminal or Windows terminal.
|
||||
To access the TDengine CLI, you can execute `taos` command-line utility from a terminal.
|
||||
|
||||
```bash
|
||||
taos
|
||||
|
|
|
@ -5,28 +5,28 @@ description: "List of platforms supported by TDengine server, client, and connec
|
|||
|
||||
## List of supported platforms for TDengine server
|
||||
|
||||
| | **Windows Server 2016/2019** | **Windows 10/11** | **CentOS 7.9/8** | **Ubuntu 18/20** |
|
||||
| ------------ | ---------------------------- | ----------------- | ---------------- | ---------------- |
|
||||
| X64 | ● | ● | ● | ● |
|
||||
| ARM64 | | | ● | |
|
||||
| | **Windows Server 2016/2019** | **Windows 10/11** | **CentOS 7.9/8** | **Ubuntu 18/20** | **macOS** |
|
||||
| ------------ | ---------------------------- | ----------------- | ---------------- | ---------------- | --------- |
|
||||
| X64 | ● | ● | ● | ● | ● |
|
||||
| ARM64 | | | ● | | ● |
|
||||
|
||||
Note: ● means officially tested and verified, ○ means unofficially tested and verified.
|
||||
|
||||
## List of supported platforms for TDengine clients and connectors
|
||||
|
||||
TDengine's connector can support a wide range of platforms, including X64/X86/ARM64/ARM32/MIPS/Alpha hardware platforms and Linux/Win64/Win32 development environments.
|
||||
TDengine's connector can support a wide range of platforms, including X64/X86/ARM64/ARM32/MIPS/Alpha hardware platforms and Linux/Win64/Win32/macOS development environments.
|
||||
|
||||
The comparison matrix is as follows.
|
||||
|
||||
| **CPU** | **X64 64bit** | **X64 64bit** | **ARM64** |
|
||||
| ----------- | ------------- | ------------- | --------- |
|
||||
| **OS** | **Linux** | **Win64** | **Linux** |
|
||||
| **C/C++** | ● | ● | ● |
|
||||
| **JDBC** | ● | ● | ● |
|
||||
| **Python** | ● | ● | ● |
|
||||
| **Go** | ● | ● | ● |
|
||||
| **NodeJs** | ● | ● | ● |
|
||||
| **C#** | ● | ● | ○ |
|
||||
| **RESTful** | ● | ● | ● |
|
||||
| **CPU** | **X64 64bit** | **X64 64bit** | **ARM64** | **X64 64bit** | **ARM64** |
|
||||
| ----------- | ------------- | ------------- | --------- | ------------- | --------- |
|
||||
| **OS** | **Linux** | **Win64** | **Linux** | **macOS** | **macOS** |
|
||||
| **C/C++** | ● | ● | ● | ● | ● |
|
||||
| **JDBC** | ● | ● | ● | ○ | ○ |
|
||||
| **Python** | ● | ● | ● | ● | ● |
|
||||
| **Go** | ● | ● | ● | ● | ● |
|
||||
| **NodeJs** | ● | ● | ● | ○ | ○ |
|
||||
| **C#** | ● | ● | ○ | ○ | ○ |
|
||||
| **RESTful** | ● | ● | ● | ● | ● |
|
||||
|
||||
Note: ● means the official test is verified, ○ means the unofficial test is verified, -- means not verified.
|
||||
|
|
|
@ -205,7 +205,7 @@ The parameters described in this document by the effect that they have on the sy
|
|||
:::info
|
||||
To handle the data insertion and data query from multiple timezones, Unix Timestamp is used and stored in TDengine. The timestamp generated from any timezones at same time is same in Unix timestamp. Note that Unix timestamps are converted and recorded on the client side. To make sure the time on client side can be converted to Unix timestamp correctly, the timezone must be set properly.
|
||||
|
||||
On Linux system, TDengine clients automatically obtain timezone from the host. Alternatively, the timezone can be configured explicitly in configuration file `taos.cfg` like below. For example:
|
||||
On Linux/macOS, TDengine clients automatically obtain timezone from the host. Alternatively, the timezone can be configured explicitly in configuration file `taos.cfg` like below. For example:
|
||||
|
||||
```
|
||||
timezone UTC-8
|
||||
|
@ -248,9 +248,9 @@ To avoid the problems of using time strings, Unix timestamp can be used directly
|
|||
:::info
|
||||
A specific type "nchar" is provided in TDengine to store non-ASCII characters such as Chinese, Japanese, and Korean. The characters to be stored in nchar type are firstly encoded in UCS4-LE before sending to server side. Note that the correct encoding is determined by the user. To store non-ASCII characters correctly, the encoding format of the client side needs to be set properly.
|
||||
|
||||
The characters input on the client side are encoded using the default system encoding, which is UTF-8 on Linux, or GB18030 or GBK on some systems in Chinese, POSIX in docker, CP936 on Windows in Chinese. The encoding of the operating system in use must be set correctly so that the characters in nchar type can be converted to UCS4-LE.
|
||||
The characters input on the client side are encoded using the default system encoding, which is UTF-8 on Linux/macOS, or GB18030 or GBK on some systems in Chinese, POSIX in docker, CP936 on Windows in Chinese. The encoding of the operating system in use must be set correctly so that the characters in nchar type can be converted to UCS4-LE.
|
||||
|
||||
The locale definition standard on Linux is: <Language\>\_<Region\>.<charset\>, for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, "UTF-8" means charset. The charset indicates how to display the characters. On Linux and Mac OSX, the charset can be set by locale in the system. On Windows system another configuration parameter `charset` must be used to configure charset because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux to specify the charset.
|
||||
The locale definition standard on Linux/macOS is: <Language\>\_<Region\>.<charset\>, for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, "UTF-8" means charset. The charset indicates how to display the characters. On Linux/macOS, the charset can be set by locale in the system. On Windows system another configuration parameter `charset` must be used to configure charset because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux/macOS to specify the charset.
|
||||
|
||||
:::
|
||||
|
||||
|
@ -263,9 +263,9 @@ The locale definition standard on Linux is: <Language\>\_<Region\>.<charset\>, f
|
|||
| Default Value | charset set in the system |
|
||||
|
||||
:::info
|
||||
On Linux, if `charset` is not set in `taos.cfg`, when `taos` is started, the charset is obtained from system locale. If obtaining charset from system locale fails, `taos` would fail to start.
|
||||
On Linux/macOS, if `charset` is not set in `taos.cfg`, when `taos` is started, the charset is obtained from system locale. If obtaining charset from system locale fails, `taos` would fail to start.
|
||||
|
||||
So on Linux system, if system locale is set properly, it's not necessary to set `charset` in `taos.cfg`. For example:
|
||||
So on Linux/macOS, if system locale is set properly, it's not necessary to set `charset` in `taos.cfg`. For example:
|
||||
|
||||
```
|
||||
locale zh_CN.UTF-8
|
||||
|
@ -279,7 +279,7 @@ charset CP936
|
|||
|
||||
Refer to the documentation for your operating system before changing the charset.
|
||||
|
||||
On a Linux system, if the charset contained in `locale` is not consistent with that set by `charset`, the later setting in the configuration file takes precedence.
|
||||
On Linux/macOS, if the charset contained in `locale` is not consistent with that set by `charset`, the later setting in the configuration file takes precedence.
|
||||
|
||||
```
|
||||
locale zh_CN.UTF-8
|
||||
|
@ -675,7 +675,7 @@ To prevent system resource from being exhausted by multiple concurrent streams,
|
|||
| Meaning | Whether to generate core file when server crashes |
|
||||
| Value Range | 0: false, 1: true |
|
||||
| Default Value | 1 |
|
||||
| Note | The core file is generated under root directory `systemctl start taosd` is used to start, or under the working directory if `taosd` is started directly on Linux Shell. |
|
||||
| Note | The core file is generated under the root directory if `systemctl/launchctl start taosd` is used to start taosd, or under the working directory if `taosd` is started directly in a Linux/macOS shell. |
|
||||
|
||||
### udf
|
||||
|
||||
|
|
|
@ -55,14 +55,16 @@ This error indicates that the client could not connect to the server. Perform th
|
|||
|
||||
7. If you are using the Python, Java, Go, Rust, C#, or Node.js connector on Linux to connect to the server, verify that `libtaos.so` is in the `/usr/local/taos/driver` directory and `/usr/local/taos/driver` is in the `LD_LIBRARY_PATH` environment variable.
|
||||
|
||||
8. If you are using Windows, verify that `C:\TDengine\driver\taos.dll` is in the `PATH` environment variable. If possible, move `taos.dll` to the `C:\Windows\System32` directory.
|
||||
8. If you are using macOS, verify that `libtaos.dylib` is in the `/usr/local/lib` directory and `/usr/local/lib` is in the `LD_LIBRARY_PATH` environment variable.
|
||||
|
||||
9. On Linux systems, you can use the `nc` tool to check whether a port is accessible:
|
||||
9. If you are using Windows, verify that `C:\TDengine\driver\taos.dll` is in the `PATH` environment variable. If possible, move `taos.dll` to the `C:\Windows\System32` directory.
|
||||
|
||||
10. On Linux/macOS, you can use the `nc` tool to check whether a port is accessible:
|
||||
- To check whether a UDP port is open, run `nc -vuz {hostIP} {port}`.
|
||||
- To check whether a TCP port on the server side is open, run `nc -l {port}`.
|
||||
- To check whether a TCP port on client side is open, run `nc {hostIP} {port}`.
|
||||
|
||||
10. On Windows systems, you can run `Test-NetConnection -ComputerName {fqdn} -Port {port}` in PowerShell to check whether a port on the server side is accessible.
|
||||
On Windows systems, you can run `Test-NetConnection -ComputerName {fqdn} -Port {port}` in PowerShell to check whether a port on the server side is accessible.
|
||||
|
||||
11. You can also use the TDengine CLI to diagnose network issues. For more information, see [Problem Diagnostics](https://docs.tdengine.com/operation/diagnose/).
|
||||
|
||||
|
|
|
@ -184,22 +184,54 @@ void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
|
|||
tmq_t* build_consumer() {
|
||||
tmq_conf_res_t code;
|
||||
tmq_conf_t* conf = tmq_conf_new();
|
||||
|
||||
code = tmq_conf_set(conf, "enable.auto.commit", "true");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
if (TMQ_CONF_OK != code) {
|
||||
tmq_conf_destroy(conf);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
code = tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
if (TMQ_CONF_OK != code) {
|
||||
tmq_conf_destroy(conf);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
code = tmq_conf_set(conf, "group.id", "cgrpName");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
if (TMQ_CONF_OK != code) {
|
||||
tmq_conf_destroy(conf);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
code = tmq_conf_set(conf, "client.id", "user defined name");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
if (TMQ_CONF_OK != code) {
|
||||
tmq_conf_destroy(conf);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
code = tmq_conf_set(conf, "td.connect.user", "root");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
if (TMQ_CONF_OK != code) {
|
||||
tmq_conf_destroy(conf);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
code = tmq_conf_set(conf, "td.connect.pass", "taosdata");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
if (TMQ_CONF_OK != code) {
|
||||
tmq_conf_destroy(conf);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
code = tmq_conf_set(conf, "auto.offset.reset", "earliest");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
if (TMQ_CONF_OK != code) {
|
||||
tmq_conf_destroy(conf);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
code = tmq_conf_set(conf, "experimental.snapshot.enable", "false");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
if (TMQ_CONF_OK != code) {
|
||||
tmq_conf_destroy(conf);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
|
||||
|
||||
|
|
|
@ -8,7 +8,19 @@ description: 使用 Docker 快速体验 TDengine 的高效写入和查询
|
|||
|
||||
## 启动 TDengine
|
||||
|
||||
如果已经安装了 Docker,只需执行下面的命令:
|
||||
如果已经安装了 Docker,首先拉取最新的 TDengine 容器镜像:
|
||||
|
||||
```shell
|
||||
docker pull tdengine/tdengine:latest
|
||||
```
|
||||
|
||||
或者指定版本的容器镜像:
|
||||
|
||||
```shell
|
||||
docker pull tdengine/tdengine:3.0.1.4
|
||||
```
|
||||
|
||||
然后只需执行下面的命令:
|
||||
|
||||
```shell
|
||||
docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
|
||||
|
@ -46,7 +58,7 @@ taos>
|
|||
|
||||
可以使用 TDengine 的自带工具 taosBenchmark 快速体验 TDengine 的写入速度。
|
||||
|
||||
启动 TDengine 的服务,在 Linux 或 Windows 终端执行 `taosBenchmark`(曾命名为 `taosdemo`):
|
||||
启动 TDengine 的服务,在终端执行 `taosBenchmark`(曾命名为 `taosdemo`):
|
||||
|
||||
```bash
|
||||
$ taosBenchmark
|
||||
|
|
|
@ -10,11 +10,11 @@ import PkgListV3 from "/components/PkgListV3";
|
|||
|
||||
您可以[用 Docker 立即体验](../../get-started/docker/) TDengine。如果您希望对 TDengine 贡献代码或对内部实现感兴趣,请参考我们的 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装.
|
||||
|
||||
TDengine 完整的软件包包括服务端(taosd)、应用驱动(taosc)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、命令行程序(CLI,taos)和一些工具软件。目前 taosAdapter 仅在 Linux 系统上安装和运行,后续将支持 Windows、macOS 等系统。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../../reference/taosadapter/) 提供 [RESTful 接口](../../connector/rest-api/)。
|
||||
TDengine 完整的软件包包括服务端(taosd)、应用驱动(taosc)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、命令行程序(CLI,taos)和一些工具软件。目前 taosdump、TDinsight 仅在 Linux 系统上安装和运行,后续将支持 Windows、macOS 等系统。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../../reference/taosadapter/) 提供 [RESTful 接口](../../connector/rest-api/)。
|
||||
|
||||
为方便使用,标准的服务端安装包包含了 taosd、taosAdapter、taosc、taos、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 Lite 版本的安装包。
|
||||
|
||||
在 Linux 系统上,TDengine 社区版提供 Deb 和 RPM 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 Deb 支持 Debian/Ubuntu 及其衍生系统,RPM 支持 CentOS/RHEL/SUSE 及其衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包,也支持通过 `apt-get` 工具从线上进行安装。需要注意的是,RPM 和 Deb 包不含 `taosdump` 和 TDinsight 安装脚本,这些工具需要通过安装 taosTool 包获得。TDengine 也提供 Windows x64 平台的安装包。
|
||||
在 Linux 系统上,TDengine 社区版提供 Deb 和 RPM 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 Deb 支持 Debian/Ubuntu 及其衍生系统,RPM 支持 CentOS/RHEL/SUSE 及其衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包,也支持通过 `apt-get` 工具从线上进行安装。需要注意的是,RPM 和 Deb 包不含 `taosdump` 和 TDinsight 安装脚本,这些工具需要通过安装 taosTools 包获得。TDengine 也提供 Windows x64 平台和 macOS x64/m1 平台的安装包。
|
||||
|
||||
## 安装
|
||||
|
||||
|
@ -110,6 +110,13 @@ apt-get 方式只适用于 Debian 或 Ubuntu 系统。
|
|||
<PkgListV3 type={3}/>
|
||||
2. 运行可执行程序来安装 TDengine。
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="macOS 安装" value="macos">
|
||||
|
||||
1. 从列表中下载获得 pkg 安装程序;
|
||||
<PkgListV3 type={7}/>
|
||||
2. 运行可执行程序来安装 TDengine。
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
|
@ -177,12 +184,33 @@ Active: inactive (dead)
|
|||
|
||||
安装后,在 `C:\TDengine` 目录下,运行 `taosd.exe` 来启动 TDengine 服务进程。
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem label="macOS 系统" value="macos">
|
||||
|
||||
安装后,在应用程序目录下,双击 TDengine 图标来启动程序,也可以运行 `launchctl start taosd` 来启动 TDengine 服务进程。
|
||||
|
||||
如下 `launchctl` 命令可以帮助你管理 TDengine 服务:
|
||||
|
||||
- 启动服务进程:`launchctl start taosd`
|
||||
|
||||
- 停止服务进程:`launchctl stop taosd`
|
||||
|
||||
- 查看服务状态:`launchctl list | grep taosd`
|
||||
|
||||
:::info
|
||||
|
||||
- `launchctl` 命令不需要管理员权限,请不要在前面加 `sudo`。
|
||||
- `launchctl list | grep taosd` 指令返回的第一个内容是程序的 PID,若为 `-` 则说明 TDengine 服务未运行。
|
||||
|
||||
:::
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
## TDengine 命令行(CLI)
|
||||
|
||||
为便于检查 TDengine 的状态,执行数据库(Database)的各种即席(Ad Hoc)查询,TDengine 提供一命令行应用程序(以下简称为 TDengine CLI)taos。要进入 TDengine 命令行,您只要在安装有 TDengine 的 Linux 终端执行 `taos` 即可,也可以在安装有 TDengine 的 Windows 终端的 C:\TDengine 目录下,运行 taos.exe 来启动 TDengine 命令行。
|
||||
为便于检查 TDengine 的状态,执行数据库(Database)的各种即席(Ad Hoc)查询,TDengine 提供一命令行应用程序(以下简称为 TDengine CLI)taos。要进入 TDengine 命令行,您只要在安装有 TDengine 的 Linux、macOS 终端执行 `taos` 即可,也可以在安装有 TDengine 的 Windows 终端的 C:\TDengine 目录下,运行 taos.exe 来启动 TDengine 命令行。
|
||||
|
||||
```bash
|
||||
taos
|
||||
|
@ -212,13 +240,13 @@ SELECT * FROM t;
|
|||
Query OK, 2 row(s) in set (0.003128s)
|
||||
```
|
||||
|
||||
除执行 SQL 语句外,系统管理员还可以从 TDengine CLI 进行检查系统运行状态、添加删除用户账号等操作。TDengine CLI 连同应用驱动也可以独立安装在 Linux 或 Windows 机器上运行,更多细节请参考 [TDengine 命令行](../../reference/taos-shell/)。
|
||||
除执行 SQL 语句外,系统管理员还可以从 TDengine CLI 进行检查系统运行状态、添加删除用户账号等操作。TDengine CLI 连同应用驱动也可以独立安装在机器上运行,更多细节请参考 [TDengine 命令行](../../reference/taos-shell/)。
|
||||
|
||||
## 使用 taosBenchmark 体验写入速度
|
||||
|
||||
可以使用 TDengine 的自带工具 taosBenchmark 快速体验 TDengine 的写入速度。
|
||||
|
||||
启动 TDengine 的服务,在 Linux 或 Windows 终端执行 `taosBenchmark`(曾命名为 `taosdemo`):
|
||||
启动 TDengine 服务,然后在终端执行 `taosBenchmark`(曾命名为 `taosdemo`):
|
||||
|
||||
```bash
|
||||
$ taosBenchmark
|
||||
|
@ -249,7 +277,7 @@ SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters;
|
|||
查询 location = "California.SanFrancisco" 的记录总条数:
|
||||
|
||||
```sql
|
||||
SELECT COUNT(*) FROM test.meters WHERE location = "Calaifornia.SanFrancisco";
|
||||
SELECT COUNT(*) FROM test.meters WHERE location = "California.SanFrancisco";
|
||||
```
|
||||
|
||||
查询 groupId = 10 的所有记录的平均值、最大值、最小值等:
|
||||
|
|
|
@ -67,6 +67,10 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
|
|||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
## 查询示例
|
||||
比如查询 location=California.LosAngeles,groupid=2 子表的数据可以通过如下sql:
|
||||
select * from meters where location=California.LosAngeles and groupid=2
|
||||
## SQL查询示例
|
||||
- meters 是插入数据的超级表名
|
||||
- 可以通过超级表的tag来过滤数据,比如查询 `location=California.LosAngeles,groupid=2` 可以通过如下sql:
|
||||
|
||||
```sql
|
||||
select * from meters where location="California.LosAngeles" and groupid=2
|
||||
```
|
||||
|
|
|
@ -81,6 +81,10 @@ taos> select tbname, * from `meters.current`;
|
|||
t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | California.SanFrancisco |
|
||||
Query OK, 4 row(s) in set (0.005399s)
|
||||
```
|
||||
## 查询示例:
|
||||
想要查询 location=California.LosAngeles groupid=3 的数据,可以通过如下sql:
|
||||
select * from `meters.voltage` where location="California.LosAngeles" and groupid=3
|
||||
|
||||
## SQL查询示例
|
||||
- `meters.current` 是插入数据的超级表名
|
||||
- 可以通过超级表的tag来过滤数据,比如查询 `location=California.LosAngeles groupid=3` 可以通过如下sql:
|
||||
```sql
|
||||
select * from `meters.current` where location="California.LosAngeles" and groupid=3
|
||||
```
|
||||
|
|
|
@ -96,6 +96,9 @@ taos> select * from `meters.current`;
|
|||
Query OK, 2 row(s) in set (0.004076s)
|
||||
```
|
||||
|
||||
## 查询示例
|
||||
想要查询"tags": {"location": "California.LosAngeles", "groupid": 1} 的数据,可以通过如下sql:
|
||||
## SQL查询示例
|
||||
- `meters.voltage` 是插入数据的超级表名
|
||||
- 可以通过超级表的tag来过滤数据,比如查询 `location=California.LosAngeles groupid=1` 可以通过如下sql:
|
||||
```sql
|
||||
select * from `meters.voltage` where location="California.LosAngeles" and groupid=1
|
||||
```
|
||||
|
|
|
@ -70,7 +70,7 @@ insert into d1004 values("2018-10-03 14:38:06.500", 11.50000, 221, 0.35000);
|
|||
### 查询以观察结果
|
||||
|
||||
```sql
|
||||
taos> select start, end, max_current from current_stream_output_stb;
|
||||
taos> select start, wend, max_current from current_stream_output_stb;
|
||||
start | wend | max_current |
|
||||
===========================================================================
|
||||
2018-10-03 14:38:05.000 | 2018-10-03 14:38:10.000 | 10.30000 |
|
||||
|
|
|
@ -74,7 +74,7 @@ http://<fqdn>:<port>/rest/sql/[db_name]
|
|||
|
||||
参数说明:
|
||||
|
||||
- fqnd: 集群中的任一台主机 FQDN 或 IP 地址。
|
||||
- fqdn: 集群中的任一台主机 FQDN 或 IP 地址。
|
||||
- port: 配置文件中 httpPort 配置项,缺省为 6041。
|
||||
- db_name: 可选参数,指定本次所执行的 SQL 语句的默认数据库库名。
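例如,可以用 curl 向该接口发送一条 SQL(示意用法,主机名与端口请按实际环境替换):

```bash
curl -u root:taosdata -d "select server_version()" http://localhost:6041/rest/sql
```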
|
||||
|
||||
|
|
|
@ -13,11 +13,13 @@ TDengine 服务端或客户端安装后,`taos.h` 位于:
|
|||
|
||||
- Linux:`/usr/local/taos/include`
|
||||
- Windows:`C:\TDengine\include`
|
||||
- macOS:`/usr/local/include`
|
||||
|
||||
TDengine 客户端驱动的动态库位于:
|
||||
|
||||
- Linux: `/usr/local/taos/driver/libtaos.so`
|
||||
- Windows: `C:\TDengine\taos.dll`
|
||||
- macOS: `/usr/local/lib/libtaos.dylib`
|
||||
|
||||
## 支持的平台
|
||||
|
||||
|
@ -119,7 +121,7 @@ TDengine 客户端驱动的安装请参考 [安装指南](../#安装步骤)
|
|||
|
||||
:::info
|
||||
更多示例代码及下载请见 [GitHub](https://github.com/taosdata/TDengine/tree/develop/examples/c)。
|
||||
也可以在安装目录下的 `examples/c` 路径下找到。 该目录下有 makefile,在 Linux 环境下,直接执行 make 就可以编译得到执行文件。
|
||||
也可以在安装目录下的 `examples/c` 路径下找到。 该目录下有 makefile,在 Linux/macOS 环境下,直接执行 make 就可以编译得到执行文件。
|
||||
**提示:**在 ARM 环境下编译时,请将 makefile 中的 `-msse4.2` 去掉,这个选项只有在 x64/x86 硬件平台上才能支持。
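例如,假设采用默认安装路径 /usr/local/taos,可以按如下方式编译示例(仅为示意):

```bash
cd /usr/local/taos/examples/c
make
```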
|
||||
|
||||
:::
|
||||
|
|
|
@ -120,7 +120,7 @@ Connection conn = DriverManager.getConnection(jdbcUrl);
|
|||
|
||||
以上示例,使用了 JDBC 原生连接的 TSDBDriver,建立了到 hostname 为 taosdemo.com,端口为 6030(TDengine 的默认端口),数据库名为 test 的连接。这个 URL 中指定用户名(user)为 root,密码(password)为 taosdata。
|
||||
|
||||
**注意**:使用 JDBC 原生连接,taos-jdbcdriver 需要依赖客户端驱动(Linux 下是 libtaos.so;Windows 下是 taos.dll)。
|
||||
**注意**:使用 JDBC 原生连接,taos-jdbcdriver 需要依赖客户端驱动(Linux 下是 libtaos.so;Windows 下是 taos.dll;macOS 下是 libtaos.dylib)。
|
||||
|
||||
url 中的配置参数如下:
|
||||
|
||||
|
@ -375,7 +375,7 @@ public class ParameterBindingDemo {
|
|||
|
||||
private static final String host = "127.0.0.1";
|
||||
private static final Random random = new Random(System.currentTimeMillis());
|
||||
private static final int BINARY_COLUMN_SIZE = 20;
|
||||
private static final int BINARY_COLUMN_SIZE = 30;
|
||||
private static final String[] schemaList = {
|
||||
"create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
|
||||
"create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
|
||||
|
@ -898,7 +898,7 @@ public static void main(String[] args) throws Exception {
|
|||
|
||||
**原因**:程序没有找到依赖的本地函数库 taos。
|
||||
|
||||
**解决方法**:Windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下,Linux 下将建立如下软链 `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。
|
||||
**解决方法**:Windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下,Linux 下将建立如下软链 `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可,macOS 下需要建立软链 `ln -s /usr/local/lib/libtaos.dylib`。
|
||||
|
||||
3. java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
sidebar_label: Python
|
||||
title: TDengine Python Connector
|
||||
description: "taospy 是 TDengine 的官方 Python 连接器。taospy 提供了丰富的 API, 使得 Python 应用可以很方便地使用 TDengine。tasopy 对 TDengine 的原生接口和 REST 接口都进行了封装, 分别对应 tasopy 的两个子模块:tasos 和 taosrest。除了对原生接口和 REST 接口的封装,taospy 还提供了符合 Python 数据访问规范(PEP 249)的编程接口。这使得 taospy 和很多第三方工具集成变得简单,比如 SQLAlchemy 和 pandas"
|
||||
description: "taospy 是 TDengine 的官方 Python 连接器。taospy 提供了丰富的 API, 使得 Python 应用可以很方便地使用 TDengine。tasopy 对 TDengine 的原生接口和 REST 接口都进行了封装, 分别对应 tasopy 的两个子模块:taos 和 taosrest。除了对原生接口和 REST 接口的封装,taospy 还提供了符合 Python 数据访问规范(PEP 249)的编程接口。这使得 taospy 和很多第三方工具集成变得简单,比如 SQLAlchemy 和 pandas"
|
||||
---
|
||||
|
||||
import Tabs from "@theme/Tabs";
|
||||
|
@ -25,15 +25,15 @@ Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-con
|
|||
|
||||
## 支持的功能
|
||||
|
||||
- 原生连接支持 TDeingine 的所有核心功能, 包括: 连接管理、执行 SQL、参数绑定、订阅、无模式写入(schemaless)。
|
||||
- 原生连接支持 TDengine 的所有核心功能, 包括: 连接管理、执行 SQL、参数绑定、订阅、无模式写入(schemaless)。
|
||||
- REST 连接支持的功能包括:连接管理、执行 SQL。 (通过执行 SQL 可以: 管理数据库、管理表和超级表、写入数据、查询数据、创建连续查询等)。
|
||||
|
||||
## 安装
|
||||
|
||||
### 准备
|
||||
|
||||
1. 安装 Python。建议使用 Python >= 3.6。如果系统上还没有 Python 可参考 [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) 安装。
|
||||
2. 安装 [pip](https://pypi.org/project/pip/)。大部分情况下 Python 的安装包都自带了 pip 工具, 如果没有请参考 [pip docuemntation](https://pip.pypa.io/en/stable/installation/) 安装。
|
||||
1. 安装 Python。建议使用 Python >= 3.7。如果系统上还没有 Python 可参考 [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) 安装。
|
||||
2. 安装 [pip](https://pypi.org/project/pip/)。大部分情况下 Python 的安装包都自带了 pip 工具, 如果没有请参考 [pip documentation](https://pip.pypa.io/en/stable/installation/) 安装。
|
||||
3. 如果使用原生连接,还需[安装客户端驱动](../#安装客户端驱动)。客户端软件包含了 TDengine 客户端动态链接库(libtaos.so 或 taos.dll) 和 TDengine CLI。
|
||||
|
||||
### 使用 pip 安装
|
||||
|
@ -186,7 +186,7 @@ curl -u root:taosdata http://<FQDN>:<PORT>/rest/sql -d "select server_version()"
|
|||
- `user` :TDengine 用户名。 默认值是 root。
|
||||
- `password` : TDengine 用户密码。 默认值是 taosdata。
|
||||
- `port` : 要连接的数据节点的起始端口,即 serverPort 配置。默认值是 6030。只有在提供了 host 参数的时候,这个参数才生效。
|
||||
- `config` : 客户端配置文件路径。 在 Windows 系统上默认是 `C:\TDengine\cfg`。 在 Linux 系统上默认是 `/etc/taos/`。
|
||||
- `config` : 客户端配置文件路径。 在 Windows 系统上默认是 `C:\TDengine\cfg`。 在 Linux/macOS 系统上默认是 `/etc/taos/`。
|
||||
- `timezone` : 查询结果中 TIMESTAMP 类型的数据,转换为 python 的 datetime 对象时使用的时区。默认为本地时区。
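下面是一个使用上述关键字参数建立原生连接的示意代码,各参数均可省略以使用默认值:

```python
import taos

# 示意:使用关键字参数建立原生连接,未给出的参数将使用默认值
conn = taos.connect(host="localhost", user="root", password="taosdata", port=6030)
conn.close()
```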
|
||||
|
||||
:::warning
|
||||
|
@ -208,8 +208,8 @@ curl -u root:taosdata http://<FQDN>:<PORT>/rest/sql -d "select server_version()"
|
|||
`connect()` 函数的所有参数都是可选的关键字参数。下面是连接参数的具体说明:
|
||||
|
||||
- `url`: taosAdapter REST 服务的 URL。默认是 <http://localhost:6041>。
|
||||
- `user`: TDenigne 用户名。默认是 root。
|
||||
- `password`: TDeingine 用户密码。默认是 taosdata。
|
||||
- `user`: TDengine 用户名。默认是 root。
|
||||
- `password`: TDengine 用户密码。默认是 taosdata。
|
||||
- `timeout`: HTTP 请求超时时间。单位为秒。默认为 `socket._GLOBAL_DEFAULT_TIMEOUT`。 一般无需配置。
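下面是一个使用上述参数建立 REST 连接的示意代码:

```python
import taosrest

# 示意:所有参数均为可选关键字参数,未给出时使用上文所述默认值
conn = taosrest.connect(url="http://localhost:6041", user="root", password="taosdata", timeout=30)
```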
|
||||
|
||||
</TabItem>
|
||||
|
|
|
@ -13,11 +13,13 @@ TDengine 服务端或客户端安装后,`taos.h` 位于:
|
|||
|
||||
- Linux:`/usr/local/taos/include`
|
||||
- Windows:`C:\TDengine\include`
|
||||
- macOS:`/usr/local/include`
|
||||
|
||||
TDengine 客户端驱动的动态库位于:
|
||||
|
||||
- Linux: `/usr/local/taos/driver/libtaos.so`
|
||||
- Windows: `C:\TDengine\taos.dll`
|
||||
- macOS:`/usr/local/lib/libtaos.dylib`
|
||||
|
||||
## 支持的平台
|
||||
|
||||
|
|
|
@ -6,5 +6,6 @@
|
|||
|
||||
- libtaos.so: 在 Linux 系统中成功安装 TDengine 后,依赖的 Linux 版客户端驱动 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。
|
||||
- taos.dll: 在 Windows 系统中安装完客户端之后,依赖的 Windows 版客户端驱动 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
|
||||
- libtaos.dylib: 在 macOS 系统中成功安装 TDengine 后,依赖的 macOS 版客户端驱动 libtaos.dylib 文件会被自动拷贝至 /usr/local/lib/libtaos.dylib,该目录包含在 macOS 自动扫描路径上,无需单独指定。
|
||||
|
||||
:::
|
||||
|
|
|
@ -39,7 +39,7 @@ CREATE DATABASE db_name PRECISION 'ns';
|
|||
| 11 | TINYINT | 1 | 单字节整型,范围 [-128, 127] |
|
||||
| 12 | TINYINT UNSIGNED | 1 | 无符号单字节整型,范围 [0, 255] |
|
||||
| 13 | BOOL | 1 | 布尔型,{true, false} |
|
||||
| 14 | NCHAR | 自定义 | 记录包含多字节字符在内的字符串,如中文字符。每个 NCHAR 字符占用 4 字节的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\'`。NCHAR 使用时须指定字符串大小,类型为 NCHAR(10) 的列表示此列的字符串最多存储 10 个 NCHAR 字符,会固定占用 40 字节的空间。如果用户字符串长度超出声明长度,将会报错。 |
|
||||
| 14 | NCHAR | 自定义 | 记录包含多字节字符在内的字符串,如中文字符。每个 NCHAR 字符占用 4 字节的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\'`。NCHAR 使用时须指定字符串大小,类型为 NCHAR(10) 的列表示此列的字符串最多存储 10 个 NCHAR 字符。如果用户字符串长度超出声明长度,将会报错。 |
|
||||
| 15 | JSON | | JSON 数据类型, 只有 Tag 可以是 JSON 格式 |
|
||||
| 16 | VARCHAR | 自定义 | BINARY 类型的别名 |
|
||||
|
||||
|
|
|
@ -50,6 +50,56 @@ SHOW CREATE STABLE stb_name;
|
|||
DESCRIBE [db_name.]stb_name;
|
||||
```
|
||||
|
||||
### 获取超级表中所有子表的标签信息
|
||||
|
||||
```
|
||||
taos> SHOW TABLE TAGS FROM st1;
|
||||
tbname | id | loc |
|
||||
======================================================================
|
||||
st1s1 | 1 | beijing |
|
||||
st1s2 | 2 | shanghai |
|
||||
st1s3 | 3 | guangzhou |
|
||||
Query OK, 3 rows in database (0.004455s)
|
||||
```
|
||||
|
||||
返回结果集的第一列为子表名,后续列为标签列。
|
||||
|
||||
如果已经知道标签列的名称,可以使用下面的语句来获取指定标签列的值。
|
||||
|
||||
```
|
||||
taos> SELECT DISTINCT TBNAME, id FROM st1;
|
||||
tbname | id |
|
||||
===============================================
|
||||
st1s1 | 1 |
|
||||
st1s2 | 2 |
|
||||
st1s3 | 3 |
|
||||
Query OK, 3 rows in database (0.002891s)
|
||||
```
|
||||
|
||||
需要注意,SELECT 语句中的 DISTINCT 和 TBNAME 都是必不可少的,TDengine 会根据它们对语句进行优化,使之在没有数据或数据非常多的情况下都可以正确并快速的返回标签值。
|
||||
|
||||
### 获取某个子表的标签信息
|
||||
|
||||
```
|
||||
taos> SHOW TAGS FROM st1s1;
|
||||
table_name | db_name | stable_name | tag_name | tag_type | tag_value |
|
||||
============================================================================================================
|
||||
st1s1 | test | st1 | id | INT | 1 |
|
||||
st1s1 | test | st1 | loc | VARCHAR(20) | beijing |
|
||||
Query OK, 2 rows in database (0.003684s)
|
||||
```
|
||||
|
||||
同样的,也可以用 SELECT 语句来查询指定标签列的值。
|
||||
|
||||
```
|
||||
taos> SELECT DISTINCT TBNAME, id, loc FROM st1s1;
|
||||
tbname | id | loc |
|
||||
==================================================
|
||||
st1s1 | 1 | beijing |
|
||||
Query OK, 1 rows in database (0.001884s)
|
||||
```
|
||||
|
||||
|
||||
## 删除超级表
|
||||
|
||||
```
|
||||
|
|
|
@ -12,7 +12,7 @@ SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW
|
|||
SELECT [DISTINCT] select_list
|
||||
from_clause
|
||||
[WHERE condition]
|
||||
[PARTITION BY tag_list]
|
||||
[partition_by_clause]
|
||||
[window_clause]
|
||||
[group_by_clause]
|
||||
[order_by_clause]
|
||||
|
@ -53,6 +53,9 @@ window_clause: {
|
|||
| STATE_WINDOW(col)
|
||||
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
|
||||
|
||||
partition_by_clause:
|
||||
PARTITION BY expr [, expr] ...
|
||||
|
||||
group_by_clause:
|
||||
GROUP BY expr [, expr] ... HAVING condition
|
||||
|
||||
|
|
|
@ -4,9 +4,9 @@ title: 特色查询
|
|||
description: TDengine 提供的时序数据特有的查询功能
|
||||
---
|
||||
|
||||
TDengine 是专为时序数据而研发的大数据平台,存储和计算都针对时序数据的特定进行了量身定制,在支持标准 SQL 的基础之上,还提供了一系列贴合时序业务场景的特色查询语法,极大的方便时序场景的应用开发。
|
||||
TDengine 在支持标准 SQL 的基础之上,还提供了一系列满足时序业务场景需求的特色查询语法,这些语法能够为时序场景的应用的开发带来极大的便利。
|
||||
|
||||
TDengine 提供的特色查询包括数据切分查询和窗口切分查询。
|
||||
TDengine 提供的特色查询包括数据切分查询和时间窗口切分查询。
|
||||
|
||||
## 数据切分查询
|
||||
|
||||
|
@ -31,7 +31,7 @@ select max(current) from meters partition by location interval(10m)
|
|||
|
||||
## 窗口切分查询
|
||||
|
||||
TDengine 支持按时间段窗口切分方式进行聚合结果查询,比如温度传感器每秒采集一次数据,但需查询每隔 10 分钟的温度平均值。这种场景下可以使用窗口子句来获得需要的查询结果。窗口子句用于针对查询的数据集合按照窗口切分成为查询子集并进行聚合,窗口包含时间窗口(time window)、状态窗口(status window)、会话窗口(session window)三种窗口。其中时间窗口又可划分为滑动时间窗口和翻转时间窗口。窗口切分查询语法如下:
|
||||
TDengine 支持按时间窗口切分方式进行聚合结果查询,比如温度传感器每秒采集一次数据,但需查询每隔 10 分钟的温度平均值。这种场景下可以使用窗口子句来获得需要的查询结果。窗口子句用于针对查询的数据集合按照窗口切分成为查询子集并进行聚合,窗口包含时间窗口(time window)、状态窗口(status window)、会话窗口(session window)三种窗口。其中时间窗口又可划分为滑动时间窗口和翻转时间窗口。窗口切分查询语法如下:
|
||||
|
||||
```sql
|
||||
SELECT select_list FROM tb_name
|
||||
|
@ -132,6 +132,10 @@ SELECT * FROM (SELECT COUNT(*) AS cnt, FIRST(ts) AS fst, status FROM temp_tb_1 S
|
|||
SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, tol_val);
|
||||
```
|
||||
|
||||
### 时间戳伪列
|
||||
|
||||
窗口聚合查询结果中,如果 SQL 语句中没有指定输出查询结果中的时间戳列,那么最终结果中不会自动包含窗口的时间列信息。如果需要在结果中输出聚合结果所对应的时间窗口信息,需要在 SELECT 子句中使用时间戳相关的伪列: 时间窗口起始时间 (\_WSTART), 时间窗口结束时间 (\_WEND), 时间窗口持续时间 (\_WDURATION), 以及查询整体窗口相关的伪列: 查询窗口起始时间(\_QSTART) 和查询窗口结束时间(\_QEND)。需要注意的是时间窗口起始时间和结束时间均是闭区间,时间窗口持续时间是数据当前时间分辨率下的数值。例如,如果当前数据库的时间分辨率是毫秒,那么结果中 500 就表示当前时间窗口的持续时间是 500毫秒 (500 ms)。
|
||||
|
||||
### 示例
|
||||
|
||||
智能电表的建表语句如下:
|
||||
|
@ -143,8 +147,10 @@ CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS
|
|||
针对智能电表采集的数据,以 10 分钟为一个阶段,计算过去 24 小时的电流数据的平均值、最大值、电流的中位数。如果没有计算值,用前一个非 NULL 值填充。使用的查询语句如下:
|
||||
|
||||
```
|
||||
SELECT AVG(current), MAX(current), APERCENTILE(current, 50) FROM meters
|
||||
SELECT _WSTART, _WEND, AVG(current), MAX(current), APERCENTILE(current, 50) FROM meters
|
||||
WHERE ts>=NOW-1d and ts<=now
|
||||
INTERVAL(10m)
|
||||
FILL(PREV);
|
||||
```
|
||||
|
||||
|
||||
|
|
|
@ -137,19 +137,3 @@ local_option: {
|
|||
```sql
|
||||
SHOW LOCAL VARIABLES;
|
||||
```
|
||||
|
||||
## 合并 vgroup
|
||||
|
||||
```sql
|
||||
MERGE VGROUP vgroup_no1 vgroup_no2;
|
||||
```
|
||||
|
||||
如果在系统实际运行一段时间后,因为不同时间线的数据特征不同导致在 vgroups 之间的数据和负载分布不均衡,可以通过合并或拆分 vgroups 的方式逐步实现负载均衡。
|
||||
|
||||
## 拆分 vgroup
|
||||
|
||||
```sql
|
||||
SPLIT VGROUP vgroup_no;
|
||||
```
|
||||
|
||||
会创建一个新的 vgroup,并将指定 vgroup 中的数据按照一致性 HASH 迁移一部分到新的 vgroup 中。此过程中,原 vgroup 可以正常提供读写服务。
|
||||
|
|
|
@ -30,7 +30,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :------------: | ------------ | ------------------------- |
|
||||
| 1 | vnodes | SMALLINT | dnode 中的实际 vnode 个数 |
|
||||
| 1 | vnodes | SMALLINT | dnode 中的实际 vnode 个数。需要注意,`vnodes` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 2 | support_vnodes | SMALLINT | 最多支持的 vnode 个数 |
|
||||
| 3 | status | BINARY(10) | 当前状态 |
|
||||
| 4 | note | BINARY(256) | 离线原因等信息 |
|
||||
|
@ -50,16 +50,6 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| 4 | role_time | TIMESTAMP | 成为当前角色的时间 |
|
||||
| 5 | create_time | TIMESTAMP | 创建时间 |
|
||||
|
||||
## INS_MODULES
|
||||
|
||||
提供组件的相关信息。也可以使用 SHOW MODULES 来查询这些信息
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :------: | ------------ | ---------- |
|
||||
| 1 | id | SMALLINT | module id |
|
||||
| 2 | endpoint | BINARY(134) | 组件的地址 |
|
||||
| 3 | module | BINARY(10) | 组件状态 |
|
||||
|
||||
## INS_QNODES
|
||||
|
||||
当前系统中 QNODE 的信息。也可以使用 SHOW QNODES 来查询这些信息。
|
||||
|
@ -89,33 +79,33 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| 1 | name | BINARY(32) | 数据库名 |
|
||||
| 2 | create_time | TIMESTAMP | 创建时间 |
|
||||
| 3 | ntables | INT | 数据库中表的数量,包含子表和普通表但不包含超级表 |
|
||||
| 4 | vgroups | INT | 数据库中有多少个 vgroup |
|
||||
| 6 | replica | INT | 副本数 |
|
||||
| 7 | quorum | BINARY(3) | 强一致性 |
|
||||
| 8 | duration | INT | 单文件存储数据的时间跨度 |
|
||||
| 9 | keep | INT | 数据保留时长 |
|
||||
| 10 | buffer | INT | 每个 vnode 写缓存的内存块大小,单位 MB |
|
||||
| 11 | pagesize | INT | 每个 VNODE 中元数据存储引擎的页大小,单位为 KB |
|
||||
| 12 | pages | INT | 每个 vnode 元数据存储引擎的缓存页个数 |
|
||||
| 13 | minrows | INT | 文件块中记录的最大条数 |
|
||||
| 14 | maxrows | INT | 文件块中记录的最小条数 |
|
||||
| 15 | comp | INT | 数据压缩方式 |
|
||||
| 16 | precision | BINARY(2) | 时间分辨率 |
|
||||
| 4 | vgroups | INT | 数据库中有多少个 vgroup。需要注意,`vgroups` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 6 | replica | INT | 副本数。需要注意,`replica` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 7 | strict | BINARY(3) | 强一致性。需要注意,`strict` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 8 | duration | INT | 单文件存储数据的时间跨度。需要注意,`duration` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 9 | keep | INT | 数据保留时长。需要注意,`keep` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 10 | buffer | INT | 每个 vnode 写缓存的内存块大小,单位 MB。需要注意,`buffer` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 11 | pagesize | INT | 每个 VNODE 中元数据存储引擎的页大小,单位为 KB。需要注意,`pagesize` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 12 | pages | INT | 每个 vnode 元数据存储引擎的缓存页个数。需要注意,`pages` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 13 | minrows | INT | 文件块中记录的最大条数。需要注意,`minrows` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 14 | maxrows | INT | 文件块中记录的最小条数。需要注意,`maxrows` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 15 | comp | INT | 数据压缩方式。需要注意,`comp` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 16 | precision | BINARY(2) | 时间分辨率。需要注意,`precision` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 17 | status | BINARY(10) | 数据库状态 |
|
||||
| 18 | retention | BINARY (60) | 数据的聚合周期和保存时长 |
|
||||
| 19 | single_stable | BOOL | 表示此数据库中是否只可以创建一个超级表 |
|
||||
| 20 | cachemodel | BINARY(60) | 表示是否在内存中缓存子表的最近数据 |
|
||||
| 21 | cachesize | INT | 表示每个 vnode 中用于缓存子表最近数据的内存大小 |
|
||||
| 22 | wal_level | INT | WAL 级别 |
|
||||
| 23 | wal_fsync_period | INT | 数据落盘周期 |
|
||||
| 24 | wal_retention_period | INT | WAL 的保存时长 |
|
||||
| 25 | wal_retention_size | INT | WAL 的保存上限 |
|
||||
| 26 | wal_roll_period | INT | wal 文件切换时长 |
|
||||
| 27 | wal_segment_size | BIGINT | wal 单个文件大小 |
|
||||
| 28 | stt_trigger | SMALLINT | 触发文件合并的落盘文件的个数 |
|
||||
| 29 | table_prefix | SMALLINT | 内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的前缀的长度 |
|
||||
| 30 | table_suffix | SMALLINT | 内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的后缀的长度 |
|
||||
| 31 | tsdb_pagesize | INT | 时序数据存储引擎中的页大小 |
|
||||
| 18 | retentions | BINARY (60) | 数据的聚合周期和保存时长。需要注意,`retentions` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 19 | single_stable | BOOL | 表示此数据库中是否只可以创建一个超级表。需要注意,`single_stable` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 20 | cachemodel | BINARY(60) | 表示是否在内存中缓存子表的最近数据。需要注意,`cachemodel` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 21 | cachesize | INT | 表示每个 vnode 中用于缓存子表最近数据的内存大小。需要注意,`cachesize` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 22 | wal_level | INT | WAL 级别。需要注意,`wal_level` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 23 | wal_fsync_period | INT | 数据落盘周期。需要注意,`wal_fsync_period` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 24 | wal_retention_period | INT | WAL 的保存时长。需要注意,`wal_retention_period` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 25 | wal_retention_size | INT | WAL 的保存上限。需要注意,`wal_retention_size` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 26 | wal_roll_period | INT | wal 文件切换时长。需要注意,`wal_roll_period` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 27 | wal_segment_size | BIGINT | wal 单个文件大小。需要注意,`wal_segment_size` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 28 | stt_trigger | SMALLINT | 触发文件合并的落盘文件的个数。需要注意,`stt_trigger` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 29 | table_prefix | SMALLINT | 内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的前缀的长度。需要注意,`table_prefix` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 30 | table_suffix | SMALLINT | 内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的后缀的长度。需要注意,`table_suffix` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 31 | tsdb_pagesize | INT | 时序数据存储引擎中的页大小。需要注意,`tsdb_pagesize` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
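例如,下面的示意查询演示了如何用反引号转义上述关键字列名:

```sql
SELECT name, `vgroups`, `replica`, `keep` FROM information_schema.ins_databases;
```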
|
||||
|
||||
## INS_FUNCTIONS
|
||||
|
||||
|
@ -124,8 +114,8 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :---------: | ------------ | -------------- |
|
||||
| 1 | name | BINARY(64) | 函数名 |
|
||||
| 2 | comment | BINARY(255) | 补充说明 |
|
||||
| 3 | aggregate | INT | 是否为聚合函数 |
|
||||
| 2 | comment | BINARY(255) | 补充说明。需要注意,`comment` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 3 | aggregate | INT | 是否为聚合函数。需要注意,`aggregate` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 4 | output_type | BINARY(31) | 输出类型 |
|
||||
| 5 | create_time | TIMESTAMP | 创建时间 |
|
||||
| 6 | code_len | INT | 代码长度 |
|
||||
|
@ -154,12 +144,12 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| 2 | db_name | BINARY(64) | 超级表所在的数据库的名称 |
|
||||
| 3 | create_time | TIMESTAMP | 创建时间 |
|
||||
| 4 | columns | INT | 列数目 |
|
||||
| 5 | tags | INT | 标签数目 |
|
||||
| 5 | tags | INT | 标签数目。需要注意,`tags` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 6 | last_update | TIMESTAMP | 最后更新时间 |
|
||||
| 7 | table_comment | BINARY(1024) | 表注释 |
|
||||
| 8 | watermark | BINARY(64) | 窗口的关闭时间 |
|
||||
| 9 | max_delay | BINARY(64) | 推送计算结果的最大延迟 |
|
||||
| 10 | rollup | BINARY(128) | rollup 聚合函数 |
|
||||
| 8 | watermark | BINARY(64) | 窗口的关闭时间。需要注意,`watermark` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 9 | max_delay | BINARY(64) | 推送计算结果的最大延迟。需要注意,`max_delay` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 10 | rollup | BINARY(128) | rollup 聚合函数。需要注意,`rollup` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
|
||||
## INS_TABLES
|
||||
|
||||
|
@ -174,7 +164,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| 5 | stable_name | BINARY(192) | 所属的超级表表名 |
|
||||
| 6 | uid | BIGINT | 表 id |
|
||||
| 7 | vgroup_id | INT | vgroup id |
|
||||
| 8 | ttl | INT | 表的生命周期 |
|
||||
| 8 | ttl | INT | 表的生命周期。需要注意,`ttl` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 9 | table_comment | BINARY(1024) | 表注释 |
|
||||
| 10 | type | BINARY(20) | 表类型 |
|
||||
|
||||
|
@ -207,13 +197,13 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| --- | :---------: | ------------ | -------------------------------------------------- |
|
||||
| 1 | version | BINARY(9) | 企业版授权说明:official(官方授权的)/trial(试用的) |
|
||||
| 2 | cpu_cores | BINARY(9) | 授权使用的 CPU 核心数量 |
|
||||
| 3 | dnodes | BINARY(10) | 授权使用的 dnode 节点数量 |
|
||||
| 4 | streams | BINARY(10) | 授权创建的流数量 |
|
||||
| 5 | users | BINARY(10) | 授权创建的用户数量 |
|
||||
| 6 | accounts | BINARY(10) | 授权创建的帐户数量 |
|
||||
| 7 | storage | BINARY(21) | 授权使用的存储空间大小 |
|
||||
| 8 | connections | BINARY(21) | 授权使用的客户端连接数量 |
|
||||
| 9 | databases | BINARY(11) | 授权使用的数据库数量 |
|
||||
| 3 | dnodes | BINARY(10) | 授权使用的 dnode 节点数量。需要注意,`dnodes` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 4 | streams | BINARY(10) | 授权创建的流数量。需要注意,`streams` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 5 | users | BINARY(10) | 授权创建的用户数量。需要注意,`users` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 6 | accounts | BINARY(10) | 授权创建的帐户数量。需要注意,`accounts` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 7 | storage | BINARY(21) | 授权使用的存储空间大小。需要注意,`storage` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 8 | connections | BINARY(21) | 授权使用的客户端连接数量。需要注意,`connections` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 9 | databases | BINARY(11) | 授权使用的数据库数量。需要注意,`databases` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 10 | speed | BINARY(9) | 授权使用的数据点每秒写入数量 |
|
||||
| 11 | querytime | BINARY(9) | 授权使用的查询总时长 |
|
||||
| 12 | timeseries | BINARY(21) | 授权使用的测点数量 |
|
||||
|
@ -228,7 +218,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| --- | :-------: | ------------ | ------------------------------------------------------ |
|
||||
| 1 | vgroup_id | INT | vgroup id |
|
||||
| 2 | db_name | BINARY(32) | 数据库名 |
|
||||
| 3 | tables | INT | 此 vgroup 内有多少表 |
|
||||
| 3 | tables | INT | 此 vgroup 内有多少表。需要注意,`tables` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 4 | status | BINARY(10) | 此 vgroup 的状态 |
|
||||
| 5 | v1_dnode | INT | 第一个成员所在的 dnode 的 id |
|
||||
| 6 | v1_status | BINARY(10) | 第一个成员的状态 |
|
||||
|
@ -247,7 +237,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :------: | ------------ | ------------ |
|
||||
| 1 | name | BINARY(32) | 配置项名称 |
|
||||
| 2 | value | BINARY(64) | 该配置项的值 |
|
||||
| 2 | value | BINARY(64) | 该配置项的值。需要注意,`value` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
|
||||
## INS_DNODE_VARIABLES
|
||||
|
||||
|
@ -257,7 +247,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| --- | :------: | ------------ | ------------ |
|
||||
| 1 | dnode_id | INT | dnode 的 ID |
|
||||
| 2 | name | BINARY(32) | 配置项名称 |
|
||||
| 3 | value | BINARY(64) | 该配置项的值 |
|
||||
| 3 | value | BINARY(64) | 该配置项的值。需要注意,`value` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
|
||||
## INS_TOPICS
|
||||
|
||||
|
@ -288,5 +278,5 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| 5 | source_db | BINARY(64) | 源数据库 |
|
||||
| 6 | target_db | BINARY(64) | 目的数据库 |
|
||||
| 7 | target_table | BINARY(192) | 流计算写入的目标表 |
|
||||
| 8 | watermark | BIGINT | watermark,详见 SQL 手册流式计算 |
|
||||
| 9 | trigger | INT | 计算结果推送模式,详见 SQL 手册流式计算 |
|
||||
| 8 | watermark | BIGINT | watermark,详见 SQL 手册流式计算。需要注意,`watermark` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 9 | trigger | INT | 计算结果推送模式,详见 SQL 手册流式计算。需要注意,`trigger` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
|
|
|
@ -14,14 +14,6 @@ SHOW APPS;
|
|||
|
||||
显示接入集群的应用(客户端)信息。
|
||||
|
||||
## SHOW BNODES
|
||||
|
||||
```sql
|
||||
SHOW BNODES;
|
||||
```
|
||||
|
||||
显示当前系统中存在的 BNODE (backup node, 即备份节点)的信息。
|
||||
|
||||
## SHOW CLUSTER
|
||||
|
||||
```sql
|
||||
|
@ -129,14 +121,6 @@ SHOW MNODES;
|
|||
|
||||
显示当前系统中 MNODE 的信息。
|
||||
|
||||
## SHOW MODULES
|
||||
|
||||
```sql
|
||||
SHOW MODULES;
|
||||
```
|
||||
|
||||
显示当前系统中所安装的组件的信息。
|
||||
|
||||
## SHOW QNODES
|
||||
|
||||
```sql
|
||||
|
@ -153,15 +137,7 @@ SHOW SCORES;
|
|||
|
||||
显示系统被许可授权的容量的信息。
|
||||
|
||||
注:企业版独有
|
||||
|
||||
## SHOW SNODES
|
||||
|
||||
```sql
|
||||
SHOW SNODES;
|
||||
```
|
||||
|
||||
显示当前系统中 SNODE (流计算节点)的信息。
|
||||
注:企业版独有。
|
||||
|
||||
## SHOW STABLES
|
||||
|
||||
|
|
|
@ -189,7 +189,7 @@ AllowWebSockets
|
|||
/influxdb/v1/write
|
||||
```
|
||||
|
||||
支持 InfluxDB 查询参数如下:
|
||||
支持 InfluxDB 参数如下:
|
||||
|
||||
- `db` 指定 TDengine 使用的数据库名
|
||||
- `precision` TDengine 使用的时间精度
|
||||
|
@ -197,7 +197,7 @@ AllowWebSockets
|
|||
- `p` TDengine 密码
|
||||
|
||||
注意: 目前不支持 InfluxDB 的 token 验证方式,仅支持 Basic 验证和查询参数验证。
|
||||
|
||||
示例: curl --request POST http://127.0.0.1:6041/influxdb/v1/write?db=test --user "root:taosdata" --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"
|
||||
### OpenTSDB
|
||||
|
||||
您可以使用任何支持 http 协议的客户端访问 Restful 接口地址 `http://<fqdn>:6041/<APIEndPoint>` 来写入 OpenTSDB 兼容格式的数据到 TDengine。EndPoint 如下:
|
||||
|
|
|
@ -340,7 +340,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
|
|||
|
||||
- **values** : nchar/binary 列/标签的值域,将从值中随机选择。
|
||||
|
||||
- **sma**: 将该列加入bsma中,值为 "yes" 或者 "no",默认为 "no"。
|
||||
- **sma**: 将该列加入 SMA 中,值为 "yes" 或者 "no",默认为 "no"。
|
||||
|
||||
#### 插入行为配置参数
|
||||
|
||||
|
|
|
@ -12,7 +12,7 @@ TDengine 命令行程序(以下简称 TDengine CLI)是用户操作 TDengine
|
|||
|
||||
## 执行
|
||||
|
||||
要进入 TDengine CLI,您只要在 Linux 终端或 Windows 终端执行 `taos` 即可。
|
||||
要进入 TDengine CLI,您只要在终端执行 `taos` 即可。
|
||||
|
||||
```bash
|
||||
taos
|
||||
|
|
|
@ -5,29 +5,30 @@ description: "TDengine 服务端、客户端和连接器支持的平台列表"
|
|||
|
||||
## TDengine 服务端支持的平台列表
|
||||
|
||||
| | **Windows server 2016/2019** | **Windows 10/11** | **CentOS 7.9/8** | **Ubuntu 18/20** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** |
|
||||
| ------------ | ---------------------------- | ----------------- | ---------------- | ---------------- | ------------ | ----------------- | ---------------- |
|
||||
| X64 | ● | ● | ● | ● | ● | ● | ● |
|
||||
| 树莓派 ARM64 | | | ● | | | | |
|
||||
| 华为云 ARM64 | | | | ● | | | |
|
||||
| | **Windows server 2016/2019** | **Windows 10/11** | **CentOS 7.9/8** | **Ubuntu 18/20** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **macOS** |
|
||||
| ------------ | ---------------------------- | ----------------- | ---------------- | ---------------- | ------------ | ----------------- | ---------------- | --------- |
|
||||
| X64 | ● | ● | ● | ● | ● | ● | ● | ● |
|
||||
| 树莓派 ARM64 | | | ● | | | | | |
|
||||
| 华为云 ARM64 | | | | ● | | | | |
|
||||
| M1 | | | | | | | | ● |
|
||||
|
||||
注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。
|
||||
|
||||
## TDengine 客户端和连接器支持的平台列表
|
||||
|
||||
目前 TDengine 的连接器可支持的平台广泛,目前包括:X64/X86/ARM64/ARM32/MIPS/Alpha 等硬件平台,以及 Linux/Win64/Win32 等开发环境。
|
||||
目前 TDengine 的连接器可支持的平台广泛,目前包括:X64/X86/ARM64/ARM32/MIPS/Alpha 等硬件平台,以及 Linux/Win64/Win32/macOS 等开发环境。
|
||||
|
||||
对照矩阵如下:
|
||||
|
||||
| **CPU** | **X64 64bit** | **X64 64bit** | **ARM64** |
|
||||
| ----------- | ------------- | ------------- | --------- |
|
||||
| **OS** | **Linux** | **Win64** | **Linux** |
|
||||
| **C/C++** | ● | ● | ● |
|
||||
| **JDBC** | ● | ● | ● |
|
||||
| **Python** | ● | ● | ● |
|
||||
| **Go** | ● | ● | ● |
|
||||
| **NodeJs** | ● | ● | ● |
|
||||
| **C#** | ● | ● | ○ |
|
||||
| **RESTful** | ● | ● | ● |
|
||||
| **CPU** | **X64 64bit** | **X64 64bit** | **ARM64** | **X64 64bit** | **ARM64** |
|
||||
| ----------- | ------------- | ------------- | --------- | ------------- | --------- |
|
||||
| **OS** | **Linux** | **Win64** | **Linux** | **macOS** | **macOS** |
|
||||
| **C/C++** | ● | ● | ● | ● | ● |
|
||||
| **JDBC** | ● | ● | ● | ○ | ○ |
|
||||
| **Python** | ● | ● | ● | ● | ● |
|
||||
| **Go** | ● | ● | ● | ● | ● |
|
||||
| **NodeJs** | ● | ● | ● | ○ | ○ |
|
||||
| **C#** | ● | ● | ○ | ○ | ○ |
|
||||
| **RESTful** | ● | ● | ● | ● | ● |
|
||||
|
||||
注:● 表示官方测试验证通过,○ 表示非官方测试验证通过,-- 表示未经验证。
|
||||
|
|
|
@ -205,7 +205,7 @@ taos --dump-config
|
|||
:::info
|
||||
为应对多时区的数据写入和查询问题,TDengine 采用 Unix 时间戳(Unix Timestamp)来记录和存储时间戳。Unix 时间戳的特点决定了任一时刻不论在任何时区,产生的时间戳均一致。需要注意的是,Unix 时间戳是在客户端完成转换和记录。为了确保客户端其他形式的时间转换为正确的 Unix 时间戳,需要设置正确的时区。
|
||||
|
||||
在 Linux 系统中,客户端会自动读取系统设置的时区信息。用户也可以采用多种方式在配置文件设置时区。例如:
|
||||
在 Linux/macOS 中,客户端会自动读取系统设置的时区信息。用户也可以采用多种方式在配置文件设置时区。例如:
|
||||
|
||||
```
|
||||
timezone UTC-8
|
||||
|
@ -248,9 +248,9 @@ SELECT count(*) FROM table_name WHERE TS<1554984068000;
|
|||
:::info
|
||||
TDengine 为存储中文、日文、韩文等非 ASCII 编码的宽字符,提供一种专门的字段类型 nchar。写入 nchar 字段的数据将统一采用 UCS4-LE 格式进行编码并发送到服务器。需要注意的是,编码正确性是客户端来保证。因此,如果用户想要正常使用 nchar 字段来存储诸如中文、日文、韩文等非 ASCII 字符,需要正确设置客户端的编码格式。
|
||||
|
||||
客户端的输入的字符均采用操作系统当前默认的编码格式,在 Linux 系统上多为 UTF-8,部分中文系统编码则可能是 GB18030 或 GBK 等。在 docker 环境中默认的编码是 POSIX。在中文版 Windows 系统中,编码则是 CP936。客户端需要确保正确设置自己所使用的字符集,即客户端运行的操作系统当前编码字符集,才能保证 nchar 中的数据正确转换为 UCS4-LE 编码格式。
|
||||
客户端的输入的字符均采用操作系统当前默认的编码格式,在 Linux/macOS 系统上多为 UTF-8,部分中文系统编码则可能是 GB18030 或 GBK 等。在 docker 环境中默认的编码是 POSIX。在中文版 Windows 系统中,编码则是 CP936。客户端需要确保正确设置自己所使用的字符集,即客户端运行的操作系统当前编码字符集,才能保证 nchar 中的数据正确转换为 UCS4-LE 编码格式。
|
||||
|
||||
在 Linux 中 locale 的命名规则为: <语言>\_<地区>.<字符集编码> 如:zh_CN.UTF-8,zh 代表中文,CN 代表大陆地区,UTF-8 表示字符集。字符集编码为客户端正确解析本地字符串提供编码转换的说明。Linux 系统与 Mac OSX 系统可以通过设置 locale 来确定系统的字符编码,由于 Windows 使用的 locale 中不是 POSIX 标准的 locale 格式,因此在 Windows 下需要采用另一个配置参数 charset 来指定字符编码。在 Linux 系统中也可以使用 charset 来指定字符编码。
|
||||
在 Linux/macOS 中 locale 的命名规则为: <语言>\_<地区>.<字符集编码> 如:zh_CN.UTF-8,zh 代表中文,CN 代表大陆地区,UTF-8 表示字符集。字符集编码为客户端正确解析本地字符串提供编码转换的说明。Linux/macOS 可以通过设置 locale 来确定系统的字符编码,由于 Windows 使用的 locale 中不是 POSIX 标准的 locale 格式,因此在 Windows 下需要采用另一个配置参数 charset 来指定字符编码。在 Linux/macOS 中也可以使用 charset 来指定字符编码。
|
||||
|
||||
:::
|
||||
|
||||
|
@ -263,9 +263,9 @@ TDengine 为存储中文、日文、韩文等非 ASCII 编码的宽字符,提
|
|||
| 缺省值 | 系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过 API 设置 |
|
||||
|
||||
:::info
|
||||
如果配置文件中不设置 charset,在 Linux 系统中,taos 在启动时候,自动读取系统当前的 locale 信息,并从 locale 信息中解析提取 charset 编码格式。如果自动读取 locale 信息失败,则尝试读取 charset 配置,如果读取 charset 配置也失败,则中断启动过程。
|
||||
如果配置文件中不设置 charset,在 Linux/macOS 中,taos 在启动时候,自动读取系统当前的 locale 信息,并从 locale 信息中解析提取 charset 编码格式。如果自动读取 locale 信息失败,则尝试读取 charset 配置,如果读取 charset 配置也失败,则中断启动过程。
|
||||
|
||||
在 Linux 系统中,locale 信息包含了字符编码信息,因此正确设置了 Linux 系统 locale 以后可以不用再单独设置 charset。例如:
|
||||
在 Linux/macOS 中,locale 信息包含了字符编码信息,因此正确设置了 Linux/macOS 的 locale 以后可以不用再单独设置 charset。例如:
|
||||
|
||||
```
|
||||
locale zh_CN.UTF-8
|
||||
|
@ -279,7 +279,7 @@ charset CP936
|
|||
|
||||
如果需要调整字符编码,请查阅当前操作系统使用的编码,并在配置文件中正确设置。
|
||||
|
||||
在 Linux 系统中,如果用户同时设置了 locale 和字符集编码 charset,并且 locale 和 charset 的不一致,后设置的值将覆盖前面设置的值。
|
||||
在 Linux/macOS 中,如果用户同时设置了 locale 和字符集编码 charset,并且 locale 和 charset 的不一致,后设置的值将覆盖前面设置的值。
|
||||
|
||||
```
|
||||
locale zh_CN.UTF-8
|
||||
|
|
|
@ -7,7 +7,7 @@ description: 一些常见问题的诊断技巧
|
|||
|
||||
当出现客户端应用无法访问服务端时,需要确认客户端与服务端之间网络的各端口连通情况,以便有针对性地排除故障。
|
||||
|
||||
目前网络连接诊断支持在:Linux 与 Linux,Linux 与 Windows 之间进行诊断测试。
|
||||
目前网络连接诊断支持在:Linux/Windows/macOS 之间进行诊断测试。
|
||||
|
||||
诊断步骤:
|
||||
|
||||
|
|
|
@ -56,7 +56,7 @@ description: 一些常见问题的解决方法汇总
|
|||
|
||||
3. 在服务器,执行 `systemctl status taosd` 检查*taosd*运行状态。如果没有运行,启动*taosd*
|
||||
|
||||
4. 确认客户端连接时指定了正确的服务器 FQDN (Fully Qualified Domain Name —— 可在服务器上执行 Linux 命令 hostname -f 获得),FQDN 配置参考:[一篇文章说清楚 TDengine 的 FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。
|
||||
4. 确认客户端连接时指定了正确的服务器 FQDN (Fully Qualified Domain Name —— 可在服务器上执行 Linux/macOS 命令 hostname -f 获得),FQDN 配置参考:[一篇文章说清楚 TDengine 的 FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。
|
||||
|
||||
5. ping 服务器 FQDN,如果没有反应,请检查你的网络,DNS 设置,或客户端所在计算机的系统 hosts 文件。如果部署的是 TDengine 集群,客户端需要能 ping 通所有集群节点的 FQDN。
|
||||
|
||||
|
@ -64,18 +64,20 @@ description: 一些常见问题的解决方法汇总
|
|||
|
||||
7. 对于 Linux 上的 JDBC(ODBC, Python, Go 等接口类似)连接, 确保*libtaos.so*在目录*/usr/local/taos/driver*里, 并且*/usr/local/taos/driver*在系统库函数搜索路径*LD_LIBRARY_PATH*里
|
||||
|
||||
8. 对于 Windows 上的 JDBC, ODBC, Python, Go 等连接,确保*C:\TDengine\driver\taos.dll*在你的系统库函数搜索目录里 (建议*taos.dll*放在目录 _C:\Windows\System32_)
|
||||
8. 对于 macOS 上的 JDBC(ODBC, Python, Go 等接口类似)连接, 确保*libtaos.dylib*在目录*/usr/local/lib*里, 并且*/usr/local/lib*在系统库函数搜索路径*LD_LIBRARY_PATH*里
|
||||
|
||||
9. 如果仍不能排除连接故障
|
||||
9. 对于 Windows 上的 JDBC, ODBC, Python, Go 等连接,确保*C:\TDengine\driver\taos.dll*在你的系统库函数搜索目录里 (建议*taos.dll*放在目录 _C:\Windows\System32_)
|
||||
|
||||
- Linux 系统请使用命令行工具 nc 来分别判断指定端口的 TCP 和 UDP 连接是否通畅
|
||||
10. 如果仍不能排除连接故障
|
||||
|
||||
- Linux/macOS 系统请使用命令行工具 nc 来分别判断指定端口的 TCP 和 UDP 连接是否通畅
|
||||
检查 UDP 端口连接是否工作:`nc -vuz {hostIP} {port} `
|
||||
检查服务器侧 TCP 端口连接是否工作:`nc -l {port}`
|
||||
检查客户端侧 TCP 端口连接是否工作:`nc {hostIP} {port}`
|
||||
|
||||
- Windows 系统请使用 PowerShell 命令 Test-NetConnection -ComputerName {fqdn} -Port {port} 检测服务端端口是否可访问
|
||||
|
||||
10. 也可以使用 taos 程序内嵌的网络连通检测功能,来验证服务器和客户端之间指定的端口连接是否通畅:[诊断及其他](https://docs.taosdata.com/3.0-preview/operation/diagnose/)。
|
||||
11. 也可以使用 taos 程序内嵌的网络连通检测功能,来验证服务器和客户端之间指定的端口连接是否通畅:[诊断及其他](https://docs.taosdata.com/3.0-preview/operation/diagnose/)。
|
||||
|
||||
### 5. 遇到错误 Unable to resolve FQDN” 怎么办?
|
||||
|
||||
|
|
|
@ -49,7 +49,7 @@ typedef struct {
|
|||
TSKEY ts;
|
||||
} SWinKey;
|
||||
|
||||
static inline int SWinKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2) {
|
||||
static inline int sWinKeyCmprImpl(const void* pKey1, const void* pKey2) {
|
||||
SWinKey* pWin1 = (SWinKey*)pKey1;
|
||||
SWinKey* pWin2 = (SWinKey*)pKey2;
|
||||
|
||||
|
@ -68,6 +68,10 @@ static inline int SWinKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, i
|
|||
return 0;
|
||||
}
|
||||
|
||||
static inline int winKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2) {
|
||||
return sWinKeyCmprImpl(pKey1, pKey2);
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
uint64_t groupId;
|
||||
TSKEY ts;
|
||||
|
@ -157,7 +161,7 @@ typedef struct SDataBlockInfo {
|
|||
int32_t rowSize;
|
||||
uint64_t uid; // the uid of table, from which current data block comes
|
||||
uint16_t blockId; // block id, generated by physical planner
|
||||
uint64_t groupId; // no need to serialize
|
||||
uint64_t groupId;
|
||||
int16_t hasVarCol;
|
||||
uint32_t capacity;
|
||||
// TODO: optimize and remove following
|
||||
|
@ -166,6 +170,9 @@ typedef struct SDataBlockInfo {
|
|||
EStreamType type; // used for stream, do not serialize
|
||||
STimeWindow calWin; // used for stream, do not serialize
|
||||
TSKEY watermark; // used for stream
|
||||
|
||||
char parTbName[TSDB_TABLE_NAME_LEN]; // used for stream partition
|
||||
STag* pTag; // used for stream partition
|
||||
} SDataBlockInfo;
|
||||
|
||||
typedef struct SSDataBlock {
|
||||
|
@ -334,6 +341,7 @@ typedef struct SSortExecInfo {
|
|||
#define GROUPID_COLUMN_INDEX 3
|
||||
#define CALCULATE_START_TS_COLUMN_INDEX 4
|
||||
#define CALCULATE_END_TS_COLUMN_INDEX 5
|
||||
#define TABLE_NAME_COLUMN_INDEX 6
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -235,6 +235,7 @@ void blockDataFreeRes(SSDataBlock* pBlock);
|
|||
SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData);
|
||||
SSDataBlock* createSpecialDataBlock(EStreamType type);
|
||||
|
||||
SSDataBlock* blockCopyOneRow(const SSDataBlock* pDataBlock, int32_t rowIdx);
|
||||
int32_t blockDataAppendColInfo(SSDataBlock* pBlock, SColumnInfoData* pColInfoData);
|
||||
|
||||
SColumnInfoData createColumnInfoData(int16_t type, int32_t bytes, int16_t colId);
|
||||
|
|
|
@@ -27,6 +27,7 @@
extern "C" {
#endif

typedef struct SBuffer SBuffer;
typedef struct SSchema SSchema;
typedef struct STColumn STColumn;
typedef struct STSchema STSchema;

@@ -56,6 +57,18 @@ const static uint8_t BIT2_MAP[4][4] = {{0b00000000, 0b00000001, 0b00000010, 0},
#define SET_BIT2(p, i, v) ((p)[(i) >> 2] = (p)[(i) >> 2] & N1(BIT2_MAP[(i)&3][3]) | BIT2_MAP[(i)&3][(v)])
#define GET_BIT2(p, i) (((p)[(i) >> 2] >> BIT2_MAP[(i)&3][3]) & ((uint8_t)3))

// SBuffer ================================
struct SBuffer {
  int64_t nBuf;
  uint8_t *pBuf;
};

#define tBufferCreate() \
  (SBuffer) { .nBuf = 0, .pBuf = NULL }
void tBufferDestroy(SBuffer *pBuffer);
int32_t tBufferInit(SBuffer *pBuffer, int64_t size);
int32_t tBufferPut(SBuffer *pBuffer, const void *pData, int64_t nData);

// STSchema ================================
int32_t tTSchemaCreate(int32_t sver, SSchema *pSchema, int32_t nCols, STSchema **ppTSchema);
void tTSchemaDestroy(STSchema *pTSchema);
@@ -103,6 +103,7 @@ extern bool tsKeepColumnName;
// client
extern int32_t tsMinSlidingTime;
extern int32_t tsMinIntervalTime;
extern int32_t tsMaxMemUsedByInsert;

// build info
extern char version[];
@@ -676,7 +676,6 @@ typedef struct {
  col_id_t colId;
  int16_t  slotId;
};
  bool output; // TODO remove it later

  int8_t  type;
  int32_t bytes;

@@ -1397,6 +1396,7 @@ typedef struct {
  int64_t ekey;
  int64_t version;                     // for stream
  TSKEY   watermark;                   // for stream
  char    parTbName[TSDB_TABLE_NAME_LEN]; // for stream
  char    data[];
} SRetrieveTableRsp;

@@ -2025,7 +2025,7 @@ typedef struct SVCreateTbReq {
  int8_t type;
  union {
    struct {
      char*    name;    // super table name
      char*    stbName; // super table name
      uint8_t  tagNum;
      tb_uid_t suid;
      SArray*  tagName;

@@ -2045,7 +2045,7 @@ static FORCE_INLINE void tdDestroySVCreateTbReq(SVCreateTbReq* req) {
  taosMemoryFreeClear(req->comment);
  if (req->type == TSDB_CHILD_TABLE) {
    taosMemoryFreeClear(req->ctb.pTag);
    taosMemoryFreeClear(req->ctb.name);
    taosMemoryFreeClear(req->ctb.stbName);
    taosArrayDestroy(req->ctb.tagName);
    req->ctb.tagName = NULL;
  } else if (req->type == TSDB_NORMAL_TABLE) {
@@ -303,6 +303,10 @@ int32_t ctgdLaunchAsyncCall(SCatalog* pCtg, SRequestConnInfo* pConn, uint64_t re

int32_t catalogClearCache(void);

SMetaData* catalogCloneMetaData(SMetaData* pData);

void catalogFreeMetaData(SMetaData* pData);

/**
 * Destroy catalog and relase all resources
 */
@@ -89,13 +89,6 @@ qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* n
 */
int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type);

/**
 * @brief Cleanup SSDataBlock for StreamScanInfo
 *
 * @param tinfo
 */
void tdCleanupStreamInputDataBlock(qTaskInfo_t tinfo);

/**
 * Update the table id list, add or remove.
 *
@@ -54,7 +54,7 @@ typedef struct SFuncExecFuncs {
  FExecCombine combine;
} SFuncExecFuncs;

#define MAX_INTERVAL_TIME_WINDOW 1000000  // maximum allowed time windows in final results
#define MAX_INTERVAL_TIME_WINDOW 10000000 // maximum allowed time windows in final results

#define TOP_BOTTOM_QUERY_LIMIT 100
#define FUNCTIONS_NAME_MAX_LENGTH 16
@@ -385,7 +385,6 @@ typedef struct SCmdMsgInfo {
  SEpSet epSet;
  void* pMsg;
  int32_t msgLen;
  void* pExtension; // todo remove it soon
} SCmdMsgInfo;

typedef enum EQueryExecMode {
@@ -33,6 +33,13 @@ typedef struct SStmtCallback {
  int32_t (*getExecInfoFn)(TAOS_STMT*, SHashObj**, SHashObj**);
} SStmtCallback;

typedef struct SParseCsvCxt {
  TdFilePtr fp;            // last parsed file
  int32_t tableNo;         // last parsed table
  SName tableName;         // last parsed table
  const char* pLastSqlPos; // the location of the last parsed sql
} SParseCsvCxt;

typedef struct SParseContext {
  uint64_t requestId;
  int64_t requestRid;

@@ -57,6 +64,8 @@ typedef struct SParseContext {
  SArray* pTableMetaPos;   // sql table pos => catalog data pos
  SArray* pTableVgroupPos; // sql table pos => catalog data pos
  int64_t allocatorId;
  bool needMultiParse;
  SParseCsvCxt csvCxt;
} SParseContext;

int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery);

@@ -67,6 +76,8 @@ int32_t qParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq
int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCatalogReq,
                            const struct SMetaData* pMetaData, SQuery* pQuery);

void qDestroyParseContext(SParseContext* pCxt);

void qDestroyQuery(SQuery* pQueryNode);

int32_t qExtractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pSchema);
@@ -51,6 +51,9 @@ extern "C" {
#endif
#else

#ifndef __func__
#define __func__ __FUNCTION__
#endif
#include <malloc.h>
#include <time.h>
#ifndef TD_USE_WINSOCK
@@ -38,9 +38,9 @@ extern "C" {
#define TD_LOG_DIR_PATH "C:\\TDengine\\log\\"
#elif defined(_TD_DARWIN_64)
#define TD_TMP_DIR_PATH "/tmp/taosd/"
#define TD_CFG_DIR_PATH "/usr/local/etc/taos/"
#define TD_DATA_DIR_PATH "/usr/local/var/lib/taos/"
#define TD_LOG_DIR_PATH "/usr/local/var/log/taos/"
#define TD_CFG_DIR_PATH "/etc/taos/"
#define TD_DATA_DIR_PATH "/var/lib/taos/"
#define TD_LOG_DIR_PATH "/var/log/taos/"
#else
#define TD_TMP_DIR_PATH "/tmp/"
#define TD_CFG_DIR_PATH "/etc/taos/"
@@ -84,6 +84,7 @@ static FORCE_INLINE int64_t taosGetTimestampNs() {

char *taosStrpTime(const char *buf, const char *fmt, struct tm *tm);
struct tm *taosLocalTime(const time_t *timep, struct tm *result);
struct tm *taosLocalTimeNolock(struct tm *result, const time_t *timep, int dst);
time_t taosTime(time_t *t);
time_t taosMktime(struct tm *timep);
@@ -632,6 +632,10 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TMQ_CONSUMER_MISMATCH TAOS_DEF_ERROR_CODE(0, 0x4001)
#define TSDB_CODE_TMQ_CONSUMER_CLOSED   TAOS_DEF_ERROR_CODE(0, 0x4002)

// TDLite
#define TSDB_CODE_TDLITE_IVLD_OPEN_FLAGS TAOS_DEF_ERROR_CODE(0, 0x4100)
#define TSDB_CODE_TDLITE_IVLD_OPEN_DIR   TAOS_DEF_ERROR_CODE(0, 0x4101)

#ifdef __cplusplus
}
#endif
@@ -288,6 +288,13 @@ void* taosDecodeArray(const void* buf, SArray** pArray, FDecode decode, int32_

char* taosShowStrArray(const SArray* pArray);

/**
 * swap array
 * @param a
 * @param b
 * @return
 */
void taosArraySwap(SArray* a, SArray* b);
#ifdef __cplusplus
}
#endif
@@ -297,7 +297,7 @@ typedef enum ELogicConditionType {
#define TSDB_MAX_BUFFER_PER_VNODE 16384 // unit MB
#define TSDB_DEFAULT_BUFFER_PER_VNODE 96
#define TSDB_MIN_PAGES_PER_VNODE 64
#define TSDB_MAX_PAGES_PER_VNODE 16384
#define TSDB_MAX_PAGES_PER_VNODE (INT32_MAX - 1)
#define TSDB_DEFAULT_PAGES_PER_VNODE 256
#define TSDB_MIN_PAGESIZE_PER_VNODE 1 // unit KB
#define TSDB_MAX_PAGESIZE_PER_VNODE 16384
@@ -23,7 +23,7 @@ extern "C" {
#endif

TdThread* taosCreateThread(void* (*__start_routine)(void*), void* param);
bool taosDestoryThread(TdThread* pthread);
bool taosDestroyThread(TdThread* pthread);
bool taosThreadRunning(TdThread* pthread);

typedef void *(*ThreadFp)(void *param);
@@ -0,0 +1,28 @@
#!/usr/bin/env bash


function showAlertMessage(){
osascript <<EOF
  set buttonStr to "${3}"
  set oldDelimiters to AppleScript's text item delimiters
  set AppleScript's text item delimiters to ","
  set buttonList to every text item of buttonStr
  set AppleScript's text item delimiters to oldDelimiters
  get buttonList
  set btns to buttonList
  display dialog "${1}" with title "${2}" buttons btns with icon ${4}
  get result
EOF
}

taosd_status=`launchctl list | grep taosd | head -n 1 | awk '{print $1}'`
if [ "$taosd_status"x = "-"x ]; then
  launchctl start taosd
  showAlertMessage "Taosd is running!" "TDengine" "ok" "note"
else
  choose_result=`showAlertMessage "Taosd is running!\nDo you want to close it?" "TDengine" "yes,cancel" "stop"`
  if [ "$choose_result"x = "button returned:yes"x ]; then
    launchctl stop taosd
  fi
fi

@@ -0,0 +1,33 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
  <dict>
    <key>Label</key>
    <string>taosd</string>
    <key>ProgramArguments</key>
    <array>
      <string>/usr/local/bin/taosd</string>
    </array>
    <key>ProcessType</key>
    <string>Interactive</string>
    <key>Disabled</key>
    <false/>
    <key>RunAtLoad</key>
    <false/>
    <key>LaunchOnlyOnce</key>
    <false/>
    <key>SessionCreate</key>
    <true/>
    <key>ExitTimeOut</key>
    <integer>600</integer>
    <key>KeepAlive</key>
    <dict>
      <key>SuccessfulExit</key>
      <false/>
      <key>AfterInitialDemand</key>
      <true/>
    </dict>
    <key>Program</key>
    <string>/usr/local/bin/taosd</string>
  </dict>
</plist>

Binary file not shown (a new image of about 15 KiB).
@@ -0,0 +1,5 @@
TDengine is a highly efficient, scalable, highly available distributed time-series database. It is heavily optimized for inserting and querying data, making it far more efficient than ordinary databases, so it can meet the demanding storage and query requirements of IoT and other fields that handle large amounts of data.

To configure TDengine : edit /etc/taos/taos.cfg
To start service : launchctl start taosd
To access TDengine : use taos in shell
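A minimal end-to-end session on macOS following those three lines (paths and the taosd service label are the defaults assumed above; adjust them if your layout differs):

```bash
# Edit the server configuration, e.g. the firstEp or data directory settings
sudo vi /etc/taos/taos.cfg

# Start the launchd-managed service
sudo launchctl start taosd

# Open the TDengine CLI and run a quick sanity check
taos -s "SHOW DATABASES;"
```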
|
@ -30,36 +30,31 @@ productName="TDengine"
|
|||
emailName="taosdata.com"
|
||||
uninstallScript="rmtaos"
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
data_dir=${dataDir}
|
||||
log_dir=${logDir}
|
||||
|
||||
cfg_install_dir=${configDir}
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
bin_link_dir="/usr/bin"
|
||||
lib_link_dir="/usr/lib"
|
||||
lib64_link_dir="/usr/lib64"
|
||||
inc_link_dir="/usr/include"
|
||||
|
||||
install_main_dir=${installDir}
|
||||
|
||||
bin_dir="${installDir}/bin"
|
||||
else
|
||||
data_dir="/usr/local${dataDir}"
|
||||
log_dir="/usr/local${logDir}"
|
||||
|
||||
cfg_install_dir="/usr/local${configDir}"
|
||||
|
||||
bin_link_dir="/usr/local/bin"
|
||||
lib_link_dir="/usr/local/lib"
|
||||
inc_link_dir="/usr/local/include"
|
||||
|
||||
install_main_dir="/usr/local/Cellar/tdengine/${verNumber}"
|
||||
install_main_2_dir="/usr/local/Cellar/tdengine@${verNumber}/${verNumber}"
|
||||
|
||||
bin_dir="/usr/local/Cellar/tdengine/${verNumber}/bin"
|
||||
bin_2_dir="/usr/local/Cellar/tdengine@${verNumber}/${verNumber}/bin"
|
||||
if [ -d "/usr/local/Cellar/" ];then
|
||||
installDir="/usr/local/Cellar/tdengine/${verNumber}"
|
||||
elif [ -d "/opt/homebrew/Cellar/" ];then
|
||||
installDir="/opt/homebrew/Cellar/tdengine/${verNumber}"
|
||||
else
|
||||
installDir="/usr/local/taos"
|
||||
fi
|
||||
fi
|
||||
install_main_dir=${installDir}
|
||||
bin_dir="${installDir}/bin"
|
||||
cfg_dir="${installDir}/cfg"
|
||||
|
||||
service_config_dir="/etc/systemd/system"
|
||||
|
||||
|
@ -71,14 +66,16 @@ GREEN_UNDERLINE='\033[4;32m'
|
|||
NC='\033[0m'
|
||||
|
||||
csudo=""
|
||||
csudouser=""
|
||||
if command -v sudo >/dev/null; then
|
||||
csudo="sudo "
|
||||
csudouser="sudo -u ${USER} "
|
||||
fi
|
||||
|
||||
service_mod=2
|
||||
os_type=0
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
if command -v sudo >/dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
initd_mod=0
|
||||
if pidof systemd &>/dev/null; then
|
||||
service_mod=0
|
||||
|
@ -142,7 +139,6 @@ function kill_taosd() {
|
|||
|
||||
function install_main_path() {
|
||||
#create install main dir and all sub dir
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}rm -rf ${install_main_dir} || :
|
||||
${csudo}mkdir -p ${install_main_dir}
|
||||
${csudo}mkdir -p ${install_main_dir}/cfg
|
||||
|
@ -153,17 +149,6 @@ function install_main_path() {
|
|||
${csudo}mkdir -p ${install_main_dir}/include
|
||||
${csudo}mkdir -p ${install_main_dir}/share
|
||||
# ${csudo}mkdir -p ${install_main_dir}/init.d
|
||||
else
|
||||
${csudo}rm -rf ${install_main_dir} || ${csudo}rm -rf ${install_main_2_dir} || :
|
||||
${csudo}mkdir -p ${install_main_dir} || ${csudo}mkdir -p ${install_main_2_dir}
|
||||
${csudo}mkdir -p ${install_main_dir}/cfg || ${csudo}mkdir -p ${install_main_2_dir}/cfg
|
||||
${csudo}mkdir -p ${install_main_dir}/bin || ${csudo}mkdir -p ${install_main_2_dir}/bin
|
||||
# ${csudo}mkdir -p ${install_main_dir}/connector || ${csudo}mkdir -p ${install_main_2_dir}/connector
|
||||
${csudo}mkdir -p ${install_main_dir}/driver || ${csudo}mkdir -p ${install_main_2_dir}/driver
|
||||
${csudo}mkdir -p ${install_main_dir}/examples || ${csudo}mkdir -p ${install_main_2_dir}/examples
|
||||
${csudo}mkdir -p ${install_main_dir}/include || ${csudo}mkdir -p ${install_main_2_dir}/include
|
||||
${csudo}mkdir -p ${install_main_dir}/share || ${csudo}mkdir -p ${install_main_2_dir}/share
|
||||
fi
|
||||
}
|
||||
|
||||
function install_bin() {
|
||||
|
@ -175,11 +160,11 @@ function install_bin() {
|
|||
${csudo}rm -f ${bin_link_dir}/taosdemo || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosdump || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosx || :
|
||||
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}rm -f ${bin_link_dir}/perfMonitor || :
|
||||
${csudo}rm -f ${bin_link_dir}/set_core || :
|
||||
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
|
||||
|
||||
${csudo}cp -r ${binary_dir}/build/bin/${clientName} ${install_main_dir}/bin || :
|
||||
[ -f ${binary_dir}/build/bin/taosBenchmark ] && ${csudo}cp -r ${binary_dir}/build/bin/taosBenchmark ${install_main_dir}/bin || :
|
||||
|
@ -209,18 +194,26 @@ function install_bin() {
|
|||
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
|
||||
else
|
||||
|
||||
${csudo}cp -r ${binary_dir}/build/bin/* ${install_main_dir}/bin || ${csudo}cp -r ${binary_dir}/build/bin/* ${install_main_2_dir}/bin || :
|
||||
${csudo}cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_dir}/bin || ${csudo}cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_2_dir} || :
|
||||
${csudo}cp -r ${script_dir}/remove_client.sh ${install_main_dir}/bin || ${csudo}cp -r ${script_dir}/remove_client.sh ${install_main_2_dir}/bin || :
|
||||
${csudo}chmod 0555 ${install_main_dir}/bin/* || ${csudo}chmod 0555 ${install_main_2_dir}/bin/*
|
||||
${csudo}cp -r ${binary_dir}/build/bin/${clientName} ${install_main_dir}/bin || :
|
||||
[ -f ${binary_dir}/build/bin/taosBenchmark ] && ${csudo}cp -r ${binary_dir}/build/bin/taosBenchmark ${install_main_dir}/bin || :
|
||||
[ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo || :
|
||||
[ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || :
|
||||
[ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || :
|
||||
[ -f ${binary_dir}/build/bin/udfd ] && ${csudo}cp -r ${binary_dir}/build/bin/udfd ${install_main_dir}/bin || :
|
||||
[ -f ${binary_dir}/build/bin/taosx ] && ${csudo}cp -r ${binary_dir}/build/bin/taosx ${install_main_dir}/bin || :
|
||||
${csudo}cp -r ${binary_dir}/build/bin/${serverName} ${install_main_dir}/bin || :
|
||||
|
||||
${csudo}cp -r ${script_dir}/remove.sh ${install_main_dir}/bin || :
|
||||
${csudo}chmod 0555 ${install_main_dir}/bin/*
|
||||
#Make link
|
||||
[ -x ${install_main_dir}/bin/${clientName} ] || [ -x ${install_main_2_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} || ${csudo}ln -s ${install_main_2_dir}/bin/${clientName} || :
|
||||
[ -x ${install_main_dir}/bin/${serverName} ] || [ -x ${install_main_2_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} || ${csudo}ln -s ${install_main_2_dir}/bin/${serverName} || :
|
||||
[ -x ${install_main_dir}/bin/taosadapter ] || [ -x ${install_main_2_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter || ${csudo}ln -s ${install_main_2_dir}/bin/taosadapter || :
|
||||
[ -x ${install_main_dir}/bin/udfd ] || [ -x ${install_main_2_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd || ${csudo}ln -s ${install_main_2_dir}/bin/udfd || :
|
||||
[ -x ${install_main_dir}/bin/taosdump ] || [ -x ${install_main_2_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || ln -s ${install_main_2_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
|
||||
[ -x ${install_main_dir}/bin/taosdemo ] || [ -x ${install_main_2_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || ln -s ${install_main_2_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
|
||||
[ -x ${install_main_dir}/bin/taosx ] || [ -x ${install_main_2_dir}/bin/taosx ] && ${csudo}ln -s ${install_main_dir}/bin/taosx ${bin_link_dir}/taosx || ln -s ${install_main_2_dir}/bin/taosx ${bin_link_dir}/taosx || :
|
||||
[ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} || :
|
||||
[ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} || :
|
||||
[ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter || :
|
||||
[ -x ${install_main_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd || :
|
||||
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
|
||||
[ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo || :
|
||||
[ -x ${install_main_dir}/bin/taosx ] && ${csudo}ln -s ${install_main_dir}/bin/taosx ${bin_link_dir}/taosx || :
|
||||
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
|
||||
fi
|
||||
}
|
||||
|
||||
|
@ -324,35 +317,23 @@ function install_lib() {
|
|||
${csudo}chmod 777 ${install_main_dir}/driver/libtaosws.so ||:
|
||||
|
||||
${csudo}ln -sf ${install_main_dir}/driver/libtaosws.so ${lib_link_dir}/libtaosws.so || :
|
||||
|
||||
if [ -d "${lib64_link_dir}" ]; then
|
||||
${csudo}ln -sf ${lib64_link_dir}/libtaosws.so ${lib64_link_dir}/libtaosws.so || :
|
||||
fi
|
||||
fi
|
||||
else
|
||||
${csudo}cp -Rf ${binary_dir}/build/lib/libtaos.${verNumber}.dylib \
|
||||
${install_main_dir}/driver ||
|
||||
${csudo}cp -Rf ${binary_dir}/build/lib/libtaos.${verNumber}.dylib \
|
||||
${install_main_2_dir}/driver &&
|
||||
${csudo}chmod 777 ${install_main_dir}/driver/* ||
|
||||
${csudo}chmod 777 ${install_main_2_dir}/driver/*
|
||||
|
||||
${csudo}ln -sf ${install_main_dir}/driver/libtaos.* \
|
||||
${install_main_dir}/driver/libtaos.1.dylib ||
|
||||
${csudo}ln -sf ${install_main_2_dir}/driver/libtaos.* \
|
||||
${install_main_2_dir}/driver/libtaos.1.dylib || :
|
||||
|
||||
${csudo}ln -sf ${install_main_dir}/driver/libtaos.1.dylib \
|
||||
${install_main_dir}/driver/libtaos.dylib ||
|
||||
${csudo}ln -sf ${install_main_2_dir}/driver/libtaos.1.dylib \
|
||||
${install_main_2_dir}/driver/libtaos.dylib || :
|
||||
${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/*
|
||||
|
||||
${csudo}ln -sf ${install_main_dir}/driver/libtaos.${verNumber}.dylib \
|
||||
${lib_link_dir}/libtaos.1.dylib ||
|
||||
${csudo}ln -sf ${install_main_2_dir}/driver/libtaos.${verNumber}.dylib \
|
||||
${lib_link_dir}/libtaos.1.dylib || :
|
||||
|
||||
${csudo}ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib || :
|
||||
|
||||
if [ -f ${binary_dir}/build/lib/libtaosws.dylib ]; then
|
||||
${csudo}cp ${binary_dir}/build/lib/libtaosws.dylib \
|
||||
${install_main_dir}/driver &&
|
||||
${csudo}chmod 777 ${install_main_dir}/driver/libtaosws.dylib ||:
|
||||
|
||||
${csudo}ln -sf ${install_main_dir}/driver/libtaosws.dylib ${lib_link_dir}/libtaosws.dylib || :
|
||||
fi
|
||||
fi
|
||||
|
||||
install_jemalloc
|
||||
|
@ -365,8 +346,6 @@ function install_lib() {
|
|||
}
|
||||
|
||||
function install_header() {
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h ${inc_link_dir}/taosudf.h || :
|
||||
[ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h ||:
|
||||
${csudo}cp -f ${source_dir}/include/client/taos.h ${source_dir}/include/common/taosdef.h ${source_dir}/include/util/taoserror.h ${source_dir}/include/libs/function/taosudf.h \
|
||||
|
@ -382,20 +361,15 @@ function install_header() {
|
|||
${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
|
||||
${csudo}ln -s ${install_main_dir}/include/taosudf.h ${inc_link_dir}/taosudf.h
|
||||
|
||||
else
|
||||
${csudo}cp -f ${source_dir}/include/client/taos.h ${source_dir}/include/common/taosdef.h ${source_dir}/include/util/taoserror.h ${source_dir}/include/libs/function/taosudf.h \
|
||||
${install_main_dir}/include ||
|
||||
${csudo}cp -f ${source_dir}/include/client/taos.h ${source_dir}/include/common/taosdef.h ${source_dir}/include/util/taoserror.h ${source_dir}/include/libs/function/taosudf.h \
|
||||
${install_main_2_dir}/include &&
|
||||
${csudo}chmod 644 ${install_main_dir}/include/* || ${csudo}chmod 644 ${install_main_2_dir}/include/*
|
||||
fi
|
||||
${csudo}chmod 644 ${install_main_dir}/include/*
|
||||
}
|
||||
|
||||
function install_config() {
|
||||
if [ ! -f ${cfg_install_dir}/${configFile} ]; then
|
||||
${csudo}mkdir -p ${cfg_install_dir}
|
||||
[ -f ${script_dir}/../cfg/${configFile} ] &&
|
||||
${csudo}cp ${script_dir}/../cfg/${configFile} ${cfg_install_dir}
|
||||
${csudo}cp ${script_dir}/../cfg/${configFile} ${cfg_install_dir} &&
|
||||
${csudo}cp ${script_dir}/../cfg/${configFile} ${cfg_dir}
|
||||
${csudo}chmod 644 ${cfg_install_dir}/${configFile}
|
||||
${csudo}cp -f ${script_dir}/../cfg/${configFile} \
|
||||
${cfg_install_dir}/${configFile}.${verNumber}
|
||||
|
@ -404,6 +378,7 @@ function install_config() {
|
|||
else
|
||||
${csudo}cp -f ${script_dir}/../cfg/${configFile} \
|
||||
${cfg_install_dir}/${configFile}.${verNumber}
|
||||
${csudo}cp -f ${script_dir}/../cfg/${configFile} ${cfg_dir}
|
||||
fi
|
||||
}
|
||||
|
||||
|
@ -411,7 +386,8 @@ function install_taosadapter_config() {
|
|||
if [ ! -f "${cfg_install_dir}/taosadapter.toml" ]; then
|
||||
${csudo}mkdir -p ${cfg_install_dir} || :
|
||||
[ -f ${binary_dir}/test/cfg/taosadapter.toml ] &&
|
||||
${csudo}cp ${binary_dir}/test/cfg/taosadapter.toml ${cfg_install_dir} || :
|
||||
${csudo}cp ${binary_dir}/test/cfg/taosadapter.toml ${cfg_install_dir} &&
|
||||
${csudo}cp ${binary_dir}/test/cfg/taosadapter.toml ${cfg_dir} || :
|
||||
[ -f ${cfg_install_dir}/taosadapter.toml ] &&
|
||||
${csudo}chmod 644 ${cfg_install_dir}/taosadapter.toml || :
|
||||
[ -f ${binary_dir}/test/cfg/taosadapter.toml ] &&
|
||||
|
@ -424,6 +400,7 @@ function install_taosadapter_config() {
|
|||
if [ -f "${binary_dir}/test/cfg/taosadapter.toml" ]; then
|
||||
${csudo}cp -f ${binary_dir}/test/cfg/taosadapter.toml \
|
||||
${cfg_install_dir}/taosadapter.toml.${verNumber} || :
|
||||
${csudo}cp -f ${binary_dir}/test/cfg/taosadapter.toml ${cfg_dir} || :
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
@ -431,20 +408,12 @@ function install_taosadapter_config() {
|
|||
function install_log() {
|
||||
${csudo}rm -rf ${log_dir} || :
|
||||
${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir}
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}ln -s ${log_dir} ${install_main_dir}/log
|
||||
else
|
||||
${csudo}ln -s ${log_dir} ${install_main_dir}/log || ${csudo}ln -s ${log_dir} ${install_main_2_dir}/log
|
||||
fi
|
||||
}
|
||||
|
||||
function install_data() {
|
||||
${csudo}mkdir -p ${data_dir}
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}mkdir -p ${data_dir} && ${csudo}chmod 777 ${data_dir}
|
||||
${csudo}ln -s ${data_dir} ${install_main_dir}/data
|
||||
else
|
||||
${csudo}ln -s ${data_dir} ${install_main_dir}/data || ${csudo}ln -s ${data_dir} ${install_main_2_dir}/data
|
||||
fi
|
||||
}
|
||||
|
||||
function install_connector() {
|
||||
|
@ -453,31 +422,17 @@ function install_connector() {
|
|||
else
|
||||
echo "WARNING: go connector not found, please check if want to use it!"
|
||||
fi
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}cp -rf ${source_dir}/src/connector/python ${install_main_dir}/connector || :
|
||||
${csudo}cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &>/dev/null && ${csudo}chmod 777 ${install_main_dir}/connector/*.jar || echo &>/dev/null || :
|
||||
else
|
||||
${csudo}cp -rf ${source_dir}/src/connector/python ${install_main_dir}/connector || ${csudo}cp -rf ${source_dir}/src/connector/python ${install_main_2_dir}/connector || :
|
||||
${csudo}cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &>/dev/null && ${csudo}chmod 777 ${install_main_dir}/connector/*.jar || echo &>/dev/null || :
|
||||
${csudo}cp ${binary_dir}/build/lib/*.jar ${install_main_2_dir}/connector &>/dev/null && ${csudo}chmod 777 ${install_main_2_dir}/connector/*.jar || echo &>/dev/null || :
|
||||
fi
|
||||
}
|
||||
|
||||
function install_examples() {
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}cp -rf ${source_dir}/examples/* ${install_main_dir}/examples || :
|
||||
else
|
||||
${csudo}cp -rf ${source_dir}/examples/* ${install_main_dir}/examples || ${csudo}cp -rf ${source_dir}/examples/* ${install_main_2_dir}/examples || :
|
||||
fi
|
||||
}
|
||||
|
||||
function install_web() {
|
||||
if [ -d "${binary_dir}/build/share" ]; then
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}cp -rf ${binary_dir}/build/share/* ${install_main_dir}/share || :
|
||||
else
|
||||
${csudo}cp -rf ${binary_dir}/build/share/* ${install_main_dir}/share || ${csudo}cp -rf ${binary_dir}/build/share/* ${install_main_2_dir}/share || :
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
|
@@ -575,7 +530,14 @@ function install_taosadapter_service() {
  fi
}

function install_service_on_launchctl() {
  ${csudouser}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || :
  ${csudo}cp ${script_dir}/com.taosdata.taosd.plist /Library/LaunchDaemons/com.taosdata.taosd.plist
  ${csudouser}launchctl load -w /Library/LaunchDaemons/com.taosdata.taosd.plist || :
}

function install_service() {
  if [ "$osType" != "Darwin" ]; then
    if ((${service_mod} == 0)); then
      install_service_on_systemd
    elif ((${service_mod} == 1)); then
@@ -583,6 +545,22 @@ function install_service() {
    else
      kill_taosd
    fi
  else
    install_service_on_launchctl
  fi
}
function install_app() {
  if [ "$osType" = "Darwin" ]; then
    ${csudo}rm -rf /Applications/TDengine.app &&
      ${csudo}mkdir -p /Applications/TDengine.app/Contents/MacOS/ &&
      ${csudo}cp ${script_dir}/TDengine /Applications/TDengine.app/Contents/MacOS/ &&
      echo "<plist><dict></dict></plist>" | ${csudo}tee /Applications/TDengine.app/Contents/Info.plist > /dev/null &&
      ${csudo}sips -i ${script_dir}/logo.png > /dev/null &&
      DeRez -only icns ${script_dir}/logo.png | ${csudo}tee /Applications/TDengine.app/mac_logo.rsrc > /dev/null &&
      ${csudo}rez -append /Applications/TDengine.app/mac_logo.rsrc -o $'/Applications/TDengine.app/Icon\r' &&
      ${csudo}SetFile -a C /Applications/TDengine.app/ &&
      ${csudo}rm /Applications/TDengine.app/mac_logo.rsrc
  fi
}

function update_TDengine() {
@ -610,6 +588,7 @@ function update_TDengine() {
|
|||
install_examples
|
||||
install_web
|
||||
install_bin
|
||||
install_app
|
||||
|
||||
install_service
|
||||
install_taosadapter_service
|
||||
|
@ -633,7 +612,11 @@ function update_TDengine() {
|
|||
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||
echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}service taosadapter start${NC}"
|
||||
else
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}"
|
||||
else
|
||||
echo -e "${GREEN_DARK}To start service ${NC}: launchctl start ${serverName}${NC}"
|
||||
fi
|
||||
[ -f ${installDir}/bin/taosadapter ] && \
|
||||
echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: taosadapter &${NC}"
|
||||
fi
|
||||
|
@ -656,6 +639,7 @@ function install_TDengine() {
|
|||
# install_connector
|
||||
install_examples
|
||||
install_bin
|
||||
install_app
|
||||
|
||||
install_service
|
||||
install_taosadapter_service
|
||||
|
@ -679,7 +663,11 @@ function install_TDengine() {
|
|||
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
|
||||
echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}service taosadapter start${NC}"
|
||||
else
|
||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ./${serverName}${NC}"
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}"
|
||||
else
|
||||
echo -e "${GREEN_DARK}To start service ${NC}: launchctl start ${serverName}${NC}"
|
||||
fi
|
||||
[ -f ${installDir}/bin/taosadapter ] && \
|
||||
echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: taosadapter &${NC}"
|
||||
fi
|
||||
|
@ -694,16 +682,10 @@ echo source directory: $1
|
|||
echo binary directory: $2
|
||||
if [ -x ${data_dir}/dnode/dnodeCfg.json ]; then
|
||||
echo -e "\033[44;31;5mThe default data directory ${data_dir} contains old data of tdengine 2.x, please clear it before installing!\033[0m"
|
||||
elif [ "$osType" != "Darwin" ]; then
|
||||
else
|
||||
if [ -x ${bin_dir}/${clientName} ]; then
|
||||
update_TDengine
|
||||
else
|
||||
install_TDengine
|
||||
fi
|
||||
else
|
||||
if [ -x ${bin_dir}/${clientName} ] || [ -x ${bin_2_dir}/${clientName} ]; then
|
||||
update_TDengine
|
||||
else
|
||||
install_TDengine
|
||||
fi
|
||||
fi
|
||||
|
|
|
@@ -47,7 +47,7 @@ if [ -d ${top_dir}/tools/taos-tools/packaging/deb ]; then
  cd ${top_dir}/tools/taos-tools/packaging/deb
  [ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"

  taostools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}')
  taostools_ver=$(git tag |grep -v taos | sort | tail -1)
  taostools_install_dir="${release_dir}/${clientName}Tools-${taostools_ver}"

  cd ${curr_dir}
@ -7,28 +7,53 @@
|
|||
iplist=""
|
||||
serverFqdn=""
|
||||
|
||||
# -----------------------Variables definition---------------------
|
||||
script_dir=$(dirname $(readlink -f "$0"))
|
||||
osType=`uname`
|
||||
|
||||
# Dynamic directory
|
||||
data_dir="/var/lib/taos"
|
||||
log_dir="/var/log/taos"
|
||||
data_link_dir="/usr/local/taos/data"
|
||||
log_link_dir="/usr/local/taos/log"
|
||||
install_main_dir="/usr/local/taos"
|
||||
|
||||
# static directory
|
||||
cfg_dir="/usr/local/taos/cfg"
|
||||
bin_dir="/usr/local/taos/bin"
|
||||
lib_dir="/usr/local/taos/driver"
|
||||
init_d_dir="/usr/local/taos/init.d"
|
||||
inc_dir="/usr/local/taos/include"
|
||||
|
||||
cfg_install_dir="/etc/taos"
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
script_dir=$(dirname $(readlink -f "$0"))
|
||||
verNumber=""
|
||||
lib_file_ext="so"
|
||||
|
||||
bin_link_dir="/usr/bin"
|
||||
lib_link_dir="/usr/lib"
|
||||
lib64_link_dir="/usr/lib64"
|
||||
inc_link_dir="/usr/include"
|
||||
|
||||
install_main_dir="/usr/local/taos"
|
||||
else
|
||||
script_dir=${source_dir}/packaging/tools
|
||||
verNumber=`ls tdengine/driver | grep -E "libtaos\.[0-9]\.[0-9]" | sed "s/libtaos.//g" | sed "s/.dylib//g" | head -n 1`
|
||||
lib_file_ext="dylib"
|
||||
|
||||
bin_link_dir="/usr/local/bin"
|
||||
lib_link_dir="/usr/local/lib"
|
||||
lib64_link_dir="/usr/local/lib"
|
||||
inc_link_dir="/usr/local/include"
|
||||
|
||||
if [ -d "/usr/local/Cellar/" ];then
|
||||
install_main_dir="/usr/local/Cellar/tdengine/${verNumber}"
|
||||
elif [ -d "/opt/homebrew/Cellar/" ];then
|
||||
install_main_dir="/opt/homebrew/Cellar/tdengine/${verNumber}"
|
||||
else
|
||||
install_main_dir="/usr/local/taos"
|
||||
fi
|
||||
fi
|
||||
|
||||
data_link_dir="${install_main_dir}/data"
|
||||
log_link_dir="${install_main_dir}/log"
|
||||
|
||||
# static directory
|
||||
cfg_dir="${install_main_dir}/cfg"
|
||||
bin_dir="${install_main_dir}/bin"
|
||||
lib_dir="${install_main_dir}/driver"
|
||||
init_d_dir="${install_main_dir}/init.d"
|
||||
inc_dir="${install_main_dir}/include"
|
||||
|
||||
service_config_dir="/etc/systemd/system"
|
||||
|
||||
|
||||
|
@ -40,8 +65,10 @@ GREEN_UNDERLINE='\033[4;32m'
|
|||
NC='\033[0m'
|
||||
|
||||
csudo=""
|
||||
csudouser=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
csudouser="sudo -u ${USER} "
|
||||
fi
|
||||
|
||||
initd_mod=0
|
||||
|
@ -63,6 +90,14 @@ elif $(which service &> /dev/null); then
|
|||
else
|
||||
service_mod=2
|
||||
fi
|
||||
if [ "$osType" = "Darwin" ]; then
|
||||
if [ -d "${install_main_dir}" ];then
|
||||
${csudo}rm -rf ${install_main_dir}
|
||||
fi
|
||||
${csudo}mkdir -p ${install_main_dir}
|
||||
${csudo}rm -rf ${install_main_dir}
|
||||
${csudo}cp -rf tdengine ${install_main_dir}
|
||||
fi
|
||||
|
||||
function kill_taosadapter() {
|
||||
# ${csudo}pkill -f taosadapter || :
|
||||
|
@ -96,22 +131,24 @@ function install_lib() {
|
|||
${csudo}rm -f ${lib_link_dir}/libtaos* || :
|
||||
${csudo}rm -f ${lib64_link_dir}/libtaos* || :
|
||||
|
||||
[ -f ${lib_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.so || :
|
||||
[ -f ${lib64_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.so || :
|
||||
[ -f ${lib_link_dir}/libtaosws.${lib_file_ext} ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.${lib_file_ext} || :
|
||||
[ -f ${lib64_link_dir}/libtaosws.${lib_file_ext} ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.${lib_file_ext} || :
|
||||
|
||||
${csudo}ln -s ${lib_dir}/libtaos.* ${lib_link_dir}/libtaos.so.1
|
||||
${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
|
||||
|
||||
[ -f ${lib_dir}/libtaosws.so ] && ${csudo}ln -sf ${lib_dir}/libtaosws.so ${lib_link_dir}/libtaosws.so ||:
|
||||
[ -f ${lib_dir}/libtaosws.${lib_file_ext} ] && ${csudo}ln -sf ${lib_dir}/libtaosws.${lib_file_ext} ${lib_link_dir}/libtaosws.${lib_file_ext} ||:
|
||||
|
||||
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
|
||||
${csudo}ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
|
||||
${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
|
||||
|
||||
[ -f ${lib_dir}/libtaosws.so ] && ${csudo}ln -sf ${lib_dir}/libtaosws.so ${lib64_link_dir}/libtaosws.so || :
|
||||
[ -f ${lib_dir}/libtaosws.${lib_file_ext} ] && ${csudo}ln -sf ${lib_dir}/libtaosws.${lib_file_ext} ${lib64_link_dir}/libtaosws.${lib_file_ext} || :
|
||||
fi
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}ldconfig
|
||||
fi
|
||||
}
|
||||
|
||||
function install_bin() {
|
||||
|
@ -138,6 +175,7 @@ function install_bin() {
|
|||
[ -x ${bin_dir}/TDinsight.sh ] && ${csudo}ln -sf ${bin_dir}/TDinsight.sh ${bin_link_dir}/TDinsight.sh || :
|
||||
[ -x ${bin_dir}/taosdump ] && ${csudo}ln -s ${bin_dir}/taosdump ${bin_link_dir}/taosdump || :
|
||||
[ -x ${bin_dir}/set_core.sh ] && ${csudo}ln -s ${bin_dir}/set_core.sh ${bin_link_dir}/set_core || :
|
||||
[ -x ${bin_dir}/remove.sh ] && ${csudo}ln -s ${bin_dir}/remove.sh ${bin_link_dir}/rmtaos || :
|
||||
}
|
||||
|
||||
function add_newHostname_to_hosts() {
|
||||
|
@ -466,6 +504,14 @@ function install_service_on_systemd() {
|
|||
${csudo}systemctl enable taosd
|
||||
}
|
||||
|
||||
function install_service_on_launchctl() {
|
||||
if [ -f ${install_main_dir}/service/com.taosdata.taosd.plist ]; then
|
||||
${csudouser}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || :
|
||||
${csudo}cp ${install_main_dir}/service/com.taosdata.taosd.plist /Library/LaunchDaemons/com.taosdata.taosd.plist || :
|
||||
${csudouser}launchctl load -w /Library/LaunchDaemons/com.taosdata.taosd.plist || :
|
||||
fi
|
||||
}
|
||||
|
||||
function install_taosadapter_service() {
|
||||
if ((${service_mod}==0)); then
|
||||
[ -f ${script_dir}/../cfg/taosadapter.service ] &&\
|
||||
|
@ -476,6 +522,7 @@ function install_taosadapter_service() {
|
|||
}
|
||||
|
||||
function install_service() {
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
if ((${service_mod}==0)); then
|
||||
install_service_on_systemd
|
||||
elif ((${service_mod}==1)); then
|
||||
|
@ -485,6 +532,25 @@ function install_service() {
|
|||
kill_taosadapter
|
||||
kill_taosd
|
||||
fi
|
||||
else
|
||||
install_service_on_launchctl
|
||||
fi
|
||||
}
|
||||
|
||||
function install_app() {
|
||||
if [ "$osType" = "Darwin" ]; then
|
||||
if [ -f ${install_main_dir}/service/TDengine ]; then
|
||||
${csudo}rm -rf /Applications/TDengine.app &&
|
||||
${csudo}mkdir -p /Applications/TDengine.app/Contents/MacOS/ &&
|
||||
${csudo}cp ${install_main_dir}/service/TDengine /Applications/TDengine.app/Contents/MacOS/ &&
|
||||
echo "<plist><dict></dict></plist>" | ${csudo}tee /Applications/TDengine.app/Contents/Info.plist > /dev/null &&
|
||||
${csudo}sips -i ${install_main_dir}/service/logo.png > /dev/null &&
|
||||
DeRez -only icns ${install_main_dir}/service/logo.png | ${csudo}tee /Applications/TDengine.app/mac_logo.rsrc > /dev/null &&
|
||||
${csudo}rez -append /Applications/TDengine.app/mac_logo.rsrc -o $'/Applications/TDengine.app/Icon\r' &&
|
||||
${csudo}SetFile -a C /Applications/TDengine.app/ &&
|
||||
${csudo}rm /Applications/TDengine.app/mac_logo.rsrc
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function install_TDengine() {
|
||||
|
@ -492,7 +558,7 @@ function install_TDengine() {
|
|||
|
||||
#install log and data dir , then ln to /usr/local/taos
|
||||
${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir}
|
||||
${csudo}mkdir -p ${data_dir}
|
||||
${csudo}mkdir -p ${data_dir} && ${csudo}chmod 777 ${data_dir}
|
||||
|
||||
${csudo}rm -rf ${log_link_dir} || :
|
||||
${csudo}rm -rf ${data_link_dir} || :
|
||||
|
@ -508,6 +574,7 @@ function install_TDengine() {
|
|||
install_taosadapter_config
|
||||
install_taosadapter_service
|
||||
install_service
|
||||
install_app
|
||||
|
||||
# Ask if to start the service
|
||||
#echo
|
||||
|
|
|
@ -6,12 +6,31 @@ set -e
|
|||
#set -x
|
||||
|
||||
verMode=edge
|
||||
osType=`uname`
|
||||
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
installDir="/usr/local/taos"
|
||||
bin_link_dir="/usr/bin"
|
||||
lib_link_dir="/usr/lib"
|
||||
lib64_link_dir="/usr/lib64"
|
||||
inc_link_dir="/usr/include"
|
||||
else
|
||||
if [ -d "/usr/local/Cellar/" ];then
|
||||
installDir="/usr/local/Cellar/tdengine/${verNumber}"
|
||||
elif [ -d "/opt/homebrew/Cellar/" ];then
|
||||
installDir="/opt/homebrew/Cellar/tdengine/${verNumber}"
|
||||
else
|
||||
installDir="/usr/local/taos"
|
||||
fi
|
||||
bin_link_dir="/usr/local/bin"
|
||||
lib_link_dir="/usr/local/lib"
|
||||
lib64_link_dir="/usr/local/lib"
|
||||
inc_link_dir="/usr/local/include"
|
||||
fi
|
||||
serverName="taosd"
|
||||
clientName="taos"
|
||||
uninstallScript="rmtaos"
|
||||
|
@ -22,11 +41,8 @@ install_main_dir=${installDir}
|
|||
data_link_dir=${installDir}/data
|
||||
log_link_dir=${installDir}/log
|
||||
cfg_link_dir=${installDir}/cfg
|
||||
bin_link_dir="/usr/bin"
|
||||
local_bin_link_dir="/usr/local/bin"
|
||||
lib_link_dir="/usr/lib"
|
||||
lib64_link_dir="/usr/lib64"
|
||||
inc_link_dir="/usr/include"
|
||||
|
||||
|
||||
service_config_dir="/etc/systemd/system"
|
||||
taos_service_name=${serverName}
|
||||
|
@ -82,6 +98,7 @@ function clean_bin() {
|
|||
# Remove link
|
||||
${csudo}rm -f ${bin_link_dir}/${clientName} || :
|
||||
${csudo}rm -f ${bin_link_dir}/${serverName} || :
|
||||
${csudo}rm -f ${bin_link_dir}/udfd || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosadapter || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosBenchmark || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosdemo || :
|
||||
|
@ -103,7 +120,7 @@ function clean_lib() {
|
|||
[ -f ${lib_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.so || :
|
||||
|
||||
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
|
||||
[ -f ${lib64_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.so || :
|
||||
[ -f ${lib64_link_dir}/libtaosws.* ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.* || :
|
||||
#${csudo}rm -rf ${v15_java_app_dir} || :
|
||||
}
|
||||
|
||||
|
@ -195,12 +212,20 @@ function clean_service_on_sysvinit() {
|
|||
fi
|
||||
}
|
||||
|
||||
function clean_service_on_launchctl() {
|
||||
${csudouser}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || :
|
||||
${csudo}rm /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || :
|
||||
}
|
||||
|
||||
function clean_service() {
|
||||
if ((${service_mod} == 0)); then
|
||||
clean_service_on_systemd
|
||||
elif ((${service_mod} == 1)); then
|
||||
clean_service_on_sysvinit
|
||||
else
|
||||
if [ "$osType" = "Darwin" ]; then
|
||||
clean_service_on_launchctl
|
||||
fi
|
||||
kill_taosadapter
|
||||
kill_taosd
|
||||
kill_tarbitrator
|
||||
|
@ -241,6 +266,9 @@ elif echo $osinfo | grep -qwi "centos"; then
|
|||
# echo "this is centos system"
|
||||
${csudo}rpm -e --noscripts tdengine >/dev/null 2>&1 || :
|
||||
fi
|
||||
if [ "$osType" = "Darwin" ]; then
|
||||
${csudo}rm -rf /Applications/TDengine.app
|
||||
fi
|
||||
|
||||
echo -e "${GREEN}${productName} is removed successfully!${NC}"
|
||||
echo
|
||||
|
|
|
@@ -380,9 +380,16 @@ void hbDeregisterConn(SAppHbMgr* pAppHbMgr, SClientHbKey connKey);
// --- mq
void hbMgrInitMqHbRspHandle();

typedef struct SSqlCallbackWrapper {
  SParseContext* pParseCtx;
  SCatalogReq*   pCatalogReq;
  SMetaData*     pResultMeta;
  SRequestObj*   pRequest;
} SSqlCallbackWrapper;

SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQuery, void** res);
int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList);
void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultMeta);
void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultMeta, SSqlCallbackWrapper* pWrapper);
int32_t refreshMeta(STscObj* pTscObj, SRequestObj* pRequest);
int32_t updateQnodeList(SAppInstInfo* pInfo, SArray* pNodeList);
void doAsyncQuery(SRequestObj* pRequest, bool forceUpdateMeta);

@@ -390,6 +397,8 @@ int32_t removeMeta(STscObj* pTscObj, SArray* tbList);
int32_t handleAlterTbExecRes(void* res, struct SCatalog* pCatalog);
int32_t handleCreateTbExecRes(void* res, SCatalog* pCatalog);
bool qnodeRequired(SRequestObj* pRequest);
int32_t continueInsertFromCsv(SSqlCallbackWrapper* pWrapper, SRequestObj* pRequest);
void destorySqlCallbackWrapper(SSqlCallbackWrapper* pWrapper);

#ifdef __cplusplus
}
@ -870,7 +870,8 @@ int32_t handleQueryExecRsp(SRequestObj* pRequest) {
|
|||
|
||||
// todo refacto the error code mgmt
|
||||
void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
|
||||
SRequestObj* pRequest = (SRequestObj*)param;
|
||||
SSqlCallbackWrapper* pWrapper = param;
|
||||
SRequestObj* pRequest = pWrapper->pRequest;
|
||||
STscObj* pTscObj = pRequest->pTscObj;
|
||||
|
||||
pRequest->code = code;
|
||||
|
@ -882,7 +883,7 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
|
|||
int32_t type = pRequest->type;
|
||||
if (TDMT_VND_SUBMIT == type || TDMT_VND_DELETE == type || TDMT_VND_CREATE_TABLE == type) {
|
||||
if (pResult) {
|
||||
pRequest->body.resInfo.numOfRows = pResult->numOfRows;
|
||||
pRequest->body.resInfo.numOfRows += pResult->numOfRows;
|
||||
|
||||
// record the insert rows
|
||||
if (TDMT_VND_SUBMIT == type) {
|
||||
|
@ -899,12 +900,13 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
|
|||
pRequest->requestId);
|
||||
|
||||
if (code != TSDB_CODE_SUCCESS && NEED_CLIENT_HANDLE_ERROR(code) && pRequest->sqlstr != NULL) {
|
||||
tscDebug("0x%" PRIx64 " client retry to handle the error, code:%s, tryCount:%d, reqId:0x%" PRIx64,
|
||||
pRequest->self, tstrerror(code), pRequest->retry, pRequest->requestId);
|
||||
tscDebug("0x%" PRIx64 " client retry to handle the error, code:%s, tryCount:%d, reqId:0x%" PRIx64, pRequest->self,
|
||||
tstrerror(code), pRequest->retry, pRequest->requestId);
|
||||
pRequest->prevCode = code;
|
||||
schedulerFreeJob(&pRequest->body.queryJob, 0);
|
||||
qDestroyQuery(pRequest->pQuery);
|
||||
pRequest->pQuery = NULL;
|
||||
destorySqlCallbackWrapper(pWrapper);
|
||||
doAsyncQuery(pRequest, true);
|
||||
return;
|
||||
}
|
||||
|
@ -920,6 +922,15 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
|
|||
pRequest->code = code1;
|
||||
}
|
||||
|
||||
if (pRequest->code == TSDB_CODE_SUCCESS && NULL != pWrapper->pParseCtx && pWrapper->pParseCtx->needMultiParse) {
|
||||
code = continueInsertFromCsv(pWrapper, pRequest);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
destorySqlCallbackWrapper(pWrapper);
|
||||
|
||||
// return to client
|
||||
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
|
||||
}
|
||||
|
@ -1020,23 +1031,11 @@ SRequestObj* launchQuery(uint64_t connId, const char* sql, int sqlLen, bool vali
|
|||
return launchQueryImpl(pRequest, pQuery, false, NULL);
|
||||
}
|
||||
|
||||
void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultMeta) {
|
||||
int32_t code = 0;
|
||||
|
||||
pRequest->body.execMode = pQuery->execMode;
|
||||
|
||||
switch (pQuery->execMode) {
|
||||
case QUERY_EXEC_MODE_LOCAL:
|
||||
asyncExecLocalCmd(pRequest, pQuery);
|
||||
return;
|
||||
case QUERY_EXEC_MODE_RPC:
|
||||
code = asyncExecDdlQuery(pRequest, pQuery);
|
||||
break;
|
||||
case QUERY_EXEC_MODE_SCHEDULE: {
|
||||
SArray* pMnodeList = taosArrayInit(4, sizeof(SQueryNodeLoad));
|
||||
|
||||
static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultMeta,
|
||||
SSqlCallbackWrapper* pWrapper) {
|
||||
pRequest->type = pQuery->msgType;
|
||||
|
||||
SArray* pMnodeList = taosArrayInit(4, sizeof(SQueryNodeLoad));
|
||||
SPlanContext cxt = {.queryId = pRequest->requestId,
|
||||
.acctId = pRequest->pTscObj->acctId,
|
||||
.mgmtEpSet = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp),
|
||||
|
@ -1047,10 +1046,8 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
|
|||
.pUser = pRequest->pTscObj->user,
|
||||
.sysInfo = pRequest->pTscObj->sysInfo,
|
||||
.allocatorId = pRequest->allocatorRefId};
|
||||
|
||||
SAppInstInfo* pAppInfo = getAppInfo(pRequest);
|
||||
SQueryPlan* pDag = NULL;
|
||||
code = qCreateQueryPlan(&cxt, &pDag, pMnodeList);
|
||||
int32_t code = qCreateQueryPlan(&cxt, &pDag, pMnodeList);
|
||||
if (code) {
|
||||
tscError("0x%" PRIx64 " failed to create query plan, code:%s 0x%" PRIx64, pRequest->self, tstrerror(code),
|
||||
pRequest->requestId);
|
||||
|
@ -1059,12 +1056,14 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
|
|||
}
|
||||
|
||||
pRequest->metric.planEnd = taosGetTimestampUs();
|
||||
|
||||
if (TSDB_CODE_SUCCESS == code && !pRequest->validateOnly) {
|
||||
SArray* pNodeList = NULL;
|
||||
buildAsyncExecNodeList(pRequest, &pNodeList, pMnodeList, pResultMeta);
|
||||
|
||||
SRequestConnInfo conn = {
|
||||
.pTrans = pAppInfo->pTransporter, .requestId = pRequest->requestId, .requestObjRefId = pRequest->self};
|
||||
SRequestConnInfo conn = {.pTrans = getAppInfo(pRequest)->pTransporter,
|
||||
.requestId = pRequest->requestId,
|
||||
.requestObjRefId = pRequest->self};
|
||||
SSchedulerReq req = {
|
||||
.syncReq = false,
|
||||
.localReq = (tsQueryPolicy == QUERY_POLICY_CLIENT),
|
||||
|
@ -1075,7 +1074,7 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
|
|||
.sql = pRequest->sqlstr,
|
||||
.startTs = pRequest->metric.start,
|
||||
.execFp = schedulerExecCb,
|
||||
.cbParam = pRequest,
|
||||
.cbParam = pWrapper,
|
||||
.chkKillFp = chkRequestKilled,
|
||||
.chkKillParam = (void*)pRequest->self,
|
||||
.pExecRes = NULL,
|
||||
|
@ -1085,11 +1084,33 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
|
|||
} else {
|
||||
tscDebug("0x%" PRIx64 " plan not executed, code:%s 0x%" PRIx64, pRequest->self, tstrerror(code),
|
||||
pRequest->requestId);
|
||||
destorySqlCallbackWrapper(pWrapper);
|
||||
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
|
||||
}
|
||||
|
||||
// todo not to be released here
|
||||
taosArrayDestroy(pMnodeList);
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultMeta, SSqlCallbackWrapper* pWrapper) {
|
||||
int32_t code = 0;
|
||||
|
||||
pRequest->body.execMode = pQuery->execMode;
|
||||
if (QUERY_EXEC_MODE_SCHEDULE != pRequest->body.execMode) {
|
||||
destorySqlCallbackWrapper(pWrapper);
|
||||
}
|
||||
|
||||
switch (pQuery->execMode) {
|
||||
case QUERY_EXEC_MODE_LOCAL:
|
||||
asyncExecLocalCmd(pRequest, pQuery);
|
||||
break;
|
||||
case QUERY_EXEC_MODE_RPC:
|
||||
code = asyncExecDdlQuery(pRequest, pQuery);
|
||||
break;
|
||||
case QUERY_EXEC_MODE_SCHEDULE: {
|
||||
code = asyncExecSchQuery(pRequest, pQuery, pResultMeta, pWrapper);
|
||||
break;
|
||||
}
|
||||
case QUERY_EXEC_MODE_EMPTY_RESULT:
|
||||
|
|
|
@ -667,35 +667,39 @@ const char *taos_get_server_info(TAOS *taos) {
|
|||
return pTscObj->sDetailVer;
|
||||
}
|
||||
|
||||
typedef struct SqlParseWrapper {
|
||||
SParseContext *pCtx;
|
||||
SCatalogReq catalogReq;
|
||||
SRequestObj *pRequest;
|
||||
} SqlParseWrapper;
|
||||
|
||||
static void destoryTablesReq(void *p) {
|
||||
STablesReq *pRes = (STablesReq *)p;
|
||||
taosArrayDestroy(pRes->pTables);
|
||||
}
|
||||
|
||||
static void destorySqlParseWrapper(SqlParseWrapper *pWrapper) {
|
||||
taosArrayDestroy(pWrapper->catalogReq.pDbVgroup);
|
||||
taosArrayDestroy(pWrapper->catalogReq.pDbCfg);
|
||||
taosArrayDestroy(pWrapper->catalogReq.pDbInfo);
|
||||
taosArrayDestroyEx(pWrapper->catalogReq.pTableMeta, destoryTablesReq);
|
||||
taosArrayDestroyEx(pWrapper->catalogReq.pTableHash, destoryTablesReq);
|
||||
taosArrayDestroy(pWrapper->catalogReq.pUdf);
|
||||
taosArrayDestroy(pWrapper->catalogReq.pIndex);
|
||||
taosArrayDestroy(pWrapper->catalogReq.pUser);
|
||||
taosArrayDestroy(pWrapper->catalogReq.pTableIndex);
|
||||
taosArrayDestroy(pWrapper->pCtx->pTableMetaPos);
|
||||
taosArrayDestroy(pWrapper->pCtx->pTableVgroupPos);
|
||||
taosMemoryFree(pWrapper->pCtx);
|
||||
static void destoryCatalogReq(SCatalogReq *pCatalogReq) {
|
||||
if (NULL == pCatalogReq) {
|
||||
return;
|
||||
}
|
||||
taosArrayDestroy(pCatalogReq->pDbVgroup);
|
||||
taosArrayDestroy(pCatalogReq->pDbCfg);
|
||||
taosArrayDestroy(pCatalogReq->pDbInfo);
|
||||
taosArrayDestroyEx(pCatalogReq->pTableMeta, destoryTablesReq);
|
||||
taosArrayDestroyEx(pCatalogReq->pTableHash, destoryTablesReq);
|
||||
taosArrayDestroy(pCatalogReq->pUdf);
|
||||
taosArrayDestroy(pCatalogReq->pIndex);
|
||||
taosArrayDestroy(pCatalogReq->pUser);
|
||||
taosArrayDestroy(pCatalogReq->pTableIndex);
|
||||
taosMemoryFree(pCatalogReq);
|
||||
}
|
||||
|
||||
void destorySqlCallbackWrapper(SSqlCallbackWrapper *pWrapper) {
|
||||
if (NULL == pWrapper) {
|
||||
return;
|
||||
}
|
||||
destoryCatalogReq(pWrapper->pCatalogReq);
|
||||
qDestroyParseContext(pWrapper->pParseCtx);
|
||||
catalogFreeMetaData(pWrapper->pResultMeta);
|
||||
taosMemoryFree(pWrapper);
|
||||
}
|
||||
|
||||
void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) {
|
||||
SqlParseWrapper *pWrapper = (SqlParseWrapper *)param;
|
||||
SSqlCallbackWrapper *pWrapper = (SSqlCallbackWrapper *)param;
|
||||
SRequestObj *pRequest = pWrapper->pRequest;
|
||||
SQuery *pQuery = pRequest->pQuery;
|
||||
|
||||
|
@ -703,7 +707,7 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) {
|
|||
qDebug("0x%" PRIx64 " start to semantic analysis, reqId:0x%" PRIx64, pRequest->self, pRequest->requestId);
|
||||
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
code = qAnalyseSqlSemantic(pWrapper->pCtx, &pWrapper->catalogReq, pResultMeta, pQuery);
|
||||
code = qAnalyseSqlSemantic(pWrapper->pParseCtx, pWrapper->pCatalogReq, pResultMeta, pQuery);
|
||||
pRequest->stableQuery = pQuery->stableQuery;
|
||||
if (pQuery->pRoot) {
|
||||
pRequest->stmtType = pQuery->pRoot->type;
|
||||
|
@ -712,6 +716,13 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) {
|
|||
|
||||
pRequest->metric.semanticEnd = taosGetTimestampUs();
|
||||
|
||||
if (code == TSDB_CODE_SUCCESS && pWrapper->pParseCtx->needMultiParse) {
|
||||
pWrapper->pResultMeta = catalogCloneMetaData(pResultMeta);
|
||||
if (NULL == pWrapper->pResultMeta) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
}
|
||||
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
if (pQuery->haveResultSet) {
|
||||
setResSchemaInfo(&pRequest->body.resInfo, pQuery->pResSchema, pQuery->numOfResCols);
|
||||
|
@ -722,15 +733,13 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) {
|
|||
TSWAP(pRequest->tableList, (pQuery)->pTableList);
|
||||
TSWAP(pRequest->targetTableList, (pQuery)->pTargetTableList);
|
||||
|
||||
destorySqlParseWrapper(pWrapper);
|
||||
|
||||
double el = (pRequest->metric.semanticEnd - pRequest->metric.ctgEnd) / 1000.0;
|
||||
tscDebug("0x%" PRIx64 " analysis semantics completed, start async query, elapsed time:%.2f ms, reqId:0x%" PRIx64,
|
||||
pRequest->self, el, pRequest->requestId);
|
||||
|
||||
launchAsyncQuery(pRequest, pQuery, pResultMeta);
|
||||
launchAsyncQuery(pRequest, pQuery, pResultMeta, pWrapper);
|
||||
} else {
|
||||
destorySqlParseWrapper(pWrapper);
|
||||
destorySqlCallbackWrapper(pWrapper);
|
||||
qDestroyQuery(pRequest->pQuery);
|
||||
pRequest->pQuery = NULL;
|
||||
|
||||
|
@ -750,6 +759,16 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) {
|
|||
}
|
||||
}
|
||||
|
||||
int32_t continueInsertFromCsv(SSqlCallbackWrapper *pWrapper, SRequestObj *pRequest) {
|
||||
qDestroyQuery(pRequest->pQuery);
|
||||
pRequest->pQuery = (SQuery *)nodesMakeNode(QUERY_NODE_QUERY);
|
||||
if (NULL == pRequest->pQuery) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
retrieveMetaCallback(pWrapper->pResultMeta, pWrapper, TSDB_CODE_SUCCESS);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
void taos_query_a(TAOS *taos, const char *sql, __taos_async_fn_t fp, void *param) {
|
||||
int64_t connId = *(int64_t *)taos;
|
||||
taosAsyncQueryImpl(connId, sql, fp, param, false);
|
||||
|
@ -786,37 +805,48 @@ int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt) {
|
|||
}
|
||||
|
||||
void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
|
||||
SParseContext *pCxt = NULL;
|
||||
STscObj *pTscObj = pRequest->pTscObj;
|
||||
int32_t code = 0;
|
||||
SSqlCallbackWrapper *pWrapper = NULL;
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
||||
if (pRequest->retry++ > REQUEST_TOTAL_EXEC_TIMES) {
|
||||
code = pRequest->prevCode;
|
||||
goto _error;
|
||||
}
|
||||
|
||||
code = createParseContext(pRequest, &pCxt);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _error;
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
pWrapper = taosMemoryCalloc(1, sizeof(SSqlCallbackWrapper));
|
||||
if (pWrapper == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
} else {
|
||||
pWrapper->pRequest = pRequest;
|
||||
}
|
||||
}
|
||||
|
||||
pCxt->mgmtEpSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
|
||||
code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCxt->pCatalog);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _error;
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = createParseContext(pRequest, &pWrapper->pParseCtx);
|
||||
}
|
||||
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
pWrapper->pParseCtx->mgmtEpSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
|
||||
code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pWrapper->pParseCtx->pCatalog);
|
||||
}
|
||||
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
pRequest->metric.syntaxStart = taosGetTimestampUs();
|
||||
|
||||
SCatalogReq catalogReq = {.forceUpdate = updateMetaForce, .qNodeRequired = qnodeRequired(pRequest)};
|
||||
code = qParseSqlSyntax(pCxt, &pRequest->pQuery, &catalogReq);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _error;
|
||||
pWrapper->pCatalogReq = taosMemoryCalloc(1, sizeof(SCatalogReq));
|
||||
if (pWrapper->pCatalogReq == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
} else {
|
||||
pWrapper->pCatalogReq->forceUpdate = updateMetaForce;
|
||||
pWrapper->pCatalogReq->qNodeRequired = qnodeRequired(pRequest);
|
||||
code = qParseSqlSyntax(pWrapper->pParseCtx, &pRequest->pQuery, pWrapper->pCatalogReq);
|
||||
}
|
||||
|
||||
pRequest->metric.syntaxEnd = taosGetTimestampUs();
|
||||
}
|
||||
|
||||
if (!updateMetaForce) {
|
||||
if (TSDB_CODE_SUCCESS == code && !updateMetaForce) {
|
||||
SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
|
||||
if (NULL == pRequest->pQuery->pRoot) {
|
||||
atomic_add_fetch_64((int64_t *)&pActivity->numOfInsertsReq, 1);
|
||||
|
@ -825,39 +855,27 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
|
|||
}
|
||||
}
|
||||
|
||||
SqlParseWrapper *pWrapper = taosMemoryCalloc(1, sizeof(SqlParseWrapper));
|
||||
if (pWrapper == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _error;
|
||||
}
|
||||
|
||||
pWrapper->pCtx = pCxt;
|
||||
pWrapper->pRequest = pRequest;
|
||||
pWrapper->catalogReq = catalogReq;
|
||||
|
||||
SRequestConnInfo conn = {.pTrans = pCxt->pTransporter,
|
||||
.requestId = pCxt->requestId,
|
||||
.requestObjRefId = pCxt->requestRid,
|
||||
.mgmtEps = pCxt->mgmtEpSet};
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
SRequestConnInfo conn = {.pTrans = pWrapper->pParseCtx->pTransporter,
|
||||
.requestId = pWrapper->pParseCtx->requestId,
|
||||
.requestObjRefId = pWrapper->pParseCtx->requestRid,
|
||||
.mgmtEps = pWrapper->pParseCtx->mgmtEpSet};
|
||||
|
||||
pRequest->metric.ctgStart = taosGetTimestampUs();
|
||||
|
||||
code = catalogAsyncGetAllMeta(pCxt->pCatalog, &conn, &catalogReq, retrieveMetaCallback, pWrapper,
|
||||
&pRequest->body.queryJob);
|
||||
pCxt = NULL;
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
return;
|
||||
code = catalogAsyncGetAllMeta(pWrapper->pParseCtx->pCatalog, &conn, pWrapper->pCatalogReq, retrieveMetaCallback,
|
||||
pWrapper, &pRequest->body.queryJob);
|
||||
}
|
||||
|
||||
_error:
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
tscError("0x%" PRIx64 " error happens, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code, tstrerror(code),
|
||||
pRequest->requestId);
|
||||
taosMemoryFree(pCxt);
|
||||
|
||||
destorySqlCallbackWrapper(pWrapper);
|
||||
terrno = code;
|
||||
pRequest->code = code;
|
||||
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
|
||||
}
|
||||
}
|
||||
|
||||
static void fetchCallback(void *pResult, void *param, int32_t code) {
|
||||
SRequestObj *pRequest = (SRequestObj *)param;
|
||||
|
|
|
@ -227,7 +227,7 @@ _err:
|
|||
|
||||
static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) {
|
||||
STag* pTag = (STag*)pCreateReq->ctb.pTag;
|
||||
char* sname = pCreateReq->ctb.name;
|
||||
char* sname = pCreateReq->ctb.stbName;
|
||||
char* name = pCreateReq->name;
|
||||
SArray* tagName = pCreateReq->ctb.tagName;
|
||||
int64_t id = pCreateReq->uid;
|
||||
|
@ -355,7 +355,8 @@ static char* processCreateTable(SMqMetaRsp* metaRsp) {
|
|||
if (pCreateReq->type == TSDB_CHILD_TABLE) {
|
||||
string = buildCreateCTableJson(req.pReqs, req.nReqs);
|
||||
} else if (pCreateReq->type == TSDB_NORMAL_TABLE) {
|
||||
string = buildCreateTableJson(&pCreateReq->ntb.schemaRow, NULL, pCreateReq->name, pCreateReq->uid, TSDB_NORMAL_TABLE);
|
||||
string =
|
||||
buildCreateTableJson(&pCreateReq->ntb.schemaRow, NULL, pCreateReq->name, pCreateReq->uid, TSDB_NORMAL_TABLE);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -828,10 +829,10 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
if (pCreateReq->type == TSDB_CHILD_TABLE) {
|
||||
STableMeta* pTableMeta = NULL;
|
||||
SName sName = {0};
|
||||
toName(pTscObj->acctId, pRequest->pDb, pCreateReq->ctb.name, &sName);
|
||||
toName(pTscObj->acctId, pRequest->pDb, pCreateReq->ctb.stbName, &sName);
|
||||
code = catalogGetTableMeta(pCatalog, &conn, &sName, &pTableMeta);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
uError("taosCreateTable:catalogGetTableMeta failed. table name: %s", pCreateReq->ctb.name);
|
||||
uError("taosCreateTable:catalogGetTableMeta failed. table name: %s", pCreateReq->ctb.stbName);
|
||||
goto end;
|
||||
}
|
||||
|
||||
|
@ -1765,7 +1766,7 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
|
|||
if (strcmp(tbName, pCreateReq.name) == 0) {
|
||||
schemaLen = *lenTmp;
|
||||
schemaData = *dataTmp;
|
||||
strcpy(pName.tname, pCreateReq.ctb.name);
|
||||
strcpy(pName.tname, pCreateReq.ctb.stbName);
|
||||
tDecoderClear(&decoderTmp);
|
||||
taosMemoryFreeClear(pCreateReq.comment);
|
||||
taosArrayDestroy(pCreateReq.ctb.tagName);
|
||||
|
|
|
@ -314,7 +314,8 @@ static int32_t getBytes(uint8_t type, int32_t length){
|
|||
}
|
||||
}
|
||||
|
||||
static int32_t smlBuildFieldsList(SSmlHandle *info, SSchema *schemaField, SHashObj *schemaHash, SArray *cols, SArray* results, int32_t numOfCols, bool isTag) {
|
||||
static int32_t smlBuildFieldsList(SSmlHandle *info, SSchema *schemaField, SHashObj *schemaHash, SArray *cols,
|
||||
SArray *results, int32_t numOfCols, bool isTag) {
|
||||
for (int j = 0; j < taosArrayGetSize(cols); ++j) {
|
||||
SSmlKv *kv = (SSmlKv *)taosArrayGetP(cols, j);
|
||||
ESchemaAction action = SCHEMA_ACTION_NULL;
|
||||
|
@ -338,9 +339,8 @@ static int32_t smlBuildFieldsList(SSmlHandle *info, SSchema *schemaField, SHashO
|
|||
|
||||
// static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SSmlSTableMeta *sTableData,
|
||||
// int32_t colVer, int32_t tagVer, int8_t source, uint64_t suid){
|
||||
static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SArray* pColumns, SArray* pTags,
|
||||
STableMeta *pTableMeta, ESchemaAction action){
|
||||
|
||||
static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SArray *pColumns, SArray *pTags, STableMeta *pTableMeta,
|
||||
ESchemaAction action) {
|
||||
SRequestObj *pRequest = NULL;
|
||||
SMCreateStbReq pReq = {0};
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
@ -463,8 +463,8 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
|
|||
}
|
||||
info->cost.numOfCreateSTables++;
|
||||
} else if (code == TSDB_CODE_SUCCESS) {
|
||||
hashTmp = taosHashInit(pTableMeta->tableInfo.numOfTags,
|
||||
taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
|
||||
hashTmp = taosHashInit(pTableMeta->tableInfo.numOfTags, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true,
|
||||
HASH_NO_LOCK);
|
||||
for (uint16_t i = pTableMeta->tableInfo.numOfColumns;
|
||||
i < pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; i++) {
|
||||
taosHashPut(hashTmp, pTableMeta->schema[i].name, strlen(pTableMeta->schema[i].name), &i, SHORT_BYTES);
|
||||
|
@ -476,8 +476,10 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
|
|||
goto end;
|
||||
}
|
||||
if (action != SCHEMA_ACTION_NULL) {
|
||||
SArray* pColumns = taosArrayInit(taosArrayGetSize(sTableData->cols) + pTableMeta->tableInfo.numOfColumns, sizeof(SField));
|
||||
SArray* pTags = taosArrayInit(taosArrayGetSize(sTableData->tags) + pTableMeta->tableInfo.numOfTags, sizeof(SField));
|
||||
SArray *pColumns =
|
||||
taosArrayInit(taosArrayGetSize(sTableData->cols) + pTableMeta->tableInfo.numOfColumns, sizeof(SField));
|
||||
SArray *pTags =
|
||||
taosArrayInit(taosArrayGetSize(sTableData->tags) + pTableMeta->tableInfo.numOfTags, sizeof(SField));
|
||||
|
||||
for (uint16_t i = 0; i < pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; i++) {
|
||||
SField field = {0};
|
||||
|
@ -490,7 +492,8 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
|
|||
taosArrayPush(pTags, &field);
|
||||
}
|
||||
}
|
||||
smlBuildFieldsList(info, pTableMeta->schema, hashTmp, sTableData->tags, pTags, pTableMeta->tableInfo.numOfColumns, true);
|
||||
smlBuildFieldsList(info, pTableMeta->schema, hashTmp, sTableData->tags, pTags,
|
||||
pTableMeta->tableInfo.numOfColumns, true);
|
||||
|
||||
code = smlSendMetaMsg(info, &pName, pColumns, pTags, pTableMeta, action);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
|
@ -519,8 +522,10 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
|
|||
goto end;
|
||||
}
|
||||
if (action != SCHEMA_ACTION_NULL) {
|
||||
SArray* pColumns = taosArrayInit(taosArrayGetSize(sTableData->cols) + pTableMeta->tableInfo.numOfColumns, sizeof(SField));
|
||||
SArray* pTags = taosArrayInit(taosArrayGetSize(sTableData->tags) + pTableMeta->tableInfo.numOfTags, sizeof(SField));
|
||||
SArray *pColumns =
|
||||
taosArrayInit(taosArrayGetSize(sTableData->cols) + pTableMeta->tableInfo.numOfColumns, sizeof(SField));
|
||||
SArray *pTags =
|
||||
taosArrayInit(taosArrayGetSize(sTableData->tags) + pTableMeta->tableInfo.numOfTags, sizeof(SField));
|
||||
|
||||
for (uint16_t i = 0; i < pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; i++) {
|
||||
SField field = {0};
|
||||
|
@ -534,7 +539,8 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
|
|||
}
|
||||
}
|
||||
|
||||
smlBuildFieldsList(info, pTableMeta->schema, hashTmp, sTableData->cols, pColumns, pTableMeta->tableInfo.numOfColumns, false);
|
||||
smlBuildFieldsList(info, pTableMeta->schema, hashTmp, sTableData->cols, pColumns,
|
||||
pTableMeta->tableInfo.numOfColumns, false);
|
||||
|
||||
code = smlSendMetaMsg(info, &pName, pColumns, pTags, pTableMeta, action);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
|
@ -1830,7 +1836,8 @@ static int32_t smlConvertJSONString(SSmlKv *pVal, char *typeStr, cJSON *value) {
|
|||
if (pVal->type == TSDB_DATA_TYPE_BINARY && pVal->length > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) {
|
||||
return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
|
||||
}
|
||||
if (pVal->type == TSDB_DATA_TYPE_NCHAR && pVal->length > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE){
|
||||
if (pVal->type == TSDB_DATA_TYPE_NCHAR &&
|
||||
pVal->length > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
|
||||
return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
|
||||
}
|
||||
|
||||
|
@ -2314,7 +2321,8 @@ static int32_t smlInsertData(SSmlHandle *info) {
|
|||
(*pMeta)->tableMeta->uid = tableData->uid; // one table merge data block together according uid
|
||||
|
||||
code = smlBindData(info->exec, tableData->tags, (*pMeta)->cols, tableData->cols, info->dataFormat,
|
||||
(*pMeta)->tableMeta, tableData->childTableName, tableData->sTableName, tableData->sTableNameLen, info->msgBuf.buf, info->msgBuf.len);
|
||||
(*pMeta)->tableMeta, tableData->childTableName, tableData->sTableName, tableData->sTableNameLen,
|
||||
info->msgBuf.buf, info->msgBuf.len);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
uError("SML:0x%" PRIx64 " smlBindData failed", info->id);
|
||||
return code;
|
||||
|
@ -2336,7 +2344,12 @@ static int32_t smlInsertData(SSmlHandle *info) {
|
|||
SAppClusterSummary *pActivity = &info->taos->pAppInfo->summary;
|
||||
atomic_add_fetch_64((int64_t *)&pActivity->numOfInsertsReq, 1);
|
||||
|
||||
launchAsyncQuery(info->pRequest, info->pQuery, NULL);
|
||||
SSqlCallbackWrapper *pWrapper = (SSqlCallbackWrapper *)taosMemoryCalloc(1, sizeof(SSqlCallbackWrapper));
|
||||
if (pWrapper == NULL) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
pWrapper->pRequest = info->pRequest;
|
||||
launchAsyncQuery(info->pRequest, info->pQuery, NULL, pWrapper);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
|
|
@ -1235,6 +1235,7 @@ void blockDataFreeRes(SSDataBlock* pBlock) {
|
|||
taosArrayDestroy(pBlock->pDataBlock);
|
||||
pBlock->pDataBlock = NULL;
|
||||
taosMemoryFreeClear(pBlock->pBlockAgg);
|
||||
taosMemoryFree(pBlock->info.pTag);
|
||||
memset(&pBlock->info, 0, sizeof(SDataBlockInfo));
|
||||
}
|
||||
|
||||
|
@ -1316,8 +1317,8 @@ SSDataBlock* createSpecialDataBlock(EStreamType type) {
|
|||
pBlock->info.groupId = 0;
|
||||
pBlock->info.rows = 0;
|
||||
pBlock->info.type = type;
|
||||
pBlock->info.rowSize =
|
||||
sizeof(TSKEY) + sizeof(TSKEY) + sizeof(uint64_t) + sizeof(uint64_t) + sizeof(TSKEY) + sizeof(TSKEY);
|
||||
pBlock->info.rowSize = sizeof(TSKEY) + sizeof(TSKEY) + sizeof(uint64_t) + sizeof(uint64_t) + sizeof(TSKEY) +
|
||||
sizeof(TSKEY) + VARSTR_HEADER_SIZE + TSDB_TABLE_NAME_LEN;
|
||||
pBlock->info.watermark = INT64_MIN;
|
||||
|
||||
pBlock->pDataBlock = taosArrayInit(6, sizeof(SColumnInfoData));
|
||||
|
@ -1343,6 +1344,48 @@ SSDataBlock* createSpecialDataBlock(EStreamType type) {
|
|||
// calculate end ts
|
||||
taosArrayPush(pBlock->pDataBlock, &infoData);
|
||||
|
||||
// table name
|
||||
infoData.info.type = TSDB_DATA_TYPE_VARCHAR;
|
||||
infoData.info.bytes = VARSTR_HEADER_SIZE + TSDB_TABLE_NAME_LEN;
|
||||
taosArrayPush(pBlock->pDataBlock, &infoData);
|
||||
|
||||
return pBlock;
|
||||
}
|
||||
|
||||
SSDataBlock* blockCopyOneRow(const SSDataBlock* pDataBlock, int32_t rowIdx) {
|
||||
if (pDataBlock == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
SSDataBlock* pBlock = createDataBlock();
|
||||
pBlock->info = pDataBlock->info;
|
||||
pBlock->info.rows = 0;
|
||||
pBlock->info.capacity = 0;
|
||||
|
||||
size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock);
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SColumnInfoData* p = taosArrayGet(pDataBlock->pDataBlock, i);
|
||||
SColumnInfoData colInfo = {.hasNull = true, .info = p->info};
|
||||
blockDataAppendColInfo(pBlock, &colInfo);
|
||||
}
|
||||
|
||||
int32_t code = blockDataEnsureCapacity(pBlock, 1);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
terrno = code;
|
||||
blockDataDestroy(pBlock);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, i);
|
||||
SColumnInfoData* pSrc = taosArrayGet(pDataBlock->pDataBlock, i);
|
||||
void* pData = colDataGetData(pSrc, rowIdx);
|
||||
bool isNull = colDataIsNull(pSrc, pDataBlock->info.rows, rowIdx, NULL);
|
||||
colDataAppend(pDst, 0, pData, isNull);
|
||||
}
|
||||
|
||||
pBlock->info.rows = 1;
|
||||
|
||||
return pBlock;
|
||||
}
|
||||
|
||||
|
|
|
@ -20,6 +20,30 @@
|
|||
#include "tdatablock.h"
|
||||
#include "tlog.h"
|
||||
|
||||
// SBuffer ================================
|
||||
void tBufferDestroy(SBuffer *pBuffer) {
|
||||
tFree(pBuffer->pBuf);
|
||||
pBuffer->pBuf = NULL;
|
||||
}
|
||||
|
||||
int32_t tBufferInit(SBuffer *pBuffer, int64_t size) {
|
||||
pBuffer->nBuf = 0;
|
||||
return tRealloc(&pBuffer->pBuf, size);
|
||||
}
|
||||
|
||||
int32_t tBufferPut(SBuffer *pBuffer, const void *pData, int64_t nData) {
|
||||
int32_t code = 0;
|
||||
|
||||
code = tRealloc(&pBuffer->pBuf, pBuffer->nBuf + nData);
|
||||
if (code) return code;
|
||||
|
||||
memcpy(pBuffer->pBuf + pBuffer->nBuf, pData, nData);
|
||||
pBuffer->nBuf += nData;
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
// ================================
|
||||
static int32_t tGetTagVal(uint8_t *p, STagVal *pTagVal, int8_t isJson);
|
||||
|
||||
#pragma pack(push, 1)
|
||||
|
@ -387,7 +411,7 @@ _exit:
|
|||
|
||||
int32_t tTSRowClone(const STSRow2 *pRow, STSRow2 **ppRow) {
|
||||
int32_t code = 0;
|
||||
int32_t rLen;
|
||||
int32_t rLen = 0;
|
||||
|
||||
TSROW_LEN(pRow, rLen);
|
||||
(*ppRow) = (STSRow2 *)taosMemoryMalloc(rLen);
|
||||
|
@ -1654,8 +1678,8 @@ int32_t tColDataCopy(SColData *pColDataSrc, SColData *pColDataDest) {
|
|||
int32_t size;
|
||||
|
||||
ASSERT(pColDataSrc->nVal > 0);
|
||||
ASSERT(pColDataDest->cid = pColDataSrc->cid);
|
||||
ASSERT(pColDataDest->type = pColDataSrc->type);
|
||||
ASSERT(pColDataDest->cid == pColDataSrc->cid);
|
||||
ASSERT(pColDataDest->type == pColDataSrc->type);
|
||||
|
||||
pColDataDest->smaOn = pColDataSrc->smaOn;
|
||||
pColDataDest->nVal = pColDataSrc->nVal;
|
||||
|
|
|
@ -85,7 +85,8 @@ uint16_t tsTelemPort = 80;
|
|||
char tsSmlTagName[TSDB_COL_NAME_LEN] = "_tag_null";
|
||||
char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; // user defined child table name can be specified in tag value.
|
||||
// If set to empty system will generate table name using MD5 hash.
|
||||
bool tsSmlDataFormat = false; // true means that the name and order of cols in each line are the same(only for influx protocol)
|
||||
// true means that the name and order of cols in each line are the same(only for influx protocol)
|
||||
bool tsSmlDataFormat = false;
|
||||
|
||||
// query
|
||||
int32_t tsQueryPolicy = 1;
|
||||
|
@ -125,6 +126,9 @@ int32_t tsMaxNumOfDistinctResults = 1000 * 10000;
|
|||
// 1 database precision unit for interval time range, changed accordingly
|
||||
int32_t tsMinIntervalTime = 1;
|
||||
|
||||
// maximum memory allowed to be allocated for a single csv load (in MB)
|
||||
int32_t tsMaxMemUsedByInsert = 1024;
|
||||
|
||||
// the maximum allowed query buffer size during query processing for each data node.
|
||||
// -1 no limit (default)
|
||||
// 0 no query allowed, queries are disabled
|
||||
|
@ -296,6 +300,7 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
|
|||
if (cfgAddString(pCfg, "smlChildTableName", "", 1) != 0) return -1;
|
||||
if (cfgAddString(pCfg, "smlTagName", tsSmlTagName, 1) != 0) return -1;
|
||||
if (cfgAddBool(pCfg, "smlDataFormat", tsSmlDataFormat, 1) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "maxMemUsedByInsert", tsMaxMemUsedByInsert, 1, INT32_MAX, true) != 0) return -1;
|
||||
|
||||
tsNumOfTaskQueueThreads = tsNumOfCores / 2;
|
||||
tsNumOfTaskQueueThreads = TMAX(tsNumOfTaskQueueThreads, 4);
|
||||
|
@ -374,8 +379,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
|
|||
tsNumOfVnodeStreamThreads = TMAX(tsNumOfVnodeStreamThreads, 4);
|
||||
if (cfgAddInt32(pCfg, "numOfVnodeStreamThreads", tsNumOfVnodeStreamThreads, 4, 1024, 0) != 0) return -1;
|
||||
|
||||
tsNumOfVnodeFetchThreads = 1;
|
||||
if (cfgAddInt32(pCfg, "numOfVnodeFetchThreads", tsNumOfVnodeFetchThreads, 1, 1024, 0) != 0) return -1;
|
||||
// tsNumOfVnodeFetchThreads = 1;
|
||||
// if (cfgAddInt32(pCfg, "numOfVnodeFetchThreads", tsNumOfVnodeFetchThreads, 1, 1, 0) != 0) return -1;
|
||||
|
||||
tsNumOfVnodeWriteThreads = tsNumOfCores;
|
||||
tsNumOfVnodeWriteThreads = TMAX(tsNumOfVnodeWriteThreads, 1);
|
||||
|
@ -497,6 +502,7 @@ static int32_t taosUpdateServerCfg(SConfig *pCfg) {
|
|||
pItem->stype = stype;
|
||||
}
|
||||
|
||||
/*
|
||||
pItem = cfgGetItem(tsCfg, "numOfVnodeFetchThreads");
|
||||
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
|
||||
tsNumOfVnodeFetchThreads = numOfCores / 4;
|
||||
|
@ -504,6 +510,7 @@ static int32_t taosUpdateServerCfg(SConfig *pCfg) {
|
|||
pItem->i32 = tsNumOfVnodeFetchThreads;
|
||||
pItem->stype = stype;
|
||||
}
|
||||
*/
|
||||
|
||||
pItem = cfgGetItem(tsCfg, "numOfVnodeWriteThreads");
|
||||
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
|
||||
|
@ -646,6 +653,8 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
|
|||
tstrncpy(tsSmlTagName, cfgGetItem(pCfg, "smlTagName")->str, TSDB_COL_NAME_LEN);
|
||||
tsSmlDataFormat = cfgGetItem(pCfg, "smlDataFormat")->bval;
|
||||
|
||||
tsMaxMemUsedByInsert = cfgGetItem(pCfg, "maxMemUsedByInsert")->i32;
|
||||
|
||||
tsShellActivityTimer = cfgGetItem(pCfg, "shellActivityTimer")->i32;
|
||||
tsCompressMsgSize = cfgGetItem(pCfg, "compressMsgSize")->i32;
|
||||
tsCompressColData = cfgGetItem(pCfg, "compressColData")->i32;
|
||||
|
@ -703,7 +712,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
|
|||
tsNumOfMnodeReadThreads = cfgGetItem(pCfg, "numOfMnodeReadThreads")->i32;
|
||||
tsNumOfVnodeQueryThreads = cfgGetItem(pCfg, "numOfVnodeQueryThreads")->i32;
|
||||
tsNumOfVnodeStreamThreads = cfgGetItem(pCfg, "numOfVnodeStreamThreads")->i32;
|
||||
tsNumOfVnodeFetchThreads = cfgGetItem(pCfg, "numOfVnodeFetchThreads")->i32;
|
||||
// tsNumOfVnodeFetchThreads = cfgGetItem(pCfg, "numOfVnodeFetchThreads")->i32;
|
||||
tsNumOfVnodeWriteThreads = cfgGetItem(pCfg, "numOfVnodeWriteThreads")->i32;
|
||||
tsNumOfVnodeSyncThreads = cfgGetItem(pCfg, "numOfVnodeSyncThreads")->i32;
|
||||
tsNumOfVnodeRsmaThreads = cfgGetItem(pCfg, "numOfVnodeRsmaThreads")->i32;
|
||||
|
@ -875,6 +884,8 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
|
|||
tsMaxShellConns = cfgGetItem(pCfg, "maxShellConns")->i32;
|
||||
} else if (strcasecmp("maxNumOfDistinctRes", name) == 0) {
|
||||
tsMaxNumOfDistinctResults = cfgGetItem(pCfg, "maxNumOfDistinctRes")->i32;
|
||||
} else if (strcasecmp("maxMemUsedByInsert", name) == 0) {
|
||||
tsMaxMemUsedByInsert = cfgGetItem(pCfg, "maxMemUsedByInsert")->i32;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -953,8 +964,10 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
|
|||
tsNumOfMnodeReadThreads = cfgGetItem(pCfg, "numOfMnodeReadThreads")->i32;
|
||||
} else if (strcasecmp("numOfVnodeQueryThreads", name) == 0) {
|
||||
tsNumOfVnodeQueryThreads = cfgGetItem(pCfg, "numOfVnodeQueryThreads")->i32;
|
||||
/*
|
||||
} else if (strcasecmp("numOfVnodeFetchThreads", name) == 0) {
|
||||
tsNumOfVnodeFetchThreads = cfgGetItem(pCfg, "numOfVnodeFetchThreads")->i32;
|
||||
*/
|
||||
} else if (strcasecmp("numOfVnodeWriteThreads", name) == 0) {
|
||||
tsNumOfVnodeWriteThreads = cfgGetItem(pCfg, "numOfVnodeWriteThreads")->i32;
|
||||
} else if (strcasecmp("numOfVnodeSyncThreads", name) == 0) {
|
||||
|
|
|
@ -5075,7 +5075,7 @@ int tEncodeSVCreateTbReq(SEncoder *pCoder, const SVCreateTbReq *pReq) {
|
|||
}
|
||||
|
||||
if (pReq->type == TSDB_CHILD_TABLE) {
|
||||
if (tEncodeCStr(pCoder, pReq->ctb.name) < 0) return -1;
|
||||
if (tEncodeCStr(pCoder, pReq->ctb.stbName) < 0) return -1;
|
||||
if (tEncodeU8(pCoder, pReq->ctb.tagNum) < 0) return -1;
|
||||
if (tEncodeI64(pCoder, pReq->ctb.suid) < 0) return -1;
|
||||
if (tEncodeTag(pCoder, (const STag *)pReq->ctb.pTag) < 0) return -1;
|
||||
|
@ -5112,7 +5112,7 @@ int tDecodeSVCreateTbReq(SDecoder *pCoder, SVCreateTbReq *pReq) {
|
|||
}
|
||||
|
||||
if (pReq->type == TSDB_CHILD_TABLE) {
|
||||
if (tDecodeCStr(pCoder, &pReq->ctb.name) < 0) return -1;
|
||||
if (tDecodeCStr(pCoder, &pReq->ctb.stbName) < 0) return -1;
|
||||
if (tDecodeU8(pCoder, &pReq->ctb.tagNum) < 0) return -1;
|
||||
if (tDecodeI64(pCoder, &pReq->ctb.suid) < 0) return -1;
|
||||
if (tDecodeTag(pCoder, (STag **)&pReq->ctb.pTag) < 0) return -1;
|
||||
|
|
|
@ -52,10 +52,13 @@ STSBuf* tsBufCreate(bool autoDelete, int32_t order) {
|
|||
}
|
||||
|
||||
if (!autoDelete) {
|
||||
taosRemoveFile(pTSBuf->path);
|
||||
if (taosRemoveFile(pTSBuf->path) != 0) {
|
||||
taosMemoryFree(pTSBuf);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
if (NULL == allocResForTSBuf(pTSBuf)) {
|
||||
if (allocResForTSBuf(pTSBuf) == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
|
|
@ -14,7 +14,4 @@ target_include_directories(
|
|||
taosd
|
||||
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/node_mgmt/inc"
|
||||
)
|
||||
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
|
||||
add_dependencies(taosd jemalloc)
|
||||
ENDIF ()
|
||||
target_link_libraries(taosd dnode)
|
||||
|
|
|
@ -94,6 +94,7 @@ int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t
|
|||
pCfgs = taosMemoryCalloc(vnodesNum, sizeof(SWrapperCfg));
|
||||
if (pCfgs == NULL) {
|
||||
dError("failed to read %s since out of memory", file);
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
|
@ -104,6 +105,7 @@ int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t
|
|||
cJSON *vgId = cJSON_GetObjectItem(vnode, "vgId");
|
||||
if (!vgId || vgId->type != cJSON_Number) {
|
||||
dError("failed to read %s since vgId not found", file);
|
||||
taosMemoryFree(pCfgs);
|
||||
goto _OVER;
|
||||
}
|
||||
pCfg->vgId = vgId->valueint;
|
||||
|
@ -112,6 +114,7 @@ int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t
|
|||
cJSON *dropped = cJSON_GetObjectItem(vnode, "dropped");
|
||||
if (!dropped || dropped->type != cJSON_Number) {
|
||||
dError("failed to read %s since dropped not found", file);
|
||||
taosMemoryFree(pCfgs);
|
||||
goto _OVER;
|
||||
}
|
||||
pCfg->dropped = dropped->valueint;
|
||||
|
@ -119,6 +122,7 @@ int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t
|
|||
cJSON *vgVersion = cJSON_GetObjectItem(vnode, "vgVersion");
|
||||
if (!vgVersion || vgVersion->type != cJSON_Number) {
|
||||
dError("failed to read %s since vgVersion not found", file);
|
||||
taosMemoryFree(pCfgs);
|
||||
goto _OVER;
|
||||
}
|
||||
pCfg->vgVersion = vgVersion->valueint;
|
||||
|
@ -135,14 +139,13 @@ _OVER:
|
|||
if (content != NULL) taosMemoryFree(content);
|
||||
if (root != NULL) cJSON_Delete(root);
|
||||
if (pFile != NULL) taosCloseFile(&pFile);
|
||||
if (*ppCfgs == NULL && pCfgs != NULL) taosMemoryFree(pCfgs);
|
||||
|
||||
terrno = code;
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) {
|
||||
int32_t ret = 0;
|
||||
int32_t code = 0;
|
||||
char file[PATH_MAX] = {0};
|
||||
char realfile[PATH_MAX] = {0};
|
||||
snprintf(file, sizeof(file), "%s%svnodes.json.bak", pMgmt->path, TD_DIRSEP);
|
||||
|
@ -156,21 +159,26 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) {
|
|||
}
|
||||
|
||||
int32_t numOfVnodes = 0;
|
||||
SVnodeObj **pVnodes = vmGetVnodeListFromHash(pMgmt, &numOfVnodes);
|
||||
SVnodeObj **ppVnodes = vmGetVnodeListFromHash(pMgmt, &numOfVnodes);
|
||||
if (ppVnodes == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
code = -1;
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
int32_t len = 0;
|
||||
int32_t maxLen = MAX_CONTENT_LEN;
|
||||
char *content = taosMemoryCalloc(1, maxLen + 1);
|
||||
if (content == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
ret = -1;
|
||||
code = -1;
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
len += snprintf(content + len, maxLen - len, "{\n");
|
||||
len += snprintf(content + len, maxLen - len, " \"vnodes\": [\n");
|
||||
for (int32_t i = 0; i < numOfVnodes; ++i) {
|
||||
SVnodeObj *pVnode = pVnodes[i];
|
||||
SVnodeObj *pVnode = ppVnodes[i];
|
||||
if (pVnode == NULL) continue;
|
||||
|
||||
len += snprintf(content + len, maxLen - len, " {\n");
|
||||
|
@ -193,16 +201,17 @@ _OVER:
|
|||
taosCloseFile(&pFile);
|
||||
taosMemoryFree(content);
|
||||
|
||||
if (ppVnodes != NULL) {
|
||||
for (int32_t i = 0; i < numOfVnodes; ++i) {
|
||||
SVnodeObj *pVnode = pVnodes[i];
|
||||
SVnodeObj *pVnode = ppVnodes[i];
|
||||
if (pVnode != NULL) {
|
||||
vmReleaseVnode(pMgmt, pVnode);
|
||||
}
|
||||
|
||||
if (pVnodes != NULL) {
|
||||
taosMemoryFree(pVnodes);
|
||||
}
|
||||
taosMemoryFree(ppVnodes);
|
||||
}
|
||||
|
||||
if (ret != 0) return -1;
|
||||
if (code != 0) return -1;
|
||||
|
||||
dDebug("successed to write %s, numOfVnodes:%d", realfile, numOfVnodes);
|
||||
return taosRenameFile(file, realfile);
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
#define _DEFAULT_SOURCE
|
||||
#include "vmInt.h"
|
||||
|
||||
void vmGetVnodeLoads(SVnodeMgmt *pMgmt, SMonVloadInfo *pInfo) {
|
||||
void vmGetVnodeLoads(SVnodeMgmt *pMgmt, SMonVloadInfo *pInfo, bool isReset) {
|
||||
pInfo->pVloads = taosArrayInit(pMgmt->state.totalVnodes, sizeof(SVnodeLoad));
|
||||
if (pInfo->pVloads == NULL) return;
|
||||
|
||||
|
@ -30,6 +30,7 @@ void vmGetVnodeLoads(SVnodeMgmt *pMgmt, SMonVloadInfo *pInfo) {
|
|||
SVnodeObj *pVnode = *ppVnode;
|
||||
SVnodeLoad vload = {0};
|
||||
vnodeGetLoad(pVnode->pImpl, &vload);
|
||||
if (isReset) vnodeResetLoad(pVnode->pImpl, &vload);
|
||||
taosArrayPush(pInfo->pVloads, &vload);
|
||||
pIter = taosHashIterate(pMgmt->hash, pIter);
|
||||
}
|
||||
|
@ -39,7 +40,7 @@ void vmGetVnodeLoads(SVnodeMgmt *pMgmt, SMonVloadInfo *pInfo) {
|
|||
|
||||
void vmGetMonitorInfo(SVnodeMgmt *pMgmt, SMonVmInfo *pInfo) {
|
||||
SMonVloadInfo vloads = {0};
|
||||
vmGetVnodeLoads(pMgmt, &vloads);
|
||||
vmGetVnodeLoads(pMgmt, &vloads, true);
|
||||
|
||||
SArray *pVloads = vloads.pVloads;
|
||||
if (pVloads == NULL) return;
|
||||
|
@ -66,10 +67,10 @@ void vmGetMonitorInfo(SVnodeMgmt *pMgmt, SMonVmInfo *pInfo) {
|
|||
pInfo->vstat.totalVnodes = totalVnodes;
|
||||
pInfo->vstat.masterNum = masterNum;
|
||||
pInfo->vstat.numOfSelectReqs = numOfSelectReqs - pMgmt->state.numOfSelectReqs;
|
||||
pInfo->vstat.numOfInsertReqs = numOfInsertReqs - pMgmt->state.numOfInsertReqs;
|
||||
pInfo->vstat.numOfInsertSuccessReqs = numOfInsertSuccessReqs - pMgmt->state.numOfInsertSuccessReqs;
|
||||
pInfo->vstat.numOfBatchInsertReqs = numOfBatchInsertReqs - pMgmt->state.numOfBatchInsertReqs;
|
||||
pInfo->vstat.numOfBatchInsertSuccessReqs = numOfBatchInsertSuccessReqs - pMgmt->state.numOfBatchInsertSuccessReqs;
|
||||
pInfo->vstat.numOfInsertReqs = numOfInsertReqs; // delta
|
||||
pInfo->vstat.numOfInsertSuccessReqs = numOfInsertSuccessReqs; // delta
|
||||
pInfo->vstat.numOfBatchInsertReqs = numOfBatchInsertReqs; // delta
|
||||
pInfo->vstat.numOfBatchInsertSuccessReqs = numOfBatchInsertSuccessReqs; // delta
|
||||
pMgmt->state.totalVnodes = totalVnodes;
|
||||
pMgmt->state.masterNum = masterNum;
|
||||
pMgmt->state.numOfSelectReqs = numOfSelectReqs;
|
||||
|
@ -109,7 +110,7 @@ int32_t vmProcessGetMonitorInfoReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
|||
|
||||
int32_t vmProcessGetLoadsReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
||||
SMonVloadInfo vloads = {0};
|
||||
vmGetVnodeLoads(pMgmt, &vloads);
|
||||
vmGetVnodeLoads(pMgmt, &vloads, false);
|
||||
|
||||
int32_t rspLen = tSerializeSMonVloadInfo(NULL, 0, &vloads);
|
||||
if (rspLen < 0) {
|
||||
|
@ -212,38 +213,47 @@ static int32_t vmTsmaProcessCreate(SVnode *pVnode, SCreateVnodeReq *pReq) {
|
|||
}
|
||||
|
||||
int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
||||
SCreateVnodeReq createReq = {0};
|
||||
SCreateVnodeReq req = {0};
|
||||
SVnodeCfg vnodeCfg = {0};
|
||||
SWrapperCfg wrapperCfg = {0};
|
||||
int32_t code = -1;
|
||||
char path[TSDB_FILENAME_LEN] = {0};
|
||||
|
||||
if (tDeserializeSCreateVnodeReq(pMsg->pCont, pMsg->contLen, &createReq) != 0) {
|
||||
if (tDeserializeSCreateVnodeReq(pMsg->pCont, pMsg->contLen, &req) != 0) {
|
||||
terrno = TSDB_CODE_INVALID_MSG;
|
||||
return -1;
|
||||
}
|
||||
|
||||
dInfo(
|
||||
"vgId:%d, start to create vnode, tsma:%d standby:%d cacheLast:%d cacheLastSize:%d sstTrigger:%d "
|
||||
"tsdbPageSize:%d",
|
||||
createReq.vgId, createReq.isTsma, createReq.standby, createReq.cacheLast, createReq.cacheLastSize,
|
||||
createReq.sstTrigger, createReq.tsdbPageSize);
|
||||
dInfo("vgId:%d, hashMethod:%d begin:%u end:%u prefix:%d surfix:%d", createReq.vgId, createReq.hashMethod,
|
||||
createReq.hashBegin, createReq.hashEnd, createReq.hashPrefix, createReq.hashSuffix);
|
||||
vmGenerateVnodeCfg(&createReq, &vnodeCfg);
|
||||
"vgId:%d, start to create vnode, page:%d pageSize:%d buffer:%d szPage:%d szBuf:%" PRIu64
|
||||
" cacheLast:%d cacheLastSize:%d sstTrigger:%d tsdbPageSize:%d %d dbname:%s dbId:%" PRId64
|
||||
"days:%d keep0:%d keep1:%d keep2:%d tsma:%d precision:%d compression:%d minRows:%d maxRows:%d, wal "
|
||||
"fsync:%d level:%d retentionPeriod:%d retentionSize:%d rollPeriod:%d segSize:%d, hash method:%d begin:%u end:%u "
|
||||
"prefix:%d surfix:%d replica:%d selfIndex:%d strict:%d",
|
||||
req.vgId, req.pages, req.pageSize, req.buffer, req.pageSize * 1024, (uint64_t)req.buffer * 1024 * 1024,
|
||||
req.cacheLast, req.cacheLastSize, req.sstTrigger, req.tsdbPageSize, req.tsdbPageSize * 1024, req.db, req.dbUid,
|
||||
req.daysPerFile, req.daysToKeep0, req.daysToKeep1, req.daysToKeep2, req.isTsma, req.precision, req.compression,
|
||||
req.minRows, req.maxRows, req.walFsyncPeriod, req.walLevel, req.walRetentionPeriod, req.walRetentionSize,
|
||||
req.walRollPeriod, req.walSegmentSize, req.hashMethod, req.hashBegin, req.hashEnd, req.hashPrefix, req.hashSuffix,
|
||||
req.replica, req.selfIndex, req.strict);
|
||||
for (int32_t i = 0; i < req.replica; ++i) {
|
||||
dInfo("vgId:%d, replica:%d fqdn:%s port:%u", req.vgId, req.replicas[i].id, req.replicas[i].fqdn,
|
||||
req.replicas[i].port);
|
||||
}
|
||||
vmGenerateVnodeCfg(&req, &vnodeCfg);
|
||||
|
||||
if (vmTsmaAdjustDays(&vnodeCfg, &createReq) < 0) {
|
||||
dError("vgId:%d, failed to adjust tsma days since %s", createReq.vgId, terrstr());
|
||||
if (vmTsmaAdjustDays(&vnodeCfg, &req) < 0) {
|
||||
dError("vgId:%d, failed to adjust tsma days since %s", req.vgId, terrstr());
|
||||
code = terrno;
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
vmGenerateWrapperCfg(pMgmt, &createReq, &wrapperCfg);
|
||||
vmGenerateWrapperCfg(pMgmt, &req, &wrapperCfg);
|
||||
|
||||
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, createReq.vgId);
|
||||
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, req.vgId);
|
||||
if (pVnode != NULL) {
|
||||
dDebug("vgId:%d, already exist", createReq.vgId);
|
||||
tFreeSCreateVnodeReq(&createReq);
|
||||
dDebug("vgId:%d, already exist", req.vgId);
|
||||
tFreeSCreateVnodeReq(&req);
|
||||
vmReleaseVnode(pMgmt, pVnode);
|
||||
terrno = TSDB_CODE_NODE_ALREADY_DEPLOYED;
|
||||
code = terrno;
|
||||
|
@ -252,36 +262,36 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
|||
|
||||
snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, vnodeCfg.vgId);
|
||||
if (vnodeCreate(path, &vnodeCfg, pMgmt->pTfs) < 0) {
|
||||
tFreeSCreateVnodeReq(&createReq);
|
||||
dError("vgId:%d, failed to create vnode since %s", createReq.vgId, terrstr());
|
||||
tFreeSCreateVnodeReq(&req);
|
||||
dError("vgId:%d, failed to create vnode since %s", req.vgId, terrstr());
|
||||
code = terrno;
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
SVnode *pImpl = vnodeOpen(path, pMgmt->pTfs, pMgmt->msgCb);
|
||||
if (pImpl == NULL) {
|
||||
dError("vgId:%d, failed to open vnode since %s", createReq.vgId, terrstr());
|
||||
dError("vgId:%d, failed to open vnode since %s", req.vgId, terrstr());
|
||||
code = terrno;
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
code = vmOpenVnode(pMgmt, &wrapperCfg, pImpl);
|
||||
if (code != 0) {
|
||||
dError("vgId:%d, failed to open vnode since %s", createReq.vgId, terrstr());
|
||||
dError("vgId:%d, failed to open vnode since %s", req.vgId, terrstr());
|
||||
code = terrno;
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
code = vmTsmaProcessCreate(pImpl, &createReq);
|
||||
code = vmTsmaProcessCreate(pImpl, &req);
|
||||
if (code != 0) {
|
||||
dError("vgId:%d, failed to create tsma since %s", createReq.vgId, terrstr());
|
||||
dError("vgId:%d, failed to create tsma since %s", req.vgId, terrstr());
|
||||
code = terrno;
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
code = vnodeStart(pImpl);
|
||||
if (code != 0) {
|
||||
dError("vgId:%d, failed to start sync since %s", createReq.vgId, terrstr());
|
||||
dError("vgId:%d, failed to start sync since %s", req.vgId, terrstr());
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
|
@ -296,10 +306,10 @@ _OVER:
|
|||
vnodeClose(pImpl);
|
||||
vnodeDestroy(path, pMgmt->pTfs);
|
||||
} else {
|
||||
dInfo("vgId:%d, vnode is created", createReq.vgId);
|
||||
dInfo("vgId:%d, vnode is created", req.vgId);
|
||||
}
|
||||
|
||||
tFreeSCreateVnodeReq(&createReq);
|
||||
tFreeSCreateVnodeReq(&req);
|
||||
terrno = code;
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -384,7 +384,7 @@ static int32_t vmStartVnodes(SVnodeMgmt *pMgmt) {
|
|||
for (int32_t v = 0; v < numOfVnodes; ++v) {
|
||||
int32_t t = v % threadNum;
|
||||
SVnodeThread *pThread = &threads[t];
|
||||
if (pThread->ppVnodes != NULL) {
|
||||
if (pThread->ppVnodes != NULL && ppVnodes != NULL) {
|
||||
pThread->ppVnodes[pThread->vnodeNum++] = ppVnodes[v];
|
||||
}
|
||||
}
|
||||
|
|
|
@ -35,7 +35,7 @@ void qmGetMonitorInfo(void *pMgmt, SMonQmInfo *pInfo);
|
|||
void smGetMonitorInfo(void *pMgmt, SMonSmInfo *pInfo);
|
||||
void bmGetMonitorInfo(void *pMgmt, SMonBmInfo *pInfo);
|
||||
|
||||
void vmGetVnodeLoads(void *pMgmt, SMonVloadInfo *pInfo);
|
||||
void vmGetVnodeLoads(void *pMgmt, SMonVloadInfo *pInfo, bool isReset);
|
||||
void mmGetMnodeLoads(void *pMgmt, SMonMloadInfo *pInfo);
|
||||
void qmGetQnodeLoads(void *pMgmt, SQnodeLoad *pInfo);
|
||||
|
||||
|
|
|
@ -152,7 +152,7 @@ void dmGetVnodeLoads(SMonVloadInfo *pInfo) {
|
|||
if (tsMultiProcess) {
|
||||
dmSendLocalRecv(pDnode, TDMT_MON_VM_LOAD, tDeserializeSMonVloadInfo, pInfo);
|
||||
} else if (pWrapper->pMgmt != NULL) {
|
||||
vmGetVnodeLoads(pWrapper->pMgmt, pInfo);
|
||||
vmGetVnodeLoads(pWrapper->pMgmt, pInfo, false);
|
||||
}
|
||||
dmReleaseWrapper(pWrapper);
|
||||
}
|
||||
|
|
|
@ -65,6 +65,7 @@ static SProcQueue *dmInitProcQueue(SProc *proc, char *ptr, int32_t size) {
|
|||
}
|
||||
|
||||
if (proc->ptype & DND_PROC_PARENT) {
|
||||
memset(ptr, 0, sizeof(SProcQueue));
|
||||
if (dmInitProcMutex(queue) != 0) {
|
||||
return NULL;
|
||||
}
|
||||
|
@ -76,11 +77,11 @@ static SProcQueue *dmInitProcQueue(SProc *proc, char *ptr, int32_t size) {
|
|||
tstrncpy(queue->name, proc->name, sizeof(queue->name));
|
||||
|
||||
taosThreadMutexLock(&queue->mutex);
|
||||
queue->head = 0;
|
||||
queue->tail = 0;
|
||||
// queue->head = 0;
|
||||
// queue->tail = 0;
|
||||
queue->total = bufSize;
|
||||
queue->avail = bufSize;
|
||||
queue->items = 0;
|
||||
// queue->items = 0;
|
||||
taosThreadMutexUnlock(&queue->mutex);
|
||||
}
|
||||
|
||||
|
|
|
@ -639,6 +639,7 @@ typedef struct {
|
|||
char* physicalPlan;
|
||||
SArray* tasks; // SArray<SArray<SStreamTask>>
|
||||
SSchemaWrapper outputSchema;
|
||||
SSchemaWrapper tagSchema;
|
||||
} SStreamObj;
|
||||
|
||||
int32_t tEncodeSStreamObj(SEncoder* pEncoder, const SStreamObj* pObj);
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue