Merge branch 'main' into feat/add_setup_script
Commit 513a07dc69
@@ -1,49 +0,0 @@
version: 1.0.{build}
image:
  - Visual Studio 2015
  - macos
environment:
  matrix:
    - ARCH: amd64
    - ARCH: x86
matrix:
  exclude:
    - image: macos
      ARCH: x86
for:
  -
    matrix:
      only:
        - image: Visual Studio 2015
    clone_folder: c:\dev\TDengine
    clone_depth: 1

    init:
      - call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %ARCH%

    before_build:
      - cd c:\dev\TDengine
      - md build

    build_script:
      - cd build
      - cmake -G "NMake Makefiles" .. -DBUILD_JDBC=false
      - nmake install
  -
    matrix:
      only:
        - image: macos
    clone_depth: 1

    build_script:
      - mkdir debug
      - cd debug
      - cmake .. > /dev/null
      - make > /dev/null
notifications:
  - provider: Email
    to:
      - sangshuduo@gmail.com
    on_build_success: true
    on_build_failure: true
    on_build_status_changed: true

@@ -1,13 +0,0 @@
# Use the latest 2.1 version of CircleCI pipeline process engine. See: https://circleci.com/docs/2.0/configuration-reference
version: 2.1
# Use a package of configuration called an orb.
orbs:
  # Declare a dependency on the welcome-orb
  welcome: circleci/welcome-orb@0.4.1
# Orchestrate or schedule a set of jobs
workflows:
  # Name the workflow "welcome"
  welcome:
    # Run the welcome/run job in its own container
    jobs:
      - welcome/run

.drone.yml
@@ -1,266 +0,0 @@
---
kind: pipeline
name: test_amd64

platform:
  os: linux
  arch: amd64

steps:
  - name: build
    image: gcc
    commands:
      - apt-get update
      - apt-get install -y cmake build-essential
      - mkdir debug
      - cd debug
      - cmake ..
      - make -j4
trigger:
  event:
    - pull_request
when:
  branch:
    - develop
    - master
    - 2.0
    - 3.0
---
kind: pipeline
name: test_arm64_bionic

platform:
  os: linux
  arch: arm64

steps:
  - name: build
    image: arm64v8/ubuntu:bionic
    commands:
      - apt-get update
      - apt-get install -y cmake build-essential
      - mkdir debug
      - cd debug
      - cmake .. -DCPUTYPE=aarch64 > /dev/null
      - make -j4
trigger:
  event:
    - pull_request
when:
  branch:
    - develop
    - master
    - 2.0
    - 3.0
---
kind: pipeline
name: test_arm64_focal

platform:
  os: linux
  arch: arm64

steps:
  - name: build
    image: arm64v8/ubuntu:focal
    commands:
      - echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
      - apt-get update
      - apt-get install -y -qq cmake build-essential
      - mkdir debug
      - cd debug
      - cmake .. -DCPUTYPE=aarch64 > /dev/null
      - make -j4
trigger:
  event:
    - pull_request
when:
  branch:
    - develop
    - master
    - 2.0
    - 3.0
---
kind: pipeline
name: test_arm64_centos7

platform:
  os: linux
  arch: arm64

steps:
  - name: build
    image: arm64v8/centos:7
    commands:
      - yum install -y gcc gcc-c++ make cmake git
      - mkdir debug
      - cd debug
      - cmake .. -DCPUTYPE=aarch64 > /dev/null
      - make -j4
trigger:
  event:
    - pull_request
when:
  branch:
    - develop
    - master
    - 2.0
    - 3.0
---
kind: pipeline
name: test_arm64_centos8

platform:
  os: linux
  arch: arm64

steps:
  - name: build
    image: arm64v8/centos:8
    commands:
      - dnf install -y gcc gcc-c++ make cmake epel-release git libarchive
      - mkdir debug
      - cd debug
      - cmake .. -DCPUTYPE=aarch64 > /dev/null
      - make -j4
trigger:
  event:
    - pull_request
when:
  branch:
    - develop
    - master
    - 2.0
    - 3.0
---
kind: pipeline
name: test_arm_bionic

platform:
  os: linux
  arch: arm

steps:
  - name: build
    image: arm32v7/ubuntu:bionic
    commands:
      - apt-get update
      - apt-get install -y cmake build-essential
      - mkdir debug
      - cd debug
      - cmake .. -DCPUTYPE=aarch32 > /dev/null
      - make -j4
trigger:
  event:
    - pull_request
when:
  branch:
    - develop
    - master
    - 2.0
    - 3.0
---
kind: pipeline
name: build_trusty

platform:
  os: linux
  arch: amd64

steps:
  - name: build
    image: ubuntu:trusty
    commands:
      - apt-get update
      - apt-get install -y gcc cmake3 build-essential git binutils-2.26
      - mkdir debug
      - cd debug
      - cmake ..
      - make -j4
trigger:
  event:
    - pull_request
when:
  branch:
    - develop
    - master
    - 2.0
    - 3.0
---
kind: pipeline
name: build_xenial

platform:
  os: linux
  arch: amd64

steps:
  - name: build
    image: ubuntu:xenial
    commands:
      - apt-get update
      - apt-get install -y gcc cmake build-essential
      - mkdir debug
      - cd debug
      - cmake ..
      - make -j4
trigger:
  event:
    - pull_request
when:
  branch:
    - develop
    - master
    - 2.0
    - 3.0
---
kind: pipeline
name: build_bionic

platform:
  os: linux
  arch: amd64

steps:
  - name: build
    image: ubuntu:bionic
    commands:
      - apt-get update
      - apt-get install -y gcc cmake build-essential
      - mkdir debug
      - cd debug
      - cmake ..
      - make -j4
trigger:
  event:
    - pull_request
when:
  branch:
    - develop
    - master
    - 2.0
    - 3.0
---
kind: pipeline
name: build_centos7

platform:
  os: linux
  arch: amd64

steps:
  - name: build
    image: ansible/centos7-ansible
    commands:
      - yum install -y gcc gcc-c++ make cmake
      - mkdir debug
      - cd debug
      - cmake ..
      - make -j4
trigger:
  event:
    - pull_request
when:
  branch:
    - develop
    - master
    - 2.0
    - 3.0

@@ -6,6 +6,10 @@ on:
      - 'main'
      - '3.0'
      - '3.1'
    paths-ignore:
      - 'docs/**'
      - 'packaging/**'
      - 'tests/**'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}

@@ -14,7 +18,7 @@ concurrency:
jobs:
  build:
    runs-on: ubuntu-latest
    name: Run unit tests
    name: Build and test

    steps:
      - name: Checkout the repository

@@ -32,13 +36,14 @@ jobs:
            libgeos-dev libjansson-dev libsnappy-dev liblzma-dev libz-dev \
            zlib1g pkg-config libssl-dev gawk

      - name: Build and install TDengine
        run: |
          mkdir debug && cd debug
          cmake .. -DBUILD_HTTP=false -DBUILD_JDBC=false \
            -DBUILD_TOOLS=true -DBUILD_TEST=off \
            -DBUILD_KEEPER=true -DBUILD_DEPENDENCY_TESTS=false
          cmake .. -DBUILD_TOOLS=true \
            -DBUILD_KEEPER=true \
            -DBUILD_HTTP=false \
            -DBUILD_TEST=false \
            -DBUILD_DEPENDENCY_TESTS=false
          make -j 4
          sudo make install
          which taosd

@@ -508,7 +508,7 @@ pipeline {
        }
    }
    stage('linux test') {
        agent{label "slave1_47 || slave1_48 || slave1_49 || slave1_50 || slave1_52 || slave1_59 || slave1_63 "}
        agent{label "slave1_47 || slave1_48 || slave1_49 || slave1_50 || slave1_52 || slave1_59 || slave1_63 || worker03 || slave215 || slave217 || slave219 "}
        options { skipDefaultCheckout() }
        when {
            changeRequest()

@@ -12,6 +12,7 @@
[Build workflow](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml)
[Coverage Status](https://coveralls.io/github/taosdata/TDengine?branch=3.0)
[CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201)
<br />
[Twitter](https://twitter.com/tdenginedb)

@@ -19,6 +20,9 @@
[Discord](https://discord.com/invite/VZdSuUg4pS)
[LinkedIn](https://www.linkedin.com/company/tdengine)
[StackOverflow](https://stackoverflow.com/questions/tagged/tdengine)
<br />

English | [简体中文](README-CN.md) | [TDengine Cloud](https://cloud.tdengine.com) | [Learn more about TSDB](https://tdengine.com/tsdb/)

@@ -109,7 +109,7 @@ If you are using Maven to manage your project, simply add the following dependen
<dependency>
    <groupId>com.taosdata.jdbc</groupId>
    <artifactId>taos-jdbcdriver</artifactId>
    <version>3.4.0</version>
    <version>3.5.0</version>
</dependency>
```

@@ -28,8 +28,15 @@ Next, we continue to use smart meters as an example to demonstrate the efficient

<Tabs defaultValue="java" groupId="lang">
<TabItem value="java" label="Java">

There are two kinds of interfaces for parameter binding: one is the standard JDBC interface, and the other is an extended interface. The extended interface offers better performance.

```java
{{#include docs/examples/java/src/main/java/com/taos/example/WSParameterBindingBasicDemo.java:para_bind}}
{{#include docs/examples/java/src/main/java/com/taos/example/WSParameterBindingStdInterfaceDemo.java:para_bind}}
```

```java
{{#include docs/examples/java/src/main/java/com/taos/example/WSParameterBindingExtendInterfaceDemo.java:para_bind}}
```

This is a [more detailed parameter binding example](https://github.com/taosdata/TDengine/blob/main/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingFullDemo.java)

@@ -91,14 +98,24 @@ This is a [more detailed parameter binding example](https://github.com/taosdata/
<TabItem label="Python" value="python">

```python
{{#include docs/examples/python/stmt_native.py}}
{{#include docs/examples/python/stmt2_native.py}}
```

</TabItem>
<TabItem label="Go" value="go">

The example code for binding parameters with stmt2 (Go connector v3.6.0 and above, TDengine v3.3.5.0 and above) is as follows:

```go
{{#include docs/examples/go/stmt2/native/main.go}}
```

The example code for binding parameters with stmt is as follows:

```go
{{#include docs/examples/go/stmt/native/main.go}}
```

</TabItem>
<TabItem label="Rust" value="rust">

@@ -1,4 +1,5 @@
---
toc_max_heading_level: 4
sidebar_label: C/C++
title: C/C++ Client Library
slug: /tdengine-reference/client-libraries/cpp

@@ -1,4 +1,5 @@
---
toc_max_heading_level: 4
sidebar_label: Java
title: Java Client Library
slug: /tdengine-reference/client-libraries/java

@@ -30,33 +31,34 @@ The JDBC driver implementation for TDengine strives to be consistent with relati

## Version History

| taos-jdbcdriver Version | Major Changes | TDengine Version |
| ----------------------- | ------------- | ---------------- |
| 3.4.0 | 1. Replaced fastjson library with jackson. <br/> 2. WebSocket uses a separate protocol identifier. <br/> 3. Optimized background thread usage to avoid user misuse leading to timeouts. | - |
| 3.3.4 | Fixed getInt error when data type is float. | - |
| 3.3.3 | Fixed memory leak caused by closing WebSocket statement. | - |
| 3.3.2 | 1. Optimized parameter binding performance under WebSocket connection. <br/> 2. Improved support for mybatis. | - |
| 3.3.0 | 1. Optimized data transmission performance under WebSocket connection. <br/> 2. Supports skipping SSL verification, off by default. | 3.3.2.0 and higher |
| 3.2.11 | Fixed a bug in closing result set in Native connection. | - |
| 3.2.10 | 1. REST/WebSocket connections support data compression during transmission. <br/> 2. WebSocket automatic reconnection mechanism, off by default. <br/> 3. Connection class provides methods for schemaless writing. <br/> 4. Optimized data fetching performance for native connections. <br/> 5. Fixed some known issues. <br/> 6. Metadata retrieval functions can return a list of supported functions. | - |
| 3.2.9 | Fixed bug in closing WebSocket prepareStatement. | - |
| 3.2.8 | 1. Optimized auto-commit. <br/> 2. Fixed manual commit bug in WebSocket. <br/> 3. Optimized WebSocket prepareStatement using a single connection. <br/> 4. Metadata supports views. | - |
| 3.2.7 | 1. Supports VARBINARY and GEOMETRY types. <br/> 2. Added timezone setting support for native connections. <br/> 3. Added WebSocket automatic reconnection feature. | 3.2.0.0 and higher |
| 3.2.5 | Data subscription adds committed() and assignment() methods. | 3.1.0.3 and higher |
| 3.2.4 | Data subscription adds enable.auto.commit parameter under WebSocket connection, as well as unsubscribe() method. | - |
| 3.2.3 | Fixed ResultSet data parsing failure in some cases. | - |
| 3.2.2 | New feature: Data subscription supports seek function. | 3.0.5.0 and higher |
| 3.2.1 | 1. WebSocket connection supports schemaless and prepareStatement writing. <br/> 2. Consumer poll returns result set as ConsumerRecord, which can be accessed through value() method. | 3.0.3.0 and higher |
| 3.2.0 | Connection issues, not recommended for use. | - |
| 3.1.0 | WebSocket connection supports subscription function. | - |
| 3.0.1 - 3.0.4 | Fixed data parsing errors in result sets under some conditions. 3.0.1 compiled in JDK 11 environment, other versions recommended for JDK 8. | - |
| 3.0.0 | Supports TDengine 3.0 | 3.0.0.0 and higher |
| 2.0.42 | Fixed wasNull interface return value in WebSocket connection. | - |
| 2.0.41 | Fixed username and password encoding method in REST connection. | - |
| 2.0.39 - 2.0.40 | Added REST connection/request timeout settings. | - |
| 2.0.38 | JDBC REST connection adds batch fetching function. | - |
| 2.0.37 | Added support for json tag. | - |
| 2.0.36 | Added support for schemaless writing. | - |

| taos-jdbcdriver Version | Major Changes | TDengine Version |
| ----------------------- | ------------- | ---------------- |
| 3.5.0 | 1. Optimized the performance of WebSocket connection parameter binding, supporting parameter binding queries using binary data. <br/> 2. Optimized the performance of small queries in WebSocket connection. <br/> 3. Added support for setting time zone and app info on WebSocket connection. | 3.3.5.0 and higher |
| 3.4.0 | 1. Replaced fastjson library with jackson. <br/> 2. WebSocket uses a separate protocol identifier. <br/> 3. Optimized background thread usage to avoid user misuse leading to timeouts. | - |
| 3.3.4 | Fixed getInt error when data type is float. | - |
| 3.3.3 | Fixed memory leak caused by closing WebSocket statement. | - |
| 3.3.2 | 1. Optimized parameter binding performance under WebSocket connection. <br/> 2. Improved support for mybatis. | - |
| 3.3.0 | 1. Optimized data transmission performance under WebSocket connection. <br/> 2. Supports skipping SSL verification, off by default. | 3.3.2.0 and higher |
| 3.2.11 | Fixed a bug in closing result set in Native connection. | - |
| 3.2.10 | 1. REST/WebSocket connections support data compression during transmission. <br/> 2. WebSocket automatic reconnection mechanism, off by default. <br/> 3. Connection class provides methods for schemaless writing. <br/> 4. Optimized data fetching performance for native connections. <br/> 5. Fixed some known issues. <br/> 6. Metadata retrieval functions can return a list of supported functions. | - |
| 3.2.9 | Fixed bug in closing WebSocket prepareStatement. | - |
| 3.2.8 | 1. Optimized auto-commit. <br/> 2. Fixed manual commit bug in WebSocket. <br/> 3. Optimized WebSocket prepareStatement using a single connection. <br/> 4. Metadata supports views. | - |
| 3.2.7 | 1. Supports VARBINARY and GEOMETRY types. <br/> 2. Added timezone setting support for native connections. <br/> 3. Added WebSocket automatic reconnection feature. | 3.2.0.0 and higher |
| 3.2.5 | Data subscription adds committed() and assignment() methods. | 3.1.0.3 and higher |
| 3.2.4 | Data subscription adds enable.auto.commit parameter under WebSocket connection, as well as unsubscribe() method. | - |
| 3.2.3 | Fixed ResultSet data parsing failure in some cases. | - |
| 3.2.2 | New feature: Data subscription supports seek function. | 3.0.5.0 and higher |
| 3.2.1 | 1. WebSocket connection supports schemaless and prepareStatement writing. <br/> 2. Consumer poll returns result set as ConsumerRecord, which can be accessed through value() method. | 3.0.3.0 and higher |
| 3.2.0 | Connection issues, not recommended for use. | - |
| 3.1.0 | WebSocket connection supports subscription function. | - |
| 3.0.1 - 3.0.4 | Fixed data parsing errors in result sets under some conditions. 3.0.1 compiled in JDK 11 environment, other versions recommended for JDK 8. | - |
| 3.0.0 | Supports TDengine 3.0 | 3.0.0.0 and higher |
| 2.0.42 | Fixed wasNull interface return value in WebSocket connection. | - |
| 2.0.41 | Fixed username and password encoding method in REST connection. | - |
| 2.0.39 - 2.0.40 | Added REST connection/request timeout settings. | - |
| 2.0.38 | JDBC REST connection adds batch fetching function. | - |
| 2.0.37 | Added support for json tag. | - |
| 2.0.36 | Added support for schemaless writing. | - |

## Exceptions and Error Codes

@@ -75,47 +77,47 @@ The error codes that the JDBC connector may report include 4 types:

Please refer to the specific error codes:

| Error Code | Description | Suggested Actions |
| ---------- | ----------- | ----------------- |
| 0x2301 | connection already closed | The connection is already closed, check the connection status, or recreate the connection to execute related commands. |
| 0x2302 | this operation is NOT supported currently! | The current interface is not supported, consider switching to another connection method. |
| 0x2303 | invalid variables | Invalid parameters, please check the interface specifications and adjust the parameter types and sizes. |
| 0x2304 | statement is closed | The statement is already closed, check if the statement was used after being closed, or if the connection is normal. |
| 0x2305 | resultSet is closed | The resultSet has been released, check if the resultSet was used after being released. |
| 0x2306 | Batch is empty! | Add parameters to prepareStatement before executing executeBatch. |
| 0x2307 | Can not issue data manipulation statements with executeQuery() | Use executeUpdate() for update operations, not executeQuery(). |
| 0x2308 | Can not issue SELECT via executeUpdate() | Use executeQuery() for query operations, not executeUpdate(). |
| 0x230d | parameter index out of range | Parameter out of bounds, check the reasonable range of parameters. |
| 0x230e | connection already closed | The connection is already closed, check if the Connection was used after being closed, or if the connection is normal. |
| 0x230f | unknown sql type in tdengine | Check the Data Type types supported by TDengine. |
| 0x2310 | can't register JDBC-JNI driver | Cannot register JNI driver, check if the url is correctly filled. |
| 0x2312 | url is not set | Check if the REST connection url is correctly filled. |
| 0x2314 | numeric value out of range | Check if the correct interface was used for numeric types in the result set. |
| 0x2315 | unknown taos type in tdengine | When converting TDengine data types to JDBC data types, check if the correct TDengine data type was specified. |
| 0x2317 | | Incorrect request type used in REST connection. |
| 0x2318 | | Data transmission error occurred in REST connection, check the network situation and retry. |
| 0x2319 | user is required | Username information is missing when creating a connection. |
| 0x231a | password is required | Password information is missing when creating a connection. |
| 0x231c | httpEntity is null, sql: | An exception occurred in REST connection execution. |
| 0x231d | can't create connection with server within | Increase the httpConnectTimeout parameter to extend the connection time, or check the connection with taosAdapter. |
| 0x231e | failed to complete the task within the specified time | Increase the messageWaitTimeout parameter to extend the execution time, or check the connection with taosAdapter. |
| 0x2350 | unknown error | Unknown exception, please provide feedback to the developers on github. |
| 0x2352 | Unsupported encoding | An unsupported character encoding set was specified in the local connection. |
| 0x2353 | internal error of database, please see taoslog for more details | An error occurred while executing prepareStatement in local connection, check taos log for troubleshooting. |
| 0x2354 | JNI connection is NULL | The Connection was already closed when executing commands in local connection. Check the connection with TDengine. |
| 0x2355 | JNI result set is NULL | The result set is abnormal in local connection, check the connection and retry. |
| 0x2356 | invalid num of fields | The meta information of the result set obtained in local connection does not match. |
| 0x2357 | empty sql string | Fill in the correct SQL for execution. |
| 0x2359 | JNI alloc memory failed, please see taoslog for more details | Memory allocation error in local connection, check taos log for troubleshooting. |
| 0x2371 | consumer properties must not be null! | Parameters are null when creating a subscription, fill in the correct parameters. |
| 0x2372 | configs contain empty key, failed to set consumer property | The parameter key contains empty values, fill in the correct parameters. |
| 0x2373 | failed to set consumer property, | The parameter value contains empty values, fill in the correct parameters. |
| 0x2375 | topic reference has been destroyed | During the data subscription process, the topic reference was released. Check the connection with TDengine. |

| Error Code | Description | Suggested Actions |
| ---------- | ----------- | ----------------- |
| 0x2301 | connection already closed | The connection is already closed, check the connection status, or recreate the connection to execute related commands. |
| 0x2302 | this operation is NOT supported currently! | The current interface is not supported, consider switching to another connection method. |
| 0x2303 | invalid variables | Invalid parameters, please check the interface specifications and adjust the parameter types and sizes. |
| 0x2304 | statement is closed | The statement is already closed, check if the statement was used after being closed, or if the connection is normal. |
| 0x2305 | resultSet is closed | The resultSet has been released, check if the resultSet was used after being released. |
| 0x2306 | Batch is empty! | Add parameters to prepareStatement before executing executeBatch. |
| 0x2307 | Can not issue data manipulation statements with executeQuery() | Use executeUpdate() for update operations, not executeQuery(). |
| 0x2308 | Can not issue SELECT via executeUpdate() | Use executeQuery() for query operations, not executeUpdate(). |
| 0x230d | parameter index out of range | Parameter out of bounds, check the reasonable range of parameters. |
| 0x230e | connection already closed | The connection is already closed, check if the Connection was used after being closed, or if the connection is normal. |
| 0x230f | unknown sql type in tdengine | Check the Data Type types supported by TDengine. |
| 0x2310 | can't register JDBC-JNI driver | Cannot register JNI driver, check if the url is correctly filled. |
| 0x2312 | url is not set | Check if the REST connection url is correctly filled. |
| 0x2314 | numeric value out of range | Check if the correct interface was used for numeric types in the result set. |
| 0x2315 | unknown taos type in tdengine | When converting TDengine data types to JDBC data types, check if the correct TDengine data type was specified. |
| 0x2317 | | Incorrect request type used in REST connection. |
| 0x2318 | | Data transmission error occurred in REST connection, check the network situation and retry. |
| 0x2319 | user is required | Username information is missing when creating a connection. |
| 0x231a | password is required | Password information is missing when creating a connection. |
| 0x231c | httpEntity is null, sql: | An exception occurred in REST connection execution. |
| 0x231d | can't create connection with server within | Increase the httpConnectTimeout parameter to extend the connection time, or check the connection with taosAdapter. |
| 0x231e | failed to complete the task within the specified time | Increase the messageWaitTimeout parameter to extend the execution time, or check the connection with taosAdapter. |
| 0x2350 | unknown error | Unknown exception, please provide feedback to the developers on github. |
| 0x2352 | Unsupported encoding | An unsupported character encoding set was specified in the local connection. |
| 0x2353 | internal error of database, please see taoslog for more details | An error occurred while executing prepareStatement in local connection, check taos log for troubleshooting. |
| 0x2354 | JNI connection is NULL | The Connection was already closed when executing commands in local connection. Check the connection with TDengine. |
| 0x2355 | JNI result set is NULL | The result set is abnormal in local connection, check the connection and retry. |
| 0x2356 | invalid num of fields | The meta information of the result set obtained in local connection does not match. |
| 0x2357 | empty sql string | Fill in the correct SQL for execution. |
| 0x2359 | JNI alloc memory failed, please see taoslog for more details | Memory allocation error in local connection, check taos log for troubleshooting. |
| 0x2371 | consumer properties must not be null! | Parameters are null when creating a subscription, fill in the correct parameters. |
| 0x2372 | configs contain empty key, failed to set consumer property | The parameter key contains empty values, fill in the correct parameters. |
| 0x2373 | failed to set consumer property, | The parameter value contains empty values, fill in the correct parameters. |
| 0x2375 | topic reference has been destroyed | During the data subscription process, the topic reference was released. Check the connection with TDengine. |
| 0x2376 | failed to set consumer topic, topic name is empty | During the data subscription process, the subscription topic name is empty. Check if the specified topic name is correctly filled. |
| 0x2377 | consumer reference has been destroyed | The data transmission channel for the subscription has been closed, check the connection with TDengine. |
| 0x2378 | consumer create error | Data subscription creation failed, check the error information and taos log for troubleshooting. |
| 0x2379 | seek offset must not be a negative number | The seek interface parameter must not be negative, use the correct parameters. |
| 0x237a | vGroup not found in result set | VGroup not assigned to the current consumer, due to the Rebalance mechanism causing the Consumer and VGroup to be unbound. |
| 0x2377 | consumer reference has been destroyed | The data transmission channel for the subscription has been closed, check the connection with TDengine. |
| 0x2378 | consumer create error | Data subscription creation failed, check the error information and taos log for troubleshooting. |
| 0x2379 | seek offset must not be a negative number | The seek interface parameter must not be negative, use the correct parameters. |
| 0x237a | vGroup not found in result set | VGroup not assigned to the current consumer, due to the Rebalance mechanism causing the Consumer and VGroup to be unbound. |

- [TDengine Java Connector Error Code](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
<!-- - [TDengine_ERROR_CODE](../error-code) -->

@@ -244,13 +246,13 @@ For WebSocket connections, the configuration parameters in the URL are as follow

- user: Login username for TDengine, default value 'root'.
- password: User login password, default value 'taosdata'.
- charset: Specifies the character set for parsing string data when batch fetching is enabled.
- batchErrorIgnore: true: Continues executing the following SQL if one SQL fails during the execution of Statement's executeBatch. false: Does not execute any statements after a failed SQL. Default value: false.
- httpConnectTimeout: Connection timeout in ms, default value 60000.
- messageWaitTimeout: Message timeout in ms, default value 60000.
- useSSL: Whether SSL is used in the connection.
- timezone: Client timezone, default is the system current timezone. Recommended not to set, using the system time zone provides better performance.

**Note**: Some configuration items (such as: locale, timezone) do not take effect in WebSocket connections.
**Note**: Some configuration items (such as: locale, charset) do not take effect in WebSocket connections.

**REST Connection**
Using JDBC REST connection does not depend on the client driver. Compared to native JDBC connections, you only need to:

@@ -263,14 +265,13 @@ For REST connections, the configuration parameters in the URL are as follows:

- user: Login username for TDengine, default value 'root'.
- password: User login password, default value 'taosdata'.
- charset: Specifies the character set for parsing string data when batch fetching is enabled.
- batchErrorIgnore: true: Continues executing the following SQL if one SQL fails during the execution of Statement's executeBatch. false: Does not execute any statements after a failed SQL. Default value: false.
- httpConnectTimeout: Connection timeout in ms, default value 60000.
- httpSocketTimeout: Socket timeout in ms, default value 60000.
- useSSL: Whether SSL is used in the connection.
- httpPoolSize: REST concurrent request size, default 20.

**Note**: Some configuration items (such as: locale, timezone) do not take effect in REST connections.
**Note**: Some configuration items (such as: locale, charset and timezone) do not take effect in REST connections.

:::note

@@ -294,7 +295,9 @@ The configuration parameters in properties are as follows:
- TSDBDriver.PROPERTY_KEY_CONFIG_DIR: Effective only when using native JDBC connections. Client configuration file directory path, default value on Linux OS is `/etc/taos`, on Windows OS is `C:/TDengine/cfg`.
- TSDBDriver.PROPERTY_KEY_CHARSET: Character set used by the client, default value is the system character set.
- TSDBDriver.PROPERTY_KEY_LOCALE: Effective only when using native JDBC connections. Client locale, default value is the current system locale.
- TSDBDriver.PROPERTY_KEY_TIME_ZONE: Effective only when using native JDBC connections. Client time zone, default value is the current system time zone. Due to historical reasons, we only support part of the POSIX standard, such as UTC-8 (representing Shanghai, China), GMT-8, Asia/Shanghai.
- TSDBDriver.PROPERTY_KEY_TIME_ZONE:
  - Native connections: Client time zone, default value is the current system time zone. Effective globally. Due to historical reasons, we only support part of the POSIX standard, such as UTC-8 (representing Shanghai, China), GMT-8, Asia/Shanghai.
  - WebSocket connections: Client time zone, default value is the current system time zone. Effective on the connection. Only IANA time zones are supported, such as Asia/Shanghai. It is recommended not to set this parameter, as using the system time zone provides better performance.
- TSDBDriver.HTTP_CONNECT_TIMEOUT: Connection timeout, in ms, default value is 60000. Effective only in REST connections.
- TSDBDriver.HTTP_SOCKET_TIMEOUT: Socket timeout, in ms, default value is 60000. Effective only in REST connections and when batchfetch is set to false.
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: Message timeout, in ms, default value is 60000. Effective only under WebSocket connections.

@@ -303,12 +306,14 @@ The configuration parameters in properties are as follows:
- TSDBDriver.PROPERTY_KEY_ENABLE_COMPRESSION: Whether to enable compression during transmission. Effective only when using REST/WebSocket connections. true: enabled, false: not enabled. Default is false.
- TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT: Whether to enable auto-reconnect. Effective only when using WebSocket connections. true: enabled, false: not enabled. Default is false.

> **Note**: Enabling auto-reconnect is only effective for simple SQL execution, schema-less writing, and data subscription. It is ineffective for parameter binding. Auto-reconnect is only effective for connections established through parameters specifying the database, and ineffective for later `use db` statements to switch databases.

- TSDBDriver.PROPERTY_KEY_RECONNECT_INTERVAL_MS: Auto-reconnect retry interval, in milliseconds, default value 2000. Effective only when PROPERTY_KEY_ENABLE_AUTO_RECONNECT is true.
- TSDBDriver.PROPERTY_KEY_RECONNECT_RETRY_COUNT: Auto-reconnect retry count, default value 3, effective only when PROPERTY_KEY_ENABLE_AUTO_RECONNECT is true.
- TSDBDriver.PROPERTY_KEY_DISABLE_SSL_CERT_VALIDATION: Disable SSL certificate validation. Effective only when using WebSocket connections. true: enabled, false: not enabled. Default is false.

- TSDBDriver.PROPERTY_KEY_APP_NAME: App name, can be used for display in the `show connections` query result. Effective only when using WebSocket connections. Default value is java.
- TSDBDriver.PROPERTY_KEY_APP_IP: App IP, can be used for display in the `show connections` query result. Effective only when using WebSocket connections. Default value is empty.

Additionally, for native JDBC connections, other parameters such as log level and SQL length can be set via the URL and Properties.

**Priority of Configuration Parameters**

@@ -489,16 +494,16 @@ For example: if the password is specified as taosdata in the URL and as taosdemo

List of interface methods that return `true` for supported features, others not explicitly mentioned return `false`.

| Interface Method | Description |
| ---------------- | ----------- |
| `boolean nullsAreSortedAtStart()` | Determines if `NULL` values are sorted at the start |
| `boolean storesLowerCaseIdentifiers()` | Determines if the database stores identifiers in lowercase |
| `boolean supportsAlterTableWithAddColumn()` | Determines if the database supports adding columns with `ALTER TABLE` |
| `boolean supportsAlterTableWithDropColumn()` | Determines if the database supports dropping columns with `ALTER TABLE` |
| `boolean supportsColumnAliasing()` | Determines if the database supports column aliasing |
| `boolean supportsGroupBy()` | Determines if the database supports `GROUP BY` statements |
| `boolean isCatalogAtStart()` | Determines if the catalog name appears at the start of the fully qualified name in the database |
| `boolean supportsCatalogsInDataManipulation()` | Determines if the database supports catalog names in data manipulation statements |

### Connection Features

@@ -1,4 +1,5 @@
---
toc_max_heading_level: 4
sidebar_label: Go
title: Go Client Library
slug: /tdengine-reference/client-libraries/go

@@ -21,24 +22,25 @@ Supports Go 1.14 and above.

## Version History

| driver-go Version | Major Changes | TDengine Version |
| ----------------- | ------------- | ---------------- |
| v3.5.8 | Fixed null pointer exception. | - |
| v3.5.7 | taosWS and taosRestful support passing request id. | - |
| v3.5.6 | Improved websocket query and insert performance. | 3.3.2.0 and higher |
| v3.5.5 | Restful supports skipping SSL certificate check. | - |
| v3.5.4 | Compatible with TDengine 3.3.0.0 tmq raw data. | - |
| v3.5.3 | Refactored taosWS. | - |
| v3.5.2 | Websocket compression and optimized tmq subscription performance. | 3.2.3.0 and higher |
| v3.5.1 | Native stmt query and geometry type support. | 3.2.1.0 and higher |
| v3.5.0 | Support tmq get assignment and seek offset. | 3.0.5.0 and higher |
| v3.3.1 | Schemaless protocol insert based on websocket. | 3.0.4.1 and higher |
| v3.1.0 | Provided Kafka-like subscription API. | - |
| v3.0.4 | Added request id related interfaces. | 3.0.2.2 and higher |
| v3.0.3 | Websocket-based statement insert. | - |
| v3.0.2 | Websocket-based data query and insert. | 3.0.1.5 and higher |
| v3.0.1 | Websocket-based message subscription. | - |
| v3.0.0 | Adapted to TDengine 3.0 query and insert. | 3.0.0.0 and higher |

| driver-go Version | Major Changes | TDengine Version |
| ----------------- | ------------- | ---------------- |
| v3.6.0 | stmt2 native interface, DSN supports passwords containing special characters (url.QueryEscape). | 3.3.5.0 and higher |
| v3.5.8 | Fixed null pointer exception. | - |
| v3.5.7 | taosWS and taosRestful support passing request id. | - |
| v3.5.6 | Improved websocket query and insert performance. | 3.3.2.0 and higher |
| v3.5.5 | Restful supports skipping SSL certificate check. | - |
| v3.5.4 | Compatible with TDengine 3.3.0.0 tmq raw data. | - |
| v3.5.3 | Refactored taosWS. | - |
| v3.5.2 | Websocket compression and optimized tmq subscription performance. | 3.2.3.0 and higher |
| v3.5.1 | Native stmt query and geometry type support. | 3.2.1.0 and higher |
| v3.5.0 | Support tmq get assignment and seek offset. | 3.0.5.0 and higher |
| v3.3.1 | Schemaless protocol insert based on websocket. | 3.0.4.1 and higher |
| v3.1.0 | Provided Kafka-like subscription API. | - |
| v3.0.4 | Added request id related interfaces. | 3.0.2.2 and higher |
| v3.0.3 | Websocket-based statement insert. | - |
| v3.0.2 | Websocket-based data query and insert. | 3.0.1.5 and higher |
| v3.0.1 | Websocket-based message subscription. | - |
| v3.0.0 | Adapted to TDengine 3.0 query and insert. | 3.0.0.0 and higher |

## Exceptions and Error Codes

@@ -136,6 +138,8 @@ Full form of DSN:
username:password@protocol(address)/dbname?param=value
```

When the password contains special characters, it needs to be escaped using url.QueryEscape.
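
As a rough illustration of the escaping rule above (a sketch, not part of the connector reference), the snippet below assembles a DSN with `url.QueryEscape`; the `ws` protocol, address, and database name are assumed placeholder values.

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Hypothetical credentials; the password contains characters that would
	// otherwise break DSN parsing, so it is escaped with url.QueryEscape.
	user := "root"
	password := "pa$$w:rd@123"
	address := "localhost:6041" // assumed taosAdapter address for a WebSocket DSN
	dbname := "test"

	// Follows the general form username:password@protocol(address)/dbname?param=value.
	dsn := fmt.Sprintf("%s:%s@ws(%s)/%s", user, url.QueryEscape(password), address, dbname)
	fmt.Println(dsn)
}
```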

##### Native Connection

Import the driver:

@@ -493,6 +497,43 @@ The `af` package provides more interfaces using native connections for parameter
  * **Interface Description**: Closes the statement.
  * **Return Value**: Error information.

From version 3.6.0, the `stmt2` interface for binding parameters is provided.

* `func (conn *Connector) Stmt2(reqID int64, singleTableBindOnce bool) *Stmt2`
  * **Interface Description**: Returns a Stmt2 object bound to this connection.
  * **Parameter Description**:
    * `reqID`: Request ID.
    * `singleTableBindOnce`: Indicates whether a single child table is bound only once during a single execution.
  * **Return Value**: Stmt2 object.

* `func (s *Stmt2) Prepare(sql string) error`
  * **Interface Description**: Prepares an SQL.
  * **Parameter Description**:
    * `sql`: The statement for parameter binding.
  * **Return Value**: Error information.

* `func (s *Stmt2) Bind(params []*stmt.TaosStmt2BindData) error`
  * **Interface Description**: Binds data to the prepared statement.
  * **Parameter Description**:
    * `params`: The data to bind.
  * **Return Value**: Error information.

* `func (s *Stmt2) Execute() error`
  * **Interface Description**: Executes the batch.
  * **Return Value**: Error information.

* `func (s *Stmt2) GetAffectedRows() int`
  * **Interface Description**: Gets the number of affected rows (only valid for insert statements).
  * **Return Value**: Number of affected rows.

* `func (s *Stmt2) UseResult() (driver.Rows, error)`
  * **Interface Description**: Retrieves the result set (only valid for query statements).
  * **Return Value**: Result set Rows object, error information.

* `func (s *Stmt2) Close() error`
  * **Interface Description**: Closes the statement.
  * **Return Value**: Error information.
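
For orientation, the interfaces listed above compose into the minimal sketch below; the connection parameters, child table name, and sample values are assumptions, and the full stmt2 native example added elsewhere in this change shows the complete program.

```go
package main

import (
	"database/sql/driver"
	"log"
	"time"

	"github.com/taosdata/driver-go/v3/af"
	"github.com/taosdata/driver-go/v3/common"
	"github.com/taosdata/driver-go/v3/common/stmt"
)

func main() {
	// Assumes a local TDengine with database `power` and super table `meters`.
	conn, err := af.Open("127.0.0.1", "root", "taosdata", "power", 0)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	s := conn.Stmt2(common.GetReqID(), false)
	defer s.Close()

	if err := s.Prepare("INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)"); err != nil {
		log.Fatal(err)
	}
	// Bind one row for one child table: Tags follow the TAGS clause,
	// Cols holds one slice of values per column in the VALUES clause.
	bindData := []*stmt.TaosStmt2BindData{{
		TableName: "d_bind_1",
		Tags:      []driver.Value{int32(1), []byte("location_1")},
		Cols: [][]driver.Value{
			{time.Now()},
			{float32(10.5)},
			{int32(219)},
			{float32(0.31)},
		},
	}}
	if err := s.Bind(bindData); err != nil {
		log.Fatal(err)
	}
	if err := s.Execute(); err != nil {
		log.Fatal(err)
	}
	log.Printf("affected rows: %d", s.GetAffectedRows())
}
```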

The `ws/stmt` package provides interfaces for parameter binding via WebSocket

* `func (c *Connector) Init() (*Stmt, error)`

@@ -1,4 +1,5 @@
---
toc_max_heading_level: 4
sidebar_label: Rust
title: Rust Client Library
slug: /tdengine-reference/client-libraries/rust

@@ -1,4 +1,5 @@
---
toc_max_heading_level: 4
sidebar_label: Python
title: Python Client Library
slug: /tdengine-reference/client-libraries/python

@@ -55,6 +56,8 @@ Python Connector historical versions (it is recommended to use the latest versio

| Python Connector Version | Major Changes | TDengine Version |
| ------------------------ | ------------- | ---------------- |
| 2.7.21 | Native supports STMT2 writing. | - |
| 2.7.19 | Support Apache Superset connection to TDengine Cloud data source. | - |
| 2.7.18 | Support Apache SuperSet BI Tools. | - |
| 2.7.16 | Add subscription configuration (session.timeout.ms, max.poll.interval.ms). | - |
| 2.7.15 | Added support for VARBINARY and GEOMETRY types. | - |

@@ -136,7 +139,7 @@ TDengine currently supports timestamp, numeric, character, boolean types, and th
| [tmq_consumer.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/tmq_consumer.py) | tmq subscription |
| [native_all_type_query.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/native_all_type_query.py) | Example supporting all types |
| [native_all_type_stmt.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/native_all_type_stmt.py) | Parameter binding example supporting all types |
| [test_stmt2.py](https://github.com/taosdata/taos-connector-python/blob/main/tests/test_stmt2.py) | Example of STMT2 writing |

Example program source code can be found at:

1. [More native example programs](https://github.com/taosdata/taos-connector-python/tree/main/examples)

@@ -429,51 +432,40 @@ TaosResult object can be iterated over to retrieve queried data.
- **Exceptions**: Throws `SchemalessError` if operation fails.

#### Parameter Binding

- `def statement(self, sql=None)`
  - **Interface Description**: Creates a stmt object using the connection object; if sql is not empty it will call prepare.
    - `sql`: Precompiled SQL statement.
  - **Return Value**: stmt object.
  - **Exception**: Throws `StatementError` exception on failure.
- `def statement2(self, sql=None, option=None)`
  - **Interface Description**: Creates an STMT2 object using the connection object.
  - **Parameter Description**:
    - `sql`: The bound SQL statement; the `prepare` function is called if it is not empty.
    - `option`: Pass in a `TaoStmt2Option` class instance.
  - **Return Value**: STMT2 object.
  - **Exception**: Throws `ConnectionError` on failure.
- `def prepare(self, sql)`
  - **Interface Description**: Binds a precompiled sql statement.
  - **Parameter Description**:
    - `sql`: Precompiled SQL statement.
  - **Exception**: Throws `StatementError` exception on failure.
- `def set_tbname(self, name)`
  - **Interface Description**: Sets the table name for data to be written to.
  - **Parameter Description**:
    - `name`: Table name; if you need to specify a database, for example: `db_name.table_name`.
  - **Exception**: Throws `StatementError` exception on failure.
- `def set_tbname_tags(self, name, tags):`
  - **Interface Description**: Sets the table and Tags data, used for automatic table creation.
  - **Parameter Description**:
    - `name`: Table name; if you need to specify a database, for example: `db_name.table_name`.
    - `tags`: Tags data.
  - **Exception**: Throws `StatementError` exception on failure.
- `def bind_param(self, params, add_batch=True)`
  - **Interface Description**: Binds a set of data and submits.
  - **Parameter Description**:
    - `params`: Data to bind.
    - `add_batch`: Whether to submit the bound data.
  - **Exception**: Throws `StatementError` exception on failure.
- `def bind_param_batch(self, binds, add_batch=True)`
  - **Interface Description**: Binds multiple sets of data and submits.
  - **Parameter Description**:
    - `binds`: Data to bind.
    - `add_batch`: Whether to submit the bound data.
  - **Exception**: Throws `StatementError` exception on failure.
- `def add_batch(self)`
  - **Interface Description**: Submits the bound data.
  - **Exception**: Throws `StatementError` exception on failure.
- `def execute(self)`
  - **Interface Description**: Executes and writes all the bound data.
  - **Exception**: Throws `StatementError` exception on failure.
- `def affected_rows(self)`
  - **Interface Description**: Gets the number of rows written.
  - **Return Value**: Number of rows written.
- `def close(&self)`
  - **Interface Description**: Closes the stmt object.
  - **Interface Description**: Binds a precompiled SQL statement.
  - **Parameter Description**:
    - `sql`: Precompiled SQL statement.
  - **Exception**: Throws `StatementError` on failure.
- `def bind_param(self, tbnames, tags, datas)`
  - **Interface Description**: Binds data as independent arrays.
  - **Parameter Description**:
    - `tbnames`: Bind table name array, data type is list.
    - `tags`: Bind tag column value array, data type is list.
    - `datas`: Bind data column value array, data type is list.
  - **Exception**: Throws `StatementError` on failure.
- `def bind_param_with_tables(self, tables)`
  - **Interface Description**: Binds data in an independent table format. Independent tables are organized by table units, with table name, TAG values, and data column attributes in the table object.
  - **Parameter Description**:
    - `tables`: `BindTable` independent table object array.
  - **Exception**: Throws `StatementError` on failure.
- `def execute(self) -> int:`
  - **Interface Description**: Executes and writes all the bound data.
  - **Return Value**: Number of affected rows.
  - **Exception**: Throws `QueryError` on failure.
- `def result(self)`
  - **Interface Description**: Gets the parameter binding query result set.
  - **Return Value**: Returns the TaosResult object.
- `def close(self)`
  - **Interface Description**: Closes the STMT2 object.

#### Data Subscription

@@ -1,4 +1,5 @@
---
toc_max_heading_level: 4
sidebar_label: Node.js
title: Node.js Client Library
slug: /tdengine-reference/client-libraries/node

@@ -1,4 +1,5 @@
---
toc_max_heading_level: 4
sidebar_label: C#
title: C# Client Library
slug: /tdengine-reference/client-libraries/csharp

@@ -19,7 +19,7 @@
<dependency>
    <groupId>com.taosdata.jdbc</groupId>
    <artifactId>taos-jdbcdriver</artifactId>
    <version>3.4.0</version>
    <version>3.5.0</version>
</dependency>
<dependency>
    <groupId>org.locationtech.jts</groupId>

@@ -47,7 +47,7 @@
<dependency>
    <groupId>com.taosdata.jdbc</groupId>
    <artifactId>taos-jdbcdriver</artifactId>
    <version>3.4.0</version>
    <version>3.5.0</version>
</dependency>

</dependencies>

@@ -18,7 +18,7 @@
<dependency>
    <groupId>com.taosdata.jdbc</groupId>
    <artifactId>taos-jdbcdriver</artifactId>
    <version>3.4.0</version>
    <version>3.5.0</version>
</dependency>
<!-- druid -->
<dependency>

@@ -17,7 +17,7 @@
<dependency>
    <groupId>com.taosdata.jdbc</groupId>
    <artifactId>taos-jdbcdriver</artifactId>
    <version>3.4.0</version>
    <version>3.5.0</version>
</dependency>
<dependency>
    <groupId>com.google.guava</groupId>

@@ -47,7 +47,7 @@
<dependency>
    <groupId>com.taosdata.jdbc</groupId>
    <artifactId>taos-jdbcdriver</artifactId>
    <version>3.4.0</version>
    <version>3.5.0</version>
</dependency>

<dependency>

@@ -70,7 +70,7 @@
<dependency>
    <groupId>com.taosdata.jdbc</groupId>
    <artifactId>taos-jdbcdriver</artifactId>
    <version>3.4.0</version>
    <version>3.5.0</version>
</dependency>

<dependency>

@@ -67,7 +67,7 @@
<dependency>
    <groupId>com.taosdata.jdbc</groupId>
    <artifactId>taos-jdbcdriver</artifactId>
    <version>3.4.0</version>
    <version>3.5.0</version>
    <!-- <scope>system</scope>-->
    <!-- <systemPath>${project.basedir}/src/main/resources/lib/taos-jdbcdriver-2.0.15-dist.jar</systemPath>-->
</dependency>

@@ -2,7 +2,7 @@ module goexample

go 1.17

require github.com/taosdata/driver-go/v3 v3.5.6
require github.com/taosdata/driver-go/v3 v3.6.0

require (
	github.com/google/uuid v1.3.0 // indirect

@@ -18,8 +18,8 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/taosdata/driver-go/v3 v3.5.6 h1:LDVtMyT3B9p2VREsd5KKM91D4Y7P4kSdh2SQumXi8bk=
github.com/taosdata/driver-go/v3 v3.5.6/go.mod h1:H2vo/At+rOPY1aMzUV9P49SVX7NlXb3LAbKw+MCLrmU=
github.com/taosdata/driver-go/v3 v3.6.0 h1:4dRXMl01DhIS5xBXUvtkkB+MjL8g64zN674xKd+ojTE=
github.com/taosdata/driver-go/v3 v3.6.0/go.mod h1:H2vo/At+rOPY1aMzUV9P49SVX7NlXb3LAbKw+MCLrmU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

@@ -0,0 +1,84 @@
package main

import (
	"database/sql/driver"
	"fmt"
	"log"
	"math/rand"
	"time"

	"github.com/taosdata/driver-go/v3/af"
	"github.com/taosdata/driver-go/v3/common"
	"github.com/taosdata/driver-go/v3/common/stmt"
)

func main() {
	host := "127.0.0.1"
	numOfSubTable := 10
	numOfRow := 10
	db, err := af.Open(host, "root", "taosdata", "", 0)
	if err != nil {
		log.Fatalln("Failed to connect to " + host + "; ErrMessage: " + err.Error())
	}
	defer db.Close()
	// prepare database and table
	_, err = db.Exec("CREATE DATABASE IF NOT EXISTS power")
	if err != nil {
		log.Fatalln("Failed to create database power, ErrMessage: " + err.Error())
	}
	_, err = db.Exec("USE power")
	if err != nil {
		log.Fatalln("Failed to use database power, ErrMessage: " + err.Error())
	}
	_, err = db.Exec("CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))")
	if err != nil {
		log.Fatalln("Failed to create stable meters, ErrMessage: " + err.Error())
	}
	// prepare statement
	sql := "INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)"
	reqID := common.GetReqID()
	stmt2 := db.Stmt2(reqID, false)
	err = stmt2.Prepare(sql)
	if err != nil {
		log.Fatalln("Failed to prepare sql, sql: " + sql + ", ErrMessage: " + err.Error())
	}
	for i := 1; i <= numOfSubTable; i++ {
		// generate column data
		current := time.Now()
		columns := make([][]driver.Value, 4)
		for j := 0; j < numOfRow; j++ {
			columns[0] = append(columns[0], current.Add(time.Millisecond*time.Duration(j)))
			columns[1] = append(columns[1], rand.Float32()*30)
			columns[2] = append(columns[2], rand.Int31n(300))
			columns[3] = append(columns[3], rand.Float32())
		}
		// generate bind data
		tableName := fmt.Sprintf("d_bind_%d", i)
		tags := []driver.Value{int32(i), []byte(fmt.Sprintf("location_%d", i))}
		bindData := []*stmt.TaosStmt2BindData{
			{
				TableName: tableName,
				Tags:      tags,
				Cols:      columns,
			},
		}
		// bind params
		err = stmt2.Bind(bindData)
		if err != nil {
			log.Fatalln("Failed to bind params, ErrMessage: " + err.Error())
		}
		// execute batch
		err = stmt2.Execute()
		if err != nil {
			log.Fatalln("Failed to exec, ErrMessage: " + err.Error())
		}
		// get affected rows
		affected := stmt2.GetAffectedRows()
		// you can check exeResult here
		fmt.Printf("Successfully inserted %d rows to %s.\n", affected, tableName)
	}
	err = stmt2.Close()
	if err != nil {
		log.Fatal("failed to close statement, err:", err)
	}
}

@ -22,7 +22,7 @@
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.4.0</version>
|
||||
<version>3.5.0</version>
|
||||
</dependency>
|
||||
<!-- ANCHOR_END: dep-->
|
||||
|
||||
|
|
|
@ -0,0 +1,87 @@
|
|||
package com.taos.example;
|
||||
|
||||
import com.taosdata.jdbc.ws.TSWSPreparedStatement;
|
||||
|
||||
import java.sql.*;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Random;
|
||||
|
||||
// ANCHOR: para_bind
|
||||
public class WSParameterBindingExtendInterfaceDemo {
|
||||
|
||||
// modify host to your own
|
||||
private static final String host = "127.0.0.1";
|
||||
private static final Random random = new Random(System.currentTimeMillis());
|
||||
private static final int numOfSubTable = 10, numOfRow = 10;
|
||||
|
||||
public static void main(String[] args) throws SQLException {
|
||||
|
||||
String jdbcUrl = "jdbc:TAOS-WS://" + host + ":6041";
|
||||
try (Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata")) {
|
||||
init(conn);
|
||||
|
||||
String sql = "INSERT INTO ? USING power.meters TAGS(?,?) VALUES (?,?,?,?)";
|
||||
|
||||
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("d_bind_" + i);
|
||||
|
||||
// set tags
|
||||
pstmt.setTagInt(0, i);
|
||||
pstmt.setTagString(1, "location_" + i);
|
||||
|
||||
// set column ts
|
||||
ArrayList<Long> tsList = new ArrayList<>();
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++)
|
||||
tsList.add(current + j);
|
||||
pstmt.setTimestamp(0, tsList);
|
||||
|
||||
// set column current
|
||||
ArrayList<Float> currentList = new ArrayList<>();
|
||||
for (int j = 0; j < numOfRow; j++)
|
||||
currentList.add(random.nextFloat() * 30);
|
||||
pstmt.setFloat(1, currentList);
|
||||
|
||||
// set column voltage
|
||||
ArrayList<Integer> voltageList = new ArrayList<>();
|
||||
for (int j = 0; j < numOfRow; j++)
|
||||
voltageList.add(random.nextInt(300));
|
||||
pstmt.setInt(2, voltageList);
|
||||
|
||||
// set column phase
|
||||
ArrayList<Float> phaseList = new ArrayList<>();
|
||||
for (int j = 0; j < numOfRow; j++)
|
||||
phaseList.add(random.nextFloat());
|
||||
pstmt.setFloat(3, phaseList);
|
||||
// add column
|
||||
pstmt.columnDataAddBatch();
|
||||
}
|
||||
// execute column
|
||||
pstmt.columnDataExecuteBatch();
|
||||
// you can check exeResult here
|
||||
System.out.println("Successfully inserted " + (numOfSubTable * numOfRow) + " rows to power.meters.");
|
||||
}
|
||||
} catch (Exception ex) {
|
||||
// please refer to the JDBC specifications for detailed exceptions info
|
||||
System.out.printf("Failed to insert to table meters using stmt, %sErrMessage: %s%n",
|
||||
ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "",
|
||||
ex.getMessage());
|
||||
// Print stack trace for context in examples. Use logging in production.
|
||||
ex.printStackTrace();
|
||||
throw ex;
|
||||
}
|
||||
}
|
||||
|
||||
private static void init(Connection conn) throws SQLException {
|
||||
try (Statement stmt = conn.createStatement()) {
|
||||
stmt.execute("CREATE DATABASE IF NOT EXISTS power");
|
||||
stmt.execute("USE power");
|
||||
stmt.execute(
|
||||
"CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
|
||||
}
|
||||
}
|
||||
}
|
||||
// ANCHOR_END: para_bind
|
|
@ -1,12 +1,10 @@
|
|||
package com.taos.example;
|
||||
|
||||
import com.taosdata.jdbc.ws.TSWSPreparedStatement;
|
||||
|
||||
import java.sql.*;
|
||||
import java.util.Random;
|
||||
|
||||
// ANCHOR: para_bind
|
||||
public class WSParameterBindingBasicDemo {
|
||||
public class WSParameterBindingStdInterfaceDemo {
|
||||
|
||||
// modify host to your own
|
||||
private static final String host = "127.0.0.1";
|
||||
|
@ -19,31 +17,29 @@ public class WSParameterBindingBasicDemo {
|
|||
try (Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata")) {
|
||||
init(conn);
|
||||
|
||||
String sql = "INSERT INTO ? USING power.meters TAGS(?,?) VALUES (?,?,?,?)";
|
||||
// If you are certain that the child table exists, you can avoid binding the tag column to improve performance.
|
||||
String sql = "INSERT INTO power.meters (tbname, groupid, location, ts, current, voltage, phase) VALUES (?,?,?,?,?,?,?)";
|
||||
|
||||
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
|
||||
long current = System.currentTimeMillis();
|
||||
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("d_bind_" + i);
|
||||
|
||||
// set tags
|
||||
pstmt.setTagInt(0, i);
|
||||
pstmt.setTagString(1, "location_" + i);
|
||||
|
||||
// set columns
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||
pstmt.setFloat(2, random.nextFloat() * 30);
|
||||
pstmt.setInt(3, random.nextInt(300));
|
||||
pstmt.setFloat(4, random.nextFloat());
|
||||
pstmt.setString(1, "d_bind_" + i);
|
||||
|
||||
pstmt.setInt(2, i);
|
||||
pstmt.setString(3, "location_" + i);
|
||||
|
||||
pstmt.setTimestamp(4, new Timestamp(current + j));
|
||||
pstmt.setFloat(5, random.nextFloat() * 30);
|
||||
pstmt.setInt(6, random.nextInt(300));
|
||||
pstmt.setFloat(7, random.nextFloat());
|
||||
pstmt.addBatch();
|
||||
}
|
||||
int[] exeResult = pstmt.executeBatch();
|
||||
// you can check exeResult here
|
||||
System.out.println("Successfully inserted " + exeResult.length + " rows to power.meters.");
|
||||
}
|
||||
int[] exeResult = pstmt.executeBatch();
|
||||
// you can check exeResult here
|
||||
System.out.println("Successfully inserted " + exeResult.length + " rows to power.meters.");
|
||||
}
|
||||
} catch (Exception ex) {
|
||||
// please refer to the JDBC specifications for detailed exceptions info
|
|
@ -118,9 +118,14 @@ public class TestAll {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void testWsStmtBasic() throws Exception {
|
||||
public void testWsStmtStd() throws Exception {
|
||||
dropDB("power");
|
||||
WSParameterBindingBasicDemo.main(args);
|
||||
WSParameterBindingStdInterfaceDemo.main(args);
|
||||
}
|
||||
@Test
|
||||
public void testWsStmtExtend() throws Exception {
|
||||
dropDB("power");
|
||||
WSParameterBindingExtendInterfaceDemo.main(args);
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
|
@ -0,0 +1,71 @@
|
|||
import taos
|
||||
from datetime import datetime
|
||||
import random
|
||||
|
||||
numOfSubTable = 10
|
||||
numOfRow = 10
|
||||
|
||||
conn = None
|
||||
stmt2 = None
|
||||
host="localhost"
|
||||
port=6030
|
||||
try:
|
||||
# 1 connect
|
||||
conn = taos.connect(
|
||||
user="root",
|
||||
password="taosdata",
|
||||
host=host,
|
||||
port=port,
|
||||
)
|
||||
|
||||
# 2 create db and table
|
||||
conn.execute("CREATE DATABASE IF NOT EXISTS power")
|
||||
conn.execute("USE power")
|
||||
conn.execute(
|
||||
"CREATE TABLE IF NOT EXISTS `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(16))"
|
||||
)
|
||||
|
||||
# 3 prepare
|
||||
sql = "INSERT INTO ? USING meters (groupid, location) TAGS(?,?) VALUES (?,?,?,?)"
|
||||
stmt2 = conn.statement2(sql)
|
||||
|
||||
tbnames = []
|
||||
tags = []
|
||||
datas = []
|
||||
|
||||
for i in range(numOfSubTable):
|
||||
# tbnames
|
||||
tbnames.append(f"d_bind_{i}")
|
||||
# tags
|
||||
tags.append([i, f"location_{i}"])
|
||||
# datas
|
||||
current = int(datetime.now().timestamp() * 1000)
|
||||
timestamps = []
|
||||
currents = []
|
||||
voltages = []
|
||||
phases = []
|
||||
for j in range (numOfRow):
|
||||
timestamps.append(current + i*1000 + j)
|
||||
currents.append(float(random.random() * 30))
|
||||
voltages.append(random.randint(100, 300))
|
||||
phases.append(float(random.random()))
|
||||
data = [timestamps, currents, voltages, phases]
|
||||
datas.append(data)
|
||||
|
||||
# 4 bind param
|
||||
stmt2.bind_param(tbnames, tags, datas)
|
||||
|
||||
# 5 execute
|
||||
stmt2.execute()
|
||||
|
||||
# show
|
||||
print(f"Successfully inserted with stmt2 to power.meters. child={numOfSubTable} rows={numOfRow} \n")
|
||||
|
||||
except Exception as err:
|
||||
print(f"Failed to insert to table meters using stmt2, ErrMessage:{err}")
|
||||
raise err
|
||||
finally:
|
||||
if stmt2:
|
||||
stmt2.close()
|
||||
if conn:
|
||||
conn.close()
|
|
@ -89,7 +89,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.4.0</version>
|
||||
<version>3.5.0</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
|
|
|
@ -26,10 +26,16 @@ import TabItem from "@theme/TabItem";
|
|||
## WebSocket 连接
|
||||
<Tabs defaultValue="java" groupId="lang">
|
||||
<TabItem value="java" label="Java">
|
||||
|
||||
参数绑定有两种接口使用方式,一种是 JDBC 标准接口,一种是扩展接口,扩展接口性能更好一些。
|
||||
|
||||
```java
|
||||
{{#include docs/examples/java/src/main/java/com/taos/example/WSParameterBindingBasicDemo.java:para_bind}}
|
||||
{{#include docs/examples/java/src/main/java/com/taos/example/WSParameterBindingStdInterfaceDemo.java:para_bind}}
|
||||
```
|
||||
|
||||
```java
|
||||
{{#include docs/examples/java/src/main/java/com/taos/example/WSParameterBindingExtendInterfaceDemo.java:para_bind}}
|
||||
```
|
||||
|
||||
这是一个[更详细的参数绑定示例](https://github.com/taosdata/TDengine/blob/main/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingFullDemo.java)
|
||||
|
||||
|
@ -87,13 +93,24 @@ import TabItem from "@theme/TabItem";
|
|||
<TabItem label="Python" value="python">
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/stmt_native.py}}
|
||||
{{#include docs/examples/python/stmt2_native.py}}
|
||||
```
|
||||
</TabItem>
|
||||
<TabItem label="Go" value="go">
|
||||
|
||||
stmt2 绑定参数的示例代码如下(go 连接器 v3.6.0 及以上,TDengine v3.3.5.0 及以上):
|
||||
|
||||
```go
|
||||
{{#include docs/examples/go/stmt2/native/main.go}}
|
||||
```
|
||||
|
||||
stmt 绑定参数的示例代码如下:
|
||||
|
||||
```go
|
||||
{{#include docs/examples/go/stmt/native/main.go}}
|
||||
```
|
||||
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="Rust" value="rust">
|
||||
|
||||
|
|
|
@ -60,11 +60,15 @@ dataDir /mnt/data6 2 0
|
|||
|
||||
## 对象存储
|
||||
|
||||
本节介绍在 TDengine Enterprise 如何使用 S3 对象存储,本功能基于通用 S3 SDK 实现,对各个 S3 平台的访问参数进行了兼容适配,可以访问如 minio,腾讯云 COS,Amazon S3 等对象存储服务。通过适当的参数配置,可以把大部分较冷的时序数据存储到 S3 服务中。
|
||||
本节介绍在 TDengine Enterprise 版本中如何使用对象存储功能,如 Amazon S3、Azure Blob Storage、华为 OBS、腾讯云 COS、阿里云 OSS、MinIO 等对象存储服务。
|
||||
|
||||
**注意** 在配合多级存储使用时,每一级存储介质上保存的数据都有可能被按规则备份到远程对象存储中并删除本地数据文件。
|
||||
|
||||
### 配置方式
|
||||
### S3 对象存储
|
||||
|
||||
本功能基于通用 S3 SDK 实现,对各个 S3 平台的访问参数进行了兼容适配,通过适当的参数配置,可以把大部分较冷的时序数据存储到 S3 服务中。
|
||||
|
||||
#### 配置方式
|
||||
|
||||
在配置文件 /etc/taos/taos.cfg 中,添加用于 S3 访问的参数:
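下面是一个最小的配置示意(各取值均为示例,需替换为实际的 S3 服务信息;访问地址与密钥类参数此处省略,准确名称以后文参数表和正式文档为准):

```
# taos.cfg 中与 S3 迁移相关的部分参数(示意)
s3BucketName          td-test     # 存储桶名称
s3MigrateEnabled      1           # 开启自动 S3 迁移
s3MigrateIntervalSec  3600        # 本地数据文件自动上传的触发周期,单位:秒
s3PageCacheSize       4096        # S3 页缓存大小,单位:页(示例值)
```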
|
||||
|
||||
|
@ -78,7 +82,7 @@ dataDir /mnt/data6 2 0
|
|||
| s3MigrateIntervalSec | 本地数据文件自动上传 S3 的触发周期,单位为秒。最小值:600;最大值:100000。默认值 3600 |
|
||||
| s3MigrateEnabled | 是否自动进行 S3 迁移,默认值为 0,表示关闭自动 S3 迁移,可配置为 1。 |
|
||||
|
||||
### 检查配置参数可用性
|
||||
#### 检查配置参数可用性
|
||||
|
||||
在 taos.cfg 中完成对 S3 的配置后,通过 taosd 命令的 checks3 参数可以检查所配置的 S3 服务是否可用:
|
||||
|
||||
|
@ -88,7 +92,7 @@ taosd --checks3
|
|||
|
||||
如果配置的 S3 服务无法访问,此命令会在运行过程中输出相应的错误信息。
|
||||
|
||||
### 创建使用 S3 的 DB
|
||||
#### 创建使用 S3 的 DB
|
||||
|
||||
完成配置后,即可启动 TDengine 集群,创建使用 S3 的数据库,比如:
|
||||
|
||||
|
@ -112,11 +116,11 @@ s3migrate database <db_name>;
|
|||
| 2 | s3_chunkpages | 131072 | 131072 | 1048576 | 上传对象的大小阈值,与 tsdb_pagesize 参数一样,不可修改,单位为 TSDB 页 |
|
||||
| 3 | s3_compact | 1 | 0 | 1 | TSDB 文件组首次上传 S3 时,是否自动进行 compact 操作 |
|
||||
|
||||
### 对象存储读写次数估算
|
||||
#### 对象存储读写次数估算
|
||||
|
||||
对象存储服务的使用成本与存储的数据量及请求次数相关,下面分别介绍数据的上传及下载过程。
|
||||
|
||||
#### 数据上传
|
||||
##### 数据上传
|
||||
|
||||
当 TSDB 时序数据超过 `s3_keeplocal` 参数指定的时间,相关的数据文件会被切分成多个文件块,每个文件块的默认大小是 512M 字节 (`s3_chunkpages * tsdb_pagesize`)。除了最后一个文件块保留在本地文件系统外,其余的文件块会被上传到对象存储服务。
|
||||
|
||||
|
@ -128,7 +132,7 @@ s3migrate database <db_name>;
|
|||
|
||||
其它类型的文件如 head, stt, sma 等,保留在本地文件系统,以加速预计算相关查询。
|
||||
|
||||
#### 数据下载
|
||||
##### 数据下载
|
||||
|
||||
在查询操作中,如果需要访问对象存储中的数据,TSDB 不会下载整个数据文件,而是计算所需数据在文件中的位置,只下载相应的数据到 TSDB 页缓存中,然后将数据返回给查询执行引擎。后续查询首先检查页缓存,查看数据是否已被缓存。如果数据已缓存,则直接使用缓存中的数据,而无需重复从对象存储下载,从而有效降低从对象存储下载数据的次数。
|
||||
|
||||
|
@ -140,15 +144,15 @@ s3migrate database <db_name>;
|
|||
|
||||
页缓存是内存缓存,节点重启后,再次查询需要重新下载数据。缓存采用 LRU (Least Recently Used) 策略,当缓存空间不足时,最近最少使用的数据将被淘汰。缓存的大小可以通过 `s3PageCacheSize` 参数进行调整,通常来说,缓存越大,下载次数越少。
|
||||
|
||||
## Azure Blob 存储
|
||||
本节介绍在 TDengine Enterprise 如何使用微软 Azure Blob 对象存储。本功能是上一小节‘对象存储’功能的扩展,需额外依赖 Flexify 服务提供的 S3 网关。通过适当的参数配置,可以把大部分较冷的时序数据存储到 Azure Blob 服务中。
|
||||
### Azure Blob 存储
|
||||
本节介绍在 TDengine Enterprise 版本中如何使用微软 Azure Blob 存储。本功能可以通过两种方式使用:一种是利用 Flexify 服务提供的 S3 网关,另一种是不依赖 Flexify 服务直接访问。通过配置参数,可以把大部分较冷的时序数据存储到 Azure Blob 服务中。
|
||||
|
||||
### Flexify 服务
|
||||
#### Flexify 服务
|
||||
Flexify 是 Azure Marketplace 中的一款应用程序,允许兼容 S3 的应用程序通过标准 S3 API 在 Azure Blob Storage 中存储数据。可使用多个 Flexify 服务对同一个 Blob 存储建立多个 S3 网关。
|
||||
|
||||
部署方式请参考 [Flexify](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/flexify.azure-s3-api?tab=Overview) 应用页面说明。
|
||||
|
||||
### 配置方式
|
||||
##### 配置方式
|
||||
|
||||
在配置文件 /etc/taos/taos.cfg 中,添加用于 S3 访问的参数:
|
||||
|
||||
|
@ -164,7 +168,7 @@ s3BucketName td-test
|
|||
- 在某一 S3 服务上操作失败后会切换至其他服务,全部服务都失败后将返回最后产生的错误码
|
||||
- 最大支持的 S3 服务配置数为 10
|
||||
|
||||
### 不依赖 Flexify 服务
|
||||
#### 不依赖 Flexify 服务
|
||||
|
||||
用户界面同 S3,不同的地方在于下面三个参数的配置:
|
||||
|
||||
|
|
|
@ -17,7 +17,7 @@ create user user_name pass'password' [sysinfo {1|0}] [createdb {1|0}]
|
|||
|
||||
相关参数说明如下。
|
||||
- user_name:用户名最长不超过 23 个字节。
|
||||
- password:密码长度必须为 8 到 16 位,并且至少包含大写字母、小写字母、数字、特殊字符中的三类。特殊字符包括 `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`。
|
||||
- password:密码长度必须为 8 到 16 位,并且至少包含大写字母、小写字母、数字、特殊字符中的三类。特殊字符包括 `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`。(始自 3.3.5.0 版本)
|
||||
- sysinfo :用户是否可以查看系统信息。1 表示可以查看,0 表示不可以查看。系统信息包括服务端配置信息、服务端各种节点信息,如 dnode、查询节点(qnode)等,以及与存储相关的信息等。默认为可以查看系统信息。
|
||||
- createdb:用户是否可以创建数据库。1 表示可以创建,0 表示不可以创建。缺省值为 0。// 从 TDengine 企业版 3.3.2.0 开始支持
|
||||
|
||||
|
|
|
@ -256,7 +256,7 @@ charset 的有效值是 UTF-8。
|
|||
|slowLogMaxLen |3.3.3.0 后|支持动态修改 立即生效 |慢查询日志最大长度,取值范围 1-16384,默认值 4096|
|
||||
|slowLogScope |3.3.3.0 后|支持动态修改 立即生效 |慢查询记录类型,取值范围 ALL/QUERY/INSERT/OTHERS/NONE,默认值 QUERY|
|
||||
|slowLogExceptDb |3.3.3.0 后|支持动态修改 立即生效 |指定的数据库不上报慢查询,仅支持配置一个数据库|
|
||||
|debugFlag | |支持动态修改 立即生效 |运行日志开关,131(输出错误和警告日志),135(输出错误、警告和调试日志),143(输出错误、警告、调试和跟踪日志);默认值 131 或 135 (取决于不同模块)|
|
||||
|debugFlag | |支持动态修改 立即生效 |运行日志开关,131(输出错误和警告日志),135(输出错误、警告和调试日志),143(输出错误、警告、调试和跟踪日志);默认值 131 或 135 (取决于不同模块),该参数的设置会影响所有模块的开关,后设置的参数起效|
|
||||
|tmrDebugFlag | |支持动态修改 立即生效 |定时器模块的日志开关,取值范围同上|
|
||||
|uDebugFlag | |支持动态修改 立即生效 |共用功能模块的日志开关,取值范围同上|
|
||||
|rpcDebugFlag | |支持动态修改 立即生效 |rpc 模块的日志开关,取值范围同上|
|
||||
|
|
|
@ -37,6 +37,6 @@ description: 合法字符集和命名中的限制规则
|
|||
- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制
|
||||
- 数据库的副本数只能设置为 1 或 3
|
||||
- 用户名的最大长度是 23 字节
|
||||
- 用户密码的最大长度是 31 字节
|
||||
- 用户密码的长度范围是 8-16 字节
|
||||
- 总数据行数取决于可用资源
|
||||
- 单个数据库的虚拟结点数上限为 1024
|
||||
|
|
|
@ -33,6 +33,7 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致
|
|||
|
||||
| taos-jdbcdriver 版本 | 主要变化 | TDengine 版本 |
|
||||
| ------------------| ---------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- |
|
||||
| 3.5.0 | 1. 优化了 WebSocket 连接参数绑定性能,支持参数绑定查询使用二进制数据 <br/> 2. 优化了 WebSocket 连接在小查询上的性能 <br/> 3. WebSocket 连接上支持设置时区和应用信息 | 3.3.5.0 及更高版本 |
|
||||
| 3.4.0 | 1. 使用 jackson 库替换 fastjson 库 <br/> 2. WebSocket 采用独立协议标识 <br/> 3. 优化后台拉取线程使用,避免用户误用导致超时 | - |
|
||||
| 3.3.4 | 解决了 getInt 在数据类型为 float 报错 | - |
|
||||
| 3.3.3 | 解决了 WebSocket statement 关闭导致的内存泄漏 | - |
|
||||
|
@ -243,13 +244,13 @@ TDengine 中,只要保证 firstEp 和 secondEp 中一个节点有效,就可
|
|||
对于 WebSocket 连接,url 中的配置参数如下:
|
||||
- user:登录 TDengine 用户名,默认值 'root'。
|
||||
- password:用户登录密码,默认值 'taosdata'。
|
||||
- charset: 当开启批量拉取数据时,指定解析字符串数据的字符集。
|
||||
- batchErrorIgnore:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败,将继续执行后面的 SQL。false:不再执行失败 SQL 后的任何语句。默认值为:false。
|
||||
- httpConnectTimeout: 连接超时时间,单位 ms, 默认值为 60000。
|
||||
- messageWaitTimeout: 消息超时时间, 单位 ms, 默认值为 60000。
|
||||
- useSSL: 连接中是否使用 SSL。
|
||||
- timezone:客户端使用的时区,连接上生效,默认值为系统时区。推荐不设置,使用系统时区性能更好。
|
||||
|
||||
**注意**:部分配置项(比如:locale、timezone)在 WebSocket 连接中不生效。
|
||||
**注意**:部分配置项(比如:locale、charset)在 WebSocket 连接中不生效。
|
||||
|
||||
**REST 连接**
|
||||
使用 JDBC REST 连接,不需要依赖客户端驱动。与 JDBC 原生连接相比,仅需要:
|
||||
|
@ -261,14 +262,13 @@ TDengine 中,只要保证 firstEp 和 secondEp 中一个节点有效,就可
|
|||
对于 REST 连接,url 中的配置参数如下:
|
||||
- user:登录 TDengine 用户名,默认值 'root'。
|
||||
- password:用户登录密码,默认值 'taosdata'。
|
||||
- charset: 当开启批量拉取数据时,指定解析字符串数据的字符集。
|
||||
- batchErrorIgnore:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败,将继续执行后面的 SQL。false:不再执行失败 SQL 后的任何语句。默认值为:false。
|
||||
- httpConnectTimeout: 连接超时时间,单位 ms, 默认值为 60000。
|
||||
- httpSocketTimeout: socket 超时时间,单位 ms,默认值为 60000。
|
||||
- useSSL: 连接中是否使用 SSL。
|
||||
- httpPoolSize: REST 并发请求大小,默认 20。
|
||||
|
||||
**注意**:部分配置项(比如:locale、timezone)在 REST 连接中不生效。
|
||||
**注意**:部分配置项(比如:locale、charset 和 timezone)在 REST 连接中不生效。
|
||||
|
||||
:::note
|
||||
|
||||
|
@ -291,7 +291,9 @@ properties 中的配置参数如下:
|
|||
- TSDBDriver.PROPERTY_KEY_CONFIG_DIR:仅在使用 JDBC 原生连接时生效。客户端配置文件目录路径,Linux OS 上默认值 `/etc/taos`,Windows OS 上默认值 `C:/TDengine/cfg`。
|
||||
- TSDBDriver.PROPERTY_KEY_CHARSET:客户端使用的字符集,默认值为系统字符集。
|
||||
- TSDBDriver.PROPERTY_KEY_LOCALE:仅在使用 JDBC 原生连接时生效。 客户端语言环境,默认值系统当前 locale。
|
||||
- TSDBDriver.PROPERTY_KEY_TIME_ZONE:仅在使用 JDBC 原生连接时生效。 客户端使用的时区,默认值为系统当前时区。因为历史的原因,我们只支持POSIX标准的部分规范,如UTC-8(代表中国上上海), GMT-8,Asia/Shanghai 这几种形式。
|
||||
- TSDBDriver.PROPERTY_KEY_TIME_ZONE:
|
||||
- 原生连接:客户端使用的时区,默认值为系统当前时区,全局生效。因为历史的原因,我们只支持 POSIX 标准的部分规范,如 UTC-8(代表中国上海)、GMT-8、Asia/Shanghai 这几种形式。
|
||||
- WebSocket 连接:客户端使用的时区,连接上生效,默认值为系统时区。仅支持 IANA 时区,即 Asia/Shanghai 这种形式。推荐不设置,使用系统时区性能更好。
|
||||
- TSDBDriver.HTTP_CONNECT_TIMEOUT: 连接超时时间,单位 ms, 默认值为 60000。仅在 REST 连接时生效。
|
||||
- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket 超时时间,单位 ms,默认值为 60000。仅在 REST 连接且 batchfetch 设置为 false 时生效。
|
||||
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: 消息超时时间, 单位 ms, 默认值为 60000。 仅 WebSocket 连接下有效。
|
||||
|
@ -299,12 +301,15 @@ properties 中的配置参数如下:
|
|||
- TSDBDriver.HTTP_POOL_SIZE: REST 并发请求大小,默认 20。
|
||||
- TSDBDriver.PROPERTY_KEY_ENABLE_COMPRESSION: 传输过程是否启用压缩。仅在使用 REST/WebSocket 连接时生效。true: 启用,false: 不启用。默认为 false。
|
||||
- TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT: 是否启用自动重连。仅在使用 WebSocket 连接时生效。true: 启用,false: 不启用。默认为 false。
|
||||
> **注意**:启用自动重连仅对简单执行 SQL 语句以及 无模式写入、数据订阅有效。对于参数绑定无效。自动重连仅对连接建立时通过参数指定数据库有效,对后面的 `use db` 语句切换数据库无效。
|
||||
> **注意**:启用自动重连仅对简单执行 SQL 语句以及 无模式写入、数据订阅有效。对于参数绑定无效。自动重连仅对连接建立时通过参数指定数据库有效,对后面的 `use db` 语句切换数据库无效。
|
||||
|
||||
- TSDBDriver.PROPERTY_KEY_RECONNECT_INTERVAL_MS: 自动重连重试间隔,单位毫秒,默认值 2000。仅在 PROPERTY_KEY_ENABLE_AUTO_RECONNECT 为 true 时生效。
|
||||
- TSDBDriver.PROPERTY_KEY_RECONNECT_RETRY_COUNT: 自动重连重试次数,默认值 3,仅在 PROPERTY_KEY_ENABLE_AUTO_RECONNECT 为 true 时生效。
|
||||
- TSDBDriver.PROPERTY_KEY_DISABLE_SSL_CERT_VALIDATION: 关闭 SSL 证书验证 。仅在使用 WebSocket 连接时生效。true: 启用,false: 不启用。默认为 false。
|
||||
|
||||
- TSDBDriver.PROPERTY_KEY_APP_NAME: App 名称,可用于 `show connections` 查询结果显示。仅在使用 WebSocket 连接时生效。默认值为 java。
|
||||
- TSDBDriver.PROPERTY_KEY_APP_IP: App IP,可用于 `show connections` 查询结果显示。仅在使用 WebSocket 连接时生效。默认值为空。
|
||||
|
||||
此外对 JDBC 原生连接,通过指定 URL 和 Properties 还可以指定其他参数,比如日志级别、SQL 长度等。
|
||||
|
||||
**配置参数的优先级**
|
||||
|
|
|
@ -23,24 +23,25 @@ import RequestId from "./_request_id.mdx";
|
|||
|
||||
## 版本历史
|
||||
|
||||
| driver-go 版本 | 主要变化 | TDengine 版本 |
|
||||
|-------------|-------------------------------------|---------------|
|
||||
| v3.5.8 | 修复空指针异常 | - |
|
||||
| v3.5.7 | taosWS 和 taosRestful 支持传入 request id | - |
|
||||
| v3.5.6 | 提升 websocket 查询和写入性能 | 3.3.2.0 及更高版本 |
|
||||
| v3.5.5 | restful 支持跳过 ssl 证书检查 | - |
|
||||
| v3.5.4 | 兼容 TDengine 3.3.0.0 tmq raw data | - |
|
||||
| v3.5.3 | 重构 taosWS | - |
|
||||
| v3.5.2 | websocket 压缩和优化消息订阅性能 | 3.2.3.0 及更高版本 |
|
||||
| v3.5.1 | 原生 stmt 查询和 geometry 类型支持 | 3.2.1.0 及更高版本 |
|
||||
| v3.5.0 | 获取消费进度及按照指定进度开始消费 | 3.0.5.0 及更高版本 |
|
||||
| v3.3.1 | 基于 websocket 的 schemaless 协议写入 | 3.0.4.1 及更高版本 |
|
||||
| v3.1.0 | 提供贴近 kafka 的订阅 api | - |
|
||||
| v3.0.4 | 新增 request id 相关接口 | 3.0.2.2 及更高版本 |
|
||||
| v3.0.3 | 基于 websocket 的 statement 写入 | - |
|
||||
| v3.0.2 | 基于 websocket 的数据查询和写入 | 3.0.1.5 及更高版本 |
|
||||
| v3.0.1 | 基于 websocket 的消息订阅 | - |
|
||||
| v3.0.0 | 适配 TDengine 3.0 查询和写入 | 3.0.0.0 及更高版本 |
|
||||
| driver-go 版本 | 主要变化 | TDengine 版本 |
|
||||
|--------------|--------------------------------------------|---------------|
|
||||
| v3.6.0 | stmt2 原生接口,DSN 支持密码包含特殊字符(url.QueryEscape) | 3.3.5.0 及更高版本 |
|
||||
| v3.5.8 | 修复空指针异常 | - |
|
||||
| v3.5.7 | taosWS 和 taosRestful 支持传入 request id | - |
|
||||
| v3.5.6 | 提升 websocket 查询和写入性能 | 3.3.2.0 及更高版本 |
|
||||
| v3.5.5 | restful 支持跳过 ssl 证书检查 | - |
|
||||
| v3.5.4 | 兼容 TDengine 3.3.0.0 tmq raw data | - |
|
||||
| v3.5.3 | 重构 taosWS | - |
|
||||
| v3.5.2 | websocket 压缩和优化消息订阅性能 | 3.2.3.0 及更高版本 |
|
||||
| v3.5.1 | 原生 stmt 查询和 geometry 类型支持 | 3.2.1.0 及更高版本 |
|
||||
| v3.5.0 | 获取消费进度及按照指定进度开始消费 | 3.0.5.0 及更高版本 |
|
||||
| v3.3.1 | 基于 websocket 的 schemaless 协议写入 | 3.0.4.1 及更高版本 |
|
||||
| v3.1.0 | 提供贴近 kafka 的订阅 api | - |
|
||||
| v3.0.4 | 新增 request id 相关接口 | 3.0.2.2 及更高版本 |
|
||||
| v3.0.3 | 基于 websocket 的 statement 写入 | - |
|
||||
| v3.0.2 | 基于 websocket 的数据查询和写入 | 3.0.1.5 及更高版本 |
|
||||
| v3.0.1 | 基于 websocket 的消息订阅 | - |
|
||||
| v3.0.0 | 适配 TDengine 3.0 查询和写入 | 3.0.0.0 及更高版本 |
|
||||
|
||||
## 异常和错误码
|
||||
|
||||
|
@ -137,6 +138,8 @@ WKB规范请参考[Well-Known Binary (WKB)](https://libgeos.org/specifications/w
|
|||
username:password@protocol(address)/dbname?param=value
|
||||
```
|
||||
|
||||
当密码中包含特殊字符时,需要使用 `url.QueryEscape` 进行转义。
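例如,下面的示意代码演示用 `url.QueryEscape` 转义 DSN 中的密码(密码、地址与库名均为假设值,协议以 WebSocket 的 `ws` 为例):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// 假设的密码,包含特殊字符,仅作演示
	password := "ta/os@123#"
	// 按 username:password@protocol(address)/dbname 格式拼接 DSN
	dsn := fmt.Sprintf("root:%s@ws(localhost:6041)/test", url.QueryEscape(password))
	fmt.Println(dsn)
	// 输出:root:ta%2Fos%40123%23@ws(localhost:6041)/test
}
```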
|
||||
|
||||
##### 原生连接
|
||||
|
||||
导入驱动:
|
||||
|
@ -494,6 +497,37 @@ Prepare 允许使用预编译的 SQL 语句,可以提高性能并提供参数
|
|||
- **接口说明**:关闭语句。
|
||||
- **返回值**:错误信息。
|
||||
|
||||
从 3.6.0 版本开始,提供 stmt2 绑定参数的接口
|
||||
|
||||
- `func (conn *Connector) Stmt2(reqID int64, singleTableBindOnce bool) *Stmt2`
|
||||
- **接口说明**:从连接创建 stmt2。
|
||||
- **参数说明**:
|
||||
- `reqID`:请求 ID。
|
||||
- `singleTableBindOnce`:单个子表在单次执行中只有一次数据绑定。
|
||||
- **返回值**:stmt2 对象。
|
||||
- `func (s *Stmt2) Prepare(sql string) error`
|
||||
- **接口说明**:绑定 sql 语句。
|
||||
- **参数说明**:
|
||||
- `sql`:要绑定的 sql 语句。
|
||||
- **返回值**:错误信息。
|
||||
- `func (s *Stmt2) Bind(params []*stmt.TaosStmt2BindData) error`
|
||||
- **接口说明**:绑定数据。
|
||||
- **参数说明**:
|
||||
- `params`:要绑定的数据。
|
||||
- **返回值**:错误信息。
|
||||
- `func (s *Stmt2) Execute() error`
|
||||
- **接口说明**:执行语句。
|
||||
- **返回值**:错误信息。
|
||||
- `func (s *Stmt2) GetAffectedRows() int`
|
||||
- **接口说明**:获取受影响行数(只在插入语句有效)。
|
||||
- **返回值**:受影响行数。
|
||||
- `func (s *Stmt2) UseResult() (driver.Rows, error)`
|
||||
- **接口说明**:获取结果集(只在查询语句有效)。
|
||||
- **返回值**:结果集 Rows 对象,错误信息。
|
||||
- `func (s *Stmt2) Close() error`
|
||||
- **接口说明**:关闭 stmt2。
|
||||
- **返回值**:错误信息。
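上述 stmt2 接口的一个精简使用示意如下(基于本文前面的 Go 完整示例整理,假设数据库 power 与超级表 meters 已按前文创建,连接参数需按实际环境修改):

```go
package main

import (
	"database/sql/driver"
	"fmt"
	"log"
	"time"

	"github.com/taosdata/driver-go/v3/af"
	"github.com/taosdata/driver-go/v3/common"
	"github.com/taosdata/driver-go/v3/common/stmt"
)

func main() {
	// 原生连接,直接指定已存在的数据库 power
	db, err := af.Open("127.0.0.1", "root", "taosdata", "power", 0)
	if err != nil {
		log.Fatalln("failed to connect:", err)
	}
	defer db.Close()

	// 创建 stmt2 并绑定 SQL
	stmt2 := db.Stmt2(common.GetReqID(), false)
	if err = stmt2.Prepare("INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)"); err != nil {
		log.Fatalln("failed to prepare:", err)
	}

	// 构造一张子表、一行数据:ts, current, voltage, phase
	cols := [][]driver.Value{
		{time.Now()},
		{float32(10.5)},
		{int32(220)},
		{float32(0.31)},
	}
	tags := []driver.Value{int32(1), []byte("location_1")}
	bindData := []*stmt.TaosStmt2BindData{{TableName: "d_bind_1", Tags: tags, Cols: cols}}

	// 绑定并执行
	if err = stmt2.Bind(bindData); err != nil {
		log.Fatalln("failed to bind:", err)
	}
	if err = stmt2.Execute(); err != nil {
		log.Fatalln("failed to execute:", err)
	}
	fmt.Printf("inserted %d rows\n", stmt2.GetAffectedRows())

	if err = stmt2.Close(); err != nil {
		log.Fatalln("failed to close stmt2:", err)
	}
}
```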
|
||||
|
||||
`ws/stmt` 包提供了通过 WebSocket 进行参数绑定的接口
|
||||
|
||||
- `func (c *Connector) Init() (*Stmt, error)`
|
||||
|
|
|
@ -52,7 +52,9 @@ Python Connector 历史版本(建议使用最新版本的 `taospy`):
|
|||
|
||||
| Python Connector 版本 | 主要变化 | TDengine 版本 |
|
||||
| -------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------- |
|
||||
| 2.7.18 | 支持 Apache Superset BI 产品 | - |
|
||||
| 2.7.21 | Native 支持 STMT2 写入 | - |
|
||||
| 2.7.19 | 支持 Apache Superset 连接 TDengine Cloud 数据源 | - |
|
||||
| 2.7.18 | 支持 Apache Superset 产品连接本地 TDengine 数据源 | - |
|
||||
| 2.7.16 | 新增订阅配置 (session.timeout.ms, max.poll.interval.ms) | - |
|
||||
| 2.7.15 | 新增 VARBINARY 和 GEOMETRY 类型支持 | - |
|
||||
| 2.7.14 | 修复已知问题 | - |
|
||||
|
@ -131,7 +133,8 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Python 对
|
|||
| [json_tag.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/json-tag.py) | 使用 JSON 类型的标签 |
|
||||
| [tmq_consumer.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/tmq_consumer.py) | tmq 订阅 |
|
||||
| [native_all_type_query.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/native_all_type_query.py) | 支持全部类型示例 |
|
||||
| [native_all_type_stmt.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/native_all_type_stmt.py) | 参数绑定支持全部类型示例 |
|
||||
| [native_all_type_stmt.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/native_all_type_stmt.py) | 参数绑定 stmt 全部类型示例 |
|
||||
| [test_stmt2.py](https://github.com/taosdata/taos-connector-python/blob/main/tests/test_stmt2.py) | 参数绑定 stmt2 写入示例 |
|
||||
|
||||
示例程序源码请参考:
|
||||
|
||||
|
@ -284,6 +287,7 @@ TaosResult 对象可以通过循环遍历获取查询到的数据。
|
|||
- `fn close(&self) -> PyResult<()>`
|
||||
- **接口说明**: 关闭 stmt 对象。
|
||||
|
||||
|
||||
#### 数据订阅
|
||||
- **创建消费者支持属性列表**:
|
||||
- host:主机地址。
|
||||
|
@ -424,50 +428,40 @@ TaosResult 对象可以通过循环遍历获取查询到的数据。
|
|||
- **异常**:操作失败抛出 `SchemalessError` 异常。
|
||||
|
||||
#### 参数绑定
|
||||
- `def statement(self, sql=None)`
|
||||
- **接口说明**:使用连接对象创建 stmt 对象, 如果 sql 不空会进行调用 prepare。
|
||||
- `sql`: 预编译的 SQL 语句。
|
||||
- **返回值**:stmt 对象。
|
||||
- **异常**:操作失败抛出 `StatementError` 异常。
|
||||
- `def statement2(self, sql=None, option=None)`
|
||||
- **接口说明**:使用连接对象创建 stmt2 对象
|
||||
- **参数说明**
|
||||
- `sql`: 绑定的 SQL 语句,如果不为空会调用`prepare`函数
|
||||
- `option` 传入 TaosStmt2Option 类实例选项
|
||||
- **返回值**:stmt2 对象。
|
||||
- **异常**:操作失败抛出 `ConnectionError` 异常。
|
||||
- `def prepare(self, sql)`
|
||||
- **接口说明**:绑定预编译 sql 语句。
|
||||
- **接口说明**:绑定预编译 sql 语句
|
||||
- **参数说明**:
|
||||
- `sql`: 预编译的 SQL 语句。
|
||||
- `sql`: 绑定的 SQL 语句
|
||||
- **异常**:操作失败抛出 `StatementError` 异常。
|
||||
- `def set_tbname(self, name)`
|
||||
- **接口说明**:设置将要写入数据的表名。
|
||||
- `def bind_param(self, tbnames, tags, datas)`
|
||||
- **接口说明**:以独立数组方式绑定数据
|
||||
- **参数说明**:
|
||||
- `name`: 表名,如果需要指定数据库, 例如: `db_name.table_name` 即可。
|
||||
- **异常**:操作失败抛出 `StatementError` 异常。
|
||||
- `def set_tbname_tags(self, name, tags):`
|
||||
- **接口说明**:设置表和 Tags 数据, 用于自动建表。
|
||||
- `tbnames`: 绑定表名数组,数据类型为 list
|
||||
- `tags`: 绑定 tag 列值数组,数据类型为 list
|
||||
- `datas`: 绑定普通列值数组,数据类型为 list
|
||||
- **异常**:操作失败抛出 `StatementError` 异常
|
||||
- `def bind_param_with_tables(self, tables)`
|
||||
- **接口说明**:以独立表方式绑定数据,独立表是以表为组织单位,每张表中有表名,TAG 值及普通列数值属性
|
||||
- **参数说明**:
|
||||
- `name`: 表名,如果需要指定数据库, 例如: `db_name.table_name` 即可。
|
||||
- `tags`: Tags 数据。
|
||||
- `tables`: `BindTable` 独立表对象数组
|
||||
- **异常**:操作失败抛出 `StatementError` 异常。
|
||||
- `def bind_param(self, params, add_batch=True)`
|
||||
- **接口说明**:绑定一组数据并提交。
|
||||
- **参数说明**:
|
||||
- `params`: 绑定数据。
|
||||
- `add_batch`: 是否提交绑定数据。
|
||||
- **异常**:操作失败抛出 `StatementError` 异常。
|
||||
- `def bind_param_batch(self, binds, add_batch=True)`
|
||||
- **接口说明**:绑定多组数据并提交。
|
||||
- **参数说明**:
|
||||
- `binds`: 绑定数据。
|
||||
- `add_batch`: 是否提交绑定数据。
|
||||
- **异常**:操作失败抛出 `StatementError` 异常。
|
||||
- `def add_batch(self)`
|
||||
- **接口说明**:提交绑定数据。
|
||||
- **异常**:操作失败抛出 `StatementError` 异常。
|
||||
- `def execute(self)`
|
||||
- **接口说明**:执行将绑定的数据全部写入。
|
||||
- **异常**:操作失败抛出 `StatementError` 异常。
|
||||
- `def affected_rows(self)`
|
||||
- **接口说明**: 获取写入条数。
|
||||
- **返回值**:写入条数。
|
||||
- `def close(&self)`
|
||||
- **接口说明**: 关闭 stmt 对象。
|
||||
- `def execute(self) -> int:`
|
||||
- **接口说明**:执行将绑定数据全部写入
|
||||
- **返回值**:影响行数
|
||||
- **异常**:操作失败抛出 `QueryError` 异常。
|
||||
- `def result(self)`
|
||||
- **接口说明**: 获取参数绑定查询结果集
|
||||
- **返回值**:返回 TaosResult 对象
|
||||
- `def close(self)`
|
||||
- **接口说明**: 关闭 stmt2 对象
|
||||
|
||||
|
||||
#### 数据订阅
|
||||
- **创建消费者支持属性列表**:
|
||||
|
|
|
@ -1,4 +1,8 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Run cleanup function on exit
|
||||
trap cleanup EXIT
|
||||
|
||||
# define default timezone
|
||||
DEFAULT_TIMEZONE="Asia/Shanghai"
|
||||
|
||||
|
@ -64,6 +68,9 @@ PROCESS_EXPORTER_BINARY="/usr/local/bin/process-exporter"
|
|||
# Define fstab input
|
||||
FSTAB_LINE="share-server.platform.tdengine.dev:/mnt/share_server /mnt/share_server nfs rw,sync,_netdev 0 0"
|
||||
|
||||
# Results need to be stored when source
|
||||
SOURCE_RESULTS=""
|
||||
|
||||
# ANSI color codes
|
||||
GREEN='\033[0;32m' # Green color
|
||||
RED='\033[0;31m' # Red color
|
||||
|
@ -146,9 +153,14 @@ help() {
|
|||
echo " config_coredump - Configure core dump settings"
|
||||
echo " disable_service - Disable specified services"
|
||||
echo " install_python - Install Python and pip"
|
||||
echo " install_pyenv - Install Pyenv"
|
||||
echo " install_python_via_pyenv - Install Python via pyenv"
|
||||
echo " install_java - Install Java"
|
||||
echo " install_maven - Install Maven"
|
||||
echo " install_java_via_sdkman - Install Java via sdkman"
|
||||
echo " install_maven_via_sdkman - Install Maven via sdkman"
|
||||
echo " deploy_go - Deploy Go environment"
|
||||
echo " install_gvm - Install GVM"
|
||||
echo " install_go_via_gvm - Install Go via GVM"
|
||||
echo " deploy_rust - Deploy Rust environment"
|
||||
echo " install_node - Install Node via package manager or binary"
|
||||
echo " install_node_via_nvm - Install Node via NVM"
|
||||
|
@ -786,7 +798,7 @@ update_redhat_gcc() {
|
|||
update_redhat_tmux() {
|
||||
echo "Downloading the latest version of tmux..."
|
||||
cd /usr/local/src || exit
|
||||
latest_tmux_version=$(curl -s https://api.github.com/repos/tmux/tmux/releases/latest | grep -Po '"tag_name": "\K.*?(?=")')
|
||||
latest_tmux_version=$(curl --retry 10 --retry-delay 5 --retry-max-time 120 -s https://api.github.com/repos/tmux/tmux/releases/latest | grep -Po '"tag_name": "\K.*?(?=")')
|
||||
wget https://github.com/tmux/tmux/releases/download/"${latest_tmux_version}"/tmux-"${latest_tmux_version}".tar.gz
|
||||
|
||||
echo "Extracting tmux ${latest_tmux_version}..."
|
||||
|
@ -838,6 +850,7 @@ deploy_tmux() {
|
|||
# }
|
||||
|
||||
# Install Java
|
||||
# shellcheck disable=SC2120
|
||||
install_java() {
|
||||
echo -e "${YELLOW}Installing Java...${NO_COLOR}"
|
||||
# Specify the major JDK version to search for; default is set to 17 if not specified
|
||||
|
@ -917,6 +930,7 @@ install_java() {
|
|||
INSTALLED_VERSION=$("$JAVA_HOME"/bin/java --version 2>&1)
|
||||
if echo "$INSTALLED_VERSION" | grep -q "openjdk $DEFAULT_JDK_VERSION"; then
|
||||
echo -e "${GREEN}Java installed successfully.${NO_COLOR}"
|
||||
SOURCE_RESULTS+="source /root/.bashrc # For openjdk\n"
|
||||
else
|
||||
echo -e "${YELLOW}Java version not match.${NO_COLOR}"
|
||||
exit 1
|
||||
|
@ -925,35 +939,118 @@ install_java() {
|
|||
|
||||
# Install sdkman
|
||||
install_sdkman() {
|
||||
install_package zip unzip
|
||||
echo -e "${YELLOW}Installing SDKMAN...${NO_COLOR}"
|
||||
if [ -d "$HOME/.sdkman" ]; then
|
||||
echo -e "${GREEN}SDKMAN is already installed.${NO_COLOR}"
|
||||
else
|
||||
echo -e "${YELLOW}Installing SDKMAN...${NO_COLOR}"
|
||||
curl -s "https://get.sdkman.io" | bash
|
||||
install_package zip unzip
|
||||
curl --retry 10 --retry-delay 5 --retry-max-time 120 -s "https://get.sdkman.io" | bash
|
||||
fi
|
||||
}
|
||||
|
||||
# Install gvm
|
||||
install_gvm() {
|
||||
echo -e "${YELLOW}Installing GVM...${NO_COLOR}"
|
||||
if [ -d "$HOME/.gvm" ]; then
|
||||
echo -e "${GREEN}GVM is already installed.${NO_COLOR}"
|
||||
else
|
||||
install_package bison gcc make
|
||||
bash < <(curl --retry 10 --retry-delay 5 --retry-max-time 120 -s -S -L https://raw.githubusercontent.com/moovweb/gvm/master/binscripts/gvm-installer)
|
||||
source $HOME/.gvm/scripts/gvm
|
||||
gvm version
|
||||
check_status "Failed to install GVM" "GVM installed successfully." $?
|
||||
add_config_if_not_exist "export GO111MODULE=on" "$BASH_RC"
|
||||
add_config_if_not_exist "export GOPROXY=https://goproxy.cn,direct" "$BASH_RC"
|
||||
add_config_if_not_exist "export GO_BINARY_BASE_URL=https://mirrors.aliyun.com/golang/" "$BASH_RC"
|
||||
add_config_if_not_exist "export GOROOT_BOOTSTRAP=$GOROOT" "$BASH_RC"
|
||||
fi
|
||||
SOURCE_RESULTS+="source $HOME/.gvm/scripts/gvm # For gvm\n"
|
||||
}
|
||||
|
||||
# enable pyenv
|
||||
enable_pyenv() {
|
||||
export PATH="$HOME/.pyenv/bin:$PATH"
|
||||
eval "$(pyenv init --path)"
|
||||
eval "$(pyenv init -)"
|
||||
}
|
||||
|
||||
# Install pyenv
|
||||
install_pyenv() {
|
||||
echo -e "${YELLOW}Installing Pyenv...${NO_COLOR}"
|
||||
if [ -d "$HOME/.pyenv" ]; then
|
||||
echo -e "${GREEN}Pyenv is already installed.${NO_COLOR}"
|
||||
else
|
||||
curl -L https://gitee.com/xinghuipeng/pyenv-installer/raw/master/bin/pyenv-installer | bash
|
||||
enable_pyenv
|
||||
add_config_if_not_exist "export PATH=\"\$HOME/.pyenv/bin:\$PATH\"" "$BASH_RC"
|
||||
add_config_if_not_exist "eval \"\$(pyenv init --path)\"" "$BASH_RC"
|
||||
add_config_if_not_exist "eval \"\$(pyenv init -)\"" "$BASH_RC"
|
||||
pyenv --version
|
||||
check_status "Failed to install Pyenv" "Pyenv installed successfully." $?
|
||||
fi
|
||||
SOURCE_RESULTS+="source $BASH_RC For: pyenv/python\n"
|
||||
}
|
||||
|
||||
# Install python via pyenv
|
||||
install_python_via_pyenv() {
|
||||
echo -e "${YELLOW}Installing Python via Pyenv...${NO_COLOR}"
|
||||
if [ -f /etc/debian_version ]; then
|
||||
install_package gcc make build-essential libssl-dev zlib1g-dev libbz2-dev libreadline-dev libsqlite3-dev wget curl llvm libncurses5-dev libncursesw5-dev xz-utils tk-dev libffi-dev liblzma-dev
|
||||
elif [ -f /etc/redhat-release ]; then
|
||||
install_package gcc zlib zlib-devel libffi libffi-devel readline-devel openssl-devel openssl11 openssl11-devel
|
||||
else
|
||||
echo "Unsupported Linux distribution."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -n "$1" ]; then
|
||||
DEFAULT_PYTHON_VERSION="$1"
|
||||
else
|
||||
DEFAULT_PYTHON_VERSION="3.10.12"
|
||||
fi
|
||||
install_pyenv
|
||||
enable_pyenv
|
||||
pyenv install "$DEFAULT_PYTHON_VERSION"
|
||||
pyenv global "$DEFAULT_PYTHON_VERSION"
|
||||
python --version
|
||||
check_status "Failed to install Python" "Python installed successfully." $?
|
||||
}
|
||||
|
||||
# Install Maven
|
||||
# shellcheck disable=SC2120
|
||||
install_maven() {
|
||||
install_maven_via_sdkman() {
|
||||
echo -e "${YELLOW}Installing maven...${NO_COLOR}"
|
||||
if [ -n "$1" ]; then
|
||||
DEFAULT_MVN_VERSION="$1"
|
||||
install_sdkman
|
||||
if [ -f "$HOME/.sdkman/bin/sdkman-init.sh" ]; then
|
||||
source "$HOME/.sdkman/bin/sdkman-init.sh"
|
||||
fi
|
||||
[[ -s "$HOME/.sdkman/bin/sdkman-init.sh" ]] && source "$HOME/.sdkman/bin/sdkman-init.sh"
|
||||
# 3.2.5
|
||||
sdk install maven "$DEFAULT_MVN_VERSION"
|
||||
yes | sdk install maven "$DEFAULT_MVN_VERSION"
|
||||
else
|
||||
install_package "maven"
|
||||
fi
|
||||
[[ -s "$HOME/.sdkman/bin/sdkman-init.sh" ]] && source "$HOME/.sdkman/bin/sdkman-init.sh"
|
||||
mvn -version
|
||||
check_status "Failed to install maven" "Maven installed successfully." $?
|
||||
}
|
||||
|
||||
install_java_via_sdkman() {
|
||||
echo -e "${YELLOW}Installing java...${NO_COLOR}"
|
||||
if [ -n "$1" ]; then
|
||||
DEFAULT_JDK_VERSION="$1"
|
||||
else
|
||||
DEFAULT_JDK_VERSION="17"
|
||||
fi
|
||||
install_sdkman
|
||||
[[ -s "$HOME/.sdkman/bin/sdkman-init.sh" ]] && source "$HOME/.sdkman/bin/sdkman-init.sh"
|
||||
yes | sdk install java "$DEFAULT_JDK_VERSION-open"
|
||||
[[ -s "$HOME/.sdkman/bin/sdkman-init.sh" ]] && source "$HOME/.sdkman/bin/sdkman-init.sh"
|
||||
java -version
|
||||
check_status "Failed to install java" "Java installed successfully." $?
|
||||
SOURCE_RESULTS+="source $HOME/.sdkman/bin/sdkman-init.sh # For sdkman/java/maven\n"
|
||||
}
|
||||
|
||||
# Install Go
|
||||
deploy_go() {
|
||||
# Define the installation location for Go
|
||||
|
@ -987,6 +1084,31 @@ deploy_go() {
|
|||
# Apply the environment variables
|
||||
$GO_INSTALL_DIR/bin/go version
|
||||
check_status "Failed to install GO" "Install GO successfully" $?
|
||||
SOURCE_RESULTS+="source $BASH_RC # For golang\n"
|
||||
}
|
||||
|
||||
# Install Go via gvm
|
||||
install_go_via_gvm() {
|
||||
echo -e "${YELLOW}Installing Go...${NO_COLOR}"
|
||||
if [ -n "$1" ]; then
|
||||
DEFAULT_GO_VERSION="$1"
|
||||
else
|
||||
DEFAULT_GO_VERSION="1.23.0"
|
||||
fi
|
||||
install_gvm
|
||||
source $HOME/.gvm/scripts/gvm
|
||||
export GO111MODULE=on
|
||||
export GOPROXY=https://goproxy.cn,direct
|
||||
export GO_BINARY_BASE_URL=https://mirrors.aliyun.com/golang/
|
||||
export GOROOT_BOOTSTRAP=$GOROOT
|
||||
|
||||
gvm install go"$DEFAULT_GO_VERSION" -B
|
||||
gvm use go"$DEFAULT_GO_VERSION"
|
||||
gvm use go"$DEFAULT_GO_VERSION" --default
|
||||
|
||||
go version
|
||||
check_status "Failed to install Go" "Go installed successfully." $?
|
||||
SOURCE_RESULTS+="source $BASH_RC # For golang\n"
|
||||
}
|
||||
|
||||
# Function to install Rust and Cargo
|
||||
|
@ -1037,6 +1159,7 @@ deploy_rust() {
|
|||
# Install cargo-make
|
||||
cargo install cargo-make
|
||||
check_status "Failed to install Rust" "Install Rust successfully" $?
|
||||
SOURCE_RESULTS+="source $BASH_RC && source $HOME/.cargo/env # For cargo/rust\n"
|
||||
else
|
||||
echo "Rust is already installed."
|
||||
fi
|
||||
|
@ -1063,7 +1186,7 @@ install_node_in_ubuntu18.04() {
|
|||
NODE_DISTRO="node-v$DEFAULT_NODE_VERSION-linux-x64"
|
||||
update_ubuntu_gcc_18.04
|
||||
echo "Installing Node..."
|
||||
curl -O https://nodejs.org/dist/v22.0.0/node-v22.0.0.tar.gz
|
||||
curl --retry 10 --retry-delay 5 --retry-max-time 120 -O https://nodejs.org/dist/v22.0.0/node-v22.0.0.tar.gz
|
||||
tar -xzf node-v22.0.0.tar.gz
|
||||
cd node-v22.0.0 || exit
|
||||
./configure
|
||||
|
@ -1115,12 +1238,13 @@ install_node_via_nvm () {
|
|||
|
||||
# Install NVM
|
||||
if ! command -v nvm &> /dev/null; then
|
||||
NVM_VERSION=$(curl -s https://api.github.com/repos/nvm-sh/nvm/releases/latest | grep -oP '"tag_name": "\K(.*)(?=")')
|
||||
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/"$NVM_VERSION"/install.sh | bash
|
||||
NVM_VERSION=$(curl --retry 10 --retry-delay 5 --retry-max-time 120 -s https://api.github.com/repos/nvm-sh/nvm/releases/latest | grep -oP '"tag_name": "\K(.*)(?=")')
|
||||
curl --retry 10 --retry-delay 5 --retry-max-time 120 -o- https://raw.githubusercontent.com/nvm-sh/nvm/"$NVM_VERSION"/install.sh | bash
|
||||
export NVM_DIR="$HOME/.nvm"
|
||||
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"
|
||||
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion"
|
||||
echo -e "${GREEN}NVM installed successfully.${NO_COLOR}"
|
||||
SOURCE_RESULTS+="source $NVM_DIR/nvm.sh && source $NVM_DIR/bash_completion # For nvm/node/npm/yarn/pnpm\n"
|
||||
else
|
||||
echo -e "${GREEN}NVM is already installed.${NO_COLOR}"
|
||||
fi
|
||||
|
@ -1655,6 +1779,19 @@ config_cloud_init() {
|
|||
# cloud-init clean --logs
|
||||
}
|
||||
|
||||
cleanup() {
|
||||
if [ -n "$SOURCE_RESULTS" ]; then
|
||||
echo -e "${YELLOW}===========================================\n${NO_COLOR}"
|
||||
echo -e "${YELLOW}Installation complete! \n${NO_COLOR}"
|
||||
echo -e "${YELLOW}Some tools require you to manually source${NO_COLOR}"
|
||||
echo -e "${YELLOW}or restart your terminal to take effect.\n${NO_COLOR}"
|
||||
echo -e "${YELLOW}===========================================\n${NO_COLOR}"
|
||||
echo -e "${YELLOW}$SOURCE_RESULTS${NO_COLOR}"
|
||||
else
|
||||
echo -e "${YELLOW}Installation complete \n${NO_COLOR}"
|
||||
fi
|
||||
}
|
||||
|
||||
# Clone a repository with a specified target directory
|
||||
clone_repo_with_rename() {
|
||||
local repo_url="$1"
|
||||
|
@ -1751,16 +1888,17 @@ clone_repos() {
|
|||
|
||||
new_funcs() {
|
||||
echo "Adding test..."
|
||||
install_python 3.10.12
|
||||
# install_java 21
|
||||
# install_node 16.20.2
|
||||
# install_maven 3.2.5
|
||||
install_python_via_pyenv 3.10.12
|
||||
install_java_via_sdkman 21.0.2
|
||||
install_node 16.20.2
|
||||
install_maven_via_sdkman 3.2.5
|
||||
deploy_rust
|
||||
}
|
||||
|
||||
# deploy TDasset
|
||||
TDasset() {
|
||||
install_java 21
|
||||
install_maven 3.9.9
|
||||
install_java_via_sdkman 21.0.2
|
||||
install_maven_via_sdkman 3.9.9
|
||||
# not supported in centos7/ubuntu18 because of the old version of glibc
|
||||
install_node_via_nvm 22.0.0
|
||||
install_pnpm
|
||||
|
@ -1768,18 +1906,18 @@ TDasset() {
|
|||
|
||||
# deploy TDinternal/TDengine/taosx
|
||||
TDinternal() {
|
||||
deploy_go
|
||||
install_go_via_gvm 1.23.3
|
||||
deploy_rust
|
||||
install_java 17
|
||||
install_maven 3.9.0
|
||||
install_java_via_sdkman 17
|
||||
install_maven_via_sdkman 3.9.9
|
||||
install_node_via_nvm 16.20.2
|
||||
install_python 3.10.12
|
||||
install_python_via_pyenv 3.10.12
|
||||
install_via_pip pandas psutil fabric2 requests faker simplejson toml pexpect tzlocal distro decorator loguru hyperloglog toml taospy taos-ws-py
|
||||
}
|
||||
|
||||
# deploy TDgpt
|
||||
TDgpt() {
|
||||
install_python 3.10.12
|
||||
install_python_via_pyenv 3.10.12
|
||||
}
|
||||
|
||||
# deploy taos-test-framework
|
||||
|
@ -1854,7 +1992,7 @@ deploy_dev() {
|
|||
install_python
|
||||
install_pip_pkg
|
||||
install_java
|
||||
install_maven
|
||||
install_maven_via_sdkman
|
||||
deploy_go
|
||||
deploy_rust
|
||||
install_node
|
||||
|
@ -1910,6 +2048,9 @@ main() {
|
|||
replace_sources)
|
||||
replace_sources
|
||||
;;
|
||||
update)
|
||||
update
|
||||
;;
|
||||
upgrade)
|
||||
upgrade
|
||||
;;
|
||||
|
@ -1937,14 +2078,23 @@ main() {
|
|||
install_python)
|
||||
install_python
|
||||
;;
|
||||
install_pyenv)
|
||||
install_pyenv
|
||||
;;
|
||||
install_python_via_pyenv)
|
||||
install_python_via_pyenv
|
||||
;;
|
||||
install_pip_pkg)
|
||||
install_pip_pkg
|
||||
;;
|
||||
install_java)
|
||||
install_java
|
||||
;;
|
||||
install_maven)
|
||||
install_maven
|
||||
install_java_via_sdkman)
|
||||
install_java_via_sdkman
|
||||
;;
|
||||
install_maven_via_sdkman)
|
||||
install_maven_via_sdkman
|
||||
;;
|
||||
deploy_cmake)
|
||||
deploy_cmake
|
||||
|
@ -1961,6 +2111,12 @@ main() {
|
|||
deploy_go)
|
||||
deploy_go
|
||||
;;
|
||||
install_gvm)
|
||||
install_gvm
|
||||
;;
|
||||
install_go_via_gvm)
|
||||
install_go_via_gvm
|
||||
;;
|
||||
deploy_rust)
|
||||
deploy_rust
|
||||
;;
|
||||
|
|
|
@ -207,7 +207,7 @@ int32_t tqSendDataRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq*
|
|||
(void)tFormatOffset(buf1, TSDB_OFFSET_LEN, &(pRsp->reqOffset));
|
||||
(void)tFormatOffset(buf2, TSDB_OFFSET_LEN, &(pRsp->rspOffset));
|
||||
|
||||
tqDebug("tmq poll vgId:%d consumer:0x%" PRIx64 " (epoch %d) send rsp, block num:%d, req:%s, rsp:%s,QID:0x%" PRIx64,
|
||||
tqDebug("tmq poll vgId:%d consumer:0x%" PRIx64 " (epoch %d) start to send rsp, block num:%d, req:%s, rsp:%s,QID:0x%" PRIx64,
|
||||
vgId, pReq->consumerId, pReq->epoch, pRsp->blockNum, buf1, buf2, pReq->reqId);
|
||||
|
||||
return tqDoSendDataRsp(&pMsg->info, pRsp, pReq->epoch, pReq->consumerId, type, sver, ever);
|
||||
|
|
|
@ -197,7 +197,9 @@ bool isValValidForTable(STqHandle* pHandle, SWalCont* pHead) {
|
|||
|
||||
end:
|
||||
tDecoderClear(&dcoder);
|
||||
return tbSuid == realTbSuid;
|
||||
bool tmp = tbSuid == realTbSuid;
|
||||
tqDebug("%s suid:%"PRId64" realSuid:%"PRId64" return:%d", __FUNCTION__, tbSuid, realTbSuid, tmp);
|
||||
return tmp;
|
||||
}
|
||||
|
||||
int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, uint64_t reqId) {
|
||||
|
@ -262,6 +264,8 @@ int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, uint64_t
|
|||
|
||||
END:
|
||||
*fetchOffset = offset;
|
||||
tqDebug("vgId:%d, end to fetch wal, code:%d , index:%" PRId64 ", last:%" PRId64 " commit:%" PRId64 ", applied:%" PRId64 ", 0x%" PRIx64,
|
||||
vgId, code, offset, lastVer, committedVer, appliedVer, id);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -273,6 +277,8 @@ bool tqGetTablePrimaryKey(STqReader* pReader) {
|
|||
}
|
||||
|
||||
void tqSetTablePrimaryKey(STqReader* pReader, int64_t uid) {
|
||||
tqDebug("%s:%p uid:%"PRId64, __FUNCTION__ , pReader, uid);
|
||||
|
||||
if (pReader == NULL) {
|
||||
return;
|
||||
}
|
||||
|
@ -286,6 +292,7 @@ void tqSetTablePrimaryKey(STqReader* pReader, int64_t uid) {
|
|||
}
|
||||
|
||||
STqReader* tqReaderOpen(SVnode* pVnode) {
|
||||
tqDebug("%s:%p", __FUNCTION__ , pVnode);
|
||||
if (pVnode == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
@ -317,6 +324,7 @@ STqReader* tqReaderOpen(SVnode* pVnode) {
|
|||
}
|
||||
|
||||
void tqReaderClose(STqReader* pReader) {
|
||||
tqDebug("%s:%p", __FUNCTION__ , pReader);
|
||||
if (pReader == NULL) return;
|
||||
|
||||
// close wal reader
|
||||
|
@ -485,26 +493,25 @@ bool tqNextBlockInWal(STqReader* pReader, const char* id, int sourceExcluded) {
|
|||
}
|
||||
|
||||
int32_t tqReaderSetSubmitMsg(STqReader* pReader, void* msgStr, int32_t msgLen, int64_t ver) {
|
||||
if (pReader == NULL) {
|
||||
if (pReader == NULL) {
|
||||
return TSDB_CODE_INVALID_PARA;
|
||||
}
|
||||
pReader->msg.msgStr = msgStr;
|
||||
pReader->msg.msgLen = msgLen;
|
||||
pReader->msg.ver = ver;
|
||||
|
||||
tqDebug("tq reader set msg %p %d", msgStr, msgLen);
|
||||
tqDebug("tq reader set msg pointer:%p, msg len:%d", msgStr, msgLen);
|
||||
SDecoder decoder = {0};
|
||||
|
||||
tDecoderInit(&decoder, pReader->msg.msgStr, pReader->msg.msgLen);
|
||||
int32_t code = tDecodeSubmitReq(&decoder, &pReader->submit);
|
||||
tDecoderClear(&decoder);
|
||||
|
||||
if (code != 0) {
|
||||
tDecoderClear(&decoder);
|
||||
tqError("DecodeSSubmitReq2 error, msgLen:%d, ver:%" PRId64, msgLen, ver);
|
||||
return code;
|
||||
}
|
||||
|
||||
tDecoderClear(&decoder);
|
||||
return 0;
|
||||
return code;
|
||||
}
|
||||
|
||||
SWalReader* tqGetWalReader(STqReader* pReader) {
|
||||
|
@ -529,63 +536,63 @@ int64_t tqGetResultBlockTime(STqReader* pReader) {
|
|||
}
|
||||
|
||||
bool tqNextBlockImpl(STqReader* pReader, const char* idstr) {
|
||||
if (pReader == NULL || pReader->msg.msgStr == NULL) {
|
||||
return false;
|
||||
}
|
||||
|
||||
int32_t numOfBlocks = taosArrayGetSize(pReader->submit.aSubmitTbData);
|
||||
while (pReader->nextBlk < numOfBlocks) {
|
||||
tqDebug("try next data block, len:%d ver:%" PRId64 " index:%d/%d, %s", pReader->msg.msgLen, pReader->msg.ver,
|
||||
(pReader->nextBlk + 1), numOfBlocks, idstr);
|
||||
|
||||
SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk);
|
||||
if (pSubmitTbData == NULL) {
|
||||
return false;
|
||||
}
|
||||
if (pReader->tbIdHash == NULL) {
|
||||
return true;
|
||||
}
|
||||
|
||||
void* ret = taosHashGet(pReader->tbIdHash, &pSubmitTbData->uid, sizeof(int64_t));
|
||||
if (ret != NULL) {
|
||||
tqDebug("block found, ver:%" PRId64 ", uid:%" PRId64 ", %s", pReader->msg.ver, pSubmitTbData->uid, idstr);
|
||||
return true;
|
||||
} else {
|
||||
tqDebug("discard submit block, uid:%" PRId64 ", total queried tables:%d continue %s", pSubmitTbData->uid,
|
||||
taosHashGetSize(pReader->tbIdHash), idstr);
|
||||
}
|
||||
|
||||
pReader->nextBlk++;
|
||||
}
|
||||
|
||||
tDestroySubmitReq(&pReader->submit, TSDB_MSG_FLG_DECODE);
|
||||
pReader->nextBlk = 0;
|
||||
pReader->msg.msgStr = NULL;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool tqNextDataBlockFilterOut(STqReader* pReader, SHashObj* filterOutUids) {
|
||||
if (pReader == NULL || pReader->msg.msgStr == NULL) return false;
|
||||
int32_t code = false;
|
||||
int32_t lino = 0;
|
||||
int64_t uid = 0;
|
||||
TSDB_CHECK_NULL(pReader, code, lino, END, false);
|
||||
TSDB_CHECK_NULL(pReader->msg.msgStr, code, lino, END, false);
|
||||
TSDB_CHECK_NULL(pReader->tbIdHash, code, lino, END, true);
|
||||
|
||||
int32_t blockSz = taosArrayGetSize(pReader->submit.aSubmitTbData);
|
||||
while (pReader->nextBlk < blockSz) {
|
||||
SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk);
|
||||
if (pSubmitTbData == NULL) return false;
|
||||
if (filterOutUids == NULL) return true;
|
||||
TSDB_CHECK_NULL(pSubmitTbData, code, lino, END, false);
|
||||
uid = pSubmitTbData->uid;
|
||||
void* ret = taosHashGet(pReader->tbIdHash, &pSubmitTbData->uid, sizeof(int64_t));
|
||||
TSDB_CHECK_CONDITION(ret == NULL, code, lino, END, true);
|
||||
|
||||
void* ret = taosHashGet(filterOutUids, &pSubmitTbData->uid, sizeof(int64_t));
|
||||
if (ret == NULL) {
|
||||
return true;
|
||||
}
|
||||
tqDebug("iterator data block in hash continue, progress:%d/%d, total queried tables:%d, uid:%"PRId64, pReader->nextBlk, blockSz, taosHashGetSize(pReader->tbIdHash), uid);
|
||||
pReader->nextBlk++;
|
||||
}
|
||||
|
||||
tDestroySubmitReq(&pReader->submit, TSDB_MSG_FLG_DECODE);
|
||||
pReader->nextBlk = 0;
|
||||
pReader->msg.msgStr = NULL;
|
||||
tqDebug("iterator data block end, block progress:%d/%d, uid:%"PRId64, pReader->nextBlk, blockSz, uid);
|
||||
|
||||
return false;
|
||||
END:
|
||||
tqDebug("%s:%d return:%s, uid:%"PRId64, __FUNCTION__, lino, code?"true":"false", uid);
|
||||
return code;
|
||||
}
|
||||
|
||||
bool tqNextDataBlockFilterOut(STqReader* pReader, SHashObj* filterOutUids) {
|
||||
int32_t code = false;
|
||||
int32_t lino = 0;
|
||||
int64_t uid = 0;
|
||||
|
||||
TSDB_CHECK_NULL(pReader, code, lino, END, false);
|
||||
TSDB_CHECK_NULL(pReader->msg.msgStr, code, lino, END, false);
|
||||
TSDB_CHECK_NULL(filterOutUids, code, lino, END, true);
|
||||
|
||||
int32_t blockSz = taosArrayGetSize(pReader->submit.aSubmitTbData);
|
||||
while (pReader->nextBlk < blockSz) {
|
||||
SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk);
|
||||
TSDB_CHECK_NULL(pSubmitTbData, code, lino, END, false);
|
||||
uid = pSubmitTbData->uid;
|
||||
void* ret = taosHashGet(filterOutUids, &pSubmitTbData->uid, sizeof(int64_t));
|
||||
TSDB_CHECK_NULL(ret, code, lino, END, true);
|
||||
tqDebug("iterator data block in hash continue, progress:%d/%d, uid:%" PRId64 "", pReader->nextBlk, blockSz, uid);
|
||||
pReader->nextBlk++;
|
||||
}
|
||||
|
||||
tDestroySubmitReq(&pReader->submit, TSDB_MSG_FLG_DECODE);
|
||||
pReader->nextBlk = 0;
|
||||
pReader->msg.msgStr = NULL;
|
||||
tqDebug("iterator data block end, block progress:%d/%d, uid:%"PRId64, pReader->nextBlk, blockSz, uid);
|
||||
|
||||
END:
|
||||
tqDebug("%s:%d return:%s, uid:%"PRId64, __FUNCTION__, lino, code?"true":"false", uid);
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t tqMaskBlock(SSchemaWrapper* pDst, SSDataBlock* pBlock, const SSchemaWrapper* pSrc, char* mask) {
|
||||
|
@ -709,7 +716,7 @@ int32_t tqRetrieveDataBlock(STqReader* pReader, SSDataBlock** pRes, const char*
|
|||
if (pReader == NULL || pRes == NULL) {
|
||||
return TSDB_CODE_INVALID_PARA;
|
||||
}
|
||||
tqTrace("tq reader retrieve data block %p, index:%d", pReader->msg.msgStr, pReader->nextBlk);
|
||||
tqDebug("tq reader retrieve data block %p, index:%d", pReader->msg.msgStr, pReader->nextBlk);
|
||||
int32_t code = 0;
|
||||
int32_t line = 0;
|
||||
STSchema* pTSchema = NULL;
|
||||
|
@ -846,7 +853,7 @@ int32_t tqRetrieveDataBlock(STqReader* pReader, SSDataBlock** pRes, const char*
|
|||
|
||||
END:
|
||||
if (code != 0) {
|
||||
tqError("tqRetrieveDataBlock failed, line:%d, code:%d", line, code);
|
||||
tqError("tqRetrieveDataBlock failed, line:%d, msg:%s", line, tstrerror(code));
|
||||
}
|
||||
taosMemoryFreeClear(pTSchema);
|
||||
return code;
|
||||
|
@ -876,10 +883,6 @@ END:
|
|||
static int32_t processBuildNew(STqReader* pReader, SSubmitTbData* pSubmitTbData, SArray* blocks, SArray* schemas,
|
||||
SSchemaWrapper* pSchemaWrapper, char* assigned, int32_t numOfRows, int32_t curRow,
|
||||
int32_t* lastRow) {
|
||||
if (pReader == NULL || pSubmitTbData == NULL || blocks == NULL || schemas == NULL || pSchemaWrapper == NULL ||
|
||||
assigned == NULL || lastRow == NULL) {
|
||||
return TSDB_CODE_INVALID_PARA;
|
||||
}
|
||||
int32_t code = 0;
|
||||
SSchemaWrapper* pSW = NULL;
|
||||
SSDataBlock* block = NULL;
|
||||
|
@ -909,15 +912,15 @@ static int32_t processBuildNew(STqReader* pReader, SSubmitTbData* pSubmitTbData,
|
|||
taosMemoryFreeClear(block);
|
||||
|
||||
END:
|
||||
if (code != 0) {
|
||||
tqError("processBuildNew failed, code:%d", code);
|
||||
}
|
||||
tDeleteSchemaWrapper(pSW);
|
||||
blockDataFreeRes(block);
|
||||
taosMemoryFree(block);
|
||||
return code;
|
||||
}
|
||||
static int32_t tqProcessColData(STqReader* pReader, SSubmitTbData* pSubmitTbData, SArray* blocks, SArray* schemas) {
|
||||
if (pReader == NULL || pSubmitTbData == NULL || blocks == NULL || schemas == NULL) {
|
||||
return TSDB_CODE_INVALID_PARA;
|
||||
}
|
||||
int32_t code = 0;
|
||||
int32_t curRow = 0;
|
||||
int32_t lastRow = 0;
|
||||
|
@ -931,6 +934,7 @@ static int32_t tqProcessColData(STqReader* pReader, SSubmitTbData* pSubmitTbData
|
|||
TQ_NULL_GO_TO_END(pCol);
|
||||
int32_t numOfRows = pCol->nVal;
|
||||
int32_t numOfCols = taosArrayGetSize(pCols);
|
||||
tqDebug("vgId:%d, tqProcessColData start, col num: %d, rows:%d", pReader->pWalReader->pWal->cfg.vgId, numOfCols, numOfRows);
|
||||
for (int32_t i = 0; i < numOfRows; i++) {
|
||||
bool buildNew = false;
|
||||
|
||||
|
@ -970,16 +974,16 @@ static int32_t tqProcessColData(STqReader* pReader, SSubmitTbData* pSubmitTbData
|
|||
}
|
||||
SSDataBlock* pLastBlock = taosArrayGetLast(blocks);
|
||||
pLastBlock->info.rows = curRow - lastRow;
|
||||
|
||||
tqDebug("vgId:%d, tqProcessColData end, col num: %d, rows:%d, block num:%d", pReader->pWalReader->pWal->cfg.vgId, numOfCols, numOfRows, (int)taosArrayGetSize(blocks));
|
||||
END:
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tqError("vgId:%d, process col data failed, code:%d", pReader->pWalReader->pWal->cfg.vgId, code);
|
||||
}
|
||||
taosMemoryFree(assigned);
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t tqProcessRowData(STqReader* pReader, SSubmitTbData* pSubmitTbData, SArray* blocks, SArray* schemas) {
|
||||
if (pReader == NULL || pSubmitTbData == NULL || blocks == NULL || schemas == NULL) {
|
||||
return TSDB_CODE_INVALID_PARA;
|
||||
}
|
||||
int32_t code = 0;
|
||||
STSchema* pTSchema = NULL;
|
||||
|
||||
|
@ -992,6 +996,8 @@ int32_t tqProcessRowData(STqReader* pReader, SSubmitTbData* pSubmitTbData, SArra
|
|||
SArray* pRows = pSubmitTbData->aRowP;
|
||||
int32_t numOfRows = taosArrayGetSize(pRows);
|
||||
pTSchema = tBuildTSchema(pSchemaWrapper->pSchema, pSchemaWrapper->nCols, pSchemaWrapper->version);
|
||||
TQ_NULL_GO_TO_END(pTSchema);
|
||||
tqDebug("vgId:%d, tqProcessRowData start, rows:%d", pReader->pWalReader->pWal->cfg.vgId, numOfRows);
|
||||
|
||||
for (int32_t i = 0; i < numOfRows; i++) {
|
||||
bool buildNew = false;
|
||||
|
@ -1030,17 +1036,18 @@ int32_t tqProcessRowData(STqReader* pReader, SSubmitTbData* pSubmitTbData, SArra
|
|||
SSDataBlock* pLastBlock = taosArrayGetLast(blocks);
|
||||
pLastBlock->info.rows = curRow - lastRow;
|
||||
|
||||
tqDebug("vgId:%d, tqProcessRowData end, rows:%d, block num:%d", pReader->pWalReader->pWal->cfg.vgId, numOfRows, (int)taosArrayGetSize(blocks));
|
||||
END:
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tqError("vgId:%d, process row data failed, code:%d", pReader->pWalReader->pWal->cfg.vgId, code);
|
||||
}
|
||||
taosMemoryFreeClear(pTSchema);
|
||||
taosMemoryFree(assigned);
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas, SSubmitTbData** pSubmitTbDataRet, int64_t *createTime) {
|
||||
if (pReader == NULL || blocks == NULL || schemas == NULL) {
|
||||
return TSDB_CODE_INVALID_PARA;
|
||||
}
|
||||
tqTrace("tq reader retrieve data block %p, %d", pReader->msg.msgStr, pReader->nextBlk);
|
||||
tqDebug("tq reader retrieve data block msg pointer:%p, index:%d", pReader->msg.msgStr, pReader->nextBlk);
|
||||
SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk);
|
||||
if (pSubmitTbData == NULL) {
|
||||
return terrno;
|
||||
@@ -16,15 +16,16 @@
#include "tq.h"

int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t numOfCols, int8_t precision) {
  if (pBlock == NULL || pRsp == NULL) {
    return TSDB_CODE_INVALID_PARA;
  }
  int32_t code = TDB_CODE_SUCCESS;
  int32_t lino = 0;
  void* buf = NULL;
  TSDB_CHECK_NULL(pBlock, code, lino, END, TSDB_CODE_INVALID_PARA);
  TSDB_CHECK_NULL(pRsp, code, lino, END, TSDB_CODE_INVALID_PARA);

  size_t dataEncodeBufSize = blockGetEncodeSize(pBlock);
  int32_t dataStrLen = sizeof(SRetrieveTableRspForTmq) + dataEncodeBufSize;
  void* buf = taosMemoryCalloc(1, dataStrLen);
  if (buf == NULL) {
    return terrno;
  }
  buf = taosMemoryCalloc(1, dataStrLen);
  TSDB_CHECK_NULL(buf, code, lino, END, terrno);

  SRetrieveTableRspForTmq* pRetrieve = (SRetrieveTableRspForTmq*)buf;
  pRetrieve->version = 1;

@@ -33,49 +34,49 @@ int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t
  pRetrieve->numOfRows = htobe64((int64_t)pBlock->info.rows);

  int32_t actualLen = blockEncode(pBlock, pRetrieve->data, dataEncodeBufSize, numOfCols);
  if(actualLen < 0){
    taosMemoryFree(buf);
    return terrno;
  }
  TSDB_CHECK_CONDITION(actualLen >= 0, code, lino, END, terrno);
  actualLen += sizeof(SRetrieveTableRspForTmq);
  if (taosArrayPush(pRsp->blockDataLen, &actualLen) == NULL){
    taosMemoryFree(buf);
    return terrno;
  }
  if (taosArrayPush(pRsp->blockData, &buf) == NULL) {
    taosMemoryFree(buf);
    return terrno;
  }
  TSDB_CHECK_NULL(taosArrayPush(pRsp->blockDataLen, &actualLen), code, lino, END, terrno);
  TSDB_CHECK_NULL(taosArrayPush(pRsp->blockData, &buf), code, lino, END, terrno);

  return TSDB_CODE_SUCCESS;
  tqDebug("add block data to block array, blockDataLen:%d, blockData:%p", actualLen, buf);
END:
  if (code != TSDB_CODE_SUCCESS) {
    taosMemoryFree(buf);
    tqError("%s failed at %d, failed to add block data to response:%s", __FUNCTION__, lino, tstrerror(code));
  }
  return code;
}
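
The hunk above swaps the early `taosMemoryFree(buf); return ...;` exits for `TSDB_CHECK_NULL`/`TSDB_CHECK_CONDITION` jumps to a single `END:` cleanup label. A minimal, self-contained sketch of that shape follows; the `CHECK_*` macros and `build_rsp` are simplified stand-ins, not the real TDengine definitions.

/* Illustrative sketch only -- simplified stand-ins for the TSDB_CHECK_* macros. */
#include <stdio.h>
#include <stdlib.h>

#define CHECK_NULL(p, code, lino, label, err) \
  do {                                        \
    if ((p) == NULL) {                        \
      (code) = (err);                         \
      (lino) = __LINE__;                      \
      goto label;                             \
    }                                         \
  } while (0)

#define CHECK_CODE(c, lino, label) \
  do {                             \
    if ((c) != 0) {                \
      (lino) = __LINE__;           \
      goto label;                  \
    }                              \
  } while (0)

static int build_rsp(size_t len) {
  int   code = 0;
  int   lino = 0;
  void *buf = NULL;

  buf = calloc(1, len);
  CHECK_NULL(buf, code, lino, END, -1);  /* jump to the single cleanup point */

  code = (len > 0) ? 0 : -2;             /* stand-in for blockEncode() etc. */
  CHECK_CODE(code, lino, END);

END:
  if (code != 0) {
    fprintf(stderr, "build_rsp failed at line %d, code %d\n", lino, code);
  }
  free(buf);                             /* freed exactly once, on every path */
  return code;
}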
|
||||
|
||||
static int32_t tqAddTbNameToRsp(const STQ* pTq, int64_t uid, SMqDataRsp* pRsp, int32_t n) {
|
||||
if (pRsp == NULL || pTq == NULL) {
|
||||
return TSDB_CODE_INVALID_PARA;
|
||||
}
|
||||
int32_t code = TDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
SMetaReader mr = {0};
|
||||
|
||||
TSDB_CHECK_NULL(pTq, code, lino, END, TSDB_CODE_INVALID_PARA);
|
||||
TSDB_CHECK_NULL(pRsp, code, lino, END, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
metaReaderDoInit(&mr, pTq->pVnode->pMeta, META_READER_LOCK);
|
||||
|
||||
int32_t code = metaReaderGetTableEntryByUidCache(&mr, uid);
|
||||
if (code < 0) {
|
||||
metaReaderClear(&mr);
|
||||
return code;
|
||||
}
|
||||
code = metaReaderGetTableEntryByUidCache(&mr, uid);
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
|
||||
for (int32_t i = 0; i < n; i++) {
|
||||
char* tbName = taosStrdup(mr.me.name);
|
||||
if (tbName == NULL) {
|
||||
metaReaderClear(&mr);
|
||||
return terrno;
|
||||
}
|
||||
TSDB_CHECK_NULL(tbName, code, lino, END, terrno);
|
||||
if(taosArrayPush(pRsp->blockTbName, &tbName) == NULL){
|
||||
tqError("failed to push tbName to blockTbName:%s", tbName);
|
||||
tqError("failed to push tbName to blockTbName:%s, uid:%"PRId64, tbName, uid);
|
||||
continue;
|
||||
}
|
||||
tqDebug("add tbName to response success tbname:%s, uid:%"PRId64, tbName, uid);
|
||||
}
|
||||
|
||||
END:
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tqError("%s failed at %d, failed to add tbName to response:%s, uid:%"PRId64, __FUNCTION__, lino, tstrerror(code), uid);
|
||||
}
|
||||
metaReaderClear(&mr);
|
||||
return 0;
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t getDataBlock(qTaskInfo_t task, const STqHandle* pHandle, int32_t vgId, SSDataBlock** res) {
|
||||
|
@ -96,109 +97,130 @@ int32_t getDataBlock(qTaskInfo_t task, const STqHandle* pHandle, int32_t vgId, S
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t tqScanData(STQ* pTq, STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset, const SMqPollReq* pRequest) {
|
||||
if (pTq == NULL || pHandle == NULL || pRsp == NULL || pOffset == NULL || pRequest == NULL){
|
||||
return TSDB_CODE_INVALID_PARA;
|
||||
}
|
||||
int32_t vgId = TD_VID(pTq->pVnode);
|
||||
static int32_t tqProcessReplayRsp(STQ* pTq, STqHandle* pHandle, SMqDataRsp* pRsp, const SMqPollReq* pRequest, SSDataBlock* pDataBlock, qTaskInfo_t task){
|
||||
int32_t code = 0;
|
||||
int32_t line = 0;
|
||||
int32_t lino = 0;
|
||||
|
||||
if (IS_OFFSET_RESET_TYPE(pRequest->reqOffset.type) && pHandle->block != NULL) {
|
||||
blockDataDestroy(pHandle->block);
|
||||
pHandle->block = NULL;
|
||||
}
|
||||
if (pHandle->block == NULL) {
|
||||
if (pDataBlock == NULL) {
|
||||
goto END;
|
||||
}
|
||||
|
||||
STqOffsetVal offset = {0};
|
||||
code = qStreamExtractOffset(task, &offset);
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
|
||||
pHandle->block = NULL;
|
||||
|
||||
code = createOneDataBlock(pDataBlock, true, &pHandle->block);
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
|
||||
pHandle->blockTime = offset.ts;
|
||||
tOffsetDestroy(&offset);
|
||||
int32_t vgId = TD_VID(pTq->pVnode);
|
||||
code = getDataBlock(task, pHandle, vgId, &pDataBlock);
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
}
|
||||
|
||||
const STqExecHandle* pExec = &pHandle->execHandle;
|
||||
code = tqAddBlockDataToRsp(pHandle->block, pRsp, pExec->numOfCols, pTq->pVnode->config.tsdbCfg.precision);
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
|
||||
pRsp->blockNum++;
|
||||
if (pDataBlock == NULL) {
|
||||
blockDataDestroy(pHandle->block);
|
||||
pHandle->block = NULL;
|
||||
} else {
|
||||
code = copyDataBlock(pHandle->block, pDataBlock);
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
|
||||
STqOffsetVal offset = {0};
|
||||
code = qStreamExtractOffset(task, &offset);
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
|
||||
pRsp->sleepTime = offset.ts - pHandle->blockTime;
|
||||
pHandle->blockTime = offset.ts;
|
||||
tOffsetDestroy(&offset);
|
||||
}
|
||||
|
||||
END:
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tqError("%s failed at %d, failed to process replay response:%s", __FUNCTION__, lino, tstrerror(code));
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t tqScanData(STQ* pTq, STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset, const SMqPollReq* pRequest) {
|
||||
int32_t code = 0;
|
||||
int32_t lino = 0;
|
||||
TSDB_CHECK_NULL(pRsp, code, lino, END, TSDB_CODE_INVALID_PARA);
|
||||
TSDB_CHECK_NULL(pTq, code, lino, END, TSDB_CODE_INVALID_PARA);
|
||||
TSDB_CHECK_NULL(pHandle, code, lino, END, TSDB_CODE_INVALID_PARA);
|
||||
TSDB_CHECK_NULL(pOffset, code, lino, END, TSDB_CODE_INVALID_PARA);
|
||||
TSDB_CHECK_NULL(pRequest, code, lino, END, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
int32_t vgId = TD_VID(pTq->pVnode);
|
||||
int32_t totalRows = 0;
|
||||
|
||||
const STqExecHandle* pExec = &pHandle->execHandle;
|
||||
qTaskInfo_t task = pExec->task;
|
||||
|
||||
code = qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType);
|
||||
TSDB_CHECK_CODE(code, line, END);
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
|
||||
qStreamSetSourceExcluded(task, pRequest->sourceExcluded);
|
||||
uint64_t st = taosGetTimestampMs();
|
||||
while (1) {
|
||||
SSDataBlock* pDataBlock = NULL;
|
||||
code = getDataBlock(task, pHandle, vgId, &pDataBlock);
|
||||
TSDB_CHECK_CODE(code, line, END);
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
|
||||
if (pRequest->enableReplay) {
|
||||
if (IS_OFFSET_RESET_TYPE(pRequest->reqOffset.type) && pHandle->block != NULL) {
|
||||
blockDataDestroy(pHandle->block);
|
||||
pHandle->block = NULL;
|
||||
}
|
||||
if (pHandle->block == NULL) {
|
||||
if (pDataBlock == NULL) {
|
||||
break;
|
||||
}
|
||||
|
||||
STqOffsetVal offset = {0};
|
||||
code = qStreamExtractOffset(task, &offset);
|
||||
TSDB_CHECK_CODE(code, line, END);
|
||||
|
||||
pHandle->block = NULL;
|
||||
|
||||
code = createOneDataBlock(pDataBlock, true, &pHandle->block);
|
||||
TSDB_CHECK_CODE(code, line, END);
|
||||
|
||||
pHandle->blockTime = offset.ts;
|
||||
tOffsetDestroy(&offset);
|
||||
code = getDataBlock(task, pHandle, vgId, &pDataBlock);
|
||||
TSDB_CHECK_CODE(code, line, END);
|
||||
}
|
||||
|
||||
code = tqAddBlockDataToRsp(pHandle->block, pRsp, pExec->numOfCols, pTq->pVnode->config.tsdbCfg.precision);
|
||||
TSDB_CHECK_CODE(code, line, END);
|
||||
|
||||
pRsp->blockNum++;
|
||||
if (pDataBlock == NULL) {
|
||||
blockDataDestroy(pHandle->block);
|
||||
pHandle->block = NULL;
|
||||
} else {
|
||||
code = copyDataBlock(pHandle->block, pDataBlock);
|
||||
TSDB_CHECK_CODE(code, line, END);
|
||||
|
||||
STqOffsetVal offset = {0};
|
||||
code = qStreamExtractOffset(task, &offset);
|
||||
TSDB_CHECK_CODE(code, line, END);
|
||||
|
||||
pRsp->sleepTime = offset.ts - pHandle->blockTime;
|
||||
pHandle->blockTime = offset.ts;
|
||||
tOffsetDestroy(&offset);
|
||||
}
|
||||
code = tqProcessReplayRsp(pTq, pHandle, pRsp, pRequest, pDataBlock, task);
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
break;
|
||||
} else {
|
||||
if (pDataBlock == NULL) {
|
||||
break;
|
||||
}
|
||||
code = tqAddBlockDataToRsp(pDataBlock, pRsp, pExec->numOfCols, pTq->pVnode->config.tsdbCfg.precision);
|
||||
TSDB_CHECK_CODE(code, line, END);
|
||||
}
|
||||
if (pDataBlock == NULL) {
|
||||
break;
|
||||
}
|
||||
code = tqAddBlockDataToRsp(pDataBlock, pRsp, pExec->numOfCols, pTq->pVnode->config.tsdbCfg.precision);
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
|
||||
pRsp->blockNum++;
|
||||
totalRows += pDataBlock->info.rows;
|
||||
if (totalRows >= tmqRowSize || (taosGetTimestampMs() - st > 1000)) {
|
||||
break;
|
||||
}
|
||||
pRsp->blockNum++;
|
||||
totalRows += pDataBlock->info.rows;
|
||||
if (totalRows >= tmqRowSize || (taosGetTimestampMs() - st > 1000)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
tqDebug("consumer:0x%" PRIx64 " vgId:%d tmq task executed finished, total blocks:%d, totalRows:%d",
|
||||
pHandle->consumerId, vgId, pRsp->blockNum, totalRows);
|
||||
tqDebug("consumer:0x%" PRIx64 " vgId:%d tmq task executed finished, total blocks:%d, totalRows:%d", pHandle->consumerId, vgId, pRsp->blockNum, totalRows);
|
||||
code = qStreamExtractOffset(task, &pRsp->rspOffset);
|
||||
|
||||
END:
|
||||
if (code != 0) {
|
||||
tqError("consumer:0x%" PRIx64 " vgId:%d tmq task executed error, line:%d code:%d", pHandle->consumerId, vgId, line,
|
||||
code);
|
||||
tqError("%s failed at %d, tmq task executed error msg:%s", __FUNCTION__, lino, tstrerror(code));
|
||||
}
|
||||
return code;
|
||||
}
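
tqScanData above keeps pulling blocks until either a row budget (`tmqRowSize`) or a roughly one-second time budget is exhausted. A stripped-down sketch of that loop, with the hypothetical `fetch_block` and limits standing in for the real reader:

/* Sketch of the row/time budget loop; names and limits are illustrative. */
#include <stdint.h>
#include <time.h>

#define ROW_BUDGET     4096   /* stand-in for tmqRowSize */
#define TIME_BUDGET_MS 1000

static int64_t now_ms(void) {
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* hypothetical data source: returns rows in the next block, 0 when drained */
extern int32_t fetch_block(void);

static void drain_with_budget(void) {
  int32_t totalRows = 0;
  int64_t st = now_ms();
  while (1) {
    int32_t rows = fetch_block();
    if (rows == 0) break;                 /* no more data */
    totalRows += rows;
    if (totalRows >= ROW_BUDGET || now_ms() - st > TIME_BUDGET_MS) {
      break;                              /* cap response size and latency */
    }
  }
}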
|
||||
|
||||
int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqBatchMetaRsp* pBatchMetaRsp, STqOffsetVal* pOffset) {
|
||||
if (pTq == NULL || pHandle == NULL || pRsp == NULL || pBatchMetaRsp == NULL || pOffset == NULL) {
|
||||
return TSDB_CODE_INVALID_PARA;
|
||||
}
|
||||
int32_t code = 0;
|
||||
int32_t lino = 0;
|
||||
char* tbName = NULL;
|
||||
SSchemaWrapper* pSW = NULL;
|
||||
TSDB_CHECK_NULL(pRsp, code, lino, END, TSDB_CODE_INVALID_PARA);
|
||||
TSDB_CHECK_NULL(pTq, code, lino, END, TSDB_CODE_INVALID_PARA);
|
||||
TSDB_CHECK_NULL(pHandle, code, lino, END, TSDB_CODE_INVALID_PARA);
|
||||
TSDB_CHECK_NULL(pOffset, code, lino, END, TSDB_CODE_INVALID_PARA);
|
||||
TSDB_CHECK_NULL(pBatchMetaRsp, code, lino, END, TSDB_CODE_INVALID_PARA);
|
||||
const STqExecHandle* pExec = &pHandle->execHandle;
|
||||
qTaskInfo_t task = pExec->task;
|
||||
int code = qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType);
|
||||
if (code != 0) {
|
||||
return code;
|
||||
}
|
||||
code = qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType);
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
|
||||
int32_t rowCnt = 0;
|
||||
while (1) {
|
||||
|
@ -206,52 +228,37 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqBat
|
|||
uint64_t ts = 0;
|
||||
tqDebug("tmqsnap task start to execute");
|
||||
code = qExecTask(task, &pDataBlock, &ts);
|
||||
if (code != 0) {
|
||||
tqError("vgId:%d, task exec error since %s", pTq->pVnode->config.vgId, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
tqDebug("tmqsnap task execute end, get %p", pDataBlock);
|
||||
|
||||
if (pDataBlock != NULL && pDataBlock->info.rows > 0) {
|
||||
if (pRsp->withTbName) {
|
||||
char* tbName = taosStrdup(qExtractTbnameFromTask(task));
|
||||
if (tbName == NULL) {
|
||||
tqError("vgId:%d, failed to add tbname to rsp msg, null", pTq->pVnode->config.vgId);
|
||||
return terrno;
|
||||
}
|
||||
if (taosArrayPush(pRsp->blockTbName, &tbName) == NULL){
|
||||
tqError("vgId:%d, failed to add tbname to rsp msg", pTq->pVnode->config.vgId);
|
||||
continue;
|
||||
}
|
||||
tbName = taosStrdup(qExtractTbnameFromTask(task));
|
||||
TSDB_CHECK_NULL(tbName, code, lino, END, terrno);
|
||||
TSDB_CHECK_NULL(taosArrayPush(pRsp->blockTbName, &tbName), code, lino, END, terrno);
|
||||
tbName = NULL;
|
||||
}
|
||||
if (pRsp->withSchema) {
|
||||
SSchemaWrapper* pSW = tCloneSSchemaWrapper(qExtractSchemaFromTask(task));
|
||||
if(taosArrayPush(pRsp->blockSchema, &pSW) == NULL){
|
||||
tqError("vgId:%d, failed to add schema to rsp msg", pTq->pVnode->config.vgId);
|
||||
continue;
|
||||
}
|
||||
pSW = tCloneSSchemaWrapper(qExtractSchemaFromTask(task));
|
||||
TSDB_CHECK_NULL(pSW, code, lino, END, terrno);
|
||||
TSDB_CHECK_NULL(taosArrayPush(pRsp->blockSchema, &pSW), code, lino, END, terrno);
|
||||
pSW = NULL;
|
||||
}
|
||||
|
||||
if (tqAddBlockDataToRsp(pDataBlock, pRsp, taosArrayGetSize(pDataBlock->pDataBlock),
|
||||
pTq->pVnode->config.tsdbCfg.precision) != 0) {
|
||||
tqError("vgId:%d, failed to add block to rsp msg", pTq->pVnode->config.vgId);
|
||||
continue;
|
||||
}
|
||||
code = tqAddBlockDataToRsp(pDataBlock, pRsp, taosArrayGetSize(pDataBlock->pDataBlock), pTq->pVnode->config.tsdbCfg.precision);
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
pRsp->blockNum++;
|
||||
rowCnt += pDataBlock->info.rows;
|
||||
if (rowCnt <= tmqRowSize) continue;
|
||||
|
||||
if (rowCnt <= tmqRowSize) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// get meta
|
||||
SMqBatchMetaRsp* tmp = qStreamExtractMetaMsg(task);
|
||||
if (taosArrayGetSize(tmp->batchMetaReq) > 0) {
|
||||
code = qStreamExtractOffset(task, &tmp->rspOffset);
|
||||
if (code) {
|
||||
return code;
|
||||
}
|
||||
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
*pBatchMetaRsp = *tmp;
|
||||
tqDebug("tmqsnap task get meta");
|
||||
break;
|
||||
|
@ -259,16 +266,13 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqBat
|
|||
|
||||
if (pDataBlock == NULL) {
|
||||
code = qStreamExtractOffset(task, pOffset);
|
||||
if (code) {
|
||||
break;
|
||||
}
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
|
||||
if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
|
||||
continue;
|
||||
}
|
||||
|
||||
tqDebug("tmqsnap vgId: %d, tsdb consume over, switch to wal, ver %" PRId64, TD_VID(pTq->pVnode),
|
||||
pHandle->snapshotVer + 1);
|
||||
tqDebug("tmqsnap vgId: %d, tsdb consume over, switch to wal, ver %" PRId64, TD_VID(pTq->pVnode), pHandle->snapshotVer + 1);
|
||||
code = qStreamExtractOffset(task, &pRsp->rspOffset);
|
||||
break;
|
||||
}
|
||||
|
@ -280,119 +284,93 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqBat
|
|||
}
|
||||
}
|
||||
|
||||
tqDebug("%s:%d success", __FUNCTION__, lino);
|
||||
END:
|
||||
if (code != 0){
|
||||
tqDebug("%s:%d failed, code:%s", __FUNCTION__, lino, tstrerror(code) );
|
||||
}
|
||||
taosMemoryFree(pSW);
|
||||
taosMemoryFree(tbName);
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t buildCreateTbInfo(SMqDataRsp* pRsp, SVCreateTbReq* pCreateTbReq){
|
||||
if (pRsp == NULL || pCreateTbReq == NULL) {
|
||||
return TSDB_CODE_INVALID_PARA;
|
||||
}
|
||||
int32_t code = 0;
|
||||
int32_t lino = 0;
|
||||
void* createReq = NULL;
|
||||
TSDB_CHECK_NULL(pRsp, code, lino, END, TSDB_CODE_INVALID_PARA);
|
||||
TSDB_CHECK_NULL(pCreateTbReq, code, lino, END, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
if (pRsp->createTableNum == 0) {
|
||||
pRsp->createTableLen = taosArrayInit(0, sizeof(int32_t));
|
||||
if (pRsp->createTableLen == NULL) {
|
||||
code = terrno;
|
||||
goto END;
|
||||
}
|
||||
TSDB_CHECK_NULL(pRsp->createTableLen, code, lino, END, terrno);
|
||||
pRsp->createTableReq = taosArrayInit(0, sizeof(void*));
|
||||
if (pRsp->createTableReq == NULL) {
|
||||
code = terrno;
|
||||
goto END;
|
||||
}
|
||||
TSDB_CHECK_NULL(pRsp->createTableReq, code, lino, END, terrno);
|
||||
}
|
||||
|
||||
uint32_t len = 0;
|
||||
tEncodeSize(tEncodeSVCreateTbReq, pCreateTbReq, len, code);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
goto END;
|
||||
}
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
createReq = taosMemoryCalloc(1, len);
|
||||
if (createReq == NULL){
|
||||
code = terrno;
|
||||
goto END;
|
||||
}
|
||||
TSDB_CHECK_NULL(createReq, code, lino, END, terrno);
|
||||
|
||||
SEncoder encoder = {0};
|
||||
tEncoderInit(&encoder, createReq, len);
|
||||
code = tEncodeSVCreateTbReq(&encoder, pCreateTbReq);
|
||||
tEncoderClear(&encoder);
|
||||
if (code < 0) {
|
||||
goto END;
|
||||
}
|
||||
if (taosArrayPush(pRsp->createTableLen, &len) == NULL){
|
||||
code = terrno;
|
||||
goto END;
|
||||
}
|
||||
if (taosArrayPush(pRsp->createTableReq, &createReq) == NULL){
|
||||
code = terrno;
|
||||
goto END;
|
||||
}
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
TSDB_CHECK_NULL(taosArrayPush(pRsp->createTableLen, &len), code, lino, END, terrno);
|
||||
TSDB_CHECK_NULL(taosArrayPush(pRsp->createTableReq, &createReq), code, lino, END, terrno);
|
||||
pRsp->createTableNum++;
|
||||
tqDebug("build create table info msg success");
|
||||
|
||||
return 0;
|
||||
END:
|
||||
taosMemoryFree(createReq);
|
||||
if (code != 0){
|
||||
tqError("%s failed at %d, failed to build create table info msg:%s", __FUNCTION__, lino, tstrerror(code));
|
||||
taosMemoryFree(createReq);
|
||||
}
|
||||
return code;
|
||||
}
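
buildCreateTbInfo follows the usual two-pass codec shape: `tEncodeSize` to measure, allocate exactly that much, then `tEncoderInit` plus encode into the buffer. A self-contained sketch of the same pattern with a hypothetical payload (not the SVCreateTbReq codec):

/* Sketch of the size-then-encode pattern; the payload and codec are hypothetical. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct { uint32_t a; uint64_t b; } Payload;

/* pass buf == NULL to ask only for the required size */
static int32_t encode_payload(uint8_t *buf, uint32_t cap, const Payload *p, uint32_t *len) {
  uint32_t need = sizeof(p->a) + sizeof(p->b);
  if (buf == NULL) { *len = need; return 0; }        /* first pass: size only */
  if (cap < need) return -1;
  memcpy(buf, &p->a, sizeof(p->a));
  memcpy(buf + sizeof(p->a), &p->b, sizeof(p->b));
  *len = need;
  return 0;
}

static int32_t build_blob(const Payload *p, uint8_t **out, uint32_t *outLen) {
  uint32_t len = 0;
  int32_t  code = encode_payload(NULL, 0, p, &len);  /* pass 1: compute size */
  if (code != 0) return code;

  uint8_t *buf = calloc(1, len);                     /* pass 2: allocate exactly */
  if (buf == NULL) return -1;

  code = encode_payload(buf, len, p, &len);          /* pass 3: encode into the buffer */
  if (code != 0) { free(buf); return code; }

  *out = buf;
  *outLen = len;
  return 0;
}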
|
||||
|
||||
static void tqProcessSubData(STQ* pTq, STqHandle* pHandle, SMqDataRsp* pRsp, int32_t* totalRows, int8_t sourceExcluded){
|
||||
if (pTq == NULL || pHandle == NULL || pRsp == NULL || totalRows == NULL) {
|
||||
return;
|
||||
}
|
||||
int32_t code = 0;
|
||||
STqExecHandle* pExec = &pHandle->execHandle;
|
||||
STqReader* pReader = pExec->pTqReader;
|
||||
int32_t lino = 0;
|
||||
SArray* pBlocks = NULL;
|
||||
SArray* pSchemas = NULL;
|
||||
|
||||
STqExecHandle* pExec = &pHandle->execHandle;
|
||||
STqReader* pReader = pExec->pTqReader;
|
||||
|
||||
pBlocks = taosArrayInit(0, sizeof(SSDataBlock));
|
||||
if (pBlocks == NULL) {
|
||||
code = terrno;
|
||||
goto END;
|
||||
}
|
||||
TSDB_CHECK_NULL(pBlocks, code, lino, END, terrno);
|
||||
pSchemas = taosArrayInit(0, sizeof(void*));
|
||||
if(pSchemas == NULL){
|
||||
code = terrno;
|
||||
goto END;
|
||||
}
|
||||
TSDB_CHECK_NULL(pSchemas, code, lino, END, terrno);
|
||||
|
||||
SSubmitTbData* pSubmitTbDataRet = NULL;
|
||||
int64_t createTime = INT64_MAX;
|
||||
code = tqRetrieveTaosxBlock(pReader, pBlocks, pSchemas, &pSubmitTbDataRet, &createTime);
|
||||
if (code != 0) {
|
||||
tqError("vgId:%d, failed to retrieve block", pTq->pVnode->config.vgId);
|
||||
goto END;
|
||||
}
|
||||
|
||||
if ((pSubmitTbDataRet->flags & sourceExcluded) != 0) {
|
||||
goto END;
|
||||
}
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
bool tmp = (pSubmitTbDataRet->flags & sourceExcluded) != 0;
|
||||
TSDB_CHECK_CONDITION(!tmp, code, lino, END, TSDB_CODE_SUCCESS);
|
||||
if (pRsp->withTbName) {
|
||||
int64_t uid = pExec->pTqReader->lastBlkUid;
|
||||
code = tqAddTbNameToRsp(pTq, uid, pRsp, taosArrayGetSize(pBlocks));
|
||||
if (code != 0) {
|
||||
tqError("vgId:%d, failed to add tbname to rsp msg", pTq->pVnode->config.vgId);
|
||||
goto END;
|
||||
}
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
}
|
||||
if (pHandle->fetchMeta != WITH_DATA && pSubmitTbDataRet->pCreateTbReq != NULL) {
|
||||
if (pSubmitTbDataRet->ctimeMs - createTime <= 1000) {  // judge if table is already created to avoid sending createTbReq
|
||||
code = buildCreateTbInfo(pRsp, pSubmitTbDataRet->pCreateTbReq);
|
||||
if (code != 0){
|
||||
tqError("vgId:%d, failed to build create table info", pTq->pVnode->config.vgId);
|
||||
goto END;
|
||||
}
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
}
|
||||
}
|
||||
if (pHandle->fetchMeta == ONLY_META && pSubmitTbDataRet->pCreateTbReq == NULL) {
|
||||
goto END;
|
||||
}
|
||||
tmp = (pHandle->fetchMeta == ONLY_META && pSubmitTbDataRet->pCreateTbReq == NULL);
|
||||
TSDB_CHECK_CONDITION(!tmp, code, lino, END, TSDB_CODE_SUCCESS);
|
||||
for (int32_t i = 0; i < taosArrayGetSize(pBlocks); i++) {
|
||||
SSDataBlock* pBlock = taosArrayGet(pBlocks, i);
|
||||
if (pBlock == NULL) {
|
||||
continue;
|
||||
}
|
||||
if (tqAddBlockDataToRsp(pBlock, pRsp, taosArrayGetSize(pBlock->pDataBlock),
|
||||
pTq->pVnode->config.tsdbCfg.precision) != 0){
|
||||
if (tqAddBlockDataToRsp(pBlock, pRsp, taosArrayGetSize(pBlock->pDataBlock), pTq->pVnode->config.tsdbCfg.precision) != 0){
|
||||
tqError("vgId:%d, failed to add block to rsp msg", pTq->pVnode->config.vgId);
|
||||
continue;
|
||||
}
|
||||
|
@ -405,28 +383,29 @@ static void tqProcessSubData(STQ* pTq, STqHandle* pHandle, SMqDataRsp* pRsp, int
|
|||
}
|
||||
pRsp->blockNum++;
|
||||
}
|
||||
|
||||
taosArrayDestroy(pBlocks);
|
||||
taosArrayDestroy(pSchemas);
|
||||
return;
|
||||
|
||||
tqDebug("vgId:%d, process sub data success, response blocknum:%d, rows:%d", pTq->pVnode->config.vgId, pRsp->blockNum, *totalRows);
|
||||
END:
|
||||
taosArrayDestroyEx(pBlocks, (FDelete)blockDataFreeRes);
|
||||
taosArrayDestroyP(pSchemas, (FDelete)tDeleteSchemaWrapper);
|
||||
if (code != 0){
|
||||
tqError("%s failed at %d, failed to process sub data:%s", __FUNCTION__, lino, tstrerror(code));
|
||||
taosArrayDestroyEx(pBlocks, (FDelete)blockDataFreeRes);
|
||||
taosArrayDestroyP(pSchemas, (FDelete)tDeleteSchemaWrapper);
|
||||
} else {
|
||||
taosArrayDestroy(pBlocks);
|
||||
taosArrayDestroy(pSchemas);
|
||||
}
|
||||
}
|
||||
|
||||
int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, SMqDataRsp* pRsp, int32_t* totalRows,
|
||||
int8_t sourceExcluded) {
|
||||
if (pTq == NULL || pHandle == NULL || pRsp == NULL || totalRows == NULL) {
|
||||
return TSDB_CODE_INVALID_PARA;
|
||||
}
|
||||
int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, SMqDataRsp* pRsp, int32_t* totalRows, int8_t sourceExcluded) {
|
||||
int32_t code = 0;
|
||||
int32_t lino = 0;
|
||||
TSDB_CHECK_NULL(pRsp, code, lino, END, TSDB_CODE_INVALID_PARA);
|
||||
TSDB_CHECK_NULL(pTq, code, lino, END, TSDB_CODE_INVALID_PARA);
|
||||
TSDB_CHECK_NULL(pHandle, code, lino, END, TSDB_CODE_INVALID_PARA);
|
||||
TSDB_CHECK_NULL(totalRows, code, lino, END, TSDB_CODE_INVALID_PARA);
|
||||
STqExecHandle* pExec = &pHandle->execHandle;
|
||||
int32_t code = 0;
|
||||
STqReader* pReader = pExec->pTqReader;
|
||||
code = tqReaderSetSubmitMsg(pReader, submit.msgStr, submit.msgLen, submit.ver);
|
||||
if (code != 0) {
|
||||
return code;
|
||||
}
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
|
||||
if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
|
||||
while (tqNextBlockImpl(pReader, NULL)) {
|
||||
|
@ -438,5 +417,9 @@ int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, SMqData
|
|||
}
|
||||
}
|
||||
|
||||
END:
|
||||
if (code != 0){
|
||||
tqError("%s failed at %d, failed to scan log:%s", __FUNCTION__, lino, tstrerror(code));
|
||||
}
|
||||
return code;
|
||||
}
|
||||
@ -21,21 +21,27 @@ static int32_t tqSendBatchMetaPollRsp(STqHandle* pHandle, const SRpcMsg* pMsg, c
|
|||
const SMqBatchMetaRsp* pRsp, int32_t vgId);
|
||||
|
||||
int32_t tqInitDataRsp(SMqDataRsp* pRsp, STqOffsetVal pOffset) {
|
||||
if (pRsp == NULL) {
|
||||
return TSDB_CODE_INVALID_PARA;
|
||||
}
|
||||
pRsp->blockData = taosArrayInit(0, sizeof(void*));
|
||||
pRsp->blockDataLen = taosArrayInit(0, sizeof(int32_t));
|
||||
int32_t code = TDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
tqDebug("%s called", __FUNCTION__ );
|
||||
TSDB_CHECK_NULL(pRsp, code, lino, END, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
if (pRsp->blockData == NULL || pRsp->blockDataLen == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
pRsp->blockData = taosArrayInit(0, sizeof(void*));
|
||||
TSDB_CHECK_NULL(pRsp->blockData, code, lino, END, terrno);
|
||||
|
||||
pRsp->blockDataLen = taosArrayInit(0, sizeof(int32_t));
|
||||
TSDB_CHECK_NULL(pRsp->blockDataLen, code, lino, END, terrno);
|
||||
|
||||
tOffsetCopy(&pRsp->reqOffset, &pOffset);
|
||||
tOffsetCopy(&pRsp->rspOffset, &pOffset);
|
||||
pRsp->withTbName = 0;
|
||||
pRsp->withSchema = false;
|
||||
return 0;
|
||||
|
||||
END:
|
||||
if (code != 0){
|
||||
tqError("%s failed at:%d, code:%s", __FUNCTION__ , lino, tstrerror(code));
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
void tqUpdateNodeStage(STQ* pTq, bool isLeader) {
|
||||
|
@ -44,45 +50,37 @@ void tqUpdateNodeStage(STQ* pTq, bool isLeader) {
|
|||
}
|
||||
|
||||
static int32_t tqInitTaosxRsp(SMqDataRsp* pRsp, STqOffsetVal pOffset) {
|
||||
if (pRsp == NULL) {
|
||||
return TSDB_CODE_INVALID_PARA;
|
||||
}
|
||||
int32_t code = TDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
tqDebug("%s called", __FUNCTION__ );
|
||||
TSDB_CHECK_NULL(pRsp, code, lino, END, TSDB_CODE_INVALID_PARA);
|
||||
tOffsetCopy(&pRsp->reqOffset, &pOffset);
|
||||
tOffsetCopy(&pRsp->rspOffset, &pOffset);
|
||||
|
||||
pRsp->withTbName = 1;
|
||||
pRsp->withSchema = 1;
|
||||
pRsp->blockData = taosArrayInit(0, sizeof(void*));
|
||||
TSDB_CHECK_NULL(pRsp->blockData, code, lino, END, terrno);\
|
||||
|
||||
pRsp->blockDataLen = taosArrayInit(0, sizeof(int32_t));
|
||||
TSDB_CHECK_NULL(pRsp->blockDataLen, code, lino, END, terrno);
|
||||
|
||||
pRsp->blockTbName = taosArrayInit(0, sizeof(void*));
|
||||
TSDB_CHECK_NULL(pRsp->blockTbName, code, lino, END, terrno);
|
||||
|
||||
pRsp->blockSchema = taosArrayInit(0, sizeof(void*));
|
||||
TSDB_CHECK_NULL(pRsp->blockSchema, code, lino, END, terrno);
|
||||
|
||||
if (pRsp->blockData == NULL || pRsp->blockDataLen == NULL ||
|
||||
pRsp->blockTbName == NULL || pRsp->blockSchema == NULL) {
|
||||
if (pRsp->blockData != NULL) {
|
||||
taosArrayDestroy(pRsp->blockData);
|
||||
pRsp->blockData = NULL;
|
||||
}
|
||||
|
||||
if (pRsp->blockDataLen != NULL) {
|
||||
taosArrayDestroy(pRsp->blockDataLen);
|
||||
pRsp->blockDataLen = NULL;
|
||||
}
|
||||
|
||||
if (pRsp->blockTbName != NULL) {
|
||||
taosArrayDestroy(pRsp->blockTbName);
|
||||
pRsp->blockTbName = NULL;
|
||||
}
|
||||
|
||||
if (pRsp->blockSchema != NULL) {
|
||||
taosArrayDestroy(pRsp->blockSchema);
|
||||
pRsp->blockSchema = NULL;
|
||||
}
|
||||
|
||||
return terrno;
|
||||
END:
|
||||
if (code != 0){
|
||||
tqError("%s failed at:%d, code:%s", __FUNCTION__ , lino, tstrerror(code));
|
||||
taosArrayDestroy(pRsp->blockData);
|
||||
taosArrayDestroy(pRsp->blockDataLen);
|
||||
taosArrayDestroy(pRsp->blockTbName);
|
||||
taosArrayDestroy(pRsp->blockSchema);
|
||||
}
|
||||
|
||||
return 0;
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest,
|
||||
|
@ -151,23 +149,19 @@ static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHand
|
|||
|
||||
static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest,
|
||||
SRpcMsg* pMsg, STqOffsetVal* pOffset) {
|
||||
if (pTq == NULL || pHandle == NULL || pRequest == NULL || pMsg == NULL || pOffset == NULL) {
|
||||
return TSDB_CODE_INVALID_PARA;
|
||||
}
|
||||
int32_t code = TDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
tqDebug("%s called", __FUNCTION__ );
|
||||
uint64_t consumerId = pRequest->consumerId;
|
||||
int32_t vgId = TD_VID(pTq->pVnode);
|
||||
terrno = 0;
|
||||
|
||||
SMqDataRsp dataRsp = {0};
|
||||
int code = tqInitDataRsp(&dataRsp, *pOffset);
|
||||
if (code != 0) {
|
||||
goto end;
|
||||
}
|
||||
code = tqInitDataRsp(&dataRsp, *pOffset);
|
||||
TSDB_CHECK_CODE(code, lino, end);
|
||||
|
||||
code = qSetTaskId(pHandle->execHandle.task, consumerId, pRequest->reqId);
|
||||
if (code != 0) {
|
||||
goto end;
|
||||
}
|
||||
TSDB_CHECK_CODE(code, lino, end);
|
||||
|
||||
code = tqScanData(pTq, pHandle, &dataRsp, pOffset, pRequest);
|
||||
if (code != 0 && terrno != TSDB_CODE_WAL_LOG_NOT_EXIST) {
|
||||
|
@ -191,15 +185,21 @@ static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle,
|
|||
tOffsetCopy(&dataRsp.reqOffset, pOffset);
|
||||
code = tqSendDataRsp(pHandle, pMsg, pRequest, &dataRsp, TMQ_MSG_TYPE__POLL_DATA_RSP, vgId);
|
||||
|
||||
end : {
|
||||
char buf[TSDB_OFFSET_LEN] = {0};
|
||||
tFormatOffset(buf, TSDB_OFFSET_LEN, &dataRsp.rspOffset);
|
||||
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, rsp block:%d, rsp offset type:%s,QID:0x%" PRIx64
|
||||
" code:%d",
|
||||
consumerId, pHandle->subKey, vgId, dataRsp.blockNum, buf, pRequest->reqId, code);
|
||||
tDeleteMqDataRsp(&dataRsp);
|
||||
return code;
|
||||
}
|
||||
end:
|
||||
{
|
||||
char buf[TSDB_OFFSET_LEN] = {0};
|
||||
tFormatOffset(buf, TSDB_OFFSET_LEN, &dataRsp.rspOffset);
|
||||
if (code != 0){
|
||||
tqError("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, rsp block:%d, rsp offset type:%s,QID:0x%" PRIx64 " error msg:%s, line:%d",
|
||||
consumerId, pHandle->subKey, vgId, dataRsp.blockNum, buf, pRequest->reqId, tstrerror(code), lino);
|
||||
} else {
|
||||
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, rsp block:%d, rsp offset type:%s,QID:0x%" PRIx64 " success",
|
||||
consumerId, pHandle->subKey, vgId, dataRsp.blockNum, buf, pRequest->reqId);
|
||||
}
|
||||
|
||||
tDeleteMqDataRsp(&dataRsp);
|
||||
return code;
|
||||
}
|
||||
}
|
||||
|
||||
#define PROCESS_EXCLUDED_MSG(TYPE, DECODE_FUNC, DELETE_FUNC) \
|
||||
|
@ -224,9 +224,6 @@ static void tDeleteCommon(void* parm) {}
|
|||
|
||||
static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest,
|
||||
SRpcMsg* pMsg, STqOffsetVal* offset) {
|
||||
if (pTq == NULL || pHandle == NULL || pRequest == NULL || pMsg == NULL || offset == NULL) {
|
||||
return TSDB_CODE_INVALID_PARA;
|
||||
}
|
||||
int32_t vgId = TD_VID(pTq->pVnode);
|
||||
SMqDataRsp taosxRsp = {0};
|
||||
SMqBatchMetaRsp btMetaRsp = {0};
|
||||
|
@ -238,17 +235,13 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
|
|||
|
||||
if (taosArrayGetSize(btMetaRsp.batchMetaReq) > 0) {
|
||||
code = tqSendBatchMetaPollRsp(pHandle, pMsg, pRequest, &btMetaRsp, vgId);
|
||||
tqDebug("tmq poll: consumer:0x%" PRIx64 " subkey:%s vgId:%d, send meta offset type:%d,uid:%" PRId64
|
||||
",ts:%" PRId64,
|
||||
pRequest->consumerId, pHandle->subKey, vgId, btMetaRsp.rspOffset.type, btMetaRsp.rspOffset.uid,
|
||||
btMetaRsp.rspOffset.ts);
|
||||
tqDebug("tmq poll: consumer:0x%" PRIx64 " subkey:%s vgId:%d, send meta offset type:%d,uid:%" PRId64 ",ts:%" PRId64,
|
||||
pRequest->consumerId, pHandle->subKey, vgId, btMetaRsp.rspOffset.type, btMetaRsp.rspOffset.uid,btMetaRsp.rspOffset.ts);
|
||||
goto END;
|
||||
}
|
||||
|
||||
tqDebug("taosx poll: consumer:0x%" PRIx64 " subkey:%s vgId:%d, send data blockNum:%d, offset type:%d,uid:%" PRId64
|
||||
",ts:%" PRId64,
|
||||
pRequest->consumerId, pHandle->subKey, vgId, taosxRsp.blockNum, taosxRsp.rspOffset.type,
|
||||
taosxRsp.rspOffset.uid, taosxRsp.rspOffset.ts);
|
||||
tqDebug("taosx poll: consumer:0x%" PRIx64 " subkey:%s vgId:%d, send data blockNum:%d, offset type:%d,uid:%" PRId64",ts:%" PRId64,
|
||||
pRequest->consumerId, pHandle->subKey, vgId, taosxRsp.blockNum, taosxRsp.rspOffset.type, taosxRsp.rspOffset.uid, taosxRsp.rspOffset.ts);
|
||||
if (taosxRsp.blockNum > 0) {
|
||||
code = tqSendDataRsp(pHandle, pMsg, pRequest, &taosxRsp, TMQ_MSG_TYPE__POLL_DATA_RSP, vgId);
|
||||
goto END;
|
||||
|
@ -277,30 +270,23 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
|
|||
if (totalMetaRows > 0) {
|
||||
tqOffsetResetToLog(&btMetaRsp.rspOffset, fetchVer);
|
||||
code = tqSendBatchMetaPollRsp(pHandle, pMsg, pRequest, &btMetaRsp, vgId);
|
||||
if (totalRows != 0) {
|
||||
tqError("tmq poll: consumer:0x%" PRIx64 " (epoch %d) iter log, totalRows error, vgId:%d offset %" PRId64,
|
||||
pRequest->consumerId, pRequest->epoch, vgId, fetchVer);
|
||||
code = code == 0 ? TSDB_CODE_TQ_INTERNAL_ERROR : code;
|
||||
}
|
||||
goto END;
|
||||
}
|
||||
tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);
|
||||
code = tqSendDataRsp(
|
||||
pHandle, pMsg, pRequest, &taosxRsp,
|
||||
code = tqSendDataRsp(pHandle, pMsg, pRequest, &taosxRsp,
|
||||
taosxRsp.createTableNum > 0 ? TMQ_MSG_TYPE__POLL_DATA_META_RSP : TMQ_MSG_TYPE__POLL_DATA_RSP, vgId);
|
||||
goto END;
|
||||
}
|
||||
|
||||
SWalCont* pHead = &pHandle->pWalReader->pHead->head;
|
||||
tqDebug("tmq poll: consumer:0x%" PRIx64 " (epoch %d) iter log, vgId:%d offset %" PRId64 " msgType %d",
|
||||
pRequest->consumerId, pRequest->epoch, vgId, fetchVer, pHead->msgType);
|
||||
tqDebug("tmq poll: consumer:0x%" PRIx64 " (epoch %d) iter log, vgId:%d offset %" PRId64 " msgType %s",
|
||||
pRequest->consumerId, pRequest->epoch, vgId, fetchVer, TMSG_INFO(pHead->msgType));
|
||||
|
||||
// process meta
|
||||
if (pHead->msgType != TDMT_VND_SUBMIT) {
|
||||
if (totalRows > 0) {
|
||||
tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);
|
||||
code = tqSendDataRsp(
|
||||
pHandle, pMsg, pRequest, &taosxRsp,
|
||||
code = tqSendDataRsp(pHandle, pMsg, pRequest, &taosxRsp,
|
||||
taosxRsp.createTableNum > 0 ? TMQ_MSG_TYPE__POLL_DATA_META_RSP : TMQ_MSG_TYPE__POLL_DATA_RSP, vgId);
|
||||
goto END;
|
||||
}
|
||||
|
@ -331,15 +317,9 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
|
|||
|
||||
if (!btMetaRsp.batchMetaReq) {
|
||||
btMetaRsp.batchMetaReq = taosArrayInit(4, POINTER_BYTES);
|
||||
if (btMetaRsp.batchMetaReq == NULL) {
|
||||
code = TAOS_GET_TERRNO(terrno);
|
||||
goto END;
|
||||
}
|
||||
TQ_NULL_GO_TO_END(btMetaRsp.batchMetaReq);
|
||||
btMetaRsp.batchMetaLen = taosArrayInit(4, sizeof(int32_t));
|
||||
if (btMetaRsp.batchMetaLen == NULL) {
|
||||
code = TAOS_GET_TERRNO(terrno);
|
||||
goto END;
|
||||
}
|
||||
TQ_NULL_GO_TO_END(btMetaRsp.batchMetaLen);
|
||||
}
|
||||
fetchVer++;
|
||||
|
||||
|
@ -355,10 +335,7 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
|
|||
}
|
||||
int32_t tLen = sizeof(SMqRspHead) + len;
|
||||
void* tBuf = taosMemoryCalloc(1, tLen);
|
||||
if (tBuf == NULL) {
|
||||
code = TAOS_GET_TERRNO(terrno);
|
||||
goto END;
|
||||
}
|
||||
TQ_NULL_GO_TO_END(tBuf);
|
||||
void* metaBuff = POINTER_SHIFT(tBuf, sizeof(SMqRspHead));
|
||||
SEncoder encoder = {0};
|
||||
tEncoderInit(&encoder, metaBuff, len);
|
||||
|
@ -369,14 +346,8 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
|
|||
tqError("tmq extract meta from log, tEncodeMqMetaRsp error");
|
||||
continue;
|
||||
}
|
||||
if (taosArrayPush(btMetaRsp.batchMetaReq, &tBuf) == NULL) {
|
||||
code = TAOS_GET_TERRNO(terrno);
|
||||
goto END;
|
||||
}
|
||||
if (taosArrayPush(btMetaRsp.batchMetaLen, &tLen) == NULL) {
|
||||
code = TAOS_GET_TERRNO(terrno);
|
||||
goto END;
|
||||
}
|
||||
TQ_NULL_GO_TO_END (taosArrayPush(btMetaRsp.batchMetaReq, &tBuf));
|
||||
TQ_NULL_GO_TO_END (taosArrayPush(btMetaRsp.batchMetaLen, &tLen));
|
||||
totalMetaRows++;
|
||||
if ((taosArrayGetSize(btMetaRsp.batchMetaReq) >= tmqRowSize) || (taosGetTimestampMs() - st > 1000)) {
|
||||
tqOffsetResetToLog(&btMetaRsp.rspOffset, fetchVer);
|
||||
|
@ -399,17 +370,11 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
|
|||
.ver = pHead->version,
|
||||
};
|
||||
|
||||
code = tqTaosxScanLog(pTq, pHandle, submit, &taosxRsp, &totalRows, pRequest->sourceExcluded);
|
||||
if (code < 0) {
|
||||
tqError("tmq poll: tqTaosxScanLog error %" PRId64 ", in vgId:%d, subkey %s", pRequest->consumerId, vgId,
|
||||
pRequest->subKey);
|
||||
goto END;
|
||||
}
|
||||
TQ_ERR_GO_TO_END(tqTaosxScanLog(pTq, pHandle, submit, &taosxRsp, &totalRows, pRequest->sourceExcluded));
|
||||
|
||||
if (totalRows >= tmqRowSize || (taosGetTimestampMs() - st > 1000)) {
|
||||
tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer + 1);
|
||||
code = tqSendDataRsp(
|
||||
pHandle, pMsg, pRequest, &taosxRsp,
|
||||
code = tqSendDataRsp(pHandle, pMsg, pRequest, &taosxRsp,
|
||||
taosxRsp.createTableNum > 0 ? TMQ_MSG_TYPE__POLL_DATA_META_RSP : TMQ_MSG_TYPE__POLL_DATA_RSP, vgId);
|
||||
goto END;
|
||||
} else {
|
||||
|
@ -419,6 +384,10 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
|
|||
}
|
||||
|
||||
END:
|
||||
if (code != 0){
|
||||
tqError("tmq poll: tqTaosxScanLog error. consumerId:0x%" PRIx64 ", in vgId:%d, subkey %s", pRequest->consumerId, vgId,
|
||||
pRequest->subKey);
|
||||
}
|
||||
tDeleteMqBatchMetaRsp(&btMetaRsp);
|
||||
tDeleteSTaosxRsp(&taosxRsp);
|
||||
return code;
|
||||
|
@ -457,6 +426,9 @@ int32_t tqExtractDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequ
|
|||
}
|
||||
|
||||
END:
|
||||
if (code != 0){
|
||||
uError("failed to extract data for mq, msg:%s", tstrerror(code));
|
||||
}
|
||||
tOffsetDestroy(&reqOffset);
|
||||
return code;
|
||||
}
|
||||
@ -680,7 +680,7 @@ static int32_t fset_cmpr_fn(const struct STFileSet *pSet1, const struct STFileSe
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int32_t edit_fs(STFileSystem *fs, const TFileOpArray *opArray) {
|
||||
static int32_t edit_fs(STFileSystem *fs, const TFileOpArray *opArray, EFEditT etype) {
|
||||
int32_t code = 0;
|
||||
int32_t lino = 0;
|
||||
|
||||
|
@ -690,6 +690,8 @@ static int32_t edit_fs(STFileSystem *fs, const TFileOpArray *opArray) {
|
|||
TFileSetArray *fsetArray = fs->fSetArrTmp;
|
||||
STFileSet *fset = NULL;
|
||||
const STFileOp *op;
|
||||
int32_t fid = INT32_MIN;
|
||||
TSKEY now = taosGetTimestampMs();
|
||||
TARRAY2_FOREACH_PTR(opArray, op) {
|
||||
if (!fset || fset->fid != op->fid) {
|
||||
STFileSet tfset = {.fid = op->fid};
|
||||
|
@ -708,6 +710,15 @@ static int32_t edit_fs(STFileSystem *fs, const TFileOpArray *opArray) {
|
|||
|
||||
code = tsdbTFileSetEdit(fs->tsdb, fset, op);
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
|
||||
if (fid != op->fid) {
|
||||
fid = op->fid;
|
||||
if (etype == TSDB_FEDIT_COMMIT) {
|
||||
fset->lastCommit = now;
|
||||
} else if (etype == TSDB_FEDIT_COMPACT) {
|
||||
fset->lastCompact = now;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// remove empty stt level and empty file set
|
||||
|
@ -864,7 +875,7 @@ int32_t tsdbFSEditBegin(STFileSystem *fs, const TFileOpArray *opArray, EFEditT e
|
|||
fs->etype = etype;
|
||||
|
||||
// edit
|
||||
code = edit_fs(fs, opArray);
|
||||
code = edit_fs(fs, opArray, etype);
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
|
||||
// save fs
|
||||
|
@ -1288,6 +1299,12 @@ int32_t tsdbFileSetReaderOpen(void *pVnode, struct SFileSetReader **ppReader) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
extern bool tsdbShouldCompact(const STFileSet *pFileSet);
|
||||
|
||||
#ifndef TD_ENTERPRISE
|
||||
bool tsdbShouldCompact(const STFileSet *pFileSet) { return false; }
|
||||
#endif
|
||||
|
||||
static int32_t tsdbFileSetReaderNextNoLock(struct SFileSetReader *pReader) {
|
||||
STsdb *pTsdb = pReader->pTsdb;
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
@ -1311,7 +1328,7 @@ static int32_t tsdbFileSetReaderNextNoLock(struct SFileSetReader *pReader) {
|
|||
// get file set details
|
||||
pReader->fid = pReader->pFileSet->fid;
|
||||
tsdbFidKeyRange(pReader->fid, pTsdb->keepCfg.days, pTsdb->keepCfg.precision, &pReader->startTime, &pReader->endTime);
|
||||
pReader->lastCompactTime = 0; // TODO
|
||||
pReader->lastCompactTime = pReader->pFileSet->lastCompact;
|
||||
pReader->totalSize = 0;
|
||||
for (int32_t i = 0; i < TSDB_FTYPE_MAX; i++) {
|
||||
STFileObj *fobj = pReader->pFileSet->farr[i];
|
||||
|
@ -1375,7 +1392,7 @@ int32_t tsdbFileSetGetEntryField(struct SFileSetReader *pReader, const char *fie
|
|||
|
||||
fieldName = "should_compact";
|
||||
if (strncmp(field, fieldName, strlen(fieldName) + 1) == 0) {
|
||||
*(char *)value = 0; // TODO
|
||||
*(char *)value = tsdbShouldCompact(pReader->pFileSet);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
|
|
@ -273,6 +273,15 @@ int32_t tsdbTFileSetToJson(const STFileSet *fset, cJSON *json) {
|
|||
if (code) return code;
|
||||
}
|
||||
|
||||
// about compact and commit
|
||||
if (cJSON_AddNumberToObject(json, "last compact", fset->lastCompact) == NULL) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
if (cJSON_AddNumberToObject(json, "last commit", fset->lastCommit) == NULL) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -324,6 +333,20 @@ int32_t tsdbJsonToTFileSet(STsdb *pTsdb, const cJSON *json, STFileSet **fset) {
  } else {
    return TSDB_CODE_FILE_CORRUPTED;
  }
  // about compact and commit
  item1 = cJSON_GetObjectItem(json, "last compact");
  if (cJSON_IsNumber(item1)) {
    (*fset)->lastCompact = item1->valuedouble;
  } else {
    (*fset)->lastCompact = 0;
  }

  item1 = cJSON_GetObjectItem(json, "last commit");
  if (cJSON_IsNumber(item1)) {
    (*fset)->lastCommit = item1->valuedouble;
  } else {
    (*fset)->lastCommit = 0;
  }

  return 0;
}

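The hunks above persist `lastCompact`/`lastCommit` in the file-set manifest and fall back to 0 when an older manifest lacks the keys. A round-trip sketch using the same cJSON calls on a simplified struct:

/* Round-trip sketch for the two new fields; the struct here is a simplified stand-in. */
#include <stdint.h>
#include "cJSON.h"

typedef int64_t TSKEY;
typedef struct { TSKEY lastCompact; TSKEY lastCommit; } FileSetTimes;

static int encode_times(const FileSetTimes *t, cJSON *json) {
  if (cJSON_AddNumberToObject(json, "last compact", (double)t->lastCompact) == NULL) return -1;
  if (cJSON_AddNumberToObject(json, "last commit", (double)t->lastCommit) == NULL) return -1;
  return 0;
}

static void decode_times(const cJSON *json, FileSetTimes *t) {
  const cJSON *item = cJSON_GetObjectItem(json, "last compact");
  t->lastCompact = cJSON_IsNumber(item) ? (TSKEY)item->valuedouble : 0;  /* default for old manifests */
  item = cJSON_GetObjectItem(json, "last commit");
  t->lastCommit = cJSON_IsNumber(item) ? (TSKEY)item->valuedouble : 0;
}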
@ -467,6 +490,9 @@ int32_t tsdbTFileSetApplyEdit(STsdb *pTsdb, const STFileSet *fset1, STFileSet *f
|
|||
}
|
||||
}
|
||||
|
||||
fset2->lastCompact = fset1->lastCompact;
|
||||
fset2->lastCommit = fset1->lastCommit;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -522,6 +548,9 @@ int32_t tsdbTFileSetInitCopy(STsdb *pTsdb, const STFileSet *fset1, STFileSet **f
|
|||
if (code) return code;
|
||||
}
|
||||
|
||||
(*fset)->lastCompact = fset1->lastCompact;
|
||||
(*fset)->lastCommit = fset1->lastCommit;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -617,6 +646,9 @@ int32_t tsdbTFileSetInitRef(STsdb *pTsdb, const STFileSet *fset1, STFileSet **fs
|
|||
}
|
||||
}
|
||||
|
||||
(*fset)->lastCompact = fset1->lastCompact;
|
||||
(*fset)->lastCommit = fset1->lastCommit;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@@ -92,6 +92,8 @@ struct STFileSet {
  int64_t maxVerValid;
  STFileObj *farr[TSDB_FTYPE_MAX]; // file array
  TSttLvlArray lvlArr[1]; // level array
  TSKEY lastCompact;
  TSKEY lastCommit;

  bool mergeScheduled;
  SVATaskID mergeTask;

@ -1415,7 +1415,8 @@ static int32_t vnodeProcessAlterTbReq(SVnode *pVnode, int64_t ver, void *pReq, i
|
|||
SVAlterTbReq vAlterTbReq = {0};
|
||||
SVAlterTbRsp vAlterTbRsp = {0};
|
||||
SDecoder dc = {0};
|
||||
int32_t rcode = 0;
|
||||
int32_t code = 0;
|
||||
int32_t lino = 0;
|
||||
int32_t ret;
|
||||
SEncoder ec = {0};
|
||||
STableMetaRsp vMetaRsp = {0};
|
||||
|
@ -1431,7 +1432,6 @@ static int32_t vnodeProcessAlterTbReq(SVnode *pVnode, int64_t ver, void *pReq, i
|
|||
if (tDecodeSVAlterTbReq(&dc, &vAlterTbReq) < 0) {
|
||||
vAlterTbRsp.code = TSDB_CODE_INVALID_MSG;
|
||||
tDecoderClear(&dc);
|
||||
rcode = -1;
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
|
@ -1439,7 +1439,6 @@ static int32_t vnodeProcessAlterTbReq(SVnode *pVnode, int64_t ver, void *pReq, i
|
|||
if (metaAlterTable(pVnode->pMeta, ver, &vAlterTbReq, &vMetaRsp) < 0) {
|
||||
vAlterTbRsp.code = terrno;
|
||||
tDecoderClear(&dc);
|
||||
rcode = -1;
|
||||
goto _exit;
|
||||
}
|
||||
tDecoderClear(&dc);
|
||||
|
@ -1449,6 +1448,31 @@ static int32_t vnodeProcessAlterTbReq(SVnode *pVnode, int64_t ver, void *pReq, i
|
|||
vAlterTbRsp.pMeta = &vMetaRsp;
|
||||
}
|
||||
|
||||
if (vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_TAG_VAL || vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL) {
|
||||
int64_t uid = metaGetTableEntryUidByName(pVnode->pMeta, vAlterTbReq.tbName);
|
||||
if (uid == 0) {
|
||||
vError("vgId:%d, %s failed at %s:%d since table %s not found", TD_VID(pVnode), __func__, __FILE__, __LINE__,
|
||||
vAlterTbReq.tbName);
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
SArray* tbUids = taosArrayInit(4, sizeof(int64_t));
|
||||
void* p = taosArrayPush(tbUids, &uid);
|
||||
TSDB_CHECK_NULL(p, code, lino, _exit, terrno);
|
||||
|
||||
vDebug("vgId:%d, remove tags value altered table:%s from query table list", TD_VID(pVnode), vAlterTbReq.tbName);
|
||||
if ((code = tqUpdateTbUidList(pVnode->pTq, tbUids, false)) < 0) {
|
||||
vError("vgId:%d, failed to remove tbUid list since %s", TD_VID(pVnode), tstrerror(code));
|
||||
}
|
||||
|
||||
vDebug("vgId:%d, try to add table:%s in query table list", TD_VID(pVnode), vAlterTbReq.tbName);
|
||||
if ((code = tqUpdateTbUidList(pVnode->pTq, tbUids, true)) < 0) {
|
||||
vError("vgId:%d, failed to add tbUid list since %s", TD_VID(pVnode), tstrerror(code));
|
||||
}
|
||||
|
||||
taosArrayDestroy(tbUids);
|
||||
}
|
||||
|
||||
_exit:
|
||||
taosArrayDestroy(vAlterTbReq.pMultiTag);
|
||||
tEncodeSize(tEncodeSVAlterTbRsp, &vAlterTbRsp, pRsp->contLen, ret);
|
||||
|
@ -1457,6 +1481,7 @@ _exit:
|
|||
if (tEncodeSVAlterTbRsp(&ec, &vAlterTbRsp) != 0) {
|
||||
vError("vgId:%d, failed to encode alter table response", TD_VID(pVnode));
|
||||
}
|
||||
|
||||
tEncoderClear(&ec);
|
||||
if (vMetaRsp.pSchemas) {
|
||||
taosMemoryFree(vMetaRsp.pSchemas);
|
||||
|
|
|
@@ -2269,6 +2269,8 @@ static SSDataBlock* sysTableBuildUserFileSets(SOperatorInfo* pOperator) {
    if (ret) {
      if (ret == TSDB_CODE_NOT_FOUND) {
        // no more scan entry
        setOperatorCompleted(pOperator);
        pAPI->tsdReader.fileSetReaderClose(&pInfo->pFileSetReader);
        break;
      } else {
        code = ret;

@@ -7782,7 +7782,7 @@ static int32_t buildCreateDbReq(STranslateContext* pCxt, SCreateDatabaseStmt* pS

static int32_t checkRangeOption(STranslateContext* pCxt, int32_t code, const char* pName, int64_t val, int64_t minVal,
                                int64_t maxVal, bool skipUndef) {
  if (skipUndef ? ((val >= 0 | val < -2) && (val < minVal || val > maxVal)) : (val < minVal || val > maxVal)) {
  if (skipUndef ? ((val >= 0 || val < -2) && (val < minVal || val > maxVal)) : (val < minVal || val > maxVal)) {
    return generateSyntaxErrMsgExt(&pCxt->msgBuf, code,
                                   "Invalid option %s: %" PRId64 ", valid range: [%" PRId64 ", %" PRId64 "]", pName,
                                   val, minVal, maxVal);

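The one-character fix above replaces a bitwise OR with a logical OR in the option check. On comparison results the truth value is identical, but `||` short-circuits and states the intent; a tiny illustration (the helper name and the reading of -1/-2 as "undefined" sentinels are assumptions):

/* Sketch: prefer logical OR on boolean conditions. */
#include <stdbool.h>
#include <stdint.h>

static bool is_explicit_value(int64_t val) {
  /* logical form: stops at the first true operand and reads as a condition, not bit math */
  return (val >= 0) || (val < -2);
}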
@@ -746,9 +746,19 @@ _return:
}

int32_t qwAbortPrerocessQuery(QW_FPARAMS_DEF) {
  QW_ERR_RET(qwDropTask(QW_FPARAMS()));
  int32_t code = TSDB_CODE_SUCCESS;
  SQWTaskCtx *ctx = NULL;

  return TSDB_CODE_SUCCESS;
  QW_ERR_RET(qwAcquireTaskCtx(QW_FPARAMS(), &ctx));

  QW_LOCK(QW_WRITE, &ctx->lock);
  QW_ERR_JRET(qwDropTask(QW_FPARAMS()));

_return:

  QW_UNLOCK(QW_WRITE, &ctx->lock);

  return code;
}

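qwAbortPrerocessQuery now acquires the task context, takes the write lock, drops the task, and releases everything through the shared `_return:` label. A generic sketch of that acquire/lock/single-unlock shape, with pthreads standing in for the QW_* macros:

/* Sketch only; pthread rwlock used in place of the QW_LOCK/QW_UNLOCK macros. */
#include <pthread.h>

typedef struct {
  pthread_rwlock_t lock;
  int              dropped;
} TaskCtx;

static int drop_task_locked(TaskCtx *ctx) {
  ctx->dropped = 1;                    /* stand-in for qwDropTask() */
  return 0;
}

static int abort_task(TaskCtx *ctx) {
  int code = 0;
  if (ctx == NULL) return -1;          /* stand-in for qwAcquireTaskCtx() failing */

  pthread_rwlock_wrlock(&ctx->lock);   /* QW_LOCK(QW_WRITE, ...) */
  code = drop_task_locked(ctx);        /* QW_ERR_JRET(...) would jump to _return on error */

  /* _return: single unlock point for both the success and the error path */
  pthread_rwlock_unlock(&ctx->lock);
  return code;
}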
int32_t qwPreprocessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
|
||||
|
|
|
@@ -1808,7 +1808,7 @@ int stateKeyDecode(void* k, char* buf) {
  return p - buf;
}

int stateKeyToString(void* k, char* buf) {
int32_t stateKeyToString(void* k, char* buf) {
  SStateKey* key = k;
  int n = 0;
  n += sprintf(buf + n, "[groupId:%" PRIu64 ",", key->key.groupId);

@@ -18,7 +18,7 @@
#include "streamBackendRocksdb.h"
#include "streamInt.h"

#define CHECK_NOT_RSP_DURATION 10 * 1000 // 10 sec
#define CHECK_NOT_RSP_DURATION 60 * 1000 // 60 sec

static void processDownstreamReadyRsp(SStreamTask* pTask);
static void rspMonitorFn(void* param, void* tmrId);

@@ -660,7 +660,7 @@ void handleTimeoutDownstreamTasks(SStreamTask* pTask, SArray* pTimeoutList) {

  pInfo->timeoutRetryCount += 1;

  // timeout more than 100 sec, add into node update list
  // timeout more than 600 sec, add into node update list
  if (pInfo->timeoutRetryCount > 10) {
    pInfo->timeoutRetryCount = 0;

@@ -674,7 +674,7 @@ void handleTimeoutDownstreamTasks(SStreamTask* pTask, SArray* pTimeoutList) {
    findCheckRspStatus(pInfo, *pTaskId, &p);
    if (p != NULL) {
      code = streamTaskAddIntoNodeUpdateList(pTask, p->vgId);
      stDebug("s-task:%s vgId:%d downstream task:0x%x (vgId:%d) timeout more than 100sec, add into nodeUpdate list",
      stDebug("s-task:%s vgId:%d downstream task:0x%x (vgId:%d) timeout more than 600sec, add into nodeUpdate list",
              id, vgId, p->taskId, p->vgId);
    }
  }

@@ -250,9 +250,10 @@ _EXIT:
  streamBackendCleanup((void*)pBackend);

  if (code == 0) {
    char* state = taosMemoryCalloc(1, strlen(pMeta->path) + 32);
    int32_t len = strlen(pMeta->path) + 32;
    char* state = taosMemoryCalloc(1, len);
    if (state != NULL) {
      sprintf(state, "%s%s%s", pMeta->path, TD_DIRSEP, "state");
      (void) snprintf(state, len, "%s%s%s", pMeta->path, TD_DIRSEP, "state");
      taosRemoveDir(state);
      taosMemoryFree(state);
    } else {

@@ -379,7 +380,7 @@ int32_t streamMetaOpen(const char* path, void* ahandle, FTaskBuild buildTaskFn,
  char* tpath = taosMemoryCalloc(1, len);
  TSDB_CHECK_NULL(tpath, code, lino, _err, terrno);

  sprintf(tpath, "%s%s%s", path, TD_DIRSEP, "stream");
  (void) snprintf(tpath, len, "%s%s%s", path, TD_DIRSEP, "stream");
  pMeta->path = tpath;

  code = streamMetaOpenTdb(pMeta);

@@ -119,23 +119,21 @@ static int64_t kBlockSize = 64 * 1024;
int32_t streamSnapHandleInit(SStreamSnapHandle* handle, char* path, void* pMeta);
void streamSnapHandleDestroy(SStreamSnapHandle* handle);

// static void streamBuildFname(char* path, char* file, char* fullname)

#define STREAM_ROCKSDB_BUILD_FULLNAME(path, file, fullname) \
  do { \
    sprintf(fullname, "%s%s%s", path, TD_DIRSEP, file); \
  } while (0)

int32_t streamGetFileSize(char* path, char* name, int64_t* sz) {
  int32_t ret = 0;
  int32_t len = strlen(path) + 32;

  char* fullname = taosMemoryCalloc(1, strlen(path) + 32);
  char* fullname = taosMemoryCalloc(1, len);
  if (fullname == NULL) {
    stError("failed to get file:%s size, code: out of memory", name);
    return terrno;
  }

  sprintf(fullname, "%s%s%s", path, TD_DIRSEP, name);
  ret = snprintf(fullname, len, "%s%s%s", path, TD_DIRSEP, name);
  if (ret < 0 || ret >= len) {
    stError("%s failed to set the file path for get the file size, code: out of buffer", name);
    return TSDB_CODE_OUT_OF_BUFFER;
  }

  ret = taosStatFile(fullname, sz, NULL, NULL);
  taosMemoryFree(fullname);

@@ -146,7 +144,7 @@ int32_t streamGetFileSize(char* path, char* name, int64_t* sz) {
TdFilePtr streamOpenFile(char* path, char* name, int32_t opt) {
  char fullname[256] = {0};

  STREAM_ROCKSDB_BUILD_FULLNAME(path, name, fullname);
  (void) snprintf(fullname, tListLen(fullname), "%s%s%s", path, TD_DIRSEP, name);
  return taosOpenFile(fullname, opt);
}

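The sprintf-to-snprintf changes in this file (and in streamMeta above) all follow the same bounded-format-then-check pattern; a generic version is sketched below (the error value is illustrative):

/* Sketch of the bounded path-building pattern used throughout these hunks. */
#include <stdio.h>

#define PATH_SEP "/"

static int build_path(char *out, size_t cap, const char *dir, const char *name) {
  int ret = snprintf(out, cap, "%s%s%s", dir, PATH_SEP, name);
  if (ret < 0 || (size_t)ret >= cap) {
    /* ret >= cap means the output was truncated; treat it as an out-of-buffer error */
    return -1;
  }
  return 0;
}

/* usage:
 *   char fullname[256];
 *   if (build_path(fullname, sizeof(fullname), path, "state") != 0) { ... }
 */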
@ -155,35 +153,74 @@ int32_t streamCreateTaskDbSnapInfo(void* arg, char* path, SArray* pSnap) { retur
|
|||
int32_t streamDestroyTaskDbSnapInfo(void* arg, SArray* snap) { return taskDbDestroySnap(arg, snap); }
|
||||
|
||||
void snapFileDebugInfo(SBackendSnapFile2* pSnapFile) {
|
||||
if (qDebugFlag & DEBUG_DEBUG) {
|
||||
int16_t cap = 512;
|
||||
int16_t cap = 512;
|
||||
|
||||
if (qDebugFlag & DEBUG_DEBUG) {
|
||||
char* buf = taosMemoryCalloc(1, cap);
|
||||
if (buf == NULL) {
|
||||
stError("%s failed to alloc memory, reason:%s", STREAM_STATE_TRANSFER, tstrerror(TSDB_CODE_OUT_OF_MEMORY));
|
||||
return;
|
||||
}
|
||||
|
||||
int32_t nBytes = snprintf(buf + strlen(buf), cap, "[");
|
||||
if (nBytes <= 0 || nBytes >= cap) {
|
||||
taosMemoryFree(buf);
|
||||
stError("%s failed to write buf, reason:%s", STREAM_STATE_TRANSFER, tstrerror(TSDB_CODE_OUT_OF_RANGE));
|
||||
return;
|
||||
}
|
||||
int32_t len = 0;
|
||||
int32_t wlen = 1;
|
||||
|
||||
if (pSnapFile->pCurrent) sprintf(buf, "current: %s,", pSnapFile->pCurrent);
|
||||
if (pSnapFile->pMainfest) sprintf(buf + strlen(buf), "MANIFEST: %s,", pSnapFile->pMainfest);
|
||||
if (pSnapFile->pOptions) sprintf(buf + strlen(buf), "options: %s,", pSnapFile->pOptions);
|
||||
if (pSnapFile->pSst) {
|
||||
for (int32_t i = 0; i < taosArrayGetSize(pSnapFile->pSst); i++) {
|
||||
char* name = taosArrayGetP(pSnapFile->pSst, i);
|
||||
if (strlen(buf) + strlen(name) < cap) sprintf(buf + strlen(buf), "%s,", name);
|
||||
do {
|
||||
buf[0] = '[';
|
||||
if (pSnapFile->pCurrent) {
|
||||
len = snprintf(buf + wlen, cap - wlen, "current: %s,", pSnapFile->pCurrent);
|
||||
if (len > 0 && len < (cap - wlen)) {
|
||||
wlen += len;
|
||||
} else {
|
||||
stError("%s failed to build buf for debug, code: out of buffer", STREAM_STATE_TRANSFER);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (pSnapFile->pMainfest) {
|
||||
len = snprintf(buf + wlen, cap - wlen, "MANIFEST: %s,", pSnapFile->pMainfest);
|
||||
if (len > 0 && len < (cap - wlen)) {
|
||||
wlen += len;
|
||||
} else {
|
||||
stError("%s failed to build buf for debug, code: out of buffer", STREAM_STATE_TRANSFER);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (pSnapFile->pOptions) {
|
||||
len = snprintf(buf + wlen, cap - wlen, "options: %s,", pSnapFile->pOptions);
|
||||
if (len > 0 && len < (cap - wlen)) {
|
||||
wlen += len;
|
||||
} else {
|
||||
stError("%s failed to build buf for debug, code: out of buffer", STREAM_STATE_TRANSFER);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (pSnapFile->pSst) {
|
||||
for (int32_t i = 0; i < taosArrayGetSize(pSnapFile->pSst); i++) {
|
||||
char* name = taosArrayGetP(pSnapFile->pSst, i);
|
||||
if (strlen(buf) + strlen(name) < cap) {
|
||||
len = snprintf(buf + wlen, cap - wlen, "%s,", name);
|
||||
if (len > 0 && len < (cap - wlen)) {
|
||||
wlen += len;
|
||||
} else {
|
||||
stError("%s failed to build buf for debug, code: out of buffer", STREAM_STATE_TRANSFER);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} while (0);
|
||||
|
||||
if (wlen < cap) {
|
||||
buf[wlen] = ']';
|
||||
}
|
||||
if ((strlen(buf)) < cap) sprintf(buf + strlen(buf) - 1, "]");
|
||||
buf[cap - 1] = '\0';
|
||||
|
||||
stInfo("%s %" PRId64 "-%" PRId64 " get file list: %s", STREAM_STATE_TRANSFER, pSnapFile->snapInfo.streamId,
|
||||
pSnapFile->snapInfo.taskId, buf);
|
||||
|
||||
taosMemoryFree(buf);
|
||||
}
|
||||
}
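
The rewritten snapFileDebugInfo appends each piece with a bounded snprintf and a running write offset, bailing out when the buffer would overflow. A compact helper capturing the same idea (names are illustrative, not from the source):

/* Sketch of the bounded-append loop used above. */
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>

static bool append_fmt(char *buf, int cap, int *wlen, const char *fmt, ...) {
  va_list ap;
  va_start(ap, fmt);
  int len = vsnprintf(buf + *wlen, (size_t)(cap - *wlen), fmt, ap);
  va_end(ap);
  if (len < 0 || len >= cap - *wlen) {
    return false;              /* out of buffer: caller stops appending */
  }
  *wlen += len;
  return true;
}

/* usage sketch:
 *   char buf[512] = "[";
 *   int  wlen = 1;
 *   if (!append_fmt(buf, sizeof(buf), &wlen, "current: %s,", current)) { ... }
 *   if (!append_fmt(buf, sizeof(buf), &wlen, "MANIFEST: %s,", manifest)) { ... }
 */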
|
||||
|
@@ -771,16 +808,23 @@ int32_t streamSnapWrite(SStreamSnapWriter* pWriter, uint8_t* pData, uint32_t nDa
SBackendSnapFile2* pDbSnapFile = taosArrayGet(pHandle->pDbSnapSet, pHandle->currIdx);
if (pDbSnapFile->inited == 0) {
char idstr[64] = {0};
sprintf(idstr, "0x%" PRIx64 "-0x%x", snapInfo.streamId, (int32_t)(snapInfo.taskId));
(void)snprintf(idstr, tListLen(idstr), "0x%" PRIx64 "-0x%x", snapInfo.streamId, (int32_t)(snapInfo.taskId));

char* path = taosMemoryCalloc(1, strlen(pHandle->metaPath) + 256);
int32_t bufLen = strlen(pHandle->metaPath) + 256;
char* path = taosMemoryCalloc(1, bufLen);
if (path == NULL) {
stError("s-task:0x%x failed to prepare meta header buffer, code:Out of memory", (int32_t) snapInfo.taskId);
return terrno;
}

sprintf(path, "%s%s%s%s%s%s%s%" PRId64 "", pHandle->metaPath, TD_DIRSEP, idstr, TD_DIRSEP, "checkpoints", TD_DIRSEP,
"checkpoint", snapInfo.chkpId);
int32_t ret = snprintf(path, bufLen, "%s%s%s%s%s%s%s%" PRId64 "", pHandle->metaPath, TD_DIRSEP, idstr, TD_DIRSEP,
"checkpoints", TD_DIRSEP, "checkpoint", snapInfo.chkpId);
if (ret < 0 || ret >= bufLen) {
stError("s-task:0x%x failed to set the path for take snapshot, code: out of buffer, %s", (int32_t)snapInfo.taskId,
pHandle->metaPath);
return TSDB_CODE_OUT_OF_BUFFER;
}

if (!taosIsDir(path)) {
code = taosMulMkDir(path);
stInfo("%s mkdir %s", STREAM_STATE_TRANSFER, path);
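The hunk above applies the same discipline to the checkpoint directory path in streamSnapWrite: the buffer length is computed once, snprintf replaces sprintf, and a truncated result is rejected before anything touches the filesystem. A rough standalone equivalent using plain libc, where the "/" separator, the path layout, and build_checkpoint_path are stand-ins for TD_DIRSEP, the real meta path layout, and the source function:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Build "<metaPath>/<idstr>/checkpoints/checkpoint<chkpId>" and reject truncation. */
static int build_checkpoint_path(const char *metaPath, uint64_t streamId, unsigned taskId,
                                 int64_t chkpId, char *path, size_t cap) {
  char idstr[64] = {0};
  (void)snprintf(idstr, sizeof(idstr), "0x%" PRIx64 "-0x%x", streamId, taskId);

  int ret = snprintf(path, cap, "%s/%s/checkpoints/checkpoint%" PRId64, metaPath, idstr, chkpId);
  if (ret < 0 || (size_t)ret >= cap) {
    return -1; /* out of buffer: better to fail than to create a silently clipped path */
  }
  return 0;
}

int main(void) {
  char path[512] = {0};
  if (build_checkpoint_path("/tmp/stream_meta", 0x1234abcdULL, 7u, 42, path, sizeof(path)) == 0) {
    printf("checkpoint dir: %s\n", path);
    /* in the source, taosMulMkDir(path) is called next when the directory is missing */
  }
  return 0;
}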
@@ -132,8 +132,12 @@ int32_t tNewStreamTask(int64_t streamId, int8_t taskLevel, SEpSet* pEpset, bool
return code;
}

char buf[128] = {0};
sprintf(buf, "0x%" PRIx64 "-0x%x", pTask->id.streamId, pTask->id.taskId);
char buf[128] = {0};
int32_t ret = snprintf(buf, tListLen(buf), "0x%" PRIx64 "-0x%x", pTask->id.streamId, pTask->id.taskId);
if (ret < 0 || ret >= tListLen(buf)) {
stError("s-task:0x%x failed to set the taskIdstr, code: out of buffer", pTask->id.taskId);
return TSDB_CODE_OUT_OF_BUFFER;
}

pTask->id.idStr = taosStrdup(buf);
if (pTask->id.idStr == NULL) {
@@ -402,7 +406,7 @@ int32_t streamTaskSetBackendPath(SStreamTask* pTask) {
}

char id[128] = {0};
int32_t nBytes = sprintf(id, "0x%" PRIx64 "-0x%x", streamId, taskId);
int32_t nBytes = snprintf(id, tListLen(id), "0x%" PRIx64 "-0x%x", streamId, taskId);
if (nBytes < 0 || nBytes >= sizeof(id)) {
return TSDB_CODE_OUT_OF_BUFFER;
}
@@ -413,10 +417,14 @@ int32_t streamTaskSetBackendPath(SStreamTask* pTask) {
return terrno;
}

(void)sprintf(pTask->backendPath, "%s%s%s", pTask->pMeta->path, TD_DIRSEP, id);
stDebug("s-task:%s set backend path:%s", pTask->id.idStr, pTask->backendPath);

return 0;
int32_t code = snprintf(pTask->backendPath, len + nBytes + 2, "%s%s%s", pTask->pMeta->path, TD_DIRSEP, id);
if (code < 0 || code >= len + nBytes + 2) {
stError("s-task:%s failed to set backend path:%s, code: out of buffer", pTask->id.idStr, pTask->backendPath);
return TSDB_CODE_OUT_OF_BUFFER;
} else {
stDebug("s-task:%s set backend path:%s", pTask->id.idStr, pTask->backendPath);
return 0;
}
}

int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, int64_t ver) {
@@ -1129,7 +1137,11 @@ SEpSet* streamTaskGetDownstreamEpInfo(SStreamTask* pTask, int32_t taskId) {

int32_t createStreamTaskIdStr(int64_t streamId, int32_t taskId, const char** pId) {
char buf[128] = {0};
sprintf(buf, "0x%" PRIx64 "-0x%x", streamId, taskId);
int32_t code = snprintf(buf, tListLen(buf), "0x%" PRIx64 "-0x%x", streamId, taskId);
if (code < 0 || code >= tListLen(buf)) {
return TSDB_CODE_OUT_OF_BUFFER;
}

*pId = taosStrdup(buf);

if (*pId == NULL) {
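createStreamTaskIdStr, like the tNewStreamTask and streamTaskSetBackendPath hunks above, now refuses a truncated id string before duplicating it, rather than handing callers a silently clipped name. A compact sketch of the same contract, using sizeof where the source uses tListLen; the error constants and function name below are placeholders, not the TDengine definitions:

#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ERR_OUT_OF_BUFFER (-2) /* stands in for TSDB_CODE_OUT_OF_BUFFER */

static int create_task_id_str(int64_t streamId, int32_t taskId, char **pId) {
  char buf[128] = {0};
  int  code = snprintf(buf, sizeof(buf), "0x%" PRIx64 "-0x%x", streamId, taskId);
  if (code < 0 || code >= (int)sizeof(buf)) {
    return ERR_OUT_OF_BUFFER; /* id did not fit: fail instead of truncating */
  }
  *pId = strdup(buf);         /* the source uses taosStrdup() */
  return (*pId == NULL) ? -ENOMEM : 0;
}

int main(void) {
  char *id = NULL;
  if (create_task_id_str(0x99aabbccddLL, 0x1f2e, &id) == 0) {
    printf("task id: %s\n", id); /* e.g. 0x99aabbccdd-0x1f2e */
    free(id);
  }
  return 0;
}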
@@ -1403,7 +1403,7 @@ int64_t taosGetLineFile(TdFilePtr pFile, char **__restrict ptrBuf) {
}

(*ptrBuf)[totalBytesRead] = '\0';
ret = totalBytesRead;
ret = (totalBytesRead > 0 ? totalBytesRead : -1); // -1 means EOF
#else
size_t len = 0;
ret = getline(ptrBuf, &len, pFile->fp);
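The taosGetLineFile change makes the Windows branch report EOF the same way the getline-based branch below it already does: a read that returns no bytes yields -1 rather than 0. A small illustration of that calling convention with plain getline; the file path is made up for the example:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

int main(void) {
  FILE *fp = fopen("/etc/hostname", "r"); /* any readable text file will do */
  if (fp == NULL) return 1;

  char   *line = NULL;
  size_t  cap  = 0;
  ssize_t n;
  /* getline() returns the number of bytes read, or -1 at EOF/error,
   * which is the contract taosGetLineFile now follows on both branches. */
  while ((n = getline(&line, &cap, fp)) != -1) {
    printf("read %zd bytes: %s", n, line);
  }
  free(line);
  fclose(fp);
  return 0;
}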
@@ -222,7 +222,7 @@ typedef struct SMPSessionChunk {
} SMPSessionChunk;

typedef struct SMPSession {
// SMPListNode list;
SMPListNode list;

char* sessionId;
SMPJob* pJob;
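Re-enabling SMPListNode list; as the first member of SMPSession presumably lets a session be linked into an intrusive list and recovered from the node pointer with a cast, since the node sits at offset zero. A hypothetical miniature of that layout, with all names invented for illustration:

#include <stdio.h>

typedef struct ListNode {
  struct ListNode *next;
} ListNode;

typedef struct Session {
  ListNode list;   /* first member: &sess.list aliases &sess */
  int      sessionNo;
} Session;

int main(void) {
  Session a = {{NULL}, 1}, b = {{NULL}, 2};
  a.list.next = &b.list; /* link b after a through the embedded nodes */

  for (ListNode *n = &a.list; n != NULL; n = n->next) {
    Session *s = (Session *)n; /* valid because list is the first member */
    printf("session %d\n", s->sessionNo);
  }
  return 0;
}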
@@ -36,7 +36,7 @@
"insert_mode": "taosc",
"line_protocol": "line",
"childtable_limit": -10,
"childtable_offset": 10,
"childtable_offset": 0,
"insert_rows": 20,
"insert_interval": 0,
"interlace_rows": 0,

@@ -36,7 +36,7 @@
"insert_mode": "taosc",
"line_protocol": "line",
"childtable_limit": -10,
"childtable_offset": 10,
"childtable_offset": 0,
"insert_rows": 20,
"insert_interval": 0,
"interlace_rows": 0,
@@ -61,6 +61,11 @@ check_transactions || exit 1
reset_cache || exit 1
go run ./stmt/ws/main.go

taos -s "drop database if exists power"
check_transactions || exit 1
reset_cache || exit 1
go run ./stmt2/native/main.go

taos -s "drop database if exists power"
check_transactions || exit 1
reset_cache || exit 1

@@ -196,3 +196,4 @@ check_transactions || exit 1
reset_cache || exit 1
python3 tmq_websocket_example.py

python3 stmt2_native.py
@@ -6,6 +6,16 @@

,,n,unit-test,bash test.sh

#docs-examples test
,,n,docs-examples-test,bash c.sh
,,n,docs-examples-test,bash python.sh
,,n,docs-examples-test,bash node.sh
,,n,docs-examples-test,bash csharp.sh
,,n,docs-examples-test,bash jdbc.sh
,,n,docs-examples-test,bash rust.sh
,,n,docs-examples-test,bash go.sh
,,n,docs-examples-test,bash test_R.sh

#
# army-test
#
@@ -443,6 +453,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/InsertFuturets.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/insert_wide_column.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/insert_column_value.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/insert_from_csv.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_benchmark.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k.py -R

@@ -482,6 +493,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/show.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/show_tag_index.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/information_schema.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/ins_filesets.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/grant.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/abs.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/abs.py -R

@@ -1383,6 +1395,8 @@
,,y,script,./test.sh -f tsim/stream/basic2.sim
,,y,script,./test.sh -f tsim/stream/basic3.sim
,,y,script,./test.sh -f tsim/stream/basic4.sim
,,y,script,./test.sh -f tsim/stream/basic5.sim
,,y,script,./test.sh -f tsim/stream/tag.sim
,,y,script,./test.sh -f tsim/stream/snodeCheck.sim
,,y,script,./test.sh -f tsim/stream/concurrentcheckpt.sim
,,y,script,./test.sh -f tsim/stream/checkpointInterval0.sim

@@ -1658,13 +1672,3 @@
,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/sml_json_alltypes.py
,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/taosdemoTestQueryWithJson.py -R
,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/telnet_tcp.py -R

#docs-examples test
,,n,docs-examples-test,bash c.sh
,,n,docs-examples-test,bash python.sh
,,n,docs-examples-test,bash node.sh
,,n,docs-examples-test,bash csharp.sh
,,n,docs-examples-test,bash jdbc.sh
,,n,docs-examples-test,bash rust.sh
,,n,docs-examples-test,bash go.sh
,,n,docs-examples-test,bash test_R.sh
@@ -77,7 +77,7 @@ md5sum /usr/lib/libtaos.so.1
md5sum /home/TDinternal/debug/build/lib/libtaos.so

#get python connector and update: taospy 2.7.16 taos-ws-py 0.3.5
pip3 install taospy==2.7.16
pip3 install taospy==2.7.21
pip3 install taos-ws-py==0.3.5
$TIMEOUT_CMD $cmd
RET=$?
@@ -0,0 +1,110 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start

sleep 100
sql connect

print step1
print =============== create database
sql create database test vgroups 2;
sql use test;

sql create table st1(ts timestamp, a int, b int , c int, d double) tags(x int);
sql create table t1 using st1 tags(1);
sql create table t2 using st1 tags(2);
sql create stream streams1 trigger at_once IGNORE EXPIRED 1 IGNORE UPDATE 0 WATERMARK 100s into streamt as select _wstart as s, count(*) c1 from st1 where x>=2 interval(60s) ;

run tsim/stream/checkTaskStatus.sim


sql insert into t2 values(1648791213000,0,1,1,1.0);
sql insert into t2 values(1648791213001,9,2,2,1.1);
sql insert into t2 values(1648791213009,0,3,3,1.0);

sql insert into t1 values(1648791223000,0,1,1,1.0);
sql insert into t1 values(1648791223001,9,2,2,1.1);
sql insert into t1 values(1648791223009,0,3,3,1.0);

sleep 300

sql select * from streamt;
if $data01 != 3 then
return -1
endi

sql alter table t1 set tag x=3;

sql insert into t1 values(1648791233000,0,1,1,1.0);
sql insert into t1 values(1648791233001,9,2,2,1.1);
sql insert into t1 values(1648791233009,0,3,3,1.0);

sleep 1000
sql select * from streamt;

if $data01 != 6 then
return -1
endi

sql alter table t1 set tag x=1;
sql alter table t2 set tag x=1;

sql insert into t1 values(1648791243000,0,1,1,1.0);
sql insert into t1 values(1648791243001,9,2,2,1.1);


sql select * from streamt;
if $data01 != 6 then
return -1
endi

#$loop_count = 0
#loop2:
#
#sleep 300
#print 1 sql select * from streamt;
#sql select * from streamt;
#
#print $data00 $data01 $data02 $data03
#print $data10 $data11 $data12 $data13
#
#$loop_count = $loop_count + 1
#if $loop_count == 10 then
# return -1
#endi
#
## row 0
#if $data01 != 3 then
# print ======data01=$data01
# goto loop2
#endi
#
#if $data02 != 6 then
# print ======data02=$data02
# goto loop2
#endi
#
#if $data03 != 3 then
# print ======data03=$data03
# goto loop2
#endi
#
## row 1
#if $data11 != 3 then
# print ======data11=$data11
# goto loop2
#endi
#
#if $data12 != 6 then
# print ======data12=$data12
# goto loop2
#endi
#
#if $data13 != 3 then
# print ======data13=$data13
# goto loop2
#endi
#

print tag end
system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -111,6 +111,10 @@ run tsim/stream/distributeInterval0.sim
run tsim/stream/distributeSession0.sim
run tsim/stream/state0.sim
run tsim/stream/basic2.sim
run tsim/stream/basic3.sim
run tsim/stream/basic4.sim
run tsim/stream/basic5.sim
run tsim/stream/tag.sim
run tsim/stream/concurrentcheckpt.sim
run tsim/insert/basic1.sim
run tsim/insert/commit-merge0.sim
@@ -0,0 +1,54 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import re
from util.log import *
from util.cases import *
from util.sql import *
from util.common import *
from util.sqlset import *

class TDTestCase:
    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())

    def run(self):
        tdSql.execute('create database db vgroups 1')
        tdSql.execute('use db')
        tdSql.execute('create table t1 (ts timestamp, a int, b int)')
        tdSql.execute('insert into t1 values(\'2024-12-27 14:00:00\', 1, 2)')
        tdSql.execute('flush database db')

        tdLog.sleep(5)

        rows = tdSql.query('select * from information_schema.ins_filesets')
        tdSql.checkRows(1)
        tdSql.checkEqual(tdSql.getData(0, 0), 'db')
        tdSql.checkEqual(tdSql.getData(0, 1), 2)
        tdSql.checkEqual(tdSql.getData(0, 2), 2008)
        # tdSql.CheckEqual(str(tdSql.getData(0, 3)), '2024-12-23 08:00:00.000')
        # tdSql.CheckEqual(str(tdSql.getData(0, 4)), '2025-01-02 07:59:59.999')
        # tdSql.CheckEqual(tdSql.getData(0, 6), '1970-01-01 08:00:00.000')
        # tdSql.CheckEqual(tdSql.getData(0, 7), False)

        tdDnodes.stopAll()


    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@@ -0,0 +1,47 @@
import taos
import sys
import datetime
import inspect
import random
from util.dnodes import TDDnode
from util.dnodes import tdDnodes

from util.log import *
from util.sql import *
from util.cases import *

class TDTestCase:
    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor(), True)

        self.testcasePath = os.path.split(__file__)[0]
        self.testcasePath = self.testcasePath.replace('\\', '//')
        self.database = "test_insert_csv_db"
        self.table = "test_insert_csv_tbl"

    def insert_from_csv(self):
        tdSql.execute(f"drop database if exists {self.database}")
        tdSql.execute(f"create database {self.database}")
        tdSql.execute(f"use {self.database}")
        tdSql.execute(f"create table {self.table} (ts timestamp, c1 nchar(16), c2 double, c3 int)")
        tdSql.execute(f"insert into {self.table} file '{self.testcasePath}//test_insert_from_csv.csv'")
        tdSql.query(f"select count(*) from {self.table}")
        tdSql.checkData(0, 0, 5)

    def run(self):
        tdSql.prepare()

        startTime_all = time.time()
        self.insert_from_csv()
        endTime_all = time.time()
        print("total time %ds" % (endTime_all - startTime_all))

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@@ -0,0 +1,5 @@
'2024-12-13 09:30:00.050','ABCDEF68',24.774736842805263,200
'2024-12-13 09:30:00.060','ABCDEF68',24.774736842805263,201
'2024-12-13 09:30:00.080','ABCDEF68',24.774736842805263,202
'2024-12-13 09:30:00.100','ABCDEF68',24.774736842805263,203
'2024-12-13 09:30:00.110','ABCDEF68',24.774736842805263,204
@@ -13,6 +13,7 @@ python3 .\test.py -f 0-others\cachemodel.py
@REM python3 .\test.py -f 0-others\fsync.py

python3 .\test.py -f 1-insert\influxdb_line_taosc_insert.py
python3 .\test.py -f 1-insert\insert_from_csv.py
@REM python3 .\test.py -f 1-insert\opentsdb_telnet_line_taosc_insert.py
@REM python3 .\test.py -f 1-insert\opentsdb_json_taosc_insert.py
@REM #python3 .\test.py -f 1-insert\test_stmt_muti_insert_query.py
@@ -251,7 +251,7 @@ IF(TD_BUILD_KEEPER)
PATCH_COMMAND
COMMAND git clean -f -d
BUILD_COMMAND
COMMAND go build -a -ldflags "-X 'github.com/taosdata/taoskeeper/version.Version=${taos_version}' -X 'github.com/taosdata/taoskeeper/version.CommitID=${taoskeeper_commit_sha1}' -X 'github.com/taosdata/taoskeeper/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'"
COMMAND go build -a -ldflags "-X 'github.com/taosdata/taoskeeper/version.Version=${taos_version}' -X 'github.com/taosdata/taoskeeper/version.Gitinfo=${taoskeeper_commit_sha1}' -X 'github.com/taosdata/taoskeeper/version.CommitID=${taoskeeper_commit_sha1}' -X 'github.com/taosdata/taoskeeper/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'"
INSTALL_COMMAND
COMMAND cmake -E echo "Compressing taoskeeper.exe"
COMMAND cmake -E time upx taoskeeper.exe

@@ -278,7 +278,7 @@ IF(TD_BUILD_KEEPER)
PATCH_COMMAND
COMMAND git clean -f -d
BUILD_COMMAND
COMMAND go build -a -ldflags "-X 'github.com/taosdata/taoskeeper/version.Version=${taos_version}' -X 'github.com/taosdata/taoskeeper/version.CommitID=${taoskeeper_commit_sha1}' -X 'github.com/taosdata/taoskeeper/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'"
COMMAND go build -a -ldflags "-X 'github.com/taosdata/taoskeeper/version.Version=${taos_version}' -X 'github.com/taosdata/taoskeeper/version.Gitinfo=${taoskeeper_commit_sha1}' -X 'github.com/taosdata/taoskeeper/version.CommitID=${taoskeeper_commit_sha1}' -X 'github.com/taosdata/taoskeeper/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'"
INSTALL_COMMAND
COMMAND cmake -E echo "Copy taoskeeper"
COMMAND cmake -E copy taoskeeper ${CMAKE_BINARY_DIR}/build/bin