diff --git a/.github/workflows/tdengine-build.yml b/.github/workflows/tdengine-build.yml index 1fd7831203..017c9d69fa 100644 --- a/.github/workflows/tdengine-build.yml +++ b/.github/workflows/tdengine-build.yml @@ -90,6 +90,16 @@ jobs: which taosadapter which taoskeeper + - name: Statistics ldd + run: | + find ${{ github.workspace }}/debug/build/lib -type f -name "*.so" -print0 | xargs -0 ldd || true + find ${{ github.workspace }}/debug/build/bin -type f -print0 | xargs -0 ldd || true + + - name: Statistics size + run: | + find ${{ github.workspace }}/debug/build/lib -type f -print0 | xargs -0 ls -lhrS + find ${{ github.workspace }}/debug/build/bin -type f -print0 | xargs -0 ls -lhrS + - name: Start taosd run: | cp /etc/taos/taos.cfg ./ diff --git a/.github/workflows/tdengine-test.yml b/.github/workflows/tdengine-test.yml index 436eedd0e2..b981d005c4 100644 --- a/.github/workflows/tdengine-test.yml +++ b/.github/workflows/tdengine-test.yml @@ -12,6 +12,7 @@ on: - 'tools/tdgpt/**' - 'source/libs/executor/src/forecastoperator.c' - 'source/libs/executor/src/anomalywindowoperator.c' + - 'source/dnode/mnode/impl/src/mndAnode.c' - 'include/common/tanalytics.h' - 'source/common/src/tanalytics.c' - 'tests/parallel/tdgpt_cases.task' diff --git a/.github/workflows/tdgpt-test.yml b/.github/workflows/tdgpt-test.yml index 4bdebdad32..0db9cee11d 100644 --- a/.github/workflows/tdgpt-test.yml +++ b/.github/workflows/tdgpt-test.yml @@ -12,6 +12,7 @@ on: - 'tools/tdgpt/**' - 'source/libs/executor/src/forecastoperator.c' - 'source/libs/executor/src/anomalywindowoperator.c' + - 'source/dnode/mnode/impl/src/mndAnode.c' - 'include/common/tanalytics.h' - 'source/common/src/tanalytics.c' - 'tests/parallel/tdgpt_cases.task' diff --git a/.gitignore b/.gitignore index aa1f567bf7..6a7a631e78 100644 --- a/.gitignore +++ b/.gitignore @@ -59,7 +59,6 @@ tools/upx* html/ /.vs /CMakeFiles/3.10.2 -/CMakeCache.txt /Makefile /*.cmake /src/cq/test/CMakeFiles/cqtest.dir/*.cmake diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 8853d068cb..aa9d5e0701 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -361,6 +361,7 @@ def pre_test_build_win() { pip3 install taospy==2.7.21 pip3 install taos-ws-py==0.3.8 xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32 + xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taosnative.dll C:\\Windows\\System32 ''' return 1 } @@ -379,7 +380,9 @@ def run_win_test() { bat ''' echo "windows test ..." xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32 + xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taosnative.dll C:\\Windows\\System32 ls -l C:\\Windows\\System32\\taos.dll + ls -l C:\\Windows\\System32\\taosnative.dll time /t cd %WIN_SYSTEM_TEST_ROOT% echo "testing ..." 
diff --git a/cmake/cmake.define b/cmake/cmake.define index 3770f1f3b0..043a6f6263 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -97,14 +97,10 @@ ELSE() SET(TD_TAOS_TOOLS TRUE) ENDIF() -SET(TAOS_LIB taos) +SET(TAOS_LIB taos) SET(TAOS_LIB_STATIC taos_static) - -IF(${TD_WINDOWS}) - SET(TAOS_LIB_PLATFORM_SPEC taos_static) -ELSE() - SET(TAOS_LIB_PLATFORM_SPEC taos) -ENDIF() +SET(TAOS_NATIVE_LIB taosnative) +SET(TAOS_NATIVE_LIB_STATIC taosnative_static) # build TSZ by default IF("${TSZ_ENABLED}" MATCHES "false") diff --git a/cmake/taosws_CMakeLists.txt.in b/cmake/taosws_CMakeLists.txt.in index b013d45911..820418a452 100644 --- a/cmake/taosws_CMakeLists.txt.in +++ b/cmake/taosws_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taosws-rs ExternalProject_Add(taosws-rs GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git - GIT_TAG main + GIT_TAG 3.0 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/docs/en/05-basic/03-query.md b/docs/en/05-basic/03-query.md index d0aef17f4d..af0267a901 100644 --- a/docs/en/05-basic/03-query.md +++ b/docs/en/05-basic/03-query.md @@ -191,7 +191,7 @@ INTERVAL(interval_val [, interval_offset]) The time window clause includes 3 sub-clauses: -- INTERVAL clause: used to generate windows of equal time periods, where interval_val specifies the size of each time window, and interval_offset specifies; +- INTERVAL clause: used to generate windows of equal time periods, where interval_val specifies the size of each time window, and interval_offset specifies its starting offset. By default, windows begin at Unix time 0 (1970-01-01 00:00:00 UTC). If interval_offset is specified, the windows start from "Unix time 0 + interval_offset"; - SLIDING clause: used to specify the time the window slides forward; - FILL: used to specify the filling mode of data in case of missing data in the window interval. diff --git a/docs/en/07-develop/05-stmt.md b/docs/en/07-develop/05-stmt.md index 16fe156cc3..b63efb903f 100644 --- a/docs/en/07-develop/05-stmt.md +++ b/docs/en/07-develop/05-stmt.md @@ -146,9 +146,19 @@ Not supported ``` + +The example code for binding parameters with stmt2 (TDengine v3.3.5.0 or higher is required) is as follows: + +```c +{{#include docs/examples/c/stmt2_insert_demo.c}} +``` + +The example code for binding parameters with stmt is as follows: + ```c {{#include docs/examples/c/stmt_insert_demo.c}} ``` + Not supported diff --git a/docs/en/08-operation/12-multi.md b/docs/en/08-operation/12-multi.md index 1d0b8ad6cb..e6f3b7d611 100644 --- a/docs/en/08-operation/12-multi.md +++ b/docs/en/08-operation/12-multi.md @@ -55,7 +55,7 @@ When network I/O and other processing resources are not bottlenecks, by optimizi Generally, when TDengine needs to select a mount point from the same level to create a new data file, it uses a round-robin strategy for selection. However, in reality, each disk may have different capacities, or the same capacity but different amounts of data written, leading to an imbalance in available space on each disk. In practice, this may result in selecting a disk with very little remaining space. -To address this issue, starting from 3.1.1.0, a new configuration minDiskFreeSize was introduced. When the available space on a disk is less than or equal to this threshold, that disk will no longer be selected for generating new data files. The unit of this configuration item is bytes, and its value should be greater than 2GB, i.e., mount points with less than 2GB of available space will be skipped. 
+To address this issue, starting from 3.1.1.0, a new configuration minDiskFreeSize was introduced. When the available space on a disk is less than or equal to this threshold, that disk will no longer be selected for generating new data files. The unit of this configuration item is bytes. If its value is set as 2GB, i.e., mount points with less than 2GB of available space will be skipped. Starting from version 3.3.2.0, a new configuration `disable_create_new_file` has been introduced to control the prohibition of generating new files on a certain mount point. The default value is `false`, which means new files can be generated on each mount point by default. diff --git a/docs/en/08-operation/15-sec.md b/docs/en/08-operation/15-sec.md new file mode 100644 index 0000000000..ed7ac529f9 --- /dev/null +++ b/docs/en/08-operation/15-sec.md @@ -0,0 +1,278 @@ +--- +sidebar_label: Security Configuration +title: Security Configuration +toc_max_heading_level: 4 +--- + +import Image from '@theme/IdealImage'; +import imgEcosys from '../assets/tdengine-components-01.png'; + +## Background + +The distributed and multi-component nature of TDengine makes its security configuration a concern in production systems. This document aims to explain the security issues of various TDengine components and different deployment methods, and provide deployment and configuration suggestions to support the security of user data. + +## Components Involved in Security Configuration + +TDengine includes multiple components: + +- `taosd`: Core component. +- `taosc`: Client library. +- `taosAdapter`: REST API and WebSocket service. +- `taosKeeper`: Monitoring service component. +- `taosX`: Data pipeline and backup recovery component. +- `taosxAgent`: Auxiliary component for external data source access. +- `taosExplorer`: Web visualization management interface. + +In addition to TDengine deployment and applications, there are also the following components: + +- Applications that access and use the TDengine database through various connectors. +- External data sources: Other data sources that access TDengine, such as MQTT, OPC, Kafka, etc. + +The relationship between the components is as follows: + +
+<figure>
+<Image img={imgEcosys} alt="TDengine ecosystem"/>
+<figcaption>TDengine ecosystem</figcaption>
+</figure>
+ +## TDengine Security Settings + +### `taosd` + +The `taosd` cluster uses TCP connections based on its own protocol for data exchange, which has low risk, but the transmission process is not encrypted, so there is still some security risk. + +Enabling compression may help with TCP data obfuscation. + +- **compressMsgSize**: Whether to compress RPC messages. Integer, optional: -1: Do not compress any messages; 0: Compress all messages; N (N>0): Only compress messages larger than N bytes. + +To ensure the traceability of database operations, it is recommended to enable the audit function. + +- **audit**: Audit function switch, 0 is off, 1 is on. Default is on. +- **auditInterval**: Reporting interval, in milliseconds. Default is 5000. +- **auditCreateTable**: Whether to enable the audit function for creating sub-tables. 0 is off, 1 is on. Default is on. + +To ensure the security of data files, database encryption can be enabled. + +- **encryptAlgorithm**: Data encryption algorithm. +- **encryptScope**: Data encryption scope. + +Enabling the whitelist can restrict access addresses and further enhance privacy. + +- **enableWhiteList**: Whitelist function switch, 0 is off, 1 is on; default is off. + +### `taosc` + +Users and other components use the native client library (`taosc`) and its own protocol to connect to `taosd`, which has low data security risk, but the transmission process is still not encrypted, so there is some security risk. + +### `taosAdapter` + +`taosAdapter` uses the native client library (`taosc`) and its own protocol to connect to `taosd`, and also supports RPC message compression, so there is no data security issue. + +Applications and other components connect to `taosAdapter` through various language connectors. By default, the connection is based on HTTP 1.1 and is not encrypted. To ensure the security of data transmission between `taosAdapter` and other components, SSL encrypted connections need to be configured. Modify the following configuration in the `/etc/taos/taosadapter.toml` configuration file: + +```toml +[ssl] +enable = true +certFile = "/path/to/certificate-file" +keyFile = "/path/to/private-key" +``` + +Configure HTTPS/SSL access in the connector to complete encrypted access. + +To further enhance security, the whitelist function can be enabled, and configured in `taosd`, which also applies to the `taosAdapter` component. + +### `taosX` + +`taosX` includes REST API and gRPC interfaces, where the gRPC interface is used for `taos-agent` connections. + +- The REST API interface is based on HTTP 1.1 and is not encrypted, posing a security risk. +- The gRPC interface is based on HTTP 2 and is not encrypted, posing a security risk. + +To ensure data security, it is recommended that the `taosX` API interface is limited to internal access only. Modify the following configuration in the `/etc/taos/taosx.toml` configuration file: + +```toml +[serve] +listen = "127.0.0.1:6050" +grpc = "127.0.0.1:6055" +``` + +Starting from TDengine 3.3.6.0, `taosX` supports HTTPS connections. 
Add the following configuration in the `/etc/taos/taosx.toml` file: + +```toml +[serve] +ssl_cert = "/path/to/server.pem" +ssl_key = "/path/to/server.key" +ssl_ca = "/path/to/ca.pem" +``` + +And modify the API address to HTTPS connection in Explorer: + +```toml +# Local connection to taosX API +x_api = "https://127.0.01:6050" +# Public IP or domain address +grpc = "https://public.domain.name:6055" +``` + +### `taosExplorer` + +Similar to the `taosAdapter` component, the `taosExplorer` component provides HTTP services for external access. Modify the following configuration in the `/etc/taos/explorer.toml` configuration file: + +```toml +[ssl] +# SSL certificate file +certificate = "/path/to/ca.file" + +# SSL certificate private key +certificate_key = "/path/to/key.file" +``` + +Then, use HTTPS to access Explorer, such as [https://192.168.12.34](https://192.168.12.34:6060). + +### `taosxAgent` + +After `taosX` enables HTTPS, the `Agent` component and `taosX` use HTTP 2 encrypted connections, using Arrow-Flight RPC for data exchange. The transmission content is in binary format, and only registered `Agent` connections are valid, ensuring data security. + +It is recommended to always enable HTTPS connections for `Agent` services in insecure or public network environments. + +### `taosKeeper` + +`taosKeeper` uses WebSocket connections to communicate with `taosAdapter`, writing monitoring information reported by other components into TDengine. + +The current version of `taosKeeper` has security risks: + +- The monitoring address cannot be restricted to the local machine. By default, it monitors all addresses on port 6043, posing a risk of network attacks. This risk can be ignored when deploying with Docker or Kubernetes without exposing the `taosKeeper` port. +- The configuration file contains plaintext passwords, so the visibility of the configuration file needs to be reduced. In `/etc/taos/taoskeeper.toml`: + +```toml +[tdengine] +host = "localhost" +port = 6041 +username = "root" +password = "taosdata" +usessl = false +``` + +## Security Enhancements + +We recommend using TDengine within a local area network. + +If you must provide access outside the local area network, consider adding the following configurations: + +### Load Balancing + +Use load balancing to provide `taosAdapter` services externally. + +Take Nginx as an example to configure multi-node load balancing: + +```nginx +http { + server { + listen 6041; + + location / { + proxy_pass http://websocket; + # Headers for websocket compatible + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + # Forwarded headers + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-Server $hostname; + proxy_set_header X-Real-IP $remote_addr; + } + } + + upstream websocket { + server 192.168.11.61:6041; + server 192.168.11.62:6041; + server 192.168.11.63:6041; + } +} +``` + +If the `taosAdapter` component is not configured with SSL secure connections, SSL needs to be configured to ensure secure access. SSL can be configured at a higher-level API Gateway or in Nginx; if you have stronger security requirements for the connections between components, you can configure SSL in all components. 
The Nginx configuration is as follows: + +```nginx +http { + server { + listen 443 ssl; + + ssl_certificate /path/to/your/certificate.crt; + ssl_certificate_key /path/to/your/private.key; + } +} +``` + +### Security Gateway + +In modern internet production systems, the use of security gateways is also very common. [traefik](https://traefik.io/) is a good open-source choice. We take traefik as an example to explain the security configuration in the API gateway. + +Traefik provides various security configurations through middleware, including: + +1. Authentication: Traefik provides multiple authentication methods such as BasicAuth, DigestAuth, custom authentication middleware, and OAuth 2.0. +2. IP Whitelist: Restrict the allowed client IPs. +3. Rate Limit: Control the number of requests sent to the service. +4. Custom Headers: Add configurations such as `allowedHosts` through custom headers to improve security. + +A common middleware example is as follows: + +```yaml +labels: + - "traefik.enable=true" + - "traefik.http.routers.tdengine.rule=Host(`api.tdengine.example.com`)" + - "traefik.http.routers.tdengine.entrypoints=https" + - "traefik.http.routers.tdengine.tls.certresolver=default" + - "traefik.http.routers.tdengine.service=tdengine" + - "traefik.http.services.tdengine.loadbalancer.server.port=6041" + - "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https" + - "traefik.http.middlewares.check-header.headers.customrequestheaders.X-Secret-Header=SecretValue" + - "traefik.http.middlewares.check-header.headers.customresponseheaders.X-Header-Check=true" + - "traefik.http.middlewares.tdengine-ipwhitelist.ipwhitelist.sourcerange=127.0.0.1/32, 192.168.1.7" + - "traefik.http.routers.tdengine.middlewares=redirect-to-https,check-header,tdengine-ipwhitelist" +``` + +The above example completes the following configurations: + +- TLS authentication uses the `default` configuration, which can be configured in the configuration file or traefik startup parameters, as follows: + + ```yaml + traefik: + image: "traefik:v2.3.2" + hostname: "traefik" + networks: + - traefik + command: + - "--log.level=INFO" + - "--api.insecure=true" + - "--providers.docker=true" + - "--providers.docker.exposedbydefault=false" + - "--providers.docker.swarmmode=true" + - "--providers.docker.network=traefik" + - "--providers.docker.watch=true" + - "--entrypoints.http.address=:80" + - "--entrypoints.https.address=:443" + - "--certificatesresolvers.default.acme.dnschallenge=true" + - "--certificatesresolvers.default.acme.dnschallenge.provider=alidns" + - "--certificatesresolvers.default.acme.dnschallenge.resolvers=ns1.alidns.com" + - "--certificatesresolvers.default.acme.email=linhehuo@gmail.com" + - "--certificatesresolvers.default.acme.storage=/letsencrypt/acme.json" + ``` + +The above startup parameters configure the `default` TSL certificate resolver and automatic acme authentication (automatic certificate application and renewal). + +- Middleware `redirect-to-https`: Configure redirection from HTTP to HTTPS, forcing the use of secure connections. + + ```yaml + - "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https" + ``` + +- Middleware `check-header`: Configure custom header checks. External access must add custom headers and match header values to prevent unauthorized access. This is a very simple and effective security mechanism when providing API access. +- Middleware `tdengine-ipwhitelist`: Configure IP whitelist. 
Only allow specified IPs to access, using CIDR routing rules for matching, and can set internal and external IP addresses. + +## Summary + +Data security is a key indicator of the TDengine product. These measures are designed to protect TDengine deployments from unauthorized access and data breaches while maintaining performance and functionality. However, the security configuration of TDengine itself is not the only guarantee in production. It is more important to develop solutions that better match customer needs in combination with the user's business system. diff --git a/docs/en/10-third-party/03-visual/02-perspective.md b/docs/en/10-third-party/03-visual/02-perspective.md new file mode 100644 index 0000000000..056abed8f6 --- /dev/null +++ b/docs/en/10-third-party/03-visual/02-perspective.md @@ -0,0 +1,81 @@ +--- +sidebar_label: Perspective +title: Integration With Perspective +toc_max_heading_level: 4 +--- + +Perspective is an open-source and powerful data visualization library developed by [Prospective.co](https://www.perspective.co/). Leveraging the technologies of WebAssembly and Web Workers, it enables interactive real-time data analysis in web applications and provides high-performance visualization capabilities on the browser side. With its help, developers can build dashboards, charts, etc. that update in real time, and users can easily interact with the data, filtering, sorting, and exploring it as needed. It boasts high flexibility, adapting to various data formats and business scenarios. It is also fast, ensuring smooth interaction even when dealing with large-scale data. Moreover, it has excellent usability, allowing both beginners and professional developers to quickly build visualization interfaces. + +In terms of data connection, Perspective, through the Python connector of TDengine, perfectly supports TDengine data sources. It can efficiently retrieve various types of data, such as massive time-series data, from TDengine. Additionally, it offers real-time functions including the display of complex charts, in-depth statistical analysis, and trend prediction, helping users gain insights into the value of the data and providing strong support for decision-making. It is an ideal choice for building applications with high requirements for real-time data visualization and analysis. + +![perspective-architecture](./perspective/prsp_architecture.webp) + +## Prerequisites + +Perform the following installation operations in the Linux system: + +- TDengine is installed and running normally (both Enterprise and Community versions are available). +- taosAdapter is running normally, refer to [taosAdapter Reference](../../../tdengine-reference/components/taosadapter/). +- Python version 3.10 or higher has been installed (if not installed, please refer to [Python Installation](https://docs.python.org/)). +- Download or clone the [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) project. After entering the root directory of the project, run the "install.sh" script to download and install the TDengine client library and related dependencies locally. + +## Visualize data + +**Step 1**, Run the "run.sh" script in the root directory of the [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) project to start the Perspective service. This service will retrieve data from the TDengine database every 300 milliseconds and transmit the data in a streaming form to the web-based `Perspective Viewer`. 
+ +```shell +sh run.sh +``` + +**Step 2**, Start a static web service. Then, access the prsp-viewer.html resource in the browser, and the visualized data can be displayed. + +```python +python -m http.server 8081 +``` + +The effect presented after accessing the web page through the browser is shown in the following figure: + +![perspective-viewer](./perspective/prsp_view.webp) + +## Instructions for use + +### Write Data to TDengine + +The `producer.py` script in the root directory of the [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) project can periodically insert data into the TDengine database with the help of the TDengine Python connector. This script will generate random data and insert it into the database, thus simulating the process of writing real-time data. The specific execution steps are as follows: + +1. Establish a connection to TDengine. +2. Create the `power` database and the `meters` table. +3. Generate random data every 300 milliseconds and write it into the TDengine database. + +For detailed instructions on writing using the Python connector, please refer to [Python Parameter Binding](../../../tdengine-reference/client-libraries/python/#parameter-binding). + +### Load Data from TDengine + +The `perspective_server.py` script in the root directory of the [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) project will start a Perspective server. This server will read data from TDengine and stream the data to a Perspective table via the Tornado WebSocket. + +1. Start a Perspective server. +2. Establish a connection to TDengine. +3. Create a Perspective table (the table structure needs to match the type of the table in the TDengine database). +4. Call the `Tornado.PeriodicCallback` function to start a scheduled task, thereby achieving the update of the data in the Perspective table. The sample code is as follows: + +```python +{{#include docs/examples/perspective/perspective_server.py:perspective_server}} +``` + +### HTML Page Configuration + +The `prsp-viewer.html` file in the root directory of the [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) project embeds the `Perspective Viewer` into the HTML page. It connects to the Perspective server via a WebSocket and displays real-time data according to the chart configuration. + +- Configure the displayed charts and the rules for data analysis. +- Establish a Websocket connection with the Perspective server. +- Import the Perspective library, connect to the Perspective server via a WebSocket, and load the `meters_values` table to display dynamic data. 
+ +```html +{{#include docs/examples/perspective/prsp-viewer.html:perspective_viewer}} +``` + +## Reference Materials + +- [Perspective Docs](https://perspective.finos.org/) +- [TDengine Python Connector](../../../tdengine-reference/client-libraries/python/) +- [TDengine Stream Processing](../../../advanced-features/stream-processing/) diff --git a/docs/en/10-third-party/03-visual/perspective/prsp_architecture.webp b/docs/en/10-third-party/03-visual/perspective/prsp_architecture.webp new file mode 100644 index 0000000000..f94b2572ff Binary files /dev/null and b/docs/en/10-third-party/03-visual/perspective/prsp_architecture.webp differ diff --git a/docs/en/10-third-party/03-visual/perspective/prsp_view.webp b/docs/en/10-third-party/03-visual/perspective/prsp_view.webp new file mode 100644 index 0000000000..8cae8101a1 Binary files /dev/null and b/docs/en/10-third-party/03-visual/perspective/prsp_view.webp differ diff --git a/docs/en/14-reference/01-components/01-taosd.md b/docs/en/14-reference/01-components/01-taosd.md index 35f1bcff8b..92e9763f5e 100644 --- a/docs/en/14-reference/01-components/01-taosd.md +++ b/docs/en/14-reference/01-components/01-taosd.md @@ -170,7 +170,7 @@ The effective value of charset is UTF-8. |tempDir | |Not supported |Specifies the directory for generating temporary files during system operation, default value /tmp| |minimalDataDirGB | |Not supported |Minimum space to be reserved in the time-series data storage directory specified by dataDir, in GB, default value 2| |minimalTmpDirGB | |Not supported |Minimum space to be reserved in the temporary file directory specified by tempDir, in GB, default value 1| -|minDiskFreeSize |After 3.1.1.0|Supported, effective immediately |When the available space on a disk is less than or equal to this threshold, the disk will no longer be selected for generating new data files, unit is bytes, range 52428800-1073741824, default value 52428800; Enterprise parameter| +|minDiskFreeSize |After 3.1.1.0|Supported, effective immediately |When the available space on a disk is less than or equal to this threshold, the disk will no longer be selected for generating new data files, unit is bytes, range 52428800-2199023255552, default value 52428800; Enterprise parameter| |s3MigrateIntervalSec|After 3.3.4.3|Supported, effective immediately |Trigger cycle for automatic upload of local data files to S3, in seconds. Minimum: 600; Maximum: 100000. Default value 3600; Enterprise parameter| |s3MigrateEnabled |After 3.3.4.3|Supported, effective immediately |Whether to automatically perform S3 migration, default value is 0, which means auto S3 migration is off, can be set to 1; Enterprise parameter| |s3Accesskey |After 3.3.4.3|Supported, effective after restart|Colon-separated user SecretId:SecretKey, for example AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E; Enterprise parameter| diff --git a/docs/en/14-reference/02-tools/10-taosbenchmark.md b/docs/en/14-reference/02-tools/10-taosbenchmark.md index 338cc74a3d..a8a977b79a 100644 --- a/docs/en/14-reference/02-tools/10-taosbenchmark.md +++ b/docs/en/14-reference/02-tools/10-taosbenchmark.md @@ -379,6 +379,7 @@ Specify the configuration parameters for tag and data columns in `super_tables` `query_times` specifies the number of times to run the query, numeric type. 
+**Note: From version 3.3.5.6 onward, configuring both `specified_table_query` and `super_table_query` in the same JSON file is no longer supported.**

For other common parameters, see [General Configuration Parameters](#general-configuration-parameters)

@@ -508,6 +509,15 @@ Note: Data types in the taosBenchmark configuration file must be in lowercase to
+<details>
+<summary>queryStb.json</summary>
+
+```json
+{{#include /TDengine/tools/taos-tools/example/queryStb.json}}
+```
+
+</details>
+ #### Subscription Example
diff --git a/docs/en/14-reference/03-taos-sql/01-data-type.md b/docs/en/14-reference/03-taos-sql/01-data-type.md index bce25182d7..57294fb886 100644 --- a/docs/en/14-reference/03-taos-sql/01-data-type.md +++ b/docs/en/14-reference/03-taos-sql/01-data-type.md @@ -43,6 +43,7 @@ In TDengine, the following data types can be used in the data model of basic tab | 16 | VARCHAR | Custom | Alias for BINARY type | | 17 | GEOMETRY | Custom | Geometry type, supported starting from version 3.1.0.0 | | 18 | VARBINARY | Custom | Variable-length binary data, supported starting from version 3.1.1.0 | +| 19 | DECIMAL | 8 or 16 | High-precision numeric type. The range of values depends on the precision and scale specified in the type. Supported starting from version 3.3.6. See the description below. | :::note @@ -61,6 +62,18 @@ In TDengine, the following data types can be used in the data model of basic tab - VARBINARY is a data type for storing binary data, with a maximum length of 65,517 bytes for data columns and 16,382 bytes for label columns. Binary data can be written via SQL or schemaless methods (needs to be converted to a string starting with \x), or through stmt methods (can use binary directly). Displayed as hexadecimal starting with \x. ::: +### DECIMAL Data Type + +The `DECIMAL` data type is used for high-precision numeric storage and is supported starting from version 3.3.6. The definition syntax is: `DECIMAL(18, 2)`, `DECIMAL(38, 10)`, where two parameters must be specified: `precision` and `scale`. `Precision` refers to the maximum number of significant digits supported, and `scale` refers to the maximum number of decimal places. For example, `DECIMAL(8, 4)` represents a range of `[-9999.9999, 9999.9999]`. When defining the `DECIMAL` data type, the range of `precision` is `[1, 38]`, and the range of `scale` is `[0, precision]`. If `scale` is 0, it represents integers only. You can also omit `scale`, in which case it defaults to 0. For example, `DECIMAL(18)` is equivalent to `DECIMAL(18, 0)`. + +When the `precision` value is less than or equal to 18, 8 bytes of storage (DECIMAL64) are used internally. When the `precision` is in the range `(18, 38]`, 16 bytes of storage (DECIMAL) are used. When writing `DECIMAL` type data in SQL, numeric values can be written directly. If the value exceeds the maximum representable value for the type, a `DECIMAL_OVERFLOW` error will be reported. If the value does not exceed the maximum representable value but the number of decimal places exceeds the `scale`, it will be automatically rounded. For example, if the type is defined as `DECIMAL(10, 2)` and the value `10.987` is written, the actual stored value will be `10.99`. + +The `DECIMAL` type only supports regular columns and does not currently support tag columns. The `DECIMAL` type supports SQL-based writes only and does not currently support `stmt` or schemaless writes. + +When performing operations between integer types and the `DECIMAL` type, the integer type is converted to the `DECIMAL` type before the calculation. When the `DECIMAL` type is involved in calculations with `DOUBLE`, `FLOAT`, `VARCHAR`, or `NCHAR` types, it is converted to `DOUBLE` type for computation. + +When querying `DECIMAL` type expressions, if the intermediate result of the calculation exceeds the maximum value that the current type can represent, a `DECIMAL_OVERFLOW` error is reported. 
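A minimal SQL sketch of the rounding rule described above, assuming a hypothetical normal table `readings` in an existing database and TDengine 3.3.6 or later:

```sql
-- DECIMAL(10, 2): at most 10 significant digits, 2 of them after the decimal point
CREATE TABLE readings (ts TIMESTAMP, val DECIMAL(10, 2));

-- 10.987 has more decimal places than the scale of 2, so it is stored as 10.99
INSERT INTO readings VALUES (NOW, 10.987);

SELECT val FROM readings;
```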
+ ## Constants diff --git a/docs/en/14-reference/03-taos-sql/10-function.md b/docs/en/14-reference/03-taos-sql/10-function.md index 403fec8488..43fd49c406 100644 --- a/docs/en/14-reference/03-taos-sql/10-function.md +++ b/docs/en/14-reference/03-taos-sql/10-function.md @@ -1186,6 +1186,7 @@ CAST(expr AS type_name) 1) Invalid character situations when converting string types to numeric types, e.g., "a" might convert to 0, but will not throw an error. 2) When converting to numeric types, if the value exceeds the range that `type_name` can represent, it will overflow, but will not throw an error. 3) When converting to string types, if the converted length exceeds the length specified in `type_name`, it will be truncated, but will not throw an error. +- The DECIMAL type does not support conversion to or from JSON, VARBINARY, or GEOMETRY types. #### TO_ISO8601 @@ -1691,12 +1692,14 @@ AVG(expr) **Function Description**: Calculates the average value of the specified field. -**Return Data Type**: DOUBLE. +**Return Data Type**: DOUBLE, DECIMAL. **Applicable Data Types**: Numeric types. **Applicable to**: Tables and supertables. +**Description**: When the input type is DECIMAL, the output type is also DECIMAL. The precision and scale of the output conform to the rules described in the data type section. The result type is obtained by dividing the SUM type by UINT64. If the SUM result causes a DECIMAL type overflow, a DECIMAL OVERFLOW error is reported. + ### COUNT ```sql @@ -1847,12 +1850,14 @@ SUM(expr) **Function Description**: Calculates the sum of a column in a table/supertable. -**Return Data Type**: DOUBLE, BIGINT. +**Return Data Type**: DOUBLE, BIGINT,DECIMAL. **Applicable Data Types**: Numeric types. **Applicable to**: Tables and supertables. +**Description**: When the input type is DECIMAL, the output type is DECIMAL(38, scale), where precision is the maximum value currently supported, and scale is the scale of the input type. If the SUM result overflows, a DECIMAL OVERFLOW error is reported. + ### HYPERLOGLOG ```sql @@ -2254,6 +2259,7 @@ ignore_null_values: { - INTERP is used to obtain the record value of a specified column at the specified time slice. It has a dedicated syntax (interp_clause) when used. For syntax introduction, see [reference link](../query-data/#interp). - When there is no row data that meets the conditions at the specified time slice, the INTERP function will interpolate according to the settings of the [FILL](../time-series-extensions/#fill-clause) parameter. - When INTERP is applied to a supertable, it will sort all the subtable data under that supertable by primary key column and perform interpolation calculations, and can also be used with PARTITION BY tbname to force the results to a single timeline. +- When using INTERP with FILL PREV/NEXT/NEAR modes, its behavior differs from window queries. If data exists at the slice, no FILL operation will be performed, even if the current value is NULL. - INTERP can be used with the pseudocolumn _irowts to return the timestamp corresponding to the interpolation point (supported from version 3.0.2.0). - INTERP can be used with the pseudocolumn _isfilled to display whether the return result is from the original record or generated by the interpolation algorithm (supported from version 3.0.3.0). - INTERP can only use the pseudocolumn `_irowts_origin` when using FILL PREV/NEXT/NEAR modes. `_irowts_origin` is supported from version 3.3.4.9. 
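A hedged illustration of the INTERP FILL(PREV) note above, assuming the hypothetical child table `d1001` of `meters` with a `current` column: at one-second slices where a row actually exists, that row's value is returned as-is (even if it is NULL), and the previous value is filled only at slices with no row.

```sql
SELECT _irowts, _isfilled, INTERP(current)
FROM d1001
RANGE('2025-01-01 00:00:00', '2025-01-01 00:01:00')
EVERY(1s)
FILL(PREV);
```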
diff --git a/docs/en/14-reference/03-taos-sql/12-distinguished.md b/docs/en/14-reference/03-taos-sql/12-distinguished.md index fb8c4ffe96..bba1e61c53 100644 --- a/docs/en/14-reference/03-taos-sql/12-distinguished.md +++ b/docs/en/14-reference/03-taos-sql/12-distinguished.md @@ -84,10 +84,10 @@ The FILL statement specifies the filling mode when data is missing in a window i 1. No filling: NONE (default filling mode). 2. VALUE filling: Fixed value filling, where the fill value must be specified. For example: FILL(VALUE, 1.23). Note that the final fill value is determined by the type of the corresponding column, such as FILL(VALUE, 1.23), if the corresponding column is of INT type, then the fill value is 1. If multiple columns in the query list need FILL, then each FILL column must specify a VALUE, such as `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`. Note, only ordinary columns in the SELECT expression need to specify FILL VALUE, such as `_wstart`, `_wstart+1a`, `now`, `1+1` and the partition key (like tbname) used with partition by do not need to specify VALUE, like `timediff(last(ts), _wstart)` needs to specify VALUE. -3. PREV filling: Fill data using the previous non-NULL value. For example: FILL(PREV). +3. PREV filling: Fill data using the previous value. For example: FILL(PREV). 4. NULL filling: Fill data with NULL. For example: FILL(NULL). 5. LINEAR filling: Perform linear interpolation filling based on the nearest non-NULL values before and after. For example: FILL(LINEAR). -6. NEXT filling: Fill data using the next non-NULL value. For example: FILL(NEXT). +6. NEXT filling: Fill data using the next value. For example: FILL(NEXT). Among these filling modes, except for the NONE mode which does not fill by default, other modes will be ignored if there is no data in the entire query time range, resulting in no fill data and an empty query result. This behavior is reasonable under some modes (PREV, NEXT, LINEAR) because no data means no fill value can be generated. For other modes (NULL, VALUE), theoretically, fill values can be generated, and whether to output fill values depends on the application's needs. To meet the needs of applications that require forced filling of data or NULL, without breaking the compatibility of existing filling modes, two new filling modes have been added starting from version 3.0.3.0: @@ -112,7 +112,7 @@ The differences between NULL, NULL_F, VALUE, VALUE_F filling modes for different Time windows can be divided into sliding time windows and tumbling time windows. -The INTERVAL clause is used to generate windows of equal time periods, and SLIDING is used to specify the time the window slides forward. Each executed query is a time window, and the time window slides forward as time flows. When defining continuous queries, it is necessary to specify the size of the time window (time window) and the forward sliding times for each execution. As shown, [t0s, t0e], [t1s, t1e], [t2s, t2e] are the time window ranges for three continuous queries, and the sliding time range is indicated by sliding time. Query filtering, aggregation, and other operations are performed independently for each time window. When SLIDING is equal to INTERVAL, the sliding window becomes a tumbling window. +The INTERVAL clause is used to generate windows of equal time periods, and SLIDING is used to specify the time the window slides forward. Each executed query is a time window, and the time window slides forward as time flows. 
When defining continuous queries, it is necessary to specify the size of the time window (time window) and the forward sliding times for each execution. As shown, [t0s, t0e], [t1s, t1e], [t2s, t2e] are the time window ranges for three continuous queries, and the sliding time range is indicated by sliding time. Query filtering, aggregation, and other operations are performed independently for each time window. When SLIDING is equal to INTERVAL, the sliding window becomes a tumbling window. By default, windows begin at Unix time 0 (1970-01-01 00:00:00 UTC). If interval_offset is specified, the windows start from "Unix time 0 + interval_offset".
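A short sketch of the offset and sliding rules described above, assuming the documentation's example supertable `meters`: each window covers 10 minutes, starts 1 minute after the default epoch-aligned boundary, and slides forward every 5 minutes.

```sql
SELECT _wstart, _wend, AVG(current)
FROM meters
WHERE ts >= NOW - 1h
INTERVAL(10m, 1m) SLIDING(5m);
```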
diff --git a/docs/en/14-reference/03-taos-sql/32-compress.md b/docs/en/14-reference/03-taos-sql/32-compress.md index 30b107b632..3a9714c0fe 100644 --- a/docs/en/14-reference/03-taos-sql/32-compress.md +++ b/docs/en/14-reference/03-taos-sql/32-compress.md @@ -36,6 +36,7 @@ In this document, it specifically refers to the internal levels of the second-le | float/double | disabled/delta-d | delta-d | lz4/zlib/zstd/xz/tsz | lz4 | medium | | binary/nchar | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium | | bool | disabled/bit-packing | bit-packing | lz4/zlib/zstd/xz | zstd | medium | +| decimal | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium | ## SQL Syntax diff --git a/docs/en/14-reference/05-connector/10-cpp.md b/docs/en/14-reference/05-connector/10-cpp.md index 52c64e1209..ade5d58963 100644 --- a/docs/en/14-reference/05-connector/10-cpp.md +++ b/docs/en/14-reference/05-connector/10-cpp.md @@ -682,7 +682,7 @@ The basic API is used to establish database connections and provide a runtime en - **Interface Description**: Cleans up the runtime environment, should be called before the application exits. - `int taos_options(TSDB_OPTION option, const void * arg, ...)` - - **Interface Description**: Sets client options, currently supports locale (`TSDB_OPTION_LOCALE`), character set (`TSDB_OPTION_CHARSET`), timezone (`TSDB_OPTION_TIMEZONE`), and configuration file path (`TSDB_OPTION_CONFIGDIR`). Locale, character set, and timezone default to the current settings of the operating system. + - **Interface Description**: Sets client options, currently supports locale (`TSDB_OPTION_LOCALE`), character set (`TSDB_OPTION_CHARSET`), timezone (`TSDB_OPTION_TIMEZONE`), configuration file path (`TSDB_OPTION_CONFIGDIR`), and driver type (`TSDB_OPTION_DRIVER`). Locale, character set, and timezone default to the current settings of the operating system. The driver type can be either the native interface(`native`) or the WebSocket interface(`websocket`), with the default being `websocket`. - **Parameter Description**: - `option`: [Input] Setting item type. - `arg`: [Input] Setting item value. @@ -830,6 +830,12 @@ This section introduces APIs that are all synchronous interfaces. After being ca - res: [Input] Result set. - **Return Value**: Non-`NULL`: successful, returns a pointer to a TAOS_FIELD structure, each element representing the metadata of a column. `NULL`: failure. +- `TAOS_FIELD_E *taos_fetch_fields_e(TAOS_RES *res)` + - **Interface Description**: Retrieves the attributes of each column in the query result set (column name, data type, column length). Used in conjunction with `taos_num_fields()`, it can be used to parse the data of a tuple (a row) returned by `taos_fetch_row()`. In addition to the basic information provided by TAOS_FIELD, TAOS_FIELD_E also includes `precision` and `scale` information for the data type. + - **Parameter Description**: + - res: [Input] Result set. + - **Return Value**: Non-`NULL`: Success, returns a pointer to a TAOS_FIELD_E structure, where each element represents the metadata of a column. `NULL`: Failure. + - `void taos_stop_query(TAOS_RES *res)` - **Interface Description**: Stops the execution of the current query. 
- **Parameter Description**: diff --git a/docs/en/14-reference/05-connector/14-java.md b/docs/en/14-reference/05-connector/14-java.md index 8486f7d4d3..3282c134cf 100644 --- a/docs/en/14-reference/05-connector/14-java.md +++ b/docs/en/14-reference/05-connector/14-java.md @@ -121,6 +121,7 @@ Please refer to the specific error codes: | 0x2378 | consumer create error | Data subscription creation failed, check the error information and taos log for troubleshooting. | | 0x2379 | seek offset must not be a negative number | The seek interface parameter must not be negative, use the correct parameters. | | 0x237a | vGroup not found in result set | VGroup not assigned to the current consumer, due to the Rebalance mechanism causing the Consumer and VGroup to be unbound. | +| 0x2390 | background thread write error in Efficient Writing | In the event of an efficient background thread write error, you can stop writing and rebuild the connection. | - [TDengine Java Connector Error Code](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java) @@ -320,7 +321,15 @@ The configuration parameters in properties are as follows: - TSDBDriver.PROPERTY_KEY_DISABLE_SSL_CERT_VALIDATION: Disable SSL certificate validation. Effective only when using WebSocket connections. true: enabled, false: not enabled. Default is false. - TSDBDriver.PROPERTY_KEY_APP_NAME: App name, can be used for display in the `show connections` query result. Effective only when using WebSocket connections. Default value is java. - TSDBDriver.PROPERTY_KEY_APP_IP: App IP, can be used for display in the `show connections` query result. Effective only when using WebSocket connections. Default value is empty. - + +- TSDBDriver.PROPERTY_KEY_ASYNC_WRITE: Efficient Writing mode. Currently, only the `stmt` method is supported. Effective only when using WebSocket connections. DeDefault value is empty, meaning Efficient Writing mode is not enabled. +- TSDBDriver.PROPERTY_KEY_BACKEND_WRITE_THREAD_NUM: In Efficient Writing mode, this refers to the number of background write threads. Effective only when using WebSocket connections. Default value is 10. +- TSDBDriver.PROPERTY_KEY_BATCH_SIZE_BY_ROW: In Efficient Writing mode, this is the batch size for writing data, measured in rows. Effective only when using WebSocket connections. Default value is 1000. +- TSDBDriver.PROPERTY_KEY_CACHE_SIZE_BY_ROW: In Efficient Writing mode, this is the cache size, measured in rows. Effective only when using WebSocket connections. Default value is 10000. +- TSDBDriver.PROPERTY_KEY_COPY_DATA: In Efficient Writing mode, this determines Whether to copy the binary data passed by the application through the `addBatch` method. Effective only when using WebSocket connections. Default value is false. +- TSDBDriver.PROPERTY_KEY_STRICT_CHECK: In Efficient Writing mode, this determines whether to validate the length of table names and variable-length data types. Effective only when using WebSocket connections. Default value is false. +- TSDBDriver.PROPERTY_KEY_RETRY_TIMES: In Efficient Writing mode, this is the number of retry attempts for failed write operations. Effective only when using WebSocket connections. Default value is 3. + Additionally, for native JDBC connections, other parameters such as log level and SQL length can be specified by specifying the URL and Properties. 
**Priority of Configuration Parameters** diff --git a/docs/en/14-reference/05-connector/35-node.md b/docs/en/14-reference/05-connector/35-node.md index 858426cc3f..5cc599d865 100644 --- a/docs/en/14-reference/05-connector/35-node.md +++ b/docs/en/14-reference/05-connector/35-node.md @@ -25,6 +25,7 @@ Support all platforms that can run Node.js. | Node.js Connector Version | Major Changes | TDengine Version | | ------------------------- | ------------------------------------------------------------------------ | --------------------------- | +| 3.1.5 | Password supports special characters. | - | | 3.1.4 | Modified the readme.| - | | 3.1.3 | Upgraded the es5-ext version to address vulnerabilities in the lower version. | - | | 3.1.2 | Optimized data protocol and parsing, significantly improved performance. | - | diff --git a/docs/en/14-reference/09-error-code.md b/docs/en/14-reference/09-error-code.md index 1aa62fbfce..139f8d38d7 100644 --- a/docs/en/14-reference/09-error-code.md +++ b/docs/en/14-reference/09-error-code.md @@ -41,6 +41,8 @@ This document details the server error codes that may be encountered when using | 0x80000107 | Ref ID is removed | The referenced ref resource has been released | Preserve the scene and logs, report issue on github | | 0x80000108 | Invalid Ref ID | Invalid ref ID | Preserve the scene and logs, report issue on github | | 0x8000010A | Ref is not there | ref information does not exist | Preserve the scene and logs, report issue on github | +| 0x8000010B | Driver was not loaded | libtaosnative.so or libtaosws.so was not found in the system path | Reinstall the client driver | +| 0x8000010C | Function was not loaded from the driver | some function defined in libtaos.so are not implemented in libtaosnative.so or libtaosws.so | Reinstall the client driver | | 0x80000110 | Unexpected generic error | System internal error | Preserve the scene and logs, report issue on github | | 0x80000111 | Action in progress | Operation in progress | 1. Wait for the operation to complete 2. Cancel the operation if necessary 3. If it exceeds a reasonable time and still not completed, preserve the scene and logs, or contact customer support | | 0x80000112 | Out of range | Configuration parameter exceeds allowed value range | Change the parameter | @@ -559,10 +561,13 @@ This document details the server error codes that may be encountered when using ## virtual table -| Error Code | Description | Possible Error Scenarios or Reasons | Recommended Actions for Users | -|------------|---------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------| -| 0x80006200 | Virtual table scan internal error | virtual table scan operator internal error, generally does not occur | Check error logs, contact development for handling | -| 0x80006201 | Virtual table scan invalid downstream operator type | The incorrect execution plan generated causes the downstream operator type of the virtual table scan operator to be incorrect. | Check error logs, contact development for handling | -| 0x80006202 | Virtual table prim timestamp column should not has ref | The timestamp primary key column of a virtual table should not have a data source. If it does, this error will occur during subsequent queries on the virtual table. 
| Check error logs, contact development for handling | -| 0x80006203 | Create virtual child table must use virtual super table | Create virtual child table using non-virtual super table | create virtual child table using virtual super table | -| 0x80006204 | Virtual table not support decimal type | Create virtual table using decimal type | create virtual table without using decimal type | +| Error Code | Description | Possible Error Scenarios or Reasons | Recommended Actions for Users | +|------------|---------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------| +| 0x80006200 | Virtual table scan internal error | virtual table scan operator internal error, generally does not occur | Check error logs, contact development for handling | +| 0x80006201 | Virtual table scan invalid downstream operator type | The incorrect execution plan generated causes the downstream operator type of the virtual table scan operator to be incorrect. | Check error logs, contact development for handling | +| 0x80006202 | Virtual table prim timestamp column should not has ref | The timestamp primary key column of a virtual table should not have a data source. If it does, this error will occur during subsequent queries on the virtual table. | Check error logs, contact development for handling | +| 0x80006203 | Create virtual child table must use virtual super table | Create virtual child table using non-virtual super table | create virtual child table using virtual super table | +| 0x80006204 | Virtual table not support decimal type | Create virtual table using decimal type | create virtual table without using decimal type | +| 0x80006205 | Virtual table not support in STMT query and STMT insert | Use virtual table in stmt query and stmt insert | do not use virtual table in stmt query and insert | +| 0x80006206 | Virtual table not support in Topic | Use virtual table in topic | do not use virtual table in topic | +| 0x80006206 | Virtual super table query not support origin table from different databases | Virtual super table ‘s child table's origin table from different databases | make sure virtual super table's child table's origin table from same database | diff --git a/docs/examples/c/Makefile b/docs/examples/c/Makefile index 9fda575ec6..1b889459c2 100644 --- a/docs/examples/c/Makefile +++ b/docs/examples/c/Makefile @@ -9,6 +9,7 @@ TARGETS = connect_example \ with_reqid_demo \ sml_insert_demo \ stmt_insert_demo \ + stmt2_insert_demo \ tmq_demo SOURCES = connect_example.c \ @@ -18,6 +19,7 @@ SOURCES = connect_example.c \ with_reqid_demo.c \ sml_insert_demo.c \ stmt_insert_demo.c \ + stmt2_insert_demo.c \ tmq_demo.c LIBS = -ltaos -lpthread @@ -31,4 +33,4 @@ $(TARGETS): $(CC) $(CFLAGS) -o $@ $(wildcard $(@F).c) $(LIBS) clean: - rm -f $(TARGETS) \ No newline at end of file + rm -f $(TARGETS) diff --git a/docs/examples/c/stmt2_insert_demo.c b/docs/examples/c/stmt2_insert_demo.c new file mode 100644 index 0000000000..e446c5f631 --- /dev/null +++ b/docs/examples/c/stmt2_insert_demo.c @@ -0,0 +1,204 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +// TAOS standard API example. The same syntax as MySQL, but only a subset +// to compile: gcc -o stmt2_insert_demo stmt2_insert_demo.c -ltaos + +#include +#include +#include +#include +#include "taos.h" + +#define NUM_OF_SUB_TABLES 10 +#define NUM_OF_ROWS 10 + +/** + * @brief Executes an SQL query and checks for errors. + * + * @param taos Pointer to TAOS connection. + * @param sql SQL query string. + */ +void executeSQL(TAOS *taos, const char *sql) { + TAOS_RES *res = taos_query(taos, sql); + int code = taos_errno(res); + if (code != 0) { + fprintf(stderr, "Error: %s\n", taos_errstr(res)); + taos_free_result(res); + taos_close(taos); + exit(EXIT_FAILURE); + } + taos_free_result(res); +} + +/** + * @brief Checks return status and exits if an error occurs. + * + * @param stmt2 Pointer to TAOS_STMT2. + * @param code Error code. + * @param msg Error message prefix. + */ +void checkErrorCode(TAOS_STMT2 *stmt2, int code, const char *msg) { + if (code != 0) { + fprintf(stderr, "%s. Code: %d, Error: %s\n", msg, code, taos_stmt2_error(stmt2)); + taos_stmt2_close(stmt2); + exit(EXIT_FAILURE); + } +} + +/** + * @brief Prepares data bindings for batch insertion. + * + * @param table_name Pointer to store allocated table names. + * @param tags Pointer to store allocated tag bindings. + * @param params Pointer to store allocated parameter bindings. + */ +void prepareBindData(char ***table_name, TAOS_STMT2_BIND ***tags, TAOS_STMT2_BIND ***params) { + *table_name = (char **)malloc(NUM_OF_SUB_TABLES * sizeof(char *)); + *tags = (TAOS_STMT2_BIND **)malloc(NUM_OF_SUB_TABLES * sizeof(TAOS_STMT2_BIND *)); + *params = (TAOS_STMT2_BIND **)malloc(NUM_OF_SUB_TABLES * sizeof(TAOS_STMT2_BIND *)); + + for (int i = 0; i < NUM_OF_SUB_TABLES; i++) { + // Allocate and assign table name + (*table_name)[i] = (char *)malloc(20 * sizeof(char)); + sprintf((*table_name)[i], "d_bind_%d", i); + + // Allocate memory for tags data + int *gid = (int *)malloc(sizeof(int)); + int *gid_len = (int *)malloc(sizeof(int)); + *gid = i; + *gid_len = sizeof(int); + + char *location = (char *)malloc(20 * sizeof(char)); + int *location_len = (int *)malloc(sizeof(int)); + *location_len = sprintf(location, "location_%d", i); + + (*tags)[i] = (TAOS_STMT2_BIND *)malloc(2 * sizeof(TAOS_STMT2_BIND)); + (*tags)[i][0] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_INT, gid, gid_len, NULL, 1}; + (*tags)[i][1] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_BINARY, location, location_len, NULL, 1}; + + // Allocate memory for columns data + (*params)[i] = (TAOS_STMT2_BIND *)malloc(4 * sizeof(TAOS_STMT2_BIND)); + + int64_t *ts = (int64_t *)malloc(NUM_OF_ROWS * sizeof(int64_t)); + float *current = (float *)malloc(NUM_OF_ROWS * sizeof(float)); + int *voltage = (int *)malloc(NUM_OF_ROWS * sizeof(int)); + float *phase = (float *)malloc(NUM_OF_ROWS * sizeof(float)); + int32_t *ts_len = (int32_t *)malloc(NUM_OF_ROWS * sizeof(int32_t)); + int32_t *current_len = (int32_t *)malloc(NUM_OF_ROWS * sizeof(int32_t)); + int32_t *voltage_len = (int32_t *)malloc(NUM_OF_ROWS * sizeof(int32_t)); + int32_t *phase_len = (int32_t *)malloc(NUM_OF_ROWS * sizeof(int32_t)); + + (*params)[i][0] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_TIMESTAMP, ts, ts_len, NULL, NUM_OF_ROWS}; + 
(*params)[i][1] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_FLOAT, current, current_len, NULL, NUM_OF_ROWS}; + (*params)[i][2] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_INT, voltage, voltage_len, NULL, NUM_OF_ROWS}; + (*params)[i][3] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_FLOAT, phase, phase_len, NULL, NUM_OF_ROWS}; + + for (int j = 0; j < NUM_OF_ROWS; j++) { + struct timeval tv; + gettimeofday(&tv, NULL); + ts[j] = tv.tv_sec * 1000LL + tv.tv_usec / 1000 + j; + current[j] = (float)rand() / RAND_MAX * 30; + voltage[j] = rand() % 300; + phase[j] = (float)rand() / RAND_MAX; + + ts_len[j] = sizeof(int64_t); + current_len[j] = sizeof(float); + voltage_len[j] = sizeof(int); + phase_len[j] = sizeof(float); + } + } +} + +/** + * @brief Frees allocated memory for binding data. + * + * @param table_name Pointer to allocated table names. + * @param tags Pointer to allocated tag bindings. + * @param params Pointer to allocated parameter bindings. + */ +void freeBindData(char ***table_name, TAOS_STMT2_BIND ***tags, TAOS_STMT2_BIND ***params) { + for (int i = 0; i < NUM_OF_SUB_TABLES; i++) { + free((*table_name)[i]); + for (int j = 0; j < 2; j++) { + free((*tags)[i][j].buffer); + free((*tags)[i][j].length); + } + free((*tags)[i]); + + for (int j = 0; j < 4; j++) { + free((*params)[i][j].buffer); + free((*params)[i][j].length); + } + free((*params)[i]); + } + free(*table_name); + free(*tags); + free(*params); +} + +/** + * @brief Inserts data using the TAOS stmt2 API. + * + * @param taos Pointer to TAOS connection. + */ +void insertData(TAOS *taos) { + TAOS_STMT2_OPTION option = {0, false, false, NULL, NULL}; + TAOS_STMT2 *stmt2 = taos_stmt2_init(taos, &option); + if (!stmt2) { + fprintf(stderr, "Failed to initialize TAOS statement.\n"); + exit(EXIT_FAILURE); + } + // stmt2 prepare sql + checkErrorCode(stmt2, taos_stmt2_prepare(stmt2, "INSERT INTO ? USING meters TAGS(?,?) 
VALUES (?,?,?,?)", 0), + "Statement preparation failed"); + + char **table_name; + TAOS_STMT2_BIND **tags, **params; + prepareBindData(&table_name, &tags, ¶ms); + // stmt2 bind batch + TAOS_STMT2_BINDV bindv = {NUM_OF_SUB_TABLES, table_name, tags, params}; + checkErrorCode(stmt2, taos_stmt2_bind_param(stmt2, &bindv, -1), "Parameter binding failed"); + // stmt2 exec batch + int affected; + checkErrorCode(stmt2, taos_stmt2_exec(stmt2, &affected), "Execution failed"); + printf("Successfully inserted %d rows.\n", affected); + // free and close + freeBindData(&table_name, &tags, ¶ms); + taos_stmt2_close(stmt2); +} + +int main() { + const char *host = "localhost"; + const char *user = "root"; + const char *password = "taosdata"; + uint16_t port = 6030; + TAOS *taos = taos_connect(host, user, password, NULL, port); + if (taos == NULL) { + fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), + taos_errstr(NULL)); + taos_cleanup(); + exit(EXIT_FAILURE); + } + // create database and table + executeSQL(taos, "CREATE DATABASE IF NOT EXISTS power"); + executeSQL(taos, "USE power"); + executeSQL(taos, + "CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS " + "(groupId INT, location BINARY(24))"); + insertData(taos); + taos_close(taos); + taos_cleanup(); +} \ No newline at end of file diff --git a/docs/examples/node/package.json b/docs/examples/node/package.json index d77c96fbb3..ea5d4bf84c 100644 --- a/docs/examples/node/package.json +++ b/docs/examples/node/package.json @@ -4,7 +4,7 @@ "main": "index.js", "license": "MIT", "dependencies": { - "@tdengine/websocket": "^3.1.2" + "@tdengine/websocket": "^3.1.5" }, "scripts": { "test": "echo \"Error: no test specified\" && exit 1" diff --git a/docs/examples/node/websocketexample/tmq_example.js b/docs/examples/node/websocketexample/tmq_example.js index 4ea3db55c3..72b4c9940e 100644 --- a/docs/examples/node/websocketexample/tmq_example.js +++ b/docs/examples/node/websocketexample/tmq_example.js @@ -1,4 +1,3 @@ -const { sleep } = require("@tdengine/websocket"); const taos = require("@tdengine/websocket"); // ANCHOR: create_consumer @@ -52,6 +51,12 @@ async function prepare() { await wsSql.close(); } +const delay = function(ms) { + return new Promise(function(resolve) { + setTimeout(resolve, ms); + }); +}; + async function insert() { let conf = new taos.WSConfig('ws://localhost:6041'); conf.setUser('root'); @@ -60,7 +65,7 @@ async function insert() { let wsSql = await taos.sqlConnect(conf); for (let i = 0; i < 50; i++) { await wsSql.exec(`INSERT INTO d1001 USING ${stable} (location, groupId) TAGS ("California.SanFrancisco", 3) VALUES (NOW, ${10 + i}, ${200 + i}, ${0.32 + i})`); - await sleep(100); + await delay(100); } await wsSql.close(); } diff --git a/docs/examples/node/websocketexample/tmq_seek_example.js b/docs/examples/node/websocketexample/tmq_seek_example.js index 67286f34b1..d68b038043 100644 --- a/docs/examples/node/websocketexample/tmq_seek_example.js +++ b/docs/examples/node/websocketexample/tmq_seek_example.js @@ -1,4 +1,3 @@ -const { sleep } = require("@tdengine/websocket"); const taos = require("@tdengine/websocket"); const db = 'power'; diff --git a/docs/examples/perspective/perspective_server.py b/docs/examples/perspective/perspective_server.py new file mode 100644 index 0000000000..919828379e --- /dev/null +++ b/docs/examples/perspective/perspective_server.py @@ -0,0 +1,207 @@ +# 
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +# ┃ ██████ ██████ ██████ █ █ █ █ █ █▄ ▀███ █ ┃ +# ┃ ▄▄▄▄▄█ █▄▄▄▄▄ ▄▄▄▄▄█ ▀▀▀▀▀█▀▀▀▀▀ █ ▀▀▀▀▀█ ████████▌▐███ ███▄ ▀█ █ ▀▀▀▀▀ ┃ +# ┃ █▀▀▀▀▀ █▀▀▀▀▀ █▀██▀▀ ▄▄▄▄▄ █ ▄▄▄▄▄█ ▄▄▄▄▄█ ████████▌▐███ █████▄ █ ▄▄▄▄▄ ┃ +# ┃ █ ██████ █ ▀█▄ █ ██████ █ ███▌▐███ ███████▄ █ ┃ +# ┣━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┫ +# ┃ Copyright (c) 2017, the Perspective Authors. ┃ +# ┃ ╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌ ┃ +# ┃ This file is part of the Perspective library, distributed under the terms ┃ +# ┃ of the [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0). ┃ +# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ + +import logging +import tornado.websocket +import tornado.web +import tornado.ioloop +from datetime import date, datetime +import perspective +import perspective.handlers.tornado +import json +import taosws + + +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') +logger = logging.getLogger('main') + + +# ============================================================================= +# TDengine connection parameters +# ============================================================================= +TAOS_HOST = "localhost" # TDengine server host +TAOS_PORT = 6041 # TDengine server port +TAOS_USER = "root" # TDengine username +TAOS_PASSWORD = "taosdata" # TDengine password + +TAOS_DATABASE = "power" # TDengine database name +TAOS_TABLENAME = "meters" # TDengine table name + +# ============================================================================= +# Perspective server parameters +# ============================================================================= +PERSPECTIVE_TABLE_NAME = "meters_values" # name of the Perspective table +PERSPECTIVE_REFRESH_RATE = 250 # refresh rate in milliseconds + + +class CustomJSONEncoder(json.JSONEncoder): + """ + Custom JSON encoder that serializes datetime and date objects + """ + def default(self, obj): + if isinstance(obj, datetime): + return obj.isoformat() + elif isinstance(obj, date): + return obj.isoformat() + return super().default(obj) + + +json.JSONEncoder.default = CustomJSONEncoder().default + + +def convert_ts(ts) -> datetime: + """ + Convert a timestamp string to a datetime object + """ + for fmt in ('%Y-%m-%d %H:%M:%S.%f %z', '%Y-%m-%d %H:%M:%S %z'): + try: + return datetime.strptime(ts, fmt) + except ValueError: + continue + raise ValueError(f"Time data '{ts}' does not match any format") + + +def create_tdengine_connection( + host: str = TAOS_HOST, + port: int = TAOS_PORT, + user: str = TAOS_USER, + password: str = TAOS_PASSWORD, + ) -> taosws.Connection: + try: + # connect to the tdengine server + conn = taosws.connect( + user=user, + password=password, + host=host, + port=port, + ) + # switch to the right database + conn.execute(f"USE {TAOS_DATABASE}") + # connection successful + logger.info(f"Connected to tdengine successfully: {host}:{port}") + return conn + except Exception as err: + logger.error(f"Failed to connect to tdengine: {host}:{port} -- ErrMessage: {err}") + raise err + + +def read_tdengine( + conn: taosws.Connection, + ) -> list[dict]: + try: + # query the database + sql = f""" + SELECT `ts`, location, groupid, current, voltage, phase + FROM {TAOS_TABLENAME} + WHERE `ts` >= NOW() - 12h + ORDER BY `ts` DESC + LIMIT 1000 + """ + logger.debug(f"Executing query: {sql}") + res = conn.query(sql) + data = [ + 
{ + "timestamp": convert_ts(row[0]), + "location": row[1], + "groupid": row[2], + "current": row[3], + "voltage": row[4], + "phase": row[5], + } + for row in res + ] + logger.info(f"select result: {data}") + return data + except Exception as err: + logger.error(f"Failed to query tdengine: {err}") + raise err + + +// ANCHOR: perspective_server +def perspective_thread(perspective_server: perspective.Server, tdengine_conn: taosws.Connection): + """ + Create a new Perspective table and update it with new data every 50ms + """ + # create a new Perspective table + client = perspective_server.new_local_client() + schema = { + "timestamp": datetime, + "location": str, + "groupid": int, + "current": float, + "voltage": int, + "phase": float, + } + # define the table schema + table = client.table( + schema, + limit=1000, # maximum number of rows in the table + name=PERSPECTIVE_TABLE_NAME, # table name. Use this with perspective-viewer on the client side + ) + logger.info("Created new Perspective table") + + # update with new data + def updater(): + data = read_tdengine(tdengine_conn) + table.update(data) + logger.debug(f"Updated Perspective table: {len(data)} rows") + + logger.info(f"Starting tornado ioloop update loop every {PERSPECTIVE_REFRESH_RATE} milliseconds") + # start the periodic callback to update the table data + callback = tornado.ioloop.PeriodicCallback(callback=updater, callback_time=PERSPECTIVE_REFRESH_RATE) + callback.start() + +// ANCHOR_END: perspective_server + +def make_app(perspective_server): + """ + Create a new Tornado application with a websocket handler that + serves a Perspective table. PerspectiveTornadoHandler handles + the websocket connection and streams the Perspective table changes + to the client. + """ + return tornado.web.Application([ + ( + r"/websocket", # websocket endpoint. Use this URL to configure the websocket client OR Prospective Server adapter + perspective.handlers.tornado.PerspectiveTornadoHandler, # PerspectiveTornadoHandler handles perspective table updates <-> websocket client + {"perspective_server": perspective_server}, # pass the perspective server to the handler + ), + ]) + + +if __name__ == "__main__": + logger.info("TDEngine <-> Perspective Demo") + + # create a new Perspective server + logger.info("Creating new Perspective server") + perspective_server = perspective.Server() + # create the tdengine connection + logger.info("Creating new TDEngine connection") + tdengine_conn = create_tdengine_connection() + + # setup and start the Tornado app + logger.info("Creating Tornado server") + app = make_app(perspective_server) + app.listen(8085, address='0.0.0.0') + logger.info("Listening on http://localhost:8080") + + try: + # start the io loop + logger.info("Starting ioloop to update Perspective table data via tornado websocket...") + loop = tornado.ioloop.IOLoop.current() + loop.call_later(0, perspective_thread, perspective_server, tdengine_conn) + loop.start() + except KeyboardInterrupt: + logger.warning("Keyboard interrupt detected. Shutting down tornado server...") + loop.stop() + loop.close() + logging.info("Shut down") diff --git a/docs/examples/perspective/prsp-viewer.html b/docs/examples/perspective/prsp-viewer.html new file mode 100644 index 0000000000..e6b1a6e734 --- /dev/null +++ b/docs/examples/perspective/prsp-viewer.html @@ -0,0 +1,135 @@ + + + + + + + Perspective Viewer Dashboard + + + + + + + + + + + +// ANCHOR: perspective_viewer + + + +
+// ANCHOR_END: perspective_viewer + + + \ No newline at end of file diff --git a/docs/zh/05-basic/03-query.md b/docs/zh/05-basic/03-query.md index 52b825c47c..1634035411 100644 --- a/docs/zh/05-basic/03-query.md +++ b/docs/zh/05-basic/03-query.md @@ -182,7 +182,7 @@ INTERVAL(interval_val [, interval_offset]) ``` 时间窗口子句包括 3 个子句: -- INTERVAL 子句:用于产生相等时间周期的窗口,interval_val 指定每个时间窗口的大小,interval_offset 指定窗口偏移量; +- INTERVAL 子句:用于产生相等时间周期的窗口,interval_val 指定每个时间窗口的大小,interval_offset 指定窗口偏移量;默认情况下,窗口是从 Unix time 0(1970-01-01 00:00:00 UTC)开始划分的;如果设置了 interval_offset,那么窗口的划分将从 “Unix time 0 + interval_offset” 开始; - SLIDING 子句:用于指定窗口向前滑动的时间; - FILL:用于指定窗口区间数据缺失的情况下,数据的填充模式。 @@ -688,4 +688,4 @@ select a.* from meters a left asof join meters b on timetruncate(a.ts, 1s) < tim 查询结果顺序的限制包括如下这些。 - 普通表、子表、subquery 且无分组条件无排序的场景下,查询结果会按照驱动表的主键列顺序输出。 -- 由于超级表查询、Full Join 或有分组条件无排序的场景下,查询结果没有固定的输出顺序,因此,在有排序需求且输出无固定顺序的场景下,需要进行排序操作。部分依赖时间线的函数可能会因为没有有效的时间线输出而无法执行。 \ No newline at end of file +- 由于超级表查询、Full Join 或有分组条件无排序的场景下,查询结果没有固定的输出顺序,因此,在有排序需求且输出无固定顺序的场景下,需要进行排序操作。部分依赖时间线的函数可能会因为没有有效的时间线输出而无法执行。 diff --git a/docs/zh/07-develop/05-stmt.md b/docs/zh/07-develop/05-stmt.md index 56ed706bf2..29909f0aa1 100644 --- a/docs/zh/07-develop/05-stmt.md +++ b/docs/zh/07-develop/05-stmt.md @@ -141,9 +141,20 @@ stmt 绑定参数的示例代码如下: ``` + +stmt2 绑定参数的示例代码如下(需要 TDengine v3.3.5.0 及以上): + +```c +{{#include docs/examples/c/stmt2_insert_demo.c}} +``` + +stmt 绑定参数的示例代码如下: + ```c {{#include docs/examples/c/stmt_insert_demo.c}} ``` + + 不支持 diff --git a/docs/zh/08-operation/12-multi.md b/docs/zh/08-operation/12-multi.md index 1a12225862..5c35aa12ea 100644 --- a/docs/zh/08-operation/12-multi.md +++ b/docs/zh/08-operation/12-multi.md @@ -56,7 +56,7 @@ dataDir /mnt/data6 2 0 一般情况下,当 TDengine 要从同级挂载点中选择一个用于生成新的数据文件时,采用 round robin 策略进行选择。但现实中有可能每个磁盘的容量不相同,或者容量相同但写入的数据量不相同,这就导致会出现每个磁盘上的可用空间不均衡,在实际进行选择时有可能会选择到一个剩余空间已经很小的磁盘。 -为了解决这个问题,从 3.1.1.0 开始引入了一个新的配置 minDiskFreeSize,当某块磁盘上的可用空间小于等于这个阈值时,该磁盘将不再被选择用于生成新的数据文件。该配置项的单位为字节,其值应该大于 2GB,即会跳过可用空间小于 2GB 的挂载点。 +为了解决这个问题,从 3.1.1.0 开始引入了一个新的配置 minDiskFreeSize,当某块磁盘上的可用空间小于等于这个阈值时,该磁盘将不再被选择用于生成新的数据文件。该配置项的单位为字节,若配置值大于 2GB,则会跳过可用空间小于 2GB 的挂载点。 从 3.3.2.0 版本开始,引入了一个新的配置 disable_create_new_file,用于控制在某个挂载点上禁止生成新文件,其缺省值为 false,即每个挂载点上默认都可以生成新文件。 diff --git a/docs/zh/08-operation/15-sec.md b/docs/zh/08-operation/15-sec.md new file mode 100644 index 0000000000..5277d418f9 --- /dev/null +++ b/docs/zh/08-operation/15-sec.md @@ -0,0 +1,274 @@ +--- +sidebar_label: 安全配置 +title: 安全配置 +toc_max_heading_level: 4 +--- + +## 背景 + +TDengine 的分布式、多组件特性导致 TDengine 的安全配置是生产系统中比较关注的问题。本文档旨在对 TDengine 各组件及在不同部署方式下的安全问题进行说明,并提供部署和配置建议,为用户的数据安全提供支持。 + +## 安全配置涉及组件 + +TDengine 包含多个组件,有: + +- `taosd`: 内核组件。 +- `taosc`: 客户端库。 +- `taosAdapter`: REST API 和 WebSocket 服务。 +- `taosKeeper`:监控服务组件。 +- `taosX`:数据管道和备份恢复组件。 +- `taosxAgent`:外部数据源数据接入辅助组件。 +- `taosExplorer`:Web 可视化管理界面。 + +与 TDengine 部署和应用相关,还会存在以下组件: + +- 通过各种连接器接入并使用 TDengine 数据库的应用。 +- 外部数据源:指接入 TDengine 的其他数据源,如 MQTT、OPC、Kafka 等。 + +各组件关系如下: + +![TDengine 产品生态拓扑架构](./tdengine-topology.png) + +关于各组件的详细介绍,请参考 [组件介绍](./intro)。 + +## TDengine 安全设置 + +### `taosd` + +taosd 集群间使用 TCP 连接基于自有协议进行数据交换,风险较低,但传输过程不是加密的,仍有一定安全风险。 + +启用压缩可能对 TCP 数据混淆有帮助。 + +- **compressMsgSize**:是否对 RPC 消息进行压缩,整数,可选:-1:所有消息都不压缩;0:所有消息都压缩;N (N>0):只有大于 N 个字节的消息才压缩。 + +为了保证数据库操作可追溯,建议启用审计功能。 + +- **audit**:审计功能开关,0 为关,1 为开。默认打开。 +- **auditInterval**:上报间隔,单位为毫秒。默认 5000。 +- **auditCreateTable**:是否针对创建子表开启申计功能。 0 为关,1 为开。默认打开。 + +为保证数据文件安全,可启用数据库加密。 + +- **encryptAlgorithm**:数据加密算法。 +- **encryptScope**:数据加密范围。 
+ +启用白名单可限制访问地址,进一步增强私密性。 + +- **enableWhiteList**:白名单功能开关,0 为关, 1 为开;默认关闭。 + +### `taosc` + +用户和其他组件与 `taosd` 之间使用原生客户端库(taosc)和自有协议进行连接,数据安全风险较低,但传输过程仍然不是加密的,有一定安全风险。 + +### `taosAdapter` + +taosadapter 与 taosd 之间使用原生客户端库(taosc)和自有协议进行连接,同样支持 RPC 消息压缩,不会造成数据安全问题。 + +应用和其他组件通过各语言连接器与 taosadapter 进行连接。默认情况下,连接是基于 HTTP 1.1 且不加密的。要保证 taosadapter 与其他组件之间的数据传输安全,需要配置 SSL 加密连接。在 `/etc/taos/taosadapter.toml` 配置文件中修改如下配置: + +```toml +[ssl] +enable = true +certFile = "/path/to/certificate-file" +keyFile = "/path/to/private-key" +``` + +在连接器中配置 HTTPS/SSL 访问方式,完成加密访问。 + +为进一步增强安全性,可启用白名单功能,在 `taosd` 中配置,对 taosdapter 组件同样生效。 + +### `taosX` + +`taosX` 对外包括 REST API 接口和 gRPC 接口,其中 gRPC 接口用于 taos-agent 连接。 + +- REST API 接口是基于 HTTP 1.1 且不加密的,有安全风险。 +- gRPC 接口基于 HTTP 2 且不加密,有安全风险 。 + +为了保证数据安全,建议 taosX API 接口仅限内部访问。在 `/etc/taos/taosx.toml` 配置文件中修改如下配置: + +```toml +[serve] +listen = "127.0.0.1:6050" +grpc = "127.0.0.1:6055" +``` + +从 TDengine 3.3.6.0 开始,taosX 支持 HTTPS 连接,在 `/etc/taos/taosx.toml` 文件中添加如下配置: + +```toml +[serve] +ssl_cert = "/path/to/server.pem" +ssl_key = "/path/to/server.key" +ssl_ca = "/path/to/ca.pem" +``` + +并在 Explorer 中修改 API 地址为 HTTPS 连接: + +```toml +# taosX API 本地连接 +x_api = "https://127.0.01:6050" +# Public IP 或者域名地址 +grpc = "https://public.domain.name:6055" +``` + +### `taosExplorer` + +与 `taosAdapter` 组件相似,`taosExplorer` 组件提供 HTTP 服务对外访问。在 `/etc/taos/explorer.toml` 配置文件中修改如下配置: + +```toml +[ssl] +# SSL certificate file +certificate = "/path/to/ca.file" + +# SSL certificate private key +certificate_key = "/path/to/key.file" +``` + +之后,使用 HTTPS 进行 Explorer 访问,如 [https://192.168.12.34](https://192.168.12.34:6060) 。 + +### `taosxAgent` + +taosX 启用 HTTPS 后,Agent 组件与 taosx 之间使用 HTTP 2 加密连接,使用 Arrow-Flight RPC 进行数据交换,传输内容是二进制格式,且仅注册过的 Agent 连接有效,保障数据安全。 + +建议在不安全网络或公共网络环境下的 Agent 服务,始终开启 HTTPS 连接。 + +### `taosKeeper` + +taosKeeper 使用 WebSocket 连接与 taosadpater 通信,将其他组件上报的监控信息写入 TDengine。 + +`taosKeeper` 当前版本存在安全风险: + +- 监控地址不可限制在本机,默认监控 所有地址的 6043 端口,存在网络攻击风险。使用 Docker 或 Kubernetes 部署不暴露 taosKeeper 端口时,此风险可忽略。 +- 配置文件中配置明文密码,需要降低配置文件可见性。在 `/etc/taos/taoskeeper.toml` 中存在: + +```toml +[tdengine] +host = "localhost" +port = 6041 +username = "root" +password = "taosdata" +usessl = false +``` + +## 安全增强 + +我们建议使用在局域网内部使用 TDengine。 + +如果必须在局域网外部提供访问,请考虑添加以下配置: + +### 负载均衡 + +使用负载均衡对外提供 taosAdapter 服务。 + +以 Nginx 为例,配置多节点负载均衡: + +```nginx +http { + server { + listen 6041; + + location / { + proxy_pass http://websocket; + # Headers for websocket compatible + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + # Forwarded headers + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-Server $hostname; + proxy_set_header X-Real-IP $remote_addr; + } + } + + upstream websocket { + server 192.168.11.61:6041; + server 192.168.11.62:6041; + server 192.168.11.63:6041; + } +} +``` + +如果 taosAdapter 组件未配置 SSL 安全连接,还需要配置 SSL 才能保证安全访问。SSL 可以配置在更上层的 API Gateway,也可以配置在 Nginx 中;如果你对各组件之间的安全性有更强的要求,您可以在所有组件中都配置 SSL。Nginx 配置如下: + +```nginx +http { + server { + listen 443 ssl; + + ssl_certificate /path/to/your/certificate.crt; + ssl_certificate_key /path/to/your/private.key; + } +} +``` + +### 安全网关 + +在现在互联网生产系统中,安全网关使用也很普遍。[traefik](https://traefik.io/) 是一个很好的开源选择,我们以 traefik 为例,解释在 API 网关中的安全配置。 + +Traefik 中通过 middleware 中间件提供多种安全配置,包括: + +1. 
认证(Authentication):Traefik 提供 BasicAuth、DigestAuth、自定义认证中间件、OAuth 2.0 等多种认证方式。 +2. IP 白名单(IPWhitelist):限制允许访问的客户端 IP。 +3. 频率限制(RateLimit):控制发送到服务的请求数。 +4. 自定义 Headers:通过自定义 Headers 添加 `allowedHosts` 等配置,提高安全性。 + +一个常见的中间件示例如下: + +```yaml +labels: + - "traefik.enable=true" + - "traefik.http.routers.tdengine.rule=Host(`api.tdengine.example.com`)" + - "traefik.http.routers.tdengine.entrypoints=https" + - "traefik.http.routers.tdengine.tls.certresolver=default" + - "traefik.http.routers.tdengine.service=tdengine" + - "traefik.http.services.tdengine.loadbalancer.server.port=6041" + - "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https" + - "traefik.http.middlewares.check-header.headers.customrequestheaders.X-Secret-Header=SecretValue" + - "traefik.http.middlewares.check-header.headers.customresponseheaders.X-Header-Check=true" + - "traefik.http.middlewares.tdengine-ipwhitelist.ipwhitelist.sourcerange=127.0.0.1/32, 192.168.1.7" + - "traefik.http.routers.tdengine.middlewares=redirect-to-https,check-header,tdengine-ipwhitelist" +``` + +上面的示例完成以下配置: + +- TLS 认证使用 `default` 配置,这个配置可使用配置文件或 traefik 启动参数中配置,如下: + + ```yaml + traefik: + image: "traefik:v2.3.2" + hostname: "traefik" + networks: + - traefik + command: + - "--log.level=INFO" + - "--api.insecure=true" + - "--providers.docker=true" + - "--providers.docker.exposedbydefault=false" + - "--providers.docker.swarmmode=true" + - "--providers.docker.network=traefik" + - "--providers.docker.watch=true" + - "--entrypoints.http.address=:80" + - "--entrypoints.https.address=:443" + - "--certificatesresolvers.default.acme.dnschallenge=true" + - "--certificatesresolvers.default.acme.dnschallenge.provider=alidns" + - "--certificatesresolvers.default.acme.dnschallenge.resolvers=ns1.alidns.com" + - "--certificatesresolvers.default.acme.email=linhehuo@gmail.com" + - "--certificatesresolvers.default.acme.storage=/letsencrypt/acme.json" + ``` + +上面的启动参数配置了 `default` TSL 证书解析器和自动 acme 认证(自动证书申请和延期)。 + +- 中间件 `redirect-to-https`:配置从 HTTP 到 HTTPS 的转发,强制使用安全连接。 + + ```yaml + - "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https" + ``` + +- 中间件 `check-header`:配置自定义 Headers 检查。外部访问必须添加自定义 Header 并匹配 Header 值,避免非法访问。这在提供 API 访问时是一个非常简单有效的安全机制。 +- 中间件 `tdengine-ipwhitelist`:配置 IP 白名单。仅允许指定 IP 访问,使用 CIDR 路由规则进行匹配,可以设置内网及外网 IP 地址。 + +## 总结 + +数据安全是 TDengine 产品的一项关键指标,这些措施旨在保护 TDengine 部署免受未经授权的访问和数据泄露,同时保持性能和功能。但 TDengine 自身的安全配置不是生产中的唯一保障,结合用户业务系统制定更加匹配客户需求的解决方案更加重要。 diff --git a/docs/zh/10-third-party/03-visual/02-perspective.mdx b/docs/zh/10-third-party/03-visual/02-perspective.mdx new file mode 100644 index 0000000000..1db8474570 --- /dev/null +++ b/docs/zh/10-third-party/03-visual/02-perspective.mdx @@ -0,0 +1,86 @@ +--- +sidebar_label: Perspective +title: 与 Perspective 集成 +toc_max_heading_level: 4 +--- + +## 概述 + +Perspective 是一款开源且强大的数据可视化库,由 [Prospective.co](https://www.perspective.co/) 开发,运用 `WebAssembly` 和 `Web Workers` 技术,在 Web 应用中实现交互式实时数据分析,能在浏览器端提供高性能可视化能力。借助它,开发者可构建实时更新的仪表盘、图表等,用户能轻松与数据交互,按需求筛选、排序及挖掘数据。其灵活性高,适配多种数据格式与业务场景;速度快,处理大规模数据也能保障交互流畅;易用性佳,新手和专业开发者都能快速搭建可视化界面。 + +在数据连接方面,Perspective 通过 TDengine 的 Python 连接器,完美支持 TDengine 数据源,可高效获取其中海量时序数据等各类数据,并提供展示复杂图表、深度统计分析和趋势预测等实时功能,助力用户洞察数据价值,为决策提供有力支持,是构建对实时数据可视化和分析要求高的应用的理想选择。 + + +![perspective-architecture](./perspective/prsp_architecture.webp) + +## 前置条件 + +在 Linux 系统中进行如下安装操作: + +- TDengine 服务已部署并正常运行(企业及社区版均可)。 +- taosAdapter 能够正常运行,详细参考 [taosAdapter 使用手册](../../../reference/components/taosadapter)。 +- Python 3.10 及以上版本已安装(如未安装,可参考 [Python 
安装](https://docs.python.org/)。 +- 下载或克隆 [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) 项目,进入项目根目录后运行 “install.sh” 脚本,以便在本地下载并安装 TDengine 客户端库以及相关的依赖项。 + +## 可视化数据 + +**第 1 步**,运行 [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) 项目根目录中的 “run.sh” 脚本,以此启动 Perspective 服务。该服务会每隔 300 毫秒从 TDengine 数据库中获取一次数据,并将数据以流的形式传输至基于 Web 的 `Perspective Viewer` 。 + +```shell +sh run.sh +``` + +**第 2 步**,启动一个静态 Web 服务,随后在浏览器中访问 `prsp-viewer.html` 资源,便能展示可视化数据。 + +```python +python -m http.server 8081 +``` + +通过浏览器访问该 Web 页面后所呈现出的效果如下图所示: + +![perspective-viewer](./perspective/prsp_view.webp) + +## 使用说明 + +### 写入数据 + +[perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) 项目根目录中的 `producer.py` 脚本,借助 TDengine Python 连接器,可定期向 TDengine 数据库插入数据。此脚本会生成随机数据并将其插入数据库,以此模拟实时数据的写入过程。具体执行步骤如下: + +1. 建立与 TDengine 的连接。 +1. 创建 power 数据库和 meters 表。 +1. 每隔 300 毫秒生成一次随机数据,并写入 TDengine 数据库中。 + +Python 连接器详细写入说明可参见 [Python 参数绑定](../../../reference/connector/python/#参数绑定)。 + +### 加载数据 + +[perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) 项目根目录中的 `perspective_server.py` 脚本会启动一个 Perspective 服务器,该服务器会从 TDengine 读取数据,并通过 Tornado WebSocket 将数据流式传输到一个 Perspective 表中。 + +1. 启动一个 Perspective 服务器 +1. 建立与 TDengine 的连接。 +1. 创建一个 Perspective 表(表结构需要与 TDengine 数据库中表的类型保持匹配)。 +1. 调用 `Tornado.PeriodicCallback` 函数来启动定时任务,进而实现对 Perspective 表数据的更新,示例代码如下: + +```python +{{#include docs/examples/perspective/perspective_server.py:perspective_server}} +``` + +### HTML 页面配置 + +[perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) 项目根目录中的 `prsp-viewer.html`文件将 `Perspective Viewer` 嵌入到 HTML 页面中。它通过 WebSocket 连接到 Perspective 服务器,并根据图表配置显示实时数据。 + +- 配置展示的图表以及数据分析的规则。 +- 与 Perspective 服务器建立 Websocket 连接。 +- 引入 Perspective 库,通过 WebSocket 连接到 Perspective 服务器,加载 meters_values 表来展示动态数据。 + +```html +{{#include docs/examples/perspective/prsp-viewer.html:perspective_viewer}} +``` + +## 参考资料 + +- [Perspective 文档](https://perspective.finos.org/) +- [TDengine Python 连接器](../../../reference/connector/python) +- [TDengine 流计算](../../../advanced/stream/) + + diff --git a/docs/zh/10-third-party/03-visual/perspective/prsp_architecture.webp b/docs/zh/10-third-party/03-visual/perspective/prsp_architecture.webp new file mode 100644 index 0000000000..f94b2572ff Binary files /dev/null and b/docs/zh/10-third-party/03-visual/perspective/prsp_architecture.webp differ diff --git a/docs/zh/10-third-party/03-visual/perspective/prsp_view.webp b/docs/zh/10-third-party/03-visual/perspective/prsp_view.webp new file mode 100644 index 0000000000..8cae8101a1 Binary files /dev/null and b/docs/zh/10-third-party/03-visual/perspective/prsp_view.webp differ diff --git a/docs/zh/14-reference/01-components/01-taosd.md b/docs/zh/14-reference/01-components/01-taosd.md index a6177807f5..5cf2d555d4 100644 --- a/docs/zh/14-reference/01-components/01-taosd.md +++ b/docs/zh/14-reference/01-components/01-taosd.md @@ -582,9 +582,9 @@ charset 的有效值是 UTF-8。 - 说明:当某块磁盘上的可用空间小于等于这个阈值时,该磁盘将不再被选择用于生成新的数据文件。 **`企业版参数`** - 类型:整数 - 单位:byte -- 默认值:52428800 -- 最小值:52428800 -- 最大值:1073741824 +- 默认值:52428800 (50MB) +- 最小值:52428800 (50MB) +- 最大值:2199023255552 (2TB) - 动态修改:支持通过 SQL 修改,立即生效。 - 支持版本:从 v3.1.0.0 版本开始引入 diff --git a/docs/zh/14-reference/02-tools/10-taosbenchmark.md b/docs/zh/14-reference/02-tools/10-taosbenchmark.md index 95ae3fa803..92bf9da02a 100644 --- a/docs/zh/14-reference/02-tools/10-taosbenchmark.md +++ b/docs/zh/14-reference/02-tools/10-taosbenchmark.md 
@@ -290,6 +290,8 @@ taosBenchmark -f 其它通用参数详见 [通用配置参数](#通用配置参数)。 +**说明:从 v3.3.5.6 及以上版本不再支持 json 文件中同时配置 `specified_table_query` 和 `super_table_query`** + #### 执行指定查询语句 查询指定表(可以指定超级表、子表或普通表)的配置参数在 `specified_table_query` 中设置。 @@ -416,6 +418,15 @@ taosBenchmark -f
+
+queryStb.json
+
+```json
+{{#include /TDengine/tools/taos-tools/example/queryStb.json}}
+```
+
+
+ ### 订阅 JSON 示例
diff --git a/docs/zh/14-reference/03-taos-sql/01-data-type.md b/docs/zh/14-reference/03-taos-sql/01-data-type.md index 2aa3756c78..ac0883e2ef 100644 --- a/docs/zh/14-reference/03-taos-sql/01-data-type.md +++ b/docs/zh/14-reference/03-taos-sql/01-data-type.md @@ -44,6 +44,7 @@ CREATE DATABASE db_name PRECISION 'ns'; | 16 | VARCHAR | 自定义 | BINARY 类型的别名 | | 17 | GEOMETRY | 自定义 | 几何类型,3.1.0.0 版本开始支持 | 18 | VARBINARY | 自定义 | 可变长的二进制数据, 3.1.1.0 版本开始支持| +| 19 | DECIMAL | 8或16 | 高精度数值类型, 取值范围取决于类型中指定的precision和scale, 自3.3.6开始支持, 见下文描述| :::note @@ -63,6 +64,18 @@ CREATE DATABASE db_name PRECISION 'ns'; ::: +### DECIMAL数据类型 +`DECIMAL`数据类型用于高精度数值存储, 自版本3.3.6开始支持, 定义语法: DECIMAL(18, 2), DECIMAL(38, 10), 其中需要指定两个参数, 分别为`precision`和`scale`. `precision`是指最大支持的有效数字个数, `scale`是指最大支持的小数位数. 如DECIMAL(8, 4), 可表示范围即[-9999.9999, 9999.9999]. 定义DECIMAL数据类型时, `precision`范围为: [1,38], scale的范围为: [0,precision], scale为0时, 仅表示整数. 也可以不指定scale, 默认为0, 如DECIMAL(18), 与DECIMAL(18,0)相同。 + +当`precision`值不大于18时, 内部使用8字节存储(DECIMAL64), 当precision范围为(18, 38]时, 使用16字节存储(DECIMAL). SQL中写入DECIMAL类型数据时, 可直接使用数值写入, 当写入值大于类型可表示的最大值时会报DECIMAL_OVERFLOW错误, 当未大于类型表示的最大值, 但小数位数超过SCALE时, 会自动四舍五入处理, 如定义类型DECIMAL(10, 2), 写入10.987, 则实际存储值为10.99。 + +DECIMAL类型仅支持普通列, 暂不支持tag列. DECIMAL类型只支持SQL写入, 暂不支持stmt写入和schemeless写入。 + +整数类型和DECIMAL类型操作时, 会将整数类型转换为DECIMAL类型再进行计算. DECIMAL类型与DOUBLE/FLOAT/VARCHAR/NCHAR等类型计算时, 转换为DOUBLE类型进行计算. + +查询DECIMAL类型表达式时, 若计算的中间结果超出当前类型可表示的最大值时, 报DECIMAL OVERFLOW错误. + + ## 常量 TDengine 支持多个类型的常量,细节如下表: diff --git a/docs/zh/14-reference/03-taos-sql/10-function.md b/docs/zh/14-reference/03-taos-sql/10-function.md index a3360b04be..85d9434e04 100644 --- a/docs/zh/14-reference/03-taos-sql/10-function.md +++ b/docs/zh/14-reference/03-taos-sql/10-function.md @@ -1137,6 +1137,7 @@ CAST(expr AS type_name) - 字符串类型转换数值类型时可能出现的无效字符情况,例如 "a" 可能转为 0,但不会报错。 - 转换到数值类型时,数值大于 type_name 可表示的范围时,则会溢出,但不会报错。 - 转换到字符串类型时,如果转换后长度超过 type_name 中指定的长度,则会截断,但不会报错。 +- DECIMAL类型不支持与JSON,VARBINARY,GEOMERTY类型的互转. #### TO_CHAR @@ -1618,12 +1619,14 @@ AVG(expr) **功能说明**:统计指定字段的平均值。 -**返回数据类型**:DOUBLE。 +**返回数据类型**:DOUBLE, DECIMAL。 **适用数据类型**:数值类型。 **适用于**:表和超级表。 +**说明**: 当输入类型为DECIMAL类型时, 输出类型也为DECIMAL类型, 输出的precision和scale大小符合数据类型章节中的描述规则, 通过计算SUM类型和UINT64的除法得到结果类型, 若SUM的结果导致DECIMAL类型溢出, 则报DECIMAL OVERFLOW错误。 + ### COUNT ```sql @@ -1805,12 +1808,14 @@ SUM(expr) **功能说明**:统计表/超级表中某列的和。 -**返回数据类型**:DOUBLE、BIGINT。 +**返回数据类型**:DOUBLE、BIGINT,DECIMAL。 **适用数据类型**:数值类型。 **适用于**:表和超级表。 +**说明**: 输入类型为DECIMAL类型时, 输出类型为DECIMAL(38, scale), precision为当前支持的最大值, scale为输入类型的scale, 若SUM的结果溢出时, 报DECIMAL OVERFLOW错误. + ### VAR_POP ```sql @@ -2174,6 +2179,7 @@ ignore_null_values: { - INTERP 用于在指定时间断面获取指定列的记录值,使用时有专用语法(interp_clause),语法介绍[参考链接](../select/#interp) 。 - 当指定时间断面不存在符合条件的行数据时,INTERP 函数会根据 [FILL](../distinguished/#fill-子句) 参数的设定进行插值。 - INTERP 作用于超级表时,会将该超级表下的所有子表数据按照主键列排序后进行插值计算,也可以搭配 PARTITION BY tbname 使用,将结果强制规约到单个时间线。 +- INTERP在FILL PREV/NEXT/NEAR时, 行为与窗口查询有所区别, 当截面存在数据时, 不会进行FILL, 即便当前值为NULL. 
- INTERP 可以与伪列 `_irowts` 一起使用,返回插值点所对应的时间戳(v3.0.2.0 以后支持)。 - INTERP 可以与伪列 `_isfilled` 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(v3.0.3.0 以后支持)。 - 只有在使用 FILL PREV/NEXT/NEAR 模式时才可以使用伪列 `_irowts_origin`, 用于返回 `interp` 函数所使用的原始数据的时间戳列。若范围内无值, 则返回 NULL。`_irowts_origin` 在 v3.3.4.9 以后支持。 diff --git a/docs/zh/14-reference/03-taos-sql/12-distinguished.md b/docs/zh/14-reference/03-taos-sql/12-distinguished.md index 34959996c2..4c3ad17da4 100644 --- a/docs/zh/14-reference/03-taos-sql/12-distinguished.md +++ b/docs/zh/14-reference/03-taos-sql/12-distinguished.md @@ -77,10 +77,10 @@ FILL 语句指定某一窗口区间数据缺失的情况下的填充模式。填 1. 不进行填充:NONE(默认填充模式)。 2. VALUE 填充:固定值填充,此时需要指定填充的数值。例如 `FILL(VALUE, 1.23)`。这里需要注意,最终填充的值受由相应列的类型决定,如 `FILL(VALUE, 1.23)`,相应列为 INT 类型,则填充值为 1,若查询列表中有多列需要 FILL,则需要给每一个 FILL 列指定 VALUE,如 `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`,注意,SELECT 表达式中只有包含普通列时才需要指定 FILL VALUE,如 `_wstart`、`_wstart+1a`、`now`、`1+1` 以及使用 `partition by` 时的 `partition key` (如 tbname)都不需要指定 VALUE,如 `timediff(last(ts), _wstart)` 则需要指定 VALUE。 -3. PREV 填充:使用前一个非 NULL 值填充数据。例如 FILL(PREV)。 +3. PREV 填充:使用前一个值填充数据。例如 FILL(PREV)。 4. NULL 填充:使用 NULL 填充数据。例如 FILL(NULL)。 5. LINEAR 填充:根据前后距离最近的非 NULL 值做线性插值填充。例如 FILL(LINEAR)。 -6. NEXT 填充:使用下一个非 NULL 值填充数据。例如 FILL(NEXT)。 +6. NEXT 填充:使用下一个值填充数据。例如 FILL(NEXT)。 以上填充模式中,除了 NONE 模式默认不填充值之外,其他模式在查询的整个时间范围内如果没有数据 FILL 子句将被忽略,即不产生填充数据,查询结果为空。这种行为在部分模式(PREV、NEXT、LINEAR)下具有合理性,因为在这些模式下没有数据意味着无法产生填充数值。而对另外一些模式(NULL、VALUE)来说,理论上是可以产生填充数值的,至于需不需要输出填充数值,取决于应用的需求。所以为了满足这类需要强制填充数据或 NULL 的应用的需求,同时不破坏现有填充模式的行为兼容性,从 v3.0.3.0 开始,增加了两种新的填充模式: @@ -104,7 +104,7 @@ NULL、NULL_F、VALUE、 VALUE_F 这几种填充模式针对不同场景区别 时间窗口又可分为滑动时间窗口和翻转时间窗口。 -INTERVAL 子句用于产生相等时间周期的窗口,SLIDING 用以指定窗口向前滑动的时间。每次执行的查询是一个时间窗口,时间窗口随着时间流动向前滑动。在定义连续查询的时候需要指定时间窗口(time window )大小和每次前向增量时间(forward sliding times)。如图,[t0s, t0e] ,[t1s , t1e],[t2s, t2e] 是分别是执行三次连续查询的时间窗口范围,窗口的前向滑动的时间范围 sliding time 标识 。查询过滤、聚合等操作按照每个时间窗口为独立的单位执行。当 SLIDING 与 INTERVAL 相等的时候,滑动窗口即为翻转窗口。 +INTERVAL 子句用于产生相等时间周期的窗口,SLIDING 用以指定窗口向前滑动的时间。每次执行的查询是一个时间窗口,时间窗口随着时间流动向前滑动。在定义连续查询的时候需要指定时间窗口(time window )大小和每次前向增量时间(forward sliding times)。如图,[t0s, t0e] ,[t1s , t1e],[t2s, t2e] 是分别是执行三次连续查询的时间窗口范围,窗口的前向滑动的时间范围 sliding time 标识 。查询过滤、聚合等操作按照每个时间窗口为独立的单位执行。当 SLIDING 与 INTERVAL 相等的时候,滑动窗口即为翻转窗口。默认情况下,窗口是从 Unix time 0(1970-01-01 00:00:00 UTC)开始划分的;如果设置了 interval_offset,那么窗口的划分将从 “Unix time 0 + interval_offset” 开始。 ![TDengine Database 时间窗口示意图](./timewindow-1.webp) diff --git a/docs/zh/14-reference/03-taos-sql/32-compress.md b/docs/zh/14-reference/03-taos-sql/32-compress.md index 43322a3727..b977d06c2e 100644 --- a/docs/zh/14-reference/03-taos-sql/32-compress.md +++ b/docs/zh/14-reference/03-taos-sql/32-compress.md @@ -37,6 +37,7 @@ description: 可配置压缩算法 | float/double | disabled/delta-d | delta-d | lz4/zlib/zstd/xz/tsz | lz4 | medium | | binary/nchar | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium | | bool | disabled/bit-packing | bit-packing | lz4/zlib/zstd/xz | zstd | medium | +| decimal | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium | ## SQL 语法 diff --git a/docs/zh/14-reference/05-connector/10-cpp.mdx b/docs/zh/14-reference/05-connector/10-cpp.mdx index 2f8431cc46..977c259b49 100644 --- a/docs/zh/14-reference/05-connector/10-cpp.mdx +++ b/docs/zh/14-reference/05-connector/10-cpp.mdx @@ -680,7 +680,7 @@ TDengine 客户端驱动的版本号与 TDengine 服务端的版本号是一一 - **接口说明**:清理运行环境,应用退出前应调用。 - `int taos_options(TSDB_OPTION option, const void * arg, ...)` - - 
**接口说明**:设置客户端选项,支持区域设置(`TSDB_OPTION_LOCALE`)、字符集设置(`TSDB_OPTION_CHARSET`)、时区设置(`TSDB_OPTION_TIMEZONE`)、配置文件路径设置(`TSDB_OPTION_CONFIGDIR`)。区域设置、字符集、时区默认为操作系统当前设置。 + - **接口说明**:设置客户端选项,支持区域设置(`TSDB_OPTION_LOCALE`)、字符集设置(`TSDB_OPTION_CHARSET`)、时区设置(`TSDB_OPTION_TIMEZONE`)、配置文件路径设置(`TSDB_OPTION_CONFIGDIR`)、驱动类型设置(`TSDB_OPTION_DRIVER`)。区域设置、字符集、时区默认为操作系统当前设置。驱动类型可选内部原生接口(`native`)和 WebSocket 接口(`websocket`),默认为 `websocket`。 - **参数说明**: - `option`:[入参] 设置项类型。 - `arg`:[入参] 设置项值。 @@ -826,6 +826,12 @@ TDengine 客户端驱动的版本号与 TDengine 服务端的版本号是一一 - res:[入参] 结果集。 - **返回值**:非 `NULL`:成功,返回一个指向 TAOS_FIELD 结构体的指针,每个元素代表一列的元数据。`NULL`:失败。 +- `TAOS_FIELD_E *taos_fetch_fields_e(TAOS_RES *res)` + - **接口说明**:获取查询结果集每列数据的属性(列的名称、列的数据类型、列的长度),与 `taos_num_fields()` 配合使用,可用来解析 `taos_fetch_row()` 返回的一个元组(一行)的数据。TAOS_FIELD_E中除了TAOS_FIELD的基本信息外, 还包括了类型的`precision`和`scale`信息。 + - **参数说明**: + - res:[入参] 结果集。 + - **返回值**:非 `NULL`:成功,返回一个指向 TAOS_FIELD_E 结构体的指针,每个元素代表一列的元数据。`NULL`:失败。 + - `void taos_stop_query(TAOS_RES *res)` - **接口说明**:停止当前查询的执行。 - **参数说明**: diff --git a/docs/zh/14-reference/05-connector/14-java.mdx b/docs/zh/14-reference/05-connector/14-java.mdx index d4138249fc..a7378c2c8c 100644 --- a/docs/zh/14-reference/05-connector/14-java.mdx +++ b/docs/zh/14-reference/05-connector/14-java.mdx @@ -121,6 +121,7 @@ JDBC 连接器可能报错的错误码包括 4 种: | 0x2378 | consumer create error | 创建数据订阅失败,请根据错误信息检查 taos log 进行问题定位。 | | 0x2379 | seek offset must not be a negative number | seek 接口参数不能为负值,请使用正确的参数 | | 0x237a | vGroup not found in result set | VGroup 没有分配给当前 consumer,由于 Rebalance 机制导致 Consumer 与 VGroup 不是绑定的关系 | +| 0x2390 | background thread write error in Efficient Writing | 高效写入后台线程写入错误,可以停止写入,重建连接 | - [TDengine Java Connector Error Code](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java) @@ -315,7 +316,15 @@ properties 中的配置参数如下: - TSDBDriver.PROPERTY_KEY_DISABLE_SSL_CERT_VALIDATION:关闭 SSL 证书验证 。仅在使用 WebSocket 连接时生效。true:启用,false:不启用。默认为 false。 - TSDBDriver.PROPERTY_KEY_APP_NAME:App 名称,可用于 `show connections` 查询结果显示。仅在使用 WebSocket 连接时生效。默认值为 java。 -- TSDBDriver.PROPERTY_KEY_APP_IP:App IP,可用于 `show connections` 查询结果显示。仅在使用 WebSocket 连接时生效。默认值为空。 +- TSDBDriver.PROPERTY_KEY_APP_IP:App IP,可用于 `show connections` 查询结果显示。仅在使用 WebSocket 连接时生效。默认值为空。 + +- TSDBDriver.PROPERTY_KEY_ASYNC_WRITE:高效写入模式,目前仅支持 `stmt` 方式。仅在使用 WebSocket 连接时生效。默认值为空,即不启用高效写入模式。 +- TSDBDriver.PROPERTY_KEY_BACKEND_WRITE_THREAD_NUM:高效写入模式下,后台写入线程数。仅在使用 WebSocket 连接时生效。默认值为 10。 +- TSDBDriver.PROPERTY_KEY_BATCH_SIZE_BY_ROW:高效写入模式下,写入数据的批大小,单位是行。仅在使用 WebSocket 连接时生效。默认值为 1000。 +- TSDBDriver.PROPERTY_KEY_CACHE_SIZE_BY_ROW:高效写入模式下,缓存的大小,单位是行。仅在使用 WebSocket 连接时生效。默认值为 10000。 +- TSDBDriver.PROPERTY_KEY_COPY_DATA:高效写入模式下,是否拷贝应用通过 addBatch 传入的二进制类型数据。仅在使用 WebSocket 连接时生效。默认值为 false。 +- TSDBDriver.PROPERTY_KEY_STRICT_CHECK:高效写入模式下,是否校验表名长度和变长数据类型长度。仅在使用 WebSocket 连接时生效。默认值为 false。 +- TSDBDriver.PROPERTY_KEY_RETRY_TIMES:高效写入模式下,写入失败重试次数。仅在使用 WebSocket 连接时生效。默认值为 3。 此外对 JDBC 原生连接,通过指定 URL 和 Properties 还可以指定其他参数,比如日志级别、SQL 长度等。 diff --git a/docs/zh/14-reference/05-connector/35-node.mdx b/docs/zh/14-reference/05-connector/35-node.mdx index 235476da2b..0eb0d99277 100644 --- a/docs/zh/14-reference/05-connector/35-node.mdx +++ b/docs/zh/14-reference/05-connector/35-node.mdx @@ -24,6 +24,7 @@ Node.js 连接器源码托管在 [GitHub](https://github.com/taosdata/taos-conne | Node.js 连接器 版本 | 主要变化 | TDengine 版本 | | ------------------| ----------------------| ----------------| +| 3.1.5 | 密码支持特殊字符 | - | | 3.1.4 | 修改 readme | - | | 3.1.3 | 升级了 
es5-ext 版本,解决低版本的漏洞 | - | | 3.1.2 | 对数据协议和解析进行了优化,性能得到大幅提升| - | diff --git a/docs/zh/14-reference/09-error-code.md b/docs/zh/14-reference/09-error-code.md index 4cf853283e..7a87e1ddfb 100644 --- a/docs/zh/14-reference/09-error-code.md +++ b/docs/zh/14-reference/09-error-code.md @@ -44,6 +44,8 @@ description: TDengine 服务端的错误码列表和详细说明 | 0x80000107 | Ref ID is removed | 引用的 ref 资源已经释放 | 保留现场和日志,github 上报 issue | | 0x80000108 | Invalid Ref ID | 无效 ref ID | 保留现场和日志,github 上报 issue | | 0x8000010A | Ref is not there | ref 信息不存在 | 保留现场和日志,github 上报 issue | +| 0x8000010B | Driver was not loaded | 未在系统路径中找到 libtaosnative.so 或 libtaosws.so | 重新安装客户端驱动 | +| 0x8000010C | Function was not loaded from the driver | 在 libtaos.so 中定义的一些函数在 libtaosnative.so 或 libtaosws.so 中未实现 | 保留现场和日志,github 上报 issue | | 0x80000110 | Unexpected generic error | 系统内部错误 | 保留现场和日志,github 上报 issue | | 0x80000111 | Action in progress | 操作进行中 | 1.等待操作完成 2.根据需要取消操作 3.当超出合理时间仍然未完成可保留现场和日志,或联系客户支持 | | 0x80000112 | Out of range | 配置参数超出允许值范围 | 更改参数 | @@ -578,11 +580,14 @@ description: TDengine 服务端的错误码列表和详细说明 ## virtual table -| 错误码 | 错误描述 | 可能的出错场景或者可能的原因 | 建议用户采取的措施 | -|------------|---------------------------------------------------------|------------------------------------------------|----------------------------| -| 0x80006200 | Virtual table scan 算子内部错误 | virtual table scan 算子内部逻辑错误,一般不会出现 | 具体查看client端的错误日志提示 | -| 0x80006201 | Virtual table scan invalid downstream operator type | 由于生成的执行计划不对,导致 virtual table scan 算子的下游算子类型不正确 | 保留 explain 执行计划,联系开发处理 | -| 0x80006202 | Virtual table prim timestamp column should not has ref | 虚拟表的时间戳主键列不应该有数据源,如果有,后续查询虚拟表的时候就会出现该错误 | 检查错误日志,联系开发处理 | -| 0x80006203 | Create virtual child table must use virtual super table | 虚拟子表必须建在虚拟超级表下,否则就会出现该错误 | 创建虚拟子表的时候,USING 虚拟超级表 | +| 错误码 | 错误描述 | 可能的出错场景或者可能的原因 | 建议用户采取的措施 | +|------------|---------------------------------------------------------|------------------------------------------------|-------------------------| +| 0x80006200 | Virtual table scan 算子内部错误 | virtual table scan 算子内部逻辑错误,一般不会出现 | 具体查看client端的错误日志提示 | +| 0x80006201 | Virtual table scan invalid downstream operator type | 由于生成的执行计划不对,导致 virtual table scan 算子的下游算子类型不正确 | 保留 explain 执行计划,联系开发处理 | +| 0x80006202 | Virtual table prim timestamp column should not has ref | 虚拟表的时间戳主键列不应该有数据源,如果有,后续查询虚拟表的时候就会出现该错误 | 检查错误日志,联系开发处理 | +| 0x80006203 | Create virtual child table must use virtual super table | 虚拟子表必须建在虚拟超级表下,否则就会出现该错误 | 创建虚拟子表的时候,USING 虚拟超级表 | | 0x80006204 | Virtual table not support decimal type | 虚拟表不支持 decimal 类型 | 创建虚拟表时不使用 decimal 类型的列/tag | +| 0x80006205 | Virtual table not support in STMT query and STMT insert | 不支持在 stmt 写入和查询中使用虚拟表 | 不在 stmt 写入和查询中使用虚拟表 | +| 0x80006206 | Virtual table not support in Topic | 不支持在订阅中使用虚拟表 | 不在订阅中使用虚拟表 | +| 0x80006207 | Virtual super table query not support origin table from different databases | 虚拟超级表不支持子表的数据源来自不同的数据库 | 确保虚拟超级表的子表的数据源都来自同一个数据库 | diff --git a/docs/zh/27-train-faq/02-dst.md b/docs/zh/27-train-faq/02-dst.md index 5c430fd42f..111edd6ab9 100644 --- a/docs/zh/27-train-faq/02-dst.md +++ b/docs/zh/27-train-faq/02-dst.md @@ -1,291 +1,293 @@ ---- -title: 夏令时使用指南 -description: TDengine 中关于夏令时使用问题的解释和建议 ---- - -## 背景 - -在时序数据库的使用中,有时会遇到使用夏令时的情况。我们将 TDengine 中使用夏令时的情况和问题进行分析说明,以便您在 TDengine 的使用中更加顺利。 - -## 定义 - -### 时区 - -时区是地球上使用相同标准时间的区域。由于地球的自转,为了保证各地的时间与当地的日出日落相协调,全球划分为多个时区。 - -### IANA 时区 - -IANA(Internet Assigned Numbers Authority)时区数据库,也称为 tz database,提供全球时区信息的标准参考。它是现代各类系统和软件处理时区相关操作的基础。 - -IANA 使用“区域/城市”格式(如 Europe/Berlin)来明确标识时区。 - 
-TDengine 在不同组件中均支持使用 IANA 时区(除 Windows taos.cfg 时区设置外)。 - -### 标准时间与当地时间 - -标准时间是根据地球上某个固定经线确定的时间。它为各个时区提供了一个统一的参考点。 - -- 格林尼治标准时间(GMT):历史上使用的参考时间,位于 0° 经线。 -- 协调世界时(UTC):现代的时间标准,类似于GMT,但更加精确。 - -标准时间与时区的关系如下: - -- 基准:标准时间(如 UTC)是时区设定的基准点。 -- 偏移量:不同时区通过相对于标准时间的偏移量来定义。例如,UTC+1 表示比 UTC 快 1 小时。 -- 区域划分:全球被划分为多个时区,每个时区使用一个或多个标准时间。 - -相对于标准时间,每个地区根据其所在时区设定其当地时间: - -- 时区偏移:当地时间等于标准时间加上该时区的偏移量。例如,UTC+2 表示比 UTC 时间快 2 小时。 -- 夏令时(DST):某些地区在特定时间段调整当地时间,例如将时钟拨快一小时。详见下节。 - -### 夏令时 - -夏令时(Daylight Saving Time,DST)是一种通过将时间提前一小时,以充分利用日光、节约能源的制度。通常在春季开始,秋季结束。夏令时的具体开始和结束时间因地区而异。以下均以柏林时间为例,对夏令时和夏令时的影响做说明。 - -按照这个规则,可以看到: - -- 柏林当地时间 2024 年 03 月 31 日 02:00:00 到 03:00:00 (不含 03:00:00)之间的时间不存在(跳变)。 -- 柏林当地时间 2024 年 10 月 27 日 02:00:00 到 03:00:00 (不含 03:00:00)之间的时间出现了两次。 - -#### 夏令时与 IANA 时区数据库 - -- 记录规则:IANA 时区数据库详细记录了各地的夏令时规则,包括开始和结束的日期与时间。 -- 自动调整:许多操作系统和软件利用 IANA 数据库来自动处理夏令时的调整。 -- 历史变更:IANA 数据库还追踪历史上的夏令时变化,以确保准确性。 - -#### 夏令时与时间戳转换 - -- 时间戳转为当地时间是确定的。例如,1729990654 为柏林时间**夏令时** `2024-10-27 02:57:34`,1729994254 为柏林时间**冬令时** `2024-10-27 02:57:34`(这两个本地时间除时间偏移量外是一样的)。 -- 不指定时间偏移量时,当地时间转为时间戳是不确定的。夏令时跳过的时间不存在会造成无法转换成时间戳,如 **柏林时间** `2024-03-31 02:34:56` 不存在,所以无法转换为时间戳。夏令时结束时重复导致无法确定是哪个时间戳,如 `2024-10-27 02:57:34` 不指定时间偏移量无法确定 是 1729990654 还是 1729994254。指定时间偏移量才能确定时间戳,如 `2024-10-27 02:57:34 CEST(+02:00) `,指定了夏令时 `2024-10-27 02:57:34` 时间戳 1729990654 。 - -### RFC3339 时间格式 - -RFC 3339 是一种互联网时间格式标准,用于表示日期和时间。它基于 ISO 8601 标准,但更具体地规定了一些格式细节。 - -其格式如下: - -- 基本格式:`YYYY-MM-DDTHH:MM:SSZ` -- 时区表示: - - Z 表示协调世界时(UTC)。 - - 偏移量格式,例如 +02:00,表示与 UTC 的时差。 - -通过明确的时区偏移,RFC 3339 格式可以在全球范围内准确地解析和比较时间。 - -RFC 3339 的优势包括: - -- 标准化:提供统一的格式,方便跨系统数据交换。 -- 清晰性:明确时区信息,避免时间误解。 - -TDengine 在 REST API 和 Explorer UI 中,均使用 RFC3339 格式进行展示。在 SQL 语句中,可使用 RFC3339 格式写入时间戳数据: - -```sql -insert into t1 values('2024-10-27T01:59:59.000Z', 0); -select * from t1 where ts >= '2024-10-27T01:59:59.000Z'; -``` - -### 未定义行为 - -未定义行为(Undefined Behavior)是指特定代码或操作没有明确规定的结果,也不会对该结果作出兼容性的保证,TDengine 可能在某个版本后对当前的行为作出修改而不会通知用户。所以,在 TDengine 中,用户不可依赖当前未定义的行为进行判断或应用。 - -## 夏令时在 TDengine 中的写入与查询 - -我们使用下表来展示夏令时在写入和查询中的影响。 - -![DST Berlin](./02-dst/dst-berlin.png) - -### 表格说明 - -- **TIMESTAMP**:TDengine 中使用 64位整数来存储原始时间戳。 -- **UTC**:时间戳对应的 UTC 时间表示。 -- **Europe/Berlin**:表示时区 Europe/Berlin 对应的 RFC3339 格式时间。 -- **Local**:表示时区 Europe/Berlin 对应的当地时间(不含时区)。 - -### 表格分析 - -- 在**夏令时开始**(柏林时间 3 月 31 日 02:00)时,时间直接从 02:00 跳到 03:00(往后跳一小时)。 - - 浅绿色是夏令时开始前一小时的时间戳; - - 深绿色是夏令时开始后一小时的时间戳; - - 红色为 TDengine 数据库中插入了不存在的当地时间: - - 使用 SQL `INSERT INTO t1 values('2024-03-31 02:59:59',..)` 插入 `2024-03-31 02:00:00` 到 `2024-03-31 02:59:59` 的数据会被自动调整为 -1000(在 TDengine 中属于未定义行为,当前该值与数据库精度 precision 有关,毫秒数据库为 -1000,微秒数据库为 -1000000,纳秒数据库为 -1000000000),因为那一时刻在本地时间中不存在; -- 在**夏令时结束**(柏林时间 10 月 27 日 03:00)时,时间从 03:00 跳到 02:00 (往前跳一小时)。 - - 浅蓝色表示时钟跳变前一小时的时间戳; - - 深蓝色表示时钟跳变后一小时内的时间戳,其无时区的当地时间与上一小时一致。 - - 紫色表示时钟跳变一小时后的时间戳; -- **当地时间变化**:可见,由于夏令时的调整而导致了当地时间的变化,可能导致某些时间段出现重复或缺失。 -- **UTC 时间不变**:UTC 时间保持不变,确保了时间的一致性和顺序性。 -- **RFC3339**:RFC3339 格式时间显示了时间偏移量的变化,在夏令时开始后变为 +02:00,结束后变为 +01:00 。 -- **条件查询**: - - **夏令时开始**时,跳过的时间(`[03-31 02:00:00,03-31 03:00:00)`)不存在,所以在使用该时间进行查询时,行为不确定:`SELECT ts FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 02:59:59'`(不存在的本地时间戳被转换为 `-1000`): - - ```sql - taos> SELECT ts FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 02:59:59'; - ts | - ================= - -1000 | - Query OK, 1 row(s) in set (0.003635s) - ``` - - 当不存在的时间戳与存在的时间戳共同使用时,其结果同样不符合预期,以下为起始本地时间不存在: - - ```sql - taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 
02:00:00' AND '2024-03-31 03:59:59'; - ts | to_iso8601(ts,'Z') | - ================================================== - -1000 | 1969-12-31T23:59:59.000Z | - 1711843200000 | 2024-03-31T00:00:00.000Z | - 1711846799000 | 2024-03-31T00:59:59.000Z | - 1711846800000 | 2024-03-31T01:00:00.000Z | - 1711846801000 | 2024-03-31T01:00:01.000Z | - Query OK, 5 row(s) in set (0.003339s) - ``` - - 以下语句中第一个 SQL 查询截止时间不存在,第二个截止时间存在,第一个 SQL 查询结果不符合预期: - - ```sql - taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 01:00:00' AND '2024-03-31 02:00:00'; - Query OK, 0 row(s) in set (0.000930s) - - taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 01:00:00' AND '2024-03-31 01:59:59'; - ts | to_iso8601(ts,'Z') | - ================================================== - 1711843200000 | 2024-03-31T00:00:00.000Z | - 1711846799000 | 2024-03-31T00:59:59.000Z | - Query OK, 2 row(s) in set (0.001227s) - ``` - - - 夏令时结束时,跳变的时间(`[10-27 02:00:00,10-27 03:00:00)` 不包含 `10-27 03:00:00`)重复了两次,TDengine 在使用该区间内的时间戳进行查询时,也属于未定义行为。 - - 查询 `[2024-10-27 02:00:00, 2024-10-27 03:00:00]` 之间的数据结果,包含了两次重复的时间戳和 `2024-10-27 03:00:00` 这个时间点的数据: - - ```sql - taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts BETWEEN '2024-10-27 02:00:00' AND '2024-10-27 03:00:00'; - ts | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') | - ======================================================================================= - 1729987200000 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 | - 1729990799000 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 | - 1729990800000 | 2024-10-27T01:00:00.000Z | 2024-10-27 02:00:00 | - 1729994399000 | 2024-10-27T01:59:59.000Z | 2024-10-27 02:59:59 | - 1729994400000 | 2024-10-27T02:00:00.000Z | 2024-10-27 03:00:00 | - Query OK, 5 row(s) in set (0.001370s) - ```` - - - 但以下查询 [2024-10-27 02:00:00.000,2024-10-27 02:57:34.999] 区间只能查询到第一个2024-10-27 02:00:00 时间点的数据: - - ```sql - taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts >= '2024-10-27 02:00:00' AND ts <= '2024-10-27 02:57:00.999'; - ts | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') | - ======================================================================================= - 1729987200000 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 | - Query OK, 1 row(s) in set (0.004480s) - ``` - - - 以下查询 `[2024-10-27 02:00:01,2024-10-27 02:57:35]` 却能查到 3 条数据(包含一条 02:59:59 的当地时间数据): - - ```sql - taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts >= '2024-10-27 02:00:00' AND ts <= '2024-10-27 02:57:35';; - ts | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') | - ================================================================================================ - 2024-10-27 02:00:00.000 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 | - 2024-10-27 02:59:59.000 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 | - 2024-10-27 02:00:00.000 | 2024-10-27T01:00:00.000Z | 2024-10-27 02:00:00 | - Query OK, 3 row(s) in set (0.004428s) - ``` - -## 总结与建议 - -### 总结 - -仅针对使用当地时间带来的影响作说明,使用 UNIX 时间戳或 RFC3339 无影响。 - -- 写入: - - 无法写入夏令时跳变时不存在的时间数据。 - - 写入夏令时跳变时重复的时间是未定义行为。 -- 查询: - - 查询条件指定夏令时开始时跳变的时间,其查询结果为未定义行为。 - - 查询条件指定夏令时结束时重复的时间,其查询结果为未定义行为。 -- 显示: - - 带时区显示不受影响。 - - 显示当地时间是准确的,但夏令时结束时重复的时间会无法区分。 - - 用户应谨慎使用不带时区的时间进行展示和应用。 - -### 建议 - -为避免夏令时给查询和写入造成不必要的影响,在 TDengine 中,建议使用明确的时间偏移量进行写入和查询。 - -- 使用 UNIX 时间戳:使用 UNIX 时间戳可避免时区问题。 - - | TIMESTAMP | UTC | Europe/Berlin | Local | - | ------------: | 
:----------------------: | :---------------------------: | :-----------------: | - | 1711846799000 | 2024-03-31T00:59:59.000Z | 2024-03-31T01:59:59.000+01:00 | 2024-03-31 01:59:59 | - | 1711846800000 | 2024-03-31T01:00:00.000Z | 2024-03-31T03:00:00.000+02:00 | 2024-03-31 03:00:00 | - - ```sql - taos> insert into t1 values(1711846799000, 1)(1711846800000, 2); - Insert OK, 2 row(s) affected (0.001434s) - - taos> select * from t1 where ts between 1711846799000 and 1711846800000; - ts | v1 | - =============================== - 1711846799000 | 1 | - 1711846800000 | 2 | - Query OK, 2 row(s) in set (0.003503s) - ``` - -- 使用 RFC3339 时间格式:带时区偏移量的 RFC3339 时间格式可以有效避免夏令时的不确定性。 - - | TIMESTAMP | UTC | Europe/Berlin | Local | - | ------------: | :----------------------: | :---------------------------: | :-----------------: | - | 1729987200000 | 2024-10-27T00:00:00.000Z | 2024-10-27T02:00:00.000+02:00 | 2024-10-27 02:00:00 | - | 1729990799000 | 2024-10-27T00:59:59.000Z | 2024-10-27T02:59:59.000+02:00 | 2024-10-27 02:59:59 | - | 1729990800000 | 2024-10-27T01:00:00.000Z | 2024-10-27T02:00:00.000+01:00 | 2024-10-27 02:00:00 | - | 1729994399000 | 2024-10-27T01:59:59.000Z | 2024-10-27T02:59:59.000+01:00 | 2024-10-27 02:59:59 | - - ```sql - taos> insert into t1 values ('2024-10-27T02:00:00.000+02:00', 1) - ('2024-10-27T02:59:59.000+02:00', 2) - ('2024-10-27T02:00:00.000+01:00', 3) - ('2024-10-27T02:59:59.000+01:00', 4); - Insert OK, 4 row(s) affected (0.001514s) - - taos> SELECT *, - to_iso8601(ts,'Z'), - to_char(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 - WHERE ts >= '2024-10-27T02:00:00.000+02:00' - AND ts <= '2024-10-27T02:59:59.000+01:00'; - ts | v1 | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') | - ===================================================================================================== - 1729987200000 | 1 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 | - 1729990799000 | 2 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 | - 1729990800000 | 3 | 2024-10-27T01:00:00.000Z | 2024-10-27 02:00:00 | - 1729994399000 | 4 | 2024-10-27T01:59:59.000Z | 2024-10-27 02:59:59 | - Query OK, 4 row(s) in set (0.004275s) - - taos> SELECT *, - to_iso8601(ts,'Z'), - to_char(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 - WHERE ts >= '2024-10-27T02:00:00.000+02:00' - AND ts <= '2024-10-27T02:59:59.000+02:00'; - ts | v1 | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') | - ===================================================================================================== - 1729987200000 | 1 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 | - 1729990799000 | 2 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 | - Query OK, 2 row(s) in set (0.004275s) - ``` - -- 查询时注意时区设定:在查询和显示时,如果需要本地时间,务必考虑夏令时的影响。 - - taosAdapter:使用 REST API 时,支持设置 IANA 时区,结果使用 RFC3339 格式返回。 - - ```shell - $ curl -uroot:taosdata 'localhost:6041/rest/sql?tz=Europe/Berlin'\ - -d "select ts from tz1.t1" - {"code":0,"column_meta":[["ts","TIMESTAMP",8]],"data":[["1970-01-01T00:59:59.000+01:00"],["2024-03-31T01:00:00.000+01:00"],["2024-03-31T01:59:59.000+01:00"],["2024-03-31T03:00:00.000+02:00"],["2024-03-31T03:00:01.000+02:00"],["2024-10-27T02:00:00.000+02:00"],["2024-10-27T02:59:59.000+02:00"],["2024-10-27T02:00:00.000+01:00"],["2024-10-27T02:59:59.000+01:00"],["2024-10-27T03:00:00.000+01:00"]],"rows":10} - ``` - - - Explorer:使用 Explorer 页面进行 SQL 查询时,用户可配置客户端时区,以 RFC3339 格式显示。 - - ![Explorer DST](./02-dst/explorer-with-tz.png) - -## 参考文档 - -- IANA Time Zone Database: [https://www.iana.org/time-zones](https://www.iana.org/time-zones) -- RFC3339: 
[https://datatracker.ietf.org/doc/html/rfc3339](https://datatracker.ietf.org/doc/html/rfc3339) +--- +title: 夏令时使用指南 +description: TDengine 中关于夏令时使用问题的解释和建议 +--- + +## 背景 + +在时序数据库的使用中,有时会遇到使用夏令时的情况。我们将 TDengine 中使用夏令时的情况和问题进行分析说明,以便您在 TDengine 的使用中更加顺利。 + +## 定义 + +### 时区 + +时区是地球上使用相同标准时间的区域。由于地球的自转,为了保证各地的时间与当地的日出日落相协调,全球划分为多个时区。 + +### IANA 时区 + +IANA(Internet Assigned Numbers Authority)时区数据库,也称为 tz database,提供全球时区信息的标准参考。它是现代各类系统和软件处理时区相关操作的基础。 + +IANA 使用“区域/城市”格式(如 Europe/Berlin)来明确标识时区。 + +TDengine 在不同组件中均支持使用 IANA 时区(除 Windows taos.cfg 时区设置外)。 + +### 标准时间与当地时间 + +标准时间是根据地球上某个固定经线确定的时间。它为各个时区提供了一个统一的参考点。 + +- 格林尼治标准时间(GMT):历史上使用的参考时间,位于 0° 经线。 +- 协调世界时(UTC):现代的时间标准,类似于GMT,但更加精确。 + +标准时间与时区的关系如下: + +- 基准:标准时间(如 UTC)是时区设定的基准点。 +- 偏移量:不同时区通过相对于标准时间的偏移量来定义。例如,UTC+1 表示比 UTC 快 1 小时。 +- 区域划分:全球被划分为多个时区,每个时区使用一个或多个标准时间。 + +相对于标准时间,每个地区根据其所在时区设定其当地时间: + +- 时区偏移:当地时间等于标准时间加上该时区的偏移量。例如,UTC+2 表示比 UTC 时间快 2 小时。 +- 夏令时(DST):某些地区在特定时间段调整当地时间,例如将时钟拨快一小时。详见下节。 + +### 夏令时 + +夏令时(Daylight Saving Time,DST)是一种通过将时间提前一小时,以充分利用日光、节约能源的制度。通常在春季开始,秋季结束。夏令时的具体开始和结束时间因地区而异。以下均以柏林时间为例,对夏令时和夏令时的影响做说明。 + +![DST Berlin](./02-dst/dst-berlin.png) + +按照这个规则,可以看到: + +- 柏林当地时间 2024 年 03 月 31 日 02:00:00 到 03:00:00 (不含 03:00:00)之间的时间不存在(跳变)。 +- 柏林当地时间 2024 年 10 月 27 日 02:00:00 到 03:00:00 (不含 03:00:00)之间的时间出现了两次。 + +#### 夏令时与 IANA 时区数据库 + +- 记录规则:IANA 时区数据库详细记录了各地的夏令时规则,包括开始和结束的日期与时间。 +- 自动调整:许多操作系统和软件利用 IANA 数据库来自动处理夏令时的调整。 +- 历史变更:IANA 数据库还追踪历史上的夏令时变化,以确保准确性。 + +#### 夏令时与时间戳转换 + +- 时间戳转为当地时间是确定的。例如,1729990654 为柏林时间**夏令时** `2024-10-27 02:57:34`,1729994254 为柏林时间**冬令时** `2024-10-27 02:57:34`(这两个本地时间除时间偏移量外是一样的)。 +- 不指定时间偏移量时,当地时间转为时间戳是不确定的。夏令时跳过的时间不存在会造成无法转换成时间戳,如 **柏林时间** `2024-03-31 02:34:56` 不存在,所以无法转换为时间戳。夏令时结束时重复导致无法确定是哪个时间戳,如 `2024-10-27 02:57:34` 不指定时间偏移量无法确定 是 1729990654 还是 1729994254。指定时间偏移量才能确定时间戳,如 `2024-10-27 02:57:34 CEST(+02:00) `,指定了夏令时 `2024-10-27 02:57:34` 时间戳 1729990654 。 + +### RFC3339 时间格式 + +RFC 3339 是一种互联网时间格式标准,用于表示日期和时间。它基于 ISO 8601 标准,但更具体地规定了一些格式细节。 + +其格式如下: + +- 基本格式:`YYYY-MM-DDTHH:MM:SSZ` +- 时区表示: + - Z 表示协调世界时(UTC)。 + - 偏移量格式,例如 +02:00,表示与 UTC 的时差。 + +通过明确的时区偏移,RFC 3339 格式可以在全球范围内准确地解析和比较时间。 + +RFC 3339 的优势包括: + +- 标准化:提供统一的格式,方便跨系统数据交换。 +- 清晰性:明确时区信息,避免时间误解。 + +TDengine 在 REST API 和 Explorer UI 中,均使用 RFC3339 格式进行展示。在 SQL 语句中,可使用 RFC3339 格式写入时间戳数据: + +```sql +insert into t1 values('2024-10-27T01:59:59.000Z', 0); +select * from t1 where ts >= '2024-10-27T01:59:59.000Z'; +``` + +### 未定义行为 + +未定义行为(Undefined Behavior)是指特定代码或操作没有明确规定的结果,也不会对该结果作出兼容性的保证,TDengine 可能在某个版本后对当前的行为作出修改而不会通知用户。所以,在 TDengine 中,用户不可依赖当前未定义的行为进行判断或应用。 + +## 夏令时在 TDengine 中的写入与查询 + +我们使用下表来展示夏令时在写入和查询中的影响。 + +![DST Table](./02-dst/dst-table.png) + +### 表格说明 + +- **TIMESTAMP**:TDengine 中使用 64位整数来存储原始时间戳。 +- **UTC**:时间戳对应的 UTC 时间表示。 +- **Europe/Berlin**:表示时区 Europe/Berlin 对应的 RFC3339 格式时间。 +- **Local**:表示时区 Europe/Berlin 对应的当地时间(不含时区)。 + +### 表格分析 + +- 在**夏令时开始**(柏林时间 3 月 31 日 02:00)时,时间直接从 02:00 跳到 03:00(往后跳一小时)。 + - 浅绿色是夏令时开始前一小时的时间戳; + - 深绿色是夏令时开始后一小时的时间戳; + - 红色为 TDengine 数据库中插入了不存在的当地时间: + - 使用 SQL `INSERT INTO t1 values('2024-03-31 02:59:59',..)` 插入 `2024-03-31 02:00:00` 到 `2024-03-31 02:59:59` 的数据会被自动调整为 -1000(在 TDengine 中属于未定义行为,当前该值与数据库精度 precision 有关,毫秒数据库为 -1000,微秒数据库为 -1000000,纳秒数据库为 -1000000000),因为那一时刻在本地时间中不存在; +- 在**夏令时结束**(柏林时间 10 月 27 日 03:00)时,时间从 03:00 跳到 02:00 (往前跳一小时)。 + - 浅蓝色表示时钟跳变前一小时的时间戳; + - 深蓝色表示时钟跳变后一小时内的时间戳,其无时区的当地时间与上一小时一致。 + - 紫色表示时钟跳变一小时后的时间戳; +- **当地时间变化**:可见,由于夏令时的调整而导致了当地时间的变化,可能导致某些时间段出现重复或缺失。 +- **UTC 时间不变**:UTC 时间保持不变,确保了时间的一致性和顺序性。 +- **RFC3339**:RFC3339 格式时间显示了时间偏移量的变化,在夏令时开始后变为 
+02:00,结束后变为 +01:00 。 +- **条件查询**: + - **夏令时开始**时,跳过的时间(`[03-31 02:00:00,03-31 03:00:00)`)不存在,所以在使用该时间进行查询时,行为不确定:`SELECT ts FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 02:59:59'`(不存在的本地时间戳被转换为 `-1000`): + + ```sql + taos> SELECT ts FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 02:59:59'; + ts | + ================= + -1000 | + Query OK, 1 row(s) in set (0.003635s) + ``` + + 当不存在的时间戳与存在的时间戳共同使用时,其结果同样不符合预期,以下为起始本地时间不存在: + + ```sql + taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 03:59:59'; + ts | to_iso8601(ts,'Z') | + ================================================== + -1000 | 1969-12-31T23:59:59.000Z | + 1711843200000 | 2024-03-31T00:00:00.000Z | + 1711846799000 | 2024-03-31T00:59:59.000Z | + 1711846800000 | 2024-03-31T01:00:00.000Z | + 1711846801000 | 2024-03-31T01:00:01.000Z | + Query OK, 5 row(s) in set (0.003339s) + ``` + + 以下语句中第一个 SQL 查询截止时间不存在,第二个截止时间存在,第一个 SQL 查询结果不符合预期: + + ```sql + taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 01:00:00' AND '2024-03-31 02:00:00'; + Query OK, 0 row(s) in set (0.000930s) + + taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 01:00:00' AND '2024-03-31 01:59:59'; + ts | to_iso8601(ts,'Z') | + ================================================== + 1711843200000 | 2024-03-31T00:00:00.000Z | + 1711846799000 | 2024-03-31T00:59:59.000Z | + Query OK, 2 row(s) in set (0.001227s) + ``` + + - 夏令时结束时,跳变的时间(`[10-27 02:00:00,10-27 03:00:00)` 不包含 `10-27 03:00:00`)重复了两次,TDengine 在使用该区间内的时间戳进行查询时,也属于未定义行为。 + - 查询 `[2024-10-27 02:00:00, 2024-10-27 03:00:00]` 之间的数据结果,包含了两次重复的时间戳和 `2024-10-27 03:00:00` 这个时间点的数据: + + ```sql + taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts BETWEEN '2024-10-27 02:00:00' AND '2024-10-27 03:00:00'; + ts | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') | + ======================================================================================= + 1729987200000 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 | + 1729990799000 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 | + 1729990800000 | 2024-10-27T01:00:00.000Z | 2024-10-27 02:00:00 | + 1729994399000 | 2024-10-27T01:59:59.000Z | 2024-10-27 02:59:59 | + 1729994400000 | 2024-10-27T02:00:00.000Z | 2024-10-27 03:00:00 | + Query OK, 5 row(s) in set (0.001370s) + ```` + + - 但以下查询 [2024-10-27 02:00:00.000,2024-10-27 02:57:34.999] 区间只能查询到第一个2024-10-27 02:00:00 时间点的数据: + + ```sql + taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts >= '2024-10-27 02:00:00' AND ts <= '2024-10-27 02:57:00.999'; + ts | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') | + ======================================================================================= + 1729987200000 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 | + Query OK, 1 row(s) in set (0.004480s) + ``` + + - 以下查询 `[2024-10-27 02:00:01,2024-10-27 02:57:35]` 却能查到 3 条数据(包含一条 02:59:59 的当地时间数据): + + ```sql + taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts >= '2024-10-27 02:00:00' AND ts <= '2024-10-27 02:57:35';; + ts | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') | + ================================================================================================ + 2024-10-27 02:00:00.000 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 | + 2024-10-27 02:59:59.000 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 | + 2024-10-27 02:00:00.000 | 2024-10-27T01:00:00.000Z | 
2024-10-27 02:00:00 | + Query OK, 3 row(s) in set (0.004428s) + ``` + +## 总结与建议 + +### 总结 + +仅针对使用当地时间带来的影响作说明,使用 UNIX 时间戳或 RFC3339 无影响。 + +- 写入: + - 无法写入夏令时跳变时不存在的时间数据。 + - 写入夏令时跳变时重复的时间是未定义行为。 +- 查询: + - 查询条件指定夏令时开始时跳变的时间,其查询结果为未定义行为。 + - 查询条件指定夏令时结束时重复的时间,其查询结果为未定义行为。 +- 显示: + - 带时区显示不受影响。 + - 显示当地时间是准确的,但夏令时结束时重复的时间会无法区分。 + - 用户应谨慎使用不带时区的时间进行展示和应用。 + +### 建议 + +为避免夏令时给查询和写入造成不必要的影响,在 TDengine 中,建议使用明确的时间偏移量进行写入和查询。 + +- 使用 UNIX 时间戳:使用 UNIX 时间戳可避免时区问题。 + + | TIMESTAMP | UTC | Europe/Berlin | Local | + | ------------: | :----------------------: | :---------------------------: | :-----------------: | + | 1711846799000 | 2024-03-31T00:59:59.000Z | 2024-03-31T01:59:59.000+01:00 | 2024-03-31 01:59:59 | + | 1711846800000 | 2024-03-31T01:00:00.000Z | 2024-03-31T03:00:00.000+02:00 | 2024-03-31 03:00:00 | + + ```sql + taos> insert into t1 values(1711846799000, 1)(1711846800000, 2); + Insert OK, 2 row(s) affected (0.001434s) + + taos> select * from t1 where ts between 1711846799000 and 1711846800000; + ts | v1 | + =============================== + 1711846799000 | 1 | + 1711846800000 | 2 | + Query OK, 2 row(s) in set (0.003503s) + ``` + +- 使用 RFC3339 时间格式:带时区偏移量的 RFC3339 时间格式可以有效避免夏令时的不确定性。 + + | TIMESTAMP | UTC | Europe/Berlin | Local | + | ------------: | :----------------------: | :---------------------------: | :-----------------: | + | 1729987200000 | 2024-10-27T00:00:00.000Z | 2024-10-27T02:00:00.000+02:00 | 2024-10-27 02:00:00 | + | 1729990799000 | 2024-10-27T00:59:59.000Z | 2024-10-27T02:59:59.000+02:00 | 2024-10-27 02:59:59 | + | 1729990800000 | 2024-10-27T01:00:00.000Z | 2024-10-27T02:00:00.000+01:00 | 2024-10-27 02:00:00 | + | 1729994399000 | 2024-10-27T01:59:59.000Z | 2024-10-27T02:59:59.000+01:00 | 2024-10-27 02:59:59 | + + ```sql + taos> insert into t1 values ('2024-10-27T02:00:00.000+02:00', 1) + ('2024-10-27T02:59:59.000+02:00', 2) + ('2024-10-27T02:00:00.000+01:00', 3) + ('2024-10-27T02:59:59.000+01:00', 4); + Insert OK, 4 row(s) affected (0.001514s) + + taos> SELECT *, + to_iso8601(ts,'Z'), + to_char(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 + WHERE ts >= '2024-10-27T02:00:00.000+02:00' + AND ts <= '2024-10-27T02:59:59.000+01:00'; + ts | v1 | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') | + ===================================================================================================== + 1729987200000 | 1 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 | + 1729990799000 | 2 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 | + 1729990800000 | 3 | 2024-10-27T01:00:00.000Z | 2024-10-27 02:00:00 | + 1729994399000 | 4 | 2024-10-27T01:59:59.000Z | 2024-10-27 02:59:59 | + Query OK, 4 row(s) in set (0.004275s) + + taos> SELECT *, + to_iso8601(ts,'Z'), + to_char(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 + WHERE ts >= '2024-10-27T02:00:00.000+02:00' + AND ts <= '2024-10-27T02:59:59.000+02:00'; + ts | v1 | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') | + ===================================================================================================== + 1729987200000 | 1 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 | + 1729990799000 | 2 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 | + Query OK, 2 row(s) in set (0.004275s) + ``` + +- 查询时注意时区设定:在查询和显示时,如果需要本地时间,务必考虑夏令时的影响。 + - taosAdapter:使用 REST API 时,支持设置 IANA 时区,结果使用 RFC3339 格式返回。 + + ```shell + $ curl -uroot:taosdata 'localhost:6041/rest/sql?tz=Europe/Berlin'\ + -d "select ts from tz1.t1" + 
{"code":0,"column_meta":[["ts","TIMESTAMP",8]],"data":[["1970-01-01T00:59:59.000+01:00"],["2024-03-31T01:00:00.000+01:00"],["2024-03-31T01:59:59.000+01:00"],["2024-03-31T03:00:00.000+02:00"],["2024-03-31T03:00:01.000+02:00"],["2024-10-27T02:00:00.000+02:00"],["2024-10-27T02:59:59.000+02:00"],["2024-10-27T02:00:00.000+01:00"],["2024-10-27T02:59:59.000+01:00"],["2024-10-27T03:00:00.000+01:00"]],"rows":10} + ``` + + - Explorer:使用 Explorer 页面进行 SQL 查询时,用户可配置客户端时区,以 RFC3339 格式显示。 + + ![Explorer DST](./02-dst/explorer-with-tz.png) + +## 参考文档 + +- IANA Time Zone Database: [https://www.iana.org/time-zones](https://www.iana.org/time-zones) +- RFC3339: [https://datatracker.ietf.org/doc/html/rfc3339](https://datatracker.ietf.org/doc/html/rfc3339) diff --git a/examples/c/CMakeLists.txt b/examples/c/CMakeLists.txt index e3c992f53f..94297272f7 100644 --- a/examples/c/CMakeLists.txt +++ b/examples/c/CMakeLists.txt @@ -42,27 +42,27 @@ IF(TD_LINUX) ) target_link_libraries(tmq - ${TAOS_LIB} + ${TAOS_NATIVE_LIB} ) target_link_libraries(stream_demo - ${TAOS_LIB} + ${TAOS_NATIVE_LIB} ) target_link_libraries(schemaless - ${TAOS_LIB} + ${TAOS_NATIVE_LIB} ) target_link_libraries(prepare - ${TAOS_LIB} + ${TAOS_NATIVE_LIB} ) target_link_libraries(demo - ${TAOS_LIB} + ${TAOS_NATIVE_LIB} ) target_link_libraries(asyncdemo - ${TAOS_LIB} + ${TAOS_NATIVE_LIB} ) SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq) diff --git a/include/client/taos.h b/include/client/taos.h index 919ace9fc4..1120bd1d97 100644 --- a/include/client/taos.h +++ b/include/client/taos.h @@ -62,6 +62,7 @@ typedef enum { TSDB_OPTION_CONFIGDIR, TSDB_OPTION_SHELL_ACTIVITY_TIMER, TSDB_OPTION_USE_ADAPTER, + TSDB_OPTION_DRIVER, TSDB_MAX_OPTIONS } TSDB_OPTION; @@ -155,11 +156,14 @@ typedef enum { TAOS_NOTIFY_USER_DROPPED = 2, } TAOS_NOTIFY_TYPE; +/* -- implemented in the native interface, for internal component only, the API may change -- */ #define RET_MSG_LENGTH 1024 typedef struct setConfRet { SET_CONF_RET_CODE retCode; char retMsg[RET_MSG_LENGTH]; } setConfRet; +DLL_EXPORT setConfRet taos_set_config(const char *config); // implemented in the native interface +/* -- end -- */ typedef struct TAOS_VGROUP_HASH_INFO { int32_t vgId; @@ -182,14 +186,13 @@ typedef struct TAOS_STMT_OPTIONS { bool singleTableBindOnce; } TAOS_STMT_OPTIONS; -DLL_EXPORT void taos_cleanup(void); -DLL_EXPORT int taos_options(TSDB_OPTION option, const void *arg, ...); -DLL_EXPORT int taos_options_connection(TAOS *taos, TSDB_OPTION_CONNECTION option, const void *arg, ...); -DLL_EXPORT setConfRet taos_set_config(const char *config); -DLL_EXPORT int taos_init(void); -DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port); -DLL_EXPORT TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port); -DLL_EXPORT void taos_close(TAOS *taos); +DLL_EXPORT int taos_init(void); +DLL_EXPORT void taos_cleanup(void); +DLL_EXPORT int taos_options(TSDB_OPTION option, const void *arg, ...); +DLL_EXPORT int taos_options_connection(TAOS *taos, TSDB_OPTION_CONNECTION option, const void *arg, ...); +DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port); +DLL_EXPORT TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port); +DLL_EXPORT void taos_close(TAOS *taos); DLL_EXPORT const char *taos_data_type(int type); @@ -220,6 +223,7 @@ DLL_EXPORT char *taos_stmt_errstr(TAOS_STMT *stmt); DLL_EXPORT int 
taos_stmt_affected_rows(TAOS_STMT *stmt); DLL_EXPORT int taos_stmt_affected_rows_once(TAOS_STMT *stmt); +/* -- implemented in the native interface, for internal component only, the API may change -- */ typedef void TAOS_STMT2; typedef struct TAOS_STMT2_OPTION { @@ -257,6 +261,7 @@ DLL_EXPORT int taos_stmt2_get_fields(TAOS_STMT2 *stmt, int *count, TAOS_FIELD_AL DLL_EXPORT void taos_stmt2_free_fields(TAOS_STMT2 *stmt, TAOS_FIELD_ALL *fields); DLL_EXPORT TAOS_RES *taos_stmt2_result(TAOS_STMT2 *stmt); DLL_EXPORT char *taos_stmt2_error(TAOS_STMT2 *stmt); +/* -- end -- */ DLL_EXPORT TAOS_RES *taos_query(TAOS *taos, const char *sql); DLL_EXPORT TAOS_RES *taos_query_with_reqid(TAOS *taos, const char *sql, int64_t reqId); @@ -313,9 +318,11 @@ DLL_EXPORT void taos_set_hb_quit(int8_t quitByKill); DLL_EXPORT int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param, int type); +/* -- implemented in the native interface, for internal component only, the API may change -- */ typedef void (*__taos_async_whitelist_fn_t)(void *param, int code, TAOS *taos, int numOfWhiteLists, uint64_t *pWhiteLists); DLL_EXPORT void taos_fetch_whitelist_a(TAOS *taos, __taos_async_whitelist_fn_t fp, void *param); +/* ---- end ---- */ typedef enum { TAOS_CONN_MODE_BI = 0, @@ -414,7 +421,7 @@ DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res); DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES *res); DLL_EXPORT const char *tmq_err2str(int32_t code); -/* ------------------------------ TAOSX INTERFACE -----------------------------------*/ +/* -- implemented in the native interface, for internal component(TAOSX) only, the API may change -- */ typedef struct tmq_raw_data { void *raw; uint32_t raw_len; @@ -435,8 +442,9 @@ DLL_EXPORT void tmq_free_raw(tmq_raw_data raw); // Returning null means error. 
Returned result need to be freed by tmq_free_json_meta DLL_EXPORT char *tmq_get_json_meta(TAOS_RES *res); DLL_EXPORT void tmq_free_json_meta(char *jsonMeta); -/* ---------------------------- TAOSX END -------------------------------- */ +/* ---- end ---- */ +/* -- implemented in the native interface, for internal component only, the API may change -- */ typedef enum { TSDB_SRV_STATUS_UNAVAILABLE = 0, TSDB_SRV_STATUS_NETWORK_OK = 1, @@ -446,7 +454,10 @@ typedef enum { } TSDB_SERVER_STATUS; DLL_EXPORT TSDB_SERVER_STATUS taos_check_server_status(const char *fqdn, int port, char *details, int maxlen); +DLL_EXPORT void taos_write_crashinfo(int signum, void *sigInfo, void *context); DLL_EXPORT char *getBuildInfo(); +/* ---- end ---- */ + #ifdef __cplusplus } #endif diff --git a/include/common/tanalytics.h b/include/common/tanalytics.h index 0fb1d543f7..976e89beb3 100644 --- a/include/common/tanalytics.h +++ b/include/common/tanalytics.h @@ -29,9 +29,10 @@ extern "C" { #define ANALY_FORECAST_DEFAULT_CONF 95 #define ANALY_FORECAST_DEFAULT_WNCHECK 1 #define ANALY_FORECAST_MAX_ROWS 40000 +#define ANALY_FORECAST_RES_MAX_ROWS 1024 #define ANALY_ANOMALY_WINDOW_MAX_ROWS 40000 -#define ANALY_DEFAULT_TIMEOUT 60 -#define ANALY_MAX_TIMEOUT 600 +#define ANALY_DEFAULT_TIMEOUT 60 +#define ANALY_MAX_TIMEOUT 600 typedef struct { EAnalAlgoType type; diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 9fd0ad057c..c295c40c1e 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -1430,6 +1430,7 @@ typedef struct { int64_t watermark1; int64_t watermark2; int32_t ttl; + int32_t keep; SArray* pFuncs; int32_t commentLen; char* pComment; diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index 4d35c8db7d..45737b9273 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -102,6 +102,8 @@ int32_t qSetStreamOpOpen(qTaskInfo_t tinfo); int32_t qSetStreamNotifyInfo(qTaskInfo_t tinfo, int32_t eventTypes, const SSchemaWrapper* pSchemaWrapper, const char* stbFullName, bool newSubTableRule, STaskNotifyEventStat* pNotifyEventStat); +void qSetStreamMergeInfo(qTaskInfo_t tinfo, SArray* pVTables); + /** * Set multiple input data blocks for the stream scan. 
* @param tinfo diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 328f7f6a8f..4d05bb8c80 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -263,6 +263,7 @@ typedef struct SDynQueryCtrlStbJoin { typedef struct SDynQueryCtrlVtbScan { bool scanAllCols; + char dbName[TSDB_DB_NAME_LEN]; uint64_t suid; SVgroupsInfo* pVgroupList; } SDynQueryCtrlVtbScan; @@ -666,6 +667,7 @@ typedef struct SStbJoinDynCtrlBasic { typedef struct SVtbScanDynCtrlBasic { bool scanAllCols; + char dbName[TSDB_DB_NAME_LEN]; uint64_t suid; int32_t accountId; SEpSet mgmtEpSet; diff --git a/include/os/os.h b/include/os/os.h index 94edcdad75..98ff17359a 100644 --- a/include/os/os.h +++ b/include/os/os.h @@ -54,6 +54,7 @@ extern "C" { #include #if defined(DARWIN) +#include #else #if !defined(TD_ASTRA) #include diff --git a/include/os/osDir.h b/include/os/osDir.h index 6d32ab36ce..6ebfad72be 100644 --- a/include/os/osDir.h +++ b/include/os/osDir.h @@ -112,7 +112,9 @@ bool taosDirEntryIsDir(TdDirEntryPtr pDirEntry); char *taosGetDirEntryName(TdDirEntryPtr pDirEntry); int32_t taosCloseDir(TdDirPtr *ppDir); -int taosGetDirSize(const char *path, int64_t *size); +int32_t taosAppPath(char *path, int32_t maxLen); +int32_t taosGetDirSize(const char *path, int64_t *size); + #ifdef __cplusplus } #endif diff --git a/include/os/osSystem.h b/include/os/osSystem.h index 06f23eec0f..694bbf4085 100644 --- a/include/os/osSystem.h +++ b/include/os/osSystem.h @@ -46,10 +46,12 @@ int32_t taosEOFCmd(TdCmdPtr pCmd); void taosCloseCmd(TdCmdPtr *ppCmd); -void *taosLoadDll(const char *filename); +void *taosLoadDll(const char *fileName); void taosCloseDll(void *handle); +void *taosLoadDllFunc(void *handle, const char *funcName); + int32_t taosSetConsoleEcho(bool on); int32_t taosSetTerminalMode(); diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 7f49dfb633..9cd21ad577 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -117,6 +117,8 @@ int32_t taosGetErrSize(); #define TSDB_CODE_REF_INVALID_ID TAOS_DEF_ERROR_CODE(0, 0x0108) // internal #define TSDB_CODE_REF_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0109) // internal #define TSDB_CODE_REF_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x010A) // internal +#define TSDB_CODE_DLL_NOT_LOAD TAOS_DEF_ERROR_CODE(0, 0x010B) +#define TSDB_CODE_DLL_FUNC_NOT_LOAD TAOS_DEF_ERROR_CODE(0, 0x010C) #define TSDB_CODE_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x0110) // #define TSDB_CODE_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0111) // internal @@ -514,6 +516,7 @@ int32_t taosGetErrSize(); #define TSDB_CODE_ANA_ANODE_RETURN_ERROR TAOS_DEF_ERROR_CODE(0, 0x0445) #define TSDB_CODE_ANA_ANODE_TOO_MANY_ROWS TAOS_DEF_ERROR_CODE(0, 0x0446) #define TSDB_CODE_ANA_WN_DATA TAOS_DEF_ERROR_CODE(0, 0x0447) +#define TSDB_CODE_ANA_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x0448) // mnode-sma #define TSDB_CODE_MND_SMA_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0480) @@ -1071,6 +1074,9 @@ int32_t taosGetErrSize(); #define TSDB_CODE_VTABLE_PRIMTS_HAS_REF TAOS_DEF_ERROR_CODE(0, 0x6202) #define TSDB_CODE_VTABLE_NOT_VIRTUAL_SUPER_TABLE TAOS_DEF_ERROR_CODE(0, 0x6203) #define TSDB_CODE_VTABLE_NOT_SUPPORT_DATA_TYPE TAOS_DEF_ERROR_CODE(0, 0x6204) +#define TSDB_CODE_VTABLE_NOT_SUPPORT_STMT TAOS_DEF_ERROR_CODE(0, 0x6205) +#define TSDB_CODE_VTABLE_NOT_SUPPORT_TOPIC TAOS_DEF_ERROR_CODE(0, 0x6206) +#define TSDB_CODE_VTABLE_NOT_SUPPORT_CROSS_DB TAOS_DEF_ERROR_CODE(0, 0x6207) #ifdef __cplusplus } #endif diff --git a/include/util/tdef.h b/include/util/tdef.h index 
63cd0b186a..328dea9d2b 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -606,7 +606,8 @@ typedef enum ELogicConditionType { #define TFS_MAX_LEVEL (TFS_MAX_TIERS - 1) #define TFS_PRIMARY_LEVEL 0 #define TFS_PRIMARY_ID 0 -#define TFS_MIN_DISK_FREE_SIZE 50 * 1024 * 1024 +#define TFS_MIN_DISK_FREE_SIZE 50 * 1024 * 1024 // 50MB +#define TFS_MIN_DISK_FREE_SIZE_MAX (2ULL * 1024 * 1024 * 1024 * 1024) // 2TB enum { TRANS_STAT_INIT = 0, TRANS_STAT_EXECUTING, TRANS_STAT_EXECUTED, TRANS_STAT_ROLLBACKING, TRANS_STAT_ROLLBACKED }; enum { TRANS_OPER_INIT = 0, TRANS_OPER_EXECUTE, TRANS_OPER_ROLLBACK }; diff --git a/packaging/check_package.sh b/packaging/check_package.sh index 5c3a2f9267..2179b64f3b 100644 --- a/packaging/check_package.sh +++ b/packaging/check_package.sh @@ -152,10 +152,14 @@ function check_lib_path() { # check all links check_link ${lib_link_dir}/libtaos.so check_link ${lib_link_dir}/libtaos.so.1 + check_link ${lib_link_dir}/libtaosnative.so + check_link ${lib_link_dir}/libtaosnative.so.1 if [[ -d ${lib64_link_dir} ]]; then check_link ${lib64_link_dir}/libtaos.so check_link ${lib64_link_dir}/libtaos.so.1 + check_link ${lib64_link_dir}/libtaosnative.so + check_link ${lib64_link_dir}/libtaosnative.so.1 fi echo -e "Check lib path:\033[32mOK\033[0m!" } diff --git a/packaging/deb/DEBIAN/preinst b/packaging/deb/DEBIAN/preinst index 904a946e20..b223dbf3d8 100644 --- a/packaging/deb/DEBIAN/preinst +++ b/packaging/deb/DEBIAN/preinst @@ -80,4 +80,5 @@ fi # there can not libtaos.so*, otherwise ln -s error ${csudo}rm -f ${install_main_dir}/driver/libtaos.* || : +${csudo}rm -f ${install_main_dir}/driver/libtaosnative.* || : [ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}rm -f ${install_main_dir}/driver/libtaosws.so || : diff --git a/packaging/deb/DEBIAN/prerm b/packaging/deb/DEBIAN/prerm index cdf68e0d78..0c9eaf2860 100644 --- a/packaging/deb/DEBIAN/prerm +++ b/packaging/deb/DEBIAN/prerm @@ -44,6 +44,8 @@ else ${csudo}rm -f ${inc_link_dir}/taosws.h || : ${csudo}rm -f ${lib_link_dir}/libtaos.* || : ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : + ${csudo}rm -f ${lib_link_dir}/libtaosnative.* || : + ${csudo}rm -f ${lib64_link_dir}/libtaosnative.* || : ${csudo}rm -f ${lib_link_dir}/libtaosws.so || : ${csudo}rm -f ${lib64_link_dir}/libtaosws.so || : diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh index d4616f29ff..d7bfe533e9 100755 --- a/packaging/deb/makedeb.sh +++ b/packaging/deb/makedeb.sh @@ -31,6 +31,7 @@ mkdir -p ${pkg_dir} cd ${pkg_dir} libfile="libtaos.so.${tdengine_ver}" +nativelibfile="libtaosnative.so.${tdengine_ver}" wslibfile="libtaosws.so" # create install dir @@ -120,11 +121,12 @@ fi cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver +cp ${compile_dir}/build/lib/${nativelibfile} ${pkg_dir}${install_home_path}/driver [ -f ${compile_dir}/build/lib/${wslibfile} ] && cp ${compile_dir}/build/lib/${wslibfile} ${pkg_dir}${install_home_path}/driver ||: cp ${compile_dir}/../include/client/taos.h ${pkg_dir}${install_home_path}/include cp ${compile_dir}/../include/common/taosdef.h ${pkg_dir}${install_home_path}/include cp ${compile_dir}/../include/util/taoserror.h ${pkg_dir}${install_home_path}/include -cp ${compile_dir}/../include/util/tdef.h ${pkg_dir}${install_home_path}/include +cp ${compile_dir}/../include/util/tdef.h ${pkg_dir}${install_home_path}/include cp ${compile_dir}/../include/libs/function/taosudf.h ${pkg_dir}${install_home_path}/include [ -f 
${compile_dir}/build/include/taosws.h ] && cp ${compile_dir}/build/include/taosws.h ${pkg_dir}${install_home_path}/include ||: cp -r ${top_dir}/examples/* ${pkg_dir}${install_home_path}/examples diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec index ff576949c7..2801794623 100644 --- a/packaging/rpm/tdengine.spec +++ b/packaging/rpm/tdengine.spec @@ -44,6 +44,7 @@ echo version: %{_version} echo buildroot: %{buildroot} libfile="libtaos.so.%{_version}" +nativelibfile="libtaosnative.so.%{_version}" wslibfile="libtaosws.so" # create install path, and cp file @@ -112,11 +113,12 @@ if [ -f %{_compiledir}/build/bin/taosadapter ]; then cp %{_compiledir}/build/bin/taosadapter %{buildroot}%{homepath}/bin fi cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver +cp %{_compiledir}/build/lib/${nativelibfile} %{buildroot}%{homepath}/driver [ -f %{_compiledir}/build/lib/${wslibfile} ] && cp %{_compiledir}/build/lib/${wslibfile} %{buildroot}%{homepath}/driver ||: cp %{_compiledir}/../include/client/taos.h %{buildroot}%{homepath}/include cp %{_compiledir}/../include/common/taosdef.h %{buildroot}%{homepath}/include cp %{_compiledir}/../include/util/taoserror.h %{buildroot}%{homepath}/include -cp %{_compiledir}/../include/util/tdef.h %{buildroot}%{homepath}/include +cp %{_compiledir}/../include/util/tdef.h %{buildroot}%{homepath}/include cp %{_compiledir}/../include/libs/function/taosudf.h %{buildroot}%{homepath}/include [ -f %{_compiledir}/build/include/taosws.h ] && cp %{_compiledir}/build/include/taosws.h %{buildroot}%{homepath}/include ||: #cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector @@ -246,6 +248,8 @@ if [ $1 -eq 0 ];then ${csudo}rm -f ${inc_link_dir}/taosudf.h || : ${csudo}rm -f ${inc_link_dir}/taows.h || : ${csudo}rm -f ${lib_link_dir}/libtaos.so || : + ${csudo}rm -f ${lib_link_dir}/libtaosnative.so || : + ${csudo}rm -f ${lib64_link_dir}/libtaosnative.so || : ${csudo}rm -f ${lib_link_dir}/libtaosws.so || : ${csudo}rm -f ${lib64_link_dir}/libtaosws.so || : diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index d844ce876e..8fd483b6a7 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -271,17 +271,23 @@ function install_lib() { # Remove links ${csudo}rm -f ${lib_link_dir}/libtaos.* || : ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : + ${csudo}rm -f ${lib_link_dir}/libtaosnative.* || : + ${csudo}rm -f ${lib64_link_dir}/libtaosnative.* || : #${csudo}rm -rf ${v15_java_app_dir} || : ${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/* ${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 ${csudo}ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + ${csudo}ln -sf ${install_main_dir}/driver/libtaosnative.* ${lib_link_dir}/libtaosnative.so.1 + ${csudo}ln -sf ${lib_link_dir}/libtaosnative.so.1 ${lib_link_dir}/libtaosnative.so [ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/driver/libtaosws.so ${lib_link_dir}/libtaosws.so || : if [[ -d ${lib64_link_dir} && ! 
-e ${lib64_link_dir}/libtaos.so ]]; then ${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : ${csudo}ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + ${csudo}ln -sf ${install_main_dir}/driver/libtaosnative.* ${lib64_link_dir}/libtaosnative.so.1 || : + ${csudo}ln -sf ${lib64_link_dir}/libtaosnative.so.1 ${lib64_link_dir}/libtaosnative.so || : [ -f ${install_main_dir}/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/libtaosws.so ${lib64_link_dir}/libtaosws.so || : fi diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh index 532838fd2b..a3fe210e8e 100755 --- a/packaging/tools/install_client.sh +++ b/packaging/tools/install_client.sh @@ -134,6 +134,7 @@ function install_bin() { function clean_lib() { sudo rm -f /usr/lib/libtaos.* || : + sudo rm -f /usr/lib/libtaosnative.* || : [ -f /usr/lib/libtaosws.so ] && sudo rm -f /usr/lib/libtaosws.so || : [ -f /usr/lib64/libtaosws.so ] && sudo rm -f /usr/lib64/libtaosws.so || : sudo rm -rf ${lib_dir} || : @@ -143,6 +144,8 @@ function install_lib() { # Remove links ${csudo}rm -f ${lib_link_dir}/libtaos.* || : ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : + ${csudo}rm -f ${lib_link_dir}/libtaosnative.* || : + ${csudo}rm -f ${lib64_link_dir}/libtaosnative.* || : [ -f ${lib_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.so || : [ -f ${lib64_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.so || : @@ -154,18 +157,24 @@ function install_lib() { if [ "$osType" != "Darwin" ]; then ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 ${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + ${csudo}ln -s ${install_main_dir}/driver/libtaosnative.* ${lib_link_dir}/libtaosnative.so.1 + ${csudo}ln -s ${lib_link_dir}/libtaosnative.so.1 ${lib_link_dir}/libtaosnative.so [ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/driver/libtaosws.so ${lib_link_dir}/libtaosws.so ||: if [ -d "${lib64_link_dir}" ]; then ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : ${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + ${csudo}ln -s ${install_main_dir}/driver/libtaosnative.* ${lib64_link_dir}/libtaosnative.so.1 || : + ${csudo}ln -s ${lib64_link_dir}/libtaosnative.so.1 ${lib64_link_dir}/libtaosnative.so || : [ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/driver/libtaosws.so ${lib64_link_dir}/libtaosws.so || : fi else ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib ${csudo}ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib + ${csudo}ln -s ${install_main_dir}/driver/libtaosnative.* ${lib_link_dir}/libtaosnative.1.dylib + ${csudo}ln -s ${lib_link_dir}/libtaosnative.1.dylib ${lib_link_dir}/libtaosnative.dylib [ -f ${install_main_dir}/driver/libtaosws.dylib ] && ${csudo}ln -sf ${install_main_dir}/driver/libtaosws.dylib ${lib_link_dir}/libtaosws.dylib ||: fi @@ -178,7 +187,7 @@ function install_lib() { } function install_header() { - ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/tdef.h ${inc_link_dir}/taoserror.h ${inc_link_dir}/taosudf.h || : + ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosws.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/tdef.h ${inc_link_dir}/taoserror.h ${inc_link_dir}/taosudf.h || : ${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 
644 ${install_main_dir}/include/* ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h ${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h diff --git a/packaging/tools/make_install.bat b/packaging/tools/make_install.bat index e5b29b9557..7e1264d216 100644 --- a/packaging/tools/make_install.bat +++ b/packaging/tools/make_install.bat @@ -66,6 +66,9 @@ copy %source_dir%\\include\\libs\\function\\taosudf.h %target_dir%\\include > nu copy %binary_dir%\\build\\lib\\taos.lib %target_dir%\\driver > nul copy %binary_dir%\\build\\lib\\taos_static.lib %target_dir%\\driver > nul copy %binary_dir%\\build\\lib\\taos.dll %target_dir%\\driver > nul +copy %binary_dir%\\build\\lib\\taosnative.lib %target_dir%\\driver > nul +copy %binary_dir%\\build\\lib\\taosnative_static.lib %target_dir%\\driver > nul +copy %binary_dir%\\build\\lib\\taosnative.dll %target_dir%\\driver > nul copy %binary_dir%\\build\\bin\\taos.exe %target_dir% > nul if exist %binary_dir%\\build\\bin\\taosBenchmark.exe ( copy %binary_dir%\\build\\bin\\taosBenchmark.exe %target_dir% > nul @@ -149,12 +152,14 @@ call :check_svc taoskeeper if exist c:\\windows\\sysnative ( echo x86 copy /y C:\\TDengine\\driver\\taos.dll %windir%\\sysnative > nul + copy /y C:\\TDengine\\driver\\taosnative.dll %windir%\\sysnative > nul if exist C:\\TDengine\\driver\\taosws.dll ( copy /y C:\\TDengine\\driver\\taosws.dll %windir%\\sysnative > nul ) ) else ( echo x64 copy /y C:\\TDengine\\driver\\taos.dll C:\\Windows\\System32 > nul + copy /y C:\\TDengine\\driver\\taosnative.dll C:\\Windows\\System32 > nul if exist C:\\TDengine\\driver\\taosws.dll ( copy /y C:\\TDengine\\driver\\taosws.dll C:\\Windows\\System32 > nul ) diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index f44a46d862..4a694f9841 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -313,9 +313,11 @@ function install_avro() { function install_lib() { # Remove links ${csudo}rm -f ${lib_link_dir}/libtaos.* || : + ${csudo}rm -f ${lib_link_dir}/libtaosnative.* || : [ -f ${lib_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.so || : if [ "$osType" != "Darwin" ]; then ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : + ${csudo}rm -f ${lib64_link_dir}/libtaosnative.* || : [ -f ${lib64_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.so || : fi @@ -324,6 +326,10 @@ function install_lib() { ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/libtaos.so.${verNumber} + ${csudo}cp ${binary_dir}/build/lib/libtaosnative.so.${verNumber} \ + ${install_main_dir}/driver && + ${csudo}chmod 777 ${install_main_dir}/driver/libtaosnative.so.${verNumber} + ${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 > /dev/null 2>&1 ${csudo}ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so > /dev/null 2>&1 if [ -d "${lib64_link_dir}" ]; then @@ -331,6 +337,13 @@ function install_lib() { ${csudo}ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so > /dev/null 2>&1 fi + ${csudo}ln -sf ${install_main_dir}/driver/libtaosnative.* ${lib_link_dir}/libtaosnative.so.1 > /dev/null 2>&1 + ${csudo}ln -sf ${lib_link_dir}/libtaosnative.so.1 ${lib_link_dir}/libtaosnative.so > /dev/null 2>&1 + if [ -d "${lib64_link_dir}" ]; then + ${csudo}ln -sf ${install_main_dir}/driver/libtaosnative.* ${lib64_link_dir}/libtaosnative.so.1 > /dev/null 2>&1 + ${csudo}ln -sf ${lib64_link_dir}/libtaosnative.so.1 ${lib64_link_dir}/libtaosnative.so > 
/dev/null 2>&1 + fi + if [ -f ${binary_dir}/build/lib/libtaosws.so ]; then ${csudo}cp ${binary_dir}/build/lib/libtaosws.so \ ${install_main_dir}/driver && @@ -342,11 +355,19 @@ function install_lib() { ${csudo}cp -Rf ${binary_dir}/build/lib/libtaos.${verNumber}.dylib \ ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/* + ${csudo}cp -Rf ${binary_dir}/build/lib/libtaosnative.${verNumber}.dylib \ + ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/* + ${csudo}ln -sf ${install_main_dir}/driver/libtaos.${verNumber}.dylib \ ${lib_link_dir}/libtaos.1.dylib > /dev/null 2>&1 || : + ${csudo}ln -sf ${install_main_dir}/driver/libtaosnative.${verNumber}.dylib \ + ${lib_link_dir}/libtaosnative.1.dylib > /dev/null 2>&1 || : + ${csudo}ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib > /dev/null 2>&1 || : + ${csudo}ln -sf ${lib_link_dir}/libtaosnative.1.dylib ${lib_link_dir}/libtaosnative.dylib > /dev/null 2>&1 || : + if [ -f ${binary_dir}/build/lib/libtaosws.dylib ]; then ${csudo}cp ${binary_dir}/build/lib/libtaosws.dylib \ ${install_main_dir}/driver && diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh index 87f4f57fd3..2c2f6205b5 100755 --- a/packaging/tools/makeclient.sh +++ b/packaging/tools/makeclient.sh @@ -79,10 +79,12 @@ if [ "$osType" != "Darwin" ]; then ${script_dir}/get_client.sh" fi lib_files="${build_dir}/lib/libtaos.so.${version}" + nativelib_files="${build_dir}/lib/libtaosnative.so.${version}" wslib_files="${build_dir}/lib/libtaosws.so" else bin_files="${build_dir}/bin/${clientName} ${script_dir}/remove_client.sh" lib_files="${build_dir}/lib/libtaos.${version}.dylib" + nativelib_files="${build_dir}/lib/libtaosnative.${version}.dylib" wslib_files="${build_dir}/lib/libtaosws.dylib" fi @@ -224,6 +226,7 @@ fi # Copy driver mkdir -p ${install_dir}/driver cp ${lib_files} ${install_dir}/driver +cp ${nativelib_files} ${install_dir}/driver # Copy connector connector_dir="${code_dir}/connector" diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index 7ef7903137..cc36d6bd5f 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -108,9 +108,11 @@ fi if [ "$osType" == "Darwin" ]; then lib_files="${build_dir}/lib/libtaos.${version}.dylib" + nativelib_files="${build_dir}/lib/libtaosnative.${version}.dylib" wslib_files="${build_dir}/lib/libtaosws.dylib" else lib_files="${build_dir}/lib/libtaos.so.${version}" + nativelib_files="${build_dir}/lib/libtaosnative.so.${version}" wslib_files="${build_dir}/lib/libtaosws.so" fi header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h ${code_dir}/include/util/tdef.h ${code_dir}/include/libs/function/taosudf.h" @@ -332,7 +334,7 @@ if [[ $dbName == "taos" ]]; then fi # Copy driver -mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" >${install_dir}/driver/vercomp.txt +mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && cp ${nativelib_files} ${install_dir}/driver && echo "${versionComp}" >${install_dir}/driver/vercomp.txt [ -f ${wslib_files} ] && cp ${wslib_files} ${install_dir}/driver || : # Copy connector && taosx diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index 68e3df8138..d8e9d207b1 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -205,18 +205,24 @@ function install_lib() { log_print "start install lib from ${lib_dir} to ${lib_link_dir}" ${csudo}rm -f 
${lib_link_dir}/libtaos* || : ${csudo}rm -f ${lib64_link_dir}/libtaos* || : + ${csudo}rm -f ${lib_link_dir}/libtaosnative* || : + ${csudo}rm -f ${lib64_link_dir}/libtaosnative* || : [ -f ${lib_link_dir}/libtaosws.${lib_file_ext} ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.${lib_file_ext} || : [ -f ${lib64_link_dir}/libtaosws.${lib_file_ext} ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.${lib_file_ext} || : ${csudo}ln -s ${lib_dir}/libtaos.* ${lib_link_dir}/libtaos.${lib_file_ext_1} 2>>${install_log_path} || return 1 ${csudo}ln -s ${lib_link_dir}/libtaos.${lib_file_ext_1} ${lib_link_dir}/libtaos.${lib_file_ext} 2>>${install_log_path} || return 1 + ${csudo}ln -s ${lib_dir}/libtaosnative.* ${lib_link_dir}/libtaosnative.${lib_file_ext_1} 2>>${install_log_path} || return 1 + ${csudo}ln -s ${lib_link_dir}/libtaosnative.${lib_file_ext_1} ${lib_link_dir}/libtaosnative.${lib_file_ext} 2>>${install_log_path} || return 1 [ -f ${lib_dir}/libtaosws.${lib_file_ext} ] && ${csudo}ln -sf ${lib_dir}/libtaosws.${lib_file_ext} ${lib_link_dir}/libtaosws.${lib_file_ext} ||: if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.${lib_file_ext} ]]; then ${csudo}ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.${lib_file_ext_1} 2>>${install_log_path} || return 1 ${csudo}ln -s ${lib64_link_dir}/libtaos.${lib_file_ext_1} ${lib64_link_dir}/libtaos.${lib_file_ext} 2>>${install_log_path} || return 1 + ${csudo}ln -s ${lib_dir}/libtaosnative.* ${lib64_link_dir}/libtaosnative.${lib_file_ext_1} 2>>${install_log_path} || return 1 + ${csudo}ln -s ${lib64_link_dir}/libtaosnative.${lib_file_ext_1} ${lib64_link_dir}/libtaosnative.${lib_file_ext} 2>>${install_log_path} || return 1 [ -f ${lib_dir}/libtaosws.${lib_file_ext} ] && ${csudo}ln -sf ${lib_dir}/libtaosws.${lib_file_ext} ${lib64_link_dir}/libtaosws.${lib_file_ext} 2>>${install_log_path} fi diff --git a/packaging/tools/preun.sh b/packaging/tools/preun.sh index d975841e83..9ebe880e9c 100755 --- a/packaging/tools/preun.sh +++ b/packaging/tools/preun.sh @@ -150,6 +150,7 @@ clean_service # Remove all links ${csudo}rm -f ${bin_link_dir}/taos || : ${csudo}rm -f ${bin_link_dir}/taosd || : +${csudo}rm -f ${bin_link_dir}/taosudf || : ${csudo}rm -f ${bin_link_dir}/taosadapter || : ${csudo}rm -f ${bin_link_dir}/taosBenchmark || : ${csudo}rm -f ${bin_link_dir}/taosdemo || : @@ -167,8 +168,10 @@ ${csudo}rm -f ${inc_link_dir}/tdef.h || : ${csudo}rm -f ${inc_link_dir}/taosudf.h || : ${csudo}rm -f ${inc_link_dir}/taosws.h || : ${csudo}rm -f ${lib_link_dir}/libtaos.* || : +${csudo}rm -f ${lib_link_dir}/libtaosnative.* || : ${csudo}rm -f ${lib_link_dir}/libtaosws.so || : ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : +${csudo}rm -f ${lib64_link_dir}/libtaosnative.* || : ${csudo}rm -f ${lib64_link_dir}/libtaosws.so || : ${csudo}rm -f ${log_link_dir} || : diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index ec73ca88cf..d62aa5a564 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -180,9 +180,11 @@ remove_bin() { function clean_lib() { # Remove link ${csudo}rm -f ${lib_link_dir}/libtaos.* || : + [ -f ${lib_link_dir}/libtaosnative.* ] && ${csudo}rm -f ${lib_link_dir}/libtaosnative.* || : [ -f ${lib_link_dir}/libtaosws.* ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.* || : ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : + [ -f ${lib64_link_dir}/libtaosnative.* ] && ${csudo}rm -f ${lib64_link_dir}/libtaosnative.* || : [ -f ${lib64_link_dir}/libtaosws.* ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.* || : #${csudo}rm -rf ${v15_java_app_dir} || 
: } diff --git a/packaging/tools/remove_client.sh b/packaging/tools/remove_client.sh index a7eb225704..e6ec9c3768 100755 --- a/packaging/tools/remove_client.sh +++ b/packaging/tools/remove_client.sh @@ -73,9 +73,11 @@ function clean_lib() { # Remove link ${csudo}rm -f ${lib_link_dir}/libtaos.* || : [ -f ${lib_link_dir}/libtaosws.* ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.* || : + [ -f ${lib_link_dir}/libtaosnative.* ] && ${csudo}rm -f ${lib_link_dir}/libtaosnative.* || : ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : [ -f ${lib64_link_dir}/libtaosws.* ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.* || : + [ -f ${lib64_link_dir}/libtaosnative.* ] && ${csudo}rm -f ${lib64_link_dir}/libtaosnative.* || : #${csudo}rm -rf ${v15_java_app_dir} || : } diff --git a/source/client/CMakeLists.txt b/source/client/CMakeLists.txt index 1b38ecd7ad..e4b6077113 100644 --- a/source/client/CMakeLists.txt +++ b/source/client/CMakeLists.txt @@ -5,19 +5,19 @@ if(TD_ENTERPRISE) endif() if(TD_WINDOWS) - add_library(${TAOS_LIB} SHARED ${CLIENT_SRC} ${CMAKE_CURRENT_SOURCE_DIR}/src/taos.rc.in) + add_library(${TAOS_NATIVE_LIB} SHARED ${CLIENT_SRC} ${CMAKE_CURRENT_SOURCE_DIR}/src/taosnative.rc.in) else() - add_library(${TAOS_LIB} SHARED ${CLIENT_SRC}) + add_library(${TAOS_NATIVE_LIB} SHARED ${CLIENT_SRC}) endif() if(${TD_DARWIN}) - target_compile_options(${TAOS_LIB} PRIVATE -Wno-error=deprecated-non-prototype) + target_compile_options(${TAOS_NATIVE_LIB} PRIVATE -Wno-error=deprecated-non-prototype) endif() -INCLUDE_DIRECTORIES(jni) + target_include_directories( - ${TAOS_LIB} + ${TAOS_NATIVE_LIB} PUBLIC "${TD_SOURCE_DIR}/include/client" PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" ) @@ -26,46 +26,38 @@ if(${TAOSD_INTEGRATED}) set(TAOSD_MODULE "taosd") endif() target_link_libraries( - ${TAOS_LIB} + ${TAOS_NATIVE_LIB} INTERFACE api PRIVATE os util common transport monitor nodes parser command planner catalog scheduler function qcom geometry ${TAOSD_MODULE} decimal ) -if(TD_WINDOWS) - INCLUDE_DIRECTORIES(jni/windows) - INCLUDE_DIRECTORIES(jni/windows/win32) - INCLUDE_DIRECTORIES(jni/windows/win32/bridge) -else() - INCLUDE_DIRECTORIES(jni/linux) -endif() - set_target_properties( - ${TAOS_LIB} + ${TAOS_NATIVE_LIB} PROPERTIES CLEAN_DIRECT_OUTPUT 1 ) set_target_properties( - ${TAOS_LIB} + ${TAOS_NATIVE_LIB} PROPERTIES VERSION ${TD_VER_NUMBER} SOVERSION 1 ) -add_library(${TAOS_LIB_STATIC} STATIC ${CLIENT_SRC}) +add_library(${TAOS_NATIVE_LIB_STATIC} STATIC ${CLIENT_SRC}) if(${TD_DARWIN}) - target_compile_options(${TAOS_LIB_STATIC} PRIVATE -Wno-error=deprecated-non-prototype) + target_compile_options(${TAOS_NATIVE_LIB_STATIC} PRIVATE -Wno-error=deprecated-non-prototype) endif() target_include_directories( - ${TAOS_LIB_STATIC} + ${TAOS_NATIVE_LIB_STATIC} PUBLIC "${TD_SOURCE_DIR}/include/client" PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" ) target_link_libraries( - ${TAOS_LIB_STATIC} + ${TAOS_NATIVE_LIB_STATIC} INTERFACE api PRIVATE os util common transport monitor nodes parser command planner catalog scheduler function qcom geometry decimal ) @@ -73,3 +65,5 @@ target_link_libraries( if(${BUILD_TEST}) ADD_SUBDIRECTORY(test) endif(${BUILD_TEST}) + +ADD_SUBDIRECTORY(wrapper) diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index aa6e06a989..39c9bb685e 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -922,7 +922,7 @@ void tscStopCrashReport() { } } -void tscWriteCrashInfo(int signum, void *sigInfo, void *context) { +void taos_write_crashinfo(int signum, void *sigInfo, void 
*context) { writeCrashLogToFile(signum, sigInfo, CUS_PROMPT, lastClusterId, appInfo.startTime); } #endif diff --git a/source/client/src/taos.rc.in b/source/client/src/taos.rc.in index 84a2a7a5b5..c062a32304 100644 --- a/source/client/src/taos.rc.in +++ b/source/client/src/taos.rc.in @@ -15,10 +15,10 @@ BEGIN BEGIN BLOCK "040904b0" BEGIN - VALUE "FileDescription", "Native C Driver for TDengine" + VALUE "FileDescription", "C Driver for TDengine" VALUE "FileVersion", "${TD_VER_NUMBER}" VALUE "InternalName", "taos.dll(${TD_VER_CPUTYPE})" - VALUE "LegalCopyright", "Copyright (C) 2020 TAOS Data" + VALUE "LegalCopyright", "Copyright (C) 2025 TAOS Data" VALUE "OriginalFilename", "" VALUE "ProductName", "taos.dll(${TD_VER_CPUTYPE})" VALUE "ProductVersion", "${TD_VER_NUMBER}" diff --git a/source/client/src/taosnative.rc.in b/source/client/src/taosnative.rc.in new file mode 100644 index 0000000000..1cd080b492 --- /dev/null +++ b/source/client/src/taosnative.rc.in @@ -0,0 +1,31 @@ +1 VERSIONINFO + FILEVERSION ${TD_VER_NUMBER} + PRODUCTVERSION ${TD_VER_NUMBER} + FILEFLAGSMASK 0x17L +#ifdef _DEBUG + FILEFLAGS 0x1L +#else + FILEFLAGS 0x0L +#endif + FILEOS 0x4L + FILETYPE 0x0L + FILESUBTYPE 0x0L +BEGIN + BLOCK "StringFileInfo" + BEGIN + BLOCK "040904b0" + BEGIN + VALUE "FileDescription", "Internal C Driver for TDengine" + VALUE "FileVersion", "${TD_VER_NUMBER}" + VALUE "InternalName", "taosnative.dll(${TD_VER_CPUTYPE})" + VALUE "LegalCopyright", "Copyright (C) 2025 TAOS Data" + VALUE "OriginalFilename", "" + VALUE "ProductName", "taosnative.dll(${TD_VER_CPUTYPE})" + VALUE "ProductVersion", "${TD_VER_NUMBER}" + END + END + BLOCK "VarFileInfo" + BEGIN + VALUE "Translation", 0x409, 1200 + END +END \ No newline at end of file diff --git a/source/client/test/CMakeLists.txt b/source/client/test/CMakeLists.txt index 9e1a04879e..cee5dc08f9 100644 --- a/source/client/test/CMakeLists.txt +++ b/source/client/test/CMakeLists.txt @@ -8,49 +8,49 @@ AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) ADD_EXECUTABLE(clientTest clientTests.cpp) TARGET_LINK_LIBRARIES( clientTest - os util common transport parser catalog scheduler gtest ${TAOS_LIB_STATIC} qcom executor function + os util common transport parser catalog scheduler gtest ${TAOS_NATIVE_LIB_STATIC} qcom executor function ) ADD_EXECUTABLE(connectOptionsTest connectOptionsTest.cpp) TARGET_LINK_LIBRARIES( connectOptionsTest - os util common transport parser catalog scheduler gtest ${TAOS_LIB_STATIC} qcom executor function + os util common transport parser catalog scheduler gtest ${TAOS_NATIVE_LIB_STATIC} qcom executor function ) ADD_EXECUTABLE(tmqTest tmqTest.cpp) TARGET_LINK_LIBRARIES( tmqTest - PUBLIC os util common transport parser catalog scheduler function gtest ${TAOS_LIB_STATIC} qcom + PUBLIC os util common transport parser catalog scheduler function gtest ${TAOS_NATIVE_LIB_STATIC} qcom ) ADD_EXECUTABLE(smlTest smlTest.cpp) TARGET_LINK_LIBRARIES( smlTest - PUBLIC os util common transport parser catalog scheduler function gtest ${TAOS_LIB_STATIC} qcom geometry + PUBLIC os util common transport parser catalog scheduler function gtest ${TAOS_NATIVE_LIB_STATIC} qcom geometry ) #ADD_EXECUTABLE(clientMonitorTest clientMonitorTests.cpp) #TARGET_LINK_LIBRARIES( # clientMonitorTest -# PUBLIC os util common transport monitor parser catalog scheduler function gtest ${TAOS_LIB_STATIC} qcom executor +# PUBLIC os util common transport monitor parser catalog scheduler function gtest ${TAOS_NATIVE_LIB_STATIC} qcom executor #) ADD_EXECUTABLE(userOperTest 
../../../tests/script/api/passwdTest.c) TARGET_LINK_LIBRARIES( userOperTest - PUBLIC ${TAOS_LIB} + PUBLIC ${TAOS_NATIVE_LIB} ) ADD_EXECUTABLE(stmt2Test stmt2Test.cpp) TARGET_LINK_LIBRARIES( stmt2Test - os util common transport parser catalog scheduler gtest ${TAOS_LIB_STATIC} qcom executor function + os util common transport parser catalog scheduler gtest ${TAOS_NATIVE_LIB_STATIC} qcom executor function ) ADD_EXECUTABLE(stmtTest stmtTest.cpp) TARGET_LINK_LIBRARIES( stmtTest - os util common transport parser catalog scheduler gtest ${TAOS_LIB_STATIC} qcom executor function + os util common transport parser catalog scheduler gtest ${TAOS_NATIVE_LIB_STATIC} qcom executor function ) TARGET_INCLUDE_DIRECTORIES( diff --git a/source/client/wrapper/CMakeLists.txt b/source/client/wrapper/CMakeLists.txt new file mode 100644 index 0000000000..17243f571c --- /dev/null +++ b/source/client/wrapper/CMakeLists.txt @@ -0,0 +1,67 @@ +aux_source_directory(src WRAPPER_SRC) + +if(TD_WINDOWS) + add_library(${TAOS_LIB} SHARED ${WRAPPER_SRC} ${CMAKE_CURRENT_SOURCE_DIR}/../src/taos.rc.in) +else() + add_library(${TAOS_LIB} SHARED ${WRAPPER_SRC}) +endif() + +if(${TD_DARWIN}) + target_compile_options(${TAOS_LIB} PRIVATE -Wno-error=deprecated-non-prototype) +endif() + +# jni include +INCLUDE_DIRECTORIES(jni) +if(TD_WINDOWS) + INCLUDE_DIRECTORIES(jni/windows) + INCLUDE_DIRECTORIES(jni/windows/win32) + INCLUDE_DIRECTORIES(jni/windows/win32/bridge) +else() + INCLUDE_DIRECTORIES(jni/linux) +endif() + +target_include_directories( + ${TAOS_LIB} + PUBLIC "${TD_SOURCE_DIR}/include/client" + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" +) + +target_link_libraries( + ${TAOS_LIB} + PUBLIC os util +) + +set_target_properties( + ${TAOS_LIB} + PROPERTIES + CLEAN_DIRECT_OUTPUT + 1 +) + +set_target_properties( + ${TAOS_LIB} + PROPERTIES + VERSION ${TD_VER_NUMBER} + SOVERSION 1 +) + +add_library(${TAOS_LIB_STATIC} STATIC ${WRAPPER_SRC}) + +if(${TD_DARWIN}) + target_compile_options(${TAOS_LIB_STATIC} PRIVATE -Wno-error=deprecated-non-prototype) +endif() + +target_include_directories( + ${TAOS_LIB_STATIC} + PUBLIC "${TD_SOURCE_DIR}/include/client" + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" +) + +target_link_libraries( + ${TAOS_LIB_STATIC} + PUBLIC os util +) + +# if(${BUILD_TEST}) +# ADD_SUBDIRECTORY(test) +# endif(${BUILD_TEST}) diff --git a/source/client/wrapper/inc/wrapper.h b/source/client/wrapper/inc/wrapper.h new file mode 100644 index 0000000000..34f9283f99 --- /dev/null +++ b/source/client/wrapper/inc/wrapper.h @@ -0,0 +1,232 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_WRAPPER_H +#define TDENGINE_WRAPPER_H + +#include "os.h" +#include "taos.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum { + DRIVER_NATIVE = 0, + DRIVER_WEBSOCKET = 1, + DRIVER_MAX = 2, +} EDriverType; + +extern EDriverType tsDriverType; +extern void *tsDriver; + +extern int32_t taosDriverInit(EDriverType driverType); +extern void taosDriverCleanup(); + +extern setConfRet (*fp_taos_set_config)(const char *config); + +extern int (*fp_taos_init)(void); +extern void (*fp_taos_cleanup)(void); +extern int (*fp_taos_options)(TSDB_OPTION option, const void *arg, ...); +extern int (*fp_taos_options_connection)(TAOS *taos, TSDB_OPTION_CONNECTION option, const void *arg, ...); +extern TAOS *(*fp_taos_connect)(const char *ip, const char *user, const char *pass, const char *db, uint16_t port); +extern TAOS *(*fp_taos_connect_auth)(const char *ip, const char *user, const char *auth, const char *db, uint16_t port); +extern void (*fp_taos_close)(TAOS *taos); + +extern const char *(*fp_taos_data_type)(int type); + +extern TAOS_STMT *(*fp_taos_stmt_init)(TAOS *taos); +extern TAOS_STMT *(*fp_taos_stmt_init_with_reqid)(TAOS *taos, int64_t reqid); +extern TAOS_STMT *(*fp_taos_stmt_init_with_options)(TAOS *taos, TAOS_STMT_OPTIONS *options); +extern int (*fp_taos_stmt_prepare)(TAOS_STMT *stmt, const char *sql, unsigned long length); +extern int (*fp_taos_stmt_set_tbname_tags)(TAOS_STMT *stmt, const char *name, TAOS_MULTI_BIND *tags); +extern int (*fp_taos_stmt_set_tbname)(TAOS_STMT *stmt, const char *name); +extern int (*fp_taos_stmt_set_tags)(TAOS_STMT *stmt, TAOS_MULTI_BIND *tags); +extern int (*fp_taos_stmt_set_sub_tbname)(TAOS_STMT *stmt, const char *name); +extern int (*fp_taos_stmt_get_tag_fields)(TAOS_STMT *stmt, int *fieldNum, TAOS_FIELD_E **fields); +extern int (*fp_taos_stmt_get_col_fields)(TAOS_STMT *stmt, int *fieldNum, TAOS_FIELD_E **fields); +extern void (*fp_taos_stmt_reclaim_fields)(TAOS_STMT *stmt, TAOS_FIELD_E *fields); + +extern int (*fp_taos_stmt_is_insert)(TAOS_STMT *stmt, int *insert); +extern int (*fp_taos_stmt_num_params)(TAOS_STMT *stmt, int *nums); +extern int (*fp_taos_stmt_get_param)(TAOS_STMT *stmt, int idx, int *type, int *bytes); +extern int (*fp_taos_stmt_bind_param)(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind); +extern int (*fp_taos_stmt_bind_param_batch)(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind); +extern int (*fp_taos_stmt_bind_single_param_batch)(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind, int colIdx); +extern int (*fp_taos_stmt_add_batch)(TAOS_STMT *stmt); +extern int (*fp_taos_stmt_execute)(TAOS_STMT *stmt); +extern TAOS_RES *(*fp_taos_stmt_use_result)(TAOS_STMT *stmt); +extern int (*fp_taos_stmt_close)(TAOS_STMT *stmt); +extern char *(*fp_taos_stmt_errstr)(TAOS_STMT *stmt); +extern int (*fp_taos_stmt_affected_rows)(TAOS_STMT *stmt); +extern int (*fp_taos_stmt_affected_rows_once)(TAOS_STMT *stmt); + +extern TAOS_STMT2 *(*fp_taos_stmt2_init)(TAOS *taos, TAOS_STMT2_OPTION *option); +extern int (*fp_taos_stmt2_prepare)(TAOS_STMT2 *stmt, const char *sql, unsigned long length); +extern int (*fp_taos_stmt2_bind_param)(TAOS_STMT2 *stmt, TAOS_STMT2_BINDV *bindv, int32_t col_idx); +extern int (*fp_taos_stmt2_bind_param_a)(TAOS_STMT2 *stmt, TAOS_STMT2_BINDV *bindv, int32_t col_idx, + __taos_async_fn_t fp, void *param); +extern int (*fp_taos_stmt2_exec)(TAOS_STMT2 *stmt, int *affected_rows); +extern int (*fp_taos_stmt2_close)(TAOS_STMT2 *stmt); +extern int (*fp_taos_stmt2_is_insert)(TAOS_STMT2 *stmt, int *insert); +extern int 
(*fp_taos_stmt2_get_fields)(TAOS_STMT2 *stmt, int *count, TAOS_FIELD_ALL **fields); +extern void (*fp_taos_stmt2_free_fields)(TAOS_STMT2 *stmt, TAOS_FIELD_ALL *fields); +extern TAOS_RES *(*fp_taos_stmt2_result)(TAOS_STMT2 *stmt); +extern char *(*fp_taos_stmt2_error)(TAOS_STMT2 *stmt); + +extern TAOS_RES *(*fp_taos_query)(TAOS *taos, const char *sql); +extern TAOS_RES *(*fp_taos_query_with_reqid)(TAOS *taos, const char *sql, int64_t reqId); + +extern TAOS_ROW (*fp_taos_fetch_row)(TAOS_RES *res); +extern int (*fp_taos_result_precision)(TAOS_RES *res); // get the time precision of result +extern void (*fp_taos_free_result)(TAOS_RES *res); +extern void (*fp_taos_kill_query)(TAOS *taos); +extern int (*fp_taos_field_count)(TAOS_RES *res); +extern int (*fp_taos_num_fields)(TAOS_RES *res); +extern int (*fp_taos_affected_rows)(TAOS_RES *res); +extern int64_t (*fp_taos_affected_rows64)(TAOS_RES *res); + +extern TAOS_FIELD *(*fp_taos_fetch_fields)(TAOS_RES *res); +extern TAOS_FIELD_E *(*fp_taos_fetch_fields_e)(TAOS_RES *res); +extern int (*fp_taos_select_db)(TAOS *taos, const char *db); +extern int (*fp_taos_print_row)(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields); +extern int (*fp_taos_print_row_with_size)(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields); +extern void (*fp_taos_stop_query)(TAOS_RES *res); +extern bool (*fp_taos_is_null)(TAOS_RES *res, int32_t row, int32_t col); +extern int (*fp_taos_is_null_by_column)(TAOS_RES *res, int columnIndex, bool result[], int *rows); +extern bool (*fp_taos_is_update_query)(TAOS_RES *res); +extern int (*fp_taos_fetch_block)(TAOS_RES *res, TAOS_ROW *rows); +extern int (*fp_taos_fetch_block_s)(TAOS_RES *res, int *numOfRows, TAOS_ROW *rows); +extern int (*fp_taos_fetch_raw_block)(TAOS_RES *res, int *numOfRows, void **pData); +extern int *(*fp_taos_get_column_data_offset)(TAOS_RES *res, int columnIndex); +extern int (*fp_taos_validate_sql)(TAOS *taos, const char *sql); +extern void (*fp_taos_reset_current_db)(TAOS *taos); + +extern int *(*fp_taos_fetch_lengths)(TAOS_RES *res); +extern TAOS_ROW *(*fp_taos_result_block)(TAOS_RES *res); + +extern const char *(*fp_taos_get_server_info)(TAOS *taos); +extern const char *(*fp_taos_get_client_info)(); +extern int (*fp_taos_get_current_db)(TAOS *taos, char *database, int len, int *required); + +extern const char *(*fp_taos_errstr)(TAOS_RES *res); +extern int (*fp_taos_errno)(TAOS_RES *res); + +extern void (*fp_taos_query_a)(TAOS *taos, const char *sql, __taos_async_fn_t fp, void *param); +extern void (*fp_taos_query_a_with_reqid)(TAOS *taos, const char *sql, __taos_async_fn_t fp, void *param, + int64_t reqid); +extern void (*fp_taos_fetch_rows_a)(TAOS_RES *res, __taos_async_fn_t fp, void *param); +extern void (*fp_taos_fetch_raw_block_a)(TAOS_RES *res, __taos_async_fn_t fp, void *param); +extern const void *(*fp_taos_get_raw_block)(TAOS_RES *res); + +extern int (*fp_taos_get_db_route_info)(TAOS *taos, const char *db, TAOS_DB_ROUTE_INFO *dbInfo); +extern int (*fp_taos_get_table_vgId)(TAOS *taos, const char *db, const char *table, int *vgId); +extern int (*fp_taos_get_tables_vgId)(TAOS *taos, const char *db, const char *table[], int tableNum, int *vgId); + +extern int (*fp_taos_load_table_info)(TAOS *taos, const char *tableNameList); + +extern void (*fp_taos_set_hb_quit)(int8_t quitByKill); + +extern int (*fp_taos_set_notify_cb)(TAOS *taos, __taos_notify_fn_t fp, void *param, int type); + +extern void (*fp_taos_fetch_whitelist_a)(TAOS *taos, __taos_async_whitelist_fn_t fp, void 
*param); + +extern int (*fp_taos_set_conn_mode)(TAOS *taos, int mode, int value); + +extern TAOS_RES *(*fp_taos_schemaless_insert)(TAOS *taos, char *lines[], int numLines, int protocol, int precision); +extern TAOS_RES *(*fp_taos_schemaless_insert_with_reqid)(TAOS *taos, char *lines[], int numLines, int protocol, + int precision, int64_t reqid); +extern TAOS_RES *(*fp_taos_schemaless_insert_raw)(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, + int precision); +extern TAOS_RES *(*fp_taos_schemaless_insert_raw_with_reqid)(TAOS *taos, char *lines, int len, int32_t *totalRows, + int protocol, int precision, int64_t reqid); +extern TAOS_RES *(*fp_taos_schemaless_insert_ttl)(TAOS *taos, char *lines[], int numLines, int protocol, int precision, + int32_t ttl); +extern TAOS_RES *(*fp_taos_schemaless_insert_ttl_with_reqid)(TAOS *taos, char *lines[], int numLines, int protocol, + int precision, int32_t ttl, int64_t reqid); +extern TAOS_RES *(*fp_taos_schemaless_insert_raw_ttl)(TAOS *taos, char *lines, int len, int32_t *totalRows, + int protocol, int precision, int32_t ttl); +extern TAOS_RES *(*fp_taos_schemaless_insert_raw_ttl_with_reqid)(TAOS *taos, char *lines, int len, int32_t *totalRows, + int protocol, int precision, int32_t ttl, + int64_t reqid); +extern TAOS_RES *(*fp_taos_schemaless_insert_raw_ttl_with_reqid_tbname_key)(TAOS *taos, char *lines, int len, + int32_t *totalRows, int protocol, + int precision, int32_t ttl, int64_t reqid, + char *tbnameKey); +extern TAOS_RES *(*fp_taos_schemaless_insert_ttl_with_reqid_tbname_key)(TAOS *taos, char *lines[], int numLines, + int protocol, int precision, int32_t ttl, + int64_t reqid, char *tbnameKey); + +extern tmq_conf_t *(*fp_tmq_conf_new)(); +extern tmq_conf_res_t (*fp_tmq_conf_set)(tmq_conf_t *conf, const char *key, const char *value); +extern void (*fp_tmq_conf_destroy)(tmq_conf_t *conf); +extern void (*fp_tmq_conf_set_auto_commit_cb)(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); + +extern tmq_list_t *(*fp_tmq_list_new)(); +extern int32_t (*fp_tmq_list_append)(tmq_list_t *, const char *); +extern void (*fp_tmq_list_destroy)(tmq_list_t *); +extern int32_t (*fp_tmq_list_get_size)(const tmq_list_t *); +extern char **(*fp_tmq_list_to_c_array)(const tmq_list_t *); + +extern tmq_t *(*fp_tmq_consumer_new)(tmq_conf_t *conf, char *errstr, int32_t errstrLen); +extern int32_t (*fp_tmq_subscribe)(tmq_t *tmq, const tmq_list_t *topic_list); +extern int32_t (*fp_tmq_unsubscribe)(tmq_t *tmq); +extern int32_t (*fp_tmq_subscription)(tmq_t *tmq, tmq_list_t **topics); +extern TAOS_RES *(*fp_tmq_consumer_poll)(tmq_t *tmq, int64_t timeout); +extern int32_t (*fp_tmq_consumer_close)(tmq_t *tmq); +extern int32_t (*fp_tmq_commit_sync)(tmq_t *tmq, const TAOS_RES *msg); +extern void (*fp_tmq_commit_async)(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); +extern int32_t (*fp_tmq_commit_offset_sync)(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); +extern void (*fp_tmq_commit_offset_async)(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, + tmq_commit_cb *cb, void *param); +extern int32_t (*fp_tmq_get_topic_assignment)(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment, + int32_t *numOfAssignment); +extern void (*fp_tmq_free_assignment)(tmq_topic_assignment *pAssignment); +extern int32_t (*fp_tmq_offset_seek)(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); +extern int64_t (*fp_tmq_position)(tmq_t *tmq, const char *pTopicName, int32_t vgId); +extern int64_t 
(*fp_tmq_committed)(tmq_t *tmq, const char *pTopicName, int32_t vgId); + +extern TAOS *(*fp_tmq_get_connect)(tmq_t *tmq); +extern const char *(*fp_tmq_get_table_name)(TAOS_RES *res); +extern tmq_res_t (*fp_tmq_get_res_type)(TAOS_RES *res); +extern const char *(*fp_tmq_get_topic_name)(TAOS_RES *res); +extern const char *(*fp_tmq_get_db_name)(TAOS_RES *res); +extern int32_t (*fp_tmq_get_vgroup_id)(TAOS_RES *res); +extern int64_t (*fp_tmq_get_vgroup_offset)(TAOS_RES *res); +extern const char *(*fp_tmq_err2str)(int32_t code); + +extern int32_t (*fp_tmq_get_raw)(TAOS_RES *res, tmq_raw_data *raw); +extern int32_t (*fp_tmq_write_raw)(TAOS *taos, tmq_raw_data raw); +extern int (*fp_taos_write_raw_block)(TAOS *taos, int numOfRows, char *pData, const char *tbname); +extern int (*fp_taos_write_raw_block_with_reqid)(TAOS *taos, int numOfRows, char *pData, const char *tbname, + int64_t reqid); +extern int (*fp_taos_write_raw_block_with_fields)(TAOS *taos, int rows, char *pData, const char *tbname, + TAOS_FIELD *fields, int numFields); +extern int (*fp_taos_write_raw_block_with_fields_with_reqid)(TAOS *taos, int rows, char *pData, const char *tbname, + TAOS_FIELD *fields, int numFields, int64_t reqid); +extern void (*fp_tmq_free_raw)(tmq_raw_data raw); + +extern char *(*fp_tmq_get_json_meta)(TAOS_RES *res); +extern void (*fp_tmq_free_json_meta)(char *jsonMeta); + +extern TSDB_SERVER_STATUS (*fp_taos_check_server_status)(const char *fqdn, int port, char *details, int maxlen); +extern void (*fp_taos_write_crashinfo)(int signum, void *sigInfo, void *context); +extern char *(*fp_getBuildInfo)(); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_CLIENT_WRAPPER_H diff --git a/source/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h b/source/client/wrapper/jni/com_taosdata_jdbc_TSDBJNIConnector.h similarity index 100% rename from source/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h rename to source/client/wrapper/jni/com_taosdata_jdbc_TSDBJNIConnector.h diff --git a/source/client/jni/com_taosdata_jdbc_tmq_TMQConnector.h b/source/client/wrapper/jni/com_taosdata_jdbc_tmq_TMQConnector.h similarity index 100% rename from source/client/jni/com_taosdata_jdbc_tmq_TMQConnector.h rename to source/client/wrapper/jni/com_taosdata_jdbc_tmq_TMQConnector.h diff --git a/source/client/jni/jniCommon.h b/source/client/wrapper/jni/jniCommon.h similarity index 100% rename from source/client/jni/jniCommon.h rename to source/client/wrapper/jni/jniCommon.h diff --git a/source/client/jni/linux/AWTCocoaComponent.h b/source/client/wrapper/jni/linux/AWTCocoaComponent.h similarity index 100% rename from source/client/jni/linux/AWTCocoaComponent.h rename to source/client/wrapper/jni/linux/AWTCocoaComponent.h diff --git a/source/client/jni/linux/JDWP.h b/source/client/wrapper/jni/linux/JDWP.h similarity index 100% rename from source/client/jni/linux/JDWP.h rename to source/client/wrapper/jni/linux/JDWP.h diff --git a/source/client/jni/linux/JDWPCommands.h b/source/client/wrapper/jni/linux/JDWPCommands.h similarity index 100% rename from source/client/jni/linux/JDWPCommands.h rename to source/client/wrapper/jni/linux/JDWPCommands.h diff --git a/source/client/jni/linux/JavaVM.h b/source/client/wrapper/jni/linux/JavaVM.h similarity index 100% rename from source/client/jni/linux/JavaVM.h rename to source/client/wrapper/jni/linux/JavaVM.h diff --git a/source/client/jni/linux/NSJavaConfiguration.h b/source/client/wrapper/jni/linux/NSJavaConfiguration.h similarity index 100% rename from source/client/jni/linux/NSJavaConfiguration.h rename to 
source/client/wrapper/jni/linux/NSJavaConfiguration.h diff --git a/source/client/jni/linux/NSJavaVirtualMachine.h b/source/client/wrapper/jni/linux/NSJavaVirtualMachine.h similarity index 100% rename from source/client/jni/linux/NSJavaVirtualMachine.h rename to source/client/wrapper/jni/linux/NSJavaVirtualMachine.h diff --git a/source/client/jni/linux/jawt.h b/source/client/wrapper/jni/linux/jawt.h similarity index 63% rename from source/client/jni/linux/jawt.h rename to source/client/wrapper/jni/linux/jawt.h index b62fe666fe..0ddbbaad82 100644 --- a/source/client/jni/linux/jawt.h +++ b/source/client/wrapper/jni/linux/jawt.h @@ -14,107 +14,6 @@ extern "C" { #endif -/* - * AWT native interface (new in JDK 1.3) - * - * The AWT native interface allows a native C or C++ application a means - * by which to access native structures in AWT. This is to facilitate moving - * legacy C and C++ applications to Java and to target the needs of the - * community who, at present, wish to do their own native rendering to canvases - * for performance reasons. Standard extensions such as Java3D also require a - * means to access the underlying native data structures of AWT. - * - * There may be future extensions to this API depending on demand. - * - * A VM does not have to implement this API in order to pass the JCK. - * It is recommended, however, that this API is implemented on VMs that support - * standard extensions, such as Java3D. - * - * Since this is a native API, any program which uses it cannot be considered - * 100% pure java. - */ - -/* - * AWT Native Drawing Surface (JAWT_DrawingSurface). - * - * For each platform, there is a native drawing surface structure. This - * platform-specific structure can be found in jawt_md.h. It is recommended - * that additional platforms follow the same model. It is also recommended - * that VMs on Win32 and Solaris support the existing structures in jawt_md.h. - * - ******************* - * EXAMPLE OF USAGE: - ******************* - * - * In Win32, a programmer wishes to access the HWND of a canvas to perform - * native rendering into it. The programmer has declared the paint() method - * for their canvas subclass to be native: - * - * - * MyCanvas.java: - * - * import java.awt.*; - * - * public class MyCanvas extends Canvas { - * - * static { - * System.loadLibrary("mylib"); - * } - * - * public native void paint(Graphics g); - * } - * - * - * myfile.c: - * - * #include "jawt_md.h" - * #include - * - * JNIEXPORT void JNICALL - * Java_MyCanvas_paint(JNIEnv* env, jobject canvas, jobject graphics) - * { - * JAWT awt; - * JAWT_DrawingSurface* ds; - * JAWT_DrawingSurfaceInfo* dsi; - * JAWT_Win32DrawingSurfaceInfo* dsi_win; - * jboolean result; - * jint lock; - * - * // Get the AWT - * awt.version = JAWT_VERSION_1_3; - * result = JAWT_GetAWT(env, &awt); - * assert(result != JNI_FALSE); - * - * // Get the drawing surface - * ds = awt.GetDrawingSurface(env, canvas); - * assert(ds != NULL); - * - * // Lock the drawing surface - * lock = ds->Lock(ds); - * assert((lock & JAWT_LOCK_ERROR) == 0); - * - * // Get the drawing surface info - * dsi = ds->GetDrawingSurfaceInfo(ds); - * - * // Get the platform-specific drawing info - * dsi_win = (JAWT_Win32DrawingSurfaceInfo*)dsi->platformInfo; - * - * ////////////////////////////// - * // !!! DO PAINTING HERE !!! 
// - * ////////////////////////////// - * - * // Free the drawing surface info - * ds->FreeDrawingSurfaceInfo(dsi); - * - * // Unlock the drawing surface - * ds->Unlock(ds); - * - * // Free the drawing surface - * awt.FreeDrawingSurface(ds); - * } - * - */ - /* * JAWT_Rectangle * Structure for a native rectangle. diff --git a/source/client/jni/linux/jawt_md.h b/source/client/wrapper/jni/linux/jawt_md.h similarity index 100% rename from source/client/jni/linux/jawt_md.h rename to source/client/wrapper/jni/linux/jawt_md.h diff --git a/source/client/jni/linux/jdwpTransport.h b/source/client/wrapper/jni/linux/jdwpTransport.h similarity index 100% rename from source/client/jni/linux/jdwpTransport.h rename to source/client/wrapper/jni/linux/jdwpTransport.h diff --git a/source/client/jni/linux/jni.h b/source/client/wrapper/jni/linux/jni.h similarity index 100% rename from source/client/jni/linux/jni.h rename to source/client/wrapper/jni/linux/jni.h diff --git a/source/client/jni/linux/jni_md.h b/source/client/wrapper/jni/linux/jni_md.h similarity index 100% rename from source/client/jni/linux/jni_md.h rename to source/client/wrapper/jni/linux/jni_md.h diff --git a/source/client/jni/linux/jvmti.h b/source/client/wrapper/jni/linux/jvmti.h similarity index 100% rename from source/client/jni/linux/jvmti.h rename to source/client/wrapper/jni/linux/jvmti.h diff --git a/source/client/jni/windows/classfile_constants.h b/source/client/wrapper/jni/windows/classfile_constants.h similarity index 100% rename from source/client/jni/windows/classfile_constants.h rename to source/client/wrapper/jni/windows/classfile_constants.h diff --git a/source/client/jni/windows/jawt.h b/source/client/wrapper/jni/windows/jawt.h similarity index 73% rename from source/client/jni/windows/jawt.h rename to source/client/wrapper/jni/windows/jawt.h index 231c292dc8..e9bc89d5fb 100644 --- a/source/client/jni/windows/jawt.h +++ b/source/client/wrapper/jni/windows/jawt.h @@ -52,87 +52,6 @@ extern "C" { * 100% pure java. */ -/* - * AWT Native Drawing Surface (JAWT_DrawingSurface). - * - * For each platform, there is a native drawing surface structure. This - * platform-specific structure can be found in jawt_md.h. It is recommended - * that additional platforms follow the same model. It is also recommended - * that VMs on Win32 and Solaris support the existing structures in jawt_md.h. - * - ******************* - * EXAMPLE OF USAGE: - ******************* - * - * In Win32, a programmer wishes to access the HWND of a canvas to perform - * native rendering into it. 
The programmer has declared the paint() method - * for their canvas subclass to be native: - * - * - * MyCanvas.java: - * - * import java.awt.*; - * - * public class MyCanvas extends Canvas { - * - * static { - * System.loadLibrary("mylib"); - * } - * - * public native void paint(Graphics g); - * } - * - * - * myfile.c: - * - * #include "jawt_md.h" - * #include - * - * JNIEXPORT void JNICALL - * Java_MyCanvas_paint(JNIEnv* env, jobject canvas, jobject graphics) - * { - * JAWT awt; - * JAWT_DrawingSurface* ds; - * JAWT_DrawingSurfaceInfo* dsi; - * JAWT_Win32DrawingSurfaceInfo* dsi_win; - * jboolean result; - * jint lock; - * - * // Get the AWT - * awt.version = JAWT_VERSION_1_3; - * result = JAWT_GetAWT(env, &awt); - * assert(result != JNI_FALSE); - * - * // Get the drawing surface - * ds = awt.GetDrawingSurface(env, canvas); - * assert(ds != NULL); - * - * // Lock the drawing surface - * lock = ds->Lock(ds); - * assert((lock & JAWT_LOCK_ERROR) == 0); - * - * // Get the drawing surface info - * dsi = ds->GetDrawingSurfaceInfo(ds); - * - * // Get the platform-specific drawing info - * dsi_win = (JAWT_Win32DrawingSurfaceInfo*)dsi->platformInfo; - * - * ////////////////////////////// - * // !!! DO PAINTING HERE !!! // - * ////////////////////////////// - * - * // Free the drawing surface info - * ds->FreeDrawingSurfaceInfo(dsi); - * - * // Unlock the drawing surface - * ds->Unlock(ds); - * - * // Free the drawing surface - * awt.FreeDrawingSurface(ds); - * } - * - */ - /* * JAWT_Rectangle * Structure for a native rectangle. diff --git a/source/client/jni/windows/jdwpTransport.h b/source/client/wrapper/jni/windows/jdwpTransport.h similarity index 100% rename from source/client/jni/windows/jdwpTransport.h rename to source/client/wrapper/jni/windows/jdwpTransport.h diff --git a/source/client/jni/windows/jni.h b/source/client/wrapper/jni/windows/jni.h similarity index 100% rename from source/client/jni/windows/jni.h rename to source/client/wrapper/jni/windows/jni.h diff --git a/source/client/jni/windows/jvmti.h b/source/client/wrapper/jni/windows/jvmti.h similarity index 100% rename from source/client/jni/windows/jvmti.h rename to source/client/wrapper/jni/windows/jvmti.h diff --git a/source/client/jni/windows/jvmticmlr.h b/source/client/wrapper/jni/windows/jvmticmlr.h similarity index 100% rename from source/client/jni/windows/jvmticmlr.h rename to source/client/wrapper/jni/windows/jvmticmlr.h diff --git a/source/client/jni/windows/win32/bridge/AccessBridgeCallbacks.h b/source/client/wrapper/jni/windows/win32/bridge/AccessBridgeCallbacks.h similarity index 100% rename from source/client/jni/windows/win32/bridge/AccessBridgeCallbacks.h rename to source/client/wrapper/jni/windows/win32/bridge/AccessBridgeCallbacks.h diff --git a/source/client/jni/windows/win32/bridge/AccessBridgeCalls.c b/source/client/wrapper/jni/windows/win32/bridge/AccessBridgeCalls.c similarity index 100% rename from source/client/jni/windows/win32/bridge/AccessBridgeCalls.c rename to source/client/wrapper/jni/windows/win32/bridge/AccessBridgeCalls.c diff --git a/source/client/jni/windows/win32/bridge/AccessBridgeCalls.h b/source/client/wrapper/jni/windows/win32/bridge/AccessBridgeCalls.h similarity index 100% rename from source/client/jni/windows/win32/bridge/AccessBridgeCalls.h rename to source/client/wrapper/jni/windows/win32/bridge/AccessBridgeCalls.h diff --git a/source/client/jni/windows/win32/bridge/AccessBridgePackages.h b/source/client/wrapper/jni/windows/win32/bridge/AccessBridgePackages.h similarity index 100% rename 
from source/client/jni/windows/win32/bridge/AccessBridgePackages.h rename to source/client/wrapper/jni/windows/win32/bridge/AccessBridgePackages.h diff --git a/source/client/jni/windows/win32/jawt_md.h b/source/client/wrapper/jni/windows/win32/jawt_md.h similarity index 100% rename from source/client/jni/windows/win32/jawt_md.h rename to source/client/wrapper/jni/windows/win32/jawt_md.h diff --git a/source/client/jni/windows/win32/jni_md.h b/source/client/wrapper/jni/windows/win32/jni_md.h similarity index 100% rename from source/client/jni/windows/win32/jni_md.h rename to source/client/wrapper/jni/windows/win32/jni_md.h diff --git a/source/client/src/clientJniConnector.c b/source/client/wrapper/src/clientJniConnector.c similarity index 100% rename from source/client/src/clientJniConnector.c rename to source/client/wrapper/src/clientJniConnector.c diff --git a/source/client/src/clientTmqConnector.c b/source/client/wrapper/src/clientTmqConnector.c similarity index 100% rename from source/client/src/clientTmqConnector.c rename to source/client/wrapper/src/clientTmqConnector.c diff --git a/source/client/wrapper/src/wrapperDriver.c b/source/client/wrapper/src/wrapperDriver.c new file mode 100644 index 0000000000..415affc66f --- /dev/null +++ b/source/client/wrapper/src/wrapperDriver.c @@ -0,0 +1,271 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "wrapper.h" + +#ifdef WINDOWS +#define DRIVER_NATIVE_NAME "taosnative.dll" +#define DRIVER_WSBSOCKET_NAME "taosws.dll" +#elif defined(DARWIN) +#define DRIVER_NATIVE_NAME "libtaosnative.dylib" +#define DRIVER_WSBSOCKET_NAME "libtaosws.dylib" +#else +#define DRIVER_NATIVE_NAME "libtaosnative.so" +#define DRIVER_WSBSOCKET_NAME "libtaosws.so" +#endif + +#define LOAD_FUNC(fptr, fname) \ + funcName = fname; \ + fptr = taosLoadDllFunc(tsDriver, funcName); \ + if (fptr == NULL) goto _OVER; + +#ifdef WEBSOCKET +EDriverType tsDriverType = DRIVER_NATIVE; // todo simon +#else +EDriverType tsDriverType = DRIVER_NATIVE; +#endif + +void *tsDriver = NULL; + +static int32_t tossGetDevelopPath(char *driverPath, const char *driverName) { + char appPath[PATH_MAX] = {0}; + int32_t ret = taosAppPath(appPath, PATH_MAX); + if (ret == 0) { + snprintf(driverPath, PATH_MAX, "%s%s..%slib%s%s", appPath, TD_DIRSEP, TD_DIRSEP, TD_DIRSEP, driverName); + ret = taosRealPath(driverPath, NULL, PATH_MAX); + } + + return ret; +} + +static int32_t taosGetInstallPath(char *driverPath, const char *driverName) { + tstrncpy(driverPath, driverName, PATH_MAX); + return 0; +} + +int32_t taosDriverInit(EDriverType driverType) { + int32_t code = -1; + char driverPath[PATH_MAX + 32] = {0}; + const char *driverName = NULL; + const char *funcName = NULL; + + if (driverType == DRIVER_NATIVE) { + driverName = DRIVER_NATIVE_NAME; + } else { + driverName = DRIVER_WSBSOCKET_NAME; + } + + if (tsDriver == NULL && tossGetDevelopPath(driverPath, driverName) == 0) { + tsDriver = taosLoadDll(driverPath); + } + + if (tsDriver == NULL && taosGetInstallPath(driverPath, driverName) == 0) { + tsDriver = taosLoadDll(driverPath); + } + + if (tsDriver == NULL) { + printf("failed to load %s since %s [0x%X]\r\n", driverName, terrstr(), terrno); + return code; + } + + // printf("load driver from %s\r\n", driverPath); + LOAD_FUNC(fp_taos_set_config, "taos_set_config"); + + LOAD_FUNC(fp_taos_init, "taos_init"); + LOAD_FUNC(fp_taos_cleanup, "taos_cleanup"); + LOAD_FUNC(fp_taos_options, "taos_options"); + LOAD_FUNC(fp_taos_options_connection, "taos_options_connection"); + LOAD_FUNC(fp_taos_connect, "taos_connect"); + LOAD_FUNC(fp_taos_connect_auth, "taos_connect_auth"); + LOAD_FUNC(fp_taos_close, "taos_close"); + + LOAD_FUNC(fp_taos_data_type, "taos_data_type"); + + LOAD_FUNC(fp_taos_stmt_init, "taos_stmt_init"); + LOAD_FUNC(fp_taos_stmt_init_with_reqid, "taos_stmt_init_with_reqid"); + LOAD_FUNC(fp_taos_stmt_init_with_options, "taos_stmt_init_with_options"); + LOAD_FUNC(fp_taos_stmt_prepare, "taos_stmt_prepare"); + LOAD_FUNC(fp_taos_stmt_set_tbname_tags, "taos_stmt_set_tbname_tags"); + LOAD_FUNC(fp_taos_stmt_set_tbname, "taos_stmt_set_tbname"); + LOAD_FUNC(fp_taos_stmt_set_tags, "taos_stmt_set_tags"); + LOAD_FUNC(fp_taos_stmt_set_sub_tbname, "taos_stmt_set_sub_tbname"); + LOAD_FUNC(fp_taos_stmt_get_tag_fields, "taos_stmt_get_tag_fields"); + LOAD_FUNC(fp_taos_stmt_get_col_fields, "taos_stmt_get_col_fields"); + LOAD_FUNC(fp_taos_stmt_reclaim_fields, "taos_stmt_reclaim_fields"); + + LOAD_FUNC(fp_taos_stmt_is_insert, "taos_stmt_is_insert"); + LOAD_FUNC(fp_taos_stmt_num_params, "taos_stmt_num_params"); + LOAD_FUNC(fp_taos_stmt_get_param, "taos_stmt_get_param"); + LOAD_FUNC(fp_taos_stmt_bind_param, "taos_stmt_bind_param"); + LOAD_FUNC(fp_taos_stmt_bind_param_batch, "taos_stmt_bind_param_batch"); + LOAD_FUNC(fp_taos_stmt_bind_single_param_batch, "taos_stmt_bind_single_param_batch"); + LOAD_FUNC(fp_taos_stmt_add_batch, "taos_stmt_add_batch"); + 
LOAD_FUNC(fp_taos_stmt_execute, "taos_stmt_execute"); + LOAD_FUNC(fp_taos_stmt_use_result, "taos_stmt_use_result"); + LOAD_FUNC(fp_taos_stmt_close, "taos_stmt_close"); + LOAD_FUNC(fp_taos_stmt_errstr, "taos_stmt_errstr"); + LOAD_FUNC(fp_taos_stmt_affected_rows, "taos_stmt_affected_rows"); + LOAD_FUNC(fp_taos_stmt_affected_rows_once, "taos_stmt_affected_rows_once"); + + LOAD_FUNC(fp_taos_stmt2_init, "taos_stmt2_init"); + LOAD_FUNC(fp_taos_stmt2_prepare, "taos_stmt2_prepare"); + LOAD_FUNC(fp_taos_stmt2_bind_param, "taos_stmt2_bind_param"); + LOAD_FUNC(fp_taos_stmt2_bind_param_a, "taos_stmt2_bind_param_a"); + LOAD_FUNC(fp_taos_stmt2_exec, "taos_stmt2_exec"); + LOAD_FUNC(fp_taos_stmt2_close, "taos_stmt2_close"); + LOAD_FUNC(fp_taos_stmt2_is_insert, "taos_stmt2_is_insert"); + LOAD_FUNC(fp_taos_stmt2_get_fields, "taos_stmt2_get_fields"); + LOAD_FUNC(fp_taos_stmt2_free_fields, "taos_stmt2_free_fields"); + LOAD_FUNC(fp_taos_stmt2_result, "taos_stmt2_result"); + LOAD_FUNC(fp_taos_stmt2_error, "taos_stmt2_error"); + + LOAD_FUNC(fp_taos_query, "taos_query"); + LOAD_FUNC(fp_taos_query_with_reqid, "taos_query_with_reqid"); + + LOAD_FUNC(fp_taos_fetch_row, "taos_fetch_row"); + LOAD_FUNC(fp_taos_result_precision, "taos_result_precision"); + LOAD_FUNC(fp_taos_free_result, "taos_free_result"); + LOAD_FUNC(fp_taos_kill_query, "taos_kill_query"); + LOAD_FUNC(fp_taos_field_count, "taos_field_count"); + LOAD_FUNC(fp_taos_num_fields, "taos_num_fields"); + LOAD_FUNC(fp_taos_affected_rows, "taos_affected_rows"); + LOAD_FUNC(fp_taos_affected_rows64, "taos_affected_rows64"); + + LOAD_FUNC(fp_taos_fetch_fields, "taos_fetch_fields"); + LOAD_FUNC(fp_taos_fetch_fields_e, "taos_fetch_fields_e"); + LOAD_FUNC(fp_taos_select_db, "taos_select_db"); + LOAD_FUNC(fp_taos_print_row, "taos_print_row"); + LOAD_FUNC(fp_taos_print_row_with_size, "taos_print_row_with_size"); + LOAD_FUNC(fp_taos_stop_query, "taos_stop_query"); + LOAD_FUNC(fp_taos_is_null, "taos_is_null"); + LOAD_FUNC(fp_taos_is_null_by_column, "taos_is_null_by_column"); + LOAD_FUNC(fp_taos_is_update_query, "taos_is_update_query"); + LOAD_FUNC(fp_taos_fetch_block, "taos_fetch_block"); + LOAD_FUNC(fp_taos_fetch_block_s, "taos_fetch_block_s"); + LOAD_FUNC(fp_taos_fetch_raw_block, "taos_fetch_raw_block"); + LOAD_FUNC(fp_taos_get_column_data_offset, "taos_get_column_data_offset"); + LOAD_FUNC(fp_taos_validate_sql, "taos_validate_sql"); + LOAD_FUNC(fp_taos_reset_current_db, "taos_reset_current_db"); + + LOAD_FUNC(fp_taos_fetch_lengths, "taos_fetch_lengths"); + LOAD_FUNC(fp_taos_result_block, "taos_result_block"); + + LOAD_FUNC(fp_taos_get_server_info, "taos_get_server_info"); + LOAD_FUNC(fp_taos_get_client_info, "taos_get_client_info"); + LOAD_FUNC(fp_taos_get_current_db, "taos_get_current_db"); + + LOAD_FUNC(fp_taos_errstr, "taos_errstr"); + LOAD_FUNC(fp_taos_errno, "taos_errno"); + + LOAD_FUNC(fp_taos_query_a, "taos_query_a"); + LOAD_FUNC(fp_taos_query_a_with_reqid, "taos_query_a_with_reqid"); + LOAD_FUNC(fp_taos_fetch_rows_a, "taos_fetch_rows_a"); + LOAD_FUNC(fp_taos_fetch_raw_block_a, "taos_fetch_raw_block_a"); + LOAD_FUNC(fp_taos_get_raw_block, "taos_get_raw_block"); + + LOAD_FUNC(fp_taos_get_db_route_info, "taos_get_db_route_info"); + LOAD_FUNC(fp_taos_get_table_vgId, "taos_get_table_vgId"); + LOAD_FUNC(fp_taos_get_tables_vgId, "taos_get_tables_vgId"); + + LOAD_FUNC(fp_taos_load_table_info, "taos_load_table_info"); + + LOAD_FUNC(fp_taos_set_hb_quit, "taos_set_hb_quit"); + + LOAD_FUNC(fp_taos_set_notify_cb, "taos_set_notify_cb"); + + 
LOAD_FUNC(fp_taos_fetch_whitelist_a, "taos_fetch_whitelist_a"); + + LOAD_FUNC(fp_taos_set_conn_mode, "taos_set_conn_mode"); + + LOAD_FUNC(fp_taos_schemaless_insert, "taos_schemaless_insert"); + LOAD_FUNC(fp_taos_schemaless_insert_with_reqid, "taos_schemaless_insert_with_reqid"); + LOAD_FUNC(fp_taos_schemaless_insert_raw, "taos_schemaless_insert_raw"); + LOAD_FUNC(fp_taos_schemaless_insert_raw_with_reqid, "taos_schemaless_insert_raw_with_reqid"); + LOAD_FUNC(fp_taos_schemaless_insert_ttl, "taos_schemaless_insert_ttl"); + LOAD_FUNC(fp_taos_schemaless_insert_ttl_with_reqid, "taos_schemaless_insert_ttl_with_reqid"); + LOAD_FUNC(fp_taos_schemaless_insert_raw_ttl, "taos_schemaless_insert_raw_ttl"); + LOAD_FUNC(fp_taos_schemaless_insert_raw_ttl_with_reqid, "taos_schemaless_insert_raw_ttl_with_reqid"); + LOAD_FUNC(fp_taos_schemaless_insert_raw_ttl_with_reqid_tbname_key, + "taos_schemaless_insert_raw_ttl_with_reqid_tbname_key"); + LOAD_FUNC(fp_taos_schemaless_insert_ttl_with_reqid_tbname_key, "taos_schemaless_insert_ttl_with_reqid_tbname_key"); + + LOAD_FUNC(fp_tmq_conf_new, "tmq_conf_new"); + LOAD_FUNC(fp_tmq_conf_set, "tmq_conf_set"); + LOAD_FUNC(fp_tmq_conf_destroy, "tmq_conf_destroy"); + LOAD_FUNC(fp_tmq_conf_set_auto_commit_cb, "tmq_conf_set_auto_commit_cb"); + + LOAD_FUNC(fp_tmq_list_new, "tmq_list_new"); + LOAD_FUNC(fp_tmq_list_append, "tmq_list_append"); + LOAD_FUNC(fp_tmq_list_destroy, "tmq_list_destroy"); + LOAD_FUNC(fp_tmq_list_get_size, "tmq_list_get_size"); + LOAD_FUNC(fp_tmq_list_to_c_array, "tmq_list_to_c_array"); + + LOAD_FUNC(fp_tmq_consumer_new, "tmq_consumer_new"); + LOAD_FUNC(fp_tmq_subscribe, "tmq_subscribe"); + LOAD_FUNC(fp_tmq_unsubscribe, "tmq_unsubscribe"); + LOAD_FUNC(fp_tmq_subscription, "tmq_subscription"); + LOAD_FUNC(fp_tmq_consumer_poll, "tmq_consumer_poll"); + LOAD_FUNC(fp_tmq_consumer_close, "tmq_consumer_close"); + LOAD_FUNC(fp_tmq_commit_sync, "tmq_commit_sync"); + LOAD_FUNC(fp_tmq_commit_async, "tmq_commit_async"); + LOAD_FUNC(fp_tmq_commit_offset_sync, "tmq_commit_offset_sync"); + LOAD_FUNC(fp_tmq_commit_offset_async, "tmq_commit_offset_async"); + LOAD_FUNC(fp_tmq_get_topic_assignment, "tmq_get_topic_assignment"); + LOAD_FUNC(fp_tmq_free_assignment, "tmq_free_assignment"); + LOAD_FUNC(fp_tmq_offset_seek, "tmq_offset_seek"); + LOAD_FUNC(fp_tmq_position, "tmq_position"); + LOAD_FUNC(fp_tmq_committed, "tmq_committed"); + + LOAD_FUNC(fp_tmq_get_connect, "tmq_get_connect"); + LOAD_FUNC(fp_tmq_get_table_name, "tmq_get_table_name"); + LOAD_FUNC(fp_tmq_get_res_type, "tmq_get_res_type"); + LOAD_FUNC(fp_tmq_get_topic_name, "tmq_get_topic_name"); + LOAD_FUNC(fp_tmq_get_db_name, "tmq_get_db_name"); + LOAD_FUNC(fp_tmq_get_vgroup_id, "tmq_get_vgroup_id"); + LOAD_FUNC(fp_tmq_get_vgroup_offset, "tmq_get_vgroup_offset"); + LOAD_FUNC(fp_tmq_err2str, "tmq_err2str"); + + LOAD_FUNC(fp_tmq_get_raw, "tmq_get_raw"); + LOAD_FUNC(fp_tmq_write_raw, "tmq_write_raw"); + LOAD_FUNC(fp_taos_write_raw_block, "taos_write_raw_block"); + LOAD_FUNC(fp_taos_write_raw_block_with_reqid, "taos_write_raw_block_with_reqid"); + LOAD_FUNC(fp_taos_write_raw_block_with_fields, "taos_write_raw_block_with_fields"); + LOAD_FUNC(fp_taos_write_raw_block_with_fields_with_reqid, "taos_write_raw_block_with_fields_with_reqid"); + LOAD_FUNC(fp_tmq_free_raw, "tmq_free_raw"); + + LOAD_FUNC(fp_tmq_get_json_meta, "tmq_get_json_meta"); + LOAD_FUNC(fp_tmq_free_json_meta, "tmq_free_json_meta"); + + LOAD_FUNC(fp_taos_check_server_status, "taos_check_server_status"); + LOAD_FUNC(fp_taos_write_crashinfo, 
"taos_write_crashinfo"); + LOAD_FUNC(fp_getBuildInfo, "getBuildInfo"); + + code = 0; + +_OVER: + if (code != 0) { + printf("failed to load function %s from %s since %s [0x%X]\r\n", funcName, driverPath, terrstr(), terrno); + taosDriverCleanup(); + } + + return code; +} + +void taosDriverCleanup() { + if (tsDriver != NULL) { + taosCloseDll(tsDriver); + tsDriver = NULL; + } +} diff --git a/source/client/wrapper/src/wrapperFunc.c b/source/client/wrapper/src/wrapperFunc.c new file mode 100644 index 0000000000..e77de0b82d --- /dev/null +++ b/source/client/wrapper/src/wrapperFunc.c @@ -0,0 +1,870 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "version.h" +#include "wrapper.h" + +static TdThreadOnce tsDriverOnce = PTHREAD_ONCE_INIT; +volatile int32_t tsDriverOnceRet = 0; + +#define ERR_VOID(code) \ + terrno = code; \ + return; + +#define ERR_PTR(code) \ + terrno = code; \ + return NULL; + +#define ERR_INT(code) \ + terrno = code; \ + return -1; + +#define ERR_BOOL(code) \ + terrno = code; \ + return false; + +#define ERR_CONFRET(code) \ + terrno = code; \ + setConfRet ret = {.retCode = -1}; \ + return ret; + +#define CHECK_VOID(fp) \ + if (tsDriver == NULL) { \ + ERR_VOID(TSDB_CODE_DLL_NOT_LOAD) \ + } \ + if (fp == NULL) { \ + ERR_VOID(TSDB_CODE_DLL_NOT_LOAD) \ + } + +#define CHECK_PTR(fp) \ + if (tsDriver == NULL) { \ + ERR_PTR(TSDB_CODE_DLL_NOT_LOAD) \ + } \ + if (fp == NULL) { \ + ERR_PTR(TSDB_CODE_DLL_NOT_LOAD) \ + } + +#define CHECK_INT(fp) \ + if (tsDriver == NULL) { \ + ERR_INT(TSDB_CODE_DLL_NOT_LOAD) \ + } \ + if (fp == NULL) { \ + ERR_INT(TSDB_CODE_DLL_NOT_LOAD) \ + } + +#define CHECK_BOOL(fp) \ + if (tsDriver == NULL) { \ + ERR_BOOL(TSDB_CODE_DLL_NOT_LOAD) \ + } \ + if (fp == NULL) { \ + ERR_BOOL(TSDB_CODE_DLL_NOT_LOAD) \ + } + +#define CHECK_CONFRET(fp) \ + if (tsDriver == NULL) { \ + ERR_CONFRET(TSDB_CODE_DLL_NOT_LOAD) \ + } \ + if (fp == NULL) { \ + ERR_CONFRET(TSDB_CODE_DLL_NOT_LOAD) \ + } + +setConfRet taos_set_config(const char *config) { + if (taos_init() != 0) { + ERR_CONFRET(TSDB_CODE_DLL_NOT_LOAD) + } + + CHECK_CONFRET(fp_taos_set_config); + return (*fp_taos_set_config)(config); +} + +static void taos_init_wrapper(void) { + tsDriverOnceRet = taosDriverInit(tsDriverType); + if (tsDriverOnceRet != 0) return; + + if (fp_taos_init == NULL) { + terrno = TSDB_CODE_DLL_FUNC_NOT_LOAD; + tsDriverOnceRet = -1; + } else { + tsDriverOnceRet = (*fp_taos_init)(); + } +} + +int taos_init(void) { + (void)taosThreadOnce(&tsDriverOnce, taos_init_wrapper); + return tsDriverOnceRet; +} + +void taos_cleanup(void) { + CHECK_VOID(fp_taos_cleanup); + (*fp_taos_cleanup)(); +} + +int taos_options(TSDB_OPTION option, const void *arg, ...) 
{ + if (option == TSDB_OPTION_DRIVER) { + if (tsDriver == NULL) { + if (strcasecmp((const char *)arg, "native") == 0) { + tsDriverType = DRIVER_NATIVE; + return 0; + } + if (strcasecmp((const char *)arg, "websocket") == 0) { + tsDriverType = DRIVER_WEBSOCKET; + return 0; + } + } + terrno = TSDB_CODE_REPEAT_INIT; + return -1; + } + + if (taos_init() != 0) { + terrno = TSDB_CODE_DLL_NOT_LOAD; + return -1; + } + + CHECK_INT(fp_taos_options); + return (*fp_taos_options)(option, arg); +} + +int taos_options_connection(TAOS *taos, TSDB_OPTION_CONNECTION option, const void *arg, ...) { + CHECK_INT(fp_taos_options_connection); + return (*fp_taos_options_connection)(taos, option, (const char *)arg); +} + +TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port) { + if (taos_init() != 0) { + terrno = TSDB_CODE_DLL_NOT_LOAD; + return NULL; + } + + CHECK_PTR(fp_taos_connect); + return (*fp_taos_connect)(ip, user, pass, db, port); +} + +TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port) { + if (taos_init() != 0) { + terrno = TSDB_CODE_DLL_NOT_LOAD; + return NULL; + } + + CHECK_PTR(fp_taos_connect_auth); + return (*fp_taos_connect_auth)(ip, user, auth, db, port); +} + +void taos_close(TAOS *taos) { + CHECK_VOID(fp_taos_close); + (*fp_taos_close)(taos); +} + +const char *taos_data_type(int type) { + CHECK_PTR(fp_taos_data_type); + return (*fp_taos_data_type)(type); +} + +TAOS_STMT *taos_stmt_init(TAOS *taos) { + CHECK_PTR(fp_taos_stmt_init); + return (*fp_taos_stmt_init)(taos); +} + +TAOS_STMT *taos_stmt_init_with_reqid(TAOS *taos, int64_t reqid) { + CHECK_PTR(fp_taos_stmt_init_with_reqid); + return (*fp_taos_stmt_init_with_reqid)(taos, reqid); +} + +TAOS_STMT *taos_stmt_init_with_options(TAOS *taos, TAOS_STMT_OPTIONS *options) { + CHECK_PTR(fp_taos_stmt_init_with_options); + return (*fp_taos_stmt_init_with_options)(taos, options); +} + +int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length) { + CHECK_INT(fp_taos_stmt_prepare); + return (*fp_taos_stmt_prepare)(stmt, sql, length); +} + +int taos_stmt_set_tbname_tags(TAOS_STMT *stmt, const char *name, TAOS_MULTI_BIND *tags) { + CHECK_INT(fp_taos_stmt_set_tbname_tags); + return (*fp_taos_stmt_set_tbname_tags)(stmt, name, tags); +} + +int taos_stmt_set_tbname(TAOS_STMT *stmt, const char *name) { + CHECK_INT(fp_taos_stmt_set_tbname); + return (*fp_taos_stmt_set_tbname)(stmt, name); +} + +int taos_stmt_set_tags(TAOS_STMT *stmt, TAOS_MULTI_BIND *tags) { + CHECK_INT(fp_taos_stmt_set_tags); + return (*fp_taos_stmt_set_tags)(stmt, tags); +} + +int taos_stmt_set_sub_tbname(TAOS_STMT *stmt, const char *name) { + CHECK_INT(fp_taos_stmt_set_sub_tbname); + return (*fp_taos_stmt_set_sub_tbname)(stmt, name); +} + +int taos_stmt_get_tag_fields(TAOS_STMT *stmt, int *fieldNum, TAOS_FIELD_E **fields) { + CHECK_INT(fp_taos_stmt_get_tag_fields); + return (*fp_taos_stmt_get_tag_fields)(stmt, fieldNum, fields); +} + +int taos_stmt_get_col_fields(TAOS_STMT *stmt, int *fieldNum, TAOS_FIELD_E **fields) { + CHECK_INT(fp_taos_stmt_get_col_fields); + return (*fp_taos_stmt_get_col_fields)(stmt, fieldNum, fields); +} + +void taos_stmt_reclaim_fields(TAOS_STMT *stmt, TAOS_FIELD_E *fields) { + CHECK_VOID(fp_taos_stmt_reclaim_fields); + (*fp_taos_stmt_reclaim_fields)(stmt, fields); +} + +int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert) { + CHECK_INT(fp_taos_stmt_is_insert); + return (*fp_taos_stmt_is_insert)(stmt, insert); +} + +int taos_stmt_num_params(TAOS_STMT 
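The taos_options branch above is the only place where the native and websocket drivers are selected: it has to run while tsDriver is still NULL, i.e. before the first call that triggers loading, and afterwards the request is rejected with TSDB_CODE_REPEAT_INIT. A minimal usage sketch against the API in this patch follows; the host, credentials and port are placeholders and error handling is abbreviated.

```c
// Usage sketch under the wrapper API shown in this patch.
// Connection parameters below are placeholders, not recommendations.
#include <stdio.h>
#include "taos.h"

int main(void) {
  // Must run before taos_connect()/taos_init(); once the driver library
  // has been loaded this call fails with TSDB_CODE_REPEAT_INIT.
  if (taos_options(TSDB_OPTION_DRIVER, "websocket") != 0) {
    fprintf(stderr, "select driver failed: %s\n", taos_errstr(NULL));
    return 1;
  }

  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (conn == NULL) {
    fprintf(stderr, "connect failed: %s\n", taos_errstr(NULL));
    return 1;
  }

  printf("client %s connected to server %s\n", taos_get_client_info(),
         taos_get_server_info(conn));

  taos_close(conn);
  taos_cleanup();
  return 0;
}
```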
*stmt, int *nums) { + CHECK_INT(fp_taos_stmt_num_params); + return (*fp_taos_stmt_num_params)(stmt, nums); +} + +int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes) { + CHECK_INT(fp_taos_stmt_get_param); + return (*fp_taos_stmt_get_param)(stmt, idx, type, bytes); +} + +int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind) { + CHECK_INT(fp_taos_stmt_bind_param); + return (*fp_taos_stmt_bind_param)(stmt, bind); +} + +int taos_stmt_bind_param_batch(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind) { + CHECK_INT(fp_taos_stmt_bind_param_batch); + return (*fp_taos_stmt_bind_param_batch)(stmt, bind); +} + +int taos_stmt_bind_single_param_batch(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind, int colIdx) { + CHECK_INT(fp_taos_stmt_bind_single_param_batch); + return (*fp_taos_stmt_bind_single_param_batch)(stmt, bind, colIdx); +} + +int taos_stmt_add_batch(TAOS_STMT *stmt) { + CHECK_INT(fp_taos_stmt_add_batch); + return (*fp_taos_stmt_add_batch)(stmt); +} + +int taos_stmt_execute(TAOS_STMT *stmt) { + CHECK_INT(fp_taos_stmt_execute); + return (*fp_taos_stmt_execute)(stmt); +} + +TAOS_RES *taos_stmt_use_result(TAOS_STMT *stmt) { + CHECK_PTR(fp_taos_stmt_use_result); + return (*fp_taos_stmt_use_result)(stmt); +} + +int taos_stmt_close(TAOS_STMT *stmt) { + CHECK_INT(fp_taos_stmt_close); + return (*fp_taos_stmt_close)(stmt); +} + +char *taos_stmt_errstr(TAOS_STMT *stmt) { + CHECK_PTR(fp_taos_stmt_errstr); + return (*fp_taos_stmt_errstr)(stmt); +} + +int taos_stmt_affected_rows(TAOS_STMT *stmt) { + CHECK_INT(fp_taos_stmt_affected_rows); + return (*fp_taos_stmt_affected_rows)(stmt); +} + +int taos_stmt_affected_rows_once(TAOS_STMT *stmt) { + CHECK_INT(fp_taos_stmt_affected_rows_once); + return (*fp_taos_stmt_affected_rows_once)(stmt); +} + +TAOS_STMT2 *taos_stmt2_init(TAOS *taos, TAOS_STMT2_OPTION *option) { + CHECK_PTR(fp_taos_stmt2_init); + return (*fp_taos_stmt2_init)(taos, option); +} + +int taos_stmt2_prepare(TAOS_STMT2 *stmt, const char *sql, unsigned long length) { + CHECK_INT(fp_taos_stmt2_prepare); + return (*fp_taos_stmt2_prepare)(stmt, sql, length); +} + +int taos_stmt2_bind_param(TAOS_STMT2 *stmt, TAOS_STMT2_BINDV *bindv, int32_t col_idx) { + CHECK_INT(fp_taos_stmt2_bind_param); + return (*fp_taos_stmt2_bind_param)(stmt, bindv, col_idx); +} + +int taos_stmt2_bind_param_a(TAOS_STMT2 *stmt, TAOS_STMT2_BINDV *bindv, int32_t col_idx, __taos_async_fn_t fp, + void *param) { + CHECK_INT(fp_taos_stmt2_bind_param_a); + return (*fp_taos_stmt2_bind_param_a)(stmt, bindv, col_idx, fp, param); +} + +int taos_stmt2_exec(TAOS_STMT2 *stmt, int *affected_rows) { + CHECK_INT(fp_taos_stmt2_exec); + return (*fp_taos_stmt2_exec)(stmt, affected_rows); +} + +int taos_stmt2_close(TAOS_STMT2 *stmt) { + CHECK_INT(fp_taos_stmt2_close); + return (*fp_taos_stmt2_close)(stmt); +} + +int taos_stmt2_is_insert(TAOS_STMT2 *stmt, int *insert) { + CHECK_INT(fp_taos_stmt2_is_insert); + return (*fp_taos_stmt2_is_insert)(stmt, insert); +} + +int taos_stmt2_get_fields(TAOS_STMT2 *stmt, int *count, TAOS_FIELD_ALL **fields) { + CHECK_INT(fp_taos_stmt2_get_fields); + return (*fp_taos_stmt2_get_fields)(stmt, count, fields); +} + +void taos_stmt2_free_fields(TAOS_STMT2 *stmt, TAOS_FIELD_ALL *fields) { + CHECK_VOID(fp_taos_stmt2_free_fields); + (*fp_taos_stmt2_free_fields)(stmt, fields); +} + +TAOS_RES *taos_stmt2_result(TAOS_STMT2 *stmt) { + CHECK_PTR(fp_taos_stmt2_result); + return (*fp_taos_stmt2_result)(stmt); +} + +char *taos_stmt2_error(TAOS_STMT2 *stmt) { + CHECK_PTR(fp_taos_stmt2_error); + return 
(*fp_taos_stmt2_error)(stmt); +} + +TAOS_RES *taos_query(TAOS *taos, const char *sql) { + CHECK_PTR(fp_taos_query); + return (*fp_taos_query)(taos, sql); +} + +TAOS_RES *taos_query_with_reqid(TAOS *taos, const char *sql, int64_t reqId) { + CHECK_PTR(fp_taos_query_with_reqid); + return (*fp_taos_query_with_reqid)(taos, sql, reqId); +} + +TAOS_ROW taos_fetch_row(TAOS_RES *res) { + CHECK_PTR(fp_taos_fetch_row); + return (*fp_taos_fetch_row)(res); +} + +int taos_result_precision(TAOS_RES *res) { + CHECK_INT(fp_taos_result_precision); + return (*fp_taos_result_precision)(res); +} + +void taos_free_result(TAOS_RES *res) { + CHECK_VOID(fp_taos_free_result); + return (*fp_taos_free_result)(res); +} + +void taos_kill_query(TAOS *taos) { + CHECK_VOID(fp_taos_kill_query); + return (*fp_taos_kill_query)(taos); +} + +int taos_field_count(TAOS_RES *res) { + CHECK_INT(fp_taos_field_count); + return (*fp_taos_field_count)(res); +} + +int taos_num_fields(TAOS_RES *res) { + CHECK_INT(fp_taos_num_fields); + return (*fp_taos_num_fields)(res); +} + +int taos_affected_rows(TAOS_RES *res) { + CHECK_INT(fp_taos_affected_rows); + return (*fp_taos_affected_rows)(res); +} + +int64_t taos_affected_rows64(TAOS_RES *res) { + CHECK_INT(fp_taos_affected_rows64); + return (*fp_taos_affected_rows64)(res); +} + +TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) { + CHECK_PTR(fp_taos_fetch_fields); + return (*fp_taos_fetch_fields)(res); +} + +TAOS_FIELD_E *taos_fetch_fields_e(TAOS_RES *res) { + CHECK_PTR(fp_taos_fetch_fields_e); + return (*fp_taos_fetch_fields_e)(res); +} + +int taos_select_db(TAOS *taos, const char *db) { + CHECK_INT(fp_taos_select_db); + return (*fp_taos_select_db)(taos, db); +} + +int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) { + CHECK_INT(fp_taos_print_row); + return (*fp_taos_print_row)(str, row, fields, num_fields); +} + +int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) { + CHECK_INT(fp_taos_print_row_with_size); + return (*fp_taos_print_row_with_size)(str, size, row, fields, num_fields); +} + +void taos_stop_query(TAOS_RES *res) { + CHECK_VOID(fp_taos_stop_query); + (*fp_taos_stop_query)(res); +} + +bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col) { + CHECK_BOOL(fp_taos_is_null); + return (*fp_taos_is_null)(res, row, col); +} + +int taos_is_null_by_column(TAOS_RES *res, int columnIndex, bool result[], int *rows) { + CHECK_INT(fp_taos_is_null_by_column); + return (*fp_taos_is_null_by_column)(res, columnIndex, result, rows); +} + +bool taos_is_update_query(TAOS_RES *res) { + CHECK_BOOL(fp_taos_is_update_query); + return (*fp_taos_is_update_query)(res); +} + +int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) { + CHECK_INT(fp_taos_fetch_block); + return (*fp_taos_fetch_block)(res, rows); +} + +int taos_fetch_block_s(TAOS_RES *res, int *numOfRows, TAOS_ROW *rows) { + CHECK_INT(fp_taos_fetch_block_s); + return (*fp_taos_fetch_block_s)(res, numOfRows, rows); +} + +int taos_fetch_raw_block(TAOS_RES *res, int *numOfRows, void **pData) { + CHECK_INT(fp_taos_fetch_raw_block); + return (*fp_taos_fetch_raw_block)(res, numOfRows, pData); +} + +int *taos_get_column_data_offset(TAOS_RES *res, int columnIndex) { + CHECK_PTR(fp_taos_get_column_data_offset); + return (*fp_taos_get_column_data_offset)(res, columnIndex); +} + +int taos_validate_sql(TAOS *taos, const char *sql) { + CHECK_INT(fp_taos_validate_sql); + return (*fp_taos_validate_sql)(taos, sql); +} + +void taos_reset_current_db(TAOS *taos) { + 
CHECK_VOID(fp_taos_reset_current_db); + (*fp_taos_reset_current_db)(taos); +} + +int *taos_fetch_lengths(TAOS_RES *res) { + CHECK_PTR(fp_taos_fetch_lengths); + return (*fp_taos_fetch_lengths)(res); +} + +TAOS_ROW *taos_result_block(TAOS_RES *res) { + CHECK_PTR(fp_taos_result_block); + return (*fp_taos_result_block)(res); +} + +const char *taos_get_server_info(TAOS *taos) { + CHECK_PTR(fp_taos_get_server_info); + return (*fp_taos_get_server_info)(taos); +} + +const char *taos_get_client_info() { + if (fp_taos_get_client_info == NULL) { + return td_version; + } else { + return (*fp_taos_get_client_info)(); + } +} + +int taos_get_current_db(TAOS *taos, char *database, int len, int *required) { + CHECK_INT(fp_taos_get_current_db); + return (*fp_taos_get_current_db)(taos, database, len, required); +} + +const char *taos_errstr(TAOS_RES *res) { + if (fp_taos_errstr == NULL) { + return tstrerror(terrno); + } + return (*fp_taos_errstr)(res); +} + +int taos_errno(TAOS_RES *res) { + if (fp_taos_errno == NULL) { + return terrno; + } + return (*fp_taos_errno)(res); +} + +void taos_query_a(TAOS *taos, const char *sql, __taos_async_fn_t fp, void *param) { + CHECK_VOID(fp_taos_query_a); + (*fp_taos_query_a)(taos, sql, fp, param); +} + +void taos_query_a_with_reqid(TAOS *taos, const char *sql, __taos_async_fn_t fp, void *param, int64_t reqid) { + CHECK_VOID(fp_taos_query_a_with_reqid); + (*fp_taos_query_a_with_reqid)(taos, sql, fp, param, reqid); +} + +void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) { + CHECK_VOID(fp_taos_fetch_rows_a); + (*fp_taos_fetch_rows_a)(res, fp, param); +} + +void taos_fetch_raw_block_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) { + CHECK_VOID(fp_taos_fetch_raw_block_a); + (*fp_taos_fetch_raw_block_a)(res, fp, param); +} + +const void *taos_get_raw_block(TAOS_RES *res) { + CHECK_PTR(fp_taos_get_raw_block); + return (*fp_taos_get_raw_block)(res); +} + +int taos_get_db_route_info(TAOS *taos, const char *db, TAOS_DB_ROUTE_INFO *dbInfo) { + CHECK_INT(fp_taos_get_db_route_info); + return (*fp_taos_get_db_route_info)(taos, db, dbInfo); +} + +int taos_get_table_vgId(TAOS *taos, const char *db, const char *table, int *vgId) { + CHECK_INT(fp_taos_get_table_vgId); + return (*fp_taos_get_table_vgId)(taos, db, table, vgId); +} + +int taos_get_tables_vgId(TAOS *taos, const char *db, const char *table[], int tableNum, int *vgId) { + CHECK_INT(fp_taos_get_tables_vgId); + return (*fp_taos_get_tables_vgId)(taos, db, table, tableNum, vgId); +} + +int taos_load_table_info(TAOS *taos, const char *tableNameList) { + CHECK_INT(fp_taos_load_table_info); + return (*fp_taos_load_table_info)(taos, tableNameList); +} + +void taos_set_hb_quit(int8_t quitByKill) { + if (taos_init() != 0) { + return; + } + + CHECK_VOID(fp_taos_set_hb_quit); + return (*fp_taos_set_hb_quit)(quitByKill); +} + +int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param, int type) { + CHECK_INT(fp_taos_set_notify_cb); + return (*fp_taos_set_notify_cb)(taos, fp, param, type); +} + +void taos_fetch_whitelist_a(TAOS *taos, __taos_async_whitelist_fn_t fp, void *param) { + CHECK_VOID(fp_taos_fetch_whitelist_a); + return (*fp_taos_fetch_whitelist_a)(taos, fp, param); +} + +int taos_set_conn_mode(TAOS *taos, int mode, int value) { + CHECK_INT(fp_taos_set_conn_mode); + return (*fp_taos_set_conn_mode)(taos, mode, value); +} + +TAOS_RES *taos_schemaless_insert(TAOS *taos, char *lines[], int numLines, int protocol, int precision) { + CHECK_PTR(fp_taos_schemaless_insert); + return 
(*fp_taos_schemaless_insert)(taos, lines, numLines, protocol, precision); +} + +TAOS_RES *taos_schemaless_insert_with_reqid(TAOS *taos, char *lines[], int numLines, int protocol, int precision, + int64_t reqid) { + CHECK_PTR(fp_taos_schemaless_insert_with_reqid); + return (*fp_taos_schemaless_insert_with_reqid)(taos, lines, numLines, protocol, precision, reqid); +} + +TAOS_RES *taos_schemaless_insert_raw(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, + int precision) { + CHECK_PTR(fp_taos_schemaless_insert_raw); + return (*fp_taos_schemaless_insert_raw)(taos, lines, len, totalRows, protocol, precision); +} + +TAOS_RES *taos_schemaless_insert_raw_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, + int precision, int64_t reqid) { + CHECK_PTR(fp_taos_schemaless_insert_raw_with_reqid); + return (*fp_taos_schemaless_insert_raw_with_reqid)(taos, lines, len, totalRows, protocol, precision, reqid); +} + +TAOS_RES *taos_schemaless_insert_ttl(TAOS *taos, char *lines[], int numLines, int protocol, int precision, + int32_t ttl) { + CHECK_PTR(fp_taos_schemaless_insert_ttl); + return (*fp_taos_schemaless_insert_ttl)(taos, lines, numLines, protocol, precision, ttl); +} + +TAOS_RES *taos_schemaless_insert_ttl_with_reqid(TAOS *taos, char *lines[], int numLines, int protocol, int precision, + int32_t ttl, int64_t reqid) { + CHECK_PTR(fp_taos_schemaless_insert_ttl_with_reqid); + return (*fp_taos_schemaless_insert_ttl_with_reqid)(taos, lines, numLines, protocol, precision, ttl, reqid); +} + +TAOS_RES *taos_schemaless_insert_raw_ttl(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, + int precision, int32_t ttl) { + CHECK_PTR(fp_taos_schemaless_insert_raw_ttl); + return (*fp_taos_schemaless_insert_raw_ttl)(taos, lines, len, totalRows, protocol, precision, ttl); +} + +TAOS_RES *taos_schemaless_insert_raw_ttl_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, + int precision, int32_t ttl, int64_t reqid) { + CHECK_PTR(fp_taos_schemaless_insert_raw_ttl_with_reqid); + return (*fp_taos_schemaless_insert_raw_ttl_with_reqid)(taos, lines, len, totalRows, protocol, precision, ttl, reqid); +} + +TAOS_RES *taos_schemaless_insert_raw_ttl_with_reqid_tbname_key(TAOS *taos, char *lines, int len, int32_t *totalRows, + int protocol, int precision, int32_t ttl, int64_t reqid, + char *tbnameKey) { + CHECK_PTR(fp_taos_schemaless_insert_raw_ttl_with_reqid_tbname_key); + return (*fp_taos_schemaless_insert_raw_ttl_with_reqid_tbname_key)(taos, lines, len, totalRows, protocol, precision, + ttl, reqid, tbnameKey); +} + +TAOS_RES *taos_schemaless_insert_ttl_with_reqid_tbname_key(TAOS *taos, char *lines[], int numLines, int protocol, + int precision, int32_t ttl, int64_t reqid, char *tbnameKey) { + CHECK_PTR(fp_taos_schemaless_insert_ttl_with_reqid_tbname_key); + return (*fp_taos_schemaless_insert_ttl_with_reqid_tbname_key)(taos, lines, numLines, protocol, precision, ttl, reqid, + tbnameKey); +} + +tmq_conf_t *tmq_conf_new() { + CHECK_PTR(fp_tmq_conf_new); + return (*fp_tmq_conf_new)(); +} + +tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value) { + CHECK_INT(fp_tmq_conf_set); + return (*fp_tmq_conf_set)(conf, key, value); +} + +void tmq_conf_destroy(tmq_conf_t *conf) { + CHECK_VOID(fp_tmq_conf_destroy); + (*fp_tmq_conf_destroy)(conf); +} + +void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param) { + CHECK_VOID(fp_tmq_conf_set_auto_commit_cb); + (*fp_tmq_conf_set_auto_commit_cb)(conf, cb, 
param); +} + +tmq_list_t *tmq_list_new() { + CHECK_PTR(fp_tmq_list_new); + return (*fp_tmq_list_new)(); +} + +int32_t tmq_list_append(tmq_list_t *tlist, const char *val) { + CHECK_INT(fp_tmq_list_append); + return (*fp_tmq_list_append)(tlist, val); +} + +void tmq_list_destroy(tmq_list_t *tlist) { + CHECK_VOID(fp_tmq_list_destroy); + (*fp_tmq_list_destroy)(tlist); +} + +int32_t tmq_list_get_size(const tmq_list_t *tlist) { + CHECK_INT(fp_tmq_list_get_size); + return (*fp_tmq_list_get_size)(tlist); +} + +char **tmq_list_to_c_array(const tmq_list_t *tlist) { + CHECK_PTR(fp_tmq_list_to_c_array); + return (*fp_tmq_list_to_c_array)(tlist); +} + +tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen) { + CHECK_PTR(fp_tmq_consumer_new); + return (*fp_tmq_consumer_new)(conf, errstr, errstrLen); +} + +int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list) { + CHECK_INT(fp_tmq_subscribe); + return (*fp_tmq_subscribe)(tmq, topic_list); +} + +int32_t tmq_unsubscribe(tmq_t *tmq) { + CHECK_INT(fp_tmq_unsubscribe); + return (*fp_tmq_unsubscribe)(tmq); +} + +int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics) { + CHECK_INT(fp_tmq_subscription); + return (*fp_tmq_subscription)(tmq, topics); +} + +TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout) { + CHECK_PTR(fp_tmq_consumer_poll); + return (*fp_tmq_consumer_poll)(tmq, timeout); +} + +int32_t tmq_consumer_close(tmq_t *tmq) { + CHECK_INT(fp_tmq_consumer_close); + return (*fp_tmq_consumer_close)(tmq); +} + +int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg) { + CHECK_INT(fp_tmq_commit_sync); + return (*fp_tmq_commit_sync)(tmq, msg); +} + +void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param) { + CHECK_VOID(fp_tmq_commit_async); + (*fp_tmq_commit_async)(tmq, msg, cb, param); +} + +int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset) { + CHECK_INT(fp_tmq_commit_offset_sync); + return (*fp_tmq_commit_offset_sync)(tmq, pTopicName, vgId, offset); +} + +void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, + void *param) { + CHECK_VOID(fp_tmq_commit_offset_async); + (*fp_tmq_commit_offset_async)(tmq, pTopicName, vgId, offset, cb, param); +} + +int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment, + int32_t *numOfAssignment) { + CHECK_INT(fp_tmq_get_topic_assignment); + return (*fp_tmq_get_topic_assignment)(tmq, pTopicName, assignment, numOfAssignment); +} + +void tmq_free_assignment(tmq_topic_assignment *pAssignment) { + CHECK_VOID(fp_tmq_free_assignment); + (*fp_tmq_free_assignment)(pAssignment); +} + +int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset) { + CHECK_INT(fp_tmq_offset_seek); + return (*fp_tmq_offset_seek)(tmq, pTopicName, vgId, offset); +} + +int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId) { + CHECK_INT(fp_tmq_position); + return (*fp_tmq_position)(tmq, pTopicName, vgId); +} + +int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId) { + CHECK_INT(fp_tmq_committed); + return (*fp_tmq_committed)(tmq, pTopicName, vgId); +} + +TAOS *tmq_get_connect(tmq_t *tmq) { + CHECK_PTR(fp_tmq_get_connect); + return (*fp_tmq_get_connect)(tmq); +} + +const char *tmq_get_table_name(TAOS_RES *res) { + CHECK_PTR(fp_tmq_get_table_name); + return (*fp_tmq_get_table_name)(res); +} + +tmq_res_t tmq_get_res_type(TAOS_RES *res) { + CHECK_INT(fp_tmq_get_res_type); + return 
(*fp_tmq_get_res_type)(res); +} + +const char *tmq_get_topic_name(TAOS_RES *res) { + CHECK_PTR(fp_tmq_get_topic_name); + return (*fp_tmq_get_topic_name)(res); +} + +const char *tmq_get_db_name(TAOS_RES *res) { + CHECK_PTR(fp_tmq_get_db_name); + return (*fp_tmq_get_db_name)(res); +} + +int32_t tmq_get_vgroup_id(TAOS_RES *res) { + CHECK_INT(fp_tmq_get_vgroup_id); + return (*fp_tmq_get_vgroup_id)(res); +} + +int64_t tmq_get_vgroup_offset(TAOS_RES *res) { + CHECK_INT(fp_tmq_get_vgroup_offset); + return (*fp_tmq_get_vgroup_offset)(res); +} + +const char *tmq_err2str(int32_t code) { + CHECK_PTR(fp_tmq_err2str); + return (*fp_tmq_err2str)(code); +} + +int32_t tmq_get_raw(TAOS_RES *res, tmq_raw_data *raw) { + CHECK_INT(fp_tmq_get_raw); + return (*fp_tmq_get_raw)(res, raw); +} + +int32_t tmq_write_raw(TAOS *taos, tmq_raw_data raw) { + CHECK_INT(fp_tmq_write_raw); + return (*fp_tmq_write_raw)(taos, raw); +} + +int taos_write_raw_block(TAOS *taos, int numOfRows, char *pData, const char *tbname) { + CHECK_INT(fp_taos_write_raw_block); + return (*fp_taos_write_raw_block)(taos, numOfRows, pData, tbname); +} + +int taos_write_raw_block_with_reqid(TAOS *taos, int numOfRows, char *pData, const char *tbname, int64_t reqid) { + CHECK_INT(fp_taos_write_raw_block_with_reqid); + return (*fp_taos_write_raw_block_with_reqid)(taos, numOfRows, pData, tbname, reqid); +} + +int taos_write_raw_block_with_fields(TAOS *taos, int rows, char *pData, const char *tbname, TAOS_FIELD *fields, + int numFields) { + CHECK_INT(fp_taos_write_raw_block_with_fields); + return (*fp_taos_write_raw_block_with_fields)(taos, rows, pData, tbname, fields, numFields); +} + +int taos_write_raw_block_with_fields_with_reqid(TAOS *taos, int rows, char *pData, const char *tbname, + TAOS_FIELD *fields, int numFields, int64_t reqid) { + CHECK_INT(fp_taos_write_raw_block_with_fields_with_reqid); + return (*fp_taos_write_raw_block_with_fields_with_reqid)(taos, rows, pData, tbname, fields, numFields, reqid); +} + +void tmq_free_raw(tmq_raw_data raw) { + CHECK_VOID(fp_tmq_free_raw); + (*fp_tmq_free_raw)(raw); +} + +char *tmq_get_json_meta(TAOS_RES *res) { + CHECK_PTR(fp_tmq_get_json_meta); + return (*fp_tmq_get_json_meta)(res); +} + +void tmq_free_json_meta(char *jsonMeta) { + CHECK_VOID(fp_tmq_free_json_meta); + return (*fp_tmq_free_json_meta)(jsonMeta); +} + +TSDB_SERVER_STATUS taos_check_server_status(const char *fqdn, int port, char *details, int maxlen) { + CHECK_INT(fp_taos_check_server_status); + return (*fp_taos_check_server_status)(fqdn, port, details, maxlen); +} + +void taos_write_crashinfo(int signum, void *sigInfo, void *context) { + CHECK_VOID(fp_taos_write_crashinfo); + (*fp_taos_write_crashinfo)(signum, sigInfo, context); +} + +char *getBuildInfo() { + CHECK_PTR(fp_getBuildInfo); + return (*fp_getBuildInfo)(); +} diff --git a/source/client/wrapper/src/wrapperVariable.c b/source/client/wrapper/src/wrapperVariable.c new file mode 100644 index 0000000000..ef95b3b3a8 --- /dev/null +++ b/source/client/wrapper/src/wrapperVariable.c @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY = NULL; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "wrapper.h" + +setConfRet (*fp_taos_set_config)(const char *config) = NULL; + +int (*fp_taos_init)(void) = NULL; +void (*fp_taos_cleanup)(void) = NULL; +int (*fp_taos_options)(TSDB_OPTION option, const void *arg, ...) = NULL; +int (*fp_taos_options_connection)(TAOS *taos, TSDB_OPTION_CONNECTION option, const void *arg, ...) = NULL; +TAOS *(*fp_taos_connect)(const char *ip, const char *user, const char *pass, const char *db, uint16_t port) = NULL; +TAOS *(*fp_taos_connect_auth)(const char *ip, const char *user, const char *auth, const char *db, uint16_t port) = NULL; +void (*fp_taos_close)(TAOS *taos) = NULL; + +const char *(*fp_taos_data_type)(int type) = NULL; + +TAOS_STMT *(*fp_taos_stmt_init)(TAOS *taos) = NULL; +TAOS_STMT *(*fp_taos_stmt_init_with_reqid)(TAOS *taos, int64_t reqid) = NULL; +TAOS_STMT *(*fp_taos_stmt_init_with_options)(TAOS *taos, TAOS_STMT_OPTIONS *options) = NULL; +int (*fp_taos_stmt_prepare)(TAOS_STMT *stmt, const char *sql, unsigned long length) = NULL; +int (*fp_taos_stmt_set_tbname_tags)(TAOS_STMT *stmt, const char *name, TAOS_MULTI_BIND *tags) = NULL; +int (*fp_taos_stmt_set_tbname)(TAOS_STMT *stmt, const char *name) = NULL; +int (*fp_taos_stmt_set_tags)(TAOS_STMT *stmt, TAOS_MULTI_BIND *tags) = NULL; +int (*fp_taos_stmt_set_sub_tbname)(TAOS_STMT *stmt, const char *name) = NULL; +int (*fp_taos_stmt_get_tag_fields)(TAOS_STMT *stmt, int *fieldNum, TAOS_FIELD_E **fields) = NULL; +int (*fp_taos_stmt_get_col_fields)(TAOS_STMT *stmt, int *fieldNum, TAOS_FIELD_E **fields) = NULL; +void (*fp_taos_stmt_reclaim_fields)(TAOS_STMT *stmt, TAOS_FIELD_E *fields) = NULL; + +int (*fp_taos_stmt_is_insert)(TAOS_STMT *stmt, int *insert) = NULL; +int (*fp_taos_stmt_num_params)(TAOS_STMT *stmt, int *nums) = NULL; +int (*fp_taos_stmt_get_param)(TAOS_STMT *stmt, int idx, int *type, int *bytes) = NULL; +int (*fp_taos_stmt_bind_param)(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind) = NULL; +int (*fp_taos_stmt_bind_param_batch)(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind) = NULL; +int (*fp_taos_stmt_bind_single_param_batch)(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind, int colIdx) = NULL; +int (*fp_taos_stmt_add_batch)(TAOS_STMT *stmt) = NULL; +int (*fp_taos_stmt_execute)(TAOS_STMT *stmt) = NULL; +TAOS_RES *(*fp_taos_stmt_use_result)(TAOS_STMT *stmt) = NULL; +int (*fp_taos_stmt_close)(TAOS_STMT *stmt) = NULL; +char *(*fp_taos_stmt_errstr)(TAOS_STMT *stmt) = NULL; +int (*fp_taos_stmt_affected_rows)(TAOS_STMT *stmt) = NULL; +int (*fp_taos_stmt_affected_rows_once)(TAOS_STMT *stmt) = NULL; + +TAOS_STMT2 *(*fp_taos_stmt2_init)(TAOS *taos, TAOS_STMT2_OPTION *option) = NULL; +int (*fp_taos_stmt2_prepare)(TAOS_STMT2 *stmt, const char *sql, unsigned long length) = NULL; +int (*fp_taos_stmt2_bind_param)(TAOS_STMT2 *stmt, TAOS_STMT2_BINDV *bindv, int32_t col_idx) = NULL; +int (*fp_taos_stmt2_bind_param_a)(TAOS_STMT2 *stmt, TAOS_STMT2_BINDV *bindv, int32_t col_idx, __taos_async_fn_t fp, + void *param) = NULL; +int (*fp_taos_stmt2_exec)(TAOS_STMT2 *stmt, int *affected_rows) = NULL; +int (*fp_taos_stmt2_close)(TAOS_STMT2 *stmt) = NULL; +int (*fp_taos_stmt2_is_insert)(TAOS_STMT2 *stmt, int *insert) = NULL; +int (*fp_taos_stmt2_get_fields)(TAOS_STMT2 *stmt, int *count, TAOS_FIELD_ALL **fields) = NULL; +void (*fp_taos_stmt2_free_fields)(TAOS_STMT2 *stmt, TAOS_FIELD_ALL *fields) = NULL; +TAOS_RES *(*fp_taos_stmt2_result)(TAOS_STMT2 *stmt) = NULL; +char *(*fp_taos_stmt2_error)(TAOS_STMT2 
*stmt) = NULL; + +TAOS_RES *(*fp_taos_query)(TAOS *taos, const char *sql) = NULL; +TAOS_RES *(*fp_taos_query_with_reqid)(TAOS *taos, const char *sql, int64_t reqId) = NULL; + +TAOS_ROW (*fp_taos_fetch_row)(TAOS_RES *res) = NULL; +int (*fp_taos_result_precision)(TAOS_RES *res) = NULL; // get the time precision of result +void (*fp_taos_free_result)(TAOS_RES *res) = NULL; +void (*fp_taos_kill_query)(TAOS *taos) = NULL; +int (*fp_taos_field_count)(TAOS_RES *res) = NULL; +int (*fp_taos_num_fields)(TAOS_RES *res) = NULL; +int (*fp_taos_affected_rows)(TAOS_RES *res) = NULL; +int64_t (*fp_taos_affected_rows64)(TAOS_RES *res) = NULL; + +TAOS_FIELD *(*fp_taos_fetch_fields)(TAOS_RES *res) = NULL; +TAOS_FIELD_E *(*fp_taos_fetch_fields_e)(TAOS_RES *res) = NULL; +int (*fp_taos_select_db)(TAOS *taos, const char *db) = NULL; +int (*fp_taos_print_row)(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) = NULL; +int (*fp_taos_print_row_with_size)(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) = NULL; +void (*fp_taos_stop_query)(TAOS_RES *res) = NULL; +bool (*fp_taos_is_null)(TAOS_RES *res, int32_t row, int32_t col) = NULL; +int (*fp_taos_is_null_by_column)(TAOS_RES *res, int columnIndex, bool result[], int *rows) = NULL; +bool (*fp_taos_is_update_query)(TAOS_RES *res) = NULL; +int (*fp_taos_fetch_block)(TAOS_RES *res, TAOS_ROW *rows) = NULL; +int (*fp_taos_fetch_block_s)(TAOS_RES *res, int *numOfRows, TAOS_ROW *rows) = NULL; +int (*fp_taos_fetch_raw_block)(TAOS_RES *res, int *numOfRows, void **pData) = NULL; +int *(*fp_taos_get_column_data_offset)(TAOS_RES *res, int columnIndex) = NULL; +int (*fp_taos_validate_sql)(TAOS *taos, const char *sql) = NULL; +void (*fp_taos_reset_current_db)(TAOS *taos) = NULL; + +int *(*fp_taos_fetch_lengths)(TAOS_RES *res) = NULL; +TAOS_ROW *(*fp_taos_result_block)(TAOS_RES *res) = NULL; + +const char *(*fp_taos_get_server_info)(TAOS *taos) = NULL; +const char *(*fp_taos_get_client_info)() = NULL; +int (*fp_taos_get_current_db)(TAOS *taos, char *database, int len, int *required) = NULL; + +const char *(*fp_taos_errstr)(TAOS_RES *res) = NULL; +int (*fp_taos_errno)(TAOS_RES *res) = NULL; + +void (*fp_taos_query_a)(TAOS *taos, const char *sql, __taos_async_fn_t fp, void *param) = NULL; +void (*fp_taos_query_a_with_reqid)(TAOS *taos, const char *sql, __taos_async_fn_t fp, void *param, + int64_t reqid) = NULL; +void (*fp_taos_fetch_rows_a)(TAOS_RES *res, __taos_async_fn_t fp, void *param) = NULL; +void (*fp_taos_fetch_raw_block_a)(TAOS_RES *res, __taos_async_fn_t fp, void *param) = NULL; +const void *(*fp_taos_get_raw_block)(TAOS_RES *res) = NULL; + +int (*fp_taos_get_db_route_info)(TAOS *taos, const char *db, TAOS_DB_ROUTE_INFO *dbInfo) = NULL; +int (*fp_taos_get_table_vgId)(TAOS *taos, const char *db, const char *table, int *vgId) = NULL; +int (*fp_taos_get_tables_vgId)(TAOS *taos, const char *db, const char *table[], int tableNum, int *vgId) = NULL; + +int (*fp_taos_load_table_info)(TAOS *taos, const char *tableNameList) = NULL; + +void (*fp_taos_set_hb_quit)(int8_t quitByKill) = NULL; + +int (*fp_taos_set_notify_cb)(TAOS *taos, __taos_notify_fn_t fp, void *param, int type) = NULL; + +void (*fp_taos_fetch_whitelist_a)(TAOS *taos, __taos_async_whitelist_fn_t fp, void *param) = NULL; + +int (*fp_taos_set_conn_mode)(TAOS *taos, int mode, int value) = NULL; + +TAOS_RES *(*fp_taos_schemaless_insert)(TAOS *taos, char *lines[], int numLines, int protocol, int precision) = NULL; +TAOS_RES *(*fp_taos_schemaless_insert_with_reqid)(TAOS *taos, char 
*lines[], int numLines, int protocol, int precision, + int64_t reqid) = NULL; +TAOS_RES *(*fp_taos_schemaless_insert_raw)(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, + int precision) = NULL; +TAOS_RES *(*fp_taos_schemaless_insert_raw_with_reqid)(TAOS *taos, char *lines, int len, int32_t *totalRows, + int protocol, int precision, int64_t reqid) = NULL; +TAOS_RES *(*fp_taos_schemaless_insert_ttl)(TAOS *taos, char *lines[], int numLines, int protocol, int precision, + int32_t ttl) = NULL; +TAOS_RES *(*fp_taos_schemaless_insert_ttl_with_reqid)(TAOS *taos, char *lines[], int numLines, int protocol, + int precision, int32_t ttl, int64_t reqid) = NULL; +TAOS_RES *(*fp_taos_schemaless_insert_raw_ttl)(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, + int precision, int32_t ttl) = NULL; +TAOS_RES *(*fp_taos_schemaless_insert_raw_ttl_with_reqid)(TAOS *taos, char *lines, int len, int32_t *totalRows, + int protocol, int precision, int32_t ttl, + int64_t reqid) = NULL; +TAOS_RES *(*fp_taos_schemaless_insert_raw_ttl_with_reqid_tbname_key)(TAOS *taos, char *lines, int len, + int32_t *totalRows, int protocol, int precision, + int32_t ttl, int64_t reqid, + char *tbnameKey) = NULL; +TAOS_RES *(*fp_taos_schemaless_insert_ttl_with_reqid_tbname_key)(TAOS *taos, char *lines[], int numLines, int protocol, + int precision, int32_t ttl, int64_t reqid, + char *tbnameKey) = NULL; + +tmq_conf_t *(*fp_tmq_conf_new)() = NULL; +tmq_conf_res_t (*fp_tmq_conf_set)(tmq_conf_t *conf, const char *key, const char *value) = NULL; +void (*fp_tmq_conf_destroy)(tmq_conf_t *conf) = NULL; +void (*fp_tmq_conf_set_auto_commit_cb)(tmq_conf_t *conf, tmq_commit_cb *cb, void *param) = NULL; + +tmq_list_t *(*fp_tmq_list_new)() = NULL; +int32_t (*fp_tmq_list_append)(tmq_list_t *, const char *) = NULL; +void (*fp_tmq_list_destroy)(tmq_list_t *) = NULL; +int32_t (*fp_tmq_list_get_size)(const tmq_list_t *) = NULL; +char **(*fp_tmq_list_to_c_array)(const tmq_list_t *) = NULL; + +tmq_t *(*fp_tmq_consumer_new)(tmq_conf_t *conf, char *errstr, int32_t errstrLen) = NULL; +int32_t (*fp_tmq_subscribe)(tmq_t *tmq, const tmq_list_t *topic_list) = NULL; +int32_t (*fp_tmq_unsubscribe)(tmq_t *tmq) = NULL; +int32_t (*fp_tmq_subscription)(tmq_t *tmq, tmq_list_t **topics) = NULL; +TAOS_RES *(*fp_tmq_consumer_poll)(tmq_t *tmq, int64_t timeout) = NULL; +int32_t (*fp_tmq_consumer_close)(tmq_t *tmq) = NULL; +int32_t (*fp_tmq_commit_sync)(tmq_t *tmq, const TAOS_RES *msg) = NULL; +void (*fp_tmq_commit_async)(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param) = NULL; +int32_t (*fp_tmq_commit_offset_sync)(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset) = NULL; +void (*fp_tmq_commit_offset_async)(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, + void *param) = NULL; +int32_t (*fp_tmq_get_topic_assignment)(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment, + int32_t *numOfAssignment) = NULL; +void (*fp_tmq_free_assignment)(tmq_topic_assignment *pAssignment) = NULL; +int32_t (*fp_tmq_offset_seek)(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset) = NULL; +int64_t (*fp_tmq_position)(tmq_t *tmq, const char *pTopicName, int32_t vgId) = NULL; +int64_t (*fp_tmq_committed)(tmq_t *tmq, const char *pTopicName, int32_t vgId) = NULL; + +TAOS *(*fp_tmq_get_connect)(tmq_t *tmq) = NULL; +const char *(*fp_tmq_get_table_name)(TAOS_RES *res) = NULL; +tmq_res_t (*fp_tmq_get_res_type)(TAOS_RES *res) = NULL; +const char 
*(*fp_tmq_get_topic_name)(TAOS_RES *res) = NULL; +const char *(*fp_tmq_get_db_name)(TAOS_RES *res) = NULL; +int32_t (*fp_tmq_get_vgroup_id)(TAOS_RES *res) = NULL; +int64_t (*fp_tmq_get_vgroup_offset)(TAOS_RES *res) = NULL; +const char *(*fp_tmq_err2str)(int32_t code) = NULL; + +int32_t (*fp_tmq_get_raw)(TAOS_RES *res, tmq_raw_data *raw) = NULL; +int32_t (*fp_tmq_write_raw)(TAOS *taos, tmq_raw_data raw) = NULL; +int (*fp_taos_write_raw_block)(TAOS *taos, int numOfRows, char *pData, const char *tbname) = NULL; +int (*fp_taos_write_raw_block_with_reqid)(TAOS *taos, int numOfRows, char *pData, const char *tbname, + int64_t reqid) = NULL; +int (*fp_taos_write_raw_block_with_fields)(TAOS *taos, int rows, char *pData, const char *tbname, TAOS_FIELD *fields, + int numFields) = NULL; +int (*fp_taos_write_raw_block_with_fields_with_reqid)(TAOS *taos, int rows, char *pData, const char *tbname, + TAOS_FIELD *fields, int numFields, int64_t reqid) = NULL; +void (*fp_tmq_free_raw)(tmq_raw_data raw) = NULL; +char *(*fp_tmq_get_json_meta)(TAOS_RES *res) = NULL; +void (*fp_tmq_free_json_meta)(char *jsonMeta) = NULL; + +TSDB_SERVER_STATUS (*fp_taos_check_server_status)(const char *fqdn, int port, char *details, int maxlen) = NULL; +void (*fp_taos_write_crashinfo)(int signum, void *sigInfo, void *context) = NULL; +char *(*fp_getBuildInfo)() = NULL; diff --git a/source/common/src/msg/tmsg.c b/source/common/src/msg/tmsg.c index e60da401d1..5d99ef8fea 100644 --- a/source/common/src/msg/tmsg.c +++ b/source/common/src/msg/tmsg.c @@ -3970,6 +3970,8 @@ int32_t tSerializeSTableCfgRsp(void *buf, int32_t bufLen, STableCfgRsp *pRsp) { } } + TAOS_CHECK_EXIT(tEncodeI32(&encoder, pRsp->keep)); + tEndEncode(&encoder); _exit: @@ -4070,6 +4072,13 @@ int32_t tDeserializeSTableCfgRsp(void *buf, int32_t bufLen, STableCfgRsp *pRsp) pRsp->pColRefs = NULL; } } + + if (!tDecodeIsEnd(&decoder)) { + TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pRsp->keep)); + } else { + pRsp->keep = 0; + } + tEndDecode(&decoder); _exit: diff --git a/source/common/src/tanalytics.c b/source/common/src/tanalytics.c index 8818839f09..4dd329fb4c 100644 --- a/source/common/src/tanalytics.c +++ b/source/common/src/tanalytics.c @@ -434,7 +434,7 @@ static int32_t taosAnalyJsonBufWriteStr(SAnalyticBuf *pBuf, const char *buf, int static int32_t taosAnalyJsonBufWriteStart(SAnalyticBuf *pBuf) { return taosAnalyJsonBufWriteStr(pBuf, "{\n", 0); } -static int32_t tsosAnalJsonBufOpen(SAnalyticBuf *pBuf, int32_t numOfCols) { +static int32_t tsosAnalyJsonBufOpen(SAnalyticBuf *pBuf, int32_t numOfCols) { pBuf->filePtr = taosOpenFile(pBuf->fileName, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_WRITE_THROUGH); if (pBuf->filePtr == NULL) { return terrno; @@ -666,7 +666,7 @@ void taosAnalyBufDestroy(SAnalyticBuf *pBuf) { int32_t tsosAnalyBufOpen(SAnalyticBuf *pBuf, int32_t numOfCols) { if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) { - return tsosAnalJsonBufOpen(pBuf, numOfCols); + return tsosAnalyJsonBufOpen(pBuf, numOfCols); } else { return TSDB_CODE_ANA_BUF_INVALID_TYPE; } diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 20caa3812e..1f5f808a65 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1073,12 +1073,6 @@ int32_t blockDataFromBuf(SSDataBlock* pBlock, const char* buf) { if (IS_VAR_DATA_TYPE(pCol->info.type)) { size_t metaSize = pBlock->info.rows * sizeof(int32_t); - char* tmp = taosMemoryRealloc(pCol->varmeta.offset, metaSize); // preview calloc is 
too small - if (tmp == NULL) { - return terrno; - } - - pCol->varmeta.offset = (int32_t*)tmp; memcpy(pCol->varmeta.offset, pStart, metaSize); pStart += metaSize; } else { @@ -2692,6 +2686,7 @@ int32_t dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf taskIdStr, flag, (int32_t)pDataBlock->info.type, pDataBlock->info.childId, pDataBlock->info.id.groupId, pDataBlock->info.id.uid, pDataBlock->info.rows, pDataBlock->info.version, pDataBlock->info.calWin.skey, pDataBlock->info.calWin.ekey, pDataBlock->info.parTbName); + goto _exit; if (len >= size - 1) { goto _exit; } diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 24deec8a1d..5b495a09dc 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -1000,8 +1000,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "s3PageCacheSize", tsS3PageCacheSize, 4, 1024 * 1024 * 1024, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER_LAZY,CFG_CATEGORY_GLOBAL)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "s3UploadDelaySec", tsS3UploadDelaySec, 1, 60 * 60 * 24 * 30, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER,CFG_CATEGORY_GLOBAL)); - // min free disk space used to check if the disk is full [50MB, 1GB] - TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "minDiskFreeSize", tsMinDiskFreeSize, TFS_MIN_DISK_FREE_SIZE, 1024 * 1024 * 1024, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER,CFG_CATEGORY_LOCAL)); + TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "minDiskFreeSize", tsMinDiskFreeSize, TFS_MIN_DISK_FREE_SIZE, TFS_MIN_DISK_FREE_SIZE_MAX, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER,CFG_CATEGORY_LOCAL)); TAOS_CHECK_RETURN(cfgAddBool(pCfg, "enableWhiteList", tsEnableWhiteList, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "streamNotifyMessageSize", tsStreamNotifyMessageSize, 8, 1024 * 1024, CFG_SCOPE_SERVER, CFG_DYN_NONE,CFG_CATEGORY_LOCAL)); diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c index a29f81b9a8..0616a5ba25 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c @@ -600,7 +600,7 @@ int32_t vmStartWorker(SVnodeMgmt *pMgmt) { SWWorkerPool *pStreamCtrlPool = &pMgmt->streamCtrlPool; pStreamCtrlPool->name = "vnode-stream-ctrl"; - pStreamCtrlPool->max = 1; + pStreamCtrlPool->max = 4; if ((code = tWWorkerInit(pStreamCtrlPool)) != 0) return code; SWWorkerPool *pStreamChkPool = &pMgmt->streamChkPool; diff --git a/source/dnode/mgmt/node_util/CMakeLists.txt b/source/dnode/mgmt/node_util/CMakeLists.txt index 320da45065..ad8282f87f 100644 --- a/source/dnode/mgmt/node_util/CMakeLists.txt +++ b/source/dnode/mgmt/node_util/CMakeLists.txt @@ -6,5 +6,5 @@ target_include_directories( PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/inc" ) target_link_libraries( - node_util cjson mnode vnode qnode snode wal sync ${TAOS_LIB_STATIC} tfs monitor monitorfw + node_util cjson mnode vnode qnode snode wal sync ${TAOS_NATIVE_LIB_STATIC} tfs monitor monitorfw ) \ No newline at end of file diff --git a/source/dnode/mnode/impl/src/mndAnode.c b/source/dnode/mnode/impl/src/mndAnode.c index bd0a4f3138..0777c2e247 100644 --- a/source/dnode/mnode/impl/src/mndAnode.c +++ b/source/dnode/mnode/impl/src/mndAnode.c @@ -371,6 +371,11 @@ static int32_t mndProcessCreateAnodeReq(SRpcMsg *pReq) { SAnodeObj *pObj = NULL; SMCreateAnodeReq createReq = {0}; + if ((code = grantCheck(TSDB_GRANT_TD_GPT)) != TSDB_CODE_SUCCESS) { + mError("failed to create anode, code:%s", tstrerror(code)); + goto _OVER; + } + 
TAOS_CHECK_GOTO(tDeserializeSMCreateAnodeReq(pReq->pCont, pReq->contLen, &createReq), NULL, _OVER); mInfo("anode:%s, start to create", createReq.url); diff --git a/source/dnode/mnode/impl/src/mndScheduler.c b/source/dnode/mnode/impl/src/mndScheduler.c index 399b7a9205..3cb6521e00 100644 --- a/source/dnode/mnode/impl/src/mndScheduler.c +++ b/source/dnode/mnode/impl/src/mndScheduler.c @@ -375,6 +375,7 @@ static int32_t buildSourceTask(SStreamObj* pStream, SEpSet* pEpset, EStreamTaskT uint64_t uid = 0; SArray** pTaskList = NULL; if (pSourceTaskList) { + uid = pStream->uid; pTaskList = &pSourceTaskList; } else { streamGetUidTaskList(pStream, type, &uid, &pTaskList); @@ -454,6 +455,7 @@ static int32_t addSourceTaskVTableOutput(SStreamTask* pTask, SSHashObj* pVgTasks TSDB_CHECK_NULL(pTaskMap, code, lino, _end, terrno); pTask->outputInfo.type = TASK_OUTPUT__VTABLE_MAP; + pTask->msgInfo.msgType = TDMT_STREAM_TASK_DISPATCH; STaskDispatcherVtableMap *pDispatcher = &pTask->outputInfo.vtableMapDispatcher; pDispatcher->taskInfos = taosArrayInit(taskNum, sizeof(STaskDispatcherFixed)); TSDB_CHECK_NULL(pDispatcher->taskInfos, code, lino, _end, terrno); @@ -462,26 +464,32 @@ static int32_t addSourceTaskVTableOutput(SStreamTask* pTask, SSHashObj* pVgTasks int32_t iter = 0, vgId = 0; uint64_t uid = 0; - STaskDispatcherFixed* pAddr = NULL; void* p = NULL; while (NULL != (p = tSimpleHashIterate(pVtables, p, &iter))) { char* vgUid = tSimpleHashGetKey(p, NULL); vgId = *(int32_t*)vgUid; uid = *(uint64_t*)((int32_t*)vgUid + 1); - pAddr = tSimpleHashGet(pVgTasks, &vgId, sizeof(vgId)); - if (NULL == pAddr) { + void *px = tSimpleHashGet(pVgTasks, &vgId, sizeof(vgId)); + if (NULL == px) { mError("tSimpleHashGet vgId %d not found", vgId); return code; } + SStreamTask* pMergeTask = *(SStreamTask**)px; + if (pMergeTask == NULL) { + mError("tSimpleHashGet pMergeTask %d not found", vgId); + return code; + } - void* px = tSimpleHashGet(pTaskMap, &pAddr->taskId, sizeof(int32_t)); + px = tSimpleHashGet(pTaskMap, &pMergeTask->id.taskId, sizeof(pMergeTask->id.taskId)); int32_t idx = 0; if (px == NULL) { - px = taosArrayPush(pDispatcher->taskInfos, pAddr); + STaskDispatcherFixed addr = { + .taskId = pMergeTask->id.taskId, .nodeId = pMergeTask->info.nodeId, .epSet = pMergeTask->info.epSet}; + px = taosArrayPush(pDispatcher->taskInfos, &addr); TSDB_CHECK_NULL(px, code, lino, _end, terrno); idx = taosArrayGetSize(pDispatcher->taskInfos) - 1; - code = tSimpleHashPut(pTaskMap, &pAddr->taskId, sizeof(int32_t), &idx, sizeof(int32_t)); + code = tSimpleHashPut(pTaskMap, &pMergeTask->id.taskId, sizeof(pMergeTask->id.taskId), &idx, sizeof(idx)); if (code) { mError("tSimpleHashPut uid to task idx failed, error:%d", code); return code; @@ -495,9 +503,15 @@ static int32_t addSourceTaskVTableOutput(SStreamTask* pTask, SSHashObj* pVgTasks mError("tSimpleHashPut uid to STaskDispatcherFixed failed, error:%d", code); return code; } - - mDebug("source task[%s,vg:%d] add vtable output map, vuid %" PRIu64 " => [%d, vg:%d]", - pTask->id.idStr, pTask->info.nodeId, uid, pAddr->taskId, pAddr->nodeId); + + code = streamTaskSetUpstreamInfo(pMergeTask, pTask); + if (code != TSDB_CODE_SUCCESS) { + mError("failed to set upstream info of merge task, error:%d", code); + return code; + } + + mDebug("source task[%s,vg:%d] add vtable output map, vuid %" PRIu64 " => [%d, vg:%d]", pTask->id.idStr, + pTask->info.nodeId, uid, pMergeTask->id.taskId, pMergeTask->info.nodeId); } _end: @@ -662,7 +676,6 @@ static int32_t addVTableMergeTask(SMnode* pMnode, SSubplan* plan, 
SStreamObj* pS } static int32_t buildMergeTaskHash(SArray* pMergeTaskList, SSHashObj** ppVgTasks) { - STaskDispatcherFixed addr; int32_t code = 0; int32_t taskNum = taosArrayGetSize(pMergeTaskList); @@ -676,11 +689,7 @@ static int32_t buildMergeTaskHash(SArray* pMergeTaskList, SSHashObj** ppVgTasks) for (int32_t i = 0; i < taskNum; ++i) { SStreamTask* pTask = taosArrayGetP(pMergeTaskList, i); - addr.taskId = pTask->id.taskId; - addr.nodeId = pTask->info.nodeId; - addr.epSet = pTask->info.epSet; - - code = tSimpleHashPut(*ppVgTasks, &addr.nodeId, sizeof(addr.nodeId), &addr, sizeof(addr)); + code = tSimpleHashPut(*ppVgTasks, &pTask->info.nodeId, sizeof(pTask->info.nodeId), &pTask, POINTER_BYTES); if (code) { mError("tSimpleHashPut %d STaskDispatcherFixed failed", i); return code; @@ -725,10 +734,9 @@ static int32_t addVTableSourceTask(SMnode* pMnode, SSubplan* plan, SStreamObj* p } plan->pVTables = *(SSHashObj**)p; - *(SSHashObj**)p = NULL; - code = doAddSourceTask(pMnode, plan, pStream, pEpset, nextWindowSkey, pVerList, pVgroup, false, useTriggerParam, hasAggTasks, pVgTasks, pSourceTaskList); + plan->pVTables = NULL; if (code != 0) { mError("failed to create stream task, code:%s", tstrerror(code)); @@ -857,8 +865,12 @@ static int32_t addAggTask(SStreamObj* pStream, SMnode* pMnode, SSubplan* plan, S } if (needHistoryTask(pStream)) { - EStreamTaskType type = (pStream->conf.trigger == STREAM_TRIGGER_CONTINUOUS_WINDOW_CLOSE) ? STREAM_RECALCUL_TASK - : STREAM_HISTORY_TASK; + EStreamTaskType type = 0; + if (pStream->conf.trigger == STREAM_TRIGGER_CONTINUOUS_WINDOW_CLOSE && (pStream->conf.fillHistory == 0)) { + type = STREAM_RECALCUL_TASK; // only the recalculating task + } else { + type = STREAM_HISTORY_TASK; // set the fill-history option + } code = doAddAggTask(pStream, pMnode, plan, pEpset, pVgroup, pSnode, type, useTriggerParam); if (code != 0) { goto END; @@ -1220,7 +1232,7 @@ static int32_t addVgroupToRes(char* fDBName, int32_t vvgId, uint64_t vuid, SRefC char dbVgId[TSDB_DB_NAME_LEN + 32]; SSHashObj *pTarVg = NULL, *pNewVg = NULL; - TSDB_CHECK_CODE(getTableVgId(pDb, 1, fDBName, &vgId, pCol->refColName), lino, _return); + TSDB_CHECK_CODE(getTableVgId(pDb, 1, fDBName, &vgId, pCol->refTableName), lino, _return); snprintf(dbVgId, sizeof(dbVgId), "%s.%d", pCol->refDbName, vgId); diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index 136b6c527c..35c753022b 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -2326,6 +2326,7 @@ static int32_t mndBuildStbCfgImp(SDbObj *pDb, SStbObj *pStb, const char *tbName, pRsp->watermark1 = pStb->watermark[0]; pRsp->watermark2 = pStb->watermark[1]; pRsp->ttl = pStb->ttl; + pRsp->keep = pStb->keep; pRsp->commentLen = pStb->commentLen; if (pStb->commentLen > 0) { pRsp->pComment = taosStrdup(pStb->comment); diff --git a/source/dnode/mnode/impl/src/mndStreamUtil.c b/source/dnode/mnode/impl/src/mndStreamUtil.c index 8fd386bdb2..8504eef3a4 100644 --- a/source/dnode/mnode/impl/src/mndStreamUtil.c +++ b/source/dnode/mnode/impl/src/mndStreamUtil.c @@ -1307,6 +1307,8 @@ int32_t setTaskAttrInResBlock(SStreamObj *pStream, SStreamTask *pTask, SSDataBlo STR_WITH_SIZE_TO_VARSTR(level, "agg", 3); } else if (pTask->info.taskLevel == TASK_LEVEL__SINK) { STR_WITH_SIZE_TO_VARSTR(level, "sink", 4); + } else if (pTask->info.taskLevel == TASK_LEVEL__MERGE) { + STR_WITH_SIZE_TO_VARSTR(level, "merge", 5); } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); diff --git 
a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 7588ebb7f6..6620656918 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -1200,7 +1200,7 @@ int32_t tqReaderSetTbUidList(STqReader* pReader, const SArray* tbUidList, const } tqDebug("s-task:%s %d tables are set to be queried target table", id, (int32_t)taosArrayGetSize(tbUidList)); - return tqCollectPhysicalTables(pReader, id); + return TSDB_CODE_SUCCESS; } void tqReaderAddTbUidList(STqReader* pReader, const SArray* pTableUidList) { @@ -1498,8 +1498,7 @@ static int32_t tqCollectPhysicalTables(STqReader* pReader, const char* idstr) { pScanInfo->cacheHit = 0; pVirtualTables = pScanInfo->pVirtualTables; - if (taosHashGetSize(pVirtualTables) == 0 || taosHashGetSize(pReader->tbIdHash) == 0 || - taosArrayGetSize(pReader->pColIdList) == 0) { + if (taosHashGetSize(pVirtualTables) == 0 || taosArrayGetSize(pReader->pColIdList) == 0) { goto _end; } @@ -1507,13 +1506,10 @@ static int32_t tqCollectPhysicalTables(STqReader* pReader, const char* idstr) { TSDB_CHECK_NULL(pPhysicalTables, code, lino, _end, terrno); taosHashSetFreeFp(pPhysicalTables, destroySourceScanTables); - pIter = taosHashIterate(pReader->tbIdHash, NULL); + pIter = taosHashIterate(pVirtualTables, NULL); while (pIter != NULL) { int64_t vTbUid = *(int64_t*)taosHashGetKey(pIter, NULL); - - px = taosHashGet(pVirtualTables, &vTbUid, sizeof(int64_t)); - TSDB_CHECK_NULL(px, code, lino, _end, terrno); - SArray* pColInfos = *(SArray**)px; + SArray* pColInfos = *(SArray**)pIter; TSDB_CHECK_NULL(pColInfos, code, lino, _end, TSDB_CODE_INTERNAL_ERROR); // Traverse all required columns and collect corresponding physical tables @@ -1548,7 +1544,7 @@ static int32_t tqCollectPhysicalTables(STqReader* pReader, const char* idstr) { j++; } } - pIter = taosHashIterate(pReader->tbIdHash, pIter); + pIter = taosHashIterate(pVirtualTables, pIter); } pScanInfo->pPhysicalTables = pPhysicalTables; @@ -1574,9 +1570,8 @@ _end: static void freeTableSchemaCache(const void* key, size_t keyLen, void* value, void* ud) { if (value) { - SSchemaWrapper** ppSchemaWrapper = value; - tDeleteSchemaWrapper(*ppSchemaWrapper); - *ppSchemaWrapper = NULL; + SSchemaWrapper* pSchemaWrapper = value; + tDeleteSchemaWrapper(pSchemaWrapper); } } @@ -1686,8 +1681,8 @@ int32_t tqRetrieveVTableDataBlock(STqReader* pReader, SSDataBlock** pRes, const SColumnInfoData* pOutCol = taosArrayGet(pBlock->pDataBlock, j); TSDB_CHECK_NULL(pOutCol, code, lino, _end, terrno); if (i >= nColInfos) { - tqInfo("%s has %d column info, but vtable column %d is missing, id: %s", __func__, nColInfos, pOutCol->info.colId, - idstr); + tqTrace("%s has %d column info, but vtable column %d is missing, id: %s", __func__, nColInfos, + pOutCol->info.colId, idstr); colDataSetNNULL(pOutCol, 0, numOfRows); j++; continue; @@ -1699,17 +1694,26 @@ int32_t tqRetrieveVTableDataBlock(STqReader* pReader, SSDataBlock** pRes, const i++; continue; } else if (pCol->vColId > pOutCol->info.colId) { - tqInfo("%s does not find column info for vtable column %d, closest vtable column is %d, id: %s", __func__, - pOutCol->info.colId, pCol->vColId, idstr); + tqTrace("%s does not find column info for vtable column %d, closest vtable column is %d, id: %s", __func__, + pOutCol->info.colId, pCol->vColId, idstr); colDataSetNNULL(pOutCol, 0, numOfRows); j++; continue; } - // copy data from physical table to the result block of virtual table + // skip this column if it is from another physical table if (pCol->pTbUid != pTbUid) { - 
// skip this column since it is from another physical table - } else if (pSubmitTbData->flags & SUBMIT_REQ_COLUMN_DATA_FORMAT) { + tqTrace("skip column %d of virtual table %" PRId64 " since it is from table %" PRId64 + ", current block table %" PRId64 ", id: %s", + pCol->vColId, vTbUid, pCol->pTbUid, pTbUid, idstr); + colDataSetNNULL(pOutCol, 0, numOfRows); + i++; + j++; + continue; + } + + // copy data from physical table to the result block of virtual table + if (pSubmitTbData->flags & SUBMIT_REQ_COLUMN_DATA_FORMAT) { // try to find the corresponding column data of physical table SColData* pColData = NULL; for (int32_t k = 0; k < nInputCols; ++k) { @@ -1860,7 +1864,7 @@ _end: if (code != TSDB_CODE_SUCCESS) { tqError("%s failed at line %d since %s, id: %s", __func__, lino, tstrerror(code), idstr); } - return (code == TSDB_CODE_SUCCESS); + return false; } bool tqReaderIsQueriedSourceTable(STqReader* pReader, uint64_t uid) { diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c index 32846829a6..03bc9f65b9 100644 --- a/source/dnode/vnode/src/tq/tqSink.c +++ b/source/dnode/vnode/src/tq/tqSink.c @@ -862,7 +862,7 @@ int32_t doWaitForDstTableCreated(SVnode* pVnode, SStreamTask* pTask, STableSinkI int32_t vgId = TD_VID(pVnode); int64_t suid = pTask->outputInfo.tbSink.stbUid; const char* id = pTask->id.idStr; - int32_t timeout = 300; // 5min + int32_t timeout = 60; // 1min int64_t start = taosGetTimestampSec(); while (pTableSinkInfo->uid == 0) { @@ -985,6 +985,8 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat if (code) { tqDebug("s-task:%s failed to build auto create table-name:%s, groupId:0x%" PRId64, id, dstTableName, groupId); return code; + } else { + tqDebug("s-task:%s no table name given, generated sub-table-name:%s, groupId:0x%" PRId64, id, dstTableName, groupId); } } else { if (pTask->subtableWithoutMd5 != 1 && !isAutoTableName(dstTableName) && diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c index 6b6dfb3167..92b6300beb 100644 --- a/source/dnode/vnode/src/tqCommon/tqCommon.c +++ b/source/dnode/vnode/src/tqCommon/tqCommon.c @@ -77,7 +77,7 @@ int32_t tqExpandStreamTask(SStreamTask* pTask) { .pOtherBackend = NULL, }; - if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) { + if (pTask->info.taskLevel == TASK_LEVEL__SOURCE || pTask->info.taskLevel == TASK_LEVEL__MERGE) { handle.vnode = ((STQ*)pMeta->ahandle)->pVnode; handle.initTqReader = 1; } else if (pTask->info.taskLevel == TASK_LEVEL__AGG) { @@ -86,7 +86,8 @@ int32_t tqExpandStreamTask(SStreamTask* pTask) { initStorageAPI(&handle.api); - if (pTask->info.taskLevel == TASK_LEVEL__SOURCE || pTask->info.taskLevel == TASK_LEVEL__AGG) { + if (pTask->info.taskLevel == TASK_LEVEL__SOURCE || pTask->info.taskLevel == TASK_LEVEL__AGG || + pTask->info.taskLevel == TASK_LEVEL__MERGE) { if (pTask->info.fillHistory == STREAM_RECALCUL_TASK) { handle.pStateBackend = pTask->pRecalState; handle.pOtherBackend = pTask->pState; @@ -113,6 +114,8 @@ int32_t tqExpandStreamTask(SStreamTask* pTask) { tqError("s-task:%s failed to set stream notify info, code:%s", pTask->id.idStr, tstrerror(code)); return code; } + + qSetStreamMergeInfo(pTask->exec.pExecutor, pTask->pVTables); } streamSetupScheduleTrigger(pTask); diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c index b50184774b..1ebe5047c5 100644 --- a/source/dnode/vnode/src/vnd/vnodeQuery.c +++ b/source/dnode/vnode/src/vnd/vnodeQuery.c @@ -544,6 +544,12 @@ 
int32_t vnodeGetBatchMeta(SVnode *pVnode, SRpcMsg *pMsg) { qWarn("vnodeGetBatchMeta failed, msgType:%d", req->msgType); } break; + case TDMT_VND_VSUBTABLES_META: + // error code has been set into reqMsg, no need to handle it here. + if (TSDB_CODE_SUCCESS != vnodeGetVSubtablesMeta(pVnode, &reqMsg)) { + qWarn("vnodeGetVSubtablesMeta failed, msgType:%d", req->msgType); + } + break; default: qError("invalid req msgType %d", req->msgType); reqMsg.code = TSDB_CODE_INVALID_MSG; @@ -730,7 +736,7 @@ int32_t vnodeGetVSubtablesMeta(SVnode *pVnode, SRpcMsg *pMsg) { qError("tSerializeSVSubTablesRsp failed, error:%d", rspSize); goto _return; } - pRsp = rpcMallocCont(rspSize); + pRsp = taosMemoryCalloc(1, rspSize); if (pRsp == NULL) { code = terrno; qError("rpcMallocCont %d failed, error:%d", rspSize, terrno); @@ -755,9 +761,11 @@ _return: qError("vnd get virtual subtables failed cause of %s", tstrerror(code)); } + *pMsg = rspMsg; + tDestroySVSubTablesRsp(&rsp); - tmsgSendRsp(&rspMsg); + //tmsgSendRsp(&rspMsg); return code; } diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h index a475cca52c..235cbbc5d5 100644 --- a/source/libs/catalog/inc/catalogInt.h +++ b/source/libs/catalog/inc/catalogInt.h @@ -317,7 +317,9 @@ typedef struct SCtgVSubTablesCtx { int32_t vgNum; bool clonedVgroups; SArray* pVgroups; - + + int32_t resCode; + int32_t resDoneNum; SVSubTablesRsp* pResList; int32_t resIdx; } SCtgVSubTablesCtx; diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c index cf60437508..e80d4611e9 100644 --- a/source/libs/catalog/src/ctgAsync.c +++ b/source/libs/catalog/src/ctgAsync.c @@ -3151,13 +3151,20 @@ int32_t ctgHandleGetVSubTablesRsp(SCtgTaskReq* tReq, int32_t reqType, const SDat SCtgTask* pTask = tReq->pTask; int32_t newCode = TSDB_CODE_SUCCESS; SCtgVSubTablesCtx* pCtx = (SCtgVSubTablesCtx*)pTask->taskCtx; + int32_t resIdx = atomic_fetch_add_32(&pCtx->resIdx, 1); - CTG_ERR_JRET(ctgProcessRspMsg(pCtx->pResList + atomic_fetch_add_32(&pCtx->resIdx, 1), reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target)); - - if (atomic_load_32(&pCtx->resIdx) < pCtx->vgNum) { - CTG_RET(code); + code = ctgProcessRspMsg(pCtx->pResList + resIdx, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target); + if (code) { + pCtx->resCode = code; } + int32_t doneNum = atomic_add_fetch_32(&pCtx->resDoneNum, 1); + if (doneNum < pCtx->vgNum) { + return code; + } + + code = pCtx->resCode; + _return: newCode = ctgHandleTaskEnd(pTask, code); diff --git a/source/libs/catalog/src/ctgRemote.c b/source/libs/catalog/src/ctgRemote.c index 3695049403..790c541334 100644 --- a/source/libs/catalog/src/ctgRemote.c +++ b/source/libs/catalog/src/ctgRemote.c @@ -633,7 +633,8 @@ int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo* pConn, SCtgT if (TDMT_VND_TABLE_CFG == msgType) { SCtgTbCfgCtx* ctx = (SCtgTbCfgCtx*)pTask->taskCtx; pName = ctx->pName; - } else if (TDMT_VND_TABLE_META == msgType || TDMT_VND_TABLE_NAME == msgType) { + } else if (TDMT_VND_TABLE_META == msgType || TDMT_VND_TABLE_NAME == msgType || + TDMT_VND_VSUBTABLES_META == msgType) { if (CTG_TASK_GET_TB_META_BATCH == pTask->type) { SCtgTbMetasCtx* ctx = (SCtgTbMetasCtx*)pTask->taskCtx; SCtgFetch* fetch = taosArrayGet(ctx->pFetchs, tReq->msgIdx); @@ -714,7 +715,8 @@ int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo* pConn, SCtgT if (TDMT_VND_TABLE_CFG == msgType) { SCtgTbCfgCtx* ctx = (SCtgTbCfgCtx*)pTask->taskCtx; pName = ctx->pName; - } else if (TDMT_VND_TABLE_META == 
msgType || TDMT_VND_TABLE_NAME == msgType) { + } else if (TDMT_VND_TABLE_META == msgType || TDMT_VND_TABLE_NAME == msgType || + TDMT_VND_VSUBTABLES_META == msgType) { if (CTG_TASK_GET_TB_META_BATCH == pTask->type) { SCtgTbMetasCtx* ctx = (SCtgTbMetasCtx*)pTask->taskCtx; SCtgFetch* fetch = taosArrayGet(ctx->pFetchs, tReq->msgIdx); diff --git a/source/libs/catalog/test/CMakeLists.txt b/source/libs/catalog/test/CMakeLists.txt index f23a6beaee..f3b82fd624 100644 --- a/source/libs/catalog/test/CMakeLists.txt +++ b/source/libs/catalog/test/CMakeLists.txt @@ -9,7 +9,7 @@ IF(NOT TD_DARWIN) ADD_EXECUTABLE(catalogTest ${SOURCE_LIST}) TARGET_LINK_LIBRARIES( catalogTest - PUBLIC os util common nodes catalog transport gtest qcom ${TAOS_LIB_STATIC} + PUBLIC os util common nodes catalog transport gtest qcom ${TAOS_NATIVE_LIB_STATIC} ) TARGET_INCLUDE_DIRECTORIES( @@ -19,7 +19,7 @@ IF(NOT TD_DARWIN) ) add_test( - NAME catalogTest - COMMAND catalogTest + NAME catalogTest + COMMAND catalogTest ) ENDIF() diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c index 027c87909b..c83cb19d7a 100644 --- a/source/libs/command/src/command.c +++ b/source/libs/command/src/command.c @@ -784,6 +784,11 @@ static void appendTableOptions(char* buf, int32_t* len, SDbCfgInfo* pDbCfg, STab if (pCfg->ttl > 0) { *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), " TTL %d", pCfg->ttl); + } + + if (pCfg->keep > 0) { + *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), + " KEEP %dm", pCfg->keep); } if (TSDB_SUPER_TABLE == pCfg->tableType || TSDB_NORMAL_TABLE == pCfg->tableType) { diff --git a/source/libs/executor/inc/dynqueryctrl.h b/source/libs/executor/inc/dynqueryctrl.h index feb2dca76f..f8c1675c42 100755 --- a/source/libs/executor/inc/dynqueryctrl.h +++ b/source/libs/executor/inc/dynqueryctrl.h @@ -76,6 +76,7 @@ typedef struct SStbJoinDynCtrlInfo { typedef struct SVtbScanDynCtrlInfo { bool scanAllCols; + char* dbName; tsem_t ready; SEpSet epSet; SUseDbRsp* pRsp; diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index e6be2b34a8..1ec4853e86 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -510,18 +510,19 @@ typedef struct SStreamFillSupporter { } SStreamFillSupporter; typedef struct SStreamRecParam { - char pSql[2048]; - int32_t sqlCapcity; - char pUrl[TSDB_EP_LEN + 17]; // "http://localhost:6041/rest/sql" - char pAuth[512 + 22]; // Authorization: Basic token - char pStbFullName[TSDB_TABLE_FNAME_LEN]; - char pWstartName[TSDB_COL_NAME_LEN]; - char pWendName[TSDB_COL_NAME_LEN]; - char pGroupIdName[TSDB_COL_NAME_LEN]; - char pIsWindowFilledName[TSDB_COL_NAME_LEN]; - void* pIteData; - int32_t iter; - TSKEY gap; + char pSql[2048]; + int32_t sqlCapcity; + char pUrl[TSDB_EP_LEN + 17]; // "http://localhost:6041/rest/sql" + char pAuth[512 + 22]; // Authorization: Basic token + char pStbFullName[TSDB_TABLE_FNAME_LEN]; + char pWstartName[TSDB_COL_NAME_LEN]; + char pWendName[TSDB_COL_NAME_LEN]; + char pGroupIdName[TSDB_COL_NAME_LEN]; + char pIsWindowFilledName[TSDB_COL_NAME_LEN]; + void* pIteData; + int32_t iter; + TSKEY gap; + SSHashObj* pColIdMap; } SStreamRecParam; typedef struct SStreamScanInfo { @@ -546,7 +547,10 @@ typedef struct SStreamScanInfo { uint64_t numOfExec; // execution times STqReader* tqReader; - SHashObj* pVtableMergeHandles; // key: vtable uid, value: SStreamVtableMergeHandle 
+ SHashObj* pVtableMergeHandles; // key: vtable uid, value: SStreamVtableMergeHandle + SDiskbasedBuf* pVtableMergeBuf; // page buffer used by vtable merge + SArray* pVtableReadyHandles; + STableListInfo* pTableListInfo; uint64_t groupId; bool igCheckGroupId; diff --git a/source/libs/executor/inc/operator.h b/source/libs/executor/inc/operator.h index c173085a5f..7788b95bdf 100644 --- a/source/libs/executor/inc/operator.h +++ b/source/libs/executor/inc/operator.h @@ -188,7 +188,7 @@ int32_t createEventNonblockOperatorInfo(SOperatorInfo* downstream, SPhysiNode* p int32_t createVirtualTableMergeOperatorInfo(SOperatorInfo** pDownstream, SReadHandle* readHandle, STableListInfo* pTableListInfo, int32_t numOfDownstream, SVirtualScanPhysiNode * pJoinNode, SExecTaskInfo* pTaskInfo, SOperatorInfo** pOptrInfo); -int32_t createStreamVtableMergeOperatorInfo(SReadHandle* pHandle, SVirtualScanPhysiNode* pVirtualScanNode, SNode* pTagCond, SExecTaskInfo* pTaskInfo, SOperatorInfo** pInfo); +int32_t createStreamVtableMergeOperatorInfo(SOperatorInfo* pDownstream, SReadHandle* pHandle, SVirtualScanPhysiNode* pVirtualScanNode, SNode* pTagCond, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo, SOperatorInfo** pInfo); // clang-format on SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn, __optr_fn_t cleanup, diff --git a/source/libs/executor/inc/querytask.h b/source/libs/executor/inc/querytask.h index b6e2d639f6..6d6e9c72b3 100644 --- a/source/libs/executor/inc/querytask.h +++ b/source/libs/executor/inc/querytask.h @@ -77,6 +77,7 @@ typedef struct { char* stbFullName; // used to generate dest child table name bool newSubTableRule; // used to generate dest child table name STaskNotifyEventStat* pNotifyEventStat; // used to store notify event statistics + SArray * pVTables; // used to store merge info for merge task, SArray } SStreamTaskInfo; struct SExecTaskInfo { diff --git a/source/libs/executor/inc/streamVtableMerge.h b/source/libs/executor/inc/streamVtableMerge.h index 918ef39c68..3e27ae1ced 100644 --- a/source/libs/executor/inc/streamVtableMerge.h +++ b/source/libs/executor/inc/streamVtableMerge.h @@ -30,15 +30,20 @@ typedef enum { SVM_NEXT_FOUND = 1, } SVM_NEXT_RESULT; -int32_t streamVtableMergeCreateHandle(SStreamVtableMergeHandle **ppHandle, int32_t nSrcTbls, int32_t numPageLimit, - SDiskbasedBuf *pBuf, SSDataBlock *pResBlock, const char *idstr); +int32_t streamVtableMergeCreateHandle(SStreamVtableMergeHandle **ppHandle, int64_t vuid, int32_t nSrcTbls, + int32_t numPageLimit, int32_t primaryTsIndex, SDiskbasedBuf *pBuf, + SSDataBlock *pResBlock, const char *idstr); -void streamVtableMergeDestroyHandle(SStreamVtableMergeHandle **ppHandle); +void streamVtableMergeDestroyHandle(void *ppHandle); + +int64_t streamVtableMergeHandleGetVuid(SStreamVtableMergeHandle *pHandle); int32_t streamVtableMergeAddBlock(SStreamVtableMergeHandle *pHandle, SSDataBlock *pDataBlock, const char *idstr); -int32_t streamVtableMergeNextTuple(SStreamVtableMergeHandle *pHandle, SSDataBlock *pResBlock, SVM_NEXT_RESULT *pRes, - const char *idstr); +int32_t streamVtableMergeMoveNext(SStreamVtableMergeHandle *pHandle, SVM_NEXT_RESULT *pRes, const char *idstr); + +int32_t streamVtableMergeCurrent(SStreamVtableMergeHandle *pHandle, SSDataBlock **ppDataBlock, int32_t *pRowIdx, + const char *idstr); #ifdef __cplusplus } diff --git a/source/libs/executor/inc/streaminterval.h b/source/libs/executor/inc/streaminterval.h index 7fe42c5fe1..5f7e79bd3f 100644 --- a/source/libs/executor/inc/streaminterval.h +++ 
b/source/libs/executor/inc/streaminterval.h @@ -71,6 +71,7 @@ void setDeleteFillValueInfo(TSKEY start, TSKEY end, SStreamFillSupporter* void doStreamFillRange(SStreamFillInfo* pFillInfo, SStreamFillSupporter* pFillSup, SSDataBlock* pRes); int32_t initFillSupRowInfo(SStreamFillSupporter* pFillSup, SSDataBlock* pRes); void getStateKeepInfo(SNonBlockAggSupporter* pNbSup, bool isRecOp, int32_t* pNumRes, TSKEY* pTsRes); +int32_t initStreamFillOperatorColumnMapInfo(SExprSupp* pExprSup, SOperatorInfo* pOperator); #ifdef __cplusplus } diff --git a/source/libs/executor/inc/streamsession.h b/source/libs/executor/inc/streamsession.h index 8ae1c4eb3a..714175de61 100644 --- a/source/libs/executor/inc/streamsession.h +++ b/source/libs/executor/inc/streamsession.h @@ -81,6 +81,7 @@ void setEventWindowInfo(SStreamAggSupporter* pAggSup, SSessionKey* pKey, SRowBuf // stream client int32_t streamClientGetResultRange(SStreamRecParam* pParam, SSHashObj* pRangeMap, SArray* pRangeRes); int32_t streamClientGetFillRange(SStreamRecParam* pParam, SWinKey* pKey, SArray* pRangeRes, void* pEmptyRow, int32_t size, int32_t* pOffsetInfo, int32_t numOfCols); +int32_t streamClientCheckCfg(SStreamRecParam* pParam); #ifdef __cplusplus } diff --git a/source/libs/executor/src/anomalywindowoperator.c b/source/libs/executor/src/anomalywindowoperator.c index 379177bb06..d46c429bef 100644 --- a/source/libs/executor/src/anomalywindowoperator.c +++ b/source/libs/executor/src/anomalywindowoperator.c @@ -327,7 +327,7 @@ static int32_t anomalyParseJson(SJson* pJson, SArray* pWindows, const char* pId) qError("%s failed to exec forecast, msg:%s", pId, pMsg); } - return TSDB_CODE_ANA_WN_DATA; + return TSDB_CODE_ANA_INTERNAL_ERROR; } else if (rows == 0) { return TSDB_CODE_SUCCESS; } @@ -382,7 +382,7 @@ static int32_t anomalyAnalysisWindow(SOperatorInfo* pOperator) { SAnalyticBuf analyBuf = {.bufType = ANALYTICS_BUF_TYPE_JSON}; char dataBuf[64] = {0}; int32_t code = 0; - int64_t ts = 0; + int64_t ts = taosGetTimestampMs(); int32_t lino = 0; const char* pId = GET_TASKID(pOperator->pTaskInfo); diff --git a/source/libs/executor/src/dynqueryctrloperator.c b/source/libs/executor/src/dynqueryctrloperator.c index 97d0cf4a0e..78899d52bc 100644 --- a/source/libs/executor/src/dynqueryctrloperator.c +++ b/source/libs/executor/src/dynqueryctrloperator.c @@ -100,6 +100,9 @@ void freeUseDbOutput(void* pOutput) { } static void destroyVtbScanDynCtrlInfo(SVtbScanDynCtrlInfo* pVtbScan) { + if (pVtbScan->dbName) { + taosMemoryFreeClear(pVtbScan->dbName); + } if (pVtbScan->childTableList) { taosArrayDestroy(pVtbScan->childTableList); } @@ -1136,13 +1139,15 @@ int32_t dynProcessUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) { pScanResInfo->vtbScan.pRsp = taosMemoryMalloc(sizeof(SUseDbRsp)); QUERY_CHECK_NULL(pScanResInfo->vtbScan.pRsp, code, lino, _return, terrno); - QUERY_CHECK_CODE(tDeserializeSUseDbRsp(pMsg->pData, (int32_t)pMsg->len, pScanResInfo->vtbScan.pRsp), lino, _return); + code = tDeserializeSUseDbRsp(pMsg->pData, (int32_t)pMsg->len, pScanResInfo->vtbScan.pRsp); + QUERY_CHECK_CODE(code, lino, _return); taosMemoryFreeClear(pMsg->pData); - QUERY_CHECK_CODE(tsem_post(&pScanResInfo->vtbScan.ready), lino, _return); + code = tsem_post(&pScanResInfo->vtbScan.ready); + QUERY_CHECK_CODE(code, lino, _return); - return TSDB_CODE_SUCCESS; + return code; _return: qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); return code; @@ -1157,7 +1162,8 @@ static int32_t buildDbVgInfoMap(SOperatorInfo* pOperator, SReadHandle* pHandle, pReq = 
taosMemoryMalloc(sizeof(SUseDbReq)); QUERY_CHECK_NULL(pReq, code, lino, _return, terrno); - QUERY_CHECK_CODE(tNameGetFullDbName(name, pReq->db), lino, _return); + code = tNameGetFullDbName(name, pReq->db); + QUERY_CHECK_CODE(code, lino, _return); int32_t contLen = tSerializeSUseDbReq(NULL, 0, pReq); buf1 = taosMemoryCalloc(1, contLen); QUERY_CHECK_NULL(buf1, code, lino, _return, terrno); @@ -1177,11 +1183,14 @@ static int32_t buildDbVgInfoMap(SOperatorInfo* pOperator, SReadHandle* pHandle, pMsgSendInfo->fp = dynProcessUseDbRsp; pMsgSendInfo->requestId = pTaskInfo->id.queryId; - QUERY_CHECK_CODE(asyncSendMsgToServer(pHandle->pMsgCb->clientRpc, &pScanResInfo->vtbScan.epSet, NULL, pMsgSendInfo), lino, _return); + code = asyncSendMsgToServer(pHandle->pMsgCb->clientRpc, &pScanResInfo->vtbScan.epSet, NULL, pMsgSendInfo); + QUERY_CHECK_CODE(code, lino, _return); - QUERY_CHECK_CODE(tsem_wait(&pScanResInfo->vtbScan.ready), lino, _return); + code = tsem_wait(&pScanResInfo->vtbScan.ready); + QUERY_CHECK_CODE(code, lino, _return); - QUERY_CHECK_CODE(queryBuildUseDbOutput(output, pScanResInfo->vtbScan.pRsp), lino, _return); + code = queryBuildUseDbOutput(output, pScanResInfo->vtbScan.pRsp); + QUERY_CHECK_CODE(code, lino, _return); _return: if (code) { @@ -1250,12 +1259,13 @@ int32_t dynHashValueComp(void const* lp, void const* rp) { int32_t getVgId(SDBVgInfo* dbInfo, char* dbFName, int32_t* vgId, char *tbName) { int32_t code = 0; int32_t lino = 0; - QUERY_CHECK_CODE(dynMakeVgArraySortBy(dbInfo, dynVgInfoComp), lino, _return); + code = dynMakeVgArraySortBy(dbInfo, dynVgInfoComp); + QUERY_CHECK_CODE(code, lino, _return); int32_t vgNum = (int32_t)taosArrayGetSize(dbInfo->vgArray); if (vgNum <= 0) { qError("db vgroup cache invalid, db:%s, vgroup number:%d", dbFName, vgNum); - QUERY_CHECK_CODE(TSDB_CODE_TSC_DB_NOT_SELECTED, lino, _return); + QUERY_CHECK_CODE(code = TSDB_CODE_TSC_DB_NOT_SELECTED, lino, _return); } SVgroupInfo* vgInfo = NULL; @@ -1309,8 +1319,10 @@ int32_t getDbVgInfo(SOperatorInfo* pOperator, SName *name, SDBVgInfo **dbVgInfo) if (find == NULL) { output = taosMemoryMalloc(sizeof(SUseDbOutput)); - QUERY_CHECK_CODE(buildDbVgInfoMap(pOperator, pHandle, name, pTaskInfo, output), line, _return); - QUERY_CHECK_CODE(taosHashPut(pInfo->vtbScan.dbVgInfoMap, name->dbname, strlen(name->dbname), &output, POINTER_BYTES), line, _return); + code = buildDbVgInfoMap(pOperator, pHandle, name, pTaskInfo, output); + QUERY_CHECK_CODE(code, line, _return); + code = taosHashPut(pInfo->vtbScan.dbVgInfoMap, name->dbname, strlen(name->dbname), &output, POINTER_BYTES); + QUERY_CHECK_CODE(code, line, _return); } else { output = *find; } @@ -1357,12 +1369,14 @@ int32_t vtbScan(SOperatorInfo* pOperator, SSDataBlock** pRes) { while (true) { if (pVtbScan->curTableIdx == pVtbScan->lastTableIdx) { - QUERY_CHECK_CODE(pOperator->pDownstream[0]->fpSet.getNextFn(pOperator->pDownstream[0], pRes), line, _return); + code = pOperator->pDownstream[0]->fpSet.getNextFn(pOperator->pDownstream[0], pRes); + QUERY_CHECK_CODE(code, line, _return); } else { uint64_t* id = taosArrayGet(pVtbScan->childTableList, pVtbScan->curTableIdx); QUERY_CHECK_NULL(id, code, line, _return, terrno); pHandle->api.metaReaderFn.initReader(&mr, pHandle->vnode, META_READER_LOCK, &pHandle->api.metaFn); - QUERY_CHECK_CODE(pHandle->api.metaReaderFn.getTableEntryByUid(&mr, *id), line, _return); + code = pHandle->api.metaReaderFn.getTableEntryByUid(&mr, *id); + QUERY_CHECK_CODE(code, line, _return); for (int32_t j = 0; j < mr.me.colRef.nCols; j++) { if 
(mr.me.colRef.pColRef[j].hasRef && colNeedScan(pOperator, mr.me.colRef.pColRef[j].id)) { @@ -1370,15 +1384,22 @@ int32_t vtbScan(SOperatorInfo* pOperator, SSDataBlock** pRes) { char dbFname[TSDB_DB_FNAME_LEN] = {0}; char orgTbFName[TSDB_TABLE_FNAME_LEN] = {0}; + if (strncmp(mr.me.colRef.pColRef[j].refDbName, pVtbScan->dbName, strlen(pVtbScan->dbName)) != 0) { + QUERY_CHECK_CODE(code = TSDB_CODE_VTABLE_NOT_SUPPORT_CROSS_DB, line, _return); + } toName(pInfo->vtbScan.acctId, mr.me.colRef.pColRef[j].refDbName, mr.me.colRef.pColRef[j].refTableName, &name); - QUERY_CHECK_CODE(getDbVgInfo(pOperator, &name, &dbVgInfo), line, _return); - QUERY_CHECK_CODE(tNameGetFullDbName(&name, dbFname), line, _return); - QUERY_CHECK_CODE(tNameGetFullTableName(&name, orgTbFName), line, _return); + code = getDbVgInfo(pOperator, &name, &dbVgInfo); + QUERY_CHECK_CODE(code, line, _return); + tNameGetFullDbName(&name, dbFname); + QUERY_CHECK_CODE(code, line, _return); + tNameGetFullTableName(&name, orgTbFName); + QUERY_CHECK_CODE(code, line, _return); void *pVal = taosHashGet(pVtbScan->orgTbVgColMap, orgTbFName, sizeof(orgTbFName)); if (!pVal) { SOrgTbInfo map = {0}; - QUERY_CHECK_CODE(getVgId(dbVgInfo, dbFname, &map.vgId, name.tname), line, _return); + code = getVgId(dbVgInfo, dbFname, &map.vgId, name.tname); + QUERY_CHECK_CODE(code, line, _return); tstrncpy(map.tbName, orgTbFName, sizeof(map.tbName)); map.colMap = taosArrayInit(10, sizeof(SColIdNameKV)); QUERY_CHECK_NULL(map.colMap, code, line, _return, terrno); @@ -1386,7 +1407,8 @@ int32_t vtbScan(SOperatorInfo* pOperator, SSDataBlock** pRes) { colIdNameKV.colId = mr.me.colRef.pColRef[j].id; tstrncpy(colIdNameKV.colName, mr.me.colRef.pColRef[j].refColName, sizeof(colIdNameKV.colName)); QUERY_CHECK_NULL(taosArrayPush(map.colMap, &colIdNameKV), code, line, _return, terrno); - QUERY_CHECK_CODE(taosHashPut(pVtbScan->orgTbVgColMap, orgTbFName, sizeof(orgTbFName), &map, sizeof(map)), line, _return); + code = taosHashPut(pVtbScan->orgTbVgColMap, orgTbFName, sizeof(orgTbFName), &map, sizeof(map)); + QUERY_CHECK_CODE(code, line, _return); } else { SOrgTbInfo *tbInfo = (SOrgTbInfo *)pVal; SColIdNameKV colIdNameKV = {0}; @@ -1398,13 +1420,15 @@ int32_t vtbScan(SOperatorInfo* pOperator, SSDataBlock** pRes) { } pVtbScan->vtbScanParam = NULL; - QUERY_CHECK_CODE(buildVtbScanOperatorParam(pInfo, &pVtbScan->vtbScanParam, *id), line, _return); + code = buildVtbScanOperatorParam(pInfo, &pVtbScan->vtbScanParam, *id); + QUERY_CHECK_CODE(code, line, _return); void* pIter = taosHashIterate(pVtbScan->orgTbVgColMap, NULL); while (pIter != NULL) { SOrgTbInfo* pMap = (SOrgTbInfo*)pIter; SOperatorParam* pExchangeParam = NULL; - QUERY_CHECK_CODE(buildExchangeOperatorParamForVScan(&pExchangeParam, 0, pMap), line, _return); + code = buildExchangeOperatorParamForVScan(&pExchangeParam, 0, pMap); + QUERY_CHECK_CODE(code, line, _return); QUERY_CHECK_NULL(taosArrayPush(((SVTableScanOperatorParam*)pVtbScan->vtbScanParam->value)->pOpParamArray, &pExchangeParam), code, line, _return, terrno); pIter = taosHashIterate(pVtbScan->orgTbVgColMap, pIter); } @@ -1412,7 +1436,8 @@ int32_t vtbScan(SOperatorInfo* pOperator, SSDataBlock** pRes) { // reset downstream operator's status pOperator->pDownstream[0]->status = OP_NOT_OPENED; - QUERY_CHECK_CODE(pOperator->pDownstream[0]->fpSet.getNextExtFn(pOperator->pDownstream[0], pVtbScan->vtbScanParam, pRes), line, _return); + code = pOperator->pDownstream[0]->fpSet.getNextExtFn(pOperator->pDownstream[0], pVtbScan->vtbScanParam, pRes); + QUERY_CHECK_CODE(code, line, 
_return); } if (*pRes) { @@ -1478,7 +1503,8 @@ static int32_t initVtbScanInfo(SOperatorInfo* pOperator, SDynQueryCtrlOperatorIn int32_t code = TSDB_CODE_SUCCESS; int32_t line = 0; - QUERY_CHECK_CODE(tsem_init(&pInfo->vtbScan.ready, 0, 0), line, _return); + code = tsem_init(&pInfo->vtbScan.ready, 0, 0); + QUERY_CHECK_CODE(code, line, _return); pInfo->vtbScan.scanAllCols = pPhyciNode->vtbScan.scanAllCols; pInfo->vtbScan.suid = pPhyciNode->vtbScan.suid; @@ -1487,6 +1513,8 @@ static int32_t initVtbScanInfo(SOperatorInfo* pOperator, SDynQueryCtrlOperatorIn pInfo->vtbScan.readHandle = *pHandle; pInfo->vtbScan.curTableIdx = 0; pInfo->vtbScan.lastTableIdx = -1; + pInfo->vtbScan.dbName = taosStrdup(pPhyciNode->vtbScan.dbName); + QUERY_CHECK_NULL(pInfo->vtbScan.dbName, code, line, _return, terrno); pInfo->vtbScan.readColList = taosArrayInit(LIST_LENGTH(pPhyciNode->vtbScan.pScanCols), sizeof(col_id_t)); QUERY_CHECK_NULL(pInfo->vtbScan.readColList, code, line, _return, terrno); @@ -1499,7 +1527,8 @@ static int32_t initVtbScanInfo(SOperatorInfo* pOperator, SDynQueryCtrlOperatorIn pInfo->vtbScan.childTableList = taosArrayInit(10, sizeof(uint64_t)); QUERY_CHECK_NULL(pInfo->vtbScan.childTableList, code, line, _return, terrno); - QUERY_CHECK_CODE(pHandle->api.metaFn.getChildTableList(pHandle->vnode, pInfo->vtbScan.suid, pInfo->vtbScan.childTableList), line, _return); + code = pHandle->api.metaFn.getChildTableList(pHandle->vnode, pInfo->vtbScan.suid, pInfo->vtbScan.childTableList); + QUERY_CHECK_CODE(code, line, _return); pInfo->vtbScan.dbVgInfoMap = taosHashInit(taosArrayGetSize(pInfo->vtbScan.childTableList), taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_ENTRY_LOCK); QUERY_CHECK_NULL(pInfo->vtbScan.dbVgInfoMap, code, line, _return, terrno); @@ -1518,6 +1547,7 @@ int32_t createDynQueryCtrlOperatorInfo(SOperatorInfo** pDownstream, int32_t numO QRY_PARAM_CHECK(pOptrInfo); int32_t code = TSDB_CODE_SUCCESS; + int32_t line = 0; __optr_fn_t nextFp = NULL; SOperatorInfo* pOperator = NULL; SDynQueryCtrlOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SDynQueryCtrlOperatorInfo)); @@ -1554,7 +1584,8 @@ int32_t createDynQueryCtrlOperatorInfo(SOperatorInfo** pDownstream, int32_t numO nextFp = seqStableJoin; break; case DYN_QTYPE_VTB_SCAN: - QUERY_CHECK_CODE(initVtbScanInfo(pOperator, pInfo, pHandle, pPhyciNode, pTaskInfo), code, _error); + code = initVtbScanInfo(pOperator, pInfo, pHandle, pPhyciNode, pTaskInfo); + QUERY_CHECK_CODE(code, line, _error); nextFp = vtbScan; break; default: diff --git a/source/libs/executor/src/exchangeoperator.c b/source/libs/executor/src/exchangeoperator.c index d20d072668..7f299dfef8 100644 --- a/source/libs/executor/src/exchangeoperator.c +++ b/source/libs/executor/src/exchangeoperator.c @@ -1239,7 +1239,7 @@ int32_t addDynamicExchangeSource(SOperatorInfo* pOperator) { freeOperatorParam(pOperator->pOperatorGetParam, OP_GET_PARAM); pOperator->pOperatorGetParam = NULL; - return TSDB_CODE_SUCCESS; + return code; } int32_t prepareLoadRemoteData(SOperatorInfo* pOperator) { diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index e22a54e194..3084e5c886 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -3049,7 +3049,7 @@ void printDataBlock(SSDataBlock* pBlock, const char* flag, const char* taskIdStr qInfo("%s===stream===%s: Block is Empty. 
block type %d", taskIdStr, flag, pBlock->info.type); return; } - if (qDebugFlag & DEBUG_DEBUG) { + if (qDebugFlag & DEBUG_INFO) { char* pBuf = NULL; int32_t code = dumpBlockData(pBlock, flag, &pBuf, taskIdStr); if (code == 0) { @@ -3069,13 +3069,13 @@ void printSpecDataBlock(SSDataBlock* pBlock, const char* flag, const char* opStr pBlock->info.version); return; } - if (qDebugFlag & DEBUG_DEBUG) { + if (qDebugFlag & DEBUG_INFO) { char* pBuf = NULL; char flagBuf[64]; snprintf(flagBuf, sizeof(flagBuf), "%s %s", flag, opStr); int32_t code = dumpBlockData(pBlock, flagBuf, &pBuf, taskIdStr); if (code == 0) { - qDebug("%s", pBuf); + qInfo("%s", pBuf); taosMemoryFree(pBuf); } } diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 3182f81e65..2cd95c53c4 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -141,8 +141,7 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu const char* id) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; - if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN && - pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_VIRTUAL_TABLE_SCAN) { + if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { if (pOperator->numOfDownstream == 0) { qError("failed to find stream scan operator to set the input data block, %s" PRIx64, id); return TSDB_CODE_APP_ERROR; @@ -275,6 +274,15 @@ _end: return code; } +void qSetStreamMergeInfo(qTaskInfo_t tinfo, SArray* pVTables) { + if (tinfo == 0 || pVTables == NULL) { + return; + } + + SStreamTaskInfo* pStreamInfo = &((SExecTaskInfo*)tinfo)->streamInfo; + pStreamInfo->pVTables = pVTables; +} + int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type) { if (tinfo == NULL) { return TSDB_CODE_APP_ERROR; diff --git a/source/libs/executor/src/forecastoperator.c b/source/libs/executor/src/forecastoperator.c index e9185824a3..b0966a65fe 100644 --- a/source/libs/executor/src/forecastoperator.c +++ b/source/libs/executor/src/forecastoperator.c @@ -158,8 +158,8 @@ static int32_t forecastCloseBuf(SForecastSupp* pSupp, const char* id) { qDebug("%s forecast rows not found from %s, use default:%" PRId64, id, pSupp->algoOpt, pSupp->optRows); } - if (pSupp->optRows > ANALY_FORECAST_MAX_ROWS) { - qError("%s required too many forecast rows, max allowed:%d, required:%" PRId64, id, ANALY_FORECAST_MAX_ROWS, + if (pSupp->optRows > ANALY_FORECAST_RES_MAX_ROWS) { + qError("%s required too many forecast rows, max allowed:%d, required:%" PRId64, id, ANALY_FORECAST_RES_MAX_ROWS, pSupp->optRows); return TSDB_CODE_ANA_ANODE_TOO_MANY_ROWS; } @@ -235,7 +235,7 @@ static int32_t forecastAnalysis(SForecastSupp* pSupp, SSDataBlock* pBlock, const } tjsonDelete(pJson); - return TSDB_CODE_ANA_WN_DATA; + return TSDB_CODE_ANA_INTERNAL_ERROR; } if (code < 0) { diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index 5677d490dc..7aea03699c 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -382,7 +382,7 @@ int32_t createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHand return terrno; } - if (pHandle->vnode) { + if (pHandle->vnode && (pTaskInfo->pSubplan->pVTables == NULL)) { code = createScanTableListInfo(&pTableScanNode->scan, pTableScanNode->pGroupTags, pTableScanNode->groupSort, pHandle, pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo); if (code) { @@ -515,8 +515,6 @@ int32_t 
createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHand code = createProjectOperatorInfo(NULL, (SProjectPhysiNode*)pPhyNode, pTaskInfo, &pOperator); } else if (QUERY_NODE_PHYSICAL_PLAN_VIRTUAL_TABLE_SCAN == type && model != OPTR_EXEC_MODEL_STREAM) { code = createVirtualTableMergeOperatorInfo(NULL, pHandle, NULL, 0, (SVirtualScanPhysiNode*)pPhyNode, pTaskInfo, &pOperator); - } else if (QUERY_NODE_PHYSICAL_PLAN_VIRTUAL_TABLE_SCAN == type && model == OPTR_EXEC_MODEL_STREAM) { - code = createStreamVtableMergeOperatorInfo(pHandle, (SVirtualScanPhysiNode*)pPhyNode, pTagCond, pTaskInfo, &pOperator); } else { code = TSDB_CODE_INVALID_PARA; pTaskInfo->code = code; @@ -689,6 +687,26 @@ int32_t createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHand code = createVirtualTableMergeOperatorInfo(ops, pHandle, pTableListInfo, size, (SVirtualScanPhysiNode*)pPhyNode, pTaskInfo, &pOptr); + } else if (QUERY_NODE_PHYSICAL_PLAN_VIRTUAL_TABLE_SCAN == type && model == OPTR_EXEC_MODEL_STREAM) { + SVirtualScanPhysiNode* pVirtualTableScanNode = (SVirtualScanPhysiNode*)pPhyNode; + STableListInfo* pTableListInfo = tableListCreate(); + if (!pTableListInfo) { + pTaskInfo->code = terrno; + qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(terrno)); + return terrno; + } + + code = createScanTableListInfo(&pVirtualTableScanNode->scan, pVirtualTableScanNode->pGroupTags, + pVirtualTableScanNode->groupSort, pHandle, pTableListInfo, pTagCond, pTagIndexCond, + pTaskInfo); + if (code) { + pTaskInfo->code = code; + tableListDestroy(pTableListInfo); + qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(terrno)); + return code; + } + + code = createStreamVtableMergeOperatorInfo(ops[0], pHandle, pVirtualTableScanNode, pTagCond, pTableListInfo, pTaskInfo, &pOptr); } else { code = TSDB_CODE_INVALID_PARA; pTaskInfo->code = code; diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c index b8b9860e26..c471225ad3 100644 --- a/source/libs/executor/src/projectoperator.c +++ b/source/libs/executor/src/projectoperator.c @@ -18,6 +18,7 @@ #include "functionMgt.h" #include "operator.h" #include "querytask.h" +#include "streaminterval.h" #include "taoserror.h" #include "tdatablock.h" @@ -162,6 +163,9 @@ int32_t createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhysiNode* code = setRowTsColumnOutputInfo(pOperator->exprSupp.pCtx, numOfCols, &pInfo->pPseudoColInfo); TSDB_CHECK_CODE(code, lino, _error); + code = initStreamFillOperatorColumnMapInfo(&pOperator->exprSupp, downstream); + TSDB_CHECK_CODE(code, lino, _error); + setOperatorInfo(pOperator, "ProjectOperator", QUERY_NODE_PHYSICAL_PLAN_PROJECT, false, OP_NOT_OPENED, pInfo, pTaskInfo); pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doProjectOperation, NULL, destroyProjectOperatorInfo, diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 07dc3a31c5..e5d2ce0735 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -1237,11 +1237,13 @@ static int32_t createVTableScanInfoFromParam(SOperatorInfo* pOperator) { } pAPI->metaReaderFn.initReader(&orgTable, pInfo->base.readHandle.vnode, META_READER_LOCK, &pAPI->metaFn); - QUERY_CHECK_CODE(pAPI->metaReaderFn.getTableEntryByName(&orgTable, strstr(pParam->pOrgTbInfo->tbName, ".") + 1), lino, _return); + code = pAPI->metaReaderFn.getTableEntryByName(&orgTable, strstr(pParam->pOrgTbInfo->tbName, ".") + 1); + 
QUERY_CHECK_CODE(code, lino, _return); switch (orgTable.me.type) { case TSDB_CHILD_TABLE: pAPI->metaReaderFn.initReader(&superTable, pInfo->base.readHandle.vnode, META_READER_LOCK, &pAPI->metaFn); - QUERY_CHECK_CODE(pAPI->metaReaderFn.getTableEntryByUid(&superTable, orgTable.me.ctbEntry.suid), lino, _return); + code = pAPI->metaReaderFn.getTableEntryByUid(&superTable, orgTable.me.ctbEntry.suid); + QUERY_CHECK_CODE(code, lino, _return); schema = &superTable.me.stbEntry.schemaRow; break; case TSDB_NORMAL_TABLE: @@ -1289,8 +1291,10 @@ static int32_t createVTableScanInfoFromParam(SOperatorInfo* pOperator) { blockDataDestroy(pInfo->pResBlock); pInfo->pResBlock = NULL; } - QUERY_CHECK_CODE(createOneDataBlockWithColArray(pInfo->pOrgBlock, pBlockColArray, &pInfo->pResBlock), lino, _return); - QUERY_CHECK_CODE(initQueryTableDataCondWithColArray(&pInfo->base.cond, &pInfo->base.orgCond, &pInfo->base.readHandle, pColArray), lino, _return); + code = createOneDataBlockWithColArray(pInfo->pOrgBlock, pBlockColArray, &pInfo->pResBlock); + QUERY_CHECK_CODE(code, lino, _return); + code = initQueryTableDataCondWithColArray(&pInfo->base.cond, &pInfo->base.orgCond, &pInfo->base.readHandle, pColArray); + QUERY_CHECK_CODE(code, lino, _return); pInfo->base.cond.twindows.skey = pParam->window.ekey + 1; pInfo->base.cond.suid = orgTable.me.type == TSDB_CHILD_TABLE ? superTable.me.uid : 0; pInfo->currentGroupId = 0; @@ -1304,7 +1308,8 @@ static int32_t createVTableScanInfoFromParam(SOperatorInfo* pOperator) { uint64_t pUid = orgTable.me.uid; STableKeyInfo info = {.groupId = 0, .uid = pUid}; int32_t tableIdx = 0; - QUERY_CHECK_CODE(taosHashPut(pListInfo->map, &pUid, sizeof(uint64_t), &tableIdx, sizeof(int32_t)), lino, _return); + code = taosHashPut(pListInfo->map, &pUid, sizeof(uint64_t), &tableIdx, sizeof(int32_t)); + QUERY_CHECK_CODE(code, lino, _return); QUERY_CHECK_NULL(taosArrayPush(pListInfo->pTableList, &info), code, lino, _return, terrno); qDebug("add dynamic table scan uid:%" PRIu64 ", %s", info.uid, GET_TASKID(pTaskInfo)); @@ -1470,12 +1475,14 @@ int32_t doTableScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { SSDataBlock* result = NULL; while (true) { - QUERY_CHECK_CODE(startNextGroupScan(pOperator, &result), lino, _end); + code = startNextGroupScan(pOperator, &result); + QUERY_CHECK_CODE(code, lino, _end); if (result || pOperator->status == OP_EXEC_DONE) { SSDataBlock* res = NULL; if (result) { - QUERY_CHECK_CODE(createOneDataBlockWithTwoBlock(result, pInfo->pOrgBlock, &res), lino, _end); + code = createOneDataBlockWithTwoBlock(result, pInfo->pOrgBlock, &res); + QUERY_CHECK_CODE(code, lino, _end); pInfo->pResBlock = res; blockDataDestroy(result); } @@ -3157,7 +3164,7 @@ int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock, STime SOperatorInfo* pOperator = pInfo->pStreamScanOp; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; const char* id = GET_TASKID(pTaskInfo); - SSHashObj* pVtableInfos = pTaskInfo->pSubplan->pVTables; + bool isVtableSourceScan = (pTaskInfo->pSubplan->pVTables != NULL); code = blockDataEnsureCapacity(pInfo->pRes, pBlock->info.rows); QUERY_CHECK_CODE(code, lino, _end); @@ -3168,7 +3175,7 @@ int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock, STime pBlockInfo->version = pBlock->info.version; STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info; - if (pVtableInfos == NULL) { + if (!isVtableSourceScan) { pBlockInfo->id.groupId = tableListGetTableGroupId(pTableScanInfo->base.pTableListInfo, pBlock->info.id.uid); } else { // use 
original table uid as groupId for vtable @@ -3213,7 +3220,7 @@ int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock, STime } // currently only the tbname pseudo column - if (pInfo->numOfPseudoExpr > 0) { + if (pInfo->numOfPseudoExpr > 0 && !isVtableSourceScan) { code = addTagPseudoColumnData(&pInfo->readHandle, pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, pInfo->pRes, pBlockInfo->rows, pTaskInfo, &pTableScanInfo->base.metaCache); // ignore the table not exists error, since this table may have been dropped during the scan procedure. @@ -3762,9 +3769,20 @@ static int32_t doStreamScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { SStorageAPI* pAPI = &pTaskInfo->storageAPI; SStreamScanInfo* pInfo = pOperator->info; SStreamTaskInfo* pStreamInfo = &pTaskInfo->streamInfo; + SSHashObj* pVtableInfos = pTaskInfo->pSubplan->pVTables; qDebug("stream scan started, %s", id); + if (pVtableInfos != NULL && pStreamInfo->recoverStep != STREAM_RECOVER_STEP__NONE) { + qError("stream vtable source scan should not have recovery step: %d", pStreamInfo->recoverStep); + pStreamInfo->recoverStep = STREAM_RECOVER_STEP__NONE; + } + + if (pVtableInfos != NULL && !pInfo->igCheckUpdate) { + qError("stream vtable source scan should have igCheckUpdate"); + pInfo->igCheckUpdate = false; + } + if (pStreamInfo->recoverStep == STREAM_RECOVER_STEP__PREPARE1 || pStreamInfo->recoverStep == STREAM_RECOVER_STEP__PREPARE2) { STableScanInfo* pTSInfo = pInfo->pTableScanOp->info; @@ -3863,6 +3881,10 @@ static int32_t doStreamScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { // TODO: refactor FETCH_NEXT_BLOCK: if (pInfo->blockType == STREAM_INPUT__DATA_BLOCK) { + if (pVtableInfos != NULL) { + qInfo("stream vtable source scan would ignore all data blocks"); + pInfo->validBlockIndex = total; + } if (pInfo->validBlockIndex >= total) { doClearBufferedBlocks(pInfo); (*ppRes) = NULL; @@ -4013,6 +4035,10 @@ FETCH_NEXT_BLOCK: return code; } else if (pInfo->blockType == STREAM_INPUT__DATA_SUBMIT) { qDebug("stream scan mode:%d, %s", pInfo->scanMode, id); + if (pVtableInfos != NULL && pInfo->scanMode != STREAM_SCAN_FROM_READERHANDLE) { + qError("stream vtable source scan should not have scan mode: %d", pInfo->scanMode); + pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; + } switch (pInfo->scanMode) { case STREAM_SCAN_FROM_RES: { pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; @@ -4167,6 +4193,11 @@ FETCH_NEXT_BLOCK: continue; } + if (pVtableInfos != NULL && pInfo->pCreateTbRes->info.rows > 0) { + qError("stream vtable source scan should not have create table res"); + blockDataCleanup(pInfo->pCreateTbRes); + } + if (pInfo->pCreateTbRes->info.rows > 0) { pInfo->scanMode = STREAM_SCAN_FROM_RES; qDebug("create table res exists, rows:%" PRId64 " return from stream scan, %s", @@ -4178,8 +4209,11 @@ FETCH_NEXT_BLOCK: code = doCheckUpdate(pInfo, pBlockInfo->window.ekey, pInfo->pRes); QUERY_CHECK_CODE(code, lino, _end); setStreamOperatorState(&pInfo->basic, pInfo->pRes->info.type); - code = doFilter(pInfo->pRes, pOperator->exprSupp.pFilterInfo, NULL); - QUERY_CHECK_CODE(code, lino, _end); + if (pVtableInfos == NULL) { + // filter should be applied in merge task for vtables + code = doFilter(pInfo->pRes, pOperator->exprSupp.pFilterInfo, NULL); + QUERY_CHECK_CODE(code, lino, _end); + } code = blockDataUpdateTsWindow(pInfo->pRes, pInfo->primaryTsIndex); QUERY_CHECK_CODE(code, lino, _end); @@ -4215,7 +4249,7 @@ FETCH_NEXT_BLOCK: goto NEXT_SUBMIT_BLK; } else if (pInfo->blockType == STREAM_INPUT__CHECKPOINT) { - if 
(pInfo->validBlockIndex >= total) { + if (pInfo->validBlockIndex >= total || pVtableInfos != NULL) { doClearBufferedBlocks(pInfo); (*ppRes) = NULL; return code; @@ -4494,6 +4528,18 @@ void destroyStreamScanOperatorInfo(void* param) { taosHashCleanup(pStreamScan->pVtableMergeHandles); pStreamScan->pVtableMergeHandles = NULL; } + if (pStreamScan->pVtableMergeBuf) { + destroyDiskbasedBuf(pStreamScan->pVtableMergeBuf); + pStreamScan->pVtableMergeBuf = NULL; + } + if (pStreamScan->pVtableReadyHandles) { + taosArrayDestroy(pStreamScan->pVtableReadyHandles); + pStreamScan->pVtableReadyHandles = NULL; + } + if (pStreamScan->pTableListInfo) { + tableListDestroy(pStreamScan->pTableListInfo); + pStreamScan->pTableListInfo = NULL; + } if (pStreamScan->matchInfo.pList) { taosArrayDestroy(pStreamScan->matchInfo.pList); } @@ -4681,15 +4727,13 @@ _end: return code; } -static int32_t createStreamVtableBlock(SColMatchInfo *pMatchInfo, SSDataBlock **ppRes, const char *idstr) { +static SSDataBlock* createStreamVtableBlock(SColMatchInfo *pMatchInfo, const char *idstr) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; SSDataBlock *pRes = NULL; QUERY_CHECK_NULL(pMatchInfo, code, lino, _end, TSDB_CODE_INVALID_PARA); - *ppRes = NULL; - code = createDataBlock(&pRes); QUERY_CHECK_CODE(code, lino, _end); int32_t numOfOutput = taosArrayGetSize(pMatchInfo->pList); @@ -4703,18 +4747,16 @@ static int32_t createStreamVtableBlock(SColMatchInfo *pMatchInfo, SSDataBlock ** QUERY_CHECK_CODE(code, lino, _end); } - *ppRes = pRes; - pRes = NULL; - - _end: if (code != TSDB_CODE_SUCCESS) { qError("%s failed at line %d since %s, id: %s", __func__, lino, tstrerror(code), idstr); + if (pRes != NULL) { + blockDataDestroy(pRes); + } + pRes = NULL; + terrno = code; } - if (pRes != NULL) { - blockDataDestroy(pRes); - } - return code; + return pRes; } static int32_t createStreamNormalScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode* pTableScanNode, @@ -4838,8 +4880,7 @@ static int32_t createStreamNormalScanOperatorInfo(SReadHandle* pHandle, STableSc if (pVtableInfos != NULL) { // save vtable info into tqReader for vtable source scan - SSDataBlock* pResBlock = NULL; - code = createStreamVtableBlock(&pInfo->matchInfo, &pResBlock, idstr); + SSDataBlock* pResBlock = createStreamVtableBlock(&pInfo->matchInfo, idstr); QUERY_CHECK_CODE(code, lino, _error); code = pAPI->tqReaderFn.tqReaderSetVtableInfo(pInfo->tqReader, pHandle->vnode, pAPI, pVtableInfos, &pResBlock, idstr); @@ -4962,8 +5003,8 @@ _error: taosArrayDestroy(pColIds); } - if (pInfo != NULL) { - STableScanInfo* p = (STableScanInfo*) pInfo->pTableScanOp->info; + if (pInfo != NULL && pInfo->pTableScanOp != NULL) { + STableScanInfo* p = (STableScanInfo*)pInfo->pTableScanOp->info; if (p != NULL) { p->base.pTableListInfo = NULL; } diff --git a/source/libs/executor/src/streamVtableMerge.c b/source/libs/executor/src/streamVtableMerge.c index 1626e8503c..4e0f464132 100644 --- a/source/libs/executor/src/streamVtableMerge.c +++ b/source/libs/executor/src/streamVtableMerge.c @@ -28,6 +28,7 @@ typedef struct SVMBufPageInfo { typedef struct SStreamVtableMergeSource { SDiskbasedBuf* pBuf; // buffer for storing data int32_t* pTotalPages; // total pages of all sources in the buffer + int32_t primaryTsIndex; SSDataBlock* pInputDataBlock; // data block to be written to the buffer int64_t currentExpireTimeMs; // expire time of the input data block @@ -44,12 +45,15 @@ typedef struct SStreamVtableMergeHandle { SDiskbasedBuf* pBuf; int32_t numOfPages; int32_t numPageLimit; + int32_t 
primaryTsIndex; + int64_t vuid; int32_t nSrcTbls; SHashObj* pSources; SSDataBlock* datablock; // Does not store data, only used to save the schema of input/output data blocks SMultiwayMergeTreeInfo* pMergeTree; + int32_t numEmptySources; int64_t globalLatestTs; } SStreamVtableMergeHandle; @@ -90,6 +94,9 @@ static int32_t svmSourceFlushInput(SStreamVtableMergeSource* pSource, const char // check data block size pBlock = pSource->pInputDataBlock; + if (blockDataGetNumOfRows(pBlock) == 0) { + goto _end; + } int32_t size = blockDataGetSize(pBlock) + sizeof(int32_t) + taosArrayGetSize(pBlock->pDataBlock) * sizeof(int32_t); QUERY_CHECK_CONDITION(size <= getBufPageSize(pSource->pBuf), code, lino, _end, TSDB_CODE_INTERNAL_ERROR); @@ -123,37 +130,36 @@ _end: static int32_t svmSourceAddBlock(SStreamVtableMergeSource* pSource, SSDataBlock* pDataBlock, const char* idstr) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; - int32_t pageSize = 0; - int32_t holdSize = 0; SSDataBlock* pInputDataBlock = NULL; QUERY_CHECK_NULL(pDataBlock, code, lino, _end, TSDB_CODE_INVALID_PARA); QUERY_CHECK_NULL(pSource, code, lino, _end, TSDB_CODE_INVALID_PARA); pInputDataBlock = pSource->pInputDataBlock; - if (pInputDataBlock == NULL) { - code = createOneDataBlock(pDataBlock, false, &pInputDataBlock); - QUERY_CHECK_CODE(code, lino, _end); - pSource->pInputDataBlock = pInputDataBlock; - } + QUERY_CHECK_CONDITION(taosArrayGetSize(pDataBlock->pDataBlock) >= taosArrayGetSize(pInputDataBlock->pDataBlock), code, + lino, _end, TSDB_CODE_INVALID_PARA); int32_t start = 0; int32_t nrows = blockDataGetNumOfRows(pDataBlock); + int32_t pageSize = + getBufPageSize(pSource->pBuf) - sizeof(int32_t) - taosArrayGetSize(pInputDataBlock->pDataBlock) * sizeof(int32_t); while (start < nrows) { int32_t holdSize = blockDataGetSize(pInputDataBlock); QUERY_CHECK_CONDITION(holdSize < pageSize, code, lino, _end, TSDB_CODE_INTERNAL_ERROR); - int32_t stop = 0; + int32_t stop = start; code = blockDataSplitRows(pDataBlock, pDataBlock->info.hasVarCol, start, &stop, pageSize - holdSize); - QUERY_CHECK_CODE(code, lino, _end); if (stop == start - 1) { // If pInputDataBlock cannot hold new rows, ignore the error and write pInputDataBlock to the buffer } else { + QUERY_CHECK_CODE(code, lino, _end); // append new rows to pInputDataBlock if (blockDataGetNumOfRows(pInputDataBlock) == 0) { // set expires time for the first block pSource->currentExpireTimeMs = taosGetTimestampMs() + tsStreamVirtualMergeMaxDelayMs; } int32_t numOfRows = stop - start + 1; + code = blockDataEnsureCapacity(pInputDataBlock, pInputDataBlock->info.rows + numOfRows); + QUERY_CHECK_CODE(code, lino, _end); code = blockDataMergeNRows(pInputDataBlock, pDataBlock, start, numOfRows); QUERY_CHECK_CODE(code, lino, _end); } @@ -176,6 +182,17 @@ _end: static bool svmSourceIsEmpty(SStreamVtableMergeSource* pSource) { return listNEles(pSource->pageInfoList) == 0; } +static int64_t svmSourceGetExpireTime(SStreamVtableMergeSource* pSource) { + SListNode* pn = tdListGetHead(pSource->pageInfoList); + if (pn != NULL) { + SVMBufPageInfo* pageInfo = (SVMBufPageInfo*)pn->data; + if (pageInfo != NULL) { + return pageInfo->expireTimeMs; + } + } + return INT64_MAX; +} + static int32_t svmSourceReadBuf(SStreamVtableMergeSource* pSource, const char* idstr) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; @@ -188,6 +205,11 @@ static int32_t svmSourceReadBuf(SStreamVtableMergeSource* pSource, const char* i QUERY_CHECK_NULL(pSource->pOutputDataBlock, code, lino, _end, TSDB_CODE_INVALID_PARA); 
blockDataCleanup(pSource->pOutputDataBlock); + int32_t numOfCols = taosArrayGetSize(pSource->pOutputDataBlock->pDataBlock); + for (int32_t i = 0; i < numOfCols; i++) { + SColumnInfoData* pCol = taosArrayGet(pSource->pOutputDataBlock->pDataBlock, i); + pCol->hasNull = true; + } pn = tdListGetHead(pSource->pageInfoList); QUERY_CHECK_NULL(pn, code, lino, _end, TSDB_CODE_INTERNAL_ERROR); @@ -215,21 +237,15 @@ static int32_t svmSourceCurrentTs(SStreamVtableMergeSource* pSource, const char* SColumnInfoData* tsCol = NULL; QUERY_CHECK_NULL(pSource, code, lino, _end, TSDB_CODE_INVALID_PARA); - QUERY_CHECK_CONDITION(!svmSourceIsEmpty(pSource), code, lino, _end, TSDB_CODE_INVALID_PARA); QUERY_CHECK_NULL(pSource->pOutputDataBlock, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_CONDITION(pSource->rowIndex >= 0 && pSource->rowIndex < blockDataGetNumOfRows(pSource->pOutputDataBlock), + code, lino, _end, TSDB_CODE_INVALID_PARA); - if (blockDataGetNumOfRows(pSource->pOutputDataBlock) == 0) { - code = svmSourceReadBuf(pSource, idstr); - QUERY_CHECK_CODE(code, lino, _end); - } - QUERY_CHECK_CONDITION(pSource->rowIndex < blockDataGetNumOfRows(pSource->pOutputDataBlock), code, lino, _end, - TSDB_CODE_INVALID_PARA); - - tsCol = taosArrayGet(pSource->pOutputDataBlock->pDataBlock, 0); + tsCol = taosArrayGet(pSource->pOutputDataBlock->pDataBlock, pSource->primaryTsIndex); QUERY_CHECK_NULL(tsCol, code, lino, _end, terrno); + QUERY_CHECK_CONDITION(tsCol->info.type == TSDB_DATA_TYPE_TIMESTAMP, code, lino, _end, TSDB_CODE_INVALID_PARA); *pTs = ((int64_t*)tsCol->pData)[pSource->rowIndex]; - pSource->latestTs = TMAX(*pTs, pSource->latestTs); _end: if (code != TSDB_CODE_SUCCESS) { @@ -238,55 +254,54 @@ _end: return code; } -static int32_t svmSourceMoveNext(SStreamVtableMergeSource* pSource, const char* idstr, SVM_NEXT_RESULT* pRes) { +static int32_t svmSourceMoveNext(SStreamVtableMergeSource* pSource, const char* idstr) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; SListNode* pn = NULL; void* page = NULL; - int64_t latestTs = 0; QUERY_CHECK_NULL(pSource, code, lino, _end, TSDB_CODE_INVALID_PARA); QUERY_CHECK_NULL(pSource->pOutputDataBlock, code, lino, _end, TSDB_CODE_INVALID_PARA); - *pRes = SVM_NEXT_NOT_READY; - latestTs = pSource->latestTs; - while (true) { - if (svmSourceIsEmpty(pSource)) { - pSource->rowIndex = 0; - break; + if (pSource->rowIndex >= 0) { + QUERY_CHECK_CONDITION(pSource->rowIndex < blockDataGetNumOfRows(pSource->pOutputDataBlock), code, lino, _end, + TSDB_CODE_INVALID_PARA); + pSource->rowIndex++; + if (pSource->rowIndex >= blockDataGetNumOfRows(pSource->pOutputDataBlock)) { + // Pop the page from the list and recycle it + pn = tdListPopHead(pSource->pageInfoList); + QUERY_CHECK_NULL(pn, code, lino, _end, TSDB_CODE_INTERNAL_ERROR); + QUERY_CHECK_NULL(pn->data, code, lino, _end, TSDB_CODE_INTERNAL_ERROR); + SVMBufPageInfo* pageInfo = (SVMBufPageInfo*)pn->data; + page = getBufPage(pSource->pBuf, pageInfo->pageId); + QUERY_CHECK_NULL(page, code, lino, _end, terrno); + code = dBufSetBufPageRecycled(pSource->pBuf, page); + QUERY_CHECK_CODE(code, lino, _end); + (*pSource->pTotalPages)--; + taosMemoryFreeClear(pn); + pSource->rowIndex = -1; + } } - QUERY_CHECK_CONDITION(pSource->rowIndex < blockDataGetNumOfRows(pSource->pOutputDataBlock), code, lino, _end, - TSDB_CODE_INVALID_PARA); - - pSource->rowIndex++; - if (pSource->rowIndex >= blockDataGetNumOfRows(pSource->pOutputDataBlock)) { - // Pop the page from the list and recycle it - pn = tdListPopHead(pSource->pageInfoList); - 
QUERY_CHECK_NULL(pn, code, lino, _end, TSDB_CODE_INTERNAL_ERROR); - QUERY_CHECK_NULL(pn->data, code, lino, _end, TSDB_CODE_INTERNAL_ERROR); - SVMBufPageInfo* pageInfo = (SVMBufPageInfo*)pn->data; - page = getBufPage(pSource->pBuf, pageInfo->pageId); - QUERY_CHECK_NULL(page, code, lino, _end, terrno); - code = dBufSetBufPageRecycled(pSource->pBuf, page); + if (pSource->rowIndex == -1) { + if (svmSourceIsEmpty(pSource)) { + break; + } + // Read the first page from the list + code = svmSourceReadBuf(pSource, idstr); QUERY_CHECK_CODE(code, lino, _end); - (*pSource->pTotalPages)--; - taosMemoryFreeClear(pn); pSource->rowIndex = 0; } - if (svmSourceIsEmpty(pSource)) { - pSource->rowIndex = 0; - break; - } - - int64_t ts = 0; - code = svmSourceCurrentTs(pSource, idstr, &ts); - QUERY_CHECK_CODE(code, lino, _end); - if (ts > latestTs && ts >= *pSource->pGlobalLatestTs) { - *pRes = SVM_NEXT_FOUND; - break; + // Check the timestamp of the current row + int64_t currentTs = INT64_MIN; + code = svmSourceCurrentTs(pSource, idstr, &currentTs); + if (currentTs > pSource->latestTs) { + pSource->latestTs = currentTs; + if (currentTs >= *pSource->pGlobalLatestTs) { + break; + } } } @@ -306,6 +321,12 @@ static int32_t svmSourceCompare(const void* pLeft, const void* pRight, void* par SStreamVtableMergeSource* pLeftSource = *(SStreamVtableMergeSource**)taosArrayGet(pValidSources, left); SStreamVtableMergeSource* pRightSource = *(SStreamVtableMergeSource**)taosArrayGet(pValidSources, right); + if (svmSourceIsEmpty(pLeftSource)) { + return 1; + } else if (svmSourceIsEmpty(pRightSource)) { + return -1; + } + int64_t leftTs = 0; code = svmSourceCurrentTs(pLeftSource, "", &leftTs); if (code != TSDB_CODE_SUCCESS) { @@ -335,10 +356,14 @@ static SStreamVtableMergeSource* svmAddSource(SStreamVtableMergeHandle* pHandle, QUERY_CHECK_NULL(pSource, code, lino, _end, terrno); pSource->pBuf = pHandle->pBuf; pSource->pTotalPages = &pHandle->numOfPages; + pSource->primaryTsIndex = pHandle->primaryTsIndex; + code = createOneDataBlock(pHandle->datablock, false, &pSource->pInputDataBlock); + QUERY_CHECK_CODE(code, lino, _end); pSource->pageInfoList = tdListNew(sizeof(SVMBufPageInfo)); QUERY_CHECK_NULL(pSource->pageInfoList, code, lino, _end, terrno); code = createOneDataBlock(pHandle->datablock, false, &pSource->pOutputDataBlock); QUERY_CHECK_CODE(code, lino, _end); + pSource->rowIndex = -1; pSource->latestTs = INT64_MIN; pSource->pGlobalLatestTs = &pHandle->globalLatestTs; code = taosHashPut(pHandle->pSources, &uid, sizeof(uid), &pSource, POINTER_BYTES); @@ -387,14 +412,16 @@ static int32_t svmBuildTree(SStreamVtableMergeHandle* pHandle, SVM_NEXT_RESULT* pIter = taosHashIterate(pHandle->pSources, NULL); while (pIter != NULL) { SStreamVtableMergeSource* pSource = *(SStreamVtableMergeSource**)pIter; - if (svmSourceIsEmpty(pSource)) { - code = svmSourceFlushInput(pSource, idstr); + code = svmSourceFlushInput(pSource, idstr); + QUERY_CHECK_CODE(code, lino, _end); + if (pSource->rowIndex == -1) { + code = svmSourceMoveNext(pSource, idstr); QUERY_CHECK_CODE(code, lino, _end); } if (!svmSourceIsEmpty(pSource)) { px = taosArrayPush(pReadySources, &pSource); QUERY_CHECK_NULL(px, code, lino, _end, terrno); - globalExpireTimeMs = TMIN(globalExpireTimeMs, pSource->currentExpireTimeMs); + globalExpireTimeMs = TMIN(globalExpireTimeMs, svmSourceGetExpireTime(pSource)); } pIter = taosHashIterate(pHandle->pSources, pIter); } @@ -427,6 +454,7 @@ static int32_t svmBuildTree(SStreamVtableMergeHandle* pHandle, SVM_NEXT_RESULT* void* param = NULL; code = 
tMergeTreeCreate(&pHandle->pMergeTree, taosArrayGetSize(pReadySources), pReadySources, svmSourceCompare); QUERY_CHECK_CODE(code, lino, _end); + pHandle->numEmptySources = 0; pReadySources = NULL; *pRes = SVM_NEXT_FOUND; @@ -453,7 +481,7 @@ int32_t streamVtableMergeAddBlock(SStreamVtableMergeHandle* pHandle, SSDataBlock QUERY_CHECK_NULL(pHandle, code, lino, _end, TSDB_CODE_INVALID_PARA); QUERY_CHECK_NULL(pDataBlock, code, lino, _end, TSDB_CODE_INVALID_PARA); - pTbUid = pDataBlock->info.id.uid; + pTbUid = pDataBlock->info.id.groupId; px = taosHashGet(pHandle->pSources, &pTbUid, sizeof(int64_t)); if (px == NULL) { @@ -480,8 +508,31 @@ _end: return code; } -int32_t streamVtableMergeNextTuple(SStreamVtableMergeHandle* pHandle, SSDataBlock* pResBlock, SVM_NEXT_RESULT* pRes, - const char* idstr) { +int32_t streamVtableMergeCurrent(SStreamVtableMergeHandle* pHandle, SSDataBlock** ppDataBlock, int32_t* pRowIdx, + const char* idstr) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + + QUERY_CHECK_NULL(pHandle, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(pHandle->pMergeTree, code, lino, _end, TSDB_CODE_INVALID_PARA); + + int32_t idx = tMergeTreeGetChosenIndex(pHandle->pMergeTree); + SArray* pReadySources = pHandle->pMergeTree->param; + void* px = taosArrayGet(pReadySources, idx); + QUERY_CHECK_NULL(px, code, lino, _end, TSDB_CODE_INTERNAL_ERROR); + SStreamVtableMergeSource* pSource = *(SStreamVtableMergeSource**)px; + QUERY_CHECK_NULL(pSource, code, lino, _end, TSDB_CODE_INTERNAL_ERROR); + *ppDataBlock = pSource->pOutputDataBlock; + *pRowIdx = pSource->rowIndex; + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s, id: %s", __func__, lino, tstrerror(code), idstr); + } + return code; +} + +int32_t streamVtableMergeMoveNext(SStreamVtableMergeHandle* pHandle, SVM_NEXT_RESULT* pRes, const char* idstr) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; void* px = NULL; @@ -489,61 +540,74 @@ int32_t streamVtableMergeNextTuple(SStreamVtableMergeHandle* pHandle, SSDataBloc SStreamVtableMergeSource* pSource = NULL; QUERY_CHECK_NULL(pHandle, code, lino, _end, TSDB_CODE_INVALID_PARA); - QUERY_CHECK_NULL(pResBlock, code, lino, _end, TSDB_CODE_INVALID_PARA); QUERY_CHECK_NULL(pRes, code, lino, _end, TSDB_CODE_INVALID_PARA); *pRes = SVM_NEXT_NOT_READY; if (pHandle->pMergeTree == NULL) { - SVM_NEXT_RESULT buildRes = SVM_NEXT_NOT_READY; - code = svmBuildTree(pHandle, &buildRes, idstr); + code = svmBuildTree(pHandle, pRes, idstr); QUERY_CHECK_CODE(code, lino, _end); - if (buildRes == SVM_NEXT_NOT_READY) { - goto _end; - } + goto _end; } - QUERY_CHECK_NULL(pHandle->pMergeTree, code, lino, _end, TSDB_CODE_INTERNAL_ERROR); int32_t idx = tMergeTreeGetChosenIndex(pHandle->pMergeTree); pReadySources = pHandle->pMergeTree->param; px = taosArrayGet(pReadySources, idx); QUERY_CHECK_NULL(px, code, lino, _end, terrno); pSource = *(SStreamVtableMergeSource**)px; - code = blockCopyOneRow(pSource->pOutputDataBlock, pSource->rowIndex, &pResBlock); QUERY_CHECK_CODE(code, lino, _end); - *pRes = SVM_NEXT_FOUND; pHandle->globalLatestTs = TMAX(pSource->latestTs, pHandle->globalLatestTs); - SVM_NEXT_RESULT nextRes = SVM_NEXT_NOT_READY; - int32_t origNumOfPages = pHandle->numOfPages; - code = svmSourceMoveNext(pSource, idstr, &nextRes); + int32_t origNumOfPages = pHandle->numOfPages; + code = svmSourceMoveNext(pSource, idstr); QUERY_CHECK_CODE(code, lino, _end); - bool needDestroy = false; - if (nextRes == SVM_NEXT_NOT_READY) { - needDestroy = true; - } else if 
(taosArrayGetSize((SArray*)pHandle->pMergeTree->param) != pHandle->nSrcTbls && - pHandle->numOfPages != origNumOfPages) { - // The original data for this portion is incomplete. Its merge was forcibly triggered by certain conditions, so we - // must recheck if those conditions are still met. - if (tsStreamVirtualMergeWaitMode == STREAM_VIRTUAL_MERGE_MAX_DELAY) { - int64_t globalExpireTimeMs = INT64_MAX; - for (int32_t i = 0; i < taosArrayGetSize(pReadySources); ++i) { - px = taosArrayGet(pReadySources, i); - QUERY_CHECK_NULL(px, code, lino, _end, TSDB_CODE_INTERNAL_ERROR); - pSource = *(SStreamVtableMergeSource**)px; - globalExpireTimeMs = TMIN(globalExpireTimeMs, pSource->currentExpireTimeMs); - } - needDestroy = taosGetTimestampMs() < globalExpireTimeMs; - } else if (tsStreamVirtualMergeWaitMode == STREAM_VIRTUAL_MERGE_MAX_MEMORY) { - needDestroy = pHandle->numOfPages < pHandle->numPageLimit; - } else { - code = TSDB_CODE_INTERNAL_ERROR; - QUERY_CHECK_CODE(code, lino, _end); - } + if (svmSourceIsEmpty(pSource)) { + ++pHandle->numEmptySources; } + bool needDestroy = false; + if (pHandle->numEmptySources == taosArrayGetSize(pReadySources)) { + // all sources are empty + needDestroy = true; + } else { + code = tMergeTreeAdjust(pHandle->pMergeTree, tMergeTreeGetAdjustIndex(pHandle->pMergeTree)); + QUERY_CHECK_CODE(code, lino, _end); + if (pHandle->numEmptySources > 0) { + if (tsStreamVirtualMergeWaitMode == STREAM_VIRTUAL_MERGE_WAIT_FOREVER) { + idx = tMergeTreeGetChosenIndex(pHandle->pMergeTree); + px = taosArrayGet(pReadySources, idx); + QUERY_CHECK_NULL(px, code, lino, _end, TSDB_CODE_INTERNAL_ERROR); + pSource = *(SStreamVtableMergeSource**)px; + QUERY_CHECK_NULL(pSource, code, lino, _end, TSDB_CODE_INTERNAL_ERROR); + int64_t currentTs = INT64_MIN; + code = svmSourceCurrentTs(pSource, idstr, &currentTs); + QUERY_CHECK_CODE(code, lino, _end); + needDestroy = currentTs > pHandle->globalLatestTs; + } else if (pHandle->numOfPages != origNumOfPages) { + // The original data for this portion is incomplete. Its merge was forcibly triggered by certain conditions, so + // we must recheck if those conditions are still met. 
+ if (tsStreamVirtualMergeWaitMode == STREAM_VIRTUAL_MERGE_MAX_DELAY) { + int64_t globalExpireTimeMs = INT64_MAX; + for (int32_t i = 0; i < taosArrayGetSize(pReadySources); ++i) { + px = taosArrayGet(pReadySources, i); + QUERY_CHECK_NULL(px, code, lino, _end, TSDB_CODE_INTERNAL_ERROR); + pSource = *(SStreamVtableMergeSource**)px; + globalExpireTimeMs = TMIN(globalExpireTimeMs, svmSourceGetExpireTime(pSource)); + } + needDestroy = taosGetTimestampMs() < globalExpireTimeMs; + } else if (tsStreamVirtualMergeWaitMode == STREAM_VIRTUAL_MERGE_MAX_MEMORY) { + needDestroy = pHandle->numOfPages < pHandle->numPageLimit; + } else { + code = TSDB_CODE_INTERNAL_ERROR; + QUERY_CHECK_CODE(code, lino, _end); + } + } + } + } if (needDestroy) { svmDestroyTree(&pHandle->pMergeTree); + } else { + *pRes = SVM_NEXT_FOUND; } _end: @@ -553,8 +617,9 @@ _end: return code; } -int32_t streamVtableMergeCreateHandle(SStreamVtableMergeHandle** ppHandle, int32_t nSrcTbls, int32_t numPageLimit, - SDiskbasedBuf* pBuf, SSDataBlock* pResBlock, const char* idstr) { +int32_t streamVtableMergeCreateHandle(SStreamVtableMergeHandle** ppHandle, int64_t vuid, int32_t nSrcTbls, + int32_t numPageLimit, int32_t primaryTsIndex, SDiskbasedBuf* pBuf, + SSDataBlock* pResBlock, const char* idstr) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; SStreamVtableMergeHandle* pHandle = NULL; @@ -569,6 +634,8 @@ int32_t streamVtableMergeCreateHandle(SStreamVtableMergeHandle** ppHandle, int32 pHandle->pBuf = pBuf; pHandle->numPageLimit = numPageLimit; + pHandle->primaryTsIndex = primaryTsIndex; + pHandle->vuid = vuid; pHandle->nSrcTbls = nSrcTbls; pHandle->pSources = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); QUERY_CHECK_NULL(pHandle->pSources, code, lino, _end, terrno); @@ -590,7 +657,8 @@ _end: return code; } -void streamVtableMergeDestroyHandle(SStreamVtableMergeHandle** ppHandle) { +void streamVtableMergeDestroyHandle(void* ptr) { + SStreamVtableMergeHandle** ppHandle = ptr; if (ppHandle == NULL || *ppHandle == NULL) { return; } @@ -600,8 +668,16 @@ void streamVtableMergeDestroyHandle(SStreamVtableMergeHandle** ppHandle) { taosHashCleanup(pHandle->pSources); pHandle->pSources = NULL; } - + blockDataDestroy(pHandle->datablock); svmDestroyTree(&pHandle->pMergeTree); taosMemoryFreeClear(*ppHandle); } + +int64_t streamVtableMergeHandleGetVuid(SStreamVtableMergeHandle* pHandle) { + if (pHandle != NULL) { + return pHandle->vuid; + } else { + return 0; + } +} diff --git a/source/libs/executor/src/streamclient.c b/source/libs/executor/src/streamclient.c index 920dd57e7d..29f048ba99 100644 --- a/source/libs/executor/src/streamclient.c +++ b/source/libs/executor/src/streamclient.c @@ -98,7 +98,7 @@ static int32_t doProcessSql(SStreamRecParam* pParam, SJson** ppJsonResult) { curlRes = curl_easy_setopt(pCurl, CURLOPT_POSTFIELDS, pParam->pSql); QUERY_CHECK_CONDITION(curlRes == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); - qTrace("===stream=== sql:%s", pParam->pSql); + qDebug("===stream=== sql:%s", pParam->pSql); curlRes = curl_easy_setopt(pCurl, CURLOPT_FOLLOWLOCATION, 1L); QUERY_CHECK_CONDITION(curlRes == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); @@ -110,7 +110,11 @@ static int32_t doProcessSql(SStreamRecParam* pParam, SJson** ppJsonResult) { QUERY_CHECK_CONDITION(curlRes == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); curlRes = curl_easy_perform(pCurl); - QUERY_CHECK_CONDITION(curlRes == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); + if (curlRes != CURLE_OK) { + qError("error: unable to 
request data from %s.since %s. res code:%d", pParam->pUrl, curl_easy_strerror(curlRes), + (int32_t)curlRes); + QUERY_CHECK_CONDITION(curlRes == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); + } _end: if (pHeaders != NULL) { @@ -222,8 +226,16 @@ static int32_t jsonToDataCell(const SJson* pJson, SResultCellData* pCell) { return code; } +static int32_t getColumnIndex(SSHashObj* pMap, int32_t colId) { + void* pVal = tSimpleHashGet(pMap, &colId, sizeof(int32_t)); + if (pVal == NULL) { + return -1; + } + return *(int32_t*)pVal; +} + static int32_t doTransformFillResult(const SJson* pJsonResult, SArray* pRangeRes, void* pEmptyRow, int32_t size, - int32_t* pOffsetInfo, int32_t numOfCols) { + int32_t* pOffsetInfo, int32_t numOfCols, SSHashObj* pMap) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; @@ -239,13 +251,21 @@ static int32_t doTransformFillResult(const SJson* pJsonResult, SArray* pRangeRes SSliceRowData* pRowData = taosMemoryCalloc(1, sizeof(TSKEY) + size); pRowData->key = INT64_MIN; memcpy(pRowData->pRowVal, pEmptyRow, size); - for (int32_t j = 0; j < cols && j < numOfCols; ++j) { - SJson* pJsonCell = tjsonGetArrayItem(pRow, j); - QUERY_CHECK_NULL(pJsonCell, code, lino, _end, TSDB_CODE_FAILED); - + int32_t colOffset = 0; + for (int32_t j = 0; j < numOfCols; ++j) { SResultCellData* pDataCell = getSliceResultCell((SResultCellData*)pRowData->pRowVal, j, pOffsetInfo); QUERY_CHECK_NULL(pDataCell, code, lino, _end, TSDB_CODE_FAILED); + int32_t colIndex = getColumnIndex(pMap, j); + if (colIndex == -1 || colIndex >= cols) { + qDebug("invalid result columm index:%d", colIndex); + pDataCell->isNull = true; + continue; + } + + SJson* pJsonCell = tjsonGetArrayItem(pRow, colIndex); + QUERY_CHECK_NULL(pJsonCell, code, lino, _end, TSDB_CODE_FAILED); + code = jsonToDataCell(pJsonCell, pDataCell); QUERY_CHECK_CODE(code, lino, _end); } @@ -278,7 +298,7 @@ int32_t streamClientGetFillRange(SStreamRecParam* pParam, SWinKey* pKey, SArray* SJson* pJsRes = NULL; code = doProcessSql(pParam, &pJsRes); QUERY_CHECK_CODE(code, lino, _end); - code = doTransformFillResult(pJsRes, pRangeRes, pEmptyRow, size, pOffsetInfo, numOfCols); + code = doTransformFillResult(pJsRes, pRangeRes, pEmptyRow, size, pOffsetInfo, numOfCols, pParam->pColIdMap); QUERY_CHECK_CODE(code, lino, _end); _end: @@ -288,6 +308,33 @@ _end: return code; } +int32_t streamClientCheckCfg(SStreamRecParam* pParam) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + + const char* pTestSql = "select name, ntables, status from information_schema.ins_databases;"; + (void)memset(pParam->pSql, 0, pParam->sqlCapcity); + tstrncpy(pParam->pSql, pTestSql, pParam->sqlCapcity); + + SJson* pJsRes = NULL; + code = doProcessSql(pParam, &pJsRes); + QUERY_CHECK_CODE(code, lino, _end); + SJson* jArray = tjsonGetObjectItem(pJsRes, "data"); + QUERY_CHECK_NULL(jArray, code, lino, _end, TSDB_CODE_FAILED); + + int32_t rows = tjsonGetArraySize(jArray); + if (rows < 2) { + code = TSDB_CODE_INVALID_CFG_VALUE; + qError("invalid taos adapter config value"); + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + #else int32_t streamClientGetResultRange(SStreamRecParam* pParam, SSHashObj* pRangeMap, SArray* pRangeRes) { @@ -297,4 +344,8 @@ int32_t streamClientGetFillRange(SStreamRecParam* pParam, SWinKey* pKey, SArray* return TSDB_CODE_FAILED; } +int32_t streamClientCheckCfg(SStreamRecParam* pParam) { + return TSDB_CODE_FAILED; +} + #endif \ No newline at end of file diff --git 
a/source/libs/executor/src/streamfillnonblockoperator.c b/source/libs/executor/src/streamfillnonblockoperator.c index e9f33a5110..53c20c41ae 100644 --- a/source/libs/executor/src/streamfillnonblockoperator.c +++ b/source/libs/executor/src/streamfillnonblockoperator.c @@ -182,6 +182,20 @@ void doBuildNonblockFillResult(SOperatorInfo* pOperator, SStreamFillSupporter* p } } + if (pBlock->info.rows > 0) { + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + void* tbname = NULL; + int32_t winCode = TSDB_CODE_SUCCESS; + code = pInfo->stateStore.streamStateGetParName(pTaskInfo->streamInfo.pState, pBlock->info.id.groupId, &tbname, + false, &winCode); + QUERY_CHECK_CODE(code, lino, _end); + if (winCode != TSDB_CODE_SUCCESS) { + pBlock->info.parTbName[0] = 0; + } else { + memcpy(pBlock->info.parTbName, tbname, TSDB_TABLE_NAME_LEN); + } + } + _end: if (code != TSDB_CODE_SUCCESS) { qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); @@ -402,8 +416,6 @@ int32_t doStreamNonblockFillNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) case STREAM_INVALID: { code = doApplyStreamScalarCalculation(pOperator, pBlock, pInfo->pSrcBlock); QUERY_CHECK_CODE(code, lino, _end); - - memcpy(pInfo->pSrcBlock->info.parTbName, pBlock->info.parTbName, TSDB_TABLE_NAME_LEN); pInfo->srcRowIndex = -1; } break; case STREAM_CHECKPOINT: { @@ -476,4 +488,39 @@ void destroyStreamNonblockFillOperatorInfo(void* param) { SStreamFillOperatorInfo* pInfo = (SStreamFillOperatorInfo*)param; resetTimeSlicePrevAndNextWindow(pInfo->pFillSup); destroyStreamFillOperatorInfo(param); -} \ No newline at end of file +} + +static int32_t doInitStreamColumnMapInfo(SExprSupp* pExprSup, SSHashObj* pColMap) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + + for (int32_t i = 0; i < pExprSup->numOfExprs; ++i) { + SExprInfo* pOneExpr = &pExprSup->pExprInfo[i]; + int32_t destSlotId = pOneExpr->base.resSchema.slotId; + for (int32_t j = 0; j < pOneExpr->base.numOfParams; ++j) { + SFunctParam* pFuncParam = &pOneExpr->base.pParam[j]; + if (pFuncParam->type == FUNC_PARAM_TYPE_COLUMN) { + int32_t sourceSlotId = pFuncParam->pCol->slotId; + code = tSimpleHashPut(pColMap, &sourceSlotId, sizeof(int32_t), &destSlotId, sizeof(int32_t)); + QUERY_CHECK_CODE(code, lino, _end); + } + } + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s.", __func__, lino, tstrerror(code)); + } + return code; +} + +int32_t initStreamFillOperatorColumnMapInfo(SExprSupp* pExprSup, SOperatorInfo* pOperator) { + if (pOperator != NULL && pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL) { + SStreamFillOperatorInfo* pInfo = (SStreamFillOperatorInfo*)pOperator->info; + if (pInfo->nbSup.recParam.pColIdMap == NULL) { + return TSDB_CODE_SUCCESS; + } + return doInitStreamColumnMapInfo(pExprSup, pInfo->nbSup.recParam.pColIdMap); + } + return TSDB_CODE_SUCCESS; +} diff --git a/source/libs/executor/src/streamfilloperator.c b/source/libs/executor/src/streamfilloperator.c index 847da75619..ea8a5176c8 100644 --- a/source/libs/executor/src/streamfilloperator.c +++ b/source/libs/executor/src/streamfilloperator.c @@ -22,6 +22,7 @@ #include "executorInt.h" #include "streamexecutorInt.h" +#include "streamsession.h" #include "streaminterval.h" #include "tcommon.h" #include "thash.h" @@ -1746,7 +1747,8 @@ static void setValueForFillInfo(SStreamFillSupporter* pFillSup, SStreamFillInfo* } } -int32_t getDownStreamInfo(SOperatorInfo* downstream, int8_t* triggerType, SInterval* pInterval, int16_t* pOperatorFlag) { +int32_t 
getDownStreamInfo(SOperatorInfo* downstream, int8_t* triggerType, SInterval* pInterval, + int16_t* pOperatorFlag) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; if (IS_NORMAL_INTERVAL_OP(downstream)) { @@ -1754,17 +1756,16 @@ int32_t getDownStreamInfo(SOperatorInfo* downstream, int8_t* triggerType, SInter *triggerType = pInfo->twAggSup.calTrigger; *pInterval = pInfo->interval; *pOperatorFlag = pInfo->basic.operatorFlag; - } else if (IS_CONTINUE_INTERVAL_OP(downstream)) { + } else { SStreamIntervalSliceOperatorInfo* pInfo = downstream->info; *triggerType = pInfo->twAggSup.calTrigger; *pInterval = pInfo->interval; pInfo->hasFill = true; *pOperatorFlag = pInfo->basic.operatorFlag; - } else { - code = TSDB_CODE_STREAM_INTERNAL_ERROR; } + QUERY_CHECK_CODE(code, lino, _end); - + _end: if (code != TSDB_CODE_SUCCESS) { qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); @@ -1891,6 +1892,10 @@ int32_t createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFillPhysi initNonBlockAggSupptor(&pInfo->nbSup, &pInfo->pFillSup->interval, downstream); code = initStreamBasicInfo(&pInfo->basic, pOperator); QUERY_CHECK_CODE(code, lino, _error); + + code = streamClientCheckCfg(&pInfo->nbSup.recParam); + QUERY_CHECK_CODE(code, lino, _error); + pInfo->basic.operatorFlag = opFlag; if (isFinalOperator(&pInfo->basic)) { pInfo->nbSup.numOfKeep++; diff --git a/source/libs/executor/src/streamintervalnonblockoperator.c b/source/libs/executor/src/streamintervalnonblockoperator.c index 26693eecac..580825eb19 100644 --- a/source/libs/executor/src/streamintervalnonblockoperator.c +++ b/source/libs/executor/src/streamintervalnonblockoperator.c @@ -223,8 +223,10 @@ int32_t doStreamIntervalNonblockAggImpl(SOperatorInfo* pOperator, SSDataBlock* p code = pInfo->streamAggSup.stateStore.streamStateGetAllPrev(pInfo->streamAggSup.pState, &curKey, pInfo->pUpdated, pInfo->nbSup.numOfKeep); QUERY_CHECK_CODE(code, lino, _end); - code = checkAndSaveWinStateToDisc(startIndex, pInfo->pUpdated, 0, pInfo->basic.pTsDataState, &pInfo->streamAggSup, &pInfo->interval); - QUERY_CHECK_CODE(code, lino, _end); + if (!isRecalculateOperator(&pInfo->basic)) { + code = checkAndSaveWinStateToDisc(startIndex, pInfo->pUpdated, 0, pInfo->basic.pTsDataState, &pInfo->streamAggSup, &pInfo->interval); + QUERY_CHECK_CODE(code, lino, _end); + } } } @@ -704,11 +706,14 @@ int32_t doStreamIntervalNonblockAggNext(SOperatorInfo* pOperator, SSDataBlock** if (pBlock == NULL) { qDebug("===stream===%s return data:%s. 
rev rows:%d", GET_TASKID(pTaskInfo), getStreamOpName(pOperator->operatorType), pInfo->basic.numOfRecv); - if (isFinalOperator(&pInfo->basic) && isRecalculateOperator(&pInfo->basic)) { - code = pAggSup->stateStore.streamStateFlushReaminInfoToDisk(pInfo->basic.pTsDataState); - QUERY_CHECK_CODE(code, lino, _end); - code = buildRetriveRequest(pTaskInfo, pAggSup, pInfo->basic.pTsDataState, &pInfo->nbSup); - QUERY_CHECK_CODE(code, lino, _end); + if (isFinalOperator(&pInfo->basic)) { + if (isRecalculateOperator(&pInfo->basic)) { + code = buildRetriveRequest(pTaskInfo, pAggSup, pInfo->basic.pTsDataState, &pInfo->nbSup); + QUERY_CHECK_CODE(code, lino, _end); + } else { + code = pAggSup->stateStore.streamStateFlushReaminInfoToDisk(pInfo->basic.pTsDataState); + QUERY_CHECK_CODE(code, lino, _end); + } } pOperator->status = OP_RES_TO_RETURN; break; @@ -820,7 +825,7 @@ int32_t doStreamIntervalNonblockAggNext(SOperatorInfo* pOperator, SSDataBlock** code = closeNonblockIntervalWindow(pAggSup->pResultRows, &pInfo->twAggSup, &pInfo->interval, pInfo->pUpdated, pTaskInfo); QUERY_CHECK_CODE(code, lino, _end); - if (!isHistoryOperator(&pInfo->basic)) { + if (!isHistoryOperator(&pInfo->basic) && !isRecalculateOperator(&pInfo->basic)) { code = checkAndSaveWinStateToDisc(0, pInfo->pUpdated, 0, pInfo->basic.pTsDataState, &pInfo->streamAggSup, &pInfo->interval); QUERY_CHECK_CODE(code, lino, _end); } @@ -1052,7 +1057,7 @@ static int32_t doStreamFinalntervalNonblockAggImpl(SOperatorInfo* pOperator, SSD QUERY_CHECK_CODE(code, lino, _end); } - if (!isHistoryOperator(&pInfo->basic)) { + if (!isHistoryOperator(&pInfo->basic) && !isRecalculateOperator(&pInfo->basic)) { code = checkAndSaveWinStateToDisc(0, pInfo->pUpdated, 0, pInfo->basic.pTsDataState, &pInfo->streamAggSup, &pInfo->interval); QUERY_CHECK_CODE(code, lino, _end); } diff --git a/source/libs/executor/src/streamintervalsliceoperator.c b/source/libs/executor/src/streamintervalsliceoperator.c index 60c7ea867d..acbd3a3c76 100644 --- a/source/libs/executor/src/streamintervalsliceoperator.c +++ b/source/libs/executor/src/streamintervalsliceoperator.c @@ -753,18 +753,20 @@ int32_t createStreamIntervalSliceOperatorInfo(SOperatorInfo* downstream, SPhysiN pInfo->hasInterpoFunc = windowinterpNeeded(pExpSup->pCtx, numOfExprs); initNonBlockAggSupptor(&pInfo->nbSup, &pInfo->interval, NULL); - setOperatorInfo(pOperator, "StreamIntervalSliceOperator", pPhyNode->type, true, OP_NOT_OPENED, pInfo, pTaskInfo); + setOperatorInfo(pOperator, "StreamIntervalSliceOperator", nodeType(pPhyNode), true, OP_NOT_OPENED, pInfo, pTaskInfo); code = initStreamBasicInfo(&pInfo->basic, pOperator); QUERY_CHECK_CODE(code, lino, _error); if (pIntervalPhyNode->window.triggerType == STREAM_TRIGGER_CONTINUOUS_WINDOW_CLOSE) { + qDebug("create continuous interval operator. 
op type:%d, task type:%d, task id:%s", nodeType(pPhyNode), + pHandle->fillHistory, GET_TASKID(pTaskInfo)); if (pHandle->fillHistory == STREAM_HISTORY_OPERATOR) { setFillHistoryOperatorFlag(&pInfo->basic); } else if (pHandle->fillHistory == STREAM_RECALCUL_OPERATOR) { setRecalculateOperatorFlag(&pInfo->basic); } pInfo->nbSup.pWindowAggFn = doStreamIntervalNonblockAggImpl; - if (pPhyNode->type == QUERY_NODE_PHYSICAL_PLAN_STREAM_CONTINUE_INTERVAL) { + if (nodeType(pPhyNode) == QUERY_NODE_PHYSICAL_PLAN_STREAM_CONTINUE_INTERVAL) { setSingleOperatorFlag(&pInfo->basic); } pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doStreamIntervalNonblockAggNext, NULL, diff --git a/source/libs/executor/src/streamscanoperator.c b/source/libs/executor/src/streamscanoperator.c index 9566b35bab..80d77e7f3c 100644 --- a/source/libs/executor/src/streamscanoperator.c +++ b/source/libs/executor/src/streamscanoperator.c @@ -83,7 +83,8 @@ int32_t copyRecDataToBuff(TSKEY calStart, TSKEY calEnd, uint64_t uid, uint64_t v return pkLen + sizeof(SRecDataInfo); } -int32_t saveRecalculateData(SStateStore* pStateStore, STableTsDataState* pTsDataState, SSDataBlock* pSrcBlock, EStreamType mode) { +int32_t saveRecalculateData(SStateStore* pStateStore, STableTsDataState* pTsDataState, SSDataBlock* pSrcBlock, + EStreamType mode) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; @@ -92,8 +93,10 @@ int32_t saveRecalculateData(SStateStore* pStateStore, STableTsDataState* pTsData } SColumnInfoData* pSrcStartTsCol = (SColumnInfoData*)taosArrayGet(pSrcBlock->pDataBlock, START_TS_COLUMN_INDEX); SColumnInfoData* pSrcEndTsCol = (SColumnInfoData*)taosArrayGet(pSrcBlock->pDataBlock, END_TS_COLUMN_INDEX); - SColumnInfoData* pSrcCalStartTsCol = (SColumnInfoData*)taosArrayGet(pSrcBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX); - SColumnInfoData* pSrcCalEndTsCol = (SColumnInfoData*)taosArrayGet(pSrcBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX); + SColumnInfoData* pSrcCalStartTsCol = + (SColumnInfoData*)taosArrayGet(pSrcBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX); + SColumnInfoData* pSrcCalEndTsCol = + (SColumnInfoData*)taosArrayGet(pSrcBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX); SColumnInfoData* pSrcUidCol = taosArrayGet(pSrcBlock->pDataBlock, UID_COLUMN_INDEX); SColumnInfoData* pSrcGpCol = taosArrayGet(pSrcBlock->pDataBlock, GROUPID_COLUMN_INDEX); TSKEY* srcStartTsCol = (TSKEY*)pSrcStartTsCol->pData; @@ -113,9 +116,10 @@ int32_t saveRecalculateData(SStateStore* pStateStore, STableTsDataState* pTsData calStart = srcStartTsCol[i]; calEnd = srcEndTsCol[i]; } - int32_t len = copyRecDataToBuff(calStart, calEnd, srcUidData[i], pSrcBlock->info.version, mode, NULL, 0, - pTsDataState->pRecValueBuff, pTsDataState->recValueLen); - code = pStateStore->streamStateSessionSaveToDisk(pTsDataState, &key, pTsDataState->pRecValueBuff, len); + int32_t len = copyRecDataToBuff(calStart, calEnd, srcUidData[i], pSrcBlock->info.version, mode, NULL, 0, + pTsDataState->pRecValueBuff, pTsDataState->recValueLen); + code = pStateStore->streamStateMergeAndSaveScanRange(pTsDataState, &key.win, key.groupId, + pTsDataState->pRecValueBuff, len); QUERY_CHECK_CODE(code, lino, _end); } @@ -332,11 +336,6 @@ static int32_t doStreamBlockScan(SOperatorInfo* pOperator, SSDataBlock** ppRes) case STREAM_CHECKPOINT: { qError("stream check point error. 
msg type: STREAM_INPUT__DATA_BLOCK"); } break; - case STREAM_RETRIEVE: { - code = saveRecalculateData(&pInfo->stateStore, pInfo->basic.pTsDataState, pBlock, STREAM_RETRIEVE); - QUERY_CHECK_CODE(code, lino, _end); - continue; - } break; case STREAM_RECALCULATE_START: { if (!isSemiOperator(&pInfo->basic)) { code = pInfo->stateStore.streamStateFlushReaminInfoToDisk(pInfo->basic.pTsDataState); @@ -388,7 +387,7 @@ static int32_t buildAndSaveRecalculateData(SSDataBlock* pSrcBlock, TSKEY* pTsCol len = copyRecDataToBuff(pTsCol[rowId], pTsCol[rowId], pSrcBlock->info.id.uid, pSrcBlock->info.version, STREAM_CLEAR, NULL, 0, pTsDataState->pRecValueBuff, pTsDataState->recValueLen); SSessionKey key = {.win.skey = pTsCol[rowId], .win.ekey = pTsCol[rowId], .groupId = 0}; - code = pStateStore->streamStateSessionSaveToDisk(pTsDataState, &key, pTsDataState->pRecValueBuff, len); + code = pStateStore->streamState1SessionSaveToDisk(pTsDataState, &key, pTsDataState->pRecValueBuff, len); QUERY_CHECK_CODE(code, lino, _end); uint64_t gpId = 0; code = appendPkToSpecialBlock(pDestBlock, pTsCol, pPkColDataInfo, rowId, &pSrcBlock->info.id.uid, &gpId, NULL); @@ -399,7 +398,7 @@ static int32_t buildAndSaveRecalculateData(SSDataBlock* pSrcBlock, TSKEY* pTsCol len = copyRecDataToBuff(pTsCol[rowId], pTsCol[rowId], pSrcBlock->info.id.uid, pSrcBlock->info.version, STREAM_DELETE_DATA, NULL, 0, pTsDataState->pRecValueBuff, pTsDataState->recValueLen); - code = pStateStore->streamStateSessionSaveToDisk(pTsDataState, &key, pTsDataState->pRecValueBuff, len); + code = pStateStore->streamState1SessionSaveToDisk(pTsDataState, &key, pTsDataState->pRecValueBuff, len); QUERY_CHECK_CODE(code, lino, _end); code = appendPkToSpecialBlock(pDestBlock, pTsCol, pPkColDataInfo, rowId, &pSrcBlock->info.id.uid, &gpId, NULL); @@ -1252,6 +1251,7 @@ static int32_t doDataRangeScan(SStreamScanInfo* pInfo, SExecTaskInfo* pTaskInfo, if (pInfo->pCreateTbRes->info.rows > 0) { (*ppRes) = pInfo->pCreateTbRes; pInfo->scanMode = STREAM_SCAN_FROM_RES; + break; } (*ppRes) = pTsdbBlock; break; @@ -1341,6 +1341,7 @@ static int32_t doStreamRecalculateDataScan(SOperatorInfo* pOperator, SSDataBlock (*ppRes) = pInfo->pRangeScanRes; pInfo->pRangeScanRes = NULL; pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; + printDataBlock((*ppRes), "stream tsdb scan", GET_TASKID(pTaskInfo)); goto _end; } break; case STREAM_SCAN_FROM_CREATE_TABLERES: { @@ -1513,6 +1514,11 @@ _end: return code; } +static void destroyStreamRecalculateParam(SStreamRecParam* pParam) { + tSimpleHashCleanup(pParam->pColIdMap); + pParam->pColIdMap = NULL; +} + static void destroyStreamDataScanOperatorInfo(void* param) { if (param == NULL) { return; @@ -1562,6 +1568,8 @@ static void destroyStreamDataScanOperatorInfo(void* param) { taosArrayDestroy(pStreamScan->pRecRangeRes); pStreamScan->pRecRangeRes = NULL; + destroyStreamRecalculateParam(&pStreamScan->recParam); + taosMemoryFree(pStreamScan); } @@ -1633,6 +1641,9 @@ static void initStreamRecalculateParam(STableScanPhysiNode* pTableScanNode, SStr pParam->sqlCapcity = tListLen(pParam->pSql); (void)tsnprintf(pParam->pUrl, tListLen(pParam->pUrl), "http://%s:%d/rest/sql", tsAdapterFqdn, tsAdapterPort); (void)tsnprintf(pParam->pAuth, tListLen(pParam->pAuth), "Authorization: Basic %s", tsAdapterToken); + + _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); + pParam->pColIdMap = tSimpleHashInit(32, hashFn); } int32_t createStreamDataScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode* pTableScanNode, SNode* pTagCond, diff --git 
a/source/libs/executor/src/streamtimesliceoperator.c b/source/libs/executor/src/streamtimesliceoperator.c index 1b454f63ba..e1e13f8aed 100644 --- a/source/libs/executor/src/streamtimesliceoperator.c +++ b/source/libs/executor/src/streamtimesliceoperator.c @@ -456,7 +456,10 @@ static int32_t fillPointResult(SStreamFillSupporter* pFillSup, SResultRowData* p qError("%s failed at line %d since fill errror", __func__, __LINE__); } } else { - int32_t srcSlot = pFillCol->pExpr->base.pParam[0].pCol->slotId; + int32_t srcSlot = pFillCol->pExpr->base.pParam[0].pCol->slotId; + if (pFillSup->normalFill) { + srcSlot = dstSlotId; + } SResultCellData* pCell = NULL; if (IS_FILL_CONST_VALUE(pFillSup->type) && (isGroupKeyFunc(pFillCol->pExpr) || isSelectGroupConstValueFunc(pFillCol->pExpr))) { @@ -532,7 +535,10 @@ static void fillLinearRange(SStreamFillSupporter* pFillSup, SStreamFillInfo* pFi qError("%s failed at line %d since fill errror", __func__, lino); } } else if (isInterpFunc(pFillCol->pExpr) || pFillSup->normalFill) { - int32_t srcSlot = pFillCol->pExpr->base.pParam[0].pCol->slotId; + int32_t srcSlot = pFillCol->pExpr->base.pParam[0].pCol->slotId; + if (pFillSup->normalFill) { + srcSlot = dstSlotId; + } SResultCellData* pCell = getSliceResultCell(pFillInfo->pResRow->pRowVal, srcSlot, pFillSup->pOffsetInfo); if (IS_VAR_DATA_TYPE(type) || type == TSDB_DATA_TYPE_BOOL || pCell->isNull) { colDataSetNULL(pDstCol, index); diff --git a/source/libs/executor/src/virtualtablescanoperator.c b/source/libs/executor/src/virtualtablescanoperator.c index 2838d107af..960cff5f17 100644 --- a/source/libs/executor/src/virtualtablescanoperator.c +++ b/source/libs/executor/src/virtualtablescanoperator.c @@ -680,499 +680,305 @@ int32_t createVirtualTableMergeOperatorInfo(SOperatorInfo** pDownstream, SReadHa STableListInfo* pTableListInfo, int32_t numOfDownstream, SVirtualScanPhysiNode* pVirtualScanPhyNode, SExecTaskInfo* pTaskInfo, SOperatorInfo** pOptrInfo) { - SPhysiNode* pPhyNode = (SPhysiNode*)pVirtualScanPhyNode; - int32_t lino = 0; - int32_t code = TSDB_CODE_SUCCESS; - SVirtualScanMergeOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SVirtualScanMergeOperatorInfo)); - SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); - SDataBlockDescNode* pDescNode = pPhyNode->pOutputDataBlockDesc; - SNodeList* pMergeKeys = NULL; + SPhysiNode* pPhyNode = (SPhysiNode*)pVirtualScanPhyNode; + int32_t lino = 0; + int32_t code = TSDB_CODE_SUCCESS; + SVirtualScanMergeOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SVirtualScanMergeOperatorInfo)); + SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); + SDataBlockDescNode* pDescNode = pPhyNode->pOutputDataBlockDesc; + SNodeList* pMergeKeys = NULL; - QUERY_CHECK_NULL(pInfo, code, lino, _return, terrno); - QUERY_CHECK_NULL(pOperator, code, lino, _return, terrno); + QUERY_CHECK_NULL(pInfo, code, lino, _return, terrno); + QUERY_CHECK_NULL(pOperator, code, lino, _return, terrno); - pInfo->binfo.inputTsOrder = pVirtualScanPhyNode->scan.node.inputTsOrder; - pInfo->binfo.outputTsOrder = pVirtualScanPhyNode->scan.node.outputTsOrder; + pInfo->binfo.inputTsOrder = pVirtualScanPhyNode->scan.node.inputTsOrder; + pInfo->binfo.outputTsOrder = pVirtualScanPhyNode->scan.node.outputTsOrder; - SVirtualTableScanInfo* pVirtualScanInfo = &pInfo->virtualScanInfo; - pInfo->binfo.pRes = createDataBlockFromDescNode(pDescNode); - TSDB_CHECK_NULL(pInfo->binfo.pRes, code, lino, _return, terrno); + SVirtualTableScanInfo* pVirtualScanInfo = &pInfo->virtualScanInfo; + 
pInfo->binfo.pRes = createDataBlockFromDescNode(pDescNode); + TSDB_CHECK_NULL(pInfo->binfo.pRes, code, lino, _return, terrno); - SSDataBlock* pInputBlock = createDataBlockFromDescNode(pPhyNode->pOutputDataBlockDesc); - TSDB_CHECK_NULL(pInputBlock, code, lino, _return, terrno); - pVirtualScanInfo->pInputBlock = pInputBlock; + SSDataBlock* pInputBlock = createDataBlockFromDescNode(pPhyNode->pOutputDataBlockDesc); + TSDB_CHECK_NULL(pInputBlock, code, lino, _return, terrno); + pVirtualScanInfo->pInputBlock = pInputBlock; - if (pVirtualScanPhyNode->scan.pScanPseudoCols != NULL) { - SExprSupp* pSup = &pVirtualScanInfo->base.pseudoSup; - pSup->pExprInfo = NULL; - VTS_ERR_JRET(createExprInfo(pVirtualScanPhyNode->scan.pScanPseudoCols, NULL, &pSup->pExprInfo, &pSup->numOfExprs)); + if (pVirtualScanPhyNode->scan.pScanPseudoCols != NULL) { + SExprSupp* pSup = &pVirtualScanInfo->base.pseudoSup; + pSup->pExprInfo = NULL; + VTS_ERR_JRET(createExprInfo(pVirtualScanPhyNode->scan.pScanPseudoCols, NULL, &pSup->pExprInfo, &pSup->numOfExprs)); - pSup->pCtx = createSqlFunctionCtx(pSup->pExprInfo, pSup->numOfExprs, &pSup->rowEntryInfoOffset, - &pTaskInfo->storageAPI.functionStore); - TSDB_CHECK_NULL(pSup->pCtx, code, lino, _return, terrno); - } + pSup->pCtx = createSqlFunctionCtx(pSup->pExprInfo, pSup->numOfExprs, &pSup->rowEntryInfoOffset, + &pTaskInfo->storageAPI.functionStore); + TSDB_CHECK_NULL(pSup->pCtx, code, lino, _return, terrno); + } - initResultSizeInfo(&pOperator->resultInfo, 1024); - TSDB_CHECK_CODE(blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity), lino, _return); + initResultSizeInfo(&pOperator->resultInfo, 1024); + TSDB_CHECK_CODE(blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity), lino, _return); - size_t numOfCols = taosArrayGetSize(pInfo->binfo.pRes->pDataBlock); - int32_t rowSize = pInfo->binfo.pRes->info.rowSize; + size_t numOfCols = taosArrayGetSize(pInfo->binfo.pRes->pDataBlock); + int32_t rowSize = pInfo->binfo.pRes->info.rowSize; - if (!pVirtualScanPhyNode->scan.node.dynamicOp) { - VTS_ERR_JRET(makeTSMergeKey(&pMergeKeys, 0)); - pVirtualScanInfo->pSortInfo = createSortInfo(pMergeKeys); - TSDB_CHECK_NULL(pVirtualScanInfo->pSortInfo, code, lino, _return, terrno); - } - pVirtualScanInfo->bufPageSize = getProperSortPageSize(rowSize, numOfCols); - pVirtualScanInfo->sortBufSize = - pVirtualScanInfo->bufPageSize * (numOfDownstream + 1); // one additional is reserved for merged result. - VTS_ERR_JRET(extractColMap(pVirtualScanPhyNode->pTargets, &pVirtualScanInfo->dataSlotMap, &pVirtualScanInfo->tsSlotId)); + if (!pVirtualScanPhyNode->scan.node.dynamicOp) { + VTS_ERR_JRET(makeTSMergeKey(&pMergeKeys, 0)); + pVirtualScanInfo->pSortInfo = createSortInfo(pMergeKeys); + TSDB_CHECK_NULL(pVirtualScanInfo->pSortInfo, code, lino, _return, terrno); + } + pVirtualScanInfo->bufPageSize = getProperSortPageSize(rowSize, numOfCols); + pVirtualScanInfo->sortBufSize = + pVirtualScanInfo->bufPageSize * (numOfDownstream + 1); // one additional is reserved for merged result. 
+ VTS_ERR_JRET( + extractColMap(pVirtualScanPhyNode->pTargets, &pVirtualScanInfo->dataSlotMap, &pVirtualScanInfo->tsSlotId)); - pVirtualScanInfo->scanAllCols = pVirtualScanPhyNode->scanAllCols; + pVirtualScanInfo->scanAllCols = pVirtualScanPhyNode->scanAllCols; - VTS_ERR_JRET(filterInitFromNode((SNode*)pVirtualScanPhyNode->scan.node.pConditions, &pOperator->exprSupp.pFilterInfo, 0)); + VTS_ERR_JRET( + filterInitFromNode((SNode*)pVirtualScanPhyNode->scan.node.pConditions, &pOperator->exprSupp.pFilterInfo, 0)); - pVirtualScanInfo->base.metaCache.pTableMetaEntryCache = taosLRUCacheInit(1024 * 128, -1, .5); - QUERY_CHECK_NULL(pVirtualScanInfo->base.metaCache.pTableMetaEntryCache, code, lino, _return, terrno); - pVirtualScanInfo->base.readHandle = *readHandle; - pVirtualScanInfo->base.pTableListInfo = pTableListInfo; + pVirtualScanInfo->base.metaCache.pTableMetaEntryCache = taosLRUCacheInit(1024 * 128, -1, .5); + QUERY_CHECK_NULL(pVirtualScanInfo->base.metaCache.pTableMetaEntryCache, code, lino, _return, terrno); + pVirtualScanInfo->base.readHandle = *readHandle; + pVirtualScanInfo->base.pTableListInfo = pTableListInfo; - setOperatorInfo(pOperator, "VirtualTableScanOperator", QUERY_NODE_PHYSICAL_PLAN_VIRTUAL_TABLE_SCAN, false, - OP_NOT_OPENED, pInfo, pTaskInfo); - pOperator->fpSet = - createOperatorFpSet(openVirtualTableScanOperator, virtualTableGetNext, NULL, destroyVirtualTableScanOperatorInfo, - optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL); + setOperatorInfo(pOperator, "VirtualTableScanOperator", QUERY_NODE_PHYSICAL_PLAN_VIRTUAL_TABLE_SCAN, false, + OP_NOT_OPENED, pInfo, pTaskInfo); + pOperator->fpSet = + createOperatorFpSet(openVirtualTableScanOperator, virtualTableGetNext, NULL, destroyVirtualTableScanOperatorInfo, + optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL); - if (NULL != pDownstream) { - VTS_ERR_JRET(appendDownstream(pOperator, pDownstream, numOfDownstream)); - } + if (NULL != pDownstream) { + VTS_ERR_JRET(appendDownstream(pOperator, pDownstream, numOfDownstream)); + } - - nodesDestroyList(pMergeKeys); - *pOptrInfo = pOperator; - return TSDB_CODE_SUCCESS; + nodesDestroyList(pMergeKeys); + *pOptrInfo = pOperator; + return TSDB_CODE_SUCCESS; _return: if (code != TSDB_CODE_SUCCESS) { qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); } - if (pInfo != NULL) { - destroyVirtualTableScanOperatorInfo(pInfo); - } - nodesDestroyList(pMergeKeys); - pTaskInfo->code = code; - destroyOperatorAndDownstreams(pOperator, pDownstream, numOfDownstream); - return code; + if (pInfo != NULL) { + destroyVirtualTableScanOperatorInfo(pInfo); + } + nodesDestroyList(pMergeKeys); + pTaskInfo->code = code; + destroyOperatorAndDownstreams(pOperator, pDownstream, numOfDownstream); + return code; } static int32_t doStreamVtableMergeNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { - // NOTE: this operator does never check if current status is done or not - int32_t code = TSDB_CODE_SUCCESS; - int32_t lino = 0; - SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; - const char* id = GET_TASKID(pTaskInfo); - - SStorageAPI* pAPI = &pTaskInfo->storageAPI; + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + const char* id = GET_TASKID(pTaskInfo); SStreamScanInfo* pInfo = pOperator->info; - SStreamTaskInfo* pStreamInfo = &pTaskInfo->streamInfo; + SSDataBlock* pResBlock = pInfo->pRes; + SArray* pVTables = pTaskInfo->streamInfo.pVTables; + void* pIter = NULL; - qDebug("stream scan started, %s", id); + (*ppRes) = NULL; + if 
(pOperator->status == OP_EXEC_DONE) { + goto _end; + } + + qDebug("===stream=== stream vtable merge next, taskId:%s", id); // TODO(kjq): add fill history recover step - size_t total = taosArrayGetSize(pInfo->pBlockLists); -// TODO: refactor -FETCH_NEXT_BLOCK: - if (pInfo->blockType == STREAM_INPUT__DATA_BLOCK) { - if (pInfo->validBlockIndex >= total) { - doClearBufferedBlocks(pInfo); - (*ppRes) = NULL; - return code; - } + if (pInfo->pVtableMergeHandles == NULL) { + pInfo->pVtableMergeHandles = taosHashInit(taosArrayGetSize(pVTables), + taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + QUERY_CHECK_NULL(pInfo->pVtableMergeHandles, code, lino, _end, terrno); + taosHashSetFreeFp(pInfo->pVtableMergeHandles, streamVtableMergeDestroyHandle); - int32_t current = pInfo->validBlockIndex++; - qDebug("process %d/%d input data blocks, %s", current, (int32_t)total, id); - - SPackedData* pPacked = taosArrayGet(pInfo->pBlockLists, current); - QUERY_CHECK_NULL(pPacked, code, lino, _end, terrno); - - SSDataBlock* pBlock = pPacked->pDataBlock; - if (pBlock->info.parTbName[0]) { - code = - pAPI->stateStore.streamStatePutParName(pStreamInfo->pState, pBlock->info.id.groupId, pBlock->info.parTbName); - QUERY_CHECK_CODE(code, lino, _end); - } - - // TODO move into scan - pBlock->info.calWin.skey = INT64_MIN; - pBlock->info.calWin.ekey = INT64_MAX; - pBlock->info.dataLoad = 1; - if (pInfo->pUpdateInfo) { - pInfo->pUpdateInfo->maxDataVersion = TMAX(pInfo->pUpdateInfo->maxDataVersion, pBlock->info.version); - } - - code = blockDataUpdateTsWindow(pBlock, 0); - QUERY_CHECK_CODE(code, lino, _end); - switch (pBlock->info.type) { - case STREAM_NORMAL: - case STREAM_GET_ALL: - printDataBlock(pBlock, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo)); - setStreamOperatorState(&pInfo->basic, pBlock->info.type); - (*ppRes) = pBlock; - return code; - case STREAM_RETRIEVE: { - pInfo->blockType = STREAM_INPUT__DATA_SUBMIT; - pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RETRIEVE; - code = copyDataBlock(pInfo->pUpdateRes, pBlock); - QUERY_CHECK_CODE(code, lino, _end); - pInfo->updateResIndex = 0; - prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex, NULL); - pAPI->stateStore.updateInfoAddCloseWindowSBF(pInfo->pUpdateInfo); - } break; - case STREAM_DELETE_DATA: { - printSpecDataBlock(pBlock, getStreamOpName(pOperator->operatorType), "delete recv", GET_TASKID(pTaskInfo)); - SSDataBlock* pDelBlock = NULL; - if (pInfo->tqReader) { - code = createSpecialDataBlock(STREAM_DELETE_DATA, &pDelBlock); - QUERY_CHECK_CODE(code, lino, _end); - - code = filterDelBlockByUid(pDelBlock, pBlock, pInfo->tqReader, &pInfo->readerFn); - QUERY_CHECK_CODE(code, lino, _end); - } else { - pDelBlock = pBlock; - } - - code = setBlockGroupIdByUid(pInfo, pDelBlock); - QUERY_CHECK_CODE(code, lino, _end); - code = rebuildDeleteBlockData(pDelBlock, &pStreamInfo->fillHistoryWindow, id); - QUERY_CHECK_CODE(code, lino, _end); - printSpecDataBlock(pDelBlock, getStreamOpName(pOperator->operatorType), "delete recv filtered", - GET_TASKID(pTaskInfo)); - if (pDelBlock->info.rows == 0) { - if (pInfo->tqReader) { - blockDataDestroy(pDelBlock); - } - goto FETCH_NEXT_BLOCK; - } - - if (!isStreamWindow(pInfo)) { - code = generateDeleteResultBlock(pInfo, pDelBlock, pInfo->pDeleteDataRes); - QUERY_CHECK_CODE(code, lino, _end); - if (pInfo->partitionSup.needCalc) { - pInfo->pDeleteDataRes->info.type = STREAM_DELETE_DATA; - } else { - pInfo->pDeleteDataRes->info.type = STREAM_DELETE_RESULT; - } - blockDataDestroy(pDelBlock); 
- - if (pInfo->pDeleteDataRes->info.rows > 0) { - printSpecDataBlock(pInfo->pDeleteDataRes, getStreamOpName(pOperator->operatorType), "delete result", - GET_TASKID(pTaskInfo)); - setStreamOperatorState(&pInfo->basic, pInfo->pDeleteDataRes->info.type); - (*ppRes) = pInfo->pDeleteDataRes; - return code; - } else { - goto FETCH_NEXT_BLOCK; - } - } else { - pInfo->blockType = STREAM_INPUT__DATA_SUBMIT; - pInfo->updateResIndex = 0; - code = generateScanRange(pInfo, pDelBlock, pInfo->pUpdateRes, STREAM_DELETE_DATA); - QUERY_CHECK_CODE(code, lino, _end); - prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex, NULL); - code = copyDataBlock(pInfo->pDeleteDataRes, pInfo->pUpdateRes); - QUERY_CHECK_CODE(code, lino, _end); - pInfo->pDeleteDataRes->info.type = STREAM_DELETE_DATA; - if (pInfo->tqReader) { - blockDataDestroy(pDelBlock); - } - if (pInfo->pDeleteDataRes->info.rows > 0) { - pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE; - printSpecDataBlock(pInfo->pDeleteDataRes, getStreamOpName(pOperator->operatorType), "delete result", - GET_TASKID(pTaskInfo)); - setStreamOperatorState(&pInfo->basic, pInfo->pDeleteDataRes->info.type); - (*ppRes) = pInfo->pDeleteDataRes; - return code; - } else { - goto FETCH_NEXT_BLOCK; - } - } - } break; - case STREAM_GET_RESULT: { - pInfo->blockType = STREAM_INPUT__DATA_SUBMIT; - pInfo->updateResIndex = 0; - pInfo->lastScanRange = pBlock->info.window; - TSKEY endKey = taosTimeGetIntervalEnd(pBlock->info.window.skey, &pInfo->interval); - if (pInfo->useGetResultRange == true) { - endKey = pBlock->info.window.ekey; - } - code = copyGetResultBlock(pInfo->pUpdateRes, pBlock->info.window.skey, endKey); - QUERY_CHECK_CODE(code, lino, _end); - pInfo->pUpdateInfo->maxDataVersion = -1; - prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex, NULL); - pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE; - } break; - case STREAM_DROP_CHILD_TABLE: { - int32_t deleteNum = 0; - code = deletePartName(&pInfo->stateStore, pInfo->pStreamScanOp->pTaskInfo->streamInfo.pState, pBlock, &deleteNum); - QUERY_CHECK_CODE(code, lino, _end); - if (deleteNum == 0) { - printSpecDataBlock(pBlock, getStreamOpName(pOperator->operatorType), "block recv", GET_TASKID(pTaskInfo)); - qDebug("===stream=== ignore block type 18, delete num is 0"); - goto FETCH_NEXT_BLOCK; - } - } break; - case STREAM_CHECKPOINT: { - qError("stream check point error. 
msg type: STREAM_INPUT__DATA_BLOCK"); - } break; - default: - break; - } - printSpecDataBlock(pBlock, getStreamOpName(pOperator->operatorType), "block recv", GET_TASKID(pTaskInfo)); - setStreamOperatorState(&pInfo->basic, pBlock->info.type); - (*ppRes) = pBlock; - return code; - } else if (pInfo->blockType == STREAM_INPUT__DATA_SUBMIT) { - qDebug("stream scan mode:%d, %s", pInfo->scanMode, id); - switch (pInfo->scanMode) { - case STREAM_SCAN_FROM_RES: { - pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; - code = doCheckUpdate(pInfo, pInfo->pRes->info.window.ekey, pInfo->pRes); - QUERY_CHECK_CODE(code, lino, _end); - setStreamOperatorState(&pInfo->basic, pInfo->pRes->info.type); - code = doFilter(pInfo->pRes, pOperator->exprSupp.pFilterInfo, NULL); - QUERY_CHECK_CODE(code, lino, _end); - pInfo->pRes->info.dataLoad = 1; - code = blockDataUpdateTsWindow(pInfo->pRes, pInfo->primaryTsIndex); - QUERY_CHECK_CODE(code, lino, _end); - if (pInfo->pRes->info.rows > 0) { - printDataBlock(pInfo->pRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo)); - (*ppRes) = pInfo->pRes; - return code; - } - } break; - case STREAM_SCAN_FROM_DELETE_DATA: { - code = generateScanRange(pInfo, pInfo->pUpdateDataRes, pInfo->pUpdateRes, STREAM_PARTITION_DELETE_DATA); - QUERY_CHECK_CODE(code, lino, _end); - if (pInfo->pUpdateRes->info.rows > 0) { - prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex, NULL); - pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE; - code = copyDataBlock(pInfo->pDeleteDataRes, pInfo->pUpdateRes); - QUERY_CHECK_CODE(code, lino, _end); - pInfo->pDeleteDataRes->info.type = STREAM_DELETE_DATA; - (*ppRes) = pInfo->pDeleteDataRes; - return code; - } - qError("%s===stream=== %s failed at line %d since pInfo->pUpdateRes is empty", GET_TASKID(pTaskInfo), __func__, - __LINE__); - blockDataCleanup(pInfo->pUpdateDataRes); - pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; - } break; - case STREAM_SCAN_FROM_UPDATERES: { - code = generateScanRange(pInfo, pInfo->pUpdateDataRes, pInfo->pUpdateRes, STREAM_CLEAR); - QUERY_CHECK_CODE(code, lino, _end); - if (pInfo->pUpdateRes->info.rows > 0) { - prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex, NULL); - pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE; - (*ppRes) = pInfo->pUpdateRes; - return code; - } - qError("%s===stream=== %s failed at line %d since pInfo->pUpdateRes is empty", GET_TASKID(pTaskInfo), __func__, - __LINE__); - blockDataCleanup(pInfo->pUpdateDataRes); - pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; - } break; - case STREAM_SCAN_FROM_DATAREADER_RANGE: - case STREAM_SCAN_FROM_DATAREADER_RETRIEVE: { - if (pInfo->pRangeScanRes != NULL) { - (*ppRes) = pInfo->pRangeScanRes; - pInfo->pRangeScanRes = NULL; - return code; - } - SSDataBlock* pSDB = NULL; - code = doRangeScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex, &pSDB); - QUERY_CHECK_CODE(code, lino, _end); - if (pSDB) { - STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info; - pSDB->info.type = pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER_RANGE ? 
STREAM_NORMAL : STREAM_PULL_DATA; - if (!pInfo->igCheckUpdate && pInfo->pUpdateInfo) { - code = checkUpdateData(pInfo, true, pSDB, false); - QUERY_CHECK_CODE(code, lino, _end); - } - printSpecDataBlock(pSDB, getStreamOpName(pOperator->operatorType), "update", GET_TASKID(pTaskInfo)); - code = calBlockTbName(pInfo, pSDB, 0); - QUERY_CHECK_CODE(code, lino, _end); - - if (pInfo->pCreateTbRes->info.rows > 0) { - printSpecDataBlock(pInfo->pCreateTbRes, getStreamOpName(pOperator->operatorType), "update", - GET_TASKID(pTaskInfo)); - (*ppRes) = pInfo->pCreateTbRes; - pInfo->pRangeScanRes = pSDB; - return code; - } - - (*ppRes) = pSDB; - return code; - } - blockDataCleanup(pInfo->pUpdateDataRes); - pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; - } break; - default: - break; - } - - if (hasScanRange(pInfo)) { - pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE; - pInfo->updateResIndex = 0; - SStreamAggSupporter* pSup = pInfo->windowSup.pStreamAggSup; - code = copyDataBlock(pInfo->pUpdateRes, pSup->pScanBlock); - QUERY_CHECK_CODE(code, lino, _end); - blockDataCleanup(pSup->pScanBlock); - prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex, NULL); - pInfo->pUpdateRes->info.type = STREAM_DELETE_DATA; - printSpecDataBlock(pInfo->pUpdateRes, getStreamOpName(pOperator->operatorType), "rebuild", GET_TASKID(pTaskInfo)); - (*ppRes) = pInfo->pUpdateRes; - return code; - } - - SDataBlockInfo* pBlockInfo = &pInfo->pRes->info; - int32_t totalBlocks = taosArrayGetSize(pInfo->pBlockLists); - - NEXT_SUBMIT_BLK: - while (1) { - if (pInfo->readerFn.tqReaderCurrentBlockConsumed(pInfo->tqReader)) { - if (pInfo->validBlockIndex >= totalBlocks) { - pAPI->stateStore.updateInfoDestoryColseWinSBF(pInfo->pUpdateInfo); - doClearBufferedBlocks(pInfo); - - qDebug("stream scan return empty, all %d submit blocks consumed, %s", totalBlocks, id); - (*ppRes) = NULL; - return code; - } - - int32_t current = pInfo->validBlockIndex++; - SPackedData* pSubmit = taosArrayGet(pInfo->pBlockLists, current); - QUERY_CHECK_NULL(pSubmit, code, lino, _end, terrno); - - qDebug("set %d/%d as the input submit block, %s", current + 1, totalBlocks, id); - if (pAPI->tqReaderFn.tqReaderSetSubmitMsg(pInfo->tqReader, pSubmit->msgStr, pSubmit->msgLen, pSubmit->ver, - NULL) < 0) { - qError("submit msg messed up when initializing stream submit block %p, current %d/%d, %s", pSubmit, current, - totalBlocks, id); - continue; - } - } - - blockDataCleanup(pInfo->pRes); - - while (pAPI->tqReaderFn.tqNextBlockImpl(pInfo->tqReader, id)) { - SSDataBlock* pRes = NULL; - - code = pAPI->tqReaderFn.tqRetrieveBlock(pInfo->tqReader, &pRes, id); - qDebug("retrieve data from submit completed code:%s rows:%" PRId64 " %s", tstrerror(code), pRes->info.rows, id); - - if (code != TSDB_CODE_SUCCESS || pRes->info.rows == 0) { - qDebug("retrieve data failed, try next block in submit block, %s", id); - continue; - } - - code = setBlockIntoRes(pInfo, pRes, &pStreamInfo->fillHistoryWindow, false); - if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) { - pInfo->pRes->info.rows = 0; - code = TSDB_CODE_SUCCESS; - } else { - QUERY_CHECK_CODE(code, lino, _end); - } - - if (pInfo->pRes->info.rows == 0) { - continue; - } - - if (pInfo->pCreateTbRes->info.rows > 0) { - pInfo->scanMode = STREAM_SCAN_FROM_RES; - qDebug("create table res exists, rows:%" PRId64 " return from stream scan, %s", - pInfo->pCreateTbRes->info.rows, id); - (*ppRes) = pInfo->pCreateTbRes; - return code; - } - - code = doCheckUpdate(pInfo, pBlockInfo->window.ekey, pInfo->pRes); - QUERY_CHECK_CODE(code, lino, 
_end); - setStreamOperatorState(&pInfo->basic, pInfo->pRes->info.type); - code = doFilter(pInfo->pRes, pOperator->exprSupp.pFilterInfo, NULL); - QUERY_CHECK_CODE(code, lino, _end); - - code = blockDataUpdateTsWindow(pInfo->pRes, pInfo->primaryTsIndex); - QUERY_CHECK_CODE(code, lino, _end); - - int64_t numOfUpdateRes = pInfo->pUpdateDataRes->info.rows; - qDebug("%s %" PRId64 " rows in datablock, update res:%" PRId64, id, pBlockInfo->rows, numOfUpdateRes); - if (pBlockInfo->rows > 0 || numOfUpdateRes > 0) { - break; - } - } - - if (pBlockInfo->rows > 0 || pInfo->pUpdateDataRes->info.rows > 0) { - break; - } else { + int32_t nTables = taosArrayGetSize(pVTables); + int32_t numPagePerTable = getNumOfInMemBufPages(pInfo->pVtableMergeBuf) / nTables; + for (int32_t i = 0; i < nTables; ++i) { + SVCTableMergeInfo* pTableInfo = taosArrayGet(pVTables, i); + if (pTableInfo == NULL || pTableInfo->numOfSrcTbls == 0) { continue; } + QUERY_CHECK_CONDITION(pTableInfo->numOfSrcTbls <= numPagePerTable, code, lino, _end, terrno); + SStreamVtableMergeHandle* pMergeHandle = NULL; + code = streamVtableMergeCreateHandle(&pMergeHandle, pTableInfo->uid, pTableInfo->numOfSrcTbls, numPagePerTable, + pInfo->primaryTsIndex, pInfo->pVtableMergeBuf, pInfo->pRes, id); + QUERY_CHECK_CODE(code, lino, _end); + code = taosHashPut(pInfo->pVtableMergeHandles, &pTableInfo->uid, sizeof(pTableInfo->uid), &pMergeHandle, + POINTER_BYTES); + if (code != TSDB_CODE_SUCCESS) { + streamVtableMergeDestroyHandle(&pMergeHandle); + } + QUERY_CHECK_CODE(code, lino, _end); + } + } + + while (pOperator->status != OP_RES_TO_RETURN) { + SSDataBlock* pBlock = NULL; + SOperatorInfo* downStream = pOperator->pDownstream[0]; + + code = downStream->fpSet.getNextFn(downStream, &pBlock); + QUERY_CHECK_CODE(code, lino, _end); + + if (pBlock == NULL) { + pOperator->status = OP_RES_TO_RETURN; + break; } - // record the scan action. 
- pInfo->numOfExec++; - pOperator->resultInfo.totalRows += pBlockInfo->rows; - - qDebug("stream scan completed, and return source rows:%" PRId64 ", %s", pBlockInfo->rows, id); - if (pBlockInfo->rows > 0) { - printDataBlock(pInfo->pRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo)); - (*ppRes) = pInfo->pRes; - return code; + int32_t inputNCols = taosArrayGetSize(pBlock->pDataBlock); + int32_t resNCols = taosArrayGetSize(pResBlock->pDataBlock); + QUERY_CHECK_CONDITION(inputNCols <= resNCols, code, lino, _end, TSDB_CODE_INTERNAL_ERROR); + for (int32_t i = 0; i < inputNCols; ++i) { + SColumnInfoData *p1 = taosArrayGet(pResBlock->pDataBlock, i); + QUERY_CHECK_NULL(p1, code, lino, _end, terrno); + SColumnInfoData *p2 = taosArrayGet(pBlock->pDataBlock, i); + QUERY_CHECK_NULL(p2, code, lino, _end, terrno); + QUERY_CHECK_CONDITION(p1->info.type == p2->info.type, code, lino, _end, TSDB_CODE_INTERNAL_ERROR); + QUERY_CHECK_CONDITION(p1->info.bytes == p2->info.bytes, code, lino, _end, TSDB_CODE_INTERNAL_ERROR); + } + for (int32_t i = inputNCols; i < resNCols; ++i) { + SColumnInfoData *p = taosArrayGet(pResBlock->pDataBlock, i); + QUERY_CHECK_NULL(p, code, lino, _end, terrno); + SColumnInfoData colInfo = {.hasNull = true, .info = p->info}; + code = blockDataAppendColInfo(pBlock, &colInfo); + QUERY_CHECK_CODE(code, lino, _end); + SColumnInfoData* pNewCol = taosArrayGet(pBlock->pDataBlock, i); + QUERY_CHECK_NULL(pNewCol, code, lino, _end, terrno); + code = colInfoDataEnsureCapacity(pNewCol, pBlock->info.rows, false); + QUERY_CHECK_CODE(code, lino, _end); + colDataSetNNULL(pNewCol, 0, pBlock->info.rows); } - if (pInfo->pUpdateDataRes->info.rows > 0) { - goto FETCH_NEXT_BLOCK; - } + if (pBlock->info.type == STREAM_NORMAL) { + SStreamVtableMergeHandle** ppHandle = + taosHashGet(pInfo->pVtableMergeHandles, &pBlock->info.id.uid, sizeof(int64_t)); + if (ppHandle == NULL) { + // skip table that is not needed + continue; + } - goto NEXT_SUBMIT_BLK; - } else if (pInfo->blockType == STREAM_INPUT__CHECKPOINT) { - if (pInfo->validBlockIndex >= total) { - doClearBufferedBlocks(pInfo); - (*ppRes) = NULL; - return code; - } - - int32_t current = pInfo->validBlockIndex++; - qDebug("process %d/%d input data blocks, %s", current, (int32_t)total, id); - - SPackedData* pData = taosArrayGet(pInfo->pBlockLists, current); - QUERY_CHECK_NULL(pData, code, lino, _end, terrno); - SSDataBlock* pBlock = taosArrayGet(pData->pDataBlock, 0); - QUERY_CHECK_NULL(pBlock, code, lino, _end, terrno); - - if (pBlock->info.type == STREAM_CHECKPOINT) { + code = streamVtableMergeAddBlock(*ppHandle, pBlock, id); + QUERY_CHECK_CODE(code, lino, _end); + } else if (pBlock->info.type == STREAM_CHECKPOINT) { // todo(kjq): serialize checkpoint + } else { + qError("unexpected block type %d, id:%s", pBlock->info.type, id); + code = TSDB_CODE_VTABLE_SCAN_INTERNAL_ERROR; + QUERY_CHECK_CODE(code, lino, _end); + } + } + + if (taosArrayGetSize(pInfo->pVtableReadyHandles) == 0) { + void* pIter = taosHashIterate(pInfo->pVtableMergeHandles, NULL); + while (pIter != NULL) { + SStreamVtableMergeHandle* pHandle = *(SStreamVtableMergeHandle**)pIter; + SVM_NEXT_RESULT res = SVM_NEXT_NOT_READY; + code = streamVtableMergeMoveNext(pHandle, &res, id); + QUERY_CHECK_CODE(code, lino, _end); + if (res == SVM_NEXT_FOUND) { + void* px = taosArrayPush(pInfo->pVtableReadyHandles, &pHandle); + QUERY_CHECK_NULL(px, code, lino, _end, terrno); + } + pIter = taosHashIterate(pInfo->pVtableMergeHandles, pIter); + } + } + + blockDataCleanup(pResBlock); + while (true) { + void* px 
= taosArrayGetLast(pInfo->pVtableReadyHandles); + if (px == NULL) { + break; + } + + SStreamVtableMergeHandle* pHandle = *(SStreamVtableMergeHandle**)px; + QUERY_CHECK_NULL(pHandle, code, lino, _end, terrno); + + SVM_NEXT_RESULT res = SVM_NEXT_FOUND; + int32_t nCols = taosArrayGetSize(pResBlock->pDataBlock); + while (res == SVM_NEXT_FOUND) { + SSDataBlock* pBlock = NULL; + int32_t idx = 0; + code = streamVtableMergeCurrent(pHandle, &pBlock, &idx, id); + QUERY_CHECK_CODE(code, lino, _end); + + bool newTuple = true; + if (pResBlock->info.rows > 0) { + SColumnInfoData* pResTsCol = taosArrayGet(pResBlock->pDataBlock, pInfo->primaryTsIndex); + int64_t lastResTs = *(int64_t*)colDataGetNumData(pResTsCol, pResBlock->info.rows - 1); + SColumnInfoData* pMergeTsCol = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex); + int64_t mergeTs = *(int64_t*)colDataGetNumData(pMergeTsCol, idx); + QUERY_CHECK_CONDITION(mergeTs >= lastResTs, code, lino, _end, TSDB_CODE_INTERNAL_ERROR); + newTuple = (mergeTs > lastResTs); + } + if (newTuple) { + if (pResBlock->info.rows >= pResBlock->info.capacity) { + break; + } + pResBlock->info.rows++; + for (int32_t i = 0; i < nCols; ++i) { + SColumnInfoData* pResCol = taosArrayGet(pResBlock->pDataBlock, i); + colDataSetNULL(pResCol, pResBlock->info.rows - 1); + } + } + for (int32_t i = 0; i < nCols; ++i) { + SColumnInfoData* pMergeCol = taosArrayGet(pBlock->pDataBlock, i); + if (!colDataIsNull_s(pMergeCol, idx)) { + SColumnInfoData* pResCol = taosArrayGet(pResBlock->pDataBlock, i); + code = colDataAssignNRows(pResCol, pResBlock->info.rows - 1, pMergeCol, idx, 1); + QUERY_CHECK_CODE(code, lino, _end); + } + } + code = streamVtableMergeMoveNext(pHandle, &res, id); + QUERY_CHECK_CODE(code, lino, _end); + } + + if (res == SVM_NEXT_NOT_READY) { + px = taosArrayPop(pInfo->pVtableReadyHandles); + QUERY_CHECK_NULL(px, code, lino, _end, terrno); + } + + if (pResBlock->info.rows > 0) { + pResBlock->info.id.uid = streamVtableMergeHandleGetVuid(pHandle); + break; + } + } + + if (taosArrayGetSize(pInfo->pVtableReadyHandles) == 0) { + pOperator->status = OP_EXEC_DONE; + } + + pInfo->numOfExec++; + if (pResBlock->info.rows > 0) { + pResBlock->info.id.groupId = tableListGetTableGroupId(pInfo->pTableListInfo, pResBlock->info.id.uid); + code = blockDataUpdateTsWindow(pResBlock, 0); + QUERY_CHECK_CODE(code, lino, _end); + code = addTagPseudoColumnData(&pInfo->readHandle, pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, pResBlock, + pResBlock->info.rows, pTaskInfo, NULL); + QUERY_CHECK_CODE(code, lino, _end); + code = doFilter(pResBlock, pOperator->exprSupp.pFilterInfo, NULL); + QUERY_CHECK_CODE(code, lino, _end); + if (pResBlock->info.rows > 0) { + (*ppRes) = pResBlock; + pOperator->resultInfo.totalRows += pResBlock->info.rows; } - // printDataBlock(pInfo->pCheckpointRes, "stream scan ck", GET_TASKID(pTaskInfo)); - (*ppRes) = pInfo->pCheckpointRes; - return code; - } else { - qError("stream scan error, invalid block type %d, %s", pInfo->blockType, id); - code = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR; } _end: + if (pIter != NULL) { + taosHashCancelIterate(pInfo->pVtableMergeHandles, pIter); + pIter = NULL; + } if (code != TSDB_CODE_SUCCESS) { qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); pTaskInfo->code = code; T_LONG_JMP(pTaskInfo->env, code); } - (*ppRes) = NULL; return code; } -int32_t createStreamVtableMergeOperatorInfo(SReadHandle* pHandle, SVirtualScanPhysiNode* pVirtualScanNode, - SNode* pTagCond, SExecTaskInfo* pTaskInfo, SOperatorInfo** pOptrInfo) { +int32_t 
createStreamVtableMergeOperatorInfo(SOperatorInfo* pDownstream, SReadHandle* pHandle, + SVirtualScanPhysiNode* pVirtualScanNode, SNode* pTagCond, + STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo, + SOperatorInfo** pOptrInfo) { QRY_PARAM_CHECK(pOptrInfo); int32_t code = TSDB_CODE_SUCCESS; @@ -1261,7 +1067,7 @@ int32_t createStreamVtableMergeOperatorInfo(SReadHandle* pHandle, SVirtualScanPh pInfo->pCreateTbRes = buildCreateTableBlock(&pInfo->tbnameCalSup, &pInfo->tagCalSup); QUERY_CHECK_NULL(pInfo->pCreateTbRes, code, lino, _error, terrno); - // create the pseduo columns info + // create the pseudo columns info if (pVirtualScanNode->scan.pScanPseudoCols != NULL) { code = createExprInfo(pVirtualScanNode->scan.pScanPseudoCols, NULL, &pInfo->pPseudoExpr, &pInfo->numOfPseudoExpr); QUERY_CHECK_CODE(code, lino, _error); @@ -1272,9 +1078,21 @@ int32_t createStreamVtableMergeOperatorInfo(SReadHandle* pHandle, SVirtualScanPh pInfo->pRes = createDataBlockFromDescNode(pDescNode); QUERY_CHECK_NULL(pInfo->pRes, code, lino, _error, terrno); + code = blockDataEnsureCapacity(pInfo->pRes, TMAX(pOperator->resultInfo.capacity, 4096)); + QUERY_CHECK_CODE(code, lino, _error); + pInfo->pRes->info.type = STREAM_NORMAL; code = createSpecialDataBlock(STREAM_CLEAR, &pInfo->pUpdateRes); QUERY_CHECK_CODE(code, lino, _error); + int32_t pageSize = getProperSortPageSize(pInfo->pRes->info.rowSize, taosArrayGetSize(pInfo->pRes->pDataBlock)); + code = createDiskbasedBuf(&pInfo->pVtableMergeBuf, pageSize, tsStreamVirtualMergeMaxMemKb * 1024, + "streamVtableMergeBuf", tsTempDir); + QUERY_CHECK_CODE(code, lino, _error); + pInfo->pVtableReadyHandles = taosArrayInit(0, POINTER_BYTES); + QUERY_CHECK_NULL(pInfo->pVtableReadyHandles, code, lino, _error, terrno); + pInfo->pTableListInfo = pTableListInfo; + pTableListInfo = NULL; + pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; pInfo->windowSup = (SWindowSupporter){.pStreamAggSup = NULL, .gap = -1, .parentType = QUERY_NODE_PHYSICAL_PLAN}; pInfo->groupId = 0; @@ -1337,6 +1155,9 @@ int32_t createStreamVtableMergeOperatorInfo(SReadHandle* pHandle, SVirtualScanPh optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL); // TODO(kjq): save and load fill history state + code = appendDownstream(pOperator, &pDownstream, 1); + QUERY_CHECK_CODE(code, lino, _error); + *pOptrInfo = pOperator; return code; @@ -1345,6 +1166,10 @@ _error: destroyStreamScanOperatorInfo(pInfo); } + if (pTableListInfo != NULL) { + tableListDestroy(pTableListInfo); + } + if (pOperator != NULL) { pOperator->info = NULL; destroyOperator(pOperator); diff --git a/source/libs/executor/test/CMakeLists.txt b/source/libs/executor/test/CMakeLists.txt index 4136640847..ab64dd85d4 100644 --- a/source/libs/executor/test/CMakeLists.txt +++ b/source/libs/executor/test/CMakeLists.txt @@ -9,7 +9,7 @@ MESSAGE(STATUS "build parser unit test") # ADD_EXECUTABLE(executorTest ${SOURCE_LIST}) # TARGET_LINK_LIBRARIES( # executorTest -# PRIVATE os util common transport gtest ${TAOS_LIB_STATIC} qcom executor function planner scalar nodes vnode +# PRIVATE os util common transport gtest ${TAOS_NATIVE_LIB_STATIC} qcom executor function planner scalar nodes vnode # ) # # TARGET_INCLUDE_DIRECTORIES( diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c index f03bec5a32..48a7e7c345 100644 --- a/source/libs/function/src/tudf.c +++ b/source/libs/function/src/tudf.c @@ -2103,6 +2103,16 @@ _exit: return code; } +static void freeTaskSession(SClientUdfTask *task) { + uv_mutex_lock(&gUdfcProxy.udfcUvMutex); + if 
(task->session->udfUvPipe != NULL && task->session->udfUvPipe->data != NULL) { + SClientUvConn *conn = task->session->udfUvPipe->data; + conn->session = NULL; + } + uv_mutex_unlock(&gUdfcProxy.udfcUvMutex); + taosMemoryFreeClear(task->session); +} + int32_t doSetupUdf(char udfName[], UdfcFuncHandle *funcHandle) { int32_t code = TSDB_CODE_SUCCESS, lino = 0; SClientUdfTask *task = taosMemoryCalloc(1, sizeof(SClientUdfTask)); @@ -2143,7 +2153,7 @@ _exit: if (code != 0) { fnError("failed to setup udf. udfname: %s, err: %d line:%d", udfName, code, lino); } - taosMemoryFree(task->session); + freeTaskSession(task); taosMemoryFree(task); return code; } @@ -2308,18 +2318,18 @@ int32_t doTeardownUdf(UdfcFuncHandle handle) { fnInfo("tear down udf. udf name: %s, udf func handle: %p", session->udfName, handle); // TODO: synchronization refactor between libuv event loop and request thread - uv_mutex_lock(&gUdfcProxy.udfcUvMutex); - if (session->udfUvPipe != NULL && session->udfUvPipe->data != NULL) { - SClientUvConn *conn = session->udfUvPipe->data; - conn->session = NULL; - } - uv_mutex_unlock(&gUdfcProxy.udfcUvMutex); + // uv_mutex_lock(&gUdfcProxy.udfcUvMutex); + // if (session->udfUvPipe != NULL && session->udfUvPipe->data != NULL) { + // SClientUvConn *conn = session->udfUvPipe->data; + // conn->session = NULL; + // } + // uv_mutex_unlock(&gUdfcProxy.udfcUvMutex); _exit: if (code != 0) { fnError("failed to teardown udf. udf name: %s, err: %d, line: %d", session->udfName, code, lino); } - taosMemoryFree(session); + freeTaskSession(task); taosMemoryFree(task); return code; diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index 800817b857..5344168ceb 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -773,6 +773,7 @@ static int32_t logicDynQueryCtrlCopy(const SDynQueryCtrlLogicNode* pSrc, SDynQue COPY_OBJECT_FIELD(stbJoin.srcScan, sizeof(pDst->stbJoin.srcScan)); COPY_SCALAR_FIELD(vtbScan.scanAllCols); COPY_SCALAR_FIELD(vtbScan.suid); + COPY_CHAR_ARRAY_FIELD(vtbScan.dbName); CLONE_OBJECT_FIELD(vtbScan.pVgroupList, vgroupsInfoClone); return TSDB_CODE_SUCCESS; } @@ -816,6 +817,10 @@ static int32_t physiVirtualTableScanCopy(const SVirtualScanPhysiNode* pSrc, SVir COPY_SCALAR_FIELD(groupSort); COPY_SCALAR_FIELD(scanAllCols); CLONE_NODE_LIST_FIELD(pTargets); + CLONE_NODE_LIST_FIELD(pTags); + CLONE_NODE_FIELD(pSubtable); + COPY_SCALAR_FIELD(igExpired); + COPY_SCALAR_FIELD(igCheckUpdate); return TSDB_CODE_SUCCESS; } diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 1351eb49c1..5fa6dbf69d 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -2479,6 +2479,10 @@ static const char* jkVirtualTableScanPhysiPlanGroupTags = "GroupTags"; static const char* jkVirtualTableScanPhysiPlanGroupSort = "GroupSort"; static const char* jkVirtualTableScanPhysiPlanscanAllCols= "scanAllCols"; static const char* jkVirtualTableScanPhysiPlanTargets = "Targets"; +static const char* jkVirtualTableScanPhysiPlanTags = "Tags"; +static const char* jkVirtualTableScanPhysiPlanSubtable = "Subtable"; +static const char* jkVirtualTableScanPhysiPlanIgExpired = "IgExpired"; +static const char* jkVirtualTableScanPhysiPlanIgCheckUpdate = "IgCheckUpdate"; static int32_t physiVirtualTableScanNodeToJson(const void* pObj, SJson* pJson) { const SVirtualScanPhysiNode* pNode = (const SVirtualScanPhysiNode*)pObj; @@ -2486,11 +2490,11 @@ static int32_t 
physiVirtualTableScanNodeToJson(const void* pObj, SJson* pJson) { int32_t code = physiScanNodeToJson(pObj, pJson); if (TSDB_CODE_SUCCESS == code) { - code = nodeListToJson(pJson, jkVirtualTableScanPhysiPlanTargets, pNode->pGroupTags); + code = nodeListToJson(pJson, jkVirtualTableScanPhysiPlanGroupTags, pNode->pGroupTags); } if (TSDB_CODE_SUCCESS == code) { - code = tjsonAddBoolToObject(pJson, jkVirtualTableScanPhysiPlanTargets, pNode->groupSort); + code = tjsonAddBoolToObject(pJson, jkVirtualTableScanPhysiPlanGroupSort, pNode->groupSort); } if (TSDB_CODE_SUCCESS == code) { @@ -2501,13 +2505,29 @@ static int32_t physiVirtualTableScanNodeToJson(const void* pObj, SJson* pJson) { code = nodeListToJson(pJson, jkVirtualTableScanPhysiPlanTargets, pNode->pTargets); } + if (TSDB_CODE_SUCCESS == code) { + code = nodeListToJson(pJson, jkVirtualTableScanPhysiPlanTags, pNode->pTags); + } + + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkVirtualTableScanPhysiPlanSubtable, nodeToJson, pNode->pSubtable); + } + + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkVirtualTableScanPhysiPlanIgExpired, pNode->igExpired); + } + + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkVirtualTableScanPhysiPlanIgCheckUpdate, pNode->igCheckUpdate); + } + return code; } static int32_t jsonToPhysiVirtualTableScanNode(const SJson* pJson, void* pObj) { SVirtualScanPhysiNode* pNode = (SVirtualScanPhysiNode*)pObj; - int32_t code = jsonToPhysicPlanNode(pJson, pObj); + int32_t code = jsonToPhysiScanNode(pJson, pObj); if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeList(pJson, jkVirtualTableScanPhysiPlanGroupTags, &pNode->pGroupTags); } @@ -2521,6 +2541,22 @@ static int32_t jsonToPhysiVirtualTableScanNode(const SJson* pJson, void* pObj) { code = jsonToNodeList(pJson, jkVirtualTableScanPhysiPlanTargets, &pNode->pTargets); } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeList(pJson, jkVirtualTableScanPhysiPlanTags, &pNode->pTags); + } + + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkVirtualTableScanPhysiPlanSubtable, &pNode->pSubtable); + } + + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkVirtualTableScanPhysiPlanIgExpired, &pNode->igExpired); + } + + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkVirtualTableScanPhysiPlanIgCheckUpdate, &pNode->igCheckUpdate); + } + return code; } diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index bdf5befca4..276dfbe525 100644 --- a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -2207,6 +2207,10 @@ enum { PHY_VIRTUAL_TABLE_SCAN_CODE_GROUP_SORT, PHY_VIRTUAL_TABLE_SCAN_CODE_ONLY_TS, PHY_VIRTUAL_TABLE_SCAN_CODE_TARGETS, + PHY_VIRTUAL_TABLE_SCAN_CODE_TAGS, + PHY_VIRTUAL_TABLE_SCAN_CODE_SUBTABLE, + PHY_VIRTUAL_TABLE_SCAN_CODE_IGNORE_EXPIRED, + PHY_VIRTUAL_TABLE_SCAN_CODE_IGNORE_CHECK_UPDATE, }; static int32_t physiVirtualTableScanNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { @@ -2229,6 +2233,23 @@ static int32_t physiVirtualTableScanNodeToMsg(const void* pObj, STlvEncoder* pEn if (TSDB_CODE_SUCCESS == code) { code = tlvEncodeObj(pEncoder, PHY_VIRTUAL_TABLE_SCAN_CODE_TARGETS, nodeListToMsg, pNode->pTargets); } + + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeObj(pEncoder, PHY_VIRTUAL_TABLE_SCAN_CODE_TAGS, nodeListToMsg, pNode->pTags); + } + + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeObj(pEncoder, PHY_VIRTUAL_TABLE_SCAN_CODE_SUBTABLE, nodeToMsg, 
pNode->pSubtable); + } + + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeI8(pEncoder, PHY_VIRTUAL_TABLE_SCAN_CODE_IGNORE_EXPIRED, pNode->igExpired); + } + + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeI8(pEncoder, PHY_VIRTUAL_TABLE_SCAN_CODE_IGNORE_CHECK_UPDATE, pNode->igCheckUpdate); + } + return code; } @@ -2254,6 +2275,18 @@ static int32_t msgToPhysiVirtualTableScanNode(STlvDecoder* pDecoder, void* pObj) case PHY_VIRTUAL_TABLE_SCAN_CODE_TARGETS: code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pTargets); break; + case PHY_VIRTUAL_TABLE_SCAN_CODE_TAGS: + code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pTags); + break; + case PHY_VIRTUAL_TABLE_SCAN_CODE_SUBTABLE: + code = msgToNodeFromTlv(pTlv, (void**)&pNode->pSubtable); + break; + case PHY_VIRTUAL_TABLE_SCAN_CODE_IGNORE_EXPIRED: + code = tlvDecodeI8(pTlv, &pNode->igExpired); + break; + case PHY_VIRTUAL_TABLE_SCAN_CODE_IGNORE_CHECK_UPDATE: + code = tlvDecodeI8(pTlv, &pNode->igCheckUpdate); + break; default: break; } @@ -4353,6 +4386,7 @@ enum { PHY_DYN_QUERY_CTRL_CODE_STB_JOIN_SRC_SCAN1, PHY_DYN_QUERY_CTRL_CODE_VTB_SCAN_SCAN_ALL_COLS, PHY_DYN_QUERY_CTRL_CODE_VTB_SCAN_SUID, + PHY_DYN_QUERY_CTRL_CODE_VTB_SCAN_DBNAME, PHY_DYN_QUERY_CTRL_CODE_VTB_SCAN_ACCOUNT_ID, PHY_DYN_QUERY_CTRL_CODE_VTB_SCAN_EP_SET, PHY_DYN_QUERY_CTRL_CODE_VTB_SCAN_SCAN_COLS, @@ -4394,6 +4428,9 @@ static int32_t physiDynQueryCtrlNodeToMsg(const void* pObj, STlvEncoder* pEncode if (TSDB_CODE_SUCCESS == code) { code = tlvEncodeU64(pEncoder, PHY_DYN_QUERY_CTRL_CODE_VTB_SCAN_SUID, pNode->vtbScan.suid); } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeCStr(pEncoder, PHY_DYN_QUERY_CTRL_CODE_VTB_SCAN_DBNAME, pNode->vtbScan.dbName); + } if (TSDB_CODE_SUCCESS == code) { code = tlvEncodeI32(pEncoder, PHY_DYN_QUERY_CTRL_CODE_VTB_SCAN_ACCOUNT_ID, pNode->vtbScan.accountId); } @@ -4452,6 +4489,9 @@ static int32_t msgToPhysiDynQueryCtrlNode(STlvDecoder* pDecoder, void* pObj) { case PHY_DYN_QUERY_CTRL_CODE_VTB_SCAN_SUID: code = tlvDecodeU64(pTlv, &pNode->vtbScan.suid); break; + case PHY_DYN_QUERY_CTRL_CODE_VTB_SCAN_DBNAME: + code = tlvDecodeCStr(pTlv, pNode->vtbScan.dbName, sizeof(pNode->vtbScan.dbName)); + break; case PHY_DYN_QUERY_CTRL_CODE_VTB_SCAN_ACCOUNT_ID: code = tlvDecodeI32(pTlv, &pNode->vtbScan.accountId); break; diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index 0002d9ce9c..8bc03cd25e 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -1908,6 +1908,8 @@ void nodesDestroyNode(SNode* pNode) { destroyScanPhysiNode((SScanPhysiNode*)pNode); nodesDestroyList(pPhyNode->pGroupTags); nodesDestroyList(pPhyNode->pTargets); + nodesDestroyList(pPhyNode->pTags); + nodesDestroyNode(pPhyNode->pSubtable); break; } case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN: diff --git a/source/libs/parser/src/parInsertSql.c b/source/libs/parser/src/parInsertSql.c index 970751d827..81129a6f8f 100644 --- a/source/libs/parser/src/parInsertSql.c +++ b/source/libs/parser/src/parInsertSql.c @@ -1206,6 +1206,10 @@ static int32_t getTableMeta(SInsertParseContext* pCxt, SName* pTbName, STableMet *pMissCache = true; } else if (bUsingTable && TSDB_SUPER_TABLE != (*pTableMeta)->tableType) { code = buildInvalidOperationMsg(&pCxt->msg, "create table only from super table is allowed"); + } else if (((*pTableMeta)->virtualStb) || + TSDB_VIRTUAL_CHILD_TABLE == (*pTableMeta)->tableType || + TSDB_VIRTUAL_NORMAL_TABLE == (*pTableMeta)->tableType) { + code = TSDB_CODE_VTABLE_NOT_SUPPORT_STMT; } } return code; @@ 
-1341,6 +1345,8 @@ static int32_t getUsingTableSchema(SInsertParseContext* pCxt, SVnodeModifyOpStmt code = getTableMeta(pCxt, &pStmt->usingTableName, &pStableMeta, &pCxt->missCache, bUsingTable); if (TSDB_CODE_SUCCESS == code) { code = taosHashPut(pStmt->pSuperTableHashObj, tbFName, strlen(tbFName), &pStableMeta, POINTER_BYTES); + } else { + taosMemoryFreeClear(pStableMeta); } } } diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 75e01d1356..1b8f0e034c 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -5479,6 +5479,15 @@ int32_t translateTable(STranslateContext* pCxt, SNode** pTable, bool inJoin) { code = TSDB_CODE_TSC_INVALID_OPERATION; break; } + + if (pCxt->pParseCxt->isStmtBind) { + code = TSDB_CODE_VTABLE_NOT_SUPPORT_STMT; + break; + } + if (pCxt->pParseCxt->topicQuery) { + code = TSDB_CODE_VTABLE_NOT_SUPPORT_TOPIC; + break; + } PAR_ERR_RET(translateVirtualTable(pCxt, pTable, &name)); SVirtualTableNode *pVirtualTable = (SVirtualTableNode*)*pTable; pVirtualTable->table.singleTable = true; @@ -8566,7 +8575,7 @@ static int32_t translateInsertTable(STranslateContext* pCxt, SNode** pTable) { int32_t code = translateFrom(pCxt, pTable); if (TSDB_CODE_SUCCESS == code && TSDB_CHILD_TABLE != ((SRealTableNode*)*pTable)->pMeta->tableType && TSDB_NORMAL_TABLE != ((SRealTableNode*)*pTable)->pMeta->tableType) { - code = buildInvalidOperationMsg(&pCxt->msgBuf, "insert data into super table is not supported"); + code = buildInvalidOperationMsg(&pCxt->msgBuf, "insert data into super table or virtual table is not supported"); } return code; } @@ -11734,6 +11743,14 @@ static int32_t buildQueryForTableTopic(STranslateContext* pCxt, SCreateTopicStmt return code; } +static bool isVirtualTable(int8_t tableType) { + if (tableType == TSDB_VIRTUAL_CHILD_TABLE || tableType == TSDB_VIRTUAL_NORMAL_TABLE) { + return true; + } else { + return false; + } +} + static int32_t checkCreateTopic(STranslateContext* pCxt, SCreateTopicStmt* pStmt) { if (NULL == pStmt->pQuery && NULL == pStmt->pWhere) { return TSDB_CODE_SUCCESS; @@ -12010,16 +12027,6 @@ static bool crossTableWithUdaf(SSelectStmt* pSelect) { !hasTbnameFunction(pSelect->pPartitionByList); } - -static bool isVirtualTable(int8_t tableType) { - if (tableType == TSDB_VIRTUAL_CHILD_TABLE || tableType == TSDB_VIRTUAL_NORMAL_TABLE) { - return true; - } else { - return false; - } -} - - static int32_t checkCreateStream(STranslateContext* pCxt, SCreateStreamStmt* pStmt) { if (NULL == pStmt->pQuery) { return TSDB_CODE_SUCCESS; @@ -12045,6 +12052,12 @@ static int32_t checkCreateStream(STranslateContext* pCxt, SCreateStreamStmt* pSt } if (isVirtualTable(tableType) || (tableType == TSDB_SUPER_TABLE && pMeta->virtualStb)) { + SSelectStmt* pSelect = (SSelectStmt*)pStmt->pQuery; + if ((STREAM_TRIGGER_WINDOW_CLOSE != pStmt->pOptions->triggerType) && + !(STREAM_TRIGGER_AT_ONCE == pStmt->pOptions->triggerType && (NULL == pSelect->pWindow && NULL == pSelect->pEvery))) { + taosMemoryFree(pMeta); + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Not supported virtual table stream query or trigger mode"); + } if (0 == pStmt->pOptions->ignoreExpired) { taosMemoryFree(pMeta); return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "For virtual table IGNORE EXPIRED must be 1"); @@ -12669,6 +12682,13 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm } } + if (pStmt->pOptions->triggerType == 
STREAM_TRIGGER_CONTINUOUS_WINDOW_CLOSE) { + if (pSelect->pWindow != NULL && QUERY_NODE_INTERVAL_WINDOW != nodeType(pSelect->pWindow)) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, + "When the trigger is continuous window close, the stream query only supports interval windows"); + } + } + if (NULL != pSelect->pGroupByList) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported Group by"); } @@ -13461,6 +13481,93 @@ static int32_t buildStreamNotifyOptions(STranslateContext* pCxt, SStreamNotifyOp return code; } +static int32_t buildQueryTableColIdList(SSelectStmt *pSelect, SArray** ppRes) { + STableNode* pTable = (STableNode*)pSelect->pFromTable; + SNodeList* pColList = NULL; + SNode* pCol = NULL; + int32_t code = 0; + PAR_ERR_RET(nodesCollectColumns(pSelect, SQL_CLAUSE_FROM, pTable->tableAlias, COLLECT_COL_TYPE_COL, &pColList)); + *ppRes = taosArrayInit(pColList->length, sizeof(int16_t)); + if (NULL == *ppRes) { + code = terrno; + parserError("taosArrayInit 0x%p colId failed, errno:0x%x", *ppRes, code); + goto _return; + } + + FOREACH(pCol, pColList) { + if (NULL == taosArrayPush(*ppRes, &((SColumnNode*)pCol)->colId)) { + code = terrno; + parserError("taosArrayPush 0x%p colId failed, errno:0x%x", *ppRes, code); + goto _return; + } + } + +_return: + + nodesDestroyList(pColList); + if (code) { + taosArrayDestroy(*ppRes); + *ppRes = NULL; + } + + return code; +} + +static int32_t modifyVtableSrcNumBasedOnCols(SVCTableRefCols* pTb, SArray* pColIdList, SSHashObj* pTbHash) { + tSimpleHashClear(pTbHash); + + char tbFName[TSDB_TABLE_FNAME_LEN]; + int32_t colNum = taosArrayGetSize(pColIdList); + for (int32_t i = 0; i < colNum; ++i) { + int16_t *colId = taosArrayGet(pColIdList, i); + for (int32_t m = 0; m < pTb->numOfColRefs; ++m) { + if (*colId == pTb->refCols[m].colId) { + snprintf(tbFName, sizeof(tbFName), "%s.%s", pTb->refCols[m].refDbName, pTb->refCols[m].refTableName); + PAR_ERR_RET(tSimpleHashPut(pTbHash, tbFName, strlen(tbFName) + 1, &colNum, sizeof(colNum))); + } + } + } + + pTb->numOfSrcTbls = tSimpleHashGetSize(pTbHash); + + return TSDB_CODE_SUCCESS; +} + +static int32_t modifyVtableSrcNumBasedOnQuery(SArray* pVSubTables, SNode* pStmt) { + SSelectStmt *pSelect = (SSelectStmt*)pStmt; + SArray* pColIdList = NULL; + SSHashObj* pTbHash = NULL; + int32_t code = 0; + int32_t colNum = 0; + int32_t vgNum = taosArrayGetSize(pVSubTables); + if (vgNum > 0) { + PAR_ERR_JRET(buildQueryTableColIdList(pSelect, &pColIdList)); + colNum = taosArrayGetSize(pColIdList); + pTbHash = tSimpleHashInit(colNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY)); + if (NULL == pTbHash) { + code = terrno; + parserError("tSimpleHashInit failed, colNum:%d, errno:0x%x", colNum, code); + PAR_ERR_JRET(code); + } + } + + for (int32_t i = 0; i < vgNum; ++i) { + SVSubTablesRsp* pVg = (SVSubTablesRsp*)taosArrayGet(pVSubTables, i); + int32_t vtbNum = taosArrayGetSize(pVg->pTables); + for (int32_t m = 0; m < vtbNum; ++m) { + SVCTableRefCols* pTb = (SVCTableRefCols*)taosArrayGetP(pVg->pTables, m); + PAR_ERR_JRET(modifyVtableSrcNumBasedOnCols(pTb, pColIdList, pTbHash)); + } + } + +_return: + + taosArrayDestroy(pColIdList); + tSimpleHashCleanup(pTbHash); + + return code; +} + static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* pStmt, SCMCreateStreamReq* pReq) { pReq->igExists = pStmt->ignoreExists; @@ -13505,7 +13612,7 @@ static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* if (TSDB_CODE_SUCCESS == code) { code = 
columnDefNodeToField(pStmt->pCols, &pReq->pCols, false, false); } - pReq->recalculateInterval = 0; + pReq->recalculateInterval = 3600000; if (NULL != pStmt->pOptions->pRecInterval) { SValueNode* pValueNode = ((SValueNode*)pStmt->pOptions->pRecInterval); pReq->recalculateInterval = @@ -13516,8 +13623,11 @@ static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* if (TSDB_CODE_SUCCESS == code) { code = buildStreamNotifyOptions(pCxt, pStmt->pNotifyOptions, pReq); } - if (TSDB_CODE_SUCCESS == code && pCxt->pMetaCache != NULL) { - TSWAP(pReq->pVSubTables, pCxt->pMetaCache->pVSubTables); + if (TSDB_CODE_SUCCESS == code && pCxt->pMetaCache != NULL && pCxt->pMetaCache->pVSubTables != NULL) { + code = modifyVtableSrcNumBasedOnQuery(pCxt->pMetaCache->pVSubTables, pStmt->pQuery); + if (TSDB_CODE_SUCCESS == code) { + TSWAP(pReq->pVSubTables, pCxt->pMetaCache->pVSubTables); + } } return code; } diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index a5f32d7159..69c6f03e99 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -1023,6 +1023,7 @@ static int32_t createVirtualSuperTableLogicNode(SLogicPlanContext* pCxt, SSelect pDynCtrl->qType = DYN_QTYPE_VTB_SCAN; pDynCtrl->vtbScan.scanAllCols = pVtableScan->scanAllCols; pDynCtrl->vtbScan.suid = pVtableScan->stableId; + tstrncpy(pDynCtrl->vtbScan.dbName, pVtableScan->tableName.dbname, TSDB_DB_NAME_LEN); PLAN_ERR_JRET(nodesListMakeStrictAppend(&pDynCtrl->node.pChildren, (SNode*)pVtableScan)); PLAN_ERR_JRET(nodesCloneList(pVtableScan->node.pTargets, &pDynCtrl->node.pTargets)); TSWAP(pVtableScan->pVgroupList, pDynCtrl->vtbScan.pVgroupList); diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 4901b916f3..46442f84f1 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -7861,6 +7861,9 @@ static int32_t findDepTableScanNode(SColumnNode* pCol, SVirtualScanLogicNode *pV FOREACH(pScanCol, pScanNode->pScanCols) { if (QUERY_NODE_COLUMN == nodeType(pScanCol)) { SColumnNode *pScanColNode = (SColumnNode *)pScanCol; + if (pScanColNode->colId == PRIMARYKEY_TIMESTAMP_COL_ID) { + continue; + } if (pScanColNode->hasDep && pCol->hasRef) { if (strcmp(pScanColNode->dbName, pCol->refDbName) == 0 && strcmp(pScanColNode->tableAlias, pCol->refTableName) == 0 && diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 528d88c604..3b19b8d2be 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -1919,6 +1919,7 @@ static int32_t updateDynQueryCtrlVtbScanInfo(SPhysiPlanContext* pCxt, SNodeList* pDynCtrl->vtbScan.suid = pLogicNode->vtbScan.suid; pDynCtrl->vtbScan.mgmtEpSet = pCxt->pPlanCxt->mgmtEpSet; pDynCtrl->vtbScan.accountId = pCxt->pPlanCxt->acctId; + tstrncpy(pDynCtrl->vtbScan.dbName, pLogicNode->vtbScan.dbName, TSDB_DB_NAME_LEN); return code; _return: diff --git a/source/libs/scheduler/test/CMakeLists.txt b/source/libs/scheduler/test/CMakeLists.txt index d9572e8dec..ac06d1e167 100644 --- a/source/libs/scheduler/test/CMakeLists.txt +++ b/source/libs/scheduler/test/CMakeLists.txt @@ -8,15 +8,15 @@ IF(NOT TD_DARWIN) ADD_EXECUTABLE(schedulerTest ${SOURCE_LIST}) - IF (TD_GRANT) + IF(TD_GRANT) TARGET_LINK_LIBRARIES( schedulerTest - PUBLIC os util common catalog transport gtest qcom ${TAOS_LIB_STATIC} planner scheduler grant + PUBLIC os util common catalog 
transport gtest qcom ${TAOS_NATIVE_LIB_STATIC} planner scheduler grant ) - ELSE () + ELSE() TARGET_LINK_LIBRARIES( schedulerTest - PUBLIC os util common catalog transport gtest qcom ${TAOS_LIB_STATIC} planner scheduler + PUBLIC os util common catalog transport gtest qcom ${TAOS_NATIVE_LIB_STATIC} planner scheduler ) ENDIF() @@ -28,6 +28,5 @@ IF(NOT TD_DARWIN) add_test( NAME schedulerTest COMMAND schedulerTest - ) - + ) ENDIF() \ No newline at end of file diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index ced9400df0..aaa38efe31 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -949,7 +949,7 @@ void streamBackendCleanup(void* arg) { streamMutexDestroy(&pHandle->mutex); streamMutexDestroy(&pHandle->cfMutex); - stDebug("vgId:%d destroy stream backend:%p", (int32_t) pHandle->vgId, pHandle); + stDebug("vgId:%d destroy stream backend:%p", (int32_t)pHandle->vgId, pHandle); taosMemoryFree(pHandle); } @@ -3107,40 +3107,37 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfKe return rocksdb_create_iterator_cf(wrapper->db, *readOpt, ((rocksdb_column_family_handle_t**)wrapper->pCf)[idx]); } -#define STREAM_STATE_PUT_ROCKSDB(pState, funcname, key, value, vLen) \ - do { \ - code = 0; \ - char buf[128] = {0}; \ - char* err = NULL; \ - int i = streamStateGetCfIdx(pState, funcname); \ - if (i < 0) { \ - stWarn("streamState failed to get cf name: %s", funcname); \ - code = TSDB_CODE_THIRDPARTY_ERROR; \ - break; \ - } \ - STaskDbWrapper* wrapper = pState->pTdbState->pOwner->pBackend; \ - if (pState->pTdbState->recalc) { \ - wrapper = pState->pTdbState->pOwner->pRecalBackend; \ - } \ - TAOS_UNUSED(atomic_add_fetch_64(&wrapper->dataWritten, 1)); \ - char toString[128] = {0}; \ - if (stDebugFlag & DEBUG_TRACE) TAOS_UNUSED((ginitDict[i].toStrFunc((void*)key, toString))); \ - int32_t klen = ginitDict[i].enFunc((void*)key, buf); \ - rocksdb_column_family_handle_t* pHandle = ((rocksdb_column_family_handle_t**)wrapper->pCf)[ginitDict[i].idx]; \ - rocksdb_writeoptions_t* opts = wrapper->writeOpt; \ - rocksdb_t* db = wrapper->db; \ - char* ttlV = NULL; \ - int32_t ttlVLen = ginitDict[i].enValueFunc((char*)value, vLen, 0, &ttlV); \ - rocksdb_put_cf(db, opts, pHandle, (const char*)buf, klen, (const char*)ttlV, (size_t)ttlVLen, &err); \ - if (err != NULL) { \ - stError("streamState str: %s failed to write to %s, err: %s", toString, funcname, err); \ - taosMemoryFree(err); \ - code = TSDB_CODE_THIRDPARTY_ERROR; \ - } else { \ - stTrace("streamState str:%s succ to write to %s, rowValLen:%d, ttlValLen:%d, %p", toString, funcname, vLen, \ - ttlVLen, wrapper); \ - } \ - taosMemoryFree(ttlV); \ +#define STREAM_STATE_PUT_ROCKSDB(pState, funcname, key, value, vLen) \ + do { \ + code = 0; \ + char buf[128] = {0}; \ + char* err = NULL; \ + int i = streamStateGetCfIdx(pState, funcname); \ + if (i < 0) { \ + stWarn("streamState failed to get cf name: %s", funcname); \ + code = TSDB_CODE_THIRDPARTY_ERROR; \ + break; \ + } \ + STaskDbWrapper* wrapper = pState->pTdbState->pOwner->pBackend; \ + TAOS_UNUSED(atomic_add_fetch_64(&wrapper->dataWritten, 1)); \ + char toString[128] = {0}; \ + TAOS_UNUSED((ginitDict[i].toStrFunc((void*)key, toString))); \ + int32_t klen = ginitDict[i].enFunc((void*)key, buf); \ + rocksdb_column_family_handle_t* pHandle = ((rocksdb_column_family_handle_t**)wrapper->pCf)[ginitDict[i].idx]; \ + rocksdb_writeoptions_t* opts = wrapper->writeOpt; \ + rocksdb_t* db 
= wrapper->db; \ + char* ttlV = NULL; \ + int32_t ttlVLen = ginitDict[i].enValueFunc((char*)value, vLen, 0, &ttlV); \ + rocksdb_put_cf(db, opts, pHandle, (const char*)buf, klen, (const char*)ttlV, (size_t)ttlVLen, &err); \ + if (err != NULL) { \ + stError("streamState str: %s failed to write to %s, err: %s", toString, funcname, err); \ + taosMemoryFree(err); \ + code = TSDB_CODE_THIRDPARTY_ERROR; \ + } else { \ + stInfo("[InternalERR] write streamState str:%s succ to write to %s, rowValLen:%d, ttlValLen:%d, %p", toString, \ + funcname, vLen, ttlVLen, wrapper); \ + } \ + taosMemoryFree(ttlV); \ } while (0); #define STREAM_STATE_GET_ROCKSDB(pState, funcname, key, pVal, vLen) \ @@ -4261,22 +4258,16 @@ int32_t streamStateStateAddIfNotExist_rocksdb(SStreamState* pState, SSessionKey* int32_t res = 0; SSessionKey tmpKey = *key; int32_t valSize = *pVLen; - void* tmp = taosMemoryMalloc(valSize); - if (!tmp) { - return -1; - } SStreamStateCur* pCur = streamStateSessionSeekKeyCurrentPrev_rocksdb(pState, key); int32_t code = streamStateSessionGetKVByCur_rocksdb(pState, pCur, key, pVal, pVLen); if (code == 0) { if (key->win.skey <= tmpKey.win.skey && tmpKey.win.ekey <= key->win.ekey) { - memcpy(tmp, *pVal, valSize); goto _end; } void* stateKey = (char*)(*pVal) + (valSize - keyDataLen); if (fn(pKeyData, stateKey) == true) { - memcpy(tmp, *pVal, valSize); goto _end; } @@ -4291,7 +4282,6 @@ int32_t streamStateStateAddIfNotExist_rocksdb(SStreamState* pState, SSessionKey* if (code == 0) { void* stateKey = (char*)(*pVal) + (valSize - keyDataLen); if (fn(pKeyData, stateKey) == true) { - memcpy(tmp, *pVal, valSize); goto _end; } } @@ -4299,11 +4289,11 @@ int32_t streamStateStateAddIfNotExist_rocksdb(SStreamState* pState, SSessionKey* *key = tmpKey; res = 1; - memset(tmp, 0, valSize); _end: - taosMemoryFreeClear(*pVal); - *pVal = tmp; + if (res == 0 && valSize > *pVLen){ + stError("[InternalERR] [skey:%"PRId64 ",ekey:%"PRId64 ",groupId:%"PRIu64 "],valSize:%d bigger than get rocksdb len:%d", key->win.skey, key->win.ekey, key->groupId, valSize, *pVLen); + } streamStateFreeCur(pCur); return res; } @@ -4587,6 +4577,7 @@ int32_t streamStatePutBatchOptimize(SStreamState* pState, int32_t cfIdx, rocksdb void* val, int32_t vlen, int64_t ttl, void* tmpBuf) { int32_t code = 0; char buf[128] = {0}; + char toString[128] = {0}; char* dst = NULL; size_t size = 0; @@ -4600,6 +4591,10 @@ int32_t streamStatePutBatchOptimize(SStreamState* pState, int32_t cfIdx, rocksdb } } int32_t klen = ginitDict[cfIdx].enFunc((void*)key, buf); + + ginitDict[cfIdx].toStrFunc((void*)key, toString); + qInfo("[InternalERR] write cfIdx:%d key:%s vlen:%d", cfIdx, toString, vlen); + char* ttlV = tmpBuf; int32_t ttlVLen = ginitDict[cfIdx].enValueFunc(dst, size, ttl, &ttlV); diff --git a/source/libs/stream/src/streamData.c b/source/libs/stream/src/streamData.c index 4faef0594b..d3412138e1 100644 --- a/source/libs/stream/src/streamData.c +++ b/source/libs/stream/src/streamData.c @@ -110,6 +110,7 @@ int32_t createStreamBlockFromDispatchMsg(const SStreamDispatchReq* pReq, int32_t pDataBlock->info.type = pRetrieve->streamBlockType; pDataBlock->info.childId = pReq->upstreamChildId; + pDataBlock->info.id.uid = be64toh(pRetrieve->useconds); } pData->blocks = pArray; diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index c882aa14ee..b329421585 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -25,7 +25,7 @@ typedef struct SBlockName { static void 
doMonitorDispatchData(void* param, void* tmrId); static int32_t doSendDispatchMsg(SStreamTask* pTask, const SStreamDispatchReq* pReq, int32_t vgId, SEpSet* pEpSet); -static int32_t streamAddBlockIntoDispatchMsg(const SSDataBlock* pBlock, SStreamDispatchReq* pReq); +static int32_t streamAddBlockIntoDispatchMsg(const SSDataBlock* pBlock, SStreamDispatchReq* pReq, bool withUid); static int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, SSDataBlock* pDataBlock, int64_t groupId, int64_t now); static int32_t streamMapAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, SSDataBlock* pDataBlock, @@ -133,6 +133,7 @@ int32_t streamTaskBroadcastRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* r code = tmsgSendReq(&pEpInfo->epSet, &rpcMsg); if (code != 0) { + rpcFreeCont(buf); stError("s-task:%s (child %d) failed to send retrieve req to task:0x%x (vgId:%d) QID:0x%" PRIx64 " code:%s", pTask->id.idStr, pTask->info.selfChildId, pEpInfo->taskId, pEpInfo->nodeId, req->reqId, tstrerror(code)); } else { @@ -245,12 +246,13 @@ void destroyDispatchMsg(SStreamDispatchReq* pReq, int32_t numOfVgroups) { void clearBufferedDispatchMsg(SStreamTask* pTask) { SDispatchMsgInfo* pMsgInfo = &pTask->msgInfo; + + streamMutexLock(&pMsgInfo->lock); + if (pMsgInfo->pData != NULL) { destroyDispatchMsg(pMsgInfo->pData, streamTaskGetNumOfDownstream(pTask)); } - streamMutexLock(&pMsgInfo->lock); - pMsgInfo->checkpointId = -1; pMsgInfo->transId = -1; pMsgInfo->pData = NULL; @@ -365,7 +367,7 @@ static int32_t doBuildDispatchMsg(SStreamTask* pTask, const SStreamDataBlock* pD return terrno; } - code = streamAddBlockIntoDispatchMsg(pDataBlock, pReqs); + code = streamAddBlockIntoDispatchMsg(pDataBlock, pReqs, false); if (code != TSDB_CODE_SUCCESS) { destroyDispatchMsg(pReqs, 1); return code; @@ -391,7 +393,7 @@ static int32_t doBuildDispatchMsg(SStreamTask* pTask, const SStreamDataBlock* pD if (type == STREAM_DELETE_RESULT || type == STREAM_CHECKPOINT || type == STREAM_TRANS_STATE || type == STREAM_RECALCULATE_START) { for (int32_t j = 0; j < numOfVgroups; j++) { - code = streamAddBlockIntoDispatchMsg(pDataBlock, &pReqs[j]); + code = streamAddBlockIntoDispatchMsg(pDataBlock, &pReqs[j], false); if (code != 0) { destroyDispatchMsg(pReqs, numOfVgroups); return code; @@ -436,7 +438,7 @@ static int32_t doBuildDispatchMsg(SStreamTask* pTask, const SStreamDataBlock* pD if (pDataBlock->info.type == STREAM_DELETE_RESULT || pDataBlock->info.type == STREAM_CHECKPOINT || pDataBlock->info.type == STREAM_TRANS_STATE) { for (int32_t j = 0; j < numOfTasks; j++) { - code = streamAddBlockIntoDispatchMsg(pDataBlock, &pReqs[j]); + code = streamAddBlockIntoDispatchMsg(pDataBlock, &pReqs[j], false); if (code != 0) { destroyDispatchMsg(pReqs, numOfTasks); return code; @@ -599,21 +601,43 @@ static void doSendFailedDispatch(SStreamTask* pTask, SDispatchEntry* pEntry, int SStreamDispatchReq* pReq = pTask->msgInfo.pData; int32_t msgId = pTask->msgInfo.msgId; - SArray* vgInfo = pTask->outputInfo.shuffleDispatcher.dbInfo.pVgroupInfos; - int32_t numOfVgroups = taosArrayGetSize(vgInfo); - setResendInfo(pEntry, now); - for (int32_t j = 0; j < numOfVgroups; ++j) { - SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, j); - if (pVgInfo == NULL) { - continue; - } - if (pVgInfo->vgId == pEntry->nodeId) { - int32_t code = doSendDispatchMsg(pTask, &pReq[j], pVgInfo->vgId, &pVgInfo->epSet); - stDebug("s-task:%s (child taskId:%d) shuffle-dispatch blocks:%d to vgId:%d for %s, msgId:%d, code:%s", - pTask->id.idStr, pTask->info.selfChildId, 
pReq[j].blockNum, pVgInfo->vgId, pMsg, msgId, tstrerror(code)); - break; + if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) { + SArray* vgInfo = pTask->outputInfo.shuffleDispatcher.dbInfo.pVgroupInfos; + int32_t numOfVgroups = taosArrayGetSize(vgInfo); + + for (int32_t j = 0; j < numOfVgroups; ++j) { + SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, j); + if (pVgInfo == NULL) { + continue; + } + + if (pVgInfo->vgId == pEntry->nodeId) { + int32_t code = doSendDispatchMsg(pTask, &pReq[j], pVgInfo->vgId, &pVgInfo->epSet); + stDebug("s-task:%s (child taskId:%d) shuffle-dispatch blocks:%d to vgId:%d for %s, msgId:%d, code:%s", + pTask->id.idStr, pTask->info.selfChildId, pReq[j].blockNum, pVgInfo->vgId, pMsg, msgId, + tstrerror(code)); + break; + } + } + } else if (pTask->outputInfo.type == TASK_OUTPUT__VTABLE_MAP) { + SArray *pTaskInfos = pTask->outputInfo.vtableMapDispatcher.taskInfos; + int32_t numOfTasks = taosArrayGetSize(pTaskInfos); + + for (int32_t j = 0; j < numOfTasks; ++j) { + STaskDispatcherFixed *pAddr = taosArrayGet(pTaskInfos, j); + if (pAddr == NULL) { + continue; + } + + if (pAddr->nodeId == pEntry->nodeId) { + int32_t code = doSendDispatchMsg(pTask, &pReq[j], pAddr->nodeId, &pAddr->epSet); + stDebug("s-task:%s (child taskId:%d) vtable-map-dispatch blocks:%d to vgId:%d for %s, msgId:%d, code:%s", + pTask->id.idStr, pTask->info.selfChildId, pReq[j].blockNum, pAddr->nodeId, pMsg, msgId, + tstrerror(code)); + break; + } } } } @@ -636,9 +660,10 @@ static int32_t sendFailedDispatchData(SStreamTask* pTask, int64_t now) { int32_t msgId = pMsgInfo->msgId; SStreamDispatchReq* pReq = pTask->msgInfo.pData; - if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) { - stDebug("s-task:%s (child taskId:%d) retry shuffle-dispatch to down streams, msgId:%d", id, pTask->info.selfChildId, - msgId); + if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH || pTask->outputInfo.type == TASK_OUTPUT__VTABLE_MAP) { + const char *taskType = (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) ? 
"shuffle" : "vtable-map"; + stDebug("s-task:%s (child taskId:%d) retry %s-dispatch to down streams, msgId:%d", id, pTask->info.selfChildId, + taskType, msgId); int32_t numOfRetry = 0; for (int32_t i = 0; i < taosArrayGetSize(pTask->msgInfo.pSendInfo); ++i) { @@ -672,8 +697,8 @@ static int32_t sendFailedDispatchData(SStreamTask* pTask, int64_t now) { } } - stDebug("s-task:%s complete retry shuffle-dispatch blocks to all %d vnodes, msgId:%d", pTask->id.idStr, numOfRetry, - msgId); + stDebug("s-task:%s complete retry %s-dispatch blocks to all %d vnodes, msgId:%d", pTask->id.idStr, taskType, + numOfRetry, msgId); } else { int32_t dstVgId = pTask->outputInfo.fixedDispatcher.nodeId; SEpSet* pEpSet = &pTask->outputInfo.fixedDispatcher.epSet; @@ -798,7 +823,7 @@ static int32_t doAddDispatchBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, stDebug("s-task:%s dst table hashVal:0x%x assign to vgId:%d range[0x%x, 0x%x]", pTask->id.idStr, hashValue, pVgInfo->vgId, pVgInfo->hashBegin, pVgInfo->hashEnd); - if ((code = streamAddBlockIntoDispatchMsg(pDataBlock, &pReqs[j])) < 0) { + if ((code = streamAddBlockIntoDispatchMsg(pDataBlock, &pReqs[j], false)) < 0) { stError("s-task:%s failed to add dispatch block, code:%s", pTask->id.idStr, tstrerror(terrno)); return code; } @@ -913,7 +938,7 @@ int32_t streamMapAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, SSDa STaskDispatcherFixed* pAddr = taosArrayGet(pTaskInfos, *pIdx); QUERY_CHECK_NULL(pAddr, code, lino, _end, terrno); - code = streamAddBlockIntoDispatchMsg(pDataBlock, &pReqs[*pIdx]); + code = streamAddBlockIntoDispatchMsg(pDataBlock, &pReqs[*pIdx], true); QUERY_CHECK_CODE(code, lino, _end); if (pReqs[*pIdx].blockNum == 0) { @@ -1400,7 +1425,7 @@ int32_t streamTaskSendCheckpointSourceRsp(SStreamTask* pTask) { return TSDB_CODE_SUCCESS; } -int32_t streamAddBlockIntoDispatchMsg(const SSDataBlock* pBlock, SStreamDispatchReq* pReq) { +int32_t streamAddBlockIntoDispatchMsg(const SSDataBlock* pBlock, SStreamDispatchReq* pReq, bool withUid) { size_t dataEncodeSize = blockGetEncodeSize(pBlock); int32_t dataStrLen = sizeof(SRetrieveTableRsp) + dataEncodeSize + PAYLOAD_PREFIX_LEN; void* buf = taosMemoryCalloc(1, dataStrLen); @@ -1409,7 +1434,7 @@ int32_t streamAddBlockIntoDispatchMsg(const SSDataBlock* pBlock, SStreamDispatch } SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)buf; - pRetrieve->useconds = 0; + pRetrieve->useconds = withUid ? 
htobe64(pBlock->info.id.uid) : 0; pRetrieve->precision = TSDB_DEFAULT_PRECISION; pRetrieve->compressed = 0; pRetrieve->completed = 1; @@ -1873,7 +1898,7 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i streamMutexUnlock(&pMsgInfo->lock); - if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) { + if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH || pTask->outputInfo.type == TASK_OUTPUT__VTABLE_MAP) { if (!allRsp) { stDebug( "s-task:%s recv dispatch rsp, msgId:%d from 0x%x(vgId:%d), downstream task input status:%d code:%s, " diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 68315c8e53..f4409eb5aa 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -998,6 +998,7 @@ static int32_t doStreamExecTask(SStreamTask* pTask) { streamMetaReleaseTask(pTask->pMeta, pHTask); } else if ((taskLevel == TASK_LEVEL__SOURCE) && pTask->info.hasAggTasks) { code = continueDispatchRecalculateStart((SStreamDataBlock*)pInput, pTask); + pInput = NULL; } } @@ -1019,11 +1020,13 @@ static int32_t doStreamExecTask(SStreamTask* pTask) { double el = (taosGetTimestampMs() - st) / 1000.0; if (el > 2.0) { // elapsed more than 5 sec, not occupy the CPU anymore - stDebug("s-task:%s occupy more than 5.0s, release the exec threads and idle for 500ms", id); + stDebug("s-task:%s occupy more than 2.0s, release the exec threads and idle for 500ms", id); streamTaskSetIdleInfo(pTask, 500); return code; } } + + } // the task may be set dropping/stopping, while it is still in the task queue, therefore, the sched-status can not diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 21f8a432ae..b4fa3a29ed 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -1394,13 +1394,23 @@ void streamMetaUpdateStageRole(SStreamMeta* pMeta, int64_t stage, bool isLeader) pMeta->role = (isLeader) ? 
NODE_ROLE_LEADER : NODE_ROLE_FOLLOWER; if (!isLeader) { streamMetaResetStartInfo(&pMeta->startInfo, pMeta->vgId); + } else { // wait for nodeEp update when switching from follower to leader + if (prevStage == NODE_ROLE_FOLLOWER) { + pMeta->startInfo.tasksWillRestart = 1; + } } streamMetaWUnLock(pMeta); if (isLeader) { - stInfo("vgId:%d update meta stage:%" PRId64 ", prev:%" PRId64 " leader:%d, start to send Hb, rid:%" PRId64, - pMeta->vgId, stage, prevStage, isLeader, pMeta->rid); + if (prevStage == NODE_ROLE_FOLLOWER) { + stInfo("vgId:%d update meta stage:%" PRId64 ", prev:%" PRId64 " leader:%d, start to send Hb, rid:%" PRId64 + " restart after nodeEp being updated", + pMeta->vgId, stage, prevStage, isLeader, pMeta->rid); + } else { + stInfo("vgId:%d update meta stage:%" PRId64 ", prev:%" PRId64 " leader:%d, start to send Hb, rid:%" PRId64, + pMeta->vgId, stage, prevStage, isLeader, pMeta->rid); + } streamMetaStartHb(pMeta); } else { stInfo("vgId:%d update meta stage:%" PRId64 " prev:%" PRId64 " leader:%d sendMsg beforeClosing:%d", pMeta->vgId, diff --git a/source/libs/stream/src/streamSessionState.c b/source/libs/stream/src/streamSessionState.c index e4db1101c1..cbdf2e3c43 100644 --- a/source/libs/stream/src/streamSessionState.c +++ b/source/libs/stream/src/streamSessionState.c @@ -151,10 +151,16 @@ SRowBuffPos* createSessionWinBuff(SStreamFileState* pFileState, SSessionKey* pKe memcpy(pNewPos->pKey, pKey, sizeof(SSessionKey)); pNewPos->needFree = true; pNewPos->beFlushed = true; + int32_t len = getRowStateRowSize(pFileState); if (p) { - memcpy(pNewPos->pRowBuff, p, *pVLen); + if (*pVLen > len) { + qError("[InternalERR] read key:[skey:%"PRId64 ",ekey:%"PRId64 ",groupId:%"PRIu64 "],session window buffer is too small, *pVLen:%d, len:%d", pKey->win.skey, pKey->win.ekey, pKey->groupId, *pVLen, len); + code = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR; + QUERY_CHECK_CODE(code, lino, _end); + } else { + memcpy(pNewPos->pRowBuff, p, *pVLen); + } } else { - int32_t len = getRowStateRowSize(pFileState); memset(pNewPos->pRowBuff, 0, len); } @@ -1283,9 +1289,7 @@ int32_t mergeAndSaveScanRange(STableTsDataState* pTsDataState, STimeWindow* pWin rangeKey.pUIds = tSimpleHashInit(8, hashFn); code = putRangeIdInfo(&rangeKey, gpId, uId); QUERY_CHECK_CODE(code, lino, _end); - if (index < 0) { - index = 0; - } + index++; taosArrayInsert(pRangeArray, index, &rangeKey); _end: @@ -1379,7 +1383,8 @@ int32_t popScanRange(STableTsDataState* pTsDataState, SScanRange* pRange) { SStreamStateCur* pCur = NULL; SArray* pRangeArray = pTsDataState->pScanRanges; if (taosArrayGetSize(pRangeArray) > 0) { - (*pRange) = *(SScanRange*) taosArrayPop(pRangeArray); + (*pRange) = *(SScanRange*) taosArrayGet(pRangeArray, 0); + taosArrayRemove(pRangeArray, 0); goto _end; } diff --git a/source/libs/stream/src/streamStartTask.c b/source/libs/stream/src/streamStartTask.c index 28df04adc8..13cf4a41cc 100644 --- a/source/libs/stream/src/streamStartTask.c +++ b/source/libs/stream/src/streamStartTask.c @@ -465,7 +465,7 @@ int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t tas } int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) { - streamMetaRLock(pMeta); + streamMetaWLock(pMeta); SArray* pTaskList = NULL; int32_t num = taosArrayGetSize(pMeta->pTaskList); @@ -473,7 +473,7 @@ int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) { if (num == 0) { stDebug("vgId:%d stop all %d task(s) completed, elapsed time:0 Sec.", pMeta->vgId, num); - streamMetaRUnLock(pMeta); + streamMetaWUnLock(pMeta); return TSDB_CODE_SUCCESS; } @@ -482,7 +482,7
@@ int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) { // send hb msg to mnode before closing all tasks. int32_t code = streamMetaSendMsgBeforeCloseTasks(pMeta, &pTaskList); if (code != TSDB_CODE_SUCCESS) { - streamMetaRUnLock(pMeta); + streamMetaWUnLock(pMeta); return code; } @@ -509,7 +509,7 @@ int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) { double el = (taosGetTimestampMs() - st) / 1000.0; stDebug("vgId:%d stop all %d task(s) completed, elapsed time:%.2f Sec.", pMeta->vgId, num, el); - streamMetaRUnLock(pMeta); + streamMetaWUnLock(pMeta); return code; } diff --git a/source/libs/stream/src/tstreamFileState.c b/source/libs/stream/src/tstreamFileState.c index 1305eb6bdd..c0cfad48c7 100644 --- a/source/libs/stream/src/tstreamFileState.c +++ b/source/libs/stream/src/tstreamFileState.c @@ -890,7 +890,7 @@ bool hasRowBuff(SStreamFileState* pFileState, const SWinKey* pKey, bool hasLimit if (pIsLast != NULL) { (*pIsLast) = false; } - + SRowBuffPos** pos = tSimpleHashGet(pFileState->rowStateBuff, pKey, sizeof(SWinKey)); if (pos) { res = true; @@ -901,17 +901,19 @@ bool hasRowBuff(SStreamFileState* pFileState, const SWinKey* pKey, bool hasLimit if (ppBuff != NULL) { SArray* pWinStates = (SArray*)(*ppBuff); if (pIsLast != NULL) { - SWinKey* pLastKey = (SWinKey*) taosArrayGetLast(pWinStates); + SWinKey* pLastKey = (SWinKey*)taosArrayGetLast(pWinStates); *pIsLast = (winKeyCmprImpl(pKey, pLastKey) == 0); } if (hasLimit && taosArrayGetSize(pWinStates) <= MIN_NUM_OF_SORT_CACHE_WIN) { res = true; } if (qDebugFlag & DEBUG_DEBUG) { - SWinKey* fistKey = (SWinKey*)taosArrayGet(pWinStates, 0); - qDebug("===stream===check window state. buff min ts:%" PRId64 ",groupId:%" PRIu64 ".key ts:%" PRId64 - ",groupId:%" PRIu64, - fistKey->ts, fistKey->groupId, pKey->ts, pKey->groupId); + if (taosArrayGetSize(pWinStates) > 0) { + SWinKey* fistKey = (SWinKey*)taosArrayGet(pWinStates, 0); + qDebug("===stream===check window state. 
buff min ts:%" PRId64 ",groupId:%" PRIu64 ".key ts:%" PRId64 + ",groupId:%" PRIu64, + fistKey->ts, fistKey->groupId, pKey->ts, pKey->groupId); + } } } else { res = true; @@ -1095,6 +1097,7 @@ int32_t recoverSession(SStreamFileState* pFileState, int64_t ckId) { if (vlen != pFileState->rowSize) { code = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR; + qError("[InternalERR] read key:[skey:%"PRId64 ",ekey:%"PRId64 ",groupId:%"PRIu64 "],vlen:%d, rowSize:%d", key.win.skey, key.win.ekey, key.groupId, vlen, pFileState->rowSize); QUERY_CHECK_CODE(code, lino, _end); } diff --git a/source/os/CMakeLists.txt b/source/os/CMakeLists.txt index 01103e7bd0..c565b7528d 100644 --- a/source/os/CMakeLists.txt +++ b/source/os/CMakeLists.txt @@ -85,6 +85,7 @@ else() ) endif() + if(JEMALLOC_ENABLED) add_dependencies(os jemalloc) endif() @@ -96,3 +97,4 @@ endif() if(${BUILD_TEST}) add_subdirectory(test) endif(${BUILD_TEST}) + diff --git a/source/os/src/osDir.c b/source/os/src/osDir.c index cf3b84d24e..b10e0c090a 100644 --- a/source/os/src/osDir.c +++ b/source/os/src/osDir.c @@ -58,7 +58,7 @@ int32_t wordexp(char *words, wordexp_t *pwordexp, int32_t flags) { void wordfree(wordexp_t *pwordexp) {} #elif defined(DARWIN) - +#include #include #include #include @@ -77,6 +77,7 @@ typedef struct TdDir { #else +#include #include #include #include @@ -372,8 +373,9 @@ int32_t taosExpandDir(const char *dirname, char *outname, int32_t maxlen) { int32_t taosRealPath(char *dirname, char *realPath, int32_t maxlen) { OS_PARAM_CHECK(dirname); + #ifndef TD_ASTRA - char tmp[PATH_MAX] = {0}; + char tmp[PATH_MAX + 1] = {0}; #ifdef WINDOWS if (_fullpath(tmp, dirname, maxlen) != NULL) { #else @@ -602,6 +604,24 @@ void taosGetCwd(char *buf, int32_t len) { #endif } +int32_t taosAppPath(char *path, int32_t maxLen) { + int32_t ret = 0; + +#ifdef WINDOWS + ret = GetModuleFileName(NULL, path, maxLen - 1); +#elif defined(DARWIN) + ret = _NSGetExecutablePath(path, &maxLen) ; +#else + ret = readlink("/proc/self/exe", path, maxLen - 1); +#endif + + if (ret >= 0) { + ret = (taosDirName(path) == NULL) ? 
-1 : 0; + } + + return ret; +} + int32_t taosGetDirSize(const char *path, int64_t *size) { int32_t code = 0; char fullPath[PATH_MAX + 100] = {0}; @@ -638,3 +658,54 @@ _OVER: TAOS_UNUSED(taosCloseDir(&pDir)); return code; } + + +void* taosLoadDll(const char* fileName) { +#if defined(WINDOWS) + void* handle = LoadLibraryA(fileName); +#else + void* handle = dlopen(fileName, RTLD_LAZY); +#endif + + if (handle == NULL) { + if (errno != 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + } else { + terrno = TSDB_CODE_DLL_NOT_LOAD; + } + } + + return handle; +} + +void taosCloseDll(void* handle) { + if (handle == NULL) return; + +#if defined(WINDOWS) + FreeLibrary((HMODULE)handle); +#else + if (dlclose(handle) != 0 && errno != 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + } +#endif +} + +void* taosLoadDllFunc(void* handle, const char* funcName) { + if (handle == NULL) return NULL; + +#if defined(WINDOWS) + void *fptr = GetProcAddress((HMODULE)handle, funcName); +#else + void *fptr = dlsym(handle, funcName); +#endif + + if (fptr == NULL) { + if (errno != 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + } else { + terrno = TSDB_CODE_DLL_FUNC_NOT_LOAD; + } + } + + return fptr; +} diff --git a/source/os/src/osSystem.c b/source/os/src/osSystem.c index 38114a8deb..837a4791f6 100644 --- a/source/os/src/osSystem.c +++ b/source/os/src/osSystem.c @@ -88,38 +88,6 @@ struct termios oldtio; typedef struct FILE TdCmd; -#ifdef BUILD_NO_CALL -void* taosLoadDll(const char* filename) { -#if defined(WINDOWS) - return NULL; -#elif defined(_TD_DARWIN_64) - return NULL; -#else - void* handle = dlopen(filename, RTLD_LAZY); - if (!handle) { - // printf("load dll:%s failed, error:%s", filename, dlerror()); - return NULL; - } - - // printf("dll %s loaded", filename); - - return handle; -#endif -} - -void taosCloseDll(void* handle) { -#if defined(WINDOWS) - return; -#elif defined(_TD_DARWIN_64) - return; -#else - if (handle) { - dlclose(handle); - } -#endif -} -#endif - int32_t taosSetConsoleEcho(bool on) { #if defined(WINDOWS) HANDLE hStdin = GetStdHandle(STD_INPUT_HANDLE); diff --git a/source/util/src/terror.c b/source/util/src/terror.c index eb80708b86..0e7d1bcbdd 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -76,7 +76,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_REF_ID_REMOVED, "Ref ID is removed") TAOS_DEFINE_ERROR(TSDB_CODE_REF_INVALID_ID, "Invalid Ref ID") TAOS_DEFINE_ERROR(TSDB_CODE_REF_ALREADY_EXIST, "Ref is already there") TAOS_DEFINE_ERROR(TSDB_CODE_REF_NOT_EXIST, "Ref is not there") - +TAOS_DEFINE_ERROR(TSDB_CODE_DLL_NOT_LOAD, "Driver was not loaded") +TAOS_DEFINE_ERROR(TSDB_CODE_DLL_FUNC_NOT_LOAD, "Function was not loaded from the driver") TAOS_DEFINE_ERROR(TSDB_CODE_APP_ERROR, "Unexpected generic error") TAOS_DEFINE_ERROR(TSDB_CODE_ACTION_IN_PROGRESS, "Action in progress") TAOS_DEFINE_ERROR(TSDB_CODE_OUT_OF_RANGE, "Out of range") @@ -377,6 +378,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_ANA_BUF_INVALID_TYPE, "Analysis invalid buffe TAOS_DEFINE_ERROR(TSDB_CODE_ANA_ANODE_RETURN_ERROR, "Analysis failed since anode return error") TAOS_DEFINE_ERROR(TSDB_CODE_ANA_ANODE_TOO_MANY_ROWS, "Analysis failed since too many input rows for anode") TAOS_DEFINE_ERROR(TSDB_CODE_ANA_WN_DATA, "white-noise data not processed") +TAOS_DEFINE_ERROR(TSDB_CODE_ANA_INTERNAL_ERROR, "tdgpt internal error, not processed") // mnode-sma TAOS_DEFINE_ERROR(TSDB_CODE_MND_SMA_ALREADY_EXIST, "SMA already exists") @@ -909,6 +911,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VTABLE_SCAN_INVALID_DOWNSTREAM, "Virtual table scan
TAOS_DEFINE_ERROR(TSDB_CODE_VTABLE_PRIMTS_HAS_REF, "Virtual table prim timestamp column should not has ref column") TAOS_DEFINE_ERROR(TSDB_CODE_VTABLE_NOT_VIRTUAL_SUPER_TABLE, "Create virtual child table must use virtual super table") TAOS_DEFINE_ERROR(TSDB_CODE_VTABLE_NOT_SUPPORT_DATA_TYPE, "Virtual table not support decimal type") +TAOS_DEFINE_ERROR(TSDB_CODE_VTABLE_NOT_SUPPORT_STMT, "Virtual table not support in STMT query and STMT insert") +TAOS_DEFINE_ERROR(TSDB_CODE_VTABLE_NOT_SUPPORT_TOPIC, "Virtual table not support in topic") +TAOS_DEFINE_ERROR(TSDB_CODE_VTABLE_NOT_SUPPORT_CROSS_DB, "Virtual super table query not support origin table from different databases") #ifdef TAOS_ERROR_C }; #endif diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c index 3bc984aabe..367b4431a3 100644 --- a/source/util/src/tlog.c +++ b/source/util/src/tlog.c @@ -1374,6 +1374,10 @@ static void checkWriteCrashLogToFileInNewThead() { } taosLogCrashInfo(gCrashBasicInfo.nodeType, pMsg, msgLen, gCrashBasicInfo.signum, gCrashBasicInfo.sigInfo); setCrashWriterStatus(CRASH_LOG_WRITER_INIT); + int32_t code = tsem_post(&gCrashBasicInfo.sem); + if (code != 0 ) { + uError("failed to post sem for crashBasicInfo, code:%d", code); + } TAOS_UNUSED(tsem_post(&gCrashBasicInfo.sem)); } } diff --git a/source/util/test/errorCodeTable.ini b/source/util/test/errorCodeTable.ini index f66948504d..c135b677bf 100644 --- a/source/util/test/errorCodeTable.ini +++ b/source/util/test/errorCodeTable.ini @@ -17,6 +17,8 @@ TSDB_CODE_REF_ID_REMOVED = 0x80000107 TSDB_CODE_REF_INVALID_ID = 0x80000108 TSDB_CODE_REF_ALREADY_EXIST = 0x80000109 TSDB_CODE_REF_NOT_EXIST = 0x8000010A +TSDB_CODE_DLL_NOT_LOAD = 0x8000010B +TSDB_CODE_DLL_FUNC_NOT_LOAD = 0x8000010C TSDB_CODE_APP_ERROR = 0x80000110 TSDB_CODE_ACTION_IN_PROGRESS = 0x80000111 TSDB_CODE_OUT_OF_RANGE = 0x80000112 diff --git a/tests/army/cmdline/taosCli.py b/tests/army/cmdline/taosCli.py index f57f594854..2624248a28 100644 --- a/tests/army/cmdline/taosCli.py +++ b/tests/army/cmdline/taosCli.py @@ -57,31 +57,29 @@ class TDTestCase(TBase): def checkResultWithMode(self, db, stb, arg): result = "Query OK, 10 row(s)" mode = arg[0] - rowh = arg[1] - rowv = arg[2] - idx = arg[3] - idxv = arg[4] # use db - if mode != "-R": - rlist = self.taos(f'{mode} -s "show databases;use {db};show databases;" ') - self.checkListString(rlist, "Database changed") + rlist = self.taos(f'{mode} -s "show databases;use {db};show databases;" ') + self.checkListString(rlist, "Database changed") # hori - cmd = f'{mode} -s "select * from {db}.{stb} limit 10' + cmd = f'{mode} -s "select ts,ic from {db}.{stb} limit 10' rlist = self.taos(cmd + '"') - # line count - self.checkSame(len(rlist), rowh) - # last line - self.checkSame(rlist[idx][:len(result)], result) + results = [ + "2022-10-01 00:00:09.000 |", + result + ] + self.checkManyString(rlist, results) # vec rlist = self.taos(cmd + '\G"') - # line count - self.checkSame(len(rlist), rowv) - self.checkSame(rlist[idxv], "*************************** 10.row ***************************") - # last line - self.checkSame(rlist[idx][:len(result)], result) + results = [ + "****** 10.row *******", + "ts: 2022-10-01 00:00:09.000", + result + ] + self.checkManyString(rlist, results) + # -B have some problem need todo self.taos(f'{mode} -B -s "select * from {db}.{stb} where ts < 1"') @@ -89,22 +87,57 @@ class TDTestCase(TBase): # get empty result rlist = self.taos(f'{mode} -r -s "select * from {db}.{stb} where ts < 1"') self.checkListString(rlist, "Query OK, 0 row(s) in set") - + + 
+ def checkDecimalCommon(self, col, value): + rlist = self.taos(f'-s "select {col} from testdec.test"') + self.checkListString(rlist, value) + + outfile = "decimal.csv" + self.taos(f'-s "select {col} from testdec.test>>{outfile}"') + rlist = self.readFileToList(outfile) + self.checkListString(rlist, value) + self.deleteFile(outfile) + + + def checkDecimal(self): + # prepare data + self.taos(f'-s "drop database if exists testdec"') + self.taos(f'-s "create database if not exists testdec"') + self.taos(f'-s "create table if not exists testdec.test(ts timestamp, dec64 decimal(10,6), dec128 decimal(24,10)) tags (note nchar(20))"') + self.taos(f'-s "create table testdec.d0 using testdec.test(note) tags(\'test\')"') + self.taos(f'-s "insert into testdec.d0 values(now(), \'9876.123456\', \'123456789012.0987654321\')"') + + # check decimal64 + self.checkDecimalCommon("dec64", "9876.123456") + + # check decimal128 + self.checkDecimalCommon("dec128", "123456789012.0987654321") + + self.taos(f'-s "drop database if exists testdec"') + + def checkBasic(self): tdLog.info(f"check describe show full.") # insert json = "cmdline/json/taosCli.json" db, stb, childCount, insertRows = self.insertBenchJson(json) + # set + self.db = db + self.stb = stb + self.insert_rows = insertRows + self.childtable_count = childCount # native restful websock test args = [ - ["", 18, 346, -2, 310], - ["-R", 22, 350, -3, 313], - ["-T 40 -E http://localhost:6041", 21, 349, -3, 312] + ["-Z native"], + ["-T 40 -E http://localhost:6041"] ] for arg in args: self.checkResultWithMode(db, stb, arg) + + self.checkDecimal() def checkDumpInOutMode(self, source, arg, db, insertRows): @@ -133,8 +166,7 @@ class TDTestCase(TBase): def checkDumpInOut(self): args = [ - ["", 18], - ["-R ", 22], + ["", 18], ["-E http://localhost:6041", 21] ] @@ -150,10 +182,18 @@ class TDTestCase(TBase): rlist2 = self.taos("--version") self.checkSame(rlist1, rlist2) - self.checkSame(len(rlist1), 5) + if len(rlist1) < 4: + tdLog.exit(f"version lines less than 4. 
{rlist1}") if len(rlist1[2]) < 42: tdLog.exit("git commit id length is invalid: " + rlist1[2]) + + keys = [ + "version:", + "git:", + "build:" + ] + self.checkManyString(rlist1, keys) def checkHelp(self): @@ -174,20 +214,13 @@ class TDTestCase(TBase): def checkCommand(self): # check coredump - - # o logpath - char = 'a' - lname =f'-o "/root/log/{char * 1000}/" -s "quit;"' queryOK = "Query OK" - # invalid input check + # support Both args = [ - [lname, "failed to create log at"], ['-uroot -w 40 -ptaosdata -c /root/taos/ -s"show databases"', queryOK], - ['-o "./current/log/files/" -s"show databases;"', queryOK], - ['-a ""', "Invalid auth"], + ['-o "./current/log/files/" -h localhost -uroot -ptaosdata -s"show databases;"', queryOK], ['-s "quit;"', "Welcome to the TDengine Command Line Interface"], - ['-a "abc"', "[0x80000357]"], ['-h "" -s "show dnodes;"', "Invalid host"], ['-u "" -s "show dnodes;"', "Invalid user"], ['-P "" -s "show dnodes;"', "Invalid port"], @@ -195,7 +228,7 @@ class TDTestCase(TBase): ['-p"abc" -s "show dnodes;"', "[0x80000357]"], ['-d "abc" -s "show dnodes;"', "[0x80000388]"], ['-N 0 -s "show dnodes;"', "Invalid pktNum"], - ['-N 10 -s "show dnodes;"', queryOK], + ['-N 10 -h 127.0.0.1 -s "show dnodes;"', queryOK], ['-w 0 -s "show dnodes;"', "Invalid displayWidth"], ['-w 10 -s "show dnodes;"', queryOK], ['-W 10 -s "show dnodes;"', None], @@ -209,10 +242,133 @@ class TDTestCase(TBase): ['-uroot -p < cmdline/data/pwd.txt -s "show dnodes;"', queryOK], ] + modes = ["-Z 0","-Z 1"] + for mode in modes: + for arg in args: + rlist = self.taos(mode + " " + arg[0]) + if arg[1] != None: + self.checkListString(rlist, arg[1]) + + # + # support native only + # + + # o logpath + char = 'a' + lname =f'-o "/root/log/{char * 1000}/" -s "quit;"' + + args = [ + [lname, "failed to create log at"], + ['-a ""', "Invalid auth"], + ['-a "abc"', "[0x80000357]"], + ] for arg in args: - rlist = self.taos(arg[0]) + rlist = self.taos("Z 0 " + arg[0]) if arg[1] != None: - self.checkListString(rlist, arg[1]) + self.checkListString(rlist, arg[1]) + + # expect cmd > json > evn + def checkPriority(self): + # + # cmd & env + # + + # env 6043 - invalid + os.environ['TDENGINE_CLOUD_DSN'] = "http://127.0.0.1:6043" + # cmd 6041 - valid + cmd = f"-X http://127.0.0.1:6041 -s 'select ts from test.meters'" + rlist = self.taos(cmd, checkRun = True) + results = [ + "WebSocket Client Version", + "2022-10-01 00:01:39.000", + "Query OK, 200 row(s) in set" + ] + self.checkManyString(rlist, results) + + # + # env + # + + # cloud + os.environ['TDENGINE_CLOUD_DSN'] = "http://127.0.0.1:6041" + cmd = f"-s 'select ts from test.meters'" + rlist = self.taos(cmd, checkRun = True) + self.checkManyString(rlist, results) + # local + os.environ['TDENGINE_CLOUD_DSN'] = "" + os.environ['TDENGINE_DSN'] = "http://127.0.0.1:6041" + cmd = f"-s 'select ts from test.meters'" + rlist = self.taos(cmd, checkRun = True) + self.checkManyString(rlist, results) + # local & cloud -> cloud first + os.environ['TDENGINE_CLOUD_DSN'] = "http://127.0.0.1:6041" # valid + os.environ['TDENGINE_DSN'] = "http://127.0.0.1:6042" # invalid + cmd = f"-s 'select ts from test.meters'" + rlist = self.taos(cmd, checkRun = True) + self.checkManyString(rlist, results) + + + # + # cmd + # + + os.environ['TDENGINE_CLOUD_DSN'] = "" + os.environ['TDENGINE_DSN'] = "" + cmd = f"-X http://127.0.0.1:6041 -s 'select ts from test.meters'" + rlist = self.taos(cmd, checkRun = True) + self.checkManyString(rlist, results) + + + def checkExceptCmd(self): + # exe + taos = 
frame.etool.taosFile() + # option + options = [ + "-Z native -X http://127.0.0.1:6041", + "-Z 100", + "-Z abcdefg", + "-X", + "-X ", + "-X 127.0.0.1:6041", + "-X https://gw.cloud.taosdata.com?token617ffdf...", + "-Z 1 -X https://gw.cloud.taosdata.com?token=617ffdf...", + "-X http://127.0.0.1:6042" + ] + + # do check + for option in options: + self.checkExcept(taos + " -s 'show dnodes;' " + option) + + def checkModeVersion(self): + # results + results = [ + "WebSocket Client Version", + "2022-10-01 00:01:39.000", + "Query OK, 100 row(s) in set" + ] + + # default + cmd = f"-s 'select ts from test.d0'" + rlist = self.taos(cmd, checkRun = True) + self.checkManyString(rlist, results) + # websocket + cmd = f"-Z 1 -s 'select ts from test.d0'" + rlist = self.taos(cmd, checkRun = True) + self.checkManyString(rlist, results) + + # native + cmd = f"-Z 0 -s 'select ts from test.d0'" + results[0] = "Native Client Version" + rlist = self.taos(cmd, checkRun = True) + self.checkManyString(rlist, results) + + def checkConnMode(self): + # priority + self.checkPriority() + # except + self.checkExceptCmd() + # mode version + self.checkModeVersion() # password def checkPassword(self): @@ -257,6 +413,10 @@ class TDTestCase(TBase): # check data in/out self.checkDumpInOut() + + # check conn mode + self.checkConnMode() + # max password self.checkPassword() diff --git a/tests/army/create/create_stb_keep.py b/tests/army/create/create_stb_keep.py index c6c2807024..87a0dd0b8e 100644 --- a/tests/army/create/create_stb_keep.py +++ b/tests/army/create/create_stb_keep.py @@ -100,6 +100,16 @@ class TDTestCase(TBase): tdSql.error("CREATE TABLE ntb (ts TIMESTAMP, a INT, b FLOAT, c BINARY(10)) KEEP 1d",expectErrInfo="KEEP parameter is not allowed when creating normal table") tdSql.execute("CREATE TABLE ntb (ts TIMESTAMP, a INT, b FLOAT, c BINARY(10))") tdSql.error("ALTER TABLE ntb keep 1d",expectErrInfo="only super table can alter keep duration") + + def chceck_stb_keep_show_create(self): + tdLog.info(f"check stb keep show create") + tdSql.execute("USE test") + tdSql.execute("CREATE STABLE stb (ts TIMESTAMP, a INT, b FLOAT, c BINARY(10)) TAGS (e_id INT) KEEP 10d") + tdSql.query("SHOW CREATE TABLE stb") + tdSql.checkData(0, 1, "CREATE STABLE `stb` (`ts` TIMESTAMP ENCODE 'delta-i' COMPRESS 'lz4' LEVEL 'medium', `a` INT ENCODE 'simple8b' COMPRESS 'lz4' LEVEL 'medium', `b` FLOAT ENCODE 'delta-d' COMPRESS 'lz4' LEVEL 'medium', `c` VARCHAR(10) ENCODE 'disabled' COMPRESS 'zstd' LEVEL 'medium') TAGS (`e_id` INT) KEEP 14400m") + tdSql.execute("ALTER TABLE stb KEEP 5d") + tdSql.query("SHOW CREATE TABLE stb") + tdSql.checkData(0, 1, "CREATE STABLE `stb` (`ts` TIMESTAMP ENCODE 'delta-i' COMPRESS 'lz4' LEVEL 'medium', `a` INT ENCODE 'simple8b' COMPRESS 'lz4' LEVEL 'medium', `b` FLOAT ENCODE 'delta-d' COMPRESS 'lz4' LEVEL 'medium', `c` VARCHAR(10) ENCODE 'disabled' COMPRESS 'zstd' LEVEL 'medium') TAGS (`e_id` INT) KEEP 7200m") # run def run(self): @@ -126,6 +136,9 @@ class TDTestCase(TBase): # check normal table with keep self.check_normal_table_with_keep() + # check stb keep show create + self.chceck_stb_keep_show_create() + tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/army/frame/caseBase.py b/tests/army/frame/caseBase.py index a59c9a4441..b8345c3257 100644 --- a/tests/army/frame/caseBase.py +++ b/tests/army/frame/caseBase.py @@ -38,6 +38,12 @@ class TBase: # init def init(self, conn, logSql, replicaVar=1, db="db", stb="stb", checkColName="ic"): + + # init + 
self.childtable_count = 0 + self.insert_rows = 0 + self.timestamp_step = 0 + # save param self.replicaVar = int(replicaVar) tdSql.init(conn.cursor(), True) @@ -54,12 +60,12 @@ class TBase: self.stb = stb # sql - self.sqlSum = f"select sum({checkColName}) from {self.stb}" - self.sqlMax = f"select max({checkColName}) from {self.stb}" - self.sqlMin = f"select min({checkColName}) from {self.stb}" - self.sqlAvg = f"select avg({checkColName}) from {self.stb}" - self.sqlFirst = f"select first(ts) from {self.stb}" - self.sqlLast = f"select last(ts) from {self.stb}" + self.sqlSum = f"select sum({checkColName}) from {db}.{self.stb}" + self.sqlMax = f"select max({checkColName}) from {db}.{self.stb}" + self.sqlMin = f"select min({checkColName}) from {db}.{self.stb}" + self.sqlAvg = f"select avg({checkColName}) from {db}.{self.stb}" + self.sqlFirst = f"select first(ts) from {db}.{self.stb}" + self.sqlLast = f"select last(ts) from {db}.{self.stb}" # stop def stop(self): @@ -140,15 +146,15 @@ class TBase: # basic def checkInsertCorrect(self, difCnt = 0): # check count - sql = f"select count(*) from {self.stb}" + sql = f"select count(*) from {self.db}.{self.stb}" tdSql.checkAgg(sql, self.childtable_count * self.insert_rows) # check child table count - sql = f" select count(*) from (select count(*) as cnt , tbname from {self.stb} group by tbname) where cnt = {self.insert_rows} " + sql = f" select count(*) from (select count(*) as cnt , tbname from {self.db}.{self.stb} group by tbname) where cnt = {self.insert_rows} " tdSql.checkAgg(sql, self.childtable_count) # check step - sql = f"select count(*) from (select diff(ts) as dif from {self.stb} partition by tbname order by ts desc) where dif != {self.timestamp_step}" + sql = f"select count(*) from (select diff(ts) as dif from {self.db}.{self.stb} partition by tbname order by ts desc) where dif != {self.timestamp_step}" tdSql.checkAgg(sql, difCnt) # save agg result @@ -172,27 +178,27 @@ class TBase: # self check def checkConsistency(self, col): # top with max - sql = f"select max({col}) from {self.stb}" + sql = f"select max({col}) from {self.db}.{self.stb}" expect = tdSql.getFirstValue(sql) - sql = f"select top({col}, 5) from {self.stb}" + sql = f"select top({col}, 5) from {self.db}.{self.stb}" tdSql.checkFirstValue(sql, expect) #bottom with min - sql = f"select min({col}) from {self.stb}" + sql = f"select min({col}) from {self.db}.{self.stb}" expect = tdSql.getFirstValue(sql) - sql = f"select bottom({col}, 5) from {self.stb}" + sql = f"select bottom({col}, 5) from {self.db}.{self.stb}" tdSql.checkFirstValue(sql, expect) # order by asc limit 1 with first - sql = f"select last({col}) from {self.stb}" + sql = f"select last({col}) from {self.db}.{self.stb}" expect = tdSql.getFirstValue(sql) - sql = f"select {col} from {self.stb} order by _c0 desc limit 1" + sql = f"select {col} from {self.db}.{self.stb} order by _c0 desc limit 1" tdSql.checkFirstValue(sql, expect) # order by desc limit 1 with last - sql = f"select first({col}) from {self.stb}" + sql = f"select first({col}) from {self.db}.{self.stb}" expect = tdSql.getFirstValue(sql) - sql = f"select {col} from {self.stb} order by _c0 asc limit 1" + sql = f"select {col} from {self.db}.{self.stb} order by _c0 asc limit 1" tdSql.checkFirstValue(sql, expect) @@ -243,6 +249,17 @@ class TBase: else: tdLog.exit(f"check same failed.
real={real} expect={expect}.") + # check except + def checkExcept(self, command): + try: + code = frame.eos.exe(command, show = True) + if code == 0: + tdLog.exit(f"Failed, not report error cmd:{command}") + else: + tdLog.info(f"Passed, report error code={code} is expect, cmd:{command}") + except: + tdLog.info(f"Passed, catch expect report error for command {command}") + # # get db information # @@ -292,7 +309,8 @@ class TBase: def taosdump(self, command, show = True, checkRun = True, retFail = True): return frame.etool.runBinFile("taosdump", command, show, checkRun, retFail) - + def benchmark(self, command, show = True, checkRun = True, retFail = True): + return frame.etool.runBinFile("taosBenchmark", command, show, checkRun, retFail) # # util # @@ -335,15 +353,22 @@ class TBase: # check list have str - def checkListString(self, vlist, s): - for i in range(len(vlist)): - if vlist[i].find(s) != -1: + def checkListString(self, rlist, s): + if s is None: + return + for i in range(len(rlist)): + if rlist[i].find(s) != -1: # found - tdLog.info(f'found "{s}" on index {i} , line={vlist[i]}') + tdLog.info(f'found "{s}" on index {i} , line={rlist[i]}') return # not found - tdLog.exit(f'faild, not found "{s}" on list:{vlist}') + tdLog.exit(f'faild, not found "{s}" on list:{rlist}') + + # check many string + def checkManyString(self, rlist, manys): + for s in manys: + self.checkListString(rlist, s) # # str util @@ -480,6 +505,23 @@ class TBase: return rlist + # cmd + def benchmarkCmd(self, options, childCnt, insertRows, timeStep, results): + # set + self.childtable_count = childCnt + self.insert_rows = insertRows + self.timestamp_step = timeStep + + # run + cmd = f"{options} -t {childCnt} -n {insertRows} -S {timeStep} -y" + rlist = self.benchmark(cmd) + for result in results: + self.checkListString(rlist, result) + + # check correct + self.checkInsertCorrect() + + # generate new json file def genNewJson(self, jsonFile, modifyFunc=None): try: @@ -524,3 +566,17 @@ class TBase: os.remove(filename) except Exception as err: raise Exception(err) + + # read file to list + def readFileToList(self, filePath): + try: + with open(filePath, 'r', encoding='utf-8') as file: + lines = file.readlines() + # Strip trailing newline characters + return [line.rstrip('\n') for line in lines] + except FileNotFoundError: + tdLog.info(f"Error: File not found {filePath}") + return [] + except Exception as e: + tdLog.info(f"Error reading file: {e}") + return [] diff --git a/tests/army/frame/etool.py b/tests/army/frame/etool.py index 4ad3efb036..041a43d4df 100644 --- a/tests/army/frame/etool.py +++ b/tests/army/frame/etool.py @@ -23,6 +23,14 @@ import frame.epath import frame.eos from frame.log import * + +# taos +def taosFile(): + bmFile = frame.epath.binFile("taos") + if frame.eos.isWin(): + bmFile += ".exe" + return bmFile + # taosdump def taosDumpFile(): bmFile = frame.epath.binFile("taosdump") diff --git a/tests/army/pytest.sh b/tests/army/pytest.sh index bae0fdf278..c267b74bc3 100755 --- a/tests/army/pytest.sh +++ b/tests/army/pytest.sh @@ -89,6 +89,10 @@ else export LD_PRELOAD="$(realpath "$(gcc -print-file-name=libasan.so)") $(realpath "$(gcc -print-file-name=libstdc++.so)")" echo "Preload AsanSo:" $? + export ASAN_OPTIONS=detect_odr_violation=0 + echo "forbid check ODR violation." 
+ + $* -a 2>$AsanFile unset LD_PRELOAD diff --git a/tests/army/stream/test_stream_vtable.py b/tests/army/stream/test_stream_vtable.py new file mode 100644 index 0000000000..c0c9d4bd1a --- /dev/null +++ b/tests/army/stream/test_stream_vtable.py @@ -0,0 +1,280 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from frame import etool +from frame.etool import * +from frame.log import * +from frame.cases import * +from frame.sql import * +from frame.caseBase import * +from frame.common import * +import time + +class TDTestCase(TBase): + + def create_tables(self): + tdLog.info("create tables") + + tdSql.execute("drop database if exists test_stream_vtable;") + tdSql.execute("create database test_stream_vtable vgroups 8;") + tdSql.execute("use test_stream_vtable;") + + tdLog.info(f"create org super table.") + tdSql.execute("select database();") + tdSql.execute(f"CREATE STABLE `vtb_org_stb` (" + "ts timestamp, " + "u_tinyint_col tinyint unsigned, " + "u_smallint_col smallint unsigned, " + "u_int_col int unsigned, " + "u_bigint_col bigint unsigned, " + "tinyint_col tinyint, " + "smallint_col smallint, " + "int_col int, " + "bigint_col bigint, " + "float_col float, " + "double_col double, " + "bool_col bool, " + "binary_16_col binary(16)," + "binary_32_col binary(32)," + "nchar_16_col nchar(16)," + "nchar_32_col nchar(32)" + ") TAGS (" + "int_tag int," + "bool_tag bool," + "float_tag float," + "double_tag double)") + + tdLog.info(f"create org child table.") + for i in range(3): + tdSql.execute(f"CREATE TABLE `vtb_org_child_{i}` USING `vtb_org_stb` TAGS ({i}, false, {i}, {i});") + + tdLog.info(f"create virtual normal table.") + + tdSql.execute(f"CREATE VTABLE `vtb_virtual_ntb_full` (" + "ts timestamp, " + "u_tinyint_col tinyint unsigned from vtb_org_child_0.u_tinyint_col, " + "u_smallint_col smallint unsigned from vtb_org_child_1.u_smallint_col, " + "u_int_col int unsigned from vtb_org_child_2.u_int_col, " + "u_bigint_col bigint unsigned from vtb_org_child_0.u_bigint_col, " + "tinyint_col tinyint from vtb_org_child_1.tinyint_col, " + "smallint_col smallint from vtb_org_child_2.smallint_col, " + "int_col int from vtb_org_child_0.int_col, " + "bigint_col bigint from vtb_org_child_1.bigint_col, " + "float_col float from vtb_org_child_2.float_col, " + "double_col double from vtb_org_child_0.double_col, " + "bool_col bool from vtb_org_child_1.bool_col, " + "binary_16_col binary(16) from vtb_org_child_2.binary_16_col," + "binary_32_col binary(32) from vtb_org_child_0.binary_32_col," + "nchar_16_col nchar(16) from vtb_org_child_1.nchar_16_col," + "nchar_32_col nchar(32) from vtb_org_child_2.nchar_32_col)") + + tdSql.execute(f"CREATE VTABLE `vtb_virtual_ntb_half_full` (" + "ts timestamp, " + "u_tinyint_col tinyint unsigned from vtb_org_child_0.u_tinyint_col, " + "u_smallint_col smallint unsigned from vtb_org_child_1.u_smallint_col, " + "u_int_col int unsigned from vtb_org_child_2.u_int_col, " + "u_bigint_col bigint unsigned, " + "tinyint_col tinyint, " + "smallint_col smallint, " + "int_col int from vtb_org_child_0.int_col, " + "bigint_col bigint from 
vtb_org_child_1.bigint_col, " + "float_col float from vtb_org_child_2.float_col, " + "double_col double, " + "bool_col bool, " + "binary_16_col binary(16)," + "binary_32_col binary(32) from vtb_org_child_0.binary_32_col," + "nchar_16_col nchar(16) from vtb_org_child_1.nchar_16_col," + "nchar_32_col nchar(32) from vtb_org_child_2.nchar_32_col)") + + tdSql.execute(f"CREATE STABLE `vtb_virtual_stb` (" + "ts timestamp, " + "u_tinyint_col tinyint unsigned, " + "u_smallint_col smallint unsigned, " + "u_int_col int unsigned, " + "u_bigint_col bigint unsigned, " + "tinyint_col tinyint, " + "smallint_col smallint, " + "int_col int, " + "bigint_col bigint, " + "float_col float, " + "double_col double, " + "bool_col bool, " + "binary_16_col binary(16)," + "binary_32_col binary(32)," + "nchar_16_col nchar(16)," + "nchar_32_col nchar(32)" + ") TAGS (" + "int_tag int," + "bool_tag bool," + "float_tag float," + "double_tag double)" + "VIRTUAL 1") + + tdLog.info(f"create virtual child table.") + + tdSql.execute(f"CREATE VTABLE `vtb_virtual_ctb_full` (" + "u_tinyint_col from vtb_org_child_0.u_tinyint_col, " + "u_smallint_col from vtb_org_child_1.u_smallint_col, " + "u_int_col from vtb_org_child_2.u_int_col, " + "u_bigint_col from vtb_org_child_0.u_bigint_col, " + "tinyint_col from vtb_org_child_1.tinyint_col, " + "smallint_col from vtb_org_child_2.smallint_col, " + "int_col from vtb_org_child_0.int_col, " + "bigint_col from vtb_org_child_1.bigint_col, " + "float_col from vtb_org_child_2.float_col, " + "double_col from vtb_org_child_0.double_col, " + "bool_col from vtb_org_child_1.bool_col, " + "binary_16_col from vtb_org_child_2.binary_16_col," + "binary_32_col from vtb_org_child_0.binary_32_col," + "nchar_16_col from vtb_org_child_1.nchar_16_col," + "nchar_32_col from vtb_org_child_2.nchar_32_col)" + "USING `vtb_virtual_stb` TAGS (0, false, 0, 0)") + + tdSql.execute(f"CREATE VTABLE `vtb_virtual_ctb_half_full` (" + "u_tinyint_col from vtb_org_child_0.u_tinyint_col, " + "u_smallint_col from vtb_org_child_1.u_smallint_col, " + "u_int_col from vtb_org_child_2.u_int_col, " + "int_col from vtb_org_child_0.int_col, " + "bigint_col from vtb_org_child_1.bigint_col, " + "float_col from vtb_org_child_2.float_col, " + "binary_32_col from vtb_org_child_0.binary_32_col," + "nchar_16_col from vtb_org_child_1.nchar_16_col," + "nchar_32_col from vtb_org_child_2.nchar_32_col)" + "USING `vtb_virtual_stb` TAGS (1, false, 1, 1)") + + tdSql.execute(f"CREATE VTABLE `vtb_virtual_ctb_empty` " + "USING `vtb_virtual_stb` TAGS (2, false, 2, 2)") + + def create_proj_streams(self): + tdSql.execute(f"CREATE STREAM s_proj_1 TRIGGER AT_ONCE INTO dst_proj_1 AS " + "select * from test_stream_vtable.vtb_virtual_ntb_full;") + tdSql.execute(f"CREATE STREAM s_proj_2 TRIGGER AT_ONCE INTO dst_proj_2 AS " + "select * from test_stream_vtable.vtb_virtual_ntb_half_full;") + tdSql.execute(f"CREATE STREAM s_proj_3 TRIGGER AT_ONCE INTO dst_proj_3 AS " + "select * from test_stream_vtable.vtb_virtual_stb PARTITION BY tbname;") + tdSql.execute(f"CREATE STREAM s_proj_4 TRIGGER AT_ONCE INTO dst_proj_4 AS " + "select * from test_stream_vtable.vtb_virtual_ctb_full;") + tdSql.execute(f"CREATE STREAM s_proj_5 TRIGGER AT_ONCE INTO dst_proj_5 AS " + "select * from test_stream_vtable.vtb_virtual_ctb_half_full;") + + tdSql.execute(f"CREATE STREAM s_proj_6 TRIGGER AT_ONCE INTO dst_proj_6 AS " + "select * from test_stream_vtable.vtb_virtual_ntb_full WHERE u_tinyint_col = 1;") + tdSql.execute(f"CREATE STREAM s_proj_7 TRIGGER AT_ONCE INTO dst_proj_7 AS " + "select * 
from test_stream_vtable.vtb_virtual_ntb_half_full WHERE bool_col = true;") + tdSql.execute(f"CREATE STREAM s_proj_8 TRIGGER AT_ONCE INTO dst_proj_8 AS " + "select * from test_stream_vtable.vtb_virtual_stb WHERE bool_col = true PARTITION BY tbname;") + tdSql.execute(f"CREATE STREAM s_proj_9 TRIGGER AT_ONCE INTO dst_proj_9 AS " + "select * from test_stream_vtable.vtb_virtual_ctb_full WHERE u_tinyint_col = 1;") + tdSql.execute(f"CREATE STREAM s_proj_10 TRIGGER AT_ONCE INTO dst_proj_10 AS " + "select * from test_stream_vtable.vtb_virtual_ctb_half_full WHERE bool_col = true;") + + tdSql.execute(f"CREATE STREAM s_proj_11 TRIGGER AT_ONCE INTO dst_proj_11 AS " + "select ts, cos(u_tinyint_col), u_smallint_col, u_int_col, u_bigint_col from test_stream_vtable.vtb_virtual_ntb_full;") + tdSql.execute(f"CREATE STREAM s_proj_12 TRIGGER AT_ONCE INTO dst_proj_12 AS " + "select ts, cos(u_tinyint_col), u_smallint_col, u_int_col, u_bigint_col from test_stream_vtable.vtb_virtual_ntb_half_full;") + tdSql.execute(f"CREATE STREAM s_proj_13 TRIGGER AT_ONCE INTO dst_proj_13 AS " + "select ts, cos(u_tinyint_col), u_smallint_col, u_int_col, u_bigint_col from test_stream_vtable.vtb_virtual_stb PARTITION BY tbname;") + tdSql.execute(f"CREATE STREAM s_proj_14 TRIGGER AT_ONCE INTO dst_proj_14 AS " + "select ts, cos(u_tinyint_col), u_smallint_col, u_int_col, u_bigint_col from test_stream_vtable.vtb_virtual_ctb_full;") + tdSql.execute(f"CREATE STREAM s_proj_15 TRIGGER AT_ONCE INTO dst_proj_15 AS " + "select ts, cos(u_tinyint_col), u_smallint_col, u_int_col, u_bigint_col from test_stream_vtable.vtb_virtual_ctb_half_full;") + + def create_window_streams(self): + tdSql.execute(f"CREATE STREAM s_interval_1 INTO dst_interval_1 AS " + "select _wstart, _wend, first(u_tinyint_col), last(tinyint_col) from test_stream_vtable.vtb_virtual_ntb_full interval(1s);") + tdSql.execute(f"CREATE STREAM s_interval_2 INTO dst_interval_2 AS " + "select _wstart, _wend, first(u_tinyint_col), last(tinyint_col) from test_stream_vtable.vtb_virtual_ntb_half_full interval(1s) sliding(100a);") + tdSql.execute(f"CREATE STREAM s_interval_3 INTO dst_interval_3 AS " + "select _wstart, _wend, first(u_tinyint_col), last(tinyint_col) from test_stream_vtable.vtb_virtual_stb partition by tbname interval(1s) sliding(200a);") + tdSql.execute(f"CREATE STREAM s_interval_4 INTO dst_interval_4 AS " + "select _wstart, _wend, first(u_tinyint_col), last(tinyint_col) from test_stream_vtable.vtb_virtual_ctb_full interval(1s) sliding(100a);") + tdSql.execute(f"CREATE STREAM s_interval_5 INTO dst_interval_5 AS " + "select _wstart, _wend, first(u_tinyint_col), last(tinyint_col) from test_stream_vtable.vtb_virtual_ctb_half_full interval(1s);") + + tdSql.execute(f"CREATE STREAM s_state_1 INTO dst_state_1 AS " + "select _wstart, _wend, first(u_tinyint_col), last(tinyint_col) from test_stream_vtable.vtb_virtual_ntb_full state_window(bool_col);") + tdSql.execute(f"CREATE STREAM s_state_2 INTO dst_state_2 AS " + "select _wstart, _wend, first(u_tinyint_col), last(tinyint_col) from test_stream_vtable.vtb_virtual_ntb_half_full state_window(bool_col);") + tdSql.execute(f"CREATE STREAM s_state_3 INTO dst_state_3 AS " + "select _wstart, _wend, first(u_tinyint_col), last(tinyint_col) from test_stream_vtable.vtb_virtual_stb partition by tbname state_window(bool_col);") + tdSql.execute(f"CREATE STREAM s_state_4 INTO dst_state_4 AS " + "select _wstart, _wend, first(u_tinyint_col), last(tinyint_col) from test_stream_vtable.vtb_virtual_ctb_full state_window(bool_col);") + 
tdSql.execute(f"CREATE STREAM s_state_5 INTO dst_state_5 AS " + "select _wstart, _wend, first(u_tinyint_col), last(tinyint_col) from test_stream_vtable.vtb_virtual_ctb_half_full state_window(bool_col);") + + tdSql.execute(f"CREATE STREAM s_session_1 INTO dst_session_1 AS " + "select _wstart, _wend, first(u_tinyint_col), last(tinyint_col) from test_stream_vtable.vtb_virtual_ntb_full session(ts, 10a);") + tdSql.execute(f"CREATE STREAM s_session_2 INTO dst_session_2 AS " + "select _wstart, _wend, first(u_tinyint_col), last(tinyint_col) from test_stream_vtable.vtb_virtual_ntb_half_full session(ts, 10a);") + tdSql.execute(f"CREATE STREAM s_session_3 INTO dst_session_3 AS " + "select _wstart, _wend, first(u_tinyint_col), last(tinyint_col) from test_stream_vtable.vtb_virtual_stb partition by tbname session(ts, 10a);") + tdSql.execute(f"CREATE STREAM s_session_4 INTO dst_session_4 AS " + "select _wstart, _wend, first(u_tinyint_col), last(tinyint_col) from test_stream_vtable.vtb_virtual_ctb_full session(ts, 10a);") + tdSql.execute(f"CREATE STREAM s_session_5 INTO dst_session_5 AS " + "select _wstart, _wend, first(u_tinyint_col), last(tinyint_col) from test_stream_vtable.vtb_virtual_ctb_half_full session(ts, 10a);") + + tdSql.execute(f"CREATE STREAM s_event_1 INTO dst_event_1 AS " + "select _wstart, _wend, first(u_tinyint_col), last(tinyint_col) from test_stream_vtable.vtb_virtual_ntb_full event_window start with u_tinyint_col > 50 end with u_smallint_col > 10000;") + tdSql.execute(f"CREATE STREAM s_event_2 INTO dst_event_2 AS " + "select _wstart, _wend, first(u_tinyint_col), last(tinyint_col) from test_stream_vtable.vtb_virtual_ntb_half_full event_window start with u_tinyint_col > 50 end with u_smallint_col > 10000;") + tdSql.execute(f"CREATE STREAM s_event_3 INTO dst_event_3 AS " + "select _wstart, _wend, first(u_tinyint_col), last(tinyint_col) from test_stream_vtable.vtb_virtual_stb partition by tbname event_window start with u_tinyint_col > 50 end with u_smallint_col > 10000;") + tdSql.execute(f"CREATE STREAM s_event_4 INTO dst_event_4 AS " + "select _wstart, _wend, first(u_tinyint_col), last(tinyint_col) from test_stream_vtable.vtb_virtual_ctb_full event_window start with u_tinyint_col > 50 end with u_smallint_col > 10000;") + tdSql.execute(f"CREATE STREAM s_event_5 INTO dst_event_5 AS " + "select _wstart, _wend, first(u_tinyint_col), last(tinyint_col) from test_stream_vtable.vtb_virtual_ctb_half_full event_window start with u_tinyint_col > 50 end with u_smallint_col > 10000;") + + # tdSql.execute(f"CREATE STREAM s_count_1 INTO dst_count_1 AS " + # "select _wstart, _wend, first(u_tinyint_col), last(tinyint_col) from test_stream_vtable.vtb_virtual_ntb_full count_window(20);") + # tdSql.execute(f"CREATE STREAM s_count_1 INTO dst_count_1 AS " + # "select _wstart, _wend, first(u_tinyint_col), last(tinyint_col) from test_stream_vtable.vtb_virtual_ntb_half_full count_window(20);") + # tdSql.execute(f"CREATE STREAM s_count_1 INTO dst_count_1 AS " + # "select _wstart, _wend, first(u_tinyint_col), last(tinyint_col) from test_stream_vtable.vtb_virtual_stb partition by tbname count_window(20);") + # tdSql.execute(f"CREATE STREAM s_count_1 INTO dst_count_1 AS " + # "select _wstart, _wend, first(u_tinyint_col), last(tinyint_col) from test_stream_vtable.vtb_virtual_ctb_full count_window(20);") + # tdSql.execute(f"CREATE STREAM s_count_1 INTO dst_count_1 AS " + # "select _wstart, _wend, first(u_tinyint_col), last(tinyint_col) from test_stream_vtable.vtb_virtual_ctb_half_full count_window(20);") + + def 
wait_streams_ready(self): + for i in range(60): + tdLog.info(f"i={i} wait for stream tasks ready ...") + time.sleep(1) + rows = tdSql.query("select * from information_schema.ins_stream_tasks where status <> 'ready';") + if rows == 0: + break + + def wait_streams_done(self): + # The entire test runs for a while. Wait briefly, and if no exceptions occur, it's sufficient. + for i in range(30): + tdLog.info(f"i={i} wait for stream tasks done ...") + time.sleep(1) + rows = tdSql.query("select * from information_schema.ins_stream_tasks where status <> 'ready';") + if rows != 0: + raise Exception("stream task status is wrong, please check it!") + + + def run(self): + tdLog.debug(f"start to excute {__file__}") + + self.create_tables() + self.create_proj_streams() + self.wait_streams_ready() + json = etool.curFile(__file__, "vtable_insert.json") + etool.benchMark(json=json) + self.wait_streams_done() + + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/army/stream/vtable_insert.json b/tests/army/stream/vtable_insert.json new file mode 100644 index 0000000000..cacdc0b288 --- /dev/null +++ b/tests/army/stream/vtable_insert.json @@ -0,0 +1,76 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 3, + "create_table_thread_count": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "num_of_records_per_req": 10000, + "prepared_rand": 10000, + "chinese": "no", + "escape_character": "yes", + "continue_if_fail": "no", + "databases": [ + { + "dbinfo": { + "name": "test_stream_vtable", + "drop": "no", + "vgroups": 8, + "precision": "ms" + }, + "super_tables": [ + { + "name": "vtb_org_stb", + "child_table_exists": "yes", + "childtable_count": 3, + "childtable_prefix": "vtb_org_child_", + "auto_create_table": "no", + "batch_create_tbl_num": 5, + "data_source": "rand", + "insert_mode": "taosc", + "non_stop_mode": "no", + "line_protocol": "line", + "insert_rows": 10000, + "childtable_limit": 0, + "childtable_offset": 0, + "interlace_rows": 0, + "insert_interval": 10, + "partial_col_num": 0, + "timestamp_step": 500, + "start_timestamp": "2025-01-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [ + {"type": "UTINYINT", "name": "u_tinyint_col"}, + {"type": "USMALLINT", "name": "u_smallint_col"}, + {"type": "UINT", "name": "u_int_col"}, + {"type": "UBIGINT", "name": "u_bigint_col"}, + {"type": "TINYINT", "name": "tinyint_col"}, + {"type": "SMALLINT", "name": "smallint_col"}, + {"type": "INT", "name": "int_col"}, + {"type": "BIGINT", "name": "bigint_col"}, + {"type": "FLOAT", "name": "float_col"}, + {"type": "DOUBLE", "name": "double_col"}, + {"type": "BOOL", "name": "bool_col"}, + {"type": "BINARY", "name": "binary_16_col", "len": 16}, + {"type": "BINARY", "name": "binary_32_col", "len": 32}, + {"type": "NCHAR", "name": "nchar_16_col", "len": 16}, + {"type": "NCHAR", "name": "nchar_32_col", "len": 32} + ], + "tags": [ + {"type": "INT", "name": "int_tag"}, + {"type": "BOOL", "name": "bool_tag"}, + {"type": "FLOAT", "name": "float_tag"}, + {"type": "DOUBLE", "name": "double_tag"} + ] + } + ] + } + ] +} diff --git a/tests/army/test.py b/tests/army/test.py index 6ac0948b7b..a66743b40a 100644 --- a/tests/army/test.py +++ b/tests/army/test.py @@ -37,6 +37,9 @@ import taos import taosrest import taosws 
+from taos.cinterface import * +taos.taos_options(6, "native") + def checkRunTimeError(): import win32gui timeCount = 0 @@ -258,8 +261,9 @@ if __name__ == "__main__": # # do exeCmd command # + taosAdapter = True # default is websocket , so must start taosAdapter if not execCmd == "": - if taosAdapter or taosAdapter or restful or websocket: + if taosAdapter or restful or websocket: tAdapter.init(deployPath) else: tdDnodes.init(deployPath) diff --git a/tests/army/tools/benchmark/basic/commandline-sml-rest.py b/tests/army/tools/benchmark/basic/commandline-sml-rest.py deleted file mode 100644 index 3f5bcd8040..0000000000 --- a/tests/army/tools/benchmark/basic/commandline-sml-rest.py +++ /dev/null @@ -1,75 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. -# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- -import os - -import frame -import frame.etool -from frame.log import * -from frame.cases import * -from frame.sql import * -from frame.caseBase import * -from frame import * - - - -class TDTestCase(TBase): - def caseDescription(self): - """ - [TD-22334] taosBenchmark sml rest test cases - """ - - - def run(self): - binPath = etool.benchMarkFile() - - cmd = "%s -I sml-rest -t 1 -n 1 -y" % binPath - tdLog.info("%s" % cmd) - os.system("%s" % cmd) - tdSql.query("select count(*) from test.meters") - tdSql.checkData(0, 0, 1) - - cmd = "%s -I sml-rest-line -t 1 -n 1 -y" % binPath - tdLog.info("%s" % cmd) - os.system("%s" % cmd) - tdSql.query("select count(*) from test.meters") - tdSql.checkData(0, 0, 1) - - cmd = "%s -I sml-rest-telnet -t 1 -n 1 -y" % binPath - tdLog.info("%s" % cmd) - os.system("%s" % cmd) - tdSql.query("select count(*) from test.meters") - tdSql.checkData(0, 0, 1) - - cmd = "%s -I sml-rest-json -t 1 -n 1 -y" % binPath - tdLog.info("%s" % cmd) - os.system("%s" % cmd) - tdSql.query("select count(*) from test.meters") - tdSql.checkData(0, 0, 1) - - cmd = "%s -I sml-rest-taosjson -t 1 -n 1 -y" % binPath - tdLog.info("%s" % cmd) - os.system("%s" % cmd) - tdSql.query("select count(*) from test.meters") - tdSql.checkData(0, 0, 1) - - cmd = "%s -N -I sml-rest -y" % binPath - tdLog.info("%s" % cmd) - assert os.system("%s" % cmd) != 0 - - def stop(self): - tdSql.close() - tdLog.success("%s successfully executed" % __file__) - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/army/tools/benchmark/basic/commandline-sml.py b/tests/army/tools/benchmark/basic/commandline-sml.py index 7c3dc39f7b..4533dedbbd 100644 --- a/tests/army/tools/benchmark/basic/commandline-sml.py +++ b/tests/army/tools/benchmark/basic/commandline-sml.py @@ -67,6 +67,16 @@ class TDTestCase(TBase): tdSql.query("select count(*) from test.meters") tdSql.checkData(0, 0, 10*10000) + # add normal table + cmd = "%s -N -I sml -t 2 -n 10000 -y" % binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + + tdSql.query("select count(*) from test.d0") + tdSql.checkData(0, 0, 1*10000) + tdSql.query("select count(*) from test.d1") + tdSql.checkData(0, 0, 1*10000) + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git 
a/tests/army/tools/benchmark/basic/commandline.py b/tests/army/tools/benchmark/basic/commandline.py index 94b812aed0..3330056d51 100644 --- a/tests/army/tools/benchmark/basic/commandline.py +++ b/tests/army/tools/benchmark/basic/commandline.py @@ -184,7 +184,7 @@ class TDTestCase(TBase): tdSql.query("select last(ts) from test.meters") tdSql.checkData(0, 0, "2017-07-14 10:40:00.034") - cmd = "%s -N -I taosc -t 11 -n 11 -y -x -E" % binPath + cmd = "%s -N -I taosc -t 11 -n 11 -y -x -E -c abcde" % binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("use test") @@ -195,7 +195,7 @@ class TDTestCase(TBase): tdSql.query("select count(*) from `d10`") tdSql.checkData(0, 0, 11) - cmd = "%s -N -I rest -t 11 -n 11 -y -x" % binPath + cmd = "%s -N -I rest -t 11 -n 11 -y -x -c /etc/taos" % binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("use test") @@ -315,21 +315,6 @@ class TDTestCase(TBase): tdSql.query("describe test.meters") tdSql.checkData(1, 1, "NCHAR") - # 2.x is binary and 3.x is varchar - # cmd = "%s -n 1 -t 1 -y -b binary" %binPath - # tdLog.info("%s" % cmd) - # os.system("%s" % cmd) - # tdSql.execute("reset query cache") - # tdSql.query("describe test.meters") - # tdSql.checkData(1, 1, "BINARY") - - # cmd = "%s -n 1 -t 1 -y -b binary\(7\)" %binPath - # tdLog.info("%s" % cmd) - # os.system("%s" % cmd) - # tdSql.execute("reset query cache") - # tdSql.query("describe test.meters") - # tdSql.checkData(1, 1, "BINARY") - cmd = "%s -n 1 -t 1 -y -A json" % binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) diff --git a/tests/army/tools/benchmark/basic/connMode.py b/tests/army/tools/benchmark/basic/connMode.py new file mode 100644 index 0000000000..0f3a99a386 --- /dev/null +++ b/tests/army/tools/benchmark/basic/connMode.py @@ -0,0 +1,158 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +# import os, signal +import os +from time import sleep +import frame +import frame.etool +from frame.log import * +from frame.cases import * +from frame.sql import * +from frame.caseBase import * +from frame import * + + +class TDTestCase(TBase): + def caseDescription(self): + """ + taosBenchmark public->connMode test cases + """ + + # expect cmd > evn > json + def checkPriority(self): + + # + # cmd & json + # + + # cmd first 6041 - valid + options = "-X http://127.0.0.1:6041" + # json 6042 - invalid + json = "tools/benchmark/basic/json/connModePriorityErrDsn.json" + self.insertBenchJson(json, options, True) + + # + # env > json + # + + # env 6041 - valid + os.environ['TDENGINE_CLOUD_DSN'] = "http://127.0.0.1:6041" + # json 6042 - invalid + json = "tools/benchmark/basic/json/connModePriorityErrDsn.json" + self.insertBenchJson(json, "", True) + + + # + # cmd & json & evn + # + + # cmd 6041 - valid + options = "-X http://127.0.0.1:6041" + # env 6043 - invalid + os.environ['TDENGINE_CLOUD_DSN'] = "http://127.0.0.1:6043" + # json 6042 - invalid + json = "tools/benchmark/basic/json/connModePriorityErrDsn.json" + self.insertBenchJson(json, options, True) + + # clear env + os.environ['TDENGINE_CLOUD_DSN'] = "" + + def checkCommandLine(self): + # modes + modes = ["", "-Z 1 -B 1", "-Z websocket", "-Z 0", "-Z native -B 2"] + # result + Rows = "insert rows: 9990" + results1 = [ + ["Connect mode is : WebSocket", Rows], + ["Connect mode is : WebSocket", Rows], + ["Connect mode is : WebSocket", Rows], + ["Connect mode is : Native", Rows], + ["Connect mode is : Native", Rows], + ] + # iface todo add sml + iface = ["taosc", "stmt", "stmt2"] + + # do check + for face in iface: + for i in range(len(modes)): + self.benchmarkCmd(f"{modes[i]} -I {face}", 10, 999, 1000, results1[i]) + + + def checkExceptCmd(self): + # exe + bench = frame.etool.benchMarkFile() + # option + options = [ + "-Z native -X http://127.0.0.1:6041", + "-Z 100", + "-Z abcdefg", + "-X", + "-X 127.0.0.1:6041", + "-X https://gw.cloud.taosdata.com?token617ffdf...", + "-Z 1 -X https://gw.cloud.taosdata.com?token=617ffdf...", + "-X http://127.0.0.1:6042" + ] + + # do check + for option in options: + self.checkExcept(bench + " -y " + option) + + def checkHostPort(self): + # + # ommand + # + self.benchmarkCmd("-h 127.0.0.1", 5, 100, 10, ["insert rows: 500"]) + self.benchmarkCmd("-h 127.0.0.1 -P 6041 -uroot -ptaosdata", 5, 100, 10, ["insert rows: 500"]) + self.benchmarkCmd("-Z 0 -h 127.0.0.1 -P 6030 -uroot -ptaosdata", 5, 100, 10, ["insert rows: 500"]) + + # + # command & json + # + + # 6041 is default + options = "-h 127.0.0.1 -P 6041 -uroot -ptaosdata" + json = "tools/benchmark/basic/json/connModePriorityErrHost.json" + self.insertBenchJson(json, options, True) + + # cmd port first json port + options = "-Z native -P 6030" + json = "tools/benchmark/basic/json/connModePriority.json" + self.insertBenchJson(json, options, True) + options = "-Z websocket -P 6041" + json = "tools/benchmark/basic/json/connModePriority.json" + self.insertBenchJson(json, options, True) + + def run(self): + # init + self.db = "test" + self.stb = "meters" + + # command line test + self.checkCommandLine() + + # except + self.checkExceptCmd() + + # cmd > json > env + self.checkPriority() 
+ + # host and port + self.checkHostPort() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/army/tools/benchmark/basic/json/TD-31490.json b/tests/army/tools/benchmark/basic/json/TD-31490.json index a4c674fa50..469be9ba54 100644 --- a/tests/army/tools/benchmark/basic/json/TD-31490.json +++ b/tests/army/tools/benchmark/basic/json/TD-31490.json @@ -22,7 +22,7 @@ "wal_retention_period": 0, "buffer": 256, "stt_trigger": 1, - "vgroups ": 1 + "vgroups": 1 }, "super_tables": [ { diff --git a/tests/army/tools/benchmark/basic/json/TS-5002.json b/tests/army/tools/benchmark/basic/json/TS-5002.json index 024f9f0f9e..3e39085ca9 100644 --- a/tests/army/tools/benchmark/basic/json/TS-5002.json +++ b/tests/army/tools/benchmark/basic/json/TS-5002.json @@ -24,7 +24,7 @@ "cachemodel": "'both'", "cachesize": 100, "stt_trigger": 1, - "vgroups ": 3 + "vgroups": 3 }, "super_tables": [ { diff --git a/tests/army/tools/benchmark/basic/json/connModePriority.json b/tests/army/tools/benchmark/basic/json/connModePriority.json new file mode 100644 index 0000000000..aec4e5ac9c --- /dev/null +++ b/tests/army/tools/benchmark/basic/json/connModePriority.json @@ -0,0 +1,65 @@ +{ + "filetype": "insert", + "dsn": "http://127.0.0.1:6041", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "num_of_records_per_req": 3000, + "thread_count": 2, + "confirm_parameter_prompt": "no", + "databases": [ + { + "dbinfo": { + "name": "test", + "drop": "yes", + "precision": "ms", + "vgroups": 1 + }, + "super_tables": [ + { + "name": "meters", + "child_table_exists": "no", + "childtable_count": 2, + "insert_rows": 1000, + "childtable_prefix": "d", + "insert_mode": "taosc", + "interlace_rows": 0, + "timestamp_step": 1000, + "start_timestamp":1700000000000, + "columns": [ + { "type": "bool", "name": "bc"}, + { "type": "float", "name": "fc", "max": 1, "min": 0 }, + { "type": "double", "name": "dc", "max": 10, "min": 0 }, + { "type": "tinyint", "name": "ti", "max": 100, "min": -100 }, + { "type": "smallint", "name": "si", "max": 100, "min": -50 }, + { "type": "int", "name": "ic", "max": 1000, "min": -1000 }, + { "type": "bigint", "name": "bi", "max": 100, "min": -1000 }, + { "type": "utinyint", "name": "uti", "max": 100, "min": 0 }, + { "type": "usmallint", "name": "usi", "max": 100, "min": 0 }, + { "type": "uint", "name": "ui", "max": 1000, "min": 0 }, + { "type": "ubigint", "name": "ubi", "max": 10000, "min": 0 }, + { "type": "binary", "name": "bin", "len": 4}, + { "type": "nchar", "name": "nch", "len": 8} + ], + "tags": [ + { "type": "bool", "name": "tbc"}, + { "type": "float", "name": "tfc", "max": 1, "min": 0 }, + { "type": "double", "name": "tdc", "max": 10, "min": 0 }, + { "type": "tinyint", "name": "tti", "max": 100, "min": -100 }, + { "type": "smallint", "name": "tsi", "max": 100, "min": -50 }, + { "type": "int", "name": "tic", "max": 1000, "min": -1000 }, + { "type": "bigint", "name": "tbi", "max": 100, "min": -1000 }, + { "type": "utinyint", "name": "tuti", "max": 100, "min": 0 }, + { "type": "usmallint", "name": "tusi", "max": 100, "min": 0 }, + { "type": "uint", "name": "tui", "max": 1000, "min": 0 }, + { "type": "ubigint", "name": "tubi", "max": 10000, "min": 0 }, + { "type": "binary", "name": "tbin", "len": 4}, + { "type": "nchar", "name": "tnch", "len": 8} + ] + } + ] + } + ] +} \ No newline at end of file diff --git 
a/tests/army/tools/benchmark/basic/json/connModePriorityErrDsn.json b/tests/army/tools/benchmark/basic/json/connModePriorityErrDsn.json new file mode 100644 index 0000000000..633d657861 --- /dev/null +++ b/tests/army/tools/benchmark/basic/json/connModePriorityErrDsn.json @@ -0,0 +1,60 @@ +{ + "filetype": "insert", + "dsn": "http://127.0.0.1:6042", + "num_of_records_per_req": 3000, + "thread_count": 2, + "confirm_parameter_prompt": "no", + "databases": [ + { + "dbinfo": { + "name": "test", + "drop": "yes", + "precision": "ms", + "vgroups": 1 + }, + "super_tables": [ + { + "name": "meters", + "child_table_exists": "no", + "childtable_count": 2, + "insert_rows": 1000, + "childtable_prefix": "d", + "insert_mode": "taosc", + "interlace_rows": 0, + "timestamp_step": 1000, + "start_timestamp":1700000000000, + "columns": [ + { "type": "bool", "name": "bc"}, + { "type": "float", "name": "fc", "max": 1, "min": 0 }, + { "type": "double", "name": "dc", "max": 10, "min": 0 }, + { "type": "tinyint", "name": "ti", "max": 100, "min": -100 }, + { "type": "smallint", "name": "si", "max": 100, "min": -50 }, + { "type": "int", "name": "ic", "max": 1000, "min": -1000 }, + { "type": "bigint", "name": "bi", "max": 100, "min": -1000 }, + { "type": "utinyint", "name": "uti", "max": 100, "min": 0 }, + { "type": "usmallint", "name": "usi", "max": 100, "min": 0 }, + { "type": "uint", "name": "ui", "max": 1000, "min": 0 }, + { "type": "ubigint", "name": "ubi", "max": 10000, "min": 0 }, + { "type": "binary", "name": "bin", "len": 4}, + { "type": "nchar", "name": "nch", "len": 8} + ], + "tags": [ + { "type": "bool", "name": "tbc"}, + { "type": "float", "name": "tfc", "max": 1, "min": 0 }, + { "type": "double", "name": "tdc", "max": 10, "min": 0 }, + { "type": "tinyint", "name": "tti", "max": 100, "min": -100 }, + { "type": "smallint", "name": "tsi", "max": 100, "min": -50 }, + { "type": "int", "name": "tic", "max": 1000, "min": -1000 }, + { "type": "bigint", "name": "tbi", "max": 100, "min": -1000 }, + { "type": "utinyint", "name": "tuti", "max": 100, "min": 0 }, + { "type": "usmallint", "name": "tusi", "max": 100, "min": 0 }, + { "type": "uint", "name": "tui", "max": 1000, "min": 0 }, + { "type": "ubigint", "name": "tubi", "max": 10000, "min": 0 }, + { "type": "binary", "name": "tbin", "len": 4}, + { "type": "nchar", "name": "tnch", "len": 8} + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/army/tools/benchmark/basic/json/connModePriorityErrHost.json b/tests/army/tools/benchmark/basic/json/connModePriorityErrHost.json new file mode 100644 index 0000000000..f32acbeb1f --- /dev/null +++ b/tests/army/tools/benchmark/basic/json/connModePriorityErrHost.json @@ -0,0 +1,64 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.2", + "port": 6032, + "user": "root-error", + "password": "taosdata-error", + "num_of_records_per_req": 3000, + "thread_count": 2, + "confirm_parameter_prompt": "no", + "databases": [ + { + "dbinfo": { + "name": "test", + "drop": "yes", + "precision": "ms", + "vgroups": 1 + }, + "super_tables": [ + { + "name": "meters", + "child_table_exists": "no", + "childtable_count": 2, + "insert_rows": 1000, + "childtable_prefix": "d", + "insert_mode": "taosc", + "interlace_rows": 0, + "timestamp_step": 1000, + "start_timestamp":1700000000000, + "columns": [ + { "type": "bool", "name": "bc"}, + { "type": "float", "name": "fc", "max": 1, "min": 0 }, + { "type": "double", "name": "dc", "max": 10, "min": 0 }, + { "type": "tinyint", "name": "ti", "max": 100, "min": -100 }, + { 
"type": "smallint", "name": "si", "max": 100, "min": -50 }, + { "type": "int", "name": "ic", "max": 1000, "min": -1000 }, + { "type": "bigint", "name": "bi", "max": 100, "min": -1000 }, + { "type": "utinyint", "name": "uti", "max": 100, "min": 0 }, + { "type": "usmallint", "name": "usi", "max": 100, "min": 0 }, + { "type": "uint", "name": "ui", "max": 1000, "min": 0 }, + { "type": "ubigint", "name": "ubi", "max": 10000, "min": 0 }, + { "type": "binary", "name": "bin", "len": 4}, + { "type": "nchar", "name": "nch", "len": 8} + ], + "tags": [ + { "type": "bool", "name": "tbc"}, + { "type": "float", "name": "tfc", "max": 1, "min": 0 }, + { "type": "double", "name": "tdc", "max": 10, "min": 0 }, + { "type": "tinyint", "name": "tti", "max": 100, "min": -100 }, + { "type": "smallint", "name": "tsi", "max": 100, "min": -50 }, + { "type": "int", "name": "tic", "max": 1000, "min": -1000 }, + { "type": "bigint", "name": "tbi", "max": 100, "min": -1000 }, + { "type": "utinyint", "name": "tuti", "max": 100, "min": 0 }, + { "type": "usmallint", "name": "tusi", "max": 100, "min": 0 }, + { "type": "uint", "name": "tui", "max": 1000, "min": 0 }, + { "type": "ubigint", "name": "tubi", "max": 10000, "min": 0 }, + { "type": "binary", "name": "tbin", "len": 4}, + { "type": "nchar", "name": "tnch", "len": 8} + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/army/tools/benchmark/basic/json/queryInsertrestdata.json b/tests/army/tools/benchmark/basic/json/queryInsertrestdata.json deleted file mode 100644 index 6714497766..0000000000 --- a/tests/army/tools/benchmark/basic/json/queryInsertrestdata.json +++ /dev/null @@ -1,73 +0,0 @@ -{ - "filetype": "insert", - "cfgdir": "/etc/taos", - "host": "127.0.0.1", - "port": 6030, - "user": "root", - "password": "taosdata", - "thread_count": 4, - "thread_count_create_tbl": 4, - "result_file": "./insert_res.txt", - "confirm_parameter_prompt": "no", - "insert_interval": 0, - "interlace_rows": 0, - "num_of_records_per_req": 3000, - "max_sql_len": 1024000, - "databases": [{ - "dbinfo": { - "name": "db", - "drop": "yes", - "precision": "ms" - }, - "super_tables": [{ - "name": "stb0", - "child_table_exists":"no", - "childtable_count": 2, - "childtable_prefix": "stb00_", - "auto_create_table": "no", - "batch_create_tbl_num": 10, - "data_source": "rand", - "insert_mode": "taosc", - "insert_rows": 10, - "childtable_limit": 0, - "childtable_offset": 0, - "interlace_rows": 0, - "insert_interval": 0, - "max_sql_len": 1024000, - "disorder_ratio": 0, - "disorder_range": 1000, - "timestamp_step": 1, - "start_timestamp": "2020-11-01 00:00:00.000", - "sample_format": "csv", - "sample_file": "./sample.csv", - "tags_file": "", - "columns": [{"type": "BINARY", "len": 1, "count":1}, {"type": "BINARY", "len": 3, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}], - "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] - }, - { - "name": "stb1", - "child_table_exists":"no", - "childtable_count": 2, - "childtable_prefix": "stb01_", - "auto_create_table": "no", - "batch_create_tbl_num": 10, - "data_source": "rand", - "insert_mode": "taosc", - "insert_rows": 5, - "childtable_limit": 0, - "childtable_offset": 0, - "interlace_rows": 0 , - "insert_interval": 0, - "max_sql_len": 1024000, - "disorder_ratio": 0, - "disorder_range": 1000, - "timestamp_step": 1, - "start_timestamp": "2020-11-01 00:00:00.000", - "sample_format": "csv", - "sample_file": "./sample.csv", - "tags_file": "", - "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, 
{"type": "BINARY", "len": 1, "count":3}, {"type": "BINARY", "len": 2, "count":6}], - "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] - }] - }] -} diff --git a/tests/army/tools/benchmark/basic/json/queryModeSpec.json b/tests/army/tools/benchmark/basic/json/queryModeSpec.json index 533cb05c2b..5b7420a973 100644 --- a/tests/army/tools/benchmark/basic/json/queryModeSpec.json +++ b/tests/army/tools/benchmark/basic/json/queryModeSpec.json @@ -10,7 +10,7 @@ "query_times": 100, "query_mode": "taosc", "specified_table_query": { - "concurrent": 3, + "threads": 3, "sqls": [ { "sql": "select last_row(*) from meters" diff --git a/tests/army/tools/benchmark/basic/json/queryModeSpecMix.json b/tests/army/tools/benchmark/basic/json/queryModeSpecMix.json index 39a9593d2e..6418adc35c 100644 --- a/tests/army/tools/benchmark/basic/json/queryModeSpecMix.json +++ b/tests/army/tools/benchmark/basic/json/queryModeSpecMix.json @@ -10,7 +10,7 @@ "query_times": 100, "query_mode": "taosc", "specified_table_query": { - "concurrent": 4, + "threads": 4, "mixed_query": "yes", "sqls": [ { diff --git a/tests/army/tools/benchmark/basic/json/queryModeSpecMixBatch.json b/tests/army/tools/benchmark/basic/json/queryModeSpecMixBatch.json index 6740c585c3..97b7be72ea 100644 --- a/tests/army/tools/benchmark/basic/json/queryModeSpecMixBatch.json +++ b/tests/army/tools/benchmark/basic/json/queryModeSpecMixBatch.json @@ -10,7 +10,7 @@ "query_times": 5, "query_mode": "taosc", "specified_table_query": { - "concurrent": 5, + "threads": 5, "query_interval": 1000, "mixed_query": "yes", "batch_query": "yes", diff --git a/tests/army/tools/benchmark/basic/json/queryModeSpecMixBatchRest.json b/tests/army/tools/benchmark/basic/json/queryModeSpecMixBatchRest.json index b2ffbe1434..9cd21bd1fc 100644 --- a/tests/army/tools/benchmark/basic/json/queryModeSpecMixBatchRest.json +++ b/tests/army/tools/benchmark/basic/json/queryModeSpecMixBatchRest.json @@ -10,7 +10,7 @@ "query_times": 5, "query_mode": "taosc", "specified_table_query": { - "concurrent": 5, + "threads": 5, "query_interval": 100, "mixed_query": "yes", "batch_query": "no", diff --git a/tests/army/tools/benchmark/basic/json/queryModeSpecMixRest.json b/tests/army/tools/benchmark/basic/json/queryModeSpecMixRest.json index 5d962f1354..35b1fa0188 100644 --- a/tests/army/tools/benchmark/basic/json/queryModeSpecMixRest.json +++ b/tests/army/tools/benchmark/basic/json/queryModeSpecMixRest.json @@ -10,7 +10,7 @@ "query_times": 100, "query_mode": "rest", "specified_table_query": { - "concurrent": 3, + "threads": 3, "mixed_query": "yes", "sqls": [ { diff --git a/tests/army/tools/benchmark/basic/json/queryModeSpecRest.json b/tests/army/tools/benchmark/basic/json/queryModeSpecRest.json index 5b00ee6479..561f7c1883 100644 --- a/tests/army/tools/benchmark/basic/json/queryModeSpecRest.json +++ b/tests/army/tools/benchmark/basic/json/queryModeSpecRest.json @@ -10,7 +10,7 @@ "query_times": 100, "query_mode": "rest", "specified_table_query": { - "concurrent": 3, + "threads": 3, "sqls": [ { "sql": "select last_row(*) from meters" diff --git a/tests/army/tools/benchmark/basic/json/queryModeSuper.json b/tests/army/tools/benchmark/basic/json/queryModeSuper.json index 9d20154d49..3682594b42 100644 --- a/tests/army/tools/benchmark/basic/json/queryModeSuper.json +++ b/tests/army/tools/benchmark/basic/json/queryModeSuper.json @@ -12,7 +12,7 @@ "query_mode": "taosc", "super_table_query": { "stblname": "meters", - "concurrent": 3, + "threads": 3, "query_interval": 0, "sqls": [ { diff 
--git a/tests/army/tools/benchmark/basic/json/queryModeSuperRest.json b/tests/army/tools/benchmark/basic/json/queryModeSuperRest.json index 4ae43062a6..1380a958c0 100644 --- a/tests/army/tools/benchmark/basic/json/queryModeSuperRest.json +++ b/tests/army/tools/benchmark/basic/json/queryModeSuperRest.json @@ -12,7 +12,7 @@ "query_mode": "rest", "super_table_query": { "stblname": "meters", - "concurrent": 3, + "threads": 3, "query_interval": 0, "sqls": [ { diff --git a/tests/army/tools/benchmark/basic/json/queryRestful.json b/tests/army/tools/benchmark/basic/json/queryRestful.json deleted file mode 100644 index 6cb83bc2e1..0000000000 --- a/tests/army/tools/benchmark/basic/json/queryRestful.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "filetype": "query", - "cfgdir": "/etc/taos", - "host": "127.0.0.1", - "port": 6030, - "user": "root", - "password": "taosdata", - "confirm_parameter_prompt": "no", - "databases": "db", - "query_times": 2, - "query_mode": "rest", - "specified_table_query": { - "query_interval": 1, - "threads": 3, - "sqls": [ - { - "sql": "select last_row(*) from db.stb0 ", - "result": "./query_res0.txt" - }, - { - "sql": "select count(*) from db.stb00_1", - "result": "./query_res1.txt" - } - ] - } - } - diff --git a/tests/army/tools/benchmark/basic/json/queryRestful1.json b/tests/army/tools/benchmark/basic/json/queryRestful1.json deleted file mode 100644 index 54d2589ce9..0000000000 --- a/tests/army/tools/benchmark/basic/json/queryRestful1.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "filetype": "query", - "cfgdir": "/etc/taos", - "host": "127.0.0.1", - "port": 6030, - "user": "root", - "password": "taosdata", - "confirm_parameter_prompt": "no", - "databases": "db", - "query_times": 2, - "query_mode": "rest", - "super_table_query": { - "stblname": "stb1", - "query_interval": 1, - "threads": 3, - "sqls": [ - { - "sql": "select last_row(ts) from xxxx", - "result": "./query_res2.txt" - } - ] - } - } - diff --git a/tests/army/tools/benchmark/basic/json/taosc_query-sqlfile.json b/tests/army/tools/benchmark/basic/json/taosc_query-sqlfile.json index 236e87a36d..efb77bbed7 100644 --- a/tests/army/tools/benchmark/basic/json/taosc_query-sqlfile.json +++ b/tests/army/tools/benchmark/basic/json/taosc_query-sqlfile.json @@ -13,7 +13,7 @@ { "query_interval": 1, "concurrent":1, - "sql_file": "./taosbenchmark/json/query-sqls.txt", + "sql_file": "tools/benchmark/basic/json/query-sqls.txt", "result": "taosc_query_specified-sqlfile" } } diff --git a/tests/army/tools/benchmark/basic/queryMain.py b/tests/army/tools/benchmark/basic/queryMain.py index 2c7f45c975..43a9488b5b 100644 --- a/tests/army/tools/benchmark/basic/queryMain.py +++ b/tests/army/tools/benchmark/basic/queryMain.py @@ -124,7 +124,7 @@ class TDTestCase(TBase): except: continueIfFail = "no" - concurrent = data[label]["concurrent"] + threads = data[label]["threads"] sqls = data[label]["sqls"] @@ -140,7 +140,7 @@ class TDTestCase(TBase): except: mixedQuery = "no" - tdLog.info(f"queryTimes={queryTimes} concurrent={concurrent} mixedQuery={mixedQuery} " + tdLog.info(f"queryTimes={queryTimes} threads={threads} mixedQuery={mixedQuery} " f"batchQuery={batchQuery} len(sqls)={len(sqls)} label={label}\n") totalQueries = 0 @@ -154,9 +154,9 @@ class TDTestCase(TBase): if specMode and mixedQuery.lower() != "yes": # spec - threadQueries = queryTimes * concurrent - totalQueries = queryTimes * concurrent * len(sqls) - threadKey = f"complete query with {concurrent} threads and " + threadQueries = queryTimes * threads + totalQueries = queryTimes * threads * 
len(sqls) + threadKey = f"complete query with {threads} threads and " qpsKey = "QPS: " avgKey = "query delay avg: " minKey = "min:" @@ -178,10 +178,10 @@ class TDTestCase(TBase): threadQueries = totalQueries nSql = len(sqls) - if specMode and nSql < concurrent : - tdLog.info(f"set concurrent = {nSql} because len(sqls) < concurrent") - concurrent = nSql - threadKey = f"using {concurrent} threads complete query " + if specMode and nSql < threads : + tdLog.info(f"set threads = {nSql} because len(sqls) < threads") + threads = nSql + threadKey = f"using {threads} threads complete query " qpsKey = "" avgKey = "avg delay:" minKey = "min delay:" @@ -219,10 +219,6 @@ class TDTestCase(TBase): for arg in args: self.checkAfterRun(benchmark, arg[0] + ".json", arg[1], tbCnt) - # rest - for arg in args: - self.checkAfterRun(benchmark, arg[0] + "Rest.json", arg[1], tbCnt) - def expectFailed(self, command): ret = os.system(command) if ret == 0: diff --git a/tests/army/tools/benchmark/basic/query_json-with-sqlfile.py b/tests/army/tools/benchmark/basic/query_json-with-sqlfile.py index 110651a52d..82968d7ea5 100644 --- a/tests/army/tools/benchmark/basic/query_json-with-sqlfile.py +++ b/tests/army/tools/benchmark/basic/query_json-with-sqlfile.py @@ -40,13 +40,9 @@ class TDTestCase(TBase): tdSql.execute("insert into stb_1 using stb tags (1) values (now, 1)") tdSql.execute("insert into stb_2 using stb tags (2) values (now, 2)") cmd = "%s -f ./tools/benchmark/basic/json/taosc_query-sqlfile.json" % binPath - tdLog.info("%s" % cmd) - os.system("%s" % cmd) - - # with open("%s" % "taosc_query_specified-sqlfile-0", "r+") as f1: - # for line in f1.readlines(): - # queryTaosc = line.strip().split()[0] - # assert queryTaosc == "3", "result is %s != expect: 3" % queryTaosc + rlist = self.benchmark(f"-f {cmd}") + # check result + self.checkListString(rlist, "completed total queries: 2") def stop(self): tdSql.close() diff --git a/tests/army/tools/benchmark/basic/query_json.py b/tests/army/tools/benchmark/basic/query_json.py index 6f71a508af..5e59b1392f 100644 --- a/tests/army/tools/benchmark/basic/query_json.py +++ b/tests/army/tools/benchmark/basic/query_json.py @@ -32,7 +32,7 @@ class TDTestCase(TBase): def run(self): binPath = etool.benchMarkFile() os.system( - "rm -f rest_query_specified-0 rest_query_super-0 taosc_query_specified-0 taosc_query_super-0" + "rm -f taosc_query_specified-0 taosc_query_super-0" ) tdSql.execute("drop database if exists db") tdSql.execute("create database if not exists db") @@ -58,46 +58,6 @@ class TDTestCase(TBase): queryTaosc = line.strip().split()[0] assert queryTaosc == "1", "result is %s != expect: 1" % queryTaosc - # split two - cmd = "%s -f ./tools/benchmark/basic/json/rest_query.json" % binPath - tdLog.info("%s" % cmd) - os.system("%s" % cmd) - cmd = "%s -f ./tools/benchmark/basic/json/rest_query1.json" % binPath - tdLog.info("%s" % cmd) - os.system("%s" % cmd) - - times = 0 - with open("rest_query_super-0", "r+") as f1: - for line in f1.readlines(): - contents = line.strip() - if contents.find("data") != -1: - pattern = re.compile("{.*}") - contents = pattern.search(contents).group() - contentsDict = ast.literal_eval(contents) - queryResultRest = contentsDict["data"][0][0] - assert queryResultRest == 1, ( - "result is %s != expect: 1" % queryResultRest - ) - times += 1 - - assert times == 3, "result is %s != expect: 3" % times - - times = 0 - with open("rest_query_specified-0", "r+") as f1: - for line in f1.readlines(): - contents = line.strip() - if contents.find("data") != -1: - 
pattern = re.compile("{.*}") - contents = pattern.search(contents).group() - contentsDict = ast.literal_eval(contents) - queryResultRest = contentsDict["data"][0][0] - assert queryResultRest == 3, ( - "result is %s != expect: 3" % queryResultRest - ) - times += 1 - - assert times == 1, "result is %s != expect: 1" % times - def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/army/tools/benchmark/basic/taosdemoTestQueryWithJson-mixed-query.py b/tests/army/tools/benchmark/basic/taosdemoTestQueryWithJson-mixed-query.py index 14bf201e97..a3f1a14967 100644 --- a/tests/army/tools/benchmark/basic/taosdemoTestQueryWithJson-mixed-query.py +++ b/tests/army/tools/benchmark/basic/taosdemoTestQueryWithJson-mixed-query.py @@ -37,22 +37,6 @@ class TDTestCase(TBase): queryResultTaosc = line.strip().split()[0] self.assertCheck(filename, queryResultTaosc, expectResult) - # 获取restful接口查询的结果文件中的关键内容,目前的关键内容找到第一个key就跳出循,所以就只有一个数据。后续再修改多个结果文件。 - def getfileDataRestful(self, filename): - self.filename = filename - with open("%s" % filename, "r+") as f1: - for line in f1.readlines(): - contents = line.strip() - if contents.find("data") != -1: - pattern = re.compile("{.*}") - contents = pattern.search(contents).group() - contentsDict = ast.literal_eval(contents) # 字符串转换为字典 - queryResultRest = contentsDict["data"][0][0] - break - else: - queryResultRest = "" - return queryResultRest - # 获取taosc接口查询次数 def queryTimesTaosc(self, filename): self.filename = filename @@ -60,13 +44,6 @@ class TDTestCase(TBase): times = int(subprocess.getstatusoutput(command)[1]) return times - # 获取restful接口查询次数 - def queryTimesRestful(self, filename): - self.filename = filename - command = 'cat %s |grep "200 OK" |wc -l' % filename - times = int(subprocess.getstatusoutput(command)[1]) - return times - # 定义断言结果是否正确。不正确返回错误结果,正确即通过。 def assertCheck(self, filename, queryResult, expectResult): self.filename = filename @@ -106,26 +83,6 @@ class TDTestCase(TBase): os.system("rm -rf ./query_res*") os.system("rm -rf ./all_query*") - # use restful api to query - os.system("%s -f ./tools/benchmark/basic/json/queryInsertrestdata.json" % binPath) - os.system("%s -f ./tools/benchmark/basic/json/queryRestful.json" % binPath) - os.system("%s -f ./tools/benchmark/basic/json/queryRestful1.json" % binPath) - os.system("cat query_res2.txt* > all_query_res2_rest.txt") - - # correct Times testcases - - queryTimes2Restful = self.queryTimesRestful("all_query_res2_rest.txt") - self.assertCheck("all_query_res2_rest.txt", queryTimes2Restful, 4) - - # correct data testcase - - data2 = self.getfileDataRestful("all_query_res2_rest.txt") - print(data2) - if data2 != "2020-11-01 00:00:00.004" and data2 != "2020-10-31T16:00:00.004Z": - tdLog.exit( - "data2 is not 2020-11-01 00:00:00.004 and 2020-10-31T16:00:00.004Z" - ) - # query times less than or equal to 100 assert ( os.system("%s -f ./tools/benchmark/basic/json/queryInsertdata.json" % binPath) == 0 diff --git a/tests/army/tools/benchmark/basic/taosdemoTestQueryWithJson.py b/tests/army/tools/benchmark/basic/taosdemoTestQueryWithJson.py index 3e55a5df70..c529381c18 100644 --- a/tests/army/tools/benchmark/basic/taosdemoTestQueryWithJson.py +++ b/tests/army/tools/benchmark/basic/taosdemoTestQueryWithJson.py @@ -115,41 +115,6 @@ class TDTestCase(TBase): os.system("rm -rf ./query_res*") os.system("rm -rf ./all_query*") - # use restful api to query - os.system("%s -f ./tools/benchmark/basic/json/queryInsertrestdata.json" % binPath) - os.system("%s -f 
./tools/benchmark/basic/json/queryRestful.json" % binPath) - os.system("%s -f ./tools/benchmark/basic/json/queryRestful1.json" % binPath) - os.system("cat query_res0.txt* > all_query_res0_rest.txt") - os.system("cat query_res1.txt* > all_query_res1_rest.txt") - os.system("cat query_res2.txt* > all_query_res2_rest.txt") - - # correct Times testcases - queryTimes0Restful = self.queryTimesRestful("all_query_res0_rest.txt") - self.assertCheck("all_query_res0_rest.txt", queryTimes0Restful, 6) - - queryTimes1Restful = self.queryTimesRestful("all_query_res1_rest.txt") - self.assertCheck("all_query_res1_rest.txt", queryTimes1Restful, 6) - - queryTimes2Restful = self.queryTimesRestful("all_query_res2_rest.txt") - self.assertCheck("all_query_res2_rest.txt", queryTimes2Restful, 4) - - # correct data testcase - data0 = self.getfileDataRestful("all_query_res0_rest.txt") - if data0 != "2020-11-01 00:00:00.009" and data0 != "2020-10-31T16:00:00.009Z": - tdLog.exit( - "data0 is not 2020-11-01 00:00:00.009 and 2020-10-31T16:00:00.009Z" - ) - - data1 = self.getfileDataRestful("all_query_res1_rest.txt") - self.assertCheck("all_query_res1_rest.txt", data1, 10) - - data2 = self.getfileDataRestful("all_query_res2_rest.txt") - print(data2) - if data2 != "2020-11-01 00:00:00.004" and data2 != "2020-10-31T16:00:00.004Z": - tdLog.exit( - "data2 is not 2020-11-01 00:00:00.004 and 2020-10-31T16:00:00.004Z" - ) - # query times less than or equal to 100 assert ( os.system("%s -f ./tools/benchmark/basic/json/queryInsertdata.json" % binPath) == 0 @@ -170,10 +135,6 @@ class TDTestCase(TBase): exceptcode = os.system("%s -f ./tools/benchmark/basic/json/queryQps1.json" % binPath) assert exceptcode == 0 - # 2021.02.09 need modify taosBenchmakr code - # use illegal or out of range parameters query json file - os.system("%s -f ./tools/benchmark/basic/json/queryInsertdata.json" % binPath) - # delete useless files os.system("rm -rf ./insert_res.txt") os.system("rm -rf ./tools/benchmark/basic/*.py.sql") diff --git a/tests/army/tools/benchmark/basic/websiteCase.py b/tests/army/tools/benchmark/basic/websiteCase.py new file mode 100644 index 0000000000..67b5620931 --- /dev/null +++ b/tests/army/tools/benchmark/basic/websiteCase.py @@ -0,0 +1,257 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +import json +import sys +import os +import time +import datetime +import platform +import subprocess + +import frame +import frame.eos +import frame.etool +from frame.log import * +from frame.cases import * +from frame.sql import * +from frame.caseBase import * +from frame import * + + +# reomve single and double quotation +def removeQuotation(origin): + value = "" + for c in origin: + if c != '\'' and c != '"': + value += c + + return value + +class TDTestCase(TBase): + def caseDescription(self): + """ + taosBenchmark query->Basic test cases + """ + + def runSeconds(self, command, timeout = 180): + tdLog.info(f"runSeconds {command} ...") + process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + process.wait(timeout) + + # get output + output = process.stdout.read().decode(encoding="gbk") + error = process.stderr.read().decode(encoding="gbk") + return output, error + + def getKeyValue(self, content, key, end): + # find key + s = content.find(key) + if s == -1: + return False,"" + + # skip self + s += len(key) + # skip blank + while s < len(content): + if content[s] != " ": + break + s += 1 + + # end check + if s + 1 == len(content): + return False, "" + + # find end + if len(end) == 0: + e = -1 + else: + e = content.find(end, s) + + # get value + if e == -1: + value = content[s : ] + else: + value = content[s : e] + + return True, value + + def getDbRows(self, times): + sql = f"select count(*) from test.meters" + tdSql.waitedQuery(sql, 1, times) + dbRows = tdSql.getData(0, 0) + return dbRows + + def checkItem(self, output, key, end, expect, equal): + ret, value = self.getKeyValue(output, key, end) + if ret == False: + tdLog.exit(f"not found key:{key}. end:{end} output:\n{output}") + + fval = float(value) + # compare + if equal and fval != expect: + tdLog.exit(f"check not expect. expect:{expect} real:{fval}, key:'{key}' end:'{end}' output:\n{output}") + elif equal == False and fval <= expect: + tdLog.exit(f"failed because {fval} <= {expect}, key:'{key}' end:'{end}' output:\n{output}") + else: + # succ + if equal: + tdLog.info(f"check successfully. key:'{key}' expect:{expect} real:{fval}") + else: + tdLog.info(f"check successfully. 
key:'{key}' {fval} > {expect}") + + + def checkAfterRun(self, benchmark, jsonFile, specMode, tbCnt): + # run + cmd = f"{benchmark} -f {jsonFile}" + output, error = self.runSeconds(cmd) + + if specMode : + label = "specified_table_query" + else: + label = "super_table_query" + + # + # check insert result + # + with open(jsonFile, "r") as file: + data = json.load(file) + + queryTimes = data["query_times"] + # contineIfFail + try: + continueIfFail = data["continue_if_fail"] + except: + continueIfFail = "no" + + threads = data[label]["threads"] + sqls = data[label]["sqls"] + + + # batch_query + try: + batchQuery = data[label]["batch_query"] + except: + batchQuery = "no" + + # mixed_query + try: + mixedQuery = data[label]["mixed_query"] + except: + mixedQuery = "no" + + tdLog.info(f"queryTimes={queryTimes} threads={threads} mixedQuery={mixedQuery} " + f"batchQuery={batchQuery} len(sqls)={len(sqls)} label={label}\n") + + totalQueries = 0 + threadQueries = 0 + QPS = 10 + + if continueIfFail.lower() == "yes": + allEnd = " " + else: + allEnd = "\n" + + if specMode and mixedQuery.lower() != "yes": + # spec + threadQueries = queryTimes * threads + totalQueries = queryTimes * threads * len(sqls) + threadKey = f"complete query with {threads} threads and " + qpsKey = "QPS: " + avgKey = "query delay avg: " + minKey = "min:" + else: + # spec mixed or super + + if specMode: + totalQueries = queryTimes * len(sqls) + # spec mixed + if batchQuery.lower() == "yes": + # batch + threadQueries = len(sqls) + QPS = 2 + else: + threadQueries = totalQueries + else: + # super + totalQueries = queryTimes * len(sqls) * tbCnt + threadQueries = totalQueries + + nSql = len(sqls) + if specMode and nSql < threads : + tdLog.info(f"set threads = {nSql} because len(sqls) < threads") + threads = nSql + threadKey = f"using {threads} threads complete query " + qpsKey = "" + avgKey = "avg delay:" + minKey = "min delay:" + + items = [ + [threadKey, " ", threadQueries, True], + [qpsKey, " ", 5, False], # qps need > 1 + [avgKey, "s", 0, False], + [minKey, "s", 0, False], + ["max: ", "s", 0, False], + ["p90: ", "s", 0, False], + ["p95: ", "s", 0, False], + ["p99: ", "s", 0, False], + ["INFO: Spend ", " ", 0, False], + ["completed total queries: ", ",", totalQueries, True], + ["the QPS of all threads:", allEnd, QPS , False] # all qps need > 5 + ] + + # check + for item in items: + if len(item[0]) > 0: + self.checkItem(output, item[0], item[1], item[2], item[3]) + + + + # tmq check + def checkTmqJson(self, benchmark, json): + OK_RESULT = "Consumed total msgs: 30, total rows: 300000" + cmd = benchmark + " -f " + json + output,error = frame.eos.run(cmd, 600) + if output.find(OK_RESULT) != -1: + tdLog.info(f"succ: {cmd} found '{OK_RESULT}'") + else: + tdLog.exit(f"failed: {cmd} not found {OK_RESULT} in:\n{output} \nerror:{error}") + + + def run(self): + tbCnt = 10 + benchmark = etool.benchMarkFile() + + # insert + json = "../../tools/taos-tools/example/insert.json" + self.insertBenchJson(json, checkStep=True) + + # query + json = "../../tools/taos-tools/example/query.json" + self.checkAfterRun(benchmark, json, True, tbCnt) + json = "../../tools/taos-tools/example/queryStb.json" + self.checkAfterRun(benchmark, json, False, tbCnt) + + # tmq + json = "../../tools/taos-tools/example/tmq.json" + self.checkTmqJson(benchmark, json) + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git 
a/tests/army/tools/taosdump/native/json/insertFullType.json b/tests/army/tools/taosdump/native/json/insertFullType.json index a121670c37..5bdecef72b 100644 --- a/tests/army/tools/taosdump/native/json/insertFullType.json +++ b/tests/army/tools/taosdump/native/json/insertFullType.json @@ -41,7 +41,9 @@ { "type": "ubigint", "name": "ubi", "max": 10000, "min": 0 }, { "type": "binary", "name": "bin", "len": 4}, { "type": "nchar", "name": "nch", "len": 8}, - { "type": "varchar", "name": "vac", "len": 8} + { "type": "varbinary", "name": "vab", "len": 8}, + { "type": "varchar", "name": "vac", "len": 8}, + { "type": "geometry", "name": "geo", "len": 32} ], "tags":[ { "type": "bool", "name": "tbc"}, @@ -57,7 +59,9 @@ { "type": "ubigint", "name": "tubi", "max": 10000, "min": 0 }, { "type": "binary", "name": "tbin", "len": 4}, { "type": "nchar", "name": "tnch", "len": 8}, - { "type": "varchar", "name": "tvac", "len": 8} + { "type": "varbinary", "name": "tvab", "len": 8}, + { "type": "varchar", "name": "tvac", "len": 8}, + { "type": "geometry", "name": "tgeo", "len": 32} ] } ] diff --git a/tests/army/tools/taosdump/native/json/insertOther.json b/tests/army/tools/taosdump/native/json/insertOther.json deleted file mode 100644 index bd6bb12bec..0000000000 --- a/tests/army/tools/taosdump/native/json/insertOther.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "filetype":"insert", - "cfgdir":"/etc/taos", - "host":"127.0.0.1", - "port":6030, - "user":"root", - "password":"taosdata", - "thread_count":1, - "create_table_thread_count":1, - "confirm_parameter_prompt":"no", - "prepare_rand":100, - "num_of_records_per_req":100, - "databases": [ - { - "dbinfo":{ - "name":"testother", - "drop":"yes" - }, - "super_tables":[ - { - "name":"meters", - "child_table_exists":"no", - "childtable_prefix":"d", - "data_source":"rand", - "insert_mode":"taosc", - "childtable_count": 2, - "insert_rows":100, - "timestamp_step":1000, - "start_timestamp":"2022-10-01 00:00:00.000", - "columns":[ - { "type": "bool", "name": "bc"}, - { "type": "float", "name": "fc", "max": 1, "min": 0 }, - { "type": "double", "name": "dc", "max": 10, "min": 0 }, - { "type": "tinyint", "name": "ti", "max": 100, "min": -100 }, - { "type": "smallint", "name": "si", "max": 100, "min": -50 }, - { "type": "int", "name": "ic", "max": 1000, "min": -1000 }, - { "type": "bigint", "name": "bi", "max": 100, "min": -1000 }, - { "type": "utinyint", "name": "uti", "max": 100, "min": 0 }, - { "type": "usmallint", "name": "usi", "max": 100, "min": 0 }, - { "type": "uint", "name": "ui", "max": 1000, "min": 0 }, - { "type": "ubigint", "name": "ubi", "max": 10000, "min": 0 }, - { "type": "binary", "name": "bin", "len": 4}, - { "type": "nchar", "name": "nch", "len": 8}, - { "type": "varbinary", "name": "vab", "len": 8}, - { "type": "varchar", "name": "vac", "len": 8}, - { "type": "geometry", "name": "geo", "len": 32} - ], - "tags":[ - { "type": "bool", "name": "tbc"}, - { "type": "float", "name": "tfc", "max": 1, "min": 0 }, - { "type": "double", "name": "tdc", "max": 10, "min": 0 }, - { "type": "tinyint", "name": "tti", "max": 100, "min": -100 }, - { "type": "smallint", "name": "tsi", "max": 100, "min": -50 }, - { "type": "int", "name": "tic", "max": 1000, "min": -1000 }, - { "type": "bigint", "name": "tbi", "max": 100, "min": -1000 }, - { "type": "utinyint", "name": "tuti", "max": 100, "min": 0 }, - { "type": "usmallint", "name": "tusi", "max": 100, "min": 0 }, - { "type": "uint", "name": "tui", "max": 1000, "min": 0 }, - { "type": "ubigint", "name": "tubi", "max": 10000, "min": 
0 }, - { "type": "binary", "name": "tbin", "len": 4}, - { "type": "nchar", "name": "tnch", "len": 8}, - { "type": "varbinary", "name": "tvab", "len": 8}, - { "type": "varchar", "name": "tvac", "len": 8}, - { "type": "geometry", "name": "tgeo", "len": 32} - ] - } - ] - } - ] -} diff --git a/tests/army/tools/taosdump/native/taosdumpCommandline.py b/tests/army/tools/taosdump/native/taosdumpCommandline.py index 98f1ff6b1d..c3b33ba45e 100644 --- a/tests/army/tools/taosdump/native/taosdumpCommandline.py +++ b/tests/army/tools/taosdump/native/taosdumpCommandline.py @@ -95,11 +95,11 @@ class TDTestCase(TBase): # normal table sqls = [ f"create table {db}.ntb(st timestamp, c1 int, c2 binary(32))", - f"insert into {db}.ntb values(now, 1, 'abc1')", - f"insert into {db}.ntb values(now, 2, 'abc2')", - f"insert into {db}.ntb values(now, 3, 'abc3')", - f"insert into {db}.ntb values(now, 4, 'abc4')", - f"insert into {db}.ntb values(now, 5, 'abc5')", + f"insert into {db}.ntb values('2025-01-01 10:00:01', 1, 'abc1')", + f"insert into {db}.ntb values('2025-01-01 10:00:02', 2, 'abc2')", + f"insert into {db}.ntb values('2025-01-01 10:00:03', 3, 'abc3')", + f"insert into {db}.ntb values('2025-01-01 10:00:04', 4, 'abc4')", + f"insert into {db}.ntb values('2025-01-01 10:00:05', 5, 'abc5')", ] for sql in sqls: tdSql.execute(sql) @@ -158,9 +158,10 @@ class TDTestCase(TBase): def basicCommandLine(self, tmpdir): #command and check result checkItems = [ - [f"-h 127.0.0.1 -P 6030 -uroot -ptaosdata -A -N -o {tmpdir}", ["OK: Database test dumped"]], + [f"-h 127.0.0.1 -P 6041 -uroot -ptaosdata -A -N -o {tmpdir}", ["OK: Database test dumped"]], [f"-r result -a -e test d0 -o {tmpdir}", ["OK: table: d0 dumped", "OK: 100 row(s) dumped out!"]], [f"-n -D test -o {tmpdir}", ["OK: Database test dumped", "OK: 205 row(s) dumped out!"]], + [f"-Z 0 -P 6030 -n -D test -o {tmpdir}", ["OK: Database test dumped", "OK: 205 row(s) dumped out!"]], [f"-L -D test -o {tmpdir}", ["OK: Database test dumped", "OK: 205 row(s) dumped out!"]], [f"-s -D test -o {tmpdir}", ["dumping out schema: 1 from meters.d0", "OK: Database test dumped", "OK: 0 row(s) dumped out!"]], [f"-N -d deflate -S '2022-10-01 00:00:50.000' test meters -o {tmpdir}",["OK: table: meters dumped", "OK: 100 row(s) dumped out!"]], @@ -172,7 +173,18 @@ class TDTestCase(TBase): [f"--help", ["Report bugs to"]], [f"-?", ["Report bugs to"]], [f"-V", ["version:"]], - [f"--usage", ["taosdump [OPTION...] -o outpath"]] + [f"--usage", ["taosdump [OPTION...] 
-o outpath"]], + # conn mode -Z + [f"-Z 0 -E '2022-10-01 00:00:60.000' test -o {tmpdir}", [ + "Connect mode is : Native", + "OK: Database test dumped", + "OK: 122 row(s) dumped out!"] + ], + [f"-Z 1 -E '2022-10-01 00:00:60.000' test -o {tmpdir}", [ + "Connect mode is : WebSocket", + "OK: Database test dumped", + "OK: 122 row(s) dumped out!"] + ], ] # executes @@ -181,8 +193,9 @@ class TDTestCase(TBase): command = item[0] results = item[1] rlist = self.taosdump(command) - for result in results: - self.checkListString(rlist, result) + self.checkManyString(rlist, results) + # clear tmp + # check except def checkExcept(self, command): @@ -212,6 +225,73 @@ class TDTestCase(TBase): self.checkExcept(taosdump + f" -t 2 -k 2 -z 1 -C https://not-exist.com:80/cloud -D test -o {tmpdir}") self.checkExcept(taosdump + f" -P 65536") + # conn mode + options = [ + f"-Z native -X http://127.0.0.1:6041 -D {db} -o {tmpdir}", + f"-Z 100 -D {db} -o {tmpdir}", + f"-Z abcdefg -D {db} -o {tmpdir}", + f"-X -D {db} -o {tmpdir}", + f"-X 127.0.0.1:6041 -D {db} -o {tmpdir}", + f"-X https://gw.cloud.taosdata.com?token617ffdf... -D {db} -o {tmpdir}", + f"-Z 1 -X https://gw.cloud.taosdata.com?token=617ffdf... -D {db} -o {tmpdir}", + f"-X http://127.0.0.1:6042 -D {db} -o {tmpdir}" + ] + + # do check + for option in options: + self.checkExcept(taosdump + " " + option) + + + # expect cmd > json > evn + def checkPriority(self, db, stb, childCount, insertRows, tmpdir): + # + # cmd & env + # + + # env 6043 - invalid + os.environ['TDENGINE_CLOUD_DSN'] = "http://127.0.0.1:6043" + # cmd 6041 - valid + cmd = f"-X http://127.0.0.1:6041 -D {db} -o {tmpdir}" + self.clearPath(tmpdir) + rlist = self.taosdump(cmd) + results = [ + "Connect mode is : WebSocket", + "OK: Database test dumped", + "OK: 205 row(s) dumped out!" + ] + self.checkManyString(rlist, results) + + # + # env + # + + os.environ['TDENGINE_CLOUD_DSN'] = "http://127.0.0.1:6041" + # cmd 6041 - valid + self.clearPath(tmpdir) + cmd = f"-D {db} -o {tmpdir}" + rlist = self.taosdump(cmd) + self.checkManyString(rlist, results) + + # + # cmd + # + + os.environ['TDENGINE_CLOUD_DSN'] = "" + # cmd 6041 - valid + self.clearPath(tmpdir) + cmd = f"-X http://127.0.0.1:6041 -D {db} -o {tmpdir}" + rlist = self.taosdump(cmd) + self.checkManyString(rlist, results) + + # clear env + os.environ['TDENGINE_CLOUD_DSN'] = "" + + + # conn mode + def checkConnMode(self, db, stb, childCount, insertRows, tmpdir): + # priority + self.checkPriority(db, stb, childCount, insertRows, tmpdir) + # password def checkPassword(self, tmpdir): # 255 char max password @@ -254,7 +334,7 @@ class TDTestCase(TBase): tdLog.info("1. check long password ................................. [Passed]") # dumpInOut - modes = ["", "-R" , "--cloud=http://localhost:6041"] + modes = ["-Z native", "-Z websocket", "--dsn=http://localhost:6041"] for mode in modes: self.dumpInOutMode(mode, db , json, tmpdir) @@ -268,9 +348,6 @@ class TDTestCase(TBase): self.exceptCommandLine(taosdump, db, stb, tmpdir) tdLog.info("4. except command line ................................. [Passed]") - # - # varbinary and geometry for native - # json = "./tools/taosdump/native/json/insertOther.json" # insert db, stb, childCount, insertRows = self.insertData(json) @@ -278,6 +355,12 @@ class TDTestCase(TBase): self.dumpInOutMode("", db , json, tmpdir) tdLog.info("5. native varbinary geometry ........................... [Passed]") + # + # check connMode + # + + self.checkConnMode(db, stb, childCount, insertRows, tmpdir) + tdLog.info("6. 
check conn mode ..................................... [Passed]") def stop(self): diff --git a/tests/develop-test/test.py b/tests/develop-test/test.py index 3525fd6332..b291e58e3d 100644 --- a/tests/develop-test/test.py +++ b/tests/develop-test/test.py @@ -38,6 +38,8 @@ from util.taosadapter import * import taos import taosrest +from taos.cinterface import * +taos.taos_options(6, "native") def checkRunTimeError(): import win32gui diff --git a/tests/docs-examples-test/c.sh b/tests/docs-examples-test/c.sh index 54e334b22e..01b5428c82 100644 --- a/tests/docs-examples-test/c.sh +++ b/tests/docs-examples-test/c.sh @@ -34,6 +34,7 @@ declare -a TEST_EXES=( "query_data_demo" "with_reqid_demo" "stmt_insert_demo" + "stmt2_insert_demo" "tmq_demo" "sml_insert_demo" ) @@ -46,6 +47,7 @@ declare -a NEED_CLEAN=( "false" "false" "false" + "false" "true" ) @@ -89,4 +91,4 @@ if [ "$totalFailed" -ne "0" ]; then exit 1 fi -echo "All tests completed." \ No newline at end of file +echo "All tests completed." diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 8569a3cb6d..75c50ceb05 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -90,22 +90,25 @@ ,,y,army,./pytest.sh python3 ./test.py -f create/test_stb_keep_compact.py ,,y,army,./pytest.sh python3 ./test.py -f create/test_stb_keep_compact.py -N 3 ,,y,army,./pytest.sh python3 ./test.py -f create/test_stb_keep_compact.py -N 3 -M 3 +,,y,army,./pytest.sh python3 ./test.py -f stream/test_stream_vtable.py # # army/tools # -# benchmark 66 cases +# benchmark 64 cases ,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/commandline.py ,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/commandline-partial-col-numpy.py -,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/commandline-sml-rest.py -R ,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/commandline-single-table.py ,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/commandline-supplement-insert.py ,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/commandline-vgroups.py +,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/connMode.py -B + ,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/custom_col_tag.py ,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/default_json.py +,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/default_tmq_json.py ,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/demo.py ,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/csv-export.py @@ -126,6 +129,7 @@ ,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/query_json.py -B ,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/query_json-with-error-sqlfile.py ,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/query_json-with-sqlfile.py +,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/queryMain.py ,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/rest_insert_alltypes_json.py -R ,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/reuse-exist-stb.py @@ -161,6 +165,7 @@ ,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/tmqBasic.py ,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/tmq_case.py +,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/websiteCase.py ,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/cloud/cloud-test.py ,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/ws/websocket.py -R 
@@ -562,7 +567,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/taosd_audit.py ,,n,system-test,python3 ./test.py -f 0-others/taosdlog.py ,,n,system-test,python3 ./test.py -f 0-others/taosdShell.py -N 5 -M 3 -Q 3 -,,n,system-test,python3 ./test.py -f 0-others/udfTest.py +,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/udfTest.py ,,n,system-test,python3 ./test.py -f 0-others/udf_create.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/udf_restart_taosd.py ,,n,system-test,python3 ./test.py -f 0-others/udf_cfg1.py diff --git a/tests/parallel_test/longtimeruning_cases.task b/tests/parallel_test/longtimeruning_cases.task index 64c2b1b45b..e4d007ab3d 100644 --- a/tests/parallel_test/longtimeruning_cases.task +++ b/tests/parallel_test/longtimeruning_cases.task @@ -7,6 +7,9 @@ # #,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/stream_multi_agg.py #,,n,system-test,python3 ./test.py -f 8-stream/stream_basic.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/continuous_window_close_interval_basic.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/continuous_window_close_interval.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/continuous_window_close_interval_checkpoint.py # army-test #,,y,army,./pytest.sh python3 ./test.py -f multi-level/mlevel_basic.py -N 3 -L 3 -D 2 diff --git a/tests/parallel_test/run_case.sh b/tests/parallel_test/run_case.sh index b72a7cacca..6cbeecd65e 100755 --- a/tests/parallel_test/run_case.sh +++ b/tests/parallel_test/run_case.sh @@ -50,6 +50,8 @@ if [ $ent -eq 0 ]; then export LD_LIBRARY_PATH=/home/TDengine/debug/build/lib ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so 2>/dev/null ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so.1 2>/dev/null + ln -s /home/TDengine/debug/build/lib/libtaosnative.so /usr/lib/libtaosnative.so 2>/dev/null + ln -s /home/TDengine/debug/build/lib/libtaosnative.so /usr/lib/libtaosnative.so.1 2>/dev/null ln -s /home/TDengine/include/client/taos.h /usr/include/taos.h 2>/dev/null ln -s /home/TDengine/include/common/taosdef.h /usr/include/taosdef.h 2>/dev/null ln -s /home/TDengine/include/util/taoserror.h /usr/include/taoserror.h 2>/dev/null @@ -60,6 +62,8 @@ else export LD_LIBRARY_PATH=/home/TDinternal/debug/build/lib ln -s /home/TDinternal/debug/build/lib/libtaos.so /usr/lib/libtaos.so 2>/dev/null ln -s /home/TDinternal/debug/build/lib/libtaos.so /usr/lib/libtaos.so.1 2>/dev/null + ln -s /home/TDinternal/debug/build/lib/libtaosnative.so /usr/lib/libtaosnative.so 2>/dev/null + ln -s /home/TDinternal/debug/build/lib/libtaosnative.so /usr/lib/libtaosnative.so.1 2>/dev/null ln -s /home/TDinternal/community/include/client/taos.h /usr/include/taos.h 2>/dev/null ln -s /home/TDinternal/community/include/common/taosdef.h /usr/include/taosdef.h 2>/dev/null ln -s /home/TDinternal/community/include/util/taoserror.h /usr/include/taoserror.h 2>/dev/null @@ -75,6 +79,8 @@ ulimit -c unlimited md5sum /usr/lib/libtaos.so.1 md5sum /home/TDinternal/debug/build/lib/libtaos.so +md5sum /usr/lib/libtaosnative.so.1 +md5sum /home/TDinternal/debug/build/lib/libtaosnative.so #get python connector and update: taospy and taos-ws-py to latest pip3 install taospy==2.7.21 @@ -84,6 +90,8 @@ RET=$? 
echo "cmd exit code: $RET" md5sum /usr/lib/libtaos.so.1 md5sum /home/TDinternal/debug/build/lib/libtaos.so +md5sum /usr/lib/libtaosnative.so.1 +md5sum /home/TDinternal/debug/build/lib/libtaosnative.so if [ $RET -ne 0 ]; then diff --git a/tests/pytest/auto_run_regular.sh b/tests/pytest/auto_run_regular.sh index 27e8013269..c5f275cd68 100755 --- a/tests/pytest/auto_run_regular.sh +++ b/tests/pytest/auto_run_regular.sh @@ -5,6 +5,8 @@ export PATH=$PATH:/home/TDengine/debug/build/bin export LD_LIBRARY_PATH=/home/TDengine/debug/build/lib ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so 2>/dev/null ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so.1 2>/dev/null +ln -s /home/TDengine/debug/build/lib/libtaosnative.so /usr/lib/libtaosnative.so 2>/dev/null +ln -s /home/TDengine/debug/build/lib/libtaosnative.so /usr/lib/libtaosnative.so.1 2>/dev/null ln -s /home/TDengine/include/client/taos.h /usr/include/taos.h 2>/dev/null # run crash_gen auto script diff --git a/tests/pytest/auto_run_valgrind.sh b/tests/pytest/auto_run_valgrind.sh index c7154e867c..4b5e9ee91c 100755 --- a/tests/pytest/auto_run_valgrind.sh +++ b/tests/pytest/auto_run_valgrind.sh @@ -5,6 +5,8 @@ export PATH=$PATH:/home/TDengine/debug/build/bin export LD_LIBRARY_PATH=/home/TDengine/debug/build/lib ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so 2>/dev/null ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so.1 2>/dev/null +ln -s /home/TDengine/debug/build/lib/libtaosnative.so /usr/lib/libtaosnative.so 2>/dev/null +ln -s /home/TDengine/debug/build/lib/libtaosnative.so /usr/lib/libtaosnative.so.1 2>/dev/null ln -s /home/TDengine/include/client/taos.h /usr/include/taos.h 2>/dev/null # run crash_gen auto script diff --git a/tests/pytest/auto_run_valgrind_cluster.sh b/tests/pytest/auto_run_valgrind_cluster.sh index 62bc22e923..4706bbd6d0 100755 --- a/tests/pytest/auto_run_valgrind_cluster.sh +++ b/tests/pytest/auto_run_valgrind_cluster.sh @@ -5,6 +5,8 @@ export PATH=$PATH:/home/TDengine/debug/build/bin export LD_LIBRARY_PATH=/home/TDengine/debug/build/lib ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so 2>/dev/null ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so.1 2>/dev/null +ln -s /home/TDengine/debug/build/lib/libtaosnative.so /usr/lib/libtaosnative.so 2>/dev/null +ln -s /home/TDengine/debug/build/lib/libtaosnative.so /usr/lib/libtaosnative.so.1 2>/dev/null ln -s /home/TDengine/include/client/taos.h /usr/include/taos.h 2>/dev/null # run crash_gen auto script diff --git a/tests/pytest/dockerCluster/Dockerfile b/tests/pytest/dockerCluster/Dockerfile index 437dbc65e6..ecb068541f 100644 --- a/tests/pytest/dockerCluster/Dockerfile +++ b/tests/pytest/dockerCluster/Dockerfile @@ -33,6 +33,7 @@ COPY --from=builder /root/bin/taosdump /usr/bin COPY --from=builder /root/bin/taos /usr/bin COPY --from=builder /root/cfg/taos.cfg /etc/taos/ COPY --from=builder /root/lib/libtaos.so.* /usr/lib/libtaos.so.1 +COPY --from=builder /root/lib/libtaosnative.so.* /usr/lib/libtaosnative.so.1 ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" ENV LC_CTYPE=en_US.UTF-8 diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py index c12f324fd7..c2861155de 100644 --- a/tests/pytest/util/common.py +++ b/tests/pytest/util/common.py @@ -923,12 +923,12 @@ class TDCom: else: if watermark is None: - if trigger_mode == "max_delay": + if trigger_mode == "max_delay" or trigger_mode == "continuous_window_close" : stream_options = f'trigger {trigger_mode} 
{max_delay}' else: stream_options = f'trigger {trigger_mode}' else: - if trigger_mode == "max_delay": + if trigger_mode == "max_delay" or trigger_mode == "continuous_window_close" : stream_options = f'trigger {trigger_mode} {max_delay} watermark {watermark}' else: stream_options = f'trigger {trigger_mode} watermark {watermark}' diff --git a/tests/script/tsim/stream/basic5.sim b/tests/script/tsim/stream/basic5.sim index 866fbd3ebe..cfda6748b4 100644 --- a/tests/script/tsim/stream/basic5.sim +++ b/tests/script/tsim/stream/basic5.sim @@ -204,7 +204,7 @@ sql insert into t1 values(1648791269001,30,2,3,1.0); $loop_count = 0 -loop11: +loop110: sleep 200 @@ -218,17 +218,17 @@ sql select * from streamt3; if $rows != 30 then print =====rows=$rows - goto loop11 + goto loop110 endi if $data[20][1] != 2 then print =====[20][1]=$[20][1] - goto loop11 + goto loop110 endi if $data[29][1] != 2 then print =====[29][1]=$[29][1] - goto loop11 + goto loop110 endi print step2============= diff --git a/tests/script/tsim/stream/nonblockIntervalBasic.sim b/tests/script/tsim/stream/nonblockIntervalBasic.sim index 855982e07a..5af09a1bca 100644 --- a/tests/script/tsim/stream/nonblockIntervalBasic.sim +++ b/tests/script/tsim/stream/nonblockIntervalBasic.sim @@ -13,6 +13,13 @@ sql use test; sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); + + +sql_error create stream streams_er1 trigger continuous_window_close ignore update 0 ignore expired 0 into streamt_et1 as select _wstart, count(*) c1, sum(b) c2 from st partition by tbname session(ts, 10s); +sql_error create stream streams_er2 trigger continuous_window_close ignore update 0 ignore expired 0 into streamt_et2 as select _wstart, count(*) c1, sum(b) c2 from st partition by tbname state_window(a) ; +sql_error create stream streams_er3 trigger continuous_window_close ignore update 0 ignore expired 0 into streamt_et3 as select _wstart, count(*) c1, sum(b) c2 from st partition by tbname count_window(10); +sql_error create stream streams_er4 trigger continuous_window_close ignore update 0 ignore expired 0 into streamt_et4 as select _wstart, count(*) c1, sum(b) c2 from st partition by tbname event_window start with a = 0 end with b = 9; + sql create stream streams1 trigger continuous_window_close ignore update 0 ignore expired 0 into streamt1 as select _wstart, count(*) c1, sum(b) c2 from st partition by tbname interval(10s) ; run tsim/stream/checkTaskStatus.sim @@ -449,7 +456,7 @@ if $rows != 6 then goto loop6 endi - +$loop_count = 0 loop7: sleep 500 print sql select * from information_schema.ins_tables where table_name like "streams6-tbn-%"; @@ -472,6 +479,7 @@ if $rows != 2 then goto loop7 endi +$loop_count = 0 loop8: sleep 500 print sql select * from information_schema.ins_tables where table_name like "streams7-tbn-%"; @@ -494,7 +502,7 @@ if $rows != 2 then goto loop8 endi - +$loop_count = 0 loop9: sleep 500 print sql select * from streamt6; @@ -517,6 +525,7 @@ if $rows != 2 then goto loop9 endi +$loop_count = 0 loop10: sleep 500 print sql select * from streamt7; @@ -560,6 +569,7 @@ sql insert into t2 values(1648791211000,2,2,3); sql insert into t1 values(1648791221000,1,3,3); +$loop_count = 0 loop11: sleep 500 print sql select * from streamt9; @@ -589,6 +599,7 @@ endi sql insert into t2 values(1648791211001,2,4,3); +$loop_count = 0 loop12: sleep 500 print sql select * from streamt8; @@ -611,6 +622,7 @@ if $rows != 1 then goto loop12 endi +$loop_count = 0 
loop13: sleep 500 print sql select * from streamt9; diff --git a/tests/script/tsim/stream/nonblockIntervalHistory.sim b/tests/script/tsim/stream/nonblockIntervalHistory.sim index c7918d3bc8..188e0c17f5 100644 --- a/tests/script/tsim/stream/nonblockIntervalHistory.sim +++ b/tests/script/tsim/stream/nonblockIntervalHistory.sim @@ -153,6 +153,7 @@ sql create stream streams12 trigger continuous_window_close fill_history 1 ignor run tsim/stream/checkTaskStatus.sim +$loop_count = 0 loop3: sleep 500 sql select * from streamt12 order by 1,2; @@ -177,7 +178,7 @@ endi sql insert into t1 values(1648791224001,2,2,3); sql insert into t1 values(1648791225001,2,2,3); - +$loop_count = 0 loop4: sleep 500 sql select * from streamt12 where c3 == "t1" order by 1,2; @@ -239,6 +240,7 @@ print $data30 $data31 $data32 $data33 $data34 print $data40 $data41 $data42 $data43 $data44 print $data50 $data51 $data52 $data53 $data54 +$loop_count = 0 loop5: sleep 500 print sql loop5 select * from streamt3 order by 1,2; @@ -271,8 +273,6 @@ if $data11 != 2 then goto loop5 endi -return 1 - sql insert into t1 values(1648791221001,3,5,3); sql insert into t1 values(1648791241001,3,6,3); diff --git a/tests/system-test/0-others/test_hot_refresh_configurations.py b/tests/system-test/0-others/test_hot_refresh_configurations.py index 47f5209940..50b7ead9ce 100644 --- a/tests/system-test/0-others/test_hot_refresh_configurations.py +++ b/tests/system-test/0-others/test_hot_refresh_configurations.py @@ -102,7 +102,7 @@ class TDTestCase: "alias": "tsMinDiskFreeSize", "values": ["51200K", "100M", "1G"], "check_values": ["52428800", "104857600", "1073741824"], - "except_values": ["1024K", "1.1G", "1T"] + "except_values": ["1024K", "2049G", "3T"] }, { "name": "tmqMaxTopicNum", diff --git a/tests/system-test/0-others/udfTest.py b/tests/system-test/0-others/udfTest.py index 7d953f2977..dc5d5fde8d 100644 --- a/tests/system-test/0-others/udfTest.py +++ b/tests/system-test/0-others/udfTest.py @@ -631,11 +631,16 @@ class TDTestCase: tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") tdSql.checkData(0,0,169.661427555) tdSql.checkData(0,1,169.661427555) - # stop taosudf cmds + + clean_env = os.environ.copy() + clean_env.pop('ASAN_OPTIONS', None) + clean_env.pop('LD_PRELOAD', None) get_processID = "ps -ef | grep -w taosudf | grep -v grep| grep -v defunct | awk '{print $2}'" - processID = subprocess.check_output(get_processID, shell=True).decode("utf-8") + processID = subprocess.check_output(get_processID, shell=True, env=clean_env).decode("utf-8") + tdLog.info("taosudf process ID: %s" % processID) stop_udfd = " kill -9 %s" % processID os.system(stop_udfd) + time.sleep(2) diff --git a/tests/system-test/1-insert/stmt_error.py b/tests/system-test/1-insert/stmt_error.py index 0bfbedb9a1..ce243edd72 100644 --- a/tests/system-test/1-insert/stmt_error.py +++ b/tests/system-test/1-insert/stmt_error.py @@ -154,6 +154,129 @@ class TDTestCase: conn.close() raise err + def test_stmt_insert_vtb_error(self,conn): + # type: (TaosConnection) -> None + + dbname = "pytest_taos_stmt_vtb_error" + try: + conn.execute("drop database if exists %s" % dbname) + conn.execute("create database if not exists %s" % dbname) + conn.select_db(dbname) + + conn.execute( + "create table if not exists log(ts timestamp, bo bool, nil tinyint, ti tinyint, si smallint, ii int,\ + bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \ + ff float, dd double, bb binary(65059), 
nn nchar(100), tt timestamp)", + ) + + conn.execute( + "create vtable if not exists log_v(ts timestamp, bo bool from pytest_taos_stmt_vtb_error.log.bo, " + "nil tinyint, ti tinyint, si smallint, ii int,\ + bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \ + ff float, dd double, bb binary(65059), nn nchar(100), tt timestamp)", + ) + conn.load_table_info("log_v") + + + stmt = conn.statement("insert into log_v values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + params = new_bind_params(16) + params[0].timestamp(1626861392589, PrecisionEnum.Milliseconds) + params[1].bool(True) + params[2].tinyint(None) + params[3].tinyint(2) + params[4].smallint(3) + params[5].int(4) + params[6].bigint(5) + params[7].tinyint_unsigned(6) + params[8].smallint_unsigned(7) + params[9].int_unsigned(8) + params[10].bigint_unsigned(9) + params[11].float(10.1) + params[12].double(10.11) + binaryStr = '123456789' + for i in range(1301): + binaryStr += "1234567890abcdefghij1234567890abcdefghij12345hello" + params[13].binary(binaryStr) + params[14].nchar("stmt") + params[15].timestamp(1626861392589, PrecisionEnum.Milliseconds) + + stmt.bind_param(params) + stmt.execute() + + assert stmt.affected_rows == 1 + stmt.close() + + querystmt=conn.statement("select ?, bo, nil, ti, si, ii,bi, tu, su, iu, bu, ff, dd, bb, nn, tt from log") + queryparam=new_bind_params(1) + print(type(queryparam)) + queryparam[0].binary("ts") + querystmt.bind_param(queryparam) + querystmt.execute() + result=querystmt.use_result() + + row=result.fetch_all() + print(row) + + assert row[0][1] == True + assert row[0][2] == None + for i in range(3, 10): + assert row[0][i] == i - 1 + #float == may not work as expected + # assert row[0][11] == c_float(10.1) + assert row[0][12] == 10.11 + assert row[0][13][65054:] == "hello" + assert row[0][14] == "stmt" + + conn.execute("drop database if exists %s" % dbname) + conn.close() + + except Exception as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + raise err + + def test_stmt_insert_vstb_error(self,conn): + + dbname = "pytest_taos_stmt_vstb_error" + try: + conn.execute("drop database if exists %s" % dbname) + conn.execute("create database if not exists %s" % dbname) + conn.execute("alter database %s keep 36500" % dbname) + conn.select_db(dbname) + + conn.execute("create stable STB_v(ts timestamp, n int) tags(b int) virtual 1") + + stmt = conn.statement("insert into ? using STB_v tags(?) 
values(?, ?)") + params = new_bind_params(1) + params[0].int(4); + stmt.set_tbname_tags("ct", params); + + multi_params = new_multi_binds(2); + multi_params[0].timestamp([9223372036854775808]) + multi_params[1].int([123]) + stmt.bind_param_batch(multi_params) + + stmt.execute() + result = stmt.use_result() + + result.close() + stmt.close() + + stmt = conn.statement("select * from STB") + stmt.execute() + result = stmt.use_result() + print(result.affected_rows) + row = result.next() + print(row) + + result.close() + stmt.close() + conn.close() + + except Exception as err: + conn.close() + raise err + def test_stmt_insert_error_null_timestamp(self,conn): dbname = "pytest_taos_stmt_error_null_ts" @@ -270,6 +393,24 @@ class TDTestCase: tdLog.info('=========stmt error occured for bind part column(NULL Timestamp) ==============') else: tdLog.exit("expect error(%s) not occured - 2" % str(error)) + + try: + self.test_stmt_insert_vtb_error(self.conn()) + except Exception as error : + + if str(error)=='[0x6205]: Virtual table not support in STMT query and STMT insert': + tdLog.info('=========stmt error occured for bind part column ==============') + else: + tdLog.exit("expect error(%s) not occured" % str(error)) + + try: + self.test_stmt_insert_vstb_error(self.conn()) + except Exception as error : + + if str(error)=='[0x6205]: Virtual table not support in STMT query and STMT insert': + tdLog.info('=========stmt error occured for bind part column ==============') + else: + tdLog.exit("expect error(%s) not occured" % str(error)) def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/system-test/8-stream/continuous_window_close_interval.py b/tests/system-test/8-stream/continuous_window_close_interval.py new file mode 100644 index 0000000000..908b7049a8 --- /dev/null +++ b/tests/system-test/8-stream/continuous_window_close_interval.py @@ -0,0 +1,217 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + + +class TDTestCase: + updatecfgDict = {"debugFlag": 135, "asynclog": 0, "ratioOfVnodeStreamThreads": 4} + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def check_stream_all_task_status(self, stream_timeout=0): + """check stream status + + Args: + stream_name (str): stream_name + Returns: + str: status + """ + timeout = self.stream_timeout if stream_timeout is None else stream_timeout + + #check stream task rows + sql_task_status = f"select * from information_schema.ins_stream_tasks where status != \"ready\";" + sql_task_all = f"select * from information_schema.ins_stream_tasks;" + + #check stream task status + checktimes = 0 + while checktimes <= timeout: + tdLog.notice(f"checktimes:{checktimes}") + try: + tdSql.query(sql_task_status,row_tag=True) + result_task_status_rows = tdSql.getRows() + if result_task_status_rows == 0: + tdSql.query(sql_task_all,row_tag=True) + result_task_status_rows = tdSql.getRows() + if result_task_status_rows > 0: + break + time.sleep(1) + checktimes += 1 + except Exception as e: + tdLog.notice(f"Try to check stream status again, check times: {checktimes}") + checktimes += 1 + tdSql.print_error_frame_info(f"status is not ready") + else: + tdLog.notice(f"it has spend {checktimes} for checking stream task status but it failed") + if checktimes == timeout: + tdSql.print_error_frame_info(f"status is ready,") + + 
def docontinuous( + self, + interval, + watermark=None, + partition=None, + fill_value=None, + fill_history_value=None, + ignore_expired=0, + ignore_update=0, + use_exist_stb=None, + tag_value=None + ): + tdLog.info(f"*** testing stream continuous window close: interval: {interval}, partition: {partition}, fill_history: {fill_history_value}, use_exist_stb: {use_exist_stb}, fill: {fill_value}, tag_value: {tag_value} ***") + self.tdCom.case_name = sys._getframe().f_code.co_name + if watermark is not None: + self.tdCom.case_name = "watermark" + sys._getframe().f_code.co_name + self.tdCom.prepare_data(interval=interval, watermark=watermark, ext_stb=use_exist_stb) + tdLog.info( + f"testing stream continue_window_close finish prepare_data" + ) + + sqlstr = "alter local 'streamCoverage' '1'" + tdSql.query(sqlstr) + recalculatetime = 60 + recalculatetimeStr = f"recalculate {recalculatetime}s" + + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' + self.delete = True + + if partition == "tbname": + partition_elm_alias = self.tdCom.partition_tbname_alias + elif partition == "c1": + partition_elm_alias = self.tdCom.partition_col_alias + elif partition == "abs(c1)": + partition_elm_alias = self.tdCom.partition_expression_alias + else: + partition_elm_alias = self.tdCom.partition_tag_alias + if partition == "tbname": + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", cast({partition_elm_alias} as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + + if watermark is not None: + watermark_value = f'{self.tdCom.dataDict["watermark"]}s' + else: + watermark_value = None + # create stb/ctb/tb stream + if fill_value: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + tdLog.info( + f"*** testing stream continue_window_close + interval + fill. 
partition: {partition}, interval: {interval}, fill: {fill_value} ***" + ) + + # no subtable + # create stream super table and child table + tdLog.info("create stream super table and child table") + if use_exist_stb: + self.stb_stream_des_table = self.tdCom.ext_stb_stream_des_table + self.des_select_str = self.tdCom.ext_tb_source_select_str + else: + self.des_select_str = self.tdCom.stb_source_select_str + self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.des_select_str} from {self.stb_name} partition by {partition} {partition_elm_alias} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="continuous_window_close", watermark=watermark_value, ignore_expired=ignore_expired, subtable_value=stb_subtable_value, fill_value=fill_value, use_exist_stb=use_exist_stb, tag_value=tag_value, max_delay=recalculatetimeStr) + + # wait and check stream_task status is ready + tdSql.query("show streams") + tdLog.info(f"tdSql.queryResult:{tdSql.queryResult},tdSql.queryRows:{tdSql.queryRows}") + self.check_stream_all_task_status( + stream_timeout=120 + ) + + # insert data + start_time = self.tdCom.date_time + print(f"range count:{self.tdCom.range_count}") + for i in range(self.tdCom.range_count): + if i == 0: + if watermark is not None: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark']) + else: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval']) + else: + self.tdCom.date_time = window_close_ts + self.tdCom.offset + window_close_ts += self.tdCom.dataDict['interval']*self.tdCom.offset + + for num in range(int(window_close_ts/self.tdCom.offset-self.tdCom.date_time/self.tdCom.offset)): + ts_value=self.tdCom.date_time+num*self.tdCom.offset + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + if i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + if self.delete and i%2 != 0: + ts_cast_delete_value = self.tdCom.time_cast(ts_value) + self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=ts_cast_delete_value) + + if not fill_value and partition != "c1": + for tbname in [self.stb_stream_des_table]: + if use_exist_stb and tbname == self.stb_stream_des_table: + tdSql.waitedQuery(f'select {self.tdCom.partitial_stb_filter_des_select_elm} from {self.stb_stream_des_table}', i, 60) + else: + tdSql.waitedQuery(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}', i, 60) + + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1) + + if not fill_value: + for tbname in [self.stb_stream_des_table]: + if use_exist_stb and tbname == self.stb_stream_des_table: + tdSql.waitedQuery(f'select {self.tdCom.partitial_stb_filter_des_select_elm} from {self.stb_stream_des_table}', i, 60) + else: + tdSql.waitedQuery(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}', i, 60) + + start_ts = start_time + future_ts = str(self.tdCom.date_time)+f'+{self.tdCom.dataDict["interval"]*(self.tdCom.range_count+2)}s' + end_ts = self.tdCom.time_cast(future_ts) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts) + future_ts_bigint = self.tdCom.str_ts_trans_bigint(future_ts) + if watermark is not None: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(future_ts_bigint, self.tdCom.dataDict['interval'], 
self.tdCom.dataDict['watermark']) + else: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(future_ts_bigint, self.tdCom.dataDict['interval']) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + + waitTime = recalculatetime * 2 + tdLog.info(f"sleep {waitTime} s") + time.sleep(waitTime) + + if fill_value: + for tbname in [self.stb_name]: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + additional_options = f"where ts >= {start_ts} and ts <= {end_ts}" + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} {additional_options} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value) + else: + for tbname in [self.stb_name]: + additional_options = f"where ts <= {end_ts}" + self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} {additional_options} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart', fill_value=fill_value) + + def run(self): + for fill_value in [None, "VALUE", "NULL", "PREV", "NEXT", "LINEAR"]: + self.docontinuous( + interval=random.randint(10, 15), + partition="tbname", + fill_value=fill_value + ) + for fill_value in ["VALUE", "NULL", "PREV", "NEXT", "LINEAR", None]: + self.docontinuous( + interval=random.randint(10, 12), + partition="t1 as t5,t2 as t11,t3 as t13, t4", + fill_value=fill_value + ) + + def stop(self): + tdLog.info("stop========================================") + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +event = threading.Event() + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/8-stream/continuous_window_close_interval_basic.py b/tests/system-test/8-stream/continuous_window_close_interval_basic.py new file mode 100644 index 0000000000..4e080371cb --- /dev/null +++ b/tests/system-test/8-stream/continuous_window_close_interval_basic.py @@ -0,0 +1,242 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + + +class TDTestCase: + updatecfgDict = {"debugFlag": 135, "asynclog": 0, "ratioOfVnodeStreamThreads": 4} + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def check_stream_all_task_status(self, stream_timeout=0): + """check stream status + + Args: + stream_name (str): stream_name + Returns: + str: status + """ + timeout = self.stream_timeout if stream_timeout is None else stream_timeout + + #check stream task rows + sql_task_status = f"select * from information_schema.ins_stream_tasks where status != \"ready\";" + sql_task_all = f"select * from information_schema.ins_stream_tasks;" + + #check stream task status + checktimes = 0 + while checktimes <= timeout: + tdLog.notice(f"checktimes:{checktimes}") + try: + tdSql.query(sql_task_status,row_tag=True) + result_task_status_rows = tdSql.getRows() + if result_task_status_rows == 0: + tdSql.query(sql_task_all,row_tag=True) + result_task_status_rows = 
tdSql.getRows() + if result_task_status_rows > 0: + break + time.sleep(1) + checktimes += 1 + except Exception as e: + tdLog.notice(f"Try to check stream status again, check times: {checktimes}") + checktimes += 1 + tdSql.print_error_frame_info(f"status is not ready") + else: + tdLog.notice(f"it has spend {checktimes} for checking stream task status but it failed") + if checktimes == timeout: + tdSql.print_error_frame_info(f"status is ready,") + + def docontinuous( + self, + interval, + watermark=None, + partition=None, + fill_value=None, + fill_history_value=None, + ignore_expired=0, + ignore_update=0, + use_exist_stb=None, + tag_value=None, + ): + tdLog.info(f"*** testing stream continuous window close: interval: {interval}, partition: {partition}, fill_history: {fill_history_value}, use_exist_stb: {use_exist_stb}, fill: {fill_value}, tag_value: {tag_value} ***") + self.tdCom.case_name = sys._getframe().f_code.co_name + if watermark is not None: + self.tdCom.case_name = "watermark" + sys._getframe().f_code.co_name + self.tdCom.prepare_data(interval=interval, watermark=watermark, ext_stb=use_exist_stb) + tdLog.info( + f"testing stream continue_window_close finish prepare_data" + ) + + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}' + defined_tag_count = len(tag_value.split()) if tag_value is not None else 0 + + if partition == "tbname": + partition_elm_alias = self.tdCom.partition_tbname_alias + elif partition == "c1": + partition_elm_alias = self.tdCom.partition_col_alias + elif partition == "abs(c1)": + partition_elm_alias = self.tdCom.partition_expression_alias + else: + partition_elm_alias = self.tdCom.partition_tag_alias + if partition == "tbname": + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", cast({partition_elm_alias} as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast({partition_elm_alias} as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + + if watermark is not None: + watermark_value = f'{self.tdCom.dataDict["watermark"]}s' + else: + watermark_value = None + # create stb/ctb/tb stream + if fill_value: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + tdLog.info( + f"*** testing stream continue_window_close + interval + fill. 
partition: {partition}, interval: {interval}, fill: {fill_value} ***" + ) + + # no subtable + # create stream super table and child table + tdLog.info("create stream super table and child table") + if use_exist_stb: + self.stb_stream_des_table = self.tdCom.ext_stb_stream_des_table + self.des_select_str = self.tdCom.ext_tb_source_select_str + else: + self.des_select_str = self.tdCom.stb_source_select_str + recalculatetime = 60000 + recalculatetimeStr = f"recalculate {recalculatetime}s" + self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.des_select_str} from {self.stb_name} partition by {partition} {partition_elm_alias} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="continuous_window_close", watermark=watermark_value, ignore_expired=ignore_expired, subtable_value=stb_subtable_value, fill_value=fill_value, use_exist_stb=use_exist_stb, tag_value=tag_value, max_delay=recalculatetimeStr) + self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} partition by {partition} {partition_elm_alias} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="continuous_window_close", watermark=watermark_value, ignore_expired=ignore_expired, subtable_value=ctb_subtable_value, fill_value=fill_value, use_exist_stb=use_exist_stb, max_delay=recalculatetimeStr) + + # wait and check stream_task status is ready + tdSql.query("show streams") + tdLog.info(f"tdSql.queryResult:{tdSql.queryResult},tdSql.queryRows:{tdSql.queryRows}") + localQueryResult = tdSql.queryResult + for stream_number in range(tdSql.queryRows): + stream_name = localQueryResult[stream_number][0] + self.check_stream_all_task_status( + stream_timeout=120 + ) + + # insert data + start_time = self.tdCom.date_time + print(f"range count:{self.tdCom.range_count}") + for i in range(self.tdCom.range_count): + if i == 0: + if watermark is not None: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark']) + else: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval']) + else: + self.tdCom.date_time = window_close_ts + self.tdCom.offset + window_close_ts += self.tdCom.dataDict['interval']*self.tdCom.offset + + for num in range(int(window_close_ts/self.tdCom.offset-self.tdCom.date_time/self.tdCom.offset)): + ts_value=self.tdCom.date_time+num*self.tdCom.offset + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + + if not fill_value and partition != "c1": + for tbname in [self.stb_stream_des_table, self.ctb_stream_des_table]: + if use_exist_stb and tbname == self.stb_stream_des_table: + tdSql.waitedQuery(f'select {self.tdCom.partitial_stb_filter_des_select_elm} from {self.stb_stream_des_table}', i, 60) + else: + tdSql.waitedQuery(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}', i, 60) + + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1) + + if not fill_value: + for tbname in [self.stb_stream_des_table, self.ctb_stream_des_table]: + if use_exist_stb and tbname == self.stb_stream_des_table: + tdSql.waitedQuery(f'select {self.tdCom.partitial_stb_filter_des_select_elm} from {self.stb_stream_des_table}', i, 60) + else: + tdSql.waitedQuery(f'select 
wstart, {self.tdCom.stb_output_select_str} from {tbname}', i, 60) + + if self.tdCom.subtable: + for tname in [self.stb_name, self.ctb_name]: + ptn_counter = 0 + if partition == "c1": + tdSql.query(f'select * from {self.tb_name}') + for c1_value in tdSql.queryResult: + tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`', count_expected_res=self.tdCom.range_count) + elif partition == "tbname" and ptn_counter == 0: + tdLog.info(f'====={tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}=line{sys._getframe().f_lineno}') + tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`', count_expected_res=self.tdCom.range_count) + ptn_counter += 1 + else: + tdLog.info(f'====={tname}_{self.tdCom.subtable_prefix}=line{sys._getframe().f_lineno}') + tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}') + tdSql.query(f'select count(*) from `{tbname}`', count_expected_res=self.tdCom.range_count) + ptn_counter += 1 + + if fill_value: + start_ts = start_time + future_ts = str(self.tdCom.date_time)+f'+{self.tdCom.dataDict["interval"]*(self.tdCom.range_count+2)}s' + end_ts = self.tdCom.time_cast(future_ts) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts) + future_ts_bigint = self.tdCom.str_ts_trans_bigint(future_ts) + if watermark is not None: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(future_ts_bigint, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark']) + else: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(future_ts_bigint, self.tdCom.dataDict['interval']) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + + self.tdCom.date_time = start_time + for tbname in [self.stb_name, self.ctb_name]: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + additional_options = f"where ts >= {start_ts} and ts <= {end_ts}" + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} {additional_options} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value) + + def run(self): + for fill_value in ["VALUE", "NULL", "PREV", "NEXT", "LINEAR", None]: + self.docontinuous( + interval=random.randint(10, 15), + partition="tbname", + fill_value=fill_value, + ) + for fill_value in ["NULL", "PREV", "NEXT", "LINEAR", None]: + self.docontinuous( + interval=random.randint(10, 12), + partition="t1 as t5,t2 as t11,t3 as t13, t4", + fill_value=fill_value, + ) + + self.docontinuous( + interval=random.randint(10, 12), + partition="c1", + fill_value=None + ) + + self.docontinuous( + interval=random.randint(10, 12), + partition="t1 as t5,t2 as t11,t3 as t13, t4", + fill_value=None, + use_exist_stb=True, + tag_value="t5,t11,t13" + ) + + def stop(self): + tdLog.info("stop========================================") + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +event = threading.Event() + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git 
a/tests/system-test/8-stream/continuous_window_close_interval_checkpoint.py b/tests/system-test/8-stream/continuous_window_close_interval_checkpoint.py new file mode 100644 index 0000000000..21c688530c --- /dev/null +++ b/tests/system-test/8-stream/continuous_window_close_interval_checkpoint.py @@ -0,0 +1,192 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * +from util.cluster import * + + +class TDTestCase: + updatecfgDict = {"debugFlag": 135, "asynclog": 0, "checkpointinterval": 60} + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def check_stream_all_task_status(self, stream_timeout=0): + """check stream status + + Args: + stream_name (str): stream_name + Returns: + str: status + """ + timeout = self.stream_timeout if stream_timeout is None else stream_timeout + + #check stream task rows + sql_task_status = f"select * from information_schema.ins_stream_tasks where status != \"ready\";" + sql_task_all = f"select * from information_schema.ins_stream_tasks;" + + #check stream task status + checktimes = 0 + while checktimes <= timeout: + tdLog.notice(f"checktimes:{checktimes}") + try: + tdSql.query(sql_task_status,row_tag=True) + result_task_status_rows = tdSql.getRows() + if result_task_status_rows == 0: + tdSql.query(sql_task_all,row_tag=True) + result_task_status_rows = tdSql.getRows() + if result_task_status_rows > 0: + break + time.sleep(1) + checktimes += 1 + except Exception as e: + tdLog.notice(f"Try to check stream status again, check times: {checktimes}") + checktimes += 1 + tdSql.print_error_frame_info(f"status is not ready") + else: + tdLog.notice(f"it has spend {checktimes} for checking stream task status but it failed") + if checktimes == timeout: + tdSql.print_error_frame_info(f"status is ready,") + + def docontinuous( + self, + interval, + watermark=None, + partition=None, + fill_value=None, + ignore_expired=0, + ignore_update=0, + use_exist_stb=None, + tag_value=None, + fill_history_value=None, + ): + tdLog.info(f"*** testing stream continuous window close: interval: {interval}, partition: {partition}, fill_history: {fill_history_value}, use_exist_stb: {use_exist_stb}, fill: {fill_value}, tag_value: {tag_value} ***") + self.tdCom.case_name = sys._getframe().f_code.co_name + if watermark is not None: + self.tdCom.case_name = "watermark" + sys._getframe().f_code.co_name + self.tdCom.prepare_data(interval=interval, watermark=watermark, ext_stb=use_exist_stb, fill_history_value=fill_history_value) + tdLog.info( + f"testing stream continue_window_close finish prepare_data" + ) + + sqlstr = "alter local 'streamCoverage' '1'" + tdSql.query(sqlstr) + checkpointtime = 70 + recalculatetime = 120 + recalculatetimeStr = f"recalculate {recalculatetime}s" + + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}' + self.delete = True + + if partition == "tbname": + partition_elm_alias = self.tdCom.partition_tbname_alias + else: + partition_elm_alias = self.tdCom.partition_tag_alias + if partition == "tbname": + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), 
"{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + else: + stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", cast({partition_elm_alias} as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None + + if watermark is not None: + watermark_value = f'{self.tdCom.dataDict["watermark"]}s' + else: + watermark_value = None + + # no subtable + # create stream super table and child table + tdLog.info("create stream super table and child table") + self.des_select_str = self.tdCom.stb_source_select_str + self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.des_select_str} from {self.stb_name} partition by {partition} {partition_elm_alias} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="continuous_window_close", watermark=watermark_value, ignore_expired=ignore_expired, subtable_value=stb_subtable_value, fill_value=fill_value, use_exist_stb=use_exist_stb, tag_value=tag_value, max_delay=recalculatetimeStr, fill_history_value=fill_history_value) + + # # wait and check stream_task status is ready + # tdSql.query("show streams") + # self.check_stream_all_task_status( + # stream_timeout=120 + # ) + + # insert data + start_time = self.tdCom.date_time + print(f"range count:{self.tdCom.range_count}") + for i in range(self.tdCom.range_count): + if i == 0: + if watermark is not None: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark']) + else: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval']) + else: + self.tdCom.date_time = window_close_ts + self.tdCom.offset + window_close_ts += self.tdCom.dataDict['interval']*self.tdCom.offset + + for num in range(int(window_close_ts/self.tdCom.offset-self.tdCom.date_time/self.tdCom.offset)): + ts_value=self.tdCom.date_time+num*self.tdCom.offset + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + if i%2 == 0: + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + if self.delete and i%2 != 0: + ts_cast_delete_value = self.tdCom.time_cast(ts_value) + self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=ts_cast_delete_value) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1) + + start_ts = start_time + future_ts = str(self.tdCom.date_time)+f'+{self.tdCom.dataDict["interval"]*(self.tdCom.range_count+2)}s' + end_ts = self.tdCom.time_cast(future_ts) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts) + future_ts_bigint = self.tdCom.str_ts_trans_bigint(future_ts) + if watermark is not None: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(future_ts_bigint, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark']) + else: + window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(future_ts_bigint, self.tdCom.dataDict['interval']) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts) + + waitTime = checkpointtime + tdLog.info(f"sleep {waitTime} s") + time.sleep(waitTime) + + tdDnodes.stoptaosd(1) + tdDnodes.starttaosd(1) + + self.check_stream_all_task_status( + stream_timeout=120 + ) + + if fill_value: + for tbname in [self.stb_name]: + if "value" in fill_value.lower(): + fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11' + additional_options = f"where 
ts >= {start_ts} and ts <= {end_ts}" + self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} {additional_options} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value) + else: + for tbname in [self.stb_name]: + additional_options = f"where ts <= {end_ts}" + self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} {additional_options} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart', fill_value=fill_value) + tdLog.info("=====end======================================") + + def run(self): + for partition in ["tbname", "t1 as t5,t2 as t11,t3 as t13, t4"]: + # for partition in ["t1 as t5,t2 as t11,t3 as t13, t4"]: + self.docontinuous( + interval=random.randint(10, 15), + partition=partition, + fill_value=None, + fill_history_value=1, + ) + + def stop(self): + tdLog.info("stop========================================") + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +event = threading.Event() + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/pytest.sh b/tests/system-test/pytest.sh index 060717c20e..1ebaa76258 100755 --- a/tests/system-test/pytest.sh +++ b/tests/system-test/pytest.sh @@ -89,6 +89,9 @@ else export LD_PRELOAD="$(realpath "$(gcc -print-file-name=libasan.so)") $(realpath "$(gcc -print-file-name=libstdc++.so)")" echo "Preload AsanSo:" $? + export ASAN_OPTIONS=detect_odr_violation=0 + echo "forbid check ODR violation." 
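pytest.sh preloads libasan and now also exports ASAN_OPTIONS=detect_odr_violation=0, while the udfTest.py change earlier in this patch strips these variables before spawning helper commands so that ASan output does not pollute the captured process list. The snippet below is a minimal sketch of that environment-scrubbing pattern; the wrapper name and the example command are illustrative assumptions.

```python
import os
import subprocess

def run_without_asan(cmd):
    """Run a helper command with ASan-related variables removed from its environment.

    Sketch of the clean-environment pattern used in udfTest.py in this patch;
    error handling is omitted and the wrapper itself is hypothetical.
    """
    clean_env = os.environ.copy()
    clean_env.pop("ASAN_OPTIONS", None)  # e.g. detect_odr_violation=0 exported by pytest.sh
    clean_env.pop("LD_PRELOAD", None)    # avoid injecting libasan into the child process
    return subprocess.check_output(cmd, shell=True, env=clean_env).decode("utf-8")

# usage sketch: find the taosudf process id without ASan noise in the output
# pid = run_without_asan("ps -ef | grep -w taosudf | grep -v grep | awk '{print $2}'")
```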
+ $* -a 2> $AsanFile cat $AsanFile unset LD_PRELOAD diff --git a/tests/system-test/test.py b/tests/system-test/test.py index 01a35df6aa..1958e9976e 100644 --- a/tests/system-test/test.py +++ b/tests/system-test/test.py @@ -40,6 +40,9 @@ import taos import taosrest import taosws +from taos.cinterface import * +taos.taos_options(6, "native") + def checkRunTimeError(): import win32gui timeCount = 0 @@ -251,8 +254,9 @@ if __name__ == "__main__": # # do exeCmd command # + taosAdapter = True # default is websocket , so must start taosAdapter if not execCmd == "": - if restful or websocket: + if taosAdapter or restful or websocket: tAdapter.init(deployPath) else: tdDnodes.init(deployPath) @@ -291,7 +295,7 @@ if __name__ == "__main__": if valgrind: time.sleep(2) - if restful or websocket: + if taosAdapter or restful or websocket: toBeKilled = "taosadapter" # killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled @@ -387,7 +391,7 @@ if __name__ == "__main__": tdDnodes.deploy(1,updateCfgDict) tdDnodes.start(1) tdCases.logSql(logSql) - if restful or websocket: + if taosAdapter or restful or websocket: tAdapter.deploy(adapter_cfg_dict) tAdapter.start() @@ -427,7 +431,7 @@ if __name__ == "__main__": tdDnodes.starttaosd(dnode.index) tdCases.logSql(logSql) - if restful or websocket: + if taosAdapter or restful or websocket: tAdapter.deploy(adapter_cfg_dict) tAdapter.start() @@ -549,7 +553,7 @@ if __name__ == "__main__": except: pass - if restful or websocket: + if taosAdapter or restful or websocket: tAdapter.init(deployPath, masterIp) tAdapter.stop(force_kill=True) @@ -559,7 +563,7 @@ if __name__ == "__main__": tdDnodes.start(1) tdCases.logSql(logSql) - if restful or websocket: + if taosAdapter or restful or websocket: tAdapter.deploy(adapter_cfg_dict) tAdapter.start() @@ -614,7 +618,7 @@ if __name__ == "__main__": tdDnodes.starttaosd(dnode.index) tdCases.logSql(logSql) - if restful or websocket: + if taosAdapter or restful or websocket: tAdapter.deploy(adapter_cfg_dict) tAdapter.start() diff --git a/tests/taosc_test/CMakeLists.txt b/tests/taosc_test/CMakeLists.txt index e95e232b8c..925845ca5a 100644 --- a/tests/taosc_test/CMakeLists.txt +++ b/tests/taosc_test/CMakeLists.txt @@ -16,7 +16,7 @@ aux_source_directory(src OS_SRC) # taoscTest add_executable(taoscTest "taoscTest.cpp") -target_link_libraries(taoscTest PUBLIC ${TAOS_LIB} os util common gtest_main) +target_link_libraries(taoscTest PUBLIC ${TAOS_NATIVE_LIB} os util common gtest_main) target_include_directories( taoscTest PUBLIC "${TD_SOURCE_DIR}/include/os" diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 110a644e90..fa676f667d 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -13,7 +13,7 @@ IF(TD_WEBSOCKET) PREFIX "taosws-rs" SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosws-rs BUILD_ALWAYS off - DEPENDS ${TAOS_LIB} + DEPENDS ${TAOS_NATIVE_LIB} BUILD_IN_SOURCE 1 CONFIGURE_COMMAND cmake -E echo "taosws-rs no need cmake to config" PATCH_COMMAND @@ -31,7 +31,7 @@ IF(TD_WEBSOCKET) PREFIX "taosws-rs" SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosws-rs BUILD_ALWAYS off - DEPENDS ${TAOS_LIB} + DEPENDS ${TAOS_NATIVE_LIB} BUILD_IN_SOURCE 1 CONFIGURE_COMMAND cmake -E echo "taosws-rs no need cmake to config" PATCH_COMMAND @@ -50,7 +50,7 @@ IF(TD_WEBSOCKET) PREFIX "taosws-rs" SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosws-rs BUILD_ALWAYS off - DEPENDS ${TAOS_LIB} + DEPENDS ${TAOS_NATIVE_LIB} BUILD_IN_SOURCE 1 CONFIGURE_COMMAND cmake -E echo "taosws-rs no need cmake to config" 
PATCH_COMMAND @@ -136,7 +136,7 @@ ELSE() PREFIX "taosadapter" SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter BUILD_ALWAYS off - DEPENDS ${TAOS_LIB} + DEPENDS ${TAOS_NATIVE_LIB} BUILD_IN_SOURCE 1 CONFIGURE_COMMAND cmake -E echo "taosadapter no need cmake to config" PATCH_COMMAND @@ -165,7 +165,7 @@ ELSE() PREFIX "taosadapter" SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter BUILD_ALWAYS off - DEPENDS ${TAOS_LIB} + DEPENDS ${TAOS_NATIVE_LIB} BUILD_IN_SOURCE 1 CONFIGURE_COMMAND cmake -E echo "taosadapter no need cmake to config" PATCH_COMMAND @@ -190,12 +190,13 @@ ELSE() PREFIX "taosadapter" SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter BUILD_ALWAYS off - DEPENDS ${TAOS_LIB} + DEPENDS ${TAOS_NATIVE_LIB} BUILD_IN_SOURCE 1 CONFIGURE_COMMAND cmake -E echo "taosadapter no need cmake to config" PATCH_COMMAND COMMAND git clean -f -d BUILD_COMMAND + COMMAND tree ${CMAKE_BINARY_DIR}/build/lib COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-X 'github.com/taosdata/taosadapter/v3/version.Version=${taos_version}' -X 'github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}' -X 'github.com/taosdata/taosadapter/v3/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'" # COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X 'github.com/taosdata/taosadapter/v3/version.Version=${taos_version}' -X 'github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}' -X 'github.com/taosdata/taosadapter/v3/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'" diff --git a/tools/inc/pub.h b/tools/inc/pub.h new file mode 100644 index 0000000000..fd9fa9558f --- /dev/null +++ b/tools/inc/pub.h @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the MIT license as published by the Free Software + * Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef PUB_H_ +#define PUB_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef WINDOWS +#define strcasecmp _stricmp +#define strncasecmp _strnicmp +#endif + +// +// -------------- define ------------------ +// + +// connect mode string +#define STR_NATIVE "Native" +#define STR_WEBSOCKET "WebSocket" + +#define DRIVER_OPT "driver" +#define DRIVER_DESC "Connect driver , value can be \"Native\" or \"WebSocket\"" + +#define DSN_DESC "The dsn to connect the cloud service." +#define OLD_DSN_DESC "same with -X options" + +#define DSN_NATIVE_CONFLICT "DSN option not support in native connection mode.\n" + +// connect mode type define +#define CONN_MODE_INVALID -1 +#define CONN_MODE_NATIVE 0 +#define CONN_MODE_WEBSOCKET 1 +#define CONN_MODE_DEFAULT CONN_MODE_NATIVE // set default mode + +// define error show module +#define INIT_PHASE "init" +#define TIP_ENGINE_ERR "Call engine failed." 
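pub.h introduces a small shared API (getConnMode, parseDsn, defaultPort, defaultMode) so that the shell and the other command-line tools resolve the connection driver and endpoint in one place. The snippet below is only a rough Python illustration, under stated assumptions, of how a URL-shaped WebSocket DSN might be split into host/port/user and how the default port constants declared in this header (6041 for a local WebSocket endpoint, 443 for the cloud service, 6030 for the native driver) could be chosen; the real C implementation in tools/src/pub.c may behave differently, in particular for cloud token handling.

```python
from urllib.parse import urlparse

DEFAULT_PORT_WS_LOCAL = 6041   # plain WebSocket endpoint
DEFAULT_PORT_WS_CLOUD = 443    # cloud service endpoint
DEFAULT_PORT_NATIVE = 6030     # native driver endpoint

def split_dsn(dsn):
    """Rough Python illustration of the parseDsn()/defaultPort() idea from pub.h.

    Assumes a URL-shaped DSN such as 'wss://user:pass@host:6041'; the actual C
    parser and its error reporting may differ.
    """
    u = urlparse(dsn.lower())
    if u.port is not None:
        port = u.port
    elif u.username is not None:
        port = DEFAULT_PORT_WS_CLOUD   # credentials embedded in the DSN -> cloud default
    else:
        port = DEFAULT_PORT_WS_LOCAL   # bare local WebSocket endpoint
    return u.hostname, port, u.username, u.password

# e.g. split_dsn("ws://localhost") -> ("localhost", 6041, None, None)
```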
+ +// default port +#define DEFAULT_PORT_WS_LOCAL 6041 +#define DEFAULT_PORT_WS_CLOUD 443 +#define DEFAULT_PORT_NATIVE 6030 + + +// +// -------------- api ------------------ +// + +// get comn mode, if invalid argp then exit app +int8_t getConnMode(char *arg); + +char* strToLowerCopy(const char *str); +int32_t parseDsn(char* dsn, char **host, char **port, char **user, char **pwd, char* error); + +int32_t setConnMode(int8_t connMode, char *dsn); + +uint16_t defaultPort(int8_t connMode, char *dsn); + +int8_t defaultMode(int8_t connMode, char *dsn); + +#endif // PUB_H_ \ No newline at end of file diff --git a/tools/shell/CMakeLists.txt b/tools/shell/CMakeLists.txt index ac901f5ca2..a503ceedf1 100644 --- a/tools/shell/CMakeLists.txt +++ b/tools/shell/CMakeLists.txt @@ -1,21 +1,13 @@ aux_source_directory(src SHELL_SRC) -add_executable(shell ${SHELL_SRC}) +add_executable(shell ${SHELL_SRC} ../src/pub.c) -IF(TD_LINUX AND TD_WEBSOCKET) - ADD_DEFINITIONS(-DWEBSOCKET -I${CMAKE_BINARY_DIR}/build/include -ltaosws) - SET(LINK_WEBSOCKET "-L${CMAKE_BINARY_DIR}/build/lib -ltaosws") - ADD_DEPENDENCIES(shell taosws-rs) -ELSEIF(TD_DARWIN AND TD_WEBSOCKET) - ADD_DEFINITIONS(-DWEBSOCKET -I${CMAKE_BINARY_DIR}/build/include) - SET(LINK_WEBSOCKET "${CMAKE_BINARY_DIR}/build/lib/libtaosws.dylib") - ADD_DEPENDENCIES(shell taosws-rs) -ELSEIF(TD_WINDOWS AND TD_WEBSOCKET) - ADD_DEFINITIONS(-DWEBSOCKET -I${CMAKE_BINARY_DIR}/build/include) - SET(LINK_WEBSOCKET "${CMAKE_BINARY_DIR}/build/lib/taosws.lib") - ADD_DEPENDENCIES(shell taosws-rs) +IF(TD_LINUX_64 AND JEMALLOC_ENABLED) + ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc) + SET(LINK_JEMALLOC "-L${CMAKE_BINARY_DIR}/build/lib -ljemalloc") + ADD_DEPENDENCIES(shell jemalloc) ELSE() - SET(LINK_WEBSOCKET "") + SET(LINK_JEMALLOC "") ENDIF() IF(TD_LINUX AND TD_ALPINE) @@ -25,9 +17,9 @@ ELSE() ENDIF() if(TD_WINDOWS) - target_link_libraries(shell PUBLIC ${TAOS_LIB_STATIC}) + target_link_libraries(shell PUBLIC ${TAOS_LIB}) else() - target_link_libraries(shell PUBLIC ${TAOS_LIB} ${LINK_ARGP}) + target_link_libraries(shell PUBLIC ${TAOS_LIB} ${LINK_JEMALLOC} ${LINK_ARGP}) endif() target_link_libraries( @@ -50,14 +42,11 @@ IF(TD_LINUX) # include include_directories(${CMAKE_CURRENT_SOURCE_DIR}/inc) + # shell_ut library - add_library(shell_ut STATIC ${SHELL_SRC}) + add_library(shell_ut STATIC ${SHELL_SRC} ../src/pub.c) - IF(TD_WEBSOCKET) - ADD_DEPENDENCIES(shell_ut taosws-rs) - ENDIF() - - target_link_libraries(shell_ut PUBLIC ${TAOS_LIB} ${LINK_WEBSOCKET} ${LINK_JEMALLOC} ${LINK_ARGP}) + target_link_libraries(shell_ut PUBLIC ${TAOS_LIB} ${LINK_JEMALLOC} ${LINK_ARGP}) target_link_libraries(shell_ut PRIVATE os common transport geometry util) # util depends @@ -72,3 +61,16 @@ IF(TD_LINUX) ADD_SUBDIRECTORY(test) ENDIF(${BUILD_TEST}) ENDIF() + +# +# collect --version information +# +MESSAGE("collect --version show info:") +# version +IF (DEFINED TD_VER_NUMBER) + ADD_DEFINITIONS(-DTD_VER_NUMBER="${TD_VER_NUMBER}") + MESSAGE(STATUS "taos version:${TD_VER_NUMBER}") +ELSE () + # abort build + MESSAGE(FATAL_ERROR "build taos not found TD_VER_NUMBER define.") +ENDIF () \ No newline at end of file diff --git a/tools/shell/inc/shellAuto.h b/tools/shell/inc/shellAuto.h index 7583932ff5..836898cdf1 100644 --- a/tools/shell/inc/shellAuto.h +++ b/tools/shell/inc/shellAuto.h @@ -31,7 +31,7 @@ void pressTabKey(SShellCmd* cmd); void pressOtherKey(char c); // init shell auto function , shell 
start call once -bool shellAutoInit(); +void shellAutoInit(); // set conn void shellSetConn(TAOS* conn, bool runOnce); @@ -51,9 +51,8 @@ void showAD(bool end); // show all commands help void showHelp(); - // -// for unit test +// for unit test // bool fieldOptionsArea(char* p); bool isCreateFieldsArea(char* p); diff --git a/tools/shell/inc/shellInt.h b/tools/shell/inc/shellInt.h index 6dbc5db94f..4b27f4a939 100644 --- a/tools/shell/inc/shellInt.h +++ b/tools/shell/inc/shellInt.h @@ -17,23 +17,22 @@ #define _TD_SHELL_INT_H_ #include "os.h" -#include "taos.h" #include "taosdef.h" #include "taoserror.h" +#include "taos.h" +#include "tcommon.h" #include "tconfig.h" #include "tglobal.h" #include "trpc.h" #include "ttypes.h" #include "tutil.h" +#include "tversion.h" +#include "version.h" +#include "../../inc/pub.h" -#ifdef WEBSOCKET -#include "taosws.h" - -#define SHELL_WS_TIMEOUT 30 -#define SHELL_WS_DSN_BUFF 256 -#define SHELL_WS_DSN_MASK 10 -#endif - +#define SHELL_WS_TIMEOUT 30 +#define SHELL_WS_DSN_BUFF 256 +#define SHELL_WS_DSN_MASK 10 #define SHELL_MAX_HISTORY_SIZE 1000 #define SHELL_MAX_COMMAND_SIZE 1048586 #define SHELL_HISTORY_FILE ".taos_history" @@ -48,7 +47,9 @@ #define SHELL_FLOAT_WIDTH 20 #define SHELL_DOUBLE_WIDTH 25 -#define ERROR_CODE_DETAIL "\r\n\r\nTo view possible causes and suggested actions for error codes, see \r\n\"Error Code Reference\" in the TDengine online documentation.\r\n" +#define ERROR_CODE_DETAIL \ + "\r\n\r\nTo view possible causes and suggested actions for error codes, see \r\n\"Error Code Reference\" in the " \ + "TDengine online documentation.\r\n" typedef struct { char* hist[SHELL_MAX_HISTORY_SIZE]; char file[TSDB_FILENAME_LEN]; @@ -79,20 +80,17 @@ typedef struct { int32_t pktNum; int32_t displayWidth; int32_t abort; -#ifdef WEBSOCKET - bool restful; - bool cloud; - bool local; char* dsn; int32_t timeout; -#endif + int8_t connMode; + bool port_inputted; } SShellArgs; typedef struct { - const char *clientVersion; - char cusName[32]; - char promptHeader[32]; - char promptContinue[32]; + const char* clientVersion; + char cusName[32]; + char promptHeader[32]; + char promptContinue[32]; const char* osname; int32_t promptSize; char programVersion[256]; @@ -106,10 +104,6 @@ typedef struct { TdThread pid; tsem_t cancelSem; bool exit; -#ifdef WEBSOCKET - WS_TAOS* ws_conn; - bool stop_query; -#endif } SShellObj; typedef struct { @@ -134,6 +128,7 @@ int32_t shellCalcColWidth(TAOS_FIELD *field, int32_t precision); void shellPrintHeader(TAOS_FIELD *fields, int32_t *width, int32_t num_fields); void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t length, int32_t precision); void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, int32_t length, int32_t precision); + // shellUtil.c int32_t shellCheckIntSize(); void shellPrintVersion(); @@ -142,21 +137,13 @@ void shellGenerateAuth(); void shellDumpConfig(); void shellCheckServerStatus(); bool shellRegexMatch(const char* s, const char* reg, int32_t cflags); +int32_t getDsnEnv(); void shellExit(); // shellNettest.c void shellTestNetWork(); -#ifdef WEBSOCKET -void shellCheckConnectMode(); -// shellWebsocket.c -int shell_conn_ws_server(bool first); -int32_t shell_run_websocket(); -void shellRunSingleCommandWebsocketImp(char *command); -#endif - // shellMain.c extern SShellObj shell; -extern void tscWriteCrashInfo(int signum, void *sigInfo, void *context); #endif /*_TD_SHELL_INT_H_*/ diff --git a/tools/shell/inc/shellTire.h b/tools/shell/inc/shellTire.h index 
472f604a2c..247f6dfa27 100644 --- a/tools/shell/inc/shellTire.h +++ b/tools/shell/inc/shellTire.h @@ -13,8 +13,8 @@ * along with this program. If not, see . */ -#ifndef __TRIE__ -#define __TRIE__ +#ifndef _TD_TRIE_H_ +#define _TD_TRIE_H_ // // The prefix search tree is a efficient storage words and search words tree, it support 95 visible ascii code character diff --git a/tools/shell/src/shellArguments.c b/tools/shell/src/shellArguments.c index 659bb54aef..209168149e 100644 --- a/tools/shell/src/shellArguments.c +++ b/tools/shell/src/shellArguments.c @@ -13,13 +13,8 @@ * along with this program. If not, see . */ -#ifdef _TD_DARWIN_64 -#include -#endif - -#include "cus_name.h" #include "shellInt.h" -#include "version.h" +#include "../../inc/pub.h" #define TAOS_CONSOLE_PROMPT_CONTINUE " -> " @@ -43,17 +38,19 @@ #define SHELL_PKT_LEN "Packet length used for net test, default is 1024 bytes." #define SHELL_PKT_NUM "Packet numbers used for net test, default is 100." #define SHELL_BI_MODE "Set BI mode" +#define SHELL_VERSION "Print program version." +#define SHELL_DSN "Use dsn to connect to the cloud server or to a remote server which provides WebSocket connection." +#define SHELL_TIMEOUT "Set the timeout for WebSocket query in seconds, default is 30." #define SHELL_LOG_OUTPUT \ "Specify log output. Options:\n\r\t\t\t stdout, stderr, /dev/null, , /, " \ "\n\r\t\t\t * If OUTPUT contains an absolute directory, logs will be stored in that directory " \ "instead of logDir.\n\r\t\t\t * If OUTPUT contains a relative directory, logs will be stored in the directory " \ "combined with logDir and the relative directory." -#define SHELL_VERSION "Print program version." #ifdef WEBSOCKET -#define SHELL_DSN "Use dsn to connect to the cloud server or to a remote server which provides WebSocket connection." -#define SHELL_REST "Use RESTful mode when connecting." -#define SHELL_TIMEOUT "Set the timeout for websocket query in seconds, default is 30." +#define SHELL_DRIVER_DEFAULT "0." // todo simon -> 1 +#else +#define SHELL_DRIVER_DEFAULT "0." 
#endif static int32_t shellParseSingleOpt(int32_t key, char *arg); @@ -82,13 +79,13 @@ void shellPrintHelp() { printf("%s%s%s%s\r\n", indent, "-s,", indent, SHELL_CMD); printf("%s%s%s%s\r\n", indent, "-t,", indent, SHELL_STARTUP); printf("%s%s%s%s\r\n", indent, "-u,", indent, SHELL_USER); -#ifdef WEBSOCKET - printf("%s%s%s%s\r\n", indent, "-E,", indent, SHELL_DSN); - printf("%s%s%s%s\r\n", indent, "-R,", indent, SHELL_REST); + printf("%s%s%s%s\r\n", indent, "-E,", indent, OLD_DSN_DESC); printf("%s%s%s%s\r\n", indent, "-T,", indent, SHELL_TIMEOUT); -#endif printf("%s%s%s%s\r\n", indent, "-w,", indent, SHELL_WIDTH); printf("%s%s%s%s\r\n", indent, "-V,", indent, SHELL_VERSION); + printf("%s%s%s%s\r\n", indent, "-X,", indent, DSN_DESC); + printf("%s%s%s%s\r\n", indent, "-Z,", indent, DRIVER_DESC); + #ifdef CUS_EMAIL printf("\r\n\r\nReport bugs to %s.\r\n", CUS_EMAIL); #else @@ -129,16 +126,13 @@ static struct argp_option shellOptions[] = { {"display-width", 'w', "WIDTH", 0, SHELL_WIDTH}, {"netrole", 'n', "NETROLE", 0, SHELL_NET_ROLE}, {"pktlen", 'l', "PKTLEN", 0, SHELL_PKT_LEN}, -#ifdef WEBSOCKET - {"dsn", 'E', "DSN", 0, SHELL_DSN}, - {"restful", 'R', 0, 0, SHELL_REST}, + {"cloud-dsn", 'E', "DSN", 0, OLD_DSN_DESC}, {"timeout", 'T', "SECONDS", 0, SHELL_TIMEOUT}, -#endif {"pktnum", 'N', "PKTNUM", 0, SHELL_PKT_NUM}, {"bimode", 'B', 0, 0, SHELL_BI_MODE}, -#if defined(LINUX) {"log-output", 'o', "OUTPUT", 0, SHELL_LOG_OUTPUT}, -#endif + {"dsn", 'X', "DSN", 0, DSN_DESC}, + {DRIVER_OPT, 'Z', "DRIVER", 0, DRIVER_DESC}, {0}, }; @@ -146,9 +140,10 @@ static error_t shellParseOpt(int32_t key, char *arg, struct argp_state *state) { static struct argp shellArgp = {shellOptions, shellParseOpt, "", ""}; -static void shellParseArgsUseArgp(int argc, char *argv[]) { +static int32_t shellParseArgsUseArgp(int argc, char *argv[]) { argp_program_version = shell.info.programVersion; - argp_parse(&shellArgp, argc, argv, 0, 0, &shell.args); + error_t err = argp_parse(&shellArgp, argc, argv, 0, 0, &shell.args); + return (err != 0); } #endif @@ -163,16 +158,14 @@ static int32_t shellParseSingleOpt(int32_t key, char *arg) { switch (key) { case 'h': pArgs->host = arg; -#ifdef WEBSOCKET - pArgs->cloud = false; -#endif break; case 'P': pArgs->port = atoi(arg); -#ifdef WEBSOCKET - pArgs->cloud = false; -#endif - if (pArgs->port == 0) pArgs->port = -1; + if (pArgs->port == 0) { + pArgs->port = -1; + } else { + pArgs->port_inputted = true; + } break; case 'u': pArgs->user = arg; @@ -189,9 +182,6 @@ static int32_t shellParseSingleOpt(int32_t key, char *arg) { pArgs->is_bi_mode = true; break; case 'c': -#ifdef WEBSOCKET - pArgs->cloud = false; -#endif pArgs->cfgdir = arg; break; case 'C': @@ -229,33 +219,35 @@ static int32_t shellParseSingleOpt(int32_t key, char *arg) { break; #if defined(LINUX) case 'o': + printf(" -o need todo optins.\n"); + // need todo pass tsLogOutput to engine + /* if (strlen(arg) >= PATH_MAX) { - printf("failed to set log output since length overflow, max length is %d\n", PATH_MAX); + printf("failed to set log output since length overflow, max length is %d\r\n", PATH_MAX); return TSDB_CODE_INVALID_CFG; } tsLogOutput = taosMemoryMalloc(PATH_MAX); if (!tsLogOutput) { - printf("failed to set log output: '%s' since %s\n", arg, tstrerror(terrno)); + printf("failed to set log output: '%s' since %s\r\n", arg, tstrerror(terrno)); return terrno; } if (taosExpandDir(arg, tsLogOutput, PATH_MAX) != 0) { - printf("failed to expand log output: '%s' since %s\n", arg, tstrerror(terrno)); + printf("failed to expand log 
output: '%s' since %s\r\n", arg, tstrerror(terrno)); return terrno; } + */ break; #endif -#ifdef WEBSOCKET - case 'R': - pArgs->restful = true; - break; case 'E': + case 'X': pArgs->dsn = arg; - pArgs->cloud = true; break; case 'T': pArgs->timeout = atoi(arg); break; -#endif + case 'Z': + pArgs->connMode = getConnMode(arg); + break; case 'V': pArgs->is_version = true; break; @@ -271,14 +263,15 @@ static int32_t shellParseSingleOpt(int32_t key, char *arg) { return 0; } #if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32) || defined(_TD_DARWIN_64) || defined(TD_ASTRA) + int32_t shellParseArgsWithoutArgp(int argc, char *argv[]) { SShellArgs *pArgs = &shell.args; + int32_t ret = 0; for (int i = 1; i < argc; i++) { if (strcmp(argv[i], "--help") == 0 || strcmp(argv[i], "--usage") == 0 || strcmp(argv[i], "-?") == 0 || strcmp(argv[i], "/?") == 0) { - shellParseSingleOpt('?', NULL); - return 0; + return shellParseSingleOpt('?', NULL); } char *key = argv[i]; @@ -292,14 +285,9 @@ int32_t shellParseArgsWithoutArgp(int argc, char *argv[]) { return -1; } - if (key[1] == 'h' || key[1] == 'P' || key[1] == 'u' - || key[1] == 'a' || key[1] == 'c' || key[1] == 's' - || key[1] == 'f' || key[1] == 'd' || key[1] == 'w' - || key[1] == 'n' || key[1] == 'l' || key[1] == 'N' -#ifdef WEBSOCKET - || key[1] == 'E' || key[1] == 'T' -#endif - ) { + if (key[1] == 'h' || key[1] == 'P' || key[1] == 'u' || key[1] == 'a' || key[1] == 'c' || key[1] == 's' || + key[1] == 'f' || key[1] == 'd' || key[1] == 'w' || key[1] == 'n' || key[1] == 'l' || key[1] == 'N' || + key[1] == 'E' || key[1] == 'T' || key[1] == 'X' || key[1] == 'Z') { if (i + 1 >= argc) { fprintf(stderr, "option %s requires an argument\r\n", key); return -1; @@ -309,21 +297,19 @@ int32_t shellParseArgsWithoutArgp(int argc, char *argv[]) { fprintf(stderr, "option %s requires an argument\r\n", key); return -1; } - shellParseSingleOpt(key[1], val); + ret = shellParseSingleOpt(key[1], val); i++; - } else if (key[1] == 'p' || key[1] == 'A' || key[1] == 'C' - || key[1] == 'r' || key[1] == 'k' - || key[1] == 't' || key[1] == 'V' - || key[1] == '?' || key[1] == 1 -#ifdef WEBSOCKET - ||key[1] == 'R' -#endif - ) { - shellParseSingleOpt(key[1], NULL); + } else if (key[1] == 'p' || key[1] == 'A' || key[1] == 'C' || key[1] == 'r' || key[1] == 'k' || key[1] == 't' || + key[1] == 'V' || key[1] == '?' 
|| key[1] == 1 || key[1] == 'R'|| key[1] == 'B') { + ret = shellParseSingleOpt(key[1], NULL); } else { fprintf(stderr, "invalid option %s\r\n", key); return -1; } + + if (ret != 0) { + return ret; + } } return 0; @@ -348,6 +334,7 @@ static void shellInitArgs(int argc, char *argv[]) { tstrncpy(shell.args.password, (char *)(argv[i] + 2), sizeof(shell.args.password)); strcpy(argv[i], "-p"); } + printf("\r\n"); } } if (strlen(shell.args.password) == 0) { @@ -359,6 +346,9 @@ static void shellInitArgs(int argc, char *argv[]) { pArgs->pktLen = SHELL_DEF_PKG_LEN; pArgs->pktNum = SHELL_DEF_PKG_NUM; pArgs->displayWidth = SHELL_DEFAULT_MAX_BINARY_DISPLAY_WIDTH; + pArgs->timeout = SHELL_WS_TIMEOUT; + + shell.exit = false; } static int32_t shellCheckArgs() { @@ -442,7 +432,7 @@ static int32_t shellCheckArgs() { int32_t shellParseArgs(int32_t argc, char *argv[]) { shellInitArgs(argc, argv); shell.info.clientVersion = - "Welcome to the %s Command Line Interface, Client Version:%s\r\n" + "Welcome to the %s Command Line Interface, %s Client Version:%s \r\n" "Copyright (c) 2025 by %s, all rights reserved.\r\n\r\n"; #ifdef CUS_NAME strcpy(shell.info.cusName, CUS_NAME); @@ -485,8 +475,7 @@ int32_t shellParseArgs(int32_t argc, char *argv[]) { #else shell.info.osname = "Linux"; snprintf(shell.history.file, TSDB_FILENAME_LEN, "%s/%s", getenv("HOME"), SHELL_HISTORY_FILE); - shellParseArgsUseArgp(argc, argv); - // if (shellParseArgsWithoutArgp(argc, argv) != 0) return -1; + if (shellParseArgsUseArgp(argc, argv) != 0) return -1; if (shell.args.abort) { return -1; } @@ -494,3 +483,35 @@ int32_t shellParseArgs(int32_t argc, char *argv[]) { return shellCheckArgs(); } + +int32_t getDsnEnv() { + if (shell.args.connMode == CONN_MODE_NATIVE) { + if (shell.args.dsn != NULL) { + fprintf(stderr, DSN_NATIVE_CONFLICT); + return -1; + } + } else { + if (shell.args.dsn != NULL) { + return 0; + } else { + // read cloud + shell.args.dsn = getenv("TDENGINE_CLOUD_DSN"); + if (shell.args.dsn && strlen(shell.args.dsn) > 4) { + fprintf(stderr, "Use the environment variable TDENGINE_CLOUD_DSN:%s as the input for the DSN option.\r\n", + shell.args.dsn); + return 0; + } + + // read local + shell.args.dsn = getenv("TDENGINE_DSN"); + if (shell.args.dsn && strlen(shell.args.dsn) > 4) { + fprintf(stderr, "Use the environment variable TDENGINE_DSN:%s as the input for the DSN option.\r\n", + shell.args.dsn); + return 0; + } + shell.args.dsn = NULL; + } + } + + return 0; +} diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index f8ea42917c..41ed417716 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -787,7 +787,7 @@ void GenerateVarType(int type, char** p, int count) { // // init shell auto function , shell start call once -bool shellAutoInit() { +void shellAutoInit() { // command int32_t count = SHELL_COMMAND_COUNT(); for (int32_t i = 0; i < count; i++) { @@ -816,8 +816,6 @@ bool shellAutoInit() { GenerateVarType(WT_VAR_LANGUAGE, udf_language, sizeof(udf_language) / sizeof(char*)); GenerateVarType(WT_VAR_GLOBALKEYS, global_keys, sizeof(global_keys) / sizeof(char*)); GenerateVarType(WT_VAR_FIELD_OPTIONS, field_options, sizeof(field_options) / sizeof(char*)); - - return true; } // set conn diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index 10c3d351d4..46d0c88b02 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -21,6 +21,7 @@ #include "geosWrapper.h" #include "shellAuto.h" #include "shellInt.h" +#include "../../inc/pub.h" SShellObj 
shell = {0}; @@ -128,15 +129,7 @@ int32_t shellRunSingleCommand(char *command) { shellSourceFile(c_ptr); return 0; } -#ifdef WEBSOCKET - if (shell.args.restful || shell.args.cloud) { - shellRunSingleCommandWebsocketImp(command); - } else { -#endif - shellRunSingleCommandImp(command); -#ifdef WEBSOCKET - } -#endif + shellRunSingleCommandImp(command); return 0; } @@ -291,7 +284,6 @@ void shellRunSingleCommandImp(char *command) { if (error_no == 0) { printf("Query OK, %" PRId64 " row(s) in set (%.6fs)\r\n", numOfRows, (et - st) / 1E6); } else { - terrno = error_no; printf("Query interrupted (%s), %" PRId64 " row(s) in set (%.6fs)\r\n", taos_errstr(NULL), numOfRows, (et - st) / 1E6); } @@ -1101,7 +1093,7 @@ void shellCleanupHistory() { void shellPrintError(TAOS_RES *tres, int64_t st) { int64_t et = taosGetTimestampUs(); - fprintf(stderr, "\r\nDB error: %s[0x%08X] (%.6fs)\r\n", taos_errstr(tres), taos_errno(tres), (et - st) / 1E6); + fprintf(stderr, "\r\nDB error: %s [0x%08X] (%.6fs)\r\n", taos_errstr(tres), taos_errno(tres), (et - st) / 1E6); taos_free_result(tres); } @@ -1258,18 +1250,11 @@ void *shellCancelHandler(void *arg) { continue; } -#ifdef WEBSOCKET - if (shell.args.restful || shell.args.cloud) { - shell.stop_query = true; - } else { -#endif - if (shell.conn) { - shellCmdkilled = true; - taos_kill_query(shell.conn); - } -#ifdef WEBSOCKET + if (shell.conn) { + shellCmdkilled = true; + taos_kill_query(shell.conn); } -#endif + #ifdef WINDOWS printf("\n%s", shell.info.promptHeader); #endif @@ -1314,35 +1299,85 @@ void *shellThreadLoop(void *arg) { } #pragma GCC diagnostic pop +TAOS* createConnect(SShellArgs *pArgs) { + char show[256] = "\0"; + char * host = NULL; + uint16_t port = 0; + char * user = NULL; + char * pwd = NULL; + int32_t code = 0; + char * dsnc = NULL; + + // set mode + if (pArgs->connMode != CONN_MODE_NATIVE && pArgs->dsn) { + dsnc = strToLowerCopy(pArgs->dsn); + if (dsnc == NULL) { + return NULL; + } + + char *cport = NULL; + char error[512] = "\0"; + code = parseDsn(dsnc, &host, &cport, &user, &pwd, error); + if (code) { + printf("%s dsn=%s\n", error, dsnc); + free(dsnc); + return NULL; + } + + // default ws port + if (cport == NULL) { + if (user) + port = DEFAULT_PORT_WS_CLOUD; + else + port = DEFAULT_PORT_WS_LOCAL; + } else { + port = atoi(cport); + } + + // websocket + memcpy(show, pArgs->dsn, 20); + memcpy(show + 20, "...", 3); + memcpy(show + 23, pArgs->dsn + strlen(pArgs->dsn) - 10, 10); + + } else { + + host = (char *)pArgs->host; + user = (char *)pArgs->user; + pwd = pArgs->password; + + if (pArgs->port_inputted) { + port = pArgs->port; + } else { + port = defaultPort(pArgs->connMode, pArgs->dsn); + } + + sprintf(show, "host:%s port:%d ", host, port); + } + + // connect main + if (pArgs->auth) { + return taos_connect_auth(host, user, pArgs->auth, pArgs->database, port); + } else { + return taos_connect(host, user, pwd, pArgs->database, port); + } +} + int32_t shellExecute(int argc, char *argv[]) { int32_t code = 0; - printf(shell.info.clientVersion, shell.info.cusName, taos_get_client_info(), shell.info.cusName); + printf(shell.info.clientVersion, shell.info.cusName, + defaultMode(shell.args.connMode, shell.args.dsn) == CONN_MODE_NATIVE ? 
STR_NATIVE : STR_WEBSOCKET, + taos_get_client_info(), shell.info.cusName); fflush(stdout); SShellArgs *pArgs = &shell.args; -#ifdef WEBSOCKET - if (shell.args.restful || shell.args.cloud) { - if (shell_conn_ws_server(1)) { - printf("failed to connect to server, reason: %s[0x%08X]\n%s", ws_errstr(NULL), ws_errno(NULL), ERROR_CODE_DETAIL); - fflush(stdout); - return -1; - } - } else { -#endif - if (shell.args.auth == NULL) { - shell.conn = taos_connect(pArgs->host, pArgs->user, pArgs->password, pArgs->database, pArgs->port); - } else { - shell.conn = taos_connect_auth(pArgs->host, pArgs->user, pArgs->auth, pArgs->database, pArgs->port); - } + shell.conn = createConnect(pArgs); - if (shell.conn == NULL) { - printf("failed to connect to server, reason: %s[0x%08X]\n%s", taos_errstr(NULL), taos_errno(NULL), ERROR_CODE_DETAIL); - fflush(stdout); - return -1; - } -#ifdef WEBSOCKET + if (shell.conn == NULL) { + printf("failed to connect to server, reason: %s [0x%08X]\n%s", taos_errstr(NULL), taos_errno(NULL), + ERROR_CODE_DETAIL); + fflush(stdout); + return -1; } -#endif bool runOnce = pArgs->commands != NULL || pArgs->file[0] != 0; shellSetConn(shell.conn, runOnce); @@ -1351,9 +1386,7 @@ int32_t shellExecute(int argc, char *argv[]) { if (shell.args.is_bi_mode) { // need set bi mode printf("Set BI mode is true.\n"); -#ifndef WEBSOCKET taos_set_conn_mode(shell.conn, TAOS_CONN_MODE_BI, 1); -#endif } if (runOnce) { @@ -1367,15 +1400,8 @@ int32_t shellExecute(int argc, char *argv[]) { if (pArgs->file[0] != 0) { shellSourceFile(pArgs->file); } -#ifdef WEBSOCKET - if (shell.args.restful || shell.args.cloud) { - ws_close(shell.ws_conn); - } else { -#endif - taos_close(shell.conn); -#ifdef WEBSOCKET - } -#endif + + taos_close(shell.conn); shellWriteHistory(); shellCleanupHistory(); @@ -1394,28 +1420,20 @@ int32_t shellExecute(int argc, char *argv[]) { taosSetSignal(SIGHUP, shellQueryInterruptHandler); taosSetSignal(SIGINT, shellQueryInterruptHandler); -#ifdef WEBSOCKET - if (!shell.args.restful && !shell.args.cloud) { -#endif - char buf[512] = {0}; - int32_t verType = shellGetGrantInfo(buf); + char buf[512] = {0}; + int32_t verType = shellGetGrantInfo(buf); #ifndef WINDOWS - printfIntroduction(verType); + printfIntroduction(verType); #else -#ifndef WEBSOCKET if (verType == TSDB_VERSION_OSS) { showAD(false); } #endif -#endif - // printf version - if (verType == TSDB_VERSION_ENTERPRISE || verType == TSDB_VERSION_CLOUD) { - printf("%s\n", buf); - } - -#ifdef WEBSOCKET + // printf version + if (verType == TSDB_VERSION_ENTERPRISE || verType == TSDB_VERSION_CLOUD) { + printf("%s\n", buf); } -#endif + while (1) { taosThreadCreate(&shell.pid, NULL, shellThreadLoop, NULL); taosThreadJoin(shell.pid, NULL); @@ -1425,12 +1443,10 @@ int32_t shellExecute(int argc, char *argv[]) { break; } } -#ifndef WEBSOCKET - // commnuity + if (verType == TSDB_VERSION_OSS) { showAD(true); } -#endif taosThreadJoin(spid, NULL); diff --git a/tools/shell/src/shellMain.c b/tools/shell/src/shellMain.c index 19277de1dd..31fad97bd4 100644 --- a/tools/shell/src/shellMain.c +++ b/tools/shell/src/shellMain.c @@ -14,8 +14,8 @@ */ #define __USE_XOPEN -#include "shellInt.h" #include "shellAuto.h" +#include "shellInt.h" extern SShellObj shell; @@ -24,15 +24,14 @@ void shellCrashHandler(int signum, void *sigInfo, void *context) { taosIgnSignal(SIGHUP); taosIgnSignal(SIGINT); taosIgnSignal(SIGBREAK); - -#if !defined(WINDOWS) - taosIgnSignal(SIGBUS); -#endif taosIgnSignal(SIGABRT); taosIgnSignal(SIGFPE); taosIgnSignal(SIGSEGV); +#if !defined(WINDOWS) + 
taosIgnSignal(SIGBUS); +#endif #ifdef USE_REPORT - tscWriteCrashInfo(signum, sigInfo, context); + taos_write_crashinfo(signum, sigInfo, context); #endif #ifdef _TD_DARWIN_64 exit(signum); @@ -41,14 +40,21 @@ void shellCrashHandler(int signum, void *sigInfo, void *context) { #endif } -int main(int argc, char *argv[]) { - shell.exit = false; -#ifdef WEBSOCKET - shell.args.timeout = SHELL_WS_TIMEOUT; - shell.args.cloud = true; - shell.args.local = false; -#endif +// init arguments +void initArgument(SShellArgs *pArgs) { + pArgs->host = NULL; + pArgs->port = 0; + pArgs->user = NULL; + pArgs->database = NULL; + // conn mode + pArgs->dsn = NULL; + pArgs->connMode = CONN_MODE_INVALID; + + pArgs->port_inputted = false; +} + +int main(int argc, char *argv[]) { #if !defined(WINDOWS) taosSetSignal(SIGBUS, shellCrashHandler); #endif @@ -56,6 +62,8 @@ int main(int argc, char *argv[]) { taosSetSignal(SIGFPE, shellCrashHandler); taosSetSignal(SIGSEGV, shellCrashHandler); + initArgument(&shell.args); + if (shellCheckIntSize() != 0) { return -1; } @@ -78,10 +86,27 @@ int main(int argc, char *argv[]) { shellPrintHelp(); return 0; } -#ifdef WEBSOCKET - shellCheckConnectMode(); -#endif + + if (shell.args.netrole != NULL) { + shellTestNetWork(); + return 0; + } + + if (shell.args.is_dump_config) { + shellDumpConfig(); + return 0; + } + + if (getDsnEnv() != 0) { + return -1; + } + + if (setConnMode(shell.args.connMode, shell.args.dsn)) { + return -1; + } + if (taos_init() != 0) { + fprintf(stderr, "failed to init shell since %s [0x%08X]\r\n", taos_errstr(NULL), taos_errno(NULL)); return -1; } @@ -111,5 +136,6 @@ int main(int argc, char *argv[]) { shellAutoInit(); int32_t ret = shellExecute(argc, argv); shellAutoExit(); + return ret; } diff --git a/tools/shell/src/shellNettest.c b/tools/shell/src/shellNettest.c index d1ecf503d2..c779a2d899 100644 --- a/tools/shell/src/shellNettest.c +++ b/tools/shell/src/shellNettest.c @@ -15,7 +15,6 @@ #define _GNU_SOURCE #include "shellInt.h" -#include "tversion.h" static void shellWorkAsClient() { SShellArgs *pArgs = &shell.args; @@ -30,9 +29,9 @@ static void shellWorkAsClient() { rpcInit.numOfThreads = 1; rpcInit.sessions = 16; rpcInit.connType = TAOS_CONN_CLIENT; - rpcInit.idleTime = tsShellActivityTimer * 1000; + rpcInit.idleTime = 3000; rpcInit.user = "_dnd"; - rpcInit.timeToGetConn = tsTimeToGetAvailableConn; + rpcInit.timeToGetConn = 500000; taosVersionStrToInt(td_version, &rpcInit.compatibilityVer); clientRpc = rpcOpen(&rpcInit); @@ -41,17 +40,16 @@ static void shellWorkAsClient() { goto _OVER; } + if (pArgs->port == 0) { + pArgs->port = 6030; + } if (pArgs->host == NULL) { - pArgs->host = tsFirst; + pArgs->host = "localhost"; } char fqdn[TSDB_FQDN_LEN] = {0}; tstrncpy(fqdn, pArgs->host, TSDB_FQDN_LEN); strtok(fqdn, ":"); - if (pArgs->port == 0) { - pArgs->port = tsServerPort; - } - printf("network test client is initialized, the server is %s:%u\r\n", fqdn, pArgs->port); tstrncpy(epSet.eps[0].fqdn, fqdn, TSDB_FQDN_LEN); @@ -112,18 +110,21 @@ static void shellWorkAsServer() { SShellArgs *pArgs = &shell.args; if (pArgs->port == 0) { - pArgs->port = tsServerPort; + pArgs->port = 6030; + } + if (pArgs->host == NULL) { + pArgs->host = "127.0.0.1"; } SRpcInit rpcInit = {0}; - memcpy(rpcInit.localFqdn, tsLocalFqdn, strlen(tsLocalFqdn)); + memcpy(rpcInit.localFqdn, pArgs->host, strlen(pArgs->host)); rpcInit.localPort = pArgs->port; rpcInit.label = "CHK"; rpcInit.numOfThreads = 2; rpcInit.cfp = (RpcCfp)shellProcessMsg; rpcInit.sessions = 10; rpcInit.connType = TAOS_CONN_SERVER; 
- rpcInit.idleTime = tsShellActivityTimer * 1000; + rpcInit.idleTime = 3000; taosVersionStrToInt(td_version, &rpcInit.compatibilityVer); @@ -131,13 +132,16 @@ static void shellWorkAsServer() { if (serverRpc == NULL) { printf("failed to init net test server since %s\r\n", terrstr()); } else { - printf("network test server is initialized, port:%u\r\n", pArgs->port); + printf("network test server is initialized, %s:%u\r\n", pArgs->host, pArgs->port); taosSetSignal(SIGTERM, shellNettestHandler); while (1) taosMsleep(10); } } void shellTestNetWork() { + (void)osDefaultInit(); + (void)rpcInit(); + if (strcmp(shell.args.netrole, "client") == 0) { shellWorkAsClient(); } diff --git a/tools/shell/src/shellUtil.c b/tools/shell/src/shellUtil.c index a8c1193ab8..73150b89d3 100644 --- a/tools/shell/src/shellUtil.c +++ b/tools/shell/src/shellUtil.c @@ -50,19 +50,19 @@ bool shellRegexMatch(const char *s, const char *reg, int32_t cflags) { int32_t shellCheckIntSize() { if (sizeof(int8_t) != 1) { - printf("int8 size is %d(!= 1)", (int)sizeof(int8_t)); + printf("int8 size is %d(!= 1)\r\n", (int)sizeof(int8_t)); return -1; } if (sizeof(int16_t) != 2) { - printf("int16 size is %d(!= 2)", (int)sizeof(int16_t)); + printf("int16 size is %d(!= 2)\r\n", (int)sizeof(int16_t)); return -1; } if (sizeof(int32_t) != 4) { - printf("int32 size is %d(!= 4)", (int)sizeof(int32_t)); + printf("int32 size is %d(!= 4)\r\n", (int)sizeof(int32_t)); return -1; } if (sizeof(int64_t) != 8) { - printf("int64 size is %d(!= 8)", (int)sizeof(int64_t)); + printf("int64 size is %d(!= 8)\r\n", (int)sizeof(int64_t)); return -1; } return 0; @@ -78,12 +78,15 @@ void shellGenerateAuth() { } void shellDumpConfig() { - SConfig *pCfg = taosGetCfg(); - if (pCfg == NULL) { - printf("read global config failed!\r\n"); - } else { - cfgDumpCfg(pCfg, 1, true); + (void)osDefaultInit(); + + if (taosInitCfg(configDir, NULL, NULL, NULL, NULL, 1) != 0) { + fprintf(stderr, "failed to load cfg since %s [0x%08X]\n", terrstr(), terrno); + return; } + + cfgDumpCfg(taosGetCfg(), 1, true); + fflush(stdout); } @@ -121,48 +124,6 @@ void shellCheckServerStatus() { } } while (1); } -#ifdef WEBSOCKET -char dsn[1024] = "ws://localhost:6041"; -void shellCheckConnectMode() { - if (shell.args.dsn) { - shell.args.cloud = true; - shell.args.restful = false; - return; - } - if (shell.args.cloud) { - shell.args.dsn = getenv("TDENGINE_CLOUD_DSN"); - if (shell.args.dsn && strlen(shell.args.dsn) > 4) { - shell.args.cloud = true; - shell.args.local = false; - shell.args.restful = false; - return; - } - - shell.args.dsn = getenv("TDENGINE_DSN"); - if (shell.args.dsn && strlen(shell.args.dsn) > 4) { - shell.args.cloud = true; - shell.args.local = true; - shell.args.restful = false; - return; - } - } - - if (shell.args.restful) { - if (!shell.args.host) { - shell.args.host = "localhost"; - } - if (!shell.args.port) { - shell.args.port = 6041; - } - shell.args.dsn = dsn; - snprintf(shell.args.dsn, 1024, "ws://%s:%d", - shell.args.host, shell.args.port); - } - shell.args.cloud = false; - return; - -} -#endif void shellExit() { if (shell.conn != NULL) { diff --git a/tools/shell/src/shellWebsocket.c b/tools/shell/src/shellWebsocket.c deleted file mode 100644 index 61074102be..0000000000 --- a/tools/shell/src/shellWebsocket.c +++ /dev/null @@ -1,396 +0,0 @@ - -/* - * Copyright (c) 2019 TAOS Data, Inc. 
- * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ -#ifdef WEBSOCKET -#include -#include - -// save current database name -char curDBName[128] = ""; // TDB_MAX_DBNAME_LEN is 24, put large - -int shell_conn_ws_server(bool first) { - char cuttedDsn[SHELL_WS_DSN_BUFF] = {0}; - int dsnLen = strlen(shell.args.dsn); - snprintf(cuttedDsn, - ((dsnLen-SHELL_WS_DSN_MASK) > SHELL_WS_DSN_BUFF)? - SHELL_WS_DSN_BUFF:(dsnLen-SHELL_WS_DSN_MASK), - "%s", shell.args.dsn); - fprintf(stdout, "trying to connect %s****** ", cuttedDsn); - fflush(stdout); - for (int i = 0; i < shell.args.timeout; i++) { - if(shell.args.is_bi_mode) { - size_t len = strlen(shell.args.dsn); - char * dsn = taosMemoryMalloc(len + 32); - sprintf(dsn, "%s&conn_mode=1", shell.args.dsn); - shell.ws_conn = ws_connect(dsn); - taosMemoryFree(dsn); - } else { - shell.ws_conn = ws_connect(shell.args.dsn); - } - - if (NULL == shell.ws_conn) { - int errNo = ws_errno(NULL); - if (0xE001 == errNo) { - fprintf(stdout, "."); - fflush(stdout); - taosMsleep(1000); // sleep 1 second then try again - continue; - } else { - fprintf(stderr, "\nfailed to connect %s***, reason: %s\n", - cuttedDsn, ws_errstr(NULL)); - return -1; - } - } else { - break; - } - } - if (NULL == shell.ws_conn) { - fprintf(stdout, "\n timeout\n"); - fprintf(stderr, "\nfailed to connect %s***, reason: %s\n", - cuttedDsn, ws_errstr(NULL)); - return -1; - } else { - fprintf(stdout, "\n"); - } - if (first && shell.args.restful) { - fprintf(stdout, "successfully connected to %s\n\n", - shell.args.dsn); - } else if (first && shell.args.cloud) { - if(shell.args.local) { - const char* host = strstr(shell.args.dsn, "@"); - if(host) { - host += 1; - } else { - host = shell.args.dsn; - } - fprintf(stdout, "successfully connected to %s\n", host); - } else { - fprintf(stdout, "successfully connected to service\n"); - } - } - fflush(stdout); - - // switch to current database if have - if(curDBName[0] !=0) { - char command[256]; - sprintf(command, "use %s;", curDBName); - shellRunSingleCommandWebsocketImp(command); - } - - return 0; -} - -static int horizontalPrintWebsocket(WS_RES* wres, double* execute_time) { - const void* data = NULL; - int rows; - ws_fetch_raw_block(wres, &data, &rows); - if (wres) { - *execute_time += (double)(ws_take_timing(wres)/1E6); - } - if (!rows) { - return 0; - } - int num_fields = ws_field_count(wres); - TAOS_FIELD* fields = (TAOS_FIELD*)ws_fetch_fields(wres); - int precision = ws_result_precision(wres); - - int width[TSDB_MAX_COLUMNS]; - for (int col = 0; col < num_fields; col++) { - width[col] = shellCalcColWidth(fields + col, precision); - } - - shellPrintHeader(fields, width, num_fields); - - int numOfRows = 0; - do { - uint8_t ty; - uint32_t len; - for (int i = 0; i < rows; i++) { - for (int j = 0; j < num_fields; j++) { - putchar(' '); - const void *value = ws_get_value_in_block(wres, i, j, &ty, &len); - shellPrintField((const char*)value, fields+j, width[j], len, precision); - putchar(' '); - putchar('|'); - } - putchar('\r'); - putchar('\n'); - } - numOfRows += rows; - 
ws_fetch_raw_block(wres, &data, &rows); - } while (rows && !shell.stop_query); - return numOfRows; -} - -static int verticalPrintWebsocket(WS_RES* wres, double* pexecute_time) { - int rows = 0; - const void* data = NULL; - ws_fetch_raw_block(wres, &data, &rows); - if (wres) { - *pexecute_time += (double)(ws_take_timing(wres)/1E6); - } - if (!rows) { - return 0; - } - int num_fields = ws_field_count(wres); - TAOS_FIELD* fields = (TAOS_FIELD*)ws_fetch_fields(wres); - int precision = ws_result_precision(wres); - - int maxColNameLen = 0; - for (int col = 0; col < num_fields; col++) { - int len = (int)strlen(fields[col].name); - if (len > maxColNameLen) { - maxColNameLen = len; - } - } - int numOfRows = 0; - do { - uint8_t ty; - uint32_t len; - for (int i = 0; i < rows; i++) { - printf("*************************** %d.row ***************************\n", - numOfRows + 1); - for (int j = 0; j < num_fields; j++) { - TAOS_FIELD* field = fields + j; - int padding = (int)(maxColNameLen - strlen(field->name)); - printf("%*.s%s: ", padding, " ", field->name); - const void *value = ws_get_value_in_block(wres, i, j, &ty, &len); - shellPrintField((const char*)value, field, 0, len, precision); - putchar('\n'); - } - numOfRows++; - } - ws_fetch_raw_block(wres, &data, &rows); - } while (rows && !shell.stop_query); - return numOfRows; -} - -static int dumpWebsocketToFile(const char* fname, WS_RES* wres, - double* pexecute_time) { - char fullname[PATH_MAX] = {0}; - if (taosExpandDir(fname, fullname, PATH_MAX) != 0) { - tstrncpy(fullname, fname, PATH_MAX); - } - - TdFilePtr pFile = taosOpenFile(fullname, - TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_STREAM); - if (pFile == NULL) { - fprintf(stderr, "failed to open file: %s\r\n", fullname); - return -1; - } - int rows = 0; - const void* data = NULL; - ws_fetch_raw_block(wres, &data, &rows); - if (wres) { - *pexecute_time += (double)(ws_take_timing(wres)/1E6); - } - if (!rows) { - taosCloseFile(&pFile); - return 0; - } - int numOfRows = 0; - TAOS_FIELD* fields = (TAOS_FIELD*)ws_fetch_fields(wres); - int num_fields = ws_field_count(wres); - int precision = ws_result_precision(wres); - for (int col = 0; col < num_fields; col++) { - if (col > 0) { - taosFprintfFile(pFile, ","); - } - taosFprintfFile(pFile, "%s", fields[col].name); - } - taosFprintfFile(pFile, "\r\n"); - do { - uint8_t ty; - uint32_t len; - numOfRows += rows; - for (int i = 0; i < rows; i++) { - for (int j = 0; j < num_fields; j++) { - if (j > 0) { - taosFprintfFile(pFile, ","); - } - const void *value = ws_get_value_in_block(wres, i, j, &ty, &len); - shellDumpFieldToFile(pFile, (const char*)value, - fields + j, len, precision); - } - taosFprintfFile(pFile, "\r\n"); - } - ws_fetch_raw_block(wres, &data, &rows); - } while (rows && !shell.stop_query); - taosCloseFile(&pFile); - return numOfRows; -} - -static int shellDumpWebsocket(WS_RES *wres, char *fname, - int *error_no, bool vertical, - double* pexecute_time) { - int numOfRows = 0; - if (fname != NULL) { - numOfRows = dumpWebsocketToFile(fname, wres, pexecute_time); - } else if (vertical) { - numOfRows = verticalPrintWebsocket(wres, pexecute_time); - } else { - numOfRows = horizontalPrintWebsocket(wres, pexecute_time); - } - *error_no = ws_errno(wres); - return numOfRows; -} - -char * strendG(const char* pstr); -void shellRunSingleCommandWebsocketImp(char *command) { - int64_t st, et; - char *sptr = NULL; - char *cptr = NULL; - char *fname = NULL; - bool printMode = false; - - if ((sptr = strstr(command, ">>")) != NULL) { - fname = sptr 
+ 2; - while (*fname == ' ') fname++; - *sptr = '\0'; - - cptr = strstr(fname, ";"); - if (cptr != NULL) { - *cptr = '\0'; - } - } - - if ((sptr = strendG(command)) != NULL) { - *sptr = '\0'; - printMode = true; // When output to a file, the switch does not work. - } - - shell.stop_query = false; - WS_RES* res; - - for (int reconnectNum = 0; reconnectNum < 2; reconnectNum++) { - if (!shell.ws_conn && shell_conn_ws_server(0) || shell.stop_query) { - return; - } - st = taosGetTimestampUs(); - - res = ws_query_timeout(shell.ws_conn, command, shell.args.timeout); - int code = ws_errno(res); - if (code != 0 && !shell.stop_query) { - // if it's not a ws connection error - if (TSDB_CODE_WS_DSN_ERROR != (code&TSDB_CODE_WS_DSN_ERROR)) { - et = taosGetTimestampUs(); - fprintf(stderr, "\nDB: error:0x%08X %s (%.6fs)\n", - ws_errno(res), ws_errstr(res), (et - st)/1E6); - ws_free_result(res); - return; - } - if (code == TSDB_CODE_WS_SEND_TIMEOUT - || code == TSDB_CODE_WS_RECV_TIMEOUT) { - fprintf(stderr, "Hint: use -T to increase the timeout in seconds\n"); - } else if (code == TSDB_CODE_WS_INTERNAL_ERRO - || code == TSDB_CODE_WS_CLOSED) { - shell.ws_conn = NULL; - } - ws_free_result(res); - if (reconnectNum == 0) { - continue; - } else { - fprintf(stderr, "The server is disconnected, will try to reconnect\n"); - } - return; - } - break; - } - - double execute_time = 0; - if (res) { - execute_time = ws_take_timing(res)/1E6; - } - - if (shellRegexMatch(command, "^\\s*use\\s+[a-zA-Z0-9_]+\\s*;\\s*$", - REG_EXTENDED | REG_ICASE)) { - - // copy dbname to curDBName - char *p = command; - bool firstStart = false; - bool firstEnd = false; - int i = 0; - while (*p != 0) { - if (*p != ' ') { - // not blank - if (!firstStart) { - firstStart = true; - } else if (firstEnd) { - if(*p == ';' && *p != '\\') { - break; - } - // database name - curDBName[i++] = *p; - if(i + 4 > sizeof(curDBName)) { - // DBName is too long, reset zero and break - i = 0; - break; - } - } - } else { - // blank - if(firstStart == true && firstEnd == false){ - firstEnd = true; - } - if(firstStart && firstEnd && i > 0){ - // blank after database name - break; - } - } - // move next - p++; - } - // append end - curDBName[i] = 0; - - fprintf(stdout, "Database changed to %s.\r\n\r\n", curDBName); - fflush(stdout); - ws_free_result(res); - return; - } - - int numOfRows = 0; - if (ws_is_update_query(res)) { - numOfRows = ws_affected_rows(res); - et = taosGetTimestampUs(); - double total_time = (et - st)/1E3; - double net_time = total_time - (double)execute_time; - printf("Query Ok, %d of %d row(s) in database\n", numOfRows, numOfRows); - printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", - execute_time, net_time, total_time); - } else { - int error_no = 0; - numOfRows = shellDumpWebsocket(res, fname, &error_no, - printMode, &execute_time); - if (numOfRows < 0) { - ws_free_result(res); - return; - } - et = taosGetTimestampUs(); - double total_time = (et - st) / 1E3; - double net_time = total_time - execute_time; - if (error_no == 0 && !shell.stop_query) { - printf("Query OK, %d row(s) in set\n", numOfRows); - printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", - execute_time, net_time, total_time); - } else { - printf("Query interrupted, %d row(s) in set (%.6fs)\n", numOfRows, - (et - st)/1E6); - } - } - printf("\n"); - ws_free_result(res); -} -#endif diff --git a/tools/src/pub.c b/tools/src/pub.c new file mode 100644 index 0000000000..7e561b57d9 --- /dev/null +++ b/tools/src/pub.c @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2019 
TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the MIT license as published by the Free Software + * Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + */ + + #include + #include + #include "../inc/pub.h" + + + char* strToLowerCopy(const char *str) { + if (str == NULL) { + return NULL; + } + size_t len = strlen(str); + char *result = (char*)malloc(len + 1); + if (result == NULL) { + return NULL; + } + for (size_t i = 0; i < len; i++) { + result[i] = tolower((unsigned char)str[i]); + } + result[len] = '\0'; + return result; + } + + int32_t parseDsn(char* dsn, char **host, char **port, char **user, char **pwd, char *error) { + // dsn format: + // local http://127.0.0.1:6041 + // cloud https://gw.cloud.taosdata.com?token=617ffdf... + // https://gw.cloud.taosdata.com:433?token=617ffdf... + + // find "://" + char *p1 = strstr(dsn, "://"); + if (p1 == NULL) { + sprintf(error, "%s", "dsn invalid, not found \"://\" "); + return -1; + } + *host = p1 + 3; // host + char *p = *host; + + // find ":" - option + char *p2 = strstr(p, ":"); + if (p2) { + p = p2 + 1; + *port = p2 + 1; // port + *p2 = 0; + } + + // find "?" + char *p3 = strstr(p, "?"); + if (p3) { + p = p3 + 1; + *user = p3 + 1; + *p3 = 0; + } else { + return 0; + } + + // find "=" + char *p4 = strstr(p, "="); + if (p4) { + *p4 = 0; + *pwd = p4 + 1; + } else { + sprintf(error, "%s", "dsn invalid, found \"?\" but not found \"=\" "); + return -1; + } + + return 0; + } + + // get conn mode; if invalid, exit app + int8_t getConnMode(char *arg) { + // compare + if (strcasecmp(arg, STR_NATIVE) == 0 || strcasecmp(arg, "0") == 0) { + return CONN_MODE_NATIVE; + } else if (strcasecmp(arg, STR_WEBSOCKET) == 0 || strcasecmp(arg, "1") == 0) { + return CONN_MODE_WEBSOCKET; + } else { + fprintf(stderr, "invalid input %s for option -Z, only support: %s or %s\r\n", arg, STR_NATIVE, STR_WEBSOCKET); + exit(-1); + } + } + + // set conn mode +int32_t setConnMode(int8_t connMode, char *dsn) { + // check default + if (connMode == CONN_MODE_INVALID) { + if (dsn && dsn[0] != 0) { + connMode = CONN_MODE_WEBSOCKET; + } else { + // default + connMode = CONN_MODE_DEFAULT; + } + } + + // set conn mode + char * strMode = connMode == CONN_MODE_NATIVE ? STR_NATIVE : STR_WEBSOCKET; + int32_t code = taos_options(TSDB_OPTION_DRIVER, strMode); + if (code != 0) { + fprintf(stderr, "failed to load driver. since %s [0x%08X]\r\n", taos_errstr(NULL), taos_errno(NULL)); + return code; + } + return 0; +} + +// default mode +int8_t defaultMode(int8_t connMode, char *dsn) { + int8_t mode = connMode; + if (connMode == CONN_MODE_INVALID) { + // no input from command line or config + if (dsn && dsn[0] != 0) { + mode = CONN_MODE_WEBSOCKET; + } else { + // default + mode = CONN_MODE_DEFAULT; + } + } + return mode; +} + +// get default port +uint16_t defaultPort(int8_t connMode, char *dsn) { + // consistent with setConnMode + int8_t mode = defaultMode(connMode, dsn); + + // default port + return mode == CONN_MODE_NATIVE ?
DEFAULT_PORT_NATIVE : DEFAULT_PORT_WS_LOCAL; +} + \ No newline at end of file diff --git a/tools/taos-tools/deps/toolscJson/src/toolscJson.c b/tools/taos-tools/deps/toolscJson/src/toolscJson.c index b52538912c..110dcd6c49 100644 --- a/tools/taos-tools/deps/toolscJson/src/toolscJson.c +++ b/tools/taos-tools/deps/toolscJson/src/toolscJson.c @@ -277,7 +277,7 @@ loop_end: item->valuedouble = number; /* use saturation in case of overflow */ - if (number >= LLONG_MAX) + if (number >= (double)LLONG_MAX) { item->valueint = LLONG_MAX; } @@ -303,7 +303,7 @@ loop_end: /* don't ask me, but the original tools_cJSON_SetNumberValue returns an integer or double */ CJSON_PUBLIC(double) tools_cJSON_SetNumberHelper(tools_cJSON *object, double number) { - if (number >= LLONG_MAX) + if (number >= (double)LLONG_MAX) { object->valueint = LLONG_MAX; } @@ -2104,7 +2104,7 @@ CJSON_PUBLIC(tools_cJSON *) tools_cJSON_CreateNumber(double num) item->valuedouble = num; /* use saturation in case of overflow */ - if (num >= LLONG_MAX) + if (num >= (double)LLONG_MAX) { item->valueint = LLONG_MAX; } diff --git a/tools/taos-tools/example/insert.json b/tools/taos-tools/example/insert.json index 1e40883b8b..0f3316fd5b 100644 --- a/tools/taos-tools/example/insert.json +++ b/tools/taos-tools/example/insert.json @@ -27,7 +27,7 @@ { "name": "meters", "child_table_exists": "no", - "childtable_count": 1000, + "childtable_count": 10, "childtable_prefix": "d", "auto_create_table": "no", "batch_create_tbl_num": 5, diff --git a/tools/taos-tools/example/query.json b/tools/taos-tools/example/query.json index bf74d8da3f..4d218eca7a 100644 --- a/tools/taos-tools/example/query.json +++ b/tools/taos-tools/example/query.json @@ -6,13 +6,13 @@ "user": "root", "password": "taosdata", "confirm_parameter_prompt": "no", - "continue_if_fail": "yes", + "continue_if_fail": "yes", "databases": "test", "query_times": 10, "query_mode": "taosc", "specified_table_query": { "query_interval": 1, - "concurrent": 3, + "threads": 3, "sqls": [ { "sql": "select last_row(*) from meters", @@ -23,16 +23,5 @@ "result": "./query_res1.txt" } ] - }, - "super_table_query": { - "stblname": "meters", - "query_interval": 1, - "threads": 3, - "sqls": [ - { - "sql": "select last_row(ts) from xxxx", - "result": "./query_res2.txt" - } - ] } } diff --git a/tools/taos-tools/example/queryStb.json b/tools/taos-tools/example/queryStb.json new file mode 100644 index 0000000000..cb44e0dfc1 --- /dev/null +++ b/tools/taos-tools/example/queryStb.json @@ -0,0 +1,24 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "continue_if_fail": "yes", + "databases": "test", + "query_times": 10, + "query_mode": "taosc", + "super_table_query": { + "stblname": "meters", + "query_interval": 1, + "threads": 3, + "sqls": [ + { + "sql": "select last_row(ts) from xxxx", + "result": "./query_res2.txt" + } + ] + } +} diff --git a/tools/taos-tools/inc/bench.h b/tools/taos-tools/inc/bench.h index 8adb879301..0957bafded 100644 --- a/tools/taos-tools/inc/bench.h +++ b/tools/taos-tools/inc/bench.h @@ -23,6 +23,8 @@ #define MAX(a, b) ((a) > (b) ? (a) : (b)) #define MIN(a, b) ((a) < (b) ? (a) : (b)) +#include "pub.h" + #ifdef LINUX #ifndef _ALPINE @@ -74,10 +76,7 @@ #include #include #include - -#ifdef WEBSOCKET -#include -#endif +#include "../../inc/pub.h" #ifdef WINDOWS #define _CRT_RAND_S @@ -257,19 +256,11 @@ typedef unsigned __int32 uint32_t; "when keep trying be enabled." 
#define BENCH_NODROP "Do not drop database." -#ifdef WEBSOCKET -#define BENCH_DSN "The dsn to connect the cloud service." -#define BENCH_TIMEOUT \ - "The timeout wait on websocket query in seconds, default is 10." -#endif - #define IS_VAR_DATA_TYPE(t) \ (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_VARBINARY) || ((t) == TSDB_DATA_TYPE_NCHAR) || \ ((t) == TSDB_DATA_TYPE_JSON) || ((t) == TSDB_DATA_TYPE_GEOMETRY)) - - enum TEST_MODE { INSERT_TEST, // 0 QUERY_TEST, // 1 @@ -281,11 +272,9 @@ enum enumSYNC_MODE { SYNC_MODE, ASYNC_MODE, MODE_BUT }; enum enum_TAOS_INTERFACE { TAOSC_IFACE, - REST_IFACE, STMT_IFACE, STMT2_IFACE, SML_IFACE, - SML_REST_IFACE, INTERFACE_BUT }; @@ -763,7 +752,6 @@ typedef struct SArguments_S { uint64_t insert_interval; bool demo_mode; bool aggr_func; - struct sockaddr_in serv_addr; uint64_t totalChildTables; uint64_t actualChildTables; uint64_t autoCreatedChildTables; @@ -777,18 +765,16 @@ typedef struct SArguments_S { #endif bool terminate; bool in_prompt; -#ifdef WEBSOCKET - int32_t timeout; + + // websocket char* dsn; - bool websocket; -#endif + bool supplementInsert; int64_t startTimestamp; int32_t partialColNum; int32_t keep_trying; uint32_t trying_interval; int iface; - int rest_server_ver_major; bool check_sql; int suit; // see define SUIT_ int16_t inputted_vgroups; @@ -797,10 +783,9 @@ typedef struct SArguments_S { bool escape_character; bool pre_load_tb_meta; bool bind_vgroup; - + int8_t connMode; // see define CONN_MODE_ char* output_path; char output_path_buf[MAX_PATH_LEN]; - } SArguments; typedef struct SBenchConn { @@ -808,10 +793,6 @@ typedef struct SBenchConn { TAOS* ctaos; // check taos TAOS_STMT* stmt; TAOS_STMT2* stmt2; -#ifdef WEBSOCKET - WS_TAOS* taos_ws; - WS_STMT* stmt_ws; -#endif } SBenchConn; #define MAX_BATCOLS 256 @@ -941,18 +922,10 @@ void tmfclose(FILE *fp); int64_t fetchResult(TAOS_RES *res, char *filePath); void prompt(bool NonStopMode); void ERROR_EXIT(const char *msg); -int getServerVersionRest(int16_t rest_port); -int postProceSql(char *sqlstr, char* dbName, int precision, int iface, - int protocol, uint16_t rest_port, bool tcp, - int sockfd, char* filePath); int queryDbExecCall(SBenchConn *conn, char *command); -int queryDbExecRest(char *command, char* dbName, int precision, - int iface, int protocol, bool tcp, int sockfd); SBenchConn* initBenchConn(); void closeBenchConn(SBenchConn* conn); int regexMatch(const char *s, const char *reg, int cflags); -int convertHostToServAddr(char *host, uint16_t port, - struct sockaddr_in *serv_addr); int getAllChildNameOfSuperTable(TAOS *taos, char *dbName, char *stbName, char ** childTblNameOfSuperTbl, int64_t childTblCountOfSuperTbl); @@ -998,9 +971,6 @@ int insertTestProcess(); void postFreeResource(); int queryTestProcess(); int subscribeTestProcess(); -int convertServAddr(int iface, bool tcp, int protocol); -int createSockFd(); -void destroySockFd(int sockfd); void printVersion(); int32_t benchParseSingleOpt(int32_t key, char* arg); @@ -1034,6 +1004,9 @@ int tmpGeometry(char *tmp, int iface, Field *field, int64_t k); int tmpInt32ImplTag(Field *field, int i, int k); char* genQMark( int32_t QCnt); +// get colNames , first is tbname if tbName is true +char *genColNames(BArray *cols, bool tbName); + // stmt2 TAOS_STMT2_BINDV* createBindV(int32_t count, int32_t tagCnt, int32_t colCnt); // clear bindv table count tables tag and column @@ -1042,9 +1015,6 @@ void clearBindV(TAOS_STMT2_BINDV *bindv); void freeBindV(TAOS_STMT2_BINDV *bindv); void showBindV(TAOS_STMT2_BINDV *bindv, BArray 
*tags, BArray *cols); -// IFace is rest return True -bool isRest(int32_t iface); - // get group index about dbname.tbname int32_t calcGroupIndex(char* dbName, char* tbName, int32_t groupCnt); @@ -1060,6 +1030,8 @@ void *queryKiller(void *arg); int killSlowQuery(); // fetch super table child name from server int fetchChildTableName(char *dbName, char *stbName); +// call engine error +void engineError(char * module, char * fun, int32_t code); // trim prefix suffix blank cmp int trimCaseCmp(char *str1,char *str2); diff --git a/tools/taos-tools/inc/benchData.h b/tools/taos-tools/inc/benchData.h index 0ccbf7df22..804ae0c793 100644 --- a/tools/taos-tools/inc/benchData.h +++ b/tools/taos-tools/inc/benchData.h @@ -29,8 +29,8 @@ int generateRandData(SSuperTable *stbInfo, char *sampleDataBuf, int lenOfOneRow, BArray * fields, int64_t loop, bool tag, BArray *childCols); // prepare -int prepareStmt (TAOS_STMT *stmt, SSuperTable *stbInfo, char* tagData, uint64_t tableSeq); -int prepareStmt2(TAOS_STMT2 *stmt2, SSuperTable *stbInfo, char* tagData, uint64_t tableSeq); +int prepareStmt (TAOS_STMT *stmt, SSuperTable *stbInfo, char* tagData, uint64_t tableSeq, char *db); +int prepareStmt2(TAOS_STMT2 *stmt2, SSuperTable *stbInfo, char* tagData, uint64_t tableSeq, char *db); uint32_t bindParamBatch(threadInfo *pThreadInfo, uint32_t batch, int64_t startTime, int64_t pos, diff --git a/tools/taos-tools/inc/benchLog.h b/tools/taos-tools/inc/benchLog.h index ab74aaff75..5b7b9ec139 100644 --- a/tools/taos-tools/inc/benchLog.h +++ b/tools/taos-tools/inc/benchLog.h @@ -39,7 +39,6 @@ void unlockLog(int8_t idx); // exit log void exitLog(); - #define debugPrint(fmt, ...) \ do { \ if (g_arguments->debug_print) { \ diff --git a/tools/taos-tools/inc/dump.h b/tools/taos-tools/inc/dump.h index 9a30ebb9cd..68ccd20d1a 100644 --- a/tools/taos-tools/inc/dump.h +++ b/tools/taos-tools/inc/dump.h @@ -39,10 +39,7 @@ #include #include #include - -#ifdef WEBSOCKET -#include -#endif +#include "../../inc/pub.h" // @@ -378,22 +375,18 @@ typedef struct arguments { bool performance_print; bool dotReplace; int dumpDbCount; -#ifdef WEBSOCKET - bool restful; - bool cloud; - int ws_timeout; + + int8_t connMode; + bool port_inputted; char *dsn; - char *cloudToken; - int cloudPort; - char cloudHost[MAX_HOSTNAME_LEN]; -#endif // put rename db string char * renameBuf; SRenameDB * renameHead; // retry for call engine api int32_t retryCount; - int32_t retrySleepMs; + int32_t retrySleepMs; + } SArguments; bool isSystemDatabase(char *dbName); @@ -480,6 +473,7 @@ int64_t dumpANormalTableNotBelong( void* openQuery(void** taos_v , const char * sql); void closeQuery(void* res); int32_t readRow(void *res, int32_t idx, int32_t col, uint32_t *len, char **data); +void engineError(char * module, char * fun, int32_t code); extern struct arguments g_args; diff --git a/tools/taos-tools/inc/dumpUtil.h b/tools/taos-tools/inc/dumpUtil.h index 484237177e..b4c05e8d51 100644 --- a/tools/taos-tools/inc/dumpUtil.h +++ b/tools/taos-tools/inc/dumpUtil.h @@ -66,18 +66,4 @@ TAOS *taosConnect(const char *dbName); TAOS_RES *taosQuery(TAOS *taos, const char *sql, int32_t *code); -// -// --------------- websocket ------------------ -// -#ifdef WEBSOCKET -// ws connect -WS_TAOS *wsConnect(); -// ws query -WS_RES *wsQuery(WS_TAOS **ws_taos, const char *sql, int32_t *code); -// ws fetch -int32_t wsFetchBlock(WS_RES *rs, const void **pData, int32_t *numOfRows); - -#endif - - #endif // INC_DUMPUTIL_H_ \ No newline at end of file diff --git a/tools/taos-tools/inc/wsdump.h 
b/tools/taos-tools/inc/wsdump.h deleted file mode 100644 index 5b65b8c17c..0000000000 --- a/tools/taos-tools/inc/wsdump.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef INC_WSDUMP_H_ -#define INC_WSDUMP_H_ - -// -// --------------- websocket ------------------ -// - -#ifdef WEBSOCKET - -#include -#include -#include - -int cleanIfQueryFailedWS(const char *funcname, int lineno, char *command, WS_RES *res); -int getTableRecordInfoWS(char *dbName, char *table, TableRecordInfo *pTableRecordInfo); -int getDbCountWS(WS_RES *ws_res); -int64_t getNtbCountOfStbWS(char *dbName, const char *stbName); -int getTableDesFromStbWS(WS_TAOS **taos_v, const char *dbName, const TableDes *stbTableDes, const char *table, - TableDes **ppTableDes); -int getTableDesWS(WS_TAOS **taos_v, const char *dbName, const char *table, TableDes *tableDes, const bool colOnly); -int64_t queryDbForDumpOutCountWS(char *command, WS_TAOS **taos_v, const char *dbName, const char *tbName, - const int precision); -TAOS_RES *queryDbForDumpOutOffsetWS(WS_TAOS **taos_v, char *command); -int64_t dumpTableDataAvroWS(char *dataFilename, int64_t index, const char *tbName, const bool belongStb, - const char *dbName, const int precision, int colCount, TableDes *tableDes, - int64_t start_time, int64_t end_time); -int64_t fillTbNameArrWS(WS_TAOS **taos_v, char *command, char **tbNameArr, const char *stable, const int64_t preCount); -int readNextTableDesWS(void *ws_res, TableDes *tbDes, int *idx, int *cnt); -void dumpExtraInfoVarWS(void **taos_v, FILE *fp); -int queryDbImplWS(WS_TAOS *taos_v, char *command); -void dumpNormalTablesOfStbWS(threadInfo *pThreadInfo, FILE *fp, char *dumpFilename); -int64_t dumpStbAndChildTbOfDbWS(WS_TAOS **taos_v, SDbInfo *dbInfo, FILE *fpDbs); -int64_t dumpNTablesOfDbWS(WS_TAOS **taos_v, SDbInfo *dbInfo); -int fillDbInfoWS(void **taos_v); -bool jointCloudDsn(); -bool splitCloudDsn(); -int64_t dumpTableDataWS(const int64_t index, FILE *fp, const char *tbName, const char *dbName, const int precision, - TableDes *tableDes, const int64_t start_time, const int64_t end_time); -int32_t readRowWS(void *res, int32_t idx, int32_t col, uint32_t *len, char **data); -#endif - -#endif // INC_WSDUMP_H_ \ No newline at end of file diff --git a/tools/taos-tools/src/CMakeLists.txt b/tools/taos-tools/src/CMakeLists.txt index 320fb1f413..832e0904e8 100644 --- a/tools/taos-tools/src/CMakeLists.txt +++ b/tools/taos-tools/src/CMakeLists.txt @@ -86,6 +86,7 @@ LINK_DIRECTORIES(/usr/lib /usr/lib64) INCLUDE_DIRECTORIES(/usr/local/taos/include) INCLUDE_DIRECTORIES(${CMAKE_CURRENT_LIST_DIR}/../deps/avro/lang/c/src) INCLUDE_DIRECTORIES(${CMAKE_CURRENT_LIST_DIR}/../deps/toolscJson/src) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_LIST_DIR}/../../inc) INCLUDE_DIRECTORIES(${TD_SOURCE_DIR}/contrib/pthread) INCLUDE_DIRECTORIES(${TD_SOURCE_DIR}/contrib/msvcregex) @@ -116,38 +117,18 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin SET_PROPERTY(TARGET snappy PROPERTY IMPORTED_LOCATION "${CMAKE_BINARY_DIR}/build/lib/libsnappy.a") - ADD_EXECUTABLE(taosdump taosdump.c dumpUtil.c wsdump.c toolstime.c toolsSys.c toolsDir.c toolsString.c) + ADD_EXECUTABLE(taosdump taosdump.c ../../src/pub.c dumpUtil.c toolstime.c toolsSys.c toolsDir.c toolsString.c) ADD_DEPENDENCIES(deps-snappy apache-avro) ADD_DEPENDENCIES(taosdump deps-jansson) ADD_DEPENDENCIES(taosdump deps-snappy) - ADD_EXECUTABLE(taosBenchmark benchMain.c benchLog.c benchTmq.c benchQuery.c benchCsv.c benchJsonOpt.c benchInsert.c benchInsertMix.c benchDataMix.c wrapDb.c benchData.c 
benchCommandOpt.c benchUtil.c benchUtilDs.c benchSys.c toolstime.c toolsSys.c toolsString.c) + ADD_EXECUTABLE(taosBenchmark benchMain.c benchLog.c benchTmq.c benchQuery.c benchCsv.c benchJsonOpt.c benchInsert.c benchInsertMix.c benchDataMix.c wrapDb.c benchData.c benchCommandOpt.c benchUtil.c ../../src/pub.c benchUtilDs.c benchSys.c toolstime.c toolsSys.c toolsString.c) ELSE () INCLUDE_DIRECTORIES(/usr/local/include) ADD_DEFINITIONS(-DDARWIN) LINK_DIRECTORIES(/usr/local/lib) SET(OS_ID "Darwin") - ADD_EXECUTABLE(taosBenchmark benchMain.c benchLog.c benchTmq.c benchQuery.c benchCsv.c benchJsonOpt.c benchInsert.c benchInsertMix.c benchDataMix.c wrapDb.c benchData.c benchCommandOpt.c benchUtil.c benchUtilDs.c benchSys.c toolstime.c toolsSys.c toolsString.c) - ENDIF () - - # websocket - IF (${WEBSOCKET}) - ADD_DEFINITIONS(-DWEBSOCKET) - INCLUDE_DIRECTORIES(/usr/local/include/) - SET(WEBSOCKET_LINK_FLAGS "-ltaosws") - - IF (${CMAKE_PROJECT_NAME} STREQUAL "taos-tools") - MESSAGE("libtaosws.so need to be installed first") - ELSE () - ADD_DEPENDENCIES(taosBenchmark taosws-rs) - IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux") - ADD_DEPENDENCIES(taosdump taosws-rs) - ELSE () - MESSAGE("TODO: taosdump for macOS is WIP") - ENDIF () - ENDIF () - ELSE () - SET(WEBSOCKET_LINK_FLAGS "") + ADD_EXECUTABLE(taosBenchmark benchMain.c benchLog.c benchTmq.c benchQuery.c benchCsv.c benchJsonOpt.c benchInsert.c benchInsertMix.c benchDataMix.c wrapDb.c benchData.c benchCommandOpt.c benchUtil.c ../../src/pub.c benchUtilDs.c benchSys.c toolstime.c toolsSys.c toolsString.c) ENDIF () IF (${TOOLS_COVER} MATCHES "true") @@ -191,7 +172,7 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin ENDIF() ELSE () MESSAGE("Compiler is: ${CMAKE_C_COMPILER_ID}, version: ${CMAKE_C_COMPILER_VERSION}") - SET(CMAKE_C_FLAGS "-std=c99 -std=gnu11 -O0 -g3 -DDEBUG ${WEBSOCKET_LINK_FLAGS}") + SET(CMAKE_C_FLAGS "-std=c99 -std=gnu11 -O0 -g3 -DDEBUG ") ENDIF () IF (${OS_ID} MATCHES "alpine") @@ -202,8 +183,8 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin FIND_LIBRARY(LIBZ_LIBRARY z) MESSAGE(${ARGP_LIBRARY}) - TARGET_LINK_LIBRARIES(taosBenchmark taos pthread m toolscJson $<$:${LIBZ_LIBRARY}> $<$:${ARGP_LIBRARY}> ${WEBSOCKET_LINK_FLAGS}) - TARGET_LINK_LIBRARIES(taosdump taos avro jansson atomic pthread m argp $<$:${LIBZ_LIBRARY}> $<$:${ARGP_LIBRARY}> ${WEBSOCKET_LINK_FLAGS}) + TARGET_LINK_LIBRARIES(taosBenchmark taos pthread m toolscJson $<$:${LIBZ_LIBRARY}> $<$:${ARGP_LIBRARY}> ) + TARGET_LINK_LIBRARIES(taosdump taos avro jansson atomic pthread m argp $<$:${LIBZ_LIBRARY}> $<$:${ARGP_LIBRARY}> ) ELSEIF(${OS_ID} MATCHES "Darwin") ADD_LIBRARY(argp STATIC IMPORTED) IF (CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64") @@ -213,11 +194,11 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin SET_PROPERTY(TARGET argp PROPERTY IMPORTED_LOCATION "/usr/local/lib/libargp.a") INCLUDE_DIRECTORIES(/usr/local/include/) ENDIF () - TARGET_LINK_LIBRARIES(taosBenchmark taos pthread m toolscJson argp ${WEBSOCKET_LINK_FLAGS}) + TARGET_LINK_LIBRARIES(taosBenchmark taos pthread m toolscJson argp ) ElSE () MESSAGE("${Yellow} DEBUG mode use shared avro library to link for debug ${ColourReset}") - TARGET_LINK_LIBRARIES(taosdump taos avro jansson atomic pthread m ${WEBSOCKET_LINK_FLAGS} ${GCC_COVERAGE_LINK_FLAGS}) - TARGET_LINK_LIBRARIES(taosBenchmark taos pthread m toolscJson ${WEBSOCKET_LINK_FLAGS} ${GCC_COVERAGE_LINK_FLAGS}) + TARGET_LINK_LIBRARIES(taosdump taos avro jansson atomic pthread 
m ${GCC_COVERAGE_LINK_FLAGS}) + TARGET_LINK_LIBRARIES(taosBenchmark taos pthread m toolscJson ${GCC_COVERAGE_LINK_FLAGS}) ENDIF() ELSE () @@ -238,15 +219,6 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin ENDIF () IF (${CMAKE_SYSTEM_NAME} MATCHES "Darwin") - # ADD_LIBRARY(jansson STATIC IMPORTED) - #SET_PROPERTY(TARGET jansson PROPERTY IMPORTED_LOCATION "/opt/homebrew/opt/jansson/lib/libjansson.a") - - # ADD_LIBRARY(snappy STATIC IMPORTED) - # SET_PROPERTY(TARGET snappy PROPERTY IMPORTED_LOCATION "/opt/homebrew/opt/snappy/lib/libsnappy.a") - - # ADD_LIBRARY(avro STATIC IMPORTED) - # SET_PROPERTY(TARGET avro PROPERTY IMPORTED_LOCATION "/opt/homebrew/opt/avro-c/lib/libavro.a") - # TARGET_LINK_LIBRARIES(taosdump taos avro jansson snappy lzma z pthread) ADD_LIBRARY(argp STATIC IMPORTED) IF (CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64") SET_PROPERTY(TARGET argp PROPERTY IMPORTED_LOCATION "/opt/homebrew/opt/argp-standalone/lib/libargp.a") @@ -256,7 +228,7 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin INCLUDE_DIRECTORIES(/usr/local/include/) ENDIF () - TARGET_LINK_LIBRARIES(taosBenchmark taos pthread m toolscJson argp ${WEBSOCKET_LINK_FLAGS}) + TARGET_LINK_LIBRARIES(taosBenchmark taos pthread m toolscJson argp ) ELSE () EXECUTE_PROCESS ( COMMAND sh -c "awk -F= '/^ID=/{print $2}' /etc/os-release |tr -d '\n' | tr -d '\"'" @@ -307,11 +279,11 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin FIND_LIBRARY(LIBZ_LIBRARY z) MESSAGE(${LIBZ_LIBRARY}) - TARGET_LINK_LIBRARIES(taosdump taos avro jansson snappy stdc++ lzma atomic pthread $<$:${LIBZ_LIBRARY}> $<$:${ARGP_LIBRARY}> ${WEBSOCKET_LINK_FLAGS} ${GCC_COVERAGE_LINK_FLAGS}) - TARGET_LINK_LIBRARIES(taosBenchmark taos pthread m toolscJson $<$:${LIBZ_LIBRARY}> $<$:${ARGP_LIBRARY}> ${WEBSOCKET_LINK_FLAGS} ${GCC_COVERAGE_LINK_FLAGS}) + TARGET_LINK_LIBRARIES(taosdump taos avro jansson snappy stdc++ lzma atomic pthread $<$:${LIBZ_LIBRARY}> $<$:${ARGP_LIBRARY}> ${GCC_COVERAGE_LINK_FLAGS}) + TARGET_LINK_LIBRARIES(taosBenchmark taos pthread m toolscJson $<$:${LIBZ_LIBRARY}> $<$:${ARGP_LIBRARY}> ${GCC_COVERAGE_LINK_FLAGS}) ELSE() - TARGET_LINK_LIBRARIES(taosdump taos avro jansson snappy stdc++ lzma libz-static atomic pthread ${WEBSOCKET_LINK_FLAGS} ${GCC_COVERAGE_LINK_FLAGS}) - TARGET_LINK_LIBRARIES(taosBenchmark taos pthread m toolscJson ${WEBSOCKET_LINK_FLAGS} ${GCC_COVERAGE_LINK_FLAGS}) + TARGET_LINK_LIBRARIES(taosdump taos avro jansson snappy stdc++ lzma libz-static atomic pthread ${GCC_COVERAGE_LINK_FLAGS}) + TARGET_LINK_LIBRARIES(taosBenchmark taos pthread m toolscJson ${GCC_COVERAGE_LINK_FLAGS}) ENDIF() ENDIF () @@ -324,9 +296,9 @@ ELSE () SET(CMAKE_C_STANDARD 11) SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /utf-8") SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /utf-8") - ADD_EXECUTABLE(taosBenchmark benchMain.c benchLog.c benchTmq.c benchQuery.c benchCsv.c benchJsonOpt.c benchInsert.c benchInsertMix.c benchDataMix.c wrapDb.c benchData.c benchCommandOpt.c benchUtil.c benchUtilDs.c benchSys.c toolstime.c toolsString.c toolsSys.c toolsString.c) + ADD_EXECUTABLE(taosBenchmark benchMain.c benchLog.c benchTmq.c benchQuery.c benchCsv.c benchJsonOpt.c benchInsert.c benchInsertMix.c benchDataMix.c wrapDb.c benchData.c benchCommandOpt.c benchUtil.c ../../src/pub.c benchUtilDs.c benchSys.c toolstime.c toolsString.c toolsSys.c toolsString.c) - ADD_EXECUTABLE(taosdump taosdump.c dumpUtil.c wsdump.c toolsSys.c toolstime.c toolsDir.c toolsString.c) + 
ADD_EXECUTABLE(taosdump taosdump.c ../../src/pub.c dumpUtil.c toolsSys.c toolstime.c toolsDir.c toolsString.c) ADD_DEPENDENCIES(apache-avro tools-zlib) ADD_DEPENDENCIES(apache-avro deps-jansson) ADD_DEPENDENCIES(apache-avro deps-snappy) @@ -336,19 +308,6 @@ ELSE () ADD_DEPENDENCIES(taosdump apache-avro) ADD_DEPENDENCIES(taosBenchmark tools-zlib) - IF (${WEBSOCKET}) - INCLUDE_DIRECTORIES(/usr/local/include/) - SET(WEBSOCKET_LINK_FLAGS "taosws.lib") - IF (${CMAKE_PROJECT_NAME} STREQUAL "taos-tools") - MESSAGE("taosws.lib need to be installed first") - ELSE () - ADD_DEPENDENCIES(taosBenchmark taosws-rs) - ADD_DEPENDENCIES(taosdump taosws-rs) - ENDIF () - ELSE () - SET(WEBSOCKET_LINK_FLAGS "") - ENDIF () - target_include_directories( taosdump PUBLIC "${TD_SOURCE_DIR}/contrib/pthread" @@ -356,16 +315,12 @@ ELSE () ) IF (${TOOLS_BUILD_TYPE} MATCHES "Debug") - TARGET_LINK_LIBRARIES(taosdump taos avro jansson snappy pthread libargp.lib zlib ${WEBSOCKET_LINK_FLAGS}) + TARGET_LINK_LIBRARIES(taosdump taos avro jansson snappy pthread libargp.lib zlib ) ELSE () - #SET(CMAKE_C_FLAGS "/w /D_WIN32 /DWIN32 /Zi /D NDEBUG /MTd") - # SET(CMAKE_C_FLAGS "/permissive- /GS /GL /Gy /Zc:wchar_t /Zi /Gm- /O2 /Zc:inline /fp:precise /D \"RELEASE\" /D \"NDEBUG\" /D \"_CONSOLE\" /D \"_UNICODE\" /D \"UNICODE\" /errorReport:prompt /Zc:forScope /Gd /Oi /MTd /FC /EHsc /nologo /diagnostics:column") - # SET(CMAKE_CXX_FLAGS "/permissive- /GS /GL /Gy /Zc:wchar_t /Zi /Gm- /O2 /sdl /Zc:inline /fp:precise /D \"NDEBUG\" /D \"_CONSOLE\" /D \"_UNICODE\" /D \"UNICODE\" /errorReport:prompt /Zc:forScope /Gd /Oi /MTd /FC /EHsc /nologo /diagnostics:column") - #TARGET_LINK_LIBRARIES(taosdump taos avro jansson snappy pthread libargp.lib zlibstatic ${WEBSOCKET_LINK_FLAGS} msvcrt.lib ucrtd.lib) - TARGET_LINK_LIBRARIES(taosdump taos avro jansson snappy pthread libargp.lib zlibstatic ${WEBSOCKET_LINK_FLAGS}) + TARGET_LINK_LIBRARIES(taosdump taos avro jansson snappy pthread libargp.lib zlibstatic ) ENDIF () - TARGET_LINK_LIBRARIES(taosBenchmark taos msvcregex pthread toolscJson ${WEBSOCKET_LINK_FLAGS}) + TARGET_LINK_LIBRARIES(taosBenchmark taos msvcregex pthread toolscJson ) TARGET_LINK_LIBRARIES(taosBenchmark zlibstatic) diff --git a/tools/taos-tools/src/benchCommandOpt.c b/tools/taos-tools/src/benchCommandOpt.c index e2bb3129e1..65194ea02e 100644 --- a/tools/taos-tools/src/benchCommandOpt.c +++ b/tools/taos-tools/src/benchCommandOpt.c @@ -210,13 +210,11 @@ void initArgument() { g_arguments->test_mode = INSERT_TEST; g_arguments->demo_mode = true; g_arguments->host = NULL; - g_arguments->host_auto = true; - g_arguments->port = DEFAULT_PORT; + g_arguments->port = 0; g_arguments->port_inputted = false; - g_arguments->port_auto = true; g_arguments->telnet_tcp_port = TELNET_TCP_PORT; - g_arguments->user = TSDB_DEFAULT_USER; - g_arguments->password = TSDB_DEFAULT_PASS; + g_arguments->user = NULL; + g_arguments->password = NULL; g_arguments->answer_yes = 0; g_arguments->debug_print = 0; g_arguments->binwidth = DEFAULT_BINWIDTH; @@ -233,9 +231,6 @@ void initArgument() { g_arguments->chinese = false; g_arguments->aggr_func = 0; g_arguments->terminate = false; -#ifdef WEBSOCKET - g_arguments->timeout = 10; -#endif g_arguments->supplementInsert = false; g_arguments->startTimestamp = DEFAULT_START_TIME; @@ -244,10 +239,10 @@ void initArgument() { g_arguments->keep_trying = 0; g_arguments->trying_interval = 0; g_arguments->iface = TAOSC_IFACE; - g_arguments->rest_server_ver_major = -1; g_arguments->inputted_vgroups = -1; g_arguments->mistMode = false; + 
g_arguments->connMode = CONN_MODE_INVALID; initDatabase(); initStable(); @@ -257,29 +252,6 @@ void initArgument() { void modifyArgument() { SDataBase * database = benchArrayGet(g_arguments->databases, 0); SSuperTable *superTable = benchArrayGet(database->superTbls, 0); -#ifdef WEBSOCKET - if (!g_arguments->websocket) { -#endif - if (strlen(g_configDir) - && g_arguments->host_auto - && g_arguments->port_auto) { -#ifdef LINUX - wordexp_t full_path; - if (wordexp(g_configDir, &full_path, 0) != 0) { - errorPrint("Invalid path %s\n", g_configDir); - exit(EXIT_FAILURE); - } - taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); - wordfree(&full_path); -#else - taos_options(TSDB_OPTION_CONFIGDIR, g_configDir); -#endif - g_arguments->host = DEFAULT_HOST; - g_arguments->port = 0; - } -#ifdef WEBSOCKET - } -#endif superTable->startTimestamp = g_arguments->startTimestamp; @@ -332,10 +304,7 @@ void modifyArgument() { static void *queryStableAggrFunc(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; - TAOS *taos = NULL; - if (REST_IFACE != g_arguments->iface) { - taos = pThreadInfo->conn->taos; - } + TAOS *taos = pThreadInfo->conn->taos; #ifdef LINUX prctl(PR_SET_NAME, "queryStableAggrFunc"); #endif @@ -391,24 +360,18 @@ static void *queryStableAggrFunc(void *sarg) { } double t = (double)toolsGetTimestampUs(); int32_t code = -1; - if (REST_IFACE == g_arguments->iface) { - code = postProceSql(command, NULL, 0, REST_IFACE, - 0, g_arguments->port, 0, - pThreadInfo->sockfd, NULL); - } else { - TAOS_RES *res = taos_query(taos, command); - code = taos_errno(res); - if (code != 0) { - printErrCmdCodeStr(command, code, res); - free(command); - return NULL; - } - int count = 0; - while (taos_fetch_row(res) != NULL) { - count++; - } - taos_free_result(res); + TAOS_RES *res = taos_query(taos, command); + code = taos_errno(res); + if (code != 0) { + printErrCmdCodeStr(command, code, res); + free(command); + return NULL; } + int count = 0; + while (taos_fetch_row(res) != NULL) { + count++; + } + taos_free_result(res); t = toolsGetTimestampUs() - t; if (fp) { fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n", @@ -469,23 +432,17 @@ static void *queryNtableAggrFunc(void *sarg) { (uint64_t) DEFAULT_START_TIME); double t = (double)toolsGetTimestampUs(); int32_t code = -1; - if (REST_IFACE == g_arguments->iface) { - code = postProceSql(command, NULL, 0, REST_IFACE, - 0, g_arguments->port, 0, - pThreadInfo->sockfd, NULL); - } else { - TAOS_RES *res = taos_query(taos, command); - code = taos_errno(res); - if (code != 0) { - printErrCmdCodeStr(command, code, res); - free(command); - return NULL; - } - while (taos_fetch_row(res) != NULL) { - count++; - } - taos_free_result(res); + TAOS_RES *res = taos_query(taos, command); + code = taos_errno(res); + if (code != 0) { + printErrCmdCodeStr(command, code, res); + free(command); + return NULL; } + while (taos_fetch_row(res) != NULL) { + count++; + } + taos_free_result(res); t = toolsGetTimestampUs() - t; totalT += t; @@ -524,19 +481,11 @@ void queryAggrFunc() { return; } - if (REST_IFACE != g_arguments->iface) { - pThreadInfo->conn = initBenchConn(); - if (pThreadInfo->conn == NULL) { - errorPrint("%s() failed to init connection\n", __func__); - free(pThreadInfo); - return; - } - } else { - pThreadInfo->sockfd = createSockFd(); - if (pThreadInfo->sockfd < 0) { - free(pThreadInfo); - return; - } + pThreadInfo->conn = initBenchConn(); + if (pThreadInfo->conn == NULL) { + errorPrint("%s() failed to init connection\n", __func__); +
free(pThreadInfo); + return; } if (stbInfo->use_metric) { pthread_create(&read_id, NULL, queryStableAggrFunc, pThreadInfo); @@ -544,12 +493,7 @@ void queryAggrFunc() { pthread_create(&read_id, NULL, queryNtableAggrFunc, pThreadInfo); } pthread_join(read_id, NULL); - if (REST_IFACE != g_arguments->iface) { - closeBenchConn(pThreadInfo->conn); - } else { - if (pThreadInfo->sockfd) { - destroySockFd(pThreadInfo->sockfd); - } - } + + closeBenchConn(pThreadInfo->conn); free(pThreadInfo); } diff --git a/tools/taos-tools/src/benchData.c b/tools/taos-tools/src/benchData.c index 0925d1002c..1a49c7f75c 100644 --- a/tools/taos-tools/src/benchData.c +++ b/tools/taos-tools/src/benchData.c @@ -229,12 +229,13 @@ void rand_string(char *str, int size, bool chinese) { } // generate prepare sql -char* genPrepareSql(SSuperTable *stbInfo, char* tagData, uint64_t tableSeq) { +char* genPrepareSql(SSuperTable *stbInfo, char* tagData, uint64_t tableSeq, char *db) { int len = 0; char *prepare = benchCalloc(1, TSDB_MAX_ALLOWED_SQL_LEN, true); int n; char *tagQ = NULL; char *colQ = genQMark(stbInfo->cols->size); + char *colNames = NULL; bool tagQFree = false; if(tagData == NULL) { @@ -252,19 +253,30 @@ char* genPrepareSql(SSuperTable *stbInfo, char* tagData, uint64_t tableSeq) { } n = snprintf(prepare + len, TSDB_MAX_ALLOWED_SQL_LEN - len, - "INSERT INTO ? USING `%s` TAGS (%s) %s VALUES(?,%s)", - stbInfo->stbName, tagQ, ttl, colQ); + "INSERT INTO ? USING `%s`.`%s` TAGS (%s) %s VALUES(?,%s)", + db, stbInfo->stbName, tagQ, ttl, colQ); } else { - n = snprintf(prepare + len, TSDB_MAX_ALLOWED_SQL_LEN - len, - "INSERT INTO ? VALUES(?,%s)", colQ); + if (g_arguments->connMode == CONN_MODE_NATIVE) { + // native + n = snprintf(prepare + len, TSDB_MAX_ALLOWED_SQL_LEN - len, + "INSERT INTO ? VALUES(?,%s)", colQ); + } else { + // websocket + bool ntb = stbInfo->tags == NULL || stbInfo->tags->size == 0; // normal table + colNames = genColNames(stbInfo->cols, !ntb); + n = snprintf(prepare + len, TSDB_MAX_ALLOWED_SQL_LEN - len, + "INSERT INTO `%s`.`%s`(%s) VALUES(%s,%s)", db, stbInfo->stbName, colNames, + ntb ? "?" : "?,?", colQ); + } } len += n; - // free from genQMark - if(tagQFree) { + // free + if (tagQFree) { + tmfree(tagQ); } tmfree(colQ); + tmfree(colNames); // check valid if (g_arguments->prepared_rand < g_arguments->reqPerReq) { @@ -281,19 +293,20 @@ char* genPrepareSql(SSuperTable *stbInfo, char* tagData, uint64_t tableSeq) { return prepare; } -int prepareStmt(TAOS_STMT *stmt, SSuperTable *stbInfo, char* tagData, uint64_t tableSeq) { - char *prepare = genPrepareSql(stbInfo, tagData, tableSeq); +int prepareStmt(TAOS_STMT *stmt, SSuperTable *stbInfo, char* tagData, uint64_t tableSeq, char *db) { + char *prepare = genPrepareSql(stbInfo, tagData, tableSeq, db); if (taos_stmt_prepare(stmt, prepare, strlen(prepare))) { errorPrint("taos_stmt_prepare(%s) failed. errstr=%s\n", prepare, taos_stmt_errstr(stmt)); tmfree(prepare); return -1; } + debugPrint("succ call taos_stmt_prepare sql:%s\n", prepare); tmfree(prepare); return 0; } -int prepareStmt2(TAOS_STMT2 *stmt2, SSuperTable *stbInfo, char* tagData, uint64_t tableSeq) { - char *prepare = genPrepareSql(stbInfo, tagData, tableSeq); +int prepareStmt2(TAOS_STMT2 *stmt2, SSuperTable *stbInfo, char* tagData, uint64_t tableSeq, char *db) { + char *prepare = genPrepareSql(stbInfo, tagData, tableSeq, db); if (taos_stmt2_prepare(stmt2, prepare, strlen(prepare))) { errorPrint("taos_stmt2_prepare(%s) failed.
errstr=%s\n", prepare, taos_stmt2_error(stmt2)); tmfree(prepare); @@ -469,11 +482,11 @@ uint32_t accumulateRowLen(BArray *fields, int iface) { return len; } len += 1; - if (iface == SML_REST_IFACE || iface == SML_IFACE) { + if (iface == SML_IFACE) { len += SML_LINE_SQL_SYNTAX_OFFSET + strlen(field->name); } } - if (iface == SML_IFACE || iface == SML_REST_IFACE) { + if (iface == SML_IFACE) { len += 2 * TSDB_TABLE_NAME_LEN * 2 + SML_LINE_SQL_SYNTAX_OFFSET; } len += TIMESTAMP_BUFF_LEN; @@ -1802,7 +1815,6 @@ int generateRandData(SSuperTable *stbInfo, char *sampleDataBuf, int iface = stbInfo->iface; switch (iface) { case TAOSC_IFACE: - case REST_IFACE: return generateRandDataSQL(stbInfo, sampleDataBuf, bufLen, lenOfOneRow, fields, loop, tag); case STMT_IFACE: @@ -1817,7 +1829,6 @@ int generateRandData(SSuperTable *stbInfo, char *sampleDataBuf, bufLen, lenOfOneRow, fields, loop, tag); } case SML_IFACE: - case SML_REST_IFACE: return generateRandDataSml(stbInfo, sampleDataBuf, bufLen, lenOfOneRow, fields, loop, tag); default: @@ -1843,8 +1854,7 @@ int prepareSampleData(SDataBase* database, SSuperTable* stbInfo) { stbInfo->lenOfCols = accumulateRowLen(stbInfo->cols, stbInfo->iface); stbInfo->lenOfTags = accumulateRowLen(stbInfo->tags, stbInfo->iface); if (stbInfo->partialColNum != 0 - && ((stbInfo->iface == TAOSC_IFACE - || stbInfo->iface == REST_IFACE))) { + && stbInfo->iface == TAOSC_IFACE) { // check valid if(stbInfo->partialColFrom >= stbInfo->cols->size) { stbInfo->partialColFrom = 0; @@ -2003,12 +2013,6 @@ int prepareSampleData(SDataBase* database, SSuperTable* stbInfo) { } } - if (0 != convertServAddr( - stbInfo->iface, - stbInfo->tcpTransfer, - stbInfo->lineProtocol)) { - return -1; - } return 0; } diff --git a/tools/taos-tools/src/benchInsert.c b/tools/taos-tools/src/benchInsert.c index 6108f4990c..af084e482d 100644 --- a/tools/taos-tools/src/benchInsert.c +++ b/tools/taos-tools/src/benchInsert.c @@ -39,32 +39,6 @@ TAOS_STMT2* initStmt2(TAOS* taos, bool single); tmfree(infos); \ } while (0) \ -static int getSuperTableFromServerRest( - SDataBase* database, SSuperTable* stbInfo, char *command) { - - // TODO(zero): it will create super table based on this error code. 
- return TSDB_CODE_NOT_FOUND; - // TODO(me): finish full implementation -#if 0 - int sockfd = createSockFd(); - if (sockfd < 0) { - return -1; - } - - int code = postProceSql(command, - database->dbName, - database->precision, - REST_IFACE, - 0, - g_arguments->port, - false, - sockfd, - NULL); - - destroySockFd(sockfd); -#endif // 0 -} - static int getSuperTableFromServerTaosc( SDataBase *database, SSuperTable *stbInfo, char *command) { TAOS_RES *res; @@ -161,90 +135,41 @@ static int getSuperTableFromServerTaosc( static int getSuperTableFromServer(SDataBase* database, SSuperTable* stbInfo) { -#ifdef WEBSOCKET - if (g_arguments->websocket) { - return 0; - } -#endif - int ret = 0; char command[SHORT_1K_SQL_BUFF_LEN] = "\0"; snprintf(command, SHORT_1K_SQL_BUFF_LEN, "DESCRIBE `%s`.`%s`", database->dbName, stbInfo->stbName); - if (REST_IFACE == stbInfo->iface) { - ret = getSuperTableFromServerRest(database, stbInfo, command); - } else { - ret = getSuperTableFromServerTaosc(database, stbInfo, command); - } - - return ret; + return getSuperTableFromServerTaosc(database, stbInfo, command); } static int queryDbExec(SDataBase *database, SSuperTable *stbInfo, char *command) { int ret = 0; - if (isRest(stbInfo->iface)) { - if (0 != convertServAddr(stbInfo->iface, false, 1)) { - errorPrint("%s", "Failed to convert server address\n"); - return -1; - } - int sockfd = createSockFd(); - if (sockfd < 0) { - ret = -1; - } else { - ret = queryDbExecRest(command, - database->dbName, - database->precision, - stbInfo->iface, - stbInfo->lineProtocol, - stbInfo->tcpTransfer, - sockfd); - destroySockFd(sockfd); - } + SBenchConn* conn = initBenchConn(); + if (NULL == conn) { + ret = -1; } else { - SBenchConn* conn = initBenchConn(); - if (NULL == conn) { - ret = -1; - } else { + ret = queryDbExecCall(conn, command); + int32_t trying = g_arguments->keep_trying; + while (ret && trying) { + infoPrint("will sleep %"PRIu32" milliseconds then re-execute command: %s\n", + g_arguments->trying_interval, command); + toolsMsleep(g_arguments->trying_interval); ret = queryDbExecCall(conn, command); - int32_t trying = g_arguments->keep_trying; - while (ret && trying) { - infoPrint("will sleep %"PRIu32" milliseconds then re-execute command: %s\n", - g_arguments->trying_interval, command); - toolsMsleep(g_arguments->trying_interval); - ret = queryDbExecCall(conn, command); - if (trying != -1) { - trying--; - } + if (trying != -1) { + trying--; } - if (0 != ret) { - ret = -1; - } - closeBenchConn(conn); } + if (0 != ret) { + ret = -1; + } + closeBenchConn(conn); } return ret; } -#ifdef WEBSOCKET -static void dropSuperTable(SDataBase* database, SSuperTable* stbInfo) { - char command[SHORT_1K_SQL_BUFF_LEN] = "\0"; - snprintf(command, sizeof(command), - g_arguments->escape_character - ? "DROP TABLE IF EXISTS `%s`.`%s`" - : "DROP TABLE IF EXISTS %s.%s", - database->dbName, - stbInfo->stbName); - - infoPrint("drop stable: <%s>\n", command); - queryDbExec(database, stbInfo, command); - - return; -} -#endif // WEBSOCKET - int getCompressStr(Field* col, char* buf) { int pos = 0; if(strlen(col->encode) > 0) { @@ -521,80 +446,6 @@ int32_t getVgroupsNative(SBenchConn *conn, SDataBase *database) { return vgroups; } -#ifdef WEBSOCKET -int32_t getVgroupsWS(SBenchConn *conn, SDataBase *database) { - int vgroups = 0; - char sql[128] = "\0"; - snprintf(sql, sizeof(sql), - g_arguments->escape_character - ? 
"SHOW `%s`.VGROUPS" - : "SHOW %s.VGROUPS", - database->dbName); - - // query - WS_RES *res = ws_query_timeout(conn->taos_ws, sql, g_arguments->timeout); - int32_t code = ws_errno(res); - if (code != 0) { - // failed - errorPrint("Failed ws_query_timeout <%s>, code: 0x%08x, reason: %s\n", - sql, code, ws_errstr(res)); - ws_free_result(res); - return 0; - } - - // fetch - WS_ROW row; - database->vgArray = benchArrayInit(8, sizeof(SVGroup)); - while ( (row = ws_fetch_row(res)) && !g_arguments->terminate) { - SVGroup *vg = benchCalloc(1, sizeof(SVGroup), true); - vg->vgId = *(int32_t *)row[0]; - benchArrayPush(database->vgArray, vg); - vgroups++; - debugPrint(" ws fetch vgroups vgid=%d cnt=%d \n", vg->vgId, vgroups); - } - ws_free_result(res); - database->vgroups = vgroups; - - // return count - return vgroups; -} - -/* -int32_t getTableVgidWS(SBenchConn *conn, char *db, char *tb, int32_t *vgId) { - char sql[128] = "\0"; - snprintf(sql, sizeof(sql), - "select vgroup_id from information_schema.ins_tables where db_name='%s' and table_name='%s';", - db, tb); - // query - WS_RES *res = ws_query_timeout(conn->taos_ws, sql, g_arguments->timeout); - int32_t code = ws_errno(res); - if (code != 0) { - // failed - errorPrint("Failed ws_query_timeout <%s>, code: 0x%08x, reason: %s\n", - sql, code, ws_errstr(res)); - ws_free_result(res); - return code; - } - - // fetch - WS_ROW row; - while ( (row = ws_fetch_row(res)) && !g_arguments->terminate) { - *vgId = *(int32_t *)row[0]; - debugPrint(" getTableVgidWS table:%s vgid=%d\n", tb, *vgId); - break; - } - ws_free_result(res); - - if(*vgId == 0) { - return -1; - } else { - return 0; - } -} -*/ - -#endif - int32_t toolsGetDefaultVGroups() { int32_t cores = toolsGetNumberOfCores(); if (cores < 3 ) { @@ -616,6 +467,7 @@ int32_t toolsGetDefaultVGroups() { } } + int geneDbCreateCmd(SDataBase *database, char *command, int remainVnodes) { int dataLen = 0; int n; @@ -701,70 +553,6 @@ int geneDbCreateCmd(SDataBase *database, char *command, int remainVnodes) { return dataLen; } -int createDatabaseRest(SDataBase* database) { - int32_t code = 0; - char command[SHORT_1K_SQL_BUFF_LEN] = "\0"; - - int sockfd = createSockFd(); - if (sockfd < 0) { - return -1; - } - - // drop exist database - snprintf(command, SHORT_1K_SQL_BUFF_LEN, - g_arguments->escape_character - ? 
"DROP DATABASE IF EXISTS `%s`;" - : "DROP DATABASE IF EXISTS %s;", - database->dbName); - code = postProceSql(command, - database->dbName, - database->precision, - REST_IFACE, - 0, - g_arguments->port, - false, - sockfd, - NULL); - if (code != 0) { - errorPrint("Failed to drop database %s\n", database->dbName); - } - - // create database - int remainVnodes = INT_MAX; - geneDbCreateCmd(database, command, remainVnodes); - code = postProceSql(command, - database->dbName, - database->precision, - REST_IFACE, - 0, - g_arguments->port, - false, - sockfd, - NULL); - int32_t trying = g_arguments->keep_trying; - while (code && trying) { - infoPrint("will sleep %"PRIu32" milliseconds then " - "re-create database %s\n", - g_arguments->trying_interval, database->dbName); - toolsMsleep(g_arguments->trying_interval); - code = postProceSql(command, - database->dbName, - database->precision, - REST_IFACE, - 0, - g_arguments->port, - false, - sockfd, - NULL); - if (trying != -1) { - trying--; - } - } - - destroySockFd(sockfd); - return code; -} - int32_t getRemainVnodes(SBenchConn *conn) { int remainVnodes = 0; char command[SHORT_1K_SQL_BUFF_LEN] = "SHOW DNODES"; @@ -786,7 +574,7 @@ int32_t getRemainVnodes(SBenchConn *conn) { return remainVnodes; } -int createDatabaseTaosc(SDataBase* database) { +int createDatabase(SDataBase* database) { char command[SHORT_1K_SQL_BUFF_LEN] = "\0"; // conn SBenchConn* conn = initBenchConn(); @@ -817,22 +605,17 @@ int createDatabaseTaosc(SDataBase* database) { "DROP DATABASE IF EXISTS %s;", database->dbName); if (0 != queryDbExecCall(conn, command)) { -#ifdef WEBSOCKET - if (g_arguments->websocket) { + if (g_arguments->dsn) { + // websocket warnPrint("%s", "TDengine cloud normal users have no privilege " "to drop database! DROP DATABASE failure is ignored!\n"); - } else { -#endif - closeBenchConn(conn); - return -1; -#ifdef WEBSOCKET } -#endif + closeBenchConn(conn); + return -1; } // get remain vgroups int remainVnodes = INT_MAX; -#ifndef WEBSOCKET if (g_arguments->bind_vgroup) { remainVnodes = getRemainVnodes(conn); if (0 >= remainVnodes) { @@ -841,7 +624,6 @@ int createDatabaseTaosc(SDataBase* database) { return -1; } } -#endif // generate and execute create database sql geneDbCreateCmd(database, command, remainVnodes); @@ -859,21 +641,15 @@ int createDatabaseTaosc(SDataBase* database) { } if (code) { -#ifdef WEBSOCKET - if (g_arguments->websocket) { + if (g_arguments->dsn) { warnPrint("%s", "TDengine cloud normal users have no privilege " "to create database! 
CREATE DATABASE " "failure is ignored!\n"); - } else { -#endif + } - closeBenchConn(conn); - errorPrint("\ncreate database %s failed!\n\n", - database->dbName); - return -1; -#ifdef WEBSOCKET - } -#endif + closeBenchConn(conn); + errorPrint("\ncreate database %s failed!\n\n", database->dbName); + return -1; } infoPrint("command to create database: <%s>\n", command); @@ -881,15 +657,7 @@ int createDatabaseTaosc(SDataBase* database) { // malloc and get vgroup if (g_arguments->bind_vgroup) { int32_t vgroups; -#ifdef WEBSOCKET - if (g_arguments->websocket) { - vgroups = getVgroupsWS(conn, database); - } else { -#endif - vgroups = getVgroupsNative(conn, database); -#ifdef WEBSOCKET - } -#endif + vgroups = getVgroupsNative(conn, database); if (vgroups <= 0) { closeBenchConn(conn); errorPrint("Database %s's vgroups is %d\n", @@ -902,28 +670,6 @@ int createDatabaseTaosc(SDataBase* database) { return 0; } -int createDatabase(SDataBase* database) { - int ret = 0; - if (REST_IFACE == g_arguments->iface || SML_REST_IFACE == g_arguments->iface) { - ret = createDatabaseRest(database); - } else { - ret = createDatabaseTaosc(database); - } -#if 0 -#ifdef LINUX - infoPrint("%s() LN%d, ret: %d\n", __func__, __LINE__, ret); - sleep(10); - infoPrint("%s() LN%d, ret: %d\n", __func__, __LINE__, ret); -#elif defined(DARWIN) - sleep(2); -#else - Sleep(2); -#endif -#endif - - return ret; -} - static int generateChildTblName(int len, char *buffer, SDataBase *database, SSuperTable *stbInfo, uint64_t tableSeq, char* tagData, int i, char *ttl) { @@ -1072,26 +818,16 @@ static void *createTable(void *sarg) { int ret = 0; debugPrint("thread[%d] creating table: %s\n", pThreadInfo->threadID, pThreadInfo->buffer); - if (REST_IFACE == stbInfo->iface) { - ret = queryDbExecRest(pThreadInfo->buffer, - database->dbName, - database->precision, - stbInfo->iface, - stbInfo->lineProtocol, - stbInfo->tcpTransfer, - pThreadInfo->sockfd); - } else { + ret = queryDbExecCall(pThreadInfo->conn, pThreadInfo->buffer); + int32_t trying = g_arguments->keep_trying; + while (ret && trying) { + infoPrint("will sleep %"PRIu32" milliseconds then re-create " + "table %s\n", + g_arguments->trying_interval, pThreadInfo->buffer); + toolsMsleep(g_arguments->trying_interval); ret = queryDbExecCall(pThreadInfo->conn, pThreadInfo->buffer); - int32_t trying = g_arguments->keep_trying; - while (ret && trying) { - infoPrint("will sleep %"PRIu32" milliseconds then re-create " - "table %s\n", - g_arguments->trying_interval, pThreadInfo->buffer); - toolsMsleep(g_arguments->trying_interval); - ret = queryDbExecCall(pThreadInfo->conn, pThreadInfo->buffer); - if (trying != -1) { - trying--; - } + if (trying != -1) { + trying--; } } @@ -1123,17 +859,7 @@ static void *createTable(void *sarg) { int ret = 0; debugPrint("thread[%d] creating table: %s\n", pThreadInfo->threadID, pThreadInfo->buffer); - if (REST_IFACE == stbInfo->iface) { - ret = queryDbExecRest(pThreadInfo->buffer, - database->dbName, - database->precision, - stbInfo->iface, - stbInfo->lineProtocol, - stbInfo->tcpTransfer, - pThreadInfo->sockfd); - } else { - ret = queryDbExecCall(pThreadInfo->conn, pThreadInfo->buffer); - } + ret = queryDbExecCall(pThreadInfo->conn, pThreadInfo->buffer); if (0 != ret) { g_fail = true; goto create_table_end; @@ -1188,17 +914,9 @@ static int startMultiThreadCreateChildTable(SDataBase* database, SSuperTable* st pThreadInfo->threadID = i; pThreadInfo->stbInfo = stbInfo; pThreadInfo->dbInfo = database; - if (REST_IFACE == stbInfo->iface) { - int sockfd = createSockFd(); - if 
(sockfd < 0) { - FREE_PIDS_INFOS_RETURN_MINUS_1(); - } - pThreadInfo->sockfd = sockfd; - } else { - pThreadInfo->conn = initBenchConn(); - if (NULL == pThreadInfo->conn) { - goto over; - } + pThreadInfo->conn = initBenchConn(); + if (NULL == pThreadInfo->conn) { + goto over; } pThreadInfo->start_table_from = tableFrom; pThreadInfo->ntables = i < mod ? div + 1 : div; @@ -1221,7 +939,7 @@ static int startMultiThreadCreateChildTable(SDataBase* database, SSuperTable* st threadInfo *pThreadInfo = infos + i; g_arguments->actualChildTables += pThreadInfo->tables_created; - if ((REST_IFACE != stbInfo->iface) && pThreadInfo->conn) { + if (pThreadInfo->conn) { closeBenchConn(pThreadInfo->conn); } } @@ -1254,8 +972,7 @@ static int createChildTables() { for (int j = 0; (j < database->superTbls->size && !g_arguments->terminate); j++) { SSuperTable * stbInfo = benchArrayGet(database->superTbls, j); - if (stbInfo->autoTblCreating || stbInfo->iface == SML_IFACE - || stbInfo->iface == SML_REST_IFACE) { + if (stbInfo->autoTblCreating || stbInfo->iface == SML_IFACE) { g_arguments->autoCreatedChildTables += stbInfo->childTblCount; continue; @@ -1441,37 +1158,6 @@ int32_t execInsert(threadInfo *pThreadInfo, uint32_t k, int64_t *delay3) { } } break; - - case REST_IFACE: - debugPrint("buffer: %s\n", pThreadInfo->buffer); - code = postProceSql(pThreadInfo->buffer, - database->dbName, - database->precision, - stbInfo->iface, - stbInfo->lineProtocol, - g_arguments->port, - stbInfo->tcpTransfer, - pThreadInfo->sockfd, - pThreadInfo->filePath); - while (code && trying && !g_arguments->terminate) { - infoPrint("will sleep %"PRIu32" milliseconds then re-insert\n", - trying_interval); - toolsMsleep(trying_interval); - code = postProceSql(pThreadInfo->buffer, - database->dbName, - database->precision, - stbInfo->iface, - stbInfo->lineProtocol, - g_arguments->port, - stbInfo->tcpTransfer, - pThreadInfo->sockfd, - pThreadInfo->filePath); - if (trying != -1) { - trying--; - } - } - break; - case STMT_IFACE: // add batch if(!stbInfo->autoTblCreating) { @@ -1552,54 +1238,6 @@ int32_t execInsert(threadInfo *pThreadInfo, uint32_t k, int64_t *delay3) { } taos_free_result(res); break; - - case SML_REST_IFACE: { - if (TSDB_SML_JSON_PROTOCOL == protocol - || SML_JSON_TAOS_FORMAT == protocol) { - code = postProceSql(pThreadInfo->lines[0], database->dbName, - database->precision, stbInfo->iface, - protocol, g_arguments->port, - stbInfo->tcpTransfer, - pThreadInfo->sockfd, pThreadInfo->filePath); - } else { - int len = 0; - for (int i = 0; i < k; i++) { - if (strlen(pThreadInfo->lines[i]) != 0) { - int n; - if (TSDB_SML_TELNET_PROTOCOL == protocol - && stbInfo->tcpTransfer) { - n = snprintf(pThreadInfo->buffer + len, - TSDB_MAX_ALLOWED_SQL_LEN - len, - "put %s\n", pThreadInfo->lines[i]); - } else { - n = snprintf(pThreadInfo->buffer + len, - TSDB_MAX_ALLOWED_SQL_LEN - len, - "%s\n", - pThreadInfo->lines[i]); - } - if (n < 0 || n >= TSDB_MAX_ALLOWED_SQL_LEN - len) { - errorPrint("%s() LN%d snprintf overflow on %d\n", - __func__, __LINE__, i); - break; - } else { - len += n; - } - } else { - break; - } - } - if (g_arguments->terminate) { - break; - } - code = postProceSql(pThreadInfo->buffer, database->dbName, - database->precision, - stbInfo->iface, protocol, - g_arguments->port, - stbInfo->tcpTransfer, - pThreadInfo->sockfd, pThreadInfo->filePath); - } - break; - } } return code; } @@ -1622,29 +1260,20 @@ static int smartContinueIfFail(threadInfo *pThreadInfo, stbInfo->stbName, tagData + i * stbInfo->lenOfTags, ttl); 
debugPrint("creating table: %s\n", buffer); - int ret; - if (REST_IFACE == stbInfo->iface) { - ret = queryDbExecRest(buffer, - database->dbName, - database->precision, - stbInfo->iface, - stbInfo->lineProtocol, - stbInfo->tcpTransfer, - pThreadInfo->sockfd); - } else { + + int32_t ret = queryDbExecCall(pThreadInfo->conn, buffer); + int32_t trying = g_arguments->keep_trying; + while (ret && trying) { + infoPrint("will sleep %"PRIu32" milliseconds then " + "re-create table %s\n", + g_arguments->trying_interval, buffer); + toolsMsleep(g_arguments->trying_interval); ret = queryDbExecCall(pThreadInfo->conn, buffer); - int32_t trying = g_arguments->keep_trying; - while (ret && trying) { - infoPrint("will sleep %"PRIu32" milliseconds then " - "re-create table %s\n", - g_arguments->trying_interval, buffer); - toolsMsleep(g_arguments->trying_interval); - ret = queryDbExecCall(pThreadInfo->conn, buffer); - if (trying != -1) { - trying--; - } + if (trying != -1) { + trying--; } } + tmfree(buffer); return ret; @@ -1791,7 +1420,7 @@ int32_t reConnectStmt2(threadInfo * pThreadInfo, int32_t w) { } // prepare - code = prepareStmt2(pThreadInfo->conn->stmt2, pThreadInfo->stbInfo, NULL, w); + code = prepareStmt2(pThreadInfo->conn->stmt2, pThreadInfo->stbInfo, NULL, w, pThreadInfo->dbInfo->dbName); if (code != 0) { return code; } @@ -1932,14 +1561,14 @@ static void *syncWriteInterlace(void *sarg) { // not auto create table call once if(stbInfo->iface == STMT_IFACE && !oldInitStmt) { debugPrint("call prepareStmt for stable:%s\n", stbInfo->stbName); - if (prepareStmt(pThreadInfo->conn->stmt, stbInfo, tagData, w)) { + if (prepareStmt(pThreadInfo->conn->stmt, stbInfo, tagData, w, database->dbName)) { g_fail = true; goto free_of_interlace; } } else if (stbInfo->iface == STMT2_IFACE) { // only prepare once - if (prepareStmt2(pThreadInfo->conn->stmt2, stbInfo, NULL, w)) { + if (prepareStmt2(pThreadInfo->conn->stmt2, stbInfo, NULL, w, database->dbName)) { g_fail = true; goto free_of_interlace; } @@ -1980,7 +1609,6 @@ static void *syncWriteInterlace(void *sarg) { snprintf(ttl, SMALL_BUFF_LEN, "TTL %d", stbInfo->ttl); } switch (stbInfo->iface) { - case REST_IFACE: case TAOSC_IFACE: { char escapedTbName[TSDB_TABLE_NAME_LEN+2] = "\0"; if (g_arguments->escape_character) { @@ -2123,7 +1751,7 @@ static void *syncWriteInterlace(void *sarg) { // old must call prepareStmt for each table if (oldInitStmt) { debugPrint("call prepareStmt for stable:%s\n", stbInfo->stbName); - if (prepareStmt(pThreadInfo->conn->stmt, stbInfo, tagData, w)) { + if (prepareStmt(pThreadInfo->conn->stmt, stbInfo, tagData, w, database->dbName)) { g_fail = true; goto free_of_interlace; } @@ -2208,7 +1836,6 @@ static void *syncWriteInterlace(void *sarg) { break; } - case SML_REST_IFACE: case SML_IFACE: { int protocol = stbInfo->lineProtocol; for (int64_t j = 0; j < interlaceRows; j++) { @@ -2341,15 +1968,11 @@ static void *syncWriteInterlace(void *sarg) { int protocol = stbInfo->lineProtocol; switch (stbInfo->iface) { case TAOSC_IFACE: - case REST_IFACE: debugPrint("pThreadInfo->buffer: %s\n", pThreadInfo->buffer); free_ds(&pThreadInfo->buffer); pThreadInfo->buffer = new_ds(0); break; - case SML_REST_IFACE: - memset(pThreadInfo->buffer, 0, - g_arguments->reqPerReq * (pThreadInfo->max_sql_len + 1)); case SML_IFACE: if (TSDB_SML_JSON_PROTOCOL == protocol || SML_JSON_TAOS_FORMAT == protocol) { @@ -2893,7 +2516,7 @@ void *syncWriteProgressive(void *sarg) { char* tagData = NULL; bool stmt = (stbInfo->iface == STMT_IFACE || stbInfo->iface == STMT2_IFACE) && 
stbInfo->autoTblCreating; bool smart = SMART_IF_FAILED == stbInfo->continueIfFail; - bool acreate = (stbInfo->iface == TAOSC_IFACE || stbInfo->iface == REST_IFACE) && stbInfo->autoTblCreating; + bool acreate = stbInfo->iface == TAOSC_IFACE && stbInfo->autoTblCreating; int w = 0; if (stmt || smart || acreate) { csvFile = openTagCsv(stbInfo); @@ -2903,13 +2526,13 @@ void *syncWriteProgressive(void *sarg) { bool oldInitStmt = stbInfo->autoTblCreating; // stmt. not auto table create call on stmt if (stbInfo->iface == STMT_IFACE && !oldInitStmt) { - if (prepareStmt(pThreadInfo->conn->stmt, stbInfo, tagData, w)) { + if (prepareStmt(pThreadInfo->conn->stmt, stbInfo, tagData, w, database->dbName)) { g_fail = true; goto free_of_progressive; } } else if (stbInfo->iface == STMT2_IFACE && !stbInfo->autoTblCreating) { - if (prepareStmt2(pThreadInfo->conn->stmt2, stbInfo, tagData, w)) { + if (prepareStmt2(pThreadInfo->conn->stmt2, stbInfo, tagData, w, database->dbName)) { g_fail = true; goto free_of_progressive; } @@ -2957,13 +2580,13 @@ void *syncWriteProgressive(void *sarg) { // old init stmt must call for each table if (stbInfo->iface == STMT_IFACE && oldInitStmt) { - if (prepareStmt(pThreadInfo->conn->stmt, stbInfo, tagData, w)) { + if (prepareStmt(pThreadInfo->conn->stmt, stbInfo, tagData, w, database->dbName)) { g_fail = true; goto free_of_progressive; } } else if (stbInfo->iface == STMT2_IFACE && stbInfo->autoTblCreating) { - if (prepareStmt2(pThreadInfo->conn->stmt2, stbInfo, tagData, w)) { + if (prepareStmt2(pThreadInfo->conn->stmt2, stbInfo, tagData, w, database->dbName)) { g_fail = true; goto free_of_progressive; } @@ -2988,7 +2611,6 @@ void *syncWriteProgressive(void *sarg) { int32_t generated = 0; switch (stbInfo->iface) { case TAOSC_IFACE: - case REST_IFACE: generated = prepareProgressDataSql( pThreadInfo, childTbl, @@ -3010,7 +2632,6 @@ void *syncWriteProgressive(void *sarg) { &delay3, &startTs, &endTs, w); break; } - case SML_REST_IFACE: case SML_IFACE: generated = prepareProgressDataSml( pThreadInfo, @@ -3116,14 +2737,9 @@ void *syncWriteProgressive(void *sarg) { } int protocol = stbInfo->lineProtocol; switch (stbInfo->iface) { - case REST_IFACE: case TAOSC_IFACE: memset(pThreadInfo->buffer, 0, pThreadInfo->max_sql_len); break; - case SML_REST_IFACE: - memset(pThreadInfo->buffer, 0, - g_arguments->reqPerReq * - (pThreadInfo->max_sql_len + 1)); case SML_IFACE: if (TSDB_SML_JSON_PROTOCOL == protocol) { memset(pThreadInfo->lines[0], 0, @@ -3660,8 +3276,7 @@ static int64_t fillChildTblName(SDataBase *database, SSuperTable *stbInfo) { snprintf(childName, TSDB_TABLE_NAME_LEN, "%s", stbInfo->stbName); stbInfo->childTblArray[0]->name = strdup(childName); - } else if ((stbInfo->iface != SML_IFACE - && stbInfo->iface != SML_REST_IFACE) + } else if ((stbInfo->iface != SML_IFACE) && stbInfo->childTblExists) { ntables = fillChildTblNameImp(database, stbInfo); } else { @@ -3888,20 +3503,6 @@ int32_t initInsertThread(SDataBase* database, SSuperTable* stbInfo, int32_t nthr // init conn pThreadInfo->delayList = benchArrayInit(1, sizeof(int64_t)); switch (stbInfo->iface) { - // rest - case REST_IFACE: { - if (stbInfo->interlaceRows > 0) { - pThreadInfo->buffer = new_ds(0); - } else { - pThreadInfo->buffer = benchCalloc(1, TSDB_MAX_ALLOWED_SQL_LEN, true); - } - int sockfd = createSockFd(); - if (sockfd < 0) { - goto END; - } - pThreadInfo->sockfd = sockfd; - break; - } // stmt & stmt2 init case STMT_IFACE: case STMT2_IFACE: { @@ -3965,14 +3566,6 @@ int32_t initInsertThread(SDataBase* database, 
SSuperTable* stbInfo, int32_t nthr break; } - // sml rest - case SML_REST_IFACE: { - int sockfd = createSockFd(); - if (sockfd < 0) { - goto END; - } - pThreadInfo->sockfd = sockfd; - } // sml case SML_IFACE: { if (stbInfo->iface == SML_IFACE) { @@ -3987,9 +3580,6 @@ int32_t initInsertThread(SDataBase* database, SSuperTable* stbInfo, int32_t nthr } } pThreadInfo->max_sql_len = stbInfo->lenOfCols + stbInfo->lenOfTags; - if (stbInfo->iface == SML_REST_IFACE) { - pThreadInfo->buffer = benchCalloc(1, g_arguments->reqPerReq * (1 + pThreadInfo->max_sql_len), true); - } int protocol = stbInfo->lineProtocol; if (TSDB_SML_JSON_PROTOCOL != protocol && SML_JSON_TAOS_FORMAT != protocol) { pThreadInfo->sml_tags = (char **)benchCalloc(pThreadInfo->ntables, sizeof(char *), true); @@ -4202,22 +3792,6 @@ int32_t exitInsertThread(SDataBase* database, SSuperTable* stbInfo, int32_t nthr // close conn int protocol = stbInfo->lineProtocol; switch (stbInfo->iface) { - case REST_IFACE: - if (g_arguments->terminate) - toolsMsleep(100); - destroySockFd(pThreadInfo->sockfd); - if (stbInfo->interlaceRows > 0) { - free_ds(&pThreadInfo->buffer); - } else { - tmfree(pThreadInfo->buffer); - pThreadInfo->buffer = NULL; - } - break; - case SML_REST_IFACE: - if (g_arguments->terminate) - toolsMsleep(100); - tmfree(pThreadInfo->buffer); - // on-purpose no break here case SML_IFACE: if (TSDB_SML_JSON_PROTOCOL != protocol && SML_JSON_TAOS_FORMAT != protocol) { @@ -4352,7 +3926,7 @@ int32_t exitInsertThread(SDataBase* database, SSuperTable* stbInfo, int32_t nthr } static int startMultiThreadInsertData(SDataBase* database, SSuperTable* stbInfo) { - if ((stbInfo->iface == SML_IFACE || stbInfo->iface == SML_REST_IFACE) + if ((stbInfo->iface == SML_IFACE) && !stbInfo->use_metric) { errorPrint("%s", "schemaless cannot work without stable\n"); return -1; @@ -4619,26 +4193,9 @@ int insertTestProcess() { //loop create database for (int i = 0; i < g_arguments->databases->size; i++) { - if (isRest(g_arguments->iface)) { - if (0 != convertServAddr(g_arguments->iface, - false, - 1)) { - return -1; - } - } SDataBase * database = benchArrayGet(g_arguments->databases, i); if (database->drop && !(g_arguments->supplementInsert)) { - if (database->superTbls && database->superTbls->size > 0) { - SSuperTable * stbInfo = benchArrayGet(database->superTbls, 0); - if (stbInfo && isRest(stbInfo->iface)) { - if (0 != convertServAddr(stbInfo->iface, - stbInfo->tcpTransfer, - stbInfo->lineProtocol)) { - return -1; - } - } - } if (createDatabase(database)) { errorPrint("failed to create database (%s)\n", database->dbName); @@ -4649,16 +4206,7 @@ int insertTestProcess() { // database already exist, get vgroups from server SBenchConn* conn = initBenchConn(); if (conn) { - int32_t vgroups; -#ifdef WEBSOCKET - if (g_arguments->websocket) { - vgroups = getVgroupsWS(conn, database); - } else { -#endif - vgroups = getVgroupsNative(conn, database); -#ifdef WEBSOCKET - } -#endif + int32_t vgroups = getVgroupsNative(conn, database); if (vgroups <=0) { closeBenchConn(conn); errorPrint("Database %s's vgroups is zero , db exist case.\n", database->dbName); @@ -4677,13 +4225,7 @@ int insertTestProcess() { for (int j = 0; j < database->superTbls->size; j++) { SSuperTable * stbInfo = benchArrayGet(database->superTbls, j); if (stbInfo->iface != SML_IFACE - && stbInfo->iface != SML_REST_IFACE && !stbInfo->childTblExists) { -#ifdef WEBSOCKET - if (g_arguments->websocket && !g_arguments->supplementInsert) { - dropSuperTable(database, stbInfo); - } -#endif int code = 
getSuperTableFromServer(database, stbInfo); if (code == TSDB_CODE_FAILED) { return -1; diff --git a/tools/taos-tools/src/benchJsonOpt.c b/tools/taos-tools/src/benchJsonOpt.c index 952bef3e8b..90275e95ca 100644 --- a/tools/taos-tools/src/benchJsonOpt.c +++ b/tools/taos-tools/src/benchJsonOpt.c @@ -820,16 +820,12 @@ void parseStringToIntArray(char *str, BArray *arr) { // get interface name uint16_t getInterface(char *name) { uint16_t iface = TAOSC_IFACE; - if (0 == strcasecmp(name, "rest")) { - iface = REST_IFACE; - } else if (0 == strcasecmp(name, "stmt")) { + if (0 == strcasecmp(name, "stmt")) { iface = STMT_IFACE; } else if (0 == strcasecmp(name, "stmt2")) { iface = STMT2_IFACE; } else if (0 == strcasecmp(name, "sml")) { iface = SML_IFACE; - } else if (0 == strcasecmp(name, "sml-rest")) { - iface = SML_REST_IFACE; } return iface; @@ -969,30 +965,7 @@ static int getStableInfo(tools_cJSON *dbinfos, int index) { g_arguments->reqPerReq, SML_MAX_BATCH); return -1; } - } else if (isRest(superTable->iface)) { - if (g_arguments->reqPerReq > SML_MAX_BATCH) { - errorPrint("reqPerReq (%u) larger than maximum (%d)\n", - g_arguments->reqPerReq, SML_MAX_BATCH); - return -1; - } - if (0 != convertServAddr(REST_IFACE, - false, - 1)) { - errorPrint("%s", "Failed to convert server address\n"); - return -1; - } - encodeAuthBase64(); - g_arguments->rest_server_ver_major = - getServerVersionRest(g_arguments->port + TSDB_PORT_HTTP); } -#ifdef WEBSOCKET - if (g_arguments->websocket) { - infoPrint("Since WebSocket interface is enabled, " - "the interface %s is changed to use WebSocket.\n", - stbIface->valuestring); - superTable->iface = TAOSC_IFACE; - } -#endif } @@ -1582,45 +1555,69 @@ static int getMetaFromCommonJsonFile(tools_cJSON *json) { tools_cJSON *cfgdir = tools_cJSON_GetObjectItem(json, "cfgdir"); if (cfgdir && (cfgdir->type == tools_cJSON_String) && (cfgdir->valuestring != NULL)) { - tstrncpy(g_configDir, cfgdir->valuestring, MAX_FILE_NAME_LEN); + if (!g_arguments->cfg_inputted) { + tstrncpy(g_configDir, cfgdir->valuestring, MAX_FILE_NAME_LEN); + debugPrint("configDir from cfg: %s\n", g_configDir); + } else { + warnPrint("configDir set by command line, so ignore cfg. cmd: %s\n", g_configDir); + } } + // dsn + tools_cJSON *dsn = tools_cJSON_GetObjectItem(json, "dsn"); + if (tools_cJSON_IsString(dsn) && strlen(dsn->valuestring) > 0) { + if (g_arguments->dsn == NULL) { + g_arguments->dsn = dsn->valuestring; + infoPrint("read dsn from json. 
dsn=%s\n", g_arguments->dsn); + } + } + + // host tools_cJSON *host = tools_cJSON_GetObjectItem(json, "host"); if (host && host->type == tools_cJSON_String && host->valuestring != NULL) { - if(g_arguments->host && strlen(g_arguments->host) > 0) { - warnPrint("command line already pass host is %s, json config host(%s) had been ignored.\n", g_arguments->host, host->valuestring); - } else { + if(g_arguments->host == NULL) { g_arguments->host = host->valuestring; + infoPrint("read host from json: %s .\n", g_arguments->host); } } + // port tools_cJSON *port = tools_cJSON_GetObjectItem(json, "port"); if (port && port->type == tools_cJSON_Number) { - if(g_arguments->port != DEFAULT_PORT) { - warnPrint("command line already pass port is %d, json config port(%d) had been ignored.\n", g_arguments->port, (uint16_t)port->valueint); + if (g_arguments->port_inputted) { + // command line input port first + warnPrint("command port: %d, json port ignored.\n", g_arguments->port); } else { - g_arguments->port = (uint16_t)port->valueint; - if(g_arguments->port != DEFAULT_PORT) { - infoPrint("json file config special port %d .\n", g_arguments->port); - g_arguments->port_inputted = true; + // default port set auto port + if (port->valueint != DEFAULT_PORT) { + g_arguments->port = (uint16_t)port->valueint; + infoPrint("read port form json: %d .\n", g_arguments->port); + g_arguments->port_inputted = true; } } } + // user tools_cJSON *user = tools_cJSON_GetObjectItem(json, "user"); if (user && user->type == tools_cJSON_String && user->valuestring != NULL) { - g_arguments->user = user->valuestring; + if (g_arguments->user == NULL) { + g_arguments->user = user->valuestring; + infoPrint("read user from json: %s .\n", g_arguments->user); + } } + // pass tools_cJSON *password = tools_cJSON_GetObjectItem(json, "password"); if (password && password->type == tools_cJSON_String && password->valuestring != NULL) { - g_arguments->password = password->valuestring; + if(g_arguments->password == NULL) { + g_arguments->password = password->valuestring; + infoPrint("read password from json: %s .\n", "******"); + } } - tools_cJSON *answerPrompt = - tools_cJSON_GetObjectItem(json, - "confirm_parameter_prompt"); // yes, no, + // yes, no + tools_cJSON *answerPrompt = tools_cJSON_GetObjectItem(json, "confirm_parameter_prompt"); if (answerPrompt && answerPrompt->type == tools_cJSON_String && answerPrompt->valuestring != NULL) { if (0 == strcasecmp(answerPrompt->valuestring, "no")) { @@ -1661,15 +1658,6 @@ static int getMetaFromCommonJsonFile(tools_cJSON *json) { static int getMetaFromInsertJsonFile(tools_cJSON *json) { int32_t code = -1; -#ifdef WEBSOCKET - tools_cJSON *dsn = tools_cJSON_GetObjectItem(json, "dsn"); - if (tools_cJSON_IsString(dsn)) { - g_arguments->dsn = dsn->valuestring; - g_arguments->websocket = true; - infoPrint("set websocket true from json->dsn=%s\n", g_arguments->dsn); - } -#endif - // check after inserted tools_cJSON *checkSql = tools_cJSON_GetObjectItem(json, "check_sql"); if (tools_cJSON_IsString(checkSql)) { @@ -1724,24 +1712,6 @@ static int getMetaFromInsertJsonFile(tools_cJSON *json) { g_arguments->table_threads = (uint32_t)table_theads->valueint; } -#ifdef WEBSOCKET - if (!g_arguments->websocket) { -#endif -#ifdef LINUX - if (strlen(g_configDir)) { - wordexp_t full_path; - if (wordexp(g_configDir, &full_path, 0) != 0) { - errorPrint("Invalid path %s\n", g_configDir); - exit(EXIT_FAILURE); - } - taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); - wordfree(&full_path); - } -#endif -#ifdef WEBSOCKET 
- } -#endif - tools_cJSON *numRecPerReq = tools_cJSON_GetObjectItem(json, "num_of_records_per_req"); if (numRecPerReq && numRecPerReq->type == tools_cJSON_Number) { @@ -2268,9 +2238,7 @@ static int getMetaFromQueryJsonFile(tools_cJSON *json) { tools_cJSON *queryMode = tools_cJSON_GetObjectItem(json, "query_mode"); if (tools_cJSON_IsString(queryMode)) { - if (0 == strcasecmp(queryMode->valuestring, "rest")) { - g_queryInfo.iface = REST_IFACE; - } else if (0 == strcasecmp(queryMode->valuestring, "taosc")) { + if (0 == strcasecmp(queryMode->valuestring, "taosc")) { g_queryInfo.iface = TAOSC_IFACE; } else { errorPrint("Invalid query_mode value: %s\n", @@ -2320,24 +2288,6 @@ static int getMetaFromQueryJsonFile(tools_cJSON *json) { static int getMetaFromTmqJsonFile(tools_cJSON *json) { int32_t code = -1; - - tools_cJSON *cfgdir = tools_cJSON_GetObjectItem(json, "cfgdir"); - if (tools_cJSON_IsString(cfgdir)) { - tstrncpy(g_configDir, cfgdir->valuestring, MAX_FILE_NAME_LEN); - } - -#ifdef LINUX - if (strlen(g_configDir)) { - wordexp_t full_path; - if (wordexp(g_configDir, &full_path, 0) != 0) { - errorPrint("Invalid path %s\n", g_configDir); - exit(EXIT_FAILURE); - } - taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); - wordfree(&full_path); - } -#endif - tools_cJSON *resultfile = tools_cJSON_GetObjectItem(json, "result_file"); if (resultfile && resultfile->type == tools_cJSON_String && resultfile->valuestring != NULL) { diff --git a/tools/taos-tools/src/benchMain.c b/tools/taos-tools/src/benchMain.c index 16b3e3a617..fa47329b37 100644 --- a/tools/taos-tools/src/benchMain.c +++ b/tools/taos-tools/src/benchMain.c @@ -21,6 +21,7 @@ STmqMetaInfo g_tmqInfo; bool g_fail = false; uint64_t g_memoryUsage = 0; tools_cJSON* root; +extern char g_configDir[MAX_PATH_LEN]; #define CLIENT_INFO_LEN 20 static char g_client_info[CLIENT_INFO_LEN] = {0}; @@ -59,18 +60,6 @@ int checkArgumentValid() { g_arguments->host = DEFAULT_HOST; } - if (isRest(g_arguments->iface)) { - if (0 != convertServAddr(g_arguments->iface, - false, - 1)) { - errorPrint("%s", "Failed to convert server address\n"); - return -1; - } - encodeAuthBase64(); - g_arguments->rest_server_ver_major = - getServerVersionRest(g_arguments->port); - } - // check batch query if (g_arguments->test_mode == QUERY_TEST) { if (g_queryInfo.specifiedQueryInfo.batchQuery) { @@ -80,18 +69,35 @@ int checkArgumentValid() { errorPrint("%s\n", "batch_query = yes require mixed_query is yes"); return -1; } - - // rest not support - if (g_queryInfo.iface == REST_IFACE) { - errorPrint("%s\n", "batch_query = yes not support restful."); - return -1; - } } } return 0; } +// apply cfg +int32_t applyConfigDir(char * cfgDir){ + // set engine config dir + int32_t code; +#ifdef LINUX + wordexp_t full_path; + if (wordexp(cfgDir, &full_path, 0) != 0) { + errorPrint("Invalid path %s\n", cfgDir); + exit(EXIT_FAILURE); + } + code = taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); + wordfree(&full_path); +#else + code = taos_options(TSDB_OPTION_CONFIGDIR, cfgDir); +#endif + // show error + if (code) { + engineError("applyConfigDir", "taos_options(TSDB_OPTION_CONFIGDIR, ...)", code); + } + + return code; + } + int main(int argc, char* argv[]) { int ret = 0; @@ -100,46 +106,36 @@ int main(int argc, char* argv[]) { initArgument(); srand(time(NULL)%1000000); + // majorVersion snprintf(g_client_info, CLIENT_INFO_LEN, "%s", taos_get_client_info()); g_majorVersionOfClient = atoi(g_client_info); debugPrint("Client info: %s, major version: %d\n", g_client_info, 
g_majorVersionOfClient); -#ifdef LINUX - if (sem_init(&g_arguments->cancelSem, 0, 0) != 0) { - errorPrint("%s", "failed to create cancel semaphore\n"); - exit(EXIT_FAILURE); - } - pthread_t spid = {0}; - pthread_create(&spid, NULL, benchCancelHandler, NULL); - - benchSetSignal(SIGINT, benchQueryInterruptHandler); - -#endif + // read command line if (benchParseArgs(argc, argv)) { exitLog(); return -1; } -#ifdef WEBSOCKET - if (g_arguments->debug_print) { - ws_enable_log("info"); + + // check valid + if(g_arguments->connMode == CONN_MODE_NATIVE && g_arguments->dsn) { + errorPrint("%s", DSN_NATIVE_CONFLICT); + exitLog(); + return -1; } - if (g_arguments->dsn != NULL) { - g_arguments->websocket = true; - infoPrint("set websocket true from dsn not empty. dsn=%s\n", g_arguments->dsn); - } else { + // read evn + if (g_arguments->dsn == NULL) { char * dsn = getenv("TDENGINE_CLOUD_DSN"); - if (dsn != NULL && strlen(dsn) > 3) { + if (dsn != NULL && strlen(dsn) > 0) { g_arguments->dsn = dsn; - g_arguments->websocket = true; - infoPrint("set websocket true from getenv TDENGINE_CLOUD_DSN=%s\n", g_arguments->dsn); - } else { - g_arguments->dsn = false; - } + infoPrint("Get dsn from getenv TDENGINE_CLOUD_DSN=%s\n", g_arguments->dsn); + } } -#endif + + // read json config if (g_arguments->metaFile) { g_arguments->totalChildTables = 0; if (readJsonConfig(g_arguments->metaFile)) { @@ -151,6 +147,7 @@ int main(int argc, char* argv[]) { modifyArgument(); } + // open result file if(g_arguments->output_file[0] == 0) { infoPrint("%s","result_file is empty, ignore output."); g_arguments->fpOfInsertResult = NULL; @@ -162,6 +159,7 @@ int main(int argc, char* argv[]) { } } + // check argument infoPrint("client version: %s\n", taos_get_client_info()); if (checkArgumentValid()) { errorPrint("failed to readJsonConfig %s\n", g_arguments->metaFile); @@ -169,6 +167,36 @@ int main(int argc, char* argv[]) { return -1; } + // conn mode + if (setConnMode(g_arguments->connMode, g_arguments->dsn) != 0) { + exitLog(); + return -1; + } + + // check condition for set config dir + if (strlen(g_configDir) + && g_arguments->host_auto + && g_arguments->port_auto) { + // apply + if(applyConfigDir(g_configDir) != TSDB_CODE_SUCCESS) { + exitLog(); + return -1; + } + infoPrint("Set engine cfgdir successfully, dir:%s\n", g_configDir); + } + + // cancel thread +#ifdef LINUX + if (sem_init(&g_arguments->cancelSem, 0, 0) != 0) { + errorPrint("%s", "failed to create cancel semaphore\n"); + exit(EXIT_FAILURE); + } + pthread_t spid = {0}; + pthread_create(&spid, NULL, benchCancelHandler, NULL); + benchSetSignal(SIGINT, benchQueryInterruptHandler); +#endif + + // running if (g_arguments->test_mode == INSERT_TEST) { if (insertTestProcess()) { errorPrint("%s", "insert test process failed\n"); @@ -194,6 +222,8 @@ int main(int argc, char* argv[]) { if ((ret == 0) && g_arguments->aggr_func) { queryAggrFunc(); } + + // free and exit postFreeResource(); #ifdef LINUX diff --git a/tools/taos-tools/src/benchQuery.c b/tools/taos-tools/src/benchQuery.c index 8be8bf9f6c..d0c0713b3d 100644 --- a/tools/taos-tools/src/benchQuery.c +++ b/tools/taos-tools/src/benchQuery.c @@ -23,43 +23,32 @@ int selectAndGetResult(qThreadInfo *pThreadInfo, char *command, bool record) { } // execute sql - uint32_t threadID = pThreadInfo->threadID; char dbName[TSDB_DB_NAME_LEN] = {0}; tstrncpy(dbName, g_queryInfo.dbName, TSDB_DB_NAME_LEN); - if (g_queryInfo.iface == REST_IFACE) { - int retCode = postProceSql(command, g_queryInfo.dbName, 0, REST_IFACE, - 0, g_arguments->port, false, 
- pThreadInfo->sockfd, pThreadInfo->filePath); - if (0 != retCode) { - errorPrint("====restful return fail, threadID[%u]\n", threadID); - ret = -1; - } + // query + TAOS *taos = pThreadInfo->conn->taos; + int64_t rows = 0; + TAOS_RES *res = taos_query(taos, command); + int code = taos_errno(res); + if (res == NULL || code) { + // failed query + errorPrint("failed to execute sql:%s, " + "code: 0x%08x, reason:%s\n", + command, code, taos_errstr(res)); + ret = -1; } else { - // query - TAOS *taos = pThreadInfo->conn->taos; - int64_t rows = 0; - TAOS_RES *res = taos_query(taos, command); - int code = taos_errno(res); - if (res == NULL || code) { - // failed query - errorPrint("failed to execute sql:%s, " - "code: 0x%08x, reason:%s\n", - command, code, taos_errstr(res)); - ret = -1; - } else { - // succ query - if (record) - rows = fetchResult(res, pThreadInfo->filePath); - } - - // free result - if (res) { - taos_free_result(res); - } - debugPrint("query sql:%s rows:%"PRId64"\n", command, rows); + // succ query + if (record) + rows = fetchResult(res, pThreadInfo->filePath); } + // free result + if (res) { + taos_free_result(res); + } + debugPrint("query sql:%s rows:%"PRId64"\n", command, rows); + // record count if (ret ==0) { // succ @@ -1077,10 +1066,6 @@ void totalQuery(int64_t spends) { int queryTestProcess() { prompt(0); - if (REST_IFACE == g_queryInfo.iface) { - encodeAuthBase64(); - } - // kill sql for executing seconds over "kill_slow_query_threshold" if (g_queryInfo.iface == TAOSC_IFACE && g_queryInfo.killQueryThreshold) { int32_t ret = killSlowQuery(); @@ -1089,16 +1074,6 @@ int queryTestProcess() { } } - // covert addr - if (g_queryInfo.iface == REST_IFACE) { - if (convertHostToServAddr(g_arguments->host, - g_arguments->port + TSDB_PORT_HTTP, - &(g_arguments->serv_addr)) != 0) { - errorPrint("%s", "convert host to server address\n"); - return -1; - } - } - // fetch child name if super table if ((g_queryInfo.superQueryInfo.sqlCount > 0) && (g_queryInfo.superQueryInfo.threadCnt > 0)) { diff --git a/tools/taos-tools/src/benchSys.c b/tools/taos-tools/src/benchSys.c index d7e44c045f..b808b2d8df 100644 --- a/tools/taos-tools/src/benchSys.c +++ b/tools/taos-tools/src/benchSys.c @@ -73,12 +73,10 @@ void benchPrintHelp() { printf("%s%s%s%s\r\n", indent, "-x,", indent, BENCH_AGGR); printf("%s%s%s%s\r\n", indent, "-y,", indent, BENCH_YES); printf("%s%s%s%s\r\n", indent, "-z,", indent, BENCH_TRYING_INTERVAL); -#ifdef WEBSOCKET - printf("%s%s%s%s\r\n", indent, "-W,", indent, BENCH_DSN); - printf("%s%s%s%s\r\n", indent, "-D,", indent, BENCH_TIMEOUT); -#endif printf("%s%s%s%s\r\n", indent, "-v,", indent, BENCH_VGROUPS); printf("%s%s%s%s\r\n", indent, "-V,", indent, BENCH_VERSION); + printf("%s%s%s%s\r\n", indent, "-X,", indent, DSN_DESC); + printf("%s%s%s%s\r\n", indent, "-Z,", indent, DRIVER_DESC); printf("\r\n\r\nReport bugs to %s.\r\n", CUS_EMAIL); } @@ -120,11 +118,10 @@ int32_t benchParseArgsNoArgp(int argc, char* argv[]) { || key[1] == 'R' || key[1] == 'O' || key[1] == 'a' || key[1] == 'F' || key[1] == 'k' || key[1] == 'z' -#ifdef WEBSOCKET - || key[1] == 'D' || key[1] == 'W' -#endif - || key[1] == 'v' + || key[1] == 'W' || key[1] == 'v' + || key[1] == 'X' || key[1] == 'Z' ) { + // check input value if (i + 1 >= argc) { errorPrint("option %s requires an argument\r\n", key); return -1; @@ -191,15 +188,14 @@ static struct argp_option bench_options[] = { {"debug", 'g', 0, 0, BENCH_DEBUG}, {"performance", 'G', 0, 0, BENCH_PERFORMANCE}, {"prepared_rand", 'F', "NUMBER", 0, BENCH_PREPARE}, -#ifdef 
WEBSOCKET - {"cloud_dsn", 'W', "DSN", 0, BENCH_DSN}, - {"timeout", 'D', "NUMBER", 0, BENCH_TIMEOUT}, -#endif + {"cloud_dsn", 'W', "DSN", 0, OLD_DSN_DESC}, {"keep-trying", 'k', "NUMBER", 0, BENCH_KEEPTRYING}, {"trying-interval", 'z', "NUMBER", 0, BENCH_TRYING_INTERVAL}, {"vgroups", 'v', "NUMBER", 0, BENCH_VGROUPS}, {"version", 'V', 0, 0, BENCH_VERSION}, {"nodrop", 'Q', 0, 0, BENCH_NODROP}, + {"dsn", 'X', "DSN", 0, DSN_DESC}, + {DRIVER_OPT, 'Z', "DRIVER", 0, DRIVER_DESC}, {0} }; @@ -251,11 +247,7 @@ int32_t benchParseSingleOpt(int32_t key, char* arg) { errorPrint( "Invalid -P: %s, will auto set to default(6030)\n", arg); - if (REST_IFACE == g_arguments->iface) { - g_arguments->port = DEFAULT_REST_PORT; - } else { - g_arguments->port = DEFAULT_PORT; - } + g_arguments->port = DEFAULT_PORT; } else { g_arguments->port_auto = false; } @@ -269,11 +261,6 @@ int32_t benchParseSingleOpt(int32_t key, char* arg) { stbInfo->iface = STMT_IFACE; } else if (0 == strcasecmp(arg, "stmt2")) { stbInfo->iface = STMT2_IFACE; - } else if (0 == strcasecmp(arg, "rest")) { - stbInfo->iface = REST_IFACE; - if (false == g_arguments->port_inputted) { - g_arguments->port = DEFAULT_REST_PORT; - } } else if (0 == strcasecmp(arg, "sml") || 0 == strcasecmp(arg, "sml-line")) { stbInfo->iface = SML_IFACE; @@ -287,19 +274,6 @@ int32_t benchParseSingleOpt(int32_t key, char* arg) { } else if (0 == strcasecmp(arg, "sml-taosjson")) { stbInfo->iface = SML_IFACE; stbInfo->lineProtocol = SML_JSON_TAOS_FORMAT; - } else if (0 == strcasecmp(arg, "sml-rest") - || (0 == strcasecmp(arg, "sml-rest-line"))) { - stbInfo->iface = SML_REST_IFACE; - stbInfo->lineProtocol = TSDB_SML_LINE_PROTOCOL; - } else if (0 == strcasecmp(arg, "sml-rest-telnet")) { - stbInfo->iface = SML_REST_IFACE; - stbInfo->lineProtocol = TSDB_SML_TELNET_PROTOCOL; - } else if (0 == strcasecmp(arg, "sml-rest-json")) { - stbInfo->iface = SML_REST_IFACE; - stbInfo->lineProtocol = TSDB_SML_JSON_PROTOCOL; - } else if (0 == strcasecmp(arg, "sml-rest-taosjson")) { - stbInfo->iface = SML_REST_IFACE; - stbInfo->lineProtocol = SML_JSON_TAOS_FORMAT; } else { errorPrint( "Invalid -I: %s, will auto set to default (taosc)\n", @@ -625,19 +599,11 @@ int32_t benchParseSingleOpt(int32_t key, char* arg) { g_arguments->performance_print = true; break; -#ifdef WEBSOCKET case 'W': + case 'X': g_arguments->dsn = arg; break; - case 'D': - if (!toolsIsStringNumber(arg)) { - errorPrintReqArg2(CUS_PROMPT"Benchmark", "D"); - } - - g_arguments->timeout = atoi(arg); - break; -#endif case 'v': if (!toolsIsStringNumber(arg)) { errorPrintReqArg2(CUS_PROMPT"Benchmark", "v"); @@ -651,6 +617,9 @@ int32_t benchParseSingleOpt(int32_t key, char* arg) { case 'V': printVersion(); exit(0); + case 'Z': + g_arguments->connMode = getConnMode(arg); + break; default: return ARGP_ERR_UNKNOWN; } diff --git a/tools/taos-tools/src/benchUtil.c b/tools/taos-tools/src/benchUtil.c index 467af0198a..fe0ab3613b 100644 --- a/tools/taos-tools/src/benchUtil.c +++ b/tools/taos-tools/src/benchUtil.c @@ -10,8 +10,10 @@ * FITNESS FOR A PARTICULAR PURPOSE. 
*/ +#include #include #include "benchLog.h" +#include "pub.h" char resEncodingChunk[] = "Encoding: chunked"; char succMessage[] = "succ"; @@ -45,8 +47,8 @@ FORCE_INLINE void tmfree(void *buf) { } } -FORCE_INLINE bool isRest(int32_t iface) { - return REST_IFACE == iface || SML_REST_IFACE == iface; +void engineError(char * module, char * fun, int32_t code) { + errorPrint("%s API:%s error code:0x%08X %s\n", TIP_ENGINE_ERR, fun, code, module); } void ERROR_EXIT(const char *msg) { @@ -161,50 +163,6 @@ int getAllChildNameOfSuperTable(TAOS *taos, char *dbName, char *stbName, return 0; } -int convertHostToServAddr(char *host, uint16_t port, - struct sockaddr_in *serv_addr) { - if (!host) { - errorPrint("%s", "convertHostToServAddr host is null."); - return -1; - } - debugPrint("convertHostToServAddr(host: %s, port: %d)\n", host, - port); -#ifdef WINDOWS - WSADATA wsaData; - int ret = WSAStartup(MAKEWORD(2, 2), &wsaData); - if (ret) { - return ret; - } -#endif - struct hostent *server = gethostbyname(host); - if ((server == NULL) || (server->h_addr == NULL)) { - errorPrint("%s", "no such host"); - return -1; - } - memset(serv_addr, 0, sizeof(struct sockaddr_in)); - serv_addr->sin_family = AF_INET; - serv_addr->sin_port = htons(port); - -#ifdef WINDOWS - struct addrinfo hints = {0}; - hints.ai_family = AF_INET; - hints.ai_socktype = SOCK_STREAM; - - struct addrinfo *pai = NULL; - - if (!getaddrinfo(server->h_name, NULL, &hints, &pai)) { - serv_addr->sin_addr.s_addr = - ((struct sockaddr_in *) pai->ai_addr)->sin_addr.s_addr; - freeaddrinfo(pai); - } - WSACleanup(); -#else - serv_addr->sin_addr.s_addr = inet_addr(host); - memcpy(&(serv_addr->sin_addr.s_addr), server->h_addr, server->h_length); -#endif - return 0; -} - void prompt(bool nonStopMode) { if (!g_arguments->answer_yes) { g_arguments->in_prompt = true; @@ -295,48 +253,85 @@ int regexMatch(const char *s, const char *reg, int cflags) { return 0; } - - - SBenchConn* initBenchConnImpl() { SBenchConn* conn = benchCalloc(1, sizeof(SBenchConn), true); -#ifdef WEBSOCKET - if (g_arguments->websocket) { - conn->taos_ws = ws_connect(g_arguments->dsn); - char maskedDsn[256] = "\0"; - memcpy(maskedDsn, g_arguments->dsn, 20); - memcpy(maskedDsn+20, "...", 3); - memcpy(maskedDsn+23, - g_arguments->dsn + strlen(g_arguments->dsn)-10, 10); - if (conn->taos_ws == NULL) { - errorPrint("failed to connect %s, reason: %s\n", - maskedDsn, ws_errstr(NULL)); + char show[256] = "\0"; + char * host = NULL; + uint16_t port = 0; + char * user = NULL; + char * pwd = NULL; + int32_t code = 0; + char * dsnc = NULL; + + // set mode + if (g_arguments->connMode != CONN_MODE_NATIVE && g_arguments->dsn) { + dsnc = strToLowerCopy(g_arguments->dsn); + if (dsnc == NULL) { tmfree(conn); return NULL; } - succPrint("%s conneced\n", maskedDsn); + char *cport = NULL; + char error[512] = "\0"; + code = parseDsn(dsnc, &host, &cport, &user, &pwd, error); + if (code) { + errorPrint("%s dsn=%s\n", error, dsnc); + tmfree(conn); + tmfree(dsnc); + return NULL; + } + + // default ws port + if (cport == NULL) { + if (user) + port = DEFAULT_PORT_WS_CLOUD; + else + port = DEFAULT_PORT_WS_LOCAL; + } else { + port = atoi(cport); + } + + // websocket + memcpy(show, g_arguments->dsn, 20); + memcpy(show + 20, "...", 3); + memcpy(show + 23, g_arguments->dsn + strlen(g_arguments->dsn) - 10, 10); + } else { -#endif - conn->taos = taos_connect(g_arguments->host, - g_arguments->user, g_arguments->password, - NULL, g_arguments->port); - if (conn->taos == NULL) { - errorPrint("failed to connect native %s:%d, " 
- "code: 0x%08x, reason: %s\n", - g_arguments->host, g_arguments->port, - taos_errno(NULL), taos_errstr(NULL)); - tmfree(conn); - return NULL; + + host = g_arguments->host; + user = g_arguments->user; + pwd = g_arguments->password; + + if (g_arguments->port_inputted) { + port = g_arguments->port; + } else { + port = defaultPort(g_arguments->connMode, g_arguments->dsn); } - conn->ctaos = taos_connect(g_arguments->host, - g_arguments->user, - g_arguments->password, - NULL, g_arguments->port); -#ifdef WEBSOCKET + sprintf(show, "host:%s port:%d ", host, port); + } + + // connect main + conn->taos = taos_connect(host, user, pwd, NULL, port); + if (conn->taos == NULL) { + errorPrint("failed to connect %s:%d, " + "code: 0x%08x, reason: %s\n", + g_arguments->host, g_arguments->port, + taos_errno(NULL), taos_errstr(NULL)); + tmfree(conn); + if (dsnc) { + tmfree(dsnc); + } + return NULL; + } + succPrint("%s connect successfully.\n", show); + + // check write correct connect + conn->ctaos = taos_connect(host, user, pwd, NULL, port); + + if (dsnc) { + tmfree(dsnc); } -#endif return conn; } @@ -350,7 +345,7 @@ SBenchConn* initBenchConn() { break; } - infoPrint("sleep %dms and try to connect... %d \n", g_arguments->trying_interval, keep_trying); + infoPrint("sleep %dms and try to connect... %d/%d \n", g_arguments->trying_interval, keep_trying, g_arguments->keep_trying); if(g_arguments->trying_interval > 0) { toolsMsleep(g_arguments->trying_interval); } @@ -362,63 +357,28 @@ SBenchConn* initBenchConn() { void closeBenchConn(SBenchConn* conn) { if(conn == NULL) return ; -#ifdef WEBSOCKET - if (g_arguments->websocket) { - ws_close(conn->taos_ws); - } else { -#endif - if(conn->taos) { - taos_close(conn->taos); - conn->taos = NULL; - } - if (conn->ctaos) { - taos_close(conn->ctaos); - conn->ctaos = NULL; - } -#ifdef WEBSOCKET - } -#endif - tmfree(conn); -} -int32_t queryDbExecRest(char *command, char* dbName, int precision, - int iface, int protocol, bool tcp, int sockfd) { - int32_t code = postProceSql(command, - dbName, - precision, - iface, - protocol, - g_arguments->port, - tcp, - sockfd, - NULL); - return code; + if(conn->taos) { + taos_close(conn->taos); + conn->taos = NULL; + } + + if (conn->ctaos) { + taos_close(conn->ctaos); + conn->ctaos = NULL; + } + tmfree(conn); } int32_t queryDbExecCall(SBenchConn *conn, char *command) { int32_t code = 0; -#ifdef WEBSOCKET - if (g_arguments->websocket) { - WS_RES* res = ws_query_timeout(conn->taos_ws, - command, g_arguments->timeout); - code = ws_errno(res); - if (code != 0) { - errorPrint("Failed to execute <%s>, code: 0x%08x, reason: %s\n", - command, code, ws_errstr(res)); - } - ws_free_result(res); + TAOS_RES *res = taos_query(conn->taos, command); + code = taos_errno(res); + if (code) { + printErrCmdCodeStr(command, code, res); } else { -#endif - TAOS_RES *res = taos_query(conn->taos, command); - code = taos_errno(res); - if (code) { - printErrCmdCodeStr(command, code, res); - } else { - taos_free_result(res); - } -#ifdef WEBSOCKET + taos_free_result(res); } -#endif return code; } @@ -458,374 +418,6 @@ void encodeAuthBase64() { g_arguments->base64_buf[encoded_len - 1 - l] = '='; } -int postProceSqlImpl(char *sqlstr, char* dbName, int precision, int iface, - int protocol, uint16_t rest_port, bool tcp, int sockfd, - char* filePath, - char *responseBuf, int64_t response_length) { - int32_t code = -1; - char * req_fmt = - "POST %s HTTP/1.1\r\nHost: %s:%d\r\nAccept: */*\r\nAuthorization: " - "Basic %s\r\nContent-Length: %d\r\nContent-Type: " - 
"application/x-www-form-urlencoded\r\n\r\n%s"; - char url[URL_BUFF_LEN] = {0}; - if (iface == REST_IFACE) { - snprintf(url, URL_BUFF_LEN, "/rest/sql/%s", dbName); - } else if (iface == SML_REST_IFACE - && protocol == TSDB_SML_LINE_PROTOCOL) { - snprintf(url, URL_BUFF_LEN, - "/influxdb/v1/write?db=%s&precision=%s", dbName, - precision == TSDB_TIME_PRECISION_MILLI - ? "ms" - : precision == TSDB_TIME_PRECISION_NANO - ? "ns" - : "u"); - } else if (iface == SML_REST_IFACE - && protocol == TSDB_SML_TELNET_PROTOCOL) { - snprintf(url, URL_BUFF_LEN, "/opentsdb/v1/put/telnet/%s", dbName); - } else if (iface == SML_REST_IFACE - && (protocol == TSDB_SML_JSON_PROTOCOL - || protocol == SML_JSON_TAOS_FORMAT)) { - snprintf(url, URL_BUFF_LEN, "/opentsdb/v1/put/json/%s", dbName); - } - - int bytes, sent, received, req_str_len, resp_len; - char * request_buf = NULL; - int req_buf_len = (int)strlen(sqlstr) + REQ_EXTRA_BUF_LEN; - - if (g_arguments->terminate) { - goto free_of_postImpl; - } - request_buf = benchCalloc(1, req_buf_len, false); - - int r; - if (protocol == TSDB_SML_TELNET_PROTOCOL && tcp) { - r = snprintf(request_buf, req_buf_len, "%s", sqlstr); - } else { - r = snprintf(request_buf, req_buf_len, req_fmt, url, g_arguments->host, - rest_port, g_arguments->base64_buf, strlen(sqlstr), - sqlstr); - } - if (r >= req_buf_len) { - free(request_buf); - ERROR_EXIT("too long request"); - } - - req_str_len = (int)strlen(request_buf); - debugPrint("request buffer: %s\n", request_buf); - sent = 0; - do { - bytes = send(sockfd, request_buf + sent, - req_str_len - sent, 0); - if (bytes < 0) { - errorPrint("%s", "writing no message to socket\n"); - goto free_of_postImpl; - } - if (bytes == 0) break; - sent += bytes; - } while ((sent < req_str_len) && !g_arguments->terminate); - - if (protocol == TSDB_SML_TELNET_PROTOCOL - && iface == SML_REST_IFACE && tcp) { - code = 0; - goto free_of_postImpl; - } - - resp_len = response_length - 1; - received = 0; - - bool chunked = false; - - if (g_arguments->terminate) { - goto free_of_postImpl; - } - do { - bytes = recv(sockfd, responseBuf + received, - resp_len - received, 0); - if (bytes <= 0) { - errorPrint("%s", "reading no response from socket\n"); - goto free_of_postImpl; - } - responseBuf[resp_len] = 0; - debugPrint("response buffer: %s bytes=%d\n", responseBuf, bytes); - if (NULL != strstr(responseBuf, resEncodingChunk)) { - chunked = true; - } - int64_t index = strlen(responseBuf) - 1; - while (responseBuf[index] == '\n' || responseBuf[index] == '\r') { - if (index == 0) { - break; - } - index--; - } - debugPrint("index: %" PRId64 "\n", index); - if (chunked && responseBuf[index] == '0') { - code = 0; - break; - } - if (!chunked && responseBuf[index] == '}') { - code = 0; - break; - } - - received += bytes; - - if (g_arguments->test_mode == INSERT_TEST) { - if (strlen(responseBuf)) { - if (((NULL != strstr(responseBuf, resEncodingChunk)) && - (NULL != strstr(responseBuf, resHttp))) || - ((NULL != strstr(responseBuf, resHttpOk)) || - (NULL != strstr(responseBuf, influxHttpOk)) || - (NULL != strstr(responseBuf, opentsdbHttpOk)))) { - break; - } - } - } - } while ((received < resp_len) && !g_arguments->terminate); - - if (received == resp_len) { - errorPrint("%s", "storing complete response from socket\n"); - goto free_of_postImpl; - } - - if (NULL == strstr(responseBuf, resHttpOk) && - NULL == strstr(responseBuf, influxHttpOk) && - NULL == strstr(responseBuf, succMessage) && - NULL == strstr(responseBuf, opentsdbHttpOk)) { - errorPrint("Response:\n%s\n", responseBuf); 
- goto free_of_postImpl; - } - - code = 0; -free_of_postImpl: - if (filePath && strlen(filePath) > 0 && !g_arguments->terminate) { - appendResultBufToFile(responseBuf, filePath); - } - tmfree(request_buf); - return code; -} - -static int getServerVersionRestImpl(int16_t rest_port, int sockfd) { - int server_ver = -1; - char command[SHORT_1K_SQL_BUFF_LEN] = "\0"; - snprintf(command, SHORT_1K_SQL_BUFF_LEN, "SELECT SERVER_VERSION()"); - char *responseBuf = benchCalloc(1, RESP_BUF_LEN, false); - int code = postProceSqlImpl(command, - NULL, - 0, - REST_IFACE, - 0, - rest_port, - false, - sockfd, - NULL, responseBuf, RESP_BUF_LEN); - if (code != 0) { - errorPrint("Failed to execute command: %s\n", command); - goto free_of_getversion; - } - debugPrint("response buffer: %s\n", responseBuf); - if (NULL != strstr(responseBuf, resHttpOk)) { - char* start = strstr(responseBuf, "{"); - if (start == NULL) { - errorPrint("Invalid response format: %s\n", responseBuf); - goto free_of_getversion; - } - tools_cJSON* resObj = tools_cJSON_Parse(start); - if (resObj == NULL) { - errorPrint("Cannot parse response into json: %s\n", start); - } - tools_cJSON* dataObj = tools_cJSON_GetObjectItem(resObj, "data"); - if (!tools_cJSON_IsArray(dataObj)) { - char* pstr = tools_cJSON_Print(resObj); - errorPrint("Invalid or miss 'data' key in json: %s\n", pstr ? pstr : "null"); - tmfree(pstr); - tools_cJSON_Delete(resObj); - goto free_of_getversion; - } - tools_cJSON *versionObj = tools_cJSON_GetArrayItem(dataObj, 0); - tools_cJSON *versionStrObj = tools_cJSON_GetArrayItem(versionObj, 0); - server_ver = atoi(versionStrObj->valuestring); - char* pstr = tools_cJSON_Print(versionStrObj); - debugPrint("versionStrObj: %s, version: %s, server_ver: %d\n", - pstr ? pstr : "null", - versionStrObj->valuestring, server_ver); - tmfree(pstr); - tools_cJSON_Delete(resObj); - } -free_of_getversion: - free(responseBuf); - return server_ver; -} - -int getServerVersionRest(int16_t rest_port) { - int sockfd = createSockFd(); - if (sockfd < 0) { - return -1; - } - - int server_version = getServerVersionRestImpl(rest_port, sockfd); - - destroySockFd(sockfd); - return server_version; -} - -static int getCodeFromResp(char *responseBuf) { - int code = -1; - char* start = strstr(responseBuf, "{"); - if (start == NULL) { - errorPrint("Invalid response format: %s\n", responseBuf); - return -1; - } - tools_cJSON* resObj = tools_cJSON_Parse(start); - if (resObj == NULL) { - errorPrint("Cannot parse response into json: %s\n", start); - return -1; - } - tools_cJSON* codeObj = tools_cJSON_GetObjectItem(resObj, "code"); - if (!tools_cJSON_IsNumber(codeObj)) { - char* pstr = tools_cJSON_Print(resObj); - errorPrint("Invalid or miss 'code' key in json: %s\n", pstr ? pstr : "null"); - tmfree(pstr); - tools_cJSON_Delete(resObj); - return -1; - } - - code = codeObj->valueint; - - if (codeObj->valueint != 0) { - tools_cJSON* desc = tools_cJSON_GetObjectItem(resObj, "desc"); - if (!tools_cJSON_IsString(desc)) { - char* pstr = tools_cJSON_Print(resObj); - errorPrint("Invalid or miss 'desc' key in json: %s\n", pstr ? 
pstr : "null"); - tmfree(pstr); - return -1; - } - errorPrint("response, code: %d, reason: %s\n", - (int)codeObj->valueint, desc->valuestring); - } - - tools_cJSON_Delete(resObj); - return code; -} - -int postProceSql(char *sqlstr, char* dbName, int precision, int iface, - int protocol, uint16_t rest_port, - bool tcp, int sockfd, char* filePath) { - uint64_t response_length; - if (g_arguments->test_mode == INSERT_TEST) { - response_length = RESP_BUF_LEN; - } else { - response_length = g_queryInfo.response_buffer; - } - - char *responseBuf = benchCalloc(1, response_length, false); - int code = postProceSqlImpl(sqlstr, dbName, precision, iface, protocol, - rest_port, - tcp, sockfd, filePath, responseBuf, - response_length); - // compatibility 2.6 - if (-1 == g_arguments->rest_server_ver_major) { - // confirm version is 2.x according to "succ" - if (NULL != strstr(responseBuf, succMessage) && iface == REST_IFACE) { - g_arguments->rest_server_ver_major = 2; - } - } - - if (NULL != strstr(responseBuf, resHttpOk) && iface == REST_IFACE) { - // if taosd is not starting , rest_server_ver_major can't be got by 'select server_version()' , so is -1 - if (-1 == g_arguments->rest_server_ver_major || 3 <= g_arguments->rest_server_ver_major) { - code = getCodeFromResp(responseBuf); - } else { - code = 0; - } - goto free_of_post; - } - - if (2 == g_arguments->rest_server_ver_major) { - if (NULL != strstr(responseBuf, succMessage) && iface == REST_IFACE) { - code = getCodeFromResp(responseBuf); - } else { - code = 0; - } - goto free_of_post; - } - - if (NULL != strstr(responseBuf, influxHttpOk) && - protocol == TSDB_SML_LINE_PROTOCOL && iface == SML_REST_IFACE) { - code = 0; - goto free_of_post; - } - - if (NULL != strstr(responseBuf, opentsdbHttpOk) - && (protocol == TSDB_SML_TELNET_PROTOCOL - || protocol == TSDB_SML_JSON_PROTOCOL - || protocol == SML_JSON_TAOS_FORMAT) - && iface == SML_REST_IFACE) { - code = 0; - goto free_of_post; - } - - if (g_arguments->test_mode == INSERT_TEST) { - debugPrint("Response: \n%s\n", responseBuf); - char* start = strstr(responseBuf, "{"); - if ((start == NULL) - && (TSDB_SML_TELNET_PROTOCOL != protocol) - && (TSDB_SML_JSON_PROTOCOL != protocol) - && (SML_JSON_TAOS_FORMAT != protocol) - ) { - errorPrint("Invalid response format: %s\n", responseBuf); - goto free_of_post; - } - tools_cJSON* resObj = tools_cJSON_Parse(start); - if ((resObj == NULL) - && (TSDB_SML_TELNET_PROTOCOL != protocol) - && (TSDB_SML_JSON_PROTOCOL != protocol) - && (SML_JSON_TAOS_FORMAT != protocol) - ) { - errorPrint("Cannot parse response into json: %s\n", start); - } - tools_cJSON* codeObj = tools_cJSON_GetObjectItem(resObj, "code"); - if ((!tools_cJSON_IsNumber(codeObj)) - && (TSDB_SML_TELNET_PROTOCOL != protocol) - && (TSDB_SML_JSON_PROTOCOL != protocol) - && (SML_JSON_TAOS_FORMAT != protocol) - ) { - char* pstr = tools_cJSON_Print(resObj); - errorPrint("Invalid or miss 'code' key in json: %s\n", pstr ? pstr : "null"); - tmfree(pstr); - tools_cJSON_Delete(resObj); - goto free_of_post; - } - - if ((SML_REST_IFACE == iface) && codeObj - && (200 == codeObj->valueint)) { - code = 0; - tools_cJSON_Delete(resObj); - goto free_of_post; - } - - if ((iface == SML_REST_IFACE) - && (protocol == TSDB_SML_LINE_PROTOCOL) - && codeObj - && (codeObj->valueint != 0) && (codeObj->valueint != 200)) { - tools_cJSON* desc = tools_cJSON_GetObjectItem(resObj, "desc"); - if (!tools_cJSON_IsString(desc)) { - char* pstr = tools_cJSON_Print(resObj); - errorPrint("Invalid or miss 'desc' key in json: %s\n", pstr ? 
pstr : "null"); - tmfree(pstr); - } else { - errorPrint("insert mode response, code: %d, reason: %s\n", - (int)codeObj->valueint, desc->valuestring); - } - } else { - code = 0; - } - tools_cJSON_Delete(resObj); - } -free_of_post: - free(responseBuf); - return code; -} - // fetch result fo file or nothing int64_t fetchResult(TAOS_RES *res, char * filePath) { TAOS_ROW row = NULL; @@ -1219,111 +811,6 @@ void benchSetSignal(int32_t signum, ToolsSignalHandler sigfp) { } #endif -int convertServAddr(int iface, bool tcp, int protocol) { - if (tcp - && iface == SML_REST_IFACE - && protocol == TSDB_SML_TELNET_PROTOCOL) { - // telnet_tcp_port - if (convertHostToServAddr(g_arguments->host, - g_arguments->telnet_tcp_port, - &(g_arguments->serv_addr))) { - errorPrint("%s\n", "convert host to server address"); - return -1; - } - infoPrint("convertServAddr host=%s telnet_tcp_port:%d to serv_addr=%p iface=%d \n", - g_arguments->host, g_arguments->telnet_tcp_port, &g_arguments->serv_addr, iface); - } else { - int port = g_arguments->port_inputted ? g_arguments->port:DEFAULT_REST_PORT; - if (convertHostToServAddr(g_arguments->host, - port, - &(g_arguments->serv_addr))) { - errorPrint("%s\n", "convert host to server address"); - return -1; - } - infoPrint("convertServAddr host=%s port:%d to serv_addr=%p iface=%d \n", - g_arguments->host, port, &g_arguments->serv_addr, iface); - } - return 0; -} - -static void errorPrintSocketMsg(char *msg, int result) { -#ifdef WINDOWS - errorPrint("%s: %d\n", msg, WSAGetLastError()); -#else - errorPrint("%s: %d\n", msg, result); -#endif -} - -int createSockFd() { -#ifdef WINDOWS - WSADATA wsaData; - WSAStartup(MAKEWORD(2, 2), &wsaData); - SOCKET sockfd; -#else - int sockfd; -#endif - sockfd = socket(AF_INET, SOCK_STREAM, 0); - if (sockfd < 0) { - errorPrintSocketMsg("Could not create socket : ", sockfd); - return -1; - } - - int retConn = connect( - sockfd, (struct sockaddr *)&(g_arguments->serv_addr), - sizeof(struct sockaddr)); - infoPrint("createSockFd call connect serv_addr=%p retConn=%d\n", &g_arguments->serv_addr, retConn); - if (retConn < 0) { - errorPrint("%s\n", "failed to connect"); -#ifdef WINDOWS - closesocket(sockfd); - WSACleanup(); -#else - close(sockfd); -#endif - return -1; - } - return sockfd; -} - -static void closeSockFd(int sockfd) { -#ifdef WINDOWS - closesocket(sockfd); - WSACleanup(); -#else - close(sockfd); -#endif -} - -void destroySockFd(int sockfd) { - // check valid - if (sockfd < 0) { - return; - } - - // shutdown the connection since no more data will be sent - int result; - result = shutdown(sockfd, SHUT_WR); - if (SOCKET_ERROR == result) { - errorPrintSocketMsg("Socket shutdown failed with error: ", result); - closeSockFd(sockfd); - return; - } - // Receive until the peer closes the connection - do { - int recvbuflen = LARGE_BUFF_LEN; - char recvbuf[LARGE_BUFF_LEN]; - result = recv(sockfd, recvbuf, recvbuflen, 0); - if ( result > 0 ) { - debugPrint("Socket bytes received: %d\n", result); - } else if (result == 0) { - infoPrint("Connection closed with result %d\n", result); - } else { - errorPrintSocketMsg("Socket recv failed with error: ", result); - } - } while (result > 0); - - closeSockFd(sockfd); -} FORCE_INLINE void printErrCmdCodeStr(char *cmd, int32_t code, TAOS_RES *res) { char buff[512]; @@ -1334,7 +821,7 @@ FORCE_INLINE void printErrCmdCodeStr(char *cmd, int32_t code, TAOS_RES *res) { strcat(buff, "..."); msg = buff; } - errorPrint("failed to run error code: 0x%08x, reason: %s command %s\n", + errorPrint("%s error code: 0x%08x, 
reason: %s command %s\n", TIP_ENGINE_ERR, code, taos_errstr(res), msg); taos_free_result(res); } @@ -1372,6 +859,24 @@ char* genQMark( int32_t QCnt) { return buf; } +// get colNames , first is tbname if tbName is true +char *genColNames(BArray *cols, bool tbName) { + // reserve tbname,ts and "," space + char * buf = benchCalloc(TSDB_TABLE_NAME_LEN + 1, cols->size + 1, false); + if (tbName) { + strcpy(buf, "tbname,ts"); + } else { + strcpy(buf, "ts"); + } + + for (int32_t i = 0; i < cols->size; i++) { + Field * col = benchArrayGet(cols, i); + strcat(buf, ","); + strcat(buf, col->name); + } + return buf; +} + // // STMT2 // @@ -1583,17 +1088,9 @@ uint32_t MurmurHash3_32(const char *key, uint32_t len) { // init conn int32_t initQueryConn(qThreadInfo * pThreadInfo, int iface) { // create conn - if (iface == REST_IFACE) { - int sockfd = createSockFd(); - if (sockfd < 0) { - return -1; - } - pThreadInfo->sockfd = sockfd; - } else { - pThreadInfo->conn = initBenchConn(); - if (pThreadInfo->conn == NULL) { - return -1; - } + pThreadInfo->conn = initBenchConn(); + if (pThreadInfo->conn == NULL) { + return -1; } return 0; @@ -1601,17 +1098,8 @@ int32_t initQueryConn(qThreadInfo * pThreadInfo, int iface) { // close conn void closeQueryConn(qThreadInfo * pThreadInfo, int iface) { - if (iface == REST_IFACE) { -#ifdef WINDOWS - closesocket(pThreadInfo->sockfd); - WSACleanup(); -#else - close(pThreadInfo->sockfd); -#endif - } else { - closeBenchConn(pThreadInfo->conn); - pThreadInfo->conn = NULL; - } + closeBenchConn(pThreadInfo->conn); + pThreadInfo->conn = NULL; } diff --git a/tools/taos-tools/src/dumpUtil.c b/tools/taos-tools/src/dumpUtil.c index 0b684de43f..d2403ec4c0 100644 --- a/tools/taos-tools/src/dumpUtil.c +++ b/tools/taos-tools/src/dumpUtil.c @@ -15,6 +15,7 @@ #include +#include "pub.h" #include "dump.h" #include "dumpUtil.h" @@ -74,19 +75,6 @@ bool canRetry(int32_t code, int8_t type) { } } -#ifdef WEBSOCKET - int32_t wsCode = code & 0xFFFF; - // range1 - if (wsCode >= WEBSOCKET_CODE_BEGIN1 && wsCode <= WEBSOCKET_CODE_END1) { - return true; - } - // range2 - if (wsCode >= WEBSOCKET_CODE_BEGIN2 && wsCode <= WEBSOCKET_CODE_END2) { - return true; - } - -#endif - return false; } @@ -97,21 +85,81 @@ bool canRetry(int32_t code, int8_t type) { // connect TAOS *taosConnect(const char *dbName) { + // + // collect params + // + char show[256] = "\0"; + char * host = NULL; + uint16_t port = 0; + char * user = NULL; + char * pwd = NULL; + int32_t code = 0; + char * dsnc = NULL; + + // set mode + if (g_args.dsn) { + dsnc = strToLowerCopy(g_args.dsn); + if (dsnc == NULL) { + return NULL; + } + + char *cport = NULL; + char error[512] = ""; + code = parseDsn(dsnc, &host, &cport, &user, &pwd, error); + if (code) { + errorPrint("%s dsn=%s\n", error, dsnc); + free(dsnc); + return NULL; + } + + // default ws port + if (cport == NULL) { + if (user) + port = DEFAULT_PORT_WS_CLOUD; + else + port = DEFAULT_PORT_WS_LOCAL; + } else { + port = atoi(cport); + } + + // websocket + memcpy(show, g_args.dsn, 20); + memcpy(show + 20, "...", 3); + memcpy(show + 23, g_args.dsn + strlen(g_args.dsn) - 10, 10); + + } else { + + host = g_args.host; + user = g_args.user; + pwd = g_args.password; + + if (g_args.port_inputted) { + port = g_args.port; + } else { + port = defaultPort(g_args.connMode, g_args.dsn); + } + + sprintf(show, "host:%s port:%d ", host, port); + } + + // + // connect + // int32_t i = 0; + TAOS *taos = NULL; while (1) { - TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password, dbName, g_args.port); + 
taos = taos_connect(host, user, pwd, dbName, port); if (taos) { // successful if (i > 0) { - okPrint("Retry %d to connect %s:%d successfully!\n", i, g_args.host, g_args.port); + okPrint("Retry %d to connect %s:%d successfully!\n", i, host, port); } - return taos; + break; } // fail - errorPrint("Failed to connect to server %s, code: 0x%08x, reason: %s! \n", g_args.host, taos_errno(NULL), + errorPrint("Failed to connect to server %s, code: 0x%08x, reason: %s! \n", host, taos_errno(NULL), taos_errstr(NULL)); - if (++i > g_args.retryCount) { break; } @@ -120,7 +168,11 @@ TAOS *taosConnect(const char *dbName) { infoPrint("Retry to connect for %d after sleep %dms ...\n", i, g_args.retrySleepMs); toolsMsleep(g_args.retrySleepMs); } - return NULL; + + if (dsnc) { + free(dsnc); + } + return taos; } // query @@ -160,132 +212,6 @@ TAOS_RES *taosQuery(TAOS *taos, const char *sql, int32_t *code) { return NULL; } - -// -// --------------- websocket ------------------ -// - -#ifdef WEBSOCKET -// ws connect -WS_TAOS *wsConnect() { - int32_t i = 0; - while (1) { - WS_TAOS *ws_taos = ws_connect(g_args.dsn); - if (ws_taos) { - // successful - if (i > 0) { - okPrint("Retry %d to connect %s:%d successfully!\n", i, g_args.host, g_args.port); - } - return ws_taos; - } - - // fail - char maskedDsn[256] = "\0"; - memcpy(maskedDsn, g_args.dsn, 20); - memcpy(maskedDsn + 20, "...", 3); - memcpy(maskedDsn + 23, g_args.dsn + strlen(g_args.dsn) - 10, 10); - errorPrint("Failed to ws_connect to server %s, code: 0x%08x, reason: %s!\n", maskedDsn, ws_errno(NULL), - ws_errstr(NULL)); - - if (++i > g_args.retryCount) { - break; - } - - // retry agian - infoPrint("Retry to ws_connect for %d after sleep %dms ...\n", i, g_args.retrySleepMs); - toolsMsleep(g_args.retrySleepMs); - } - return NULL; -} - -// ws query -WS_RES *wsQuery(WS_TAOS **taos_v, const char *sql, int32_t *code) { - int32_t i = 0; - WS_RES *ws_res = NULL; - while (1) { - ws_res = ws_query_timeout(*taos_v, sql, g_args.ws_timeout); - *code = ws_errno(ws_res); - if (*code == 0) { - if (i > 0) { - okPrint("Retry %d to execute taosQuery %s successfully!\n", i, sql); - } - // successful - return ws_res; - } - - // fail - errorPrint("Failed to execute taosQuery, code: 0x%08x, reason: %s, sql=%s \n", *code, ws_errstr(ws_res), sql); - - // can retry - if(!canRetry(*code, RETRY_TYPE_QUERY)) { - infoPrint("%s", "error code not in retry range , give up retry.\n"); - return ws_res; - } - - if (++i > g_args.retryCount) { - break; - } - - // retry agian - infoPrint("Retry to execute taosQuery for %d after sleep %dms ...\n", i, g_args.retrySleepMs); - toolsMsleep(g_args.retrySleepMs); - } - - // need reconnect - infoPrint("query switch new connect to try , sql=%s \n", sql); - WS_TAOS * new_conn = wsConnect(); - if(new_conn == NULL) { - // return old - return ws_res; - } - - // use new conn to query - ws_res = ws_query_timeout(new_conn, sql, g_args.ws_timeout); - *code = ws_errno(ws_res); - if (*code == 0) { - // set new connect to old - ws_close(*taos_v); - *taos_v = new_conn; - okPrint("execute taosQuery with new connection successfully! 
sql=%s\n", sql); - // successful - return ws_res; - } - - // fail - errorPrint("execute taosQuery with new connection failed, code: 0x%08x, reason: %s \n", *code, ws_errstr(ws_res)); - ws_close(new_conn); - return ws_res; -} - -// fetch -int32_t wsFetchBlock(WS_RES *rs, const void **pData, int32_t *numOfRows) { - int32_t i = 0; - int32_t ws_code = TSDB_CODE_FAILED; - while (1) { - ws_code = ws_fetch_raw_block(rs, pData, numOfRows); - if (ws_code == TSDB_CODE_SUCCESS) { - // successful - if (i > 0) { - okPrint("Retry %d to fetch block successfully!\n", i); - } - return ws_code; - } - - if(!canRetry(ws_code, RETRY_TYPE_FETCH)) { - infoPrint("give up retry fetch because error code need not retry. err code=%d\n", ws_code); - break; - } - - if (++i > g_args.retryCount) { - break; - } - - // retry agian - infoPrint("Retry to ws fetch raw block for %d after sleep %dms ...\n", i, g_args.retrySleepMs); - toolsMsleep(g_args.retrySleepMs); - } - - return ws_code; -} - -#endif \ No newline at end of file +void engineError(char * module, char * fun, int32_t code) { + errorPrint("%s %s fun=%s error code:0x%08X \n", TIP_ENGINE_ERR, module, fun, code); +} \ No newline at end of file diff --git a/tools/taos-tools/src/taosdump.c b/tools/taos-tools/src/taosdump.c index e300a976b9..6ef633fb9b 100644 --- a/tools/taos-tools/src/taosdump.c +++ b/tools/taos-tools/src/taosdump.c @@ -11,12 +11,10 @@ #define _GNU_SOURCE +#include "pub.h" #include "cus_name.h" // include/util/ #include "dump.h" #include "dumpUtil.h" -#ifdef WEBSOCKET -#include "wsdump.h" -#endif static char **g_tsDumpInDebugFiles = NULL; static char g_dumpInCharset[64] = {0}; @@ -152,18 +150,17 @@ static struct argp_option options[] = { {"inspect", 'I', 0, 0, "inspect avro file content and print on screen", 10}, {"no-escape", 'n', 0, 0, "No escape char '`'. Default is using it.", 10}, -#ifdef WEBSOCKET - {"restful", 'R', 0, 0, "Use RESTful interface to connect server", 11}, - {"cloud", 'C', "CLOUD_DSN", 0, - "specify a DSN to access the cloud service", 11}, + {"cloud", 'C', "CLOUD_DSN", 0, OLD_DSN_DESC, 11}, {"timeout", 't', "SECONDS", 0, "The timeout seconds for " "websocket to interact."}, -#endif {"debug", 'g', 0, 0, "Print debug info.", 15}, {"dot-replace", 'Q', 0, 0, "Repalce dot character with underline character in the table name.", 10}, - {"rename", 'W', "RENAME-LIST", 0, "Rename database name with new name during importing data. RENAME-LIST: \"db1=newDB1|db2=newDB2\" means rename db1 to newDB1 and rename db2 to newDB2", 10}, + {"rename", 'W', "RENAME-LIST", 0, "Rename database name with new name during importing data. 
\ + RENAME-LIST: \"db1=newDB1|db2=newDB2\" means rename db1 to newDB1 and rename db2 to newDB2", 10}, {"retry-count", 'k', "VALUE", 0, "Set the number of retry attempts for connection or query failures", 11}, {"retry-sleep-ms", 'z', "VALUE", 0, "retry interval sleep time, unit ms", 11}, + {"dsn", 'X', "DSN", 0, DSN_DESC, 11}, + {DRIVER_OPT, 'Z', "DRIVER", 0, DRIVER_DESC}, {0} }; @@ -221,15 +218,9 @@ struct arguments g_args = { false, // dotRepalce 0, // dumpDbCount -#ifdef WEBSOCKET - false, // restful - false, // cloud - 10, // ws_timeout + CONN_MODE_INVALID, // connMode NULL, // dsn - NULL, // cloudToken - 0, // cloudPort - {0}, // cloudHost -#endif // WEBSOCKET + false, // port_inputted NULL, // renameBuf NULL, // renameHead @@ -584,6 +575,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { exit(EXIT_FAILURE); } g_args.port = (uint16_t)port; + g_args.port_inputted = true; break; case 'o': @@ -700,29 +692,19 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { g_args.thread_num = atoi((const char *)arg); break; -#ifdef WEBSOCKET - case 'R': - g_args.restful = true; - break; - case 'C': + case 'X': if (arg) { - g_args.dsn = arg; + if (arg[0]!= 0) { + g_args.dsn = arg; + } + } else { errorPrint("%s", "\n\t-C need a valid cloud DSN following!\n"); exit(EXIT_FAILURE); } break; - case 't': - if (arg) { - g_args.ws_timeout = atoi(arg); - } else { - fprintf(stderr, "Invalid -t option\n"); - } - break; -#endif // WEBSOCKET - case OPT_ABORT: g_args.abort = 1; break; @@ -745,6 +727,10 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { g_args.retrySleepMs = atoi((const char *)arg); printf(" set argument retry interval sleep = %d ms\n", g_args.retrySleepMs); break; + case 'Z': + g_args.connMode = getConnMode(arg); + break; + default: return ARGP_ERR_UNKNOWN; } @@ -1082,7 +1068,7 @@ static int getTableRecordInfoImplNative( return -1; } -static int getTableRecordInfoNative( +static int getTableRecordInfo( char *dbName, char *table, TableRecordInfo *pTableRecordInfo) { if (0 == getTableRecordInfoImplNative( @@ -1097,22 +1083,6 @@ static int getTableRecordInfoNative( return -1; } -static int getTableRecordInfo( - char *dbName, - char *table, TableRecordInfo *pTableRecordInfo) { - int ret; -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - ret = getTableRecordInfoWS(dbName, table, pTableRecordInfo); - } else { -#endif - ret = getTableRecordInfoNative(dbName, table, pTableRecordInfo); -#ifdef WEBSOCKET - } -#endif - return ret; -} - bool isSystemDatabase(char *dbName) { if (g_majorVersionOfClient == 3) { if ((strcmp(dbName, "information_schema") == 0) @@ -1214,46 +1184,20 @@ static int getDumpDbCount() { int32_t code = -1; -#ifdef WEBSOCKET - WS_TAOS *ws_taos = NULL; - WS_RES *ws_res; - /* Connect to server */ - if (g_args.cloud || g_args.restful) { - if (NULL == (ws_taos = wsConnect())) { - free(command); - return 0; - } - - int32_t ws_code = -1; - ws_res = wsQuery(&ws_taos, command, &ws_code); - if (0 != ws_code) { - cleanIfQueryFailedWS(__func__, __LINE__, command, ws_res); - ws_close(ws_taos); - return 0; - } - - count = getDbCountWS(ws_res); - ws_free_result(ws_res); - ws_close(ws_taos); - } else { -#endif // WEBSOCKET - if (NULL == (taos = taosConnect(NULL))) { - free(command); - return 0; - } - res = taosQuery(taos, command, &code); - if (0 != code) { - cleanIfQueryFailed(__func__, __LINE__, command, res); - taos_close(taos); - return 0; - } - - count = getDbCountNative(res); - taos_free_result(res); - 
taos_close(taos); -#ifdef WEBSOCKET + if (NULL == (taos = taosConnect(NULL))) { + free(command); + return 0; } -#endif + res = taosQuery(taos, command, &code); + if (0 != code) { + cleanIfQueryFailed(__func__, __LINE__, command, res); + taos_close(taos); + return 0; + } + + count = getDbCountNative(res); + taos_free_result(res); + taos_close(taos); free(command); return count; @@ -2043,18 +1987,8 @@ char *queryCreateTableSql(void** taos_v, const char *dbName, char *tbName) { // read uint32_t len = 0; char* data = 0; - int32_t ret; -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - ret = readRowWS(res, 0, 1, &len, &data); - } else { -#endif - ret = readRow(res, 0, 1, &len, &data); -#ifdef WEBSOCKET - } -#endif - + int32_t ret = readRow(res, 0, 1, &len, &data); if (ret != 0) { closeQuery(res); return NULL; @@ -2506,20 +2440,10 @@ static int dumpStableClasuse( const char *stbName, TableDes **pStbTableDes, FILE *fp) { - int colCount = -1; + TableDes *tableDes = *pStbTableDes; -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - colCount = getTableDesWS( - taos_v, dbInfo->name, + int32_t colCount = getTableDesNative(*taos_v, dbInfo->name, stbName, tableDes, true); - } else { -#endif - colCount = getTableDesNative(*taos_v, dbInfo->name, - stbName, tableDes, true); -#ifdef WEBSOCKET - } -#endif if (colCount < 0) { errorPrint("%s() LN%d, failed to get stable[%s] schema\n", @@ -3480,7 +3404,6 @@ int64_t queryDbForDumpOutCount( const char *dbName, const char *tbName, const int precision) { - int64_t count = -1; char *command = calloc(1, TSDB_MAX_ALLOWED_SQL_LEN); if (NULL == command) { errorPrint("%s() LN%d, memory allocation failed\n", __func__, __LINE__); @@ -3499,17 +3422,8 @@ int64_t queryDbForDumpOutCount( dbName, g_escapeChar, tbName, g_escapeChar, startTime, endTime); -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - count = queryDbForDumpOutCountWS( - command, taos_v, dbName, tbName, precision); - } else { -#endif - count = queryDbForDumpOutCountNative( + int64_t count = queryDbForDumpOutCountNative( command, *taos_v, dbName, tbName, precision); -#ifdef WEBSOCKET - } -#endif return count; } @@ -3563,16 +3477,7 @@ void *queryDbForDumpOutOffset( start_time, end_time, limit, offset); } - void *res = NULL; -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - res = queryDbForDumpOutOffsetWS(taos_v, command); - } else { -#endif - res = queryDbForDumpOutOffsetNative(*taos_v, command); -#ifdef WEBSOCKET - } -#endif + void *res = queryDbForDumpOutOffsetNative(*taos_v, command); return res; } @@ -4697,17 +4602,8 @@ static int64_t dumpInAvroTbTagsImpl( if ((0 == strlen(tableDes->name)) || (0 != strcmp(tableDes->name, stbName))) { -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - getTableDesWS(taos_v, namespace, + getTableDesNative(*taos_v, namespace, stbName, tableDes, false); - } else { -#endif - getTableDesNative(*taos_v, namespace, - stbName, tableDes, false); -#ifdef WEBSOCKET - } -#endif } avro_value_get_by_name(&value, "tbname", &field_value, NULL); @@ -4831,34 +4727,16 @@ static int64_t dumpInAvroTbTagsImpl( curr_sqlstr_len += sprintf(sqlstr + curr_sqlstr_len-1, ")"); debugPrint("%s() LN%d, sqlstr=\n%s\n", __func__, __LINE__, sqlstr); freeTbNameIfLooseMode(stbName); -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - int32_t ws_code = -1; - WS_RES *ws_res = wsQuery(taos_v, sqlstr, &ws_code); - if (ws_code != 0) { - warnPrint("%s() LN%d ws_query() failed! 
reason: %s\n", - __func__, __LINE__, ws_errstr(ws_res)); - failed++; - } else { - success++; - } - ws_free_result(ws_res); - ws_res = NULL; + int32_t code = -1; + TAOS_RES *res = taosQuery(*taos_v, sqlstr, &code); + if (code != 0) { + warnPrint("%s() LN%d taosQuery() failed! sqlstr: %s, reason: %s\n", + __func__, __LINE__, sqlstr, taos_errstr(res)); + failed++; } else { -#endif - int32_t code = -1; - TAOS_RES *res = taosQuery(*taos_v, sqlstr, &code); - if (code != 0) { - warnPrint("%s() LN%d taosQuery() failed! sqlstr: %s, reason: %s\n", - __func__, __LINE__, sqlstr, taos_errstr(res)); - failed++; - } else { - success++; - } - taos_free_result(res); -#ifdef WEBSOCKET + success++; } -#endif + taos_free_result(res); } avro_value_decref(&value); @@ -4910,40 +4788,20 @@ static int64_t dumpInAvroNtbImpl( buf = newBuf; } -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - int32_t ws_code = -1; - WS_RES *ws_res = wsQuery(taos_v, buf, &ws_code); - if (0 != ws_code) { - errorPrint("%s() LN%d," - " Failed to execute ws_query(%s)." - " ws_taos: %p, code: 0x%08x, reason: %s\n", - __func__, __LINE__, buf, - *taos_v, ws_code, ws_errstr(ws_res)); - failed++; - } else { - success++; - } - ws_free_result(ws_res); - ws_res = NULL; + int32_t code = -1; + TAOS_RES *res = taosQuery(*taos_v, buf, &code); + if (0 != code) { + errorPrint("%s() LN%d," + " Failed to execute taosQuery(%s)." + " taos: %p, code: 0x%08x, reason: %s\n", + __func__, __LINE__, buf, + *taos_v, code, taos_errstr(res)); + failed++; } else { -#endif - int32_t code = -1; - TAOS_RES *res = taosQuery(*taos_v, buf, &code); - if (0 != code) { - errorPrint("%s() LN%d," - " Failed to execute taosQuery(%s)." - " taos: %p, code: 0x%08x, reason: %s\n", - __func__, __LINE__, buf, - *taos_v, code, taos_errstr(res)); - failed++; - } else { - success++; - } - taos_free_result(res); -#ifdef WEBSOCKET + success++; } -#endif + taos_free_result(res); + // free if (newBuf) { free(newBuf); @@ -5589,6 +5447,39 @@ static void countFailureAndFree(char *bindArray, freeTbNameIfLooseMode(tbName); } +// stmt prepare +static int32_t prepareStmt(TAOS_STMT *stmt, RecordSchema *recordSchema, char *tbName, int32_t *onlyCol) { + char *sql = calloc(1, TSDB_MAX_ALLOWED_SQL_LEN); + if (NULL == sql) { + errorPrint("%s() LN%d, memory allocation failed!\n", __func__, __LINE__); + return -1; + } + + char *pstr = sql; + pstr += snprintf(pstr, TSDB_MAX_ALLOWED_SQL_LEN, "INSERT INTO %s VALUES(?", tbName); + + for (int col = 1; col < recordSchema->num_fields + -(g_dumpInLooseModeFlag?0:1); col++) { + pstr += sprintf(pstr, ",?"); + (*onlyCol)++; + } + pstr += sprintf(pstr, ")"); + debugPrint("%s() LN%d, stmt buffer: %s\n", + __func__, __LINE__, sql); + + int code; + if (0 != (code = taos_stmt_prepare(stmt, sql, 0))) { + errorPrint("Failed to execute taos_stmt_prepare(). sql:%s reason: %s\n", + sql, taos_stmt_errstr(stmt)); + + free(sql); + return -1; + } + + free(sql); + return code; +} + static int64_t dumpInAvroDataImpl( void **taos_v, char *namespace, @@ -5597,126 +5488,30 @@ static int64_t dumpInAvroDataImpl( RecordSchema *recordSchema, char *fileName) { TAOS_STMT *stmt = NULL; -#ifdef WEBSOCKET - WS_STMT *ws_stmt = NULL; - if (g_args.cloud || g_args.restful) { - ws_stmt = ws_stmt_init(*taos_v); - int32_t ws_code = ws_errno(ws_stmt); - if (ws_code) { - errorPrint("%s() LN%d, stmt init failed! 
ws_taos: %p," - " code: 0x%08x, reason: %s\n", - __func__, __LINE__, *taos_v, ws_code, ws_errstr(ws_stmt)); - return -1; - } - } else { -#endif - stmt = taos_stmt_init(*taos_v); - if (NULL == stmt) { - errorPrint("%s() LN%d, stmt init failed! taos: %p, code: 0x%08x, " - "reason: %s\n", - __func__, __LINE__, *taos_v, - taos_errno(NULL), taos_errstr(NULL)); - return -1; - } -#ifdef WEBSOCKET + stmt = taos_stmt_init(*taos_v); + if (NULL == stmt) { + errorPrint("%s() LN%d, stmt init failed! taos: %p, code: 0x%08x, " + "reason: %s\n", + __func__, __LINE__, *taos_v, + taos_errno(NULL), taos_errstr(NULL)); + return -1; } -#endif TableDes *tableDes = (TableDes *)calloc(1, sizeof(TableDes) + sizeof(ColDes) * TSDB_MAX_COLUMNS); if (NULL == tableDes) { errorPrint("%s() LN%d, memory allocation failed!\n", __func__, __LINE__); -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - ws_stmt_close(ws_stmt); - } else { -#endif - taos_stmt_close(stmt); -#ifdef WEBSOCKET - } -#endif + taos_stmt_close(stmt); return -1; } - char *stmtBuffer = calloc(1, TSDB_MAX_ALLOWED_SQL_LEN); - if (NULL == stmtBuffer) { - errorPrint("%s() LN%d, memory allocation failed!\n", __func__, __LINE__); - free(tableDes); -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - ws_stmt_close(ws_stmt); - } else { -#endif - taos_stmt_close(stmt); -#ifdef WEBSOCKET - } -#endif - return -1; - } - - char *pstr = stmtBuffer; - pstr += snprintf(pstr, TSDB_MAX_ALLOWED_SQL_LEN, "INSERT INTO ? VALUES(?"); - - int32_t onlyCol = 1; // at least timestamp - for (int col = 1; col < recordSchema->num_fields - -(g_dumpInLooseModeFlag?0:1); col++) { - pstr += sprintf(pstr, ",?"); - onlyCol++; - } - pstr += sprintf(pstr, ")"); - debugPrint("%s() LN%d, stmt buffer: %s\n", - __func__, __LINE__, stmtBuffer); - - int code; -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - if (0 != (code = ws_stmt_prepare(ws_stmt, stmtBuffer, strlen(stmtBuffer)))) { - errorPrint("%s() LN%d, failed to execute ws_stmt_prepare()." - " ws_taos: %p, code: 0x%08x, reason: %s\n", - __func__, __LINE__, - *taos_v, code, ws_errstr(ws_stmt)); - - free(stmtBuffer); - free(tableDes); - ws_stmt_close(ws_stmt); - return -1; - } - } else { -#endif - if (0 != (code = taos_stmt_prepare(stmt, stmtBuffer, 0))) { - errorPrint("Failed to execute taos_stmt_prepare(). 
reason: %s\n", - taos_stmt_errstr(stmt)); - - free(stmtBuffer); - free(tableDes); - taos_stmt_close(stmt); - return -1; - } -#ifdef WEBSOCKET - } -#endif + int32_t code = 0; + int32_t onlyCol = 1; + char *bindArray = NULL; avro_value_iface_t *value_class = avro_generic_class_from_schema(schema); avro_value_t value; avro_generic_value_new(value_class, &value); - char *bindArray = - calloc(1, sizeof(TAOS_MULTI_BIND) * onlyCol); - if (NULL == bindArray) { - errorPrint("%s() LN%d, memory allocation failed!\n", __func__, __LINE__); - free(stmtBuffer); - free(tableDes); -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - ws_stmt_close(ws_stmt); - } else { -#endif - taos_stmt_close(stmt); -#ifdef WEBSOCKET - } -#endif - return -1; - } - int64_t success = 0; int64_t failed = 0; int64_t count = 0; @@ -5757,78 +5552,48 @@ static int64_t dumpInAvroDataImpl( char *escapedTbName = calloc(1, escapedTbNameLen); if (NULL == escapedTbName) { errorPrint("%s() LN%d, memory allocation failed!\n", __func__, __LINE__); - free(bindArray); - free(stmtBuffer); free(tableDes); tfree(tbName); - #ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - ws_stmt_close(ws_stmt); - } else { - #endif - taos_stmt_close(stmt); - #ifdef WEBSOCKET - } - #endif + taos_stmt_close(stmt); return -1; } - #ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - snprintf(escapedTbName, escapedTbNameLen, "%s.%s%s%s", - namespace, g_escapeChar, tbName, g_escapeChar); + snprintf(escapedTbName, escapedTbNameLen, "%s%s%s", + g_escapeChar, tbName, g_escapeChar); - debugPrint("%s() LN%d escaped table: %s\n", - __func__, __LINE__, escapedTbName); + debugPrint("%s() LN%d escaped table: %s\n", + __func__, __LINE__, escapedTbName); - debugPrint("%s() LN%d, stmt: %p, will call ws_stmt_set_tbname(%s)\n", - __func__, __LINE__, ws_stmt, escapedTbName); - if (0 != (code = ws_stmt_set_tbname(ws_stmt, escapedTbName))) { - errorPrint("%s() LN%d, failed to execute ws_stmt_set_tbname(%s)." - " ws_taos: %p, code: 0x%08x, reason: %s\n", - __func__, __LINE__, - escapedTbName, *taos_v, code, ws_errstr(ws_stmt)); - free(escapedTbName); - freeTbNameIfLooseMode(tbName); - continue; - } - debugPrint("%s() LN%d, stmt: %p, ws_stmt_set_tbname(%s) done\n", - __func__, __LINE__, ws_stmt, escapedTbName); - } else { - #endif - snprintf(escapedTbName, escapedTbNameLen, "%s%s%s", - g_escapeChar, tbName, g_escapeChar); - - debugPrint("%s() LN%d escaped table: %s\n", - __func__, __LINE__, escapedTbName); - - if (0 != taos_stmt_set_tbname(stmt, escapedTbName)) { - errorPrint("Failed to execute taos_stmt_set_tbname(%s)." 
- "reason: %s\n", - escapedTbName, taos_stmt_errstr(stmt)); - free(escapedTbName); - free(tbName); - tbName = NULL; - continue; - } - #ifdef WEBSOCKET + // prepare + code = prepareStmt(stmt, recordSchema, escapedTbName, &onlyCol); + if (code) { + free(tableDes); + free(tbName); + free(escapedTbName); + taos_stmt_close(stmt); + return -1; + } + + // maloc bind + if (bindArray == NULL) { + bindArray = calloc(1, sizeof(TAOS_MULTI_BIND) * onlyCol); + if (NULL == bindArray) { + errorPrint("%s() LN%d, memory allocation failed!\n", __func__, __LINE__); + free(tableDes); + free(tbName); + free(escapedTbName); + taos_stmt_close(stmt); + return -1; + } } - #endif free(escapedTbName); + + // get table des if ((0 == strlen(tableDes->name)) || (0 != strcmp(tableDes->name, tbName))) { - #ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - getTableDesWS(taos_v, namespace, + getTableDesNative(*taos_v, namespace, tbName, tableDes, true); - } else { - #endif - getTableDesNative(*taos_v, namespace, - tbName, tableDes, true); - #ifdef WEBSOCKET - } - #endif - } + } } // tbName debugPrint("%s() LN%d, count: %"PRId64"\n", @@ -6044,133 +5809,62 @@ static int64_t dumpInAvroDataImpl( bind->num = 1; } debugPrint2("%s", "\n"); -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - if (0 != (code = ws_stmt_bind_param_batch(ws_stmt, - (const WS_MULTI_BIND *)bindArray, onlyCol))) { - errorPrint("%s() LN%d ws_stmt_bind_param_batch() failed!" - " ws_taos: %p, code: 0x%08x, reason: %s\n", - __func__, __LINE__, *taos_v, code, ws_errstr(ws_stmt)); - countFailureAndFree(bindArray, onlyCol, &failed, tbName); - continue; - } - - if (0 != (code = ws_stmt_add_batch(ws_stmt))) { - errorPrint("%s() LN%d stmt_bind_param() failed!" - " ws_taos: %p, code: 0x%08x, reason: %s\n", - __func__, __LINE__, *taos_v, code, ws_errstr(ws_stmt)); - countFailureAndFree(bindArray, onlyCol, &failed, tbName); - continue; - } - - if ( 0 == (count % g_args.data_batch) ) { - // batch to exec - int32_t affected_rows; - if (0 != (code = ws_stmt_execute(ws_stmt, &affected_rows))) { - errorPrint("%s() LN%d ws_stmt_execute() failed!" - " ws_taos: %p, code: 0x%08x, reason: %s, " - "timestamp: %"PRId64" count=%"PRId64"\n", - __func__, __LINE__, *taos_v, code, - ws_errstr(ws_stmt), ts_debug, count); - countFailureAndFree(bindArray, onlyCol, &failed, tbName); - continue; - } else { - success += g_args.data_batch; - debugPrint("ok call ws_stmt_execute count=%"PRId64" success=%"PRId64" failed=%"PRId64"\n", - count, success, failed); - } - } - } else { -#endif - if (0 != (code = taos_stmt_bind_param_batch(stmt, - (TAOS_MULTI_BIND *)bindArray))) { - errorPrint("%s() LN%d stmt_bind_param_batch() failed! " - "reason: %s\n", - __func__, __LINE__, taos_stmt_errstr(stmt)); - countFailureAndFree(bindArray, onlyCol, &failed, tbName); - continue; - } - - if (0 != (code = taos_stmt_add_batch(stmt))) { - errorPrint("%s() LN%d stmt_bind_param() failed! reason: %s\n", + if (0 != (code = taos_stmt_bind_param_batch(stmt, + (TAOS_MULTI_BIND *)bindArray))) { + errorPrint("%s() LN%d stmt_bind_param_batch() failed! " + "reason: %s\n", __func__, __LINE__, taos_stmt_errstr(stmt)); + countFailureAndFree(bindArray, onlyCol, &failed, tbName); + continue; + } + + if (0 != (code = taos_stmt_add_batch(stmt))) { + errorPrint("%s() LN%d stmt_bind_param() failed! 
reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + countFailureAndFree(bindArray, onlyCol, &failed, tbName); + continue; + } + + // batch execute + if ( 0 == (count % g_args.data_batch) ) { + if( 0 != (code = taos_stmt_execute(stmt)) ){ + if (code == TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE) { + countTSOutOfRange++; + } else { + errorPrint("%s() LN%d taos_stmt_execute() failed! " + "code: 0x%08x, reason: %s, timestamp: %"PRId64"\n", + __func__, __LINE__, + code, taos_stmt_errstr(stmt), ts_debug); + } countFailureAndFree(bindArray, onlyCol, &failed, tbName); continue; + } else { + success += g_args.data_batch; + debugPrint("ok call taos_stmt_execute count=%"PRId64" success=%"PRId64" failed=%"PRId64"\n", + count, success, failed); } - - // batch execute - if ( 0 == (count % g_args.data_batch) ) { - if( 0 != (code = taos_stmt_execute(stmt)) ){ - if (code == TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE) { - countTSOutOfRange++; - } else { - errorPrint("%s() LN%d taos_stmt_execute() failed! " - "code: 0x%08x, reason: %s, timestamp: %"PRId64"\n", - __func__, __LINE__, - code, taos_stmt_errstr(stmt), ts_debug); - } - countFailureAndFree(bindArray, onlyCol, &failed, tbName); - continue; - } else { - success += g_args.data_batch; - debugPrint("ok call taos_stmt_execute count=%"PRId64" success=%"PRId64" failed=%"PRId64"\n", - count, success, failed); - } - } -#ifdef WEBSOCKET } -#endif freeBindArray(bindArray, onlyCol); } // last batch execute -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - if ( 0 != (count % g_args.data_batch) ) { - int32_t affected_rows; - if (0 != (code = ws_stmt_execute(ws_stmt, &affected_rows))) { - errorPrint( - "%s() LN%d ws_stmt_execute() failed!" - " ws_taos: %p, code: 0x%08x, reason: %s \n", - __func__, __LINE__, *taos_v, code, ws_errstr(ws_stmt)); - failed++; - } else { - success += count % g_args.data_batch; - debugPrint("ok call last ws_stmt_execute count=%"PRId64" success=%"PRId64" failed=%"PRId64"\n", - count, success, failed); - } + if (0 != (count % g_args.data_batch)) { + if (0 != (code = taos_stmt_execute(stmt))) { + errorPrint("error last execute taos_stmt_execute. errstr=%s\n", taos_stmt_errstr(stmt)); + failed++; + } else { + success += count % g_args.data_batch; + debugPrint("ok call last ws_stmt_execute count=%"PRId64" success=%"PRId64" failed=%"PRId64"\n", + count, success, failed); } - } else { -#endif - if (0 != (count % g_args.data_batch)) { - if (0 != (code = taos_stmt_execute(stmt))) { - errorPrint("error last execute taos_stmt_execute. 
errstr=%s\n", taos_stmt_errstr(stmt)); - failed++; - } else { - success += count % g_args.data_batch; - debugPrint("ok call last ws_stmt_execute count=%"PRId64" success=%"PRId64" failed=%"PRId64"\n", - count, success, failed); - } - } -#ifdef WEBSOCKET } -#endif free(tbName); avro_value_decref(&value); avro_value_iface_decref(value_class); tfree(bindArray); - tfree(stmtBuffer); freeTbDes(tableDes, true); -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - ws_stmt_close(ws_stmt); - } else { -#endif - taos_stmt_close(stmt); -#ifdef WEBSOCKET - } -#endif + taos_stmt_close(stmt); if (failed) { if (countTSOutOfRange) { errorPrint("Total %"PRId64" record(s) ts out of range!\n", @@ -6266,15 +5960,7 @@ static RecordSchema *getSchemaAndReaderFromFile( } static void closeTaosConnWrapper(void *taos) { -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - ws_close(taos); - } else { -#endif - taos_close(taos); -#ifdef WEBSOCKET - } -#endif + taos_close(taos); } static int64_t dumpInOneAvroFile( @@ -6310,22 +5996,10 @@ static int64_t dumpInOneAvroFile( TAOS *taos = NULL; void **taos_v = NULL; -#ifdef WEBSOCKET - WS_TAOS *ws_taos = NULL; - if (g_args.cloud || g_args.restful) { - if (NULL == (ws_taos = wsConnect())) { - return -1; - } - taos_v = &ws_taos; - } else { -#endif - if (NULL == (taos = taosConnect(namespace))) { - return -1; - } - taos_v = &taos; -#ifdef WEBSOCKET + if (NULL == (taos = taosConnect(namespace))) { + return -1; } -#endif + taos_v = &taos; int64_t retExec = 0; switch (avroType) { @@ -7028,27 +6702,15 @@ static int64_t dumpTableDataAvro( return -1; } - int64_t rows; - int64_t start_time = getStartTime(precision); int64_t end_time = getEndTime(precision); if ((-1 == start_time) || (-1 == end_time)) { return -1; } -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - rows = dumpTableDataAvroWS(dataFilename, index, tbName, + int64_t rows = dumpTableDataAvroNative(dataFilename, index, tbName, belongStb, dbInfo->name, precision, colCount, tableDes, start_time, end_time); - } else { -#endif - rows = dumpTableDataAvroNative(dataFilename, index, tbName, - belongStb, dbInfo->name, precision, colCount, tableDes, - start_time, end_time); -#ifdef WEBSOCKET - } -#endif return rows; } @@ -7093,18 +6755,9 @@ static int64_t dumpTableData( return -1; } - int64_t rows; -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - rows = dumpTableDataWS(index, fp, tbName, dbInfo->name, + + int64_t rows = dumpTableDataNative(index, fp, tbName, dbInfo->name, precision, tableDes, start_time, end_time); - } else { -#endif - rows = dumpTableDataNative(index, fp, tbName, dbInfo->name, - precision, tableDes, start_time, end_time); -#ifdef WEBSOCKET - } -#endif return rows; } @@ -7138,17 +6791,8 @@ int64_t dumpNormalTable( __func__, __LINE__); return -1; } -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - numColsAndTags = getTableDesWS(taos_v, + numColsAndTags = getTableDesNative(*taos_v, dbInfo->name, tbName, tableDes, !belongStb); - } else { -#endif - numColsAndTags = getTableDesNative(*taos_v, - dbInfo->name, tbName, tableDes, !belongStb); -#ifdef WEBSOCKET - } -#endif if (numColsAndTags < 0) { errorPrint("%s() LN%d, failed to get table[%s] schema\n", @@ -7169,18 +6813,8 @@ int64_t dumpNormalTable( errorPrint("%s() LN%d, memory allocation failed!\n", __func__, __LINE__); return -1; } -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - numColsAndTags = getTableDesWS( - taos_v, + numColsAndTags = getTableDesNative(*taos_v, dbInfo->name, tbName, tableDes, !belongStb); - } 
else { -#endif - numColsAndTags = getTableDesNative(*taos_v, - dbInfo->name, tbName, tableDes, !belongStb); -#ifdef WEBSOCKET - } -#endif if (numColsAndTags < 0) { errorPrint("%s() LN%d, failed to get table[%s] schema\n", @@ -7242,22 +6876,9 @@ int64_t dumpNormalTable( __func__, __LINE__); return -1; } -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - numColsAndTags = getTableDesFromStbWS( - taos_v, - dbInfo->name, - stbDes, - tbName, &tableDes); - - } else { -#endif - numColsAndTags = getTableDesFromStbNative( + numColsAndTags = getTableDesFromStbNative( *taos_v, dbInfo->name, stbDes, tbName, &tableDes); -#ifdef WEBSOCKET - } -#endif if (numColsAndTags < 0) { errorPrint("%s() LN%d columns/tags count is %d\n", __func__, __LINE__, numColsAndTags); @@ -7416,34 +7037,10 @@ static int createMTableAvroHeadImp( return -1; } - int colCount = 0; - colCount = colCount; // reduce compile warning -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - colCount = getTableDesFromStbWS( - (WS_TAOS*)taos_v, - dbName, + getTableDesFromStbNative(*taos_v, dbName, stbTableDes, tbName, &subTableDes); - } else { -#endif // WEBSOCKET - colCount = getTableDesFromStbNative(*taos_v, dbName, - stbTableDes, - tbName, - &subTableDes); -#ifdef WEBSOCKET - } - - if (colCount < 0) { - errorPrint("%s() LN%d, columns count is %d\n", - __func__, __LINE__, colCount); - if (subTableDes) { - freeTbDes(subTableDes, true); - } - return -1; - } -#endif for (int tag = 0; tag < subTableDes->tags; tag++) { debugPrint("%s() LN%d, sub table %s no. %d tags is %s, " @@ -7785,15 +7382,7 @@ static int createMTableAvroHeadSpecified( return -1; } -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - getTableDesWS(taos_v, dbName, stable, stbTableDes, false); - } else { -#endif - getTableDesNative(*taos_v, dbName, stable, stbTableDes, false); -#ifdef WEBSOCKET - } -#endif + getTableDesNative(*taos_v, dbName, stable, stbTableDes, false); char *jsonTagsSchema = NULL; if (0 != convertTbTagsDesToJsonWrap( @@ -7925,19 +7514,8 @@ static int64_t fillTbNameArr( debugPrint("%s() LN%d, run command <%s>.\n", __func__, __LINE__, command2); - int64_t ntbCount; - -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - ntbCount = fillTbNameArrWS( - taos_v, command2, tbNameArr, stable, preCount); - } else { -#endif - ntbCount = fillTbNameArrNative( + int64_t ntbCount = fillTbNameArrNative( *taos_v, command2, tbNameArr, stable, preCount); -#ifdef WEBSOCKET - } -#endif infoPrint("The number of tables of %s be filled is %"PRId64"!\n", stable, ntbCount); @@ -8313,46 +7891,21 @@ static int writeTagsToAvro( // open query with native or websocket void* openQuery(void** taos_v, const char * sql) { -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - int32_t ws_code = -1; - WS_RES *ws_res = wsQuery(taos_v, sql, &ws_code); - if (ws_code != 0) { - errorPrint("exe sql:%s failed. error code =%d\n", sql, ws_code); - return NULL; - } - return ws_res; - } else { -#endif - int32_t code = -1; - TAOS_RES* res = taosQuery(*taos_v, sql, &code); - if (code != 0) { - taos_free_result(res); - errorPrint("open query: %s execute failed. errcode=%d\n", sql, code); - return NULL; - } - return res; -#ifdef WEBSOCKET + int32_t code = -1; + TAOS_RES* res = taosQuery(*taos_v, sql, &code); + if (code != 0) { + taos_free_result(res); + errorPrint("open query: %s execute failed. 
errcode=%d\n", sql, code); + return NULL; } -#endif + return res; } // close query and free result void closeQuery(void* res) { -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - if(res) { - ws_free_result(res); - } - return ; - } else { -#endif - if(res) { - taos_free_result(res); - } -#ifdef WEBSOCKET - } -#endif + if(res) { + taos_free_result(res); + } } // read next table tags to tbDes @@ -8477,11 +8030,6 @@ static int dumpStableMeta( return -1; } -#ifdef WEBSOCKET - int idx = 0; - int cnt = 0; -#endif - // loop read tables des int size = sizeof(TableDes) + sizeof(ColDes) * stbDes->tags; TableDes *tbDes = calloc(1, size); @@ -8492,16 +8040,7 @@ static int dumpStableMeta( memset(tbDes->name, 0, sizeof(tbDes->name)); // reset zero tbDes->tags = stbDes->tags; // stable tags same with child table memcpy(tbDes->cols, &stbDes->cols[stbDes->columns], sizeof(ColDes)* stbDes->tags); // copy tag info - int ret; -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - ret = readNextTableDesWS(tagsRes, tbDes, &idx, &cnt); - } else { -#endif - ret = readNextTableDesNative(tagsRes, tbDes); -#ifdef WEBSOCKET - } -#endif + int32_t ret = readNextTableDesNative(tagsRes, tbDes); if(ret < 0){ // read error @@ -8654,6 +8193,8 @@ static void printArgs(FILE *file) { fprintf(file, "loose_mode: %s\n", g_args.loose_mode?"true":"false"); fprintf(file, "isDumpIn: %s\n", g_args.isDumpIn?"true":"false"); fprintf(file, "arg_list_len: %d\n", g_args.arg_list_len); + +/* TODO #ifdef WEBSOCKET if (g_args.cloud) { fprintf(file, "cloud: %s\n", g_args.cloud?"true":"false"); @@ -8670,6 +8211,7 @@ static void printArgs(FILE *file) { } } #endif // WEBSOCKET +*/ fflush(file); } @@ -8870,17 +8412,8 @@ static int dumpExtraInfoHead(void *taos, FILE *fp) { return -1; } -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - snprintf(buffer, BUFFER_LEN, "#!server_ver: %s\n", - ws_get_server_info(taos)); - } else { -#endif - snprintf(buffer, BUFFER_LEN, "#!server_ver: %s\n", + snprintf(buffer, BUFFER_LEN, "#!server_ver: %s\n", taos_get_server_info(taos)); -#ifdef WEBSOCKET - } -#endif char *firstline = strchr(buffer, '\n'); @@ -8964,15 +8497,7 @@ static int dumpExtraInfo(void **taos_v, FILE *fp) { return ret; } -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - dumpExtraInfoVarWS(taos_v, fp); - } else { -#endif - dumpExtraInfoVar(*taos_v, fp); -#ifdef WEBSOCKET - } -#endif + dumpExtraInfoVar(*taos_v, fp); ret = ferror(fp); @@ -9165,15 +8690,7 @@ static int64_t dumpInOneDebugFile( } debugPrint("%s() LN%d, cmd: %s\n", __func__, __LINE__, cmd); -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - ret = queryDbImplWS(taos_v, newSql?newSql:cmd); - } else { -#endif - ret = queryDbImplNative(*taos_v, newSql?newSql:cmd); -#ifdef WEBSOCKET - } -#endif + ret = queryDbImplNative(*taos_v, newSql?newSql:cmd); // free if (newSql) { free(newSql); @@ -9305,23 +8822,11 @@ static int dumpInDebugWorkThreads(const char *dbPath) { " from %"PRId64"\n", t, pThreadInfo->count, pThreadInfo->from); -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - if (NULL == (pThreadInfo->taos = wsConnect())) { - free(infos); - free(pids); - return -1; - } - } else { -#endif // WEBSOCKET - if (NULL == (pThreadInfo->taos = taosConnect(NULL))) { - free(infos); - free(pids); - return -1; - } -#ifdef WEBSOCKET + if (NULL == (pThreadInfo->taos = taosConnect(NULL))) { + free(infos); + free(pids); + return -1; } -#endif // WEBSOCKET if (pthread_create(pids + t, NULL, dumpInDebugWorkThreadFp, (void*)pThreadInfo) != 0) { @@ -9344,15 +8849,7 
@@ static int dumpInDebugWorkThreads(const char *dbPath) { } for (int32_t t = 0; t < threads; ++t) { -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - ws_close(infos[t].taos); - } else { -#endif - taos_close(infos[t].taos); -#ifdef WEBSOCKET - } -#endif // WEBSOCKET + taos_close(infos[t].taos); } for (int32_t t = 0; t < threads; ++t) { @@ -9371,22 +8868,10 @@ static int dumpInDebugWorkThreads(const char *dbPath) { static int dumpInDbs(const char *dbPath) { void **taos_v = NULL; TAOS *taos = NULL; -#ifdef WEBSOCKET - WS_TAOS *ws_taos = NULL; - if (g_args.cloud || g_args.restful) { - if (NULL == (ws_taos = wsConnect())) { - return -1; - } - taos_v = &ws_taos; - } else { -#endif - if (NULL == (taos = taosConnect(NULL))) { - return -1; - } - taos_v = &taos; -#ifdef WEBSOCKET + if (NULL == (taos = taosConnect(NULL))) { + return -1; } -#endif + taos_v = &taos; char dbsSql[MAX_PATH_LEN]; snprintf(dbsSql, MAX_PATH_LEN, "%s/%s", dbPath, "dbs.sql"); @@ -9628,15 +9113,7 @@ static void *dumpTablesOfStbThread(void *arg) { } } -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - dumpNormalTablesOfStbWS(pThreadInfo, fp, dumpFilename); - } else { -#endif - dumpTablesOfStbNative(pThreadInfo, fp, dumpFilename); -#ifdef WEBSOCKET - } -#endif + dumpTablesOfStbNative(pThreadInfo, fp, dumpFilename); if (fp) { fclose(fp); fp = NULL; @@ -9665,28 +9142,11 @@ int dumpSTableData(SDbInfo* dbInfo, TableDes* stbDes, char** tbNameArr, int64_t threadInfo *pThreadInfo; for (int32_t i = 0; i < threads; i++) { pThreadInfo = infos + i; -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - if (NULL == (pThreadInfo->taos = wsConnect())) { - errorPrint("%s() LN%d, Failed to connect to server, " - "reason: %s\n", - __func__, - __LINE__, - ws_errstr(NULL)); - free(pids); - free(infos); - return -1; - } - } else { -#endif // WEBSOCKET - if (NULL == (pThreadInfo->taos = taosConnect(dbInfo->name))) { - free(pids); - free(infos); - return -1; - } -#ifdef WEBSOCKET + if (NULL == (pThreadInfo->taos = taosConnect(dbInfo->name))) { + free(pids); + free(infos); + return -1; } -#endif pThreadInfo->threadIndex = i; pThreadInfo->count = (i < mod) ? 
batch+1 : batch; @@ -9727,15 +9187,7 @@ int dumpSTableData(SDbInfo* dbInfo, TableDes* stbDes, char** tbNameArr, int64_t stbDes->name, tbCount); for (int32_t i = 0; i < threads; i++) { pThreadInfo = infos + i; -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - ws_close(pThreadInfo->taos); - } else { -#endif // WEBSOCKET - taos_close(pThreadInfo->taos); -#ifdef WEBSOCKET - } -#endif + taos_close(pThreadInfo->taos); } free(pids); @@ -9776,18 +9228,8 @@ static int64_t dumpStable( } // obtain stable des data - int colCount = 0; -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - colCount = getTableDesWS(taos_v, dbInfo->name, + int32_t colCount = getTableDesNative(*taos_v, dbInfo->name, stbName, stbDes, true); - } else { -#endif - colCount = getTableDesNative(*taos_v, dbInfo->name, - stbName, stbDes, true); -#ifdef WEBSOCKET - } -#endif if (colCount < 0) { errorPrint("%s() LN%d, failed to get stable[%s] schema\n", __func__, __LINE__, stbName); @@ -9799,16 +9241,7 @@ static int64_t dumpStable( stbName, stbDes->columns, stbDes->tags); // get stable child count - int64_t tbCount = 0; -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - tbCount = getNtbCountOfStbWS(dbInfo->name, stbName); - } else { -#endif - tbCount = getTbCountOfStbNative(dbInfo->name, stbName); -#ifdef WEBSOCKET - } -#endif + int64_t tbCount = getTbCountOfStbNative(dbInfo->name, stbName); if(tbCount < 0 ) { errorPrint("get stable %s failed.", stbName); freeTbDes(stbDes, true); @@ -10138,21 +9571,10 @@ static int64_t dumpWholeDatabase(void **taos_v, SDbInfo *dbInfo, FILE *fp) { atomic_add_fetch_64( &g_resultStatistics.totalDatabasesOfDumpOut, 1); -#ifdef WEBSOCKET - if (g_args.cloud || g_args.restful) { - ret = dumpStbAndChildTbOfDbWS(taos_v, dbInfo, fpDbs); - if (ret >= 0) { - ret = dumpNTablesOfDbWS(taos_v, dbInfo); - } - } else { -#endif - ret = dumpStbAndChildTbOfDbNative(taos_v, dbInfo, fpDbs); - if (ret >= 0) { - ret = dumpNTablesOfDbNative(taos_v, dbInfo); - } -#ifdef WEBSOCKET + ret = dumpStbAndChildTbOfDbNative(taos_v, dbInfo, fpDbs); + if (ret >= 0) { + ret = dumpNTablesOfDbNative(taos_v, dbInfo); } -#endif if (AVRO_CODEC_UNKNOWN != g_args.avro_codec) { fclose(fpDbs); } @@ -10637,41 +10059,19 @@ static int dumpOut() { /* Connect to server and dump extra info*/ void **taos_v = NULL; -#ifdef WEBSOCKET - WS_TAOS *ws_taos = NULL; - - if (g_args.cloud || g_args.restful) { - if (NULL == (ws_taos = wsConnect())) { - ret = -1; - goto _exit_failure; - } - - taos_v = &ws_taos; - ret = dumpExtraInfo(taos_v, fp); - - if (ret < 0) { - goto _exit_failure; - } - - dbCount = fillDbInfoWS(taos_v); - } else { -#endif - if (NULL == (taos = taosConnect(NULL))) { - ret = -1; - goto _exit_failure; - } - - taos_v = &taos; - ret = dumpExtraInfo(taos_v, fp); - - if (ret < 0) { - goto _exit_failure; - } - - dbCount = fillDbInfoNative(taos); -#ifdef WEBSOCKET + if (NULL == (taos = taosConnect(NULL))) { + ret = -1; + goto _exit_failure; } -#endif + + taos_v = &taos; + ret = dumpExtraInfo(taos_v, fp); + + if (ret < 0) { + goto _exit_failure; + } + + dbCount = fillDbInfoNative(taos); if (dbCount <= 0) { errorPrint("%d database(s) valid to dump\n", dbCount); @@ -10823,33 +10223,6 @@ _exit_failure_2: static int dumpEntry() { int ret = 0; -#ifdef WEBSOCKET - if ( g_args.debug_print) { - ws_enable_log("trace"); - printf("ws_enable_log(\"trace\");\n"); - } else { - ws_enable_log("error"); - printf("ws_enable_log(\"error\");\n"); - } - - if (NULL == g_args.dsn) { - g_args.dsn = getenv("TDENGINE_CLOUD_DSN"); - if (NULL == g_args.dsn) 
{
-            g_args.cloud = false;
-        } else {
-            g_args.cloud = true;
-        }
-    } else {
-        g_args.cloud = true;
-    }
-
-    if (g_args.cloud) {
-        splitCloudDsn();
-    } else if (g_args.restful) {
-        jointCloudDsn();
-    }
-#endif // WEBSOCKET
-
     if (checkParam() < 0) {
         exit(EXIT_FAILURE);
     }
@@ -11483,6 +10856,7 @@ static int inspectAvroFiles(int argc, char *argv[]) {
     return ret;
 }
+
 int main(int argc, char *argv[]) {
     g_uniqueID = getUniqueIDFromEpoch();
@@ -11494,12 +10868,13 @@ int main(int argc, char *argv[]) {
         parse_args(argc, argv, &g_args);
     }
+    // command line
     argp_parse(&argp, argc, argv, 0, 0, &g_args);
-
     if (g_args.abort) {
         abort();
     }
+    // client info
     snprintf(g_client_info, MIDDLE_BUFF_LEN, "%s", taos_get_client_info());
     g_majorVersionOfClient = atoi(g_client_info);
     debugPrint("Client info: %s, major version: %d\n",
@@ -11515,6 +10890,31 @@ int main(int argc, char *argv[]) {
         }
     }
+
+    // env dsn
+    if ( NULL == g_args.dsn) {
+        char *dsn = getenv("TDENGINE_CLOUD_DSN");
+        if(dsn && dsn[0] != 0) {
+            if (g_args.connMode != CONN_MODE_NATIVE) {
+                g_args.dsn = dsn;
+                infoPrint("read dsn from env dsn=%s\n", dsn);
+            } else {
+                warnPrint("command line passed native mode, ignore env dsn:%s\n", dsn);
+            }
+        }
+    } else {
+        // check conflict
+        if (g_args.connMode == CONN_MODE_NATIVE) {
+            errorPrint("%s", DSN_NATIVE_CONFLICT);
+            return -1;
+        }
+    }
+
+    // conn mode
+    if (setConnMode(g_args.connMode, g_args.dsn) != 0) {
+        return -1;
+    }
+
+    // running
     if (g_args.inspect) {
         ret = inspectAvroFiles(argc, argv);
     } else {
diff --git a/tools/taos-tools/src/wsdump.c b/tools/taos-tools/src/wsdump.c
deleted file mode 100644
index 3ed0c461b9..0000000000
--- a/tools/taos-tools/src/wsdump.c
+++ /dev/null
@@ -1,1939 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the MIT license as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- */
-
-#define _GNU_SOURCE
-
-#ifdef WEBSOCKET
-
-#include "dump.h"
-#include "dumpUtil.h"
-
-int cleanIfQueryFailedWS(const char *funcname, int lineno, char *command, WS_RES *res) {
-    errorPrint("%s() LN%d, failed to run command <%s>. code: 0x%08x, reason: %s\n", funcname, lineno, command,
-               ws_errno(res), ws_errstr(res));
-    ws_free_result(res);
-    free(command);
-    return -1;
-}
-
-int getTableRecordInfoImplWS(char *dbName, char *table, TableRecordInfo *pTableRecordInfo, bool tryStable) {
-    WS_TAOS *ws_taos = NULL;
-    WS_RES *ws_res;
-    int32_t ws_code = -1;
-
-    if (NULL == (ws_taos = wsConnect())) {
-        return -1;
-    }
-    memset(pTableRecordInfo, 0, sizeof(TableRecordInfo));
-
-    char *command = calloc(1, TSDB_MAX_ALLOWED_SQL_LEN);
-    if (NULL == command) {
-        errorPrint("%s() LN%d, memory allocation failed\n", __func__, __LINE__);
-        return -1;
-    }
-
-    snprintf(command, TSDB_MAX_ALLOWED_SQL_LEN, g_args.db_escape_char ?
"USE `%s`" : "USE %s", dbName); - ws_res = wsQuery(&ws_taos, command, &ws_code); - if (ws_code != 0) { - errorPrint("Invalid database %s, reason: %s\n", dbName, ws_errstr(ws_res)); - ws_free_result(ws_res); - ws_res = NULL; - free(command); - return 0; - } - ws_free_result(ws_res); - - if (3 == g_majorVersionOfClient) { - if (tryStable) { - snprintf(command, TSDB_MAX_ALLOWED_SQL_LEN, - "SELECT STABLE_NAME FROM information_schema.ins_stables " - "WHERE db_name='%s' AND stable_name='%s'", - dbName, table); - } else { - snprintf(command, TSDB_MAX_ALLOWED_SQL_LEN, - "SELECT TABLE_NAME,STABLE_NAME FROM " - "information_schema.ins_tables " - "WHERE db_name='%s' AND table_name='%s'", - dbName, table); - } - } else { - if (tryStable) { - snprintf(command, TSDB_MAX_ALLOWED_SQL_LEN, "SHOW STABLES LIKE \'%s\'", table); - } else { - snprintf(command, TSDB_MAX_ALLOWED_SQL_LEN, "SHOW TABLES LIKE \'%s\'", table); - } - } - - ws_res = wsQuery(&ws_taos, command, &ws_code); - - if (ws_code != 0) { - cleanIfQueryFailedWS(__func__, __LINE__, command, ws_res); - ws_close(ws_taos); - return -1; - } - - bool isSet = false; - - while (true) { - int rows = 0; - const void *data = NULL; - ws_code = ws_fetch_raw_block(ws_res, &data, &rows); - if (ws_code) { - errorPrint("%s() LN%d, ws_fetch_raw_block() error. reason: %s!\n", __func__, __LINE__, ws_errstr(ws_res)); - ws_free_result(ws_res); - ws_res = NULL; - ws_close(ws_taos); - ws_taos = NULL; - free(command); - return 0; - } - - if (0 == rows) { - break; - } - - uint8_t type; - uint32_t length; - char buffer[TSDB_TABLE_NAME_LEN] = {0}; - - for (int row = 0; row < rows; row++) { - const void *value0 = ws_get_value_in_block(ws_res, row, TSDB_SHOW_DB_NAME_INDEX, &type, &length); - if (NULL == value0) { - errorPrint( - "%s() LN%d, row: %d, col: %d, " - "ws_get_value_in_block() error!\n", - __func__, __LINE__, row, TSDB_SHOW_DB_NAME_INDEX); - continue; - } - - memset(buffer, 0, TSDB_TABLE_NAME_LEN); - memcpy(buffer, value0, length); - - if (0 == strcmp(buffer, table)) { - if (tryStable) { - pTableRecordInfo->isStb = true; - tstrncpy(pTableRecordInfo->tableRecord.stable, buffer, min(TSDB_TABLE_NAME_LEN, length + 1)); - isSet = true; - } else { - pTableRecordInfo->isStb = false; - tstrncpy(pTableRecordInfo->tableRecord.name, buffer, min(TSDB_TABLE_NAME_LEN, length + 1)); - const void *value1 = NULL; - if (3 == g_majorVersionOfClient) { - value1 = ws_get_value_in_block(ws_res, row, 1, &type, &length); - } else { - value1 = ws_get_value_in_block(ws_res, row, TSDB_SHOW_TABLES_METRIC_INDEX, &type, &length); - } - if (length) { - if (NULL == value1) { - errorPrint( - "%s() LN%d, row: %d, col: %d, " - "ws_get_value_in_block() error!\n", - __func__, __LINE__, row, TSDB_SHOW_TABLES_METRIC_INDEX); - break; - } - - pTableRecordInfo->belongStb = true; - memset(buffer, 0, TSDB_TABLE_NAME_LEN); - memcpy(buffer, value1, length); - tstrncpy(pTableRecordInfo->tableRecord.stable, buffer, min(TSDB_TABLE_NAME_LEN, length + 1)); - } else { - pTableRecordInfo->belongStb = false; - } - isSet = true; - break; - } - } - } - - if (isSet) { - break; - } - } - - ws_free_result(ws_res); - ws_res = NULL; - ws_close(ws_taos); - ws_taos = NULL; - - free(command); - - if (isSet) { - return 0; - } - return -1; -} - -int getTableRecordInfoWS(char *dbName, char *table, TableRecordInfo *pTableRecordInfo) { - if (0 == getTableRecordInfoImplWS(dbName, table, pTableRecordInfo, false)) { - return 0; - } else if (0 == getTableRecordInfoImplWS(dbName, table, pTableRecordInfo, true)) { - return 0; - } - - 
errorPrint("Invalid table/stable %s\n", table); - return -1; -} - -int getDbCountWS(WS_RES *ws_res) { - int count = 0; - int32_t code; - - while (true) { - int rows = 0; - const void *data = NULL; - code = ws_fetch_raw_block(ws_res, &data, &rows); - if (code) { - errorPrint("%s() LN%d, ws_fetch_raw_block() error. reason: %s!\n", __func__, __LINE__, ws_errstr(ws_res)); - return 0; - } - - if (0 == rows) { - break; - } - - uint8_t type; - uint32_t length; - char buffer[VALUE_BUF_LEN] = {0}; - - for (int row = 0; row < rows; row++) { - const void *value = ws_get_value_in_block(ws_res, row, TSDB_SHOW_DB_NAME_INDEX, &type, &length); - if (NULL == value) { - errorPrint( - "%s() LN%d, row: %d, " - "ws_get_value_in_block() error!\n", - __func__, __LINE__, row); - continue; - } - - memset(buffer, 0, VALUE_BUF_LEN); - memcpy(buffer, value, length); - debugPrint("%s() LN%d, dbname: %s\n", __func__, __LINE__, buffer); - - if (isSystemDatabase(buffer)) { - if (!g_args.allow_sys) { - continue; - } - } else if (g_args.databases) { // input multi dbs - if (inDatabasesSeq(buffer) != 0) { - continue; - } - } else if (!g_args.all_databases) { // only input one db - if (strcmp(g_args.arg_list[0], buffer)) { - continue; - } - } - count++; - } - } - - return count; -} - -int64_t getNtbCountOfStbWS(char *dbName, const char *stbName) { - WS_TAOS *ws_taos; - if (NULL == (ws_taos = wsConnect())) { - return -1; - } - - int64_t count = 0; - char *command = calloc(1, TSDB_MAX_ALLOWED_SQL_LEN); - if (NULL == command) { - errorPrint("%s() LN%d, memory allocation failed\n", __func__, __LINE__); - return -1; - } - if (3 == g_majorVersionOfClient) { - snprintf(command, TSDB_MAX_ALLOWED_SQL_LEN, - g_args.db_escape_char ? "SELECT COUNT(*) FROM (SELECT DISTINCT(TBNAME) " - "FROM `%s`.%s%s%s)" - : "SELECT COUNT(*) FROM (SELECT DISTINCT(TBNAME) " - "FROM %s.%s%s%s)", - dbName, g_escapeChar, stbName, g_escapeChar); - } else { - snprintf( - command, TSDB_MAX_ALLOWED_SQL_LEN, - g_args.db_escape_char ? 
"SELECT COUNT(TBNAME) FROM `%s`.%s%s%s" : "SELECT COUNT(TBNAME) FROM %s.%s%s%s", - dbName, g_escapeChar, stbName, g_escapeChar); - } - debugPrint("get stable child count %s", command); - - int32_t ws_code = -1; - WS_RES *ws_res = wsQuery(&ws_taos, command, &ws_code); - if (ws_code) { - return cleanIfQueryFailedWS(__func__, __LINE__, command, ws_res); - } - tfree(command); - - while (true) { - int rows = 0; - const void *data = NULL; - ws_code = ws_fetch_raw_block(ws_res, &data, &rows); - if (0 == rows) { - debugPrint( - "%s() LN%d, No more data from ws_fetch_raw_block(), " - "ws_taos: %p, code: 0x%08x, reason:%s\n", - __func__, __LINE__, ws_taos, ws_errno(ws_res), ws_errstr(ws_res)); - break; - } - - uint8_t type; - uint32_t len; - - for (int row = 0; row < rows; row++) { - const void *value = ws_get_value_in_block(ws_res, row, TSDB_SHOW_TABLES_NAME_INDEX, &type, &len); - if (0 == len) { - errorPrint( - "%s() LN%d, row: %d, col: %d, " - "ws_get_value_in_block() error!\n", - __func__, __LINE__, TSDB_DESCRIBE_METRIC_FIELD_INDEX, row); - continue; - } - count = *(int64_t *)value; - } - break; - } - debugPrint("%s() LN%d, COUNT(TBNAME): %" PRId64 "\n", __func__, __LINE__, count); - - ws_free_result(ws_res); - ws_close(ws_taos); - return count; -} - -int getTableTagValueWSV3(WS_TAOS **taos_v, const char *dbName, const char *table, TableDes **ppTableDes) { - TableDes *tableDes = *ppTableDes; - char *command = calloc(1, TSDB_MAX_ALLOWED_SQL_LEN); - if (NULL == command) { - errorPrint("%s() LN%d, memory allocation failed\n", __func__, __LINE__); - return -1; - } - - snprintf(command, TSDB_MAX_ALLOWED_SQL_LEN, - "SELECT tag_name,tag_value FROM information_schema.ins_tags " - "WHERE db_name = '%s' AND table_name = '%s'", - dbName, table); - - int32_t ws_code = -1; - WS_RES *ws_res = wsQuery(taos_v, command, &ws_code); - if (ws_code) { - return cleanIfQueryFailedWS(__func__, __LINE__, command, ws_res); - } - - while (true) { - int rows = 0; - const void *data = NULL; - ws_code = ws_fetch_raw_block(ws_res, &data, &rows); - - if (ws_code) { - errorPrint( - "%s() LN%d, ws_fetch_raw_block() error, " - "code: 0x%08x, command: %s, reason: %s\n", - __func__, __LINE__, ws_code, command, ws_errstr(ws_res)); - } - if (0 == rows) { - debugPrint( - "%s() LN%d, No more data from fetch to run " - "command <%s>, " - "ws_taos: %p, code: 0x%08x, reason:%s\n", - __func__, __LINE__, command, taos_v, ws_errno(ws_res), ws_errstr(ws_res)); - break; - } - - uint8_t type; - uint32_t len; - int index = tableDes->columns; - - for (int row = 0; row < rows; row++) { - const void *value1 = ws_get_value_in_block(ws_res, row, 1, &type, &len); - - debugPrint("%s() LN%d, len=%d\n", __func__, __LINE__, len); - - if (NULL == value1) { - strcpy(tableDes->cols[index].value, "NULL"); - strcpy(tableDes->cols[index].note, "NUL"); - } else if (0 != processFieldsValueV3(index, tableDes, value1, len)) { - errorPrint("%s() LN%d, processFieldsValueV3 tag_value: %p\n", __func__, __LINE__, value1); - ws_free_result(ws_res); - free(command); - return -1; - } - index++; - } - } - - ws_free_result(ws_res); - free(command); - - return (tableDes->columns + tableDes->tags); -} - -int getTableTagValueWSV2(WS_TAOS **taos_v, const char *dbName, const char *table, TableDes **ppTableDes) { - TableDes *tableDes = *ppTableDes; - char *command = calloc(1, TSDB_MAX_ALLOWED_SQL_LEN); - if (NULL == command) { - errorPrint("%s() LN%d, memory allocation failed\n", __func__, __LINE__); - return -1; - } - char *sqlstr = command; - - sqlstr += snprintf(sqlstr, 
TSDB_MAX_ALLOWED_SQL_LEN, "SELECT %s%s%s", g_escapeChar, - tableDes->cols[tableDes->columns].field, g_escapeChar); - for (int i = tableDes->columns + 1; i < (tableDes->columns + tableDes->tags); i++) { - sqlstr += sprintf(sqlstr, ",%s%s%s ", g_escapeChar, tableDes->cols[i].field, g_escapeChar); - } - sqlstr += sprintf(sqlstr, g_args.db_escape_char ? " FROM `%s`.%s%s%s LIMIT 1" : " FROM %s.%s%s%s LIMIT 1", dbName, - g_escapeChar, table, g_escapeChar); - - int32_t ws_code = -1; - int32_t retryCount = 0; - WS_RES *ws_res = NULL; - - RETRY_QUERY: - ws_res = wsQuery(taos_v, command, &ws_code); - if (ws_code) { - return cleanIfQueryFailedWS(__func__, __LINE__, command, ws_res); - } - - while (true) { - int rows = 0; - const void *data = NULL; - ws_code = wsFetchBlock(ws_res, &data, &rows); - - if (ws_code) { - // output error - errorPrint( - "%s() LN%d, getTableTagValueWSV2-> wsFetchBlock() error, " - "code: 0x%08x, sqlstr: %s, reason: %s\n", - __func__, __LINE__, ws_code, sqlstr, ws_errstr(ws_res)); - - // check can retry - if(canRetry(ws_code, RETRY_TYPE_FETCH) && ++retryCount <= g_args.retryCount) { - infoPrint("wsFetchBlock failed, goto wsQuery to retry %d\n", retryCount); - ws_free_result(ws_res); - ws_res = NULL; - toolsMsleep(g_args.retrySleepMs); - goto RETRY_QUERY; - } - - // error break while - break; - } - if (0 == rows) { - debugPrint( - "%s() LN%d, No more data from fetch to run " - "command <%s>, " - "ws_taos: %p, code: 0x%08x, reason:%s\n", - __func__, __LINE__, sqlstr, *taos_v, ws_errno(ws_res), ws_errstr(ws_res)); - break; - } - - uint8_t type; - uint32_t len; - for (int row = 0; row < rows; row++) { - for (int j = tableDes->columns; j < (tableDes->columns + tableDes->tags); j++) { - const void *value = ws_get_value_in_block(ws_res, row, j - tableDes->columns, &type, &len); - - debugPrint("%s() LN%d, len=%d\n", __func__, __LINE__, len); - - if (NULL == value) { - strcpy(tableDes->cols[j].value, "NULL"); - strcpy(tableDes->cols[j].note, "NUL"); - } else if (0 != processFieldsValueV2(j, tableDes, value, len)) { - errorPrint("%s() LN%d, processFieldsValueV2 value0: %p\n", __func__, __LINE__, value); - ws_free_result(ws_res); - free(command); - return -1; - } - } - } - } - - ws_free_result(ws_res); - free(command); - - return (tableDes->columns + tableDes->tags); -} - -int getTableTagValueWS(void **taos_v, const char *dbName, const char *table, TableDes **ppTableDes) { - int ret = -1; - if (3 == g_majorVersionOfClient) { - // if child-table have tag, V3 using select tag_value - // from information_schema.ins_tag where table to get tagValue - ret = getTableTagValueWSV2(taos_v, dbName, table, ppTableDes); - if (ret < 0) { - ret = getTableTagValueWSV3(taos_v, dbName, table, ppTableDes); - } - } else if (2 == g_majorVersionOfClient) { - // if child-table have tag, - // using select tagName from table to get tagValue - ret = getTableTagValueWSV2(taos_v, dbName, table, ppTableDes); - } else { - errorPrint("%s() LN%d, major version %d is not supported\n", __func__, __LINE__, g_majorVersionOfClient); - } - - return ret; -} - -int getTableDesFromStbWS(WS_TAOS **taos_v, const char *dbName, const TableDes *stbTableDes, const char *table, - TableDes **ppTableDes) { - constructTableDesFromStb(stbTableDes, table, ppTableDes); - return getTableTagValueWS(taos_v, dbName, table, ppTableDes); -} - -int getTableDesWS(WS_TAOS **taos_v, const char *dbName, const char *table, TableDes *tableDes, const bool colOnly) { - int colCount = 0; - char *command = calloc(1, TSDB_MAX_ALLOWED_SQL_LEN); - if (NULL == 
command) { - errorPrint("%s() LN%d, memory allocation failed\n", __func__, __LINE__); - return -1; - } - - snprintf(command, TSDB_MAX_ALLOWED_SQL_LEN, g_args.db_escape_char ? "DESCRIBE `%s`.%s%s%s" : "DESCRIBE %s.%s%s%s", - dbName, g_escapeChar, table, g_escapeChar); - - int32_t ws_code = -1; - WS_RES *ws_res = wsQuery(taos_v, command, &ws_code); - if (ws_code) { - return cleanIfQueryFailedWS(__func__, __LINE__, command, ws_res); - } else { - debugPrint("%s() LN%d, run command <%s> success, ws_taos: %p\n", __func__, __LINE__, command, *taos_v); - } - - tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN); - while (true) { - int rows = 0; - const void *data = NULL; - ws_code = ws_fetch_raw_block(ws_res, &data, &rows); - if (0 == rows) { - debugPrint( - "%s() LN%d, No more data from ws_fetch_raw_block(), " - "ws_taos: %p, code: 0x%08x, reason:%s\n", - __func__, __LINE__, *taos_v, ws_errno(ws_res), ws_errstr(ws_res)); - break; - } - - uint8_t type; - uint32_t len; - char buffer[VALUE_BUF_LEN] = {0}; - const void *value = NULL; - - for (int row = 0; row < rows; row++) { - value = ws_get_value_in_block(ws_res, row, TSDB_DESCRIBE_METRIC_FIELD_INDEX, &type, &len); - if (NULL == value) { - errorPrint( - "%s() LN%d, row: %d, col: %d, " - "ws_get_value_in_block() error!\n", - __func__, __LINE__, TSDB_DESCRIBE_METRIC_FIELD_INDEX, row); - continue; - } - memset(buffer, 0, VALUE_BUF_LEN); - memcpy(buffer, value, len); - strncpy(tableDes->cols[colCount].field, buffer, len); - - value = ws_get_value_in_block(ws_res, row, TSDB_DESCRIBE_METRIC_TYPE_INDEX, &type, &len); - if (NULL == value) { - errorPrint( - "%s() LN%d, row: %d, col: %d, " - "ws_get_value_in_block() error!\n", - __func__, __LINE__, TSDB_DESCRIBE_METRIC_TYPE_INDEX, row); - continue; - } - memset(buffer, 0, VALUE_BUF_LEN); - memcpy(buffer, value, len); - tableDes->cols[colCount].type = typeStrToType(buffer); - - value = ws_get_value_in_block(ws_res, row, TSDB_DESCRIBE_METRIC_LENGTH_INDEX, &type, &len); - if (NULL == value) { - errorPrint("row: %d, col: %d, ws_get_value_in_block() error!\n", TSDB_DESCRIBE_METRIC_LENGTH_INDEX, - row); - continue; - } - tableDes->cols[colCount].length = *((int *)value); - - value = ws_get_value_in_block(ws_res, row, TSDB_DESCRIBE_METRIC_NOTE_INDEX, &type, &len); - if (NULL == value) { - errorPrint("row: %d, col: %d, ws_get_value_in_block() error!\n", TSDB_DESCRIBE_METRIC_NOTE_INDEX, row); - continue; - } - memset(buffer, 0, VALUE_BUF_LEN); - memcpy(buffer, value, len); - - debugPrint("%s() LN%d, buffer: %s\n", __func__, __LINE__, buffer); - - strncpy(tableDes->cols[colCount].note, buffer, len); - if (strcmp(tableDes->cols[colCount].note, "TAG") != 0) { - tableDes->columns++; - } else { - tableDes->tags++; - } - colCount++; - } - } - - ws_free_result(ws_res); - ws_res = NULL; - free(command); - - if (colOnly) { - return colCount; - } - - return getTableTagValueWS(taos_v, dbName, table, &tableDes); -} - -int64_t queryDbForDumpOutCountWS(char *command, WS_TAOS **taos_v, const char *dbName, const char *tbName, - const int precision) { - int64_t count = -1; - int32_t ws_code = -1; - WS_RES *ws_res = wsQuery(taos_v, command, &ws_code); - if (ws_code != 0) { - return cleanIfQueryFailedWS(__func__, __LINE__, command, ws_res); - } - - while (true) { - int rows = 0; - const void *data = NULL; - ws_code = ws_fetch_raw_block(ws_res, &data, &rows); - if (0 == rows) { - debugPrint( - "%s() LN%d, No more data from ws_fetch_raw_block(), " - "ws_taos: %p, code: 0x%08x, reason:%s\n", - __func__, __LINE__, *taos_v, 
ws_errno(ws_res), ws_errstr(ws_res)); - break; - } - - uint8_t type; - uint32_t len; - - for (int row = 0; row < rows; row++) { - const void *value0 = ws_get_value_in_block(ws_res, row, TSDB_SHOW_TABLES_NAME_INDEX, &type, &len); - if (NULL == value0) { - if (0 == ws_errno(ws_res)) { - count = 0; - debugPrint("%s fetch row, count: %" PRId64 "\n", command, count); - } else { - count = -1; - errorPrint( - "failed run %s to fetch row, ws_taos: %p, " - "code: 0x%08x, reason: %s\n", - command, *taos_v, ws_errno(ws_res), ws_errstr(ws_res)); - } - } else { - count = *(int64_t *)value0; - debugPrint("%s fetch row, count: %" PRId64 "\n", command, count); - break; - } - } - } - - ws_free_result(ws_res); - free(command); - return count; -} - -TAOS_RES *queryDbForDumpOutOffsetWS(WS_TAOS **taos_v, char *command) { - int32_t ws_code = -1; - WS_RES *ws_res = wsQuery(taos_v, command, &ws_code); - if (ws_code) { - cleanIfQueryFailedWS(__func__, __LINE__, command, ws_res); - return NULL; - } - free(command); - return ws_res; -} - -int64_t writeResultToAvroWS(const char *avroFilename, const char *dbName, const char *tbName, char *jsonSchema, - WS_TAOS **taos_v, int precision, int64_t start_time, int64_t end_time) { - int64_t queryCount = queryDbForDumpOutCount(taos_v, dbName, tbName, precision); - if (queryCount <= 0) { - return 0; - } - - avro_schema_t schema; - RecordSchema *recordSchema; - avro_file_writer_t db; - - avro_value_iface_t *wface = prepareAvroWface(avroFilename, jsonSchema, &schema, &recordSchema, &db); - - int64_t success = 0; - int64_t failed = 0; - - bool printDot = true; - - int currentPercent = 0; - int percentComplete = 0; - - int64_t limit = g_args.data_batch; - int64_t offset = 0; - - do { - if (queryCount > limit) { - if (limit < (queryCount - offset)) { - limit = queryCount - offset; - } - } else { - limit = queryCount; - } - - WS_RES *ws_res = NULL; - int numFields = 0; - void *ws_fields = NULL; - int32_t countInBatch = 0; - int32_t retryCount = 0; - -RETRY_QUERY: - countInBatch = 0; - ws_res = queryDbForDumpOutOffset(taos_v, dbName, tbName, precision, start_time, end_time, limit, offset); - if (NULL == ws_res) { - break; - } - - numFields = ws_field_count(ws_res); - if (3 == g_majorVersionOfClient) { - const struct WS_FIELD *ws_fields_v3 = ws_fetch_fields(ws_res); - ws_fields = (void *)ws_fields_v3; - } else { - const struct WS_FIELD_V2 *ws_fields_v2 = ws_fetch_fields_v2(ws_res); - ws_fields = (void *)ws_fields_v2; - } - - while (true) { - int rows = 0; - const void *data = NULL; - int32_t ws_code = wsFetchBlock(ws_res, &data, &rows); - - if (ws_code) { - errorPrint( - "%s() LN%d, writeResultToAvroWS->wsFetchBlock() error, ws_taos: %p, " - "code: 0x%08x, reason: %s\n", - __func__, __LINE__, *taos_v, ws_code, ws_errstr(ws_res)); - - // check can retry - if(canRetry(ws_code, RETRY_TYPE_FETCH) && ++retryCount <= g_args.retryCount) { - infoPrint("wsFetchBlock failed, goto wsQuery to retry %d limit=%"PRId64" offset=%"PRId64" queryCount=%"PRId64" \n", - retryCount, limit, offset, queryCount); - // need close old res - ws_free_result(ws_res); - ws_res = NULL; - toolsMsleep(g_args.retrySleepMs); - goto RETRY_QUERY; - } - - // break - break; - } - - if (0 == rows) { - debugPrint( - "%s() LN%d, No more data from wsFetchBlock(), " - "ws_taos: %p, code: 0x%08x, reason:%s\n", - __func__, __LINE__, *taos_v, ws_errno(ws_res), ws_errstr(ws_res)); - break; - } - - for (int row = 0; row < rows; row++) { - avro_value_t record; - avro_generic_value_new(wface, &record); - - avro_value_t avro_value, 
branch; - - if (!g_args.loose_mode) { - if (0 != avro_value_get_by_name(&record, "tbname", &avro_value, NULL)) { - errorPrint( - "%s() LN%d, avro_value_get_by_name(tbname) " - "failed\n", - __func__, __LINE__); - break; - } - avro_value_set_branch(&avro_value, 1, &branch); - avro_value_set_string(&branch, tbName); - } - - for (int32_t f = 0; f < numFields; f++) { - uint8_t type; - uint32_t len; - - const void *value = ws_get_value_in_block(ws_res, row, f, &type, &len); - - if (3 == g_majorVersionOfClient) { - struct WS_FIELD *ws_fields_3 = (struct WS_FIELD *)ws_fields; - processValueToAvro(f, record, avro_value, branch, ws_fields_3[f].name, ws_fields_3[f].type, - ws_fields_3[f].bytes, value, len); - } else { - struct WS_FIELD_V2 *ws_fields_2 = (struct WS_FIELD_V2 *)ws_fields; - processValueToAvro(f, record, avro_value, branch, ws_fields_2[f].name, ws_fields_2[f].type, - ws_fields_2[f].bytes, value, len); - } - } - - if (0 != avro_file_writer_append_value(db, &record)) { - errorPrint( - "%s() LN%d, " - "Unable to write record to file. Message: %s\n", - __func__, __LINE__, avro_strerror()); - failed--; - } else { - success++; - } - - countInBatch++; - avro_value_decref(&record); - } - } - - if (countInBatch != limit) { - errorPrint("%s() LN%d, actual dump out: %d, batch %" PRId64 "\n", __func__, __LINE__, countInBatch, limit); - } - ws_free_result(ws_res); - ws_res = NULL; - printDotOrX(offset, &printDot); - offset += limit; - - currentPercent = ((offset) * 100 / queryCount); - if (currentPercent > percentComplete) { - // infoPrint("%d%% of %s\n", currentPercent, tbName); - percentComplete = currentPercent; - } - } while (offset < queryCount); - - if (percentComplete < 100) { - errorPrint("%d%% of %s\n", percentComplete, tbName); - } - - avro_value_iface_decref(wface); - freeRecordSchema(recordSchema); - avro_file_writer_close(db); - avro_schema_decref(schema); - - return success; -} - -int64_t writeResultDebugWS(WS_RES *ws_res, FILE *fp, const char *dbName, const char *tbName) { - int64_t totalRows = 0; - - int32_t sql_buf_len = g_args.max_sql_len; - char *tmpBuffer = (char *)calloc(1, sql_buf_len + 128); - if (NULL == tmpBuffer) { - errorPrint("%s() LN%d, memory allocation failed!\n", __func__, __LINE__); - return 0; - } - - char *pstr = tmpBuffer; - - int64_t lastRowsPrint = 5000000; - int count = 0; - - int fieldCount = ws_field_count(ws_res); - ASSERT(fieldCount > 0); - - void *ws_fields = NULL; - if (3 == g_majorVersionOfClient) { - const struct WS_FIELD *ws_fields_v3 = ws_fetch_fields(ws_res); - ws_fields = (void *)ws_fields_v3; - } else { - const struct WS_FIELD_V2 *ws_fields_v2 = ws_fetch_fields_v2(ws_res); - ws_fields = (void *)ws_fields_v2; - } - - int32_t total_sqlstr_len = 0; - - while (true) { - int rows = 0; - const void *data = NULL; - int32_t ws_code = ws_fetch_raw_block(ws_res, &data, &rows); - - if (ws_code) { - errorPrint( - "%s() LN%d, ws_fetch_raw_block() error!" 
- " code: 0x%08x, reason: %s\n", - __func__, __LINE__, ws_code, ws_errstr(ws_res)); - break; - } - if (0 == rows) { - debugPrint( - "%s() LN%d, No more data from ws_fetch_raw_block(), " - "code: 0x%08x, reason:%s\n", - __func__, __LINE__, ws_errno(ws_res), ws_errstr(ws_res)); - break; - } - - for (int row = 0; row < rows; row++) { - int32_t curr_sqlstr_len = 0; - - if (count == 0) { - total_sqlstr_len = 0; - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "INSERT INTO %s.%s VALUES (", dbName, tbName); - } else { - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "("); - } - - for (int f = 0; f < fieldCount; f++) { - if (f != 0) { - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", "); - } - uint8_t type; - uint32_t len; - - const void *value = ws_get_value_in_block(ws_res, row, f, &type, &len); - if (NULL == value) { - errorPrint("row: %d, ws_get_value_in_block() error!\n", row); - continue; - } - - if (3 == g_majorVersionOfClient) { - struct WS_FIELD *ws_fields_3 = (struct WS_FIELD *)ws_fields; - curr_sqlstr_len += processResultValue(pstr, curr_sqlstr_len, ws_fields_3[f].type, value, len); - } else { - struct WS_FIELD_V2 *ws_fields_2 = (struct WS_FIELD_V2 *)ws_fields; - curr_sqlstr_len += processResultValue(pstr, curr_sqlstr_len, ws_fields_2[f].type, value, len); - } - } - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ")"); - - totalRows++; - count++; - fprintf(fp, "%s", tmpBuffer); - - if (totalRows >= lastRowsPrint) { - infoPrint(" %" PRId64 " rows already be dump-out from %s.%s\n", totalRows, dbName, tbName); - lastRowsPrint += 5000000; - } - - total_sqlstr_len += curr_sqlstr_len; - - if ((count >= g_args.data_batch) || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) { - fprintf(fp, ";\n"); - count = 0; - } - } - } - - debugPrint("total_sqlstr_len: %d\n", total_sqlstr_len); - - fprintf(fp, "\n"); - free(tmpBuffer); - - return totalRows; -} - -WS_RES *queryDbForDumpOutWS(WS_TAOS **taos_v, const char *dbName, const char *tbName, const int precision, - const int64_t start_time, const int64_t end_time) { - char *command = calloc(1, TSDB_MAX_ALLOWED_SQL_LEN); - if (NULL == command) { - errorPrint("%s() LN%d, memory allocation failed\n", __func__, __LINE__); - return NULL; - } - - snprintf(command, TSDB_MAX_ALLOWED_SQL_LEN, - g_args.db_escape_char ? 
"SELECT * FROM `%s`.%s%s%s WHERE _c0 >= %" PRId64 - " " - "AND _c0 <= %" PRId64 " ORDER BY _c0 ASC;" - : "SELECT * FROM %s.%s%s%s WHERE _c0 >= %" PRId64 - " " - "AND _c0 <= %" PRId64 " ORDER BY _c0 ASC;", - dbName, g_escapeChar, tbName, g_escapeChar, start_time, end_time); - - int32_t ws_code = -1; - WS_RES *ws_res = wsQuery(taos_v, command, &ws_code); - if (ws_code != 0) { - cleanIfQueryFailedWS(__func__, __LINE__, command, ws_res); - return NULL; - } - - free(command); - return ws_res; -} - -int64_t dumpTableDataAvroWS(char *dataFilename, int64_t index, const char *tbName, const bool belongStb, - const char *dbName, const int precision, int colCount, TableDes *tableDes, - int64_t start_time, int64_t end_time) { - WS_TAOS *ws_taos; - if (NULL == (ws_taos = wsConnect())) { - return -1; - } - - char *jsonSchema = NULL; - if (0 != convertTbDesToJsonWrap(dbName, tbName, tableDes, colCount, &jsonSchema)) { - errorPrint("%s() LN%d, convertTbDesToJsonWrap failed\n", __func__, __LINE__); - ws_close(ws_taos); - return -1; - } - - int64_t totalRows = - writeResultToAvroWS(dataFilename, dbName, tbName, jsonSchema, &ws_taos, precision, start_time, end_time); - - ws_close(ws_taos); - ws_taos = NULL; - tfree(jsonSchema); - - return totalRows; -} - -int64_t fillTbNameArrWS(WS_TAOS **taos_v, char *command, char **tbNameArr, const char *stable, const int64_t preCount) { - int32_t ws_code = -1; - WS_RES *ws_res = wsQuery(taos_v, command, &ws_code); - if (ws_code) { - return cleanIfQueryFailedWS(__func__, __LINE__, command, ws_res); - } - - int currentPercent = 0; - int percentComplete = 0; - - int64_t ntbCount = 0; - while (true) { - int rows = 0; - const void *data = NULL; - ws_code = ws_fetch_raw_block(ws_res, &data, &rows); - - if (0 == rows) { - debugPrint( - "%s() LN%d, No more data from ws_fetch_raw_block(), " - "ws_taos: %p, code: 0x%08x, reason:%s\n", - __func__, __LINE__, *taos_v, ws_errno(ws_res), ws_errstr(ws_res)); - break; - } - - uint8_t type; - uint32_t len; - - for (int row = 0; row < rows; row++) { - const void *value0 = ws_get_value_in_block(ws_res, row, TSDB_SHOW_TABLES_NAME_INDEX, &type, &len); - if (NULL == value0) { - errorPrint( - "%s() LN%d, ws_get_value_in_blocK() return NULL." - " code: 0x%08x, reason: %s!\n", - __func__, __LINE__, ws_errno(ws_res), ws_errstr(ws_res)); - continue; - } else { - debugPrint("%s() LN%d, ws_get_value_in_blocK() return %s. 
len: %d\n", __func__, __LINE__, - (char *)value0, len); - } - - tbNameArr[ntbCount] = calloc(len + 1, 1); - strncpy(tbNameArr[ntbCount], (char *)value0, len); - - debugPrint("%s() LN%d, sub table name: %s %" PRId64 " of stable: %s\n", __func__, __LINE__, - tbNameArr[ntbCount], ntbCount, stable); - ++ntbCount; - - currentPercent = (ntbCount * 100 / preCount); - - if (currentPercent > percentComplete) { - infoPrint("connection %p fetched %d%% of %s' tbname\n", *taos_v, currentPercent, stable); - percentComplete = currentPercent; - } - } - } - - if ((preCount > 0) && (percentComplete < 100)) { - errorPrint("%d%% - total %" PRId64 " sub-table's names of stable: %s fetched\n", percentComplete, ntbCount, - stable); - } else { - okPrint("total %" PRId64 " sub-table's name of stable: %s fetched\n", ntbCount, stable); - } - - ws_free_result(ws_res); - free(command); - return ntbCount; -} - -int readNextTableDesWS(void* ws_res, TableDes* tbDes, int *idx, int *cnt) { - // tbname, tagName , tagValue - int index = 0; - uint8_t type = 0; - uint32_t len = 0; - while( index < tbDes->tags) { - // get block - if(*idx >= *cnt || *cnt == 0) { - const void *data = NULL; - int ws_code = ws_fetch_raw_block(ws_res, &data, cnt); - if (ws_code !=0 ) { - // read to end - errorPrint("read next ws_fetch_raw_block failed, err code=%d idx=%d index=%d\n", ws_code, *idx, index); - return -1; - } - - if(*cnt == 0) { - infoPrint("read schema over. tag columns %d.\n", tbDes->tags); - break; - } - *idx = 0; - } - - // read first column tbname - const void *val = ws_get_value_in_block(ws_res, *idx, 0, &type, &len); - if(val == NULL) { - errorPrint("read tbname failed, idx=%d cnt=%d \n", *idx, *cnt); - return -1; - } - - // tbname changed check - if(tbDes->name[0] == 0) { - // first set tbName - strncpy(tbDes->name, val, len); - } else { - // compare tbname change - if(!(strncmp(tbDes->name, val, len) == 0 - && tbDes->name[len] == 0)) { - // tbname cnanged, break - break; - } - } - - // read third column tagvalue - val = ws_get_value_in_block(ws_res, *idx, 2, &type, &len); - // copy tagvalue - if (NULL == val) { - strcpy(tbDes->cols[index].value, "NULL"); - strcpy(tbDes->cols[index].note , "NUL"); - } else if (0 != processFieldsValueV3(index, tbDes, val, len)) { - errorPrint("%s() LN%d, call processFieldsValueV3 tag_value: %p\n", - __func__, __LINE__, val); - return -1; - } - - // move next row - *idx = *idx + 1; - // counter ++ - index++; - } - - // check tags count corrent - if(*cnt && index != tbDes->tags) { - errorPrint("child table %s read tags(%d) not equal stable tags (%d).\n", - tbDes->name, index, tbDes->tags); - return -1; - } - - return index; -} - -// read specail line, col -int32_t readRowWS(void *res, int32_t idx, int32_t col, uint32_t *len, char **data) { - int32_t i = 0; - while (i <= idx) { - // fetch block - const void *block = NULL; - int32_t cnt = 0; - int ws_code = ws_fetch_raw_block(res, &block, &cnt); - if (ws_code != 0) { - errorPrint("readRow->ws_fetch_raw_block failed, err code=%d i=%d\n", ws_code, i); - return -1; - } - - // cnt check - if (cnt == 0) { - infoPrint("ws_fetch_raw_block read cnt zero. 
i=%d.\n", i); - return -1; - } - - // check idx - if (i + cnt <= idx) { - // move next block - i += cnt; - continue; - } - - // set - uint8_t type = 0; - const void *val = ws_get_value_in_block(res, idx, col, &type, len); - if (val == NULL) { - errorPrint("readRow ws_get_value_in_block failed, cnt=%d idx=%d col=%d \n", cnt, idx, col); - return -1; - } - *data = (char *)val; - break; - } - - return 0; -} - -void dumpExtraInfoVarWS(void **taos_v, FILE *fp) { - char buffer[BUFFER_LEN]; - char *command = calloc(1, TSDB_MAX_ALLOWED_SQL_LEN); - if (NULL == command) { - errorPrint("%s() LN%d, memory allocation failed\n", __func__, __LINE__); - return; - } - strcpy(command, "SHOW VARIABLES"); - - int32_t ws_code = -1; - WS_RES *ws_res = wsQuery(taos_v, command, &ws_code); - - if (0 != ws_code) { - warnPrint( - "%s() LN%d, failed to run command %s, " - "code: 0x%08x, reason: %s. Will use default settings\n", - __func__, __LINE__, command, ws_code, ws_errstr(ws_res)); - fprintf(g_fpOfResult, - "# SHOW VARIABLES failed, " - "code: 0x%08x, reason:%s\n", - ws_errno(ws_res), ws_errstr(ws_res)); - snprintf(buffer, BUFFER_LEN, "#!charset: %s\n", "UTF-8"); - size_t len = fwrite(buffer, 1, strlen(buffer), fp); - if (len != strlen(buffer)) { - errorPrint( - "%s() LN%d, write to file. " - "try to write %zu, actual len %zu, " - "Errno is %d. Reason is %s.\n", - __func__, __LINE__, strlen(buffer), len, errno, strerror(errno)); - } - ws_free_result(ws_res); - ws_res = NULL; - free(command); - return; - } - - while (true) { - int rows = 0; - const void *data = NULL; - ws_code = ws_fetch_raw_block(ws_res, &data, &rows); - - if (0 == rows) { - debugPrint( - "%s() LN%d, No more data from ws_fetch_raw_block(), " - "ws_taos: %p, code: 0x%08x, reason:%s\n", - __func__, __LINE__, *taos_v, ws_errno(ws_res), ws_errstr(ws_res)); - break; - } - - uint8_t type; - uint32_t len; - char tmp[BUFFER_LEN - 12] = {0}; - - for (int row = 0; row < rows; row++) { - const void *value0 = ws_get_value_in_block(ws_res, row, 0, &type, &len); - memset(tmp, 0, BUFFER_LEN - 12); - memcpy(tmp, value0, len); - - verbosePrint("%s() LN%d, value0: %s\n", __func__, __LINE__, tmp); - if (0 == strcmp(tmp, "charset")) { - const void *value1 = ws_get_value_in_block(ws_res, row, 1, &type, &len); - memset(tmp, 0, BUFFER_LEN - 12); - memcpy(tmp, value1, min(BUFFER_LEN - 13, len)); - snprintf(buffer, BUFFER_LEN, "#!charset: %s\n", tmp); - debugPrint("%s() LN%d buffer: %s\n", __func__, __LINE__, buffer); - size_t w_len = fwrite(buffer, 1, strlen(buffer), fp); - if (w_len != strlen(buffer)) { - errorPrint( - "%s() LN%d, write to file. " - "try to write %zu, actual len %zu, " - "Errno is %d. 
Reason is %s.\n", - __func__, __LINE__, strlen(buffer), w_len, errno, strerror(errno)); - } - } - } - } - - ws_free_result(ws_res); - ws_res = NULL; - free(command); -} - -int queryDbImplWS(WS_TAOS **taos_v, char *command) { - int ret = 0; - WS_RES *ws_res = NULL; - int32_t ws_code = -1; - - ws_res = wsQuery(taos_v, command, &ws_code); - - if (ws_code) { - errorPrint( - "Failed to run <%s>, ws_taos: %p, " - "code: 0x%08x, reason: %s\n", - command, *taos_v, ws_code, ws_errstr(ws_res)); - ret = -1; - ; - } - - ws_free_result(ws_res); - ws_res = NULL; - return ret; -} - -void dumpNormalTablesOfStbWS(threadInfo *pThreadInfo, FILE *fp, char *dumpFilename) { - for (int64_t i = pThreadInfo->from; i < (pThreadInfo->from + pThreadInfo->count); i++) { - char *tbName = pThreadInfo->tbNameArr[i]; - debugPrint("%s() LN%d, [%d] sub table %" PRId64 ": name: %s\n", __func__, __LINE__, pThreadInfo->threadIndex, i, - tbName); - - int64_t count; - if (g_args.avro) { - count = dumpNormalTable(i, &pThreadInfo->taos, pThreadInfo->dbInfo, true, pThreadInfo->stbName, - pThreadInfo->stbDes, tbName, pThreadInfo->precision, dumpFilename, NULL); - } else { - count = dumpNormalTable(i, &pThreadInfo->taos, pThreadInfo->dbInfo, true, pThreadInfo->stbName, - pThreadInfo->stbDes, tbName, pThreadInfo->precision, NULL, fp); - } - - // show progress - atomic_add_fetch_64(&g_tableDone, 1); - infoPrint("%s.%s %" PRId64 "/%" PRId64 " %s dump data ok.\n", g_dbName, g_stbName, g_tableDone, g_tableCount, - tbName); - if (count < 0) { - break; - } else { - atomic_add_fetch_64(&g_totalDumpOutRows, count); - } - } - - return; -} - -int64_t dumpStbAndChildTbOfDbWS(WS_TAOS **taos_v, SDbInfo *dbInfo, FILE *fpDbs) { - int64_t ret = 0; - - // - // obtain need dump all stable name - // - char *command = calloc(1, TSDB_MAX_ALLOWED_SQL_LEN); - if (NULL == command) { - errorPrint("%s() LN%d, memory allocation failed\n", __func__, __LINE__); - return -1; - } - - snprintf(command, TSDB_MAX_ALLOWED_SQL_LEN, g_args.db_escape_char ? 
"USE `%s`" : "USE %s", dbInfo->name); - WS_RES *ws_res; - int32_t ws_code = -1; - - ws_res = wsQuery(taos_v, command, &ws_code); - if (ws_code != 0) { - errorPrint("Invalid database %s, reason: %s\n", dbInfo->name, ws_errstr(ws_res)); - ws_free_result(ws_res); - free(command); - return -1; - } - - if (3 == g_majorVersionOfClient) { - snprintf(command, TSDB_MAX_ALLOWED_SQL_LEN, - "SELECT STABLE_NAME FROM information_schema.ins_stables " - "WHERE db_name='%s'", - dbInfo->name); - } else { - snprintf(command, TSDB_MAX_ALLOWED_SQL_LEN, "SHOW STABLES"); - } - - ws_res = wsQuery(taos_v, command, &ws_code); - - if (ws_code) { - return cleanIfQueryFailedWS(__func__, __LINE__, command, ws_res); - } - - // link - SNode* head = NULL; - SNode* end = NULL; - - while (true) { - int rows = 0; - const void *data = NULL; - ws_code = ws_fetch_raw_block(ws_res, &data, &rows); - - if (0 == rows) { - debugPrint( - "%s() LN%d, No more data from ws_fetch_raw_block(), " - "ws_taos: %p, code: 0x%08x, reason:%s\n", - __func__, __LINE__, *taos_v, ws_errno(ws_res), ws_errstr(ws_res)); - break; - } - - uint8_t type; - uint32_t len; - - for (int row = 0; row < rows; row++) { - const void *value0 = ws_get_value_in_block(ws_res, row, TSDB_SHOW_DB_NAME_INDEX, &type, &len); - if (NULL == value0) { - errorPrint("row: %d, ws_get_value_in_block() error!\n", row); - continue; - } - - // put to linked list - if (head == NULL) { - head = end = mallocNode(value0, len); - if(head == NULL) { - errorPrint("row: %d, mallocNode head error!\n", row); - continue; - } - } else { - end->next = mallocNode(value0, len); - if(end->next == NULL) { - errorPrint("row: %d, mallocNode next error!\n", row); - continue; - } - end = end->next; - } - // check - debugPrint("%s() LN%d, stable: %s\n", __func__, __LINE__, end->name); - } - } - - free(command); - - // check except - if (head == NULL) { - infoPrint("%s() LN%d, stable count is zero.\n", __func__, __LINE__ ); - return 0; - } - - // - // dump stable data - // - SNode * next = head; - while (next) { - ret = dumpStbAndChildTb(taos_v, dbInfo, next->name, fpDbs); - if (ret < 0) { - errorPrint("%s() LN%d, stable: %s dump out failed\n", __func__, __LINE__, next->name); - break; - } - // move next - next = next->next; - } - - // free nodes - freeNodes(head); - return ret; -} - -int64_t dumpNTablesOfDbWS(WS_TAOS **taos_v, SDbInfo *dbInfo) { - int64_t ret = 0; - if (0 == dbInfo->ntables) { - errorPrint("%s() LN%d, database: %s has 0 tables\n", __func__, __LINE__, dbInfo->name); - return 0; - } - - char *command = calloc(1, TSDB_MAX_ALLOWED_SQL_LEN); - if (NULL == command) { - errorPrint("%s() LN%d, memory allocation failed\n", __func__, __LINE__); - return -1; - } - - WS_RES *ws_res; - int32_t ws_code = -1; - - if (3 == g_majorVersionOfClient) { - snprintf(command, TSDB_MAX_ALLOWED_SQL_LEN, - "SELECT TABLE_NAME,STABLE_NAME FROM " - "information_schema.ins_tables WHERE db_name='%s'", - dbInfo->name); - } else { - snprintf(command, TSDB_MAX_ALLOWED_SQL_LEN, g_args.db_escape_char ? 
"USE `%s`" : "USE %s", dbInfo->name); - ws_res = wsQuery(taos_v, command, &ws_code); - if (ws_code) { - errorPrint("invalid database %s, code: 0x%08x, reason: %s\n", dbInfo->name, ws_code, ws_errstr(ws_res)); - ws_free_result(ws_res); - ws_res = NULL; - ws_close(taos_v); - taos_v = NULL; - free(command); - return 0; - } - ws_free_result(ws_res); - snprintf(command, TSDB_MAX_ALLOWED_SQL_LEN, "SHOW TABLES"); - } - - ws_res = wsQuery(taos_v, command, &ws_code); - if (ws_code) { - errorPrint("Failed to show %s\'s tables, code: 0x%08x, reason: %s!\n", dbInfo->name, ws_code, - ws_errstr(ws_res)); - ws_free_result(ws_res); - ws_res = NULL; - ws_close(taos_v); - taos_v = NULL; - free(command); - return 0; - } - - // link - SNode* head = NULL; - SNode* end = NULL; - - int64_t count = 0; - while (true) { - int rows = 0; - const void *data = NULL; - ws_code = ws_fetch_raw_block(ws_res, &data, &rows); - - if (0 == rows) { - debugPrint( - "%s() LN%d, No more data from ws_fetch_raw_block(), " - "ws_taos: %p, code: 0x%08x, reason:%s\n", - __func__, __LINE__, *taos_v, ws_errno(ws_res), ws_errstr(ws_res)); - break; - } - - uint8_t type; - uint32_t len0, len1; - - for (int row = 0; row < rows; row++) { - const void *value1 = NULL; - if (3 == g_majorVersionOfClient) { - value1 = ws_get_value_in_block(ws_res, row, 1, &type, &len1); - } else { - value1 = ws_get_value_in_block(ws_res, row, TSDB_SHOW_TABLES_METRIC_INDEX, &type, &len1); - } - - if (len1) { - if (g_args.debug_print || g_args.verbose_print) { - char buffer[VALUE_BUF_LEN]; - memset(buffer, 0, VALUE_BUF_LEN); - memcpy(buffer, value1, len1); - debugPrint("%s() LN%d, get table belong %s\n", __func__, __LINE__, buffer); - } - continue; - } else { - const void *value0 = ws_get_value_in_block(ws_res, row, 0, &type, &len0); - if ((NULL == value0) || (0 == len0)) { - errorPrint("%s() LN%d, value0: %p, type: %d, len0: %d\n", __func__, __LINE__, value0, type, len0); - continue; - } - - // put to linked list - if (head == NULL) { - head = end = mallocNode(value0, len0); - if (head == NULL) { - errorPrint("row: %d, mallocNode head error!\n", row); - continue; - } - } else { - end->next = mallocNode(value0, len0); - if (end->next == NULL) { - errorPrint("row: %d, mallocNode next error!\n", row); - continue; - } - end = end->next; - } - - debugPrint("%s() LN%d count: %" PRId64 - ", table name: %s, " - "length: %d\n", - __func__, __LINE__, count, end->name, len0); - } - count++; - } - } - - ws_free_result(ws_res); - free(command); - - // check except - if (head == NULL) { - infoPrint("%s() LN%d, normal table count is zero.\n", __func__, __LINE__ ); - return 0; - } - - // - // dump stable data - // - SNode * next = head; - while (next) { - ret = dumpANormalTableNotBelong(count, taos_v, dbInfo, next->name); - if (0 == ret) { - infoPrint("Dumping normal table: %s\n", next->name); - } else { - errorPrint("%s() LN%d, dump normal table: %s\n", __func__, __LINE__, next->name); - break; - } - - // move next - next = next->next; - } - - // free nodes - freeNodes(head); - - return ret; -} - -bool fillDBInfoWithFieldsWS(const int index, const char *name, const int row, const int f, WS_RES *res) { - uint8_t type; - uint32_t len; - char tmp[VALUE_BUF_LEN] = {0}; - - const void *value = ws_get_value_in_block(res, row, f, &type, &len); - if (0 == strcmp(name, "name")) { - if (NULL == value) { - errorPrint( - "%s() LN%d, row: %d, field: %d, " - "ws_get_value_in_block() error!\n", - __func__, __LINE__, row, f); - return false; - } else { - memset(tmp, 0, VALUE_BUF_LEN); - 
memcpy(tmp, value, len); - strncpy(g_dbInfos[index]->name, tmp, len); - } - } else if (0 == strcmp(name, "vgroups")) { - if (TSDB_DATA_TYPE_INT == type) { - g_dbInfos[index]->vgroups = *((int32_t *)value); - } else if (TSDB_DATA_TYPE_SMALLINT == type) { - g_dbInfos[index]->vgroups = *((int16_t *)value); - } else { - errorPrint("%s() LN%d, unexpected type: %d\n", __func__, __LINE__, type); - return false; - } - } else if (0 == strcmp(name, "ntables")) { - if (TSDB_DATA_TYPE_INT == type) { - g_dbInfos[index]->ntables = *((int32_t *)value); - } else if (TSDB_DATA_TYPE_BIGINT == type) { - g_dbInfos[index]->ntables = *((int64_t *)value); - } else { - errorPrint("%s() LN%d, unexpected type: %d\n", __func__, __LINE__, type); - return false; - } - } else if (0 == strcmp(name, "replica")) { - if (TSDB_DATA_TYPE_TINYINT == type) { - g_dbInfos[index]->replica = *((int8_t *)value); - } else if (TSDB_DATA_TYPE_SMALLINT == type) { - g_dbInfos[index]->replica = *((int16_t *)value); - } else { - errorPrint("%s() LN%d, unexpected type: %d\n", __func__, __LINE__, type); - return false; - } - } else if (0 == strcmp(name, "strict")) { - tstrncpy(g_dbInfos[index]->strict, (char *)value, min(STRICT_LEN, len + 1)); - debugPrint("%s() LN%d: field: %d, strict: %s, length:%d\n", __func__, __LINE__, f, g_dbInfos[index]->strict, - len); - } else if (0 == strcmp(name, "quorum")) { - g_dbInfos[index]->quorum = *((int16_t *)value); - } else if (0 == strcmp(name, "days")) { - g_dbInfos[index]->days = *((int16_t *)value); - } else if ((0 == strcmp(name, "keep")) || (0 == strcmp(name, "keep0,keep1,keep2"))) { - tstrncpy(g_dbInfos[index]->keeplist, value, min(KEEPLIST_LEN, len + 1)); - debugPrint("%s() LN%d: field: %d, keep: %s, length:%d\n", __func__, __LINE__, f, g_dbInfos[index]->keeplist, - len); - } else if (0 == strcmp(name, "duration")) { - tstrncpy(g_dbInfos[index]->duration, value, min(DURATION_LEN, len + 1)); - debugPrint("%s() LN%d: field: %d, tmp: %s, duration: %s, length:%d\n", __func__, __LINE__, f, tmp, - g_dbInfos[index]->duration, len); - } else if ((0 == strcmp(name, "cache")) || (0 == strcmp(name, "cache(MB)"))) { - g_dbInfos[index]->cache = *((int32_t *)value); - } else if (0 == strcmp(name, "blocks")) { - g_dbInfos[index]->blocks = *((int32_t *)value); - } else if (0 == strcmp(name, "minrows")) { - if (TSDB_DATA_TYPE_INT == type) { - g_dbInfos[index]->minrows = *((int32_t *)value); - } else { - errorPrint("%s() LN%d, unexpected type: %d\n", __func__, __LINE__, type); - return false; - } - } else if (0 == strcmp(name, "maxrows")) { - if (TSDB_DATA_TYPE_INT == type) { - g_dbInfos[index]->maxrows = *((int32_t *)value); - } else { - errorPrint("%s() LN%d, unexpected type: %d\n", __func__, __LINE__, type); - return false; - } - } else if (0 == strcmp(name, "wallevel")) { - g_dbInfos[index]->wallevel = *((int8_t *)value); - } else if (0 == strcmp(name, "wal")) { - if (TSDB_DATA_TYPE_TINYINT == type) { - g_dbInfos[index]->wal = *((int8_t *)value); - } else { - errorPrint("%s() LN%d, unexpected type: %d\n", __func__, __LINE__, type); - return false; - } - } else if (0 == strcmp(name, "fsync")) { - if (TSDB_DATA_TYPE_INT == type) { - g_dbInfos[index]->fsync = *((int32_t *)value); - } else { - errorPrint("%s() LN%d, unexpected type: %d\n", __func__, __LINE__, type); - return false; - } - } else if (0 == strcmp(name, "comp")) { - if (TSDB_DATA_TYPE_TINYINT == type) { - g_dbInfos[index]->comp = (int8_t)(*((int8_t *)value)); - } else { - errorPrint("%s() LN%d, unexpected type: %d\n", __func__, __LINE__, type); - 
return false; - } - } else if (0 == strcmp(name, "cachelast")) { - if (TSDB_DATA_TYPE_TINYINT == type) { - g_dbInfos[index]->cachelast = (int8_t)(*((int8_t *)value)); - } else { - errorPrint("%s() LN%d, unexpected type: %d\n", __func__, __LINE__, type); - return false; - } - } else if (0 == strcmp(name, "cache_model")) { - if (TSDB_DATA_TYPE_TINYINT == type) { - g_dbInfos[index]->cache_model = (int8_t)(*((int8_t *)value)); - } else { - errorPrint("%s() LN%d, unexpected type: %d\n", __func__, __LINE__, type); - return false; - } - } else if (0 == strcmp(name, "single_stable_model")) { - if (TSDB_DATA_TYPE_BOOL == type) { - g_dbInfos[index]->single_stable_model = (bool)(*((bool *)value)); - } else { - errorPrint("%s() LN%d, unexpected type: %d\n", __func__, __LINE__, type); - return false; - } - } else if (0 == strcmp(name, "precision")) { - tstrncpy(g_dbInfos[index]->precision, (char *)value, min(DB_PRECISION_LEN, len + 1)); - } else if (0 == strcmp(name, "update")) { - g_dbInfos[index]->update = *((int8_t *)value); - } - - return true; -} - -int fillDbExtraInfoV3WS(void **taos_v, const char *dbName, const int dbIndex) { - int ret = 0; - char *command = calloc(1, TSDB_MAX_ALLOWED_SQL_LEN); - if (NULL == command) { - errorPrint("%s() LN%d, memory allocation failed\n", __func__, __LINE__); - return -1; - } - snprintf(command, TSDB_MAX_ALLOWED_SQL_LEN, - "SELECT COUNT(table_name) FROM " - "information_schema.ins_tables WHERE db_name='%s'", - dbName); - - infoPrint("Getting table(s) count of db (%s) ...\n", dbName); - - int32_t ws_code = -1; - WS_RES *ws_res = wsQuery(taos_v, command, &ws_code); - if (ws_code) { - return cleanIfQueryFailedWS(__func__, __LINE__, command, ws_res); - } else { - while (true) { - int rows = 0; - const void *data = NULL; - ws_code = ws_fetch_raw_block(ws_res, &data, &rows); - - if (0 == rows) { - debugPrint( - "%s() LN%d, No more data from ws_fetch_raw_block(), " - "ws_taos: %p, code: 0x%08x, reason:%s\n", - __func__, __LINE__, *taos_v, ws_errno(ws_res), ws_errstr(ws_res)); - break; - } - - uint8_t type; - uint32_t len; - for (int row = 0; row < rows; row++) { - const void *value0 = ws_get_value_in_block(ws_res, row, TSDB_SHOW_DB_NAME_INDEX, &type, &len); - if (NULL == value0) { - errorPrint("row: %d, ws_get_value_in_block() error!\n", row); - continue; - } - - if (TSDB_DATA_TYPE_BIGINT == type) { - g_dbInfos[dbIndex]->ntables = *(int64_t *)value0; - } else { - errorPrint("%s() LN%d, type: %d, not converted\n", __func__, __LINE__, type); - } - } - } - } - - ws_free_result(ws_res); - free(command); - return ret; -} - -int fillDbInfoWS(void **taos_v) { - int ret = 0; - int dbIndex = 0; - - char *command = calloc(1, TSDB_MAX_ALLOWED_SQL_LEN); - if (NULL == command) { - errorPrint("%s() LN%d, memory allocation failed\n", __func__, __LINE__); - return -1; - } - - if (3 == g_majorVersionOfClient) { - snprintf(command, TSDB_MAX_ALLOWED_SQL_LEN, "SELECT * FROM information_schema.ins_databases"); - } else { - snprintf(command, TSDB_MAX_ALLOWED_SQL_LEN, "SHOW DATABASES"); - } - - int32_t ws_code = -1; - WS_RES *ws_res = wsQuery(taos_v, command, &ws_code); - if (ws_code != 0) { - return cleanIfQueryFailedWS(__func__, __LINE__, command, ws_res); - } - - int fieldCount = ws_field_count(ws_res); - void *ws_fields = NULL; - if (3 == g_majorVersionOfClient) { - const struct WS_FIELD *ws_fields_v3 = ws_fetch_fields(ws_res); - ws_fields = (void *)ws_fields_v3; - } else { - const struct WS_FIELD_V2 *ws_fields_v2 = ws_fetch_fields_v2(ws_res); - ws_fields = (void *)ws_fields_v2; - } - - 
while (true) { - int rows = 0; - const void *data = NULL; - ws_code = ws_fetch_raw_block(ws_res, &data, &rows); - - if (0 == rows) { - debugPrint( - "%s() LN%d, No more data from ws_fetch_raw_block(), " - "ws_taos: %p, code: 0x%08x, reason:%s\n", - __func__, __LINE__, *taos_v, ws_errno(ws_res), ws_errstr(ws_res)); - break; - } - - uint8_t type; - uint32_t len; - char buffer[VALUE_BUF_LEN] = {0}; - - for (int row = 0; row < rows; row++) { - const void *value0 = ws_get_value_in_block(ws_res, row, TSDB_SHOW_DB_NAME_INDEX, &type, &len); - if (NULL == value0) { - errorPrint("row: %d, ws_get_value_in_block() error!\n", row); - continue; - } - memset(buffer, 0, VALUE_BUF_LEN); - memcpy(buffer, value0, len); - debugPrint("%s() LN%d, dbname: %s\n", __func__, __LINE__, buffer); - - if (isSystemDatabase(buffer)) { - if (!g_args.allow_sys) { - continue; - } - } else if (g_args.databases) { - if (inDatabasesSeq(buffer) != 0) { - continue; - } - } else if (!g_args.all_databases) { - if (strcmp(g_args.arg_list[0], buffer)) { - continue; - } - } - - g_dbInfos[dbIndex] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); - if (NULL == g_dbInfos[dbIndex]) { - errorPrint("%s() LN%d, failed to allocate %" PRIu64 " memory\n", __func__, __LINE__, - (uint64_t)sizeof(SDbInfo)); - ret = -1; - break; - } - - okPrint("Database: %s exists\n", buffer); - if (3 == g_majorVersionOfClient) { - struct WS_FIELD *fields = (struct WS_FIELD *)ws_fields; - for (int f = 0; f < fieldCount; f++) { - if (false == fillDBInfoWithFieldsWS(dbIndex, fields[f].name, row, f, ws_res)) { - ret = -1; - break; - } - } - } else { - struct WS_FIELD_V2 *fields = (struct WS_FIELD_V2 *)ws_fields; - for (int f = 0; f < fieldCount; f++) { - if (false == fillDBInfoWithFieldsWS(dbIndex, fields[f].name, row, f, ws_res)) { - ret = -1; - break; - } - } - } - - if (3 == g_majorVersionOfClient) { - fillDbExtraInfoV3WS(taos_v, g_dbInfos[dbIndex]->name, dbIndex); - } - - dbIndex++; - - if (g_args.databases) { - if (dbIndex > g_args.dumpDbCount) break; - } else if (!g_args.all_databases) { - if (dbIndex >= 1) break; - } - } - } - - ws_free_result(ws_res); - ws_res = NULL; - free(command); - - if (0 != ret) { - return ret; - } - - return dbIndex; -} - -bool jointCloudDsn() { - if ((NULL != g_args.host) && strlen(g_args.host)) { - if (0 == g_args.port) { - snprintf(g_args.cloudHost, MAX_HOSTNAME_LEN, "ws://%s:6041", g_args.host); - } else { - snprintf(g_args.cloudHost, MAX_HOSTNAME_LEN, "ws://%s:%d", g_args.host, g_args.port); - } - } else { - if (0 == g_args.port) { - snprintf(g_args.cloudHost, MAX_HOSTNAME_LEN, "ws://localhost:6041"); - } else { - snprintf(g_args.cloudHost, MAX_HOSTNAME_LEN, "ws://localhost:%d", g_args.port); - } - } - - g_args.dsn = g_args.cloudHost; - debugPrint("%s() LN%d, dsn: %s\n", __func__, __LINE__, g_args.dsn); - return true; -} - -bool splitCloudDsn() { - if (g_args.dsn) { - char *token = strstr(g_args.dsn, "?token="); - if (NULL == token) { - return false; - } else { - g_args.cloudToken = token + strlen("?token="); - } - - char *http = NULL, *https = NULL; - http = strstr(g_args.dsn, "http://"); - if (NULL == http) { - https = strstr(g_args.dsn, "https://"); - if (NULL == https) { - tstrncpy(g_args.cloudHost, g_args.dsn, MAX_HOSTNAME_LEN); - } else { - tstrncpy(g_args.cloudHost, https + strlen("https://"), MAX_HOSTNAME_LEN); - } - } else { - tstrncpy(g_args.cloudHost, http + strlen("http://"), MAX_HOSTNAME_LEN); - } - - char *colon = strstr(g_args.cloudHost, ":"); - if (colon) { - g_args.cloudHost[strlen(g_args.cloudHost) - strlen(colon)] = 
'\0'; - g_args.cloudPort = atoi(colon + 1); - } - - return true; - } - - return false; -} - -int64_t dumpTableDataWS(const int64_t index, FILE *fp, const char *tbName, const char *dbName, const int precision, - TableDes *tableDes, const int64_t start_time, const int64_t end_time) { - WS_TAOS *ws_taos; - if (NULL == (ws_taos = wsConnect())) { - return -1; - } - - WS_RES *ws_res = queryDbForDumpOutWS(&ws_taos, dbName, tbName, precision, start_time, end_time); - - int64_t totalRows = -1; - if (ws_res) { - totalRows = writeResultDebugWS(ws_res, fp, dbName, tbName); - } - - ws_free_result(ws_res); - ws_res = NULL; - ws_close(ws_taos); - - return totalRows; -} - -#endif // WEBSOCKET diff --git a/tools/taos-tools/test/CMakeLists.txt b/tools/taos-tools/test/CMakeLists.txt index 1586dae65d..182cb166dd 100644 --- a/tools/taos-tools/test/CMakeLists.txt +++ b/tools/taos-tools/test/CMakeLists.txt @@ -15,8 +15,9 @@ IF(TD_LINUX) ) target_include_directories( - benchmarkTest - PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/../inc" + benchmarkTest PRIVATE + "${CMAKE_CURRENT_SOURCE_DIR}/../inc" + "${CMAKE_CURRENT_SOURCE_DIR}/../deps/toolscJson/inc/" ) add_test( diff --git a/tools/taos-tools/test/benchmarkTest.cpp b/tools/taos-tools/test/benchmarkTest.cpp index 5ea296e4cb..389cfe6e71 100644 --- a/tools/taos-tools/test/benchmarkTest.cpp +++ b/tools/taos-tools/test/benchmarkTest.cpp @@ -16,11 +16,55 @@ #include #include -TEST(jsonTest, taosBenchmarkTest) { - printf("hello world taosBenchmark unit test for C \n"); + +// lower +char* strToLowerCopy(const char *str) { + if (str == NULL) { + return NULL; + } + size_t len = strlen(str); + char *result = (char*)malloc(len + 1); + if (result == NULL) { + return NULL; + } + for (size_t i = 0; i < len; i++) { + result[i] = tolower((unsigned char)str[i]); + } + result[len] = '\0'; + return result; +} + +// pase dsn +int32_t parseDsn(char* dsn, char **host, char **port, char **user, char **pwd); + +TEST(jsonTest, strToLowerCopy) { + // strToLowerCopy + const char* arr[][2] = { + {"ABC","abc"}, + {"Http://Localhost:6041","http://localhost:6041"}, + {"DEF","def"} + }; + + int rows = sizeof(arr) / sizeof(arr[0]); + for (int i = 0; i < rows; i++) { + char *p1 = (char *)arr[i][1]; + char *p2 = strToLowerCopy((char *)arr[i][0]); + printf("p1: %s\n", p1); + printf("p2: %s\n", p2); + int32_t cmp = strcmp(p1, p2); + if (p2) { + free(p2); + } + ASSERT_EQ(cmp, 0); + } + + // null + char * p = strToLowerCopy(NULL); + ASSERT_EQ(p, nullptr); } int main(int argc, char **argv) { + printf("Hello world taosBenchmark unit test for C \n"); testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } \ No newline at end of file diff --git a/tools/tdgpt/taosanalytics/algo/fc/arima.py b/tools/tdgpt/taosanalytics/algo/fc/arima.py index fa587c3604..787cb757df 100644 --- a/tools/tdgpt/taosanalytics/algo/fc/arima.py +++ b/tools/tdgpt/taosanalytics/algo/fc/arima.py @@ -83,6 +83,9 @@ class _ArimaService(AbstractForecastService): if self.list is None or len(self.list) < self.period: raise ValueError("number of input data is less than the periods") + if len(self.list) > 3000: + raise ValueError("number of input data is too large") + if self.fc_rows <= 0: raise ValueError("fc rows is not specified yet") diff --git a/tools/tdgpt/taosanalytics/algo/fc/gpt.py b/tools/tdgpt/taosanalytics/algo/fc/gpt.py index a279630722..8a65f88cc3 100644 --- a/tools/tdgpt/taosanalytics/algo/fc/gpt.py +++ b/tools/tdgpt/taosanalytics/algo/fc/gpt.py @@ -17,7 +17,7 @@ class _GPTService(AbstractForecastService): super().__init__() 
         self.table_name = None
-        self.service_host = 'http://192.168.2.90:5000/ds_predict'
+        self.service_host = 'http://127.0.0.1:5000/ds_predict'
         self.headers = {'Content-Type': 'application/json'}

         self.std = None
@@ -39,13 +39,16 @@ class _GPTService(AbstractForecastService):
             response = requests.post(self.service_host, data=json.dumps(data), headers=self.headers)
         except Exception as e:
             app_logger.log_inst.error(f"failed to connect the service: {self.service_host} ", str(e))
-            raise ValueError("error")
+            raise e

-        # print(response)
+        if response.status_code == 404:
+            app_logger.log_inst.error(f"failed to connect the service: {self.service_host} ")
+            raise ValueError("invalid host url")
+        elif response.status_code != 200:
+            app_logger.log_inst.error(f"failed to request the service: {self.service_host}, reason: {response.text}")
+            raise ValueError(f"failed to request the service, {response.text}")

         pred_y = response.json()['output']
-        # print(f"pred_y len:{len(pred_y)}")
-        # print(f"pred_y:{pred_y}")

         res = {
             "res": [pred_y]
@@ -54,31 +57,17 @@ class _GPTService(AbstractForecastService):
         insert_ts_list(res["res"], self.start_ts, self.time_step, self.fc_rows)
         return res

-        # insert_ts_list(res, self.start_ts, self.time_step, self.fc_rows)
-        #
-        # if self.return_conf:
-        #     res1 = [res.tolist(), res.tolist(), res.tolist()], None
-        # else:
-        #     res1 = [res.tolist()], None
-        #
-        # # add the conf range if required
-        # return {
-        #     "mse": None,
-        #     "res": res1
-        # }

     def set_params(self, params):
         super().set_params(params)

-        if "host" not in params:
-            raise ValueError("gpt service host needs to be specified")
+        if "host" in params:
+            self.service_host = params['host']

-        self.service_host = params['host'].trim()
-
-        if self.service_host.startswith("https://"):
-            self.service_host = self.service_host.replace("https://", "http://")
-        elif "http://" not in self.service_host:
-            self.service_host = "http://" + self.service_host
+            if self.service_host.startswith("https://"):
+                self.service_host = self.service_host.replace("https://", "http://")
+            elif "http://" not in self.service_host:
+                self.service_host = "http://" + self.service_host

         app_logger.log_inst.info("%s specify gpt host service: %s", self.__class__.__name__,
                                  self.service_host)
diff --git a/tools/tdgpt/taosanalytics/test/forecast_test.py b/tools/tdgpt/taosanalytics/test/forecast_test.py
index 1e4874b8c8..4b2368c6ba 100644
--- a/tools/tdgpt/taosanalytics/test/forecast_test.py
+++ b/tools/tdgpt/taosanalytics/test/forecast_test.py
@@ -8,7 +8,7 @@ import pandas as pd
 sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../../")

 from taosanalytics.algo.forecast import draw_fc_results
-from taosanalytics.conf import setup_log_info
+from taosanalytics.conf import setup_log_info, app_logger
 from taosanalytics.servicemgmt import loader


@@ -30,7 +30,8 @@ class ForecastTest(unittest.TestCase):
         ts_list = data[['Passengers']].index.tolist()
         dst_list = [int(item.timestamp()) for item in ts_list]

-        return data[['Passengers']].values.tolist(), dst_list
+        return data['Passengers'].values.tolist(), dst_list
+

     def test_holt_winters_forecast(self):
         """ test holt winters forecast with invalid and then valid parameters"""
@@ -111,5 +112,20 @@ class ForecastTest(unittest.TestCase):

         draw_fc_results(data, len(r["res"]) > 1, r["res"], rows, "arima")

+    def test_gpt_fc(self):
+        """for local test only, disabled it in github action"""
+        data, ts = self.get_input_list()
+        pass
+
+        # s = loader.get_service("td_gpt_fc")
+        # s.set_input_list(data, ts)
+        #
+        # s.set_params({"host":'192.168.2.90:5000/ds_predict', 'fc_rows': 10, 'start_ts': 171000000, 'time_step': 86400*30})
+        # r = s.execute()
+        #
+        # rows = len(r["res"][0])
+        # draw_fc_results(data, False, r["res"], rows, "gpt")
+
+

 if __name__ == '__main__':
     unittest.main()
diff --git a/tools/tdgpt/taosanalytics/test/restful_api_test.py b/tools/tdgpt/taosanalytics/test/restful_api_test.py
index 6463343e00..7dc43ab890 100644
--- a/tools/tdgpt/taosanalytics/test/restful_api_test.py
+++ b/tools/tdgpt/taosanalytics/test/restful_api_test.py
@@ -257,3 +257,54 @@ class RestfulTest(TestCase):
         self.assertEqual(response.status_code, 200)
         self.assertEqual(response.json["rows"], -1)

+
+
+    def test_gpt_restful_service(self):
+        response = self.client.post('/forecast', json={
+            "schema": [
+                ["ts", "TIMESTAMP", 8],
+                ["val", "INT", 4]
+            ],
+            "data": [
+                [
+                    1577808000000, 1577808001000, 1577808002000, 1577808003000, 1577808004000,
+                    1577808005000, 1577808006000, 1577808007000, 1577808008000, 1577808009000,
+                    1577808010000, 1577808011000, 1577808012000, 1577808013000, 1577808014000,
+                    1577808015000, 1577808016000, 1577808017000, 1577808018000, 1577808019000,
+                    1577808020000, 1577808021000, 1577808022000, 1577808023000, 1577808024000,
+                    1577808025000, 1577808026000, 1577808027000, 1577808028000, 1577808029000,
+                    1577808030000, 1577808031000, 1577808032000, 1577808033000, 1577808034000,
+                    1577808035000, 1577808036000, 1577808037000, 1577808038000, 1577808039000,
+                    1577808040000, 1577808041000, 1577808042000, 1577808043000, 1577808044000,
+                    1577808045000, 1577808046000, 1577808047000, 1577808048000, 1577808049000,
+                    1577808050000, 1577808051000, 1577808052000, 1577808053000, 1577808054000,
+                    1577808055000, 1577808056000, 1577808057000, 1577808058000, 1577808059000,
+                    1577808060000, 1577808061000, 1577808062000, 1577808063000, 1577808064000,
+                    1577808065000, 1577808066000, 1577808067000, 1577808068000, 1577808069000,
+                    1577808070000, 1577808071000, 1577808072000, 1577808073000, 1577808074000,
+                    1577808075000, 1577808076000, 1577808077000, 1577808078000, 1577808079000,
+                    1577808080000, 1577808081000, 1577808082000, 1577808083000, 1577808084000,
+                    1577808085000, 1577808086000, 1577808087000, 1577808088000, 1577808089000,
+                    1577808090000, 1577808091000, 1577808092000, 1577808093000, 1577808094000,
+                    1577808095000
+                ],
+                [
+                    13, 14, 8, 10, 16, 26, 32, 27, 18, 32, 36, 24, 22, 23, 22, 18, 25, 21, 21,
+                    14, 8, 11, 14, 23, 18, 17, 19, 20, 22, 19, 13, 26, 13, 14, 22, 24, 21, 22,
+                    26, 21, 23, 24, 27, 41, 31, 27, 35, 26, 28, 36, 39, 21, 17, 22, 17, 19, 15,
+                    34, 10, 15, 22, 18, 15, 20, 15, 22, 19, 16, 30, 27, 29, 23, 20, 16, 21, 21,
+                    25, 16, 18, 15, 18, 14, 10, 15, 8, 15, 6, 11, 8, 7, 13, 10, 23, 16, 15, 25
+                ]
+            ],
+            "option": "algo=td_gpt_fc",
+            "algo": "td_gpt_fc",
+            "prec": "ms",
+            "wncheck": 0,
+            "return_conf": 0,
+            "forecast_rows": 10,
+            "conf": 95,
+            "start": 1577808096000,
+            "every": 1000,
+            "rows": 21,
+            "protocol": 1.0
+        })
diff --git a/utils/test/c/CMakeLists.txt b/utils/test/c/CMakeLists.txt
index e85fbf4d6d..160374cb3a 100644
--- a/utils/test/c/CMakeLists.txt
+++ b/utils/test/c/CMakeLists.txt
@@ -1,4 +1,5 @@
 add_executable(tmq_demo tmqDemo.c)
+add_dependencies(tmq_demo ${TAOS_NATIVE_LIB})
 add_executable(tmq_sim tmqSim.c)
 add_executable(create_table createTable.c)
 add_executable(tmq_taosx_ci tmq_taosx_ci.c)
@@ -27,7 +28,7 @@ endif(${TD_LINUX})
 target_link_libraries(
     tmq_offset
-    PUBLIC ${TAOS_LIB}
+    PUBLIC ${TAOS_NATIVE_LIB}
     PUBLIC util
     PUBLIC common
     PUBLIC os
@@ -35,7 +36,7 @@ target_link_libraries(
 target_link_libraries(
     tmq_multi_thread_test
-    PUBLIC ${TAOS_LIB}
+    PUBLIC ${TAOS_NATIVE_LIB}
     PUBLIC util
     PUBLIC common
     PUBLIC os
@@ -43,7 +44,7 @@ target_link_libraries(
     create_table
-    PUBLIC ${TAOS_LIB}
+    PUBLIC ${TAOS_NATIVE_LIB}
     PUBLIC util
     PUBLIC common
     PUBLIC os
@@ -51,7 +52,7 @@ target_link_libraries(
     tmq_demo
-    PUBLIC ${TAOS_LIB}
+    PUBLIC ${TAOS_NATIVE_LIB}
     PUBLIC util
     PUBLIC common
     PUBLIC os
@@ -59,7 +60,7 @@ target_link_libraries(
     tmq_sim
-    PUBLIC ${TAOS_LIB_PLATFORM_SPEC}
+    PUBLIC ${TAOS_NATIVE_LIB}
     PUBLIC util
     PUBLIC common
     PUBLIC os
@@ -67,7 +68,7 @@ target_link_libraries(
     tmq_ts5466
-    PUBLIC ${TAOS_LIB}
+    PUBLIC ${TAOS_NATIVE_LIB}
     PUBLIC util
     PUBLIC common
     PUBLIC os
@@ -75,7 +76,7 @@ target_link_libraries(
     tmq_td32187
-    PUBLIC ${TAOS_LIB}
+    PUBLIC ${TAOS_NATIVE_LIB}
     PUBLIC util
     PUBLIC common
     PUBLIC os
@@ -114,7 +115,7 @@ target_link_libraries(
 )
 target_link_libraries(
     tmq_td32526
-    PUBLIC ${TAOS_LIB}
+    PUBLIC ${TAOS_NATIVE_LIB}
     PUBLIC util
     PUBLIC common
     PUBLIC os
@@ -130,7 +131,7 @@ target_link_libraries(
 target_link_libraries(
     tmq_taosx_ci
-    PUBLIC ${TAOS_LIB}
+    PUBLIC ${TAOS_NATIVE_LIB}
     PUBLIC util
     PUBLIC common
     PUBLIC os
@@ -138,7 +139,7 @@ target_link_libraries(
 target_link_libraries(
     tmq_offset_test
-    PUBLIC ${TAOS_LIB}
+    PUBLIC ${TAOS_NATIVE_LIB}
     PUBLIC util
     PUBLIC common
     PUBLIC os
@@ -146,7 +147,7 @@ target_link_libraries(
 target_link_libraries(
     replay_test
-    PUBLIC ${TAOS_LIB}
+    PUBLIC ${TAOS_NATIVE_LIB}
     PUBLIC util
     PUBLIC common
     PUBLIC os
@@ -154,7 +155,7 @@ target_link_libraries(
 target_link_libraries(
     write_raw_block_test
-    PUBLIC ${TAOS_LIB}
+    PUBLIC ${TAOS_NATIVE_LIB}
     PUBLIC util
     PUBLIC common
     PUBLIC os
@@ -162,7 +163,7 @@ target_link_libraries(
 target_link_libraries(
     tmq_write_raw_test
-    PUBLIC ${TAOS_LIB}
+    PUBLIC ${TAOS_NATIVE_LIB}
     PUBLIC util
     PUBLIC common
     PUBLIC os
@@ -170,7 +171,7 @@ target_link_libraries(
 target_link_libraries(
     sml_test
-    PUBLIC ${TAOS_LIB}
+    PUBLIC ${TAOS_NATIVE_LIB}
     PUBLIC util
     PUBLIC common
     PUBLIC os
@@ -179,7 +180,7 @@ target_link_libraries(
 target_link_libraries(
     get_db_name_test
-    PUBLIC ${TAOS_LIB}
+    PUBLIC ${TAOS_NATIVE_LIB}
     PUBLIC util
     PUBLIC common
     PUBLIC os
@@ -187,7 +188,7 @@ target_link_libraries(
 target_link_libraries(
     varbinary_test
-    PUBLIC ${TAOS_LIB}
+    PUBLIC ${TAOS_NATIVE_LIB}
     PUBLIC util
     PUBLIC common
     PUBLIC os
@@ -204,9 +205,9 @@ target_link_libraries(
 )
 if(${TD_LINUX})
     target_link_libraries(
         tsz_test
-        PUBLIC ${TAOS_LIB}
+        PUBLIC ${TAOS_NATIVE_LIB}
         PUBLIC util
         PUBLIC common
         PUBLIC os
     )
-endif(${TD_LINUX})
\ No newline at end of file
+endif(${TD_LINUX})
diff --git a/utils/tsim/CMakeLists.txt b/utils/tsim/CMakeLists.txt
index b725ed919a..d450e378be 100644
--- a/utils/tsim/CMakeLists.txt
+++ b/utils/tsim/CMakeLists.txt
@@ -10,7 +10,7 @@ TARGET_INCLUDE_DIRECTORIES(
 )
 TARGET_LINK_LIBRARIES(
     tsim_static
-    PUBLIC ${TAOS_LIB}
+    PUBLIC ${TAOS_NATIVE_LIB}
     PUBLIC util
     PUBLIC common
     PUBLIC os
diff --git a/utils/tsim/src/simEntry.c b/utils/tsim/src/simEntry.c
index dd11c21af0..2ebad8d191 100644
--- a/utils/tsim/src/simEntry.c
+++ b/utils/tsim/src/simEntry.c
@@ -43,6 +43,8 @@ int32_t simEntry(int32_t argc, char **argv) {
     }
   }

+  taos_options(TSDB_OPTION_DRIVER, "native");
+
   simInfo("simulator is running ...");
   simSystemInit();
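Note: the simEntry.c hunk above pins the client driver to "native" through taos_options() before the simulator starts, which lines up with the test binaries now linking against ${TAOS_NATIVE_LIB}. The sketch below is a minimal standalone illustration of that call pattern, not code from this patch; the host, user, and password values are placeholders, and the "websocket" alternative mentioned in the comment is an assumption rather than something stated in this diff.

```c
// Minimal sketch (illustration only, not part of this patch):
// select the driver implementation before opening the first connection.
#include <stdio.h>
#include "taos.h"

int main(void) {
  // This patch passes "native"; other values (e.g. "websocket") are assumed
  // to exist and are not taken from this diff.
  taos_options(TSDB_OPTION_DRIVER, "native");

  // Placeholder connection parameters for illustration.
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (conn == NULL) {
    fprintf(stderr, "connect failed: %s\n", taos_errstr(NULL));
    return 1;
  }

  taos_close(conn);
  taos_cleanup();
  return 0;
}
```

As in simEntry.c, the option is set before any connection is opened, so the driver choice takes effect for the whole process.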