Merge remote-tracking branch 'origin' into tomchon-patch-12
Commit 89d64f5ee5
@ -9,26 +9,9 @@ on:
|
|||
paths-ignore:
|
||||
- 'packaging/**'
|
||||
- 'docs/**'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
specified_source_branch:
|
||||
description: 'Enter the source branch name of TDengine'
|
||||
required: false
|
||||
default: 'unavailable'
|
||||
type: string
|
||||
specified_target_branch:
|
||||
description: 'Enter the target branch name of TDengine'
|
||||
required: false
|
||||
default: 'unavailable'
|
||||
type: string
|
||||
specified_pr_number:
|
||||
description: 'Enter the PR number of TDengine'
|
||||
required: false
|
||||
default: 'unavailable'
|
||||
type: string
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.event.inputs.specified_target_branch }}-${{ github.event_name == 'workflow_dispatch' }}-TDengine
|
||||
group: ${{ github.workflow }}-${{ github.ref }}-TDengine
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
|
@ -48,51 +31,48 @@ jobs:
|
|||
id: parameters
|
||||
run: |
|
||||
set -euo pipefail
|
||||
if ${{ github.event_name == 'workflow_dispatch' }}; then
|
||||
run_function_test="true"
|
||||
run_tdgpt_test="false"
|
||||
else
|
||||
target_branch=${{ github.event.pull_request.base.ref }}
|
||||
# target_branch=${{ github.event.pull_request.base.ref }}
|
||||
|
||||
# Fetch the latest code from the target branch
|
||||
cd ${{ env.WKC }}
|
||||
git reset --hard
|
||||
git clean -f
|
||||
git remote prune origin
|
||||
git fetch
|
||||
git checkout "$target_branch"
|
||||
git remote prune origin
|
||||
git pull >/dev/null
|
||||
|
||||
# Check whether to run tdgpt test cases
|
||||
changed_files_non_doc=$(git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD $target_branch) | grep -v "^docs/en/" | grep -v "^docs/zh/" | grep -v ".md$" | tr '\n' ' ' || :)
|
||||
echo "changed files exclude doc: ${changed_files_non_doc}"
|
||||
|
||||
if [[ -n "$changed_files_non_doc" && "$changed_files_non_doc" =~ (forecastoperator\.c|anomalywindowoperator\.c|tanalytics\.h|tanalytics\.c|tdgpt_cases\.task|analytics|tdgpt) ]]; then
|
||||
run_tdgpt_test="true"
|
||||
else
|
||||
run_tdgpt_test="false"
|
||||
fi
|
||||
echo "run tdgpt test: ${run_tdgpt_test}"
|
||||
# # Fetch the latest code from the target branch
|
||||
# cd ${{ env.WKC }}
|
||||
# git reset --hard
|
||||
# git clean -f
|
||||
# git remote prune origin
|
||||
# git fetch
|
||||
# git checkout "$target_branch"
|
||||
# git remote prune origin
|
||||
# git pull >/dev/null
|
||||
|
||||
# Check whether to run function test cases
|
||||
changed_files_non_tdgpt=$(git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD $target_branch) | \
|
||||
grep -v "^docs/en/" | \
|
||||
grep -v "^docs/zh/" | \
|
||||
grep -v ".md$" | \
|
||||
grep -Ev "forecastoperator\.c|anomalywindowoperator\.c|tanalytics\.h|tanalytics\.c|tdgpt_cases\.task|analytics|tdgpt" | \
|
||||
tr '\n' ' ' || :)
|
||||
echo "changed files exclude tdgpt: ${changed_files_non_tdgpt}"
|
||||
|
||||
if [ -n "$changed_files_non_tdgpt" ]; then
|
||||
run_function_test="true"
|
||||
else
|
||||
run_function_test="false"
|
||||
fi
|
||||
# # Check whether to run tdgpt test cases
|
||||
# changed_files_non_doc=$(git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD $target_branch) | grep -v "^docs/en/" | grep -v "^docs/zh/" | grep -v ".md$" | tr '\n' ' ' || :)
|
||||
# echo "changed files exclude doc: ${changed_files_non_doc}"
|
||||
|
||||
# if [[ -n "$changed_files_non_doc" && "$changed_files_non_doc" =~ (forecastoperator\.c|anomalywindowoperator\.c|tanalytics\.h|tanalytics\.c|tdgpt_cases\.task|analytics|tdgpt) ]]; then
|
||||
# run_tdgpt_test="true"
|
||||
# else
|
||||
# run_tdgpt_test="false"
|
||||
# fi
|
||||
# echo "run tdgpt test: ${run_tdgpt_test}"
|
||||
|
||||
echo "run function test: ${run_function_test}"
|
||||
fi
|
||||
# # Check whether to run function test cases
|
||||
# changed_files_non_tdgpt=$(git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD $target_branch) | \
|
||||
# grep -v "^docs/en/" | \
|
||||
# grep -v "^docs/zh/" | \
|
||||
# grep -v ".md$" | \
|
||||
# grep -Ev "forecastoperator\.c|anomalywindowoperator\.c|tanalytics\.h|tanalytics\.c|tdgpt_cases\.task|analytics|tdgpt" | \
|
||||
# tr '\n' ' ' || :)
|
||||
# echo "changed files exclude tdgpt: ${changed_files_non_tdgpt}"
|
||||
|
||||
# if [ -n "$changed_files_non_tdgpt" ]; then
|
||||
# run_function_test="true"
|
||||
# else
|
||||
# run_function_test="false"
|
||||
# fi
|
||||
|
||||
# echo "run function test: ${run_function_test}"
|
||||
|
||||
run_tdgpt_test="true"
|
||||
run_function_test="true"
|
||||
# Output the results for GitHub Actions
|
||||
echo "run_function_test=$run_function_test" >> $GITHUB_OUTPUT
|
||||
echo "run_tdgpt_test=$run_tdgpt_test" >> $GITHUB_OUTPUT
|
||||
|
@ -102,19 +82,23 @@ jobs:
|
|||
echo ${{ github.event.pull_request.number }}
|
||||
|
||||
run-tests-on-linux:
|
||||
uses: taosdata/.github/.github/workflows/run-tests-on-linux.yml@ci/test/workflow
|
||||
uses: taosdata/.github/.github/workflows/run-tests-on-linux.yml@main
|
||||
needs: fetch-parameters
|
||||
if: ${{ needs.fetch-parameters.outputs.run_tdgpt_test == 'true' || needs.fetch-parameters.outputs.run_function_test == 'true' }}
|
||||
with:
|
||||
tdinternal: false
|
||||
run_function_test: ${{ needs.fetch-parameters.outputs.run_function_test == 'true' }}
|
||||
run_tdgpt_test: ${{ needs.fetch-parameters.outputs.run_tdgpt_test == 'true' }}
|
||||
specified_source_branch: ${{ github.event.inputs.specified_source_branch }}
|
||||
specified_target_branch: ${{ github.event.inputs.specified_target_branch }}
|
||||
specified_pr_number: ${{ github.event.inputs.specified_pr_number }}
|
||||
|
||||
run-tests-on-mac:
|
||||
uses: taosdata/.github/.github/workflows/run-tests-on-macos.yml@ci/test/workflow
|
||||
uses: taosdata/.github/.github/workflows/run-tests-on-macos.yml@main
|
||||
needs: fetch-parameters
|
||||
if: ${{ needs.fetch-parameters.outputs.run_function_test == 'true' }}
|
||||
with:
|
||||
tdinternal: false
|
||||
|
||||
run-tests-on-windows:
|
||||
uses: taosdata/.github/.github/workflows/run-tests-on-windows.yml@main
|
||||
needs: fetch-parameters
|
||||
if: ${{ needs.fetch-parameters.outputs.run_function_test == 'true' }}
|
||||
with:
|
||||
|
@ -122,13 +106,3 @@ jobs:
|
|||
specified_source_branch: ${{ github.event.inputs.specified_source_branch }}
|
||||
specified_target_branch: ${{ github.event.inputs.specified_target_branch }}
|
||||
specified_pr_number: ${{ github.event.inputs.specified_pr_number }}
|
||||
|
||||
run-tests-on-windows:
|
||||
uses: taosdata/.github/.github/workflows/run-tests-on-windows.yml@ci/test/workflow
|
||||
needs: fetch-parameters
|
||||
if: ${{ needs.fetch-parameters.outputs.run_function_test == 'true' }}
|
||||
with:
|
||||
tdinternal: false
|
||||
specified_source_branch: ${{ github.event.inputs.specified_source_branch }}
|
||||
specified_target_branch: ${{ github.event.inputs.specified_target_branch }}
|
||||
specified_pr_number: ${{ github.event.inputs.specified_pr_number }}
|
|
@ -339,20 +339,272 @@ Helm operates Kubernetes using kubectl and kubeconfig configurations, which can
|
|||
The TDengine Chart has not yet been released to the Helm repository; it can currently be downloaded directly from GitHub:
|
||||
|
||||
```shell
|
||||
wget https://github.com/taosdata/TDengine-Operator/raw/refs/heads/3.0/helm/tdengine-enterprise-3.5.0.tgz
|
||||
wget https://github.com/taosdata/TDengine-Operator/raw/refs/heads/3.0/helm/tdengine-3.5.0.tgz
|
||||
```
|
||||
|
||||
Note that it's for the enterprise edition, and the community edition is not yet available.
|
||||
|
||||
Follow the steps below to install the TDengine Chart:
|
||||
|
||||
```shell
|
||||
# Edit the values.yaml file to set the topology of the cluster
|
||||
vim values.yaml
|
||||
helm install tdengine tdengine-enterprise-3.5.0.tgz -f values.yaml
|
||||
helm install tdengine tdengine-3.5.0.tgz -f values.yaml
|
||||
```
|
||||
|
||||
#### Case 1: Simple 1-node Deployment
|
||||
If you are using community images, you can install TDengine with the Helm chart as shown in the following use cases:
|
||||
|
||||
<details>
|
||||
<summary>Helm Chart Use Cases for Community</summary>
|
||||
|
||||
#### Community Case 1: Simple 1-node Deployment
|
||||
|
||||
The following is a simple example of deploying a single-node TDengine cluster using Helm.
|
||||
|
||||
```yaml
|
||||
# This example is a simple deployment with one server replica.
|
||||
name: "tdengine"
|
||||
|
||||
image:
|
||||
repository: # Leave a trailing slash for the repository, or "" for no repository
|
||||
server: tdengine/tdengine:latest
|
||||
|
||||
# Set timezone here, not in taoscfg
|
||||
timezone: "Asia/Shanghai"
|
||||
|
||||
labels:
|
||||
app: "tdengine"
|
||||
# Add more labels as needed.
|
||||
|
||||
services:
|
||||
server:
|
||||
type: ClusterIP
|
||||
replica: 1
|
||||
ports:
|
||||
# TCP range required
|
||||
tcp: [6041, 6030, 6060]
|
||||
# UDP range, optional
|
||||
udp:
|
||||
volumes:
|
||||
- name: data
|
||||
mountPath: /var/lib/taos
|
||||
spec:
|
||||
storageClassName: "local-path"
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
resources:
|
||||
requests:
|
||||
storage: "10Gi"
|
||||
- name: log
|
||||
mountPath: /var/log/taos/
|
||||
spec:
|
||||
storageClassName: "local-path"
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
resources:
|
||||
requests:
|
||||
storage: "10Gi"
|
||||
files:
|
||||
- name: cfg # must be lower case.
|
||||
mountPath: /etc/taos/taos.cfg
|
||||
content: |
|
||||
dataDir /var/lib/taos/
|
||||
logDir /var/log/taos/
|
||||
```
|
||||
|
||||
Let's explain the above configuration:
|
||||
|
||||
- name: The name of the deployment, here it is "tdengine".
|
||||
- image:
|
||||
- repository: The image repository address. Remember to leave a trailing slash for the repository, or set it to an empty string to use docker.io.
|
||||
- server: The specific name and tag of the server image. You need to ask your business partner for the TDengine Enterprise image.
|
||||
- timezone: Set the timezone, here it is "Asia/Shanghai".
|
||||
- labels: Labels for the deployment; here an app label with the value "tdengine" is set, and more labels can be added as needed.
|
||||
- services:
|
||||
- server: Configure the server service.
|
||||
- type: The service type, here it is **ClusterIP**.
|
||||
- replica: The number of replicas, here it is 1.
|
||||
- ports: Configure the ports of the service.
|
||||
- tcp: The required TCP port range, here it is [6041, 6030, 6060].
|
||||
- udp: The optional UDP port range, which is not configured here.
|
||||
- volumes: Configure the volumes.
|
||||
- name: The name of the volume, here there are two volumes, data and log.
|
||||
- mountPath: The mount path of the volume.
|
||||
- spec: The specification of the volume.
|
||||
- storageClassName: The storage class name, here it is **local-path**.
|
||||
- accessModes: The access mode, here it is **ReadWriteOnce**.
|
||||
- resources.requests.storage: The requested storage size, here it is **10Gi**.
|
||||
- files: Configure the files to mount in TDengine server.
|
||||
- name: The name of the file, here it is **cfg**.
|
||||
- mountPath: The mount path of the file, which is **taos.cfg**.
|
||||
- content: The content of the file, here the **dataDir** and **logDir** are configured.
|
||||
|
||||
After configuring the values.yaml file, use the following command to install the TDengine Chart:
|
||||
|
||||
```shell
|
||||
helm install simple tdengine-3.5.0.tgz -f values.yaml
|
||||
```
|
||||
|
||||
After installation, the output shows instructions for checking the status of the TDengine cluster:
|
||||
|
||||
```shell
|
||||
NAME: simple
|
||||
LAST DEPLOYED: Sun Feb  9 13:40:00 2025
NAMESPACE: default
|
||||
STATUS: deployed
|
||||
REVISION: 1
|
||||
TEST SUITE: None
|
||||
NOTES:
|
||||
1. Get first POD name:
|
||||
|
||||
export POD_NAME=$(kubectl get pods --namespace default \
|
||||
-l "app.kubernetes.io/name=tdengine,app.kubernetes.io/instance=simple" -o jsonpath="{.items[0].metadata.name}")
|
||||
|
||||
2. Show dnodes/mnodes:
|
||||
|
||||
kubectl --namespace default exec $POD_NAME -- taos -s "show dnodes; show mnodes"
|
||||
|
||||
3. Run into TDengine CLI:
|
||||
|
||||
kubectl --namespace default exec -it $POD_NAME -- taos
|
||||
```
|
||||
|
||||
Follow the instructions to check the status of the TDengine cluster:
|
||||
|
||||
```shell
|
||||
root@u1-58:/data1/projects/helm# kubectl --namespace default exec $POD_NAME -- taos -s "show dnodes; show mnodes"
|
||||
Welcome to the TDengine Command Line Interface, Client Version:3.3.5.8
|
||||
Copyright (c) 2023 by TDengine, all rights reserved.
|
||||
|
||||
taos> show dnodes; show mnodes
|
||||
id | endpoint | vnodes | support_vnodes | status | create_time | reboot_time | note |
|
||||
=============================================================================================================================================================================
|
||||
1 | oss-tdengine-0.oss-tdengine... | 0 | 21 | ready | 2025-03-12 19:05:42.224 | 2025-03-12 19:05:42.044 | |
|
||||
Query OK, 1 row(s) in set (0.002545s)
|
||||
|
||||
id | endpoint | role | status | create_time | role_time |
|
||||
==================================================================================================================================
|
||||
1 | oss-tdengine-0.oss-tdengine... | leader | ready | 2025-03-12 19:05:42.239 | 2025-03-12 19:05:42.137 |
|
||||
Query OK, 1 row(s) in set (0.001343s)
|
||||
```
|
||||
|
||||
To clean up the TDengine cluster, use the following command:
|
||||
|
||||
```shell
|
||||
helm uninstall simple
|
||||
kubectl delete pvc -l app.kubernetes.io/instance=simple
|
||||
```
|
||||
|
||||
#### Community Case 2: 3-replica Deployment with Single taosX
|
||||
|
||||
```yaml
|
||||
# This example shows how to deploy a 3-replica TDengine cluster with separate taosx/explorer service.
|
||||
# Users should know that the explorer/taosx service is not cluster-ready, so it is recommended to deploy it separately.
|
||||
name: "tdengine"
|
||||
|
||||
image:
|
||||
repository: # Leave a trailing slash for the repository, or "" for no repository
|
||||
server: tdengine/tdengine:latest
|
||||
|
||||
# Set timezone here, not in taoscfg
|
||||
timezone: "Asia/Shanghai"
|
||||
|
||||
labels:
|
||||
# Add more labels as needed.
|
||||
|
||||
services:
|
||||
server:
|
||||
type: ClusterIP
|
||||
replica: 3
|
||||
ports:
|
||||
# TCP range required
|
||||
tcp: [6041, 6030]
|
||||
# UDP range, optional
|
||||
udp:
|
||||
volumes:
|
||||
- name: data
|
||||
mountPath: /var/lib/taos
|
||||
spec:
|
||||
storageClassName: "local-path"
|
||||
accessModes: ["ReadWriteOnce"]
|
||||
resources:
|
||||
requests:
|
||||
storage: "10Gi"
|
||||
- name: log
|
||||
mountPath: /var/log/taos/
|
||||
spec:
|
||||
storageClassName: "local-path"
|
||||
accessModes: ["ReadWriteOnce"]
|
||||
resources:
|
||||
requests:
|
||||
storage: "10Gi"
|
||||
```
|
||||
|
||||
You can see that the configuration is similar to the first one, with the addition of the taosx configuration. The taosx service uses a storage configuration similar to the server service's, and the server service is configured with 3 replicas. Since the taosx service is not cluster-ready, it is recommended to deploy it separately.
|
||||
|
||||
After configuring the values.yaml file, use the following command to install the TDengine Chart:
|
||||
|
||||
```shell
|
||||
helm install replica3 tdengine-3.5.0.tgz -f values.yaml
|
||||
```
|
||||
|
||||
To clean up the TDengine cluster, use the following command:
|
||||
|
||||
```shell
|
||||
helm uninstall replica3
|
||||
kubectl delete pvc -l app.kubernetes.io/instance=replica3
|
||||
```
|
||||
|
||||
You can use the following command to expose the explorer service to the outside world with ingress:
|
||||
|
||||
```shell
|
||||
tee replica3-ingress.yaml <<EOF
|
||||
# This is a helm chart example for deploying 3 replicas of TDengine Explorer
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: replica3-ingress
|
||||
namespace: default
|
||||
spec:
|
||||
rules:
|
||||
- host: replica3.local.tdengine.com
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: replica3-tdengine-taosx
|
||||
port:
|
||||
number: 6060
|
||||
EOF
|
||||
|
||||
kubectl apply -f replica3-ingress.yaml
|
||||
```
|
||||
|
||||
Use `kubectl get ingress` to view the ingress service.
|
||||
|
||||
```shell
|
||||
root@server:/data1/projects/helm# kubectl get ingress
|
||||
NAME CLASS HOSTS ADDRESS PORTS AGE
|
||||
replica3-ingress nginx replica3.local.tdengine.com 192.168.1.58 80 48m
|
||||
```
|
||||
|
||||
You can configure the domain name resolution to point to the ingress service's external IP address. For example, add the following line to the hosts file:
|
||||
|
||||
```conf
|
||||
192.168.1.58 replica3.local.tdengine.com
|
||||
```
|
||||
|
||||
Now you can access the explorer service through the domain name `replica3.local.tdengine.com`.
|
||||
|
||||
```shell
|
||||
curl http://replica3.local.tdengine.com
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
With TDengine Enterprise images, you can install TDengine with the Helm chart as shown in the following use cases:
|
||||
|
||||
<details>
|
||||
<summary>Helm Chart Use Cases for Enterprise</summary>
|
||||
|
||||
#### Enterprise Case 1: Simple 1-node Deployment
|
||||
|
||||
The following is a simple example of deploying a single-node TDengine cluster using Helm.
|
||||
|
||||
|
@ -435,7 +687,7 @@ Let's explain the above configuration:
|
|||
After configuring the values.yaml file, use the following command to install the TDengine Chart:
|
||||
|
||||
```shell
|
||||
helm install simple tdengine-enterprise-3.5.0.tgz -f values.yaml
|
||||
helm install simple tdengine-3.5.0.tgz -f values.yaml
|
||||
```
|
||||
|
||||
After installation, the output shows instructions for checking the status of the TDengine cluster:
|
||||
|
@ -487,7 +739,7 @@ helm uninstall simple
|
|||
kubectl delete pvc -l app.kubernetes.io/instance=simple
|
||||
```
|
||||
|
||||
#### Case 2: Tiered-Storage Deployment
|
||||
#### Enterprise Case 2: Tiered-Storage Deployment
|
||||
|
||||
The following is an example of deploying a TDengine cluster with tiered storage using Helm.
|
||||
|
||||
|
@ -563,10 +815,10 @@ You can see that the configuration is similar to the previous one, with the addi
|
|||
After configuring the values.yaml file, use the following command to install the TDengine Chart:
|
||||
|
||||
```shell
|
||||
helm install tiered tdengine-enterprise-3.5.0.tgz -f values.yaml
|
||||
helm install tiered tdengine-3.5.0.tgz -f values.yaml
|
||||
```
|
||||
|
||||
#### Case 3: 2-replica Deployment
|
||||
#### Enterprise Case 3: 2-replica Deployment
|
||||
|
||||
TDengine support 2-replica deployment with an arbitrator, which can be configured as follows:
|
||||
|
||||
|
@ -634,7 +886,7 @@ services:
|
|||
|
||||
You can see that the configuration is similar to the first one, with the addition of the arbitrator configuration. The arbitrator service is configured with the same storage as the server service, and the server service is configured with 2 replicas (the arbitrator is fixed at 1 replica and cannot be changed).
|
||||
|
||||
#### Case 4: 3-replica Deployment with Single taosX
|
||||
#### Enterprise Case 4: 3-replica Deployment with Single taosX
|
||||
|
||||
```yaml
|
||||
# This example shows how to deploy a 3-replica TDengine cluster with separate taosx/explorer service.
|
||||
|
@ -761,7 +1013,7 @@ You can see that the configuration is similar to the first one, with the additio
|
|||
After configuring the values.yaml file, use the following command to install the TDengine Chart:
|
||||
|
||||
```shell
|
||||
helm install replica3 tdengine-enterprise-3.5.0.tgz -f values.yaml
|
||||
helm install replica3 tdengine-3.5.0.tgz -f values.yaml
|
||||
```
|
||||
|
||||
You can use the following command to expose the explorer service to the outside world with ingress:
|
||||
|
@ -810,3 +1062,5 @@ Now you can access the explorer service through the domain name `replica3.local.
|
|||
```shell
|
||||
curl http://replica3.local.tdengine.com
|
||||
```
|
||||
|
||||
</details>
|
||||
|
|
|
@ -347,10 +347,14 @@ Specify the configuration parameters for tag and data columns in `super_tables`
|
|||
|
||||
### Query Parameters
|
||||
|
||||
In query scenarios, `filetype` must be set to `query`.
|
||||
`filetype` must be set to `query`.
|
||||
|
||||
`query_mode`: connection method, one of:
|
||||
- "taosc": Native.
|
||||
- "rest" : RESTful.
|
||||
|
||||
`query_times` specifies the number of times to run each query; numeric type.
|
||||
|
||||
In query scenarios, slow queries can be controlled with the `kill_slow_query_threshold` and `kill_slow_query_interval` parameters: the threshold (in seconds) causes taosBenchmark to kill any query whose exec_usec exceeds it, and the interval (in seconds) is the sleep time between checks, avoiding continuous CPU consumption from monitoring slow queries.
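To make the interaction of these two parameters concrete, here is a minimal standalone Python sketch of the behavior described above; it is not taosBenchmark's implementation, and `running_queries` and `kill_query` are hypothetical callbacks standing in for the real monitoring and kill mechanism.

```python
import time

def watch_slow_queries(running_queries, kill_query,
                       kill_slow_query_threshold, kill_slow_query_interval):
    """Sketch of the slow-query control described above (hypothetical helpers).

    running_queries() yields (query_id, exec_usec) pairs and kill_query(query_id)
    terminates a query; both are stand-ins. Threshold and interval are in seconds,
    exec_usec is in microseconds.
    """
    while True:
        for query_id, exec_usec in running_queries():
            # Kill any query whose execution time exceeds the threshold.
            if exec_usec / 1_000_000 > kill_slow_query_threshold:
                kill_query(query_id)
        # Sleep between checks to avoid continuous CPU consumption.
        time.sleep(kill_slow_query_interval)
```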
|
||||
|
||||
For other common parameters, see [General Configuration Parameters](#general-configuration-parameters)
|
||||
|
||||
|
@ -358,13 +362,26 @@ For other common parameters, see [General Configuration Parameters](#general-con
|
|||
|
||||
Configuration parameters for querying specified tables (can specify supertables, subtables, or regular tables) are set in `specified_table_query`.
|
||||
- **mixed_query** : Query mode. "yes" selects `Mixed Query`, "no" selects `General Query`; default is "no".
|
||||
`General Query`:
|
||||
`General Query`:
|
||||
For each SQL in `sqls`, `threads` threads are started to run that SQL; each thread exits after executing `query_times` queries, and the next SQL is executed only after all threads running the current SQL have completed.
|
||||
The total number of queries (`General Query`) = the number of `sqls` * `query_times` * `threads`
|
||||
`Mixed Query`:
|
||||
`Mixed Query`:
|
||||
All SQL statements in `sqls` are divided into `threads` groups, with each thread executing one group; each SQL statement is executed `query_times` times.
|
||||
The total number of queries(`Mixed Query`) = the number of `sqls` * `query_times`
|
||||
The total number of queries (`Mixed Query`) = the number of `sqls` * `query_times`. A worked sketch of these totals is shown after this parameter list.
|
||||
|
||||
- **batch_query** : Batch query switch.
|
||||
"yes": indicates that it is enabled.
|
||||
"no": indicates that it is not enabled, and other values report errors.
|
||||
Batch query divides all SQL statements in `sqls` into `threads` groups, with each thread executing one group.
|
||||
Each SQL statement is executed only once per round; the main thread waits for all threads to complete, then checks whether the `query_interval` parameter is set and, if so, sleeps for the specified time. The thread groups are then restarted and the process repeats until the number of query rounds is exhausted.
|
||||
Functional limitations:
|
||||
- Only supports scenarios where `mixed_query` is set to 'yes'.
|
||||
- RESTful queries are not supported, meaning `query_mode` cannot be 'rest'.
|
||||
|
||||
- **query_interval** : Query interval, in milliseconds; default is 0.
|
||||
When the `batch_query` switch is enabled, this is the interval after each batch of queries completes; when disabled, it is the interval between the completion of each SQL query.
|
||||
If the query's execution time exceeds the interval, there is no additional wait; if it is shorter than the interval, the thread waits for the remaining time to make up the interval.
|
||||
|
||||
- **threads** : Number of threads executing the SQL query; default is 1.
|
||||
- **sqls**:
|
||||
- **sql**: The SQL command to execute, required.
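As a worked example of the counting rules above, the following minimal Python sketch (the helper names are illustrative, not part of taosBenchmark) computes the expected totals for the three modes using the values from the sample JSON files in this commit (5 SQL statements, `query_times` of 5, and 5 threads/`concurrent`).

```python
# Illustrative helpers reproducing the total-query formulas described above.

def general_query_total(num_sqls: int, query_times: int, threads: int) -> int:
    # Each SQL is run query_times times by each of the `threads` threads.
    return num_sqls * query_times * threads

def mixed_query_total(num_sqls: int, query_times: int) -> int:
    # SQLs are split across threads; each SQL is still run query_times times in total.
    return num_sqls * query_times

def batch_query_total(num_sqls: int, query_times: int) -> int:
    # In batch mode each SQL runs once per round and query_times rounds are executed
    # (with an optional query_interval sleep between rounds), matching the mixed count.
    return num_sqls * query_times

if __name__ == "__main__":
    print(general_query_total(5, 5, 5))  # 125
    print(mixed_query_total(5, 5))       # 25
    print(batch_query_total(5, 5))       # 25
```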
|
||||
|
|
|
@ -253,27 +253,44 @@ taosBenchmark -f <json file>
|
|||
### Query Configuration Parameters

In query scenarios, `filetype` must be set to `query`.

`query_mode`: the query connection method, one of:
- "taosc": query through the native connection.
- "rest": query through the RESTful connection.

`query_times`: the number of times to run the queries; numeric.

In query scenarios, slow queries can be controlled by setting the `kill_slow_query_threshold` and `kill_slow_query_interval` parameters: the threshold (in seconds) makes taosBenchmark kill any query whose exec_usec exceeds it.
The interval (in seconds) controls the sleep time, avoiding continuous CPU consumption from monitoring slow queries.

For other common parameters, see [General Configuration Parameters](#通用配置参数)
For other common parameters, see [General Configuration Parameters](#通用配置参数).

#### Executing Specified Query Statements

Configuration parameters for querying specified tables (supertables, subtables, or regular tables can be specified) are set in `specified_table_query`.

- **mixed_query**: query mode
  "yes": `mixed query`
  "no" (default): `general query`
  `General query`: for each SQL in `sqls`, `threads` threads are started to query that SQL; they exit after running `query_times` queries, and the next SQL starts only after all threads running the current SQL have finished
- **mixed_query**: mixed-query switch.
  "yes": enable "mixed query".
  "no": disable "mixed query", i.e. use "general query".

  - General query:

    For each SQL in `sqls`, `threads` threads are started to query that SQL; they exit after running `query_times` queries, and the next SQL starts only after all threads running the current SQL have finished
    `Total queries` = number of `sqls` * `query_times` * `threads`

  `Mixed query`: all SQL statements in `sqls` are split into `threads` groups, each thread executes one group, and every SQL is run `query_times` times
  - Mixed query:

    All SQL statements in `sqls` are split into `threads` groups, each thread executes one group, and every SQL is run `query_times` times
    `Total queries` = number of `sqls` * `query_times`

- **batch_query**: batch-query switch.
  "yes" enables it, "no" disables it; any other value reports an error.
  Batch query means all SQL statements in `sqls` are split into `threads` groups, each thread executes one group, and each SQL is run only once per round; the main thread waits for all threads to finish, then checks whether `query_interval` is set and sleeps for the specified time if so, after which the thread groups are restarted and the process repeats until the number of query rounds is exhausted.
  Functional limitations:
  - Only supported when `mixed_query` is "yes".
  - RESTful queries are not supported, i.e. `query_mode` cannot be "rest".

- **query_interval**: query interval, in milliseconds; default is 0.
  When the "batch_query" switch is on, this is the interval after each batch of queries completes; when off, it is the interval after each SQL query completes.
  If the query's execution time exceeds the interval, there is no additional wait; if it is shorter than the interval, the thread waits to make up the remaining time.

- **threads**: number of threads executing the query SQL; default is 1.
|
||||
|
||||
|
|
|
@ -17,7 +17,9 @@
|
|||
#include "function.h"
|
||||
#include "functionMgt.h"
|
||||
#include "operator.h"
|
||||
#include "query.h"
|
||||
#include "querytask.h"
|
||||
#include "taoserror.h"
|
||||
#include "tchecksum.h"
|
||||
#include "tcommon.h"
|
||||
#include "tcompare.h"
|
||||
|
@ -1016,6 +1018,11 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI
|
|||
if (colDataIsNull(pStateColInfoData, pBlock->info.rows, j, pAgg)) {
|
||||
continue;
|
||||
}
|
||||
if (pStateColInfoData->pData == NULL) {
|
||||
qError("%s:%d state column data is null", __FILE__, __LINE__);
|
||||
pTaskInfo->code = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR;
|
||||
T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR);
|
||||
}
|
||||
|
||||
char* val = colDataGetData(pStateColInfoData, j);
|
||||
|
||||
|
|
|
@ -2196,9 +2196,17 @@ static int32_t createWindowPhysiNodeFinalize(SPhysiPlanContext* pCxt, SNodeList*
|
|||
SDataBlockDescNode* pChildTupe = (((SPhysiNode*)nodesListGetNode(pChildren, 0))->pOutputDataBlockDesc);
|
||||
// push down expression to pOutputDataBlockDesc of child node
|
||||
if (TSDB_CODE_SUCCESS == code && NULL != pPrecalcExprs) {
|
||||
code = setListSlotId(pCxt, pChildTupe->dataBlockId, -1, pPrecalcExprs, &pWindow->pExprs);
|
||||
SNodeList* pOutput;
|
||||
code = setListSlotId(pCxt, pChildTupe->dataBlockId, -1, pPrecalcExprs, &pOutput);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = addDataBlockSlots(pCxt, pWindow->pExprs, pChildTupe);
|
||||
code = addDataBlockSlots(pCxt, pOutput, pChildTupe);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
if (pWindow->pExprs == NULL) {
|
||||
pWindow->pExprs = pOutput;
|
||||
} else {
|
||||
code = nodesListAppendList(pWindow->pExprs, pOutput);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,36 @@
|
|||
{
|
||||
"filetype": "query",
|
||||
"cfgdir": "/etc/taos",
|
||||
"host": "127.0.0.1",
|
||||
"port": 6030,
|
||||
"user": "root",
|
||||
"password": "taosdata",
|
||||
"confirm_parameter_prompt": "no",
|
||||
"databases": "test",
|
||||
"query_times": 5,
|
||||
"query_mode": "taosc",
|
||||
"specified_table_query": {
|
||||
"concurrent": 5,
|
||||
"query_interval": 0,
|
||||
"mixed_query": "no",
|
||||
"batch_query": "yes",
|
||||
"sqls": [
|
||||
{
|
||||
"sql": "select last_row(*) from test.meters"
|
||||
},
|
||||
{
|
||||
"sql": "select count(*) from test.meters"
|
||||
},
|
||||
{
|
||||
"sql": "select * from test.d0",
|
||||
"result": "./query_res1.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select count(*) from test.d1"
|
||||
},
|
||||
{
|
||||
"sql": "select * from test.d2"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
|
@ -0,0 +1,36 @@
|
|||
{
|
||||
"filetype": "query",
|
||||
"cfgdir": "/etc/taos",
|
||||
"host": "127.0.0.1",
|
||||
"port": 6030,
|
||||
"user": "root",
|
||||
"password": "taosdata",
|
||||
"confirm_parameter_prompt": "no",
|
||||
"databases": "test",
|
||||
"query_times": 5,
|
||||
"query_mode": "rest",
|
||||
"specified_table_query": {
|
||||
"concurrent": 5,
|
||||
"query_interval": 1000,
|
||||
"mixed_query": "yes",
|
||||
"batch_query": "yes",
|
||||
"sqls": [
|
||||
{
|
||||
"sql": "select last_row(*) from test.meters"
|
||||
},
|
||||
{
|
||||
"sql": "select count(*) from test.meters"
|
||||
},
|
||||
{
|
||||
"sql": "select * from test.d0",
|
||||
"result": "./query_res1.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select count(*) from test.d1"
|
||||
},
|
||||
{
|
||||
"sql": "select * from test.d2"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
|
@ -0,0 +1,36 @@
|
|||
{
|
||||
"filetype": "query",
|
||||
"cfgdir": "/etc/taos",
|
||||
"host": "127.0.0.1",
|
||||
"port": 6030,
|
||||
"user": "root",
|
||||
"password": "taosdata",
|
||||
"confirm_parameter_prompt": "no",
|
||||
"databases": "test",
|
||||
"query_times": 5,
|
||||
"query_mode": "taosc",
|
||||
"specified_table_query": {
|
||||
"concurrent": 5,
|
||||
"query_interval": 1000,
|
||||
"mixed_query": "yes",
|
||||
"batch_query": "yes",
|
||||
"sqls": [
|
||||
{
|
||||
"sql": "select last_row(*) from test.meters"
|
||||
},
|
||||
{
|
||||
"sql": "select count(*) from test.meters"
|
||||
},
|
||||
{
|
||||
"sql": "select * from test.d0",
|
||||
"result": "./query_res1.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select count(*) from test.d1"
|
||||
},
|
||||
{
|
||||
"sql": "select * from test.d2"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
|
@ -0,0 +1,36 @@
|
|||
{
|
||||
"filetype": "query",
|
||||
"cfgdir": "/etc/taos",
|
||||
"host": "127.0.0.1",
|
||||
"port": 6030,
|
||||
"user": "root",
|
||||
"password": "taosdata",
|
||||
"confirm_parameter_prompt": "no",
|
||||
"databases": "test",
|
||||
"query_times": 5,
|
||||
"query_mode": "taosc",
|
||||
"specified_table_query": {
|
||||
"concurrent": 5,
|
||||
"query_interval": 100,
|
||||
"mixed_query": "yes",
|
||||
"batch_query": "no",
|
||||
"sqls": [
|
||||
{
|
||||
"sql": "select last_row(*) from test.meters"
|
||||
},
|
||||
{
|
||||
"sql": "select count(*) from test.meters"
|
||||
},
|
||||
{
|
||||
"sql": "select * from test.d0",
|
||||
"result": "./query_res1.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select count(*) from test.d1"
|
||||
},
|
||||
{
|
||||
"sql": "select * from test.d2"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
|
@ -40,7 +40,7 @@ def removeQuotation(origin):
|
|||
class TDTestCase(TBase):
|
||||
def caseDescription(self):
|
||||
"""
|
||||
[TD-11510] taosBenchmark test cases
|
||||
taosBenchmark query->Basic test cases
|
||||
"""
|
||||
|
||||
def runSeconds(self, command, timeout = 180):
|
||||
|
@ -99,15 +99,15 @@ class TDTestCase(TBase):
|
|||
fval = float(value)
|
||||
# compare
|
||||
if equal and fval != expect:
|
||||
tdLog.exit(f"check not expect. expect:{expect} real:{fval}, key:{key} end:{end} output:\n{output}")
|
||||
tdLog.exit(f"check not expect. expect:{expect} real:{fval}, key:'{key}' end:'{end}' output:\n{output}")
|
||||
elif equal == False and fval <= expect:
|
||||
tdLog.exit(f"failed because {fval} <= {expect}, key:{key} end:{end} output:\n{output}")
|
||||
tdLog.exit(f"failed because {fval} <= {expect}, key:'{key}' end:'{end}' output:\n{output}")
|
||||
else:
|
||||
# succ
|
||||
if equal:
|
||||
tdLog.info(f"check successfully. key:{key} expect:{expect} real:{fval}")
|
||||
tdLog.info(f"check successfully. key:'{key}' expect:{expect} real:{fval}")
|
||||
else:
|
||||
tdLog.info(f"check successfully. key:{key} {fval} > {expect}")
|
||||
tdLog.info(f"check successfully. key:'{key}' {fval} > {expect}")
|
||||
|
||||
|
||||
def checkAfterRun(self, benchmark, jsonFile, specMode, tbCnt):
|
||||
|
@ -137,16 +137,24 @@ class TDTestCase(TBase):
|
|||
sqls = data[label]["sqls"]
|
||||
|
||||
|
||||
# mix
|
||||
# batch_query
|
||||
try:
|
||||
batchQuery = data[label]["batch_query"]
|
||||
except:
|
||||
batchQuery = "no"
|
||||
|
||||
# mixed_query
|
||||
try:
|
||||
mixedQuery = data[label]["mixed_query"]
|
||||
except:
|
||||
mixedQuery = "no"
|
||||
|
||||
tdLog.info(f"queryTimes={queryTimes} concurrent={concurrent} mixedQuery={mixedQuery} len(sqls)={len(sqls)} label={label}\n")
|
||||
tdLog.info(f"queryTimes={queryTimes} concurrent={concurrent} mixedQuery={mixedQuery} "
|
||||
f"batchQuery={batchQuery} len(sqls)={len(sqls)} label={label}\n")
|
||||
|
||||
totalQueries = 0
|
||||
totalQueries = 0
|
||||
threadQueries = 0
|
||||
QPS = 10
|
||||
|
||||
if continueIfFail.lower() == "yes":
|
||||
allEnd = " "
|
||||
|
@ -163,13 +171,20 @@ class TDTestCase(TBase):
|
|||
minKey = "min:"
|
||||
else:
|
||||
# spec mixed or super
|
||||
|
||||
if specMode:
|
||||
# spec
|
||||
totalQueries = queryTimes * len(sqls)
|
||||
# spec mixed
|
||||
if batchQuery.lower() == "yes":
|
||||
# batch
|
||||
threadQueries = len(sqls)
|
||||
QPS = 2
|
||||
else:
|
||||
threadQueries = totalQueries
|
||||
else:
|
||||
# super
|
||||
totalQueries = queryTimes * len(sqls) * tbCnt
|
||||
threadQueries = totalQueries
|
||||
threadQueries = totalQueries
|
||||
|
||||
nSql = len(sqls)
|
||||
if specMode and nSql < concurrent :
|
||||
|
@ -191,7 +206,7 @@ class TDTestCase(TBase):
|
|||
["p99: ", "s", 0, False],
|
||||
["INFO: Spend ", " ", 0, False],
|
||||
["completed total queries: ", ",", totalQueries, True],
|
||||
["the QPS of all threads:", allEnd, 10 , False] # all qps need > 5
|
||||
["the QPS of all threads:", allEnd, QPS , False] # all qps need > 5
|
||||
]
|
||||
|
||||
# check
|
||||
|
@ -205,6 +220,7 @@ class TDTestCase(TBase):
|
|||
args = [
|
||||
["./tools/benchmark/basic/json/queryModeSpec", True],
|
||||
["./tools/benchmark/basic/json/queryModeSpecMix", True],
|
||||
["./tools/benchmark/basic/json/queryModeSpecMixBatch", True],
|
||||
["./tools/benchmark/basic/json/queryModeSuper", False]
|
||||
]
|
||||
|
||||
|
@ -231,8 +247,9 @@ class TDTestCase(TBase):
|
|||
self.expectFailed(f"{benchmark} -f ./tools/benchmark/basic/json/queryErrorBothSpecSuper.json")
|
||||
# json format error
|
||||
self.expectFailed(f"{benchmark} -f ./tools/benchmark/basic/json/queryErrorFormat.json")
|
||||
|
||||
|
||||
# batch query
|
||||
self.expectFailed(f"{benchmark} -f ./tools/benchmark/basic/json/queryErrorBatchNoMix.json")
|
||||
self.expectFailed(f"{benchmark} -f ./tools/benchmark/basic/json/queryErrorBatchRest.json")
|
||||
|
||||
def run(self):
|
||||
tbCnt = 10
|
||||
|
|
|
@ -203,7 +203,23 @@ class TDTestCase:
|
|||
tdSql.execute("insert into t0 values(now, 3,NULL,3,3,3,3,3,3,3)", queryTimes=1)
|
||||
tdSql.query("select first(c2) from t0 session(ts, 1s) order by ts", queryTimes=1)
|
||||
|
||||
def ts6079(self):
|
||||
ts = 1741757485230
|
||||
tdSql.execute("drop database if exists ts6079")
|
||||
tdSql.execute("create database ts6079 vgroups 2 replica 1")
|
||||
tdSql.execute("CREATE STABLE ts6079.`meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` VARCHAR(24))")
|
||||
for tableIndex in range(10):
|
||||
tdSql.execute(f"CREATE TABLE ts6079.t{tableIndex} USING ts6079.meters TAGS ({tableIndex}, 'tb{tableIndex}')")
|
||||
for num in range(10):
|
||||
tdSql.execute(f"INSERT INTO ts6079.t{tableIndex} VALUES({ts + num}, {num * 1.0}, {215 + num}, 0.0)")
|
||||
|
||||
tdSql.query("select _wstart ,first(ts),last(ts),count(*),to_char(ts, 'yyyymmdd') as ts from ts6079.meters partition by to_char(ts, 'yyyymmdd') as ts state_window(cast(current as varchar(2)));")
|
||||
tdSql.checkRows(10)
|
||||
tdSql.checkData(0, 3, 10)
|
||||
|
||||
|
||||
def run(self):
|
||||
self.ts6079()
|
||||
self.test_crash_for_session_window()
|
||||
self.test_crash_for_state_window1()
|
||||
self.test_crash_for_state_window2()
|
||||
|
|
|
@ -648,6 +648,7 @@ typedef struct SpecifiedQueryInfo_S {
|
|||
TAOS_RES *res[MAX_QUERY_SQL_COUNT];
|
||||
uint64_t totalQueried;
|
||||
bool mixed_query;
|
||||
bool batchQuery; // mixed query have batch and no batch query
|
||||
// error rate
|
||||
uint64_t totalFail;
|
||||
} SpecifiedQueryInfo;
|
||||
|
|
|
@ -1813,6 +1813,23 @@ int32_t readSpecQueryJson(tools_cJSON * specifiedQuery) {
|
|||
}
|
||||
}
|
||||
|
||||
// batchQuery
|
||||
tools_cJSON *batchQueryObj =
|
||||
tools_cJSON_GetObjectItem(specifiedQuery, "batch_query");
|
||||
if (tools_cJSON_IsString(batchQueryObj)) {
|
||||
if (0 == strcasecmp(batchQueryObj->valuestring, "yes")) {
|
||||
g_queryInfo.specifiedQueryInfo.batchQuery = true;
|
||||
infoPrint("%s\n","batch_query is True");
|
||||
} else if (0 == strcasecmp(batchQueryObj->valuestring, "no")) {
|
||||
g_queryInfo.specifiedQueryInfo.batchQuery = false;
|
||||
infoPrint("%s\n","batch_query is False");
|
||||
} else {
|
||||
errorPrint("Invalid batch_query value: %s\n",
|
||||
batchQueryObj->valuestring);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
tools_cJSON *concurrent =
|
||||
tools_cJSON_GetObjectItem(specifiedQuery, "concurrent");
|
||||
if (tools_cJSON_IsNumber(concurrent)) {
|
||||
|
|
|
@ -47,7 +47,7 @@ void* benchCancelHandler(void* arg) {
|
|||
}
|
||||
#endif
|
||||
|
||||
void checkArgumentValid() {
|
||||
int checkArgumentValid() {
|
||||
// check prepared_rand valid
|
||||
if(g_arguments->prepared_rand < g_arguments->reqPerReq) {
|
||||
infoPrint("prepared_rand(%"PRIu64") < num_of_records_per_req(%d), so set num_of_records_per_req = prepared_rand\n",
|
||||
|
@ -64,13 +64,32 @@ void checkArgumentValid() {
|
|||
false,
|
||||
1)) {
|
||||
errorPrint("%s", "Failed to convert server address\n");
|
||||
return;
|
||||
return -1;
|
||||
}
|
||||
encodeAuthBase64();
|
||||
g_arguments->rest_server_ver_major =
|
||||
getServerVersionRest(g_arguments->port);
|
||||
}
|
||||
|
||||
// check batch query
|
||||
if (g_arguments->test_mode == QUERY_TEST) {
|
||||
if (g_queryInfo.specifiedQueryInfo.batchQuery) {
|
||||
// batch_query = yes
|
||||
if (!g_queryInfo.specifiedQueryInfo.mixed_query) {
|
||||
// mixed_query = no
|
||||
errorPrint("%s\n", "batch_query = yes require mixed_query is yes");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// rest not support
|
||||
if (g_queryInfo.iface == REST_IFACE) {
|
||||
errorPrint("%s\n", "batch_query = yes not support restful.");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, char* argv[]) {
|
||||
|
@ -144,7 +163,11 @@ int main(int argc, char* argv[]) {
|
|||
}
|
||||
|
||||
infoPrint("client version: %s\n", taos_get_client_info());
|
||||
checkArgumentValid();
|
||||
if (checkArgumentValid()) {
|
||||
errorPrint("failed to readJsonConfig %s\n", g_arguments->metaFile);
|
||||
exitLog();
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (g_arguments->test_mode == INSERT_TEST) {
|
||||
if (insertTestProcess()) {
|
||||
|
|
|
@ -80,10 +80,15 @@ int selectAndGetResult(qThreadInfo *pThreadInfo, char *command, bool record) {
|
|||
}
|
||||
|
||||
// intelligent sleep
|
||||
void autoSleep(uint64_t interval, uint64_t delay ) {
|
||||
int32_t autoSleep(uint64_t interval, uint64_t delay ) {
|
||||
int32_t msleep = 0;
|
||||
if (delay < interval * 1000) {
|
||||
toolsMsleep((int32_t)(interval * 1000 - delay)); // ms
|
||||
msleep = (int32_t)((interval - delay/1000));
|
||||
infoPrint("do sleep %dms ...\n", msleep);
|
||||
toolsMsleep(msleep); // ms
|
||||
debugPrint("%s\n","do sleep end");
|
||||
}
|
||||
return msleep;
|
||||
}
|
||||
|
||||
// reset
|
||||
|
@ -140,10 +145,13 @@ static void *specQueryMixThread(void *sarg) {
|
|||
|
||||
int64_t st = 0;
|
||||
int64_t et = 0;
|
||||
int64_t startTs = toolsGetTimestampMs();
|
||||
int64_t lastPrintTime = startTs;
|
||||
uint64_t queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes;
|
||||
uint64_t interval = g_queryInfo.specifiedQueryInfo.queryInterval;
|
||||
int64_t startTs = toolsGetTimestampMs();
|
||||
int64_t lastPrintTime = startTs;
|
||||
// batchQuery
|
||||
bool batchQuery = g_queryInfo.specifiedQueryInfo.batchQuery;
|
||||
uint64_t queryTimes = batchQuery ? 1 : g_queryInfo.specifiedQueryInfo.queryTimes;
|
||||
uint64_t interval = batchQuery ? 0 : g_queryInfo.specifiedQueryInfo.queryInterval;
|
||||
|
||||
pThreadInfo->query_delay_list = benchArrayInit(queryTimes, sizeof(int64_t));
|
||||
for (int i = pThreadInfo->start_sql; i <= pThreadInfo->end_sql; ++i) {
|
||||
SSQL * sql = benchArrayGet(g_queryInfo.specifiedQueryInfo.sqls, i);
|
||||
|
@ -382,7 +390,7 @@ static void *stbQueryThread(void *sarg) {
|
|||
// --------------------------------- first level function ------------------------------
|
||||
//
|
||||
|
||||
void totalChildQuery(qThreadInfo* infos, int threadCnt, int64_t spend) {
|
||||
void totalChildQuery(qThreadInfo* infos, int threadCnt, int64_t spend, BArray *pDelays) {
|
||||
// valid check
|
||||
if (infos == NULL || threadCnt == 0) {
|
||||
return ;
|
||||
|
@ -444,9 +452,11 @@ void totalChildQuery(qThreadInfo* infos, int threadCnt, int64_t spend) {
|
|||
(int32_t)(delay_list->size * 0.99)))/1E6,
|
||||
*(int64_t *)(benchArrayGet(delay_list,
|
||||
(int32_t)(delay_list->size - 1)))/1E6);
|
||||
} else {
|
||||
errorPrint("%s() LN%d, delay_list size: %"PRId64"\n",
|
||||
__func__, __LINE__, (int64_t)delay_list->size);
|
||||
}
|
||||
|
||||
// copy to another
|
||||
if (pDelays) {
|
||||
benchArrayAddBatch(pDelays, delay_list->pData, delay_list->size, false);
|
||||
}
|
||||
benchArrayDestroy(delay_list);
|
||||
}
|
||||
|
@ -547,7 +557,7 @@ static int stbQuery(uint16_t iface, char* dbName) {
|
|||
}
|
||||
|
||||
// total show
|
||||
totalChildQuery(threadInfos, threadCnt, end - start);
|
||||
totalChildQuery(threadInfos, threadCnt, end - start, NULL);
|
||||
|
||||
ret = 0;
|
||||
|
||||
|
@ -825,7 +835,7 @@ static int specQueryMix(uint16_t iface, char* dbName) {
|
|||
}
|
||||
|
||||
// statistic
|
||||
totalChildQuery(infos, threadCnt, end - start);
|
||||
totalChildQuery(infos, threadCnt, end - start, NULL);
|
||||
ret = 0;
|
||||
|
||||
OVER:
|
||||
|
@ -838,6 +848,206 @@ OVER:
|
|||
return ret;
|
||||
}
|
||||
|
||||
void totalBatchQuery(int32_t allSleep, BArray *pDelays) {
|
||||
// sort
|
||||
qsort(pDelays->pData, pDelays->size, pDelays->elemSize, compare);
|
||||
|
||||
// total delays
|
||||
double totalDelays = 0;
|
||||
for (size_t i = 0; i < pDelays->size; i++) {
|
||||
int64_t *delay = benchArrayGet(pDelays, i);
|
||||
totalDelays += *delay;
|
||||
}
|
||||
|
||||
printf("\n");
|
||||
// show sleep times
|
||||
if (allSleep > 0) {
|
||||
infoPrint("All sleep spend: %.3fs\n", (float)allSleep/1000);
|
||||
}
|
||||
|
||||
// show P90 ...
|
||||
if (pDelays->size) {
|
||||
infoPrint(
|
||||
"Total delay: "
|
||||
"min delay: %.6fs, "
|
||||
"avg delay: %.6fs, "
|
||||
"p90: %.6fs, "
|
||||
"p95: %.6fs, "
|
||||
"p99: %.6fs, "
|
||||
"max: %.6fs\n",
|
||||
*(int64_t *)(benchArrayGet(pDelays, 0))/1E6,
|
||||
(double)totalDelays/pDelays->size/1E6,
|
||||
*(int64_t *)(benchArrayGet(pDelays,
|
||||
(int32_t)(pDelays->size * 0.9)))/1E6,
|
||||
*(int64_t *)(benchArrayGet(pDelays,
|
||||
(int32_t)(pDelays->size * 0.95)))/1E6,
|
||||
*(int64_t *)(benchArrayGet(pDelays,
|
||||
(int32_t)(pDelays->size * 0.99)))/1E6,
|
||||
*(int64_t *)(benchArrayGet(pDelays,
|
||||
(int32_t)(pDelays->size - 1)))/1E6);
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// specQuery Mix Batch
|
||||
//
|
||||
static int specQueryBatch(uint16_t iface, char* dbName) {
|
||||
// init
|
||||
BArray *pDelays = NULL;
|
||||
int ret = -1;
|
||||
int nConcurrent = g_queryInfo.specifiedQueryInfo.concurrent;
|
||||
uint64_t interval = g_queryInfo.specifiedQueryInfo.queryInterval;
|
||||
pthread_t * pids = benchCalloc(nConcurrent, sizeof(pthread_t), true);
|
||||
qThreadInfo *infos = benchCalloc(nConcurrent, sizeof(qThreadInfo), true);
|
||||
infoPrint("start batch query, sleep interval:%" PRIu64 "ms query times:%" PRIu64 " thread:%d \n",
|
||||
interval, g_queryInfo.query_times, nConcurrent);
|
||||
|
||||
// concurrent calc
|
||||
int total_sql_num = g_queryInfo.specifiedQueryInfo.sqls->size;
|
||||
int start_sql = 0;
|
||||
int a = total_sql_num / nConcurrent;
|
||||
if (a < 1) {
|
||||
warnPrint("sqls num:%d < concurent:%d, set concurrent %d\n", total_sql_num, nConcurrent, nConcurrent);
|
||||
nConcurrent = total_sql_num;
|
||||
a = 1;
|
||||
}
|
||||
int b = 0;
|
||||
if (nConcurrent != 0) {
|
||||
b = total_sql_num % nConcurrent;
|
||||
}
|
||||
|
||||
//
|
||||
// connect
|
||||
//
|
||||
int connCnt = 0;
|
||||
for (int i = 0; i < nConcurrent; ++i) {
|
||||
qThreadInfo *pThreadInfo = infos + i;
|
||||
// create conn
|
||||
if (initQueryConn(pThreadInfo, iface)){
|
||||
ret = -1;
|
||||
goto OVER;
|
||||
}
|
||||
|
||||
connCnt ++;
|
||||
}
|
||||
|
||||
|
||||
// reset total
|
||||
g_queryInfo.specifiedQueryInfo.totalQueried = 0;
|
||||
g_queryInfo.specifiedQueryInfo.totalFail = 0;
|
||||
|
||||
//
|
||||
// running
|
||||
//
|
||||
int threadCnt = 0;
|
||||
int allSleep = 0;
|
||||
pDelays = benchArrayInit(10, sizeof(int64_t));
|
||||
for (int m = 0; m < g_queryInfo.query_times; ++m) {
|
||||
// reset
|
||||
threadCnt = 0;
|
||||
start_sql = 0;
|
||||
|
||||
// create thread
|
||||
for (int i = 0; i < nConcurrent; ++i) {
|
||||
qThreadInfo *pThreadInfo = infos + i;
|
||||
pThreadInfo->threadID = i;
|
||||
pThreadInfo->start_sql = start_sql;
|
||||
pThreadInfo->end_sql = i < b ? start_sql + a : start_sql + a - 1;
|
||||
start_sql = pThreadInfo->end_sql + 1;
|
||||
pThreadInfo->total_delay = 0;
|
||||
// total zero
|
||||
pThreadInfo->nSucc = 0;
|
||||
pThreadInfo->nFail = 0;
|
||||
|
||||
// main run
|
||||
int code = pthread_create(pids + i, NULL, specQueryMixThread, pThreadInfo);
|
||||
if (code != 0) {
|
||||
errorPrint("failed specQueryBatchThread create. error code =%d \n", code);
|
||||
break;
|
||||
}
|
||||
|
||||
threadCnt ++;
|
||||
}
|
||||
|
||||
bool needExit = false;
|
||||
if (threadCnt != nConcurrent) {
|
||||
// if failed, set terminate flag true like ctrl+c exit
|
||||
needExit = true;
|
||||
g_arguments->terminate = true;
|
||||
}
|
||||
|
||||
// wait thread finished
|
||||
int64_t start = toolsGetTimestampUs();
|
||||
for (int i = 0; i < threadCnt; ++i) {
|
||||
pthread_join(pids[i], NULL);
|
||||
qThreadInfo *pThreadInfo = infos + i;
|
||||
// total queries
|
||||
g_queryInfo.specifiedQueryInfo.totalQueried += pThreadInfo->nSucc;
|
||||
if (g_arguments->continueIfFail == YES_IF_FAILED) {
|
||||
// yes need add failed count
|
||||
g_queryInfo.specifiedQueryInfo.totalQueried += pThreadInfo->nFail;
|
||||
g_queryInfo.specifiedQueryInfo.totalFail += pThreadInfo->nFail;
|
||||
}
|
||||
|
||||
// destroy
|
||||
if (needExit) {
|
||||
benchArrayDestroy(pThreadInfo->query_delay_list);
|
||||
pThreadInfo->query_delay_list = NULL;
|
||||
}
|
||||
}
|
||||
int64_t end = toolsGetTimestampUs();
|
||||
|
||||
// create
|
||||
if (needExit) {
|
||||
errorPrint("failed to create thread. expect nConcurrent=%d real threadCnt=%d, exit testing.\n", nConcurrent, threadCnt);
|
||||
goto OVER;
|
||||
}
|
||||
|
||||
// batch total
|
||||
printf("\n");
|
||||
totalChildQuery(infos, threadCnt, end - start, pDelays);
|
||||
|
||||
// show batch total
|
||||
int64_t delay = end - start;
|
||||
infoPrint("count:%d execute batch spend: %" PRId64 "ms\n", m + 1, delay/1000);
|
||||
|
||||
// sleep
|
||||
if ( g_queryInfo.specifiedQueryInfo.batchQuery && interval > 0) {
|
||||
allSleep += autoSleep(interval, delay);
|
||||
}
|
||||
|
||||
// check cancel
|
||||
if(g_arguments->terminate) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
ret = 0;
|
||||
|
||||
// all total
|
||||
totalBatchQuery(allSleep, pDelays);
|
||||
|
||||
OVER:
|
||||
// close conn
|
||||
for (int i = 0; i < connCnt; ++i) {
|
||||
qThreadInfo *pThreadInfo = infos + i;
|
||||
closeQueryConn(pThreadInfo, iface);
|
||||
}
|
||||
|
||||
// free threads
|
||||
tmfree(pids);
|
||||
tmfree(infos);
|
||||
|
||||
// free sqls
|
||||
freeSpecialQueryInfo();
|
||||
|
||||
// free delays
|
||||
if (pDelays) {
|
||||
benchArrayDestroy(pDelays);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
// total query for end
|
||||
void totalQuery(int64_t spends) {
|
||||
// total QPS
|
||||
|
@ -903,14 +1113,19 @@ int queryTestProcess() {
|
|||
// start running
|
||||
//
|
||||
|
||||
|
||||
uint64_t startTs = toolsGetTimestampMs();
|
||||
if(g_queryInfo.specifiedQueryInfo.sqls && g_queryInfo.specifiedQueryInfo.sqls->size > 0) {
|
||||
// specified table
|
||||
if (g_queryInfo.specifiedQueryInfo.mixed_query) {
|
||||
// mixed
|
||||
if (specQueryMix(g_queryInfo.iface, g_queryInfo.dbName)) {
|
||||
return -1;
|
||||
if(g_queryInfo.specifiedQueryInfo.batchQuery) {
|
||||
if (specQueryBatch(g_queryInfo.iface, g_queryInfo.dbName)) {
|
||||
return -1;
|
||||
}
|
||||
} else {
|
||||
if (specQueryMix(g_queryInfo.iface, g_queryInfo.dbName)) {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// no mixed
|
||||
|
|