Merge branch '3.0' of https://github.com/taosdata/TDengine into feat/vnode_compact
commit 11bc53bbd7
@ -430,7 +430,7 @@ pipeline {
date
rm -rf ${WKC}/debug
cd ${WKC}/tests/parallel_test
time ./container_build.sh -w ${WKDIR} -t 10 -e
time ./container_build.sh -w ${WKDIR} -e
'''
def extra_param = ""
def log_server_file = "/home/log_server.json"
@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.0.2.2")
SET(TD_VER_NUMBER "3.0.2.4")
ENDIF ()

IF (DEFINED VERCOMPATIBLE)
@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG a2e9920
GIT_TAG 213f8b3
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG 4776778
GIT_TAG 0cd564a
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
@ -38,7 +38,7 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
- Each value in `field_set` must be self-descriptive for its data type. For example, 1.2f32 means a value 1.2 of float type. Without the "f" type suffix, it will be treated as type double.
- Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h).
- The child table name is generated automatically by a rule that guarantees its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
- It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur. (smlDataFormat in taos.cfg defaults to false since version 3.0.1.3.)
- It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur. (smlDataFormat in taos.cfg defaults to false since version 3.0.1.3 and is discarded since 3.0.3.0.)
:::

For more details please refer to [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and [TDengine Schemaless](/reference/schemaless/#Schemaless-Line-Protocol)
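The rules above map directly onto the schemaless write API. Below is a minimal hedged sketch using taospy (the database name is a placeholder, and the `SmlProtocol`/`SmlPrecision` enum member names are assumptions based on taospy's documented interface):

```python
import taos
from taos import SmlProtocol, SmlPrecision

conn = taos.connect()  # localhost:6030 by default
conn.execute("CREATE DATABASE IF NOT EXISTS test")  # placeholder database
conn.select_db("test")

# One string per row: measurement,tag_set field_set timestamp (ns precision here).
lines = [
    'meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000000',
    'meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500000',
]
conn.schemaless_insert(lines, SmlProtocol.LINE_PROTOCOL, SmlPrecision.NANO_SECONDS)
```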
@ -94,22 +94,21 @@ void close() throws SQLException;
<TabItem value="Python" label="Python">

```python
class TaosConsumer():
    def __init__(self, *topics, **configs)
class Consumer:
    def subscribe(self, topics):
        pass

    def __iter__(self)
    def unsubscribe(self):
        pass

    def __next__(self)
    def poll(self, timeout: float = 1.0):
        pass

    def sync_next(self)

    def subscription(self)
    def close(self):
        pass

    def unsubscribe(self)

    def close(self)

    def __del__(self)
    def commit(self, message):
        pass
```

</TabItem>
@ -117,19 +116,22 @@ class TaosConsumer():
<TabItem label="Go" value="Go">

```go
func NewConsumer(conf *Config) (*Consumer, error)
func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)

func (c *Consumer) Close() error
// rebalanceCb is reserved for compatibility purpose
func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error

func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error
// rebalanceCb is reserved for compatibility purpose
func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error

func (c *Consumer) FreeMessage(message unsafe.Pointer)
func (c *Consumer) Poll(timeoutMs int) tmq.Event

func (c *Consumer) Poll(timeout time.Duration) (*Result, error)

func (c *Consumer) Subscribe(topics []string) error
// tmq.TopicPartition is reserved for compatibility purpose
func (c *Consumer) Commit() ([]tmq.TopicPartition, error)

func (c *Consumer) Unsubscribe() error

func (c *Consumer) Close() error
```

</TabItem>
@ -357,50 +359,20 @@ public class MetersDeserializer extends ReferenceDeserializer<Meters> {
<TabItem label="Go" value="Go">

```go
config := tmq.NewConfig()
defer config.Destroy()
err = config.SetGroupID("test")
if err != nil {
    panic(err)
}
err = config.SetAutoOffsetReset("earliest")
if err != nil {
    panic(err)
}
err = config.SetConnectIP("127.0.0.1")
if err != nil {
    panic(err)
}
err = config.SetConnectUser("root")
if err != nil {
    panic(err)
}
err = config.SetConnectPass("taosdata")
if err != nil {
    panic(err)
}
err = config.SetConnectPort("6030")
if err != nil {
    panic(err)
}
err = config.SetMsgWithTableName(true)
if err != nil {
    panic(err)
}
err = config.EnableHeartBeat()
if err != nil {
    panic(err)
}
err = config.EnableAutoCommit(func(result *wrapper.TMQCommitCallbackResult) {
    if result.ErrCode != 0 {
        errStr := wrapper.TMQErr2Str(result.ErrCode)
        err := errors.NewError(int(result.ErrCode), errStr)
        panic(err)
    }
})
if err != nil {
    panic(err)
conf := &tmq.ConfigMap{
    "group.id":                     "test",
    "auto.offset.reset":            "earliest",
    "td.connect.ip":                "127.0.0.1",
    "td.connect.user":              "root",
    "td.connect.pass":              "taosdata",
    "td.connect.port":              "6030",
    "client.id":                    "test_tmq_c",
    "enable.auto.commit":           "false",
    "enable.heartbeat.background":  "true",
    "experimental.snapshot.enable": "true",
    "msg.with.table.name":          "true",
}
consumer, err := NewConsumer(conf)
```

</TabItem>
@ -422,23 +394,31 @@ let mut consumer = tmq.build()?;

<TabItem value="Python" label="Python">

```python
from taos.tmq import Consumer

# Syntax: `consumer = Consumer(configs)`
#
# Example:
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
```

Python programs use the following parameters:

| Parameter | Type | Description | Remarks |
| :----------------------------: | :----: | -------------------------------------------------------- | ------------------------------------------- |
| `td_connect_ip` | string | Used in establishing a connection; same as `taos_connect` | |
| `td_connect_user` | string | Used in establishing a connection; same as `taos_connect` | |
| `td_connect_pass` | string | Used in establishing a connection; same as `taos_connect` | |
| `td_connect_port` | string | Used in establishing a connection; same as `taos_connect` | |
| `group_id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. |
| `client_id` | string | Client ID | Maximum length: 192. |
| `auto_offset_reset` | string | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
| `enable_auto_commit` | string | Commit automatically | Specify `true` or `false`. |
| `auto_commit_interval_ms` | string | Interval for automatic commits, in milliseconds |
| `enable_heartbeat_background` | string | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | Specify `true` or `false`. |
| `experimental_snapshot_enable` | string | Specify whether to consume messages from the WAL or from TSBS | Specify `true` or `false`. |
| `msg_with_table_name` | string | Specify whether to deserialize table names from messages | Specify `true` or `false`.
| `timeout` | int | Consumer pull timeout | |
| Parameter | Type | Description | Remarks |
|:---------:|:----:|:-----------:|:-------:|
| `td.connect.ip` | string | Used in establishing a connection | |
| `td.connect.user` | string | Used in establishing a connection | |
| `td.connect.pass` | string | Used in establishing a connection | |
| `td.connect.port` | string | Used in establishing a connection | |
| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192 |
| `client.id` | string | Client ID | Maximum length: 192 |
| `msg.with.table.name` | string | Specify whether to deserialize table names from messages | Specify `true` or `false` |
| `enable.auto.commit` | string | Commit automatically | Specify `true` or `false` |
| `auto.commit.interval.ms` | string | Interval for automatic commits, in milliseconds | |
| `auto.offset.reset` | string | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none` (default) |
| `experimental.snapshot.enable` | string | Specify whether to consume messages from the WAL or from TSDB | Specify `true` or `false` |
| `enable.heartbeat.background` | string | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | Specify `true` or `false` |

</TabItem>
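For reference, a hedged sketch that exercises more of the dotted-key parameters from the new table (every value below is a placeholder, not a recommendation):

```python
from taos.tmq import Consumer

consumer = Consumer(
    {
        "group.id": "local",            # required, max length 192
        "client.id": "example-client",
        "td.connect.ip": "127.0.0.1",
        "td.connect.user": "root",
        "td.connect.pass": "taosdata",
        "auto.offset.reset": "earliest",
        "enable.auto.commit": "false",  # commit manually after processing
        "msg.with.table.name": "true",
    }
)
```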
@ -523,11 +503,7 @@ consumer.subscribe(topics);
<TabItem value="Go" label="Go">

```go
consumer, err := tmq.NewConsumer(config)
if err != nil {
    panic(err)
}
err = consumer.Subscribe([]string{"example_tmq_topic"})
err = consumer.Subscribe("example_tmq_topic", nil)
if err != nil {
    panic(err)
}
@ -545,7 +521,7 @@ consumer.subscribe(["tmq_meters"]).await?;
<TabItem value="Python" label="Python">

```python
consumer = TaosConsumer('topic_ctb_column', group_id='vg2')
consumer.subscribe(['topic1', 'topic2'])
```

</TabItem>
@ -611,13 +587,17 @@ while(running){

```go
for {
    result, err := consumer.Poll(time.Second)
    if err != nil {
        panic(err)
    ev := consumer.Poll(0)
    if ev != nil {
        switch e := ev.(type) {
        case *tmqcommon.DataMessage:
            fmt.Println(e.Value())
        case tmqcommon.Error:
            fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e)
            panic(e)
        }
        consumer.Commit()
    }
    fmt.Println(result)
    consumer.Commit(context.Background(), result.Message)
    consumer.FreeMessage(result.Message)
}
```
@ -660,9 +640,17 @@ for {
<TabItem value="Python" label="Python">

```python
for msg in consumer:
    for row in msg:
        print(row)
while True:
    res = consumer.poll(100)
    if not res:
        continue
    err = res.error()
    if err is not None:
        raise err
    val = res.value()

    for block in val:
        print(block.fetchall())
```

</TabItem>
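With `enable.auto.commit` set to `false`, offsets should be committed by hand. A hedged sketch built from the `commit(message)` method in the new class skeleton above (`process` is a hypothetical application handler):

```python
while True:
    res = consumer.poll(1.0)
    if not res:
        continue
    err = res.error()
    if err is not None:
        raise err
    process(res)          # hypothetical handler for the polled message
    consumer.commit(res)  # commit only after the message is safely processed
```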
@ -729,7 +717,11 @@ consumer.close();
<TabItem value="Go" label="Go">

```go
consumer.Close()
/* Unsubscribe */
_ = consumer.Unsubscribe()

/* Close consumer */
_ = consumer.Close()
```

</TabItem>
@ -22,7 +22,7 @@ Helm uses the kubectl and kubeconfig configurations to perform Kubernetes operat
To use TDengine Chart, download it from GitHub:

```bash
wget https://github.com/taosdata/TDengine-Operator/raw/3.0/helm/tdengine-3.0.0.tgz
wget https://github.com/taosdata/TDengine-Operator/raw/3.0/helm/tdengine-3.0.2.tgz
```
@ -38,7 +38,7 @@ With minikube, the default value is standard.
Use Helm commands to install TDengine:

```bash
helm install tdengine tdengine-3.0.0.tgz \
helm install tdengine tdengine-3.0.2.tgz \
--set storage.className=<your storage class name>
```
@ -46,7 +46,7 @@ helm install tdengine tdengine-3.0.0.tgz \
You can configure a small storage size in minikube to ensure that your deployment does not exceed your available disk space.

```bash
helm install tdengine tdengine-3.0.0.tgz \
helm install tdengine tdengine-3.0.2.tgz \
--set storage.className=standard \
--set storage.dataSize=2Gi \
--set storage.logSize=10Mi
@ -83,14 +83,14 @@ You can configure custom parameters in TDengine with the `values.yaml` file.
Run the `helm show values` command to see all parameters supported by TDengine Chart.

```bash
helm show values tdengine-3.0.0.tgz
helm show values tdengine-3.0.2.tgz
```

Save the output of this command as `values.yaml`. Then you can modify this file with your desired values and use it to deploy a TDengine cluster:

```bash
helm install tdengine tdengine-3.0.0.tgz -f values.yaml
helm install tdengine tdengine-3.0.2.tgz -f values.yaml
```
@ -107,7 +107,7 @@ image:
  prefix: tdengine/tdengine
  #pullPolicy: Always
  # Overrides the image tag whose default is the chart appVersion.
  # tag: "3.0.0.0"
  # tag: "3.0.2.0"

service:
  # ClusterIP is the default service type, use NodeIP only if you know what you are doing.
@ -155,15 +155,15 @@ clusterDomainSuffix: ""
# See the [Configuration Variables](../../reference/config)
#
# Note:
# 1. firstEp/secondEp: should not be setted here, it's auto generated at scale-up.
# 2. serverPort: should not be setted, we'll use the default 6030 in many places.
# 3. fqdn: will be auto generated in kubenetes, user should not care about it.
# 1. firstEp/secondEp: should not be set here, it's auto generated at scale-up.
# 2. serverPort: should not be set, we'll use the default 6030 in many places.
# 3. fqdn: will be auto generated in kubernetes, user should not care about it.
# 4. role: currently role is not supported - every node is able to be mnode and vnode.
#
# Btw, keep quotes "" around the value like below, whether the value is a number or not.
taoscfg:
  # Starts as cluster or not, must be 0 or 1.
  #   0: all pods will start as a seperate TDengine server
  # 0: all pods will start as a separate TDengine server
  # 1: pods will start as TDengine server cluster. [default]
  CLUSTER: "1"
@ -30,8 +30,10 @@ database_option: {
| WAL_LEVEL {1 | 2}
| VGROUPS value
| SINGLE_STABLE {0 | 1}
| STT_TRIGGER value
| TABLE_PREFIX value
| TABLE_SUFFIX value
| TSDB_PAGESIZE value
| WAL_RETENTION_PERIOD value
| WAL_ROLL_PERIOD value
| WAL_RETENTION_SIZE value
@ -56,7 +58,7 @@ database_option: {
- WAL_FSYNC_PERIOD: specifies the interval (in milliseconds) at which data is written from the WAL to disk. This parameter takes effect only when the WAL parameter is set to 2. The default value is 3000. Enter a value between 0 and 180000. The value 0 indicates that incoming data is immediately written to disk.
- MAXROWS: specifies the maximum number of rows recorded in a block. The default value is 4096.
- MINROWS: specifies the minimum number of rows recorded in a block. The default value is 100.
- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default.
- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default. The Enterprise Edition supports the [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage) function, so multiple KEEP values are supported (comma separated, up to 3 values, satisfying keep0 <= keep1 <= keep2, e.g. KEEP 100h,100d,3650d); the Community Edition does not support tiered storage, so even if multiple KEEP values are configured they do not take effect and only the maximum value is used (see the sketch below).
- PAGES: specifies the number of pages in the metadata storage engine cache on each vnode. Enter a value greater than or equal to 64. The default value is 256. The space occupied by metadata storage on each vnode is equal to the product of the values of the PAGESIZE and PAGES parameters. The space occupied by default is 1 MB.
- PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode. The default value is 4. Enter a value between 1 and 16384.
- PRECISION: specifies the precision at which a database records timestamps. Enter ms for milliseconds, us for microseconds, or ns for nanoseconds. The default value is ms.
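As referenced in the KEEP description above, a hedged taospy sketch of a multi-level retention setup (the database name is hypothetical; note that every KEEP value must still be greater than or equal to DURATION):

```python
import taos

conn = taos.connect()
# On the Enterprise Edition the three comma-separated KEEP values map to storage
# tiers (keep0 <= keep1 <= keep2); on the Community Edition only 3650d takes effect.
conn.execute("CREATE DATABASE power_keep DURATION 10d KEEP 30d,180d,3650d")
```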
@ -69,8 +71,10 @@ database_option: {
- SINGLE_STABLE: specifies whether the database can contain more than one supertable.
  - 0: The database can contain multiple supertables.
  - 1: The database can contain only one supertable.
- STT_TRIGGER: specifies the number of file merges triggered by flushed files. The default is 8, ranging from 1 to 16. For high-frequency scenarios with few tables, it is recommended to use the default configuration or a smaller value for this parameter; for multi-table low-frequency scenarios, it is recommended to configure this parameter with a larger value.
- TABLE_PREFIX: the prefix length in the table name that is ignored when distributing a table to a vnode based on the table name.
- TABLE_SUFFIX: the suffix length in the table name that is ignored when distributing a table to a vnode based on the table name.
- TSDB_PAGESIZE: the page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB.
- WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. Enter a time in seconds. The default value for a single copy is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. The default value for multiple copies is 4 days.
- WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. Enter a size in KB. The default value for a single copy is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. The default value for multiple copies is -1.
- WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. The default value for a single copy is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk. The default value for multiple copies is 1 day.
@ -112,6 +116,10 @@ alter_database_options:
alter_database_option: {
CACHEMODEL {'none' | 'last_row' | 'last_value' | 'both'}
| CACHESIZE value
| BUFFER value
| PAGES value
| REPLICA value
| STT_TRIGGER value
| WAL_LEVEL value
| WAL_FSYNC_PERIOD value
| KEEP value
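Several of the options above are newly alterable on an existing database; a hedged taospy sketch (the database name and values are hypothetical):

```python
import taos

conn = taos.connect()
conn.execute("ALTER DATABASE power_keep STT_TRIGGER 16")  # larger trigger for multi-table, low-frequency workloads
conn.execute("ALTER DATABASE power_keep PAGES 512")       # grow the metadata cache
```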
@ -154,3 +162,19 @@ TRIM DATABASE db_name;
```

The preceding SQL statement deletes data that has expired and orders the remaining data in accordance with the storage configuration.

## Redistribute Vgroup

```sql
REDISTRIBUTE VGROUP vgroup_no DNODE dnode_id1 [DNODE dnode_id2] [DNODE dnode_id3]
```

Adjusts the distribution of vnodes in the vgroup according to the given list of dnodes.

## Balance Vgroup

```sql
BALANCE VGROUP
```

Automatically adjusts the distribution of vnodes in all vgroups of the cluster, which is equivalent to load balancing the data of the cluster at the vnode level.
@ -350,9 +350,9 @@ SELECT AVG(CASE WHEN voltage < 200 or voltage > 250 THEN 220 ELSE voltage END) F

## JOIN

TDengine supports natural joins between supertables, between standard tables, and between subqueries. The difference between natural joins and inner joins is that natural joins require that the fields being joined in the supertables or standard tables must have the same name. Data or tag columns must be joined with the equivalent column in another table.
TDengine supports `INNER JOIN` based on the timestamp primary key, that is, the `JOIN` condition must contain the timestamp primary key. As long as this timestamp-based primary key requirement is met, `INNER JOIN` can be performed freely between normal tables, subtables, supertables, and subqueries, and there is no limit on the number of tables.

For standard tables, only the timestamp (primary key) can be used in join operations. For example:
For standard tables:

```sql
SELECT *
@ -360,7 +360,7 @@ FROM temp_tb_1 t1, pressure_tb_1 t2
WHERE t1.ts = t2.ts
```

For supertables, tags as well as timestamps can be used in join operations. For example:
For supertables:

```sql
SELECT *
@ -368,21 +368,16 @@ FROM temp_stable t1, temp_stable t2
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
```

For subtables and supertables:

```sql
SELECT *
FROM temp_ctable t1, temp_stable t2
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
```

Similarly, join operations can be performed on the result sets of multiple subqueries.

:::note

The following restrictions apply to JOIN statements:

- The number of tables or supertables in a single join operation cannot exceed 10.
- `FILL` cannot be used in a JOIN statement.
- Arithmetic operations cannot be performed on the result sets of a join operation.
- `GROUP BY` is not allowed on a subset of the tables that participate in a join operation.
- `OR` cannot be used in the conditions for a join operation.
- Join operations can be performed only on tags or timestamps. You cannot perform a join operation on data columns.

:::

## Nested Query

A nested query is also called a subquery: in a single SQL statement, the result of the inner query can be used as the data source of the outer query.
@ -877,7 +877,7 @@ INTERP(expr)
- Interpolation is performed based on the `FILL` parameter.
- `INTERP` can only be used to interpolate within a single timeline, so it must be used with `partition by tbname` when it is applied to a supertable.
- Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points (supported since version 3.0.1.4).
- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by the interpolation algorithm (supported since version 3.0.2.1).
- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by the interpolation algorithm (supported since version 3.0.2.3).

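A hedged sketch of the two pseudocolumns in use via taospy (the database, supertable `meters`, and time range are placeholders; the clause order follows the INTERP syntax documented here):

```python
import taos

conn = taos.connect(database="power")  # hypothetical database
result = conn.query(
    "SELECT tbname, _irowts, _isfilled, INTERP(current) FROM meters "
    "PARTITION BY tbname "
    "RANGE('2023-01-01 00:00:00', '2023-01-01 00:00:10') "
    "EVERY(1s) FILL(LINEAR)"
)
print(result.fetch_all())
```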
### LAST
@ -363,7 +363,7 @@ Shows information about all vgroups in the system or about the vgroups for a spe
## SHOW VNODES

```sql
SHOW VNODES [dnode_name];
SHOW VNODES {dnode_id | dnode_endpoint};
```

Shows information about all vnodes in the system or about the vnodes for a specified dnode.
@ -54,7 +54,6 @@ The following data types can be used in the schema for standard tables.
| 27 | GRANT | Added | Grants permissions to a user.
| 28 | KILL TRANSACTION | Added | Terminates an mnode transaction.
| 29 | KILL STREAM | Deprecated | Terminated a continuous query. The continuous query feature has been replaced with the stream processing feature.
| 30 | MERGE VGROUP | Added | Merges vgroups.
| 31 | REVOKE | Added | Revokes permissions from a user.
| 32 | SELECT | Modified | <ul><li>SELECT does not use the implicit results column. Output columns must be specified in the SELECT clause. </li><li>DISTINCT support is enhanced. In previous versions, DISTINCT only worked on the tag column and could not be used with JOIN or GROUP BY. </li><li>JOIN support is enhanced. The following are now supported after JOIN: a WHERE clause with OR, operations on multiple tables, and GROUP BY on multiple tables. </li><li>Subqueries after FROM are enhanced. Levels of nesting are no longer restricted. Subqueries can be used with UNION ALL. Other syntax restrictions are eliminated. </li><li>All scalar functions can be used after WHERE. </li><li>GROUP BY is enhanced. You can group by any scalar expression or combination thereof. </li><li>SESSION can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline. </li><li>STATE_WINDOW can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline. </li><li>ORDER BY is enhanced. It is no longer required to use ORDER BY and GROUP BY together. There is no longer a restriction on the number of order expressions. NULLS FIRST and NULLS LAST syntax has been added. Any expression that conforms to the ORDER BY semantics can be used. </li><li>Added PARTITION BY syntax. PARTITION BY replaces GROUP BY tags. </li></ul>
| 33 | SHOW ACCOUNTS | Deprecated | This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported."
@ -76,8 +75,9 @@ The following data types can be used in the schema for standard tables.
| 49 | SHOW TRANSACTIONS | Added | Shows all running transactions in the system.
| 50 | SHOW DNODE VARIABLES | Added | Shows the configuration of the specified dnode.
| 51 | SHOW VNODES | Not supported | Shows information about vnodes in the system. Not supported.
| 52 | SPLIT VGROUP | Added | Splits a vgroup into two vgroups.
| 53 | TRIM DATABASE | Added | Deletes data that has expired and orders the remaining data in accordance with the storage configuration.
| 52 | TRIM DATABASE | Added | Deletes data that has expired and orders the remaining data in accordance with the storage configuration.
| 53 | REDISTRIBUTE VGROUP | Added | Adjusts the distribution of vnodes in a vgroup.
| 54 | BALANCE VGROUP | Added | Automatically adjusts the distribution of vnodes in all vgroups.

## SQL Functions
@ -355,26 +355,29 @@ The `af` package encapsulates TDengine advanced functions such as connection man

#### Subscribe

* `func NewConsumer(conf *Config) (*Consumer, error)`
* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)`

  Creates a consumer group.

* `func (c *Consumer) Subscribe(topics []string) error`
* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error`
  Note: `rebalanceCb` is reserved for compatibility purposes.

  Subscribes to a topic.

* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error`
  Note: `rebalanceCb` is reserved for compatibility purposes.

  Subscribes to topics.

* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)`
* `func (c *Consumer) Poll(timeoutMs int) tmq.Event`

  Polls for messages.

* `func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error`
* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)`
  Note: `tmq.TopicPartition` is reserved for compatibility purposes.

  Commits information.

* `func (c *Consumer) FreeMessage(message unsafe.Pointer)`

  Frees the message.

* `func (c *Consumer) Unsubscribe() error`

  Unsubscribes.
@ -441,25 +444,36 @@ Close consumer.

### Subscribe via WebSocket

* `func NewConsumer(config *Config) (*Consumer, error)`
* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)`

  Creates a consumer group.

* `func (c *Consumer) Subscribe(topic []string) error`
* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error`
  Note: `rebalanceCb` is reserved for compatibility purposes.

  Subscribes to a topic.

* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error`
  Note: `rebalanceCb` is reserved for compatibility purposes.

  Subscribes to topics.

* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)`
* `func (c *Consumer) Poll(timeoutMs int) tmq.Event`

  Polls for messages.

* `func (c *Consumer) Commit(messageID uint64) error`
* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)`
  Note: `tmq.TopicPartition` is reserved for compatibility purposes.

  Commits information.

* `func (c *Consumer) Unsubscribe() error`

  Unsubscribes.

* `func (c *Consumer) Close() error`

  Closes the consumer.

For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go)
@ -32,7 +32,7 @@ We recommend using the latest version of `taospy`, regardless of the version of

### Preparation

1. Install Python. Python >= 3.7 is recommended. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
1. Install Python. The recent taospy package requires Python 3.6+. The earlier versions of taospy require Python 3.7+. The taos-ws-py package requires Python 3.7+. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
2. Install [pip](https://pypi.org/project/pip/). In most cases, the Python installer comes with the pip utility. If not, please refer to [pip documentation](https://pip.pypa.io/en/stable/installation/) to install it.
   If you use a native connection, you will also need to [Install Client Driver](/reference/connector#Install-Client-Driver). The client install package includes the TDengine client dynamic link library (`libtaos.so` or `taos.dll`) and the TDengine CLI.
@ -78,6 +78,22 @@ pip3 install git+https://github.com/taosdata/taos-connector-python.git
</TabItem>
</Tabs>

#### Install `taos-ws-py` (Optional)

The taos-ws-py package provides a way to access TDengine via WebSocket.

##### Install taos-ws-py with taospy

```bash
pip3 install taospy[ws]
```

##### Install taos-ws-py only

```bash
pip3 install taos-ws-py
```

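To quickly check a WebSocket installation, a hedged sketch (it assumes taosAdapter is listening on the default port 6041, and the DSN follows taos-ws-py's documented `connect` entry point):

```python
import taosws

conn = taosws.connect("taosws://root:taosdata@localhost:6041")
result = conn.query("SELECT SERVER_VERSION()")
for row in result:
    print(row)
```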
### Verify

<Tabs defaultValue="rest">
@ -17,7 +17,7 @@ import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx"

`TDengine.Connector` is a C# language connector provided by TDengine that allows C# developers to develop C# applications that access TDengine cluster data.

The `TDengine.Connector` connector supports connecting to TDengine instances via the TDengine client driver (taosc), providing data writing, querying, subscription, schemaless writing, bind interface, etc. `TDengine.Connector` also supports WebSocket, and developers can build connections through a DSN, which supports data writing, querying, and parameter binding, etc.
The `TDengine.Connector` connector supports connecting to TDengine instances via the TDengine client driver (taosc), providing data writing, querying, subscription, schemaless writing, bind interface, etc. `TDengine.Connector` also supports WebSocket from v3.0.1, and developers can build connections through a DSN, which supports data writing, querying, and parameter binding, etc.

This article describes how to install `TDengine.Connector` in a Linux or Windows environment and connect to TDengine clusters via `TDengine.Connector` to perform basic operations such as data writing and querying.
@ -66,31 +66,43 @@ Please refer to [version support list](/reference/connector#version-support)
* [Nuget Client](https://docs.microsoft.com/en-us/nuget/install-nuget-client-tools) (optional installation)
* Install TDengine client driver, please refer to [Install client driver](/reference/connector/#install-client-driver) for details

### Install via dotnet CLI
### Install `TDengine.Connector`

<Tabs defaultValue="CLI">
<TabItem value="CLI" label="Get C# driver using dotnet CLI">
<TabItem value="CLI" label="Native Connection">

You can reference the `TDengine.Connector` published on NuGet in the current project via the `dotnet` command under the path of the existing .NET project.
You can reference the `TDengine.Connector` published on NuGet in the current project via the `dotnet` CLI under the path of the existing .NET project.

``` bash
dotnet add package TDengine.Connector
```

</TabItem>
<TabItem value="source" label="Use source code to get C# driver">
You may also modify the current .NET project file by including the following `ItemGroup` in your project file (.csproj).

You can [download the source code](https://github.com/taosdata/taos-connector-dotnet/tree/3.0) and directly reference the latest version of the TDengine.Connector library.

```bash
git clone -b 3.0 https://github.com/taosdata/taos-connector-dotnet.git
cd taos-connector-dotnet
cp -r src/ myProject

cd myProject
dotnet add exmaple.csproj reference src/TDengine.csproj
```XML
<ItemGroup>
  <PackageReference Include="TDengine.Connector" Version="3.0.*" />
</ItemGroup>
```

</TabItem>
<TabItem value="source" label="WebSocket Connection">

In this scenario, modifying your project file is required in order to copy the WebSocket dependency dynamic library from the NuGet package into your project.

```XML
<ItemGroup>
  <PackageReference Include="TDengine.Connector" Version="3.0.*" GeneratePathProperty="true" />
</ItemGroup>
<Target Name="copyDLLDepency" BeforeTargets="BeforeBuild">
  <ItemGroup>
    <DepDLLFiles Include="$(PkgTDengine_Connector)\runtimes\**\*.*" />
  </ItemGroup>
  <Copy SourceFiles="@(DepDLLFiles)" DestinationFolder="$(OutDir)" />
</Target>
```

Notice: only `TDengine.Connector` versions >= 3.0.2 include the dynamic library for WebSocket.

</TabItem>
</Tabs>
@ -252,19 +264,20 @@ ws://localhost:6041/test

|Sample program |Sample program description |
|--------------------------------------------------------------------------------------------------------------------|--------------------------------------------|
| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/Query/Query.cs) | Table creation, data insertion, and query examples with TDengine.Connector |
| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/JSONTag) | Writing and querying JSON tag data with TDengine Connector |
| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/Stmt) | Parameter binding with TDengine Connector |
| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/schemaless) | Schemaless writes with TDengine Connector |
| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/AsyncQuery/QueryAsync.cs) | Asynchronous queries with TDengine Connector |
| [Subscription](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | Subscription example with TDengine Connector |
| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSample.cs) | WebSocket basic data in and out with TDengine connector |
| [WebSocket Parameter Binding](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSTMT.cs) | WebSocket parameter binding example |
| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/Query/Query.cs) | Table creation, data insertion, and query examples with TDengine.Connector |
| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/JSONTag) | Writing and querying JSON tag data with TDengine Connector |
| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/NET6Examples/Stmt) | Parameter binding with TDengine Connector |
| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/schemaless) | Schemaless writes with TDengine Connector |
| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/AsyncQuery/QueryAsync.cs) | Asynchronous queries with TDengine Connector |
| [Subscription](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/TMQ/TMQ.cs) | Subscription example with TDengine Connector |
| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/FrameWork45/WS/WebSocketSample.cs) | WebSocket basic data in and out with TDengine connector |
| [WebSocket Parameter Binding](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/FrameWork45/WS/WebSocketSTMT.cs) | WebSocket parameter binding example |

## Important update records

| TDengine.Connector | Description |
|--------------------|--------------------------------|
| 3.0.2 | Support .NET Framework 4.5 and above. Support .NET Standard 2.0. The NuGet package includes the dynamic library for WebSocket. |
| 3.0.1 | Support WebSocket and Cloud, with functions for query, insert, and parameter binding. |
| 3.0.0 | Supports TDengine 3.0.0.0. TDengine 2.x is not supported. Added `TDengine.Impl.GetData()` interface to deserialize query results. |
| 1.0.7 | Fixed TDengine.Query() memory leak. |
@ -59,11 +59,11 @@ The different database framework specifications for various programming language
| -------------------------------------- | ------------- | --------------- | ------------- | ------------- | ------------- | ------------- |
| **Connection Management** | Support | Support | Support | Support | Support | Support |
| **Regular Query** | Support | Support | Support | Support | Support | Support |
| **Parameter Binding** | Not supported | Not supported | support | Support | Not supported | Support |
| **Subscription (TMQ) ** | Not supported | Not supported | support | Not supported | Not supported | Support |
| **Schemaless** | Not supported | Not supported | Not supported | Not supported | Not supported | Not supported |
| **Bulk Pulling (based on WebSocket) ** | Support | Support | Support | support | Support | Support |
| **DataFrame** | Not supported | Support | Not supported | Not supported | Not supported | Not supported |
| **Parameter Binding** | Not Supported | Not Supported | Support | Support | Not Supported | Support |
| **Subscription (TMQ) ** | Not Supported | Support | Support | Not Supported | Not Supported | Support |
| **Schemaless** | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported |
| **Bulk Pulling (based on WebSocket) ** | Support | Support | Support | Support | Support | Support |
| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |

:::warning
@ -92,7 +92,7 @@ taosBenchmark -f <json file>

</details>

## Command-line argument in detailed
## Command-line argument in detail

- **-f/--file <json file\>** :
  Specify the configuration file to use. This file includes all parameters. Users should not use this parameter with other parameters on the command line. There is no default value.
@ -198,7 +198,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
- **-R/--disorder-range <timeRange\>** :
  Specify the timestamp range for the disordered data. The resulting disordered timestamp is the ordered timestamp minus a random value in this range. Valid only if the percentage of disordered data specified by `-O/--disorder` is greater than 0.

- **-F/--prepare_rand <Num\>** :
- **-F/--prepared_rand <Num\>** :
  Specify the number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000.

- **-a/--replica <replicaNum\>** :
@ -216,7 +216,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
- **-? /--help** :
  Show help information and exit. Users should not use it with other parameters.

## Configuration file parameters in detailed
## Configuration file parameters in detail

### General configuration parameters
@ -380,7 +380,7 @@ The configuration parameters for specifying super table tag columns and data col
- **num_of_records_per_req** :
  The number of rows written to TDengine per request; the default value is 30000. If it is set too large, the TDengine client driver will return the corresponding error message, and you then need to lower this parameter to meet the writing requirements.

- **prepare_rand**: The number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000.
- **prepared_rand**: The number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000.

### Query scenario configuration parameters
@ -142,6 +142,15 @@ The parameters described in this document by the effect that they have on the sy
| Meaning | Switch for allowing TDengine to collect and report service usage information |
| Value Range | 0: Not allowed; 1: Allowed |
| Default Value | 1 |

### crashReporting

| Attribute | Description |
| -------- | -------------------------------------------- |
| Applicable | Server Only |
| Meaning | Switch for allowing TDengine to collect and report crash-related information |
| Value Range | 0: Not allowed; 1: Allowed |
| Default Value | 1 |

## Query Parameters
@ -314,6 +323,7 @@ The charset that takes effect is UTF-8.
| Applicable | Server Only |
| Meaning | All data files are stored in this directory |
| Default Value | /var/lib/taos |
| Note | The [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage) function needs to be used in conjunction with the [KEEP](https://docs.tdengine.com/taos-sql/database/#parameters) parameter |

### tempDir
@ -594,7 +604,7 @@ The charset that takes effect is UTF-8.
| Attribute | Description |
| -------- | ----------------------------- |
| Applicable | Client only |
| Meaning | Whether schemaless columns are consistently ordered |
| Meaning | Whether schemaless columns are consistently ordered; deprecated and discarded since 3.0.3.0 |
| Value Range | 0: not consistent; 1: consistent. |
| Default | 1 |
@ -656,7 +666,7 @@ The charset that takes effect is UTF-8.
| 20 | minimalTmpDirGB | Yes | Yes | |
| 21 | smlChildTableName | Yes | Yes | |
| 22 | smlTagName | Yes | Yes | |
| 23 | smlDataFormat | No | Yes | |
| 23 | smlDataFormat | No | Yes (discarded since 3.0.3.0) | |
| 24 | statusInterval | Yes | Yes | |
| 25 | logDir | Yes | Yes | |
| 26 | minimalLogDirGB | Yes | Yes | |
@ -80,7 +80,7 @@ You can configure smlChildTableName in taos.cfg to specify table names, for exam
   NULL.
6. For BINARY or NCHAR columns, if the length of the value provided in a data row exceeds the column type limit, the maximum length of characters allowed to be stored in the column is automatically increased (only incremented and not decremented) to ensure complete preservation of the data.
7. Errors encountered throughout the processing will interrupt the writing process and return an error code.
8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur. (smlDataFormat in taos.cfg defaults to false since version 3.0.1.3.)
8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur. (smlDataFormat in taos.cfg defaults to false since version 3.0.1.3 and is discarded since 3.0.3.0.)

:::tip
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed
@ -33,7 +33,7 @@ TDengine 3.0 is not compatible with the configuration and data files from previo
4. Install TDengine 3.0.
5. For assistance in migrating data to TDengine 3.0, contact [TDengine Support](https://tdengine.com/support).

### 4. How can I resolve the "Unable to establish connection" error?
### 2. How can I resolve the "Unable to establish connection" error?

This error indicates that the client could not connect to the server. Perform the following troubleshooting steps:
@ -68,7 +68,7 @@ This error indicates that the client could not connect to the server. Perform th

11. You can also use the TDengine CLI to diagnose network issues. For more information, see [Problem Diagnostics](https://docs.tdengine.com/operation/diagnose/).

### 5. How can I resolve the "Unable to resolve FQDN" error?
### 3. How can I resolve the "Unable to resolve FQDN" error?

Clients and dnodes must be able to resolve the FQDN of each required node. You can confirm your configuration as follows:
@ -79,15 +79,15 @@ Clients and dnodes must be able to resolve the FQDN of each required node. You c
5. If TDengine has been previously installed and the `hostname` was modified, open `dnode.json` in the `data` folder and verify that the endpoint configuration is correct. The default location of the dnode file is `/var/lib/taos/dnode`. Ensure that you clean up previous installations before reinstalling TDengine.
6. Confirm whether FQDNs are preconfigured in `/etc/hosts` and `/etc/hostname`.

### 6. What is the most effective way to write data to TDengine?
### 4. What is the most effective way to write data to TDengine?

Writing data in batches provides higher efficiency in most situations. You can insert one or more data records into one or more tables in a single SQL statement, as in the sketch below.

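A hedged taospy sketch of such a batch insert (the database, subtable names, and schema are hypothetical):

```python
import taos

conn = taos.connect(database="power")  # hypothetical database
# One SQL statement writes several rows into several subtables at once.
conn.execute(
    "INSERT INTO d1001 VALUES (NOW, 10.3, 219, 0.31) (NOW + 1s, 10.4, 220, 0.33) "
    "d1002 VALUES (NOW, 9.8, 221, 0.28)"
)
```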
### 9. Why are table names not fully displayed?
### 5. Why are table names not fully displayed?

The number of columns in the TDengine CLI terminal display is limited. This can cause table names to be cut off, and if you use an incomplete name in a statement, the "Table does not exist" error will occur. You can increase the display size with the `maxBinaryDisplayWidth` parameter or the SQL statement `set max_binary_display_width`. You can also append `\G` to your SQL statement to bypass this limitation.

### 10. How can I migrate data?
### 6. How can I migrate data?

In TDengine, the `hostname` uniquely identifies a machine. When you move data files to a new machine, you must configure the new machine to have the same `hostname` as the original machine.
@ -97,7 +97,7 @@ The data structure of previous versions of TDengine is not compatible with versi

:::

### 11. How can I temporarily change the log level from the TDengine Client?
### 7. How can I temporarily change the log level from the TDengine Client?

To change the log level for debugging purposes, you can use the following command:
@ -118,14 +118,14 @@ Use `resetlog` to remove all logs generated on the local client. Use the other p

For each parameter, you can set the value to `131` (error and warning), `135` (error, warning, and debug), or `143` (error, warning, debug, and trace).

### Why do TDengine components written in Go fail to compile?
### 8. Why do TDengine components written in Go fail to compile?

TDengine includes taosAdapter, an independent component written in Go. This component provides the REST API as well as data access for other products such as Prometheus and Telegraf.
When using the develop branch, you must run `git submodule update --init --recursive` to download the taosAdapter repository and then compile it.

TDengine Go components require Go version 1.14 or later.

### 13. How can I query the storage space being used by my data?
### 9. How can I query the storage space being used by my data?

The TDengine data files are stored in `/var/lib/taos` by default. Log files are stored in `/var/log/taos`.
@ -133,7 +133,7 @@ To see how much space your data files occupy, run `du -sh /var/lib/taos/vnode --

If you want to see how much space is occupied by a single database, first determine which vgroup is storing the database by running `show vgroups`. Then check `/var/lib/taos/vnode` for the files associated with the vgroup ID.

### 15. How is timezone information processed for timestamps?
### 10. How is timezone information processed for timestamps?

TDengine uses the timezone of the client for timestamps. The server timezone does not affect timestamps. The client converts Unix timestamps in SQL statements to UTC before sending them to the server. When you query data on the server, it provides timestamps in UTC to the client, which converts them to its local time.
@ -144,13 +144,13 @@ Timestamps are processed as follows:
3. A timezone explicitly specified when establishing a connection to TDengine through a connector takes precedence over `taos.cfg` and the system timezone. For example, the Java connector allows you to specify a timezone in the JDBC URL.
4. If you use an RFC 3339 timestamp (2013-04-12T15:52:01.123+08:00), or an ISO 8601 timestamp (2013-04-12T15:52:01.123+0800), the timezone specified in the timestamp is used instead of the timestamps configured using any other method.

### 16. Which network ports are required by TDengine?
### 11. Which network ports are required by TDengine?

See [serverPort](https://docs.tdengine.com/reference/config/#serverport) in Configuration Parameters.

Note that ports are specified using 6030 as the default first port. If you change this port, all other ports change as well.

### 17. Why do applications such as Grafana fail to connect to TDengine over the REST API?
### 12. Why do applications such as Grafana fail to connect to TDengine over the REST API?

In TDengine, the REST API is provided by taosAdapter. Ensure that taosAdapter is running before you connect an application to TDengine over the REST API. You can run `systemctl start taosadapter` to start the service.
@ -158,7 +158,7 @@ Note that the log path for taosAdapter must be configured separately. The defaul

For more information, see [taosAdapter](https://docs.tdengine.com/reference/taosadapter/).

### 18. How can I resolve out-of-memory (OOM) errors?
### 13. How can I resolve out-of-memory (OOM) errors?

OOM errors are thrown by the operating system when its memory, including swap, becomes insufficient and it needs to terminate processes to remain operational. Most OOM errors in TDengine occur for one of the following reasons: free memory is less than the value of `vm.min_free_kbytes` or free memory is less than the size of the request. If TDengine occupies reserved memory, an OOM error can occur even when free memory is sufficient.
@ -10,6 +10,14 @@ For TDengine 2.x installation packages by version, please visit [here](https://w

import Release from "/components/ReleaseV3";

## 3.0.2.4

<Release type="tdengine" version="3.0.2.4" />

## 3.0.2.3

<Release type="tdengine" version="3.0.2.3" />

## 3.0.2.2

<Release type="tdengine" version="3.0.2.2" />
@ -10,6 +10,14 @@ For other historical version installers, please visit [here](https://www.taosdat

import Release from "/components/ReleaseV3";

## 2.4.2

<Release type="tools" version="2.4.2" />

## 2.4.1

<Release type="tools" version="2.4.1" />

## 2.4.0

<Release type="tools" version="2.4.0" />
@@ -9,7 +9,7 @@
  </PropertyGroup>

  <ItemGroup>
-    <PackageReference Include="TDengine.Connector" Version="3.0.1" />
+    <PackageReference Include="TDengine.Connector" Version="3.0.*" />
  </ItemGroup>

</Project>

@@ -9,7 +9,7 @@
  </PropertyGroup>

  <ItemGroup>
-    <PackageReference Include="TDengine.Connector" Version="3.0.0" />
+    <PackageReference Include="TDengine.Connector" Version="3.0.*" />
  </ItemGroup>

</Project>

@@ -9,7 +9,7 @@
  </PropertyGroup>

  <ItemGroup>
-    <PackageReference Include="TDengine.Connector" Version="3.0.0" />
+    <PackageReference Include="TDengine.Connector" Version="3.0.*" />
  </ItemGroup>

</Project>

@@ -9,7 +9,7 @@
  </PropertyGroup>

  <ItemGroup>
-    <PackageReference Include="TDengine.Connector" Version="3.0.0" />
+    <PackageReference Include="TDengine.Connector" Version="3.0.*" />
  </ItemGroup>

</Project>

@@ -9,7 +9,7 @@
  </PropertyGroup>

  <ItemGroup>
-    <PackageReference Include="TDengine.Connector" Version="3.0.0" />
+    <PackageReference Include="TDengine.Connector" Version="3.0.*" />
  </ItemGroup>

</Project>

@@ -9,7 +9,7 @@
  </PropertyGroup>

  <ItemGroup>
-    <PackageReference Include="TDengine.Connector" Version="3.0.1" />
+    <PackageReference Include="TDengine.Connector" Version="3.0.*" />
  </ItemGroup>

</Project>

@@ -9,7 +9,7 @@
  </PropertyGroup>

  <ItemGroup>
-    <PackageReference Include="TDengine.Connector" Version="3.0.0" />
+    <PackageReference Include="TDengine.Connector" Version="3.0.*" />
  </ItemGroup>

</Project>
@@ -42,7 +42,7 @@ namespace TDengineExample

            // 5. execute
            res = TDengine.StmtExecute(stmt);
-            CheckStmtRes(res, "faild to execute");
+            CheckStmtRes(res, "failed to execute");

            // 6. free
            TaosMultiBind.FreeTaosBind(tags);

@@ -92,7 +92,7 @@ namespace TDengineExample
            int code = TDengine.StmtClose(stmt);
            if (code != 0)
            {
-                throw new Exception($"falied to close stmt, {code} reason: {TDengine.StmtErrorStr(stmt)} ");
+                throw new Exception($"failed to close stmt, {code} reason: {TDengine.StmtErrorStr(stmt)} ");
            }
        }
    }
@@ -9,7 +9,7 @@
  </PropertyGroup>

  <ItemGroup>
-    <PackageReference Include="TDengine.Connector" Version="3.0.0" />
+    <PackageReference Include="TDengine.Connector" Version="3.0.*" />
  </ItemGroup>

</Project>

@@ -9,7 +9,7 @@
  </PropertyGroup>

  <ItemGroup>
-    <PackageReference Include="TDengine.Connector" Version="3.0.1" />
+    <PackageReference Include="TDengine.Connector" Version="3.0.*" />
  </ItemGroup>

</Project>
@@ -3,11 +3,16 @@
  <PropertyGroup>
    <OutputType>Exe</OutputType>
    <TargetFramework>net5.0</TargetFramework>
    <Nullable>enable</Nullable>
  </PropertyGroup>

  <ItemGroup>
-    <PackageReference Include="TDengine.Connector" Version="3.0.1" />
+    <PackageReference Include="TDengine.Connector" Version="3.0.*" GeneratePathProperty="true" />
  </ItemGroup>
+  <Target Name="copyDLLDependency" BeforeTargets="BeforeBuild">
+    <ItemGroup>
+      <DepDLLFiles Include="$(PkgTDengine_Connector)\runtimes\**\*.*" />
+    </ItemGroup>
+    <Copy SourceFiles="@(DepDLLFiles)" DestinationFolder="$(OutDir)" />
+  </Target>

</Project>

@@ -5,9 +5,13 @@
    <TargetFramework>net5.0</TargetFramework>
    <Nullable>enable</Nullable>
  </PropertyGroup>

  <ItemGroup>
-    <PackageReference Include="TDengine.Connector" Version="3.0.1" />
+    <PackageReference Include="TDengine.Connector" Version="3.0.*" GeneratePathProperty="true" />
  </ItemGroup>

+  <Target Name="copyDLLDependency" BeforeTargets="BeforeBuild">
+    <ItemGroup>
+      <DepDLLFiles Include="$(PkgTDengine_Connector)\runtimes\**\*.*" />
+    </ItemGroup>
+    <Copy SourceFiles="@(DepDLLFiles)" DestinationFolder="$(OutDir)" />
+  </Target>
  </Project>

@@ -7,7 +7,13 @@
  </PropertyGroup>

  <ItemGroup>
-    <PackageReference Include="TDengine.Connector" Version="3.0.1" />
+    <PackageReference Include="TDengine.Connector" Version="3.0.*" GeneratePathProperty="true" />
  </ItemGroup>
+  <Target Name="copyDLLDependency" BeforeTargets="BeforeBuild">
+    <ItemGroup>
+      <DepDLLFiles Include="$(PkgTDengine_Connector)\runtimes\**\*.*" />
+    </ItemGroup>
+    <Copy SourceFiles="@(DepDLLFiles)" DestinationFolder="$(OutDir)" />
+  </Target>

</Project>

@@ -7,7 +7,13 @@
  </PropertyGroup>

  <ItemGroup>
-    <PackageReference Include="TDengine.Connector" Version="3.0.1" />
+    <PackageReference Include="TDengine.Connector" Version="3.0.*" GeneratePathProperty="true" />
  </ItemGroup>
+  <Target Name="copyDLLDependency" BeforeTargets="BeforeBuild">
+    <ItemGroup>
+      <DepDLLFiles Include="$(PkgTDengine_Connector)\runtimes\**\*.*" />
+    </ItemGroup>
+    <Copy SourceFiles="@(DepDLLFiles)" DestinationFolder="$(OutDir)" />
+  </Target>

</Project>
@@ -2,5 +2,5 @@ module goexample

go 1.17

-require github.com/taosdata/driver-go/v3 3.0
+require github.com/taosdata/driver-go/v3 v3.1.0
@@ -0,0 +1,15 @@
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/taosdata/driver-go/v3 v3.1.0/go.mod h1:H2vo/At+rOPY1aMzUV9P49SVX7NlXb3LAbKw+MCLrmU=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -1,17 +1,12 @@
package main

import (
-	"context"
-	"encoding/json"
	"fmt"
-	"strconv"
-	"time"
+	"os"

	"github.com/taosdata/driver-go/v3/af"
	"github.com/taosdata/driver-go/v3/af/tmq"
-	"github.com/taosdata/driver-go/v3/common"
-	"github.com/taosdata/driver-go/v3/errors"
-	"github.com/taosdata/driver-go/v3/wrapper"
+	tmqcommon "github.com/taosdata/driver-go/v3/common/tmq"
)

func main() {

@@ -28,55 +23,26 @@ func main() {
	if err != nil {
		panic(err)
	}
-	config := tmq.NewConfig()
-	defer config.Destroy()
-	err = config.SetGroupID("test")
-	if err != nil {
-		panic(err)
-	}
-	err = config.SetAutoOffsetReset("earliest")
-	if err != nil {
-		panic(err)
-	}
-	err = config.SetConnectIP("127.0.0.1")
-	if err != nil {
-		panic(err)
-	}
-	err = config.SetConnectUser("root")
-	if err != nil {
-		panic(err)
-	}
-	err = config.SetConnectPass("taosdata")
-	if err != nil {
-		panic(err)
-	}
-	err = config.SetConnectPort("6030")
-	if err != nil {
-		panic(err)
-	}
-	err = config.SetMsgWithTableName(true)
-	if err != nil {
-		panic(err)
-	}
-	err = config.EnableHeartBeat()
-	if err != nil {
-		panic(err)
-	}
-	err = config.EnableAutoCommit(func(result *wrapper.TMQCommitCallbackResult) {
-		if result.ErrCode != 0 {
-			errStr := wrapper.TMQErr2Str(result.ErrCode)
-			err := errors.NewError(int(result.ErrCode), errStr)
-			panic(err)
-		}
-	})
-	if err != nil {
-		panic(err)
-	}
-	consumer, err := tmq.NewConsumer(config)
-	if err != nil {
-		panic(err)
-	}
-	err = consumer.Subscribe([]string{"example_tmq_topic"})
+	consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
+		"group.id":                     "test",
+		"auto.offset.reset":            "earliest",
+		"td.connect.ip":                "127.0.0.1",
+		"td.connect.user":              "root",
+		"td.connect.pass":              "taosdata",
+		"td.connect.port":              "6030",
+		"client.id":                    "test_tmq_client",
+		"enable.auto.commit":           "false",
+		"enable.heartbeat.background":  "true",
+		"experimental.snapshot.enable": "true",
+		"msg.with.table.name":          "true",
+	})
+	if err != nil {
+		panic(err)
+	}
+	err = consumer.Subscribe("example_tmq_topic", nil)
	if err != nil {
		panic(err)
	}

@@ -88,19 +54,25 @@ func main() {
	if err != nil {
		panic(err)
	}
-	for {
-		result, err := consumer.Poll(time.Second)
-		if err != nil {
-			panic(err)
-		}
-		if result.Type != common.TMQ_RES_DATA {
-			panic("want message type 1 got " + strconv.Itoa(int(result.Type)))
-		}
-		data, _ := json.Marshal(result.Data)
-		fmt.Println(string(data))
-		consumer.Commit(context.Background(), result.Message)
-		consumer.FreeMessage(result.Message)
-		break
+	for i := 0; i < 5; i++ {
+		ev := consumer.Poll(0)
+		if ev != nil {
+			switch e := ev.(type) {
+			case *tmqcommon.DataMessage:
+				fmt.Println(e.String())
+			case tmqcommon.Error:
+				fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e)
+				panic(e)
+			}
+			consumer.Commit()
+		}
	}
-	consumer.Close()
+	err = consumer.Unsubscribe()
+	if err != nil {
+		panic(err)
+	}
+	err = consumer.Close()
+	if err != nil {
+		panic(err)
+	}
}
@@ -1,8 +1,11 @@
import pandas
-from sqlalchemy import create_engine
+from sqlalchemy import create_engine, text

engine = create_engine("taos://root:taosdata@localhost:6030/power")
-df = pandas.read_sql("SELECT * FROM meters", engine)
+conn = engine.connect()
+df = pandas.read_sql(text("SELECT * FROM power.meters"), conn)
+conn.close()

# print index
print(df.index)
@@ -1,8 +1,10 @@
import pandas
-from sqlalchemy import create_engine
+from sqlalchemy import create_engine, text

engine = create_engine("taosrest://root:taosdata@localhost:6041")
-df: pandas.DataFrame = pandas.read_sql("SELECT * FROM power.meters", engine)
+conn = engine.connect()
+df: pandas.DataFrame = pandas.read_sql(text("SELECT * FROM power.meters"), conn)
+conn.close()

# print index
print(df.index)
@@ -1,24 +1,25 @@
# ANCHOR: connect
from taosrest import connect, TaosRestConnection, TaosRestCursor

-conn: TaosRestConnection = connect(url="http://localhost:6041",
-                                   user="root",
-                                   password="taosdata",
-                                   timeout=30)
+conn = connect(url="http://localhost:6041",
+               user="root",
+               password="taosdata",
+               timeout=30)

# ANCHOR_END: connect
# ANCHOR: basic
# create STable
-cursor: TaosRestCursor = conn.cursor()
+cursor = conn.cursor()
cursor.execute("DROP DATABASE IF EXISTS power")
cursor.execute("CREATE DATABASE power")
-cursor.execute("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)")
+cursor.execute(
+    "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)")

# insert data
-cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
-    power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
-    power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
-    power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""")
+cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
+    power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
+    power.d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
+    power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""")
print("inserted row count:", cursor.rowcount)

# query data

@@ -28,7 +29,7 @@ print("queried row count:", cursor.rowcount)
# get column names from cursor
column_names = [meta[0] for meta in cursor.description]
# get rows
-data: list[tuple] = cursor.fetchall()
+data = cursor.fetchall()
print(column_names)
for row in data:
    print(row)
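Since `cursor.description` provides the column names, a small follow-up sketch turns the fetched rows into dict-shaped records:

```python
# Pair the column names with each fetched row to get dict records.
records = [dict(zip(column_names, row)) for row in data]
for record in records:
    print(record)
```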
@@ -8,7 +8,7 @@ conn.execute("CREATE DATABASE test")
# change database. same as execute "USE db"
conn.select_db("test")
conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)")
-affected_row: int = conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m, 24.4)")
+affected_row = conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m, 24.4)")
print("affected_row", affected_row)
# output:
# affected_row 3

@@ -16,10 +16,10 @@ print("affected_row", affected_row)

# ANCHOR: query
# Execute a sql and get its result set. It's useful for SELECT statement
-result: taos.TaosResult = conn.query("SELECT * from weather")
+result = conn.query("SELECT * from weather")

# Get fields from result
-fields: taos.field.TaosFields = result.fields
+fields = result.fields
for field in fields:
    print(field)  # {name: ts, type: 9, bytes: 8}
@@ -1,15 +1,14 @@
# install dependencies:
# recommend python >= 3.8
-# pip3 install faster-fifo
#

import logging
import math
import multiprocessing
import sys
import time
import os
-from multiprocessing import Process
-from faster_fifo import Queue
+from multiprocessing import Process, Queue
from mockdatasource import MockDataSource
from queue import Empty
from typing import List

@@ -22,8 +21,7 @@ TABLE_COUNT = 1000
QUEUE_SIZE = 1000000
MAX_BATCH_SIZE = 3000

-read_processes = []
-write_processes = []
+_DONE_MESSAGE = '__DONE__'


def get_connection():

@@ -44,41 +42,64 @@ def get_connection():

# ANCHOR: read

-def run_read_task(task_id: int, task_queues: List[Queue]):
+def run_read_task(task_id: int, task_queues: List[Queue], infinity):
    table_count_per_task = TABLE_COUNT // READ_TASK_COUNT
-    data_source = MockDataSource(f"tb{task_id}", table_count_per_task)
+    data_source = MockDataSource(f"tb{task_id}", table_count_per_task, infinity)
    try:
        for batch in data_source:
+            if isinstance(batch, tuple):
+                batch = [batch]
            for table_id, rows in batch:
                # hash data to different queue
                i = table_id % len(task_queues)
                # block putting forever when the queue is full
-                task_queues[i].put_many(rows, block=True, timeout=-1)
+                for row in rows:
+                    task_queues[i].put(row)
+        if not infinity:
+            for queue in task_queues:
+                queue.put(_DONE_MESSAGE)
    except KeyboardInterrupt:
        pass
+    finally:
+        logging.info('read task over')


# ANCHOR_END: read


# ANCHOR: write
-def run_write_task(task_id: int, queue: Queue):
+def run_write_task(task_id: int, queue: Queue, done_queue: Queue):
    from sql_writer import SQLWriter
    log = logging.getLogger(f"WriteTask-{task_id}")
    writer = SQLWriter(get_connection)
    lines = None
    try:
        while True:
-            try:
-                # get as many as possible
-                lines = queue.get_many(block=False, max_messages_to_get=MAX_BATCH_SIZE)
+            over = False
+            lines = []
+            for _ in range(MAX_BATCH_SIZE):
+                try:
+                    line = queue.get_nowait()
+                    if line == _DONE_MESSAGE:
+                        over = True
+                        break
+                    if line:
+                        lines.append(line)
+                except Empty:
+                    time.sleep(0.1)
+            if len(lines) > 0:
                writer.process_lines(lines)
-            except Empty:
-                time.sleep(0.01)
+            if over:
+                done_queue.put(_DONE_MESSAGE)
+                break
    except KeyboardInterrupt:
        pass
    except BaseException as e:
        log.debug(f"lines={lines}")
        raise e
+    finally:
+        writer.close()
+        log.debug('write task over')


# ANCHOR_END: write

@@ -103,47 +124,64 @@ def set_global_config():


# ANCHOR: monitor
-def run_monitor_process():
+def run_monitor_process(done_queue: Queue):
    log = logging.getLogger("DataBaseMonitor")
-    conn = get_connection()
-    conn.execute("DROP DATABASE IF EXISTS test")
-    conn.execute("CREATE DATABASE test")
-    conn.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) "
-                 "TAGS (location BINARY(64), groupId INT)")
-
-    def get_count():
-        res = conn.query("SELECT count(*) FROM test.meters")
-        rows = res.fetch_all()
-        return rows[0][0] if rows else 0
-
-    last_count = 0
-    while True:
-        time.sleep(10)
-        count = get_count()
-        log.info(f"count={count} speed={(count - last_count) / 10}")
-        last_count = count
+    conn = None
+    try:
+        conn = get_connection()
+
+        def get_count():
+            res = conn.query("SELECT count(*) FROM test.meters")
+            rows = res.fetch_all()
+            return rows[0][0] if rows else 0
+
+        last_count = 0
+        while True:
+            try:
+                done = done_queue.get_nowait()
+                if done == _DONE_MESSAGE:
+                    break
+            except Empty:
+                pass
+            time.sleep(10)
+            count = get_count()
+            log.info(f"count={count} speed={(count - last_count) / 10}")
+            last_count = count
+    finally:
+        conn.close()


# ANCHOR_END: monitor
# ANCHOR: main
-def main():
+def main(infinity):
    set_global_config()
    logging.info(f"READ_TASK_COUNT={READ_TASK_COUNT}, WRITE_TASK_COUNT={WRITE_TASK_COUNT}, "
                 f"TABLE_COUNT={TABLE_COUNT}, QUEUE_SIZE={QUEUE_SIZE}, MAX_BATCH_SIZE={MAX_BATCH_SIZE}")

-    monitor_process = Process(target=run_monitor_process)
+    conn = get_connection()
+    conn.execute("DROP DATABASE IF EXISTS test")
+    conn.execute("CREATE DATABASE IF NOT EXISTS test")
+    conn.execute("CREATE STABLE IF NOT EXISTS test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) "
+                 "TAGS (location BINARY(64), groupId INT)")
+    conn.close()
+
+    done_queue = Queue()
+    monitor_process = Process(target=run_monitor_process, args=(done_queue,))
    monitor_process.start()
-    time.sleep(3)  # waiting for database ready.
    logging.debug(f"monitor task started with pid {monitor_process.pid}")

    task_queues: List[Queue] = []
+    write_processes = []
+    read_processes = []

    # create task queues
    for i in range(WRITE_TASK_COUNT):
-        queue = Queue(max_size_bytes=QUEUE_SIZE)
+        queue = Queue()
        task_queues.append(queue)

    # create write processes
    for i in range(WRITE_TASK_COUNT):
-        p = Process(target=run_write_task, args=(i, task_queues[i]))
+        p = Process(target=run_write_task, args=(i, task_queues[i], done_queue))
        p.start()
        logging.debug(f"WriteTask-{i} started with pid {p.pid}")
        write_processes.append(p)

@@ -151,13 +189,19 @@ def main():
    # create read processes
    for i in range(READ_TASK_COUNT):
        queues = assign_queues(i, task_queues)
-        p = Process(target=run_read_task, args=(i, queues))
+        p = Process(target=run_read_task, args=(i, queues, infinity))
        p.start()
        logging.debug(f"ReadTask-{i} started with pid {p.pid}")
        read_processes.append(p)

    try:
        monitor_process.join()
+        for p in read_processes:
+            p.join()
+        for p in write_processes:
+            p.join()
+        time.sleep(1)
        return
    except KeyboardInterrupt:
        monitor_process.terminate()
        [p.terminate() for p in read_processes]

@@ -176,5 +220,6 @@ def assign_queues(read_task_id, task_queues):


if __name__ == '__main__':
-    main()
+    multiprocessing.set_start_method('spawn')
+    main(False)
# ANCHOR_END: main
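The rewrite above drops faster-fifo in favor of the standard `multiprocessing.Queue` and introduces a `_DONE_MESSAGE` sentinel so that finite runs shut down cleanly. A self-contained sketch of that sentinel protocol (hypothetical names, independent of TDengine):

```python
import time
from multiprocessing import Process, Queue
from queue import Empty

DONE = '__DONE__'  # sentinel, mirroring _DONE_MESSAGE above

def producer(q: Queue):
    for i in range(5):
        q.put(f"row-{i}")
    q.put(DONE)  # signal that no more data will arrive

def consumer(q: Queue, done_q: Queue):
    while True:
        try:
            item = q.get_nowait()
        except Empty:
            time.sleep(0.1)
            continue
        if item == DONE:
            done_q.put(DONE)  # propagate shutdown to the monitor
            break
        print("write:", item)

if __name__ == '__main__':
    q, done_q = Queue(), Queue()
    procs = [Process(target=producer, args=(q,)),
             Process(target=consumer, args=(q, done_q))]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print("monitor sees:", done_q.get())
```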
@@ -26,7 +26,8 @@ class Consumer(object):
        'bath_consume': True,
        'batch_size': 1000,
        'async_model': True,
-        'workers': 10
+        'workers': 10,
+        'testing': False
    }

    LOCATIONS = ['California.SanFrancisco', 'California.LosAngles', 'California.SanDiego', 'California.SanJose',

@@ -46,11 +47,12 @@ class Consumer(object):
    def __init__(self, **configs):
        self.config: dict = self.DEFAULT_CONFIGS
        self.config.update(configs)
-        self.consumer = KafkaConsumer(
-            self.config.get('kafka_topic'),  # topic
-            bootstrap_servers=self.config.get('kafka_brokers'),
-            group_id=self.config.get('kafka_group_id'),
-        )
+        if not self.config.get('testing'):
+            self.consumer = KafkaConsumer(
+                self.config.get('kafka_topic'),  # topic
+                bootstrap_servers=self.config.get('kafka_brokers'),
+                group_id=self.config.get('kafka_group_id'),
+            )
        self.taos = taos.connect(
            host=self.config.get('taos_host'),
            user=self.config.get('taos_user'),

@@ -60,7 +62,7 @@ class Consumer(object):
        )
        if self.config.get('async_model'):
            self.pool = ThreadPoolExecutor(max_workers=self.config.get('workers'))
-            self.tasks: list[Future] = []
+            self.tasks = []
        # tags and table mapping # key: {location}_{groupId} value:
        self.tag_table_mapping = {}
        i = 0

@@ -115,14 +117,14 @@ class Consumer(object):
        if self.taos is not None:
            self.taos.close()

-    def _run(self, f: Callable[[ConsumerRecord], bool]):
+    def _run(self, f):
        for message in self.consumer:
            if self.config.get('async_model'):
                self.pool.submit(f(message))
            else:
                f(message)

-    def _run_batch(self, f: Callable[[list[list[ConsumerRecord]]], None]):
+    def _run_batch(self, f):
        while True:
            messages = self.consumer.poll(timeout_ms=500, max_records=self.config.get('batch_size'))
            if messages:

@@ -140,7 +142,7 @@ class Consumer(object):
        logging.info('## insert sql %s', sql)
        return self.taos.execute(sql=sql) == 1

-    def _to_taos_batch(self, messages: list[list[ConsumerRecord]]):
+    def _to_taos_batch(self, messages):
        sql = self._build_sql_batch(messages=messages)
        if len(sql) == 0:  # decode error, skip
            return

@@ -162,7 +164,7 @@ class Consumer(object):
        table_name = self._get_table_name(location=location, group_id=group_id)
        return self.INSERT_PART_SQL.format(table_name, ts, current, voltage, phase)

-    def _build_sql_batch(self, messages: list[list[ConsumerRecord]]) -> str:
+    def _build_sql_batch(self, messages) -> str:
        sql_list = []
        for partition_messages in messages:
            for message in partition_messages:

@@ -186,7 +188,54 @@ def _get_location_and_group(key: str) -> (str, int):
    return fields[0], fields[1]


+def test_to_taos(consumer: Consumer):
+    msg = {
+        'location': 'California.SanFrancisco',
+        'groupId': 1,
+        'ts': '2022-12-06 15:13:38.643',
+        'current': 3.41,
+        'voltage': 105,
+        'phase': 0.02027,
+    }
+    record = ConsumerRecord(checksum=None, headers=None, offset=1, key=None, value=json.dumps(msg), partition=1,
+                            topic='test', serialized_key_size=None, serialized_header_size=None,
+                            serialized_value_size=None, timestamp=time.time(), timestamp_type=None)
+    assert consumer._to_taos(message=record)
+
+
+def test_to_taos_batch(consumer: Consumer):
+    records = [
+        [
+            ConsumerRecord(checksum=None, headers=None, offset=1, key=None,
+                           value=json.dumps({'location': 'California.SanFrancisco',
+                                             'groupId': 1,
+                                             'ts': '2022-12-06 15:13:38.643',
+                                             'current': 3.41,
+                                             'voltage': 105,
+                                             'phase': 0.02027, }),
+                           partition=1, topic='test', serialized_key_size=None, serialized_header_size=None,
+                           serialized_value_size=None, timestamp=time.time(), timestamp_type=None),
+            ConsumerRecord(checksum=None, headers=None, offset=1, key=None,
+                           value=json.dumps({'location': 'California.LosAngles',
+                                             'groupId': 2,
+                                             'ts': '2022-12-06 15:13:39.643',
+                                             'current': 3.41,
+                                             'voltage': 102,
+                                             'phase': 0.02027, }),
+                           partition=1, topic='test', serialized_key_size=None, serialized_header_size=None,
+                           serialized_value_size=None, timestamp=time.time(), timestamp_type=None),
+        ]
+    ]
+
+    consumer._to_taos_batch(messages=records)
+
+
if __name__ == '__main__':
-    consumer = Consumer(async_model=True)
+    consumer = Consumer(async_model=True, testing=True)
    # init env
    consumer.init_env()
-    consumer.consume()
+    # consumer.consume()
+    # test build sql
+    # test build sql batch
+    test_to_taos(consumer)
+    test_to_taos_batch(consumer)
@@ -10,13 +10,14 @@ class MockDataSource:
        "9.4,118,0.141,California.SanFrancisco,4"
    ]

-    def __init__(self, tb_name_prefix, table_count):
+    def __init__(self, tb_name_prefix, table_count, infinity=True):
        self.table_name_prefix = tb_name_prefix + "_"
        self.table_count = table_count
        self.max_rows = 10000000
        self.current_ts = round(time.time() * 1000) - self.max_rows * 100
        # [(tableId, tableName, values),]
        self.data = self._init_data()
+        self.infinity = infinity

    def _init_data(self):
        lines = self.samples * (self.table_count // 5 + 1)

@@ -28,14 +29,19 @@ class MockDataSource:

    def __iter__(self):
        self.row = 0
-        return self
+        if not self.infinity:
+            return iter(self._iter_data())
+        else:
+            return self

    def __next__(self):
        """
        next 1000 rows for each table.
        return: {tableId:[row,...]}
        """
-        # generate 1000 timestamps
+        return self._iter_data()
+
+    def _iter_data(self):
        ts = []
        for _ in range(1000):
            self.current_ts += 100

@@ -47,3 +53,9 @@ class MockDataSource:
        rows = [table_name + ',' + t + ',' + values for t in ts]
        result.append((table_id, rows))
        return result
+
+
+if __name__ == '__main__':
+    datasource = MockDataSource('t', 10, False)
+    for data in datasource:
+        print(data)
@@ -25,10 +25,10 @@ def create_stable(conn: taos.TaosConnection):


# The generated SQL is:
-# INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
-#                d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
-#                d1003 USING meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
-#                d1004 USING meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)
+# INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
+#                d1002 USING meters TAGS('California.SanFrancisco', 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
+#                d1003 USING meters TAGS('California.LosAngeles', 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
+#                d1004 USING meters TAGS('California.LosAngeles', 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)

def get_sql():
    global lines
@@ -10,6 +10,7 @@ class SQLWriter:
        self._tb_tags = {}
        self._conn = get_connection_func()
        self._max_sql_length = self.get_max_sql_length()
+        self._conn.execute("create database if not exists test")
        self._conn.execute("USE test")

    def get_max_sql_length(self):

@@ -20,7 +21,7 @@ class SQLWriter:
            return int(r[1])
        return 1024 * 1024

-    def process_lines(self, lines: str):
+    def process_lines(self, lines: [str]):
        """
        :param lines: [[tbName,ts,current,voltage,phase,location,groupId]]
        """

@@ -60,6 +61,7 @@ class SQLWriter:
            buf.append(q)
            sql_len += len(q)
        sql += " ".join(buf)
+        self.create_tables()
        self.execute_sql(sql)
        self._tb_values.clear()

@@ -88,3 +90,22 @@ class SQLWriter:
        except BaseException as e:
            self.log.error("Execute SQL: %s", sql)
            raise e
+
+    def close(self):
+        if self._conn:
+            self._conn.close()
+
+
+if __name__ == '__main__':
+    def get_connection_func():
+        conn = taos.connect()
+        return conn
+
+
+    writer = SQLWriter(get_connection_func=get_connection_func)
+    writer.execute_sql(
+        "create stable if not exists meters (ts timestamp, current float, voltage int, phase float) "
+        "tags (location binary(64), groupId int)")
+    writer.execute_sql(
+        "INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) "
+        "VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32)")
@@ -1,58 +1,55 @@
-import taos
-from taos.tmq import *
-
-conn = taos.connect()
-
-print("init")
-conn.execute("drop topic if exists topic_ctb_column")
-conn.execute("drop database if exists py_tmq")
-conn.execute("create database if not exists py_tmq vgroups 2")
-conn.select_db("py_tmq")
-conn.execute(
-    "create stable if not exists stb1 (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)"
-)
-conn.execute("create table if not exists tb1 using stb1 tags(1)")
-conn.execute("create table if not exists tb2 using stb1 tags(2)")
-conn.execute("create table if not exists tb3 using stb1 tags(3)")
-
-print("create topic")
-conn.execute(
-    "create topic if not exists topic_ctb_column as select ts, c1, c2, c3 from stb1"
-)
-
-print("build consumer")
-conf = TaosTmqConf()
-conf.set("group.id", "tg2")
-conf.set("td.connect.user", "root")
-conf.set("td.connect.pass", "taosdata")
-conf.set("enable.auto.commit", "true")
-
-
-def tmq_commit_cb_print(tmq, resp, offset, param=None):
-    print(f"commit: {resp}, tmq: {tmq}, offset: {offset}, param: {param}")
-
-
-conf.set_auto_commit_cb(tmq_commit_cb_print, None)
-tmq = conf.new_consumer()
-
-print("build topic list")
-
-topic_list = TaosTmqList()
-topic_list.append("topic_ctb_column")
-
-print("basic consume loop")
-tmq.subscribe(topic_list)
-
-sub_list = tmq.subscription()
-
-print("subscribed topics: ", sub_list)
-
-while 1:
-    res = tmq.poll(1000)
-    if res:
-        topic = res.get_topic_name()
-        vg = res.get_vgroup_id()
-        db = res.get_db_name()
-        print(f"topic: {topic}\nvgroup id: {vg}\ndb: {db}")
-        for row in res:
-            print(row)
+from taos.tmq import Consumer
+import taos
+
+
+def init_tmq_env(db, topic):
+    conn = taos.connect()
+    conn.execute("drop topic if exists {}".format(topic))
+    conn.execute("drop database if exists {}".format(db))
+    conn.execute("create database if not exists {}".format(db))
+    conn.select_db(db)
+    conn.execute(
+        "create stable if not exists stb1 (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))")
+    conn.execute("create table if not exists tb1 using stb1 tags(1, 't1')")
+    conn.execute("create table if not exists tb2 using stb1 tags(2, 't2')")
+    conn.execute("create table if not exists tb3 using stb1 tags(3, 't3')")
+    conn.execute("create topic if not exists {} as select ts, c1, c2, c3 from stb1".format(topic))
+    conn.execute("insert into tb1 values (now, 1, 1.0, 'tmq test')")
+    conn.execute("insert into tb2 values (now, 2, 2.0, 'tmq test')")
+    conn.execute("insert into tb3 values (now, 3, 3.0, 'tmq test')")
+
+
+def cleanup(db, topic):
+    conn = taos.connect()
+    conn.execute("drop topic if exists {}".format(topic))
+    conn.execute("drop database if exists {}".format(db))
+
+
+if __name__ == '__main__':
+    init_tmq_env("tmq_test", "tmq_test_topic")  # init env
+    consumer = Consumer(
+        {
+            "group.id": "tg2",
+            "td.connect.user": "root",
+            "td.connect.pass": "taosdata",
+            "enable.auto.commit": "true",
+        }
+    )
+    consumer.subscribe(["tmq_test_topic"])
+
+    try:
+        while True:
+            res = consumer.poll(1)
+            if not res:
+                break
+            err = res.error()
+            if err is not None:
+                raise err
+            val = res.value()
+
+            for block in val:
+                print(block.fetchall())
+    finally:
+        consumer.unsubscribe()
+        consumer.close()
+        cleanup("tmq_test", "tmq_test_topic")
@@ -0,0 +1,31 @@
+#!/usr/bin/python3
+from taosws import Consumer
+
+conf = {
+    "td.connect.websocket.scheme": "ws",
+    "group.id": "0",
+}
+consumer = Consumer(conf)
+
+consumer.subscribe(["test"])
+
+while True:
+    message = consumer.poll(timeout=1.0)
+    if message:
+        id = message.vgroup()
+        topic = message.topic()
+        database = message.database()
+
+        for block in message:
+            nrows = block.nrows()
+            ncols = block.ncols()
+            for row in block:
+                print(row)
+            values = block.fetchall()
+            print(nrows, ncols)
+
+        # consumer.commit(message)
+    else:
+        break
+
+consumer.close()
@@ -4,6 +4,7 @@ description: 'Set up a TDengine environment quickly and experience its efficient writes and queries'
---

import xiaot from './xiaot.webp'
+import xiaot_new from './xiaot-new.webp'
import channel from './channel.webp'
import official_account from './official-account.webp'

@@ -35,13 +36,13 @@ The TDengine knowledge map covers TDengine's various knowledge points and reveals the rela

<table width="100%">
  <tr align="center">
-    <td style={{padding:'1em 3em',border:0}}><img src={xiaot} alt="QR code of Xiao T" width="200" /></td>
+    <td style={{padding:'1em 3em',border:0}}><img src={xiaot_new} alt="QR code of Xiao T" width="200" /></td>
    <td style={{padding:'1em 3em',border:0}}><img src={channel} alt="TDengine WeChat video channel" width="200" /></td>
    <td style={{padding:'1em 3em',border:0}}><img src={official_account} alt="TDengine WeChat official account" width="200" /></td>
  </tr>
  <tr align="center">
-    <td style={{padding:'1em 3em',border:0}}>Join the "IoT Big Data Technology Frontier" group<br/>to exchange technical ideas</td>
-    <td style={{padding:'1em 3em',border:0}}>Follow the TDengine WeChat video channel<br/>to watch live streams and tutorial videos</td>
-    <td style={{padding:'1em 3em',border:0}}>Follow the TDengine WeChat official account<br/>to read articles on core technology and industry cases</td>
+    <td style={{padding:'1em 3em',border:0}}>Join the "IoT Big Data Technology" group<br/>to exchange technical ideas</td>
+    <td style={{padding:'1em 3em',border:0}}>Follow the TDengine video channel<br/>to watch live streams and tutorial videos</td>
+    <td style={{padding:'1em 3em',border:0}}>Follow the TDengine official account<br/>to read technical articles and industry cases</td>
  </tr>
</table>
Binary file not shown. (New image added: 112 KiB.)
@@ -37,7 +37,7 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
- All data in tag_set is automatically converted to the NCHAR data type;
- Each data item in field_set must describe its own data type; for example, 1.2f32 denotes the FLOAT value 1.2, and a value without a type suffix is treated as DOUBLE;
- timestamp supports multiple time precisions. The precision is specified by a parameter at write time; six precisions from hour down to nanosecond are supported.
-- To improve write efficiency, it is assumed by default that the order of field_set within a supertable is fixed (the first record contains all fields and subsequent records follow that order). If the order differs, set the smlDataFormat parameter to false; otherwise the data is written in the assumed order and the database contents will be abnormal. (smlDataFormat defaults to false in versions after 3.0.1.3) [TDengine Schemaless Writing Reference](/reference/schemaless/#无模式写入行协议)
+- To improve write efficiency, it is assumed by default that the order of field_set within a supertable is fixed (the first record contains all fields and subsequent records follow that order). If the order differs, set the smlDataFormat parameter to false; otherwise the data is written in the assumed order and the database contents will be abnormal. (smlDataFormat defaults to false in versions after 3.0.1.3 and is deprecated since 3.0.3.0) [TDengine Schemaless Writing Reference](/reference/schemaless/#无模式写入行协议)
- By default, the generated child table name is a unique ID derived by rule. You can also set the smlChildTableName parameter in taos.cfg to use a tag value as the child table name; the tag value must be globally unique. For example, given a tag named tname and smlChildTableName=tname configured, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates the child table cpu1. Note that if multiple rows share the same tname but different tag_set values, the tag_set of the first row used for automatic table creation applies and the others are ignored. [TDengine Schemaless Writing Reference](/reference/schemaless/#无模式写入行协议)
:::
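To make the notes above concrete, a minimal sketch of a line-protocol write (assuming the `taospy` native connector, which exposes `schemaless_insert` with the `SmlProtocol`/`SmlPrecision` enums; the database name is a hypothetical placeholder):

```python
import taos

# Write one InfluxDB-line-protocol row with a nanosecond timestamp.
conn = taos.connect()
conn.execute("CREATE DATABASE IF NOT EXISTS sml_demo")
conn.select_db("sml_demo")
lines = [
    "meters,location=California.LosAngeles,groupid=2 "
    "current=11.8,voltage=221,phase=0.28 1648432611249000000"
]
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL,
                       taos.SmlPrecision.NANO_SECONDS)
conn.close()
```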
|
@ -92,22 +92,21 @@ void close() throws SQLException;
|
|||
<TabItem value="Python" label="Python">
|
||||
|
||||
```python
|
||||
class TaosConsumer():
|
||||
def __init__(self, *topics, **configs)
|
||||
class Consumer:
|
||||
def subscribe(self, topics):
|
||||
pass
|
||||
|
||||
def __iter__(self)
|
||||
def unsubscribe(self):
|
||||
pass
|
||||
|
||||
def __next__(self)
|
||||
def poll(self, timeout: float = 1.0):
|
||||
pass
|
||||
|
||||
def sync_next(self)
|
||||
|
||||
def subscription(self)
|
||||
def close(self):
|
||||
pass
|
||||
|
||||
def unsubscribe(self)
|
||||
|
||||
def close(self)
|
||||
|
||||
def __del__(self)
|
||||
def commit(self, message):
|
||||
pass
|
||||
```
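A compact sketch tying the five methods above together (the topic name `tmq_demo_topic` is a hypothetical placeholder that must already exist):

```python
from taos.tmq import Consumer

consumer = Consumer({"group.id": "demo", "td.connect.user": "root",
                     "td.connect.pass": "taosdata"})
consumer.subscribe(["tmq_demo_topic"])
try:
    res = consumer.poll(1)  # wait up to one second for a message
    if res and res.error() is None:
        for block in res.value():
            print(block.fetchall())
        consumer.commit(res)  # manual commit of the polled message
finally:
    consumer.unsubscribe()
    consumer.close()
```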
</TabItem>

@@ -115,19 +114,22 @@ class TaosConsumer():
<TabItem label="Go" value="Go">

```go
-func NewConsumer(conf *Config) (*Consumer, error)
+func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)

-func (c *Consumer) Close() error
+// The rebalanceCb parameter is retained for compatibility; it is currently unused
+func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error

-func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error
+// The rebalanceCb parameter is retained for compatibility; it is currently unused
+func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error

-func (c *Consumer) FreeMessage(message unsafe.Pointer)
+func (c *Consumer) Poll(timeoutMs int) tmq.Event

-func (c *Consumer) Poll(timeout time.Duration) (*Result, error)
-
-func (c *Consumer) Subscribe(topics []string) error
+// The tmq.TopicPartition parameter is retained for compatibility; it is currently unused
+func (c *Consumer) Commit() ([]tmq.TopicPartition, error)

func (c *Consumer) Unsubscribe() error

+func (c *Consumer) Close() error
```

</TabItem>
@@ -355,50 +357,20 @@ public class MetersDeserializer extends ReferenceDeserializer<Meters> {
<TabItem label="Go" value="Go">

```go
-config := tmq.NewConfig()
-defer config.Destroy()
-err = config.SetGroupID("test")
-if err != nil {
-	panic(err)
-}
-err = config.SetAutoOffsetReset("earliest")
-if err != nil {
-	panic(err)
-}
-err = config.SetConnectIP("127.0.0.1")
-if err != nil {
-	panic(err)
-}
-err = config.SetConnectUser("root")
-if err != nil {
-	panic(err)
-}
-err = config.SetConnectPass("taosdata")
-if err != nil {
-	panic(err)
-}
-err = config.SetConnectPort("6030")
-if err != nil {
-	panic(err)
-}
-err = config.SetMsgWithTableName(true)
-if err != nil {
-	panic(err)
-}
-err = config.EnableHeartBeat()
-if err != nil {
-	panic(err)
-}
-err = config.EnableAutoCommit(func(result *wrapper.TMQCommitCallbackResult) {
-	if result.ErrCode != 0 {
-		errStr := wrapper.TMQErr2Str(result.ErrCode)
-		err := errors.NewError(int(result.ErrCode), errStr)
-		panic(err)
-	}
-})
-if err != nil {
-	panic(err)
-}
+conf := &tmq.ConfigMap{
+	"group.id":                     "test",
+	"auto.offset.reset":            "earliest",
+	"td.connect.ip":                "127.0.0.1",
+	"td.connect.user":              "root",
+	"td.connect.pass":              "taosdata",
+	"td.connect.port":              "6030",
+	"client.id":                    "test_tmq_c",
+	"enable.auto.commit":           "false",
+	"enable.heartbeat.background":  "true",
+	"experimental.snapshot.enable": "true",
+	"msg.with.table.name":          "true",
+}
+consumer, err := NewConsumer(conf)
```

</TabItem>
@@ -420,34 +392,33 @@ let mut consumer = tmq.build()?;

<TabItem value="Python" label="Python">

-In Python, import the `TaosConsumer` class from the `taos` package to create a consumer instance:
+In Python, import the `Consumer` class from the `taos` package to create a consumer instance:

```python
-from taos.tmq import TaosConsumer
+from taos.tmq import Consumer

-# Syntax: `consumer = TaosConsumer(*topics, **args)`
+# Syntax: `consumer = Consumer(configs)`
#
# Example:
-consumer = TaosConsumer('topic1', 'topic2', td_connect_ip = "127.0.0.1", group_id = "local")
+consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
```

-Tuple arguments are treated as *topics*, and dict arguments are used for the following subscription settings:
+Here `configs` is a dict carrying the parameters for creating the consumer. The configurable parameters are listed below; a usage sketch follows the table.

-| Parameter | Type | Description | Notes |
-| :---: | :---: | --- | --- |
-| `td_connect_ip` | string | Used to create the connection; same as `taos_connect` | |
-| `td_connect_user` | string | Used to create the connection; same as `taos_connect` | |
-| `td_connect_pass` | string | Used to create the connection; same as `taos_connect` | |
-| `td_connect_port` | string | Used to create the connection; same as `taos_connect` | |
-| `group_id` | string | Consumer group ID; consumers in the same group share progress | **Required.** Maximum length: 192. |
-| `client_id` | string | Client ID | Maximum length: 192. |
-| `auto_offset_reset` | string | Initial position of the consumer group subscription | Options: `earliest` (default), `latest`, `none` |
-| `enable_auto_commit` | string | Enables automatic commits | Valid values: `true`, `false`; default true |
-| `auto_commit_interval_ms` | string | Automatic commit interval in milliseconds | Default: 5000 ms |
-| `enable_heartbeat_background` | string | Enables background heartbeats so the consumer does not go offline even if it does not poll for a long time | Valid values: `true`, `false` |
-| `experimental_snapshot_enable` | string | Whether data may be consumed from the TSDB | Valid values: `true`, `false` |
-| `msg_with_table_name` | string | Whether table names may be parsed from messages; not applicable to column subscriptions | Valid values: `true`, `false` |
-| `timeout` | int | Timeout for the consumer to fetch data | |
+| Parameter | Type | Description | Notes |
+|:------:|:----:|:-------:|:---:|
+| `td.connect.ip` | string | Used to create the connection ||
+| `td.connect.user` | string | Used to create the connection ||
+| `td.connect.pass` | string | Used to create the connection ||
+| `td.connect.port` | string | Used to create the connection ||
+| `group.id` | string | Consumer group ID; consumers in the same group share progress | **Required.** Maximum length: 192 |
+| `client.id` | string | Client ID | Maximum length: 192 |
+| `msg.with.table.name` | string | Whether table names may be parsed from messages; not applicable to column subscriptions | Valid values: `true`, `false` |
+| `enable.auto.commit` | string | Enables automatic commits | Valid values: `true`, `false` |
+| `auto.commit.interval.ms` | string | Automatic commit interval in milliseconds | Default: 5000 ms |
+| `auto.offset.reset` | string | Initial position of the consumer group subscription | Options: `earliest` (default), `latest`, `none` |
+| `experimental.snapshot.enable` | string | Whether data may be consumed from the TSDB | Valid values: `true`, `false` |
+| `enable.heartbeat.background` | string | Enables background heartbeats so the consumer does not go offline even if it does not poll for a long time | Valid values: `true`, `false` |
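A sketch exercising several of the parameters from the table above (values are illustrative; `group.id` is the only required key):

```python
from taos.tmq import Consumer

consumer = Consumer({
    "group.id": "local",                # required; shared progress
    "client.id": "demo-consumer",
    "td.connect.ip": "127.0.0.1",
    "td.connect.user": "root",
    "td.connect.pass": "taosdata",
    "enable.auto.commit": "true",
    "auto.commit.interval.ms": "5000",  # the default, shown explicitly
    "auto.offset.reset": "earliest",
    "msg.with.table.name": "true",
})
```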
</TabItem>
@@ -532,11 +503,7 @@ consumer.subscribe(topics);
<TabItem value="Go" label="Go">

```go
-consumer, err := tmq.NewConsumer(config)
-if err != nil {
-	panic(err)
-}
-err = consumer.Subscribe([]string{"example_tmq_topic"})
+err = consumer.Subscribe("example_tmq_topic", nil)
if err != nil {
	panic(err)
}
```

@@ -554,7 +521,7 @@ consumer.subscribe(["tmq_meters"]).await?;
<TabItem value="Python" label="Python">

```python
-consumer = TaosConsumer('topic_ctb_column', group_id='vg2')
+consumer.subscribe(['topic1', 'topic2'])
```

</TabItem>
@@ -620,13 +587,17 @@ while(running){

```go
for {
-	result, err := consumer.Poll(time.Second)
-	if err != nil {
-		panic(err)
-	}
-	fmt.Println(result)
-	consumer.Commit(context.Background(), result.Message)
-	consumer.FreeMessage(result.Message)
+	ev := consumer.Poll(0)
+	if ev != nil {
+		switch e := ev.(type) {
+		case *tmqcommon.DataMessage:
+			fmt.Println(e.Value())
+		case tmqcommon.Error:
+			fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e)
+			panic(e)
+		}
+		consumer.Commit()
+	}
}
```
@@ -669,9 +640,17 @@ for {
<TabItem value="Python" label="Python">

```python
-for msg in consumer:
-    for row in msg:
-        print(row)
+while True:
+    res = consumer.poll(100)
+    if not res:
+        continue
+    err = res.error()
+    if err is not None:
+        raise err
+    val = res.value()
+
+    for block in val:
+        print(block.fetchall())
```

</TabItem>
@@ -738,7 +717,11 @@ consumer.close();
<TabItem value="Go" label="Go">

```go
-consumer.Close()
+/* Unsubscribe */
+_ = consumer.Unsubscribe()
+
+/* Close consumer */
+_ = consumer.Close()
```

</TabItem>
@@ -15,7 +15,7 @@ import GoOpenTSDBTelnet from "../07-develop/03-insert-data/_go_opts_telnet.mdx"
import GoOpenTSDBJson from "../07-develop/03-insert-data/_go_opts_json.mdx"
import GoQuery from "../07-develop/04-query-data/_go.mdx"

-`driver-go` is the official Go connector for TDengine. It implements the interfaces of the Go [ database/sql ](https://golang.org/pkg/database/sql/) package. Go developers can use it to develop applications that access TDengine cluster data.
+`driver-go` is the official Go connector for TDengine. It implements the interfaces of the Go [database/sql](https://golang.org/pkg/database/sql/) package. Go developers can use it to develop applications that access TDengine cluster data.

`driver-go` provides two ways to establish a connection. One is the **native connection**, which connects to a running TDengine instance natively through the TDengine client driver (taosc) and supports data writes, queries, subscriptions, the schemaless interface, and the parameter binding interface. The other is the **REST connection**, which connects through the REST interface provided by taosAdapter; its feature set differs slightly from that of the native connection.

@@ -112,6 +112,7 @@ The REST connection supports all platforms that can run Go.

```text
username:password@protocol(address)/dbname?param=value
+```

### Connecting with the connector

<Tabs defaultValue="rest">

@@ -176,6 +177,7 @@ func main() {
	}
}
+```

</TabItem>
<TabItem value="WebSocket" label="WebSocket connection">

@@ -207,6 +209,7 @@ func main() {
	}
}
+```

</TabItem>
</Tabs>
|
|||
|
||||
#### 订阅
|
||||
|
||||
* `func NewConsumer(conf *Config) (*Consumer, error)`
|
||||
* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)`
|
||||
|
||||
创建消费者。
|
||||
创建消费者。
|
||||
|
||||
* `func (c *Consumer) Subscribe(topics []string) error`
|
||||
* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error`
|
||||
注意:出于兼容目的保留 `rebalanceCb` 参数,当前未使用
|
||||
|
||||
订阅主题。
|
||||
订阅单个主题。
|
||||
|
||||
* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)`
|
||||
* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error`
|
||||
注意:出于兼容目的保留 `rebalanceCb` 参数,当前未使用
|
||||
|
||||
轮询消息。
|
||||
订阅主题。
|
||||
|
||||
* `func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error`
|
||||
* `func (c *Consumer) Poll(timeoutMs int) tmq.Event`
|
||||
|
||||
提交消息。
|
||||
轮询消息。
|
||||
|
||||
* `func (c *Consumer) FreeMessage(message unsafe.Pointer)`
|
||||
* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)`
|
||||
注意:出于兼容目的保留 `tmq.TopicPartition` 参数,当前未使用
|
||||
|
||||
释放消息。
|
||||
|
||||
* `func (c *Consumer) Unsubscribe() error`
|
||||
|
||||
取消订阅。
|
||||
提交消息。
|
||||
|
||||
* `func (c *Consumer) Close() error`
|
||||
|
||||
关闭消费者。
|
||||
关闭连接。
|
||||
|
||||
#### schemaless
|
||||
|
||||
|
@ -443,25 +445,32 @@ func main() {
|
|||
|
||||
### 通过 WebSocket 订阅
|
||||
|
||||
* `func NewConsumer(config *Config) (*Consumer, error)`
|
||||
* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)`
|
||||
|
||||
创建消费者。
|
||||
创建消费者。
|
||||
|
||||
* `func (c *Consumer) Subscribe(topic []string) error`
|
||||
* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error`
|
||||
注意:出于兼容目的保留 `rebalanceCb` 参数,当前未使用
|
||||
|
||||
订阅主题。
|
||||
订阅单个主题。
|
||||
|
||||
* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)`
|
||||
* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error`
|
||||
注意:出于兼容目的保留 `rebalanceCb` 参数,当前未使用
|
||||
|
||||
轮询消息。
|
||||
订阅主题。
|
||||
|
||||
* `func (c *Consumer) Commit(messageID uint64) error`
|
||||
* `func (c *Consumer) Poll(timeoutMs int) tmq.Event`
|
||||
|
||||
提交消息。
|
||||
轮询消息。
|
||||
|
||||
* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)`
|
||||
注意:出于兼容目的保留 `tmq.TopicPartition` 参数,当前未使用
|
||||
|
||||
提交消息。
|
||||
|
||||
* `func (c *Consumer) Close() error`
|
||||
|
||||
关闭消费者。
|
||||
关闭连接。
|
||||
|
||||
完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go)
|
||||
|
||||
|
|
|
@@ -32,7 +32,7 @@ The source code of the Python connector is hosted on [GitHub](https://github.com/taosdata/taos-con

### Preparation

-1. Install Python. Python >= 3.7 is recommended. If Python is not yet available on your system, see the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
+1. Install Python. Recent taospy packages require Python 3.6+, earlier taospy packages require Python 3.7+, and the taos-ws-py package requires Python 3.7+. If Python is not yet available on your system, see the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
2. Install [pip](https://pypi.org/project/pip/). In most cases the Python installer ships with pip; if not, see the [pip documentation](https://pip.pypa.io/en/stable/installation/).
3. For a native connection, also [install the client driver](../#安装客户端驱动). The client package contains the TDengine client dynamic library (libtaos.so or taos.dll) and the TDengine CLI.

@@ -78,6 +78,23 @@ pip3 install git+https://github.com/taosdata/taos-connector-python.git
</TabItem>
</Tabs>

+#### Install `taos-ws-py` (optional)
+
+The taos-ws-py package provides the ability to connect to TDengine over WebSocket; install it optionally to enable WebSocket connectivity.
+
+##### Install together with taospy
+
+```bash
+pip3 install taospy[ws]
+```
+
+##### Install standalone
+
+```bash
+pip3 install taos-ws-py
+```
+
### Installation verification

<Tabs defaultValue="rest">

@@ -306,6 +323,30 @@ The TaosCursor class uses the native connection for writes and queries. In a multi-threaded cli
</TabItem>
</Tabs>

+### Data subscription
+
+The connector supports data subscription; see [Data Subscription](../../develop/tmq/).
+
+<Tabs defaultValue="native">
+<TabItem value="native" label="Native connection">
+
+The `Consumer` class provides the Python connector API for subscribing to TMQ data; for the API definitions, see the [data subscription documentation](../../develop/tmq/#%E4%B8%BB%E8%A6%81%E6%95%B0%E6%8D%AE%E7%BB%93%E6%9E%84%E5%92%8C-api).
+
+```python
+{{#include docs/examples/python/tmq_example.py}}
+```
+</TabItem>
+
+<TabItem value="rest" label="WebSocket connection">
+
+Besides the native connection, the Python connector also supports subscribing to TMQ data over WebSocket.
+
+```python
+{{#include docs/examples/python/tmq_websocket_example.py}}
+```
+</TabItem>
+</Tabs>
+
### Other sample programs

| Sample program link | Content |

@@ -314,7 +355,7 @@ The TaosCursor class uses the native connection for writes and queries. In a multi-threaded cli
| [bind_row.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-row.py) | Parameter binding, one row at a time |
| [insert_lines.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/insert-lines.py) | InfluxDB line protocol writes |
| [json_tag.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/json-tag.py) | Using JSON type tags |
-| [tmq.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/tmq.py) | TMQ subscription |
+| [tmq_consumer.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/tmq_consumer.py) | TMQ subscription |

## Other notes
@ -17,7 +17,7 @@ import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx"
|
|||
|
||||
`TDengine.Connector` 是 TDengine 提供的 C# 语言连接器。C# 开发人员可以通过它开发存取 TDengine 集群数据的 C# 应用软件。
|
||||
|
||||
`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、数据订阅、schemaless 数据写入、参数绑定接口数据写入等功能。 `TDengine.Connector` 还支持 WebSocket,通过 DSN 建立 WebSocket 连接,提供数据写入、查询、参数绑定接口数据写入等功能。
|
||||
`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、数据订阅、schemaless 数据写入、参数绑定接口数据写入等功能。 `TDengine.Connector` 自 v3.0.1 起还支持 WebSocket,通过 DSN 建立 WebSocket 连接,提供数据写入、查询、参数绑定接口数据写入等功能。
|
||||
|
||||
本文介绍如何在 Linux 或 Windows 环境中安装 `TDengine.Connector`,并通过 `TDengine.Connector` 连接 TDengine 集群,进行数据写入、查询等基本操作。
|
||||
|
||||
|
@ -67,30 +67,45 @@ import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx"
|
|||
* [Nuget 客户端](https://docs.microsoft.com/en-us/nuget/install-nuget-client-tools) (可选安装)
|
||||
* 安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动)
|
||||
|
||||
### 使用 dotnet CLI 安装
|
||||
### 安装 TDengine.Connector
|
||||
|
||||
<Tabs defaultValue="CLI">
|
||||
<TabItem value="CLI" label="使用 dotnet CLI 获取 C# 驱动">
|
||||
<TabItem value="CLI" label="使用本地连接">
|
||||
|
||||
可以在当前 .NET 项目的路径下,通过 dotnet 命令引用 Nuget 中发布的 `TDengine.Connector` 到当前项目。
|
||||
可以在当前 .NET 项目的路径下,通过 dotnet CLI 添加 Nuget package `TDengine.Connector` 到当前项目。
|
||||
|
||||
``` bash
|
||||
dotnet add package TDengine.Connector
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="source" label="使用源码获取 C# 驱动">
|
||||
也可以修改当前项目的 `.csproj` 文件,添加如下 ItemGroup。
|
||||
|
||||
也可以[下载源码](https://github.com/taosdata/taos-connector-dotnet/tree/3.0),直接引用 TDengine.Connector 库
|
||||
|
||||
```bash
|
||||
git clone -b 3.0 https://github.com/taosdata/taos-connector-dotnet.git
|
||||
cd taos-connector-dotnet
|
||||
cp -r src/ myProject
|
||||
|
||||
cd myProject
|
||||
dotnet add exmaple.csproj reference src/TDengine.csproj
|
||||
``` XML
|
||||
<ItemGroup>
|
||||
<PackageReference Include="TDengine.Connector" Version="3.0.*" />
|
||||
</ItemGroup>
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="source" label="使用 WebSocket 连接">
|
||||
|
||||
需要修改目标项目的 `.csproj` 项目文件,将 `.nupkg` 中的 `runtimes` 目录中的动态库复制到当前项目的 `$(OutDir)` 目录下。
|
||||
|
||||
```XML
|
||||
<ItemGroup>
|
||||
<PackageReference Include="TDengine.Connector" Version="3.0.*" GeneratePathProperty="true" />
|
||||
</ItemGroup>
|
||||
<Target Name="copyDLLDepency" BeforeTargets="BeforeBuild">
|
||||
<ItemGroup>
|
||||
<DepDLLFiles Include="$(PkgTDengine_Connector)\runtimes\**\*.*" />
|
||||
</ItemGroup>
|
||||
<Copy SourceFiles="@(DepDLLFiles)" DestinationFolder="$(OutDir)" />
|
||||
</Target>
|
||||
```
|
||||
|
||||
注意:`TDengine.Connector` 自 3.0.2 版本起的 NuGet package 中才包含动态库(taosws.dll、libtaosws.so)。
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
|
@@ -148,9 +163,9 @@ namespace TDengineExample
|
|||
|
||||
各部分意义见下表:
|
||||
|
||||
* **protocol**: 显示指定以何种方式建立连接,例如:`ws://localhost:6041` 指定以 Websocket 方式建立连接(支持http/ws)。
|
||||
* **protocol**: 显式指定以何种方式建立连接,例如:`ws://localhost:6041` 指定以 Websocket 方式建立连接(支持 http/ws)。
|
||||
|
||||
* **username/password**: 用于创建连接的用户名及密码(默认`root/taosdata`)。
|
||||
* **username/password**: 用于创建连接的用户名及密码(默认 `root/taosdata` )。
|
||||
|
||||
* **host/port**: 指定创建连接的服务器及端口,WebSocket 连接默认为 `localhost:6041` 。
|
||||
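For illustration, a few DSNs that fit the fields described in this hunk (host names and credentials are placeholders):

```text
ws://localhost:6041
ws://root:taosdata@localhost:6041
http://root:taosdata@cluster.example.com:6041
```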
|
||||
|
@@ -253,19 +268,20 @@ namespace TDengineExample
|
|||
|
||||
|示例程序 | 示例程序描述 |
|
||||
|--------------------------------------------------------------------------------------------------------------------|--------------------------------------------|
|
||||
| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/Query/Query.cs) | 使用 TDengine.Connector 实现的建表、插入、查询示例 |
|
||||
| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/JSONTag) | 使用 TDengine.Connector 实现的写入和查询 JSON tag 类型数据的示例 |
|
||||
| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/Stmt) | 使用 TDengine.Connector 实现的参数绑定插入和查询的示例 |
|
||||
| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/schemaless) | 使用 TDengine.Connector 实现的使用 schemaless 写入的示例 |
|
||||
| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/AsyncQuery/QueryAsync.cs) | 使用 TDengine.Connector 实现的异步查询的示例 |
|
||||
| [数据订阅(TMQ)](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | 使用 TDengine.Connector 实现的订阅数据的示例 |
|
||||
| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSample.cs) | 使用 TDengine.Connector 的 WebSocket 基本的示例 |
|
||||
| [Basic WebSocket STMT](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSTMT.cs) | 使用 TDengine.Connector 的 WebSocket STMT 基本的示例 |
|
||||
| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/Query/Query.cs) | 使用 TDengine.Connector 实现的建表、插入、查询示例 |
|
||||
| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/JSONTag) | 使用 TDengine.Connector 实现的写入和查询 JSON tag 类型数据的示例 |
|
||||
| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/NET6Examples/Stmt) | 使用 TDengine.Connector 实现的参数绑定插入和查询的示例 |
|
||||
| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/schemaless) | 使用 TDengine.Connector 实现的使用 schemaless 写入的示例 |
|
||||
| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/AsyncQuery/QueryAsync.cs) | 使用 TDengine.Connector 实现的异步查询的示例 |
|
||||
| [数据订阅(TMQ)](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/TMQ/TMQ.cs) | 使用 TDengine.Connector 实现的订阅数据的示例 |
|
||||
| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/FrameWork45/WS/WebSocketSample.cs) | 使用 TDengine.Connector 的 WebSocket 基本的示例 |
|
||||
| [Basic WebSocket STMT](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/FrameWork45/WS/WebSocketSTMT.cs) | 使用 TDengine.Connector 的 WebSocket STMT 基本的示例 |
|
||||
|
||||
## 重要更新记录
|
||||
|
||||
| TDengine.Connector | 说明 |
|
||||
|--------------------|--------------------------------|
|
||||
| 3.0.2 | 支持 .NET Framework 4.5 及以上,支持 .NET standard 2.0。Nuget Package 包含 WebSocket 动态库。 |
|
||||
| 3.0.1 | 支持 WebSocket 和 Cloud,查询,插入,参数绑定。 |
|
||||
| 3.0.0 | 支持 TDengine 3.0.0.0,不兼容 2.x。新增接口TDengine.Impl.GetData(),解析查询结果。 |
|
||||
| 1.0.7 | 修复 TDengine.Query()内存泄露。 |
|
||||
|
|
|
@@ -60,7 +60,7 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器
|
|||
| **连接管理** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||
| **普通查询** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||
| **参数绑定** | 暂不支持 | 暂不支持 | 支持 | 支持 | 暂不支持 | 支持 |
|
||||
| **数据订阅(TMQ)** | 暂不支持 | 暂不支持 | 支持 | 暂不支持 | 暂不支持 | 支持 |
|
||||
| **数据订阅(TMQ)** | 暂不支持 | 支持 | 支持 | 暂不支持 | 暂不支持 | 支持 |
|
||||
| **Schemaless** | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 |
|
||||
| **批量拉取(基于 WebSocket)** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||
| **DataFrame** | 不支持 | 支持 | 不支持 | 不支持 | 不支持 | 不支持 |
|
||||
|
|
|
@@ -4,7 +4,7 @@ title: 使用 Helm 部署 TDengine 集群
|
|||
description: 使用 Helm 部署 TDengine 集群的详细指南
|
||||
---
|
||||
|
||||
Helm 是 Kubernetes 的包管理器,上一节使用 Kubernets 部署 TDengine 集群的操作已经足够简单,但 Helm 依然可以提供更强大的能力。
|
||||
Helm 是 Kubernetes 的包管理器,上一节使用 Kubernetes 部署 TDengine 集群的操作已经足够简单,但 Helm 依然可以提供更强大的能力。
|
||||
|
||||
## 安装 Helm
|
||||
|
||||
|
@@ -23,7 +23,7 @@ Helm 会使用 kubectl 和 kubeconfig 的配置来操作 Kubernetes,可以参
|
|||
TDengine Chart 尚未发布到 Helm 仓库,当前可以从 GitHub 直接下载:
|
||||
|
||||
```bash
|
||||
wget https://github.com/taosdata/TDengine-Operator/raw/3.0/helm/tdengine-3.0.0.tgz
|
||||
wget https://github.com/taosdata/TDengine-Operator/raw/3.0/helm/tdengine-3.0.2.tgz
|
||||
|
||||
```
|
||||
|
||||
|
@@ -39,7 +39,7 @@ kubectl get storageclass
|
|||
之后,使用 helm 命令安装:
|
||||
|
||||
```bash
|
||||
helm install tdengine tdengine-3.0.0.tgz \
|
||||
helm install tdengine tdengine-3.0.2.tgz \
|
||||
--set storage.className=<your storage class name>
|
||||
|
||||
```
|
||||
|
@@ -47,7 +47,7 @@ helm install tdengine tdengine-3.0.0.tgz \
|
|||
在 minikube 环境下,可以设置一个较小的容量避免超出磁盘可用空间:
|
||||
|
||||
```bash
|
||||
helm install tdengine tdengine-3.0.0.tgz \
|
||||
helm install tdengine tdengine-3.0.2.tgz \
|
||||
--set storage.className=standard \
|
||||
--set storage.dataSize=2Gi \
|
||||
--set storage.logSize=10Mi
|
||||
|
@@ -84,14 +84,14 @@ TDengine 支持 `values.yaml` 自定义。
|
|||
通过 helm show values 可以获取 TDengine Chart 支持的全部 values 列表:
|
||||
|
||||
```bash
|
||||
helm show values tdengine-3.0.0.tgz
|
||||
helm show values tdengine-3.0.2.tgz
|
||||
|
||||
```
|
||||
|
||||
你可以将结果保存为 values.yaml,之后可以修改其中的各项参数,如 replica 数量,存储类名称,容量大小,TDengine 配置等,然后使用如下命令安装 TDengine 集群:
|
||||
|
||||
```bash
|
||||
helm install tdengine tdengine-3.0.0.tgz -f values.yaml
|
||||
helm install tdengine tdengine-3.0.2.tgz -f values.yaml
|
||||
|
||||
```
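
The save step mentioned above is just a shell redirect; a minimal sketch of the full round trip:

```bash
helm show values tdengine-3.0.2.tgz > values.yaml
# edit values.yaml: replica count, storage class, capacity, TDengine parameters, ...
helm install tdengine tdengine-3.0.2.tgz -f values.yaml
```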
|
||||
|
||||
|
@@ -108,7 +108,7 @@ image:
|
|||
prefix: tdengine/tdengine
|
||||
#pullPolicy: Always
|
||||
# Overrides the image tag whose default is the chart appVersion.
|
||||
# tag: "3.0.0.0"
|
||||
# tag: "3.0.2.0"
|
||||
|
||||
service:
|
||||
# ClusterIP is the default service type, use NodeIP only if you know what you are doing.
|
||||
|
@@ -156,15 +156,15 @@ clusterDomainSuffix: ""
|
|||
# See the variable list at https://www.taosdata.com/cn/documentation/administrator .
|
||||
#
|
||||
# Note:
|
||||
# 1. firstEp/secondEp: should not be setted here, it's auto generated at scale-up.
|
||||
# 2. serverPort: should not be setted, we'll use the default 6030 in many places.
|
||||
# 3. fqdn: will be auto generated in kubenetes, user should not care about it.
|
||||
# 1. firstEp/secondEp: should not be set here, it's auto generated at scale-up.
|
||||
# 2. serverPort: should not be set, we'll use the default 6030 in many places.
|
||||
# 3. fqdn: will be auto generated in kubernetes, user should not care about it.
|
||||
# 4. role: currently role is not supported - every node is able to be mnode and vnode.
|
||||
#
|
||||
# Btw, keep quotes "" around the value like below, even the value will be number or not.
|
||||
taoscfg:
|
||||
# Starts as cluster or not, must be 0 or 1.
|
||||
# 0: all pods will start as a seperate TDengine server
|
||||
# 0: all pods will start as a separate TDengine server
|
||||
# 1: pods will start as TDengine server cluster. [default]
|
||||
CLUSTER: "1"
|
||||
|
||||
|
|
|
@@ -30,8 +30,10 @@ database_option: {
|
|||
| WAL_LEVEL {1 | 2}
|
||||
| VGROUPS value
|
||||
| SINGLE_STABLE {0 | 1}
|
||||
| STT_TRIGGER value
|
||||
| TABLE_PREFIX value
|
||||
| TABLE_SUFFIX value
|
||||
| TSDB_PAGESIZE value
|
||||
| WAL_RETENTION_PERIOD value
|
||||
| WAL_ROLL_PERIOD value
|
||||
| WAL_RETENTION_SIZE value
|
||||
|
@@ -56,7 +58,7 @@ database_option: {
|
|||
- WAL_FSYNC_PERIOD:当 WAL 参数设置为 2 时,落盘的周期。默认为 3000,单位毫秒。最小为 0,表示每次写入立即落盘;最大为 180000,即三分钟。
|
||||
- MAXROWS:文件块中记录的最大条数,默认为 4096 条。
|
||||
- MINROWS:文件块中记录的最小条数,默认为 100 条。
|
||||
- KEEP:表示数据文件保存的天数,缺省值为 3650,取值范围 [1, 365000],且必须大于或等于 DURATION 参数值。数据库会自动删除保存时间超过 KEEP 值的数据。KEEP 可以使用加单位的表示形式,如 KEEP 100h、KEEP 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。也可以不写单位,如 KEEP 50,此时默认单位为天。
|
||||
- KEEP:表示数据文件保存的天数,缺省值为 3650,取值范围 [1, 365000],且必须大于或等于 DURATION 参数值。数据库会自动删除保存时间超过 KEEP 值的数据。KEEP 可以使用加单位的表示形式,如 KEEP 100h、KEEP 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。也可以不写单位,如 KEEP 50,此时默认单位为天。企业版支持[多级存储](https://docs.taosdata.com/tdinternal/arch/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8)功能, 因此, 可以设置多个保存时间(多个以英文逗号分隔,最多 3 个,满足 keep 0 <= keep 1 <= keep 2,如 KEEP 100h,100d,3650d); 社区版不支持多级存储功能(即使配置了多个保存时间, 也不会生效, KEEP 会取最大的保存时间)。
|
||||
- PAGES:一个 VNODE 中元数据存储引擎的缓存页个数,默认为 256,最小 64。一个 VNODE 元数据存储占用 PAGESIZE \* PAGES,默认情况下为 1MB 内存。
|
||||
- PAGESIZE:一个 VNODE 中元数据存储引擎的页大小,单位为 KB,默认为 4 KB。范围为 1 到 16384,即 1 KB 到 16 MB。
|
||||
- PRECISION:数据库的时间戳精度。ms 表示毫秒,us 表示微秒,ns 表示纳秒,默认 ms 毫秒。
|
||||
|
@@ -69,8 +71,10 @@ database_option: {
|
|||
- SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表,用于超级表列非常多的情况。
|
||||
- 0:表示可以创建多张超级表。
|
||||
- 1:表示只可以创建一张超级表。
|
||||
- STT_TRIGGER:表示落盘文件触发文件合并的个数。默认为 1,范围 1 到 16。对于少表高频场景,此参数建议使用默认配置,或较小的值;而对于多表低频场景,此参数建议配置较大的值。
|
||||
- TABLE_PREFIX:内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的前缀的长度。
|
||||
- TABLE_SUFFIX:内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的后缀的长度。
|
||||
- TSDB_PAGESIZE:一个 VNODE 中时序数据存储引擎的页大小,单位为 KB,默认为 4 KB。范围为 1 到 16384,即 1 KB 到 16 MB。
|
||||
- WAL_RETENTION_PERIOD:wal 文件的额外保留策略,用于数据订阅。wal 的保存时长,单位为 s。单副本默认为 0,即落盘后立即删除。-1 表示不删除。多副本默认为 4 天。
|
||||
- WAL_RETENTION_SIZE:wal 文件的额外保留策略,用于数据订阅。wal 的保存的最大上限,单位为 KB。单副本默认为 0,即落盘后立即删除。多副本默认为-1,表示不删除。
|
||||
- WAL_ROLL_PERIOD:wal 文件切换时长,单位为 s。当 wal 文件创建并写入后,经过该时间,会自动创建一个新的 wal 文件。单副本默认为 0,即仅在落盘时创建新文件。多副本默认为 1 天。
|
||||
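To make the options above concrete, a hypothetical statement combining several of them (the database name and values are illustrative; the multi-value KEEP form requires the enterprise multi-tier storage noted earlier):

```sql
CREATE DATABASE power
  DURATION 10d
  KEEP 30d,365d,3650d
  STT_TRIGGER 8
  TSDB_PAGESIZE 8
  WAL_RETENTION_PERIOD 86400;
```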
|
@@ -112,6 +116,10 @@ alter_database_options:
|
|||
alter_database_option: {
|
||||
CACHEMODEL {'none' | 'last_row' | 'last_value' | 'both'}
|
||||
| CACHESIZE value
|
||||
| BUFFER value
|
||||
| PAGES value
|
||||
| REPLICA value
|
||||
| STT_TRIGGER value
|
||||
| WAL_LEVEL value
|
||||
| WAL_FSYNC_PERIOD value
|
||||
| KEEP value
|
||||
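For context, altering the newly listed options looks like this (the database name and values are illustrative):

```sql
ALTER DATABASE power STT_TRIGGER 4;
ALTER DATABASE power WAL_FSYNC_PERIOD 3000;
ALTER DATABASE power KEEP 3650;
```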
|
@@ -154,3 +162,19 @@ TRIM DATABASE db_name;
|
|||
```
|
||||
|
||||
删除过期数据,并根据多级存储的配置归整数据。
|
||||
|
||||
## 调整 VGROUP 中 VNODE 的分布
|
||||
|
||||
```sql
|
||||
REDISTRIBUTE VGROUP vgroup_no DNODE dnode_id1 [DNODE dnode_id2] [DNODE dnode_id3]
|
||||
```
|
||||
|
||||
按照给定的 dnode 列表,调整 vgroup 中的 vnode 分布。因为副本数目最大为 3,所以最多输入 3 个 dnode。
|
||||
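For example, assuming vgroup 5 and dnodes 1-3 exist, a three-replica vgroup could be redistributed as follows (the IDs are hypothetical):

```sql
REDISTRIBUTE VGROUP 5 DNODE 1 DNODE 2 DNODE 3;
```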
|
||||
## 自动调整 VGROUP 中 VNODE 的分布
|
||||
|
||||
```sql
|
||||
BALANCE VGROUP
|
||||
```
|
||||
|
||||
自动调整集群所有 vgroup 中的 vnode 分布,相当于在 vnode 级别对集群进行数据的负载均衡操作。
|
|
@@ -350,9 +350,9 @@ SELECT AVG(CASE WHEN voltage < 200 or voltage > 250 THEN 220 ELSE voltage END) F
|
|||
|
||||
## JOIN 子句
|
||||
|
||||
TDengine 支持“普通表与普通表之间”、“超级表与超级表之间”、“子查询与子查询之间” 进行自然连接。自然连接与内连接的主要区别是,自然连接要求参与连接的字段在不同的表/超级表中必须是同名字段。也即,TDengine 在连接关系的表达中,要求必须使用同名数据列/标签列的相等关系。
|
||||
TDengine 支持基于时间戳主键的内连接,即 JOIN 条件必须包含时间戳主键。只要满足基于时间戳主键这个要求,普通表、子表、超级表和子查询之间可以随意的进行内连接,且对表个数没有限制。
|
||||
|
||||
在普通表与普通表之间的 JOIN 操作中,只能使用主键时间戳之间的相等关系。例如:
|
||||
普通表与普通表之间的 JOIN 操作:
|
||||
|
||||
```sql
|
||||
SELECT *
|
||||
|
@@ -360,7 +360,7 @@ FROM temp_tb_1 t1, pressure_tb_1 t2
|
|||
WHERE t1.ts = t2.ts
|
||||
```
|
||||
|
||||
在超级表与超级表之间的 JOIN 操作中,除了主键时间戳一致的条件外,还要求引入能实现一一对应的标签列的相等关系。例如:
|
||||
超级表与超级表之间的 JOIN 操作:
|
||||
|
||||
```sql
|
||||
SELECT *
|
||||
|
@@ -368,21 +368,16 @@ FROM temp_stable t1, temp_stable t2
|
|||
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
|
||||
```
|
||||
|
||||
子表与超级表之间的 JOIN 操作:
|
||||
|
||||
```sql
|
||||
SELECT *
|
||||
FROM temp_ctable t1, temp_stable t2
|
||||
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
|
||||
```
|
||||
|
||||
类似地,也可以对多个子查询的查询结果进行 JOIN 操作。
|
||||
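A sketch of such a join between two subqueries, assuming the `meters` supertable used elsewhere in these docs; the join condition still goes through the timestamp primary key, here the window pseudo-column `_wstart` exposed as the first column of each subquery:

```sql
SELECT t1.avg_c, t2.avg_v
FROM (SELECT _wstart AS ts, AVG(current) AS avg_c FROM meters INTERVAL(1m)) t1,
     (SELECT _wstart AS ts, AVG(voltage) AS avg_v FROM meters INTERVAL(1m)) t2
WHERE t1.ts = t2.ts;
```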
|
||||
:::note
|
||||
|
||||
JOIN 语句存在如下限制要求:
|
||||
|
||||
- 参与一条语句中 JOIN 操作的表/超级表最多可以有 10 个。
|
||||
- 在包含 JOIN 操作的查询语句中不支持 FILL。
|
||||
- 暂不支持参与 JOIN 操作的表之间聚合后的四则运算。
|
||||
- 不支持只对其中一部分表做 GROUP BY。
|
||||
- JOIN 查询的不同表的过滤条件之间不能为 OR。
|
||||
- JOIN 查询要求连接条件不能是普通列,只能针对标签和主时间字段列(第一列)。
|
||||
|
||||
:::
|
||||
|
||||
## 嵌套查询
|
||||
|
||||
“嵌套查询”又称为“子查询”,也即在一条 SQL 语句中,“内层查询”的计算结果可以作为“外层查询”的计算对象来使用。
|
||||
|
|
|
@@ -880,7 +880,7 @@ INTERP(expr)
|
|||
- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值。
|
||||
- INTERP 只能在一个时间序列内进行插值,因此当作用于超级表时必须跟 partition by tbname 一起使用。
|
||||
- INTERP 可以与伪列 _irowts 一起使用,返回插值点所对应的时间戳(3.0.1.4版本以后支持)。
|
||||
- INTERP 可以与伪列 _isfilled 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(3.0.2.1版本以后支持)。
|
||||
- INTERP 可以与伪列 _isfilled 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(3.0.2.3版本以后支持)。
|
||||
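Putting the two pseudo-columns together, a hedged example assuming the `meters` supertable (the time range is illustrative):

```sql
SELECT _irowts, INTERP(current), _isfilled
FROM meters
PARTITION BY tbname
RANGE('2023-01-01 00:00:00', '2023-01-01 00:01:00')
EVERY(1s)
FILL(LINEAR);
```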
|
||||
### LAST
|
||||
|
||||
|
|
|
@@ -306,7 +306,7 @@ SHOW [db_name.]VGROUPS;
|
|||
## SHOW VNODES
|
||||
|
||||
```sql
|
||||
SHOW VNODES [dnode_name];
|
||||
SHOW VNODES {dnode_id | dnode_endpoint};
|
||||
```
|
||||
|
||||
显示当前系统中所有 VNODE 或某个 DNODE 的 VNODE 的信息。
|
||||
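With the revised syntax, either form below targets a single dnode (the id and endpoint are placeholders; quoting of the endpoint is an assumption):

```sql
SHOW VNODES 1;
SHOW VNODES 'localhost:6030';
```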
|
|
|
@@ -54,7 +54,6 @@ description: "TDengine 3.0 版本的语法变更说明"
|
|||
| 27 | GRANT | 新增 | 授予用户权限。
|
||||
| 28 | KILL TRANSACTION | 新增 | 终止管理节点的事务。
|
||||
| 29 | KILL STREAM | 废除 | 终止连续查询。3.0版本不再支持连续查询,而是用更通用的流计算来代替。
|
||||
| 30 | MERGE VGROUP | 新增 | 合并VGROUP。
|
||||
| 31 | REVOKE | 新增 | 回收用户权限。
|
||||
| 32 | SELECT | 调整 | <ul><li>SELECT关闭隐式结果列,输出列均需要由SELECT子句来指定。</li><li>DISTINCT功能全面支持。2.x版本只支持对标签列去重,并且不可以和JOIN、GROUP BY等子句混用。</li><li>JOIN功能增强。增加支持:JOIN后WHERE条件中有OR条件;JOIN后的多表运算;JOIN后的多表GROUP BY。</li><li>FROM后子查询功能大幅增强。不限制子查询嵌套层数;支持子查询和UNION ALL混合使用;移除其他一些之前版本的语法限制。</li><li>WHERE后可以使用任意的标量表达式。</li><li>GROUP BY功能增强。支持任意标量表达式及其组合的分组。</li><li>SESSION可以用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。</li><li>STATE_WINDOW可以用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。</li><li>ORDER BY功能大幅增强。不再必须和GROUP BY子句一起使用;不再有排序表达式个数的限制;增加支持NULLS FIRST/LAST语法功能;支持符合语法语义的任意表达式。</li><li>新增PARTITION BY语法。替代原来的GROUP BY tags。</li></ul>
|
||||
| 33 | SHOW ACCOUNTS | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
|
||||
|
@@ -76,8 +75,9 @@ description: "TDengine 3.0 版本的语法变更说明"
|
|||
| 49 | SHOW TRANSACTIONS | 新增 | 显示当前系统中正在执行的事务的信息。
|
||||
| 50 | SHOW DNODE VARIABLES | 新增 |显示指定DNODE的配置参数。
|
||||
| 51 | SHOW VNODES | 暂不支持 | 显示当前系统中VNODE的信息。3.0.0版本暂不支持。
|
||||
| 52 | SPLIT VGROUP | 新增 | 拆分VGROUP。
|
||||
| 53 | TRIM DATABASE | 新增 | 删除过期数据,并根据多级存储的配置归整数据。
|
||||
| 52 | TRIM DATABASE | 新增 | 删除过期数据,并根据多级存储的配置归整数据。
|
||||
| 53 | REDISTRIBUTE VGROUP | 新增 | 调整VGROUP中VNODE的分布。
|
||||
| 54 | BALANCE VGROUP | 新增 | 自动调整VGROUP中VNODE的分布。
|
||||
|
||||
## SQL 函数变更
|
||||
|
||||
|
|
|
@@ -134,6 +134,24 @@ taos --dump-config
|
|||
| 取值范围 | 1-200000 |
|
||||
| 缺省值 | 30 |
|
||||
|
||||
### telemetryReporting
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 是否上传 telemetry |
|
||||
| 取值范围 | 0:不上传;1:上传 |
|
||||
| 缺省值 | 1 |
|
||||
|
||||
### crashReporting
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 |是否上传 crash 信息 |
|
||||
| 取值范围 | 0,1 0: 不上传;1:上传 |
|
||||
| 缺省值 | 1 |
|
||||
|
||||
## 查询相关
|
||||
|
||||
### queryPolicy
|
||||
|
@@ -305,6 +323,7 @@ charset 的有效值是 UTF-8。
|
|||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 数据文件目录,所有的数据文件都将写入该目录 |
|
||||
| 缺省值 | /var/lib/taos |
|
||||
| 补充说明 | [多级存储](https://docs.taosdata.com/tdinternal/arch/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8) 功能需要与 [KEEP](https://docs.taosdata.com/taos-sql/database/#%E5%8F%82%E6%95%B0%E8%AF%B4%E6%98%8E) 参数配合使用 |
|
||||
|
||||
### tempDir
|
||||
|
||||
|
@@ -597,7 +616,7 @@ charset 的有效值是 UTF-8。
|
|||
| 属性 | 说明 |
|
||||
| -------- | ----------------------------- |
|
||||
| 适用范围 | 仅客户端适用 |
|
||||
| 含义 | schemaless 列数据是否顺序一致 |
|
||||
| 含义 | schemaless 列数据是否顺序一致,从 3.0.3.0 开始,该配置废弃 |
|
||||
| 值域 | 0:不一致;1: 一致 |
|
||||
| 缺省值 | 1 |
|
||||
|
||||
|
@@ -657,7 +676,7 @@ charset 的有效值是 UTF-8。
|
|||
| 20 | minimalTmpDirGB | 是 | 是 | |
|
||||
| 21 | smlChildTableName | 是 | 是 | |
|
||||
| 22 | smlTagName | 是 | 是 | |
|
||||
| 23 | smlDataFormat | 否 | 是 | |
|
||||
| 23 | smlDataFormat | 否 | 是(从 3.0.3.0 开始,该配置废弃) | |
|
||||
| 24 | statusInterval | 是 | 是 | |
|
||||
| 25 | logDir | 是 | 是 | |
|
||||
| 26 | minimalLogDirGB | 是 | 是 | |
|
||||
|
@@ -698,7 +717,7 @@ charset 的有效值是 UTF-8。
|
|||
| 2 | numOfThreadsPerCore | 是 | 否 | 有其它参数设置多种线程池的大小 |
|
||||
| 3 | numOfMnodes | 是 | 否 | 通过 create mnode 命令动态创建 mnode |
|
||||
| 4 | vnodeBak | 是 | 否 | 3.0 行为未知 |
|
||||
| 5 | balance | 是 | 否 | 负载均衡功能由 split/merge vgroups 实现 |
|
||||
| 5 | balance | 是 | 否 | 负载均衡功能由 split/merge vgroups 实现 (暂不支持) |
|
||||
| 6 | balanceInterval | 是 | 否 | 随着 balance 参数失效 |
|
||||
| 7 | offlineThreshold | 是 | 否 | 3.0 行为未知 |
|
||||
| 8 | role | 是 | 否 | 由 supportVnode 决定是否能够创建 |
|
||||
|
|
|
@@ -83,7 +83,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
|
|||
NULL。
|
||||
6. 对 BINARY 或 NCHAR 列,如果数据行中所提供值的长度超出了列类型的限制,自动增加该列允许存储的字符长度上限(只增不减),以保证数据的完整保存。
|
||||
7. 整个处理过程中遇到的错误会中断写入过程,并返回错误代码。
|
||||
8. 为了提高写入的效率,默认假设同一个超级表中 field_set 的顺序是一样的(第一条数据包含所有的 field,后面的数据按照这个顺序),如果顺序不一样,需要配置参数 smlDataFormat 为 false,否则,数据写入按照相同顺序写入,库中数据会异常。
|
||||
8. 为了提高写入的效率,默认假设同一个超级表中 field_set 的顺序是一样的(第一条数据包含所有的 field,后面的数据按照这个顺序)。如果顺序不一样,需要配置参数 smlDataFormat 为 false,否则数据将按照相同顺序写入,库中数据会异常。从 3.0.3.0 开始,TDengine 会自动检测顺序是否一致,该配置废弃。
|
||||
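To illustrate point 8, a hypothetical pair of line-protocol rows whose field order differs; before 3.0.3.0 this layout required `smlDataFormat` to be false, from 3.0.3.0 on the order is detected automatically:

```text
st,t1=3 c1=3i64,c2=false 1626006833639000000
st,t1=3 c2=true,c1=4i64 1626006833640000000
```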
|
||||
:::tip
|
||||
无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过
|
||||
|
|
|
@@ -243,3 +243,8 @@ sudo launchctl load -w /Library/LaunchDaemons/limit.maxfiles.plist
|
|||
```
|
||||
launchctl limit maxfiles
|
||||
```
|
||||
### 19 建库时提示 Out of dnode
|
||||
该提示表示创建 db 时 vnode 数量不足:所需的 vnode 数不能超过 dnode 中 vnode 的上限。系统默认一个 dnode 中可创建 CPU 核数两倍的 vnode,该上限也可以通过配置文件中的参数 supportVnodes 控制。
|
||||
正常调大 taos.cfg 中的 supportVnodes 参数即可。
|
||||
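For instance, to raise the per-dnode vnode ceiling on a machine that must host more vnodes than the CPU-derived default (the value is illustrative):

```text
# in taos.cfg
supportVnodes 256
```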
|
||||
|
||||
|
|
|
@@ -10,6 +10,14 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 3.0.2.4
|
||||
|
||||
<Release type="tdengine" version="3.0.2.4" />
|
||||
|
||||
## 3.0.2.3
|
||||
|
||||
<Release type="tdengine" version="3.0.2.3" />
|
||||
|
||||
## 3.0.2.2
|
||||
|
||||
<Release type="tdengine" version="3.0.2.2" />
|
||||
|
|
|
@@ -10,6 +10,14 @@ taosTools 各版本安装包下载链接如下:
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 2.4.2
|
||||
|
||||
<Release type="tools" version="2.4.2" />
|
||||
|
||||
## 2.4.1
|
||||
|
||||
<Release type="tools" version="2.4.1" />
|
||||
|
||||
## 2.4.0
|
||||
|
||||
<Release type="tools" version="2.4.0" />
|
||||
|
|
|
@@ -208,6 +208,7 @@ DLL_EXPORT TAOS_ROW *taos_result_block(TAOS_RES *res);
|
|||
|
||||
DLL_EXPORT const char *taos_get_server_info(TAOS *taos);
|
||||
DLL_EXPORT const char *taos_get_client_info();
|
||||
DLL_EXPORT int taos_get_current_db(TAOS *taos, char *database, int len, int *required);
|
||||
|
||||
DLL_EXPORT const char *taos_errstr(TAOS_RES *res);
|
||||
DLL_EXPORT int taos_errno(TAOS_RES *res);
|
||||
|
|
|
@@ -36,6 +36,7 @@ extern "C" {
|
|||
#define TSDB_INS_TABLE_STABLES "ins_stables"
|
||||
#define TSDB_INS_TABLE_TABLES "ins_tables"
|
||||
#define TSDB_INS_TABLE_TAGS "ins_tags"
|
||||
#define TSDB_INS_TABLE_COLS "ins_columns"
|
||||
#define TSDB_INS_TABLE_TABLE_DISTRIBUTED "ins_table_distributed"
|
||||
#define TSDB_INS_TABLE_USERS "ins_users"
|
||||
#define TSDB_INS_TABLE_LICENCES "ins_grants"
|
||||
|
|
|
@@ -162,6 +162,7 @@ typedef enum EStreamType {
|
|||
STREAM_PULL_DATA,
|
||||
STREAM_PULL_OVER,
|
||||
STREAM_FILL_OVER,
|
||||
STREAM_CREATE_CHILD_TABLE,
|
||||
} EStreamType;
|
||||
|
||||
#pragma pack(push, 1)
|
||||
|
@@ -205,8 +206,6 @@ typedef struct SDataBlockInfo {
|
|||
TSKEY watermark; // used for stream
|
||||
|
||||
char parTbName[TSDB_TABLE_NAME_LEN]; // used for stream partition
|
||||
int32_t tagLen;
|
||||
void* pTag; // used for stream partition
|
||||
} SDataBlockInfo;
|
||||
|
||||
typedef struct SSDataBlock {
|
||||
|
@@ -379,6 +378,11 @@ typedef struct SSortExecInfo {
|
|||
#define CALCULATE_END_TS_COLUMN_INDEX 5
|
||||
#define TABLE_NAME_COLUMN_INDEX 6
|
||||
|
||||
// stream create table block column
|
||||
#define UD_TABLE_NAME_COLUMN_INDEX 0
|
||||
#define UD_GROUPID_COLUMN_INDEX 1
|
||||
#define UD_TAG_COLUMN_INDEX 2
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@@ -41,6 +41,12 @@ typedef struct SBlockOrderInfo {
|
|||
BMCharPos(bm_, r_) |= (1u << (7u - BitPos(r_))); \
|
||||
} while (0)
|
||||
|
||||
#define colDataSetNull_f_s(c_, r_) \
|
||||
do { \
|
||||
colDataSetNull_f((c_)->nullbitmap, r_); \
|
||||
memset(((char*)(c_)->pData) + (c_)->info.bytes * (r_), 0, (c_)->info.bytes); \
|
||||
} while (0)
|
||||
|
||||
#define colDataClearNull_f(bm_, r_) \
|
||||
do { \
|
||||
BMCharPos(bm_, r_) &= ((char)(~(1u << (7u - BitPos(r_))))); \
|
||||
|
@@ -136,7 +142,7 @@ static FORCE_INLINE void colDataAppendNULL(SColumnInfoData* pColumnInfoData, uin
|
|||
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
|
||||
colDataSetNull_var(pColumnInfoData, currentRow); // it is a null value of VAR type.
|
||||
} else {
|
||||
colDataSetNull_f(pColumnInfoData->nullbitmap, currentRow);
|
||||
colDataSetNull_f_s(pColumnInfoData, currentRow);
|
||||
}
|
||||
|
||||
pColumnInfoData->hasNull = true;
|
||||
|
@@ -151,6 +157,7 @@ static FORCE_INLINE void colDataAppendNNULL(SColumnInfoData* pColumnInfoData, ui
|
|||
for (int32_t i = start; i < start + nRows; ++i) {
|
||||
colDataSetNull_f(pColumnInfoData->nullbitmap, i);
|
||||
}
|
||||
memset(pColumnInfoData->pData + start * pColumnInfoData->info.bytes, 0, pColumnInfoData->info.bytes * nRows);
|
||||
}
|
||||
|
||||
pColumnInfoData->hasNull = true;
|
||||
|
@@ -231,7 +238,6 @@ int32_t blockDataSort_rv(SSDataBlock* pDataBlock, SArray* pOrderInfo, bool nullF
|
|||
|
||||
int32_t colInfoDataEnsureCapacity(SColumnInfoData* pColumn, uint32_t numOfRows, bool clearPayload);
|
||||
int32_t blockDataEnsureCapacity(SSDataBlock* pDataBlock, uint32_t numOfRows);
|
||||
int32_t blockDataEnsureCapacityNoClear(SSDataBlock* pDataBlock, uint32_t numOfRows);
|
||||
|
||||
void colInfoDataCleanup(SColumnInfoData* pColumn, uint32_t numOfRows);
|
||||
void blockDataCleanup(SSDataBlock* pDataBlock);
|
||||
|
|
|
@@ -265,7 +265,13 @@ struct STag {
|
|||
|
||||
// STSchema ================================
|
||||
STSchema *tBuildTSchema(SSchema *aSchema, int32_t numOfCols, int32_t version);
|
||||
void tDestroyTSchema(STSchema *pTSchema);
|
||||
#define tDestroyTSchema(pTSchema) \
|
||||
do { \
|
||||
if (pTSchema) { \
|
||||
taosMemoryFree(pTSchema); \
|
||||
pTSchema = NULL; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#endif
|
||||
|
||||
|
|
|
@@ -69,6 +69,9 @@ extern int32_t tsElectInterval;
|
|||
extern int32_t tsHeartbeatInterval;
|
||||
extern int32_t tsHeartbeatTimeout;
|
||||
|
||||
// vnode
|
||||
extern int64_t tsVndCommitMaxIntervalMs;
|
||||
|
||||
// monitor
|
||||
extern bool tsEnableMonitor;
|
||||
extern int32_t tsMonitorInterval;
|
||||
|
@@ -82,6 +85,10 @@ extern bool tsEnableTelem;
|
|||
extern int32_t tsTelemInterval;
|
||||
extern char tsTelemServer[];
|
||||
extern uint16_t tsTelemPort;
|
||||
extern bool tsEnableCrashReport;
|
||||
extern char* tsTelemUri;
|
||||
extern char* tsClientCrashReportUri;
|
||||
extern char* tsSvrCrashReportUri;
|
||||
|
||||
// query buffer management
|
||||
extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing
|
||||
|
@@ -134,8 +141,8 @@ extern char tsUdfdLdLibPath[];
|
|||
// schemaless
|
||||
extern char tsSmlChildTableName[];
|
||||
extern char tsSmlTagName[];
|
||||
extern bool tsSmlDataFormat;
|
||||
extern int32_t tsSmlBatchSize;
|
||||
//extern bool tsSmlDataFormat;
|
||||
//extern int32_t tsSmlBatchSize;
|
||||
|
||||
// wal
|
||||
extern int64_t tsWalFsyncDataSizeLimit;
|
||||
|
|
|
@@ -115,6 +115,7 @@ typedef enum _mgmt_table {
|
|||
TSDB_MGMT_TABLE_STREAMS,
|
||||
TSDB_MGMT_TABLE_TABLE,
|
||||
TSDB_MGMT_TABLE_TAG,
|
||||
TSDB_MGMT_TABLE_COL,
|
||||
TSDB_MGMT_TABLE_USER,
|
||||
TSDB_MGMT_TABLE_GRANTS,
|
||||
TSDB_MGMT_TABLE_VGROUP,
|
||||
|
@@ -343,7 +344,8 @@ void tFreeSSubmitRsp(SSubmitRsp* pRsp);
|
|||
#define COL_IS_SET(FLG) (((FLG) & (COL_SET_VAL | COL_SET_NULL)) != 0)
|
||||
#define COL_CLR_SET(FLG) ((FLG) &= (~(COL_SET_VAL | COL_SET_NULL)))
|
||||
|
||||
#define IS_BSMA_ON(s) (((s)->flags & 0x01) == COL_SMA_ON)
|
||||
#define IS_BSMA_ON(s) (((s)->flags & 0x01) == COL_SMA_ON)
|
||||
#define IS_SET_NULL(s) (((s)->flags & COL_SET_NULL) == COL_SET_NULL)
|
||||
|
||||
#define SSCHMEA_TYPE(s) ((s)->type)
|
||||
#define SSCHMEA_FLAGS(s) ((s)->flags)
|
||||
|
@@ -381,6 +383,13 @@ static FORCE_INLINE void tDeleteSSchemaWrapper(SSchemaWrapper* pSchemaWrapper) {
|
|||
}
|
||||
}
|
||||
|
||||
static FORCE_INLINE void tDeleteSSchemaWrapperForHash(void* pSchemaWrapper) {
|
||||
if (pSchemaWrapper != NULL && *(SSchemaWrapper**)pSchemaWrapper != NULL) {
|
||||
taosMemoryFree((*(SSchemaWrapper**)pSchemaWrapper)->pSchema);
|
||||
taosMemoryFree(*(SSchemaWrapper**)pSchemaWrapper);
|
||||
}
|
||||
}
|
||||
|
||||
static FORCE_INLINE int32_t taosEncodeSSchema(void** buf, const SSchema* pSchema) {
|
||||
int32_t tlen = 0;
|
||||
tlen += taosEncodeFixedI8(buf, pSchema->type);
|
||||
|
@@ -905,6 +914,7 @@ typedef struct {
|
|||
int32_t numOfRetensions;
|
||||
SArray* pRetensions;
|
||||
int8_t schemaless;
|
||||
int16_t sstTrigger;
|
||||
} SDbCfgRsp;
|
||||
|
||||
int32_t tSerializeSDbCfgRsp(void* buf, int32_t bufLen, const SDbCfgRsp* pRsp);
|
||||
|
@@ -1392,6 +1402,7 @@ typedef struct {
|
|||
char db[TSDB_DB_FNAME_LEN];
|
||||
char tb[TSDB_TABLE_NAME_LEN];
|
||||
char user[TSDB_USER_LEN];
|
||||
char filterTb[TSDB_TABLE_NAME_LEN];
|
||||
int64_t showId;
|
||||
} SRetrieveTableReq;
|
||||
|
||||
|
@@ -1754,6 +1765,12 @@ typedef struct {
|
|||
#define STREAM_CREATE_STABLE_TRUE 1
|
||||
#define STREAM_CREATE_STABLE_FALSE 0
|
||||
|
||||
typedef struct SColLocation {
|
||||
int16_t slotId;
|
||||
col_id_t colId;
|
||||
int8_t type;
|
||||
} SColLocation;
|
||||
|
||||
typedef struct {
|
||||
char name[TSDB_STREAM_FNAME_LEN];
|
||||
char sourceDB[TSDB_DB_FNAME_LEN];
|
||||
|
@@ -1771,7 +1788,9 @@ typedef struct {
|
|||
// 3.0.20
|
||||
int64_t checkpointFreq; // ms
|
||||
// 3.0.2.3
|
||||
int8_t createStb;
|
||||
int8_t createStb;
|
||||
uint64_t targetStbUid;
|
||||
SArray* fillNullCols; // array of SColLocation
|
||||
} SCMCreateStreamReq;
|
||||
|
||||
typedef struct {
|
||||
|
|
|
@@ -39,7 +39,7 @@ typedef enum {
|
|||
QUEUE_MAX,
|
||||
} EQueueType;
|
||||
|
||||
typedef int32_t (*UpdateDnodeInfoFp)(void* pData, int32_t* dnodeId, int64_t* clusterId, char* fqdn, uint16_t* port);
|
||||
typedef bool (*UpdateDnodeInfoFp)(void* pData, int32_t* dnodeId, int64_t* clusterId, char* fqdn, uint16_t* port);
|
||||
typedef int32_t (*PutToQueueFp)(void* pMgmt, EQueueType qtype, SRpcMsg* pMsg);
|
||||
typedef int32_t (*GetQueueSizeFp)(void* pMgmt, int32_t vgId, EQueueType qtype);
|
||||
typedef int32_t (*SendReqFp)(const SEpSet* pEpSet, SRpcMsg* pMsg);
|
||||
|
@@ -70,7 +70,8 @@ void tmsgSendRsp(SRpcMsg* pMsg);
|
|||
void tmsgRegisterBrokenLinkArg(SRpcMsg* pMsg);
|
||||
void tmsgReleaseHandle(SRpcHandleInfo* pHandle, int8_t type);
|
||||
void tmsgReportStartup(const char* name, const char* desc);
|
||||
int32_t tmsgUpdateDnodeInfo(int32_t* dnodeId, int64_t* clusterId, char* fqdn, uint16_t* port);
|
||||
bool tmsgUpdateDnodeInfo(int32_t* dnodeId, int64_t* clusterId, char* fqdn, uint16_t* port);
|
||||
void tmsgUpdateDnodeEpSet(SEpSet* epset);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@@ -78,7 +78,7 @@ typedef struct {
|
|||
|
||||
// output
|
||||
char* ctbShortName; // must have size of TSDB_TABLE_NAME_LEN;
|
||||
uint64_t uid; // child table uid, may be useful
|
||||
// uint64_t uid; // child table uid, may be useful
|
||||
} RandTableName;
|
||||
|
||||
void buildChildTableName(RandTableName* rName);
|
||||
|
|
|
@@ -343,8 +343,8 @@ typedef struct tDataTypeDescriptor {
|
|||
extern tDataTypeDescriptor tDataTypes[TSDB_DATA_TYPE_MAX];
|
||||
bool isValidDataType(int32_t type);
|
||||
|
||||
int32_t operateVal(void *dst, void *s1, void *s2, int32_t optr, int32_t type);
|
||||
void assignVal(char *val, const char *src, int32_t len, int32_t type);
|
||||
void operateVal(void *dst, void *s1, void *s2, int32_t optr, int32_t type);
|
||||
void *getDataMin(int32_t type, void* value);
|
||||
void *getDataMax(int32_t type, void* value);
|
||||
|
||||
|
|
|
@@ -154,6 +154,8 @@ void qCleanExecTaskBlockBuf(qTaskInfo_t tinfo);
|
|||
*/
|
||||
int32_t qAsyncKillTask(qTaskInfo_t tinfo, int32_t rspCode);
|
||||
|
||||
bool qTaskIsExecuting(qTaskInfo_t qinfo);
|
||||
|
||||
/**
|
||||
* destroy query info structure
|
||||
* @param qHandle
|
||||
|
|
|
@@ -370,7 +370,8 @@ int32_t tDecodeSStreamTask(SDecoder* pDecoder, SStreamTask* pTask);
|
|||
void tFreeSStreamTask(SStreamTask* pTask);
|
||||
|
||||
static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem* pItem) {
|
||||
if (pItem->type == STREAM_INPUT__DATA_SUBMIT) {
|
||||
int8_t type = pItem->type;
|
||||
if (type == STREAM_INPUT__DATA_SUBMIT) {
|
||||
SStreamDataSubmit2* pSubmitClone = streamSubmitRefClone((SStreamDataSubmit2*)pItem);
|
||||
if (pSubmitClone == NULL) {
|
||||
qDebug("task %d %p submit enqueue failed since out of memory", pTask->taskId, pTask);
|
||||
|
@@ -382,19 +383,19 @@ static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem
|
|||
pSubmitClone->submit.msgStr, pSubmitClone->submit.msgLen, pSubmitClone->submit.ver);
|
||||
taosWriteQitem(pTask->inputQueue->queue, pSubmitClone);
|
||||
// qStreamInput(pTask->exec.executor, pSubmitClone);
|
||||
} else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE ||
|
||||
pItem->type == STREAM_INPUT__REF_DATA_BLOCK) {
|
||||
} else if (type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__DATA_RETRIEVE ||
|
||||
type == STREAM_INPUT__REF_DATA_BLOCK) {
|
||||
taosWriteQitem(pTask->inputQueue->queue, pItem);
|
||||
// qStreamInput(pTask->exec.executor, pItem);
|
||||
} else if (pItem->type == STREAM_INPUT__CHECKPOINT) {
|
||||
} else if (type == STREAM_INPUT__CHECKPOINT) {
|
||||
taosWriteQitem(pTask->inputQueue->queue, pItem);
|
||||
// qStreamInput(pTask->exec.executor, pItem);
|
||||
} else if (pItem->type == STREAM_INPUT__GET_RES) {
|
||||
} else if (type == STREAM_INPUT__GET_RES) {
|
||||
taosWriteQitem(pTask->inputQueue->queue, pItem);
|
||||
// qStreamInput(pTask->exec.executor, pItem);
|
||||
}
|
||||
|
||||
if (pItem->type != STREAM_INPUT__GET_RES && pItem->type != STREAM_INPUT__CHECKPOINT && pTask->triggerParam != 0) {
|
||||
if (type != STREAM_INPUT__GET_RES && type != STREAM_INPUT__CHECKPOINT && pTask->triggerParam != 0) {
|
||||
atomic_val_compare_exchange_8(&pTask->triggerStatus, TASK_TRIGGER_STATUS__INACTIVE, TASK_TRIGGER_STATUS__ACTIVE);
|
||||
}
|
||||
|
||||
|
|
|
@@ -49,10 +49,12 @@ extern "C" {
|
|||
#define SYNC_HEARTBEAT_REPLY_SLOW_MS 1500
|
||||
#define SYNC_SNAP_RESEND_MS 1000 * 60
|
||||
|
||||
#define SYNC_VND_COMMIT_MIN_MS 1000
|
||||
|
||||
#define SYNC_MAX_BATCH_SIZE 1
|
||||
#define SYNC_INDEX_BEGIN 0
|
||||
#define SYNC_INDEX_INVALID -1
|
||||
#define SYNC_TERM_INVALID -1 // 0xFFFFFFFFFFFFFFFF
|
||||
#define SYNC_TERM_INVALID -1
|
||||
|
||||
typedef enum {
|
||||
SYNC_STRATEGY_NO_SNAPSHOT = 0,
|
||||
|
@@ -191,7 +193,7 @@ typedef struct SSyncLogStore {
|
|||
SyncIndex (*syncLogLastIndex)(struct SSyncLogStore* pLogStore);
|
||||
SyncTerm (*syncLogLastTerm)(struct SSyncLogStore* pLogStore);
|
||||
|
||||
int32_t (*syncLogAppendEntry)(struct SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry);
|
||||
int32_t (*syncLogAppendEntry)(struct SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry, bool forcSync);
|
||||
int32_t (*syncLogGetEntry)(struct SSyncLogStore* pLogStore, SyncIndex index, SSyncRaftEntry** ppEntry);
|
||||
int32_t (*syncLogTruncate)(struct SSyncLogStore* pLogStore, SyncIndex fromIndex);
|
||||
|
||||
|
@@ -232,6 +234,7 @@ int64_t syncOpen(SSyncInfo* pSyncInfo);
|
|||
int32_t syncStart(int64_t rid);
|
||||
void syncStop(int64_t rid);
|
||||
void syncPreStop(int64_t rid);
|
||||
void syncPostStop(int64_t rid);
|
||||
int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak, int64_t* seq);
|
||||
int32_t syncProcessMsg(int64_t rid, SRpcMsg* pMsg);
|
||||
int32_t syncReconfig(int64_t rid, SSyncCfg* pCfg);
|
||||
|
|
|
@@ -24,7 +24,7 @@ extern "C" {
|
|||
|
||||
typedef enum { HTTP_GZIP, HTTP_FLAT } EHttpCompFlag;
|
||||
|
||||
int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32_t contLen, EHttpCompFlag flag);
|
||||
int32_t taosSendHttpReport(const char* server, const char* uri, uint16_t port, char* pCont, int32_t contLen, EHttpCompFlag flag);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@@ -201,6 +201,7 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead);
|
|||
int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead);
|
||||
int32_t walSkipFetchBody(SWalReader *pRead, const SWalCkHead *pHead);
|
||||
|
||||
SWalRef *walRefFirstVer(SWal *, SWalRef *);
|
||||
SWalRef *walRefCommittedVer(SWal *);
|
||||
|
||||
SWalRef *walOpenRef(SWal *);
|
||||
|
|
|
@@ -46,27 +46,73 @@ void taosSetTerminalMode();
|
|||
int32_t taosGetOldTerminalMode();
|
||||
void taosResetTerminalMode();
|
||||
|
||||
#define STACKSIZE 100
|
||||
|
||||
#if !defined(WINDOWS)
|
||||
#define taosPrintTrace(flags, level, dflag) \
|
||||
{ \
|
||||
void* array[100]; \
|
||||
int32_t size = backtrace(array, 100); \
|
||||
char** strings = backtrace_symbols(array, size); \
|
||||
if (strings != NULL) { \
|
||||
taosPrintLog(flags, level, dflag, "obtained %d stack frames", size); \
|
||||
for (int32_t i = 0; i < size; i++) { \
|
||||
taosPrintLog(flags, level, dflag, "frame:%d, %s", i, strings[i]); \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
taosMemoryFree(strings); \
|
||||
#define taosLogTraceToBuf(buf, bufSize, ignoreNum) { \
|
||||
void* array[STACKSIZE]; \
|
||||
int32_t size = backtrace(array, STACKSIZE); \
|
||||
char** strings = backtrace_symbols(array, size); \
|
||||
int32_t offset = 0; \
|
||||
if (strings != NULL) { \
|
||||
offset = snprintf(buf, bufSize - 1, "obtained %d stack frames\n", (ignoreNum > 0) ? size - ignoreNum : size); \
|
||||
for (int32_t i = (ignoreNum > 0) ? ignoreNum : 0; i < size; i++) { \
|
||||
offset += snprintf(buf + offset, bufSize - 1 - offset, "frame:%d, %s\n", (ignoreNum > 0) ? i - ignoreNum : i, strings[i]); \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
taosMemoryFree(strings); \
|
||||
}
|
||||
|
||||
#define taosPrintTrace(flags, level, dflag, ignoreNum) \
|
||||
{ \
|
||||
void* array[STACKSIZE]; \
|
||||
int32_t size = backtrace(array, STACKSIZE); \
|
||||
char** strings = backtrace_symbols(array, size); \
|
||||
if (strings != NULL) { \
|
||||
taosPrintLog(flags, level, dflag, "obtained %d stack frames", (ignoreNum > 0) ? size - ignoreNum : size); \
|
||||
for (int32_t i = (ignoreNum > 0) ? ignoreNum : 0; i < size; i++) { \
|
||||
taosPrintLog(flags, level, dflag, "frame:%d, %s", (ignoreNum > 0) ? i - ignoreNum : i, strings[i]); \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
taosMemoryFree(strings); \
|
||||
}
|
||||
#else
|
||||
|
||||
#include <windows.h>
|
||||
#include <dbghelp.h>
|
||||
|
||||
#define STACKSIZE 64
|
||||
#define taosPrintTrace(flags, level, dflag) \
|
||||
#define taosLogTraceToBuf(buf, bufSize, ignoreNum) { \
|
||||
unsigned int i; \
|
||||
void* stack[STACKSIZE]; \
|
||||
unsigned short frames; \
|
||||
SYMBOL_INFO* symbol; \
|
||||
HANDLE process; \
|
||||
int32_t offset = 0; \
|
||||
\
|
||||
process = GetCurrentProcess(); \
|
||||
\
|
||||
SymInitialize(process, NULL, TRUE); \
|
||||
\
|
||||
frames = CaptureStackBackTrace(0, STACKSIZE, stack, NULL); \
|
||||
symbol = (SYMBOL_INFO*)calloc(sizeof(SYMBOL_INFO) + 256 * sizeof(char), 1); \
|
||||
if (symbol != NULL) { \
|
||||
symbol->MaxNameLen = 255; \
|
||||
symbol->SizeOfStruct = sizeof(SYMBOL_INFO); \
|
||||
\
|
||||
if (frames > 0) { \
|
||||
offset = snprintf(buf, bufSize - 1, "obtained %d stack frames\n", (ignoreNum > 0) ? frames - ignoreNum : frames); \
|
||||
for (i = (ignoreNum > 0) ? ignoreNum : 0; i < frames; i++) { \
|
||||
SymFromAddr(process, (DWORD64)(stack[i]), 0, symbol); \
|
||||
offset += snprintf(buf + offset, bufSize - 1 - offset, "frame:%i, %s - 0x%0X\n", (ignoreNum > 0) ? i - ignoreNum : i, symbol->Name, symbol->Address); \
|
||||
} \
|
||||
} \
|
||||
free(symbol); \
|
||||
} \
|
||||
}
|
||||
|
||||
#define taosPrintTrace(flags, level, dflag, ignoreNum) \
|
||||
{ \
|
||||
unsigned int i; \
|
||||
void* stack[STACKSIZE]; \
|
||||
|
@@ -85,10 +131,10 @@ void taosResetTerminalMode();
|
|||
symbol->SizeOfStruct = sizeof(SYMBOL_INFO); \
|
||||
\
|
||||
if (frames > 0) { \
|
||||
taosPrintLog(flags, level, dflag, "obtained %d stack frames", frames); \
|
||||
for (i = 0; i < frames; i++) { \
|
||||
taosPrintLog(flags, level, dflag, "obtained %d stack frames\n", (ignoreNum > 0) ? frames - ignoreNum : frames); \
|
||||
for (i = (ignoreNum > 0) ? ignoreNum : 0; i < frames; i++) { \
|
||||
SymFromAddr(process, (DWORD64)(stack[i]), 0, symbol); \
|
||||
taosPrintLog(flags, level, dflag, "frame:%i: %s - 0x%0X", frames - i - 1, symbol->Name, symbol->Address); \
|
||||
taosPrintLog(flags, level, dflag, "frame:%i, %s - 0x%0X\n", (ignoreNum > 0) ? i - ignoreNum : i, symbol->Name, symbol->Address); \
|
||||
} \
|
||||
} \
|
||||
free(symbol); \
|
||||
|
|
|
@@ -159,6 +159,7 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_TSC_NO_EXEC_NODE TAOS_DEF_ERROR_CODE(0, 0X022E)
|
||||
#define TSDB_CODE_TSC_NOT_STABLE_ERROR TAOS_DEF_ERROR_CODE(0, 0X022F)
|
||||
#define TSDB_CODE_TSC_STMT_CACHE_ERROR TAOS_DEF_ERROR_CODE(0, 0X0230)
|
||||
#define TSDB_CODE_TSC_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0X0231)
|
||||
|
||||
// mnode-common
|
||||
// #define TSDB_CODE_MND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0300) // 2.x
|
||||
|
@@ -414,6 +415,7 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_VND_NO_AVAIL_BUFPOOL TAOS_DEF_ERROR_CODE(0, 0x0528)
|
||||
#define TSDB_CODE_VND_STOPPED TAOS_DEF_ERROR_CODE(0, 0x0529)
|
||||
#define TSDB_CODE_VND_DUP_REQUEST TAOS_DEF_ERROR_CODE(0, 0x0530)
|
||||
#define TSDB_CODE_VND_QUERY_BUSY TAOS_DEF_ERROR_CODE(0, 0x0531)
|
||||
|
||||
// tsdb
|
||||
#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600)
|
||||
|
@@ -700,6 +702,7 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_SML_INVALID_DATA TAOS_DEF_ERROR_CODE(0, 0x3002)
|
||||
#define TSDB_CODE_SML_INVALID_DB_CONF TAOS_DEF_ERROR_CODE(0, 0x3003)
|
||||
#define TSDB_CODE_SML_NOT_SAME_TYPE TAOS_DEF_ERROR_CODE(0, 0x3004)
|
||||
#define TSDB_CODE_SML_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x3005)
|
||||
|
||||
//tsma
|
||||
#define TSDB_CODE_TSMA_INIT_FAILED TAOS_DEF_ERROR_CODE(0, 0x3100)
|
||||
|
|
|
@@ -500,7 +500,7 @@ enum {
|
|||
#define DEFAULT_PAGESIZE 4096
|
||||
|
||||
#define VNODE_TIMEOUT_SEC 60
|
||||
#define MNODE_TIMEOUT_SEC 10
|
||||
#define MNODE_TIMEOUT_SEC 60
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@@ -99,6 +99,11 @@ bool taosAssertRelease(bool condition);
|
|||
#endif
|
||||
#endif
|
||||
|
||||
void taosLogCrashInfo(char* nodeType, char* pMsg, int64_t msgLen, int signum, void *sigInfo);
|
||||
void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* pFd);
|
||||
void taosReleaseCrashLogFile(TdFilePtr pFile, bool truncateFile);
|
||||
int32_t taosGenCrashJsonMsg(int signum, char** pMsg, int64_t clusterId, int64_t startTime);
|
||||
|
||||
// clang-format off
|
||||
#define uFatal(...) { if (uDebugFlag & DEBUG_FATAL) { taosPrintLog("UTL FATAL", DEBUG_FATAL, tsLogEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }}
|
||||
#define uError(...) { if (uDebugFlag & DEBUG_ERROR) { taosPrintLog("UTL ERROR ", DEBUG_ERROR, tsLogEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }}
|
||||
|
|
|
@@ -43,6 +43,9 @@
|
|||
# Switch for allowing TDengine to collect and report service usage information
|
||||
# telemetryReporting 1
|
||||
|
||||
# Switch for allowing TDengine to collect and report crash information
|
||||
# crashReporting 1
|
||||
|
||||
# The maximum number of vnodes supported by this dnode
|
||||
# supportVnodes 0
|
||||
|
||||
|
|
|
@@ -1,319 +0,0 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Generate the deb package for ubuntu, or rpm package for centos, or tar.gz package for other linux os
|
||||
|
||||
set -e
|
||||
# set -x
|
||||
|
||||
# release.sh -v [cluster | edge]
|
||||
# -c [aarch32 | aarch64 | x64 | x86 | mips64 | loongarch64...]
|
||||
# -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
|
||||
# -V [stable | beta]
|
||||
# -l [full | lite]
|
||||
# -s [static | dynamic]
|
||||
# -d [taos | ...]
|
||||
# -n [2.0.0.3]
|
||||
# -m [2.0.0.0]
|
||||
# -H [ false | true]
|
||||
|
||||
# set parameters by default value
|
||||
verMode=edge # [cluster, edge, cloud]
|
||||
verType=stable # [stable, beta]
|
||||
cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 loongarch64...]
|
||||
osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
|
||||
pagMode=full # [full | lite]
|
||||
soMode=dynamic # [static | dynamic]
|
||||
dbName=taos # [taos | ...]
|
||||
allocator=glibc # [glibc | jemalloc]
|
||||
verNumber=""
|
||||
verNumberComp="3.0.0.0"
|
||||
httpdBuild=false
|
||||
|
||||
while getopts "hv:V:c:o:l:s:d:a:n:m:H:" arg; do
|
||||
case $arg in
|
||||
v)
|
||||
#echo "verMode=$OPTARG"
|
||||
verMode=$(echo $OPTARG)
|
||||
;;
|
||||
V)
|
||||
#echo "verType=$OPTARG"
|
||||
verType=$(echo $OPTARG)
|
||||
;;
|
||||
c)
|
||||
#echo "cpuType=$OPTARG"
|
||||
cpuType=$(echo $OPTARG)
|
||||
;;
|
||||
l)
|
||||
#echo "pagMode=$OPTARG"
|
||||
pagMode=$(echo $OPTARG)
|
||||
;;
|
||||
s)
|
||||
#echo "soMode=$OPTARG"
|
||||
soMode=$(echo $OPTARG)
|
||||
;;
|
||||
d)
|
||||
#echo "dbName=$OPTARG"
|
||||
dbName=$(echo $OPTARG)
|
||||
;;
|
||||
a)
|
||||
#echo "allocator=$OPTARG"
|
||||
allocator=$(echo $OPTARG)
|
||||
;;
|
||||
n)
|
||||
#echo "verNumber=$OPTARG"
|
||||
verNumber=$(echo $OPTARG)
|
||||
;;
|
||||
m)
|
||||
#echo "verNumberComp=$OPTARG"
|
||||
verNumberComp=$(echo $OPTARG)
|
||||
;;
|
||||
o)
|
||||
#echo "osType=$OPTARG"
|
||||
osType=$(echo $OPTARG)
|
||||
;;
|
||||
H)
|
||||
#echo "httpdBuild=$OPTARG"
|
||||
httpdBuild=$(echo $OPTARG)
|
||||
;;
|
||||
h)
|
||||
echo "Usage: $(basename $0) -v [cluster | edge] "
|
||||
echo " -c [aarch32 | aarch64 | x64 | x86 | mips64 | loongarch64 ...] "
|
||||
echo " -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] "
|
||||
echo " -V [stable | beta] "
|
||||
echo " -l [full | lite] "
|
||||
echo " -a [glibc | jemalloc] "
|
||||
echo " -s [static | dynamic] "
|
||||
echo " -d [taos | ...] "
|
||||
echo " -n [version number] "
|
||||
echo " -m [compatible version number] "
|
||||
echo " -H [false | true] "
|
||||
exit 0
|
||||
;;
|
||||
?) #unknow option
|
||||
echo "unkonw argument"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
osType=$(uname)
|
||||
|
||||
echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} allocator=${allocator} verNumber=${verNumber} verNumberComp=${verNumberComp} httpdBuild=${httpdBuild}"
|
||||
|
||||
curr_dir=$(pwd)
|
||||
|
||||
if [ "$osType" == "Darwin" ]; then
|
||||
script_dir=$(dirname $0)
|
||||
cd ${script_dir}
|
||||
script_dir="$(pwd)"
|
||||
top_dir=${script_dir}/..
|
||||
else
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -f ${script_dir}/..)"
|
||||
fi
|
||||
|
||||
csudo=""
|
||||
#if command -v sudo > /dev/null; then
|
||||
# csudo="sudo "
|
||||
#fi
|
||||
|
||||
function is_valid_version() {
|
||||
[ -z $1 ] && return 1 || :
|
||||
|
||||
rx='^([0-9]+\.){3}(\*|[0-9]+)$'
|
||||
if [[ $1 =~ $rx ]]; then
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
function vercomp() {
|
||||
if [[ $1 == $2 ]]; then
|
||||
echo 0
|
||||
exit 0
|
||||
fi
|
||||
|
||||
local IFS=.
|
||||
local i ver1=($1) ver2=($2)
|
||||
|
||||
# fill empty fields in ver1 with zeros
|
||||
for ((i = ${#ver1[@]}; i < ${#ver2[@]}; i++)); do
|
||||
ver1[i]=0
|
||||
done
|
||||
|
||||
for ((i = 0; i < ${#ver1[@]}; i++)); do
|
||||
if [[ -z ${ver2[i]} ]]; then
|
||||
# fill empty fields in ver2 with zeros
|
||||
ver2[i]=0
|
||||
fi
|
||||
if ((10#${ver1[i]} > 10#${ver2[i]})); then
|
||||
echo 1
|
||||
exit 0
|
||||
fi
|
||||
if ((10#${ver1[i]} < 10#${ver2[i]})); then
|
||||
echo 2
|
||||
exit 0
|
||||
fi
|
||||
done
|
||||
echo 0
|
||||
}
|
||||
|
||||
# 1. check version information
|
||||
if ( (! is_valid_version $verNumber) || (! is_valid_version $verNumberComp) || [[ "$(vercomp $verNumber $verNumberComp)" == '2' ]]); then
|
||||
echo "please enter correct version"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "=======================new version number: ${verNumber}, compatible version: ${verNumberComp}======================================"
|
||||
|
||||
build_time=$(date +"%F %R")
|
||||
|
||||
# get commit id from git
|
||||
gitinfo=$(git rev-parse --verify HEAD)
|
||||
|
||||
if [[ "$verMode" == "cluster" ]] || [[ "$verMode" == "cloud" ]]; then
|
||||
enterprise_dir="${top_dir}/../enterprise"
|
||||
cd ${enterprise_dir}
|
||||
gitinfoOfInternal=$(git rev-parse --verify HEAD)
|
||||
else
|
||||
gitinfoOfInternal=NULL
|
||||
fi
|
||||
|
||||
cd "${curr_dir}"
|
||||
|
||||
# 2. cmake executable file
|
||||
compile_dir="${top_dir}/debug"
|
||||
if [ -d ${compile_dir} ]; then
|
||||
rm -rf ${compile_dir}
|
||||
fi
|
||||
|
||||
mkdir -p ${compile_dir}
|
||||
cd ${compile_dir}
|
||||
|
||||
if [[ "$allocator" == "jemalloc" ]]; then
|
||||
allocator_macro="-DJEMALLOC_ENABLED=true"
|
||||
else
|
||||
allocator_macro=""
|
||||
fi
|
||||
|
||||
if [[ "$dbName" != "taos" ]]; then
|
||||
source ${enterprise_dir}/packaging/oem/sed_$dbName.sh
|
||||
replace_community_$dbName
|
||||
fi
|
||||
|
||||
if [[ "$httpdBuild" == "true" ]]; then
|
||||
BUILD_HTTP=true
|
||||
else
|
||||
BUILD_HTTP=false
|
||||
fi
|
||||
|
||||
if [[ "$verMode" == "cluster" ]] || [[ "$verMode" == "cloud" ]]; then
|
||||
BUILD_HTTP=internal
|
||||
fi
|
||||
|
||||
if [[ "$pagMode" == "full" ]]; then
|
||||
BUILD_TOOLS=true
|
||||
else
|
||||
BUILD_TOOLS=false
|
||||
fi
|
||||
|
||||
# check support cpu type
|
||||
if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "arm64" ]] || [[ "$cpuType" == "arm32" ]] || [[ "$cpuType" == "mips64" ]] || [[ "$cpuType" == "loongarch64" ]] ; then
|
||||
if [ "$verMode" == "edge" ]; then
|
||||
# community-version compile
|
||||
cmake ../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
|
||||
elif [ "$verMode" == "cloud" ]; then
|
||||
cmake ../../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DBUILD_TAOSX=true -DBUILD_CLOUD=true -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
|
||||
elif [ "$verMode" == "cluster" ]; then
|
||||
if [[ "$dbName" != "taos" ]]; then
|
||||
replace_enterprise_$dbName
|
||||
fi
|
||||
cmake ../../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DBUILD_TAOSX=true -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
|
||||
fi
|
||||
else
|
||||
echo "input cpuType=${cpuType} error!!!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
ostype=`uname`
|
||||
if [ "${ostype}" == "Darwin" ]; then
|
||||
CORES=$(sysctl -n hw.ncpu)
|
||||
else
|
||||
CORES=$(grep -c ^processor /proc/cpuinfo)
|
||||
fi
|
||||
|
||||
if [[ "$allocator" == "jemalloc" ]]; then
|
||||
# jemalloc need compile first, so disable parallel build
|
||||
make -j ${CORES} && ${csudo}make install
|
||||
else
|
||||
make -j ${CORES} && ${csudo}make install
|
||||
fi
|
||||
|
||||
cd ${curr_dir}
|
||||
|
||||
# 3. Call the corresponding script for packaging
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
if [[ "$verMode" != "cluster" ]] && [[ "$verMode" != "cloud" ]] && [[ "$pagMode" == "full" ]] && [[ "$cpuType" == "x64" ]] && [[ "$dbName" == "taos" ]]; then
|
||||
ret='0'
|
||||
command -v dpkg >/dev/null 2>&1 || { ret='1'; }
|
||||
if [ "$ret" -eq 0 ]; then
|
||||
echo "====do deb package for the ubuntu system===="
|
||||
output_dir="${top_dir}/debs"
|
||||
if [ -d ${output_dir} ]; then
|
||||
rm -rf ${output_dir}
|
||||
fi
|
||||
mkdir -p ${output_dir}
|
||||
cd ${script_dir}/deb
|
||||
${csudo}./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
|
||||
|
||||
if [[ "$pagMode" == "full" ]]; then
|
||||
if [ -d ${top_dir}/tools/taos-tools/packaging/deb ]; then
|
||||
cd ${top_dir}/tools/taos-tools/packaging/deb
|
||||
taos_tools_ver=$(git tag |grep -v taos | sort | tail -1)
|
||||
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
|
||||
|
||||
${csudo}./make-taos-tools-deb.sh ${top_dir} \
|
||||
${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType}
|
||||
fi
|
||||
fi
|
||||
else
|
||||
echo "==========dpkg command not exist, so not release deb package!!!"
|
||||
fi
|
||||
ret='0'
|
||||
command -v rpmbuild >/dev/null 2>&1 || { ret='1'; }
|
||||
if [ "$ret" -eq 0 ]; then
|
||||
echo "====do rpm package for the centos system===="
|
||||
output_dir="${top_dir}/rpms"
|
||||
if [ -d ${output_dir} ]; then
|
||||
rm -rf ${output_dir}
|
||||
fi
|
||||
mkdir -p ${output_dir}
|
||||
cd ${script_dir}/rpm
|
||||
${csudo}./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
|
||||
|
||||
if [[ "$pagMode" == "full" ]]; then
|
||||
if [ -d ${top_dir}/tools/taos-tools/packaging/rpm ]; then
|
||||
cd ${top_dir}/tools/taos-tools/packaging/rpm
|
||||
taos_tools_ver=$(git tag |grep -v taos | sort | tail -1)
|
||||
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
|
||||
|
||||
${csudo}./make-taos-tools-rpm.sh ${top_dir} \
|
||||
${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType}
|
||||
fi
|
||||
fi
|
||||
else
|
||||
echo "==========rpmbuild command not exist, so not release rpm package!!!"
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "====do tar.gz package for all systems===="
|
||||
cd ${script_dir}/tools
|
||||
|
||||
${csudo}./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${verNumberComp} ${dbName}
|
||||
${csudo}./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
|
||||
|
||||
else
|
||||
cd ${script_dir}/tools
|
||||
./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${verNumberComp} ${dbName}
|
||||
./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
|
||||
fi
|
|
@@ -481,11 +481,11 @@ function install_adapter_config() {
|
|||
${csudo}mkdir -p ${cfg_install_dir}
|
||||
[ -f ${script_dir}/cfg/${adapterName}.toml ] && ${csudo}cp ${script_dir}/cfg/${adapterName}.toml ${cfg_install_dir}
|
||||
[ -f ${cfg_install_dir}/${adapterName}.toml ] && ${csudo}chmod 644 ${cfg_install_dir}/${adapterName}.toml
|
||||
else
|
||||
[ -f ${script_dir}/cfg/${adapterName}.toml ] &&
|
||||
${csudo}cp -f ${script_dir}/cfg/${adapterName}.toml ${cfg_install_dir}/${adapterName}.toml.new
|
||||
fi
|
||||
|
||||
[ -f ${script_dir}/cfg/${adapterName}.toml ] &&
|
||||
${csudo}cp -f ${script_dir}/cfg/${adapterName}.toml ${cfg_install_dir}/${adapterName}.toml.new
|
||||
|
||||
[ -f ${cfg_install_dir}/${adapterName}.toml ] &&
|
||||
${csudo}ln -s ${cfg_install_dir}/${adapterName}.toml ${install_main_dir}/cfg/${adapterName}.toml
|
||||
|
||||
|
@@ -499,9 +499,10 @@ function install_config() {
|
|||
${csudo}mkdir -p ${cfg_install_dir}
|
||||
[ -f ${script_dir}/cfg/${configFile} ] && ${csudo}cp ${script_dir}/cfg/${configFile} ${cfg_install_dir}
|
||||
${csudo}chmod 644 ${cfg_install_dir}/*
|
||||
else
|
||||
${csudo}cp -f ${script_dir}/cfg/${configFile} ${cfg_install_dir}/${configFile}.new
|
||||
fi
|
||||
|
||||
${csudo}cp -f ${script_dir}/cfg/${configFile} ${cfg_install_dir}/${configFile}.new
|
||||
${csudo}ln -s ${cfg_install_dir}/${configFile} ${install_main_dir}/cfg
|
||||
|
||||
[ ! -z $1 ] && return 0 || : # only install client
|
||||
|
@@ -742,6 +743,34 @@ function is_version_compatible() {
|
|||
esac
|
||||
}
|
||||
|
||||
deb_erase() {
|
||||
confirm=""
|
||||
while [ "" == "${confirm}" ]; do
|
||||
echo -e -n "${RED}Existing TDengine deb is detected, do you want to remove it? [yes|no] ${NC}:"
|
||||
read confirm
|
||||
if [ "yes" == "$confirm" ]; then
|
||||
${csudo}dpkg --remove tdengine ||:
|
||||
break
|
||||
elif [ "no" == "$confirm" ]; then
|
||||
break
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
rpm_erase() {
|
||||
confirm=""
|
||||
while [ "" == "${confirm}" ]; do
|
||||
echo -e -n "${RED}Existing TDengine rpm is detected, do you want to remove it? [yes|no] ${NC}:"
|
||||
read confirm
|
||||
if [ "yes" == "$confirm" ]; then
|
||||
${csudo}rpm -e tdengine ||:
|
||||
break
|
||||
elif [ "no" == "$confirm" ]; then
|
||||
break
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
function updateProduct() {
|
||||
# Check if version compatible
|
||||
if ! is_version_compatible; then
|
||||
|
@@ -754,6 +783,13 @@ function updateProduct() {
|
|||
echo "File ${tarName} does not exist"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if echo $osinfo | grep -qwi "centos"; then
|
||||
rpm -q tdengine 2>&1 > /dev/null && rpm_erase tdengine ||:
|
||||
elif echo $osinfo | grep -qwi "ubuntu"; then
|
||||
dpkg -l tdengine 2>&1 > /dev/null && deb_erase tdengine ||:
|
||||
fi
|
||||
|
||||
tar -zxf ${tarName}
|
||||
install_jemalloc
|
||||
|
||||
|
|
|
@@ -48,6 +48,7 @@ fi
|
|||
|
||||
data_link_dir="${install_main_dir}/data"
|
||||
log_link_dir="${install_main_dir}/log"
|
||||
install_log_path="${log_dir}/taos_install.log"
|
||||
|
||||
# static directory
|
||||
cfg_dir="${install_main_dir}/cfg"
|
||||
|
@@ -101,6 +102,11 @@ if [ "$osType" = "Darwin" ]; then
|
|||
${csudo}cp -rf tdengine ${install_main_dir}
|
||||
fi
|
||||
|
||||
function log_print(){
|
||||
now=$(date +"%D %T")
|
||||
echo "$now $1" >> ${install_log_path}
|
||||
}
|
||||
|
||||
function kill_taosadapter() {
|
||||
# ${csudo}pkill -f taosadapter || :
|
||||
pid=$(ps -ef | grep "taosadapter" | grep -v "grep" | awk '{print $2}')
|
||||
|
@@ -109,6 +115,13 @@ function kill_taosadapter() {
|
|||
fi
|
||||
}
|
||||
|
||||
function kill_taoskeeper() {
|
||||
pid=$(ps -ef | grep "taoskeeper" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
|
||||
function kill_taosd() {
|
||||
# ${csudo}pkill -f taosd || :
|
||||
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
|
||||
|
@@ -118,6 +131,7 @@ function kill_taosd() {
|
|||
}
|
||||
|
||||
function install_include() {
|
||||
log_print "start install include from ${inc_dir} to ${inc_link_dir}"
|
||||
${csudo}mkdir -p ${inc_link_dir}
|
||||
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h ${inc_link_dir}/taosudf.h || :
|
||||
[ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h ||:
|
||||
|
@@ -128,39 +142,45 @@ function install_include() {
|
|||
${csudo}ln -s ${inc_dir}/taosudf.h ${inc_link_dir}/taosudf.h
|
||||
|
||||
[ -f ${inc_dir}/taosws.h ] && ${csudo}ln -sf ${inc_dir}/taosws.h ${inc_link_dir}/taosws.h ||:
|
||||
log_print "install include success"
|
||||
}
|
||||
|
||||
function install_lib() {
|
||||
log_print "start install lib from ${lib_dir} to ${lib_link_dir}"
|
||||
${csudo}rm -f ${lib_link_dir}/libtaos* || :
|
||||
${csudo}rm -f ${lib64_link_dir}/libtaos* || :
|
||||
|
||||
[ -f ${lib_link_dir}/libtaosws.${lib_file_ext} ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.${lib_file_ext} || :
|
||||
[ -f ${lib64_link_dir}/libtaosws.${lib_file_ext} ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.${lib_file_ext} || :
|
||||
|
||||
${csudo}ln -s ${lib_dir}/libtaos.* ${lib_link_dir}/libtaos.${lib_file_ext_1}
|
||||
${csudo}ln -s ${lib_link_dir}/libtaos.${lib_file_ext_1} ${lib_link_dir}/libtaos.${lib_file_ext}
|
||||
${csudo}ln -s ${lib_dir}/libtaos.* ${lib_link_dir}/libtaos.${lib_file_ext_1} 2>>${install_log_path} || return 1
|
||||
${csudo}ln -s ${lib_link_dir}/libtaos.${lib_file_ext_1} ${lib_link_dir}/libtaos.${lib_file_ext} 2>>${install_log_path} || return 1
|
||||
|
||||
[ -f ${lib_dir}/libtaosws.${lib_file_ext} ] && ${csudo}ln -sf ${lib_dir}/libtaosws.${lib_file_ext} ${lib_link_dir}/libtaosws.${lib_file_ext} ||:
|
||||
|
||||
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.${lib_file_ext} ]]; then
|
||||
${csudo}ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.${lib_file_ext_1} || :
|
||||
${csudo}ln -s ${lib64_link_dir}/libtaos.${lib_file_ext_1} ${lib64_link_dir}/libtaos.${lib_file_ext} || :
|
||||
${csudo}ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.${lib_file_ext_1} 2>>${install_log_path} || return 1
|
||||
${csudo}ln -s ${lib64_link_dir}/libtaos.${lib_file_ext_1} ${lib64_link_dir}/libtaos.${lib_file_ext} 2>>${install_log_path} || return 1
|
||||
|
||||
[ -f ${lib_dir}/libtaosws.${lib_file_ext} ] && ${csudo}ln -sf ${lib_dir}/libtaosws.${lib_file_ext} ${lib64_link_dir}/libtaosws.${lib_file_ext} || :
|
||||
[ -f ${lib_dir}/libtaosws.${lib_file_ext} ] && ${csudo}ln -sf ${lib_dir}/libtaosws.${lib_file_ext} ${lib64_link_dir}/libtaosws.${lib_file_ext} 2>>${install_log_path}
|
||||
fi
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}ldconfig
|
||||
fi
|
||||
|
||||
log_print "install lib success"
|
||||
}
|
||||
|
||||
function install_bin() {
|
||||
# Remove links
|
||||
log_print "start install bin from ${bin_dir} to ${bin_link_dir}"
|
||||
${csudo}rm -f ${bin_link_dir}/taos || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosd || :
|
||||
${csudo}rm -f ${bin_link_dir}/udfd || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosadapter || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosBenchmark || :
|
||||
${csudo}rm -f ${bin_link_dir}/taoskeeper || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosdemo || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosdump || :
|
||||
${csudo}rm -f ${bin_link_dir}/rmtaos || :
|
||||
|
@ -169,16 +189,38 @@ function install_bin() {
|
|||
${csudo}chmod 0555 ${bin_dir}/*
|
||||
|
||||
#Make link
|
||||
[ -x ${bin_dir}/taos ] && ${csudo}ln -s ${bin_dir}/taos ${bin_link_dir}/taos || :
|
||||
[ -x ${bin_dir}/taosd ] && ${csudo}ln -s ${bin_dir}/taosd ${bin_link_dir}/taosd || :
|
||||
[ -x ${bin_dir}/udfd ] && ${csudo}ln -s ${bin_dir}/udfd ${bin_link_dir}/udfd || :
|
||||
[ -x ${bin_dir}/taosadapter ] && ${csudo}ln -s ${bin_dir}/taosadapter ${bin_link_dir}/taosadapter || :
|
||||
[ -x ${bin_dir}/taosBenchmark ] && ${csudo}ln -sf ${bin_dir}/taosBenchmark ${bin_link_dir}/taosdemo || :
|
||||
[ -x ${bin_dir}/taosBenchmark ] && ${csudo}ln -sf ${bin_dir}/taosBenchmark ${bin_link_dir}/taosBenchmark || :
|
||||
[ -x ${bin_dir}/TDinsight.sh ] && ${csudo}ln -sf ${bin_dir}/TDinsight.sh ${bin_link_dir}/TDinsight.sh || :
|
||||
[ -x ${bin_dir}/taosdump ] && ${csudo}ln -s ${bin_dir}/taosdump ${bin_link_dir}/taosdump || :
|
||||
[ -x ${bin_dir}/set_core.sh ] && ${csudo}ln -s ${bin_dir}/set_core.sh ${bin_link_dir}/set_core || :
|
||||
[ -x ${bin_dir}/remove.sh ] && ${csudo}ln -s ${bin_dir}/remove.sh ${bin_link_dir}/rmtaos || :
|
||||
if [ -x ${bin_dir}/taos ]; then
|
||||
${csudo}ln -s ${bin_dir}/taos ${bin_link_dir}/taos 2>>${install_log_path} || return 1
|
||||
fi
|
||||
if [ -x ${bin_dir}/taosd ]; then
|
||||
${csudo}ln -s ${bin_dir}/taosd ${bin_link_dir}/taosd 2>>${install_log_path} || return 1
|
||||
fi
|
||||
if [ -x ${bin_dir}/udfd ]; then
|
||||
${csudo}ln -s ${bin_dir}/udfd ${bin_link_dir}/udfd 2>>${install_log_path} || return 1
|
||||
fi
|
||||
if [ -x ${bin_dir}/taosadapter ]; then
|
||||
${csudo}ln -s ${bin_dir}/taosadapter ${bin_link_dir}/taosadapter 2>>${install_log_path} || return 1
|
||||
fi
|
||||
if [ -x ${bin_dir}/taosBenchmark ]; then
|
||||
${csudo}ln -sf ${bin_dir}/taosBenchmark ${bin_link_dir}/taosdemo 2>>${install_log_path}
|
||||
${csudo}ln -sf ${bin_dir}/taosBenchmark ${bin_link_dir}/taosBenchmark 2>>${install_log_path}
|
||||
fi
|
||||
if [ -x ${bin_dir}/TDinsight.sh ]; then
|
||||
${csudo}ln -sf ${bin_dir}/TDinsight.sh ${bin_link_dir}/TDinsight.sh 2>>${install_log_path} || return 1
|
||||
fi
|
||||
if [ -x ${bin_dir}/taosdump ]; then
|
||||
${csudo}ln -s ${bin_dir}/taosdump ${bin_link_dir}/taosdump 2>>${install_log_path} || return 1
|
||||
fi
|
||||
if [ -x ${bin_dir}/set_core.sh ]; then
|
||||
${csudo}ln -s ${bin_dir}/set_core.sh ${bin_link_dir}/set_core 2>>${install_log_path} || return 1
|
||||
fi
|
||||
if [ -x ${bin_dir}/remove.sh ]; then
|
||||
${csudo}ln -s ${bin_dir}/remove.sh ${bin_link_dir}/rmtaos 2>>${install_log_path} || return 1
|
||||
fi
|
||||
if [ -x ${bin_dir}/taoskeeper ]; then
|
||||
${csudo}ln -sf ${bin_dir}/taoskeeper ${bin_link_dir}/taoskeeper 2>>${install_log_path} || return 1
|
||||
fi
|
||||
log_print "install bin success"
|
||||
}
|
||||
|
||||
function add_newHostname_to_hosts() {
|
||||
|
@ -351,7 +393,24 @@ function install_taosadapter_config() {
|
|||
${csudo}ln -s ${cfg_install_dir}/taosadapter.toml ${cfg_dir}
|
||||
}
|
||||
|
||||
function install_taoskeeper_config() {
|
||||
if [ ! -f "${cfg_install_dir}/keeper.toml" ]; then
|
||||
[ ! -d %{cfg_install_dir} ] &&
|
||||
${csudo}${csudo}mkdir -p ${cfg_install_dir}
|
||||
[ -f ${cfg_dir}/keeper.toml ] && ${csudo}cp ${cfg_dir}/keeper.toml ${cfg_install_dir}
|
||||
[ -f ${cfg_install_dir}/keeper.toml ] &&
|
||||
${csudo}chmod 644 ${cfg_install_dir}/keeper.toml
|
||||
fi
|
||||
|
||||
[ -f ${cfg_dir}/keeper.toml ] &&
|
||||
${csudo}mv ${cfg_dir}/keeper.toml ${cfg_dir}/keeper.toml.new
|
||||
|
||||
[ -f ${cfg_install_dir}/keeper.toml ] &&
|
||||
${csudo}ln -s ${cfg_install_dir}/keeper.toml ${cfg_dir}
|
||||
}
|
||||
|
||||
function install_config() {
|
||||
log_print "start install config from ${cfg_dir} to ${cfg_install_dir}"
|
||||
if [ ! -f "${cfg_install_dir}/taos.cfg" ]; then
|
||||
${csudo}${csudo}mkdir -p ${cfg_install_dir}
|
||||
[ -f ${cfg_dir}/taos.cfg ] && ${csudo}cp ${cfg_dir}/taos.cfg ${cfg_install_dir}
|
||||
|
@ -419,6 +478,8 @@ function install_config() {
|
|||
break
|
||||
fi
|
||||
done
|
||||
|
||||
log_print "install config success"
|
||||
}
|
||||
|
||||
function clean_service_on_sysvinit() {
|
||||
|
@ -533,6 +594,7 @@ function install_taosadapter_service() {
|
|||
}
|
||||
|
||||
function install_service() {
|
||||
log_print "start install service"
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
if ((${service_mod}==0)); then
|
||||
install_service_on_systemd
|
||||
|
@ -546,6 +608,7 @@ function install_service() {
|
|||
else
|
||||
install_service_on_launchctl
|
||||
fi
|
||||
log_print "install service success"
|
||||
}
|
||||
|
||||
function install_app() {
|
||||
|
@ -566,6 +629,7 @@ function install_app() {
|
|||
|
||||
function install_TDengine() {
|
||||
echo -e "${GREEN}Start to install TDengine...${NC}"
|
||||
log_print "start to install TDengine"
|
||||
|
||||
#install log and data dir , then ln to /usr/local/taos
|
||||
${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir}
|
||||
|
@ -578,11 +642,18 @@ function install_TDengine() {
|
|||
${csudo}ln -s ${data_dir} ${data_link_dir} || :
|
||||
|
||||
# Install include, lib, binary and service
|
||||
install_include
|
||||
install_lib
|
||||
install_include &&
|
||||
install_lib &&
|
||||
install_bin
|
||||
|
||||
if [[ "$?" != 0 ]];then
|
||||
log_print "install TDengine failed!"
|
||||
return 1
|
||||
fi
|
||||
|
||||
install_config
|
||||
install_taosadapter_config
|
||||
install_taoskeeper_config
|
||||
install_taosadapter_service
|
||||
install_service
|
||||
install_app
|
||||
|
@ -622,11 +693,14 @@ function install_TDengine() {
|
|||
echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $serverFqdn${GREEN_DARK} to login into TDengine server${NC}"
|
||||
echo
|
||||
fi
|
||||
log_print "install TDengine successfully!"
|
||||
echo
|
||||
echo -e "\033[44;32;1mTDengine is installed successfully!${NC}"
|
||||
}
|
||||
|
||||
|
||||
## ==============================Main program starts from here============================
|
||||
${csudo}rm -rf ${install_log_path}
|
||||
log_print "installer start..."
|
||||
serverFqdn=$(hostname)
|
||||
install_TDengine
|
||||
|
|
|
@@ -312,6 +312,8 @@ extern SAppInfo appInfo;
extern int32_t clientReqRefPool;
extern int32_t clientConnRefPool;
extern int32_t timestampDeltaLimit;
extern int64_t lastClusterId;

__async_send_cb_fn_t getMsgRspHandle(int32_t msgType);

@@ -339,6 +341,7 @@ void resetConnectDB(STscObj* pTscObj);
int taos_options_imp(TSDB_OPTION option, const char* str);

void* openTransporter(const char* user, const char* auth, int32_t numOfThreads);
void tscStopCrashReport();

typedef struct AsyncArg {
  SRpcMsg msg;
@@ -164,6 +164,7 @@ typedef struct {
  bool dataFormat;  // true means that the name and order of keys in each line are the same(only for influx protocol)
  bool isRawLine;
  int32_t ttl;
  int32_t uid;  // used for automatic create child table

  NodeList *childTables;
  NodeList *superTables;

@@ -183,6 +184,7 @@ typedef struct {
  SSmlLineInfo *lines;  // element is SSmlLineInfo
  bool parseJsonByLib;
  SArray *tagJsonArray;
  SArray *valueJsonArray;

  //
  SArray *preLineTagKV;

@@ -232,6 +234,7 @@ int32_t smlClearForRerun(SSmlHandle *info);
int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg);
uint8_t smlGetTimestampLen(int64_t num);
void clearColValArray(SArray* pCols);
void smlDestroyTableInfo(SSmlHandle *info, SSmlTableInfo *tag);

int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements);
int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements);
@@ -28,13 +28,16 @@
#include "trpc.h"
#include "tsched.h"
#include "ttime.h"
#include "thttp.h"

#define TSC_VAR_NOT_RELEASE 1
#define TSC_VAR_RELEASED 0

SAppInfo appInfo;
int64_t lastClusterId = 0;
int32_t clientReqRefPool = -1;
int32_t clientConnRefPool = -1;
int32_t clientStop = 0;

int32_t timestampDeltaLimit = 900;  // s

@@ -62,7 +65,10 @@ static int32_t registerRequest(SRequestObj *pRequest, STscObj *pTscObj) {

static void deregisterRequest(SRequestObj *pRequest) {
  const static int64_t SLOW_QUERY_INTERVAL = 3000000L;  // todo configurable
  assert(pRequest != NULL);
  if(pRequest == NULL){
    tscError("pRequest == NULL");
    return;
  }

  STscObj *pTscObj = pRequest->pTscObj;
  SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;

@@ -359,6 +365,7 @@ void doDestroyRequest(void *p) {
  taosMemoryFreeClear(pRequest->pDb);

  doFreeReqResultInfo(&pRequest->body.resInfo);
  tsem_destroy(&pRequest->body.rspSem);

  taosArrayDestroy(pRequest->tableList);
  taosArrayDestroy(pRequest->dbList);

@@ -373,6 +380,9 @@ void doDestroyRequest(void *p) {
  }

  if (pRequest->syncQuery) {
    if (pRequest->body.param){
      tsem_destroy(&((SSyncQueryParam*)pRequest->body.param)->sem);
    }
    taosMemoryFree(pRequest->body.param);
  }

@@ -390,6 +400,122 @@ void destroyRequest(SRequestObj *pRequest) {
  removeRequest(pRequest->self);
}

void crashReportThreadFuncUnexpectedStopped(void) { atomic_store_32(&clientStop, -1); }

static void *tscCrashReportThreadFp(void *param) {
  setThreadName("client-crashReport");
  char filepath[PATH_MAX] = {0};
  snprintf(filepath, sizeof(filepath), "%s%s.taosCrashLog", tsLogDir, TD_DIRSEP);
  char *pMsg = NULL;
  int64_t msgLen = 0;
  TdFilePtr pFile = NULL;
  bool truncateFile = false;
  int32_t sleepTime = 200;
  int32_t reportPeriodNum = 3600 * 1000 / sleepTime;
  int32_t loopTimes = reportPeriodNum;

#ifdef WINDOWS
  if (taosCheckCurrentInDll()) {
    atexit(crashReportThreadFuncUnexpectedStopped);
  }
#endif

  while (1) {
    if (clientStop) break;
    if (loopTimes++ < reportPeriodNum) {
      taosMsleep(sleepTime);
      continue;
    }

    taosReadCrashInfo(filepath, &pMsg, &msgLen, &pFile);
    if (pMsg && msgLen > 0) {
      if (taosSendHttpReport(tsTelemServer, tsClientCrashReportUri, tsTelemPort, pMsg, msgLen, HTTP_FLAT) != 0) {
        tscError("failed to send crash report");
        if (pFile) {
          taosReleaseCrashLogFile(pFile, false);
          continue;
        }
      } else {
        tscInfo("succeed to send crash report");
        truncateFile = true;
      }
    } else {
      tscDebug("no crash info");
    }

    taosMemoryFree(pMsg);

    if (pMsg && msgLen > 0) {
      pMsg = NULL;
      continue;
    }

    if (pFile) {
      taosReleaseCrashLogFile(pFile, truncateFile);
      truncateFile = false;
    }

    taosMsleep(sleepTime);
    loopTimes = 0;
  }

  clientStop = -1;
  return NULL;
}

int32_t tscCrashReportInit() {
  if (!tsEnableCrashReport) {
    return 0;
  }

  TdThreadAttr thAttr;
  taosThreadAttrInit(&thAttr);
  taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_JOINABLE);
  TdThread crashReportThread;
  if (taosThreadCreate(&crashReportThread, &thAttr, tscCrashReportThreadFp, NULL) != 0) {
    tscError("failed to create crashReport thread since %s", strerror(errno));
    return -1;
  }

  taosThreadAttrDestroy(&thAttr);
  return 0;
}

void tscStopCrashReport() {
  if (!tsEnableCrashReport) {
    return;
  }

  if (atomic_val_compare_exchange_32(&clientStop, 0, 1)) {
    tscDebug("hb thread already stopped");
    return;
  }

  while (atomic_load_32(&clientStop) > 0) {
    taosMsleep(100);
  }
}


void tscWriteCrashInfo(int signum, void *sigInfo, void *context) {
  char *pMsg = NULL;
  const char *flags = "UTL FATAL ";
  ELogLevel level = DEBUG_FATAL;
  int32_t dflag = 255;
  int64_t msgLen= -1;

  if (tsEnableCrashReport) {
    if (taosGenCrashJsonMsg(signum, &pMsg, lastClusterId, appInfo.startTime)) {
      taosPrintLog(flags, level, dflag, "failed to generate crash json msg");
    } else {
      msgLen = strlen(pMsg);
    }
  }

  taosLogCrashInfo("taos", pMsg, msgLen, signum, sigInfo);
}


void taos_init_imp(void) {
  // In the APIs of other program language, taos_cleanup is not available yet.
  // So, to make sure taos_cleanup will be invoked to clean up the allocated resource to suppress the valgrind warning.

@@ -397,6 +523,10 @@ void taos_init_imp(void) {
  errno = TSDB_CODE_SUCCESS;
  taosSeedRand(taosGetTimestampSec());

  appInfo.pid = taosGetPId();
  appInfo.startTime = taosGetTimestampMs();
  appInfo.pInstMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);

  deltaToUtcInitOnce();

  if (taosCreateLog("taoslog", 10, configDir, NULL, NULL, NULL, NULL, 1) != 0) {

@@ -412,7 +542,8 @@ void taos_init_imp(void) {
  initQueryModuleMsgHandle();

  if (taosConvInit() != 0) {
    ASSERTS(0, "failed to init conv");
    tscError("failed to init conv");
    return;
  }

  rpcInit();

@@ -438,9 +569,8 @@ void taos_init_imp(void) {
  taosGetAppName(appInfo.appName, NULL);
  taosThreadMutexInit(&appInfo.mutex, NULL);

  appInfo.pid = taosGetPId();
  appInfo.startTime = taosGetTimestampMs();
  appInfo.pInstMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
  tscCrashReportInit();

  tscDebug("client is initialized successfully");
}
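Note on the shutdown handshake above: `tscStopCrashReport()` and the report thread coordinate through the single `clientStop` flag (0 = running, 1 = stop requested, -1 = thread has exited), and only the caller that wins the compare-and-swap waits for the acknowledgement. A minimal standalone sketch of the same pattern, using C11 atomics and plain pthreads rather than TDengine's `atomic_val_compare_exchange_32` wrappers; all names here are illustrative, not from the patch:

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

/* 0 = running, 1 = stop requested, -1 = worker has exited */
static atomic_int stopFlag = 0;

static void *reportLoop(void *arg) {
    (void)arg;
    while (atomic_load(&stopFlag) == 0) {
        /* periodic work would go here (e.g. read a crash log and upload it) */
        usleep(100 * 1000);
    }
    atomic_store(&stopFlag, -1);   /* acknowledge shutdown */
    return NULL;
}

static void stopReportThread(void) {
    int expected = 0;
    /* only the caller that flips 0 -> 1 waits; others see it already stopping */
    if (!atomic_compare_exchange_strong(&stopFlag, &expected, 1)) return;
    while (atomic_load(&stopFlag) > 0) usleep(10 * 1000);  /* wait for the -1 */
}

int main(void) {
    pthread_t th;
    pthread_create(&th, NULL, reportLoop, NULL);
    usleep(300 * 1000);
    stopReportThread();
    pthread_join(th, NULL);
    printf("stopped, flag=%d\n", atomic_load(&stopFlag));
    return 0;
}
```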
@@ -376,7 +376,6 @@ int32_t hbBuildQueryDesc(SQueryHbReqBasic *hbBasic, STscObj *pObj) {
      desc.subPlanNum = 0;
    }
    desc.subPlanNum = taosArrayGetSize(desc.subDesc);
    ASSERT(desc.subPlanNum == taosArrayGetSize(desc.subDesc));
  } else {
    desc.subDesc = NULL;
  }

@@ -813,7 +812,10 @@ static void hbStopThread() {
}

SAppHbMgr *appHbMgrInit(SAppInstInfo *pAppInstInfo, char *key) {
  hbMgrInit();
  if(hbMgrInit() != 0){
    terrno = TSDB_CODE_TSC_INTERNAL_ERROR;
    return NULL;
  }
  SAppHbMgr *pAppHbMgr = taosMemoryMalloc(sizeof(SAppHbMgr));
  if (pAppHbMgr == NULL) {
    terrno = TSDB_CODE_OUT_OF_MEMORY;

@@ -899,16 +901,28 @@ int hbMgrInit() {
  TdThreadMutexAttr attr = {0};

  int ret = taosThreadMutexAttrInit(&attr);
  assert(ret == 0);
  if(ret != 0){
    uError("hbMgrInit:taosThreadMutexAttrInit error")
    return ret;
  }

  ret = taosThreadMutexAttrSetType(&attr, PTHREAD_MUTEX_RECURSIVE);
  assert(ret == 0);
  if(ret != 0){
    uError("hbMgrInit:taosThreadMutexAttrSetType error")
    return ret;
  }

  ret = taosThreadMutexInit(&clientHbMgr.lock, &attr);
  assert(ret == 0);
  if(ret != 0){
    uError("hbMgrInit:taosThreadMutexInit error")
    return ret;
  }

  ret = taosThreadMutexAttrDestroy(&attr);
  assert(ret == 0);
  if(ret != 0){
    uError("hbMgrInit:taosThreadMutexAttrDestroy error")
    return ret;
  }

  // init handle funcs
  hbMgrInitHandle();
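The `hbMgrInit()` change above replaces `assert(ret == 0)` with a checked return after each step of the recursive-mutex setup, since any of the attr calls can fail at runtime. A self-contained sketch of that attr lifecycle (init, settype, mutex init, attr destroy) against plain pthreads, with the same step-by-step checking; the function and variable names are illustrative:

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock;

static int initRecursiveLock(void) {
    pthread_mutexattr_t attr;
    int ret = pthread_mutexattr_init(&attr);
    if (ret != 0) return ret;                 /* nothing to clean up yet */

    ret = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    if (ret != 0) goto out;                   /* still must destroy the attr */

    ret = pthread_mutex_init(&lock, &attr);   /* recursive: owner may relock */
out:
    pthread_mutexattr_destroy(&attr);
    return ret;
}

int main(void) {
    if (initRecursiveLock() != 0) return 1;
    pthread_mutex_lock(&lock);
    pthread_mutex_lock(&lock);    /* would deadlock with a default mutex */
    pthread_mutex_unlock(&lock);
    pthread_mutex_unlock(&lock);
    puts("recursive lock ok");
    return 0;
}
```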
@@ -159,6 +159,12 @@ STscObj* taos_connect_internal(const char* ip, const char* user, const char* pas
  return taosConnectImpl(user, &secretEncrypt[0], localDb, NULL, NULL, *pInst, connType);
}

void freeQueryParam(SSyncQueryParam* param) {
  if (param == NULL) return;
  tsem_destroy(&param->sem);
  taosMemoryFree(param);
}

int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param, bool validateSql,
                     SRequestObj** pRequest, int64_t reqid) {
  *pRequest = createRequest(connId, TSDB_SQL_SELECT, reqid);

@@ -180,17 +186,18 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param,
  (*pRequest)->sqlLen = sqlLen;
  (*pRequest)->validateOnly = validateSql;

  SSyncQueryParam* newpParam;
  if (param == NULL) {
    SSyncQueryParam* pParam = taosMemoryCalloc(1, sizeof(SSyncQueryParam));
    if (pParam == NULL) {
    newpParam = taosMemoryCalloc(1, sizeof(SSyncQueryParam));
    if (newpParam == NULL) {
      destroyRequest(*pRequest);
      *pRequest = NULL;
      return TSDB_CODE_OUT_OF_MEMORY;
    }

    tsem_init(&pParam->sem, 0, 0);
    pParam->pRequest = (*pRequest);
    param = pParam;
    tsem_init(&newpParam->sem, 0, 0);
    newpParam->pRequest = (*pRequest);
    param = newpParam;
  }

  (*pRequest)->body.param = param;

@@ -201,8 +208,7 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param,
  if (err) {
    tscError("%" PRId64 " failed to add to request container, reqId:0x%" PRIx64 ", conn:%" PRId64 ", %s",
             (*pRequest)->self, (*pRequest)->requestId, pTscObj->id, sql);

    taosMemoryFree(param);
    freeQueryParam(newpParam);
    destroyRequest(*pRequest);
    *pRequest = NULL;
    return TSDB_CODE_OUT_OF_MEMORY;

@@ -214,6 +220,7 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param,
      nodesCreateAllocator((*pRequest)->requestId, tsQueryNodeChunkSize, &((*pRequest)->allocatorRefId))) {
    tscError("%" PRId64 " failed to create node allocator, reqId:0x%" PRIx64 ", conn:%" PRId64 ", %s",
             (*pRequest)->self, (*pRequest)->requestId, pTscObj->id, sql);
    freeQueryParam(newpParam);
    destroyRequest(*pRequest);
    *pRequest = NULL;
    return TSDB_CODE_OUT_OF_MEMORY;

@@ -452,7 +459,10 @@ int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArra
}

void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t numOfCols) {
  ASSERT(pSchema != NULL && numOfCols > 0);
  if(pResInfo == NULL || pSchema == NULL || numOfCols <= 0){
    tscError("invalid paras, pResInfo == NULL || pSchema == NULL || numOfCols <= 0");
    return;
  }

  pResInfo->numOfCols = numOfCols;
  if (pResInfo->fields != NULL) {

@@ -463,7 +473,10 @@ void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t
  }
  pResInfo->fields = taosMemoryCalloc(numOfCols, sizeof(TAOS_FIELD));
  pResInfo->userFields = taosMemoryCalloc(numOfCols, sizeof(TAOS_FIELD));
  ASSERT(numOfCols == pResInfo->numOfCols);
  if(numOfCols != pResInfo->numOfCols){
    tscError("numOfCols:%d != pResInfo->numOfCols:%d", numOfCols, pResInfo->numOfCols);
    return;
  }

  for (int32_t i = 0; i < pResInfo->numOfCols; ++i) {
    pResInfo->fields[i].bytes = pSchema[i].bytes;

@@ -1339,7 +1352,10 @@ int32_t doProcessMsgFromServer(void* param) {
  SEpSet* pEpSet = arg->pEpset;

  SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle;
  assert(pMsg->info.ahandle != NULL);
  if(pMsg->info.ahandle == NULL){
    tscError("doProcessMsgFromServer pMsg->info.ahandle == NULL");
    return TSDB_CODE_TSC_INTERNAL_ERROR;
  }
  STscObj* pTscObj = NULL;

  STraceId* trace = &pMsg->info.traceId;

@@ -1352,8 +1368,10 @@ int32_t doProcessMsgFromServer(void* param) {
  if (pSendInfo->requestObjRefId != 0) {
    SRequestObj* pRequest = (SRequestObj*)taosAcquireRef(clientReqRefPool, pSendInfo->requestObjRefId);
    if (pRequest) {
      assert(pRequest->self == pSendInfo->requestObjRefId);

      if(pRequest->self != pSendInfo->requestObjRefId){
        tscError("doProcessMsgFromServer pRequest->self:%"PRId64" != pSendInfo->requestObjRefId:%"PRId64, pRequest->self, pSendInfo->requestObjRefId);
        return TSDB_CODE_TSC_INTERNAL_ERROR;
      }
      pRequest->metric.rsp = taosGetTimestampUs();
      pTscObj = pRequest->pTscObj;
      /*

@@ -1495,7 +1513,9 @@ void doSetOneRowPtr(SReqResultInfo* pResultInfo) {
}

void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) {
  assert(pRequest != NULL);
  if(pRequest == NULL){
    return NULL;
  }

  SReqResultInfo* pResultInfo = &pRequest->body.resInfo;
  if (pResultInfo->pData == NULL || pResultInfo->current >= pResultInfo->numOfRows) {

@@ -1549,7 +1569,9 @@ static void syncFetchFn(void* param, TAOS_RES* res, int32_t numOfRows) {
}

void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) {
  assert(pRequest != NULL);
  if(pRequest == NULL){
    return NULL;
  }

  SReqResultInfo* pResultInfo = &pRequest->body.resInfo;
  if (pResultInfo->pData == NULL || pResultInfo->current >= pResultInfo->numOfRows) {

@@ -1613,8 +1635,10 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
        char* pStart = pCol->offset[j] + pCol->pData;

        int32_t len = taosUcs4ToMbs((TdUcs4*)varDataVal(pStart), varDataLen(pStart), varDataVal(p));
        ASSERT(len <= bytes);
        ASSERT((p + len) < (pResultInfo->convertBuf[i] + colLength[i]));
        if(len > bytes || (p + len) >= (pResultInfo->convertBuf[i] + colLength[i])){
          tscError("doConvertUCS4 error, invalid data. len:%d, bytes:%d, (p + len):%p, (pResultInfo->convertBuf[i] + colLength[i]):%p", len, bytes, (p + len), (pResultInfo->convertBuf[i] + colLength[i]));
          return TSDB_CODE_TSC_INTERNAL_ERROR;
        }

        varDataSetLen(p, len);
        pCol->offset[j] = (p - pResultInfo->convertBuf[i]);

@@ -1631,9 +1655,6 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
}

int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols) {
  int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3);
  ASSERT(numOfCols == cols);

  return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) * 3 + sizeof(uint64_t) +
         numOfCols * (sizeof(int8_t) + sizeof(int32_t));
}

@@ -1643,6 +1664,12 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i

  // | version | total length | total rows | total columns | flag seg| block group id | column schema | each column
  // length |
  int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3);
  if(ASSERT(numOfCols == cols)){
    tscError("estimateJsonLen error: numOfCols:%d != cols:%d", numOfCols, cols);
    return -1;
  }

  int32_t len = getVersion1BlockMetaSize(p, numOfCols);
  int32_t* colLength = (int32_t*)(p + len);
  len += sizeof(int32_t) * numOfCols;

@@ -1676,7 +1703,8 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i
      } else if (jsonInnerType == TSDB_DATA_TYPE_BOOL) {
        len += (VARSTR_HEADER_SIZE + 5);
      } else {
        ASSERT(0);
        tscError("estimateJsonLen error: invalid type:%d", jsonInnerType);
        return -1;
      }
    }
  } else if (IS_VAR_DATA_TYPE(pResultInfo->fields[i].type)) {

@@ -1710,12 +1738,21 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int

  char* p = (char*)pResultInfo->pData;
  int32_t dataLen = estimateJsonLen(pResultInfo, numOfCols, numOfRows);
  if(dataLen <= 0){
    return TSDB_CODE_TSC_INTERNAL_ERROR;
  }

  pResultInfo->convertJson = taosMemoryCalloc(1, dataLen);
  if (pResultInfo->convertJson == NULL) return TSDB_CODE_OUT_OF_MEMORY;
  char* p1 = pResultInfo->convertJson;

  int32_t totalLen = 0;
  int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3);
  if(ASSERT(numOfCols == cols)){
    tscError("doConvertJson error: numOfCols:%d != cols:%d", numOfCols, cols);
    return TSDB_CODE_TSC_INTERNAL_ERROR;
  }

  int32_t len = getVersion1BlockMetaSize(p, numOfCols);
  memcpy(p1, p, len);

@@ -1736,8 +1773,10 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
  for (int32_t i = 0; i < numOfCols; ++i) {
    int32_t colLen = htonl(colLength[i]);
    int32_t colLen1 = htonl(colLength1[i]);
    ASSERT(colLen < dataLen);

    if(ASSERT(colLen < dataLen)){
      tscError("doConvertJson error: colLen:%d >= dataLen:%d", colLen, dataLen);
      return TSDB_CODE_TSC_INTERNAL_ERROR;
    }
    if (pResultInfo->fields[i].type == TSDB_DATA_TYPE_JSON) {
      int32_t* offset = (int32_t*)pStart;
      int32_t* offset1 = (int32_t*)pStart1;

@@ -1782,7 +1821,8 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
          sprintf(varDataVal(dst), "%s", (*((char*)jsonInnerData) == 1) ? "true" : "false");
          varDataSetLen(dst, strlen(varDataVal(dst)));
        } else {
          ASSERT(0);
          tscError("doConvertJson error: invalid type:%d", jsonInnerType);
          return TSDB_CODE_TSC_INTERNAL_ERROR;
        }

        offset1[j] = len;

@@ -1820,7 +1860,10 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int

int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32_t numOfCols, int32_t numOfRows,
                         bool convertUcs4) {
  assert(numOfCols > 0 && pFields != NULL && pResultInfo != NULL);
  if(ASSERT(numOfCols > 0 && pFields != NULL && pResultInfo != NULL)){
    tscError("setResultDataPtr paras error");
    return TSDB_CODE_TSC_INTERNAL_ERROR;
  }
  if (numOfRows == 0) {
    return TSDB_CODE_SUCCESS;
  }

@@ -1849,7 +1892,10 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
  int32_t cols = *(int32_t*)p;
  p += sizeof(int32_t);

  ASSERT(rows == numOfRows && cols == numOfCols);
  if(ASSERT(rows == numOfRows && cols == numOfCols)){
    tscError("setResultDataPtr paras error:rows;%d numOfRows:%d cols:%d numOfCols:%d", rows, numOfRows, cols, numOfCols);
    return TSDB_CODE_TSC_INTERNAL_ERROR;
  }

  int32_t hasColumnSeg = *(int32_t*)p;
  p += sizeof(int32_t);

@@ -1876,7 +1922,7 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
    colLength[i] = htonl(colLength[i]);
    if (colLength[i] >= dataLen) {
      tscError("invalid colLength %d, dataLen %d", colLength[i], dataLen);
      ASSERT(0);
      return TSDB_CODE_TSC_INTERNAL_ERROR;
    }

    if (IS_VAR_DATA_TYPE(pResultInfo->fields[i].type)) {

@@ -1914,7 +1960,11 @@ char* getDbOfConnection(STscObj* pObj) {
}

void setConnectionDB(STscObj* pTscObj, const char* db) {
  assert(db != NULL && pTscObj != NULL);
  if(db == NULL || pTscObj == NULL){
    tscError("setConnectionDB para is NULL");
    return;
  }

  taosThreadMutexLock(&pTscObj->mutex);
  tstrncpy(pTscObj->db, db, tListLen(pTscObj->db));
  taosThreadMutexUnlock(&pTscObj->mutex);

@@ -1932,7 +1982,10 @@ void resetConnectDB(STscObj* pTscObj) {

int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4,
                              bool freeAfterUse) {
  assert(pResultInfo != NULL && pRsp != NULL);
  if(pResultInfo == NULL || pRsp == NULL){
    tscError("setQueryResultFromRsp paras is null");
    return TSDB_CODE_TSC_INTERNAL_ERROR;
  }

  if (freeAfterUse) taosMemoryFreeClear(pResultInfo->pRspMsg);
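A recurring pattern in this file's changes: hard `assert()`/`ASSERT(0)` calls become logged, recoverable error returns, with `ASSERT` used as an expression that is nonzero when the condition fails. TDengine's actual `ASSERT` macro is defined elsewhere in the tree, so the stand-in below only sketches how such a checked assert can drive both the log line and the error return; the macro, error code, and function names are placeholders:

```c
#include <stdio.h>

/* Illustrative stand-in: evaluates to 1 when the condition FAILS, so the
 * caller can log and bail out instead of aborting the whole process. */
#define CHECKED_ASSERT(cond) ((cond) ? 0 : 1)

#define TSC_INTERNAL_ERROR (-1)  /* placeholder error code */

static int setResultDataPtrSketch(int rows, int numOfRows) {
    if (CHECKED_ASSERT(rows == numOfRows)) {
        fprintf(stderr, "paras error: rows:%d numOfRows:%d\n", rows, numOfRows);
        return TSC_INTERNAL_ERROR;   /* recoverable failure, no abort */
    }
    return 0;
}

int main(void) {
    printf("ok path: %d\n", setResultDataPtrSketch(3, 3));
    printf("bad path: %d\n", setResultDataPtrSketch(3, 5));
    return 0;
}
```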
@@ -574,7 +574,11 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchBlockImp(JNI
  TAOS_RES *tres = (TAOS_RES *)res;

  int32_t numOfFields = taos_num_fields(tres);
  assert(numOfFields > 0);
  if(numOfFields <= 0){
    jniError("jobj:%p, conn:%p, query interrupted. taos_num_fields error code:%d, msg:%s", jobj, tscon, numOfFields,
             taos_errstr(tres));
    return JNI_RESULT_SET_NULL;
  }

  void *data;
  int32_t numOfRows;
@@ -55,6 +55,8 @@ void taos_cleanup(void) {
    return;
  }

  tscStopCrashReport();

  int32_t id = clientReqRefPool;
  clientReqRefPool = -1;
  taosCloseRef(id);

@@ -106,7 +108,7 @@ TAOS *taos_connect(const char *ip, const char *user, const char *pass, const cha
  if (pass == NULL) {
    pass = TSDB_DEFAULT_PASS;
  }

  STscObj *pObj = taos_connect_internal(ip, user, pass, NULL, db, port, CONN_TYPE__QUERY);
  if (pObj) {
    int64_t *rid = taosMemoryCalloc(1, sizeof(int64_t));

@@ -291,7 +293,6 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) {
    tscError("invalid result passed to taos_fetch_row");
    return NULL;
  }
  return NULL;
}

int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) {

@@ -355,9 +356,13 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
      case TSDB_DATA_TYPE_NCHAR: {
        int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
        if (fields[i].type == TSDB_DATA_TYPE_BINARY) {
          assert(charLen <= fields[i].bytes && charLen >= 0);
          if(ASSERT(charLen <= fields[i].bytes && charLen >= 0)){
            tscError("taos_print_row error binary. charLen:%d, fields[i].bytes:%d", charLen, fields[i].bytes);
          }
        } else {
          assert(charLen <= fields[i].bytes * TSDB_NCHAR_SIZE && charLen >= 0);
          if(ASSERT(charLen <= fields[i].bytes * TSDB_NCHAR_SIZE && charLen >= 0)){
            tscError("taos_print_row error. charLen:%d, fields[i].bytes:%d", charLen, fields[i].bytes);
          }
        }

        memcpy(str + len, row[i], charLen);

@@ -430,6 +435,22 @@ const char *taos_data_type(int type) {
      return "TSDB_DATA_TYPE_NCHAR";
    case TSDB_DATA_TYPE_JSON:
      return "TSDB_DATA_TYPE_JSON";
    case TSDB_DATA_TYPE_UTINYINT:
      return "TSDB_DATA_TYPE_UTINYINT";
    case TSDB_DATA_TYPE_USMALLINT:
      return "TSDB_DATA_TYPE_USMALLINT";
    case TSDB_DATA_TYPE_UINT:
      return "TSDB_DATA_TYPE_UINT";
    case TSDB_DATA_TYPE_UBIGINT:
      return "TSDB_DATA_TYPE_UBIGINT";
    case TSDB_DATA_TYPE_VARBINARY:
      return "TSDB_DATA_TYPE_VARBINARY";
    case TSDB_DATA_TYPE_DECIMAL:
      return "TSDB_DATA_TYPE_DECIMAL";
    case TSDB_DATA_TYPE_BLOB:
      return "TSDB_DATA_TYPE_BLOB";
    case TSDB_DATA_TYPE_MEDIUMBLOB:
      return "TSDB_DATA_TYPE_MEDIUMBLOB";
    default:
      return "UNKNOWN";
  }

@@ -507,9 +528,8 @@ void taos_stop_query(TAOS_RES *res) {
  SRequestObj *pRequest = (SRequestObj *)res;
  pRequest->killed = true;

  int32_t numOfFields = taos_num_fields(pRequest);
  // It is not a query, no need to stop.
  if (numOfFields == 0) {
  if (NULL == pRequest->pQuery || QUERY_EXEC_MODE_SCHEDULE != pRequest->pQuery->execMode) {
    tscDebug("request 0x%" PRIx64 " no need to be killed since not query", pRequest->requestId);
    return;
  }

@@ -577,7 +597,7 @@ int taos_fetch_block_s(TAOS_RES *res, int *numOfRows, TAOS_ROW *rows) {
    (*numOfRows) = pResultInfo->numOfRows;
    return 0;
  } else {
    ASSERT(0);
    tscError("taos_fetch_block_s invalid res type");
    return -1;
  }
}

@@ -670,6 +690,32 @@ const char *taos_get_server_info(TAOS *taos) {
  return pTscObj->sDetailVer;
}

int taos_get_current_db(TAOS *taos, char *database, int len, int *required) {
  STscObj *pTscObj = acquireTscObj(*(int64_t *)taos);
  if (pTscObj == NULL) {
    terrno = TSDB_CODE_TSC_DISCONNECTED;
    return -1;
  }

  int code = TSDB_CODE_SUCCESS;
  taosThreadMutexLock(&pTscObj->mutex);
  if(database == NULL || len <= 0){
    if(required != NULL) *required = strlen(pTscObj->db) + 1;
    terrno = TSDB_CODE_INVALID_PARA;
    code = -1;
  }else if(len < strlen(pTscObj->db) + 1){
    tstrncpy(database, pTscObj->db, len);
    if(required) *required = strlen(pTscObj->db) + 1;
    terrno = TSDB_CODE_INVALID_PARA;
    code = -1;
  }else{
    strcpy(database, pTscObj->db);
    code = 0;
  }
  taosThreadMutexUnlock(&pTscObj->mutex);
  return code;
}

static void destoryTablesReq(void *p) {
  STablesReq *pRes = (STablesReq *)p;
  taosArrayDestroy(pRes->pTables);

@@ -1000,8 +1046,14 @@ static void fetchCallback(void *pResult, void *param, int32_t code) {
}

void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
  ASSERT(res != NULL && fp != NULL);
  ASSERT(TD_RES_QUERY(res));
  if(ASSERT(res != NULL && fp != NULL)){
    tscError("taos_fetch_rows_a invalid paras");
    return;
  }
  if(ASSERT(TD_RES_QUERY(res))){
    tscError("taos_fetch_rows_a res is NULL");
    return;
  }

  SRequestObj *pRequest = res;
  pRequest->body.fetchFp = fp;

@@ -1044,9 +1096,14 @@ void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
}

void taos_fetch_raw_block_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
  ASSERT(res != NULL && fp != NULL);
  ASSERT(TD_RES_QUERY(res));

  if(ASSERT(res != NULL && fp != NULL)){
    tscError("taos_fetch_rows_a invalid paras");
    return;
  }
  if(ASSERT(TD_RES_QUERY(res))){
    tscError("taos_fetch_rows_a res is NULL");
    return;
  }
  SRequestObj *pRequest = res;
  SReqResultInfo *pResultInfo = &pRequest->body.resInfo;

@@ -1058,8 +1115,14 @@ void taos_fetch_raw_block_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
}

const void *taos_get_raw_block(TAOS_RES *res) {
  ASSERT(res != NULL);
  ASSERT(TD_RES_QUERY(res));
  if(ASSERT(res != NULL)){
    tscError("taos_fetch_rows_a invalid paras");
    return NULL;
  }
  if(ASSERT(TD_RES_QUERY(res))){
    tscError("taos_fetch_rows_a res is NULL");
    return NULL;
  }
  SRequestObj *pRequest = res;

  return pRequest->body.resInfo.pData;
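The new `taos_get_current_db()` above reports the needed buffer size through `required` and returns -1 with `terrno = TSDB_CODE_INVALID_PARA` when the caller's buffer is absent or too small, so a caller can probe first and then fetch. A usage sketch of that two-call pattern; the connection parameters are placeholders, not from the patch:

```c
#include <stdio.h>
#include <stdlib.h>
#include <taos.h>  /* TDengine client header */

int main(void) {
    /* placeholder host/credentials/db; adjust for a real deployment */
    TAOS *conn = taos_connect("localhost", "root", "taosdata", "test", 6030);
    if (conn == NULL) return 1;

    int required = 0;
    /* probe: NULL buffer -> returns -1 and sets `required` to strlen(db) + 1 */
    if (taos_get_current_db(conn, NULL, 0, &required) != 0 && required > 0) {
        char *buf = malloc(required);
        if (buf != NULL && taos_get_current_db(conn, buf, required, NULL) == 0) {
            printf("current db: %s\n", buf);
        }
        free(buf);
    }

    taos_close(conn);
    return 0;
}
```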
@@ -119,6 +119,7 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {

  // update the appInstInfo
  pTscObj->pAppInfo->clusterId = connectRsp.clusterId;
  lastClusterId = connectRsp.clusterId;

  pTscObj->connType = connectRsp.connType;

@@ -149,7 +150,6 @@ SMsgSendInfo* buildMsgInfoImpl(SRequestObj* pRequest) {
  pMsgSendInfo->msgType = pRequest->type;
  pMsgSendInfo->target.type = TARGET_TYPE_MNODE;

  assert(pRequest != NULL);
  pMsgSendInfo->msgInfo = pRequest->body.requestMsg;
  pMsgSendInfo->fp = getMsgRspHandle(pRequest->type);
  return pMsgSendInfo;

@@ -273,7 +273,9 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
}

int32_t processCreateSTableRsp(void* param, SDataBuf* pMsg, int32_t code) {
  assert(pMsg != NULL && param != NULL);
  if(pMsg == NULL || param == NULL){
    return TSDB_CODE_TSC_INVALID_INPUT;
  }
  SRequestObj* pRequest = param;

  if (code != TSDB_CODE_SUCCESS) {

@@ -454,7 +456,10 @@ static int32_t buildShowVariablesRsp(SArray* pVars, SRetrieveTableRsp** pRsp) {
  (*pRsp)->numOfCols = htonl(SHOW_VARIABLES_RESULT_COLS);

  int32_t len = blockEncode(pBlock, (*pRsp)->data, SHOW_VARIABLES_RESULT_COLS);
  ASSERT(len == rspSize - sizeof(SRetrieveTableRsp));
  if(len != rspSize - sizeof(SRetrieveTableRsp)){
    uError("buildShowVariablesRsp error, len:%d != rspSize - sizeof(SRetrieveTableRsp):%" PRIu64, len, (uint64_t) (rspSize - sizeof(SRetrieveTableRsp)));
    return TSDB_CODE_TSC_INVALID_INPUT;
  }

  blockDataDestroy(pBlock);
  return TSDB_CODE_SUCCESS;
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue