merge 3.0

commit 0b1bb7a1fb
@@ -1,6 +1,7 @@
cmake_minimum_required(VERSION 3.0)

set(CMAKE_VERBOSE_MAKEFILE OFF)
set(TD_BUILD_TAOSA_INTERNAL FALSE)

#set output directory
SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/lib)

@@ -117,12 +118,18 @@ ELSE ()
IF (${BUILD_SANITIZER})
  SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
  SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
  MESSAGE(STATUS "Will compile with Address Sanitizer!")
  MESSAGE(STATUS "Compile with Address Sanitizer!")
ELSE ()
  SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
  SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
ENDIF ()

# disable all assert
IF ((${DISABLE_ASSERT} MATCHES "true") OR (${DISABLE_ASSERTS} MATCHES "true"))
  ADD_DEFINITIONS(-DDISABLE_ASSERT)
  MESSAGE(STATUS "Disable all asserts")
ENDIF()

INCLUDE(CheckCCompilerFlag)
IF (TD_ARM_64 OR TD_ARM_32)
  SET(COMPILER_SUPPORT_SSE42 false)

@@ -155,7 +162,7 @@ ELSE ()
  SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx2")
  SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx2")
ENDIF()
MESSAGE(STATUS "SIMD instructions (AVX/AVX2) is ACTIVATED")
MESSAGE(STATUS "SIMD instructions (FMA/AVX/AVX2) is ACTIVATED")
ENDIF()

ENDIF ()
@@ -21,7 +21,7 @@ IF (TD_LINUX)
ELSEIF (TD_WINDOWS)
  SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/tools/make_install.bat")
  INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
  INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} :needAdmin ${TD_SOURCE_DIR} ${PROJECT_BINARY_DIR} Windows ${TD_VER_NUMBER})")
  INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} :needAdmin ${TD_SOURCE_DIR} ${PROJECT_BINARY_DIR} Windows ${TD_VER_NUMBER} ${TD_BUILD_TAOSA_INTERNAL})")
ELSEIF (TD_DARWIN)
  SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/tools/make_install.sh")
  INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
  SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
  SET(TD_VER_NUMBER "3.0.2.1")
  SET(TD_VER_NUMBER "3.0.2.2")
ENDIF ()

IF (DEFINED VERCOMPATIBLE)
@@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
  GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
  GIT_TAG 5662a6d
  GIT_TAG 213f8b3
  SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
  BINARY_DIR ""
  #BUILD_IN_SOURCE TRUE
@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
  GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
  GIT_TAG 11b60a4
  GIT_TAG 723f696
  SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
  BINARY_DIR ""
  #BUILD_IN_SOURCE TRUE
@@ -117,19 +117,22 @@ class TaosConsumer():
<TabItem label="Go" value="Go">

```go
func NewConsumer(conf *Config) (*Consumer, error)
func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)

func (c *Consumer) Close() error
// rebalanceCb is reserved for compatibility purpose
func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error

func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error
// rebalanceCb is reserved for compatibility purpose
func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error

func (c *Consumer) FreeMessage(message unsafe.Pointer)
func (c *Consumer) Poll(timeoutMs int) tmq.Event

func (c *Consumer) Poll(timeout time.Duration) (*Result, error)

func (c *Consumer) Subscribe(topics []string) error
// tmq.TopicPartition is reserved for compatibility purpose
func (c *Consumer) Commit() ([]tmq.TopicPartition, error)

func (c *Consumer) Unsubscribe() error

func (c *Consumer) Close() error
```

</TabItem>

@@ -357,50 +360,20 @@ public class MetersDeserializer extends ReferenceDeserializer<Meters> {
<TabItem label="Go" value="Go">

```go
config := tmq.NewConfig()
defer config.Destroy()
err = config.SetGroupID("test")
if err != nil {
	panic(err)
}
err = config.SetAutoOffsetReset("earliest")
if err != nil {
	panic(err)
}
err = config.SetConnectIP("127.0.0.1")
if err != nil {
	panic(err)
}
err = config.SetConnectUser("root")
if err != nil {
	panic(err)
}
err = config.SetConnectPass("taosdata")
if err != nil {
	panic(err)
}
err = config.SetConnectPort("6030")
if err != nil {
	panic(err)
}
err = config.SetMsgWithTableName(true)
if err != nil {
	panic(err)
}
err = config.EnableHeartBeat()
if err != nil {
	panic(err)
}
err = config.EnableAutoCommit(func(result *wrapper.TMQCommitCallbackResult) {
	if result.ErrCode != 0 {
		errStr := wrapper.TMQErr2Str(result.ErrCode)
		err := errors.NewError(int(result.ErrCode), errStr)
		panic(err)
	}
})
if err != nil {
	panic(err)
conf := &tmq.ConfigMap{
	"group.id":                     "test",
	"auto.offset.reset":            "earliest",
	"td.connect.ip":                "127.0.0.1",
	"td.connect.user":              "root",
	"td.connect.pass":              "taosdata",
	"td.connect.port":              "6030",
	"client.id":                    "test_tmq_c",
	"enable.auto.commit":           "false",
	"enable.heartbeat.background":  "true",
	"experimental.snapshot.enable": "true",
	"msg.with.table.name":          "true",
}
consumer, err := NewConsumer(conf)
```

</TabItem>

@@ -523,11 +496,7 @@ consumer.subscribe(topics);
<TabItem value="Go" label="Go">

```go
consumer, err := tmq.NewConsumer(config)
if err != nil {
	panic(err)
}
err = consumer.Subscribe([]string{"example_tmq_topic"})
err = consumer.Subscribe("example_tmq_topic", nil)
if err != nil {
	panic(err)
}

@@ -611,13 +580,17 @@ while(running){

```go
for {
	result, err := consumer.Poll(time.Second)
	if err != nil {
		panic(err)
	ev := consumer.Poll(0)
	if ev != nil {
		switch e := ev.(type) {
		case *tmqcommon.DataMessage:
			fmt.Println(e.Value())
		case tmqcommon.Error:
			fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e)
			panic(e)
		}
		consumer.Commit()
	}
	fmt.Println(result)
	consumer.Commit(context.Background(), result.Message)
	consumer.FreeMessage(result.Message)
}
```

@@ -729,7 +702,11 @@ consumer.close();
<TabItem value="Go" label="Go">

```go
consumer.Close()
/* Unsubscribe */
_ = consumer.Unsubscribe()

/* Close consumer */
_ = consumer.Close()
```

</TabItem>
@@ -30,8 +30,10 @@ database_option: {
 | WAL_LEVEL {1 | 2}
 | VGROUPS value
 | SINGLE_STABLE {0 | 1}
 | STT_TRIGGER value
 | TABLE_PREFIX value
 | TABLE_SUFFIX value
 | TSDB_PAGESIZE value
 | WAL_RETENTION_PERIOD value
 | WAL_ROLL_PERIOD value
 | WAL_RETENTION_SIZE value

@@ -69,8 +71,10 @@ database_option: {
- SINGLE_STABLE: specifies whether the database can contain more than one supertable.
  - 0: The database can contain multiple supertables.
  - 1: The database can contain only one supertable.
- STT_TRIGGER: specifies the number of file merges triggered by flushed files. The default is 8, ranging from 1 to 16. For high-frequency scenarios with few tables, it is recommended to use the default configuration or a smaller value for this parameter; for multi-table low-frequency scenarios, it is recommended to configure this parameter with a larger value.
- TABLE_PREFIX: The prefix length in the table name that is ignored when distributing a table to a vnode based on the table name.
- TABLE_SUFFIX: The suffix length in the table name that is ignored when distributing a table to a vnode based on the table name.
- TSDB_PAGESIZE: The page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB.
- WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. Enter a time in seconds. The default value for a single copy is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. The default value for multiple copies is 4 days.
- WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. Enter a size in KB. The default value for a single copy is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. The default value for multiple copies is -1.
- WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. The default value for a single copy is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk. The default value for multiple copies is 1 day.
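To make the options above concrete, here is a minimal sketch (not part of the original page) that creates a database with a few of the new options set through the Go connector documented elsewhere in this commit. The DSN, database name, and option values are illustrative assumptions only.

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/taosdata/driver-go/v3/taosSql" // native-connection driver, registers the "taosSql" driver name
)

func main() {
	// Illustrative DSN; adjust user, password, and host for your deployment.
	db, err := sql.Open("taosSql", "root:taosdata@tcp(localhost:6030)/")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Example values only: a larger STT_TRIGGER, 8 KB TSDB pages,
	// and WAL files kept for one day (86400 s) for data subscription.
	_, err = db.Exec("CREATE DATABASE IF NOT EXISTS power " +
		"STT_TRIGGER 16 TSDB_PAGESIZE 8 WAL_RETENTION_PERIOD 86400")
	if err != nil {
		log.Fatal(err)
	}
}
```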
@@ -112,6 +116,10 @@ alter_database_options:
alter_database_option: {
    CACHEMODEL {'none' | 'last_row' | 'last_value' | 'both'}
  | CACHESIZE value
  | BUFFER value
  | PAGES value
  | REPLICA value
  | STT_TRIGGER value
  | WAL_LEVEL value
  | WAL_FSYNC_PERIOD value
  | KEEP value

@@ -154,3 +162,19 @@ TRIM DATABASE db_name;
```

The preceding SQL statement deletes data that has expired and orders the remaining data in accordance with the storage configuration.

## Redistribute Vgroup

```sql
REDISTRIBUTE VGROUP vgroup_no DNODE dnode_id1 [DNODE dnode_id2] [DNODE dnode_id3]
```

Adjust the distribution of vnodes in the vgroup according to the given list of dnodes.

## Balance Vgroup

```sql
BALANCE VGROUP
```

Automatically adjusts the distribution of vnodes in all vgroups of the cluster, which is equivalent to load balancing the data of the cluster at the vnode level.
@@ -350,9 +350,9 @@ SELECT AVG(CASE WHEN voltage < 200 or voltage > 250 THEN 220 ELSE voltage END) F

## JOIN

TDengine supports natural joins between supertables, between standard tables, and between subqueries. The difference between natural joins and inner joins is that natural joins require that the fields being joined in the supertables or standard tables must have the same name. Data or tag columns must be joined with the equivalent column in another table.
TDengine supports `INNER JOIN` based on the timestamp primary key, that is, the `JOIN` condition must contain the timestamp primary key. As long as the timestamp-primary-key requirement is met, `INNER JOIN` can be performed between normal tables, subtables, supertables and subqueries at will, and there is no limit on the number of tables.

For standard tables, only the timestamp (primary key) can be used in join operations. For example:
For standard tables:

```sql
SELECT *

@@ -360,7 +360,7 @@ FROM temp_tb_1 t1, pressure_tb_1 t2
WHERE t1.ts = t2.ts
```

For supertables, tags as well as timestamps can be used in join operations. For example:
For supertables:

```sql
SELECT *

@@ -368,21 +368,16 @@ FROM temp_stable t1, temp_stable t2
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
```

For a subtable and a supertable:

```sql
SELECT *
FROM temp_ctable t1, temp_stable t2
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
```

Similarly, join operations can be performed on the result sets of multiple subqueries.

:::note

The following restrictions apply to JOIN statements:

- The number of tables or supertables in a single join operation cannot exceed 10.
- `FILL` cannot be used in a JOIN statement.
- Arithmetic operations cannot be performed on the result sets of a join operation.
- `GROUP BY` is not allowed on only some of the tables that participate in a join operation.
- `OR` cannot be used in the conditions for a join operation.
- Join operations can be performed only on tags or timestamps. You cannot perform a join operation on data columns.

:::

## Nested Query

A nested query is also called a subquery. This means that in a single SQL statement the result of the inner query can be used as the data source of the outer query.
@@ -877,7 +877,7 @@ INTERP(expr)

- Interpolation is performed based on the `FILL` parameter.
- `INTERP` can only be used to interpolate within a single timeline, so it must be used with `partition by tbname` when it is used on a STable.
- Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points (supported since version 3.0.1.4).
- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by the interpolation algorithm (supported since version 3.0.2.1).
- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by the interpolation algorithm (supported since version 3.0.2.3).
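The following sketch (an illustrative assumption, not from the original page) shows the two pseudocolumns used together with `INTERP` from the Go connector; the supertable `meters`, the `current` column, the time range, and the DSN are placeholders.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"
	"time"

	_ "github.com/taosdata/driver-go/v3/taosSql"
)

func main() {
	db, err := sql.Open("taosSql", "root:taosdata@tcp(localhost:6030)/power")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// INTERP works on a single timeline, hence PARTITION BY tbname on a supertable.
	rows, err := db.Query(`SELECT _irowts, _isfilled, INTERP(current) FROM meters
		PARTITION BY tbname
		RANGE('2022-01-01 00:00:00', '2022-01-01 00:01:00') EVERY(1s) FILL(LINEAR)`)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		var ts time.Time   // _irowts: timestamp of the interpolation point
		var filled bool    // _isfilled: true if the row was generated by interpolation
		var val float64    // interpolated value
		if err := rows.Scan(&ts, &filled, &val); err != nil {
			log.Fatal(err)
		}
		fmt.Println(ts, filled, val)
	}
}
```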
### LAST
@@ -17,6 +17,7 @@ The following list shows all reserved keywords:

- ADD
- AFTER
- AGGREGATE
- ALIVE
- ALL
- ALTER
- ANALYZE
@@ -178,76 +178,138 @@ SHOW TABLE DISTRIBUTED table_name;

Shows how table data is distributed.

Examples: show table distributed d0\G; Display the block distribution of table `d0` in detailed format.
Examples: Below is an example of this command to display the block distribution of table `d0` in detailed format.

```sql
show table distributed d0\G;
```

<details>
<summary> Show Example </summary>
<pre><code>
*************************** 1.row ***************************
_block_dist: Total_Blocks=[5] Total_Size=[93.65 Kb] Average_size=[18.73 Kb] Compression_Ratio=[23.98 %]

Total_Blocks: Table `d0` contains a total of 5 blocks

Total_Size: The total size of all the data blocks in table `d0` is 93.65 KB

Average_size: The average size of each block is 18.73 KB

Compression_Ratio: The data compression rate is 23.98%

*************************** 2.row ***************************
_block_dist: Total_Rows=[20000] Inmem_Rows=[0] MinRows=[3616] MaxRows=[4096] Average_Rows=[4000]

Total_Rows: Table `d0` contains 20,000 rows

Inmem_Rows: The number of rows still in memory (not yet committed to disk) is 0, i.e. there are no such rows

MinRows: The minimum number of rows in a block is 3,616

MaxRows: The maximum number of rows in a block is 4,096

Average_Rows: The average number of rows in a block is 4,000

*************************** 3.row ***************************
_block_dist: Total_Tables=[1] Total_Files=[2]

Total_Tables: The number of child tables, 1 in this example

Total_Files: The number of files storing the table's data, 2 in this example

*************************** 4.row ***************************
_block_dist: --------------------------------------------------------------------------------
*************************** 5.row ***************************
_block_dist: 0100 |
*************************** 6.row ***************************
_block_dist: 0299 |
*************************** 7.row ***************************
_block_dist: 0498 |
*************************** 8.row ***************************
_block_dist: 0697 |
*************************** 9.row ***************************
_block_dist: 0896 |
*************************** 10.row ***************************
_block_dist: 1095 |
*************************** 11.row ***************************
_block_dist: 1294 |
*************************** 12.row ***************************
_block_dist: 1493 |
*************************** 13.row ***************************
_block_dist: 1692 |
*************************** 14.row ***************************
_block_dist: 1891 |
*************************** 15.row ***************************
_block_dist: 2090 |
*************************** 16.row ***************************
_block_dist: 2289 |
*************************** 17.row ***************************
_block_dist: 2488 |
*************************** 18.row ***************************
_block_dist: 2687 |
*************************** 19.row ***************************
_block_dist: 2886 |
*************************** 20.row ***************************
_block_dist: 3085 |
*************************** 21.row ***************************
_block_dist: 3284 |
*************************** 22.row ***************************
_block_dist: 3483 ||||||||||||||||| 1 (20.00%)
*************************** 23.row ***************************
_block_dist: 3682 |
*************************** 24.row ***************************
_block_dist: 3881 ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| 4 (80.00%)

Query OK, 24 row(s) in set (0.002444s)

The above shows the block distribution percentage according to the number of rows in each block. In the above example, `_block_dist: 3483 ||||||||||||||||| 1 (20.00%)` means there is one block whose row count is between 3,483 and 3,681. `_block_dist: 3881 ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| 4 (80.00%)` means there are 4 blocks whose row counts are between 3,881 and 4,096. The number of blocks whose row counts fall in other ranges is zero.
</code></pre>
</details>

The above shows the block distribution percentage according to the number of rows in each block. From the above example we can get the following information:
- `_block_dist: 3483 ||||||||||||||||| 1 (20.00%)` means there is one block whose row count is between 3,483 and 3,681.
- `_block_dist: 3881 ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| 4 (80.00%)` means there are 4 blocks whose row counts are between 3,881 and 4,096.
- The number of blocks whose row counts fall in other ranges is zero.

## SHOW TAGS
@@ -54,7 +54,6 @@ The following data types can be used in the schema for standard tables.
| 27 | GRANT | Added | Grants permissions to a user.
| 28 | KILL TRANSACTION | Added | Terminates an mnode transaction.
| 29 | KILL STREAM | Deprecated | Terminated a continuous query. The continuous query feature has been replaced with the stream processing feature.
| 30 | MERGE VGROUP | Added | Merges vgroups.
| 31 | REVOKE | Added | Revokes permissions from a user.
| 32 | SELECT | Modified | <ul><li>SELECT does not use the implicit results column. Output columns must be specified in the SELECT clause. </li><li>DISTINCT support is enhanced. In previous versions, DISTINCT only worked on the tag column and could not be used with JOIN or GROUP BY. </li><li>JOIN support is enhanced. The following are now supported after JOIN: a WHERE clause with OR, operations on multiple tables, and GROUP BY on multiple tables. </li><li>Subqueries after FROM are enhanced. Levels of nesting are no longer restricted. Subqueries can be used with UNION ALL. Other syntax restrictions are eliminated. </li><li>All scalar functions can be used after WHERE. </li><li>GROUP BY is enhanced. You can group by any scalar expression or combination thereof. </li><li>SESSION can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline. </li><li>STATE_WINDOW can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline. </li><li>ORDER BY is enhanced. It is no longer required to use ORDER BY and GROUP BY together. There is no longer a restriction on the number of order expressions. NULLS FIRST and NULLS LAST syntax has been added. Any expression that conforms to the ORDER BY semantics can be used. </li><li>Added PARTITION BY syntax. PARTITION BY replaces GROUP BY tags. </li></ul>
| 33 | SHOW ACCOUNTS | Deprecated | This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported."

@@ -76,8 +75,9 @@ The following data types can be used in the schema for standard tables.
| 49 | SHOW TRANSACTIONS | Added | Shows all running transactions in the system.
| 50 | SHOW DNODE VARIABLES | Added | Shows the configuration of the specified dnode.
| 51 | SHOW VNODES | Not supported | Shows information about vnodes in the system. Not supported.
| 52 | SPLIT VGROUP | Added | Splits a vgroup into two vgroups.
| 53 | TRIM DATABASE | Added | Deletes data that has expired and orders the remaining data in accordance with the storage configuration.
| 52 | TRIM DATABASE | Added | Deletes data that has expired and orders the remaining data in accordance with the storage configuration.
| 53 | REDISTRIBUTE VGROUP | Added | Adjust the distribution of VNODES in VGROUP.
| 54 | BALANCE VGROUP | Added | Auto adjust the distribution of VNODES in VGROUP.

## SQL Functions
@@ -355,26 +355,29 @@ The `af` package encapsulates TDengine advanced functions such as connection man

#### Subscribe

* `func NewConsumer(conf *Config) (*Consumer, error)`
* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)`

  Creates consumer group.

* `func (c *Consumer) Subscribe(topics []string) error`
* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error`
  Note: `rebalanceCb` is reserved for compatibility purpose

  Subscribes a topic.

* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error`
  Note: `rebalanceCb` is reserved for compatibility purpose

  Subscribes to topics.

* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)`
* `func (c *Consumer) Poll(timeoutMs int) tmq.Event`

  Polling information.

* `func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error`
* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)`
  Note: `tmq.TopicPartition` is reserved for compatibility purpose

  Commit information.

* `func (c *Consumer) FreeMessage(message unsafe.Pointer)`

  Free information.

* `func (c *Consumer) Unsubscribe() error`

  Unsubscribe.
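A condensed sketch of how these calls fit together, modeled on the example program updated elsewhere in this commit (`examples/tmq`); the topic name, group ID, and connection settings are placeholders.

```go
package main

import (
	"fmt"
	"os"

	"github.com/taosdata/driver-go/v3/af/tmq"
	tmqcommon "github.com/taosdata/driver-go/v3/common/tmq"
)

func main() {
	// Placeholder connection and consumer-group settings.
	consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
		"group.id":            "test",
		"auto.offset.reset":   "earliest",
		"td.connect.ip":       "127.0.0.1",
		"td.connect.user":     "root",
		"td.connect.pass":     "taosdata",
		"td.connect.port":     "6030",
		"msg.with.table.name": "true",
	})
	if err != nil {
		panic(err)
	}
	// Subscribe to a single topic; rebalanceCb is unused and may be nil.
	if err = consumer.Subscribe("example_tmq_topic", nil); err != nil {
		panic(err)
	}
	for i := 0; i < 5; i++ {
		ev := consumer.Poll(500) // timeout in milliseconds
		if ev == nil {
			continue
		}
		switch e := ev.(type) {
		case *tmqcommon.DataMessage:
			fmt.Println(e.Value())
		case tmqcommon.Error:
			fmt.Fprintf(os.Stderr, "poll error: %v\n", e)
		}
		if _, err = consumer.Commit(); err != nil {
			panic(err)
		}
	}
	_ = consumer.Unsubscribe()
	_ = consumer.Close()
}
```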
@@ -441,25 +444,36 @@ Close consumer.

### Subscribe via WebSocket

* `func NewConsumer(config *Config) (*Consumer, error)`
* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)`

  Creates consumer group.
  Creates consumer group.

* `func (c *Consumer) Subscribe(topic []string) error`
* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error`
  Note: `rebalanceCb` is reserved for compatibility purpose

  Subscribes to topics.
  Subscribes a topic.

* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)`
* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error`
  Note: `rebalanceCb` is reserved for compatibility purpose

  Polling information.
  Subscribes to topics.

* `func (c *Consumer) Commit(messageID uint64) error`
* `func (c *Consumer) Poll(timeoutMs int) tmq.Event`

  Commit information.
  Polling information.

* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)`
  Note: `tmq.TopicPartition` is reserved for compatibility purpose

  Commit information.

* `func (c *Consumer) Unsubscribe() error`

  Unsubscribe.

* `func (c *Consumer) Close() error`

  Close consumer.
  Close consumer.

For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go)
@@ -252,14 +252,14 @@ ws://localhost:6041/test

|Sample program |Sample program description |
|----------------|---------------------------|
| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/Query/Query.cs) | Table creation, data insertion, and query examples with TDengine.Connector |
| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/JSONTag) | Writing and querying JSON tag data with TDengine Connector |
| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/Stmt) | Parameter binding with TDengine Connector |
| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/schemaless) | Schemaless writes with TDengine Connector |
| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/AsyncQuery/QueryAsync.cs) | Asynchronous queries with TDengine Connector |
| [Subscription](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | Subscription example with TDengine Connector |
| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSample.cs) | WebSocket basic data in and out with TDengine connector |
| [WebSocket Parameter Binding](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSTMT.cs) | WebSocket parameter binding example |
| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/Query/Query.cs) | Table creation, data insertion, and query examples with TDengine.Connector |
| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/JSONTag) | Writing and querying JSON tag data with TDengine Connector |
| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/NET6Examples/Stmt) | Parameter binding with TDengine Connector |
| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/schemaless) | Schemaless writes with TDengine Connector |
| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/AsyncQuery/QueryAsync.cs) | Asynchronous queries with TDengine Connector |
| [Subscription](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/TMQ/TMQ.cs) | Subscription example with TDengine Connector |
| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/FrameWork45/WS/WebSocketSample.cs) | WebSocket basic data in and out with TDengine connector |
| [WebSocket Parameter Binding](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/FrameWork45/WS/WebSocketSTMT.cs) | WebSocket parameter binding example |

## Important update records
@ -21,6 +21,7 @@ taosAdapter provides the following features.
|
|||
- Seamless connection to collectd
|
||||
- Seamless connection to StatsD
|
||||
- Supports Prometheus remote_read and remote_write
|
||||
- Get table's VGroup ID
|
||||
|
||||
## taosAdapter architecture diagram
|
||||
|
||||
|
@ -178,6 +179,7 @@ See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/bl
|
|||
node_export is an exporter for machine metrics. Please visit [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) for more information.
|
||||
- Support for Prometheus remote_read and remote_write
|
||||
remote_read and remote_write are interfaces for Prometheus data read and write from/to other data storage solution. Please visit [https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) for more information.
|
||||
- Get table's VGroup ID. For more information about VGroup, please refer to [primary-logic-unit](/tdinternal/arch/#primary-logic-unit).
|
||||
|
||||
## Interfaces
|
||||
|
||||
|
@ -199,7 +201,7 @@ Support InfluxDB query parameters as follows.
|
|||
- `precision` The time precision used by TDengine
|
||||
- `u` TDengine user name
|
||||
- `p` TDengine password
|
||||
- `ttl` The time to live of automatically created sub-table. This value cannot be updated. TDengine will use the ttl value of the frist data of sub-table to create sub-table. For more information, please refer [Create Table](/taos-sql/table/#create-table)
|
||||
- `ttl` The time to live of automatically created sub-table. This value cannot be updated. TDengine will use the ttl value of the first data of sub-table to create sub-table. For more information, please refer [Create Table](/taos-sql/table/#create-table)
|
||||
|
||||
Note: InfluxDB token authorization is not supported at present. Only Basic authorization and query parameter validation are supported.
|
||||
Example: curl --request POST http://127.0.0.1:6041/influxdb/v1/write?db=test --user "root:taosdata" --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"
|
||||
|
@ -241,6 +243,10 @@ node_export is an exporter of hardware and OS metrics exposed by the \*NIX kerne
|
|||
|
||||
<Prometheus />
|
||||
|
||||
### Get table's VGroup ID
|
||||
|
||||
You can call `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>` to get table's VGroup ID. For more information about VGroup, please refer to [primary-logic-unit](/tdinternal/arch/#primary-logic-unit).
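A small sketch of calling this endpoint from Go (illustrative only; the FQDN, database, and table names are placeholders, and Basic authentication with the default credentials is assumed, as for the other REST interfaces).

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Placeholder host, database, and table names.
	req, err := http.NewRequest("GET", "http://localhost:6041/rest/vgid?db=power&table=d0", nil)
	if err != nil {
		log.Fatal(err)
	}
	req.SetBasicAuth("root", "taosdata") // default credentials; adjust as needed
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body)) // JSON response containing the table's vgroup ID
}
```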
## Memory usage optimization methods

taosAdapter will monitor its memory usage during operation and adjust it with two thresholds. Valid values are integers between 1 and 100, and represent a percentage of the system's physical memory.
@@ -92,7 +92,7 @@ taosBenchmark -f <json file>

</details>

## Command-line argument in detailed
## Command-line argument in detail

- **-f/--file <json file\>** :
  specify the configuration file to use. This file includes all parameters. Users should not use this parameter together with other parameters on the command line. There is no default value.

@@ -198,7 +198,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
- **-R/--disorder-range <timeRange\>** :
  Specify the timestamp range for the disordered data. The resulting disordered timestamp is the ordered timestamp minus a random value in this range. Valid only if the percentage of disordered data specified by `-O/--disorder` is greater than 0.

- **-F/--prepare_rand <Num\>** :
- **-F/--prepared_rand <Num\>** :
  Specify the number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000.

- **-a/--replica <replicaNum\>** :

@@ -216,7 +216,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
- **-? /--help** :
  Show help information and exit. Users should not use it with other parameters.

## Configuration file parameters in detailed
## Configuration file parameters in detail

### General configuration parameters

@@ -380,7 +380,7 @@ The configuration parameters for specifying super table tag columns and data col
- **num_of_records_per_req** :
  The number of rows written per request to TDengine. The default value is 30000. When it is set too large, the TDengine client driver will return the corresponding error message, so you need to lower the setting of this parameter to meet the writing requirements.

- **prepare_rand**: The number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000.
- **prepared_rand**: The number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000.

### Query scenario configuration parameters
@@ -142,6 +142,15 @@ The parameters described in this document by the effect that they have on the sy
| Meaning | Switch for allowing TDengine to collect and report service usage information |
| Value Range | 0: Not allowed; 1: Allowed |
| Default Value | 1 |

### crashReporting

| Attribute | Description |
| -------- | -------------------------------------------- |
| Applicable | Server Only |
| Meaning | Switch for allowing TDengine to collect and report crash-related information |
| Value Range | 0: Not allowed; 1: Allowed |
| Default Value | 1 |

## Query Parameters
@@ -10,6 +10,14 @@ For TDengine 2.x installation packages by version, please visit [here](https://w

import Release from "/components/ReleaseV3";

## 3.0.2.3

<Release type="tdengine" version="3.0.2.3" />

## 3.0.2.2

<Release type="tdengine" version="3.0.2.2" />

## 3.0.2.1

<Release type="tdengine" version="3.0.2.1" />
@@ -10,6 +10,14 @@ For other historical version installers, please visit [here](https://www.taosdat

import Release from "/components/ReleaseV3";

## 2.4.1

<Release type="tools" version="2.4.1" />

## 2.4.0

<Release type="tools" version="2.4.0" />

## 2.3.3

<Release type="tools" version="2.3.3" />
@@ -42,7 +42,7 @@ namespace TDengineExample

// 5. execute
res = TDengine.StmtExecute(stmt);
CheckStmtRes(res, "faild to execute");
CheckStmtRes(res, "failed to execute");

// 6. free
TaosMultiBind.FreeTaosBind(tags);

@@ -92,7 +92,7 @@ namespace TDengineExample
int code = TDengine.StmtClose(stmt);
if (code != 0)
{
    throw new Exception($"falied to close stmt, {code} reason: {TDengine.StmtErrorStr(stmt)} ");
    throw new Exception($"failed to close stmt, {code} reason: {TDengine.StmtErrorStr(stmt)} ");
}
}
}
@ -2,5 +2,5 @@ module goexample
|
|||
|
||||
go 1.17
|
||||
|
||||
require github.com/taosdata/driver-go/v3 3.0
|
||||
require github.com/taosdata/driver-go/v3 3.1.0
|
||||
|
||||
|
|
@@ -1,17 +1,12 @@
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"strconv"
	"time"
	"os"

	"github.com/taosdata/driver-go/v3/af"
	"github.com/taosdata/driver-go/v3/af/tmq"
	"github.com/taosdata/driver-go/v3/common"
	"github.com/taosdata/driver-go/v3/errors"
	"github.com/taosdata/driver-go/v3/wrapper"
	tmqcommon "github.com/taosdata/driver-go/v3/common/tmq"
)

func main() {

@@ -28,55 +23,26 @@ func main() {
	if err != nil {
		panic(err)
	}
	config := tmq.NewConfig()
	defer config.Destroy()
	err = config.SetGroupID("test")
	if err != nil {
		panic(err)
	}
	err = config.SetAutoOffsetReset("earliest")
	if err != nil {
		panic(err)
	}
	err = config.SetConnectIP("127.0.0.1")
	if err != nil {
		panic(err)
	}
	err = config.SetConnectUser("root")
	if err != nil {
		panic(err)
	}
	err = config.SetConnectPass("taosdata")
	if err != nil {
		panic(err)
	}
	err = config.SetConnectPort("6030")
	if err != nil {
		panic(err)
	}
	err = config.SetMsgWithTableName(true)
	if err != nil {
		panic(err)
	}
	err = config.EnableHeartBeat()
	if err != nil {
		panic(err)
	}
	err = config.EnableAutoCommit(func(result *wrapper.TMQCommitCallbackResult) {
		if result.ErrCode != 0 {
			errStr := wrapper.TMQErr2Str(result.ErrCode)
			err := errors.NewError(int(result.ErrCode), errStr)
			panic(err)
		}
	consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
		"group.id":                     "test",
		"auto.offset.reset":            "earliest",
		"td.connect.ip":                "127.0.0.1",
		"td.connect.user":              "root",
		"td.connect.pass":              "taosdata",
		"td.connect.port":              "6030",
		"client.id":                    "test_tmq_client",
		"enable.auto.commit":           "false",
		"enable.heartbeat.background":  "true",
		"experimental.snapshot.enable": "true",
		"msg.with.table.name":          "true",
	})
	if err != nil {
		panic(err)
	}
	consumer, err := tmq.NewConsumer(config)
	if err != nil {
		panic(err)
	}
	err = consumer.Subscribe([]string{"example_tmq_topic"})
	err = consumer.Subscribe("example_tmq_topic", nil)
	if err != nil {
		panic(err)
	}

@@ -88,19 +54,25 @@ func main() {
	if err != nil {
		panic(err)
	}
	for {
		result, err := consumer.Poll(time.Second)
		if err != nil {
			panic(err)
	for i := 0; i < 5; i++ {
		ev := consumer.Poll(0)
		if ev != nil {
			switch e := ev.(type) {
			case *tmqcommon.DataMessage:
				fmt.Println(e.String())
			case tmqcommon.Error:
				fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e)
				panic(e)
			}
			consumer.Commit()
		}
		if result.Type != common.TMQ_RES_DATA {
			panic("want message type 1 got " + strconv.Itoa(int(result.Type)))
		}
		data, _ := json.Marshal(result.Data)
		fmt.Println(string(data))
		consumer.Commit(context.Background(), result.Message)
		consumer.FreeMessage(result.Message)
		break
	}
	consumer.Close()
	err = consumer.Unsubscribe()
	if err != nil {
		panic(err)
	}
	err = consumer.Close()
	if err != nil {
		panic(err)
	}
}
@@ -15,10 +15,10 @@ cursor.execute("CREATE DATABASE power")
cursor.execute("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)")

# insert data
cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
    power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
    power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
    power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""")
cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
    power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
    power.d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
    power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""")
print("inserted row count:", cursor.rowcount)

# query data
@ -25,10 +25,10 @@ def create_stable(conn: taos.TaosConnection):
|
|||
|
||||
|
||||
# The generated SQL is:
|
||||
# INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
|
||||
# d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
|
||||
# d1003 USING meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
|
||||
# d1004 USING meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)
|
||||
# INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
|
||||
# d1002 USING meters TAGS('California.SanFrancisco', 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
|
||||
# d1003 USING meters TAGS('California.LosAngeles', 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
|
||||
# d1004 USING meters TAGS('California.LosAngeles', 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)
|
||||
|
||||
def get_sql():
|
||||
global lines
|
||||
|
|
@@ -35,13 +35,13 @@ The TDengine knowledge map covers all TDengine knowledge points and reveals how the conc

<table width="100%">
<tr align="center">
<td style={{padding:'1em 3em',border:0}}><img src={xiaot} alt="Xiao T's QR code" width="200" /></td>
<td style={{padding:'1em 3em',border:0}}><img src={xiaot-new} alt="Xiao T's QR code" width="200" /></td>
<td style={{padding:'1em 3em',border:0}}><img src={channel} alt="TDengine WeChat video channel" width="200" /></td>
<td style={{padding:'1em 3em',border:0}}><img src={official_account} alt="TDengine WeChat official account" width="200" /></td>
</tr>
<tr align="center">
<td style={{padding:'1em 3em',border:0}}>Join the "IoT Big Data cutting-edge technology group"<br/>to discuss technology with the community</td>
<td style={{padding:'1em 3em',border:0}}>Follow the TDengine WeChat video channel<br/>to watch live technical streams and tutorial videos</td>
<td style={{padding:'1em 3em',border:0}}>Follow the TDengine WeChat official account<br/>to read articles on core technology and industry cases</td>
<td style={{padding:'1em 3em',border:0}}>Join the "IoT Big Data technology group"<br/>to discuss technology with the community</td>
<td style={{padding:'1em 3em',border:0}}>Follow the TDengine video channel<br/>to watch live technical streams and tutorial videos</td>
<td style={{padding:'1em 3em',border:0}}>Follow the TDengine official account<br/>to read technical articles and industry cases</td>
</tr>
</table>
(A new binary image file, 112 KiB, is added by this commit; its content is not shown.)
|
@ -115,19 +115,22 @@ class TaosConsumer():
|
|||
<TabItem label="Go" value="Go">
|
||||
|
||||
```go
|
||||
func NewConsumer(conf *Config) (*Consumer, error)
|
||||
func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)
|
||||
|
||||
func (c *Consumer) Close() error
|
||||
// 出于兼容目的保留 rebalanceCb 参数,当前未使用
|
||||
func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error
|
||||
|
||||
func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error
|
||||
// 出于兼容目的保留 rebalanceCb 参数,当前未使用
|
||||
func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error
|
||||
|
||||
func (c *Consumer) FreeMessage(message unsafe.Pointer)
|
||||
func (c *Consumer) Poll(timeoutMs int) tmq.Event
|
||||
|
||||
func (c *Consumer) Poll(timeout time.Duration) (*Result, error)
|
||||
|
||||
func (c *Consumer) Subscribe(topics []string) error
|
||||
// 出于兼容目的保留 tmq.TopicPartition 参数,当前未使用
|
||||
func (c *Consumer) Commit() ([]tmq.TopicPartition, error)
|
||||
|
||||
func (c *Consumer) Unsubscribe() error
|
||||
|
||||
func (c *Consumer) Close() error
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
@ -355,50 +358,20 @@ public class MetersDeserializer extends ReferenceDeserializer<Meters> {
|
|||
<TabItem label="Go" value="Go">
|
||||
|
||||
```go
|
||||
config := tmq.NewConfig()
|
||||
defer config.Destroy()
|
||||
err = config.SetGroupID("test")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = config.SetAutoOffsetReset("earliest")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = config.SetConnectIP("127.0.0.1")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = config.SetConnectUser("root")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = config.SetConnectPass("taosdata")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = config.SetConnectPort("6030")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = config.SetMsgWithTableName(true)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = config.EnableHeartBeat()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = config.EnableAutoCommit(func(result *wrapper.TMQCommitCallbackResult) {
|
||||
if result.ErrCode != 0 {
|
||||
errStr := wrapper.TMQErr2Str(result.ErrCode)
|
||||
err := errors.NewError(int(result.ErrCode), errStr)
|
||||
panic(err)
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
conf := &tmq.ConfigMap{
|
||||
"group.id": "test",
|
||||
"auto.offset.reset": "earliest",
|
||||
"td.connect.ip": "127.0.0.1",
|
||||
"td.connect.user": "root",
|
||||
"td.connect.pass": "taosdata",
|
||||
"td.connect.port": "6030",
|
||||
"client.id": "test_tmq_c",
|
||||
"enable.auto.commit": "false",
|
||||
"enable.heartbeat.background": "true",
|
||||
"experimental.snapshot.enable": "true",
|
||||
"msg.with.table.name": "true",
|
||||
}
|
||||
consumer, err := NewConsumer(conf)
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
@ -532,11 +505,7 @@ consumer.subscribe(topics);
|
|||
<TabItem value="Go" label="Go">
|
||||
|
||||
```go
|
||||
consumer, err := tmq.NewConsumer(config)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = consumer.Subscribe([]string{"example_tmq_topic"})
|
||||
err = consumer.Subscribe("example_tmq_topic", nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -620,13 +589,17 @@ while(running){
|
|||
|
||||
```go
|
||||
for {
|
||||
result, err := consumer.Poll(time.Second)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
ev := consumer.Poll(0)
|
||||
if ev != nil {
|
||||
switch e := ev.(type) {
|
||||
case *tmqcommon.DataMessage:
|
||||
fmt.Println(e.Value())
|
||||
case tmqcommon.Error:
|
||||
fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e)
|
||||
panic(e)
|
||||
}
|
||||
consumer.Commit()
|
||||
}
|
||||
fmt.Println(result)
|
||||
consumer.Commit(context.Background(), result.Message)
|
||||
consumer.FreeMessage(result.Message)
|
||||
}
|
||||
```
|
||||
|
||||
|
@ -738,7 +711,11 @@ consumer.close();
|
|||
<TabItem value="Go" label="Go">
|
||||
|
||||
```go
|
||||
consumer.Close()
|
||||
/* Unsubscribe */
|
||||
_ = consumer.Unsubscribe()
|
||||
|
||||
/* Close consumer */
|
||||
_ = consumer.Close()
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
|
|
@ -15,7 +15,7 @@ import GoOpenTSDBTelnet from "../07-develop/03-insert-data/_go_opts_telnet.mdx"
|
|||
import GoOpenTSDBJson from "../07-develop/03-insert-data/_go_opts_json.mdx"
|
||||
import GoQuery from "../07-develop/04-query-data/_go.mdx"
|
||||
|
||||
`driver-go` 是 TDengine 的官方 Go 语言连接器,实现了 Go 语言[ database/sql ](https://golang.org/pkg/database/sql/) 包的接口。Go 开发人员可以通过它开发存取 TDengine 集群数据的应用软件。
|
||||
`driver-go` 是 TDengine 的官方 Go 语言连接器,实现了 Go 语言 [database/sql](https://golang.org/pkg/database/sql/) 包的接口。Go 开发人员可以通过它开发存取 TDengine 集群数据的应用软件。
|
||||
|
||||
`driver-go` 提供两种建立连接的方式。一种是**原生连接**,它通过 TDengine 客户端驱动程序(taosc)原生连接 TDengine 运行实例,支持数据写入、查询、订阅、schemaless 接口和参数绑定接口等功能。另外一种是 **REST 连接**,它通过 taosAdapter 提供的 REST 接口连接 TDengine 运行实例。REST 连接实现的功能特性集合和原生连接有少量不同。
|
||||
|
||||
|
@ -112,6 +112,7 @@ REST 连接支持所有能运行 Go 的平台。
|
|||
```text
|
||||
username:password@protocol(address)/dbname?param=value
|
||||
```
|
||||
|
||||
### 使用连接器进行连接
|
||||
|
||||
<Tabs defaultValue="rest">
|
||||
|
@ -176,6 +177,7 @@ func main() {
|
|||
}
|
||||
}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="WebSocket" label="WebSocket 连接">
|
||||
|
||||
|
@ -207,6 +209,7 @@ func main() {
|
|||
}
|
||||
}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
|
@ -357,33 +360,32 @@ func main() {
|
|||
|
||||
#### 订阅
|
||||
|
||||
* `func NewConsumer(conf *Config) (*Consumer, error)`
|
||||
* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)`
|
||||
|
||||
创建消费者。
|
||||
创建消费者。
|
||||
|
||||
* `func (c *Consumer) Subscribe(topics []string) error`
|
||||
* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error`
|
||||
注意:出于兼容目的保留 `rebalanceCb` 参数,当前未使用
|
||||
|
||||
订阅主题。
|
||||
订阅单个主题。
|
||||
|
||||
* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)`
|
||||
* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error`
|
||||
注意:出于兼容目的保留 `rebalanceCb` 参数,当前未使用
|
||||
|
||||
轮询消息。
|
||||
订阅主题。
|
||||
|
||||
* `func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error`
|
||||
* `func (c *Consumer) Poll(timeoutMs int) tmq.Event`
|
||||
|
||||
提交消息。
|
||||
轮询消息。
|
||||
|
||||
* `func (c *Consumer) FreeMessage(message unsafe.Pointer)`
|
||||
* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)`
|
||||
注意:出于兼容目的保留 `tmq.TopicPartition` 参数,当前未使用
|
||||
|
||||
释放消息。
|
||||
|
||||
* `func (c *Consumer) Unsubscribe() error`
|
||||
|
||||
取消订阅。
|
||||
提交消息。
|
||||
|
||||
* `func (c *Consumer) Close() error`
|
||||
|
||||
关闭消费者。
|
||||
关闭连接。
|
||||
|
||||
#### schemaless
|
||||
|
||||
|
@ -443,25 +445,32 @@ func main() {
|
|||
|
||||
### 通过 WebSocket 订阅
|
||||
|
||||
* `func NewConsumer(config *Config) (*Consumer, error)`
|
||||
* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)`
|
||||
|
||||
创建消费者。
|
||||
创建消费者。
|
||||
|
||||
* `func (c *Consumer) Subscribe(topic []string) error`
|
||||
* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error`
|
||||
注意:出于兼容目的保留 `rebalanceCb` 参数,当前未使用
|
||||
|
||||
订阅主题。
|
||||
订阅单个主题。
|
||||
|
||||
* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)`
|
||||
* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error`
|
||||
注意:出于兼容目的保留 `rebalanceCb` 参数,当前未使用
|
||||
|
||||
轮询消息。
|
||||
订阅主题。
|
||||
|
||||
* `func (c *Consumer) Commit(messageID uint64) error`
|
||||
* `func (c *Consumer) Poll(timeoutMs int) tmq.Event`
|
||||
|
||||
提交消息。
|
||||
轮询消息。
|
||||
|
||||
* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)`
|
||||
注意:出于兼容目的保留 `tmq.TopicPartition` 参数,当前未使用
|
||||
|
||||
提交消息。
|
||||
|
||||
* `func (c *Consumer) Close() error`
|
||||
|
||||
关闭消费者。
|
||||
关闭连接。
|
||||
|
||||
完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go)
|
||||
|
||||
|
|
|
@ -253,14 +253,14 @@ namespace TDengineExample
|
|||
|
||||
|示例程序 | 示例程序描述 |
|
||||
|--------------------------------------------------------------------------------------------------------------------|--------------------------------------------|
|
||||
| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/Query/Query.cs) | 使用 TDengine.Connector 实现的建表、插入、查询示例 |
|
||||
| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/JSONTag) | 使用 TDengine.Connector 实现的写入和查询 JSON tag 类型数据的示例 |
|
||||
| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/Stmt) | 使用 TDengine.Connector 实现的参数绑定插入和查询的示例 |
|
||||
| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/schemaless) | 使用 TDengine.Connector 实现的使用 schemaless 写入的示例 |
|
||||
| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/AsyncQuery/QueryAsync.cs) | 使用 TDengine.Connector 实现的异步查询的示例 |
|
||||
| [数据订阅(TMQ)](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | 使用 TDengine.Connector 实现的订阅数据的示例 |
|
||||
| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSample.cs) | 使用 TDengine.Connector 的 WebSocket 基本的示例 |
|
||||
| [Basic WebSocket STMT](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSTMT.cs) | 使用 TDengine.Connector 的 WebSocket STMT 基本的示例 |
|
||||
| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/Query/Query.cs) | 使用 TDengine.Connector 实现的建表、插入、查询示例 |
|
||||
| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/JSONTag) | 使用 TDengine.Connector 实现的写入和查询 JSON tag 类型数据的示例 |
|
||||
| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/NET6Examples/Stmt) | 使用 TDengine.Connector 实现的参数绑定插入和查询的示例 |
|
||||
| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/schemaless) | 使用 TDengine.Connector 实现的使用 schemaless 写入的示例 |
|
||||
| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/AsyncQuery/QueryAsync.cs) | 使用 TDengine.Connector 实现的异步查询的示例 |
|
||||
| [数据订阅(TMQ)](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/TMQ/TMQ.cs) | 使用 TDengine.Connector 实现的订阅数据的示例 |
|
||||
| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/FrameWork45/WS/WebSocketSample.cs) | 使用 TDengine.Connector 的 WebSocket 基本的示例 |
|
||||
| [Basic WebSocket STMT](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/FrameWork45/WS/WebSocketSTMT.cs) | 使用 TDengine.Connector 的 WebSocket STMT 基本的示例 |
|
||||
|
||||
## 重要更新记录
|
||||
|
||||
|
|
|
@ -30,8 +30,10 @@ database_option: {
|
|||
| WAL_LEVEL {1 | 2}
|
||||
| VGROUPS value
|
||||
| SINGLE_STABLE {0 | 1}
|
||||
| STT_TRIGGER value
|
||||
| TABLE_PREFIX value
|
||||
| TABLE_SUFFIX value
|
||||
| TSDB_PAGESIZE value
|
||||
| WAL_RETENTION_PERIOD value
|
||||
| WAL_ROLL_PERIOD value
|
||||
| WAL_RETENTION_SIZE value
|
||||
|
@ -69,8 +71,10 @@ database_option: {
- SINGLE_STABLE: whether only a single supertable may be created in this database, intended for supertables with a very large number of columns.
  - 0: multiple supertables may be created.
  - 1: only one supertable may be created.
- STT_TRIGGER: the number of on-disk files that triggers a file merge. The default is 1 and the range is 1 to 16. For workloads with few tables and frequent writes, the default or a small value is recommended; for many tables with infrequent writes, a larger value is recommended.
- TABLE_PREFIX: the length of the table-name prefix that the storage engine ignores when assigning a table's data to a VNODE.
- TABLE_SUFFIX: the length of the table-name suffix that the storage engine ignores when assigning a table's data to a VNODE.
- TSDB_PAGESIZE: the page size of the time-series storage engine in a VNODE, in KB. The default is 4 KB; the range is 1 to 16384, i.e. 1 KB to 16 MB.
- WAL_RETENTION_PERIOD: extra retention policy for WAL files, used by data subscription. Retention time in seconds. For a single replica the default is 0, meaning the WAL is deleted as soon as the data is flushed to disk; -1 means never delete. For multiple replicas the default is 4 days.
- WAL_RETENTION_SIZE: extra retention policy for WAL files, used by data subscription. Upper limit on the retained WAL size, in KB. For a single replica the default is 0, meaning the WAL is deleted right after the data is flushed; for multiple replicas the default is -1, meaning never delete.
- WAL_ROLL_PERIOD: the WAL file rotation period in seconds. Once a WAL file has been created and written to for this long, a new WAL file is created automatically. For a single replica the default is 0, meaning a new file is created only when data is flushed; for multiple replicas the default is 1 day.
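For illustration only — the database name and the option values below are invented, not taken from this document — several of these options can be combined in one CREATE DATABASE statement:

```sql
-- hypothetical example: a single-replica database whose WAL is kept long enough for subscribers
CREATE DATABASE power
  VGROUPS 4
  SINGLE_STABLE 0
  STT_TRIGGER 1
  TSDB_PAGESIZE 4
  WAL_RETENTION_PERIOD 86400
  WAL_RETENTION_SIZE 0;
```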
@ -112,6 +116,10 @@ alter_database_options:
alter_database_option: {
    CACHEMODEL {'none' | 'last_row' | 'last_value' | 'both'}
  | CACHESIZE value
  | BUFFER value
  | PAGES value
  | REPLICA value
  | STT_TRIGGER value
  | WAL_LEVEL value
  | WAL_FSYNC_PERIOD value
  | KEEP value
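As a sketch only (the database name and values are invented), a few of these options might be adjusted on an existing database like this:

```sql
ALTER DATABASE power WAL_FSYNC_PERIOD 3000;  -- flush the WAL every 3000 ms
ALTER DATABASE power KEEP 3650;              -- retain data for about ten years
ALTER DATABASE power STT_TRIGGER 8;          -- merge files less eagerly for a many-table workload
```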
@ -154,3 +162,19 @@ TRIM DATABASE db_name;
```

Deletes expired data and reorganizes the remaining data according to the multi-level storage configuration.

## Redistributing VNODEs within a VGROUP

```sql
REDISTRIBUTE VGROUP vgroup_no DNODE dnode_id1 [DNODE dnode_id2] [DNODE dnode_id3]
```

Adjusts the distribution of VNODEs in a VGROUP according to the given list of dnodes. Because the maximum number of replicas is 3, at most 3 dnodes can be specified.

## Automatically Balancing VNODEs across VGROUPs

```sql
BALANCE VGROUP
```

Automatically adjusts the VNODE distribution of all VGROUPs in the cluster, which amounts to load balancing the cluster's data at the VNODE level.
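For illustration only — the vgroup number and dnode IDs below are invented — the two statements might be used together like this:

```sql
REDISTRIBUTE VGROUP 5 DNODE 1 DNODE 2 DNODE 3;  -- pin the three replicas of vgroup 5 to dnodes 1, 2 and 3
BALANCE VGROUP;                                 -- then rebalance the remaining vnodes across the cluster
```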
@ -350,9 +350,9 @@ SELECT AVG(CASE WHEN voltage < 200 or voltage > 250 THEN 220 ELSE voltage END) F

## JOIN Clause

TDengine supports natural joins between regular tables, between supertables, and between subqueries. The main difference between a natural join and an inner join is that a natural join requires the joined fields to have the same name in the different tables/supertables; in other words, when expressing a join relation TDengine requires equality between identically named data columns or tag columns.
TDengine supports inner joins based on the timestamp primary key, i.e. the JOIN condition must include the timestamp primary key. As long as this requirement is met, regular tables, subtables, supertables, and subqueries can be inner-joined with one another freely, with no limit on the number of tables.

In a JOIN between two regular tables, only equality between the primary-key timestamps can be used. For example:
A JOIN between two regular tables:

```sql
SELECT *
@ -360,7 +360,7 @@ FROM temp_tb_1 t1, pressure_tb_1 t2
WHERE t1.ts = t2.ts
```

In a JOIN between two supertables, in addition to the primary-key timestamps matching, equality of tag columns that establish a one-to-one correspondence is also required. For example:
A JOIN between two supertables:

```sql
SELECT *
@ -368,21 +368,16 @@ FROM temp_stable t1, temp_stable t2
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
```

A JOIN between a subtable and a supertable:

```sql
SELECT *
FROM temp_ctable t1, temp_stable t2
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
```

Similarly, JOIN operations can also be performed on the results of multiple subqueries.
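A minimal sketch of such a join between two subqueries, reusing the hypothetical tables above (the column names temperature and pressure are assumptions, not taken from this document):

```sql
SELECT *
FROM (SELECT ts, temperature FROM temp_tb_1)     t1,
     (SELECT ts, pressure    FROM pressure_tb_1) t2
WHERE t1.ts = t2.ts;
```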
:::note

JOIN statements are subject to the following restrictions:

- At most 10 tables/supertables can take part in the JOIN operations of a single statement.
- FILL is not supported in a query statement that contains a JOIN.
- Arithmetic across the aggregated results of the joined tables is not yet supported.
- GROUP BY over only some of the joined tables is not supported.
- Filter conditions on different tables of a JOIN query cannot be combined with OR.
- The join condition cannot use regular columns; only tags and the primary timestamp column (the first column) are allowed.

:::

## Nested Queries

A "nested query", also called a "subquery", means that within a single SQL statement the result of an "inner query" can be used as an input object by the "outer query".
@ -880,7 +880,7 @@ INTERP(expr)
- INTERP uses the FILL clause to decide how to interpolate at each moment that qualifies for output.
- INTERP can interpolate only within a single time series, so when applied to a supertable it must be used together with partition by tbname.
- INTERP can be used with the pseudocolumn _irowts to return the timestamp of each interpolated point (supported since version 3.0.1.4).
- INTERP can be used with the pseudocolumn _isfilled to indicate whether a returned row is an original record or was produced by interpolation (supported since version 3.0.2.1).
- INTERP can be used with the pseudocolumn _isfilled to indicate whether a returned row is an original record or was produced by interpolation (supported since version 3.0.2.3).
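A hedged sketch of INTERP used with these pseudocolumns; the table name t1, the column current, and the time range are assumptions made for this example:

```sql
SELECT _irowts, _isfilled, INTERP(current)
FROM t1
RANGE('2023-01-01 00:00:00', '2023-01-01 04:00:00')
EVERY(30m)
FILL(LINEAR);   -- one row every 30 minutes; _isfilled marks the interpolated ones
```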
### LAST
@ -18,6 +18,7 @@ description: TDengine 保留关键字的详细列表
|
|||
- ADD
|
||||
- AFTER
|
||||
- AGGREGATE
|
||||
- ALIVE
|
||||
- ALL
|
||||
- ALTER
|
||||
- ANALYZE
|
||||
|
|
|
@ -183,6 +183,10 @@ SHOW TABLE DISTRIBUTED table_name;
|
|||
|
||||
Statement: show table distributed d0\G; displays the BLOCK distribution of table d0 in expanded (vertical) form.
|
||||
|
||||
<details>
|
||||
<summary>显示示例</summary>
|
||||
<pre><code>
|
||||
|
||||
*************************** 1.row ***************************
|
||||
|
||||
_block_dist: Total_Blocks=[5] Total_Size=[93.65 Kb] Average_size=[18.73 Kb] Compression_Ratio=[23.98 %]
|
||||
|
@ -244,6 +248,8 @@ _block_dist: 3881 ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|
|||
|
||||
Query OK, 24 row(s) in set (0.002444s)
|
||||
|
||||
</code></pre>
|
||||
</details>
|
||||
|
||||
The figure above shows how blocks are distributed by the number of rows they contain. The values 0100, 0299, 0498, ... are the per-block row counts, so it means: of this table's 5 blocks, 1 block (20% of all blocks) holds between 3483 and 3681 rows, 4 blocks (80%) hold between 3881 and 4096 rows (the maximum), and no blocks fall in the other ranges.
|
||||
|
||||
|
|
|
@ -54,7 +54,6 @@ description: "TDengine 3.0 版本的语法变更说明"
|
|||
| 27 | GRANT | 新增 | 授予用户权限。
|
||||
| 28 | KILL TRANSACTION | 新增 | 终止管理节点的事务。
|
||||
| 29 | KILL STREAM | 废除 | 终止连续查询。3.0版本不再支持连续查询,而是用更通用的流计算来代替。
|
||||
| 30 | MERGE VGROUP | 新增 | 合并VGROUP。
|
||||
| 31 | REVOKE | 新增 | 回收用户权限。
|
||||
| 32 | SELECT | 调整 | <ul><li>SELECT关闭隐式结果列,输出列均需要由SELECT子句来指定。</li><li>DISTINCT功能全面支持。2.x版本只支持对标签列去重,并且不可以和JOIN、GROUP BY等子句混用。</li><li>JOIN功能增强。增加支持:JOIN后WHERE条件中有OR条件;JOIN后的多表运算;JOIN后的多表GROUP BY。</li><li>FROM后子查询功能大幅增强。不限制子查询嵌套层数;支持子查询和UNION ALL混合使用;移除其他一些之前版本的语法限制。</li><li>WHERE后可以使用任意的标量表达式。</li><li>GROUP BY功能增强。支持任意标量表达式及其组合的分组。</li><li>SESSION可以用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。</li><li>STATE_WINDOW可以用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。</li><li>ORDER BY功能大幅增强。不再必须和GROUP BY子句一起使用;不再有排序表达式个数的限制;增加支持NULLS FIRST/LAST语法功能;支持符合语法语义的任意表达式。</li><li>新增PARTITION BY语法。替代原来的GROUP BY tags。</li></ul>
|
||||
| 33 | SHOW ACCOUNTS | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
|
||||
|
@ -76,8 +75,9 @@ description: "TDengine 3.0 版本的语法变更说明"
|
|||
| 49 | SHOW TRANSACTIONS | 新增 | 显示当前系统中正在执行的事务的信息。
|
||||
| 50 | SHOW DNODE VARIABLES | 新增 |显示指定DNODE的配置参数。
|
||||
| 51 | SHOW VNODES | 暂不支持 | 显示当前系统中VNODE的信息。3.0.0版本暂不支持。
|
||||
| 52 | SPLIT VGROUP | 新增 | 拆分VGROUP。
|
||||
| 53 | TRIM DATABASE | 新增 | 删除过期数据,并根据多级存储的配置归整数据。
|
||||
| 52 | TRIM DATABASE | 新增 | 删除过期数据,并根据多级存储的配置归整数据。
|
||||
| 53 | REDISTRIBUTE VGROUP | 新增 | 调整VGROUP中VNODE的分布。
|
||||
| 54 | BALANCE VGROUP | 新增 | 自动调整VGROUP中VNODE的分布。
|
||||
|
||||
## SQL 函数变更
|
||||
|
||||
|
@ -94,6 +94,7 @@ description: "TDengine 3.0 版本的语法变更说明"
|
|||
| 9 | SAMPLE | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
|
||||
| 10 | STATECOUNT | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
|
||||
| 11 | STATEDURATION | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
|
||||
| 12 | TIMETRUNCATE | 增强 | 增加ignore_timezone参数,可选是否使用,默认值为1.
|
||||
|
||||
|
||||
## SCHEMALESS 变更
|
||||
|
|
|
@ -21,6 +21,7 @@ taosAdapter 提供以下功能:
|
|||
- 无缝连接到 collectd
|
||||
- 无缝连接到 StatsD
|
||||
- 支持 Prometheus remote_read 和 remote_write
|
||||
- 获取 table 所在的虚拟节点组(VGroup)的 VGroup ID
|
||||
|
||||
## taosAdapter 架构图
|
||||
|
||||
|
@ -178,6 +179,7 @@ AllowWebSockets
|
|||
node_export 是一个机器指标的导出器。请访问 [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) 了解更多信息。
|
||||
- 支持 Prometheus remote_read 和 remote_write
|
||||
remote_read 和 remote_write 是 Prometheus 数据读写分离的集群方案。请访问[https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) 了解更多信息。
|
||||
- 获取 table 所在的虚拟节点组(VGroup)的 VGroup ID。关于虚拟节点组(VGroup)的更多信息,请访问[整体架构文档](/tdinternal/arch/#主要逻辑单元) 。
|
||||
|
||||
## 接口
|
||||
|
||||
|
@ -240,6 +242,10 @@ Prometheus 使用的由 \*NIX 内核暴露的硬件和操作系统指标的输
|
|||
|
||||
<Prometheus />
|
||||
|
||||
### 获取 table 的 VGroup ID
|
||||
|
||||
可以访问 http 接口 `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>` 获取 table 的 VGroup ID。关于虚拟节点组(VGroup)的更多信息,请访问[整体架构文档](/tdinternal/arch/#主要逻辑单元) 。
|
||||
|
||||
## 内存使用优化方法
|
||||
|
||||
taosAdapter 将监测自身运行过程中内存使用率并通过两个阈值进行调节。有效值范围为 -1 到 100 的整数,单位为系统物理内存的百分比。
|
||||
|
@ -282,7 +288,7 @@ http 返回内容:
|
|||
|
||||
## taosAdapter 监控指标
|
||||
|
||||
taosAdapter 采集 http 相关指标、cpu 百分比和内存百分比。
|
||||
taosAdapter 采集 http 相关指标、CPU 百分比和内存百分比。
|
||||
|
||||
### http 接口
|
||||
|
||||
|
@ -294,13 +300,13 @@ http://<fqdn>:6041/metrics
|
|||
|
||||
### 写入 TDengine
|
||||
|
||||
taosAdapter 支持将 http 监控、cpu 百分比和内存百分比写入 TDengine。
|
||||
taosAdapter 支持将 http 监控、CPU 百分比和内存百分比写入 TDengine。
|
||||
|
||||
有关配置参数
|
||||
|
||||
| **配置项** | **描述** | **默认值** |
|
||||
|-------------------------|--------------------------------------------|----------|
|
||||
| monitor.collectDuration | cpu 和内存采集间隔 | 3s |
|
||||
| monitor.collectDuration | CPU 和内存采集间隔 | 3s |
|
||||
| monitor.identity | 当前taosadapter 的标识符如果不设置将使用 'hostname:port' | |
|
||||
| monitor.incgroup | 是否是 cgroup 中运行(容器中运行设置为 true) | false |
|
||||
| monitor.writeToTD | 是否写入到 TDengine | false |
|
||||
|
|
|
@ -134,6 +134,24 @@ taos --dump-config
|
|||
| 取值范围 | 1-200000 |
|
||||
| 缺省值 | 30 |

### telemetryReporting

| Attribute | Description |
| -------- | -------------------------------------------- |
| Applies to | Server only |
| Meaning | Whether to upload telemetry |
| Value range | 0: do not upload; 1: upload |
| Default | 1 |

### crashReporting

| Attribute | Description |
| -------- | -------------------------------------------- |
| Applies to | Server only |
| Meaning | Whether to upload crash information |
| Value range | 0: do not upload; 1: upload |
| Default | 1 |

## Query Related
|
||||
|
||||
### queryPolicy
|
||||
|
@ -698,7 +716,7 @@ charset 的有效值是 UTF-8。
|
|||
| 2 | numOfThreadsPerCore | 是 | 否 | 有其它参数设置多种线程池的大小 |
|
||||
| 3 | numOfMnodes | 是 | 否 | 通过 create mnode 命令动态创建 mnode |
|
||||
| 4 | vnodeBak | 是 | 否 | 3.0 行为未知 |
|
||||
| 5 | balance | 是 | 否 | 负载均衡功能由 split/merge vgroups 实现 |
|
||||
| 5 | balance | 是 | 否 | 负载均衡功能由 split/merge vgroups 实现 (暂不支持) |
|
||||
| 6 | balanceInterval | 是 | 否 | 随着 balance 参数失效 |
|
||||
| 7 | offlineThreshold | 是 | 否 | 3.0 行为未知 |
|
||||
| 8 | role | 是 | 否 | 由 supportVnode 决定是否能够创建 |
|
||||
|
|
|
@ -243,3 +243,8 @@ sudo launchctl load -w /Library/LaunchDaemons/limit.maxfiles.plist
|
|||
```
|
||||
launchctl limit maxfiles
|
||||
```

### 19 "Out of dnode" is reported when creating a database

This message means there are not enough vnodes for the database being created; the number of vnodes it needs must not exceed the vnode limit of the dnodes. By default a dnode provides twice as many vnodes as the machine has CPU cores, and the limit can also be controlled with the supportVnodes parameter in the configuration file.
Normally, increasing this supportVnodes parameter in taos.cfg is enough.
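As a hedged illustration (the dnode ID and the vgroup count are invented; check the exact SHOW syntax against your server version), the effective limit can be inspected and the database retried with fewer vgroups:

```sql
SHOW DNODE 1 VARIABLES;          -- lists the dnode's configuration, including supportVnodes
CREATE DATABASE db VGROUPS 2;    -- or request fewer vgroups so the database fits the available vnodes
```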
@ -10,6 +10,14 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 3.0.2.3
|
||||
|
||||
<Release type="tdengine" version="3.0.2.3" />
|
||||
|
||||
## 3.0.2.2
|
||||
|
||||
<Release type="tdengine" version="3.0.2.2" />
|
||||
|
||||
## 3.0.2.1
|
||||
|
||||
<Release type="tdengine" version="3.0.2.1" />
|
||||
|
|
|
@ -10,6 +10,14 @@ taosTools 各版本安装包下载链接如下:
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 2.4.1
|
||||
|
||||
<Release type="tools" version="2.4.1" />
|
||||
|
||||
## 2.4.0
|
||||
|
||||
<Release type="tools" version="2.4.0" />
|
||||
|
||||
## 2.3.3
|
||||
|
||||
<Release type="tools" version="2.3.3" />
|
||||
|
|
|
@ -161,6 +161,8 @@ DLL_EXPORT int taos_stmt_set_tags(TAOS_STMT *stmt, TAOS_MULTI_BIND *tags)
|
|||
DLL_EXPORT int taos_stmt_set_sub_tbname(TAOS_STMT *stmt, const char *name);
|
||||
DLL_EXPORT int taos_stmt_get_tag_fields(TAOS_STMT *stmt, int *fieldNum, TAOS_FIELD_E **fields);
|
||||
DLL_EXPORT int taos_stmt_get_col_fields(TAOS_STMT *stmt, int *fieldNum, TAOS_FIELD_E **fields);
|
||||
// let stmt reclaim the TAOS_FIELD_E that was allocated by `taos_stmt_get_tag_fields`/`taos_stmt_get_col_fields`
|
||||
DLL_EXPORT void taos_stmt_reclaim_fields(TAOS_STMT *stmt, TAOS_FIELD_E *fields);
|
||||
|
||||
DLL_EXPORT int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert);
|
||||
DLL_EXPORT int taos_stmt_num_params(TAOS_STMT *stmt, int *nums);
|
||||
|
@ -206,6 +208,7 @@ DLL_EXPORT TAOS_ROW *taos_result_block(TAOS_RES *res);
|
|||
|
||||
DLL_EXPORT const char *taos_get_server_info(TAOS *taos);
|
||||
DLL_EXPORT const char *taos_get_client_info();
|
||||
DLL_EXPORT int taos_get_current_db(TAOS *taos, char *database, int len, int *required);
|
||||
|
||||
DLL_EXPORT const char *taos_errstr(TAOS_RES *res);
|
||||
DLL_EXPORT int taos_errno(TAOS_RES *res);
|
||||
|
@ -218,6 +221,7 @@ DLL_EXPORT const void *taos_get_raw_block(TAOS_RES *res);
|
|||
|
||||
DLL_EXPORT int taos_get_db_route_info(TAOS *taos, const char *db, TAOS_DB_ROUTE_INFO *dbInfo);
|
||||
DLL_EXPORT int taos_get_table_vgId(TAOS *taos, const char *db, const char *table, int *vgId);
|
||||
DLL_EXPORT int taos_get_tables_vgId(TAOS *taos, const char *db, const char *table[], int tableNum, int *vgId);
|
||||
|
||||
DLL_EXPORT int taos_load_table_info(TAOS *taos, const char *tableNameList);
|
||||
|
||||
|
|
|
@ -36,6 +36,7 @@ extern "C" {
|
|||
#define TSDB_INS_TABLE_STABLES "ins_stables"
|
||||
#define TSDB_INS_TABLE_TABLES "ins_tables"
|
||||
#define TSDB_INS_TABLE_TAGS "ins_tags"
|
||||
#define TSDB_INS_TABLE_COLS "ins_columns"
|
||||
#define TSDB_INS_TABLE_TABLE_DISTRIBUTED "ins_table_distributed"
|
||||
#define TSDB_INS_TABLE_USERS "ins_users"
|
||||
#define TSDB_INS_TABLE_LICENCES "ins_grants"
|
||||
|
|
|
@ -30,6 +30,11 @@ typedef int64_t tb_uid_t;
|
|||
#define IS_TSWINDOW_SPECIFIED(win) (((win).skey != INT64_MIN) || ((win).ekey != INT64_MAX))
|
||||
#define TSWINDOW_IS_EQUAL(t1, t2) (((t1).skey == (t2).skey) && ((t1).ekey == (t2).ekey))
|
||||
|
||||
//define show cluster alive and show db.alive
|
||||
#define SHOW_STATUS_NOT_AVAILABLE 0
|
||||
#define SHOW_STATUS_AVAILABLE 1
|
||||
#define SHOW_STATUS_HALF_AVAILABLE 2
|
||||
|
||||
typedef enum {
|
||||
TSDB_SUPER_TABLE = 1, // super table
|
||||
TSDB_CHILD_TABLE = 2, // table created from super table
|
||||
|
|
|
@ -162,6 +162,7 @@ typedef enum EStreamType {
|
|||
STREAM_PULL_DATA,
|
||||
STREAM_PULL_OVER,
|
||||
STREAM_FILL_OVER,
|
||||
STREAM_CREATE_CHILD_TABLE,
|
||||
} EStreamType;
|
||||
|
||||
#pragma pack(push, 1)
|
||||
|
@ -205,8 +206,6 @@ typedef struct SDataBlockInfo {
|
|||
TSKEY watermark; // used for stream
|
||||
|
||||
char parTbName[TSDB_TABLE_NAME_LEN]; // used for stream partition
|
||||
int32_t tagLen;
|
||||
void* pTag; // used for stream partition
|
||||
} SDataBlockInfo;
|
||||
|
||||
typedef struct SSDataBlock {
|
||||
|
@ -292,6 +291,7 @@ typedef struct STableBlockDistInfo {
|
|||
uint16_t numOfFiles;
|
||||
uint32_t numOfTables;
|
||||
uint32_t numOfBlocks;
|
||||
uint32_t numOfVgroups;
|
||||
uint64_t totalSize;
|
||||
uint64_t totalRows;
|
||||
int32_t maxRows;
|
||||
|
@ -341,7 +341,7 @@ typedef struct SExprInfo {
|
|||
|
||||
typedef struct {
|
||||
const char* key;
|
||||
int32_t keyLen;
|
||||
size_t keyLen;
|
||||
uint8_t type;
|
||||
union {
|
||||
const char* value;
|
||||
|
@ -350,7 +350,7 @@ typedef struct {
|
|||
double d;
|
||||
float f;
|
||||
};
|
||||
int32_t length;
|
||||
size_t length;
|
||||
} SSmlKv;
|
||||
|
||||
#define QUERY_ASC_FORWARD_STEP 1
|
||||
|
@ -378,6 +378,11 @@ typedef struct SSortExecInfo {
|
|||
#define CALCULATE_END_TS_COLUMN_INDEX 5
|
||||
#define TABLE_NAME_COLUMN_INDEX 6
|
||||
|
||||
// stream create table block column
|
||||
#define UD_TABLE_NAME_COLUMN_INDEX 0
|
||||
#define UD_GROUPID_COLUMN_INDEX 1
|
||||
#define UD_TAG_COLUMN_INDEX 2
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -41,6 +41,12 @@ typedef struct SBlockOrderInfo {
|
|||
BMCharPos(bm_, r_) |= (1u << (7u - BitPos(r_))); \
|
||||
} while (0)
|
||||
|
||||
#define colDataSetNull_f_s(c_, r_) \
|
||||
do { \
|
||||
colDataSetNull_f((c_)->nullbitmap, r_); \
|
||||
memset(((char*)(c_)->pData) + (c_)->info.bytes * (r_), 0, (c_)->info.bytes); \
|
||||
} while (0)
|
||||
|
||||
#define colDataClearNull_f(bm_, r_) \
|
||||
do { \
|
||||
BMCharPos(bm_, r_) &= ((char)(~(1u << (7u - BitPos(r_))))); \
|
||||
|
@ -136,7 +142,7 @@ static FORCE_INLINE void colDataAppendNULL(SColumnInfoData* pColumnInfoData, uin
|
|||
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
|
||||
colDataSetNull_var(pColumnInfoData, currentRow); // it is a null value of VAR type.
|
||||
} else {
|
||||
colDataSetNull_f(pColumnInfoData->nullbitmap, currentRow);
|
||||
colDataSetNull_f_s(pColumnInfoData, currentRow);
|
||||
}
|
||||
|
||||
pColumnInfoData->hasNull = true;
|
||||
|
@ -151,6 +157,7 @@ static FORCE_INLINE void colDataAppendNNULL(SColumnInfoData* pColumnInfoData, ui
|
|||
for (int32_t i = start; i < start + nRows; ++i) {
|
||||
colDataSetNull_f(pColumnInfoData->nullbitmap, i);
|
||||
}
|
||||
memset(pColumnInfoData->pData + start * pColumnInfoData->info.bytes, 0, pColumnInfoData->info.bytes * nRows);
|
||||
}
|
||||
|
||||
pColumnInfoData->hasNull = true;
|
||||
|
@ -231,7 +238,6 @@ int32_t blockDataSort_rv(SSDataBlock* pDataBlock, SArray* pOrderInfo, bool nullF
|
|||
|
||||
int32_t colInfoDataEnsureCapacity(SColumnInfoData* pColumn, uint32_t numOfRows, bool clearPayload);
|
||||
int32_t blockDataEnsureCapacity(SSDataBlock* pDataBlock, uint32_t numOfRows);
|
||||
int32_t blockDataEnsureCapacityNoClear(SSDataBlock* pDataBlock, uint32_t numOfRows);
|
||||
|
||||
void colInfoDataCleanup(SColumnInfoData* pColumn, uint32_t numOfRows);
|
||||
void blockDataCleanup(SSDataBlock* pDataBlock);
|
||||
|
@ -265,7 +271,7 @@ void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag);
|
|||
// for debug
|
||||
char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf);
|
||||
|
||||
int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataBlocks, STSchema* pTSchema, int32_t vgId,
|
||||
int32_t buildSubmitReqFromDataBlock(SSubmitReq2** pReq, const SSDataBlock* pDataBlocks, const STSchema* pTSchema, int64_t uid, int32_t vgId,
|
||||
tb_uid_t suid);
|
||||
|
||||
char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId);
|
||||
|
|
|
@ -44,18 +44,38 @@ typedef struct SColData SColData;
|
|||
#define HAS_VALUE ((uint8_t)0x4)
|
||||
|
||||
// bitmap ================================
|
||||
const static uint8_t BIT2_MAP[4][4] = {{0b00000000, 0b00000001, 0b00000010, 0},
|
||||
{0b00000000, 0b00000100, 0b00001000, 2},
|
||||
{0b00000000, 0b00010000, 0b00100000, 4},
|
||||
{0b00000000, 0b01000000, 0b10000000, 6}};
|
||||
const static uint8_t BIT1_MAP[8] = {0b11111110, 0b11111101, 0b11111011, 0b11110111,
|
||||
0b11101111, 0b11011111, 0b10111111, 0b01111111};
|
||||
|
||||
#define N1(n) ((((uint8_t)1) << (n)) - 1)
|
||||
#define BIT1_SIZE(n) ((((n)-1) >> 3) + 1)
|
||||
#define BIT2_SIZE(n) ((((n)-1) >> 2) + 1)
|
||||
#define SET_BIT1(p, i, v) ((p)[(i) >> 3] = (p)[(i) >> 3] & N1((i)&7) | (((uint8_t)(v)) << ((i)&7)))
|
||||
#define GET_BIT1(p, i) (((p)[(i) >> 3] >> ((i)&7)) & ((uint8_t)1))
|
||||
#define SET_BIT2(p, i, v) ((p)[(i) >> 2] = (p)[(i) >> 2] & N1(BIT2_MAP[(i)&3][3]) | BIT2_MAP[(i)&3][(v)])
|
||||
#define GET_BIT2(p, i) (((p)[(i) >> 2] >> BIT2_MAP[(i)&3][3]) & ((uint8_t)3))
|
||||
const static uint8_t BIT2_MAP[4] = {0b11111100, 0b11110011, 0b11001111, 0b00111111};
|
||||
|
||||
#define ONE ((uint8_t)1)
|
||||
#define THREE ((uint8_t)3)
|
||||
#define DIV_8(i) ((i) >> 3)
|
||||
#define MOD_8(i) ((i)&7)
|
||||
#define DIV_4(i) ((i) >> 2)
|
||||
#define MOD_4(i) ((i)&3)
|
||||
#define MOD_4_TIME_2(i) (MOD_4(i) << 1)
|
||||
#define BIT1_SIZE(n) (DIV_8((n)-1) + 1)
|
||||
#define BIT2_SIZE(n) (DIV_4((n)-1) + 1)
|
||||
#define SET_BIT1(p, i, v) ((p)[DIV_8(i)] = (p)[DIV_8(i)] & BIT1_MAP[MOD_8(i)] | ((v) << MOD_8(i)))
|
||||
#define SET_BIT1_EX(p, i, v) \
|
||||
do { \
|
||||
if (MOD_8(i) == 0) { \
|
||||
(p)[DIV_8(i)] = 0; \
|
||||
} \
|
||||
SET_BIT1(p, i, v); \
|
||||
} while (0)
|
||||
#define GET_BIT1(p, i) (((p)[DIV_8(i)] >> MOD_8(i)) & ONE)
|
||||
#define SET_BIT2(p, i, v) ((p)[DIV_4(i)] = (p)[DIV_4(i)] & BIT2_MAP[MOD_4(i)] | ((v) << MOD_4_TIME_2(i)))
|
||||
#define SET_BIT2_EX(p, i, v) \
|
||||
do { \
|
||||
if (MOD_4(i) == 0) { \
|
||||
(p)[DIV_4(i)] = 0; \
|
||||
} \
|
||||
SET_BIT2(p, i, v); \
|
||||
} while (0)
|
||||
#define GET_BIT2(p, i) (((p)[DIV_4(i)] >> MOD_4_TIME_2(i)) & THREE)
|
||||
|
||||
// SBuffer ================================
|
||||
struct SBuffer {
|
||||
|
@ -70,9 +90,6 @@ int32_t tBufferInit(SBuffer *pBuffer, int64_t size);
|
|||
int32_t tBufferPut(SBuffer *pBuffer, const void *pData, int64_t nData);
|
||||
int32_t tBufferReserve(SBuffer *pBuffer, int64_t nData, void **ppData);
|
||||
|
||||
// STSchema ================================
|
||||
void tDestroyTSchema(STSchema *pTSchema);
|
||||
|
||||
// SColVal ================================
|
||||
#define CV_FLAG_VALUE ((int8_t)0x0)
|
||||
#define CV_FLAG_NONE ((int8_t)0x1)
|
||||
|
@ -87,8 +104,12 @@ void tDestroyTSchema(STSchema *pTSchema);
|
|||
#define COL_VAL_IS_VALUE(CV) ((CV)->flag == CV_FLAG_VALUE)
|
||||
|
||||
// SRow ================================
|
||||
int32_t tRowBuild(SArray *aColVal, STSchema *pTSchema, SBuffer *pBuffer);
|
||||
int32_t tRowBuild(SArray *aColVal, const STSchema *pTSchema, SRow **ppRow);
|
||||
void tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal);
|
||||
void tRowDestroy(SRow *pRow);
|
||||
void tRowSort(SArray *aRowP);
|
||||
int32_t tRowMerge(SArray *aRowP, STSchema *pTSchema, int8_t flag);
|
||||
int32_t tRowAppendToColData(SRow *pRow, STSchema *pTSchema, SColData *aColData, int32_t nColData);
|
||||
|
||||
// SRowIter ================================
|
||||
int32_t tRowIterOpen(SRow *pRow, STSchema *pTSchema, SRowIter **ppIter);
|
||||
|
@ -110,15 +131,28 @@ void debugPrintSTag(STag *pTag, const char *tag, int32_t ln); // TODO: remov
|
|||
int32_t parseJsontoTagData(const char *json, SArray *pTagVals, STag **ppTag, void *pMsgBuf);
|
||||
|
||||
// SColData ================================
|
||||
typedef void *(*xMallocFn)(void *, int32_t);
|
||||
void tColDataDestroy(void *ph);
|
||||
void tColDataInit(SColData *pColData, int16_t cid, int8_t type, int8_t smaOn);
|
||||
void tColDataClear(SColData *pColData);
|
||||
void tColDataDeepClear(SColData *pColData);
|
||||
int32_t tColDataAppendValue(SColData *pColData, SColVal *pColVal);
|
||||
void tColDataGetValue(SColData *pColData, int32_t iVal, SColVal *pColVal);
|
||||
uint8_t tColDataGetBitValue(const SColData *pColData, int32_t iVal);
|
||||
int32_t tColDataCopy(SColData *pColDataSrc, SColData *pColDataDest);
|
||||
int32_t tColDataCopy(SColData *pColDataFrom, SColData *pColData, xMallocFn xMalloc, void *arg);
|
||||
extern void (*tColDataCalcSMA[])(SColData *pColData, int64_t *sum, int64_t *max, int64_t *min, int16_t *numOfNull);
|
||||
|
||||
// for stmt bind
|
||||
int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind);
|
||||
void tColDataSortMerge(SArray *colDataArr);
|
||||
|
||||
//for raw block
|
||||
int32_t tColDataAddValueByDataBlock(SColData *pColData, int8_t type, int32_t bytes,
|
||||
int32_t nRows, char* lengthOrbitmap, char *data);
|
||||
// for encode/decode
|
||||
int32_t tPutColData(uint8_t *pBuf, SColData *pColData);
|
||||
int32_t tGetColData(uint8_t *pBuf, SColData *pColData);
|
||||
|
||||
// STRUCT ================================
|
||||
struct STColumn {
|
||||
col_id_t colId;
|
||||
|
@ -225,23 +259,9 @@ struct STag {
|
|||
memcpy(varDataVal(x), (str), (_size)); \
|
||||
} while (0);
|
||||
|
||||
// ----------------- SCHEMA BUILDER DEFINITION
|
||||
typedef struct {
|
||||
int32_t tCols;
|
||||
int32_t nCols;
|
||||
schema_ver_t version;
|
||||
uint16_t flen;
|
||||
int32_t tlen;
|
||||
STColumn *columns;
|
||||
} STSchemaBuilder;
|
||||
|
||||
int32_t tdInitTSchemaBuilder(STSchemaBuilder *pBuilder, schema_ver_t version);
|
||||
void tdDestroyTSchemaBuilder(STSchemaBuilder *pBuilder);
|
||||
void tdResetTSchemaBuilder(STSchemaBuilder *pBuilder, schema_ver_t version);
|
||||
int32_t tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int8_t flags, col_id_t colId, col_bytes_t bytes);
|
||||
STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder);
|
||||
|
||||
// STSchema ================================
|
||||
STSchema *tBuildTSchema(SSchema *aSchema, int32_t numOfCols, int32_t version);
|
||||
void tDestroyTSchema(STSchema *pTSchema);
|
||||
|
||||
#endif
|
||||
|
||||
|
|
|
@ -55,7 +55,7 @@ extern int32_t tsNumOfMnodeQueryThreads;
|
|||
extern int32_t tsNumOfMnodeFetchThreads;
|
||||
extern int32_t tsNumOfMnodeReadThreads;
|
||||
extern int32_t tsNumOfVnodeQueryThreads;
|
||||
extern int32_t tsNumOfVnodeStreamThreads;
|
||||
extern float tsRatioOfVnodeStreamThreads;
|
||||
extern int32_t tsNumOfVnodeFetchThreads;
|
||||
extern int32_t tsNumOfVnodeRsmaThreads;
|
||||
extern int32_t tsNumOfQnodeQueryThreads;
|
||||
|
@ -69,6 +69,9 @@ extern int32_t tsElectInterval;
|
|||
extern int32_t tsHeartbeatInterval;
|
||||
extern int32_t tsHeartbeatTimeout;
|
||||
|
||||
// vnode
|
||||
extern int64_t tsVndCommitMaxIntervalMs;
|
||||
|
||||
// monitor
|
||||
extern bool tsEnableMonitor;
|
||||
extern int32_t tsMonitorInterval;
|
||||
|
@ -82,6 +85,10 @@ extern bool tsEnableTelem;
|
|||
extern int32_t tsTelemInterval;
|
||||
extern char tsTelemServer[];
|
||||
extern uint16_t tsTelemPort;
|
||||
extern bool tsEnableCrashReport;
|
||||
extern char* tsTelemUri;
|
||||
extern char* tsClientCrashReportUri;
|
||||
extern char* tsSvrCrashReportUri;
|
||||
|
||||
// query buffer management
|
||||
extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing
|
||||
|
|
|
@ -115,6 +115,7 @@ typedef enum _mgmt_table {
|
|||
TSDB_MGMT_TABLE_STREAMS,
|
||||
TSDB_MGMT_TABLE_TABLE,
|
||||
TSDB_MGMT_TABLE_TAG,
|
||||
TSDB_MGMT_TABLE_COL,
|
||||
TSDB_MGMT_TABLE_USER,
|
||||
TSDB_MGMT_TABLE_GRANTS,
|
||||
TSDB_MGMT_TABLE_VGROUP,
|
||||
|
@ -343,7 +344,8 @@ void tFreeSSubmitRsp(SSubmitRsp* pRsp);
|
|||
#define COL_IS_SET(FLG) (((FLG) & (COL_SET_VAL | COL_SET_NULL)) != 0)
|
||||
#define COL_CLR_SET(FLG) ((FLG) &= (~(COL_SET_VAL | COL_SET_NULL)))
|
||||
|
||||
#define IS_BSMA_ON(s) (((s)->flags & 0x01) == COL_SMA_ON)
|
||||
#define IS_BSMA_ON(s) (((s)->flags & 0x01) == COL_SMA_ON)
|
||||
#define IS_SET_NULL(s) (((s)->flags & COL_SET_NULL) == COL_SET_NULL)
|
||||
|
||||
#define SSCHMEA_TYPE(s) ((s)->type)
|
||||
#define SSCHMEA_FLAGS(s) ((s)->flags)
|
||||
|
@ -381,6 +383,13 @@ static FORCE_INLINE void tDeleteSSchemaWrapper(SSchemaWrapper* pSchemaWrapper) {
|
|||
}
|
||||
}
|
||||
|
||||
static FORCE_INLINE void tDeleteSSchemaWrapperForHash(void* pSchemaWrapper) {
|
||||
if (pSchemaWrapper != NULL && *(SSchemaWrapper**)pSchemaWrapper != NULL) {
|
||||
taosMemoryFree((*(SSchemaWrapper**)pSchemaWrapper)->pSchema);
|
||||
taosMemoryFree(*(SSchemaWrapper**)pSchemaWrapper);
|
||||
}
|
||||
}
|
||||
|
||||
static FORCE_INLINE int32_t taosEncodeSSchema(void** buf, const SSchema* pSchema) {
|
||||
int32_t tlen = 0;
|
||||
tlen += taosEncodeFixedI8(buf, pSchema->type);
|
||||
|
@ -482,8 +491,6 @@ static FORCE_INLINE int32_t tDecodeSSchemaWrapperEx(SDecoder* pDecoder, SSchemaW
|
|||
return 0;
|
||||
}
|
||||
|
||||
STSchema* tdGetSTSChemaFromSSChema(SSchema* pSchema, int32_t nCols, int32_t sver);
|
||||
|
||||
typedef struct {
|
||||
char name[TSDB_TABLE_FNAME_LEN];
|
||||
int8_t igExists;
|
||||
|
@ -1394,6 +1401,7 @@ typedef struct {
|
|||
char db[TSDB_DB_FNAME_LEN];
|
||||
char tb[TSDB_TABLE_NAME_LEN];
|
||||
char user[TSDB_USER_LEN];
|
||||
char filterTb[TSDB_TABLE_NAME_LEN];
|
||||
int64_t showId;
|
||||
} SRetrieveTableReq;
|
||||
|
||||
|
@ -1734,6 +1742,8 @@ typedef struct {
|
|||
int32_t execId;
|
||||
} STaskDropReq;
|
||||
|
||||
int32_t tSerializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
|
||||
int32_t tDeserializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
|
||||
int32_t tSerializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
|
||||
int32_t tDeserializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
|
||||
|
||||
|
@ -1751,6 +1761,8 @@ typedef struct {
|
|||
#define STREAM_FILL_HISTORY_ON 1
|
||||
#define STREAM_FILL_HISTORY_OFF 0
|
||||
#define STREAM_DEFAULT_FILL_HISTORY STREAM_FILL_HISTORY_OFF
|
||||
#define STREAM_CREATE_STABLE_TRUE 1
|
||||
#define STREAM_CREATE_STABLE_FALSE 0
|
||||
|
||||
typedef struct {
|
||||
char name[TSDB_STREAM_FNAME_LEN];
|
||||
|
@ -1768,6 +1780,10 @@ typedef struct {
|
|||
SArray* pTags; // array of SField
|
||||
// 3.0.20
|
||||
int64_t checkpointFreq; // ms
|
||||
// 3.0.2.3
|
||||
int8_t createStb;
|
||||
uint64_t targetStbUid;
|
||||
SArray* fillNullCols;
|
||||
} SCMCreateStreamReq;
|
||||
|
||||
typedef struct {
|
||||
|
@ -2081,10 +2097,15 @@ typedef struct SVCreateTbReq {
|
|||
};
|
||||
} SVCreateTbReq;
|
||||
|
||||
int tEncodeSVCreateTbReq(SEncoder* pCoder, const SVCreateTbReq* pReq);
|
||||
int tDecodeSVCreateTbReq(SDecoder* pCoder, SVCreateTbReq* pReq);
|
||||
int tEncodeSVCreateTbReq(SEncoder* pCoder, const SVCreateTbReq* pReq);
|
||||
int tDecodeSVCreateTbReq(SDecoder* pCoder, SVCreateTbReq* pReq);
|
||||
void tDestroySVCreateTbReq(SVCreateTbReq* pReq, int32_t flags);
|
||||
|
||||
static FORCE_INLINE void tdDestroySVCreateTbReq(SVCreateTbReq* req) {
|
||||
if (NULL == req) {
|
||||
return;
|
||||
}
|
||||
|
||||
taosMemoryFreeClear(req->name);
|
||||
taosMemoryFreeClear(req->comment);
|
||||
if (req->type == TSDB_CHILD_TABLE) {
|
||||
|
@ -3232,6 +3253,57 @@ int32_t tSerializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
|
|||
int32_t tDeserializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
|
||||
int32_t tSerializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
|
||||
int32_t tDeserializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
|
||||
int32_t tSerializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
|
||||
int32_t tDeserializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
|
||||
int32_t tSerializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
|
||||
int32_t tDeserializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
|
||||
|
||||
#define SUBMIT_REQ_AUTO_CREATE_TABLE 0x1
|
||||
#define SUBMIT_REQ_COLUMN_DATA_FORMAT 0x2
|
||||
|
||||
typedef struct {
|
||||
int32_t flags;
|
||||
SVCreateTbReq* pCreateTbReq;
|
||||
int64_t suid;
|
||||
int64_t uid;
|
||||
int32_t sver;
|
||||
union {
|
||||
SArray* aRowP;
|
||||
SArray* aCol;
|
||||
};
|
||||
} SSubmitTbData;
|
||||
|
||||
typedef struct {
|
||||
SArray* aSubmitTbData; // SArray<SSubmitTbData>
|
||||
} SSubmitReq2;
|
||||
|
||||
int32_t tEncodeSSubmitReq2(SEncoder* pCoder, const SSubmitReq2* pReq);
|
||||
int32_t tDecodeSSubmitReq2(SDecoder* pCoder, SSubmitReq2* pReq);
|
||||
void tDestroySSubmitTbData(SSubmitTbData* pTbData, int32_t flag);
|
||||
void tDestroySSubmitReq2(SSubmitReq2* pReq, int32_t flag);
|
||||
|
||||
typedef struct {
|
||||
int32_t affectedRows;
|
||||
SArray* aCreateTbRsp; // SArray<SVCreateTbRsp>
|
||||
} SSubmitRsp2;
|
||||
|
||||
int32_t tEncodeSSubmitRsp2(SEncoder* pCoder, const SSubmitRsp2* pRsp);
|
||||
int32_t tDecodeSSubmitRsp2(SDecoder* pCoder, SSubmitRsp2* pRsp);
|
||||
void tDestroySSubmitRsp2(SSubmitRsp2* pRsp, int32_t flag);
|
||||
|
||||
#define TSDB_MSG_FLG_ENCODE 0x1
|
||||
#define TSDB_MSG_FLG_DECODE 0x2
|
||||
|
||||
typedef struct {
|
||||
union {
|
||||
struct {
|
||||
void* msgStr;
|
||||
int32_t msgLen;
|
||||
int64_t ver;
|
||||
};
|
||||
void* pDataBlock;
|
||||
};
|
||||
} SPackedData;
|
||||
|
||||
#pragma pack(pop)
|
||||
|
||||
|
|
|
@ -39,6 +39,7 @@ typedef enum {
|
|||
QUEUE_MAX,
|
||||
} EQueueType;
|
||||
|
||||
typedef void (*UpdateDnodeInfoFp)(void* pData, int32_t* dnodeId, int64_t* clusterId, char* fqdn, uint16_t* port);
|
||||
typedef int32_t (*PutToQueueFp)(void* pMgmt, EQueueType qtype, SRpcMsg* pMsg);
|
||||
typedef int32_t (*GetQueueSizeFp)(void* pMgmt, int32_t vgId, EQueueType qtype);
|
||||
typedef int32_t (*SendReqFp)(const SEpSet* pEpSet, SRpcMsg* pMsg);
|
||||
|
@ -48,6 +49,7 @@ typedef void (*ReleaseHandleFp)(SRpcHandleInfo* pHandle, int8_t type);
|
|||
typedef void (*ReportStartup)(const char* name, const char* desc);
|
||||
|
||||
typedef struct {
|
||||
void* data;
|
||||
void* mgmt;
|
||||
void* clientRpc;
|
||||
PutToQueueFp putToQueueFp;
|
||||
|
@ -57,6 +59,7 @@ typedef struct {
|
|||
RegisterBrokenLinkArgFp registerBrokenLinkArgFp;
|
||||
ReleaseHandleFp releaseHandleFp;
|
||||
ReportStartup reportStartupFp;
|
||||
UpdateDnodeInfoFp updateDnodeInfoFp;
|
||||
} SMsgCb;
|
||||
|
||||
void tmsgSetDefault(const SMsgCb* msgcb);
|
||||
|
@ -67,6 +70,7 @@ void tmsgSendRsp(SRpcMsg* pMsg);
|
|||
void tmsgRegisterBrokenLinkArg(SRpcMsg* pMsg);
|
||||
void tmsgReleaseHandle(SRpcHandleInfo* pHandle, int8_t type);
|
||||
void tmsgReportStartup(const char* name, const char* desc);
|
||||
void tmsgUpdateDnodeInfo(int32_t* dnodeId, int64_t* clusterId, char* fqdn, uint16_t* port);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -78,7 +78,7 @@ typedef struct {
|
|||
|
||||
// output
|
||||
char* ctbShortName; // must have size of TSDB_TABLE_NAME_LEN;
|
||||
uint64_t uid; // child table uid, may be useful
|
||||
// uint64_t uid; // child table uid, may be useful
|
||||
} RandTableName;
|
||||
|
||||
void buildChildTableName(RandTableName* rName);
|
||||
|
|
|
@ -20,7 +20,6 @@
|
|||
#include "talgo.h"
|
||||
#include "taosdef.h"
|
||||
#include "taoserror.h"
|
||||
#include "tbuffer.h"
|
||||
#include "tdataformat.h"
|
||||
#include "tdef.h"
|
||||
#include "ttypes.h"
|
||||
|
|
|
@ -175,171 +175,172 @@
|
|||
#define TK_CONSUMERS 157
|
||||
#define TK_SUBSCRIPTIONS 158
|
||||
#define TK_VNODES 159
|
||||
#define TK_LIKE 160
|
||||
#define TK_TBNAME 161
|
||||
#define TK_QTAGS 162
|
||||
#define TK_AS 163
|
||||
#define TK_INDEX 164
|
||||
#define TK_FUNCTION 165
|
||||
#define TK_INTERVAL 166
|
||||
#define TK_COUNT 167
|
||||
#define TK_LAST_ROW 168
|
||||
#define TK_TOPIC 169
|
||||
#define TK_WITH 170
|
||||
#define TK_META 171
|
||||
#define TK_CONSUMER 172
|
||||
#define TK_GROUP 173
|
||||
#define TK_DESC 174
|
||||
#define TK_DESCRIBE 175
|
||||
#define TK_RESET 176
|
||||
#define TK_QUERY 177
|
||||
#define TK_CACHE 178
|
||||
#define TK_EXPLAIN 179
|
||||
#define TK_ANALYZE 180
|
||||
#define TK_VERBOSE 181
|
||||
#define TK_NK_BOOL 182
|
||||
#define TK_RATIO 183
|
||||
#define TK_NK_FLOAT 184
|
||||
#define TK_OUTPUTTYPE 185
|
||||
#define TK_AGGREGATE 186
|
||||
#define TK_BUFSIZE 187
|
||||
#define TK_STREAM 188
|
||||
#define TK_INTO 189
|
||||
#define TK_TRIGGER 190
|
||||
#define TK_AT_ONCE 191
|
||||
#define TK_WINDOW_CLOSE 192
|
||||
#define TK_IGNORE 193
|
||||
#define TK_EXPIRED 194
|
||||
#define TK_FILL_HISTORY 195
|
||||
#define TK_SUBTABLE 196
|
||||
#define TK_KILL 197
|
||||
#define TK_CONNECTION 198
|
||||
#define TK_TRANSACTION 199
|
||||
#define TK_BALANCE 200
|
||||
#define TK_VGROUP 201
|
||||
#define TK_MERGE 202
|
||||
#define TK_REDISTRIBUTE 203
|
||||
#define TK_SPLIT 204
|
||||
#define TK_DELETE 205
|
||||
#define TK_INSERT 206
|
||||
#define TK_NULL 207
|
||||
#define TK_NK_QUESTION 208
|
||||
#define TK_NK_ARROW 209
|
||||
#define TK_ROWTS 210
|
||||
#define TK_QSTART 211
|
||||
#define TK_QEND 212
|
||||
#define TK_QDURATION 213
|
||||
#define TK_WSTART 214
|
||||
#define TK_WEND 215
|
||||
#define TK_WDURATION 216
|
||||
#define TK_IROWTS 217
|
||||
#define TK_ISFILLED 218
|
||||
#define TK_CAST 219
|
||||
#define TK_NOW 220
|
||||
#define TK_TODAY 221
|
||||
#define TK_TIMEZONE 222
|
||||
#define TK_CLIENT_VERSION 223
|
||||
#define TK_SERVER_VERSION 224
|
||||
#define TK_SERVER_STATUS 225
|
||||
#define TK_CURRENT_USER 226
|
||||
#define TK_CASE 227
|
||||
#define TK_END 228
|
||||
#define TK_WHEN 229
|
||||
#define TK_THEN 230
|
||||
#define TK_ELSE 231
|
||||
#define TK_BETWEEN 232
|
||||
#define TK_IS 233
|
||||
#define TK_NK_LT 234
|
||||
#define TK_NK_GT 235
|
||||
#define TK_NK_LE 236
|
||||
#define TK_NK_GE 237
|
||||
#define TK_NK_NE 238
|
||||
#define TK_MATCH 239
|
||||
#define TK_NMATCH 240
|
||||
#define TK_CONTAINS 241
|
||||
#define TK_IN 242
|
||||
#define TK_JOIN 243
|
||||
#define TK_INNER 244
|
||||
#define TK_SELECT 245
|
||||
#define TK_DISTINCT 246
|
||||
#define TK_WHERE 247
|
||||
#define TK_PARTITION 248
|
||||
#define TK_BY 249
|
||||
#define TK_SESSION 250
|
||||
#define TK_STATE_WINDOW 251
|
||||
#define TK_EVENT_WINDOW 252
|
||||
#define TK_START 253
|
||||
#define TK_SLIDING 254
|
||||
#define TK_FILL 255
|
||||
#define TK_VALUE 256
|
||||
#define TK_NONE 257
|
||||
#define TK_PREV 258
|
||||
#define TK_LINEAR 259
|
||||
#define TK_NEXT 260
|
||||
#define TK_HAVING 261
|
||||
#define TK_RANGE 262
|
||||
#define TK_EVERY 263
|
||||
#define TK_ORDER 264
|
||||
#define TK_SLIMIT 265
|
||||
#define TK_SOFFSET 266
|
||||
#define TK_LIMIT 267
|
||||
#define TK_OFFSET 268
|
||||
#define TK_ASC 269
|
||||
#define TK_NULLS 270
|
||||
#define TK_ABORT 271
|
||||
#define TK_AFTER 272
|
||||
#define TK_ATTACH 273
|
||||
#define TK_BEFORE 274
|
||||
#define TK_BEGIN 275
|
||||
#define TK_BITAND 276
|
||||
#define TK_BITNOT 277
|
||||
#define TK_BITOR 278
|
||||
#define TK_BLOCKS 279
|
||||
#define TK_CHANGE 280
|
||||
#define TK_COMMA 281
|
||||
#define TK_COMPACT 282
|
||||
#define TK_CONCAT 283
|
||||
#define TK_CONFLICT 284
|
||||
#define TK_COPY 285
|
||||
#define TK_DEFERRED 286
|
||||
#define TK_DELIMITERS 287
|
||||
#define TK_DETACH 288
|
||||
#define TK_DIVIDE 289
|
||||
#define TK_DOT 290
|
||||
#define TK_EACH 291
|
||||
#define TK_FAIL 292
|
||||
#define TK_FILE 293
|
||||
#define TK_FOR 294
|
||||
#define TK_GLOB 295
|
||||
#define TK_ID 296
|
||||
#define TK_IMMEDIATE 297
|
||||
#define TK_IMPORT 298
|
||||
#define TK_INITIALLY 299
|
||||
#define TK_INSTEAD 300
|
||||
#define TK_ISNULL 301
|
||||
#define TK_KEY 302
|
||||
#define TK_MODULES 303
|
||||
#define TK_NK_BITNOT 304
|
||||
#define TK_NK_SEMI 305
|
||||
#define TK_NOTNULL 306
|
||||
#define TK_OF 307
|
||||
#define TK_PLUS 308
|
||||
#define TK_PRIVILEGE 309
|
||||
#define TK_RAISE 310
|
||||
#define TK_REPLACE 311
|
||||
#define TK_RESTRICT 312
|
||||
#define TK_ROW 313
|
||||
#define TK_SEMI 314
|
||||
#define TK_STAR 315
|
||||
#define TK_STATEMENT 316
|
||||
#define TK_STRICT 317
|
||||
#define TK_STRING 318
|
||||
#define TK_TIMES 319
|
||||
#define TK_UPDATE 320
|
||||
#define TK_VALUES 321
|
||||
#define TK_VARIABLE 322
|
||||
#define TK_VIEW 323
|
||||
#define TK_WAL 324
|
||||
#define TK_ALIVE 160
|
||||
#define TK_LIKE 161
|
||||
#define TK_TBNAME 162
|
||||
#define TK_QTAGS 163
|
||||
#define TK_AS 164
|
||||
#define TK_INDEX 165
|
||||
#define TK_FUNCTION 166
|
||||
#define TK_INTERVAL 167
|
||||
#define TK_COUNT 168
|
||||
#define TK_LAST_ROW 169
|
||||
#define TK_TOPIC 170
|
||||
#define TK_WITH 171
|
||||
#define TK_META 172
|
||||
#define TK_CONSUMER 173
|
||||
#define TK_GROUP 174
|
||||
#define TK_DESC 175
|
||||
#define TK_DESCRIBE 176
|
||||
#define TK_RESET 177
|
||||
#define TK_QUERY 178
|
||||
#define TK_CACHE 179
|
||||
#define TK_EXPLAIN 180
|
||||
#define TK_ANALYZE 181
|
||||
#define TK_VERBOSE 182
|
||||
#define TK_NK_BOOL 183
|
||||
#define TK_RATIO 184
|
||||
#define TK_NK_FLOAT 185
|
||||
#define TK_OUTPUTTYPE 186
|
||||
#define TK_AGGREGATE 187
|
||||
#define TK_BUFSIZE 188
|
||||
#define TK_STREAM 189
|
||||
#define TK_INTO 190
|
||||
#define TK_TRIGGER 191
|
||||
#define TK_AT_ONCE 192
|
||||
#define TK_WINDOW_CLOSE 193
|
||||
#define TK_IGNORE 194
|
||||
#define TK_EXPIRED 195
|
||||
#define TK_FILL_HISTORY 196
|
||||
#define TK_SUBTABLE 197
|
||||
#define TK_KILL 198
|
||||
#define TK_CONNECTION 199
|
||||
#define TK_TRANSACTION 200
|
||||
#define TK_BALANCE 201
|
||||
#define TK_VGROUP 202
|
||||
#define TK_MERGE 203
|
||||
#define TK_REDISTRIBUTE 204
|
||||
#define TK_SPLIT 205
|
||||
#define TK_DELETE 206
|
||||
#define TK_INSERT 207
|
||||
#define TK_NULL 208
|
||||
#define TK_NK_QUESTION 209
|
||||
#define TK_NK_ARROW 210
|
||||
#define TK_ROWTS 211
|
||||
#define TK_QSTART 212
|
||||
#define TK_QEND 213
|
||||
#define TK_QDURATION 214
|
||||
#define TK_WSTART 215
|
||||
#define TK_WEND 216
|
||||
#define TK_WDURATION 217
|
||||
#define TK_IROWTS 218
|
||||
#define TK_ISFILLED 219
|
||||
#define TK_CAST 220
|
||||
#define TK_NOW 221
|
||||
#define TK_TODAY 222
|
||||
#define TK_TIMEZONE 223
|
||||
#define TK_CLIENT_VERSION 224
|
||||
#define TK_SERVER_VERSION 225
|
||||
#define TK_SERVER_STATUS 226
|
||||
#define TK_CURRENT_USER 227
|
||||
#define TK_CASE 228
|
||||
#define TK_END 229
|
||||
#define TK_WHEN 230
|
||||
#define TK_THEN 231
|
||||
#define TK_ELSE 232
|
||||
#define TK_BETWEEN 233
|
||||
#define TK_IS 234
|
||||
#define TK_NK_LT 235
|
||||
#define TK_NK_GT 236
|
||||
#define TK_NK_LE 237
|
||||
#define TK_NK_GE 238
|
||||
#define TK_NK_NE 239
|
||||
#define TK_MATCH 240
|
||||
#define TK_NMATCH 241
|
||||
#define TK_CONTAINS 242
|
||||
#define TK_IN 243
|
||||
#define TK_JOIN 244
|
||||
#define TK_INNER 245
|
||||
#define TK_SELECT 246
|
||||
#define TK_DISTINCT 247
|
||||
#define TK_WHERE 248
|
||||
#define TK_PARTITION 249
|
||||
#define TK_BY 250
|
||||
#define TK_SESSION 251
|
||||
#define TK_STATE_WINDOW 252
|
||||
#define TK_EVENT_WINDOW 253
|
||||
#define TK_START 254
|
||||
#define TK_SLIDING 255
|
||||
#define TK_FILL 256
|
||||
#define TK_VALUE 257
|
||||
#define TK_NONE 258
|
||||
#define TK_PREV 259
|
||||
#define TK_LINEAR 260
|
||||
#define TK_NEXT 261
|
||||
#define TK_HAVING 262
|
||||
#define TK_RANGE 263
|
||||
#define TK_EVERY 264
|
||||
#define TK_ORDER 265
|
||||
#define TK_SLIMIT 266
|
||||
#define TK_SOFFSET 267
|
||||
#define TK_LIMIT 268
|
||||
#define TK_OFFSET 269
|
||||
#define TK_ASC 270
|
||||
#define TK_NULLS 271
|
||||
#define TK_ABORT 272
|
||||
#define TK_AFTER 273
|
||||
#define TK_ATTACH 274
|
||||
#define TK_BEFORE 275
|
||||
#define TK_BEGIN 276
|
||||
#define TK_BITAND 277
|
||||
#define TK_BITNOT 278
|
||||
#define TK_BITOR 279
|
||||
#define TK_BLOCKS 280
|
||||
#define TK_CHANGE 281
|
||||
#define TK_COMMA 282
|
||||
#define TK_COMPACT 283
|
||||
#define TK_CONCAT 284
|
||||
#define TK_CONFLICT 285
|
||||
#define TK_COPY 286
|
||||
#define TK_DEFERRED 287
|
||||
#define TK_DELIMITERS 288
|
||||
#define TK_DETACH 289
|
||||
#define TK_DIVIDE 290
|
||||
#define TK_DOT 291
|
||||
#define TK_EACH 292
|
||||
#define TK_FAIL 293
|
||||
#define TK_FILE 294
|
||||
#define TK_FOR 295
|
||||
#define TK_GLOB 296
|
||||
#define TK_ID 297
|
||||
#define TK_IMMEDIATE 298
|
||||
#define TK_IMPORT 299
|
||||
#define TK_INITIALLY 300
|
||||
#define TK_INSTEAD 301
|
||||
#define TK_ISNULL 302
|
||||
#define TK_KEY 303
|
||||
#define TK_MODULES 304
|
||||
#define TK_NK_BITNOT 305
|
||||
#define TK_NK_SEMI 306
|
||||
#define TK_NOTNULL 307
|
||||
#define TK_OF 308
|
||||
#define TK_PLUS 309
|
||||
#define TK_PRIVILEGE 310
|
||||
#define TK_RAISE 311
|
||||
#define TK_REPLACE 312
|
||||
#define TK_RESTRICT 313
|
||||
#define TK_ROW 314
|
||||
#define TK_SEMI 315
|
||||
#define TK_STAR 316
|
||||
#define TK_STATEMENT 317
|
||||
#define TK_STRICT 318
|
||||
#define TK_STRING 319
|
||||
#define TK_TIMES 320
|
||||
#define TK_UPDATE 321
|
||||
#define TK_VALUES 322
|
||||
#define TK_VARIABLE 323
|
||||
#define TK_VIEW 324
|
||||
#define TK_WAL 325
|
||||
|
||||
#define TK_NK_SPACE 600
|
||||
#define TK_NK_COMMENT 601
|
||||
|
|
|
@ -343,8 +343,8 @@ typedef struct tDataTypeDescriptor {
|
|||
extern tDataTypeDescriptor tDataTypes[TSDB_DATA_TYPE_MAX];
|
||||
bool isValidDataType(int32_t type);
|
||||
|
||||
int32_t operateVal(void *dst, void *s1, void *s2, int32_t optr, int32_t type);
|
||||
void assignVal(char *val, const char *src, int32_t len, int32_t type);
|
||||
void operateVal(void *dst, void *s1, void *s2, int32_t optr, int32_t type);
|
||||
void *getDataMin(int32_t type, void* value);
|
||||
void *getDataMax(int32_t type, void* value);
|
||||
|
||||
|
|
|
@ -210,6 +210,9 @@ int32_t catalogGetCachedTableMeta(SCatalog* pCtg, const SName* pTableName, STabl
|
|||
|
||||
int32_t catalogGetCachedSTableMeta(SCatalog* pCtg, const SName* pTableName, STableMeta** pTableMeta);
|
||||
|
||||
int32_t catalogGetTablesHashVgId(SCatalog* pCtg, SRequestConnInfo* pConn, int32_t acctId, const char* pDb, const char* pTableName[],
|
||||
int32_t tableNum, int32_t *vgId);
|
||||
|
||||
int32_t catalogGetCachedTableHashVgroup(SCatalog* pCtg, const SName* pTableName, SVgroupInfo* pVgroup, bool* exists);
|
||||
|
||||
int32_t catalogGetCachedTableVgMeta(SCatalog* pCtg, const SName* pTableName, SVgroupInfo* pVgroup, STableMeta** pTableMeta);
|
||||
|
|
|
@ -22,7 +22,7 @@
|
|||
|
||||
typedef struct SExplainCtx SExplainCtx;
|
||||
|
||||
int32_t qExecCommand(bool sysInfoUser, SNode *pStmt, SRetrieveTableRsp **pRsp);
|
||||
int32_t qExecCommand(int64_t* pConnId, bool sysInfoUser, SNode *pStmt, SRetrieveTableRsp **pRsp);
|
||||
|
||||
int32_t qExecStaticExplain(SQueryPlan *pDag, SRetrieveTableRsp **pRsp);
|
||||
int32_t qExecExplainBegin(SQueryPlan *pDag, SExplainCtx **pCtx, int64_t startTs);
|
||||
|
|
|
@ -154,6 +154,8 @@ void qCleanExecTaskBlockBuf(qTaskInfo_t tinfo);
|
|||
*/
|
||||
int32_t qAsyncKillTask(qTaskInfo_t tinfo, int32_t rspCode);
|
||||
|
||||
bool qTaskIsExecuting(qTaskInfo_t qinfo);
|
||||
|
||||
/**
|
||||
* destroy query info structure
|
||||
* @param qHandle
|
||||
|
@ -190,7 +192,9 @@ int32_t qStreamPrepareTsdbScan(qTaskInfo_t tinfo, uint64_t uid, int64_t ts);
|
|||
|
||||
int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subType);
|
||||
|
||||
int32_t qStreamScanMemData(qTaskInfo_t tinfo, const SSubmitReq* pReq);
|
||||
// int32_t qStreamScanMemData(qTaskInfo_t tinfo, const SSubmitReq* pReq, int64_t ver);
|
||||
//
|
||||
int32_t qStreamSetScanMemData(qTaskInfo_t tinfo, SPackedData submit);
|
||||
|
||||
int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset);
|
||||
|
||||
|
@ -214,6 +218,7 @@ int32_t qStreamSourceRecoverStep2(qTaskInfo_t tinfo, int64_t ver);
|
|||
int32_t qStreamRecoverFinish(qTaskInfo_t tinfo);
|
||||
int32_t qStreamRestoreParam(qTaskInfo_t tinfo);
|
||||
bool qStreamRecoverScanFinished(qTaskInfo_t tinfo);
|
||||
void qStreamCloseTsdbReader(void* task);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -20,7 +20,6 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include "tbuffer.h"
|
||||
#include "tcommon.h"
|
||||
#include "tvariant.h"
|
||||
|
||||
|
@ -138,7 +137,7 @@ typedef struct SqlFunctionCtx {
|
|||
char *pOutput; // final result output buffer, point to sdata->data
|
||||
int32_t numOfParams;
|
||||
// input parameter, e.g., top(k, 20), the number of results of top query is kept in param
|
||||
SFunctParam *param;
|
||||
SFunctParam *param;
|
||||
// corresponding output buffer for timestamp of each result, e.g., diff/csum
|
||||
SColumnInfoData *pTsOutput;
|
||||
int32_t offset;
|
||||
|
@ -152,6 +151,7 @@ typedef struct SqlFunctionCtx {
|
|||
struct SSDataBlock *pSrcBlock;
|
||||
struct SSDataBlock *pDstBlock; // used by indefinite rows function to set selectivity
|
||||
SSerializeDataHandle saveHandle;
|
||||
int32_t exprIdx;
|
||||
char udfName[TSDB_FUNC_NAME_LEN];
|
||||
} SqlFunctionCtx;
|
||||
|
||||
|
@ -182,9 +182,9 @@ struct SScalarParam {
|
|||
int32_t numOfQualified; // number of qualified elements in the final results
|
||||
};
|
||||
|
||||
void cleanupResultRowEntry(struct SResultRowEntryInfo *pCell);
|
||||
bool isRowEntryCompleted(struct SResultRowEntryInfo *pEntry);
|
||||
bool isRowEntryInitialized(struct SResultRowEntryInfo *pEntry);
|
||||
void cleanupResultRowEntry(struct SResultRowEntryInfo *pCell);
|
||||
bool isRowEntryCompleted(struct SResultRowEntryInfo *pEntry);
|
||||
bool isRowEntryInitialized(struct SResultRowEntryInfo *pEntry);
|
||||
|
||||
typedef struct SPoint {
|
||||
int64_t key;
|
||||
|
|
|
@ -40,6 +40,7 @@ extern "C" {
|
|||
#define SHOW_LOCAL_VARIABLES_RESULT_FIELD1_LEN (TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE)
|
||||
#define SHOW_LOCAL_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE)
|
||||
|
||||
#define SHOW_ALIVE_RESULT_COLS 1
|
||||
#define PRIVILEGE_TYPE_MASK(n) (1 << n)
|
||||
|
||||
#define PRIVILEGE_TYPE_ALL PRIVILEGE_TYPE_MASK(0)
|
||||
|
@ -262,6 +263,11 @@ typedef struct SShowCreateDatabaseStmt {
|
|||
void* pCfg; // SDbCfgInfo
|
||||
} SShowCreateDatabaseStmt;
|
||||
|
||||
typedef struct SShowAliveStmt {
|
||||
ENodeType type;
|
||||
char dbName[TSDB_DB_NAME_LEN];
|
||||
} SShowAliveStmt;
|
||||
|
||||
typedef struct SShowCreateTableStmt {
|
||||
ENodeType type;
|
||||
char dbName[TSDB_DB_NAME_LEN];
|
||||
|
@ -295,7 +301,7 @@ typedef struct SShowTableTagsStmt {
|
|||
SNodeList* pTags;
|
||||
} SShowTableTagsStmt;
|
||||
|
||||
typedef enum EIndexType { INDEX_TYPE_SMA = 1, INDEX_TYPE_FULLTEXT } EIndexType;
|
||||
typedef enum EIndexType { INDEX_TYPE_SMA = 1, INDEX_TYPE_FULLTEXT, INDEX_TYPE_NORMAL } EIndexType;
|
||||
|
||||
typedef struct SIndexOptions {
|
||||
ENodeType type;
|
||||
|
@ -401,6 +407,7 @@ typedef struct SCreateStreamStmt {
|
|||
SNode* pQuery;
|
||||
SNodeList* pTags;
|
||||
SNode* pSubtable;
|
||||
SNodeList* pCols;
|
||||
} SCreateStreamStmt;
|
||||
|
||||
typedef struct SDropStreamStmt {
|
||||
|
|
|
@ -208,6 +208,8 @@ typedef enum ENodeType {
|
|||
QUERY_NODE_DELETE_STMT,
|
||||
QUERY_NODE_INSERT_STMT,
|
||||
QUERY_NODE_QUERY,
|
||||
QUERY_NODE_SHOW_DB_ALIVE_STMT,
|
||||
QUERY_NODE_SHOW_CLUSTER_ALIVE_STMT,
|
||||
|
||||
// logic plan node
|
||||
QUERY_NODE_LOGIC_PLAN_SCAN = 1000,
|
||||
|
|
|
@ -121,6 +121,7 @@ typedef struct SAggLogicNode {
|
|||
bool hasLast;
|
||||
bool hasTimeLineFunc;
|
||||
bool onlyHasKeepOrderFunc;
|
||||
bool hasGroupKeyOptimized;
|
||||
} SAggLogicNode;
|
||||
|
||||
typedef struct SProjectLogicNode {
|
||||
|
@ -409,6 +410,7 @@ typedef struct SAggPhysiNode {
|
|||
SNodeList* pGroupKeys;
|
||||
SNodeList* pAggFuncs;
|
||||
bool mergeDataBlock;
|
||||
bool groupKeyOptimized;
|
||||
} SAggPhysiNode;
|
||||
|
||||
typedef struct SDownstreamSourceNode {
|
||||
|
|
|
@ -361,33 +361,33 @@ typedef struct SVgDataBlocks {
|
|||
void* pData; // SSubmitReq + SSubmitBlk + ...
|
||||
} SVgDataBlocks;
|
||||
|
||||
typedef void (*FFreeDataBlockHash)(SHashObj*);
|
||||
typedef void (*FFreeDataBlockArray)(SArray*);
|
||||
typedef void (*FFreeTableBlockHash)(SHashObj*);
|
||||
typedef void (*FFreeVgourpBlockArray)(SArray*);
|
||||
|
||||
typedef struct SVnodeModifyOpStmt {
|
||||
ENodeType nodeType;
|
||||
ENodeType sqlNodeType;
|
||||
SArray* pDataBlocks; // data block for each vgroup, SArray<SVgDataBlocks*>.
|
||||
uint32_t insertType; // insert data from [file|sql statement| bound statement]
|
||||
const char* pSql; // current sql statement position
|
||||
int32_t totalRowsNum;
|
||||
int32_t totalTbNum;
|
||||
SName targetTableName;
|
||||
SName usingTableName;
|
||||
const char* pBoundCols;
|
||||
struct STableMeta* pTableMeta;
|
||||
SHashObj* pVgroupsHashObj;
|
||||
SHashObj* pTableBlockHashObj;
|
||||
SHashObj* pSubTableHashObj;
|
||||
SHashObj* pTableNameHashObj;
|
||||
SHashObj* pDbFNameHashObj;
|
||||
SArray* pVgDataBlocks;
|
||||
SVCreateTbReq createTblReq;
|
||||
TdFilePtr fp;
|
||||
FFreeDataBlockHash freeHashFunc;
|
||||
FFreeDataBlockArray freeArrayFunc;
|
||||
bool usingTableProcessing;
|
||||
bool fileProcessing;
|
||||
ENodeType nodeType;
|
||||
ENodeType sqlNodeType;
|
||||
SArray* pDataBlocks; // data block for each vgroup, SArray<SVgDataBlocks*>.
|
||||
uint32_t insertType; // insert data from [file|sql statement| bound statement]
|
||||
const char* pSql; // current sql statement position
|
||||
int32_t totalRowsNum;
|
||||
int32_t totalTbNum;
|
||||
SName targetTableName;
|
||||
SName usingTableName;
|
||||
const char* pBoundCols;
|
||||
struct STableMeta* pTableMeta;
|
||||
SHashObj* pVgroupsHashObj;
|
||||
SHashObj* pTableBlockHashObj; // SHashObj<tuid, STableDataCxt*>
|
||||
SHashObj* pSubTableHashObj;
|
||||
SHashObj* pTableNameHashObj;
|
||||
SHashObj* pDbFNameHashObj;
|
||||
SArray* pVgDataBlocks; // SArray<SVgroupDataCxt*>
|
||||
SVCreateTbReq* pCreateTblReq;
|
||||
TdFilePtr fp;
|
||||
FFreeTableBlockHash freeHashFunc;
|
||||
FFreeVgourpBlockArray freeArrayFunc;
|
||||
bool usingTableProcessing;
|
||||
bool fileProcessing;
|
||||
} SVnodeModifyOpStmt;
|
||||
|
||||
typedef struct SExplainOptions {
|
||||
|
|
|
@ -58,7 +58,6 @@ typedef struct SParseContext {
|
|||
bool isSuperUser;
|
||||
bool enableSysInfo;
|
||||
bool async;
|
||||
int8_t schemalessType;
|
||||
const char* svrVer;
|
||||
bool nodeOffline;
|
||||
SArray* pTableMetaPos; // sql table pos => catalog data pos
|
||||
|
@ -85,12 +84,12 @@ int32_t qSetSTableIdForRsma(SNode* pStmt, int64_t uid);
|
|||
void qCleanupKeywordsTable();
|
||||
|
||||
int32_t qBuildStmtOutput(SQuery* pQuery, SHashObj* pVgHash, SHashObj* pBlockHash);
|
||||
int32_t qResetStmtDataBlock(void* block, bool keepBuf);
|
||||
int32_t qCloneStmtDataBlock(void** pDst, void* pSrc);
|
||||
void qFreeStmtDataBlock(void* pDataBlock);
|
||||
int32_t qRebuildStmtDataBlock(void** pDst, void* pSrc, uint64_t uid, int32_t vgId);
|
||||
void qDestroyStmtDataBlock(void* pBlock);
|
||||
STableMeta* qGetTableMetaInDataBlock(void* pDataBlock);
|
||||
int32_t qResetStmtDataBlock(STableDataCxt* block, bool keepBuf);
|
||||
int32_t qCloneStmtDataBlock(STableDataCxt** pDst, STableDataCxt* pSrc, bool reset);
|
||||
int32_t qRebuildStmtDataBlock(STableDataCxt** pDst, STableDataCxt* pSrc, uint64_t uid, uint64_t suid, int32_t vgId, bool rebuildCreateTb);
|
||||
void qDestroyStmtDataBlock(STableDataCxt* pBlock);
|
||||
STableMeta* qGetTableMetaInDataBlock(STableDataCxt* pDataBlock);
|
||||
int32_t qCloneCurrentTbData(STableDataCxt* pDataBlock, SSubmitTbData **pData);
|
||||
|
||||
int32_t qStmtBindParams(SQuery* pQuery, TAOS_MULTI_BIND* pParams, int32_t colIdx);
|
||||
int32_t qStmtParseQuerySql(SParseContext* pCxt, SQuery* pQuery);
|
||||
|
@ -105,11 +104,18 @@ void destroyBoundColumnInfo(void* pBoundInfo);
|
|||
int32_t qCreateSName(SName* pName, const char* pTableName, int32_t acctId, char* dbName, char* msgBuf,
|
||||
int32_t msgBufLen);
|
||||
|
||||
void* smlInitHandle(SQuery* pQuery);
|
||||
void smlDestroyHandle(void* pHandle);
|
||||
int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols, bool format, STableMeta* pTableMeta,
|
||||
void qDestroyBoundColInfo(void* pInfo);
|
||||
|
||||
SQuery* smlInitHandle();
|
||||
int32_t smlBuildRow(STableDataCxt* pTableCxt);
|
||||
int32_t smlBuildCol(STableDataCxt* pTableCxt, SSchema* schema, void *kv, int32_t index);
|
||||
STableDataCxt* smlInitTableDataCtx(SQuery* query, STableMeta* pTableMeta);
|
||||
|
||||
int32_t smlBindData(SQuery* handle, bool dataFormat, SArray* tags, SArray* colsSchema, SArray* cols, STableMeta* pTableMeta,
|
||||
char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl, char* msgBuf, int16_t msgBufLen);
|
||||
int32_t smlBuildOutput(void* handle, SHashObj* pVgHash);
|
||||
int32_t smlBuildOutput(SQuery* handle, SHashObj* pVgHash);
|
||||
|
||||
int rawBlockBindData(SQuery *query, STableMeta* pTableMeta, void* data, SVCreateTbReq* pCreateTb, TAOS_FIELD *fields, int numFields);
|
||||
|
||||
int32_t rewriteToVnodeModifyOpStmt(SQuery* pQuery, SArray* pBufArray);
|
||||
SArray* serializeVgroupsCreateTableBatch(SHashObj* pVgroupHashmap);
|
||||
|
|
|
@ -163,6 +163,23 @@ typedef struct STargetInfo {
|
|||
int32_t vgId;
|
||||
} STargetInfo;
|
||||
|
||||
typedef struct SBoundColInfo {
|
||||
int16_t* pColIndex; // bound index => schema index
|
||||
int32_t numOfCols;
|
||||
int32_t numOfBound;
|
||||
} SBoundColInfo;
|
||||
|
||||
typedef struct STableDataCxt {
|
||||
STableMeta* pMeta;
|
||||
STSchema* pSchema;
|
||||
SBoundColInfo boundColsInfo;
|
||||
SArray* pValues;
|
||||
SSubmitTbData* pData;
|
||||
TSKEY lastTs;
|
||||
bool ordered;
|
||||
bool duplicateTs;
|
||||
} STableDataCxt;
|
||||
|
||||
typedef int32_t (*__async_send_cb_fn_t)(void* param, SDataBuf* pMsg, int32_t code);
|
||||
typedef int32_t (*__async_exec_fn_t)(void* param);
|
||||
|
||||
|
@ -190,6 +207,12 @@ typedef struct SQueryNodeStat {
|
|||
int32_t tableNum; // vg table number, unit is TSDB_TABLE_NUM_UNIT
|
||||
} SQueryNodeStat;
|
||||
|
||||
typedef struct SColLocation {
|
||||
int16_t slotId;
|
||||
col_id_t colId;
|
||||
int8_t type;
|
||||
} SColLocation;
|
||||
|
||||
int32_t initTaskQueue();
|
||||
int32_t cleanupTaskQueue();
|
||||
|
||||
|
@@ -238,6 +261,7 @@ int32_t dataConverToStr(char* str, int type, void* buf, int32_t bufSize, int32_t
char*   parseTagDatatoJson(void* p);
int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst);
int32_t cloneDbVgInfo(SDBVgInfo* pSrc, SDBVgInfo** pDst);
int32_t cloneSVreateTbReq(SVCreateTbReq* pSrc, SVCreateTbReq** pDst);
void    freeVgInfo(SDBVgInfo* vgInfo);

extern int32_t (*queryBuildMsg[TDMT_MAX])(void* input, char** msg, int32_t msgSize, int32_t* msgLen,

@@ -268,7 +292,7 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
  ((_code) == TSDB_CODE_SYN_NOT_LEADER || (_code) == TSDB_CODE_SYN_RESTORING || (_code) == TSDB_CODE_SYN_INTERNAL_ERROR)
#define SYNC_OTHER_LEADER_REDIRECT_ERROR(_code) ((_code) == TSDB_CODE_MNODE_NOT_FOUND)

#define NO_RET_REDIRECT_ERROR(_code) ((_code) == TSDB_CODE_RPC_BROKEN_LINK || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL)
#define NO_RET_REDIRECT_ERROR(_code) ((_code) == TSDB_CODE_RPC_BROKEN_LINK || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL || (_code) == TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED)

#define NEED_REDIRECT_ERROR(_code) \
  (NO_RET_REDIRECT_ERROR(_code) || SYNC_UNKNOWN_LEADER_REDIRECT_ERROR(_code) || \

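The redirect macros above classify error codes that should make the client retry against another node or a refreshed endpoint set. A hedged sketch of how the NO_RET_REDIRECT_ERROR class might be consumed; the two 0x00xx values come from the taoserror.h hunk later in this commit, while the NETWORK_UNAVAIL value and the helper function are placeholders, not the real client code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TSDB_CODE_RPC_NETWORK_UNAVAIL        0x0001  /* placeholder value */
#define TSDB_CODE_RPC_BROKEN_LINK            0x0018
#define TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED 0x0020

#define NO_RET_REDIRECT_ERROR(_code)                        \
  ((_code) == TSDB_CODE_RPC_BROKEN_LINK ||                  \
   (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL ||              \
   (_code) == TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED)

static bool demoShouldRedirect(int32_t code) {
  /* The real NEED_REDIRECT_ERROR additionally folds in the sync
   * leader-redirect checks; only the RPC part is shown here. */
  return NO_RET_REDIRECT_ERROR(code);
}

int main(void) {
  printf("%d\n", demoShouldRedirect(TSDB_CODE_RPC_BROKEN_LINK));  /* 1 */
  printf("%d\n", demoShouldRedirect(0));                          /* 0 */
  return 0;
}
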
@@ -103,6 +103,7 @@ typedef struct {
  int8_t type;
} SStreamQueueItem;

#if 0
typedef struct {
  int8_t  type;
  int64_t ver;

@@ -116,6 +117,21 @@ typedef struct {
  SArray* dataRefs;  // SArray<int32_t*>
  SArray* reqs;      // SArray<SSubmitReq*>
} SStreamMergedSubmit;
#endif

typedef struct {
  int8_t      type;
  int64_t     ver;
  int32_t*    dataRef;
  SPackedData submit;
} SStreamDataSubmit2;

typedef struct {
  int8_t  type;
  int64_t ver;
  SArray* dataRefs;  // SArray<int32_t*>
  SArray* submits;   // SArray<SPackedSubmit>
} SStreamMergedSubmit2;

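SStreamDataSubmit2 keeps its reference counter out of line (int32_t* dataRef), so the original item and every streamSubmitRefClone copy share a single counter and the packed submit payload is released only by the last holder. A hedged sketch of that ownership pattern; the demo* names and allocation layout are illustrative, not the actual stream implementation:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int8_t   type;
  int64_t  ver;
  int32_t *dataRef;   // shared counter, as in SStreamDataSubmit2
  void    *payload;   // stands in for the SPackedData submit body
} DemoSubmit;

static DemoSubmit *demoSubmitNew(void *payload) {
  DemoSubmit *p = calloc(1, sizeof(*p));
  p->dataRef = malloc(sizeof(int32_t));
  *p->dataRef = 1;
  p->payload = payload;
  return p;
}

static DemoSubmit *demoSubmitClone(DemoSubmit *src) {
  DemoSubmit *p = malloc(sizeof(*p));
  *p = *src;              // the clone shares dataRef and payload
  (*p->dataRef)++;
  return p;
}

static void demoSubmitRefDec(DemoSubmit *p) {
  if (--(*p->dataRef) == 0) {
    free(p->dataRef);     // the last holder releases the shared parts
    free(p->payload);
  }
  free(p);
}

int main(void) {
  DemoSubmit *a = demoSubmitNew(malloc(16));
  DemoSubmit *b = demoSubmitClone(a);
  demoSubmitRefDec(a);    // payload still alive, b holds a reference
  demoSubmitRefDec(b);    // now everything is freed
  return 0;
}
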
typedef struct {
  int8_t type;

@@ -219,11 +235,11 @@ static FORCE_INLINE void* streamQueueNextItem(SStreamQueue* queue) {
  }
}

SStreamDataSubmit* streamDataSubmitNew(SSubmitReq* pReq);
SStreamDataSubmit2* streamDataSubmitNew(SPackedData submit);

void streamDataSubmitRefDec(SStreamDataSubmit* pDataSubmit);
void streamDataSubmitRefDec(SStreamDataSubmit2* pDataSubmit);

SStreamDataSubmit* streamSubmitRefClone(SStreamDataSubmit* pSubmit);
SStreamDataSubmit2* streamSubmitRefClone(SStreamDataSubmit2* pSubmit);

typedef struct {
  char* qmsg;

@@ -355,14 +371,15 @@ void tFreeSStreamTask(SStreamTask* pTask);

static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem* pItem) {
  if (pItem->type == STREAM_INPUT__DATA_SUBMIT) {
    SStreamDataSubmit* pSubmitClone = streamSubmitRefClone((SStreamDataSubmit*)pItem);
    SStreamDataSubmit2* pSubmitClone = streamSubmitRefClone((SStreamDataSubmit2*)pItem);
    if (pSubmitClone == NULL) {
      qDebug("task %d %p submit enqueue failed since out of memory", pTask->taskId, pTask);
      terrno = TSDB_CODE_OUT_OF_MEMORY;
      atomic_store_8(&pTask->inputStatus, TASK_INPUT_STATUS__FAILED);
      return -1;
    }
    qDebug("task %d %p submit enqueue %p %p %p", pTask->taskId, pTask, pItem, pSubmitClone, pSubmitClone->data);
    qDebug("task %d %p submit enqueue %p %p %p %d %" PRId64, pTask->taskId, pTask, pItem, pSubmitClone,
           pSubmitClone->submit.msgStr, pSubmitClone->submit.msgLen, pSubmitClone->submit.ver);
    taosWriteQitem(pTask->inputQueue->queue, pSubmitClone);
    // qStreamInput(pTask->exec.executor, pSubmitClone);
  } else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE ||

@@ -49,10 +49,12 @@ extern "C" {
#define SYNC_HEARTBEAT_REPLY_SLOW_MS 1500
#define SYNC_SNAP_RESEND_MS          1000 * 60

#define SYNC_VND_COMMIT_MIN_MS 1000

#define SYNC_MAX_BATCH_SIZE 1
#define SYNC_INDEX_BEGIN    0
#define SYNC_INDEX_INVALID  -1
#define SYNC_TERM_INVALID   -1  // 0xFFFFFFFFFFFFFFFF
#define SYNC_TERM_INVALID   -1

typedef enum {
  SYNC_STRATEGY_NO_SNAPSHOT = 0,

@@ -78,6 +80,8 @@ typedef enum {
} ESyncState;

typedef struct SNodeInfo {
  int64_t  clusterId;
  int32_t  nodeId;
  uint16_t nodePort;
  char     nodeFqdn[TSDB_FQDN_LEN];
} SNodeInfo;

@@ -230,6 +234,7 @@ int64_t syncOpen(SSyncInfo* pSyncInfo);
int32_t syncStart(int64_t rid);
void    syncStop(int64_t rid);
void    syncPreStop(int64_t rid);
void    syncPostStop(int64_t rid);
int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak, int64_t* seq);
int32_t syncProcessMsg(int64_t rid, SRpcMsg* pMsg);
int32_t syncReconfig(int64_t rid, SSyncCfg* pCfg);

@@ -24,7 +24,7 @@ extern "C" {

typedef enum { HTTP_GZIP, HTTP_FLAT } EHttpCompFlag;

int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32_t contLen, EHttpCompFlag flag);
int32_t taosSendHttpReport(const char* server, const char* uri, uint16_t port, char* pCont, int32_t contLen, EHttpCompFlag flag);

#ifdef __cplusplus
}

|
@@ -116,6 +116,7 @@ extern "C" {
#include "osTimer.h"
#include "osTimezone.h"
#include "taoserror.h"
#include "tlog.h"

#ifdef __cplusplus
}

@@ -22,12 +22,16 @@ extern "C" {

// If the error is in a third-party library, place this header file under the third-party library header file.
// When you want to use this feature, you should find or add the same function in the following section.
#if !defined(WINDOWS)

#ifndef ALLOW_FORBID_FUNC
#define malloc  MALLOC_FUNC_TAOS_FORBID
#define calloc  CALLOC_FUNC_TAOS_FORBID
#define realloc REALLOC_FUNC_TAOS_FORBID
#define free    FREE_FUNC_TAOS_FORBID
#endif
#endif  // ifndef ALLOW_FORBID_FUNC

#endif  // if !defined(WINDOWS)

void *taosMemoryMalloc(int64_t size);
void *taosMemoryCalloc(int64_t num, int64_t size);

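This hunk wraps the allocator poisoning in a !WINDOWS guard: unless a translation unit defines ALLOW_FORBID_FUNC, any direct malloc/calloc/realloc/free call expands to an undefined *_TAOS_FORBID symbol and fails to link, which steers code through the taosMemory* wrappers declared below. A small self-contained sketch of the same trick; the wrapper name here is a stand-in, not osMemory.h:

// Sketch of the "forbid" trick used above: redefining a libc name to a
// non-existent symbol turns any accidental direct call into a link error.
#include <stdio.h>
#include <stdlib.h>

static void *demoMalloc(size_t size) {   // stands in for taosMemoryMalloc
  return malloc(size);                   // defined before the poison, so this is fine
}

#ifndef ALLOW_FORBID_FUNC
#define malloc MALLOC_FUNC_TAOS_FORBID   // any later malloc(...) fails to link
#endif

int main(void) {
  char *p = demoMalloc(16);   // allowed: goes through the wrapper
  // char *q = malloc(16);    // would expand to MALLOC_FUNC_TAOS_FORBID(16)
  snprintf(p, 16, "ok");
  printf("%s\n", p);
  free(p);                    // free is not poisoned in this sketch
  return 0;
}
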
@@ -70,6 +70,7 @@ typedef struct {

SysNameInfo taosGetSysNameInfo();
bool        taosCheckCurrentInDll();
int         taosGetlocalhostname(char *hostname, size_t maxLen);

#ifdef __cplusplus
}

@ -46,27 +46,100 @@ void taosSetTerminalMode();
|
|||
int32_t taosGetOldTerminalMode();
|
||||
void taosResetTerminalMode();
|
||||
|
||||
#define STACKSIZE 100
|
||||
|
||||
#if !defined(WINDOWS)
|
||||
#define taosPrintTrace(flags, level, dflag) \
|
||||
{ \
|
||||
void* array[100]; \
|
||||
int32_t size = backtrace(array, 100); \
|
||||
char** strings = backtrace_symbols(array, size); \
|
||||
if (strings != NULL) { \
|
||||
taosPrintLog(flags, level, dflag, "obtained %d stack frames", size); \
|
||||
for (int32_t i = 0; i < size; i++) { \
|
||||
taosPrintLog(flags, level, dflag, "frame:%d, %s", i, strings[i]); \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
taosMemoryFree(strings); \
|
||||
#define taosLogTraceToBuf(buf, bufSize, ignoreNum) { \
|
||||
void* array[STACKSIZE]; \
|
||||
int32_t size = backtrace(array, STACKSIZE); \
|
||||
char** strings = backtrace_symbols(array, size); \
|
||||
int32_t offset = 0; \
|
||||
if (strings != NULL) { \
|
||||
offset = snprintf(buf, bufSize - 1, "obtained %d stack frames\n", (ignoreNum > 0) ? size - ignoreNum : size); \
|
||||
for (int32_t i = (ignoreNum > 0) ? ignoreNum : 0; i < size; i++) { \
|
||||
offset += snprintf(buf + offset, bufSize - 1 - offset, "frame:%d, %s\n", (ignoreNum > 0) ? i - ignoreNum : i, strings[i]); \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
taosMemoryFree(strings); \
|
||||
}
|
||||
|
||||
#define taosPrintTrace(flags, level, dflag, ignoreNum) \
|
||||
{ \
|
||||
void* array[STACKSIZE]; \
|
||||
int32_t size = backtrace(array, STACKSIZE); \
|
||||
char** strings = backtrace_symbols(array, size); \
|
||||
if (strings != NULL) { \
|
||||
taosPrintLog(flags, level, dflag, "obtained %d stack frames", (ignoreNum > 0) ? size - ignoreNum : size); \
|
||||
for (int32_t i = (ignoreNum > 0) ? ignoreNum : 0; i < size; i++) { \
|
||||
taosPrintLog(flags, level, dflag, "frame:%d, %s", (ignoreNum > 0) ? i - ignoreNum : i, strings[i]); \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
taosMemoryFree(strings); \
|
||||
}
|
||||
#else
|
||||
#define taosPrintTrace(flags, level, dflag) \
|
||||
|
||||
#include <windows.h>
|
||||
#include <dbghelp.h>
|
||||
|
||||
#define taosLogTraceToBuf(buf, bufSize, ignoreNum) { \
|
||||
unsigned int i; \
|
||||
void* stack[STACKSIZE]; \
|
||||
unsigned short frames; \
|
||||
SYMBOL_INFO* symbol; \
|
||||
HANDLE process; \
|
||||
int32_t offset = 0; \
|
||||
\
|
||||
process = GetCurrentProcess(); \
|
||||
\
|
||||
SymInitialize(process, NULL, TRUE); \
|
||||
\
|
||||
frames = CaptureStackBackTrace(0, STACKSIZE, stack, NULL); \
|
||||
symbol = (SYMBOL_INFO*)calloc(sizeof(SYMBOL_INFO) + 256 * sizeof(char), 1); \
|
||||
if (symbol != NULL) { \
|
||||
symbol->MaxNameLen = 255; \
|
||||
symbol->SizeOfStruct = sizeof(SYMBOL_INFO); \
|
||||
\
|
||||
if (frames > 0) { \
|
||||
offset = snprintf(buf, bufSize - 1, "obtained %d stack frames\n", (ignoreNum > 0) ? frames - ignoreNum : frames); \
|
||||
for (i = (ignoreNum > 0) ? ignoreNum : 0; i < frames; i++) { \
|
||||
SymFromAddr(process, (DWORD64)(stack[i]), 0, symbol); \
|
||||
offset += snprintf(buf + offset, bufSize - 1 - offset, "frame:%i, %s - 0x%0X\n", (ignoreNum > 0) ? i - ignoreNum : i, symbol->Name, symbol->Address); \
|
||||
} \
|
||||
} \
|
||||
free(symbol); \
|
||||
} \
|
||||
}
|
||||
|
||||
#define taosPrintTrace(flags, level, dflag, ignoreNum) \
|
||||
{ \
|
||||
taosPrintLog(flags, level, dflag, \
|
||||
"backtrace not implemented on windows, so detailed stack information cannot be printed"); \
|
||||
}
|
||||
unsigned int i; \
|
||||
void* stack[STACKSIZE]; \
|
||||
unsigned short frames; \
|
||||
SYMBOL_INFO* symbol; \
|
||||
HANDLE process; \
|
||||
\
|
||||
process = GetCurrentProcess(); \
|
||||
\
|
||||
SymInitialize(process, NULL, TRUE); \
|
||||
\
|
||||
frames = CaptureStackBackTrace(0, STACKSIZE, stack, NULL); \
|
||||
symbol = (SYMBOL_INFO*)calloc(sizeof(SYMBOL_INFO) + 256 * sizeof(char), 1); \
|
||||
if (symbol != NULL) { \
|
||||
symbol->MaxNameLen = 255; \
|
||||
symbol->SizeOfStruct = sizeof(SYMBOL_INFO); \
|
||||
\
|
||||
if (frames > 0) { \
|
||||
taosPrintLog(flags, level, dflag, "obtained %d stack frames\n", (ignoreNum > 0) ? frames - ignoreNum : frames); \
|
||||
for (i = (ignoreNum > 0) ? ignoreNum : 0; i < frames; i++) { \
|
||||
SymFromAddr(process, (DWORD64)(stack[i]), 0, symbol); \
|
||||
taosPrintLog(flags, level, dflag, "frame:%i, %s - 0x%0X\n", (ignoreNum > 0) ? i - ignoreNum : i, symbol->Name, symbol->Address); \
|
||||
} \
|
||||
} \
|
||||
free(symbol); \
|
||||
} \
|
||||
}
|
||||
#endif
|
||||
|
||||
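The non-Windows branch of the taosPrintTrace/taosLogTraceToBuf macros above is built on glibc's backtrace()/backtrace_symbols(), with the new ignoreNum parameter used to skip the logging frames themselves; the Windows branch uses CaptureStackBackTrace/SymFromAddr from dbghelp instead. A standalone, Linux-only sketch of the underlying mechanism, printing to stdout rather than through taosPrintLog:

// Linux/glibc-only sketch of the backtrace mechanism used above.
// Build with: cc demo.c (add -rdynamic for nicer symbol names).
#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>

#define STACKSIZE 100

static void dumpStack(int ignoreNum) {
  void  *array[STACKSIZE];
  int    size = backtrace(array, STACKSIZE);
  char **strings = backtrace_symbols(array, size);
  if (strings == NULL) return;

  printf("obtained %d stack frames\n", (ignoreNum > 0) ? size - ignoreNum : size);
  for (int i = (ignoreNum > 0) ? ignoreNum : 0; i < size; i++) {
    printf("frame:%d, %s\n", (ignoreNum > 0) ? i - ignoreNum : i, strings[i]);
  }
  free(strings);   // backtrace_symbols returns one malloc'd block
}

int main(void) {
  dumpStack(1);    // skip the dumpStack frame itself
  return 0;
}
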
#ifdef __cplusplus
|
||||
|
|
|
@@ -52,11 +52,13 @@ _exit:
  return code;
}

static FORCE_INLINE void tFree(uint8_t *pBuf) {
  if (pBuf) {
    taosMemoryFree(pBuf - sizeof(int64_t));
  }
}
#define tFree(BUF)                                        \
  do {                                                    \
    if (BUF) {                                            \
      taosMemoryFree((uint8_t *)(BUF) - sizeof(int64_t)); \
      (BUF) = NULL;                                       \
    }                                                     \
  } while (0)

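Replacing the inline tFree with a statement macro lets it reset the caller's pointer after releasing the buffer, so a stray second tFree on the same variable becomes a no-op instead of a double free (the real version also steps back over an 8-byte length prefix before freeing). A minimal sketch of that behaviour with plain malloc/free stand-ins:

// Sketch of why the macro form resets the pointer: a second tFreeDemo(p)
// is harmless. taosMemoryFree and the length prefix are replaced by plain
// malloc/free here.
#include <stdio.h>
#include <stdlib.h>

#define tFreeDemo(BUF) \
  do {                 \
    if (BUF) {         \
      free(BUF);       \
      (BUF) = NULL;    \
    }                  \
  } while (0)

int main(void) {
  char *p = malloc(32);
  tFreeDemo(p);   // frees and sets p to NULL
  tFreeDemo(p);   // no-op, instead of undefined behaviour
  printf("p is %s\n", p == NULL ? "NULL" : "dangling");
  return 0;
}
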
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -65,6 +65,8 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_RPC_PORT_EADDRINUSE TAOS_DEF_ERROR_CODE(0, 0x0017) //
|
||||
#define TSDB_CODE_RPC_BROKEN_LINK TAOS_DEF_ERROR_CODE(0, 0x0018) //
|
||||
#define TSDB_CODE_RPC_TIMEOUT TAOS_DEF_ERROR_CODE(0, 0x0019) //
|
||||
#define TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED TAOS_DEF_ERROR_CODE(0, 0x0020) // "Vgroup could not be connected"
|
||||
#define TSDB_CODE_RPC_SOMENODE_BROKEN_LINK TAOS_DEF_ERROR_CODE(0, 0x0021) //
|
||||
|
||||
//common & util
|
||||
#define TSDB_CODE_OPS_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x0100) //
|
||||
|
@ -156,6 +158,8 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_TSC_QUERY_KILLED TAOS_DEF_ERROR_CODE(0, 0X022D)
|
||||
#define TSDB_CODE_TSC_NO_EXEC_NODE TAOS_DEF_ERROR_CODE(0, 0X022E)
|
||||
#define TSDB_CODE_TSC_NOT_STABLE_ERROR TAOS_DEF_ERROR_CODE(0, 0X022F)
|
||||
#define TSDB_CODE_TSC_STMT_CACHE_ERROR TAOS_DEF_ERROR_CODE(0, 0X0230)
|
||||
#define TSDB_CODE_TSC_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0X0231)
|
||||
|
||||
// mnode-common
|
||||
// #define TSDB_CODE_MND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0300) // 2.x
|
||||
|
@ -343,6 +347,7 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_MND_TOPIC_SUBSCRIBED TAOS_DEF_ERROR_CODE(0, 0x03EB)
|
||||
#define TSDB_CODE_MND_CGROUP_USED TAOS_DEF_ERROR_CODE(0, 0x03EC)
|
||||
#define TSDB_CODE_MND_TOPIC_MUST_BE_DELETED TAOS_DEF_ERROR_CODE(0, 0x03ED)
|
||||
#define TSDB_CODE_MND_INVALID_SUB_OPTION TAOS_DEF_ERROR_CODE(0, 0x03EE)
|
||||
#define TSDB_CODE_MND_IN_REBALANCE TAOS_DEF_ERROR_CODE(0, 0x03EF)
|
||||
|
||||
// mnode-stream
|
||||
|
@ -410,6 +415,7 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_VND_NO_AVAIL_BUFPOOL TAOS_DEF_ERROR_CODE(0, 0x0528)
|
||||
#define TSDB_CODE_VND_STOPPED TAOS_DEF_ERROR_CODE(0, 0x0529)
|
||||
#define TSDB_CODE_VND_DUP_REQUEST TAOS_DEF_ERROR_CODE(0, 0x0530)
|
||||
#define TSDB_CODE_VND_QUERY_BUSY TAOS_DEF_ERROR_CODE(0, 0x0531)
|
||||
|
||||
// tsdb
|
||||
#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600)
|
||||
|
@ -517,6 +523,8 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_SYN_STANDBY_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0912)
|
||||
#define TSDB_CODE_SYN_BATCH_ERROR TAOS_DEF_ERROR_CODE(0, 0x0913)
|
||||
#define TSDB_CODE_SYN_RESTORING TAOS_DEF_ERROR_CODE(0, 0x0914)
|
||||
#define TSDB_CODE_SYN_INVALID_SNAPSHOT_MSG TAOS_DEF_ERROR_CODE(0, 0x0915) // internal
|
||||
#define TSDB_CODE_SYN_BUFFER_FULL TAOS_DEF_ERROR_CODE(0, 0x0916) //
|
||||
#define TSDB_CODE_SYN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x09FF)
|
||||
|
||||
// tq
|
||||
|
@ -694,6 +702,7 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_SML_INVALID_DATA TAOS_DEF_ERROR_CODE(0, 0x3002)
|
||||
#define TSDB_CODE_SML_INVALID_DB_CONF TAOS_DEF_ERROR_CODE(0, 0x3003)
|
||||
#define TSDB_CODE_SML_NOT_SAME_TYPE TAOS_DEF_ERROR_CODE(0, 0x3004)
|
||||
#define TSDB_CODE_SML_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x3005)
|
||||
|
||||
//tsma
|
||||
#define TSDB_CODE_TSMA_INIT_FAILED TAOS_DEF_ERROR_CODE(0, 0x3100)
|
||||
|
|
|
@ -22,19 +22,6 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
#if 0
|
||||
#define TARRAY(TYPE) \
|
||||
struct { \
|
||||
int32_t tarray_size_; \
|
||||
int32_t tarray_neles_; \
|
||||
struct TYPE* td_array_data_; \
|
||||
}
|
||||
|
||||
#define TARRAY_SIZE(ARRAY) (ARRAY)->tarray_size_
|
||||
#define TARRAY_NELES(ARRAY) (ARRAY)->tarray_neles_
|
||||
#define TARRAY_ELE_AT(ARRAY, IDX) ((ARRAY)->td_array_data_ + idx)
|
||||
#endif
|
||||
|
||||
#define TARRAY_MIN_SIZE 8
|
||||
#define TARRAY_GET_ELEM(array, index) ((void*)((char*)((array)->pData) + (index) * (array)->elemSize))
|
||||
#define TARRAY_ELEM_IDX(array, ele) (POINTER_DISTANCE(ele, (array)->pData) / (array)->elemSize)
|
||||
|
@ -46,6 +33,9 @@ typedef struct SArray {
|
|||
void* pData;
|
||||
} SArray;
|
||||
|
||||
#define TARRAY_SIZE(array) ((array)->size)
|
||||
#define TARRAY_DATA(array) ((array)->pData)
|
||||
|
||||
/**
|
||||
*
|
||||
* @param size
|
||||
|
@ -194,6 +184,13 @@ void taosArrayPopTailBatch(SArray* pArray, size_t cnt);
|
|||
*/
|
||||
void taosArrayRemove(SArray* pArray, size_t index);
|
||||
|
||||
/**
|
||||
* remove batch entry from the given index
|
||||
* @param pArray
|
||||
* @param index
|
||||
*/
|
||||
void taosArrayRemoveBatch(SArray* pArray, size_t index, size_t num, FDelete fp);
|
||||
|
||||
/**
|
||||
* copy the whole array from source to destination
|
||||
* @param pDst
|
||||
|
|
|
@ -1,168 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2020 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef _TD_UTIL_BUFFER_H_
|
||||
#define _TD_UTIL_BUFFER_H_
|
||||
|
||||
#include "os.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// usage example
|
||||
/*
|
||||
#include <stdio.h>
|
||||
#include "texception.h"
|
||||
|
||||
int32_t main( int32_t argc, char** argv ) {
|
||||
SBufferWriter bw = tbufInitWriter( NULL, false );
|
||||
|
||||
TRY( 1 ) {
|
||||
//--------------------- write ------------------------
|
||||
// reserve 1024 bytes for the buffer to improve performance
|
||||
tbufEnsureCapacity( &bw, 1024 );
|
||||
|
||||
// reserve space for the integer count
|
||||
size_t pos = tbufReserve( &bw, sizeof(int32_t) );
|
||||
// write 5 integers to the buffer
|
||||
for( int32_t i = 0; i < 5; i++) {
|
||||
tbufWriteInt32( &bw, i );
|
||||
}
|
||||
// write the integer count to buffer at reserved position
|
||||
tbufWriteInt32At( &bw, pos, 5 );
|
||||
|
||||
// write a string to the buffer
|
||||
tbufWriteString( &bw, "this is a string.\n" );
|
||||
// acquire the result and close the write buffer
|
||||
size_t size = tbufTell( &bw );
|
||||
char* data = tbufGetData( &bw, false );
|
||||
|
||||
//------------------------ read -----------------------
|
||||
SBufferReader br = tbufInitReader( data, size, false );
|
||||
// read & print out all integers
|
||||
int32_t count = tbufReadInt32( &br );
|
||||
for( int32_t i = 0; i < count; i++ ) {
|
||||
printf( "%d\n", tbufReadInt32(&br) );
|
||||
}
|
||||
// read & print out a string
|
||||
puts( tbufReadString(&br, NULL) );
|
||||
// try to read another integer; this results in an error since that integer does not exist
|
||||
tbufReadInt32( &br );
|
||||
printf( "you should not see this message.\n" );
|
||||
} CATCH( code ) {
|
||||
printf( "exception code is: %d, you will see this message after print out 5 integers and a string.\n", code );
|
||||
} END_TRY
|
||||
|
||||
tbufCloseWriter( &bw );
|
||||
return 0;
|
||||
}
|
||||
*/
|
||||
|
||||
typedef struct SBufferReader {
|
||||
bool endian;
|
||||
const char* data;
|
||||
size_t pos;
|
||||
size_t size;
|
||||
} SBufferReader;
|
||||
|
||||
typedef struct SBufferWriter {
|
||||
bool endian;
|
||||
char* data;
|
||||
size_t pos;
|
||||
size_t size;
|
||||
void* (*allocator)(void*, size_t);
|
||||
} SBufferWriter;
|
||||
|
||||
// common functions & macros for both reader & writer
|
||||
|
||||
#define tbufTell(buf) ((buf)->pos)
|
||||
|
||||
/* ------------------------ BUFFER WRITER FUNCTIONS AND MACROS ------------------------ */
|
||||
// *Allocator*, function to allocate memory, will use 'realloc' if NULL
|
||||
// *Endian*, if true, writer functions of primitive types will do 'hton' automatically
|
||||
#define tbufInitWriter(Allocator, Endian) \
|
||||
{ .endian = (Endian), .data = NULL, .pos = 0, .size = 0, .allocator = ((Allocator) == NULL ? realloc : (Allocator)) }
|
||||
|
||||
void tbufCloseWriter(SBufferWriter* buf);
|
||||
void tbufEnsureCapacity(SBufferWriter* buf, size_t size);
|
||||
size_t tbufReserve(SBufferWriter* buf, size_t size);
|
||||
char* tbufGetData(SBufferWriter* buf, bool takeOver);
|
||||
void tbufWrite(SBufferWriter* buf, const void* data, size_t size);
|
||||
void tbufWriteAt(SBufferWriter* buf, size_t pos, const void* data, size_t size);
|
||||
void tbufWriteStringLen(SBufferWriter* buf, const char* str, size_t len);
|
||||
void tbufWriteString(SBufferWriter* buf, const char* str);
|
||||
// The prototypes of tbufWriteBinary and tbufWrite are identical.
// The difference is: tbufWriteBinary writes the length of the data to the buffer
// first, then the actual data, which means the reader doesn't need to know the data
// size before reading. tbufWrite only writes the data itself, which means the reader
// needs to know the data size before reading.
void tbufWriteBinary(SBufferWriter* buf, const void* data, size_t len);
|
||||
void tbufWriteBool(SBufferWriter* buf, bool data);
|
||||
void tbufWriteBoolAt(SBufferWriter* buf, size_t pos, bool data);
|
||||
void tbufWriteChar(SBufferWriter* buf, char data);
|
||||
void tbufWriteCharAt(SBufferWriter* buf, size_t pos, char data);
|
||||
void tbufWriteInt8(SBufferWriter* buf, int8_t data);
|
||||
void tbufWriteInt8At(SBufferWriter* buf, size_t pos, int8_t data);
|
||||
void tbufWriteUint8(SBufferWriter* buf, uint8_t data);
|
||||
void tbufWriteUint8At(SBufferWriter* buf, size_t pos, uint8_t data);
|
||||
void tbufWriteInt16(SBufferWriter* buf, int16_t data);
|
||||
void tbufWriteInt16At(SBufferWriter* buf, size_t pos, int16_t data);
|
||||
void tbufWriteUint16(SBufferWriter* buf, uint16_t data);
|
||||
void tbufWriteUint16At(SBufferWriter* buf, size_t pos, uint16_t data);
|
||||
void tbufWriteInt32(SBufferWriter* buf, int32_t data);
|
||||
void tbufWriteInt32At(SBufferWriter* buf, size_t pos, int32_t data);
|
||||
void tbufWriteUint32(SBufferWriter* buf, uint32_t data);
|
||||
void tbufWriteUint32At(SBufferWriter* buf, size_t pos, uint32_t data);
|
||||
void tbufWriteInt64(SBufferWriter* buf, int64_t data);
|
||||
void tbufWriteInt64At(SBufferWriter* buf, size_t pos, int64_t data);
|
||||
void tbufWriteUint64(SBufferWriter* buf, uint64_t data);
|
||||
void tbufWriteUint64At(SBufferWriter* buf, size_t pos, uint64_t data);
|
||||
void tbufWriteFloat(SBufferWriter* buf, float data);
|
||||
void tbufWriteFloatAt(SBufferWriter* buf, size_t pos, float data);
|
||||
void tbufWriteDouble(SBufferWriter* buf, double data);
|
||||
void tbufWriteDoubleAt(SBufferWriter* buf, size_t pos, double data);
|
||||
|
||||
/* ------------------------ BUFFER READER FUNCTIONS AND MACROS ------------------------ */
|
||||
// *Endian*, if true, reader functions of primitive types will do 'ntoh' automatically
|
||||
#define tbufInitReader(Data, Size, Endian) \
|
||||
{ .endian = (Endian), .data = (Data), .pos = 0, .size = ((Data) == NULL ? 0 : (Size)) }
|
||||
|
||||
size_t tbufSkip(SBufferReader* buf, size_t size);
|
||||
const char* tbufRead(SBufferReader* buf, size_t size);
|
||||
void tbufReadToBuffer(SBufferReader* buf, void* dst, size_t size);
|
||||
const char* tbufReadString(SBufferReader* buf, size_t* len);
|
||||
size_t tbufReadToString(SBufferReader* buf, char* dst, size_t size);
|
||||
const char* tbufReadBinary(SBufferReader* buf, size_t* len);
|
||||
size_t tbufReadToBinary(SBufferReader* buf, void* dst, size_t size);
|
||||
bool tbufReadBool(SBufferReader* buf);
|
||||
char tbufReadChar(SBufferReader* buf);
|
||||
int8_t tbufReadInt8(SBufferReader* buf);
|
||||
uint8_t tbufReadUint8(SBufferReader* buf);
|
||||
int16_t tbufReadInt16(SBufferReader* buf);
|
||||
uint16_t tbufReadUint16(SBufferReader* buf);
|
||||
int32_t tbufReadInt32(SBufferReader* buf);
|
||||
uint32_t tbufReadUint32(SBufferReader* buf);
|
||||
int64_t tbufReadInt64(SBufferReader* buf);
|
||||
uint64_t tbufReadUint64(SBufferReader* buf);
|
||||
float tbufReadFloat(SBufferReader* buf);
|
||||
double tbufReadDouble(SBufferReader* buf);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /*_TD_UTIL_BUFFER_H_*/
|
|
@@ -36,17 +36,18 @@ extern "C" {
#define FLT_GREATEREQUAL(_x, _y) (FLT_EQUAL((_x), (_y)) || ((_x) > (_y)))
#define FLT_LESSEQUAL(_x, _y)    (FLT_EQUAL((_x), (_y)) || ((_x) < (_y)))

#define PATTERN_COMPARE_INFO_INITIALIZER \
  { '%', '_' }
#define PATTERN_COMPARE_INFO_INITIALIZER { '%', '_', L'%', L'_' }

typedef struct SPatternCompareInfo {
  char matchAll;  // symbol for match all wildcard, default: '%'
  char matchOne;  // symbol for match one wildcard, default: '_'
  char   matchAll;   // symbol for match all wildcard, default: '%'
  char   matchOne;   // symbol for match one wildcard, default: '_'
  TdUcs4 umatchAll;  // unicode version matchAll
  TdUcs4 umatchOne;  // unicode version matchOne
} SPatternCompareInfo;

int32_t patternMatch(const char *pattern, const char *str, size_t size, const SPatternCompareInfo *pInfo);
int32_t patternMatch(const char *pattern, size_t psize, const char *str, size_t ssize, const SPatternCompareInfo *pInfo);

int32_t WCSPatternMatch(const TdUcs4 *pattern, const TdUcs4 *str, size_t size, const SPatternCompareInfo *pInfo);
int32_t wcsPatternMatch(const TdUcs4 *pattern, size_t psize, const TdUcs4 *str, size_t ssize, const SPatternCompareInfo *pInfo);

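patternMatch and wcsPatternMatch now take explicit pattern and string lengths plus the extended SPatternCompareInfo, which also carries the UCS-4 wildcards. The sketch below only illustrates the '%' / '_' LIKE-style semantics those wildcards encode; it is a simplified stand-in, not the library implementation, and ignores the size arguments and TDengine return-code conventions:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

// Simplified SQL LIKE-style matcher: '%' matches any run, '_' matches one char.
static bool likeMatch(const char *pattern, const char *str) {
  if (*pattern == '\0') return *str == '\0';
  if (*pattern == '%') {
    for (size_t i = 0; i <= strlen(str); i++) {
      if (likeMatch(pattern + 1, str + i)) return true;
    }
    return false;
  }
  if (*str == '\0') return false;
  if (*pattern == '_' || *pattern == *str) return likeMatch(pattern + 1, str + 1);
  return false;
}

int main(void) {
  printf("%d\n", likeMatch("d_%", "d1"));      // 1: '_' eats one char, '%' eats the rest
  printf("%d\n", likeMatch("meters%", "m1"));  // 0
  return 0;
}
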
int32_t taosArrayCompareString(const void *a, const void *b);
|
||||
|
||||
|
@ -79,9 +80,11 @@ int32_t compareDoubleVal(const void *pLeft, const void *pRight);
|
|||
int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight);
|
||||
int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight);
|
||||
|
||||
int32_t compareStrRegexComp(const void *pLeft, const void *pRight);
|
||||
int32_t compareStrRegexCompMatch(const void *pLeft, const void *pRight);
|
||||
int32_t compareStrRegexCompNMatch(const void *pLeft, const void *pRight);
|
||||
int32_t comparestrRegexMatch(const void *pLeft, const void *pRight);
|
||||
int32_t comparestrRegexNMatch(const void *pLeft, const void *pRight);
|
||||
|
||||
int32_t comparewcsRegexMatch(const void *pLeft, const void *pRight);
|
||||
int32_t comparewcsRegexNMatch(const void *pLeft, const void *pRight);
|
||||
|
||||
int32_t compareInt8ValDesc(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt16ValDesc(const void *pLeft, const void *pRight);
|
||||
|
@ -99,11 +102,11 @@ int32_t compareUint64ValDesc(const void *pLeft, const void *pRight);
|
|||
int32_t compareLenPrefixedStrDesc(const void *pLeft, const void *pRight);
|
||||
int32_t compareLenPrefixedWStrDesc(const void *pLeft, const void *pRight);
|
||||
|
||||
int32_t compareStrPatternMatch(const void *pLeft, const void *pRight);
|
||||
int32_t compareStrPatternNotMatch(const void *pLeft, const void *pRight);
|
||||
int32_t comparestrPatternMatch(const void *pLeft, const void *pRight);
|
||||
int32_t comparestrPatternNMatch(const void *pLeft, const void *pRight);
|
||||
|
||||
int32_t compareWStrPatternMatch(const void *pLeft, const void *pRight);
|
||||
int32_t compareWStrPatternNotMatch(const void *pLeft, const void *pRight);
|
||||
int32_t comparewcsPatternMatch(const void *pLeft, const void *pRight);
|
||||
int32_t comparewcsPatternNMatch(const void *pLeft, const void *pRight);
|
||||
|
||||
int32_t compareInt8Int16(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt8Int32(const void *pLeft, const void *pRight);
|
||||
|
|
|
@ -189,12 +189,14 @@ typedef enum ELogicConditionType {
|
|||
#define TSDB_MAX_COLUMNS 4096
|
||||
#define TSDB_MIN_COLUMNS 2 // PRIMARY COLUMN(timestamp) + other columns
|
||||
|
||||
#define TSDB_NODE_NAME_LEN 64
|
||||
#define TSDB_TABLE_NAME_LEN 193 // it is a null-terminated string
|
||||
#define TSDB_TOPIC_NAME_LEN 193 // it is a null-terminated string
|
||||
#define TSDB_CGROUP_LEN 193 // it is a null-terminated string
|
||||
#define TSDB_DB_NAME_LEN 65
|
||||
#define TSDB_DB_FNAME_LEN (TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN + TSDB_NAME_DELIMITER_LEN)
|
||||
#define TSDB_NODE_NAME_LEN 64
|
||||
#define TSDB_TABLE_NAME_LEN 193 // it is a null-terminated string
|
||||
#define TSDB_TOPIC_NAME_LEN 193 // it is a null-terminated string
|
||||
#define TSDB_CGROUP_LEN 193 // it is a null-terminated string
|
||||
#define TSDB_USER_CGROUP_LEN (TSDB_USER_LEN + TSDB_CGROUP_LEN) // it is a null-terminated string
|
||||
#define TSDB_STREAM_NAME_LEN 193 // it is a null-terminated string
|
||||
#define TSDB_DB_NAME_LEN 65
|
||||
#define TSDB_DB_FNAME_LEN (TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN + TSDB_NAME_DELIMITER_LEN)
|
||||
|
||||
#define TSDB_FUNC_NAME_LEN 65
|
||||
#define TSDB_FUNC_COMMENT_LEN 1024 * 1024
|
||||
|
@ -212,8 +214,8 @@ typedef enum ELogicConditionType {
|
|||
#define TSDB_INDEX_FNAME_LEN (TSDB_DB_FNAME_LEN + TSDB_INDEX_NAME_LEN + TSDB_NAME_DELIMITER_LEN)
|
||||
#define TSDB_TYPE_STR_MAX_LEN 32
|
||||
#define TSDB_TABLE_FNAME_LEN (TSDB_DB_FNAME_LEN + TSDB_TABLE_NAME_LEN + TSDB_NAME_DELIMITER_LEN)
|
||||
#define TSDB_TOPIC_FNAME_LEN (TSDB_ACCT_ID_LEN + TSDB_TABLE_NAME_LEN + TSDB_NAME_DELIMITER_LEN)
|
||||
#define TSDB_STREAM_FNAME_LEN (TSDB_ACCT_ID_LEN + TSDB_TABLE_NAME_LEN + TSDB_NAME_DELIMITER_LEN)
|
||||
#define TSDB_TOPIC_FNAME_LEN (TSDB_ACCT_ID_LEN + TSDB_TOPIC_NAME_LEN + TSDB_NAME_DELIMITER_LEN)
|
||||
#define TSDB_STREAM_FNAME_LEN (TSDB_ACCT_ID_LEN + TSDB_STREAM_NAME_LEN + TSDB_NAME_DELIMITER_LEN)
|
||||
#define TSDB_SUBSCRIBE_KEY_LEN (TSDB_CGROUP_LEN + TSDB_TOPIC_FNAME_LEN + 2)
|
||||
#define TSDB_PARTITION_KEY_LEN (TSDB_SUBSCRIBE_KEY_LEN + 20)
|
||||
#define TSDB_COL_NAME_LEN 65
|
||||
|
@ -254,7 +256,7 @@ typedef enum ELogicConditionType {
|
|||
#define TSDB_EP_LEN (TSDB_FQDN_LEN + 6)
|
||||
#define TSDB_IPv4ADDR_LEN 16
|
||||
#define TSDB_FILENAME_LEN 128
|
||||
#define TSDB_SHOW_SQL_LEN 1024
|
||||
#define TSDB_SHOW_SQL_LEN 2048
|
||||
#define TSDB_SLOW_QUERY_SQL_LEN 512
|
||||
#define TSDB_SHOW_SUBQUERY_LEN 1000
|
||||
|
||||
|
@ -310,7 +312,7 @@ typedef enum ELogicConditionType {
|
|||
#define TSDB_MIN_KEEP (1 * 1440) // data in db to be reserved. unit minute
|
||||
#define TSDB_MAX_KEEP (365000 * 1440) // data in db to be reserved.
|
||||
#define TSDB_MAX_KEEP_NS (365 * 292 * 1440) // data in db to be reserved.
|
||||
#define TSDB_DEFAULT_KEEP (3650 * 1440) // ten years
|
||||
#define TSDB_DEFAULT_KEEP (3650 * 1440) // ten years
|
||||
#define TSDB_MIN_MINROWS_FBLOCK 10
|
||||
#define TSDB_MAX_MINROWS_FBLOCK 1000
|
||||
#define TSDB_DEFAULT_MINROWS_FBLOCK 100
|
||||
|
@ -498,7 +500,7 @@ enum {
|
|||
#define DEFAULT_PAGESIZE 4096
|
||||
|
||||
#define VNODE_TIMEOUT_SEC 60
|
||||
#define MNODE_TIMEOUT_SEC 10
|
||||
#define MNODE_TIMEOUT_SEC 60
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -116,6 +116,7 @@ static int32_t tEncodeI64v(SEncoder* pCoder, int64_t val);
|
|||
static int32_t tEncodeFloat(SEncoder* pCoder, float val);
|
||||
static int32_t tEncodeDouble(SEncoder* pCoder, double val);
|
||||
static int32_t tEncodeBinary(SEncoder* pCoder, const uint8_t* val, uint32_t len);
|
||||
static int32_t tEncodeBinaryEx(SEncoder* pCoder, const uint8_t* val, uint32_t len);
|
||||
static int32_t tEncodeCStrWithLen(SEncoder* pCoder, const char* val, uint32_t len);
|
||||
static int32_t tEncodeCStr(SEncoder* pCoder, const char* val);
|
||||
|
||||
|
|
|
@@ -30,6 +30,27 @@ extern "C" {
    val = _tmp;                                            \
  } while (0)

#define tjsonGetInt32ValueFromDouble(pJson, pName, val, code) \
  do {                                                        \
    double _tmp = 0;                                          \
    code = tjsonGetDoubleValue(pJson, pName, &_tmp);          \
    val = (int32_t)_tmp;                                      \
  } while (0)

#define tjsonGetInt8ValueFromDouble(pJson, pName, val, code) \
  do {                                                       \
    double _tmp = 0;                                         \
    code = tjsonGetDoubleValue(pJson, pName, &_tmp);         \
    val = (int8_t)_tmp;                                      \
  } while (0)

#define tjsonGetUInt16ValueFromDouble(pJson, pName, val, code) \
  do {                                                         \
    double _tmp = 0;                                           \
    code = tjsonGetDoubleValue(pJson, pName, &_tmp);           \
    val = (uint16_t)_tmp;                                      \
  } while (0)

typedef void SJson;

SJson* tjsonCreateObject();

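The new tjsonGet*ValueFromDouble macros exist because the JSON layer hands numbers back as doubles; each macro reads through tjsonGetDoubleValue and then narrows to the destination width. A self-contained sketch of the same shape, using a stand-in getter rather than the real tjson API:

#include <stdint.h>
#include <stdio.h>

// Stand-in for tjsonGetDoubleValue: pretend the object held {"pageSize": 4096}.
static int32_t demoGetDouble(const void *pJson, const char *pName, double *out) {
  (void)pJson; (void)pName;
  *out = 4096.0;
  return 0;                // 0 == success, mirroring TSDB_CODE_SUCCESS
}

#define demoGetInt32FromDouble(pJson, pName, val, code) \
  do {                                                  \
    double _tmp = 0;                                    \
    code = demoGetDouble(pJson, pName, &_tmp);          \
    val = (int32_t)_tmp;                                \
  } while (0)

int main(void) {
  int32_t pageSize = 0;
  int32_t code = 0;
  demoGetInt32FromDouble(NULL, "pageSize", pageSize, code);
  printf("code=%d pageSize=%d\n", (int)code, (int)pageSize);   // code=0 pageSize=4096
  return 0;
}
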
|
@@ -83,9 +83,26 @@ void taosPrintLongString(const char *flags, ELogLevel level, int32_t dflag, cons
#endif
    ;

bool taosAssert(bool condition, const char *file, int32_t line, const char *format, ...);
#define ASSERTS(condition, ...) taosAssert(condition, __FILE__, __LINE__, __VA_ARGS__)
#define ASSERT(condition)       ASSERTS(condition, "assert info not provided")
bool taosAssertDebug(bool condition, const char *file, int32_t line, const char *format, ...);
bool taosAssertRelease(bool condition);

// Disable all asserts that may compromise the performance.
#if defined DISABLE_ASSERT
#define ASSERT(condition)
#define ASSERTS(condition, ...)
#else
#define ASSERTS(condition, ...) taosAssertDebug(condition, __FILE__, __LINE__, __VA_ARGS__)
#ifdef NDEBUG
#define ASSERT(condition) taosAssertRelease(condition)
#else
#define ASSERT(condition) taosAssertDebug(condition, __FILE__, __LINE__, "assert info not provided")
#endif
#endif

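The reworked assert block gives three flavours: DISABLE_ASSERT compiles both macros away entirely, NDEBUG keeps a cheap taosAssertRelease check for plain ASSERT, and debug builds route everything through taosAssertDebug with file/line context. A sketch of the same selection logic, using stand-in functions instead of the real taosAssert* implementations:

#include <stdbool.h>
#include <stdio.h>

static bool demoAssertDebug(bool cond, const char *file, int line, const char *msg) {
  if (!cond) fprintf(stderr, "assert failed at %s:%d: %s\n", file, line, msg);
  return !cond;
}
static bool demoAssertRelease(bool cond) { return !cond; }  // no logging, just the check

#if defined DISABLE_ASSERT
#define DEMO_ASSERT(c)                       // compiled out entirely
#elif defined NDEBUG
#define DEMO_ASSERT(c) demoAssertRelease(c)
#else
#define DEMO_ASSERT(c) demoAssertDebug(c, __FILE__, __LINE__, "assert info not provided")
#endif

int main(void) {
  DEMO_ASSERT(1 + 1 == 2);  // passes in every flavour
  DEMO_ASSERT(0);           // logs in debug builds, silent check under NDEBUG, gone with DISABLE_ASSERT
  return 0;
}
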
void taosLogCrashInfo(char* nodeType, char* pMsg, int64_t msgLen, int signum, void *sigInfo);
|
||||
void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* pFd);
|
||||
void taosReleaseCrashLogFile(TdFilePtr pFile, bool truncateFile);
|
||||
int32_t taosGenCrashJsonMsg(int signum, char** pMsg, int64_t clusterId, int64_t startTime);
|
||||
|
||||
// clang-format off
|
||||
#define uFatal(...) { if (uDebugFlag & DEBUG_FATAL) { taosPrintLog("UTL FATAL", DEBUG_FATAL, tsLogEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }}
|
||||
|
|
|
@ -29,11 +29,17 @@ extern "C" {
|
|||
int32_t strdequote(char *src);
|
||||
size_t strtrim(char *src);
|
||||
char *strnchr(const char *haystack, char needle, int32_t len, bool skipquote);
|
||||
TdUcs4* wcsnchr(const TdUcs4* haystack, TdUcs4 needle, size_t len);
|
||||
|
||||
char **strsplit(char *src, const char *delim, int32_t *num);
|
||||
char *strtolower(char *dst, const char *src);
|
||||
char *strntolower(char *dst, const char *src, int32_t n);
|
||||
char *strntolower_s(char *dst, const char *src, int32_t n);
|
||||
int64_t strnatoi(char *num, int32_t len);
|
||||
|
||||
size_t tstrncspn(const char *str, size_t ssize, const char *reject, size_t rsize);
|
||||
size_t twcsncspn(const TdUcs4 *wcs, size_t size, const TdUcs4 *reject, size_t rsize);
|
||||
|
||||
char *strbetween(char *string, char *begin, char *end);
|
||||
char *paGetToken(char *src, char **token, int32_t *tokenLen);
|
||||
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
#define _TD_UTIL_WORKER_H_
|
||||
|
||||
#include "tqueue.h"
|
||||
#include "tarray.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
|
@ -26,10 +27,10 @@ typedef struct SQWorkerPool SQWorkerPool;
|
|||
typedef struct SWWorkerPool SWWorkerPool;
|
||||
|
||||
typedef struct SQWorker {
|
||||
int32_t id; // worker id
|
||||
int64_t pid; // thread pid
|
||||
TdThread thread; // thread id
|
||||
SQWorkerPool *pool;
|
||||
int32_t id; // worker id
|
||||
int64_t pid; // thread pid
|
||||
TdThread thread; // thread id
|
||||
void *pool;
|
||||
} SQWorker;
|
||||
|
||||
typedef struct SQWorkerPool {
|
||||
|
@ -42,6 +43,14 @@ typedef struct SQWorkerPool {
|
|||
TdThreadMutex mutex;
|
||||
} SQWorkerPool;
|
||||
|
||||
typedef struct SAutoQWorkerPool {
|
||||
float ratio;
|
||||
STaosQset *qset;
|
||||
const char *name;
|
||||
SArray *workers;
|
||||
TdThreadMutex mutex;
|
||||
} SAutoQWorkerPool;
|
||||
|
||||
typedef struct SWWorker {
|
||||
int32_t id; // worker id
|
||||
int64_t pid; // thread pid
|
||||
|
@ -65,6 +74,11 @@ void tQWorkerCleanup(SQWorkerPool *pool);
|
|||
STaosQueue *tQWorkerAllocQueue(SQWorkerPool *pool, void *ahandle, FItem fp);
|
||||
void tQWorkerFreeQueue(SQWorkerPool *pool, STaosQueue *queue);
|
||||
|
||||
int32_t tAutoQWorkerInit(SAutoQWorkerPool *pool);
|
||||
void tAutoQWorkerCleanup(SAutoQWorkerPool *pool);
|
||||
STaosQueue *tAutoQWorkerAllocQueue(SAutoQWorkerPool *pool, void *ahandle, FItem fp);
|
||||
void tAutoQWorkerFreeQueue(SAutoQWorkerPool *pool, STaosQueue *queue);
|
||||
|
||||
int32_t tWWorkerInit(SWWorkerPool *pool);
|
||||
void tWWorkerCleanup(SWWorkerPool *pool);
|
||||
STaosQueue *tWWorkerAllocQueue(SWWorkerPool *pool, void *ahandle, FItems fp);
|
||||
|
|
|
@ -43,6 +43,9 @@
|
|||
# Switch for allowing TDengine to collect and report service usage information
|
||||
# telemetryReporting 1
|
||||
|
||||
# Switch for allowing TDengine to collect and report crash information
|
||||
# crashReporting 1
|
||||
|
||||
# The maximum number of vnodes supported by this dnode
|
||||
# supportVnodes 0
|
||||
|
||||
|
|
|
@ -1,319 +0,0 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Generate the deb package for ubuntu, or rpm package for centos, or tar.gz package for other linux os
|
||||
|
||||
set -e
|
||||
# set -x
|
||||
|
||||
# release.sh -v [cluster | edge]
|
||||
# -c [aarch32 | aarch64 | x64 | x86 | mips64 | loongarch64...]
|
||||
# -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
|
||||
# -V [stable | beta]
|
||||
# -l [full | lite]
|
||||
# -s [static | dynamic]
|
||||
# -d [taos | ...]
|
||||
# -n [2.0.0.3]
|
||||
# -m [2.0.0.0]
|
||||
# -H [ false | true]
|
||||
|
||||
# set parameters by default value
|
||||
verMode=edge # [cluster, edge, cloud]
|
||||
verType=stable # [stable, beta]
|
||||
cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 loongarch64...]
|
||||
osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
|
||||
pagMode=full # [full | lite]
|
||||
soMode=dynamic # [static | dynamic]
|
||||
dbName=taos # [taos | ...]
|
||||
allocator=glibc # [glibc | jemalloc]
|
||||
verNumber=""
|
||||
verNumberComp="3.0.0.0"
|
||||
httpdBuild=false
|
||||
|
||||
while getopts "hv:V:c:o:l:s:d:a:n:m:H:" arg; do
|
||||
case $arg in
|
||||
v)
|
||||
#echo "verMode=$OPTARG"
|
||||
verMode=$(echo $OPTARG)
|
||||
;;
|
||||
V)
|
||||
#echo "verType=$OPTARG"
|
||||
verType=$(echo $OPTARG)
|
||||
;;
|
||||
c)
|
||||
#echo "cpuType=$OPTARG"
|
||||
cpuType=$(echo $OPTARG)
|
||||
;;
|
||||
l)
|
||||
#echo "pagMode=$OPTARG"
|
||||
pagMode=$(echo $OPTARG)
|
||||
;;
|
||||
s)
|
||||
#echo "soMode=$OPTARG"
|
||||
soMode=$(echo $OPTARG)
|
||||
;;
|
||||
d)
|
||||
#echo "dbName=$OPTARG"
|
||||
dbName=$(echo $OPTARG)
|
||||
;;
|
||||
a)
|
||||
#echo "allocator=$OPTARG"
|
||||
allocator=$(echo $OPTARG)
|
||||
;;
|
||||
n)
|
||||
#echo "verNumber=$OPTARG"
|
||||
verNumber=$(echo $OPTARG)
|
||||
;;
|
||||
m)
|
||||
#echo "verNumberComp=$OPTARG"
|
||||
verNumberComp=$(echo $OPTARG)
|
||||
;;
|
||||
o)
|
||||
#echo "osType=$OPTARG"
|
||||
osType=$(echo $OPTARG)
|
||||
;;
|
||||
H)
|
||||
#echo "httpdBuild=$OPTARG"
|
||||
httpdBuild=$(echo $OPTARG)
|
||||
;;
|
||||
h)
|
||||
echo "Usage: $(basename $0) -v [cluster | edge] "
|
||||
echo " -c [aarch32 | aarch64 | x64 | x86 | mips64 | loongarch64 ...] "
|
||||
echo " -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] "
|
||||
echo " -V [stable | beta] "
|
||||
echo " -l [full | lite] "
|
||||
echo " -a [glibc | jemalloc] "
|
||||
echo " -s [static | dynamic] "
|
||||
echo " -d [taos | ...] "
|
||||
echo " -n [version number] "
|
||||
echo " -m [compatible version number] "
|
||||
echo " -H [false | true] "
|
||||
exit 0
|
||||
;;
|
||||
?) #unknown option
|
||||
echo "unkonw argument"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
osType=$(uname)
|
||||
|
||||
echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} allocator=${allocator} verNumber=${verNumber} verNumberComp=${verNumberComp} httpdBuild=${httpdBuild}"
|
||||
|
||||
curr_dir=$(pwd)
|
||||
|
||||
if [ "$osType" == "Darwin" ]; then
|
||||
script_dir=$(dirname $0)
|
||||
cd ${script_dir}
|
||||
script_dir="$(pwd)"
|
||||
top_dir=${script_dir}/..
|
||||
else
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -f ${script_dir}/..)"
|
||||
fi
|
||||
|
||||
csudo=""
|
||||
#if command -v sudo > /dev/null; then
|
||||
# csudo="sudo "
|
||||
#fi
|
||||
|
||||
function is_valid_version() {
|
||||
[ -z $1 ] && return 1 || :
|
||||
|
||||
rx='^([0-9]+\.){3}(\*|[0-9]+)$'
|
||||
if [[ $1 =~ $rx ]]; then
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
function vercomp() {
|
||||
if [[ $1 == $2 ]]; then
|
||||
echo 0
|
||||
exit 0
|
||||
fi
|
||||
|
||||
local IFS=.
|
||||
local i ver1=($1) ver2=($2)
|
||||
|
||||
# fill empty fields in ver1 with zeros
|
||||
for ((i = ${#ver1[@]}; i < ${#ver2[@]}; i++)); do
|
||||
ver1[i]=0
|
||||
done
|
||||
|
||||
for ((i = 0; i < ${#ver1[@]}; i++)); do
|
||||
if [[ -z ${ver2[i]} ]]; then
|
||||
# fill empty fields in ver2 with zeros
|
||||
ver2[i]=0
|
||||
fi
|
||||
if ((10#${ver1[i]} > 10#${ver2[i]})); then
|
||||
echo 1
|
||||
exit 0
|
||||
fi
|
||||
if ((10#${ver1[i]} < 10#${ver2[i]})); then
|
||||
echo 2
|
||||
exit 0
|
||||
fi
|
||||
done
|
||||
echo 0
|
||||
}
|
||||
|
||||
# 1. check version information
|
||||
if ( (! is_valid_version $verNumber) || (! is_valid_version $verNumberComp) || [[ "$(vercomp $verNumber $verNumberComp)" == '2' ]]); then
|
||||
echo "please enter correct version"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "=======================new version number: ${verNumber}, compatible version: ${verNumberComp}======================================"
|
||||
|
||||
build_time=$(date +"%F %R")
|
||||
|
||||
# get commit id from git
|
||||
gitinfo=$(git rev-parse --verify HEAD)
|
||||
|
||||
if [[ "$verMode" == "cluster" ]] || [[ "$verMode" == "cloud" ]]; then
|
||||
enterprise_dir="${top_dir}/../enterprise"
|
||||
cd ${enterprise_dir}
|
||||
gitinfoOfInternal=$(git rev-parse --verify HEAD)
|
||||
else
|
||||
gitinfoOfInternal=NULL
|
||||
fi
|
||||
|
||||
cd "${curr_dir}"
|
||||
|
||||
# 2. cmake executable file
|
||||
compile_dir="${top_dir}/debug"
|
||||
if [ -d ${compile_dir} ]; then
|
||||
rm -rf ${compile_dir}
|
||||
fi
|
||||
|
||||
mkdir -p ${compile_dir}
|
||||
cd ${compile_dir}
|
||||
|
||||
if [[ "$allocator" == "jemalloc" ]]; then
|
||||
allocator_macro="-DJEMALLOC_ENABLED=true"
|
||||
else
|
||||
allocator_macro=""
|
||||
fi
|
||||
|
||||
if [[ "$dbName" != "taos" ]]; then
|
||||
source ${enterprise_dir}/packaging/oem/sed_$dbName.sh
|
||||
replace_community_$dbName
|
||||
fi
|
||||
|
||||
if [[ "$httpdBuild" == "true" ]]; then
|
||||
BUILD_HTTP=true
|
||||
else
|
||||
BUILD_HTTP=false
|
||||
fi
|
||||
|
||||
if [[ "$verMode" == "cluster" ]] || [[ "$verMode" == "cloud" ]]; then
|
||||
BUILD_HTTP=internal
|
||||
fi
|
||||
|
||||
if [[ "$pagMode" == "full" ]]; then
|
||||
BUILD_TOOLS=true
|
||||
else
|
||||
BUILD_TOOLS=false
|
||||
fi
|
||||
|
||||
# check support cpu type
|
||||
if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "arm64" ]] || [[ "$cpuType" == "arm32" ]] || [[ "$cpuType" == "mips64" ]] || [[ "$cpuType" == "loongarch64" ]] ; then
|
||||
if [ "$verMode" == "edge" ]; then
|
||||
# community-version compile
|
||||
cmake ../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
|
||||
elif [ "$verMode" == "cloud" ]; then
|
||||
cmake ../../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DBUILD_TAOSX=true -DBUILD_CLOUD=true -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
|
||||
elif [ "$verMode" == "cluster" ]; then
|
||||
if [[ "$dbName" != "taos" ]]; then
|
||||
replace_enterprise_$dbName
|
||||
fi
|
||||
cmake ../../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DBUILD_TAOSX=true -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
|
||||
fi
|
||||
else
|
||||
echo "input cpuType=${cpuType} error!!!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
ostype=`uname`
|
||||
if [ "${ostype}" == "Darwin" ]; then
|
||||
CORES=$(sysctl -n hw.ncpu)
|
||||
else
|
||||
CORES=$(grep -c ^processor /proc/cpuinfo)
|
||||
fi
|
||||
|
||||
if [[ "$allocator" == "jemalloc" ]]; then
|
||||
# jemalloc need compile first, so disable parallel build
|
||||
make -j ${CORES} && ${csudo}make install
|
||||
else
|
||||
make -j ${CORES} && ${csudo}make install
|
||||
fi
|
||||
|
||||
cd ${curr_dir}
|
||||
|
||||
# 3. Call the corresponding script for packaging
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
if [[ "$verMode" != "cluster" ]] && [[ "$verMode" != "cloud" ]] && [[ "$pagMode" == "full" ]] && [[ "$cpuType" == "x64" ]] && [[ "$dbName" == "taos" ]]; then
|
||||
ret='0'
|
||||
command -v dpkg >/dev/null 2>&1 || { ret='1'; }
|
||||
if [ "$ret" -eq 0 ]; then
|
||||
echo "====do deb package for the ubuntu system===="
|
||||
output_dir="${top_dir}/debs"
|
||||
if [ -d ${output_dir} ]; then
|
||||
rm -rf ${output_dir}
|
||||
fi
|
||||
mkdir -p ${output_dir}
|
||||
cd ${script_dir}/deb
|
||||
${csudo}./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
|
||||
|
||||
if [[ "$pagMode" == "full" ]]; then
|
||||
if [ -d ${top_dir}/tools/taos-tools/packaging/deb ]; then
|
||||
cd ${top_dir}/tools/taos-tools/packaging/deb
|
||||
taos_tools_ver=$(git tag |grep -v taos | sort | tail -1)
|
||||
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
|
||||
|
||||
${csudo}./make-taos-tools-deb.sh ${top_dir} \
|
||||
${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType}
|
||||
fi
|
||||
fi
|
||||
else
|
||||
echo "==========dpkg command not exist, so not release deb package!!!"
|
||||
fi
|
||||
ret='0'
|
||||
command -v rpmbuild >/dev/null 2>&1 || { ret='1'; }
|
||||
if [ "$ret" -eq 0 ]; then
|
||||
echo "====do rpm package for the centos system===="
|
||||
output_dir="${top_dir}/rpms"
|
||||
if [ -d ${output_dir} ]; then
|
||||
rm -rf ${output_dir}
|
||||
fi
|
||||
mkdir -p ${output_dir}
|
||||
cd ${script_dir}/rpm
|
||||
${csudo}./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
|
||||
|
||||
if [[ "$pagMode" == "full" ]]; then
|
||||
if [ -d ${top_dir}/tools/taos-tools/packaging/rpm ]; then
|
||||
cd ${top_dir}/tools/taos-tools/packaging/rpm
|
||||
taos_tools_ver=$(git tag |grep -v taos | sort | tail -1)
|
||||
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
|
||||
|
||||
${csudo}./make-taos-tools-rpm.sh ${top_dir} \
|
||||
${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType}
|
||||
fi
|
||||
fi
|
||||
else
|
||||
echo "==========rpmbuild command not exist, so not release rpm package!!!"
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "====do tar.gz package for all systems===="
|
||||
cd ${script_dir}/tools
|
||||
|
||||
${csudo}./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${verNumberComp} ${dbName}
|
||||
${csudo}./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
|
||||
|
||||
else
|
||||
cd ${script_dir}/tools
|
||||
./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${verNumberComp} ${dbName}
|
||||
./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
|
||||
fi
|
|
@ -481,11 +481,11 @@ function install_adapter_config() {
|
|||
${csudo}mkdir -p ${cfg_install_dir}
|
||||
[ -f ${script_dir}/cfg/${adapterName}.toml ] && ${csudo}cp ${script_dir}/cfg/${adapterName}.toml ${cfg_install_dir}
|
||||
[ -f ${cfg_install_dir}/${adapterName}.toml ] && ${csudo}chmod 644 ${cfg_install_dir}/${adapterName}.toml
|
||||
else
|
||||
[ -f ${script_dir}/cfg/${adapterName}.toml ] &&
|
||||
${csudo}cp -f ${script_dir}/cfg/${adapterName}.toml ${cfg_install_dir}/${adapterName}.toml.new
|
||||
fi
|
||||
|
||||
[ -f ${script_dir}/cfg/${adapterName}.toml ] &&
|
||||
${csudo}cp -f ${script_dir}/cfg/${adapterName}.toml ${cfg_install_dir}/${adapterName}.toml.new
|
||||
|
||||
[ -f ${cfg_install_dir}/${adapterName}.toml ] &&
|
||||
${csudo}ln -s ${cfg_install_dir}/${adapterName}.toml ${install_main_dir}/cfg/${adapterName}.toml
|
||||
|
||||
|
@ -499,9 +499,10 @@ function install_config() {
|
|||
${csudo}mkdir -p ${cfg_install_dir}
|
||||
[ -f ${script_dir}/cfg/${configFile} ] && ${csudo}cp ${script_dir}/cfg/${configFile} ${cfg_install_dir}
|
||||
${csudo}chmod 644 ${cfg_install_dir}/*
|
||||
else
|
||||
${csudo}cp -f ${script_dir}/cfg/${configFile} ${cfg_install_dir}/${configFile}.new
|
||||
fi
|
||||
|
||||
${csudo}cp -f ${script_dir}/cfg/${configFile} ${cfg_install_dir}/${configFile}.new
|
||||
${csudo}ln -s ${cfg_install_dir}/${configFile} ${install_main_dir}/cfg
|
||||
|
||||
[ ! -z $1 ] && return 0 || : # only install client
|
||||
|
|
|
@ -14,6 +14,7 @@ set binary_dir=%3
|
|||
set binary_dir=%binary_dir:/=\\%
|
||||
set osType=%4
|
||||
set verNumber=%5
|
||||
set Enterprise=%6
|
||||
set target_dir=C:\\TDengine
|
||||
|
||||
if not exist %target_dir% (
|
||||
|
@ -57,7 +58,33 @@ if exist %binary_dir%\\build\\lib\\taosws.dll (
|
|||
if exist %binary_dir%\\build\\bin\\taosdump.exe (
|
||||
copy %binary_dir%\\build\\bin\\taosdump.exe %target_dir% > nul
|
||||
)
|
||||
|
||||
if %Enterprise% == TRUE (
|
||||
if exist %binary_dir%\\build\\bin\\taosx.exe (
|
||||
copy %binary_dir%\\build\\bin\\taosx.exe %target_dir% > nul
|
||||
)
|
||||
if exist %binary_dir%\\build\\bin\\tmq_sim.exe (
|
||||
copy %binary_dir%\\build\\bin\\tmq_sim.exe %target_dir% > nul
|
||||
)
|
||||
if exist %binary_dir%\\build\\bin\\tsim.exe (
|
||||
copy %binary_dir%\\build\\bin\\tsim.exe %target_dir% > nul
|
||||
)
|
||||
if exist %binary_dir%\\build\\bin\\tmq_taosx_ci.exe (
|
||||
copy %binary_dir%\\build\\bin\\tmq_taosx_ci.exe %target_dir% > nul
|
||||
)
|
||||
if exist %binary_dir%\\build\\bin\\tmq_demo.exe (
|
||||
copy %binary_dir%\\build\\bin\\tmq_demo.exe %target_dir% > nul
|
||||
)
|
||||
if exist %binary_dir%\\build\\bin\\dumper.exe (
|
||||
copy %binary_dir%\\build\\bin\\dumper.exe %target_dir% > nul
|
||||
)
|
||||
if exist %binary_dir%\\build\\bin\\runUdf.exe (
|
||||
copy %binary_dir%\\build\\bin\\runUdf.exe %target_dir% > nul
|
||||
)
|
||||
if exist %binary_dir%\\build\\bin\\create_table.exe (
|
||||
copy %binary_dir%\\build\\bin\\create_table.exe %target_dir% > nul
|
||||
)
|
||||
)
|
||||
|
||||
copy %binary_dir%\\build\\bin\\taosd.exe %target_dir% > nul
|
||||
copy %binary_dir%\\build\\bin\\udfd.exe %target_dir% > nul
|
||||
|
||||
|
|
|
@ -109,6 +109,13 @@ function kill_taosadapter() {
|
|||
fi
|
||||
}
|
||||
|
||||
function kill_taoskeeper() {
|
||||
pid=$(ps -ef | grep "taoskeeper" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
|
||||
function kill_taosd() {
|
||||
# ${csudo}pkill -f taosd || :
|
||||
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
|
||||
|
@ -161,6 +168,7 @@ function install_bin() {
|
|||
${csudo}rm -f ${bin_link_dir}/udfd || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosadapter || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosBenchmark || :
|
||||
${csudo}rm -f ${bin_link_dir}/taoskeeper || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosdemo || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosdump || :
|
||||
${csudo}rm -f ${bin_link_dir}/rmtaos || :
|
||||
|
@ -179,6 +187,7 @@ function install_bin() {
|
|||
[ -x ${bin_dir}/taosdump ] && ${csudo}ln -s ${bin_dir}/taosdump ${bin_link_dir}/taosdump || :
|
||||
[ -x ${bin_dir}/set_core.sh ] && ${csudo}ln -s ${bin_dir}/set_core.sh ${bin_link_dir}/set_core || :
|
||||
[ -x ${bin_dir}/remove.sh ] && ${csudo}ln -s ${bin_dir}/remove.sh ${bin_link_dir}/rmtaos || :
|
||||
[ -x ${bin_dir}/taoskeeper ] && ${csudo}ln -sf ${bin_dir}/taoskeeper ${bin_link_dir}/taoskeeper || :
|
||||
}
|
||||
|
||||
function add_newHostname_to_hosts() {
|
||||
|
@ -351,6 +360,22 @@ function install_taosadapter_config() {
|
|||
${csudo}ln -s ${cfg_install_dir}/taosadapter.toml ${cfg_dir}
|
||||
}
|
||||
|
||||
function install_taoskeeper_config() {
|
||||
if [ ! -f "${cfg_install_dir}/keeper.toml" ]; then
|
||||
[ ! -d ${cfg_install_dir} ] &&
|
||||
${csudo}${csudo}mkdir -p ${cfg_install_dir}
|
||||
[ -f ${cfg_dir}/keeper.toml ] && ${csudo}cp ${cfg_dir}/keeper.toml ${cfg_install_dir}
|
||||
[ -f ${cfg_install_dir}/keeper.toml ] &&
|
||||
${csudo}chmod 644 ${cfg_install_dir}/keeper.toml
|
||||
fi
|
||||
|
||||
[ -f ${cfg_dir}/keeper.toml ] &&
|
||||
${csudo}mv ${cfg_dir}/keeper.toml ${cfg_dir}/keeper.toml.new
|
||||
|
||||
[ -f ${cfg_install_dir}/keeper.toml ] &&
|
||||
${csudo}ln -s ${cfg_install_dir}/keeper.toml ${cfg_dir}
|
||||
}
|
||||
|
||||
function install_config() {
|
||||
if [ ! -f "${cfg_install_dir}/taos.cfg" ]; then
|
||||
${csudo}${csudo}mkdir -p ${cfg_install_dir}
|
||||
|
@ -583,6 +608,7 @@ function install_TDengine() {
|
|||
install_bin
|
||||
install_config
|
||||
install_taosadapter_config
|
||||
install_taoskeeper_config
|
||||
install_taosadapter_service
|
||||
install_service
|
||||
install_app
|
||||
|
|
|
@ -149,7 +149,6 @@ typedef struct STscObj {
|
|||
int32_t numOfReqs; // number of sqlObj bound to this connection
|
||||
SAppInstInfo* pAppInfo;
|
||||
SHashObj* pRequests;
|
||||
int8_t schemalessType; // todo remove it, this attribute should be move to request
|
||||
} STscObj;
|
||||
|
||||
typedef struct SResultColumn {
|
||||
|
@ -313,6 +312,8 @@ extern SAppInfo appInfo;
|
|||
extern int32_t clientReqRefPool;
|
||||
extern int32_t clientConnRefPool;
|
||||
extern int32_t timestampDeltaLimit;
|
||||
extern int64_t lastClusterId;
|
||||
|
||||
|
||||
__async_send_cb_fn_t getMsgRspHandle(int32_t msgType);
|
||||
|
||||
|
@ -340,6 +341,7 @@ void resetConnectDB(STscObj* pTscObj);
|
|||
int taos_options_imp(TSDB_OPTION option, const char* str);
|
||||
|
||||
void* openTransporter(const char* user, const char* auth, int32_t numOfThreads);
|
||||
void tscStopCrashReport();
|
||||
|
||||
typedef struct AsyncArg {
|
||||
SRpcMsg msg;
|
||||
|
|
|
@ -30,7 +30,7 @@ extern "C" {
|
|||
#define tscDebug(...) do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLog("TSC ", DEBUG_DEBUG, cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tscTrace(...) do { if (cDebugFlag & DEBUG_TRACE) { taosPrintLog("TSC ", DEBUG_TRACE, cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tscDebugL(...) do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLongString("TSC ", DEBUG_DEBUG, cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
//#define tscPerf(...) do { if (cDebugFlag & DEBUG_INFO) { taosPrintLog("TSC ", DEBUG_INFO, cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tscPerf(...) do { if (cDebugFlag & DEBUG_INFO) { taosPrintLog("TSC ", 0, cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
// clang-format on
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -0,0 +1,246 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef TDENGINE_CLIENTSML_H
#define TDENGINE_CLIENTSML_H

#ifdef __cplusplus
extern "C" {
#endif

#include "catalog.h"
#include "clientInt.h"
#include "osThread.h"
#include "query.h"
#include "taos.h"
#include "taoserror.h"
#include "tcommon.h"
#include "tdef.h"
#include "tglobal.h"
#include "tlog.h"
#include "tmsg.h"
#include "tname.h"
#include "ttime.h"
#include "ttypes.h"
#include "cJSON.h"

#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
#  define expect(expr,value) (__builtin_expect ((expr),(value)) )
#else
#  define expect(expr,value) (expr)
#endif

#ifndef likely
#define likely(expr) expect((expr) != 0, 1)
#endif
#ifndef unlikely
#define unlikely(expr) expect((expr) != 0, 0)
#endif

#define SPACE ' '
#define COMMA ','
#define EQUAL '='
#define QUOTE '"'
#define SLASH '\\'

#define JUMP_SPACE(sql, sqlEnd)      \
  while (sql < sqlEnd) {             \
    if (unlikely(*sql == SPACE))     \
      sql++;                         \
    else                             \
      break;                         \
  }

#define IS_INVALID_COL_LEN(len)   ((len) <= 0 || (len) >= TSDB_COL_NAME_LEN)
#define IS_INVALID_TABLE_LEN(len) ((len) <= 0 || (len) >= TSDB_TABLE_NAME_LEN)

#define TS        "_ts"
#define TS_LEN    3
#define VALUE     "_value"
#define VALUE_LEN 6

#define OTD_JSON_FIELDS_NUM 4
#define MAX_RETRY_TIMES 5
typedef TSDB_SML_PROTOCOL_TYPE SMLProtocolType;

typedef enum {
  SCHEMA_ACTION_NULL,
  SCHEMA_ACTION_CREATE_STABLE,
  SCHEMA_ACTION_ADD_COLUMN,
  SCHEMA_ACTION_ADD_TAG,
  SCHEMA_ACTION_CHANGE_COLUMN_SIZE,
  SCHEMA_ACTION_CHANGE_TAG_SIZE,
} ESchemaAction;

typedef struct {
  const void *key;
  int32_t     keyLen;
  void       *value;
  bool        used;
} Node;

typedef struct NodeList {
  Node             data;
  struct NodeList *next;
} NodeList;

typedef struct {
  char *measure;
  char *tags;
  char *cols;
  char *timestamp;

  int32_t measureLen;
  int32_t measureTagsLen;
  int32_t tagsLen;
  int32_t colsLen;
  int32_t timestampLen;

  SArray *colArray;
} SSmlLineInfo;

typedef struct {
  const char *sTableName;  // super table name
  int32_t     sTableNameLen;
  char        childTableName[TSDB_TABLE_NAME_LEN];
  uint64_t    uid;
  void       *key;  // for openTsdb

  SArray *tags;

  // elements are SHashObj<cols key string, SSmlKv*> for find by key quickly
  SArray        *cols;
  STableDataCxt *tableDataCtx;
} SSmlTableInfo;

typedef struct {
  SArray   *tags;     // save the origin order to create table
  SHashObj *tagHash;  // elements are <key, index in tags>

  SArray   *cols;
  SHashObj *colHash;

  STableMeta *tableMeta;
} SSmlSTableMeta;

typedef struct {
  int32_t len;
  char   *buf;
} SSmlMsgBuf;

typedef struct {
  int32_t code;
  int32_t lineNum;

  int32_t numOfSTables;
  int32_t numOfCTables;
  int32_t numOfCreateSTables;
  int32_t numOfAlterColSTables;
  int32_t numOfAlterTagSTables;

  int64_t parseTime;
  int64_t schemaTime;
  int64_t insertBindTime;
  int64_t insertRpcTime;
  int64_t endTime;
} SSmlCostInfo;

typedef struct {
  int64_t id;

  SMLProtocolType protocol;
  int8_t          precision;
  bool            reRun;
  bool            dataFormat;  // true means that the name and order of keys in each line are the same(only for influx protocol)
  bool            isRawLine;
  int32_t         ttl;
  int32_t         uid;  // used for automatic create child table

  NodeList *childTables;
  NodeList *superTables;
  SHashObj *pVgHash;

  STscObj     *taos;
  SCatalog    *pCatalog;
  SRequestObj *pRequest;
  SQuery      *pQuery;

  SSmlCostInfo cost;
  int32_t      lineNum;
  SSmlMsgBuf   msgBuf;

  cJSON        *root;  // for parse json
  int8_t        offset[OTD_JSON_FIELDS_NUM];
  SSmlLineInfo *lines;  // element is SSmlLineInfo
  bool          parseJsonByLib;
  SArray       *tagJsonArray;

  //
  SArray *preLineTagKV;
  SArray *maxTagKVs;
  SArray *preLineColKV;

  SSmlLineInfo   preLine;
  STableMeta    *currSTableMeta;
  STableDataCxt *currTableDataCtx;
  bool           needModifySchema;
} SSmlHandle;

#define IS_SAME_CHILD_TABLE (elements->measureTagsLen == info->preLine.measureTagsLen \
&& memcmp(elements->measure, info->preLine.measure, elements->measureTagsLen) == 0)

#define IS_SAME_SUPER_TABLE (elements->measureLen == info->preLine.measureLen \
&& memcmp(elements->measure, info->preLine.measure, elements->measureLen) == 0)

#define IS_SAME_KEY (maxKV->keyLen == kv.keyLen && memcmp(maxKV->key, kv.key, kv.keyLen) == 0)

extern int64_t smlFactorNS[3];
extern int64_t smlFactorS[3];

typedef int32_t (*_equal_fn_sml)(const void *, const void *);

SSmlHandle *smlBuildSmlInfo(TAOS *taos);
void        smlDestroyInfo(SSmlHandle *info);
int         smlJsonParseObjFirst(char **start, SSmlLineInfo *element, int8_t *offset);
int         smlJsonParseObj(char **start, SSmlLineInfo *element, int8_t *offset);
//SArray     *smlJsonParseTags(char *start, char *end);
bool        smlParseNumberOld(SSmlKv *kvVal, SSmlMsgBuf *msg);
void*       nodeListGet(NodeList* list, const void *key, int32_t len, _equal_fn_sml fn);
int         nodeListSet(NodeList** list, const void *key, int32_t len, void* value, _equal_fn_sml fn);
int         nodeListSize(NodeList* list);
bool        smlDoubleToInt64OverFlow(double num);
int32_t     smlBuildInvalidDataMsg(SSmlMsgBuf *pBuf, const char *msg1, const char *msg2);
bool        smlParseNumber(SSmlKv *kvVal, SSmlMsgBuf *msg);
int64_t     smlGetTimeValue(const char *value, int32_t len, uint8_t fromPrecision, uint8_t toPrecision);
int8_t      smlGetTsTypeByLen(int32_t len);
SSmlTableInfo*  smlBuildTableInfo(int numRows, const char* measure, int32_t measureLen);
SSmlSTableMeta* smlBuildSTableMeta(bool isDataFormat);
int32_t     smlSetCTableName(SSmlTableInfo *oneTable);
STableMeta* smlGetMeta(SSmlHandle *info, const void* measure, int32_t measureLen);
int32_t     is_same_child_table_telnet(const void *a, const void *b);
int64_t     smlParseOpenTsdbTime(SSmlHandle *info, const char *data, int32_t len);
int32_t     smlClearForRerun(SSmlHandle *info);
int32_t     smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg);
uint8_t     smlGetTimestampLen(int64_t num);
void        clearColValArray(SArray* pCols);
void        smlDestroyTableInfo(SSmlHandle *info, SSmlTableInfo *tag);

int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements);
int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements);
int32_t smlParseJSON(SSmlHandle *info, char *payload);

#ifdef __cplusplus
}
#endif

#endif  // TDENGINE_CLIENTSML_H
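For orientation, a minimal sketch of how the parsing helpers declared in this new header are meant to be driven. The wrapper function below is illustrative only and not part of this commit; it simply exercises the JUMP_SPACE and IS_INVALID_TABLE_LEN macros defined above.

```c
// Illustrative sketch only (not part of this commit): skip leading spaces in a
// line-protocol buffer and validate a measurement name length using the macros
// defined in clientSml.h. The function name is hypothetical.
static bool smlExampleScan(const char *sql, const char *sqlEnd, int32_t measureLen) {
  JUMP_SPACE(sql, sqlEnd)                  // advances sql past any leading ' ' characters
  if (IS_INVALID_TABLE_LEN(measureLen)) {  // requires 0 < len < TSDB_TABLE_NAME_LEN
    return false;
  }
  return sql < sqlEnd;                     // true while unparsed input remains
}
```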
@ -21,8 +21,6 @@ extern "C" {
#endif
#include "catalog.h"

typedef void STableDataBlocks;

typedef enum {
  STMT_TYPE_INSERT = 1,
  STMT_TYPE_MULTI_INSERT,

@ -43,8 +41,8 @@ typedef enum {
} STMT_STATUS;

typedef struct SStmtTableCache {
  STableDataBlocks *pDataBlock;
  void             *boundTags;
  STableDataCxt    *pDataCtx;
  void             *boundTags;
} SStmtTableCache;

typedef struct SStmtQueryResInfo {

@ -71,10 +69,11 @@ typedef struct SStmtBindInfo {
} SStmtBindInfo;

typedef struct SStmtExecInfo {
  int32_t        affectedRows;
  SRequestObj   *pRequest;
  SHashObj      *pBlockHash;
  bool           autoCreateTbl;
  int32_t        affectedRows;
  SRequestObj   *pRequest;
  SHashObj      *pBlockHash;
  STableDataCxt *pCurrBlock;
  SSubmitTbData *pCurrTbData;
} SStmtExecInfo;

typedef struct SStmtSQLInfo {
@ -28,13 +28,16 @@
#include "trpc.h"
#include "tsched.h"
#include "ttime.h"
#include "thttp.h"

#define TSC_VAR_NOT_RELEASE 1
#define TSC_VAR_RELEASED    0

SAppInfo appInfo;
int64_t  lastClusterId = 0;
int32_t  clientReqRefPool = -1;
int32_t  clientConnRefPool = -1;
int32_t  clientStop = 0;

int32_t timestampDeltaLimit = 900;  // s

@ -62,7 +65,10 @@ static int32_t registerRequest(SRequestObj *pRequest, STscObj *pTscObj) {

static void deregisterRequest(SRequestObj *pRequest) {
  const static int64_t SLOW_QUERY_INTERVAL = 3000000L;  // todo configurable
  assert(pRequest != NULL);
  if(pRequest == NULL){
    tscError("pRequest == NULL");
    return;
  }

  STscObj            *pTscObj = pRequest->pTscObj;
  SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;

@ -76,13 +82,19 @@ static void deregisterRequest(SRequestObj *pRequest) {
           "current:%d, app current:%d",
           pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000.0, num, currentInst);

  tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
          "us, exec:%" PRId64 "us, stmtType:%d",
          duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
          pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
          pRequest->metric.execEnd - pRequest->metric.semanticEnd, pRequest->stmtType);

  if (QUERY_NODE_VNODE_MODIFY_STMT == pRequest->stmtType) {
    //    tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
    //            "us, exec:%" PRId64 "us",
    //            duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
    //            pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd -
    //            pRequest->metric.ctgEnd, pRequest->metric.execEnd - pRequest->metric.semanticEnd);
    atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration);
    //    tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
    //            "us, exec:%" PRId64 "us",
    //            duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
    //            pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd -
    //            pRequest->metric.ctgEnd, pRequest->metric.execEnd - pRequest->metric.semanticEnd);
    //    atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration);
  } else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) {
    //  tscPerf("select duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
    //          "us, planner:%" PRId64 "us, exec:%" PRId64 "us, reqId:0x%" PRIx64,

@ -264,7 +276,6 @@ void *createTscObj(const char *user, const char *auth, const char *db, int32_t c
  taosThreadMutexInit(&pObj->mutex, NULL);
  pObj->id = taosAddRef(clientConnRefPool, pObj);
  pObj->schemalessType = 1;

  atomic_add_fetch_64(&pObj->pAppInfo->numOfConns, 1);

@ -385,6 +396,150 @@ void destroyRequest(SRequestObj *pRequest) {
  removeRequest(pRequest->self);
}

void taosClientCrash(int signum, void *sigInfo, void *context) {
  taosIgnSignal(SIGTERM);
  taosIgnSignal(SIGHUP);
  taosIgnSignal(SIGINT);
  taosIgnSignal(SIGBREAK);

#if !defined(WINDOWS)
  taosIgnSignal(SIGBUS);
#endif
  taosIgnSignal(SIGABRT);
  taosIgnSignal(SIGFPE);
  taosIgnSignal(SIGSEGV);

  char       *pMsg = NULL;
  const char *flags = "UTL FATAL ";
  ELogLevel   level = DEBUG_FATAL;
  int32_t     dflag = 255;
  int64_t     msgLen = -1;

  if (tsEnableCrashReport) {
    if (taosGenCrashJsonMsg(signum, &pMsg, lastClusterId, appInfo.startTime)) {
      taosPrintLog(flags, level, dflag, "failed to generate crash json msg");
      goto _return;
    } else {
      msgLen = strlen(pMsg);
    }
  }

_return:

  taosLogCrashInfo("taos", pMsg, msgLen, signum, sigInfo);

#ifdef _TD_DARWIN_64
  exit(signum);
#elif defined(WINDOWS)
  exit(signum);
#endif
}

void crashReportThreadFuncUnexpectedStopped(void) { atomic_store_32(&clientStop, -1); }

static void *tscCrashReportThreadFp(void *param) {
  setThreadName("client-crashReport");
  char filepath[PATH_MAX] = {0};
  snprintf(filepath, sizeof(filepath), "%s%s.taosCrashLog", tsLogDir, TD_DIRSEP);
  char     *pMsg = NULL;
  int64_t   msgLen = 0;
  TdFilePtr pFile = NULL;
  bool      truncateFile = false;
  int32_t   sleepTime = 200;
  int32_t   reportPeriodNum = 3600 * 1000 / sleepTime;
  int32_t   loopTimes = reportPeriodNum;

#ifdef WINDOWS
  if (taosCheckCurrentInDll()) {
    atexit(crashReportThreadFuncUnexpectedStopped);
  }
#endif

  while (1) {
    if (clientStop) break;
    if (loopTimes++ < reportPeriodNum) {
      taosMsleep(sleepTime);
      continue;
    }

    taosReadCrashInfo(filepath, &pMsg, &msgLen, &pFile);
    if (pMsg && msgLen > 0) {
      if (taosSendHttpReport(tsTelemServer, tsClientCrashReportUri, tsTelemPort, pMsg, msgLen, HTTP_FLAT) != 0) {
        tscError("failed to send crash report");
        if (pFile) {
          taosReleaseCrashLogFile(pFile, false);
          continue;
        }
      } else {
        tscInfo("succeed to send crash report");
        truncateFile = true;
      }
    } else {
      tscDebug("no crash info");
    }

    taosMemoryFree(pMsg);

    if (pMsg && msgLen > 0) {
      pMsg = NULL;
      continue;
    }

    if (pFile) {
      taosReleaseCrashLogFile(pFile, truncateFile);
      truncateFile = false;
    }

    taosMsleep(sleepTime);
    loopTimes = 0;
  }

  clientStop = -1;
  return NULL;
}

int32_t tscCrashReportInit() {
  if (!tsEnableCrashReport) {
    return 0;
  }

  TdThreadAttr thAttr;
  taosThreadAttrInit(&thAttr);
  taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_JOINABLE);
  TdThread crashReportThread;
  if (taosThreadCreate(&crashReportThread, &thAttr, tscCrashReportThreadFp, NULL) != 0) {
    tscError("failed to create crashReport thread since %s", strerror(errno));
    return -1;
  }

  taosThreadAttrDestroy(&thAttr);
  return 0;
}

void tscStopCrashReport() {
  if (!tsEnableCrashReport) {
    return;
  }

  if (atomic_val_compare_exchange_32(&clientStop, 0, 1)) {
    tscDebug("hb thread already stopped");
    return;
  }

  while (atomic_load_32(&clientStop) > 0) {
    taosMsleep(100);
  }
}

static void tscSetSignalHandle() {
#if !defined(WINDOWS)
  taosSetSignal(SIGBUS, taosClientCrash);
#endif
  taosSetSignal(SIGABRT, taosClientCrash);
  taosSetSignal(SIGFPE, taosClientCrash);
  taosSetSignal(SIGSEGV, taosClientCrash);
}

void taos_init_imp(void) {
  // In the APIs of other program language, taos_cleanup is not available yet.
  // So, to make sure taos_cleanup will be invoked to clean up the allocated resource to suppress the valgrind warning.

@ -392,6 +547,10 @@ void taos_init_imp(void) {
  errno = TSDB_CODE_SUCCESS;
  taosSeedRand(taosGetTimestampSec());

  appInfo.pid = taosGetPId();
  appInfo.startTime = taosGetTimestampMs();
  appInfo.pInstMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);

  deltaToUtcInitOnce();

  if (taosCreateLog("taoslog", 10, configDir, NULL, NULL, NULL, NULL, 1) != 0) {

@ -404,10 +563,13 @@ void taos_init_imp(void) {
    return;
  }

  tscSetSignalHandle();

  initQueryModuleMsgHandle();

  if (taosConvInit() != 0) {
    ASSERTS(0, "failed to init conv");
    tscError("failed to init conv");
    return;
  }

  rpcInit();

@ -433,9 +595,8 @@ void taos_init_imp(void) {
  taosGetAppName(appInfo.appName, NULL);
  taosThreadMutexInit(&appInfo.mutex, NULL);

  appInfo.pid = taosGetPId();
  appInfo.startTime = taosGetTimestampMs();
  appInfo.pInstMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
  tscCrashReportInit();

  tscDebug("client is initialized successfully");
}
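The recurring pattern across these client-side changes is the replacement of hard `assert` calls with logged runtime checks. A condensed sketch of that conversion is shown below; the function name is hypothetical and the body only illustrates the style already visible in the diff above.

```c
// Before this change the client would abort the whole process on a bad argument:
//   assert(pRequest != NULL);
//
// Sketch of the pattern this commit applies instead: log the problem and bail
// out, so an invalid argument degrades to an error instead of a crash.
static void deregisterRequestChecked(SRequestObj *pRequest) {
  if (pRequest == NULL) {
    tscError("pRequest == NULL");
    return;
  }
  // ... normal deregistration path continues here ...
}
```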
@ -376,7 +376,6 @@ int32_t hbBuildQueryDesc(SQueryHbReqBasic *hbBasic, STscObj *pObj) {
        desc.subPlanNum = 0;
      }
      desc.subPlanNum = taosArrayGetSize(desc.subDesc);
      ASSERT(desc.subPlanNum == taosArrayGetSize(desc.subDesc));
    } else {
      desc.subDesc = NULL;
    }

@ -813,7 +812,10 @@ static void hbStopThread() {
}

SAppHbMgr *appHbMgrInit(SAppInstInfo *pAppInstInfo, char *key) {
  hbMgrInit();
  if(hbMgrInit() != 0){
    terrno = TSDB_CODE_TSC_INTERNAL_ERROR;
    return NULL;
  }
  SAppHbMgr *pAppHbMgr = taosMemoryMalloc(sizeof(SAppHbMgr));
  if (pAppHbMgr == NULL) {
    terrno = TSDB_CODE_OUT_OF_MEMORY;

@ -899,16 +901,28 @@ int hbMgrInit() {
  TdThreadMutexAttr attr = {0};

  int ret = taosThreadMutexAttrInit(&attr);
  assert(ret == 0);
  if(ret != 0){
    uError("hbMgrInit:taosThreadMutexAttrInit error")
    return ret;
  }

  ret = taosThreadMutexAttrSetType(&attr, PTHREAD_MUTEX_RECURSIVE);
  assert(ret == 0);
  if(ret != 0){
    uError("hbMgrInit:taosThreadMutexAttrSetType error")
    return ret;
  }

  ret = taosThreadMutexInit(&clientHbMgr.lock, &attr);
  assert(ret == 0);
  if(ret != 0){
    uError("hbMgrInit:taosThreadMutexInit error")
    return ret;
  }

  ret = taosThreadMutexAttrDestroy(&attr);
  assert(ret == 0);
  if(ret != 0){
    uError("hbMgrInit:taosThreadMutexAttrDestroy error")
    return ret;
  }

  // init handle funcs
  hbMgrInitHandle();
@ -239,7 +239,6 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC
|
|||
.pTransporter = pTscObj->pAppInfo->pTransporter,
|
||||
.pStmtCb = pStmtCb,
|
||||
.pUser = pTscObj->user,
|
||||
.schemalessType = pTscObj->schemalessType,
|
||||
.isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER)),
|
||||
.enableSysInfo = pTscObj->sysInfo,
|
||||
.svrVer = pTscObj->sVer,
|
||||
|
@ -273,7 +272,7 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC
|
|||
|
||||
int32_t execLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
|
||||
SRetrieveTableRsp* pRsp = NULL;
|
||||
int32_t code = qExecCommand(pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp);
|
||||
int32_t code = qExecCommand(&pRequest->pTscObj->id, pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp);
|
||||
if (TSDB_CODE_SUCCESS == code && NULL != pRsp) {
|
||||
code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, true);
|
||||
}
|
||||
|
@ -311,7 +310,7 @@ void asyncExecLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
|
|||
return;
|
||||
}
|
||||
|
||||
int32_t code = qExecCommand(pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp);
|
||||
int32_t code = qExecCommand(&pRequest->pTscObj->id ,pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp);
|
||||
if (TSDB_CODE_SUCCESS == code && NULL != pRsp) {
|
||||
code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, true);
|
||||
}
|
||||
|
@ -453,7 +452,10 @@ int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArra
|
|||
}
|
||||
|
||||
void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t numOfCols) {
|
||||
ASSERT(pSchema != NULL && numOfCols > 0);
|
||||
if(pResInfo == NULL || pSchema == NULL || numOfCols <= 0){
|
||||
tscError("invalid paras, pResInfo == NULL || pSchema == NULL || numOfCols <= 0");
|
||||
return;
|
||||
}
|
||||
|
||||
pResInfo->numOfCols = numOfCols;
|
||||
if (pResInfo->fields != NULL) {
|
||||
|
@ -464,7 +466,10 @@ void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t
|
|||
}
|
||||
pResInfo->fields = taosMemoryCalloc(numOfCols, sizeof(TAOS_FIELD));
|
||||
pResInfo->userFields = taosMemoryCalloc(numOfCols, sizeof(TAOS_FIELD));
|
||||
ASSERT(numOfCols == pResInfo->numOfCols);
|
||||
if(numOfCols != pResInfo->numOfCols){
|
||||
tscError("numOfCols:%d != pResInfo->numOfCols:%d", numOfCols, pResInfo->numOfCols);
|
||||
return;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < pResInfo->numOfCols; ++i) {
|
||||
pResInfo->fields[i].bytes = pSchema[i].bytes;
|
||||
|
@ -741,47 +746,21 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList
|
|||
}
|
||||
|
||||
int32_t handleSubmitExecRes(SRequestObj* pRequest, void* res, SCatalog* pCatalog, SEpSet* epset) {
|
||||
int32_t code = 0;
|
||||
SArray* pArray = NULL;
|
||||
SSubmitRsp* pRsp = (SSubmitRsp*)res;
|
||||
if (pRsp->nBlocks <= 0) {
|
||||
taosMemoryFreeClear(pRsp->pBlocks);
|
||||
SArray* pArray = NULL;
|
||||
SSubmitRsp2* pRsp = (SSubmitRsp2*)res;
|
||||
if (NULL == pRsp->aCreateTbRsp) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
pArray = taosArrayInit(pRsp->nBlocks, sizeof(STbSVersion));
|
||||
if (NULL == pArray) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
int32_t tbNum = taosArrayGetSize(pRsp->aCreateTbRsp);
|
||||
for (int32_t i = 0; i < tbNum; ++i) {
|
||||
SVCreateTbRsp* pTbRsp = (SVCreateTbRsp*)taosArrayGet(pRsp->aCreateTbRsp, i);
|
||||
if (pTbRsp->pMeta) {
|
||||
handleCreateTbExecRes(pTbRsp->pMeta, pCatalog);
|
||||
}
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < pRsp->nBlocks; ++i) {
|
||||
SSubmitBlkRsp* blk = pRsp->pBlocks + i;
|
||||
if (blk->pMeta) {
|
||||
handleCreateTbExecRes(blk->pMeta, pCatalog);
|
||||
tFreeSTableMetaRsp(blk->pMeta);
|
||||
taosMemoryFreeClear(blk->pMeta);
|
||||
}
|
||||
|
||||
if (NULL == blk->tblFName || 0 == blk->tblFName[0]) {
|
||||
continue;
|
||||
}
|
||||
|
||||
STbSVersion tbSver = {.tbFName = blk->tblFName, .sver = blk->sver};
|
||||
taosArrayPush(pArray, &tbSver);
|
||||
}
|
||||
|
||||
SRequestConnInfo conn = {.pTrans = pRequest->pTscObj->pAppInfo->pTransporter,
|
||||
.requestId = pRequest->requestId,
|
||||
.requestObjRefId = pRequest->self,
|
||||
.mgmtEps = *epset};
|
||||
|
||||
code = catalogChkTbMetaVersion(pCatalog, &conn, pArray);
|
||||
|
||||
_return:
|
||||
|
||||
taosArrayDestroy(pArray);
|
||||
return code;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t handleQueryExecRes(SRequestObj* pRequest, void* res, SCatalog* pCatalog, SEpSet* epset) {
|
||||
|
@ -1253,7 +1232,7 @@ STscObj* taosConnectImpl(const char* user, const char* auth, const char* db, __t
|
|||
|
||||
int64_t transporterId = 0;
|
||||
asyncSendMsgToServer(pTscObj->pAppInfo->pTransporter, &pTscObj->pAppInfo->mgmtEp.epSet, &transporterId, body);
|
||||
|
||||
|
||||
tsem_wait(&pRequest->body.rspSem);
|
||||
if (pRequest->code != TSDB_CODE_SUCCESS) {
|
||||
const char* errorMsg =
|
||||
|
@ -1366,7 +1345,10 @@ int32_t doProcessMsgFromServer(void* param) {
|
|||
SEpSet* pEpSet = arg->pEpset;
|
||||
|
||||
SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle;
|
||||
assert(pMsg->info.ahandle != NULL);
|
||||
if(pMsg->info.ahandle == NULL){
|
||||
tscError("doProcessMsgFromServer pMsg->info.ahandle == NULL");
|
||||
return TSDB_CODE_TSC_INTERNAL_ERROR;
|
||||
}
|
||||
STscObj* pTscObj = NULL;
|
||||
|
||||
STraceId* trace = &pMsg->info.traceId;
|
||||
|
@ -1379,8 +1361,10 @@ int32_t doProcessMsgFromServer(void* param) {
|
|||
if (pSendInfo->requestObjRefId != 0) {
|
||||
SRequestObj* pRequest = (SRequestObj*)taosAcquireRef(clientReqRefPool, pSendInfo->requestObjRefId);
|
||||
if (pRequest) {
|
||||
assert(pRequest->self == pSendInfo->requestObjRefId);
|
||||
|
||||
if(pRequest->self != pSendInfo->requestObjRefId){
|
||||
tscError("doProcessMsgFromServer pRequest->self:%"PRId64" != pSendInfo->requestObjRefId:%"PRId64, pRequest->self, pSendInfo->requestObjRefId);
|
||||
return TSDB_CODE_TSC_INTERNAL_ERROR;
|
||||
}
|
||||
pRequest->metric.rsp = taosGetTimestampUs();
|
||||
pTscObj = pRequest->pTscObj;
|
||||
/*
|
||||
|
@ -1430,6 +1414,21 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
|
|||
memcpy((void*)tEpSet, (void*)pEpSet, sizeof(SEpSet));
|
||||
}
|
||||
|
||||
// pMsg is response msg
|
||||
if (pMsg->msgType == TDMT_MND_CONNECT + 1) {
|
||||
// restore origin code
|
||||
if (pMsg->code == TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED) {
|
||||
pMsg->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
|
||||
} else if (pMsg->code == TSDB_CODE_RPC_SOMENODE_BROKEN_LINK) {
|
||||
pMsg->code = TSDB_CODE_RPC_BROKEN_LINK;
|
||||
}
|
||||
} else {
|
||||
// uniform to one error code: TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED
|
||||
if (pMsg->code == TSDB_CODE_RPC_SOMENODE_BROKEN_LINK) {
|
||||
pMsg->code = TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED;
|
||||
}
|
||||
}
|
||||
|
||||
AsyncArg* arg = taosMemoryCalloc(1, sizeof(AsyncArg));
|
||||
arg->msg = *pMsg;
|
||||
arg->pEpset = tEpSet;
|
||||
|
@ -1507,7 +1506,9 @@ void doSetOneRowPtr(SReqResultInfo* pResultInfo) {
|
|||
}
|
||||
|
||||
void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) {
|
||||
assert(pRequest != NULL);
|
||||
if(pRequest == NULL){
|
||||
return NULL;
|
||||
}
|
||||
|
||||
SReqResultInfo* pResultInfo = &pRequest->body.resInfo;
|
||||
if (pResultInfo->pData == NULL || pResultInfo->current >= pResultInfo->numOfRows) {
|
||||
|
@ -1561,7 +1562,9 @@ static void syncFetchFn(void* param, TAOS_RES* res, int32_t numOfRows) {
|
|||
}
|
||||
|
||||
void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) {
|
||||
assert(pRequest != NULL);
|
||||
if(pRequest == NULL){
|
||||
return NULL;
|
||||
}
|
||||
|
||||
SReqResultInfo* pResultInfo = &pRequest->body.resInfo;
|
||||
if (pResultInfo->pData == NULL || pResultInfo->current >= pResultInfo->numOfRows) {
|
||||
|
@ -1625,8 +1628,10 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
|
|||
char* pStart = pCol->offset[j] + pCol->pData;
|
||||
|
||||
int32_t len = taosUcs4ToMbs((TdUcs4*)varDataVal(pStart), varDataLen(pStart), varDataVal(p));
|
||||
ASSERT(len <= bytes);
|
||||
ASSERT((p + len) < (pResultInfo->convertBuf[i] + colLength[i]));
|
||||
if(len > bytes || (p + len) >= (pResultInfo->convertBuf[i] + colLength[i])){
|
||||
tscError("doConvertUCS4 error, invalid data. len:%d, bytes:%d, (p + len):%p, (pResultInfo->convertBuf[i] + colLength[i]):%p", len, bytes, (p + len), (pResultInfo->convertBuf[i] + colLength[i]));
|
||||
return TSDB_CODE_TSC_INTERNAL_ERROR;
|
||||
}
|
||||
|
||||
varDataSetLen(p, len);
|
||||
pCol->offset[j] = (p - pResultInfo->convertBuf[i]);
|
||||
|
@ -1643,9 +1648,6 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
|
|||
}
|
||||
|
||||
int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols) {
|
||||
int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3);
|
||||
ASSERT(numOfCols == cols);
|
||||
|
||||
return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) * 3 + sizeof(uint64_t) +
|
||||
numOfCols * (sizeof(int8_t) + sizeof(int32_t));
|
||||
}
|
||||
|
@ -1655,6 +1657,12 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i
|
|||
|
||||
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column
|
||||
// length |
|
||||
int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3);
|
||||
if(ASSERT(numOfCols == cols)){
|
||||
tscError("estimateJsonLen error: numOfCols:%d != cols:%d", numOfCols, cols);
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t len = getVersion1BlockMetaSize(p, numOfCols);
|
||||
int32_t* colLength = (int32_t*)(p + len);
|
||||
len += sizeof(int32_t) * numOfCols;
|
||||
|
@ -1688,7 +1696,8 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i
|
|||
} else if (jsonInnerType == TSDB_DATA_TYPE_BOOL) {
|
||||
len += (VARSTR_HEADER_SIZE + 5);
|
||||
} else {
|
||||
ASSERT(0);
|
||||
tscError("estimateJsonLen error: invalid type:%d", jsonInnerType);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
} else if (IS_VAR_DATA_TYPE(pResultInfo->fields[i].type)) {
|
||||
|
@ -1722,12 +1731,21 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
|
|||
|
||||
char* p = (char*)pResultInfo->pData;
|
||||
int32_t dataLen = estimateJsonLen(pResultInfo, numOfCols, numOfRows);
|
||||
if(dataLen <= 0){
|
||||
return TSDB_CODE_TSC_INTERNAL_ERROR;
|
||||
}
|
||||
|
||||
pResultInfo->convertJson = taosMemoryCalloc(1, dataLen);
|
||||
if (pResultInfo->convertJson == NULL) return TSDB_CODE_OUT_OF_MEMORY;
|
||||
char* p1 = pResultInfo->convertJson;
|
||||
|
||||
int32_t totalLen = 0;
|
||||
int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3);
|
||||
if(ASSERT(numOfCols == cols)){
|
||||
tscError("doConvertJson error: numOfCols:%d != cols:%d", numOfCols, cols);
|
||||
return TSDB_CODE_TSC_INTERNAL_ERROR;
|
||||
}
|
||||
|
||||
int32_t len = getVersion1BlockMetaSize(p, numOfCols);
|
||||
memcpy(p1, p, len);
|
||||
|
||||
|
@ -1748,8 +1766,10 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
|
|||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
int32_t colLen = htonl(colLength[i]);
|
||||
int32_t colLen1 = htonl(colLength1[i]);
|
||||
ASSERT(colLen < dataLen);
|
||||
|
||||
if(ASSERT(colLen < dataLen)){
|
||||
tscError("doConvertJson error: colLen:%d >= dataLen:%d", colLen, dataLen);
|
||||
return TSDB_CODE_TSC_INTERNAL_ERROR;
|
||||
}
|
||||
if (pResultInfo->fields[i].type == TSDB_DATA_TYPE_JSON) {
|
||||
int32_t* offset = (int32_t*)pStart;
|
||||
int32_t* offset1 = (int32_t*)pStart1;
|
||||
|
@ -1794,7 +1814,8 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
|
|||
sprintf(varDataVal(dst), "%s", (*((char*)jsonInnerData) == 1) ? "true" : "false");
|
||||
varDataSetLen(dst, strlen(varDataVal(dst)));
|
||||
} else {
|
||||
ASSERT(0);
|
||||
tscError("doConvertJson error: invalid type:%d", jsonInnerType);
|
||||
return TSDB_CODE_TSC_INTERNAL_ERROR;
|
||||
}
|
||||
|
||||
offset1[j] = len;
|
||||
|
@ -1832,7 +1853,10 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
|
|||
|
||||
int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32_t numOfCols, int32_t numOfRows,
|
||||
bool convertUcs4) {
|
||||
assert(numOfCols > 0 && pFields != NULL && pResultInfo != NULL);
|
||||
if(ASSERT(numOfCols > 0 && pFields != NULL && pResultInfo != NULL)){
|
||||
tscError("setResultDataPtr paras error");
|
||||
return TSDB_CODE_TSC_INTERNAL_ERROR;
|
||||
}
|
||||
if (numOfRows == 0) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -1861,7 +1885,10 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
|
|||
int32_t cols = *(int32_t*)p;
|
||||
p += sizeof(int32_t);
|
||||
|
||||
ASSERT(rows == numOfRows && cols == numOfCols);
|
||||
if(ASSERT(rows == numOfRows && cols == numOfCols)){
|
||||
tscError("setResultDataPtr paras error:rows;%d numOfRows:%d cols:%d numOfCols:%d", rows, numOfRows, cols, numOfCols);
|
||||
return TSDB_CODE_TSC_INTERNAL_ERROR;
|
||||
}
|
||||
|
||||
int32_t hasColumnSeg = *(int32_t*)p;
|
||||
p += sizeof(int32_t);
|
||||
|
@ -1888,7 +1915,7 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
|
|||
colLength[i] = htonl(colLength[i]);
|
||||
if (colLength[i] >= dataLen) {
|
||||
tscError("invalid colLength %d, dataLen %d", colLength[i], dataLen);
|
||||
ASSERT(0);
|
||||
return TSDB_CODE_TSC_INTERNAL_ERROR;
|
||||
}
|
||||
|
||||
if (IS_VAR_DATA_TYPE(pResultInfo->fields[i].type)) {
|
||||
|
@ -1926,7 +1953,11 @@ char* getDbOfConnection(STscObj* pObj) {
|
|||
}
|
||||
|
||||
void setConnectionDB(STscObj* pTscObj, const char* db) {
|
||||
assert(db != NULL && pTscObj != NULL);
|
||||
if(db == NULL || pTscObj == NULL){
|
||||
tscError("setConnectionDB para is NULL");
|
||||
return;
|
||||
}
|
||||
|
||||
taosThreadMutexLock(&pTscObj->mutex);
|
||||
tstrncpy(pTscObj->db, db, tListLen(pTscObj->db));
|
||||
taosThreadMutexUnlock(&pTscObj->mutex);
|
||||
|
@ -1944,7 +1975,10 @@ void resetConnectDB(STscObj* pTscObj) {
|
|||
|
||||
int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4,
|
||||
bool freeAfterUse) {
|
||||
assert(pResultInfo != NULL && pRsp != NULL);
|
||||
if(pResultInfo == NULL || pRsp == NULL){
|
||||
tscError("setQueryResultFromRsp paras is null");
|
||||
return TSDB_CODE_TSC_INTERNAL_ERROR;
|
||||
}
|
||||
|
||||
if (freeAfterUse) taosMemoryFreeClear(pResultInfo->pRspMsg);
|
||||
|
||||
|
|
|
@ -574,7 +574,11 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchBlockImp(JNI
  TAOS_RES *tres = (TAOS_RES *)res;

  int32_t numOfFields = taos_num_fields(tres);
  assert(numOfFields > 0);
  if(numOfFields <= 0){
    jniError("jobj:%p, conn:%p, query interrupted. taos_num_fields error code:%d, msg:%s", jobj, tscon, numOfFields,
             taos_errstr(tres));
    return JNI_RESULT_SET_NULL;
  }

  void   *data;
  int32_t numOfRows;
@ -55,6 +55,8 @@ void taos_cleanup(void) {
|
|||
return;
|
||||
}
|
||||
|
||||
tscStopCrashReport();
|
||||
|
||||
int32_t id = clientReqRefPool;
|
||||
clientReqRefPool = -1;
|
||||
taosCloseRef(id);
|
||||
|
@ -106,7 +108,7 @@ TAOS *taos_connect(const char *ip, const char *user, const char *pass, const cha
|
|||
if (pass == NULL) {
|
||||
pass = TSDB_DEFAULT_PASS;
|
||||
}
|
||||
|
||||
|
||||
STscObj *pObj = taos_connect_internal(ip, user, pass, NULL, db, port, CONN_TYPE__QUERY);
|
||||
if (pObj) {
|
||||
int64_t *rid = taosMemoryCalloc(1, sizeof(int64_t));
|
||||
|
@ -291,7 +293,6 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) {
|
|||
tscError("invalid result passed to taos_fetch_row");
|
||||
return NULL;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) {
|
||||
|
@ -355,9 +356,13 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
|
|||
case TSDB_DATA_TYPE_NCHAR: {
|
||||
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
|
||||
if (fields[i].type == TSDB_DATA_TYPE_BINARY) {
|
||||
assert(charLen <= fields[i].bytes && charLen >= 0);
|
||||
if(ASSERT(charLen <= fields[i].bytes && charLen >= 0)){
|
||||
tscError("taos_print_row error binary. charLen:%d, fields[i].bytes:%d", charLen, fields[i].bytes);
|
||||
}
|
||||
} else {
|
||||
assert(charLen <= fields[i].bytes * TSDB_NCHAR_SIZE && charLen >= 0);
|
||||
if(ASSERT(charLen <= fields[i].bytes * TSDB_NCHAR_SIZE && charLen >= 0)){
|
||||
tscError("taos_print_row error. charLen:%d, fields[i].bytes:%d", charLen, fields[i].bytes);
|
||||
}
|
||||
}
|
||||
|
||||
memcpy(str + len, row[i], charLen);
|
||||
|
@ -430,6 +435,22 @@ const char *taos_data_type(int type) {
|
|||
return "TSDB_DATA_TYPE_NCHAR";
|
||||
case TSDB_DATA_TYPE_JSON:
|
||||
return "TSDB_DATA_TYPE_JSON";
|
||||
case TSDB_DATA_TYPE_UTINYINT:
|
||||
return "TSDB_DATA_TYPE_UTINYINT";
|
||||
case TSDB_DATA_TYPE_USMALLINT:
|
||||
return "TSDB_DATA_TYPE_USMALLINT";
|
||||
case TSDB_DATA_TYPE_UINT:
|
||||
return "TSDB_DATA_TYPE_UINT";
|
||||
case TSDB_DATA_TYPE_UBIGINT:
|
||||
return "TSDB_DATA_TYPE_UBIGINT";
|
||||
case TSDB_DATA_TYPE_VARBINARY:
|
||||
return "TSDB_DATA_TYPE_VARBINARY";
|
||||
case TSDB_DATA_TYPE_DECIMAL:
|
||||
return "TSDB_DATA_TYPE_DECIMAL";
|
||||
case TSDB_DATA_TYPE_BLOB:
|
||||
return "TSDB_DATA_TYPE_BLOB";
|
||||
case TSDB_DATA_TYPE_MEDIUMBLOB:
|
||||
return "TSDB_DATA_TYPE_MEDIUMBLOB";
|
||||
default:
|
||||
return "UNKNOWN";
|
||||
}
|
||||
|
@ -577,7 +598,7 @@ int taos_fetch_block_s(TAOS_RES *res, int *numOfRows, TAOS_ROW *rows) {
|
|||
(*numOfRows) = pResultInfo->numOfRows;
|
||||
return 0;
|
||||
} else {
|
||||
ASSERT(0);
|
||||
tscError("taos_fetch_block_s invalid res type");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
@ -670,6 +691,32 @@ const char *taos_get_server_info(TAOS *taos) {
|
|||
return pTscObj->sDetailVer;
|
||||
}
|
||||
|
||||
int taos_get_current_db(TAOS *taos, char *database, int len, int *required) {
|
||||
STscObj *pTscObj = acquireTscObj(*(int64_t *)taos);
|
||||
if (pTscObj == NULL) {
|
||||
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||
return -1;
|
||||
}
|
||||
|
||||
int code = TSDB_CODE_SUCCESS;
|
||||
taosThreadMutexLock(&pTscObj->mutex);
|
||||
if(database == NULL || len <= 0){
|
||||
if(required != NULL) *required = strlen(pTscObj->db) + 1;
|
||||
terrno = TSDB_CODE_INVALID_PARA;
|
||||
code = -1;
|
||||
}else if(len < strlen(pTscObj->db) + 1){
|
||||
tstrncpy(database, pTscObj->db, len);
|
||||
if(required) *required = strlen(pTscObj->db) + 1;
|
||||
terrno = TSDB_CODE_INVALID_PARA;
|
||||
code = -1;
|
||||
}else{
|
||||
strcpy(database, pTscObj->db);
|
||||
code = 0;
|
||||
}
|
||||
taosThreadMutexUnlock(&pTscObj->mutex);
|
||||
return code;
|
||||
}
|
||||
|
||||
static void destoryTablesReq(void *p) {
|
||||
STablesReq *pRes = (STablesReq *)p;
|
||||
taosArrayDestroy(pRes->pTables);
|
||||
|
@ -802,7 +849,7 @@ static void doAsyncQueryFromParse(SMetaData *pResultMeta, void *param, int32_t c
|
|||
tstrerror(code));
|
||||
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
//pWrapper->pCatalogReq->forceUpdate = false;
|
||||
// pWrapper->pCatalogReq->forceUpdate = false;
|
||||
code = qContinueParseSql(pWrapper->pParseCtx, pWrapper->pCatalogReq, pResultMeta, pQuery);
|
||||
}
|
||||
|
||||
|
@ -831,8 +878,8 @@ void continueInsertFromCsv(SSqlCallbackWrapper *pWrapper, SRequestObj *pRequest)
|
|||
tstrerror(code), pWrapper->pRequest->requestId);
|
||||
destorySqlCallbackWrapper(pWrapper);
|
||||
terrno = code;
|
||||
pWrapper->pRequest->code = code;
|
||||
pWrapper->pRequest->body.queryFp(pWrapper->pRequest->body.param, pWrapper->pRequest, code);
|
||||
pRequest->code = code;
|
||||
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -866,7 +913,6 @@ int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt) {
|
|||
.pTransporter = pTscObj->pAppInfo->pTransporter,
|
||||
.pStmtCb = NULL,
|
||||
.pUser = pTscObj->user,
|
||||
.schemalessType = pTscObj->schemalessType,
|
||||
.isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER)),
|
||||
.enableSysInfo = pTscObj->sysInfo,
|
||||
.async = true,
|
||||
|
@ -1001,8 +1047,14 @@ static void fetchCallback(void *pResult, void *param, int32_t code) {
|
|||
}
|
||||
|
||||
void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
|
||||
ASSERT(res != NULL && fp != NULL);
|
||||
ASSERT(TD_RES_QUERY(res));
|
||||
if(ASSERT(res != NULL && fp != NULL)){
|
||||
tscError("taos_fetch_rows_a invalid paras");
|
||||
return;
|
||||
}
|
||||
if(ASSERT(TD_RES_QUERY(res))){
|
||||
tscError("taos_fetch_rows_a res is NULL");
|
||||
return;
|
||||
}
|
||||
|
||||
SRequestObj *pRequest = res;
|
||||
pRequest->body.fetchFp = fp;
|
||||
|
@ -1045,9 +1097,14 @@ void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
|
|||
}
|
||||
|
||||
void taos_fetch_raw_block_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
|
||||
ASSERT(res != NULL && fp != NULL);
|
||||
ASSERT(TD_RES_QUERY(res));
|
||||
|
||||
if(ASSERT(res != NULL && fp != NULL)){
|
||||
tscError("taos_fetch_rows_a invalid paras");
|
||||
return;
|
||||
}
|
||||
if(ASSERT(TD_RES_QUERY(res))){
|
||||
tscError("taos_fetch_rows_a res is NULL");
|
||||
return;
|
||||
}
|
||||
SRequestObj *pRequest = res;
|
||||
SReqResultInfo *pResultInfo = &pRequest->body.resInfo;
|
||||
|
||||
|
@ -1059,8 +1116,14 @@ void taos_fetch_raw_block_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
|
|||
}
|
||||
|
||||
const void *taos_get_raw_block(TAOS_RES *res) {
|
||||
ASSERT(res != NULL);
|
||||
ASSERT(TD_RES_QUERY(res));
|
||||
if(ASSERT(res != NULL)){
|
||||
tscError("taos_fetch_rows_a invalid paras");
|
||||
return NULL;
|
||||
}
|
||||
if(ASSERT(TD_RES_QUERY(res))){
|
||||
tscError("taos_fetch_rows_a res is NULL");
|
||||
return NULL;
|
||||
}
|
||||
SRequestObj *pRequest = res;
|
||||
|
||||
return pRequest->body.resInfo.pData;
|
||||
|
@ -1168,6 +1231,54 @@ _return:
|
|||
return code;
|
||||
}
|
||||
|
||||
int taos_get_tables_vgId(TAOS *taos, const char *db, const char *table[], int tableNum, int *vgId) {
|
||||
if (NULL == taos) {
|
||||
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||
return terrno;
|
||||
}
|
||||
|
||||
if (NULL == db || NULL == table || NULL == vgId || tableNum <= 0) {
|
||||
tscError("invalid input param, db:%p, table:%p, vgId:%p, tbNum:%d", db, table, vgId, tableNum);
|
||||
terrno = TSDB_CODE_TSC_INVALID_INPUT;
|
||||
return terrno;
|
||||
}
|
||||
|
||||
int64_t connId = *(int64_t *)taos;
|
||||
SRequestObj *pRequest = NULL;
|
||||
char *sql = "taos_get_table_vgId";
|
||||
int32_t code = buildRequest(connId, sql, strlen(sql), NULL, false, &pRequest, 0);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return terrno;
|
||||
}
|
||||
|
||||
pRequest->syncQuery = true;
|
||||
|
||||
STscObj *pTscObj = pRequest->pTscObj;
|
||||
SCatalog *pCtg = NULL;
|
||||
code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCtg);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _return;
|
||||
}
|
||||
|
||||
SRequestConnInfo conn = {
|
||||
.pTrans = pTscObj->pAppInfo->pTransporter, .requestId = pRequest->requestId, .requestObjRefId = pRequest->self};
|
||||
|
||||
conn.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
|
||||
|
||||
code = catalogGetTablesHashVgId(pCtg, &conn, pTscObj->acctId, db, table, tableNum, vgId);
|
||||
if (code) {
|
||||
goto _return;
|
||||
}
|
||||
|
||||
_return:
|
||||
|
||||
terrno = code;
|
||||
|
||||
destroyRequest(pRequest);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
||||
int taos_load_table_info(TAOS *taos, const char *tableNameList) {
|
||||
if (NULL == taos) {
|
||||
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||
|
@ -1331,6 +1442,13 @@ int taos_stmt_get_col_fields(TAOS_STMT *stmt, int *fieldNum, TAOS_FIELD_E **fiel
|
|||
return stmtGetColFields(stmt, fieldNum, fields);
|
||||
}
|
||||
|
||||
// let stmt to reclaim TAOS_FIELD_E that was allocated by `taos_stmt_get_tag_fields`/`taos_stmt_get_col_fields`
|
||||
void taos_stmt_reclaim_fields(TAOS_STMT *stmt, TAOS_FIELD_E *fields) {
|
||||
(void)stmt;
|
||||
if (!fields) return;
|
||||
taosMemoryFree(fields);
|
||||
}
|
||||
|
||||
int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind) {
|
||||
if (stmt == NULL || bind == NULL) {
|
||||
tscError("NULL parameter for %s", __FUNCTION__);
|
||||
|
|
|
@ -119,6 +119,7 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {

  // update the appInstInfo
  pTscObj->pAppInfo->clusterId = connectRsp.clusterId;
  lastClusterId = connectRsp.clusterId;

  pTscObj->connType = connectRsp.connType;

@ -149,7 +150,6 @@ SMsgSendInfo* buildMsgInfoImpl(SRequestObj* pRequest) {
    pMsgSendInfo->msgType = pRequest->type;
    pMsgSendInfo->target.type = TARGET_TYPE_MNODE;

    assert(pRequest != NULL);
    pMsgSendInfo->msgInfo = pRequest->body.requestMsg;
    pMsgSendInfo->fp = getMsgRspHandle(pRequest->type);
    return pMsgSendInfo;

@ -273,7 +273,9 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
}

int32_t processCreateSTableRsp(void* param, SDataBuf* pMsg, int32_t code) {
  assert(pMsg != NULL && param != NULL);
  if(pMsg == NULL || param == NULL){
    return TSDB_CODE_TSC_INVALID_INPUT;
  }
  SRequestObj* pRequest = param;

  if (code != TSDB_CODE_SUCCESS) {

@ -454,7 +456,10 @@ static int32_t buildShowVariablesRsp(SArray* pVars, SRetrieveTableRsp** pRsp) {
  (*pRsp)->numOfCols = htonl(SHOW_VARIABLES_RESULT_COLS);

  int32_t len = blockEncode(pBlock, (*pRsp)->data, SHOW_VARIABLES_RESULT_COLS);
  ASSERT(len == rspSize - sizeof(SRetrieveTableRsp));
  if(len != rspSize - sizeof(SRetrieveTableRsp)){
    uError("buildShowVariablesRsp error, len:%d != rspSize - sizeof(SRetrieveTableRsp):%" PRIu64, len, (uint64_t) (rspSize - sizeof(SRetrieveTableRsp)));
    return TSDB_CODE_TSC_INVALID_INPUT;
  }

  blockDataDestroy(pBlock);
  return TSDB_CODE_SUCCESS;
@ -373,7 +373,10 @@ static char* processCreateTable(SMqMetaRsp* metaRsp) {
|
|||
}
|
||||
|
||||
static char* processAutoCreateTable(STaosxRsp* rsp) {
|
||||
ASSERT(rsp->createTableNum != 0);
|
||||
if(rsp->createTableNum <= 0){
|
||||
uError("WriteRaw:processAutoCreateTable rsp->createTableNum <= 0");
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
SDecoder* decoder = taosMemoryCalloc(rsp->createTableNum, sizeof(SDecoder));
|
||||
SVCreateTbReq* pCreateReq = taosMemoryCalloc(rsp->createTableNum, sizeof(SVCreateTbReq));
|
||||
|
@ -389,7 +392,10 @@ static char* processAutoCreateTable(STaosxRsp* rsp) {
|
|||
goto _exit;
|
||||
}
|
||||
|
||||
ASSERT(pCreateReq[iReq].type == TSDB_CHILD_TABLE);
|
||||
if(pCreateReq[iReq].type != TSDB_CHILD_TABLE){
|
||||
uError("WriteRaw:processAutoCreateTable pCreateReq[iReq].type != TSDB_CHILD_TABLE");
|
||||
goto _exit;
|
||||
}
|
||||
}
|
||||
string = buildCreateCTableJson(pCreateReq, rsp->createTableNum);
|
||||
|
||||
|
@ -494,7 +500,10 @@ static char* processAlterTable(SMqMetaRsp* metaRsp) {
|
|||
char* buf = NULL;
|
||||
|
||||
if (vAlterTbReq.tagType == TSDB_DATA_TYPE_JSON) {
|
||||
ASSERT(tTagIsJson(vAlterTbReq.pTagVal) == true);
|
||||
if(!tTagIsJson(vAlterTbReq.pTagVal)){
|
||||
uError("processAlterTable isJson false");
|
||||
goto _exit;
|
||||
}
|
||||
buf = parseTagDatatoJson(vAlterTbReq.pTagVal);
|
||||
} else {
|
||||
buf = taosMemoryCalloc(vAlterTbReq.nTagVal + 1, 1);
|
||||
|
@ -1240,22 +1249,12 @@ static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
return code;
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
SVgroupInfo vg;
|
||||
void* data;
|
||||
} VgData;
|
||||
|
||||
static void destroyVgHash(void* data) {
|
||||
VgData* vgData = (VgData*)data;
|
||||
taosMemoryFreeClear(vgData->data);
|
||||
}
|
||||
|
||||
int taos_write_raw_block_with_fields(TAOS* taos, int rows, char* pData, const char* tbname, TAOS_FIELD* fields,
|
||||
int numFields) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
STableMeta* pTableMeta = NULL;
|
||||
SQuery* pQuery = NULL;
|
||||
SSubmitReq* subReq = NULL;
|
||||
SHashObj* pVgHash = NULL;
|
||||
|
||||
SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0);
|
||||
if (!pRequest) {
|
||||
|
@ -1300,149 +1299,25 @@ int taos_write_raw_block_with_fields(TAOS* taos, int rows, char* pData, const ch
|
|||
uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbname);
|
||||
goto end;
|
||||
}
|
||||
uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
|
||||
uint64_t uid = pTableMeta->uid;
|
||||
int32_t numOfCols = pTableMeta->tableInfo.numOfColumns;
|
||||
|
||||
uint16_t fLen = 0;
|
||||
int32_t rowSize = 0;
|
||||
int16_t nVar = 0;
|
||||
for (int i = 0; i < pTableMeta->tableInfo.numOfColumns; i++) {
|
||||
SSchema* schema = pTableMeta->schema + i;
|
||||
fLen += TYPE_BYTES[schema->type];
|
||||
rowSize += schema->bytes;
|
||||
if (IS_VAR_DATA_TYPE(schema->type)) {
|
||||
nVar++;
|
||||
}
|
||||
}
|
||||
|
||||
fLen -= sizeof(TSKEY);
|
||||
|
||||
int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
|
||||
(int32_t)TD_BITMAP_BYTES(numOfCols - 1);
|
||||
int32_t schemaLen = 0;
|
||||
int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
|
||||
|
||||
int32_t totalLen = sizeof(SSubmitReq) + submitLen;
|
||||
subReq = taosMemoryCalloc(1, totalLen);
|
||||
SSubmitBlk* blk = POINTER_SHIFT(subReq, sizeof(SSubmitReq));
|
||||
void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk));
|
||||
STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
|
||||
|
||||
SRowBuilder rb = {0};
|
||||
tdSRowInit(&rb, pTableMeta->sversion);
|
||||
tdSRowSetTpInfo(&rb, numOfCols, fLen);
|
||||
int32_t dataLen = 0;
|
||||
|
||||
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column
|
||||
// length |
|
||||
char* pStart = pData + getVersion1BlockMetaSize(pData, numFields);
|
||||
int32_t* colLength = (int32_t*)pStart;
|
||||
pStart += sizeof(int32_t) * numFields;
|
||||
|
||||
SResultColumn* pCol = taosMemoryCalloc(numFields, sizeof(SResultColumn));
|
||||
|
||||
for (int32_t i = 0; i < numFields; ++i) {
|
||||
if (IS_VAR_DATA_TYPE(fields[i].type)) {
|
||||
pCol[i].offset = (int32_t*)pStart;
|
||||
pStart += rows * sizeof(int32_t);
|
||||
} else {
|
||||
pCol[i].nullbitmap = pStart;
|
||||
pStart += BitmapLen(rows);
|
||||
}
|
||||
|
||||
pCol[i].pData = pStart;
|
||||
pStart += colLength[i];
|
||||
}
|
||||
|
||||
SHashObj* schemaHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
|
||||
for (int i = 0; i < numFields; i++) {
|
||||
TAOS_FIELD* schema = &fields[i];
|
||||
taosHashPut(schemaHash, schema->name, strlen(schema->name), &i, sizeof(int32_t));
|
||||
}
|
||||
|
||||
for (int32_t j = 0; j < rows; j++) {
|
||||
tdSRowResetBuf(&rb, rowData);
|
||||
int32_t offset = 0;
|
||||
for (int32_t k = 0; k < numOfCols; k++) {
|
||||
const SSchema* pColumn = &pTableMeta->schema[k];
|
||||
int32_t* index = taosHashGet(schemaHash, pColumn->name, strlen(pColumn->name));
|
||||
if (!index) { // add none
|
||||
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NONE, NULL, false, offset, k);
|
||||
} else {
|
||||
if (IS_VAR_DATA_TYPE(pColumn->type)) {
|
||||
if (pCol[*index].offset[j] != -1) {
|
||||
char* data = pCol[*index].pData + pCol[*index].offset[j];
|
||||
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
|
||||
} else {
|
||||
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
|
||||
}
|
||||
} else {
|
||||
if (!colDataIsNull_f(pCol[*index].nullbitmap, j)) {
|
||||
char* data = pCol[*index].pData + pColumn->bytes * j;
|
||||
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
|
||||
} else {
|
||||
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (pColumn->colId != PRIMARYKEY_TIMESTAMP_COL_ID) {
|
||||
offset += TYPE_BYTES[pColumn->type];
|
||||
}
|
||||
}
|
||||
tdSRowEnd(&rb);
|
||||
int32_t rowLen = TD_ROW_LEN(rowData);
|
||||
rowData = POINTER_SHIFT(rowData, rowLen);
|
||||
dataLen += rowLen;
|
||||
}
|
||||
|
||||
taosHashCleanup(schemaHash);
|
||||
taosMemoryFree(pCol);
|
||||
|
||||
blk->uid = htobe64(uid);
|
||||
blk->suid = htobe64(suid);
|
||||
blk->sversion = htonl(pTableMeta->sversion);
|
||||
blk->schemaLen = htonl(schemaLen);
|
||||
blk->numOfRows = htonl(rows);
|
||||
blk->dataLen = htonl(dataLen);
|
||||
subReq->length = sizeof(SSubmitReq) + sizeof(SSubmitBlk) + schemaLen + dataLen;
|
||||
subReq->numOfBlocks = 1;
|
||||
|
||||
pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
|
||||
if (NULL == pQuery) {
|
||||
uError("create SQuery error");
|
||||
pQuery = smlInitHandle();
|
||||
if (pQuery == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto end;
|
||||
}
|
||||
pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
|
||||
pQuery->haveResultSet = false;
|
||||
pQuery->msgType = TDMT_VND_SUBMIT;
|
||||
pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT);
|
||||
if (NULL == pQuery->pRoot) {
|
||||
uError("create pQuery->pRoot error");
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto end;
|
||||
}
|
||||
SVnodeModifyOpStmt* nodeStmt = (SVnodeModifyOpStmt*)(pQuery->pRoot);
|
||||
nodeStmt->pDataBlocks = taosArrayInit(1, POINTER_BYTES);
|
||||
pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
|
||||
taosHashPut(pVgHash, (const char*)&vgData.vgId, sizeof(vgData.vgId), (char*)&vgData, sizeof(vgData));
|
||||
|
||||
SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
|
||||
if (NULL == dst) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
code = rawBlockBindData(pQuery, pTableMeta, pData, NULL, fields, numFields);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
uError("WriteRaw:rawBlockBindData failed");
|
||||
goto end;
|
||||
}
|
||||
dst->vg = vgData;
|
||||
dst->numOfTables = subReq->numOfBlocks;
|
||||
dst->size = subReq->length;
|
||||
dst->pData = (char*)subReq;
|
||||
subReq->header.vgId = htonl(dst->vg.vgId);
|
||||
subReq->version = htonl(1);
|
||||
subReq->header.contLen = htonl(subReq->length);
|
||||
subReq->length = htonl(subReq->length);
|
||||
subReq->numOfBlocks = htonl(subReq->numOfBlocks);
|
||||
subReq = NULL; // no need free
|
||||
taosArrayPush(nodeStmt->pDataBlocks, &dst);
|
||||
|
||||
code = smlBuildOutput(pQuery, pVgHash);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
uError("smlBuildOutput failed");
|
||||
return code;
|
||||
}
|
||||
|
||||
launchQueryImpl(pRequest, pQuery, true, NULL);
|
||||
code = pRequest->code;
|
||||
|
@ -1450,7 +1325,8 @@ int taos_write_raw_block_with_fields(TAOS* taos, int rows, char* pData, const ch
|
|||
end:
|
||||
taosMemoryFreeClear(pTableMeta);
|
||||
qDestroyQuery(pQuery);
|
||||
taosMemoryFree(subReq);
|
||||
destroyRequest(pRequest);
|
||||
taosHashCleanup(pVgHash);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -1458,7 +1334,7 @@ int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname)
|
|||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
STableMeta* pTableMeta = NULL;
|
||||
SQuery* pQuery = NULL;
|
||||
SSubmitReq* subReq = NULL;
|
||||
SHashObj* pVgHash = NULL;
|
||||
|
||||
SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0);
|
||||
if (!pRequest) {
|
||||
|
@ -1503,138 +1379,25 @@ int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname)
|
|||
uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbname);
|
||||
goto end;
|
||||
}
|
||||
uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
|
||||
uint64_t uid = pTableMeta->uid;
|
||||
int32_t numOfCols = pTableMeta->tableInfo.numOfColumns;
|
||||
|
||||
uint16_t fLen = 0;
|
||||
int32_t rowSize = 0;
|
||||
int16_t nVar = 0;
|
||||
for (int i = 0; i < numOfCols; i++) {
|
||||
SSchema* schema = pTableMeta->schema + i;
|
||||
fLen += TYPE_BYTES[schema->type];
|
||||
rowSize += schema->bytes;
|
||||
if (IS_VAR_DATA_TYPE(schema->type)) {
|
||||
nVar++;
|
||||
}
|
||||
}
|
||||
fLen -= sizeof(TSKEY);
|
||||
|
||||
int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
|
||||
(int32_t)TD_BITMAP_BYTES(numOfCols - 1);
|
||||
int32_t schemaLen = 0;
|
||||
int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
|
||||
|
||||
int32_t totalLen = sizeof(SSubmitReq) + submitLen;
|
||||
subReq = taosMemoryCalloc(1, totalLen);
|
||||
SSubmitBlk* blk = POINTER_SHIFT(subReq, sizeof(SSubmitReq));
|
||||
void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk));
|
||||
STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
|
||||
|
||||
SRowBuilder rb = {0};
|
||||
tdSRowInit(&rb, pTableMeta->sversion);
|
||||
tdSRowSetTpInfo(&rb, numOfCols, fLen);
|
||||
int32_t dataLen = 0;
|
||||
|
||||
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column
|
||||
// length |
|
||||
char* pStart = pData + getVersion1BlockMetaSize(pData, numOfCols);
|
||||
int32_t* colLength = (int32_t*)pStart;
|
||||
pStart += sizeof(int32_t) * numOfCols;
|
||||
|
||||
SResultColumn* pCol = taosMemoryCalloc(numOfCols, sizeof(SResultColumn));
|
||||
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
if (IS_VAR_DATA_TYPE(pTableMeta->schema[i].type)) {
|
||||
pCol[i].offset = (int32_t*)pStart;
|
||||
pStart += rows * sizeof(int32_t);
|
||||
} else {
|
||||
pCol[i].nullbitmap = pStart;
|
||||
pStart += BitmapLen(rows);
|
||||
}
|
||||
|
||||
pCol[i].pData = pStart;
|
||||
pStart += colLength[i];
|
||||
}
|
||||
|
||||
for (int32_t j = 0; j < rows; j++) {
|
||||
tdSRowResetBuf(&rb, rowData);
|
||||
int32_t offset = 0;
|
||||
for (int32_t k = 0; k < numOfCols; k++) {
|
||||
const SSchema* pColumn = &pTableMeta->schema[k];
|
||||
|
||||
if (IS_VAR_DATA_TYPE(pColumn->type)) {
|
||||
if (pCol[k].offset[j] != -1) {
|
||||
char* data = pCol[k].pData + pCol[k].offset[j];
|
||||
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
|
||||
} else {
|
||||
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
|
||||
}
|
||||
} else {
|
||||
if (!colDataIsNull_f(pCol[k].nullbitmap, j)) {
|
||||
char* data = pCol[k].pData + pColumn->bytes * j;
|
||||
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
|
||||
} else {
|
||||
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
|
||||
}
|
||||
}
|
||||
|
||||
if (pColumn->colId != PRIMARYKEY_TIMESTAMP_COL_ID) {
|
||||
offset += TYPE_BYTES[pColumn->type];
|
||||
}
|
||||
}
|
||||
tdSRowEnd(&rb);
|
||||
int32_t rowLen = TD_ROW_LEN(rowData);
|
||||
rowData = POINTER_SHIFT(rowData, rowLen);
|
||||
dataLen += rowLen;
|
||||
}
|
||||
|
||||
taosMemoryFree(pCol);
|
||||
|
||||
blk->uid = htobe64(uid);
|
||||
blk->suid = htobe64(suid);
|
||||
blk->sversion = htonl(pTableMeta->sversion);
|
||||
blk->schemaLen = htonl(schemaLen);
|
||||
blk->numOfRows = htonl(rows);
|
||||
blk->dataLen = htonl(dataLen);
|
||||
subReq->length = sizeof(SSubmitReq) + sizeof(SSubmitBlk) + schemaLen + dataLen;
|
||||
subReq->numOfBlocks = 1;
|
||||
|
||||
pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
|
||||
if (NULL == pQuery) {
|
||||
uError("create SQuery error");
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
taosMemoryFree(subReq);
|
||||
goto end;
|
||||
}
|
||||
pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
|
||||
pQuery->haveResultSet = false;
|
||||
pQuery->msgType = TDMT_VND_SUBMIT;
|
||||
pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT);
|
||||
if (NULL == pQuery->pRoot) {
|
||||
uError("create pQuery->pRoot error");
|
||||
pQuery = smlInitHandle();
|
||||
if (pQuery == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto end;
|
||||
}
|
||||
SVnodeModifyOpStmt* nodeStmt = (SVnodeModifyOpStmt*)(pQuery->pRoot);
|
||||
nodeStmt->pDataBlocks = taosArrayInit(1, POINTER_BYTES);
|
||||
pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
|
||||
taosHashPut(pVgHash, (const char*)&vgData.vgId, sizeof(vgData.vgId), (char*)&vgData, sizeof(vgData));
|
||||
|
||||
SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
|
||||
if (NULL == dst) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
code = rawBlockBindData(pQuery, pTableMeta, pData, NULL, NULL, 0);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
uError("WriteRaw:rawBlockBindData failed");
|
||||
goto end;
|
||||
}
|
||||
dst->vg = vgData;
|
||||
dst->numOfTables = subReq->numOfBlocks;
|
||||
dst->size = subReq->length;
|
||||
dst->pData = (char*)subReq;
|
||||
subReq->header.vgId = htonl(dst->vg.vgId);
|
||||
subReq->version = htonl(1);
|
||||
subReq->header.contLen = htonl(subReq->length);
|
||||
subReq->length = htonl(subReq->length);
|
||||
subReq->numOfBlocks = htonl(subReq->numOfBlocks);
|
||||
subReq = NULL;  // ownership transferred to dst->pData, must not be freed here
|
||||
taosArrayPush(nodeStmt->pDataBlocks, &dst);
|
||||
|
||||
code = smlBuildOutput(pQuery, pVgHash);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
uError("smlBuildOutput failed");
|
||||
return code;
|
||||
}
|
||||
|
||||
launchQueryImpl(pRequest, pQuery, true, NULL);
|
||||
code = pRequest->code;
|
||||
|
@ -1642,7 +1405,8 @@ int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname)
|
|||
end:
|
||||
taosMemoryFreeClear(pTableMeta);
|
||||
qDestroyQuery(pQuery);
|
||||
taosMemoryFree(subReq);
|
||||
destroyRequest(pRequest);
|
||||
taosHashCleanup(pVgHash);
|
||||
return code;
|
||||
}
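For orientation, a minimal caller-side sketch of taos_write_raw_block (not part of this commit): fetch one raw block from a query with taos_fetch_raw_block and replay it into another table with an identical schema. The table names and the connection are placeholders, and the sketch assumes the fetched block layout is the one taos_write_raw_block expects, which is how the two calls are typically paired.

#include <stdio.h>
#include "taos.h"

// Hedged usage sketch: copy one raw block from `src_table` into `dst_table`.
// Assumes both tables already exist with identical column layouts.
static int copy_one_block(TAOS *conn) {
  TAOS_RES *res = taos_query(conn, "select * from src_table");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
    taos_free_result(res);
    return -1;
  }

  int   numOfRows = 0;
  void *pData = NULL;
  // taos_fetch_raw_block hands back the next block in the raw layout
  // consumed by taos_write_raw_block.
  taos_fetch_raw_block(res, &numOfRows, &pData);

  int code = 0;
  if (numOfRows > 0) {
    code = taos_write_raw_block(conn, numOfRows, pData, "dst_table");
  }
  taos_free_result(res);
  return code;
}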
|
||||
|
||||
|
@ -1679,8 +1443,6 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
|
|||
goto end;
|
||||
}
|
||||
|
||||
pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
|
||||
taosHashSetFreeFp(pVgHash, destroyVgHash);
|
||||
struct SCatalog* pCatalog = NULL;
|
||||
code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
|
@ -1694,6 +1456,12 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
|
|||
conn.requestObjRefId = pRequest->self;
|
||||
conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
|
||||
|
||||
pQuery = smlInitHandle();
|
||||
if (pQuery == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto end;
|
||||
}
|
||||
pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
|
||||
uDebug("raw data block num:%d\n", rspObj.rsp.blockNum);
|
||||
while (++rspObj.resIter < rspObj.rsp.blockNum) {
|
||||
SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
|
||||
|
@ -1701,14 +1469,6 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
|
|||
uError("WriteRaw:no schema, iter:%d", rspObj.resIter);
|
||||
goto end;
|
||||
}
|
||||
SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.rsp.blockSchema, rspObj.resIter);
|
||||
setResSchemaInfo(&rspObj.resInfo, pSW->pSchema, pSW->nCols);
|
||||
|
||||
code = setQueryResultFromRsp(&rspObj.resInfo, pRetrieve, false, false);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
uError("WriteRaw: setQueryResultFromRsp error");
|
||||
goto end;
|
||||
}
|
||||
|
||||
const char* tbName = (const char*)taosArrayGetP(rspObj.rsp.blockTbName, rspObj.resIter);
|
||||
if (!tbName) {
|
||||
|
@ -1722,13 +1482,6 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
|
|||
strcpy(pName.dbname, pRequest->pDb);
|
||||
strcpy(pName.tname, tbName);
|
||||
|
||||
VgData vgData = {0};
|
||||
code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &(vgData.vg));
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbName);
|
||||
goto end;
|
||||
}
|
||||
|
||||
code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
|
||||
if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
|
||||
uError("WriteRaw:catalogGetTableMeta table not exist. table name: %s", tbName);
|
||||
|
@ -1740,164 +1493,29 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
|
|||
goto end;
|
||||
}
|
||||
|
||||
uint16_t fLen = 0;
|
||||
int32_t rowSize = 0;
|
||||
int16_t nVar = 0;
|
||||
for (int i = 0; i < pTableMeta->tableInfo.numOfColumns; i++) {
|
||||
SSchema* schema = &pTableMeta->schema[i];
|
||||
fLen += TYPE_BYTES[schema->type];
|
||||
rowSize += schema->bytes;
|
||||
if (IS_VAR_DATA_TYPE(schema->type)) {
|
||||
nVar++;
|
||||
}
|
||||
}
|
||||
fLen -= sizeof(TSKEY);
|
||||
|
||||
int32_t rows = rspObj.resInfo.numOfRows;
|
||||
int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
|
||||
(int32_t)TD_BITMAP_BYTES(pTableMeta->tableInfo.numOfColumns - 1);
|
||||
int32_t schemaLen = 0;
|
||||
int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
|
||||
|
||||
SSubmitReq* subReq = NULL;
|
||||
SSubmitBlk* blk = NULL;
|
||||
void* hData = taosHashGet(pVgHash, &vgData.vg.vgId, sizeof(vgData.vg.vgId));
|
||||
if (hData) {
|
||||
vgData = *(VgData*)hData;
|
||||
|
||||
int32_t totalLen = ((SSubmitReq*)(vgData.data))->length + submitLen;
|
||||
void* tmp = taosMemoryRealloc(vgData.data, totalLen);
|
||||
if (tmp == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto end;
|
||||
}
|
||||
vgData.data = tmp;
|
||||
((VgData*)hData)->data = tmp;
|
||||
subReq = (SSubmitReq*)(vgData.data);
|
||||
blk = POINTER_SHIFT(vgData.data, subReq->length);
|
||||
} else {
|
||||
int32_t totalLen = sizeof(SSubmitReq) + submitLen;
|
||||
void* tmp = taosMemoryCalloc(1, totalLen);
|
||||
if (tmp == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto end;
|
||||
}
|
||||
vgData.data = tmp;
|
||||
taosHashPut(pVgHash, (const char*)&vgData.vg.vgId, sizeof(vgData.vg.vgId), (char*)&vgData, sizeof(vgData));
|
||||
subReq = (SSubmitReq*)(vgData.data);
|
||||
subReq->length = sizeof(SSubmitReq);
|
||||
subReq->numOfBlocks = 0;
|
||||
|
||||
blk = POINTER_SHIFT(vgData.data, sizeof(SSubmitReq));
|
||||
}
|
||||
|
||||
// pSW->pSchema should be same as pTableMeta->schema
|
||||
// ASSERT(pSW->nCols == pTableMeta->tableInfo.numOfColumns);
|
||||
uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
|
||||
uint64_t uid = pTableMeta->uid;
|
||||
int16_t sver = pTableMeta->sversion;
|
||||
|
||||
void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk));
|
||||
STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
|
||||
|
||||
SRowBuilder rb = {0};
|
||||
tdSRowInit(&rb, sver);
|
||||
tdSRowSetTpInfo(&rb, pTableMeta->tableInfo.numOfColumns, fLen);
|
||||
int32_t totalLen = 0;
|
||||
|
||||
SHashObj* schemaHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
|
||||
for (int i = 0; i < pSW->nCols; i++) {
|
||||
SSchema* schema = &pSW->pSchema[i];
|
||||
taosHashPut(schemaHash, schema->name, strlen(schema->name), &i, sizeof(int32_t));
|
||||
}
|
||||
|
||||
for (int32_t j = 0; j < rows; j++) {
|
||||
tdSRowResetBuf(&rb, rowData);
|
||||
|
||||
doSetOneRowPtr(&rspObj.resInfo);
|
||||
rspObj.resInfo.current += 1;
|
||||
|
||||
int32_t offset = 0;
|
||||
for (int32_t k = 0; k < pTableMeta->tableInfo.numOfColumns; k++) {
|
||||
const SSchema* pColumn = &pTableMeta->schema[k];
|
||||
int32_t* index = taosHashGet(schemaHash, pColumn->name, strlen(pColumn->name));
|
||||
if (!index) {
|
||||
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NONE, NULL, false, offset, k);
|
||||
} else {
|
||||
char* colData = rspObj.resInfo.row[*index];
|
||||
if (!colData) {
|
||||
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
|
||||
} else {
|
||||
if (IS_VAR_DATA_TYPE(pColumn->type)) {
|
||||
colData -= VARSTR_HEADER_SIZE;
|
||||
}
|
||||
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, colData, true, offset, k);
|
||||
}
|
||||
}
|
||||
if (pColumn->colId != PRIMARYKEY_TIMESTAMP_COL_ID) {
|
||||
offset += TYPE_BYTES[pColumn->type];
|
||||
}
|
||||
}
|
||||
tdSRowEnd(&rb);
|
||||
int32_t rowLen = TD_ROW_LEN(rowData);
|
||||
rowData = POINTER_SHIFT(rowData, rowLen);
|
||||
totalLen += rowLen;
|
||||
}
|
||||
|
||||
taosHashCleanup(schemaHash);
|
||||
blk->uid = htobe64(uid);
|
||||
blk->suid = htobe64(suid);
|
||||
blk->sversion = htonl(sver);
|
||||
blk->schemaLen = htonl(schemaLen);
|
||||
blk->numOfRows = htonl(rows);
|
||||
blk->dataLen = htonl(totalLen);
|
||||
subReq->length += sizeof(SSubmitBlk) + schemaLen + totalLen;
|
||||
subReq->numOfBlocks++;
|
||||
taosMemoryFreeClear(pTableMeta);
|
||||
rspObj.resInfo.pRspMsg = NULL;
|
||||
doFreeReqResultInfo(&rspObj.resInfo);
|
||||
}
|
||||
|
||||
pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
|
||||
if (NULL == pQuery) {
|
||||
uError("create SQuery error");
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto end;
|
||||
}
|
||||
pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
|
||||
pQuery->haveResultSet = false;
|
||||
pQuery->msgType = TDMT_VND_SUBMIT;
|
||||
pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT);
|
||||
if (NULL == pQuery->pRoot) {
|
||||
uError("create pQuery->pRoot error");
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto end;
|
||||
}
|
||||
SVnodeModifyOpStmt* nodeStmt = (SVnodeModifyOpStmt*)(pQuery->pRoot);
|
||||
|
||||
int32_t numOfVg = taosHashGetSize(pVgHash);
|
||||
nodeStmt->pDataBlocks = taosArrayInit(numOfVg, POINTER_BYTES);
|
||||
|
||||
VgData* vData = (VgData*)taosHashIterate(pVgHash, NULL);
|
||||
while (vData) {
|
||||
SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
|
||||
if (NULL == dst) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
SVgroupInfo vg;
|
||||
code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vg);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbName);
|
||||
goto end;
|
||||
}
|
||||
dst->vg = vData->vg;
|
||||
SSubmitReq* subReq = (SSubmitReq*)(vData->data);
|
||||
dst->numOfTables = subReq->numOfBlocks;
|
||||
dst->size = subReq->length;
|
||||
dst->pData = (char*)subReq;
|
||||
vData->data = NULL; // no need free
|
||||
subReq->header.vgId = htonl(dst->vg.vgId);
|
||||
subReq->version = htonl(1);
|
||||
subReq->header.contLen = htonl(subReq->length);
|
||||
subReq->length = htonl(subReq->length);
|
||||
subReq->numOfBlocks = htonl(subReq->numOfBlocks);
|
||||
taosArrayPush(nodeStmt->pDataBlocks, &dst);
|
||||
vData = (VgData*)taosHashIterate(pVgHash, vData);
|
||||
|
||||
void* hData = taosHashGet(pVgHash, &vg.vgId, sizeof(vg.vgId));
|
||||
if (hData == NULL) {
|
||||
taosHashPut(pVgHash, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg));
|
||||
}
|
||||
|
||||
code = rawBlockBindData(pQuery, pTableMeta, pRetrieve->data, NULL, NULL, 0);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
uError("WriteRaw:rawBlockBindData failed");
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
|
||||
code = smlBuildOutput(pQuery, pVgHash);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
uError("smlBuildOutput failed");
|
||||
return code;
|
||||
}
|
||||
|
||||
launchQueryImpl(pRequest, pQuery, true, NULL);
|
||||
|
@ -1905,8 +1523,6 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
|
|||
|
||||
end:
|
||||
tDeleteSMqDataRsp(&rspObj.rsp);
|
||||
rspObj.resInfo.pRspMsg = NULL;
|
||||
doFreeReqResultInfo(&rspObj.resInfo);
|
||||
tDecoderClear(&decoder);
|
||||
qDestroyQuery(pQuery);
|
||||
destroyRequest(pRequest);
|
||||
|
@ -1948,8 +1564,6 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
|
|||
goto end;
|
||||
}
|
||||
|
||||
pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
|
||||
taosHashSetFreeFp(pVgHash, destroyVgHash);
|
||||
struct SCatalog* pCatalog = NULL;
|
||||
code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
|
@ -1963,6 +1577,13 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
|
|||
conn.requestObjRefId = pRequest->self;
|
||||
conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
|
||||
|
||||
pQuery = smlInitHandle();
|
||||
if (pQuery == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto end;
|
||||
}
|
||||
pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
|
||||
|
||||
uDebug("raw data block num:%d\n", rspObj.rsp.blockNum);
|
||||
while (++rspObj.resIter < rspObj.rsp.blockNum) {
|
||||
SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
|
||||
|
@ -1970,14 +1591,6 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
|
|||
uError("WriteRaw:no schema, iter:%d", rspObj.resIter);
|
||||
goto end;
|
||||
}
|
||||
SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.rsp.blockSchema, rspObj.resIter);
|
||||
setResSchemaInfo(&rspObj.resInfo, pSW->pSchema, pSW->nCols);
|
||||
|
||||
code = setQueryResultFromRsp(&rspObj.resInfo, pRetrieve, false, false);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
uError("WriteRaw: setQueryResultFromRsp error");
|
||||
goto end;
|
||||
}
|
||||
|
||||
const char* tbName = (const char*)taosArrayGetP(rspObj.rsp.blockTbName, rspObj.resIter);
|
||||
if (!tbName) {
|
||||
|
@ -1991,44 +1604,32 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
|
|||
strcpy(pName.dbname, pRequest->pDb);
|
||||
strcpy(pName.tname, tbName);
|
||||
|
||||
VgData vgData = {0};
|
||||
code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &(vgData.vg));
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbName);
|
||||
goto end;
|
||||
}
|
||||
|
||||
// find schema data info
|
||||
int32_t schemaLen = 0;
|
||||
void* schemaData = NULL;
|
||||
SVCreateTbReq pCreateReq = {0};
|
||||
|
||||
for (int j = 0; j < rspObj.rsp.createTableNum; j++) {
|
||||
void** dataTmp = taosArrayGet(rspObj.rsp.createTableReq, j);
|
||||
int32_t* lenTmp = taosArrayGet(rspObj.rsp.createTableLen, j);
|
||||
|
||||
SDecoder decoderTmp = {0};
|
||||
SVCreateTbReq pCreateReq = {0};
|
||||
|
||||
SDecoder decoderTmp = {0};
|
||||
tDecoderInit(&decoderTmp, *dataTmp, *lenTmp);
|
||||
memset(&pCreateReq, 0, sizeof(SVCreateTbReq));
|
||||
if (tDecodeSVCreateTbReq(&decoderTmp, &pCreateReq) < 0) {
|
||||
tDecoderClear(&decoderTmp);
|
||||
taosMemoryFreeClear(pCreateReq.comment);
|
||||
taosArrayDestroy(pCreateReq.ctb.tagName);
|
||||
goto end;
|
||||
}
|
||||
|
||||
ASSERT(pCreateReq.type == TSDB_CHILD_TABLE);
|
||||
if(pCreateReq.type != TSDB_CHILD_TABLE){
|
||||
uError("WriteRaw:pCreateReq.type != TSDB_CHILD_TABLE. table name: %s", tbName);
|
||||
code = TSDB_CODE_TSC_INVALID_VALUE;
|
||||
goto end;
|
||||
}
|
||||
if (strcmp(tbName, pCreateReq.name) == 0) {
|
||||
schemaLen = *lenTmp;
|
||||
schemaData = *dataTmp;
|
||||
strcpy(pName.tname, pCreateReq.ctb.stbName);
|
||||
tDecoderClear(&decoderTmp);
|
||||
taosMemoryFreeClear(pCreateReq.comment);
|
||||
taosArrayDestroy(pCreateReq.ctb.tagName);
|
||||
break;
|
||||
}
|
||||
tDecoderClear(&decoderTmp);
|
||||
taosMemoryFreeClear(pCreateReq.comment);
|
||||
taosArrayDestroy(pCreateReq.ctb.tagName);
|
||||
}
|
||||
|
||||
code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
|
||||
|
@ -2042,167 +1643,23 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
|
|||
goto end;
|
||||
}
|
||||
|
||||
uint16_t fLen = 0;
|
||||
int32_t rowSize = 0;
|
||||
int16_t nVar = 0;
|
||||
for (int i = 0; i < pTableMeta->tableInfo.numOfColumns; i++) {
|
||||
SSchema* schema = &pTableMeta->schema[i];
|
||||
fLen += TYPE_BYTES[schema->type];
|
||||
rowSize += schema->bytes;
|
||||
if (IS_VAR_DATA_TYPE(schema->type)) {
|
||||
nVar++;
|
||||
}
|
||||
}
|
||||
fLen -= sizeof(TSKEY);
|
||||
|
||||
int32_t rows = rspObj.resInfo.numOfRows;
|
||||
int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
|
||||
(int32_t)TD_BITMAP_BYTES(pTableMeta->tableInfo.numOfColumns - 1);
|
||||
|
||||
int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
|
||||
|
||||
SSubmitReq* subReq = NULL;
|
||||
SSubmitBlk* blk = NULL;
|
||||
void* hData = taosHashGet(pVgHash, &vgData.vg.vgId, sizeof(vgData.vg.vgId));
|
||||
if (hData) {
|
||||
vgData = *(VgData*)hData;
|
||||
|
||||
int32_t totalLen = ((SSubmitReq*)(vgData.data))->length + submitLen;
|
||||
void* tmp = taosMemoryRealloc(vgData.data, totalLen);
|
||||
if (tmp == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto end;
|
||||
}
|
||||
vgData.data = tmp;
|
||||
((VgData*)hData)->data = tmp;
|
||||
subReq = (SSubmitReq*)(vgData.data);
|
||||
blk = POINTER_SHIFT(vgData.data, subReq->length);
|
||||
} else {
|
||||
int32_t totalLen = sizeof(SSubmitReq) + submitLen;
|
||||
void* tmp = taosMemoryCalloc(1, totalLen);
|
||||
if (tmp == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto end;
|
||||
}
|
||||
vgData.data = tmp;
|
||||
taosHashPut(pVgHash, (const char*)&vgData.vg.vgId, sizeof(vgData.vg.vgId), (char*)&vgData, sizeof(vgData));
|
||||
subReq = (SSubmitReq*)(vgData.data);
|
||||
subReq->length = sizeof(SSubmitReq);
|
||||
subReq->numOfBlocks = 0;
|
||||
|
||||
blk = POINTER_SHIFT(vgData.data, sizeof(SSubmitReq));
|
||||
}
|
||||
|
||||
// pSW->pSchema should be same as pTableMeta->schema
|
||||
// ASSERT(pSW->nCols == pTableMeta->tableInfo.numOfColumns);
|
||||
uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
|
||||
uint64_t uid = pTableMeta->uid;
|
||||
int16_t sver = pTableMeta->sversion;
|
||||
|
||||
void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk));
|
||||
if (schemaData) {
|
||||
memcpy(blkSchema, schemaData, schemaLen);
|
||||
}
|
||||
STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
|
||||
|
||||
SRowBuilder rb = {0};
|
||||
tdSRowInit(&rb, sver);
|
||||
tdSRowSetTpInfo(&rb, pTableMeta->tableInfo.numOfColumns, fLen);
|
||||
int32_t totalLen = 0;
|
||||
|
||||
SHashObj* schemaHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
|
||||
for (int i = 0; i < pSW->nCols; i++) {
|
||||
SSchema* schema = &pSW->pSchema[i];
|
||||
taosHashPut(schemaHash, schema->name, strlen(schema->name), &i, sizeof(int32_t));
|
||||
}
|
||||
|
||||
for (int32_t j = 0; j < rows; j++) {
|
||||
tdSRowResetBuf(&rb, rowData);
|
||||
|
||||
doSetOneRowPtr(&rspObj.resInfo);
|
||||
rspObj.resInfo.current += 1;
|
||||
|
||||
int32_t offset = 0;
|
||||
for (int32_t k = 0; k < pTableMeta->tableInfo.numOfColumns; k++) {
|
||||
const SSchema* pColumn = &pTableMeta->schema[k];
|
||||
int32_t* index = taosHashGet(schemaHash, pColumn->name, strlen(pColumn->name));
|
||||
if (!index) {
|
||||
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NONE, NULL, false, offset, k);
|
||||
} else {
|
||||
char* colData = rspObj.resInfo.row[*index];
|
||||
if (!colData) {
|
||||
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
|
||||
} else {
|
||||
if (IS_VAR_DATA_TYPE(pColumn->type)) {
|
||||
colData -= VARSTR_HEADER_SIZE;
|
||||
}
|
||||
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, colData, true, offset, k);
|
||||
}
|
||||
}
|
||||
if (pColumn->colId != PRIMARYKEY_TIMESTAMP_COL_ID) {
|
||||
offset += TYPE_BYTES[pColumn->type];
|
||||
}
|
||||
}
|
||||
tdSRowEnd(&rb);
|
||||
int32_t rowLen = TD_ROW_LEN(rowData);
|
||||
rowData = POINTER_SHIFT(rowData, rowLen);
|
||||
totalLen += rowLen;
|
||||
}
|
||||
|
||||
taosHashCleanup(schemaHash);
|
||||
blk->uid = htobe64(uid);
|
||||
blk->suid = htobe64(suid);
|
||||
blk->sversion = htonl(sver);
|
||||
blk->schemaLen = htonl(schemaLen);
|
||||
blk->numOfRows = htonl(rows);
|
||||
blk->dataLen = htonl(totalLen);
|
||||
subReq->length += sizeof(SSubmitBlk) + schemaLen + totalLen;
|
||||
subReq->numOfBlocks++;
|
||||
taosMemoryFreeClear(pTableMeta);
|
||||
rspObj.resInfo.pRspMsg = NULL;
|
||||
doFreeReqResultInfo(&rspObj.resInfo);
|
||||
}
|
||||
|
||||
pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
|
||||
if (NULL == pQuery) {
|
||||
uError("create SQuery error");
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto end;
|
||||
}
|
||||
pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
|
||||
pQuery->haveResultSet = false;
|
||||
pQuery->msgType = TDMT_VND_SUBMIT;
|
||||
pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT);
|
||||
if (NULL == pQuery->pRoot) {
|
||||
uError("create pQuery->pRoot error");
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto end;
|
||||
}
|
||||
SVnodeModifyOpStmt* nodeStmt = (SVnodeModifyOpStmt*)(pQuery->pRoot);
|
||||
|
||||
int32_t numOfVg = taosHashGetSize(pVgHash);
|
||||
nodeStmt->pDataBlocks = taosArrayInit(numOfVg, POINTER_BYTES);
|
||||
|
||||
VgData* vData = (VgData*)taosHashIterate(pVgHash, NULL);
|
||||
while (vData) {
|
||||
SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
|
||||
if (NULL == dst) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
SVgroupInfo vg;
|
||||
code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vg);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbName);
|
||||
goto end;
|
||||
}
|
||||
|
||||
void* hData = taosHashGet(pVgHash, &vg.vgId, sizeof(vg.vgId));
|
||||
if (hData == NULL) {
|
||||
taosHashPut(pVgHash, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg));
|
||||
}
|
||||
|
||||
code = rawBlockBindData(pQuery, pTableMeta, pRetrieve->data, &pCreateReq, NULL, 0);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
uError("WriteRaw:rawBlockBindData failed");
|
||||
goto end;
|
||||
}
|
||||
dst->vg = vData->vg;
|
||||
SSubmitReq* subReq = (SSubmitReq*)(vData->data);
|
||||
dst->numOfTables = subReq->numOfBlocks;
|
||||
dst->size = subReq->length;
|
||||
dst->pData = (char*)subReq;
|
||||
vData->data = NULL; // no need free
|
||||
subReq->header.vgId = htonl(dst->vg.vgId);
|
||||
subReq->version = htonl(1);
|
||||
subReq->header.contLen = htonl(subReq->length);
|
||||
subReq->length = htonl(subReq->length);
|
||||
subReq->numOfBlocks = htonl(subReq->numOfBlocks);
|
||||
taosArrayPush(nodeStmt->pDataBlocks, &dst);
|
||||
vData = (VgData*)taosHashIterate(pVgHash, vData);
|
||||
}
|
||||
|
||||
launchQueryImpl(pRequest, pQuery, true, NULL);
|
||||
|
@ -2210,8 +1667,6 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
|
|||
|
||||
end:
|
||||
tDeleteSTaosxRsp(&rspObj.rsp);
|
||||
rspObj.resInfo.pRspMsg = NULL;
|
||||
doFreeReqResultInfo(&rspObj.resInfo);
|
||||
tDecoderClear(&decoder);
|
||||
qDestroyQuery(pQuery);
|
||||
destroyRequest(pRequest);
|
||||
|
|
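Both write paths above back the TMQ raw replication flow. A consumer-side sketch (not part of this commit; it assumes the 3.0 tmq raw APIs, and the consumer/destination handles are placeholders) of how raw payloads are usually pulled from one cluster and replayed into another:

#include <stdio.h>
#include "taos.h"

// Hedged sketch: poll a subscribed topic and replay every raw payload
// (data or meta-data) into the destination cluster via tmq_write_raw.
static void replicate(tmq_t *consumer, TAOS *dst) {
  while (1) {
    TAOS_RES *msg = tmq_consumer_poll(consumer, 1000 /* ms */);
    if (msg == NULL) break;  // nothing left to consume in this sketch

    tmq_raw_data raw = {0};
    if (tmq_get_raw(msg, &raw) == 0) {
      int32_t code = tmq_write_raw(dst, raw);
      if (code != 0) {
        fprintf(stderr, "tmq_write_raw failed: %s\n", tmq_err2str(code));
      }
      tmq_free_raw(raw);
    }
    taos_free_result(msg);
  }
}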
File diff suppressed because it is too large (2 file diffs omitted from this view)
|
@ -0,0 +1,664 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <ctype.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "clientSml.h"
|
||||
|
||||
// comma ,
|
||||
//#define IS_SLASH_COMMA(sql) (*(sql) == COMMA && *((sql)-1) == SLASH)
|
||||
#define IS_COMMA(sql) (*(sql) == COMMA && *((sql)-1) != SLASH)
|
||||
// space
|
||||
//#define IS_SLASH_SPACE(sql) (*(sql) == SPACE && *((sql)-1) == SLASH)
|
||||
#define IS_SPACE(sql) (*(sql) == SPACE && *((sql)-1) != SLASH)
|
||||
// equal =
|
||||
//#define IS_SLASH_EQUAL(sql) (*(sql) == EQUAL && *((sql)-1) == SLASH)
|
||||
#define IS_EQUAL(sql) (*(sql) == EQUAL && *((sql)-1) != SLASH)
|
||||
// quote "
|
||||
//#define IS_SLASH_QUOTE(sql) (*(sql) == QUOTE && *((sql)-1) == SLASH)
|
||||
#define IS_QUOTE(sql) (*(sql) == QUOTE && *((sql)-1) != SLASH)
|
||||
// SLASH
|
||||
//#define IS_SLASH_SLASH(sql) (*(sql) == SLASH && *((sql)-1) == SLASH)
|
||||
|
||||
#define IS_SLASH_LETTER(sql) \
|
||||
(*((sql)-1) == SLASH && (*(sql) == COMMA || *(sql) == SPACE || *(sql) == EQUAL || *(sql) == QUOTE || *(sql) == SLASH)) \
|
||||
// (IS_SLASH_COMMA(sql) || IS_SLASH_SPACE(sql) || IS_SLASH_EQUAL(sql) || IS_SLASH_QUOTE(sql) || IS_SLASH_SLASH(sql))
|
||||
|
||||
#define MOVE_FORWARD_ONE(sql, len) (memmove((void *)((sql)-1), (sql), len))
|
||||
|
||||
#define PROCESS_SLASH(key, keyLen) \
|
||||
for (int i = 1; i < keyLen; ++i) { \
|
||||
if (IS_SLASH_LETTER(key + i)) { \
|
||||
MOVE_FORWARD_ONE(key + i, keyLen - i); \
|
||||
i--; \
|
||||
keyLen--; \
|
||||
} \
|
||||
}
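As an illustration (not part of this commit), the escape handling above drops a backslash that precedes one of the special characters by shifting the rest of the buffer left with memmove. A standalone sketch of the same idea, with hypothetical local names:

#include <stdio.h>
#include <string.h>

// Standalone sketch mirroring MOVE_FORWARD_ONE/PROCESS_SLASH: remove the
// backslash before a comma, space, equal sign, quote or backslash by
// shifting the remainder of the buffer one position to the left.
static size_t unescape_in_place(char *buf, size_t len) {
  for (size_t i = 1; i < len; ++i) {
    char c = buf[i];
    if (buf[i - 1] == '\\' &&
        (c == ',' || c == ' ' || c == '=' || c == '"' || c == '\\')) {
      memmove(&buf[i - 1], &buf[i], len - i);
      --i;
      --len;
    }
  }
  return len;
}

int main(void) {
  char key[] = "device\\ name\\=1";
  size_t n = unescape_in_place(key, strlen(key));
  printf("%.*s\n", (int)n, key);  // prints: device name=1
  return 0;
}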
|
||||
|
||||
#define BINARY_ADD_LEN 2 // a binary value arrives as "...": the 2 counts the two surrounding quotes
#define NCHAR_ADD_LEN 3  // an nchar value arrives as L"...": the 3 counts the leading L and the two quotes
|
||||
|
||||
uint8_t smlPrecisionConvert[7] = {TSDB_TIME_PRECISION_NANO, TSDB_TIME_PRECISION_HOURS, TSDB_TIME_PRECISION_MINUTES,
|
||||
TSDB_TIME_PRECISION_SECONDS, TSDB_TIME_PRECISION_MILLI, TSDB_TIME_PRECISION_MICRO,
|
||||
TSDB_TIME_PRECISION_NANO};
|
||||
|
||||
static int64_t smlParseInfluxTime(SSmlHandle *info, const char *data, int32_t len) {
|
||||
uint8_t toPrecision = info->currSTableMeta ? info->currSTableMeta->tableInfo.precision : TSDB_TIME_PRECISION_NANO;
|
||||
|
||||
if(unlikely(len == 0 || (len == 1 && data[0] == '0'))){
|
||||
return taosGetTimestampNs()/smlFactorNS[toPrecision];
|
||||
}
|
||||
|
||||
uint8_t fromPrecision = smlPrecisionConvert[info->precision];
|
||||
|
||||
int64_t ts = smlGetTimeValue(data, len, fromPrecision, toPrecision);
|
||||
if (unlikely(ts == -1)) {
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp", data);
|
||||
return -1;
|
||||
}
|
||||
return ts;
|
||||
}
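A simplified sketch (assumed, not taken from this commit) of the scaling that smlGetTimeValue performs between the precision declared for the request and the precision of the destination table; the real code additionally validates the value and rejects overflow:

#include <stdint.h>
#include <stdio.h>

// Scale a timestamp between precisions expressed as digits after the
// second (0 = seconds, 3 = ms, 6 = us, 9 = ns).
static int64_t scale_ts(int64_t ts, int fromDigits, int toDigits) {
  while (fromDigits < toDigits) { ts *= 10; ++fromDigits; }
  while (fromDigits > toDigits) { ts /= 10; --fromDigits; }
  return ts;
}

int main(void) {
  // an example nanosecond timestamp converted to milliseconds
  printf("%lld\n", (long long)scale_ts(1626006833639000000LL, 9, 3));  // 1626006833639
  return 0;
}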
|
||||
|
||||
int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) {
|
||||
if (pVal->value[0] == '"'){ // binary
|
||||
if (pVal->length >= 2 && pVal->value[pVal->length - 1] == '"') {
|
||||
pVal->type = TSDB_DATA_TYPE_BINARY;
|
||||
pVal->length -= BINARY_ADD_LEN;
|
||||
if (pVal->length > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) {
|
||||
return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
|
||||
}
|
||||
pVal->value += (BINARY_ADD_LEN - 1);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||
}
|
||||
|
||||
if(pVal->value[0] == 'l' || pVal->value[0] == 'L'){ // nchar
|
||||
if (pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"' && pVal->length >= 3){
|
||||
pVal->type = TSDB_DATA_TYPE_NCHAR;
|
||||
pVal->length -= NCHAR_ADD_LEN;
|
||||
if (pVal->length > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
|
||||
return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
|
||||
}
|
||||
pVal->value += (NCHAR_ADD_LEN - 1);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||
}
|
||||
|
||||
if (pVal->value[0] == 't' || pVal->value[0] == 'T'){
|
||||
if(pVal->length == 1 || (pVal->length == 4 && (pVal->value[1] == 'r' || pVal->value[1] == 'R')
|
||||
&& (pVal->value[2] == 'u' || pVal->value[2] == 'U')
|
||||
&& (pVal->value[3] == 'e' || pVal->value[3] == 'E'))){
|
||||
pVal->i = TSDB_TRUE;
|
||||
pVal->type = TSDB_DATA_TYPE_BOOL;
|
||||
pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||
}
|
||||
|
||||
if (pVal->value[0] == 'f' || pVal->value[0] == 'F'){
|
||||
if(pVal->length == 1 || (pVal->length == 5 && (pVal->value[1] == 'a' || pVal->value[1] == 'A')
|
||||
&& (pVal->value[2] == 'l' || pVal->value[2] == 'L')
|
||||
&& (pVal->value[3] == 's' || pVal->value[3] == 'S')
|
||||
&& (pVal->value[4] == 'e' || pVal->value[4] == 'E'))){
|
||||
pVal->i = TSDB_FALSE;
|
||||
pVal->type = TSDB_DATA_TYPE_BOOL;
|
||||
pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||
}
|
||||
|
||||
// number
|
||||
if (smlParseNumber(pVal, msg)) {
|
||||
pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||
}
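To make the classification above concrete, a standalone sketch (simplified: no length limits, number parsing not shown) of how line-protocol field values map to types — double-quoted strings to binary, L"..." to nchar, t/true and f/false to bool, everything else handed to the number parser:

#include <stdio.h>
#include <string.h>
#include <strings.h>

// Simplified sketch of the value classification rules implemented above.
static const char *classify(const char *v) {
  size_t n = strlen(v);
  if (n >= 2 && v[0] == '"' && v[n - 1] == '"') return "BINARY";
  if (n >= 3 && (v[0] == 'l' || v[0] == 'L') && v[1] == '"' && v[n - 1] == '"') return "NCHAR";
  if (strcasecmp(v, "t") == 0 || strcasecmp(v, "true") == 0) return "BOOL(true)";
  if (strcasecmp(v, "f") == 0 || strcasecmp(v, "false") == 0) return "BOOL(false)";
  return "NUMBER";  // delegated to smlParseNumber in the real code
}

int main(void) {
  const char *samples[] = {"\"passit\"", "L\"label\"", "T", "false", "3.14"};
  for (int i = 0; i < 5; ++i) {
    printf("%-10s -> %s\n", samples[i], classify(samples[i]));
  }
  return 0;
}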
|
||||
|
||||
static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd,
|
||||
SSmlLineInfo* currElement, bool isSameMeasure, bool isSameCTable){
|
||||
if(isSameCTable){
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int cnt = 0;
|
||||
SArray *preLineKV = info->preLineTagKV;
|
||||
SArray *maxKVs = info->maxTagKVs;
|
||||
bool isSuperKVInit = true;
|
||||
SArray *superKV = NULL;
|
||||
if(info->dataFormat){
|
||||
if(unlikely(!isSameMeasure)){
|
||||
SSmlSTableMeta *sMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, currElement->measure, currElement->measureLen, NULL);
|
||||
|
||||
if(unlikely(sMeta == NULL)){
|
||||
STableMeta * pTableMeta = smlGetMeta(info, currElement->measure, currElement->measureLen);
|
||||
if(pTableMeta == NULL){
|
||||
info->dataFormat = false;
|
||||
info->reRun = true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
sMeta = smlBuildSTableMeta(info->dataFormat);
|
||||
sMeta->tableMeta = pTableMeta;
|
||||
nodeListSet(&info->superTables, currElement->measure, currElement->measureLen, sMeta, NULL);
|
||||
}
|
||||
info->currSTableMeta = sMeta->tableMeta;
|
||||
superKV = sMeta->tags;
|
||||
|
||||
if(unlikely(taosArrayGetSize(superKV) == 0)){
|
||||
isSuperKVInit = false;
|
||||
}
|
||||
taosArraySetSize(maxKVs, 0);
|
||||
}
|
||||
}else{
|
||||
taosArraySetSize(maxKVs, 0);
|
||||
}
|
||||
taosArraySetSize(preLineKV, 0);
|
||||
|
||||
while (*sql < sqlEnd) {
|
||||
if (unlikely(IS_SPACE(*sql))) {
|
||||
break;
|
||||
}
|
||||
|
||||
bool hasSlash = false;
|
||||
// parse key
|
||||
const char *key = *sql;
|
||||
size_t keyLen = 0;
|
||||
while (*sql < sqlEnd) {
|
||||
if (unlikely(IS_COMMA(*sql))) {
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql);
|
||||
return TSDB_CODE_SML_INVALID_DATA;
|
||||
}
|
||||
if (unlikely(IS_EQUAL(*sql))) {
|
||||
keyLen = *sql - key;
|
||||
(*sql)++;
|
||||
break;
|
||||
}
|
||||
if(!hasSlash){
|
||||
hasSlash = (*(*sql) == SLASH);
|
||||
}
|
||||
(*sql)++;
|
||||
}
|
||||
if(unlikely(hasSlash)) {
|
||||
PROCESS_SLASH(key, keyLen)
|
||||
}
|
||||
|
||||
if (unlikely(IS_INVALID_COL_LEN(keyLen))) {
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "invalid key or key is too long than 64", key);
|
||||
return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH;
|
||||
}
|
||||
|
||||
// parse value
|
||||
const char *value = *sql;
|
||||
size_t valueLen = 0;
|
||||
hasSlash = false;
|
||||
while (*sql < sqlEnd) {
|
||||
// parse value
|
||||
if (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql))) {
|
||||
break;
|
||||
}else if (unlikely(IS_EQUAL(*sql))) {
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql);
|
||||
return TSDB_CODE_SML_INVALID_DATA;
|
||||
}
|
||||
|
||||
if(!hasSlash){
|
||||
hasSlash = (*(*sql) == SLASH);
|
||||
}
|
||||
|
||||
(*sql)++;
|
||||
}
|
||||
valueLen = *sql - value;
|
||||
|
||||
if (unlikely(valueLen == 0)) {
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "invalid value", value);
|
||||
return TSDB_CODE_SML_INVALID_DATA;
|
||||
}
|
||||
|
||||
if(unlikely(hasSlash)) {
|
||||
PROCESS_SLASH(value, valueLen)
|
||||
}
|
||||
|
||||
if (unlikely(valueLen > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)) {
|
||||
return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
|
||||
}
|
||||
|
||||
SSmlKv kv = {.key = key, .keyLen = keyLen, .type = TSDB_DATA_TYPE_NCHAR, .value = value, .length = valueLen};
|
||||
if(info->dataFormat){
|
||||
if(unlikely(cnt + 1 > info->currSTableMeta->tableInfo.numOfTags)){
|
||||
info->dataFormat = false;
|
||||
info->reRun = true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if(isSameMeasure){
|
||||
if(unlikely(cnt >= taosArrayGetSize(maxKVs))) {
|
||||
info->dataFormat = false;
|
||||
info->reRun = true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
SSmlKv *maxKV = (SSmlKv *)taosArrayGet(maxKVs, cnt);
|
||||
if(unlikely(kv.length > maxKV->length)){
|
||||
maxKV->length = kv.length;
|
||||
SSmlSTableMeta *tableMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, currElement->measure, currElement->measureLen, NULL);
|
||||
if(unlikely(NULL == tableMeta)){
|
||||
uError("SML:0x%" PRIx64 " NULL == tableMeta", info->id);
|
||||
return TSDB_CODE_SML_INTERNAL_ERROR;
|
||||
}
|
||||
|
||||
SSmlKv *oldKV = (SSmlKv *)taosArrayGet(tableMeta->tags, cnt);
|
||||
oldKV->length = kv.length;
|
||||
info->needModifySchema = true;
|
||||
}
|
||||
if(unlikely(!IS_SAME_KEY)){
|
||||
info->dataFormat = false;
|
||||
info->reRun = true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
}else{
|
||||
if(isSuperKVInit){
|
||||
if(unlikely(cnt >= taosArrayGetSize(superKV))) {
|
||||
info->dataFormat = false;
|
||||
info->reRun = true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
SSmlKv *maxKV = (SSmlKv *)taosArrayGet(superKV, cnt);
|
||||
if(unlikely(kv.length > maxKV->length)) {
|
||||
maxKV->length = kv.length;
|
||||
}else{
|
||||
kv.length = maxKV->length;
|
||||
}
|
||||
info->needModifySchema = true;
|
||||
|
||||
if(unlikely(!IS_SAME_KEY)){
|
||||
info->dataFormat = false;
|
||||
info->reRun = true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
}else{
|
||||
taosArrayPush(superKV, &kv);
|
||||
}
|
||||
taosArrayPush(maxKVs, &kv);
|
||||
}
|
||||
}else{
|
||||
taosArrayPush(maxKVs, &kv);
|
||||
}
|
||||
taosArrayPush(preLineKV, &kv);
|
||||
|
||||
cnt++;
|
||||
if(IS_SPACE(*sql)){
|
||||
break;
|
||||
}
|
||||
(*sql)++;
|
||||
}
|
||||
|
||||
void* oneTable = nodeListGet(info->childTables, currElement->measure, currElement->measureTagsLen, NULL);
|
||||
if ((oneTable != NULL)) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
SSmlTableInfo *tinfo = smlBuildTableInfo(1, currElement->measure, currElement->measureLen);
|
||||
if (unlikely(!tinfo)) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
tinfo->tags = taosArrayDup(preLineKV, NULL);
|
||||
|
||||
smlSetCTableName(tinfo);
|
||||
tinfo->uid = info->uid++;
|
||||
if(info->dataFormat) {
|
||||
info->currSTableMeta->uid = tinfo->uid;
|
||||
tinfo->tableDataCtx = smlInitTableDataCtx(info->pQuery, info->currSTableMeta);
|
||||
if(tinfo->tableDataCtx == NULL){
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "smlInitTableDataCtx error", NULL);
|
||||
return TSDB_CODE_SML_INVALID_DATA;
|
||||
}
|
||||
}
|
||||
|
||||
nodeListSet(&info->childTables, currElement->measure, currElement->measureTagsLen, tinfo, NULL);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd,
|
||||
SSmlLineInfo* currElement, bool isSameMeasure, bool isSameCTable){
|
||||
int cnt = 0;
|
||||
SArray *preLineKV = info->preLineColKV;
|
||||
bool isSuperKVInit = true;
|
||||
SArray *superKV = NULL;
|
||||
if(info->dataFormat){
|
||||
if(unlikely(!isSameCTable)){
|
||||
SSmlTableInfo *oneTable = (SSmlTableInfo *)nodeListGet(info->childTables, currElement->measure, currElement->measureTagsLen, NULL);
|
||||
if (unlikely(oneTable == NULL)) {
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "child table should inside", currElement->measure);
|
||||
return TSDB_CODE_SML_INVALID_DATA;
|
||||
}
|
||||
info->currTableDataCtx = oneTable->tableDataCtx;
|
||||
}
|
||||
|
||||
if(unlikely(!isSameMeasure)){
|
||||
SSmlSTableMeta *sMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, currElement->measure, currElement->measureLen, NULL);
|
||||
|
||||
if(unlikely(sMeta == NULL)){
|
||||
STableMeta * pTableMeta = smlGetMeta(info, currElement->measure, currElement->measureLen);
|
||||
if(pTableMeta == NULL){
|
||||
info->dataFormat = false;
|
||||
info->reRun = true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
sMeta = smlBuildSTableMeta(info->dataFormat);
|
||||
sMeta->tableMeta = pTableMeta;
|
||||
nodeListSet(&info->superTables, currElement->measure, currElement->measureLen, sMeta, NULL);
|
||||
}
|
||||
info->currSTableMeta = sMeta->tableMeta;
|
||||
superKV = sMeta->cols;
|
||||
if(unlikely(taosArrayGetSize(superKV) == 0)){
|
||||
isSuperKVInit = false;
|
||||
}
|
||||
taosArraySetSize(preLineKV, 0);
|
||||
}
|
||||
}
|
||||
|
||||
while (*sql < sqlEnd) {
|
||||
if (unlikely(IS_SPACE(*sql))) {
|
||||
break;
|
||||
}
|
||||
|
||||
bool hasSlash = false;
|
||||
// parse key
|
||||
const char *key = *sql;
|
||||
size_t keyLen = 0;
|
||||
while (*sql < sqlEnd) {
|
||||
if (unlikely(IS_COMMA(*sql))) {
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql);
|
||||
return TSDB_CODE_SML_INVALID_DATA;
|
||||
}
|
||||
if (unlikely(IS_EQUAL(*sql))) {
|
||||
keyLen = *sql - key;
|
||||
(*sql)++;
|
||||
break;
|
||||
}
|
||||
if(!hasSlash){
|
||||
hasSlash = (*(*sql) == SLASH);
|
||||
}
|
||||
(*sql)++;
|
||||
}
|
||||
if(unlikely(hasSlash)) {
|
||||
PROCESS_SLASH(key, keyLen)
|
||||
}
|
||||
|
||||
if (unlikely(IS_INVALID_COL_LEN(keyLen))) {
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "invalid key or key is too long than 64", key);
|
||||
return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH;
|
||||
}
|
||||
|
||||
// parse value
|
||||
const char *value = *sql;
|
||||
size_t valueLen = 0;
|
||||
hasSlash = false;
|
||||
bool isInQuote = false;
|
||||
while (*sql < sqlEnd) {
|
||||
// parse value
|
||||
if (unlikely(IS_QUOTE(*sql))) {
|
||||
isInQuote = !isInQuote;
|
||||
(*sql)++;
|
||||
continue;
|
||||
}
|
||||
if (!isInQuote){
|
||||
if (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql))) {
|
||||
break;
|
||||
} else if (unlikely(IS_EQUAL(*sql))) {
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql);
|
||||
return TSDB_CODE_SML_INVALID_DATA;
|
||||
}
|
||||
}
|
||||
if(!hasSlash){
|
||||
hasSlash = (*(*sql) == SLASH);
|
||||
}
|
||||
|
||||
(*sql)++;
|
||||
}
|
||||
valueLen = *sql - value;
|
||||
|
||||
if (unlikely(isInQuote)) {
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "only one quote", value);
|
||||
return TSDB_CODE_SML_INVALID_DATA;
|
||||
}
|
||||
if (unlikely(valueLen == 0)) {
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "invalid value", value);
|
||||
return TSDB_CODE_SML_INVALID_DATA;
|
||||
}
|
||||
if(unlikely(hasSlash)) {
|
||||
PROCESS_SLASH(value, valueLen)
|
||||
}
|
||||
|
||||
SSmlKv kv = {.key = key, .keyLen = keyLen, .value = value, .length = valueLen};
|
||||
int32_t ret = smlParseValue(&kv, &info->msgBuf);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "smlParseValue error", value);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if(info->dataFormat){
|
||||
// cnt starts at 0 and column 0 is reserved for the timestamp, so compare cnt + 2
|
||||
if(unlikely(cnt + 2 > info->currSTableMeta->tableInfo.numOfColumns)){
|
||||
info->dataFormat = false;
|
||||
info->reRun = true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
// bind data
|
||||
ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kv, cnt + 1);
|
||||
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
|
||||
uError("smlBuildCol error, retry");
|
||||
info->dataFormat = false;
|
||||
info->reRun = true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if(isSameMeasure){
|
||||
if(cnt >= taosArrayGetSize(preLineKV)) {
|
||||
info->dataFormat = false;
|
||||
info->reRun = true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
SSmlKv *maxKV = (SSmlKv *)taosArrayGet(preLineKV, cnt);
|
||||
if(kv.type != maxKV->type){
|
||||
info->dataFormat = false;
|
||||
info->reRun = true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if(unlikely(IS_VAR_DATA_TYPE(kv.type) && kv.length > maxKV->length)){
|
||||
maxKV->length = kv.length;
|
||||
SSmlSTableMeta *tableMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, currElement->measure, currElement->measureLen, NULL);
|
||||
if(unlikely(NULL == tableMeta)){
|
||||
uError("SML:0x%" PRIx64 " NULL == tableMeta", info->id);
|
||||
return TSDB_CODE_SML_INTERNAL_ERROR;
|
||||
}
|
||||
|
||||
SSmlKv *oldKV = (SSmlKv *)taosArrayGet(tableMeta->cols, cnt);
|
||||
oldKV->length = kv.length;
|
||||
info->needModifySchema = true;
|
||||
}
|
||||
if(unlikely(!IS_SAME_KEY)){
|
||||
info->dataFormat = false;
|
||||
info->reRun = true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
}else{
|
||||
if(isSuperKVInit){
|
||||
if(unlikely(cnt >= taosArrayGetSize(superKV))) {
|
||||
info->dataFormat = false;
|
||||
info->reRun = true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
SSmlKv *maxKV = (SSmlKv *)taosArrayGet(superKV, cnt);
|
||||
if(unlikely(kv.type != maxKV->type)){
|
||||
info->dataFormat = false;
|
||||
info->reRun = true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if(IS_VAR_DATA_TYPE(kv.type)){
|
||||
if(kv.length > maxKV->length) {
|
||||
maxKV->length = kv.length;
|
||||
}else{
|
||||
kv.length = maxKV->length;
|
||||
}
|
||||
info->needModifySchema = true;
|
||||
}
|
||||
if(unlikely(!IS_SAME_KEY)){
|
||||
info->dataFormat = false;
|
||||
info->reRun = true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
}else{
|
||||
taosArrayPush(superKV, &kv);
|
||||
}
|
||||
taosArrayPush(preLineKV, &kv);
|
||||
}
|
||||
}else{
|
||||
if(currElement->colArray == NULL){
|
||||
currElement->colArray = taosArrayInit(16, sizeof(SSmlKv));
|
||||
taosArraySetSize(currElement->colArray, 1);
|
||||
}
|
||||
taosArrayPush(currElement->colArray, &kv);  // slot 0 was reserved above for the timestamp
|
||||
}
|
||||
|
||||
cnt++;
|
||||
if(IS_SPACE(*sql)){
|
||||
break;
|
||||
}
|
||||
(*sql)++;
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements) {
|
||||
if (!sql) return TSDB_CODE_SML_INVALID_DATA;
|
||||
JUMP_SPACE(sql, sqlEnd)
|
||||
if (unlikely(*sql == COMMA)) return TSDB_CODE_SML_INVALID_DATA;
|
||||
elements->measure = sql;
|
||||
|
||||
// parse measure
|
||||
while (sql < sqlEnd) {
|
||||
if (unlikely((sql != elements->measure) && IS_SLASH_LETTER(sql))) {
|
||||
MOVE_FORWARD_ONE(sql, sqlEnd - sql);
|
||||
sqlEnd--;
|
||||
continue;
|
||||
}
|
||||
if (unlikely(IS_COMMA(sql))) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (unlikely(IS_SPACE(sql))) {
|
||||
break;
|
||||
}
|
||||
sql++;
|
||||
}
|
||||
elements->measureLen = sql - elements->measure;
|
||||
if (unlikely(IS_INVALID_TABLE_LEN(elements->measureLen))) {
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "measure is empty or too large than 192", NULL);
|
||||
return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
|
||||
}
|
||||
|
||||
// to get measureTagsLen before
|
||||
const char* tmp = sql;
|
||||
while (tmp < sqlEnd){
|
||||
if (unlikely(IS_SPACE(tmp))) {
|
||||
break;
|
||||
}
|
||||
tmp++;
|
||||
}
|
||||
elements->measureTagsLen = tmp - elements->measure;
|
||||
|
||||
bool isSameCTable = false;
|
||||
bool isSameMeasure = false;
|
||||
if(IS_SAME_CHILD_TABLE){
|
||||
isSameCTable = true;
|
||||
isSameMeasure = true;
|
||||
}else if(info->dataFormat) {
|
||||
isSameMeasure = IS_SAME_SUPER_TABLE;
|
||||
}
|
||||
// parse tag
|
||||
if (*sql == COMMA) sql++;
|
||||
elements->tags = sql;
|
||||
|
||||
int ret = smlParseTagKv(info, &sql, sqlEnd, elements, isSameMeasure, isSameCTable);
|
||||
if(unlikely(ret != TSDB_CODE_SUCCESS)){
|
||||
return ret;
|
||||
}
|
||||
if(unlikely(info->reRun)){
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
sql = elements->measure + elements->measureTagsLen;
|
||||
elements->tagsLen = sql - elements->tags;
|
||||
|
||||
// parse cols
|
||||
JUMP_SPACE(sql, sqlEnd)
|
||||
elements->cols = sql;
|
||||
|
||||
ret = smlParseColKv(info, &sql, sqlEnd, elements, isSameMeasure, isSameCTable);
|
||||
if(unlikely(ret != TSDB_CODE_SUCCESS)){
|
||||
return ret;
|
||||
}
|
||||
|
||||
if(unlikely(info->reRun)){
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
elements->colsLen = sql - elements->cols;
|
||||
if (unlikely(elements->colsLen == 0)) {
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "cols is empty", NULL);
|
||||
return TSDB_CODE_SML_INVALID_DATA;
|
||||
}
|
||||
|
||||
// parse timestamp
|
||||
JUMP_SPACE(sql, sqlEnd)
|
||||
elements->timestamp = sql;
|
||||
while (sql < sqlEnd) {
|
||||
if (unlikely(isspace(*sql))) {
|
||||
break;
|
||||
}
|
||||
sql++;
|
||||
}
|
||||
elements->timestampLen = sql - elements->timestamp;
|
||||
|
||||
int64_t ts = smlParseInfluxTime(info, elements->timestamp, elements->timestampLen);
|
||||
if (unlikely(ts <= 0)) {
|
||||
uError("SML:0x%" PRIx64 " smlParseTS error:%" PRId64, info->id, ts);
|
||||
return TSDB_CODE_INVALID_TIMESTAMP;
|
||||
}
|
||||
// add ts to column 0 of the row
|
||||
SSmlKv kv = { .key = TS, .keyLen = TS_LEN, .type = TSDB_DATA_TYPE_TIMESTAMP, .i = ts, .length = (size_t)tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes};
|
||||
if(info->dataFormat){
|
||||
smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kv, 0);
|
||||
smlBuildRow(info->currTableDataCtx);
|
||||
clearColValArray(info->currTableDataCtx->pValues);
|
||||
}else{
|
||||
taosArraySet(elements->colArray, 0, &kv);
|
||||
}
|
||||
info->preLine = *elements;
|
||||
|
||||
return ret;
|
||||
}
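For reference, a minimal sketch (not part of this commit) of the public entry point this parser ultimately serves: inserting InfluxDB line-protocol text through taos_schemaless_insert. The connection parameters and the sample line below are placeholders.

#include <stdio.h>
#include "taos.h"

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "test", 6030);
  if (conn == NULL) {
    fprintf(stderr, "connect failed\n");
    return 1;
  }

  // measurement,tag_set field_set timestamp
  char *lines[] = {
      "meters,location=California.SanFrancisco,groupid=2 "
      "current=10.3,voltage=219,phase=0.31 1626006833639000000"};

  TAOS_RES *res = taos_schemaless_insert(conn, lines, 1, TSDB_SML_LINE_PROTOCOL,
                                         TSDB_SML_TIMESTAMP_NANO_SECONDS);
  if (taos_errno(res) != 0) {
    fprintf(stderr, "schemaless insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  taos_close(conn);
  return 0;
}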
|
||||
|
Some files were not shown because too many files have changed in this diff.