Merge branch '3.0' into doc/analysis

This commit is contained in:
Haojun Liao 2025-03-22 22:29:10 +08:00 committed by GitHub
commit 1d7c68766b
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
54 changed files with 896 additions and 271 deletions

View File

@ -74,6 +74,11 @@ jobs:
snappy \
zlib
- name: prepare install path
run: |
sudo mkdir -p /usr/local/lib
sudo mkdir -p /usr/local/include
- name: Build and install TDengine
run: |
mkdir debug && cd debug

View File

@ -6,6 +6,7 @@ on:
- 'main'
- '3.0'
- '3.1'
- '3.3.6'
paths-ignore:
- 'packaging/**'
- 'docs/**'

View File

@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG 3.0
GIT_TAG 3.3.6
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

View File

@ -532,6 +532,24 @@ These fields are present only when "windowType" is "Count".
#### Fields for Window Invalidation
Due to scenarios such as out-of-order data, updates, or deletions during stream computing, windows that have already been generated might be removed or their results might need to be recalculated. In such cases, a notification with the eventType "WINDOW_INVALIDATION" is sent to indicate which windows have been invalidated.
For events with "eventType" as "WINDOW_INVALIDATION", the following fields are included:
1. "windowStart": A long integer timestamp representing the start time of the window.
1. "windowEnd": A long integer timestamp representing the end time of the window.
## Support for Virtual Tables in Stream Computing
Starting with v3.3.6.0, stream computing can use virtual tables—including virtual regular tables, virtual sub-tables, and virtual super tables—as data sources for computation. The syntax is identical to that for non-virtual tables.
However, because the behavior of virtual tables differs from that of non-virtual tables, the following restrictions apply when using them in stream computing (a sketch of a conforming stream definition follows the list):
1. The schema of virtual regular tables/virtual sub-tables involved in stream computing cannot be modified.
1. During stream computing, if the data source corresponding to a column in a virtual table is changed, the stream computation will not pick up the change; it will still read from the old data source.
1. During stream computing, if the original table corresponding to a column in a virtual table is deleted and later a new table with the same name and a column with the same name is created, the stream computation will not read data from the new table.
1. The watermark for stream computing must be 0; otherwise, an error will occur during creation.
1. If the data source for stream computing is a virtual super table, sub-tables that are added after the stream computing task starts will not participate in the computation.
1. The timestamps of different underlying tables in a virtual table may not be completely consistent; merging the data might produce null values, and interpolation is currently not supported.
1. Out-of-order data, updates, or deletions are not handled. In other words, when creating a stream, you cannot specify `ignore update 0` or `ignore expired 0`; otherwise, an error will be reported.
1. Historical data computation is not supported. That is, when creating a stream, you cannot specify `fill_history 1`; otherwise, an error will be reported.
1. The trigger modes MAX_DELAY, CONTINUOUS_WINDOW_CLOSE, and FORCE_WINDOW_CLOSE are not supported.
1. The COUNT_WINDOW type is not supported.
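Taken together, these restrictions mean a stream over a virtual table must keep expired- and updated-data handling disabled, use a zero watermark, and use a supported trigger mode. The following is a minimal sketch under those constraints, assuming a virtual super table `meters_v` with a `current` column (all names here are illustrative, not from the original document):
```sql
CREATE STREAM current_avg_vs
  TRIGGER WINDOW_CLOSE  -- MAX_DELAY, FORCE_WINDOW_CLOSE, CONTINUOUS_WINDOW_CLOSE are not allowed
  WATERMARK 0s          -- the watermark must be 0 for virtual-table sources
  IGNORE EXPIRED 1      -- ignore expired 0 would be rejected
  IGNORE UPDATE 1       -- ignore update 0 would be rejected
  INTO current_avg_out AS
    SELECT _wstart, AVG(current) AS avg_current
    FROM meters_v
    PARTITION BY tbname
    INTERVAL(1m);
```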

View File

@ -584,4 +584,4 @@ This document details the server error codes that may be encountered when using
| 0x80006204 | Virtual table not support decimal type | Create virtual table using decimal type | create virtual table without using decimal type |
| 0x80006205 | Virtual table not support in STMT query and STMT insert | Use virtual table in stmt query and stmt insert | do not use virtual table in stmt query and insert |
| 0x80006206 | Virtual table not support in Topic | Use virtual table in topic | do not use virtual table in topic |
| 0x80006206 | Virtual super table query not support origin table from different databases | A virtual super table's child tables reference origin tables in different databases | make sure all origin tables referenced by a virtual super table's child tables are in the same database |
| 0x80006207 | Virtual super table query not support origin table from different databases | A virtual super table's child tables reference origin tables in different databases | make sure all origin tables referenced by a virtual super table's child tables are in the same database |

View File

@ -182,7 +182,7 @@ def test_json_to_taos(consumer: Consumer):
'voltage': 105,
'phase': 0.02027, }),
partition=1, topic='test', serialized_key_size=None, serialized_header_size=None,
serialized_value_size=None, timestamp=time.time(), timestamp_type=None),
serialized_value_size=None, timestamp=time.time(), timestamp_type=None, leader_epoch=0),
ConsumerRecord(checksum=None, headers=None, offset=1, key=None,
value=json.dumps({'table_name': 'd1',
'ts': '2022-12-06 15:13:39.643',
@ -190,7 +190,7 @@ def test_json_to_taos(consumer: Consumer):
'voltage': 102,
'phase': 0.02027, }),
partition=1, topic='test', serialized_key_size=None, serialized_header_size=None,
serialized_value_size=None, timestamp=time.time(), timestamp_type=None),
serialized_value_size=None, timestamp=time.time(), timestamp_type=None, leader_epoch=0),
]
]
@ -203,11 +203,11 @@ def test_line_to_taos(consumer: Consumer):
ConsumerRecord(checksum=None, headers=None, offset=1, key=None,
value="d0 values('2023-01-01 00:00:00.001', 3.49, 109, 0.02737)".encode('utf-8'),
partition=1, topic='test', serialized_key_size=None, serialized_header_size=None,
serialized_value_size=None, timestamp=time.time(), timestamp_type=None),
serialized_value_size=None, timestamp=time.time(), timestamp_type=None, leader_epoch=0),
ConsumerRecord(checksum=None, headers=None, offset=1, key=None,
value="d1 values('2023-01-01 00:00:00.002', 6.19, 112, 0.09171)".encode('utf-8'),
partition=1, topic='test', serialized_key_size=None, serialized_header_size=None,
serialized_value_size=None, timestamp=time.time(), timestamp_type=None),
serialized_value_size=None, timestamp=time.time(), timestamp_type=None, leader_epoch=0),
]
]
consumer._line_to_taos(messages=records)

View File

@ -77,6 +77,22 @@ toc_max_heading_level: 4
![Data model diagram](./data-model.png)
### Virtual Tables
The "one table per device" design solves most time-series data management and analysis problems in industrial and IoT scenarios, but more complex scenarios challenge it with device complexity. The root of that complexity is that a device cannot simply be described or managed with one data collection point (or one group of them), while business analysis often needs to combine the data of multiple collection points or groups. Take a car or a wind turbine: the device as a whole contains a very large number of sensors (data collection points) whose outputs and sampling frequencies vary widely. A super table can describe only one kind of sensor, so when data from several sensors must be combined for analysis, multi-level join queries are the only option, and these often cause usability and performance problems.
To solve this problem, TDengine introduces the concept of the virtual table (VTable). A virtual table stores no actual data yet can be used for analysis and computation; its data comes from other sub-tables and regular tables that actually store data, and is generated by sorting, aligning, and merging their columns by timestamp. Like real tables, virtual tables come in three kinds: virtual super tables, virtual sub-tables, and virtual regular tables. A virtual super table can be the complete set of data needed to analyze one device or one group; each virtual sub-table can reference the same or different columns as needed, so virtual tables can be defined flexibly according to business needs ("a thousand tables, a thousand faces"). Virtual tables cannot have data written to or deleted from them; querying them is essentially the same as querying real tables, and any query on virtual super tables, virtual sub-tables, or virtual regular tables is supported. The only difference is that a virtual table's data is generated dynamically at query time: only the columns referenced in a query are merged into the virtual table, so the same virtual table may present different data in different queries.
The main features of virtual super tables are:
1. Column selection and composition <br />
Users can select specific columns from multiple source tables and combine them as needed into one virtual table, forming a unified data view.
2. Timestamp-based alignment <br />
Data is aligned on timestamps: if several tables have data at the same timestamp, the corresponding column values are combined into one row; if some tables have no data at that timestamp, the corresponding columns are filled with NULL.
3. Dynamic updates <br />
A virtual table updates automatically as its source tables' data changes, keeping the data fresh. A virtual table needs no actual storage; its computation happens dynamically at query time.
With virtual tables, TDengine can now conveniently manage larger and more complex device data. No matter how each collection point is modeled (single column or multiple columns), and no matter whether the collection points' data lives in one database or several, virtual sub-tables can reference data sources arbitrarily across databases and tables, and virtual super tables enable cross-device, cross-analysis aggregation. "One table per device" thus truly becomes reality.
### Databases
A database in TDengine manages a collection of tables. A running TDengine instance can contain multiple databases, each with its own storage strategy. Different types of data collection points usually have different data characteristics, such as sampling frequency, retention period, replica count, and data block size. To let TDengine run at peak efficiency in all scenarios, it is recommended to create super tables with different data characteristics in different databases.
@ -93,6 +109,7 @@ toc_max_heading_level: 4
When querying data, the TDengine client automatically converts the stored UTC timestamps to local time according to the application's current time zone setting, so users in different time zones always see the correct times.
## Data Modeling
This section uses smart meters as an example to briefly show how to create databases, super tables, and tables in TDengine with SQL.
@ -215,3 +232,177 @@ TDengine 支持灵活的数据模型设计,包括多列模型和单列模型
Although TDengine recommends the multi-column model because it is usually more efficient for writes and storage, the single-column model can be more suitable in some scenarios. For example, when the kinds of quantities collected at a data collection point change frequently, the multi-column model would require frequent changes to the super table's schema, which complicates the application. In that case the single-column model simplifies application design and management, since each quantity's super table can be managed and extended independently.
In short, TDengine offers flexible data-model options; choose the model that best fits your needs and scenario to balance performance and manageability.
### Creating Virtual Tables
Whichever model you choose, single-column or multi-column, TDengine can compute across tables through virtual tables. Taking smart meters as the example, two virtual table usage scenarios are introduced here:
1. Single-source, multi-dimensional time-series aggregation
2. Cross-source metric comparison analysis
#### Single-Source, Multi-Dimensional Time-Series Aggregation
In this scenario, "single source" means not a single physical table but multiple single-column time-series tables from the **same data collection point**. The data was split into single-column tables because of business requirements or other constraints, but stays logically consistent through device tags and a shared time base. The virtual table's role here is to restore data that was split "vertically" within a collection point to its complete "horizontal" state.
For example, suppose the single-column model was used and three super tables were created for the three quantities current, voltage, and phase. A virtual table can then aggregate these three quantities into one table for unified querying and analysis.
The SQL for creating the single-column super tables is as follows:
```sql
CREATE STABLE current_stb (
ts timestamp,
current float
) TAGS (
device_id varchar(64),
location varchar(64),
group_id int
);
CREATE STABLE voltage_stb (
ts timestamp,
voltage int
) TAGS (
device_id varchar(64),
location varchar(64),
group_id int
);
CREATE STABLE phase_stb (
ts timestamp,
phase float
) TAGS (
device_id varchar(64),
location varchar(64),
group_id int
);
```
Suppose there are four devices, d1001, d1002, d1003, and d1004. Create sub-tables for each device's current, voltage, and phase with the following SQL:
```sql
create table current_d1001 using current_stb(device_id, location, group_id) tags("d1001", "California.SanFrancisco", 2);
create table current_d1002 using current_stb(device_id, location, group_id) tags("d1002", "California.SanFrancisco", 3);
create table current_d1003 using current_stb(device_id, location, group_id) tags("d1003", "California.LosAngeles", 3);
create table current_d1004 using current_stb(device_id, location, group_id) tags("d1004", "California.LosAngeles", 2);
create table voltage_d1001 using voltage_stb(device_id, location, group_id) tags("d1001", "California.SanFrancisco", 2);
create table voltage_d1002 using voltage_stb(device_id, location, group_id) tags("d1002", "California.SanFrancisco", 3);
create table voltage_d1003 using voltage_stb(device_id, location, group_id) tags("d1003", "California.LosAngeles", 3);
create table voltage_d1004 using voltage_stb(device_id, location, group_id) tags("d1004", "California.LosAngeles", 2);
create table phase_d1001 using phase_stb(device_id, location, group_id) tags("d1001", "California.SanFrancisco", 2);
create table phase_d1002 using phase_stb(device_id, location, group_id) tags("d1002", "California.SanFrancisco", 3);
create table phase_d1003 using phase_stb(device_id, location, group_id) tags("d1003", "California.LosAngeles", 3);
create table phase_d1004 using phase_stb(device_id, location, group_id) tags("d1004", "California.LosAngeles", 2);
```
To aggregate these three quantities into one table through a virtual super table, create the virtual super table with the following SQL:
```sql
CREATE STABLE meters_v (
ts timestamp,
current float,
voltage int,
phase float
) TAGS (
location varchar(64),
group_id int
) VIRTUAL 1;
```
Then create virtual sub-tables for the four devices d1001, d1002, d1003, and d1004 with the following SQL:
```sql
CREATE VTABLE d1001_v (
current from current_d1001.current,
voltage from voltage_d1001.voltage,
phase from phase_d1001.phase
)
USING meters_v
TAGS (
"California.SanFrancisco",
2
);
CREATE VTABLE d1002_v (
current from current_d1002.current,
voltage from voltage_d1002.voltage,
phase from phase_d1002.phase
)
USING meters_v
TAGS (
"California.SanFrancisco",
3
);
CREATE VTABLE d1003_v (
current from current_d1003.current,
voltage from voltage_d1003.voltage,
phase from phase_d1003.phase
)
USING meters_v
TAGS (
"California.LosAngeles",
3
);
CREATE VTABLE d1004_v (
current from current_d1004.current,
voltage from voltage_d1004.voltage,
phase from phase_d1004.phase
)
USING meters_v
TAGS (
"California.LosAngeles",
2
);
```
Taking device d1001 as an example, suppose its current, voltage, and phase data are as follows:
![data-model-origin-table.png](data-model-origin-table.png)
Then the data in virtual table d1001_v is as follows:
| Timestamp | Current | Voltage | Phase |
|:--------------:|:-------:|:---------:|:-------:|
| 1538548685000 | 10.3 | 219 | 0.31 |
| 1538548695000 | 12.6 | 218 | 0.33 |
| 1538548696800 | 12.3 | 221 | 0.31 |
| 1538548697100 | 12.1 | 220 | NULL |
| 1538548697200 | NULL | NULL | 0.32 |
| 1538548697700 | 11.8 | NULL | NULL |
| 1538548697800 | NULL | 222 | 0.33 |
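Because only the columns referenced by a query are merged into a virtual table, selecting fewer columns can yield a different set of timestamps. A minimal sketch against d1001_v as defined above:
```sql
-- Only current is referenced, so only the timestamps present in
-- current_d1001 (that column's source table) appear in the result.
SELECT ts, current FROM d1001_v;
```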
#### Cross-Source Metric Comparison Analysis
In cross-source comparison analysis, "cross-source" means the data comes from **different data collection points**. Quantities with comparable semantics are extracted from different collection points, aligned and merged by timestamp through a virtual table, and then compared.
For example, the current data of different devices can be aggregated into one virtual table for comparative analysis of current.
Taking the analysis of the current data of devices d1001, d1002, d1003, and d1004 as an example, the SQL for creating the virtual table is as follows:
```sql
CREATE VTABLE current_v (
ts timestamp,
d1001_current float from current_d1001.current,
d1002_current float from current_d1002.current,
d1003_current float from current_d1003.current,
d1004_current float from current_d1004.current
);
```
Suppose the current data of the four devices d1001, d1002, d1003, and d1004 is as follows:
![data-model-origin-table-2.png](data-model-origin-table-2.png)
Then the data in virtual table current_v is as follows:
| Timestamp | d1001_current | d1002_current | d1003_current | d1004_current |
|:--------------:|:-------------:|:-------------:|:-------------:|:-------------:|
| 1538548685000 | 10.3 | 11.7 | 11.2 | 12.4 |
| 1538548695000 | 12.6 | 11.9 | 10.8 | 11.3 |
| 1538548696800 | 12.3 | 12.4 | 12.3 | 10.1 |
| 1538548697100 | 12.1 | NULL | 11.1 | NULL |
| 1538548697200 | NULL | 12.2 | NULL | 11.7 |
| 1538548697700 | 11.8 | 11.4 | NULL | NULL |
| 1538548697800 | NULL | NULL | 12.1 | 12.6 |
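With the four series aligned on one time axis, cross-device comparison reduces to ordinary column expressions. A minimal sketch over current_v as defined above:
```sql
-- Difference between two devices' current readings at each aligned timestamp.
SELECT ts, d1001_current - d1002_current AS current_diff FROM current_v;
```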

Binary file not shown (new image, 55 KiB)

Binary file not shown (new image, 53 KiB)

View File

@ -7,14 +7,14 @@ import fc_result from '../pic/fc-result.png';
Time-series forecasting takes a continuous period of time-series data as input and predicts the trend of the series over the next continuous time interval. The user can specify how many forecast data points to output, so the number of result rows is not fixed. For this, TDengine provides the new SQL function `FORECAST`: the base data (the historical time-series used for forecasting) is its input, and the forecast result is its output. Through `FORECAST`, users invoke the forecasting algorithms provided by an Anode.
In the following sections, the time-series table `foo` is used as the example for introducing forecasting and anomaly detection. Its schema is as follows:
| Column | Type      | Description                          |
| ------ | --------- | ------------------------------------ |
| ts     | timestamp | primary timestamp column             |
| i32    | int32     | 4-byte integer, the device's metric  |
```sql
taos> select * from foo;
ts | i32 |
========================================
@ -29,6 +29,7 @@ taos> select * from foo;
```
### Syntax
```SQL
FORECAST(column_expr, option_expr)
@ -41,21 +42,21 @@ algo=expr1
[,start=start_ts_val]
[,expr2]
"}
```
1. `column_expr`: the time-series column to forecast; only numeric columns are supported.
2. `options`: the forecast function's parameters, a string that selects the algorithm and its parameters as comma-separated `K=V` pairs. The values need no single quotes, double quotes, or escape characters, and may not contain Chinese or other wide characters. Forecasting supports the control parameters `conf`, `every`, `rows`, and `start`, described below (a usage sketch follows the notes after the table):
### Parameter Description
| Parameter | Meaning                                              | Default                                                |
| ------- | ------------------------------------------------------ | ------------------------------------------------------ |
| algo    | forecasting algorithm to use                            | holtwinters                                             |
| wncheck | white-noise data check                                  | 1; 0 disables the check                                 |
| conf    | confidence interval of the forecast, in [0, 100]        | 95                                                      |
| every   | sampling interval of the forecast output                | the sampling interval of the input data                 |
| start   | timestamp of the first forecast row                     | the last input timestamp plus one sampling interval     |
| rows    | number of forecast rows                                 | 10                                                      |
1. Forecast queries add three pseudo-columns: `_FROWTS`, the timestamp of the forecast row; `_FLOW`, the lower bound of the confidence interval; and `_FHIGH`, the upper bound of the confidence interval. For forecasting algorithms without confidence intervals, the bounds equal the forecast value.
2. Changing the parameter `START` changes the start time of the returned forecast; it does not change the forecast values, only their timestamps.
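As a usage sketch of the `K=V` options described above (parameter values are illustrative; `foo` is the example table defined earlier):
```sql
-- Holt-Winters forecast of i32: 20 forecast rows at 90% confidence.
SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=holtwinters,conf=90,rows=20")
FROM foo;
```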
@ -73,7 +74,8 @@ FROM foo;
SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=arima,alpha=95,period=10,wncheck=0")
FROM foo;
```
```sql
taos> select _flow, _fhigh, _frowts, forecast(i32) from foo;
_flow | _fhigh | _frowts | forecast(i32) |
========================================================================================
@ -89,8 +91,8 @@ taos> select _flow, _fhigh, _frowts, forecast(i32) from foo;
-1076.1566162 | 1214.4498291 | 2020-01-01 00:01:44.000 | 69 |
```
## Built-in Forecasting Algorithms
- [arima](./02-arima.md)
- [holtwinters](./03-holtwinters.md)
- CES (Complex Exponential Smoothing)
@ -110,6 +112,7 @@ taos> select _flow, _fhigh, _frowts, forecast(i32) from foo;
- TimesNet
## Algorithm Effectiveness Evaluation Tool
TDgpt provides `analytics_compare`, a tool for evaluating forecasting effectiveness. Invoked with suitable parameters, it backtests against data stored in TDengine to compare the effectiveness of different forecasting algorithms, or of the same algorithm under different parameters or trained models. Effectiveness is measured with the `MSE` and `MAE` metrics; `MAPE` will be added later.
```ini
@ -133,14 +136,10 @@ res_start_time = 1730000000000
gen_figure = true
```
When the comparison run finishes, a file named fc-results.xlsx is generated, containing each algorithm's forecast error, execution time, and invocation parameters, as in the following table:
| algorithm   | params                                                                      | MSE     | elapsed_time(ms.) |
| ----------- | --------------------------------------------------------------------------- | ------- | ----------------- |
| holtwinters | `{"trend":"add", "seasonal":"add"}`                                          | 351.622 | 125.1721          |
| arima       | `{"time_step":3600000, "start_p":0, "max_p":10, "start_q":0, "max_q":10}`    | 433.709 | 45577.9187        |
If `gen_figure` is set to true, the analysis also renders a plot of the forecast results, as shown below:

View File

@ -39,13 +39,15 @@ algo=expr1
4. Input data is checked for white noise by default; if the input is white noise, no (anomaly) window information is returned.
### Parameter Description
| Parameter | Meaning                                                        | Default |
| ------- | ---------------------------------------------------------------- | ------- |
| algo    | anomaly detection algorithm to use                                | iqr     |
| wncheck | whether to run a white-noise check on the input column; 0 or 1    | 1       |
### Example
```SQL
--- use the iqr algorithm to detect anomalies in column i32.
SELECT _wstart, _wend, SUM(i32)
@ -64,11 +66,12 @@ taos> SELECT _wstart, _wend, count(*) FROM foo ANOMAYL_WINDOW(i32);
Query OK, 1 row(s) in set (0.028946s)
```
### Built-in Anomaly Detection Algorithms
The analytics platform has six built-in anomaly detection models in three categories: [statistics-based algorithms](./02-statistics-approach.md), [data-density-based algorithms](./03-data-density.md), and [machine-learning-based algorithms](./04-machine-learning.md). If no method is specified, IQR is used by default.
### Anomaly Detection Effectiveness Comparison Tool
TDgpt provides an automated tool to compare the effectiveness of different algorithms across data sets. For anomaly detection it reports two metrics, recall and precision, to measure each algorithm's effectiveness.
By setting the options below in the configuration file (analysis.ini), you choose which anomaly detection algorithms to run, the time range of the test data, whether to render annotated result plots, and the algorithms' parameters.
Before running the comparison, the anomalies in the test data set must be labeled manually by setting the [anno_res] option to the indexes of the anomalous points. In the test set below, the 9th point is the anomaly, so the label is [9].
@ -97,12 +100,13 @@ lof={"algorithm":"auto", "n_neighbor": 3}
When the comparison finishes, a file named `ad_result.xlsx` is generated automatically. Its first sheet holds the algorithm results (shown below) with five columns: algorithm name, invocation parameters, recall, precision, and execution time.
| algorithm | params                                 | precision(%) | recall(%) | elapsed_time(ms.) |
| --------- | -------------------------------------- | ------------ | --------- | ----------------- |
| ksigma    | `{"k":2}`                              | 100          | 100       | 0.453             |
| iqr       | `{}`                                   | 100          | 100       | 2.727             |
| grubbs    | `{}`                                   | 100          | 100       | 2.811             |
| lof       | `{"algorithm":"auto", "n_neighbor":3}` | 0            | 0         | 4.660             |
If `gen_figure` is set to `true`, the comparison tool renders each algorithm's results as a plot (the figure below shows ksigma's annotated anomaly detection results).

View File

@ -22,6 +22,7 @@ table_option: {
COMMENT 'string_value'
| SMA(col_name [, col_name] ...)
| KEEP value
| VIRTUAL {0 | 1}
}
```
@ -36,6 +37,7 @@ table_option: {
4. For the use of `ENCODE` and `COMPRESS`, see [Per-Column Compression](../compress).
5. For the parameters in table_option, see [Create Table](../table).
6. The keep parameter in table_option takes effect only for super tables; for details see the [database description](02-database.md). The only difference is that a super table's keep does not affect query results immediately; it takes effect only after a compact.
7. The virtual parameter in table_option applies only to super tables: 1 creates a virtual super table, 0 a regular super table; the default is 0. When creating a virtual super table, column_definition supports only the type_name option; additional primary-key columns and compression options are not supported. A sketch follows.
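A minimal sketch of the virtual option (table and column names are illustrative):
```sql
-- VIRTUAL 1 creates a virtual super table; each column carries only a type name.
CREATE STABLE meters_v (ts TIMESTAMP, current FLOAT, voltage INT)
TAGS (group_id INT) VIRTUAL 1;
```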
## Viewing Super Tables

View File

@ -527,6 +527,24 @@ CREATE STREAM avg_current_stream FILL_HISTORY 1
#### Fields for Window Invalidation
Because stream computing can encounter out-of-order data, updates, and deletions, windows that were already generated may be removed or their results may need to be recomputed. In that case a WINDOW_INVALIDATION notification is sent to the notification address to indicate which windows were removed.
The event object carries the following fields only when eventType is WINDOW_INVALIDATION:
1. windowStart: a long-integer timestamp for the window's start time, with the same precision as the result table.
1. windowEnd: a long-integer timestamp for the window's end time, with the same precision as the result table.
## Support for Virtual Tables in Stream Computing
Starting with v3.3.6.0, stream computing can use virtual tables (including virtual regular tables, virtual sub-tables, and virtual super tables) as data sources, with exactly the same syntax as for non-virtual tables.
However, because virtual tables behave differently from non-virtual tables, the following restrictions currently apply when computing over them in streams:
1. The schema of virtual regular tables/virtual sub-tables involved in stream computing may not be changed.
1. If the data source of a virtual table column is changed while the stream is running, the change does not take effect for the stream; it keeps reading the old data source.
1. If the original table behind a virtual table column is deleted while the stream is running and a table with the same name and column is created later, the stream does not read the new table's data.
1. The stream's watermark must be 0; otherwise creation fails with an error.
1. If the stream's data source is a virtual super table, sub-tables added after the stream task starts do not participate in the computation.
1. The timestamps of a virtual table's original tables are not necessarily aligned; merged data may contain null values, and interpolation is not yet supported.
1. Out-of-order data, updates, and deletions are not handled; that is, `ignore update 0` and `ignore expired 0` may not be specified when creating the stream, or an error is reported.
1. Historical data computation is not supported; that is, `fill_history 1` may not be specified when creating the stream, or an error is reported.
1. The trigger modes MAX_DELAY, FORCE_WINDOW_CLOSE, and CONTINUOUS_WINDOW_CLOSE are not supported.
1. The window type COUNT_WINDOW is not supported.

View File

@ -151,18 +151,19 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
Provides information about user-created super tables.
| #  | **Column Name** | **Data Type** | **Description**                                                                                              |
|----|:-------------:|---------------|----------------------------------------------------------------------------------------------------------------|
| 1  | stable_name   | VARCHAR(192)  | super table name                                                                                                 |
| 2  | db_name       | VARCHAR(64)   | name of the database the super table belongs to                                                                  |
| 3  | create_time   | TIMESTAMP     | creation time                                                                                                    |
| 4  | columns       | INT           | number of columns                                                                                                |
| 5  | tags          | INT           | number of tags. Note that `tags` is a TDengine keyword and must be escaped with ` when used as a column name.    |
| 6  | last_update   | TIMESTAMP     | last update time                                                                                                 |
| 7  | table_comment | VARCHAR(1024) | table comment                                                                                                    |
| 8  | watermark     | VARCHAR(64)   | window close time. Note that `watermark` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 9  | max_delay     | VARCHAR(64)   | maximum delay for pushing computed results. Note that `max_delay` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 10 | rollup        | VARCHAR(128)  | rollup aggregate function. Note that `rollup` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 11 | virtual       | BOOL          | whether the super table is a virtual super table                                                                 |
## INS_TABLES
@ -194,17 +195,18 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
## INS_COLUMNS
| #  | **Column Name** | **Data Type** | **Description**                                                                                            |
|----|:-------------:|--------------|----------------------------------------------------------------------------------------------------------------|
| 1  | table_name    | VARCHAR(192) | table name                                                                                                       |
| 2  | db_name       | VARCHAR(64)  | name of the database the table belongs to                                                                        |
| 3  | table_type    | VARCHAR(21)  | table type                                                                                                       |
| 4  | col_name      | VARCHAR(64)  | column name                                                                                                      |
| 5  | col_type      | VARCHAR(32)  | column type                                                                                                      |
| 6  | col_length    | INT          | column length                                                                                                    |
| 7  | col_precision | INT          | column precision                                                                                                 |
| 8  | col_scale     | INT          | column scale                                                                                                     |
| 9  | col_nullable  | INT          | whether the column is nullable                                                                                   |
| 10 | col_source    | VARCHAR(322) | data source of the column. Only virtual-table columns have this value, in the form db_name.table_name.col_name (a query sketch follows). |
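As a sketch, the new column can be inspected directly (database and table names are illustrative):
```sql
-- Show the data source behind each column of virtual table test.v1.
SELECT col_name, col_source
FROM information_schema.ins_columns
WHERE db_name = 'test' AND table_name = 'v1';
```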
## INS_USERS

View File

@ -0,0 +1,311 @@
---
sidebar_label: Virtual Tables
title: Virtual Tables
description: Management operations on virtual tables
---
## Create Virtual Tables
The `CREATE VTABLE` statement creates virtual regular tables and creates virtual sub-tables from a virtual super table used as a template.
### Create a Virtual Super Table
See the `VIRTUAL` parameter in [Create a Super Table](./04-stable.md#创建超级表).
### Create a Virtual Regular Table
```sql
CREATE VTABLE [IF NOT EXISTS] [db_name].vtb_name
    (ts_col_name timestamp,
    create_definition [, create_definition] ...)

create_definition:
vtb_col_name column_definition
column_definition:
type_name [FROM [db_name.]table_name.col_name]
```
### Create a Virtual Sub-Table
```sql
CREATE VTABLE [IF NOT EXISTS] [db_name].vtb_name
(create_definition [, create_definition] ...)
USING [db_name.]stb_name
[(tag_name [, tag_name] ...)]
TAGS (tag_value [, tag_value] ...)
create_definition:
[stb_col_name FROM] [db_name.]table_name.col_name
tag_value:
const_value
```
**Usage Notes**
1. For the naming rules of virtual tables and columns, see [Name Rules](./19-limit.md#名称命名规则).
2. The maximum table-name length is 192.
3. The first column of the table must be TIMESTAMP, and the system automatically sets it as the primary key.
4. A table row may not exceed 64 KB (note that each VARCHAR/NCHAR/GEOMETRY column takes an extra 2 bytes of storage).
5. For the VARCHAR/NCHAR/GEOMETRY data types, the maximum byte length must be specified, e.g. VARCHAR(20) for 20 bytes.
6. When creating a virtual table, use FROM to specify a column's data source. Cross-database sources are supported via db_name; when db_name is omitted, the currently used database is assumed, and if db_name is omitted and no database is in use, an error is reported (a cross-database sketch follows this list).
7. The data source of the ts column is not specified explicitly when creating a virtual table; its values are the merged primary-key timestamps of the original tables behind all columns referenced by the query.
8. Only virtual sub-tables can be created under a virtual super table, and virtual sub-tables can only be created from a virtual super table as a template.
9. When creating a virtual table, the data types of its columns and tags must match the data types of the specified source columns and tags, or an error is reported.
10. Within one database, virtual table names must be unique, and a virtual table may not have the same name as a table. A virtual table may share a name with a view (not recommended); when a view and a virtual table share a name, write, query, grant, and revoke operations operate on the same-named table first.
11. When using FROM to specify a column's data source for virtual sub-tables and virtual regular tables, the column may only come from a regular sub-table or regular table, not from a super table, a view, or another virtual table, nor from a table with a composite primary key.
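A minimal sketch of note 6, a virtual regular table whose columns come from tables in two databases (all names are illustrative):
```sql
CREATE VTABLE db_analytics.v_mixed (
  ts timestamp,
  c1 int FROM db1.t1.value,
  c2 int FROM db2.t2.value
);
```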
## Query Virtual Tables
Virtual tables differ from normal tables in neither query syntax nor query scope; the difference is that the data set a virtual table presents may vary from query to query, following the data generation rules below.
### Virtual Table Data Generation Rules
1. A virtual table aligns the data of multiple original tables on the timestamp.
2. If several original tables have data at the same timestamp, the corresponding column values are combined into one row; missing columns are filled with NULL.
3. A virtual table's timestamps are the union of the timestamps of the original tables behind all columns referenced in the query, so the result set may have a different number of rows when different queries select different columns.
4. Users can combine arbitrary columns from multiple tables; unselected columns do not appear in the virtual table.
**Example**
Suppose tables t1, t2, and t3 have the following structure and data:
![virtual-table-origin-table.png](pic/virtual-table-origin-table.png)
And a virtual regular table v1 is created as follows:
```sql
create vtable v1 (
ts timestamp,
c1 int from t1.value,
c2 int from t2.value,
c3 int from t3.value1,
c4 int from t3.value2);
```
Then, following the virtual table's multi-table merge rules, executing the query:
```sql
select * from v1;
```
produces the following result:
![virtual-table-query-res.png](pic/virtual-table-query-res.png)
If only some of the columns are selected instead of all of them, the result contains only the timestamps of the original tables behind the selected columns. For example, executing the query:
```sql
select c1, c2 from v1;
```
produces the result shown below:
![virtual-table-query-res-part.png](pic/virtual-table-query-res-part.png)
Because t1 and t2, the original tables behind columns c1 and c2, have no data at timestamp 0:00:03, the result does not include that timestamp.
**Usage Restrictions**
1. When querying a virtual super table, the virtual sub-tables' data sources may not come from different databases.
## Alter a Virtual Regular Table
```sql
ALTER VTABLE [db_name.]vtb_name alter_table_clause
alter_table_clause: {
ADD COLUMN vtb_col_name vtb_column_type [FROM table_name.col_name]
| DROP COLUMN vtb_col_name
| ALTER COLUMN vtb_col_name SET {table_name.col_name | NULL }
| MODIFY COLUMN col_name column_type
| RENAME COLUMN old_col_name new_col_name
}
```
**Usage Notes**
The following modifications can be made to a virtual regular table:
1. ADD COLUMN: add a column.
2. DROP COLUMN: drop a column.
3. MODIFY COLUMN: modify the column definition. If the column's type is variable-length, this can widen the column (never narrow it). If the column already has a data source, widening fails because the new width no longer matches the source column's width; first set the data source to NULL, then change the width (see the sketch after the subsections below).
4. RENAME COLUMN: rename a column.
5. ALTER COLUMN .. SET: change the column's data source; SET NULL clears the data source of the column.
### Add a Column
```sql
ALTER VTABLE vtb_name ADD COLUMN vtb_col_name vtb_col_type [FROM [db_name].table_name.col_name]
```
### Drop a Column
```sql
ALTER VTABLE vtb_name DROP COLUMN vtb_col_name
```
### Change the Column Width
```sql
ALTER VTABLE vtb_name MODIFY COLUMN vtb_col_name data_type(length);
```
### Rename a Column
```sql
ALTER VTABLE vtb_name RENAME COLUMN old_col_name new_col_name
```
### Change a Column's Data Source
```sql
ALTER VTABLE vtb_name ALTER COLUMN vtb_col_name SET {[db_name.]table_name.col_name | NULL}
```
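A minimal sketch of the widen-then-reattach workflow from note 3 above (table and column names are illustrative):
```sql
ALTER VTABLE vtb1 ALTER COLUMN c1 SET NULL;       -- detach the data source
ALTER VTABLE vtb1 MODIFY COLUMN c1 VARCHAR(64);   -- widen the column
ALTER VTABLE vtb1 ALTER COLUMN c1 SET src_tb.c1;  -- reattach a matching-width source
```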
## Alter a Virtual Sub-Table
```sql
ALTER VTABLE [db_name.]vtb_name alter_table_clause
alter_table_clause: {
ALTER COLUMN vtb_col_name SET table_name.col_name
| SET TAG tag_name = new_tag_value
}
```
**Usage Notes**
1. Except for changing tag values, modifications to a virtual sub-table's columns and tags must be made through its virtual super table.
### Change a Virtual Sub-Table's Tag Value
```sql
ALTER VTABLE tb_name SET TAG tag_name1=new_tag_value1, tag_name2=new_tag_value2 ...;
```
### Change a Column's Data Source
```sql
ALTER VTABLE vtb_name ALTER COLUMN vtb_col_name SET {[db_name.]table_name.col_name | NULL}
```
## Drop a Virtual Table
```sql
DROP VTABLE [IF EXISTS] [dbname].vtb_name;
```
## View Virtual Table Information
### Show All Virtual Tables in a Database
The following SQL lists the names of all virtual tables in the current database.
```sql
SHOW [NORMAL | CHILD] [db_name.]VTABLES [LIKE 'pattern'];
```
**Usage Notes**
1. If db_name is not given, information on all virtual regular tables and virtual sub-tables in the current database is shown; if no database is in use and db_name is not given, the error "database not specified" is reported. LIKE performs fuzzy matching on table names. NORMAL restricts the output to virtual regular tables, CHILD to virtual sub-tables.
### Show a Virtual Table's Creation Statement
```sql
SHOW CREATE VTABLE [db_name.]vtable_name;
```
Shows the creation statement of the virtual table vtable_name; virtual regular tables and virtual sub-tables are supported. It is commonly used in database migration: given an existing virtual table, it returns the creation statement, and executing that statement in another cluster creates a virtual table with the identical structure.
### Get a Virtual Table's Schema
```sql
DESCRIBE [db_name.]vtb_name;
```
### View Information on All Virtual Tables
```sql
SELECT ... FROM information_schema.ins_tables where type = 'VIRTUAL_NORMAL_TABLE' or type = 'VIRTUAL_CHILD_TABLE';
```
## Write to Virtual Tables
Writing data to a virtual table and deleting data from it are not supported. A virtual table is a logical table, the computed result over the original tables, so it can only be queried; data can be neither written to it nor deleted from it.
## Virtual Tables vs. Views
Virtual tables look similar to views, but they differ in many ways:
| Property | Virtual Table | View |
|----------------|------------------------|-----------|
| Definition | A dynamic data structure: a logical table generated from the columns of multiple tables by timestamp-combination rules. | A virtualized table structure based on an SQL query; it stores the definition of the query logic. |
| Data source | Multiple original tables; columns can be selected dynamically and the data is aligned by timestamp. | The result of a query over one or more tables, usually a complex SQL query. |
| Data storage | No actual storage; all data is generated dynamically at query time. | No actual storage; only the SQL query logic is saved. |
| Timestamp handling | Aligns the columns of different tables onto a unified time axis via timestamps. | No timestamp alignment; the data is determined directly by the query logic. |
| Update mechanism | Updates dynamically; changes to the original tables are reflected in real time. | Updates dynamically, but depends on the query logic in the view definition; no alignment or data merging is involved. |
| Features | Supports null filling and interpolation (e.g. prev, next, linear). | No built-in filling or interpolation; these must be implemented in the query logic. |
| Use cases | Time-series alignment, cross-table data merging, multi-source comparison analysis, etc. | Simplifying complex query logic, restricting user access, encapsulating business logic, etc. |
| Performance | Query complexity can be higher because of multi-table alignment and null handling, especially on large data sets. | Performance depends on the complexity of the view's query, similar to querying the underlying tables directly. |
Converting between virtual tables and views is not supported: you cannot create a view from a virtual table or a virtual table from a view.
## Virtual Table Permissions
### Permission Description
Virtual table permissions are READ and WRITE: queries require the READ permission, and deleting or modifying the virtual table itself requires the WRITE permission.
### Syntax
#### Grant
```sql
GRANT privileges ON [db_name.]vtable_name TO user_name
privileges: {
ALL,
| priv_type [, priv_type] ...
}
priv_type: {
READ
| WRITE
}
```
#### Revoke
```sql
REVOKE privileges ON [db_name.]vtable_name FROM user_name
privileges: {
ALL,
| priv_type [, priv_type] ...
}
priv_type: {
READ
| WRITE
}
```
### Permission Rules
1. The creator of a virtual table and the root user have all permissions by default.
2. Read/write permissions on a given virtual table (including virtual super tables and virtual regular tables) are granted or revoked via dbname.vtbname; granting or revoking directly on a virtual sub-table is not supported.
3. Virtual sub-tables and virtual super tables do not support tag-based (table-level) authorization; virtual sub-tables inherit the permissions of their virtual super table.
4. Granting to and revoking from other users is done with the GRANT and REVOKE statements, and only the root user may do so.
5. The detailed permission rules are summarized below:
| # | Operation | Required Permission |
|------|------|----------------------------------------------------------|
| 1 | CREATE VTABLE | The user has WRITE on the database the virtual table belongs to, and<br /> READ on the original tables that serve as the virtual table's data sources. |
| 2 | DROP/ALTER VTABLE | The user has WRITE on the virtual table; specifying a column's data source additionally requires READ on that source's original table. |
| 3 | SHOW VTABLES | None |
| 4 | SHOW CREATE VTABLE | None |
| 5 | DESCRIBE VTABLE | None |
| 6 | System table queries | None |
| 7 | SELECT FROM VTABLE | The user has READ on the virtual table |
| 8 | GRANT/REVOKE | Only the root user may grant and revoke |
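A minimal sketch of granting and revoking (user, database, and table names are illustrative):
```sql
-- Let user u1 query virtual table test.v1, then revoke the permission.
GRANT READ ON test.v1 TO u1;
REVOKE READ ON test.v1 FROM u1;
```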
## Usage Scenarios
| SQL Query | SQL Write | STMT Query | STMT Write | Subscription | Stream Computing |
|---------|--------|---------|------|--------|---|
| Supported | Not supported | Not supported | Not supported | Not supported | Supported |

Binary file not shown (new image, 7.8 KiB)

Binary file not shown (new image, 3.1 KiB)

Binary file not shown (new image, 4.9 KiB)

View File

@ -49,10 +49,16 @@ extern "C" {
} \
} while (0)
#define HAS_BIND_VALUE ((uint8_t)0x1)        // table name supplied via a bind parameter
#define IS_FIXED_VALUE ((uint8_t)0x2)        // table name written literally in the SQL
#define USING_CLAUSE ((uint8_t)0x4)          // statement carries a USING clause
#define IS_FIXED_TAG ((uint8_t)0x8)          // tag values written literally in the SQL
#define NO_DATA_USING_CLAUSE ((uint8_t)0x7)  // mask: HAS_BIND_VALUE | IS_FIXED_VALUE | USING_CLAUSE
typedef struct SStmtCallback {
TAOS_STMT* pStmt;
int32_t (*getTbNameFn)(TAOS_STMT*, char**);
int32_t (*setInfoFn)(TAOS_STMT*, STableMeta*, void*, SName*, bool, SHashObj*, SHashObj*, const char*, bool);
int32_t (*setInfoFn)(TAOS_STMT*, STableMeta*, void*, SName*, bool, SHashObj*, SHashObj*, const char*, uint8_t);
int32_t (*getExecInfoFn)(TAOS_STMT*, SHashObj**, SHashObj**);
} SStmtCallback;
@ -175,7 +181,7 @@ int32_t qBindStmtColsValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bind, c
int32_t qBindStmtSingleColValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen,
int32_t colIdx, int32_t rowNum, void* charsetCxt);
int32_t qBuildStmtColFields(void* pDataBlock, int32_t* fieldNum, TAOS_FIELD_E** fields);
int32_t qBuildStmtStbColFields(void* pBlock, void* boundTags, bool hasCtbName, int32_t* fieldNum,
int32_t qBuildStmtStbColFields(void* pBlock, void* boundTags, uint8_t tbNameFlag, int32_t* fieldNum,
TAOS_FIELD_ALL** fields);
int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TAOS_FIELD_E** fields);
int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const char* sTableName, char* tName,

View File

@ -64,7 +64,7 @@ typedef struct SStmtBindInfo {
int32_t sBindLastIdx;
int8_t tbType;
bool tagsCached;
bool preCtbname;
uint8_t tbNameFlag;
void *boundTags;
char tbName[TSDB_TABLE_FNAME_LEN];
char tbFName[TSDB_TABLE_FNAME_LEN];

View File

@ -75,7 +75,7 @@ static int32_t smlProcessTagTelnet(SSmlHandle *info, char *data, char *sqlEnd){
const char *sql = data;
while (sql < sqlEnd) {
JUMP_SPACE(sql, sqlEnd)
if (unlikely(*sql == '\0')) break;
if (unlikely(*sql == '\0' || *sql == '\n')) break;
const char *key = sql;
size_t keyLen = 0;

View File

@ -238,7 +238,7 @@ int32_t stmtRestoreQueryFields(STscStmt* pStmt) {
}
*/
int32_t stmtUpdateBindInfo(TAOS_STMT* stmt, STableMeta* pTableMeta, void* tags, SName* tbName, const char* sTableName,
bool autoCreateTbl) {
bool autoCreateTbl, uint8_t tbNameFlag) {
STscStmt* pStmt = (STscStmt*)stmt;
char tbFName[TSDB_TABLE_FNAME_LEN];
int32_t code = tNameExtractFullName(tbName, tbFName);
@ -256,6 +256,7 @@ int32_t stmtUpdateBindInfo(TAOS_STMT* stmt, STableMeta* pTableMeta, void* tags,
pStmt->bInfo.tbType = pTableMeta->tableType;
pStmt->bInfo.boundTags = tags;
pStmt->bInfo.tagsCached = false;
pStmt->bInfo.tbNameFlag = tbNameFlag;
tstrncpy(pStmt->bInfo.stbFName, sTableName, sizeof(pStmt->bInfo.stbFName));
return TSDB_CODE_SUCCESS;
@ -271,10 +272,10 @@ int32_t stmtUpdateExecInfo(TAOS_STMT* stmt, SHashObj* pVgHash, SHashObj* pBlockH
}
int32_t stmtUpdateInfo(TAOS_STMT* stmt, STableMeta* pTableMeta, void* tags, SName* tbName, bool autoCreateTbl,
SHashObj* pVgHash, SHashObj* pBlockHash, const char* sTableName, bool preCtbname) {
SHashObj* pVgHash, SHashObj* pBlockHash, const char* sTableName, uint8_t tbNameFlag) {
STscStmt* pStmt = (STscStmt*)stmt;
STMT_ERR_RET(stmtUpdateBindInfo(stmt, pTableMeta, tags, tbName, sTableName, autoCreateTbl));
STMT_ERR_RET(stmtUpdateBindInfo(stmt, pTableMeta, tags, tbName, sTableName, autoCreateTbl, tbNameFlag));
STMT_ERR_RET(stmtUpdateExecInfo(stmt, pVgHash, pBlockHash));
pStmt->sql.autoCreateTbl = autoCreateTbl;
@ -1729,7 +1730,10 @@ int stmtGetTagFields(TAOS_STMT* stmt, int* nums, TAOS_FIELD_E** fields) {
STMT_ERRI_JRET(stmtFetchTagFields(stmt, nums, fields));
_return:
// compatible with previous versions
if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST && (pStmt->bInfo.tbNameFlag & NO_DATA_USING_CLAUSE) == 0x0) {
code = TSDB_CODE_TSC_STMT_TBNAME_ERROR;
}
pStmt->errCode = preCode;
return code;

View File

@ -193,7 +193,7 @@ static int32_t stmtGetTbName(TAOS_STMT2* stmt, char** tbName) {
}
static int32_t stmtUpdateBindInfo(TAOS_STMT2* stmt, STableMeta* pTableMeta, void* tags, SName* tbName,
const char* sTableName, bool autoCreateTbl, bool preCtbname) {
const char* sTableName, bool autoCreateTbl, int8_t tbNameFlag) {
STscStmt2* pStmt = (STscStmt2*)stmt;
char tbFName[TSDB_TABLE_FNAME_LEN];
int32_t code = tNameExtractFullName(tbName, tbFName);
@ -217,7 +217,7 @@ static int32_t stmtUpdateBindInfo(TAOS_STMT2* stmt, STableMeta* pTableMeta, void
pStmt->bInfo.boundTags = tags;
pStmt->bInfo.tagsCached = false;
pStmt->bInfo.preCtbname = preCtbname;
pStmt->bInfo.tbNameFlag = tbNameFlag;
tstrncpy(pStmt->bInfo.stbFName, sTableName, sizeof(pStmt->bInfo.stbFName));
return TSDB_CODE_SUCCESS;
@ -233,10 +233,10 @@ static int32_t stmtUpdateExecInfo(TAOS_STMT2* stmt, SHashObj* pVgHash, SHashObj*
}
static int32_t stmtUpdateInfo(TAOS_STMT2* stmt, STableMeta* pTableMeta, void* tags, SName* tbName, bool autoCreateTbl,
SHashObj* pVgHash, SHashObj* pBlockHash, const char* sTableName, bool preCtbname) {
SHashObj* pVgHash, SHashObj* pBlockHash, const char* sTableName, uint8_t tbNameFlag) {
STscStmt2* pStmt = (STscStmt2*)stmt;
STMT_ERR_RET(stmtUpdateBindInfo(stmt, pTableMeta, tags, tbName, sTableName, autoCreateTbl, preCtbname));
STMT_ERR_RET(stmtUpdateBindInfo(stmt, pTableMeta, tags, tbName, sTableName, autoCreateTbl, tbNameFlag));
STMT_ERR_RET(stmtUpdateExecInfo(stmt, pVgHash, pBlockHash));
pStmt->sql.autoCreateTbl = autoCreateTbl;
@ -1233,7 +1233,8 @@ static int stmtFetchStbColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIEL
}
STMT_ERRI_JRET(
qBuildStmtStbColFields(*pDataBlock, pStmt->bInfo.boundTags, pStmt->bInfo.preCtbname, fieldNum, fields));
qBuildStmtStbColFields(*pDataBlock, pStmt->bInfo.boundTags, pStmt->bInfo.tbNameFlag, fieldNum, fields));
if (pStmt->bInfo.tbType == TSDB_SUPER_TABLE && cleanStb) {
pStmt->bInfo.needParse = true;
qDestroyStmtDataBlock(*pDataBlock);
@ -2022,7 +2023,9 @@ int stmtParseColFields2(TAOS_STMT2* stmt) {
STMT_TYPE_MULTI_INSERT != pStmt->sql.type) {
pStmt->bInfo.needParse = false;
}
if (pStmt->sql.stbInterlaceMode && pStmt->sql.siInfo.pDataCtx != NULL) {
pStmt->bInfo.needParse = false;
}
if (pStmt->exec.pRequest && STMT_TYPE_QUERY == pStmt->sql.type && pStmt->sql.runTimes) {
taos_free_result(pStmt->exec.pRequest);
pStmt->exec.pRequest = NULL;
@ -2036,6 +2039,10 @@ int stmtParseColFields2(TAOS_STMT2* stmt) {
}
_return:
// compatible with previous versions
if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST && (pStmt->bInfo.tbNameFlag & NO_DATA_USING_CLAUSE) == 0x0) {
code = TSDB_CODE_TSC_STMT_TBNAME_ERROR;
}
pStmt->errCode = preCode;

View File

@ -129,4 +129,4 @@ add_test(
add_test(
NAME userOperTest
COMMAND userOperTest
)

View File

@ -534,7 +534,7 @@ TEST(stmt2Case, insert_ctb_using_get_fields_Test) {
printf("support case \n");
// case 1 : test child table already exist
{
const char* sql = "INSERT INTO stmt2_testdb_3.t0(ts,b)using stmt2_testdb_3.stb (t1,t2) TAGS(?,?) VALUES (?,?)";
const char* sql = "INSERT INTO stmt2_testdb_3.t0(ts,b)using stmt2_testdb_3.stb (t1,t2) TAGS(?,?) VALUES(?,?)";
TAOS_FIELD_ALL expectedFields[4] = {{"t1", TSDB_DATA_TYPE_INT, 0, 0, 4, TAOS_FIELD_TAG},
{"t2", TSDB_DATA_TYPE_BINARY, 0, 0, 12, TAOS_FIELD_TAG},
{"ts", TSDB_DATA_TYPE_TIMESTAMP, 2, 0, 8, TAOS_FIELD_COL},
@ -612,7 +612,7 @@ TEST(stmt2Case, insert_ctb_using_get_fields_Test) {
// case 8 : 'db' 'stb'
{
const char* sql = "INSERT INTO 'stmt2_testdb_3'.? using 'stmt2_testdb_3'.'stb' (t1,t2) TAGS(?,?) (ts,b)VALUES(?,?)";
const char* sql = "INSERT INTO 'stmt2_testdb_3'.? using 'stmt2_testdb_3'.'stb' (t1,t2) TAGS(?,?)(ts,b)VALUES(?,?)";
TAOS_FIELD_ALL expectedFields[5] = {{"tbname", TSDB_DATA_TYPE_BINARY, 0, 0, 271, TAOS_FIELD_TBNAME},
{"t1", TSDB_DATA_TYPE_INT, 0, 0, 4, TAOS_FIELD_TAG},
{"t2", TSDB_DATA_TYPE_BINARY, 0, 0, 12, TAOS_FIELD_TAG},
@ -634,9 +634,20 @@ TEST(stmt2Case, insert_ctb_using_get_fields_Test) {
printf("case 9 : %s\n", sql);
getFieldsSuccess(taos, sql, expectedFields, 5);
}
// case 11: TD-34097
{
do_query(taos, "use stmt2_testdb_3");
const char* sql = "INSERT INTO ? using stb (t1,t2) TAGS(1,'abc') (ts,b)VALUES(?,?)";
TAOS_FIELD_ALL expectedFields[3] = {{"tbname", TSDB_DATA_TYPE_BINARY, 0, 0, 271, TAOS_FIELD_TBNAME},
{"ts", TSDB_DATA_TYPE_TIMESTAMP, 2, 0, 8, TAOS_FIELD_COL},
{"b", TSDB_DATA_TYPE_BINARY, 0, 0, 12, TAOS_FIELD_COL}};
printf("case 11 : %s\n", sql);
getFieldsSuccess(taos, sql, expectedFields, 3);
}
// case 10 : test all types
{
do_query(taos, "use stmt2_testdb_3");
const char* sql =
"insert into ? using all_stb tags(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)";
TAOS_FIELD_ALL expectedFields[33] = {{"tbname", TSDB_DATA_TYPE_BINARY, 0, 0, 271, TAOS_FIELD_TBNAME},
@ -711,7 +722,27 @@ TEST(stmt2Case, insert_ctb_using_get_fields_Test) {
printf("case 5 : %s\n", sql);
getFieldsError(taos, sql, TSDB_CODE_TSC_SQL_SYNTAX_ERROR);
}
// case 6 : mix value and ?
{
do_query(taos, "use stmt2_testdb_3");
const char* sql = "INSERT INTO ? using stb (t1,t2) TAGS(1,?) (ts,b)VALUES(?,?)";
printf("case 6 : %s\n", sql);
getFieldsError(taos, sql, TSDB_CODE_TSC_SQL_SYNTAX_ERROR);
}
// case 7 : mix value and ?
{
do_query(taos, "use stmt2_testdb_3");
const char* sql = "INSERT INTO ? using stb (t1,t2) TAGS(?,?) (ts,b)VALUES(15910606280001,?)";
printf("case 7 : %s\n", sql);
getFieldsError(taos, sql, TSDB_CODE_TSC_INVALID_OPERATION);
}
// case 8 : mix value and ?
{
do_query(taos, "use stmt2_testdb_3");
const char* sql = "INSERT INTO ? using stb (t1,t2) TAGS(?,?) (ts,b)VALUES(15910606280001,'abc')";
printf("case 8 : %s\n", sql);
getFieldsError(taos, sql, TSDB_CODE_TSC_INVALID_OPERATION);
}
do_query(taos, "drop database if exists stmt2_testdb_3");
taos_close(taos);
}
@ -1002,6 +1033,15 @@ TEST(stmt2Case, stmt2_insert_non_statndard) {
printf("stmt2 [%s] : %s\n", "less params", sql);
int code = taos_stmt2_prepare(stmt, sql, 0);
checkError(stmt, code);
// test get fields
int fieldNum = 0;
TAOS_FIELD_ALL* pFields = NULL;
code = taos_stmt2_get_fields(stmt, &fieldNum, &pFields);
checkError(stmt, code);
ASSERT_EQ(fieldNum, 2);
ASSERT_STREQ(pFields[0].name, "tbname");
ASSERT_STREQ(pFields[1].name, "ts");
int total_affect_rows = 0;
int t64_len[2] = {sizeof(int64_t), sizeof(int64_t)};
@ -1024,11 +1064,22 @@ TEST(stmt2Case, stmt2_insert_non_statndard) {
code = taos_stmt2_bind_param(stmt, &bindv, -1);
checkError(stmt, code);
code = taos_stmt2_get_fields(stmt, &fieldNum, &pFields);
checkError(stmt, code);
ASSERT_EQ(fieldNum, 2);
ASSERT_STREQ(pFields[0].name, "tbname");
ASSERT_STREQ(pFields[1].name, "ts");
int affected_rows;
taos_stmt2_exec(stmt, &affected_rows);
total_affect_rows += affected_rows;
checkError(stmt, code);
code = taos_stmt2_get_fields(stmt, &fieldNum, &pFields);
checkError(stmt, code);
ASSERT_EQ(fieldNum, 2);
ASSERT_STREQ(pFields[0].name, "tbname");
ASSERT_STREQ(pFields[1].name, "ts");
}
ASSERT_EQ(total_affect_rows, 12);
@ -1959,27 +2010,27 @@ void stmt2_async_test(std::atomic<bool>& stop_task) {
stop_task = true;
}
TEST(stmt2Case, async_order) {
std::atomic<bool> stop_task(false);
std::thread t(stmt2_async_test, std::ref(stop_task));
// TEST(stmt2Case, async_order) {
// std::atomic<bool> stop_task(false);
// std::thread t(stmt2_async_test, std::ref(stop_task));
// wait for 60 seconds
auto start_time = std::chrono::steady_clock::now();
while (!stop_task) {
auto elapsed_time = std::chrono::steady_clock::now() - start_time;
if (std::chrono::duration_cast<std::chrono::seconds>(elapsed_time).count() > 100) {
if (t.joinable()) {
t.detach();
}
FAIL() << "Test[stmt2_async_test] timed out";
break;
}
std::this_thread::sleep_for(std::chrono::seconds(1));  // check once per second
}
if (t.joinable()) {
t.join();
}
}
// // wait for 60 seconds
// auto start_time = std::chrono::steady_clock::now();
// while (!stop_task) {
// auto elapsed_time = std::chrono::steady_clock::now() - start_time;
// if (std::chrono::duration_cast<std::chrono::seconds>(elapsed_time).count() > 100) {
// if (t.joinable()) {
// t.detach();
// }
// FAIL() << "Test[stmt2_async_test] timed out";
// break;
// }
// std::this_thread::sleep_for(std::chrono::seconds(1)); // check once per second
// }
// if (t.joinable()) {
// t.join();
// }
// }
TEST(stmt2Case, rowformat_bind) {
TAOS* taos = taos_connect("localhost", "root", "taosdata", "", 0);

View File

@ -39,7 +39,7 @@ EDriverType tsDriverType = DRIVER_NATIVE;
void *tsDriver = NULL;
static int32_t tossGetDevelopPath(char *driverPath, const char *driverName) {
static int32_t taosGetDevelopPath(char *driverPath, const char *driverName) {
char appPath[PATH_MAX] = {0};
int32_t ret = taosAppPath(appPath, PATH_MAX);
if (ret == 0) {
@ -67,7 +67,7 @@ int32_t taosDriverInit(EDriverType driverType) {
driverName = DRIVER_WSBSOCKET_NAME;
}
if (tsDriver == NULL && tossGetDevelopPath(driverPath, driverName) == 0) {
if (tsDriver == NULL && taosGetDevelopPath(driverPath, driverName) == 0) {
tsDriver = taosLoadDll(driverPath);
}

View File

@ -19,6 +19,9 @@
static TdThreadOnce tsDriverOnce = PTHREAD_ONCE_INIT;
volatile int32_t tsDriverOnceRet = 0;
static TdThreadOnce tsInitOnce = PTHREAD_ONCE_INIT;
volatile int32_t tsInitOnceRet = 0;
#define ERR_VOID(code) \
terrno = code; \
return;
@ -89,21 +92,25 @@ setConfRet taos_set_config(const char *config) {
return (*fp_taos_set_config)(config);
}
static void taos_init_wrapper(void) {
static void taos_init_driver(void) {
tsDriverOnceRet = taosDriverInit(tsDriverType);
if (tsDriverOnceRet != 0) return;
tsDriverOnceRet = 0;
}
static void taos_init_wrapper(void) {
if (fp_taos_init == NULL) {
terrno = TSDB_CODE_DLL_FUNC_NOT_LOAD;
tsDriverOnceRet = -1;
tsInitOnceRet = -1;
} else {
tsDriverOnceRet = (*fp_taos_init)();
tsInitOnceRet = (*fp_taos_init)();
}
}
int taos_init(void) {
(void)taosThreadOnce(&tsDriverOnce, taos_init_wrapper);
return tsDriverOnceRet;
(void)taosThreadOnce(&tsDriverOnce, taos_init_driver);
(void)taosThreadOnce(&tsInitOnce, taos_init_wrapper);
return tsInitOnceRet;
}
void taos_cleanup(void) {
@ -126,11 +133,7 @@ int taos_options(TSDB_OPTION option, const void *arg, ...) {
terrno = TSDB_CODE_REPEAT_INIT;
return -1;
}
if (taos_init() != 0) {
terrno = TSDB_CODE_DLL_NOT_LOAD;
return -1;
}
(void)taosThreadOnce(&tsDriverOnce, taos_init_driver);
CHECK_INT(fp_taos_options);
return (*fp_taos_options)(option, arg);
@ -143,7 +146,7 @@ int taos_options_connection(TAOS *taos, TSDB_OPTION_CONNECTION option, const voi
TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port) {
if (taos_init() != 0) {
terrno = TSDB_CODE_DLL_NOT_LOAD;
//terrno = TSDB_CODE_DLL_NOT_LOAD;
return NULL;
}
@ -646,6 +649,7 @@ TAOS_RES *taos_schemaless_insert_ttl_with_reqid_tbname_key(TAOS *taos, char *lin
}
tmq_conf_t *tmq_conf_new() {
taos_init();
CHECK_PTR(fp_tmq_conf_new);
return (*fp_tmq_conf_new)();
}
@ -666,6 +670,7 @@ void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *para
}
tmq_list_t *tmq_list_new() {
taos_init();
CHECK_PTR(fp_tmq_list_new);
return (*fp_tmq_list_new)();
}
@ -691,6 +696,7 @@ char **tmq_list_to_c_array(const tmq_list_t *tlist) {
}
tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen) {
taos_init();
CHECK_PTR(fp_tmq_consumer_new);
return (*fp_tmq_consumer_new)(conf, errstr, errstrLen);
}
@ -860,11 +866,13 @@ TSDB_SERVER_STATUS taos_check_server_status(const char *fqdn, int port, char *de
}
void taos_write_crashinfo(int signum, void *sigInfo, void *context) {
taos_init();
CHECK_VOID(fp_taos_write_crashinfo);
(*fp_taos_write_crashinfo)(signum, sigInfo, context);
}
char *getBuildInfo() {
taos_init();
CHECK_PTR(fp_getBuildInfo);
return (*fp_getBuildInfo)();
}

View File

@ -32,7 +32,7 @@ typedef struct SInsertParseContext {
bool needTableTagVal;
bool needRequest; // whether or not request server
bool isStmtBind; // whether is stmt bind
bool preCtbname;
uint8_t stmtTbNameFlag;
} SInsertParseContext;
typedef int32_t (*_row_append_fn_t)(SMsgBuf* pMsgBuf, const void* value, int32_t len, void* param);
@ -993,6 +993,10 @@ static int32_t parseTagsClauseImpl(SInsertParseContext* pCxt, SVnodeModifyOpStmt
code = buildSyntaxErrMsg(&pCxt->msg, "? only used in stmt", token.z);
break;
}
if (pTagVals->size != 0) {
code = buildSyntaxErrMsg(&pCxt->msg, "no mix usage for ? and tag values", token.z);
break;
}
continue;
}
@ -1026,6 +1030,10 @@ static int32_t parseTagsClauseImpl(SInsertParseContext* pCxt, SVnodeModifyOpStmt
pTag = NULL;
}
if (code == TSDB_CODE_SUCCESS && !isParseBindParam) {
pCxt->stmtTbNameFlag |= IS_FIXED_TAG;
}
_exit:
for (int32_t i = 0; i < taosArrayGetSize(pTagVals); ++i) {
STagVal* p = (STagVal*)TARRAY_GET_ELEM(pTagVals, i);
@ -1416,6 +1424,7 @@ static int32_t parseUsingTableName(SInsertParseContext* pCxt, SVnodeModifyOpStmt
return getTargetTableSchema(pCxt, pStmt);
}
pStmt->usingTableProcessing = true;
pCxt->stmtTbNameFlag |= USING_CLAUSE;
// pStmt->pSql -> stb_name [(tag1_name, ...)
pStmt->pSql += index;
int32_t code = parseDuplicateUsingClause(pCxt, pStmt, &pCxt->usingDuplicateTable);
@ -1465,7 +1474,7 @@ static int32_t getTableDataCxt(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pS
char tbFName[TSDB_TABLE_FNAME_LEN];
int32_t code = 0;
if (pCxt->preCtbname) {
if ((pCxt->stmtTbNameFlag & NO_DATA_USING_CLAUSE) == USING_CLAUSE) {
tstrncpy(pStmt->targetTableName.tname, pStmt->usingTableName.tname, sizeof(pStmt->targetTableName.tname));
tstrncpy(pStmt->targetTableName.dbname, pStmt->usingTableName.dbname, sizeof(pStmt->targetTableName.dbname));
pStmt->targetTableName.type = TSDB_SUPER_TABLE;
@ -2764,6 +2773,7 @@ static int32_t checkTableClauseFirstToken(SInsertParseContext* pCxt, SVnodeModif
}
if (TK_NK_QUESTION == pTbName->type) {
pCxt->stmtTbNameFlag &= ~IS_FIXED_VALUE;
pCxt->isStmtBind = true;
if (NULL == pCxt->pComCxt->pStmtCb) {
return buildSyntaxErrMsg(&pCxt->msg, "? only used in stmt", pTbName->z);
@ -2772,14 +2782,15 @@ static int32_t checkTableClauseFirstToken(SInsertParseContext* pCxt, SVnodeModif
char* tbName = NULL;
int32_t code = (*pCxt->pComCxt->pStmtCb->getTbNameFn)(pCxt->pComCxt->pStmtCb->pStmt, &tbName);
if (TSDB_CODE_SUCCESS == code) {
pCxt->stmtTbNameFlag |= HAS_BIND_VALUE;
pTbName->z = tbName;
pTbName->n = strlen(tbName);
} else if (code == TSDB_CODE_TSC_STMT_TBNAME_ERROR) {
pCxt->preCtbname = true;
code = TSDB_CODE_SUCCESS;
} else {
return code;
}
if (code == TSDB_CODE_TSC_STMT_TBNAME_ERROR) {
pCxt->stmtTbNameFlag &= ~HAS_BIND_VALUE;
code = TSDB_CODE_SUCCESS;
}
return code;
}
if (TK_NK_ID != pTbName->type && TK_NK_STRING != pTbName->type && TK_NK_QUESTION != pTbName->type) {
@ -2788,26 +2799,34 @@ static int32_t checkTableClauseFirstToken(SInsertParseContext* pCxt, SVnodeModif
// db.? situation: ensure that the only thing following the '.' mark is '?'
char* tbNameAfterDbName = strnchr(pTbName->z, '.', pTbName->n, true);
if ((tbNameAfterDbName != NULL) && (*(tbNameAfterDbName + 1) == '?')) {
char* tbName = NULL;
if (NULL == pCxt->pComCxt->pStmtCb) {
return buildSyntaxErrMsg(&pCxt->msg, "? only used in stmt", pTbName->z);
}
int32_t code = (*pCxt->pComCxt->pStmtCb->getTbNameFn)(pCxt->pComCxt->pStmtCb->pStmt, &tbName);
if (code != TSDB_CODE_SUCCESS) {
pCxt->preCtbname = true;
if (tbNameAfterDbName != NULL) {
if (*(tbNameAfterDbName + 1) == '?') {
pCxt->stmtTbNameFlag &= ~IS_FIXED_VALUE;
char* tbName = NULL;
if (NULL == pCxt->pComCxt->pStmtCb) {
return buildSyntaxErrMsg(&pCxt->msg, "? only used in stmt", pTbName->z);
}
int32_t code = (*pCxt->pComCxt->pStmtCb->getTbNameFn)(pCxt->pComCxt->pStmtCb->pStmt, &tbName);
if (TSDB_CODE_SUCCESS == code) {
pCxt->stmtTbNameFlag |= HAS_BIND_VALUE;
pTbName->z = tbName;
pTbName->n = strlen(tbName);
}
if (code == TSDB_CODE_TSC_STMT_TBNAME_ERROR) {
pCxt->stmtTbNameFlag &= ~HAS_BIND_VALUE;
code = TSDB_CODE_SUCCESS;
}
} else {
pTbName->z = tbName;
pTbName->n = strlen(tbName);
}
}
if (pCxt->isStmtBind) {
if (TK_NK_ID == pTbName->type || (tbNameAfterDbName != NULL && *(tbNameAfterDbName + 1) != '?')) {
// In SQL statements, the table name has already been specified.
pCxt->stmtTbNameFlag |= IS_FIXED_VALUE;
parserWarn("QID:0x%" PRIx64 ", table name is specified in sql, ignore the table name in bind param",
pCxt->pComCxt->requestId);
*pHasData = true;
}
return TSDB_CODE_SUCCESS;
}
if (TK_NK_ID == pTbName->type) {
pCxt->stmtTbNameFlag |= IS_FIXED_VALUE;
}
*pHasData = true;
@ -2824,7 +2843,7 @@ static int32_t setStmtInfo(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt)
SStmtCallback* pStmtCb = pCxt->pComCxt->pStmtCb;
int32_t code = (*pStmtCb->setInfoFn)(pStmtCb->pStmt, pStmt->pTableMeta, tags, &pStmt->targetTableName,
pStmt->usingTableProcessing, pStmt->pVgroupsHashObj, pStmt->pTableBlockHashObj,
pStmt->usingTableName.tname, pCxt->preCtbname);
pStmt->usingTableName.tname, pCxt->stmtTbNameFlag);
memset(&pCxt->tags, 0, sizeof(pCxt->tags));
pStmt->pVgroupsHashObj = NULL;
@ -2880,9 +2899,6 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pS
if (TSDB_CODE_SUCCESS == code && hasData) {
code = parseInsertTableClause(pCxt, pStmt, &token);
}
if (TSDB_CODE_PAR_TABLE_NOT_EXIST == code && pCxt->preCtbname) {
code = TSDB_CODE_TSC_STMT_TBNAME_ERROR;
}
}
if (TSDB_CODE_SUCCESS == code && !pCxt->missCache) {

View File

@ -1070,10 +1070,11 @@ int32_t buildBoundFields(int32_t numOfBound, int16_t* boundColumns, SSchema* pSc
}
int32_t buildStbBoundFields(SBoundColInfo boundColsInfo, SSchema* pSchema, int32_t* fieldNum, TAOS_FIELD_ALL** fields,
STableMeta* pMeta, void* boundTags, bool preCtbname) {
STableMeta* pMeta, void* boundTags, uint8_t tbNameFlag) {
SBoundColInfo* tags = (SBoundColInfo*)boundTags;
bool hastag = tags != NULL;
int32_t numOfBound = boundColsInfo.numOfBound + (preCtbname ? 1 : 0);
bool hastag = (tags != NULL) && !(tbNameFlag & IS_FIXED_TAG);
int32_t numOfBound =
boundColsInfo.numOfBound + ((tbNameFlag & IS_FIXED_VALUE) == 0 && (tbNameFlag & USING_CLAUSE) != 0 ? 1 : 0);
if (hastag) {
numOfBound += tags->mixTagsCols ? 0 : tags->numOfBound;
}
@ -1084,7 +1085,7 @@ int32_t buildStbBoundFields(SBoundColInfo boundColsInfo, SSchema* pSchema, int32
return terrno;
}
if (preCtbname && numOfBound != boundColsInfo.numOfBound) {
if ((tbNameFlag & IS_FIXED_VALUE) == 0 && (tbNameFlag & USING_CLAUSE) != 0) {
(*fields)[idx].field_type = TAOS_FIELD_TBNAME;
tstrncpy((*fields)[idx].name, "tbname", sizeof((*fields)[idx].name));
(*fields)[idx].type = TSDB_DATA_TYPE_BINARY;
@ -1188,7 +1189,7 @@ int32_t qBuildStmtColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD_E** fiel
return TSDB_CODE_SUCCESS;
}
int32_t qBuildStmtStbColFields(void* pBlock, void* boundTags, bool preCtbname, int32_t* fieldNum,
int32_t qBuildStmtStbColFields(void* pBlock, void* boundTags, uint8_t tbNameFlag, int32_t* fieldNum,
TAOS_FIELD_ALL** fields) {
STableDataCxt* pDataBlock = (STableDataCxt*)pBlock;
SSchema* pSchema = getTableColumnSchema(pDataBlock->pMeta);
@ -1202,7 +1203,7 @@ int32_t qBuildStmtStbColFields(void* pBlock, void* boundTags, bool preCtbname, i
}
CHECK_CODE(buildStbBoundFields(pDataBlock->boundColsInfo, pSchema, fieldNum, fields, pDataBlock->pMeta, boundTags,
preCtbname));
tbNameFlag));
return TSDB_CODE_SUCCESS;
}

View File

@ -1029,7 +1029,7 @@ void mpUpdateSystemAvailableMemorySize() {
atomic_store_64(&tsCurrentAvailMemorySize, sysAvailSize);
uDebug("system available memory size: %" PRId64, sysAvailSize);
uTrace("system available memory size: %" PRId64, sysAvailSize);
}
void mpSchedTrim(int64_t* loopTimes) {

View File

@ -729,7 +729,7 @@ int32_t mptGetMemPoolMaxMemSize(void* pHandle, int64_t* maxSize) {
}
if (TSDB_CODE_SUCCESS != code) {
uError("get system avaiable memory size failed, error: 0x%x", code);
uError("get system available memory size failed, error: 0x%x", code);
return code;
}

View File

@ -263,7 +263,7 @@ class TDTestCase(TBase):
['-a "abc"', "[0x80000357]"],
]
for arg in args:
rlist = self.taos("Z 0 " + arg[0])
rlist = self.taos("-Z 0 " + arg[0])
if arg[1] != None:
self.checkListString(rlist, arg[1])
@ -340,9 +340,14 @@ class TDTestCase(TBase):
self.checkExcept(taos + " -s 'show dnodes;' " + option)
def checkModeVersion(self):
# check default conn mode
#DEFAULT_CONN = "WebSocket"
DEFAULT_CONN = "Native"
# results
results = [
"WebSocket Client Version",
f"{DEFAULT_CONN} Client Version",
"2022-10-01 00:01:39.000",
"Query OK, 100 row(s) in set"
]
@ -351,8 +356,10 @@ class TDTestCase(TBase):
cmd = f"-s 'select ts from test.d0'"
rlist = self.taos(cmd, checkRun = True)
self.checkManyString(rlist, results)
# websocket
cmd = f"-Z 1 -s 'select ts from test.d0'"
results[0] = "WebSocket Client Version"
rlist = self.taos(cmd, checkRun = True)
self.checkManyString(rlist, results)

View File

@ -196,9 +196,9 @@ class TBase:
tdSql.checkFirstValue(sql, expect)
# order by desc limit 1 with last
sql = f"select first({col}) from {self.db}.{self.db}."
sql = f"select first({col}) from {self.db}.{self.stb}"
expect = tdSql.getFirstValue(sql)
sql = f"select {col} from {self.db}.{self.db}. order by _c0 asc limit 1"
sql = f"select {col} from {self.db}.{self.stb} order by _c0 asc limit 1"
tdSql.checkFirstValue(sql, expect)

View File

@ -1,52 +0,0 @@
[02/10 13:52:16.164959] SUCC: created database (test)
[02/10 13:52:16.182024] INFO: start creating 1000 table(s) with 8 thread(s)
[02/10 13:52:16.396337] SUCC: Spent 0.2140 seconds to create 1000 table(s) with 8 thread(s) speed: 4673 tables/s, already exist 0 table(s), actual 1000 table(s) pre created, 0 table(s) will be auto created
[02/10 13:53:05.155428] SUCC: thread[2] progressive mode, completed total inserted rows: 12500000, 339193.01 records/second
[02/10 13:53:05.160652] SUCC: thread[7] progressive mode, completed total inserted rows: 12500000, 341816.65 records/second
[02/10 13:53:05.207601] SUCC: thread[0] progressive mode, completed total inserted rows: 12500000, 340556.51 records/second
[02/10 13:53:05.215370] SUCC: thread[4] progressive mode, completed total inserted rows: 12500000, 338804.97 records/second
[02/10 13:53:05.224077] SUCC: thread[5] progressive mode, completed total inserted rows: 12500000, 338596.28 records/second
[02/10 13:53:05.249786] SUCC: thread[1] progressive mode, completed total inserted rows: 12500000, 339208.40 records/second
[02/10 13:53:05.256970] SUCC: thread[3] progressive mode, completed total inserted rows: 12500000, 339174.04 records/second
[02/10 13:53:05.274900] SUCC: thread[6] progressive mode, completed total inserted rows: 12500000, 339551.12 records/second
[02/10 13:53:05.275900] SUCC: Spent 48.867685 (real 36.806958) seconds to insert rows: 100000000 with 8 thread(s) into test 2046342.08 (real 2716877.61) records/second
[02/10 13:53:05.275909] SUCC: insert delay, min: 11.2580ms, avg: 29.4456ms, p90: 32.7750ms, p95: 34.1120ms, p99: 39.5900ms, max: 70.3780ms
[02/12 15:46:06.469780] SUCC: created database (test)
[02/12 15:46:06.499844] INFO: start creating 10000 table(s) with 8 thread(s)
[02/12 15:46:08.185009] SUCC: Spent 1.6860 seconds to create 10000 table(s) with 8 thread(s) speed: 5931 tables/s, already exist 0 table(s), actual 10000 table(s) pre created, 0 table(s) will be auto created
[02/12 15:46:57.356674] SUCC: thread[0] progressive mode, completed total inserted rows: 12500000, 339076.93 records/second
[02/12 15:46:57.434553] SUCC: thread[1] progressive mode, completed total inserted rows: 12500000, 338528.52 records/second
[02/12 15:46:57.452522] SUCC: thread[2] progressive mode, completed total inserted rows: 12500000, 339844.37 records/second
[02/12 15:46:57.452921] SUCC: thread[5] progressive mode, completed total inserted rows: 12500000, 339349.90 records/second
[02/12 15:46:57.463726] SUCC: thread[4] progressive mode, completed total inserted rows: 12500000, 339986.37 records/second
[02/12 15:46:57.466467] SUCC: thread[3] progressive mode, completed total inserted rows: 12500000, 339785.50 records/second
[02/12 15:46:57.499118] SUCC: thread[6] progressive mode, completed total inserted rows: 12500000, 339326.86 records/second
[02/12 15:46:57.501694] SUCC: thread[7] progressive mode, completed total inserted rows: 12500000, 338309.30 records/second
[02/12 15:46:57.502535] SUCC: Spent 49.309586 (real 36.843268) seconds to insert rows: 100000000 with 8 thread(s) into test 2028003.24 (real 2714200.05) records/second
[02/12 15:46:57.502546] SUCC: insert delay, min: 10.9580ms, avg: 29.4746ms, p90: 32.6960ms, p95: 33.8290ms, p99: 36.8390ms, max: 77.9940ms
[02/14 15:27:32.543409] SUCC: created database (test)
[02/14 15:27:32.568881] INFO: start creating 10000 table(s) with 8 thread(s)
[02/14 15:27:34.249759] SUCC: Spent 1.6810 seconds to create 10000 table(s) with 8 thread(s) speed: 5949 tables/s, already exist 0 table(s), actual 10000 table(s) pre created, 0 table(s) will be auto created
[02/14 15:28:26.165699] SUCC: thread[0] progressive mode, completed total inserted rows: 12500000, 321266.73 records/second
[02/14 15:28:26.281188] SUCC: thread[4] progressive mode, completed total inserted rows: 12500000, 319863.00 records/second
[02/14 15:28:26.326975] SUCC: thread[5] progressive mode, completed total inserted rows: 12500000, 321802.51 records/second
[02/14 15:28:26.328615] SUCC: thread[6] progressive mode, completed total inserted rows: 12500000, 321804.13 records/second
[02/14 15:28:26.379189] SUCC: thread[7] progressive mode, completed total inserted rows: 12500000, 320719.22 records/second
[02/14 15:28:26.400891] SUCC: thread[1] progressive mode, completed total inserted rows: 12500000, 321512.59 records/second
[02/14 15:28:26.470912] SUCC: thread[2] progressive mode, completed total inserted rows: 12500000, 319026.94 records/second
[02/14 15:28:26.565079] SUCC: thread[3] progressive mode, completed total inserted rows: 12500000, 317248.21 records/second
[02/14 15:28:26.566013] SUCC: Spent 52.307623 (real 39.013939) seconds to insert rows: 100000000 with 8 thread(s) into test 1911767.24 (real 2563186.45) records/second
[02/14 15:28:26.566024] SUCC: insert delay, min: 11.1290ms, avg: 31.2112ms, p90: 35.4900ms, p95: 37.0580ms, p99: 41.5180ms, max: 68.5900ms
[02/17 14:09:42.181835] SUCC: created database (test)
[02/17 14:09:42.210373] INFO: start creating 10000 table(s) with 8 thread(s)
[02/17 14:09:44.199467] SUCC: Spent 1.9890 seconds to create 10000 table(s) with 8 thread(s) speed: 5028 tables/s, already exist 0 table(s), actual 10000 table(s) pre created, 0 table(s) will be auto created
[02/17 14:10:32.845475] SUCC: thread[3] progressive mode, completed total inserted rows: 12500000, 338184.62 records/second
[02/17 14:10:32.872586] SUCC: thread[4] progressive mode, completed total inserted rows: 12500000, 338445.48 records/second
[02/17 14:10:32.873271] SUCC: thread[1] progressive mode, completed total inserted rows: 12500000, 339256.73 records/second
[02/17 14:10:32.938231] SUCC: thread[5] progressive mode, completed total inserted rows: 12500000, 338737.29 records/second
[02/17 14:10:32.947655] SUCC: thread[2] progressive mode, completed total inserted rows: 12500000, 338938.99 records/second
[02/17 14:10:32.952985] SUCC: thread[0] progressive mode, completed total inserted rows: 12500000, 338652.89 records/second
[02/17 14:10:32.962370] SUCC: thread[6] progressive mode, completed total inserted rows: 12500000, 338890.00 records/second
[02/17 14:10:32.998729] SUCC: thread[7] progressive mode, completed total inserted rows: 12500000, 339216.19 records/second
[02/17 14:10:32.999680] SUCC: Spent 48.790057 (real 36.896020) seconds to insert rows: 100000000 with 8 thread(s) into test 2049597.93 (real 2710319.43) records/second
[02/17 14:10:32.999696] SUCC: insert delay, min: 10.7720ms, avg: 29.5168ms, p90: 32.6910ms, p95: 33.8370ms, p99: 36.6750ms, max: 76.0590ms

View File

@ -37,9 +37,6 @@ import taos
import taosrest
import taosws
from taos.cinterface import *
taos.taos_options(6, "native")
def checkRunTimeError():
import win32gui
timeCount = 0
@ -261,9 +258,8 @@ if __name__ == "__main__":
#
# do exeCmd command
#
taosAdapter = True # default is websocket, so taosAdapter must be started
if not execCmd == "":
if taosAdapter or restful or websocket:
if taosAdapter or restful or websocket:
tAdapter.init(deployPath)
else:
tdDnodes.init(deployPath)

View File

@ -68,14 +68,10 @@ class TDTestCase(TBase):
tdSql.checkData(0, 0, 10*10000)
# add normal table
cmd = "%s -N -I sml -t 2 -n 10000 -y" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.query("select count(*) from test.d0")
tdSql.checkData(0, 0, 1*10000)
tdSql.query("select count(*) from test.d1")
tdSql.checkData(0, 0, 1*10000)
cmd = "-N -I sml -t 2 -n 10000 -y"
rlist = self.benchmark(cmd, checkRun = False)
# expect failure
self.checkListString(rlist, "schemaless cannot work without stable")
def stop(self):
tdSql.close()

View File

@ -68,12 +68,15 @@ class TDTestCase(TBase):
os.environ['TDENGINE_CLOUD_DSN'] = ""
def checkCommandLine(self):
# default CONN_MODE
DEFAULT_CONN_MODE = "Native"
# modes
modes = ["", "-Z 1 -B 1", "-Z websocket", "-Z 0", "-Z native -B 2"]
# result
Rows = "insert rows: 9990"
results1 = [
["Connect mode is : WebSocket", Rows],
[f"Connect mode is : {DEFAULT_CONN_MODE}", Rows],
["Connect mode is : WebSocket", Rows],
["Connect mode is : WebSocket", Rows],
["Connect mode is : Native", Rows],
@ -112,7 +115,7 @@ class TDTestCase(TBase):
# command
#
self.benchmarkCmd("-h 127.0.0.1", 5, 100, 10, ["insert rows: 500"])
self.benchmarkCmd("-h 127.0.0.1 -P 6041 -uroot -ptaosdata", 5, 100, 10, ["insert rows: 500"])
self.benchmarkCmd("-h 127.0.0.1 -uroot -ptaosdata", 5, 100, 10, ["insert rows: 500"])
self.benchmarkCmd("-Z 0 -h 127.0.0.1 -P 6030 -uroot -ptaosdata", 5, 100, 10, ["insert rows: 500"])
#
@ -120,7 +123,7 @@ class TDTestCase(TBase):
#
# 6041 is default
options = "-h 127.0.0.1 -P 6041 -uroot -ptaosdata"
options = "-Z 1 -h 127.0.0.1 -P 6041 -uroot -ptaosdata"
json = "tools/benchmark/basic/json/connModePriorityErrHost.json"
self.insertBenchJson(json, options, True)

View File

@ -221,7 +221,7 @@ class TDTestCase(TBase):
def checkTmqJson(self, benchmark, json):
OK_RESULT = "Consumed total msgs: 30, total rows: 300000"
cmd = benchmark + " -f " + json
output,error = frame.eos.run(cmd, 600)
output, error, code = frame.eos.run(cmd, 600)
if output.find(OK_RESULT) != -1:
tdLog.info(f"succ: {cmd} found '{OK_RESULT}'")
else:

View File

@ -29,7 +29,7 @@ class TDTestCase(TBase):
def run(self):
binPath = etool.benchMarkFile()
cmd = "%s -t 1 -n 1 -y -W http://localhost:6041 -D 30" % binPath
cmd = "%s -t 1 -n 1 -y -W http://localhost:6041 " % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")

View File

@ -158,7 +158,7 @@ class TDTestCase(TBase):
def basicCommandLine(self, tmpdir):
# command and check result
checkItems = [
[f"-h 127.0.0.1 -P 6041 -uroot -ptaosdata -A -N -o {tmpdir}", ["OK: Database test dumped"]],
[f"-Z 0 -h 127.0.0.1 -P 6030 -uroot -ptaosdata -A -N -o {tmpdir}", ["OK: Database test dumped"]],
[f"-r result -a -e test d0 -o {tmpdir}", ["OK: table: d0 dumped", "OK: 100 row(s) dumped out!"]],
[f"-n -D test -o {tmpdir}", ["OK: Database test dumped", "OK: 205 row(s) dumped out!"]],
[f"-Z 0 -P 6030 -n -D test -o {tmpdir}", ["OK: Database test dumped", "OK: 205 row(s) dumped out!"]],
@ -348,19 +348,12 @@ class TDTestCase(TBase):
self.exceptCommandLine(taosdump, db, stb, tmpdir)
tdLog.info("4. except command line ................................. [Passed]")
json = "./tools/taosdump/native/json/insertOther.json"
# insert
db, stb, childCount, insertRows = self.insertData(json)
# dump in/out
self.dumpInOutMode("", db , json, tmpdir)
tdLog.info("5. native varbinary geometry ........................... [Passed]")
#
# check connMode
#
self.checkConnMode(db, stb, childCount, insertRows, tmpdir)
tdLog.info("6. check conn mode ..................................... [Passed]")
tdLog.info("5. check conn mode ..................................... [Passed]")
def stop(self):

View File

@ -38,8 +38,6 @@ from util.taosadapter import *
import taos
import taosrest
from taos.cinterface import *
taos.taos_options(6, "native")
def checkRunTimeError():
import win32gui

View File

@ -126,7 +126,7 @@ python3 mockdatasource.py
python3 fast_write_example.py
# 20
pip3 install kafka-python
pip3 install kafka-python==2.1.2
python3 kafka_example_consumer.py
# 21
@ -196,4 +196,5 @@ check_transactions || exit 1
reset_cache || exit 1
python3 tmq_websocket_example.py
python3 stmt2_native.py
python3 stmt2_native.py

View File

@ -108,7 +108,6 @@
,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/custom_col_tag.py
,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/default_json.py
,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/default_tmq_json.py
,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/demo.py
,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/csv-export.py

View File

@ -9,6 +9,9 @@
set +e
#set -x
export ASAN_OPTIONS=detect_odr_violation=0
echo "forbid check ODR violation."
FILE_NAME=
VALGRIND=0
TEST=0

View File

@ -40,9 +40,6 @@ import taos
import taosrest
import taosws
from taos.cinterface import *
taos.taos_options(6, "native")
def checkRunTimeError():
import win32gui
timeCount = 0
@ -73,6 +70,7 @@ def get_local_classes_in_order(file_path):
def dynamicLoadModule(fileName):
moduleName = fileName.replace(".py", "").replace(os.sep, ".")
return importlib.import_module(moduleName, package='..')
#
# run case on previous cluster
#
@ -254,9 +252,8 @@ if __name__ == "__main__":
#
# do exeCmd command
#
taosAdapter = True # default is websocket, so taosAdapter must be started
if not execCmd == "":
if taosAdapter or restful or websocket:
if restful or websocket:
tAdapter.init(deployPath)
else:
tdDnodes.init(deployPath)
@ -295,7 +292,7 @@ if __name__ == "__main__":
if valgrind:
time.sleep(2)
if taosAdapter or restful or websocket:
if restful or websocket:
toBeKilled = "taosadapter"
# killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled
@ -391,7 +388,7 @@ if __name__ == "__main__":
tdDnodes.deploy(1,updateCfgDict)
tdDnodes.start(1)
tdCases.logSql(logSql)
if taosAdapter or restful or websocket:
if restful or websocket:
tAdapter.deploy(adapter_cfg_dict)
tAdapter.start()
@ -431,7 +428,7 @@ if __name__ == "__main__":
tdDnodes.starttaosd(dnode.index)
tdCases.logSql(logSql)
if taosAdapter or restful or websocket:
if restful or websocket:
tAdapter.deploy(adapter_cfg_dict)
tAdapter.start()
@ -553,7 +550,7 @@ if __name__ == "__main__":
except:
pass
if taosAdapter or restful or websocket:
if restful or websocket:
tAdapter.init(deployPath, masterIp)
tAdapter.stop(force_kill=True)
@ -563,7 +560,7 @@ if __name__ == "__main__":
tdDnodes.start(1)
tdCases.logSql(logSql)
if taosAdapter or restful or websocket:
if restful or websocket:
tAdapter.deploy(adapter_cfg_dict)
tAdapter.start()
@ -618,7 +615,7 @@ if __name__ == "__main__":
tdDnodes.starttaosd(dnode.index)
tdCases.logSql(logSql)
if taosAdapter or restful or websocket:
if restful or websocket:
tAdapter.deploy(adapter_cfg_dict)
tAdapter.start()

View File

@ -17,6 +17,7 @@
#define PUB_H_
#include <stdio.h>
#include <stdbool.h>
#include <ctype.h>
#include <inttypes.h>
#include <stdint.h>
@ -72,10 +73,11 @@ int8_t getConnMode(char *arg);
char* strToLowerCopy(const char *str);
int32_t parseDsn(char* dsn, char **host, char **port, char **user, char **pwd, char* error);
int32_t setConnMode(int8_t connMode, char *dsn);
int32_t setConnMode(int8_t connMode, char *dsn, bool show);
uint16_t defaultPort(int8_t connMode, char *dsn);
int8_t defaultMode(int8_t connMode, char *dsn);
// working connect mode
int8_t workingMode(int8_t connMode, char *dsn);
#endif // PUB_H_

View File

@ -219,9 +219,6 @@ static int32_t shellParseSingleOpt(int32_t key, char *arg) {
break;
#if defined(LINUX)
case 'o':
printf(" -o need todo optins.\n");
// need todo pass tsLogOutput to engine
/*
if (strlen(arg) >= PATH_MAX) {
printf("failed to set log output since length overflow, max length is %d\r\n", PATH_MAX);
return TSDB_CODE_INVALID_CFG;
@ -235,7 +232,6 @@ static int32_t shellParseSingleOpt(int32_t key, char *arg) {
printf("failed to expand log output: '%s' since %s\r\n", arg, tstrerror(terrno));
return terrno;
}
*/
break;
#endif
case 'E':

View File

@ -1355,17 +1355,22 @@ TAOS* createConnect(SShellArgs *pArgs) {
}
// connect main
TAOS * taos = NULL;
if (pArgs->auth) {
return taos_connect_auth(host, user, pArgs->auth, pArgs->database, port);
taos = taos_connect_auth(host, user, pArgs->auth, pArgs->database, port);
} else {
return taos_connect(host, user, pwd, pArgs->database, port);
taos = taos_connect(host, user, pwd, pArgs->database, port);
}
// host and user point into the dsnc buffer, so free it only after connecting
free(dsnc);
return taos;
}
int32_t shellExecute(int argc, char *argv[]) {
int32_t code = 0;
printf(shell.info.clientVersion, shell.info.cusName,
defaultMode(shell.args.connMode, shell.args.dsn) == CONN_MODE_NATIVE ? STR_NATIVE : STR_WEBSOCKET,
workingMode(shell.args.connMode, shell.args.dsn) == CONN_MODE_NATIVE ? STR_NATIVE : STR_WEBSOCKET,
taos_get_client_info(), shell.info.cusName);
fflush(stdout);
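
The change above replaces the two early "return taos_connect..." calls with assignments so that dsnc is always freed before the function exits; the early returns leaked it. A minimal sketch of the resulting single-exit pattern (the helper name connect_once is hypothetical; the real code first parses host/user/pwd out of the dsnc buffer):

TAOS *connect_once(char *dsnc, const char *host, const char *user,
                   const char *pwd, const char *db, uint16_t port) {
    TAOS *taos = taos_connect(host, user, pwd, db, port);
    free(dsnc);   // host/user may point into dsnc, so free only after connecting
    return taos;  // NULL on failure; caller checks taos_errno()/taos_errstr()
}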

View File

@ -101,7 +101,7 @@ int main(int argc, char *argv[]) {
return -1;
}
if (setConnMode(shell.args.connMode, shell.args.dsn)) {
if (setConnMode(shell.args.connMode, shell.args.dsn, false)) {
return -1;
}

View File

@ -91,7 +91,7 @@
}
// set conn mode
int32_t setConnMode(int8_t connMode, char *dsn) {
int32_t setConnMode(int8_t connMode, char *dsn, bool show) {
// check default
if (connMode == CONN_MODE_INVALID) {
if (dsn && dsn[0] != 0) {
@ -109,11 +109,16 @@ int32_t setConnMode(int8_t connMode, char *dsn) {
fprintf(stderr, "failed to load driver. since %s [0x%08X]\r\n", taos_errstr(NULL), taos_errno(NULL));
return code;
}
if (show) {
fprintf(stdout, "\nConnect mode is : %s\n\n", strMode);
}
return 0;
}
// default mode
int8_t defaultMode(int8_t connMode, char *dsn) {
int8_t workingMode(int8_t connMode, char *dsn) {
int8_t mode = connMode;
if (connMode == CONN_MODE_INVALID) {
// no input from command line or config
@ -129,10 +134,15 @@ int8_t defaultMode(int8_t connMode, char *dsn) {
// get default port
uint16_t defaultPort(int8_t connMode, char *dsn) {
// port 0 is default
return 0;
/*
// consistent with setConnMode
int8_t mode = defaultMode(connMode, dsn);
int8_t mode = workingMode(connMode, dsn);
// default port
return mode == CONN_MODE_NATIVE ? DEFAULT_PORT_NATIVE : DEFAULT_PORT_WS_LOCAL;
*/
}
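
Taken with the header change earlier, the split is: setConnMode() validates the requested mode, loads the matching driver, and, when show is true, prints the "Connect mode is : ..." banner, while workingMode() only resolves which mode is effectively in use after DSN and default fallbacks. A hedged sketch of the call sequence as the tools in this commit appear to use it (args is a stand-in for the per-tool argument struct; taosBenchmark and taosdump pass show=true, the shell passes false):

if (setConnMode(args.connMode, args.dsn, true) != 0) {
    return -1;  // driver failed to load
}
int8_t mode = workingMode(args.connMode, args.dsn);  // effective mode after fallbacks
printf("running in %s mode\n", mode == CONN_MODE_NATIVE ? "native" : "websocket");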

View File

@ -256,13 +256,13 @@ char* genPrepareSql(SSuperTable *stbInfo, char* tagData, uint64_t tableSeq, char
"INSERT INTO ? USING `%s`.`%s` TAGS (%s) %s VALUES(?,%s)",
db, stbInfo->stbName, tagQ, ttl, colQ);
} else {
if (g_arguments->connMode == CONN_MODE_NATIVE) {
if (workingMode(g_arguments->connMode, g_arguments->dsn) == CONN_MODE_NATIVE) {
// native
n = snprintf(prepare + len, TSDB_MAX_ALLOWED_SQL_LEN - len,
"INSERT INTO ? VALUES(?,%s)", colQ);
} else {
// websocket
bool ntb = stbInfo->tags == NULL || stbInfo->tags->size == 0; // nomral table
bool ntb = stbInfo->tags == NULL || stbInfo->tags->size == 0; // normal table
colNames = genColNames(stbInfo->cols, !ntb);
n = snprintf(prepare + len, TSDB_MAX_ALLOWED_SQL_LEN - len,
"INSERT INTO `%s`.`%s`(%s) VALUES(%s,%s)", db, stbInfo->stbName, colNames,

View File

@ -168,7 +168,7 @@ int main(int argc, char* argv[]) {
}
// conn mode
if (setConnMode(g_arguments->connMode, g_arguments->dsn) != 0) {
if (setConnMode(g_arguments->connMode, g_arguments->dsn, true) != 0) {
exitLog();
return -1;
}

View File

@ -150,6 +150,7 @@ static struct argp_option options[] = {
{"inspect", 'I', 0, 0,
"inspect avro file content and print on screen", 10},
{"no-escape", 'n', 0, 0, "No escape char '`'. Default is using it.", 10},
{"restful", 'R', 0, 0, "Use RESTful interface to connect server", 11},
{"cloud", 'C', "CLOUD_DSN", 0, OLD_DSN_DESC, 11},
{"timeout", 't', "SECONDS", 0, "The timeout seconds for "
"websocket to interact."},
@ -691,7 +692,9 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
}
g_args.thread_num = atoi((const char *)arg);
break;
case 'R':
warnPrint("%s\n", "'-R' is not supported, ignoring this option.");
break;
case 'C':
case 'X':
if (arg) {
@ -10910,7 +10913,7 @@ int main(int argc, char *argv[]) {
}
// conn mode
if (setConnMode(g_args.connMode, g_args.dsn) != 0) {
if (setConnMode(g_args.connMode, g_args.dsn, true) != 0) {
return -1;
}

View File

@ -87,7 +87,8 @@ int smlProcess_telnet_Test() {
const char *sql1[] = {"sys.if.bytes.out 1479496100 1.3E0 host=web01 interface=eth0",
"sys.if.bytes.out 1479496101 1.3E1 interface=eth0 host=web01 ",
"sys.if.bytes.out 1479496102 1.3E3 network=tcp",
" sys.procs.running 1479496100 42 host=web01 "};
" sys.procs.running 1479496100 42 host=web01 ",
" newline 1479496100 42 host=web\n01 t=fsb\n "};
// for(int i = 0; i < 4; i++){
// strncpy(sql[i], sql1[i], 128);
@ -2355,12 +2356,35 @@ int sml_td17324_Test() {
return code;
}
int smlProcess_34114_Test() {
TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_34114_db schemaless 1");
taos_free_result(pRes);
pRes = taos_query(taos, "use sml_34114_db");
taos_free_result(pRes);
char *sql = {"sys.if.bytes.out 1479496100 1.3E0 host=web01 interface=eth0 \nsys.if.bytes.out 1479496101 1.3E1 interface=eth0 host=web01 "};
int32_t totalRows = 0;
pRes = taos_schemaless_insert_raw(taos, sql, strlen(sql), &totalRows, TSDB_SML_TELNET_PROTOCOL,
TSDB_SML_TIMESTAMP_NANO_SECONDS);
printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
int code = taos_errno(pRes);
taos_free_result(pRes);
taos_close(taos);
return code;
}
int main(int argc, char *argv[]) {
if (argc == 2) {
taos_options(TSDB_OPTION_CONFIGDIR, argv[1]);
}
int ret = smlProcess_json0_Test();
int ret = smlProcess_34114_Test();
ASSERT(!ret);
ret = smlProcess_json0_Test();
ASSERT(!ret);
ret = sml_ts5528_test();
ASSERT(!ret);
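
For reference, smlProcess_34114_Test() above exercises taos_schemaless_insert_raw() with a single buffer holding two newline-separated OpenTSDB telnet lines (metric timestamp value tag=value ...). A minimal standalone version of the same call pattern, under the same assumptions as the tests (local server, root/taosdata, database already created):

TAOS *taos = taos_connect("localhost", "root", "taosdata", "sml_34114_db", 0);
char *lines = "sys.cpu.load 1479496100 0.5 host=web01\n"
              "sys.cpu.load 1479496101 0.7 host=web01";
int32_t rows = 0;
TAOS_RES *res = taos_schemaless_insert_raw(taos, lines, (int)strlen(lines), &rows,
                                           TSDB_SML_TELNET_PROTOCOL,
                                           TSDB_SML_TIMESTAMP_NANO_SECONDS);  // as in the test above
if (taos_errno(res) != 0) fprintf(stderr, "insert failed: %s\n", taos_errstr(res));
taos_free_result(res);
taos_close(taos);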