Merge branch 'develop' into test/TD-5369
commit b73a2f7070
@@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
 #INSTALL(TARGETS taos RUNTIME DESTINATION driver)
 #INSTALL(TARGETS shell RUNTIME DESTINATION .)
 IF (TD_MVN_INSTALLED)
-  INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-*-dist.jar DESTINATION connector/jdbc)
+  INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.34-dist.jar DESTINATION connector/jdbc)
 ENDIF ()
 ELSEIF (TD_DARWIN)
 SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
@@ -179,16 +179,14 @@ taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
 | | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | **UnionTech UOS** | **Galaxy Kylin/NeoKylin** | **LINX V60/V80** | **Huawei EulerOS** |
 | --- | --- | --- | --- | --- | --- | --- | --- |
 | X64 | ● | ● | | ○ | ● | ● | ● |
 | Raspberry Pi ARM32 | | ● | ● | | | | |
 | Loongson MIPS64 | | | ● | | | | |
 | Kunpeng ARM64 | | ○ | ○ | | ● | | |
 | SWCPU Alpha64 | | | ○ | ● | | | |
 | FT ARM64 | | ○ Ubuntu Kylin | | | | | |
 | Hygon X64 | ● | ● | ● | ○ | ● | ● | |
+| Rockchip ARM64/32 | | | ○ | | | | |
+| Allwinner ARM64/32 | | | ○ | | | | |
+| Actions ARM64/32 | | | ○ | | | | |
 | TI ARM32 | | | ○ | | | | |
-| Rockchip ARM64 | | | ○ | | | | |
-| Allwinner ARM64 | | | ○ | | | | |
-| Actions ARM64 | | | ○ | | | | |
 | Huawei Cloud ARM64 | | | | | | | ● |

 Note: ● has been verified by official tests; ○ has been verified by unofficial tests.
@@ -208,7 +208,7 @@ TDengine's default timestamp precision is milliseconds, but this can be changed when executing CREATE DATABASE by passing

 Note: wildcards can be used in LIKE to match names, and the wildcard string can be at most 24 bytes long.

-Wildcard matching: 1) '%' (percent sign) matches 0 to any number of characters; 2) '\_' (underscore) matches a single arbitrary character. (To match an underscore that literally appears in a table name, it can be escaped with a backslash, i.e. '\\\_' matches a literal underscore in the table name.)
+Wildcard matching: 1) '%' (percent sign) matches 0 to any number of characters; 2) '\_' (underscore) matches a single arbitrary character.

 - **Show the creation statement of a table**
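As a minimal sketch of the wildcard matching described above (the table names are hypothetical):

```mysql
SHOW TABLES LIKE 'd10%';   -- '%' matches zero or more characters: d10, d100, d1001, ...
SHOW TABLES LIKE 'd100_';  -- '_' matches exactly one character: d1001, d1002, ...
```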
@@ -715,7 +715,7 @@ Query OK, 1 row(s) in set (0.001091s)
 2. To apply range filters on multiple fields at once, the conditions must be joined with the keyword AND; joining filter conditions on different columns with OR is not yet supported.
 3. For filtering on a single field: if it is a time filter, only one such condition can be set in a statement; for other (ordinary) columns or tag columns, `OR` can be used to combine filter conditions, e.g. `((value > 20 AND value < 30) OR (value < 12))`.
 4. Since version 2.0.17.0, filtering supports the BETWEEN AND syntax; e.g. `WHERE col2 BETWEEN 1.5 AND 3.25` means the condition "1.5 ≤ col2 ≤ 3.25".
-5. Since version 2.1.4.0, filtering supports the IN operator, e.g. `WHERE city IN ('Beijing', 'Shanghai')`. Note: BOOL values can be written as `{true, false}` or `{0, 1}`, but not as integers other than 0 and 1; FLOAT and DOUBLE values are subject to floating-point precision, and a value in the set matches a row value only if they are equal within the precision range.<!-- REPLACE_OPEN_TO_ENTERPRISE__IN_OPERATOR_AND_UNSIGNED_INTEGER -->
+5. Since version 2.1.4.0, filtering supports the IN operator, e.g. `WHERE city IN ('Beijing', 'Shanghai')`. Note: BOOL values can be written as `{true, false}` or `{0, 1}`, but not as integers other than 0 and 1; FLOAT and DOUBLE values are subject to floating-point precision, and a value in the set matches a row value only if they are equal within the precision range; the TIMESTAMP type is supported for non-primary-key columns.<!-- REPLACE_OPEN_TO_ENTERPRISE__IN_OPERATOR_AND_UNSIGNED_INTEGER -->

 <!--
 <a class="anchor" id="having"></a>
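A minimal sketch of the BETWEEN and IN filters described in points 4 and 5 (table and column names are hypothetical):

```mysql
SELECT * FROM meters WHERE col2 BETWEEN 1.5 AND 3.25;        -- 1.5 <= col2 <= 3.25 (since 2.0.17.0)
SELECT * FROM meters WHERE city IN ('Beijing', 'Shanghai');  -- set membership (since 2.1.4.0)
```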
@@ -1338,7 +1338,8 @@ SELECT function_list FROM stb_name
 - Filtering, aggregation, and other operations are executed on each partitioned window as an independent unit. Aggregation queries currently support three ways of partitioning windows:
 1. Time window: the width of the aggregation window is specified by the keyword INTERVAL, with a minimum interval of 10 milliseconds (10a); an offset is also supported (the offset must be smaller than the interval), i.e. the offset of the window partitioning relative to "UTC moment 0". The SLIDING clause specifies the forward increment of the window, i.e. how far the window slides forward each time. When SLIDING equals INTERVAL, the sliding window becomes a tumbling window.
+ * Since version 2.1.5.0, the minimum interval allowed by INTERVAL is lowered to 1 microsecond (1u); if the time precision of the queried DATABASE is milliseconds, the minimum allowed interval is 1 millisecond (1a).
-2. State window: an integer (boolean) or string value identifies the state of the device when a record is generated; records with the same state value belong to the same state window, and the window closes when the value changes. The column holding the state value is passed as the parameter of the STATE_WINDOW clause.
 * **Note:** when INTERVAL is used, except in very special cases, the timezone parameter in the taos.cfg of both client and server should be set to the same value, to avoid the severe performance cost of time functions repeatedly converting across time zones.
+2. State window: an integer or boolean value identifies the state of the device when a record is generated; records with the same state value belong to the same state window, and the window closes when the value changes. The column holding the state value is passed as the parameter of the STATE_WINDOW clause.
 3. Session window: the column holding the timestamps is specified by the ts_col parameter of the SESSION clause; whether two adjacent records belong to the same session is decided by the difference of their timestamps. If the difference is within tol_val, both records belong to the same window; if it exceeds tol_val, the next window starts automatically.
 - The WHERE clause can specify the start and end time of the query as well as other filter conditions.
 - The FILL clause specifies the fill mode for a window in which data is missing. The fill modes include the following:
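For illustration, a minimal time-window aggregation of the kind described above, reusing the demo table from the hunk heading (the time range is hypothetical):

```mysql
SELECT avg(current), max(voltage) FROM test.d10
  WHERE ts >= '2021-07-01 00:00:00' AND ts <= '2021-07-02 00:00:00'
  INTERVAL(10s) SLIDING(5s) FILL(PREV);  -- 10s windows, sliding forward 5s at a time
```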
@@ -11,7 +11,7 @@ One of the modules of TDengine is the time-series database. However, in addition
 - **Full-stack time-series data processing engine**: Integrate database, message queue, cache, stream computing, and other functions, and the applications do not need to integrate with software such as Kafka/Redis/HBase/Spark/HDFS, thus greatly reducing the complexity cost of application development and maintenance.
 - **Powerful analysis functions**: Data from ten years ago or one second ago, can all be queried based on a specified time range. Data can be aggregated on a timeline or multiple devices. Ad-hoc queries can be made at any time through Shell, Python, R, and MATLAB.
 - **Seamless connection with third-party tools**: Integration with Telegraf, Grafana, EMQ, HiveMQ, Prometheus, MATLAB, R, etc. without even one single line of code. OPC, Hadoop, Spark, etc. will be supported in the future, and more BI tools will be seamlessly connected to.
-- **Zero operation cost & zero learning cost**: Installing clusters is simple and quick, with real-time backup built-in, and no need to split libraries or tables. Similar to standard SQL, TDengine can support RESTful, Python/Java/C/C + +/C#/Go/Node.js, and similar to MySQL with zero learning cost.
+- **Zero operation cost & zero learning cost**: Installing clusters is simple and quick, with real-time backup built-in, and no need to split libraries or tables. Similar to standard SQL, TDengine can support RESTful, Python/Java/C/C++/C#/Go/Node.js, and similar to MySQL with zero learning cost.

 With TDengine, the total cost of ownership of typical IoT, Internet of Vehicles, and Industrial Internet Big Data platforms can be greatly reduced. However, it should be pointed out that due to making full use of the characteristics of IoT time-series data, TDengine cannot be used to process general data from web crawlers, microblogs, WeChat, e-commerce, ERP, CRM, and other sources.
@@ -188,16 +188,14 @@ List of platforms supported by TDengine server
 | | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | UnionTech UOS | NeoKylin | LINX V60/V80 |
 | --- | --- | --- | --- | --- | --- | --- |
 | X64 | ● | ● | | ○ | ● | ● |
 | Raspberry ARM32 | | ● | ● | | | |
 | Loongson MIPS64 | | | ● | | | |
 | Kunpeng ARM64 | | ○ | ○ | | ● | |
 | SWCPU Alpha64 | | | ○ | ● | | |
 | FT ARM64 | | ○ Ubuntu Kylin | | | | |
 | Hygon X64 | ● | ● | ● | ○ | ● | ● |
+| Rockchip ARM64/32 | | | ○ | | | |
+| Allwinner ARM64/32 | | | ○ | | | |
+| Actions ARM64/32 | | | ○ | | | |
 | TI ARM32 | | | ○ | | | |
-| Rockchip ARM64 | | | ○ | | | |
-| Allwinner ARM64 | | | ○ | | | |
-| Actions ARM64 | | | ○ | | | |

 Note: ● has been verified by official tests; ○ has been verified by unofficial tests.
@@ -172,7 +172,7 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc

 **Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high reliability of the system. The virtual node group is managed in a master/slave structure. Write operations can only be performed on the master vnode, and the system synchronizes data to the slave vnode via replication, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter replica when creating DB, and the default is 1. Using the multi-replica feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. A virtual node group is created and managed by the management node, and the management node assigns a system-unique ID, aka VGroup ID. If two virtual nodes have the same vnode group ID, it means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused.

-**TAOSC**: TAOSC is the driver provided by TDengine to applications, which is responsible for dealing with the interface interaction between application and cluster, and provides the native interface of C/C + + language, which is embedded in JDBC, C #, Python, Go, Node.js language connection libraries. Applications interact with the whole cluster through taosc instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, taosc also need to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C + +/C #/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, taosc has a running instance on each dnode of TDengine cluster.
+**TAOSC**: TAOSC is the driver provided by TDengine to applications, which is responsible for dealing with the interface interaction between application and cluster, and provides the native interface of C/C++ language, which is embedded in JDBC, C #, Python, Go, Node.js language connection libraries. Applications interact with the whole cluster through taosc instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, taosc also need to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C++/C #/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, taosc has a running instance on each dnode of TDengine cluster.

 ### Node Communication
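A minimal illustration of the replica parameter mentioned above (the database name is hypothetical):

```mysql
CREATE DATABASE demo REPLICA 3;  -- keep 3 copies of the data; the cluster then needs at least 3 data nodes
```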
@@ -4,7 +4,7 @@ TDengine supports multiple interfaces to write data, including SQL, Prometheus,

 ## <a class="anchor" id="sql"></a> SQL Writing

-Applications insert data by executing SQL insert statements through C/C + +, JDBC, GO, or Python Connector, and users can manually enter SQL insert statements to insert data through TAOS Shell. For example, the following insert writes a record to table d1001:
+Applications insert data by executing SQL insert statements through C/C++, JDBC, GO, or Python Connector, and users can manually enter SQL insert statements to insert data through TAOS Shell. For example, the following insert writes a record to table d1001:

 ```mysql
 INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);
@@ -2,7 +2,7 @@

 ## <a class="anchor" id="queries"></a> Main Query Features

-TDengine uses SQL as the query language. Applications can send SQL statements through C/C + +, Java, Go, Python connectors, and users can manually execute SQL Ad-Hoc Query through the Command Line Interface (CLI) tool TAOS Shell provided by TDengine. TDengine supports the following query functions:
+TDengine uses SQL as the query language. Applications can send SQL statements through C/C++, Java, Go, Python connectors, and users can manually execute SQL Ad-Hoc Query through the Command Line Interface (CLI) tool TAOS Shell provided by TDengine. TDengine supports the following query functions:

 - Single-column and multi-column data query
 - Multiple filters for tags and numeric values: >, <, =, <>, like, etc
@@ -22,7 +22,7 @@ Note: ● stands for that has been verified by official tests; ○ stands for th
 Note:

 - To access the TDengine database through connectors (except RESTful) in a system without TDengine server software, it is necessary to install the corresponding version of the client installation package so that the application driver (the file name is libtaos.so on Linux and taos.dll on Windows) is installed in the system; otherwise, an error that the corresponding library file cannot be found will occur.
-- All APIs that execute SQL statements, such as `tao_query`, `taos_query_a`, `taos_subscribe` in C/C + + Connector, and APIs corresponding to them in other languages, can only execute one SQL statement at a time. If the actual parameters contain multiple statements, their behavior is undefined.
+- All APIs that execute SQL statements, such as `tao_query`, `taos_query_a`, `taos_subscribe` in C/C++ Connector, and APIs corresponding to them in other languages, can only execute one SQL statement at a time. If the actual parameters contain multiple statements, their behavior is undefined.
 - Users upgrading to TDengine 2.0.8.0 must update the JDBC connection. TDengine must upgrade taos-jdbcdriver to 2.0.12 and above.
 - No matter which programming language connector is selected, TDengine version 2.0 and above recommends that each thread of database application establish an independent connection or establish a connection pool based on threads to avoid mutual interference between threads of "USE statement" state variables in the connection (but query and write operations of the connection are thread-safe).
@@ -152,7 +152,7 @@ Under cmd, enter the c:\tdengine directory and directly execute taos.exe, and y
 | **OS Type** | Linux | Win64 | Win32 | Linux | Linux |
 | **Supported or Not** | Yes | **Yes** | **Yes** | **Yes** | **In development** |

-The C/C + + API is similar to MySQL's C API. When application use it, it needs to include the TDengine header file taos.h (after installed, it is located in /usr/local/taos/include):
+The C/C++ API is similar to MySQL's C API. When application use it, it needs to include the TDengine header file taos.h (after installed, it is located in /usr/local/taos/include):

 ```C
 #include <taos.h>
@@ -923,7 +923,7 @@ Manually install the following tools:

 If the steps above cannot be performed successfully, you can refer to Microsoft's Node.js User Manual [Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules).

-If you use ARM64 Node.js on Windows 10 ARM, you also need to add "Visual C + + compilers and libraries for ARM64" and "Visual C + + ATL for ARM64".
+If you use ARM64 Node.js on Windows 10 ARM, you also need to add "Visual C++ compilers and libraries for ARM64" and "Visual C++ ATL for ARM64".

 #### Sample
@@ -240,7 +240,7 @@ Client configuration parameters:
 ```sql
 SELECT count(*) FROM table_name WHERE TS<1554984068000;
 ```
-In order to avoid the uncertainty caused by using string time format, Unix timestamp can also be used directly. In addition, timestamp strings with time zones can also be used in SQL statements, such as: timestamp strings in RFC3339 format, 2013-04-12T15: 52: 01.123 +08:00, or ISO-8601 format timestamp strings 2013-04-12T15: 52: 01.123 +0800. The conversion of the above two strings into Unix timestamps is not affected by the time zone in which the system is located.
+In order to avoid the uncertainty caused by using string time format, Unix timestamp can also be used directly. In addition, timestamp strings with time zones can also be used in SQL statements, such as: timestamp strings in RFC3339 format, 2013-04-12T15:52:01.123+08:00, or ISO-8601 format timestamp strings 2013-04-12T15:52:01.123+0800. The conversion of the above two strings into Unix timestamps is not affected by the time zone in which the system is located.

 When starting taos, you can also specify an end point for an instance of taosd from the command line, otherwise read from taos.cfg.
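A minimal sketch of the two timezone-qualified timestamp formats described above, using the table from the surrounding example:

```mysql
SELECT count(*) FROM table_name WHERE ts < '2013-04-12T15:52:01.123+08:00';  -- RFC3339
SELECT count(*) FROM table_name WHERE ts < '2013-04-12T15:52:01.123+0800';   -- ISO-8601
```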
@@ -1132,7 +1132,7 @@ TDengine supports aggregations over data, they are listed below:
 ```
 Function: Return the difference between the max value and the min value of a column in statistics /STable.

-Return Data Type: Same as applicable fields.
+Return Data Type: Double.

 Applicable Fields: All types except binary, nchar, bool.
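The description (max minus min of a column) matches TDengine's SPREAD function; a minimal usage sketch under that assumption (table and column are hypothetical):

```mysql
SELECT SPREAD(voltage) FROM meters;  -- max(voltage) - min(voltage), returned as a double
```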
@@ -59,7 +59,7 @@ pkg_name=${install_dir}-${osType}-${cpuType}
 #   exit 1
 # fi

-if [ "$verType" == "beta" ]; then
+if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
   pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
 elif [ "$verType" == "stable" ]; then
   pkg_name=${pkg_name}
@@ -182,7 +182,7 @@ pkg_name=${install_dir}-${osType}-${cpuType}
 #   exit 1
 # fi

-if [ "$verType" == "beta" ]; then
+if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
   pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
 elif [ "$verType" == "stable" ]; then
   pkg_name=${pkg_name}
@@ -215,7 +215,7 @@ pkg_name=${install_dir}-${osType}-${cpuType}
 #   exit 1
 # fi

-if [ "$verType" == "beta" ]; then
+if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
   pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
 elif [ "$verType" == "stable" ]; then
   pkg_name=${pkg_name}
@@ -273,7 +273,7 @@ void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo);

 int tscGetSTableVgroupInfo(SSqlObj* pSql, SQueryInfo* pQueryInfo);
 int tscGetTableMeta(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo);
-int tscGetTableMetaEx(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, bool createIfNotExists);
+int tscGetTableMetaEx(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo, bool createIfNotExists, bool onlyLocal);
 int32_t tscGetUdfFromNode(SSqlObj *pSql, SQueryInfo* pQueryInfo);

 void tscResetForNextRetrieve(SSqlRes* pRes);
@@ -344,6 +344,7 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta* pChild, const char* name, v
 STableMeta* tscTableMetaDup(STableMeta* pTableMeta);
 SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo);

+int32_t tscGetColFilterSerializeLen(SQueryInfo* pQueryInfo);
 int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr);
 void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pOperator, char* sql, void* addr, int32_t stage, uint64_t qId);
@@ -1482,7 +1482,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
     return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
   }

-  code = tscGetTableMetaEx(pSql, pTableMetaInfo, true);
+  code = tscGetTableMetaEx(pSql, pTableMetaInfo, true, false);
   if (TSDB_CODE_TSC_ACTION_IN_PROGRESS == code) {
     return code;
   }
@@ -1493,7 +1493,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
   }

   sql = sToken.z;
-  code = tscGetTableMetaEx(pSql, pTableMetaInfo, false);
+  code = tscGetTableMetaEx(pSql, pTableMetaInfo, false, false);
   if (pInsertParam->sql == NULL) {
     assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS);
   }
(File diff suppressed because it is too large)
@@ -1640,7 +1640,7 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
   memcpy(&pTableMetaInfo->name, &fullname, sizeof(fullname));

-  code = tscGetTableMeta(pSql, pTableMetaInfo);
+  code = tscGetTableMetaEx(pSql, pTableMetaInfo, false, true);
   if (code != TSDB_CODE_SUCCESS) {
     STMT_RET(code);
   }
@@ -16,6 +16,7 @@
 #include "os.h"
 #include "tscLog.h"
 #include "tsclient.h"
 #include "tsocket.h"
 #include "ttimer.h"
 #include "tutil.h"
+#include "taosmsg.h"
@@ -252,6 +253,16 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
   //pQdesc->useconds = htobe64(pSql->res.useconds);
   pQdesc->useconds = htobe64(now - pSql->stime);
   pQdesc->qId = htobe64(pSql->res.qId);
   pQdesc->sqlObjId = htobe64(pSql->self);
+  pQdesc->pid = pHeartbeat->pid;
+  if (pSql->cmd.pQueryInfo->stableQuery == true) {
+    pQdesc->numOfSub = pSql->subState.numOfSub;
+  } else {
+    pQdesc->numOfSub = 1;
+  }
+  pQdesc->numOfSub = htonl(pQdesc->numOfSub);
+
+  taosGetFqdn(pQdesc->fqdn);

   pHeartbeat->numOfQueries++;
   pQdesc++;
@@ -1947,10 +1947,11 @@ bool isValidDistinctSql(SQueryInfo* pQueryInfo) {
   if (pQueryInfo == NULL) {
     return false;
   }
-  if ((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_QUERY) != TSDB_QUERY_TYPE_STABLE_QUERY) {
+  if ((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_QUERY) != TSDB_QUERY_TYPE_STABLE_QUERY
+      && (pQueryInfo->type & TSDB_QUERY_TYPE_TABLE_QUERY) != TSDB_QUERY_TYPE_TABLE_QUERY) {
     return false;
   }
-  if (tscQueryTags(pQueryInfo) && tscNumOfExprs(pQueryInfo) == 1){
+  if (tscNumOfExprs(pQueryInfo) == 1){
     return true;
   }
   return false;
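The relaxed check above accepts DISTINCT on a single column of an ordinary table, not only on a tag of a super table; a hypothetical query that the new code admits:

```mysql
SELECT DISTINCT voltage FROM d1001;  -- distinct on an ordinary column
```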
@@ -2046,7 +2047,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
   const char* msg1 = "too many items in selection clause";
   const char* msg2 = "functions or others can not be mixed up";
   const char* msg3 = "not support query expression";
-  const char* msg4 = "only support distinct one tag";
+  const char* msg4 = "only support distinct one column or tag";
   const char* msg5 = "invalid function name";

   // too many result columns not support order by in query
@@ -2106,13 +2107,13 @@
   }

   if (hasDistinct == true) {
-    if (!isValidDistinctSql(pQueryInfo)) {
+    if (!isValidDistinctSql(pQueryInfo) ) {
       return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
     }

-    pQueryInfo->distinctTag = true;
+    pQueryInfo->distinct = true;
   }

   // there is only one user-defined column in the final result field, add the timestamp column.
   size_t numOfSrcCols = taosArrayGetSize(pQueryInfo->colList);
   if ((numOfSrcCols <= 0 || !hasNoneUserDefineExpr(pQueryInfo)) && !tscQueryTags(pQueryInfo) && !tscQueryBlockInfo(pQueryInfo)) {
@@ -3976,8 +3977,10 @@ static int32_t getTablenameCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr*

 static int32_t getColumnQueryCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, int32_t relOptr) {
   if (pExpr == NULL) {
+    pQueryInfo->onlyHasTagCond &= true;
     return TSDB_CODE_SUCCESS;
   }
+  pQueryInfo->onlyHasTagCond &= false;

   if (!tSqlExprIsParentOfLeaf(pExpr)) {  // internal node
     int32_t ret = getColumnQueryCondInfo(pCmd, pQueryInfo, pExpr->pLeft, pExpr->tokenId);
@@ -4104,6 +4107,7 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS

 static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr) {
   if (pExpr == NULL) {
+    pQueryInfo->onlyHasTagCond &= true;
     return TSDB_CODE_SUCCESS;
   }
@@ -4783,8 +4787,11 @@ static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlE
   int32_t code = 0;

   if (pExpr == NULL) {
+    pQueryInfo->onlyHasTagCond &= true;
     return TSDB_CODE_SUCCESS;
   }
+  pQueryInfo->onlyHasTagCond &= false;
+

   if (!tSqlExprIsParentOfLeaf(pExpr)) {
     if (pExpr->tokenId == TK_OR) {
@@ -4833,11 +4840,13 @@ static int32_t validateJoinExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr

   if (!QUERY_IS_JOIN_QUERY(pQueryInfo->type)) {
     if (pQueryInfo->numOfTables == 1) {
+      pQueryInfo->onlyHasTagCond &= true;
       return TSDB_CODE_SUCCESS;
     } else {
       return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
     }
   }
+  pQueryInfo->onlyHasTagCond &= false;

   STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
   if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {  // for stable join, tag columns
@@ -5184,6 +5193,7 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq
     return ret;
   }

   // 3. get the tag query condition
   if ((ret = getTagQueryCondExpr(&pSql->cmd, pQueryInfo, &condExpr, pExpr)) != TSDB_CODE_SUCCESS) {
     return ret;
@@ -5492,7 +5502,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
   STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);

-  if (pQueryInfo->distinctTag == true) {
+  if (pQueryInfo->distinct == true) {
     pQueryInfo->order.order = TSDB_ORDER_ASC;
     pQueryInfo->order.orderColId = 0;
     return TSDB_CODE_SUCCESS;
@@ -6020,7 +6030,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
   int16_t i;
   uint32_t nLen = 0;
   for (i = 0; i < numOfColumns; ++i) {
-    nLen += pSchema[i].colId != columnIndex.columnIndex ? pSchema[i].bytes : pItem->bytes;
+    nLen += (i != columnIndex.columnIndex) ? pSchema[i].bytes : pItem->bytes;
   }
   if (nLen >= TSDB_MAX_BYTES_PER_ROW) {
     return invalidOperationMsg(pMsg, msg24);
@@ -6066,14 +6076,14 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
     return invalidOperationMsg(pMsg, msg22);
   }

-  SSchema* pSchema = (SSchema*) pTableMetaInfo->pTableMeta->schema;
-  int16_t numOfColumns = pTableMetaInfo->pTableMeta->tableInfo.numOfColumns;
+  SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
+  int16_t numOfTags = tscGetNumOfTags(pTableMetaInfo->pTableMeta);
   int16_t i;
   uint32_t nLen = 0;
-  for (i = 0; i < numOfColumns; ++i) {
-    nLen += pSchema[i].colId != columnIndex.columnIndex ? pSchema[i].bytes : pItem->bytes;
+  for (i = 0; i < numOfTags; ++i) {
+    nLen += (i != columnIndex.columnIndex) ? pSchema[i].bytes : pItem->bytes;
   }
-  if (nLen >= TSDB_MAX_BYTES_PER_ROW) {
+  if (nLen >= TSDB_MAX_TAGS_LEN) {
     return invalidOperationMsg(pMsg, msg24);
   }
@@ -8565,7 +8575,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
     if (validateGroupbyNode(pQueryInfo, pSqlNode->pGroupby, pCmd) != TSDB_CODE_SUCCESS) {
       return TSDB_CODE_TSC_INVALID_OPERATION;
     }

+    pQueryInfo->onlyHasTagCond = true;
     // set where info
     if (pSqlNode->pWhere != NULL) {
       if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) {
@@ -8588,6 +8598,10 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
       return TSDB_CODE_TSC_INVALID_OPERATION;
     }

+    if (isSTable && tscQueryTags(pQueryInfo) && pQueryInfo->distinct && !pQueryInfo->onlyHasTagCond) {
+      return TSDB_CODE_TSC_INVALID_OPERATION;
+    }
+
     // parse the window_state
     if (validateStateWindowNode(pCmd, pQueryInfo, pSqlNode, isSTable) != TSDB_CODE_SUCCESS) {
       return TSDB_CODE_TSC_INVALID_OPERATION;
@@ -632,7 +632,7 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql) {
   SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);

   int32_t srcColListSize = (int32_t)(taosArrayGetSize(pQueryInfo->colList) * sizeof(SColumnInfo));
+  int32_t srcColFilterSize = tscGetColFilterSerializeLen(pQueryInfo);
   size_t numOfExprs = tscNumOfExprs(pQueryInfo);
   int32_t exprSize = (int32_t)(sizeof(SSqlExpr) * numOfExprs * 2);
@@ -653,7 +653,7 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql) {
     tableSerialize = totalTables * sizeof(STableIdInfo);
   }

-  return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + exprSize + tsBufSize +
+  return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + srcColFilterSize + exprSize + tsBufSize +
          tableSerialize + sqlLen + 4096 + pQueryInfo->bufLen;
 }
@@ -2776,7 +2776,7 @@ int32_t getMultiTableMetaFromMnode(SSqlObj *pSql, SArray* pNameList, SArray* pVg
   return code;
 }

-int32_t tscGetTableMetaImpl(SSqlObj* pSql, STableMetaInfo *pTableMetaInfo, bool autocreate) {
+int32_t tscGetTableMetaImpl(SSqlObj* pSql, STableMetaInfo *pTableMetaInfo, bool autocreate, bool onlyLocal) {
   assert(tIsValidName(&pTableMetaInfo->name));

   uint32_t size = tscGetTableMetaMaxSize();
@@ -2822,15 +2822,20 @@ int32_t tscGetTableMetaImpl(SSqlObj* pSql, STableMetaInfo *pTableMetaInfo, bool
     }
     return TSDB_CODE_SUCCESS;
   }

+  if (onlyLocal) {
+    return TSDB_CODE_TSC_NO_META_CACHED;
+  }
+
   return getTableMetaFromMnode(pSql, pTableMetaInfo, autocreate);
 }

 int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
-  return tscGetTableMetaImpl(pSql, pTableMetaInfo, false);
+  return tscGetTableMetaImpl(pSql, pTableMetaInfo, false, false);
 }

-int tscGetTableMetaEx(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo, bool createIfNotExists) {
-  return tscGetTableMetaImpl(pSql, pTableMetaInfo, createIfNotExists);
+int tscGetTableMetaEx(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo, bool createIfNotExists, bool onlyLocal) {
+  return tscGetTableMetaImpl(pSql, pTableMetaInfo, createIfNotExists, onlyLocal);
 }

 int32_t tscGetUdfFromNode(SSqlObj *pSql, SQueryInfo* pQueryInfo) {
@@ -2892,7 +2892,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
   tscDebug("0x%"PRIx64" sub:0x%"PRIx64" retrieve numOfRows:%d totalNumOfRows:%" PRIu64 " from ep:%s, orderOfSub:%d",
            pParentSql->self, pSql->self, pRes->numOfRows, pState->numOfRetrievedRows, pSql->epSet.fqdn[pSql->epSet.inUse], idx);

-  if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0) && !(tscGetQueryInfo(&pParentSql->cmd)->distinctTag)) {
+  if (num > tsMaxNumOfOrderedResults && /*tscIsProjectionQueryOnSTable(pQueryInfo, 0) &&*/ !(tscGetQueryInfo(&pParentSql->cmd)->distinct)) {
     tscError("0x%"PRIx64" sub:0x%"PRIx64" num of OrderedRes is too many, max allowed:%" PRId32 " , current:%" PRId64,
              pParentSql->self, pSql->self, tsMaxNumOfOrderedResults, num);
     tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_SORTED_RES_TOO_MANY);
@@ -4580,6 +4580,22 @@ static int32_t createTagColumnInfo(SQueryAttr* pQueryAttr, SQueryInfo* pQueryInf
   return TSDB_CODE_SUCCESS;
 }

+int32_t tscGetColFilterSerializeLen(SQueryInfo* pQueryInfo) {
+  int16_t numOfCols = (int16_t)taosArrayGetSize(pQueryInfo->colList);
+  int32_t len = 0;
+
+  for(int32_t i = 0; i < numOfCols; ++i) {
+    SColumn* pCol = taosArrayGetP(pQueryInfo->colList, i);
+    for (int32_t j = 0; j < pCol->info.flist.numOfFilters; ++j) {
+      len += sizeof(SColumnFilterInfo);
+      if (pCol->info.flist.filterInfo[j].filterstr) {
+        len += (int32_t)pCol->info.flist.filterInfo[j].len + 1 * TSDB_NCHAR_SIZE;
+      }
+    }
+  }
+  return len;
+}
+
 int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr) {
   memset(pQueryAttr, 0, sizeof(SQueryAttr));
@@ -4598,7 +4614,7 @@
   pQueryAttr->queryBlockDist = isBlockDistQuery(pQueryInfo);
   pQueryAttr->pointInterpQuery = tscIsPointInterpQuery(pQueryInfo);
   pQueryAttr->timeWindowInterpo = timeWindowInterpoRequired(pQueryInfo);
-  pQueryAttr->distinctTag = pQueryInfo->distinctTag;
+  pQueryAttr->distinct = pQueryInfo->distinct;
   pQueryAttr->sw = pQueryInfo->sessionWindow;
   pQueryAttr->stateWindow = pQueryInfo->stateWindow;
@@ -210,8 +210,8 @@ extern int32_t debugFlag;
 extern char     lossyColumns[];
 extern double   fPrecision;
 extern double   dPrecision;
-extern uint32_t maxIntervals;
-extern uint32_t intervals;
+extern uint32_t maxRange;
+extern uint32_t curRange;
 extern char     Compressor[];
 #endif
@@ -252,8 +252,8 @@ char lossyColumns[32] = ""; // "float|double" means all float and double columns
 // below option can take effect when tsLossyColumns not empty
 double   fPrecision = 1E-8;   // float column precision
 double   dPrecision = 1E-16;  // double column precision
-uint32_t maxIntervals = 500;  // max intervals
-uint32_t intervals = 100;     // intervals
+uint32_t maxRange = 500;      // max range
+uint32_t curRange = 100;      // range
 char     Compressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR
 #endif
@@ -1565,8 +1565,8 @@ static void doInitGlobalConfig(void) {
   cfg.unitType = TAOS_CFG_UTYPE_NONE;
   taosInitConfigOption(cfg);

-  cfg.option = "maxIntervals";
-  cfg.ptr = &maxIntervals;
+  cfg.option = "maxRange";
+  cfg.ptr = &maxRange;
   cfg.valType = TAOS_CFG_VTYPE_INT32;
   cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
   cfg.minValue = 0;
@@ -1575,8 +1575,8 @@ static void doInitGlobalConfig(void) {
   cfg.unitType = TAOS_CFG_UTYPE_NONE;
   taosInitConfigOption(cfg);

-  cfg.option = "intervals";
-  cfg.ptr = &intervals;
+  cfg.option = "range";
+  cfg.ptr = &curRange;
   cfg.valType = TAOS_CFG_VTYPE_INT32;
   cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
   cfg.minValue = 0;
@@ -38,7 +38,11 @@ const int32_t TYPE_BYTES[15] = {

 #define DO_STATICS(__sum, __min, __max, __minIndex, __maxIndex, _list, _index) \
   do {                                            \
+    if (_list[(_index)] >= (INT64_MAX - (__sum))) { \
+      __sum = INT64_MAX;                          \
+    } else {                                      \
     (__sum) += (_list)[(_index)];                 \
+    }                                             \
     if ((__min) > (_list)[(_index)]) {            \
       (__min) = (_list)[(_index)];                \
       (__minIndex) = (_index);                    \
@@ -1 +1 @@
-Subproject commit 3530c6df097134a410bacec6b3cd013ef38a61aa
+Subproject commit 4a4d79099b076b8ff12d5b4fdbcba54049a6866d
@@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
   ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
     POST_BUILD
     COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
-    COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-*-dist.jar ${LIBRARY_OUTPUT_PATH}
+    COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.34-dist.jar ${LIBRARY_OUTPUT_PATH}
    COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
    COMMENT "build jdbc driver")
   ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})
@@ -5,7 +5,7 @@

 <groupId>com.taosdata.jdbc</groupId>
 <artifactId>taos-jdbcdriver</artifactId>
-<version>2.0.32</version>
+<version>2.0.34</version>
 <packaging>jar</packaging>

 <name>JDBCDriver</name>
@@ -3,7 +3,7 @@
 <modelVersion>4.0.0</modelVersion>
 <groupId>com.taosdata.jdbc</groupId>
 <artifactId>taos-jdbcdriver</artifactId>
-<version>2.0.33</version>
+<version>2.0.34</version>
 <packaging>jar</packaging>
 <name>JDBCDriver</name>
 <url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
@@ -130,7 +130,7 @@ public abstract class TSDBConstants {
       case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
         return Types.NCHAR;
     }
-    throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE);
+    throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE);
   }

   public static String taosType2JdbcTypeName(int taosType) throws SQLException {
@@ -160,7 +160,7 @@ public abstract class TSDBConstants {
       case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
         return "NCHAR";
       default:
-        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE);
+        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE);
     }
   }
@@ -31,8 +31,8 @@ public class TSDBError {
     TSDBErrorMap.put(TSDBErrorNumbers.ERROR_URL_NOT_SET, "url is not set");
     TSDBErrorMap.put(TSDBErrorNumbers.ERROR_INVALID_SQL, "invalid sql");
     TSDBErrorMap.put(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE, "numeric value out of range");
-    TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE, "unknown taos type in tdengine");
-    TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PERCISION, "unknown timestamp precision");
+    TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE, "unknown taos type in tdengine");
+    TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PRECISION, "unknown timestamp precision");

     TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN, "unknown error");
@@ -25,8 +25,10 @@ public class TSDBErrorNumbers {
     public static final int ERROR_URL_NOT_SET = 0x2312;                 // url is not set
     public static final int ERROR_INVALID_SQL = 0x2313;                 // invalid sql
     public static final int ERROR_NUMERIC_VALUE_OUT_OF_RANGE = 0x2314;  // numeric value out of range
-    public static final int ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE = 0x2315;  //unknown taos type in tdengine
-    public static final int ERROR_UNKNOWN_TIMESTAMP_PERCISION = 0x2316;    // unknown timestamp precision
+    public static final int ERROR_UNKNOWN_TAOS_TYPE = 0x2315;           //unknown taos type in tdengine
+    public static final int ERROR_UNKNOWN_TIMESTAMP_PRECISION = 0x2316; // unknown timestamp precision
+    public static final int ERROR_RESTFul_Client_Protocol_Exception = 0x2317;
+    public static final int ERROR_RESTFul_Client_IOException = 0x2318;

     public static final int ERROR_UNKNOWN = 0x2350;   //unknown error
@@ -62,8 +64,11 @@ public class TSDBErrorNumbers {
     errorNumbers.add(ERROR_URL_NOT_SET);
     errorNumbers.add(ERROR_INVALID_SQL);
     errorNumbers.add(ERROR_NUMERIC_VALUE_OUT_OF_RANGE);
-    errorNumbers.add(ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE);
-    errorNumbers.add(ERROR_UNKNOWN_TIMESTAMP_PERCISION);
+    errorNumbers.add(ERROR_UNKNOWN_TAOS_TYPE);
+    errorNumbers.add(ERROR_UNKNOWN_TIMESTAMP_PRECISION);
+    errorNumbers.add(ERROR_RESTFul_Client_IOException);
+
+    errorNumbers.add(ERROR_RESTFul_Client_Protocol_Exception);

     errorNumbers.add(ERROR_SUBSCRIBE_FAILED);
     errorNumbers.add(ERROR_UNSUPPORTED_ENCODING);
@@ -213,7 +213,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
           long nanoAdjustment = Integer.parseInt(value.substring(20));
           return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
         }
-        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PERCISION);
+        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PRECISION);
       }
     }
 }
@@ -1,15 +1,18 @@
 package com.taosdata.jdbc.utils;

+import com.taosdata.jdbc.TSDBError;
+import com.taosdata.jdbc.TSDBErrorNumbers;
 import org.apache.http.HeaderElement;
 import org.apache.http.HeaderElementIterator;
 import org.apache.http.HttpEntity;
+import org.apache.http.client.ClientProtocolException;
+import org.apache.http.client.HttpRequestRetryHandler;
 import org.apache.http.client.config.RequestConfig;
 import org.apache.http.client.methods.*;
 import org.apache.http.client.protocol.HttpClientContext;
 import org.apache.http.conn.ConnectionKeepAliveStrategy;
 import org.apache.http.entity.StringEntity;
 import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.DefaultHttpRequestRetryHandler;
 import org.apache.http.impl.client.HttpClients;
 import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
 import org.apache.http.message.BasicHeaderElementIterator;
@@ -17,35 +20,24 @@ import org.apache.http.protocol.HTTP;
 import org.apache.http.protocol.HttpContext;
 import org.apache.http.util.EntityUtils;

+import javax.net.ssl.SSLException;
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.net.UnknownHostException;
 import java.nio.charset.StandardCharsets;
+import java.sql.SQLException;

 public class HttpClientPoolUtil {

     private static final String DEFAULT_CONTENT_TYPE = "application/json";
-    private static final int DEFAULT_MAX_TOTAL = 200;
-    private static final int DEFAULT_MAX_PER_ROUTE = 20;
     private static final int DEFAULT_TIME_OUT = 15000;
+    private static final int DEFAULT_MAX_PER_ROUTE = 32;
+    private static final int DEFAULT_MAX_TOTAL = 1000;
     private static final int DEFAULT_HTTP_KEEP_TIME = 15000;

-    private static CloseableHttpClient httpClient;
-
-    private static synchronized void initPools() {
-        if (httpClient == null) {
-            PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager();
-            connectionManager.setDefaultMaxPerRoute(DEFAULT_MAX_PER_ROUTE);
-            connectionManager.setMaxTotal(DEFAULT_MAX_TOTAL);
-            httpClient = HttpClients.custom()
-                    .setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY)
-                    .setConnectionManager(connectionManager)
-                    .setRetryHandler(new DefaultHttpRequestRetryHandler(3, true))
-                    .build();
-        }
-    }
+    private static final int DEFAULT_MAX_RETRY_COUNT = 5;

     private static final ConnectionKeepAliveStrategy DEFAULT_KEEP_ALIVE_STRATEGY = (response, context) -> {
         HeaderElementIterator it = new BasicHeaderElementIterator(response.headerIterator(HTTP.CONN_KEEP_ALIVE));
-        int keepTime = DEFAULT_HTTP_KEEP_TIME * 1000;
         while (it.hasNext()) {
             HeaderElement headerElement = it.nextElement();
             String param = headerElement.getName();
@@ -53,34 +45,73 @@ public class HttpClientPoolUtil {
             if (value != null && param.equalsIgnoreCase("timeout")) {
                 try {
                     return Long.parseLong(value) * 1000;
-                } catch (Exception e) {
-                    new Exception("format KeepAlive timeout exception, exception:" + e.toString()).printStackTrace();
+                } catch (NumberFormatException ignore) {
                 }
             }
         }
-        return keepTime;
+        return DEFAULT_HTTP_KEEP_TIME * 1000;
     };

-    /**
-     * Execute an HTTP POST request
-     * (defaults to Content-Type: application/json, Accept: application/json)
-     *
-     * @param uri  request URI
-     * @param data request payload
-     * @return responseBody
-     */
-    public static String execute(String uri, String data, String token) {
-        long startTime = System.currentTimeMillis();
+    private static final HttpRequestRetryHandler retryHandler = (exception, executionCount, httpContext) -> {
+        if (executionCount >= DEFAULT_MAX_RETRY_COUNT)
+            // do not retry if over max retry count
+            return false;
+        if (exception instanceof InterruptedIOException)
+            // timeout
+            return false;
+        if (exception instanceof UnknownHostException)
+            // unknown host
+            return false;
+        if (exception instanceof SSLException)
+            // SSL handshake exception
+            return false;
+        return true;
+    };
+
+    private static CloseableHttpClient httpClient;
+
+    static {
+        PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager();
+        connectionManager.setMaxTotal(DEFAULT_MAX_TOTAL);
+        connectionManager.setDefaultMaxPerRoute(DEFAULT_MAX_PER_ROUTE);
+        httpClient = HttpClients.custom().setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY).setConnectionManager(connectionManager).setRetryHandler(retryHandler).build();
+    }
+
+    /*** execute GET request ***/
+    public static String execute(String uri) throws SQLException {
         HttpEntity httpEntity = null;
-        HttpEntityEnclosingRequestBase method = null;
         String responseBody = "";
         try {
-            if (httpClient == null) {
-                initPools();
+            HttpRequestBase method = getRequest(uri, HttpGet.METHOD_NAME);
+            HttpContext context = HttpClientContext.create();
+            CloseableHttpResponse httpResponse = httpClient.execute(method, context);
+            httpEntity = httpResponse.getEntity();
+            if (httpEntity != null) {
+                responseBody = EntityUtils.toString(httpEntity, StandardCharsets.UTF_8);
             }
-            method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME, DEFAULT_CONTENT_TYPE, 0);
-            method.setHeader("Content-Type", "text/plain");
-            method.setHeader("Connection", "keep-alive");
+        } catch (ClientProtocolException e) {
+            e.printStackTrace();
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_Protocol_Exception, e.getMessage());
+        } catch (IOException exception) {
+            exception.printStackTrace();
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_IOException, exception.getMessage());
+        } finally {
+            if (httpEntity != null) {
+                EntityUtils.consumeQuietly(httpEntity);
+            }
+        }
+        return responseBody;
+    }
+
+    /*** execute POST request ***/
+    public static String execute(String uri, String data, String token) throws SQLException {
+        HttpEntity httpEntity = null;
+        String responseBody = "";
+        try {
+            HttpEntityEnclosingRequestBase method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME);
+            method.setHeader(HTTP.CONTENT_TYPE, "text/plain");
+            method.setHeader(HTTP.CONN_DIRECTIVE, HTTP.CONN_KEEP_ALIVE);
+            method.setHeader("Authorization", "Taosd " + token);

             method.setEntity(new StringEntity(data, StandardCharsets.UTF_8));
@@ -88,46 +119,31 @@ public class HttpClientPoolUtil {
             CloseableHttpResponse httpResponse = httpClient.execute(method, context);
             httpEntity = httpResponse.getEntity();
             if (httpEntity != null) {
-                responseBody = EntityUtils.toString(httpEntity, "UTF-8");
+                responseBody = EntityUtils.toString(httpEntity, StandardCharsets.UTF_8);
             }
-        } catch (Exception e) {
-            if (method != null) {
-                method.abort();
-            }
-            new Exception("execute post request exception, url:" + uri + ", exception:" + e.toString() + ", cost time(ms):" + (System.currentTimeMillis() - startTime)).printStackTrace();
+        } catch (ClientProtocolException e) {
+            e.printStackTrace();
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_Protocol_Exception, e.getMessage());
+        } catch (IOException exception) {
+            exception.printStackTrace();
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_IOException, exception.getMessage());
         } finally {
             if (httpEntity != null) {
-                try {
-                    EntityUtils.consumeQuietly(httpEntity);
-                } catch (Exception e) {
-                    new Exception("close response exception, url:" + uri + ", exception:" + e.toString() + ", cost time(ms):" + (System.currentTimeMillis() - startTime)).printStackTrace();
-                }
+                EntityUtils.consumeQuietly(httpEntity);
             }
         }
         return responseBody;
     }

-    /**
-     * Create a request
-     *
-     * @param uri         request URL
-     * @param methodName  request method type
-     * @param contentType content type
-     * @param timeout     timeout
-     * @return HttpRequestBase
-     * @author lisc
-     */
-    private static HttpRequestBase getRequest(String uri, String methodName, String contentType, int timeout) {
-        if (httpClient == null) {
-            initPools();
-        }
+    /*** create http request ***/
+    private static HttpRequestBase getRequest(String uri, String methodName) {
         HttpRequestBase method;
-        if (timeout <= 0) {
-            timeout = DEFAULT_TIME_OUT;
-        }
-        RequestConfig requestConfig = RequestConfig.custom().setSocketTimeout(timeout * 1000)
-                .setConnectTimeout(timeout * 1000).setConnectionRequestTimeout(timeout * 1000)
-                .setExpectContinueEnabled(false).build();
+        RequestConfig requestConfig = RequestConfig.custom()
+                .setSocketTimeout(DEFAULT_TIME_OUT * 1000)
+                .setConnectTimeout(DEFAULT_TIME_OUT * 1000)
+                .setConnectionRequestTimeout(DEFAULT_TIME_OUT * 1000)
+                .setExpectContinueEnabled(false)
+                .build();
         if (HttpPut.METHOD_NAME.equalsIgnoreCase(methodName)) {
             method = new HttpPut(uri);
         } else if (HttpPost.METHOD_NAME.equalsIgnoreCase(methodName)) {
@@ -137,52 +153,10 @@ public class HttpClientPoolUtil {
         } else {
             method = new HttpPost(uri);
         }

-        if (contentType == null || contentType.isEmpty() || contentType.replaceAll("\\s", "").isEmpty()) {
-            contentType = DEFAULT_CONTENT_TYPE;
-        }
-        method.addHeader("Content-Type", contentType);
-        method.addHeader("Accept", contentType);
+        method.addHeader(HTTP.CONTENT_TYPE, DEFAULT_CONTENT_TYPE);
+        method.addHeader("Accept", DEFAULT_CONTENT_TYPE);
         method.setConfig(requestConfig);
         return method;
     }

-    /**
-     * Execute a GET request
-     *
-     * @param uri URL
-     * @return responseBody
-     */
-    public static String execute(String uri) {
-        long startTime = System.currentTimeMillis();
-        HttpEntity httpEntity = null;
-        HttpRequestBase method = null;
-        String responseBody = "";
-        try {
-            if (httpClient == null) {
-                initPools();
-            }
-            method = getRequest(uri, HttpGet.METHOD_NAME, DEFAULT_CONTENT_TYPE, 0);
-            HttpContext context = HttpClientContext.create();
-            CloseableHttpResponse httpResponse = httpClient.execute(method, context);
-            httpEntity = httpResponse.getEntity();
-            if (httpEntity != null) {
-                responseBody = EntityUtils.toString(httpEntity, "UTF-8");
-            }
-        } catch (Exception e) {
-            if (method != null) {
-                method.abort();
-            }
-            e.printStackTrace();
-        } finally {
-            if (httpEntity != null) {
-                try {
-                    EntityUtils.consumeQuietly(httpEntity);
-                } catch (Exception e) {
-                    new Exception("close response exception, url:" + uri + ", exception:" + e.toString() + ",cost time(ms):" + (System.currentTimeMillis() - startTime)).printStackTrace();
-                }
-            }
-        }
-        return responseBody;
-    }
 }
@@ -40,8 +40,9 @@
 #include "dnodeShell.h"
 #include "dnodeTelemetry.h"
 #include "module.h"
-#include "qScript.h"
 #include "mnode.h"
+#include "qScript.h"
+#include "tcache.h"
 #include "tscompression.h"

 #if !defined(_MODULE) || !defined(_TD_LINUX)
@@ -208,6 +209,7 @@ void dnodeCleanUpSystem() {
     dnodeCleanupComponents();
     taos_cleanup();
     taosCloseLog();
+    taosStopCacheRefreshWorker();
   }
 }
@@ -102,6 +102,7 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_TSC_EXCEED_SQL_LIMIT   TAOS_DEF_ERROR_CODE(0, 0x0219)  //"SQL statement too long check maxSQLLength config")
 #define TSDB_CODE_TSC_FILE_EMPTY         TAOS_DEF_ERROR_CODE(0, 0x021A)  //"File is empty")
 #define TSDB_CODE_TSC_LINE_SYNTAX_ERROR  TAOS_DEF_ERROR_CODE(0, 0x021B)  //"Syntax error in Line")
+#define TSDB_CODE_TSC_NO_META_CACHED     TAOS_DEF_ERROR_CODE(0, 0x021C)  //"No table meta cached")

 // mnode
 #define TSDB_CODE_MND_MSG_NOT_PROCESSED  TAOS_DEF_ERROR_CODE(0, 0x0300)  //"Message not processed")
@@ -874,6 +874,10 @@ typedef struct {
   int64_t  useconds;
   int64_t  stime;
   uint64_t qId;
   uint64_t sqlObjId;
+  int32_t  pid;
+  char     fqdn[TSDB_FQDN_LEN];
+  int32_t  numOfSub;
 } SQueryDesc;

 typedef struct {
@@ -138,6 +138,8 @@ typedef struct {
 #define IS_VALID_USMALLINT(_t)  ((_t) >= 0 && (_t) < UINT16_MAX)
 #define IS_VALID_UINT(_t)       ((_t) >= 0 && (_t) < UINT32_MAX)
 #define IS_VALID_UBIGINT(_t)    ((_t) >= 0 && (_t) < UINT64_MAX)
+#define IS_VALID_FLOAT(_t)      ((_t) >= -FLT_MAX && (_t) <= FLT_MAX)
+#define IS_VALID_DOUBLE(_t)     ((_t) >= -DBL_MAX && (_t) <= DBL_MAX)

 static FORCE_INLINE bool isNull(const char *val, int32_t type) {
   switch (type) {
@@ -35,6 +35,8 @@ struct Command {
 };

 extern void backspaceChar(Command *cmd);
+extern void clearLineBefore(Command *cmd);
+extern void clearLineAfter(Command *cmd);
 extern void deleteChar(Command *cmd);
 extern void moveCursorLeft(Command *cmd);
 extern void moveCursorRight(Command *cmd);
@@ -102,6 +102,28 @@ void backspaceChar(Command *cmd) {
   }
 }

+void clearLineBefore(Command *cmd) {
+  assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset);
+
+  clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size);
+  memmove(cmd->command, cmd->command + cmd->cursorOffset,
+          cmd->commandSize - cmd->cursorOffset);
+  cmd->commandSize -= cmd->cursorOffset;
+  cmd->cursorOffset = 0;
+  cmd->screenOffset = 0;
+  cmd->endOffset = cmd->commandSize;
+  showOnScreen(cmd);
+}
+
+void clearLineAfter(Command *cmd) {
+  assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset);
+
+  clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size);
+  cmd->commandSize -= cmd->endOffset - cmd->cursorOffset;
+  cmd->endOffset = cmd->cursorOffset;
+  showOnScreen(cmd);
+}
+
 void deleteChar(Command *cmd) {
   assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset);
@@ -238,10 +238,16 @@ int32_t shellReadCommand(TAOS *con, char *command) {
             updateBuffer(&cmd);
           }
           break;
+        case 11:  // Ctrl + K;
+          clearLineAfter(&cmd);
+          break;
         case 12:  // Ctrl + L;
          system("clear");
          showOnScreen(&cmd);
          break;
+        case 21:  // Ctrl + U
+          clearLineBefore(&cmd);
+          break;
       }
     } else if (c == '\033') {
       c = getchar();
@@ -238,10 +238,16 @@ int32_t shellReadCommand(TAOS *con, char *command) {
             updateBuffer(&cmd);
           }
          break;
+        case 11:  // Ctrl + K;
+          clearLineAfter(&cmd);
+          break;
         case 12:  // Ctrl + L;
          system("clear");
          showOnScreen(&cmd);
          break;
+        case 21:  // Ctrl + U;
+          clearLineBefore(&cmd);
+          break;
       }
     } else if (c == '\033') {
       c = (char)getchar();
@@ -66,10 +66,6 @@

 extern char configDir[];

-#define INSERT_JSON_NAME      "insert.json"
-#define QUERY_JSON_NAME       "query.json"
-#define SUBSCRIBE_JSON_NAME   "subscribe.json"
-
 #define STR_INSERT_INTO       "INSERT INTO "

 #define MAX_RECORDS_PER_REQ   32766
@@ -79,9 +75,10 @@ extern char configDir[];
 #define BUFFER_SIZE           TSDB_MAX_ALLOWED_SQL_LEN
 #define COND_BUF_LEN          (BUFFER_SIZE - 30)
+#define COL_BUFFER_LEN        ((TSDB_COL_NAME_LEN + 15) * TSDB_MAX_COLUMNS)

 #define MAX_USERNAME_SIZE     64
 #define MAX_PASSWORD_SIZE     64
-#define MAX_HOSTNAME_SIZE     64
+#define MAX_HOSTNAME_SIZE     253  // https://man7.org/linux/man-pages/man7/hostname.7.html
 #define MAX_TB_NAME_SIZE      64
 #define MAX_DATA_SIZE         (16*TSDB_MAX_COLUMNS)+20  // max record len: 16*MAX_COLUMNS, timestamp string and ,('') need extra space
 #define OPT_ABORT             1 /* –abort */
@@ -100,6 +97,11 @@ extern char configDir[];
#define MAX_DATABASE_COUNT 256
#define INPUT_BUF_LEN 256

#define TBNAME_PREFIX_LEN (TSDB_TABLE_NAME_LEN - 20) // 20 characters reserved for seq
#define SMALL_BUFF_LEN 8
#define DATATYPE_BUFF_LEN (SMALL_BUFF_LEN*3)
#define NOTE_BUFF_LEN (SMALL_BUFF_LEN*16)

#define DEFAULT_TIMESTAMP_STEP 1

@@ -225,6 +227,7 @@ typedef struct SArguments_S {
  uint32_t num_of_CPR;
  uint32_t num_of_threads;
  uint64_t insert_interval;
  uint64_t timestamp_step;
  int64_t  query_times;
  uint32_t interlace_rows;
  uint32_t num_of_RPR;                  // num_of_records_per_req

@@ -243,16 +246,15 @@ typedef struct SArguments_S {

typedef struct SColumn_S {
  char     field[TSDB_COL_NAME_LEN];
  char     dataType[16];
  char     dataType[DATATYPE_BUFF_LEN];
  uint32_t dataLen;
  char     note[128];
  char     note[NOTE_BUFF_LEN];
} StrColumn;

typedef struct SSuperTable_S {
  char     sTblName[TSDB_TABLE_NAME_LEN];
  char     dataSource[MAX_TB_NAME_SIZE];  // rand_gen or sample
  char     childTblPrefix[TSDB_TABLE_NAME_LEN - 20]; // 20 characters reserved for seq
  char     insertMode[MAX_TB_NAME_SIZE];  // taosc, rest
  char     dataSource[SMALL_BUFF_LEN];  // rand_gen or sample
  char     childTblPrefix[TBNAME_PREFIX_LEN];
  uint16_t childTblExists;
  int64_t  childTblCount;
  uint64_t batchCreateTableNum;  // 0: no batch,  > 0: batch table number in one sql

@@ -271,7 +273,7 @@ typedef struct SSuperTable_S {
  int64_t  insertRows;
  int64_t  timeStampStep;
  char     startTimestamp[MAX_TB_NAME_SIZE];
  char     sampleFormat[MAX_TB_NAME_SIZE];  // csv, json
  char     sampleFormat[SMALL_BUFF_LEN];  // csv, json
  char     sampleFile[MAX_FILE_NAME_LEN];
  char     tagsFile[MAX_FILE_NAME_LEN];

@@ -307,7 +309,7 @@ typedef struct {
  int16_t  replica;
  int16_t  quorum;
  int16_t  days;
  char     keeplist[32];
  char     keeplist[64];
  int32_t  cache; //MB
  int32_t  blocks;
  int32_t  minrows;

@@ -316,7 +318,7 @@
  int32_t  fsync;
  int8_t   comp;
  int8_t   cachelast;
  char     precision[8];   // time resolution
  char     precision[SMALL_BUFF_LEN];   // time resolution
  int8_t   update;
  char     status[16];
} SDbInfo;

@@ -336,7 +338,7 @@ typedef struct SDbCfg_S {
  int  cache;
  int  blocks;
  int  quorum;
  char precision[8];
  char precision[SMALL_BUFF_LEN];
} SDbCfg;

typedef struct SDataBase_S {

@@ -402,7 +404,7 @@ typedef struct SuperQueryInfo_S {
  int      subscribeKeepProgress;
  uint64_t queryTimes;
  int64_t  childTblCount;
  char     childTblPrefix[TSDB_TABLE_NAME_LEN - 20]; // 20 characters reserved for seq
  char     childTblPrefix[TBNAME_PREFIX_LEN]; // 20 characters reserved for seq
  int      sqlCount;
  char     sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
  char     result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];

@@ -422,7 +424,7 @@ typedef struct SQueryMetaInfo_S {
  char     user[MAX_USERNAME_SIZE];
  char     password[MAX_PASSWORD_SIZE];
  char     dbName[TSDB_DB_NAME_LEN];
  char     queryMode[MAX_TB_NAME_SIZE];  // taosc, rest
  char     queryMode[SMALL_BUFF_LEN];  // taosc, rest

  SpecifiedQueryInfo specifiedQueryInfo;
  SuperQueryInfo     superQueryInfo;

@@ -605,6 +607,7 @@ SArguments g_args = {
  4,               // num_of_CPR
  10,              // num_of_connections/thread
  0,               // insert_interval
  DEFAULT_TIMESTAMP_STEP, // timestamp_step
  1,               // query_times
  0,               // interlace_rows;
  30000,           // num_of_RPR

@@ -645,7 +648,7 @@ static FILE * g_fpOfInsertResult = NULL;
    fprintf(stderr, "PERF: "fmt, __VA_ARGS__); } while(0)

#define errorPrint(fmt, ...) \
  do { fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); } while(0)
  do { fprintf(stderr, " \033[31m"); fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); fprintf(stderr, " \033[0m"); } while(0)

// for strncpy buffer overflow
#define min(a, b) (((a) < (b)) ? (a) : (b))

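The new errorPrint wraps the message in ANSI SGR escape codes so errors stand out in red on a color terminal. A minimal compilable sketch of the same macro shape (simplified to a single fprintf; a production version might also gate the colors on isatty(2)):

#include <stdio.h>

/* \033[31m selects red foreground, \033[0m resets attributes. fmt must be
 * a string literal because it is concatenated with "ERROR: ". */
#define errorPrint(fmt, ...) \
    do { \
        fprintf(stderr, "\033[31mERROR: " fmt "\033[0m", __VA_ARGS__); \
    } while (0)

int main(void) {
    errorPrint("insert failed on table %s\n", "d10");
    return 0;
}
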
@@ -740,6 +743,9 @@ static void printHelp() {
      "The number of threads. Default is 10.");
  printf("%s%s%s%s\n", indent, "-i", indent,
      "The sleep time (ms) between insertion. Default is 0.");
  printf("%s%s%s%s%d.\n", indent, "-S", indent,
      "The timestamp step between insertion. Default is ",
      DEFAULT_TIMESTAMP_STEP);
  printf("%s%s%s%s\n", indent, "-r", indent,
      "The number of records per request. Default is 30000.");
  printf("%s%s%s%s\n", indent, "-t", indent,

@@ -881,6 +887,14 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
      exit(EXIT_FAILURE);
    }
    arguments->insert_interval = atoi(argv[++i]);
  } else if (strcmp(argv[i], "-S") == 0) {
    if ((argc == i+1) ||
        (!isStringNumber(argv[i+1]))) {
      printHelp();
      errorPrint("\n\t%s%s", argv[i], " need a number following!\n");
      exit(EXIT_FAILURE);
    }
    arguments->timestamp_step = atoi(argv[++i]);
  } else if (strcmp(argv[i], "-qt") == 0) {
    if ((argc == i+1)
        || (!isStringNumber(argv[i+1]))) {

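Taken together with the help text above, the new -S option feeds g_args.timestamp_step, which replaces the hard-coded DEFAULT_TIMESTAMP_STEP in the data generators later in this commit. An illustrative invocation under that assumption (only -i, -S and -r are shown in this diff; other flags follow the regular taosdemo help output):

# sleep 100 ms between insert requests, space consecutive row timestamps
# 1000 time-units apart, and send up to 30000 records per request
taosdemo -i 100 -S 1000 -r 30000
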
@@ -2667,12 +2681,14 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName,
          fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
      tstrncpy(superTbls->tags[tagIndex].dataType,
          (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
          min(15, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes));
          min(DATATYPE_BUFF_LEN,
            fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1);
      superTbls->tags[tagIndex].dataLen =
          *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
      tstrncpy(superTbls->tags[tagIndex].note,
          (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
          fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
          min(NOTE_BUFF_LEN,
            fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes) + 1);
      tagIndex++;
    } else {
      tstrncpy(superTbls->columns[columnIndex].field,

@@ -2680,12 +2696,14 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName,
          fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
      tstrncpy(superTbls->columns[columnIndex].dataType,
          (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
          min(15, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes));
          min(DATATYPE_BUFF_LEN,
            fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1);
      superTbls->columns[columnIndex].dataLen =
          *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
      tstrncpy(superTbls->columns[columnIndex].note,
          (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
          fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
          min(NOTE_BUFF_LEN,
            fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes) + 1);
      columnIndex++;
    }
    count++;

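The recurring pattern in these hunks is a bounded copy: clamp the copy size to whichever is smaller, the destination buffer or the server-reported field width, and leave room for the terminator. A compilable sketch of the idea, assuming tstrncpy behaves like strncpy plus a forced NUL at size-1 (the buffer sizes and field value below are illustrative, not from the commit):

#include <stdio.h>
#include <string.h>

#define min(a, b) (((a) < (b)) ? (a) : (b))

/* Assumed tstrncpy semantics: copy at most size-1 bytes, always terminate. */
static void tstrncpy_sketch(char *dst, const char *src, size_t size) {
    strncpy(dst, src, size - 1);
    dst[size - 1] = '\0';
}

int main(void) {
    char dataType[24];                /* stand-in for DATATYPE_BUFF_LEN */
    const char *field = "TIMESTAMP";  /* value from a DESCRIBE result row */
    int fieldBytes = 9;               /* fields[...].bytes from the server */

    /* Clamp against sizeof(dst)-1 so the +1 for the terminator can never
     * push the write past the end of the buffer. */
    tstrncpy_sketch(dataType, field,
                    min(sizeof(dataType) - 1, (size_t)fieldBytes) + 1);
    printf("%s\n", dataType);
    return 0;
}
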
@@ -3448,8 +3466,9 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
          __func__, __LINE__);
      goto PARSE_OVER;
    }
    //tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
    tstrncpy(columnCase.dataType, dataType->valuestring, strlen(dataType->valuestring) + 1);
    //tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, DATATYPE_BUFF_LEN);
    tstrncpy(columnCase.dataType, dataType->valuestring,
        min(DATATYPE_BUFF_LEN, strlen(dataType->valuestring) + 1));

    cJSON* dataLen = cJSON_GetObjectItem(column, "len");
    if (dataLen && dataLen->type == cJSON_Number) {

@@ -3459,12 +3478,13 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
          __func__, __LINE__);
      goto PARSE_OVER;
    } else {
      columnCase.dataLen = 8;
      columnCase.dataLen = SMALL_BUFF_LEN;
    }

    for (int n = 0; n < count; ++n) {
      tstrncpy(superTbls->columns[index].dataType,
          columnCase.dataType, strlen(columnCase.dataType) + 1);
          columnCase.dataType,
          min(DATATYPE_BUFF_LEN, strlen(columnCase.dataType) + 1));
      superTbls->columns[index].dataLen = columnCase.dataLen;
      index++;
    }

@@ -3520,7 +3540,8 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
          __func__, __LINE__);
      goto PARSE_OVER;
    }
    tstrncpy(columnCase.dataType, dataType->valuestring, strlen(dataType->valuestring) + 1);
    tstrncpy(columnCase.dataType, dataType->valuestring,
        min(DATATYPE_BUFF_LEN, strlen(dataType->valuestring) + 1));

    cJSON* dataLen = cJSON_GetObjectItem(tag, "len");
    if (dataLen && dataLen->type == cJSON_Number) {

@@ -3535,7 +3556,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(

    for (int n = 0; n < count; ++n) {
      tstrncpy(superTbls->tags[index].dataType, columnCase.dataType,
          strlen(columnCase.dataType) + 1);
          min(DATATYPE_BUFF_LEN, strlen(columnCase.dataType) + 1));
      superTbls->tags[index].dataLen = columnCase.dataLen;
      index++;
    }

@@ -3779,9 +3800,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
    if (precision && precision->type == cJSON_String
        && precision->valuestring != NULL) {
      tstrncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring,
          8);
          SMALL_BUFF_LEN);
    } else if (!precision) {
      memset(g_Dbs.db[i].dbCfg.precision, 0, 8);
      memset(g_Dbs.db[i].dbCfg.precision, 0, SMALL_BUFF_LEN);
    } else {
      printf("ERROR: failed to read json, precision not found\n");
      goto PARSE_OVER;

@@ -3966,7 +3987,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
      goto PARSE_OVER;
    }
    tstrncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring,
        TSDB_TABLE_NAME_LEN - 20);
        TBNAME_PREFIX_LEN);

    cJSON *autoCreateTbl = cJSON_GetObjectItem(stbInfo, "auto_create_table");
    if (autoCreateTbl

@@ -4034,9 +4055,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
    if (dataSource && dataSource->type == cJSON_String
        && dataSource->valuestring != NULL) {
      tstrncpy(g_Dbs.db[i].superTbls[j].dataSource,
          dataSource->valuestring, TSDB_DB_NAME_LEN);
          dataSource->valuestring,
          min(SMALL_BUFF_LEN, strlen(dataSource->valuestring) + 1));
    } else if (!dataSource) {
      tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", TSDB_DB_NAME_LEN);
      tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand",
          min(SMALL_BUFF_LEN, strlen("rand") + 1));
    } else {
      errorPrint("%s() LN%d, failed to read json, data_source not found\n",
          __func__, __LINE__);

@@ -4107,7 +4130,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
    if (timestampStep && timestampStep->type == cJSON_Number) {
      g_Dbs.db[i].superTbls[j].timeStampStep = timestampStep->valueint;
    } else if (!timestampStep) {
      g_Dbs.db[i].superTbls[j].timeStampStep = DEFAULT_TIMESTAMP_STEP;
      g_Dbs.db[i].superTbls[j].timeStampStep = g_args.timestamp_step;
    } else {
      printf("ERROR: failed to read json, timestamp_step not found\n");
      goto PARSE_OVER;

@@ -4117,9 +4140,12 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
    if (sampleFormat && sampleFormat->type
        == cJSON_String && sampleFormat->valuestring != NULL) {
      tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat,
          sampleFormat->valuestring, TSDB_DB_NAME_LEN);
          sampleFormat->valuestring,
          min(SMALL_BUFF_LEN,
            strlen(sampleFormat->valuestring) + 1));
    } else if (!sampleFormat) {
      tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", TSDB_DB_NAME_LEN);
      tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv",
          SMALL_BUFF_LEN);
    } else {
      printf("ERROR: failed to read json, sample_format not found\n");
      goto PARSE_OVER;

@@ -4129,9 +4155,12 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
    if (sampleFile && sampleFile->type == cJSON_String
        && sampleFile->valuestring != NULL) {
      tstrncpy(g_Dbs.db[i].superTbls[j].sampleFile,
          sampleFile->valuestring, MAX_FILE_NAME_LEN);
          sampleFile->valuestring,
          min(MAX_FILE_NAME_LEN,
            strlen(sampleFile->valuestring) + 1));
    } else if (!sampleFile) {
      memset(g_Dbs.db[i].superTbls[j].sampleFile, 0, MAX_FILE_NAME_LEN);
      memset(g_Dbs.db[i].superTbls[j].sampleFile, 0,
          MAX_FILE_NAME_LEN);
    } else {
      printf("ERROR: failed to read json, sample_file not found\n");
      goto PARSE_OVER;

@@ -4371,10 +4400,14 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
  }

  cJSON* queryMode = cJSON_GetObjectItem(root, "query_mode");
  if (queryMode && queryMode->type == cJSON_String && queryMode->valuestring != NULL) {
    tstrncpy(g_queryInfo.queryMode, queryMode->valuestring, MAX_TB_NAME_SIZE);
  if (queryMode
      && queryMode->type == cJSON_String
      && queryMode->valuestring != NULL) {
    tstrncpy(g_queryInfo.queryMode, queryMode->valuestring,
        min(SMALL_BUFF_LEN, strlen(queryMode->valuestring) + 1));
  } else if (!queryMode) {
    tstrncpy(g_queryInfo.queryMode, "taosc", MAX_TB_NAME_SIZE);
    tstrncpy(g_queryInfo.queryMode, "taosc",
        min(SMALL_BUFF_LEN, strlen("taosc") + 1));
  } else {
    printf("ERROR: failed to read json, query_mode not found\n");
    goto PARSE_OVER;

@@ -5121,8 +5154,10 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k)
      debugPrint("%s() LN%d, stmt=%p",
          __func__, __LINE__, pThreadInfo->stmt);
      if (0 != taos_stmt_execute(pThreadInfo->stmt)) {
        errorPrint("%s() LN%d, failied to execute insert statement\n",
            __func__, __LINE__);
        errorPrint("%s() LN%d, failied to execute insert statement. reason: %s\n",
            __func__, __LINE__, taos_stmt_errstr(pThreadInfo->stmt));

        fprintf(stderr, "\n\033[31m === Please reduce batch number if WAL size exceeds limit. ===\033[0m\n\n");
        exit(-1);
      }
      affectedRows = k;

@@ -5190,13 +5225,13 @@ static int32_t generateDataTailWithoutStb(
    if (g_args.disorderRatio) {
      retLen = generateData(data, data_type,
          startTime + getTSRandTail(
            (int64_t) DEFAULT_TIMESTAMP_STEP, k,
            g_args.timestamp_step, k,
            g_args.disorderRatio,
            g_args.disorderRange),
          lenOfBinary);
    } else {
      retLen = generateData(data, data_type,
          startTime + (int64_t) (DEFAULT_TIMESTAMP_STEP* k),
          startTime + g_args.timestamp_step * k,
          lenOfBinary);
    }

@@ -5685,7 +5720,7 @@ static int32_t prepareStmtWithoutStb(
  int ret = taos_stmt_set_tbname(stmt, tableName);
  if (ret != 0) {
    errorPrint("failed to execute taos_stmt_set_tbname(%s). return 0x%x. reason: %s\n",
        tableName, ret, taos_errstr(NULL));
        tableName, ret, taos_stmt_errstr(stmt));
    return ret;
  }

@@ -5714,11 +5749,11 @@ static int32_t prepareStmtWithoutStb(

    if (g_args.disorderRatio) {
      *bind_ts = startTime + getTSRandTail(
          (int64_t)DEFAULT_TIMESTAMP_STEP, k,
          g_args.timestamp_step, k,
          g_args.disorderRatio,
          g_args.disorderRange);
    } else {
      *bind_ts = startTime + (int64_t)(DEFAULT_TIMESTAMP_STEP * k);
      *bind_ts = startTime + g_args.timestamp_step * k;
    }
    bind->buffer_length = sizeof(int64_t);
    bind->buffer = bind_ts;

@@ -5738,9 +5773,17 @@ static int32_t prepareStmtWithoutStb(
        return -1;
      }
    }
    taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray);
    if (0 != taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray)) {
      errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
          __func__, __LINE__, taos_stmt_errstr(stmt));
      break;
    }
    // if msg > 3MB, break
    taos_stmt_add_batch(stmt);
    if (0 != taos_stmt_add_batch(stmt)) {
      errorPrint("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
          __func__, __LINE__, taos_stmt_errstr(stmt));
      break;
    }

    k++;
    recordFrom ++;

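The theme of this and the neighboring hunks is that no taos_stmt_* return code is ignored any more, and failures are reported via taos_stmt_errstr() rather than the connection-level taos_errstr(NULL). A hedged sketch of that checked-call pattern, using only API functions that appear in this diff (connection and statement setup are omitted; stmt is assumed to be a prepared statement):

#include <stdio.h>
#include <taos.h>

/* Bind one row's parameters and queue it into the batch, reporting the
 * statement-local error string on failure. */
static int bind_and_batch(TAOS_STMT *stmt, TAOS_BIND *binds) {
    if (taos_stmt_bind_param(stmt, binds) != 0) {
        fprintf(stderr, "stmt_bind_param failed: %s\n", taos_stmt_errstr(stmt));
        return -1;
    }
    if (taos_stmt_add_batch(stmt) != 0) {
        fprintf(stderr, "stmt_add_batch failed: %s\n", taos_stmt_errstr(stmt));
        return -1;
    }
    return 0;
}
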
@@ -5907,22 +5950,28 @@ static int32_t prepareStbStmt(

    if (-1 == prepareStbStmtBind(
          tagsArray, stbInfo, tagRand, -1, -1, false /* is tag */)) {
      free(tagsArray);
      tmfree(tagsValBuf);
      tmfree(tagsArray);
      return -1;
    }

    ret = taos_stmt_set_tbname_tags(stmt, tableName, (TAOS_BIND *)tagsArray);

    tmfree(tagsValBuf);
    tmfree((char *)tagsArray);
    tmfree(tagsArray);

    if (0 != ret) {
      errorPrint("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n",
          __func__, __LINE__, taos_stmt_errstr(stmt));
      return -1;
    }
  } else {
    ret = taos_stmt_set_tbname(stmt, tableName);
    if (0 != ret) {
      errorPrint("%s() LN%d, stmt_set_tbname() failed! reason: %s\n",
          __func__, __LINE__, taos_stmt_errstr(stmt));
      return -1;
    }

    if (ret != 0) {
      errorPrint("failed to execute taos_stmt_set_tbname(%s). return 0x%x. reason: %s\n",
          tableName, ret, taos_errstr(NULL));
      return ret;
    }

  char *bindArray = calloc(1, sizeof(TAOS_BIND) * (stbInfo->columnCount + 1));

@@ -5940,9 +5989,19 @@ static int32_t prepareStbStmt(
    free(bindArray);
    return -1;
  }
  taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray);
  ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray);
  if (0 != ret) {
    errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
        __func__, __LINE__, taos_stmt_errstr(stmt));
    return -1;
  }
  // if msg > 3MB, break
  taos_stmt_add_batch(stmt);
  ret = taos_stmt_add_batch(stmt);
  if (0 != ret) {
    errorPrint("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
        __func__, __LINE__, taos_stmt_errstr(stmt));
    return -1;
  }

  k++;
  recordFrom ++;

@@ -6109,7 +6168,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
    insertRows = g_args.num_of_DPT;
    interlaceRows = g_args.interlace_rows;
    maxSqlLen = g_args.max_sql_len;
    nTimeStampStep = DEFAULT_TIMESTAMP_STEP;
    nTimeStampStep = g_args.timestamp_step;
    insert_interval = g_args.insert_interval;
  }

@@ -6369,7 +6428,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
  SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
  uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
  int64_t timeStampStep =
      superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP;
      superTblInfo?superTblInfo->timeStampStep:g_args.timestamp_step;
  int64_t insertRows =
      (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT;
  verbosePrint("%s() LN%d insertRows=%"PRId64"\n",

@@ -6511,9 +6570,10 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
    } // num_of_DPT

    if ((g_args.verbose_print) &&
        (tableSeq == pThreadInfo->ntables - 1) && (superTblInfo) &&
        (0 == strncasecmp(
            superTblInfo->dataSource, "sample", strlen("sample")))) {
        (tableSeq == pThreadInfo->ntables - 1) && (superTblInfo)
        && (0 == strncasecmp(
            superTblInfo->dataSource,
            "sample", strlen("sample")))) {
      verbosePrint("%s() LN%d samplePos=%"PRId64"\n",
          __func__, __LINE__, pThreadInfo->samplePos);
    }

@@ -6890,7 +6950,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
        int ret = taos_stmt_prepare(pThreadInfo->stmt, buffer, 0);
        if (ret != 0){
          errorPrint("failed to execute taos_stmt_prepare. return 0x%x. reason: %s\n",
              ret, taos_errstr(NULL));
              ret, taos_stmt_errstr(pThreadInfo->stmt));
          free(pids);
          free(infos);
          exit(-1);

@@ -7351,7 +7411,7 @@ static void *specifiedTableQuery(void *sarg) {

static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) {
  char sourceString[32] = "xxxx";
  char subTblName[MAX_TB_NAME_SIZE*3];
  char subTblName[TSDB_TABLE_NAME_LEN];
  sprintf(subTblName, "%s.%s",
      g_queryInfo.dbName,
      g_queryInfo.superQueryInfo.childTblName + tblIndex*TSDB_TABLE_NAME_LEN);

@@ -7510,8 +7570,8 @@ static int queryTestProcess() {

  if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {

    char sqlStr[MAX_TB_NAME_SIZE*2];
    sprintf(sqlStr, "use %s", g_queryInfo.dbName);
    char sqlStr[TSDB_DB_NAME_LEN + 5];
    sprintf(sqlStr, "USE %s", g_queryInfo.dbName);
    if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) {
      taos_close(taos);
      free(infos);

@@ -7700,8 +7760,8 @@ static void *superSubscribe(void *sarg) {
    }
  }

  char sqlStr[MAX_TB_NAME_SIZE*2];
  sprintf(sqlStr, "use %s", g_queryInfo.dbName);
  char sqlStr[TSDB_DB_NAME_LEN + 5];
  sprintf(sqlStr, "USE %s", g_queryInfo.dbName);
  if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
    taos_close(pThreadInfo->taos);
    errorPrint( "use database %s failed!\n\n",

@@ -7842,8 +7902,8 @@ static void *specifiedSubscribe(void *sarg) {
    }
  }

  char sqlStr[MAX_TB_NAME_SIZE*2];
  sprintf(sqlStr, "use %s", g_queryInfo.dbName);
  char sqlStr[TSDB_DB_NAME_LEN + 5];
  sprintf(sqlStr, "USE %s", g_queryInfo.dbName);
  if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
    taos_close(pThreadInfo->taos);
    return NULL;

@@ -8138,7 +8198,7 @@ static void setParaFromArg() {

  tstrncpy(g_Dbs.db[0].dbName, g_args.database, TSDB_DB_NAME_LEN);
  g_Dbs.db[0].dbCfg.replica = g_args.replica;
  tstrncpy(g_Dbs.db[0].dbCfg.precision, "ms", 8);
  tstrncpy(g_Dbs.db[0].dbCfg.precision, "ms", SMALL_BUFF_LEN);

  tstrncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN);

@@ -8171,8 +8231,8 @@ static void setParaFromArg() {
    g_Dbs.db[0].superTbls[0].disorderRange = g_args.disorderRange;
    g_Dbs.db[0].superTbls[0].disorderRatio = g_args.disorderRatio;
    tstrncpy(g_Dbs.db[0].superTbls[0].childTblPrefix,
        g_args.tb_prefix, TSDB_TABLE_NAME_LEN - 20);
    tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", MAX_TB_NAME_SIZE);
        g_args.tb_prefix, TBNAME_PREFIX_LEN);
    tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", SMALL_BUFF_LEN);

    if (g_args.iface == INTERFACE_BUT) {
      g_Dbs.db[0].superTbls[0].iface = TAOSC_IFACE;

@@ -8181,7 +8241,7 @@ static void setParaFromArg() {
    }
    tstrncpy(g_Dbs.db[0].superTbls[0].startTimestamp,
        "2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE);
    g_Dbs.db[0].superTbls[0].timeStampStep = DEFAULT_TIMESTAMP_STEP;
    g_Dbs.db[0].superTbls[0].timeStampStep = g_args.timestamp_step;

    g_Dbs.db[0].superTbls[0].insertRows = g_args.num_of_DPT;
    g_Dbs.db[0].superTbls[0].maxSqlLen = g_args.max_sql_len;

@@ -8193,7 +8253,7 @@ static void setParaFromArg() {
      }

      tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType,
          data_type[i], strlen(data_type[i]) + 1);
          data_type[i], min(DATATYPE_BUFF_LEN, strlen(data_type[i]) + 1));
      g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.len_of_binary;
      g_Dbs.db[0].superTbls[0].columnCount++;
    }

@@ -8204,18 +8264,18 @@ static void setParaFromArg() {
      for (int i = g_Dbs.db[0].superTbls[0].columnCount;
          i < g_args.num_of_CPR; i++) {
        tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType,
            "INT", strlen("INT") + 1);
            "INT", min(DATATYPE_BUFF_LEN, strlen("INT") + 1));
        g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0;
        g_Dbs.db[0].superTbls[0].columnCount++;
      }
    }

    tstrncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType,
        "INT", strlen("INT") + 1);
        "INT", min(DATATYPE_BUFF_LEN, strlen("INT") + 1));
    g_Dbs.db[0].superTbls[0].tags[0].dataLen = 0;

    tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType,
        "BINARY", strlen("BINARY") + 1);
        "BINARY", min(DATATYPE_BUFF_LEN, strlen("BINARY") + 1));
    g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary;
    g_Dbs.db[0].superTbls[0].tagCount = 2;
  } else {

@@ -8351,7 +8411,7 @@ static void queryResult() {
    pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1;
    pThreadInfo->superTblInfo = &g_Dbs.db[0].superTbls[0];
    tstrncpy(pThreadInfo->tb_prefix,
        g_Dbs.db[0].superTbls[0].childTblPrefix, TSDB_TABLE_NAME_LEN - 20);
        g_Dbs.db[0].superTbls[0].childTblPrefix, TBNAME_PREFIX_LEN);
  } else {
    pThreadInfo->ntables = g_args.num_of_tables;
    pThreadInfo->end_table_to = g_args.num_of_tables -1;

@@ -60,7 +60,7 @@ typedef struct {
    fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0)

#define errorPrint(fmt, ...) \
  do { fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); } while(0)
  do { fprintf(stderr, "\033[31m"); fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); fprintf(stderr, "\033[0m"); } while(0)


// -------------------------- SHOW DATABASE INTERFACE-----------------------

@@ -234,9 +234,9 @@ static struct argp_option options[] = {
  {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 4},
  {"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 5},
#if TSDB_SUPPORT_NANOSECOND == 1
  {"precision", 'C', "PRECISION", 0, "Epoch precision. Valid value is one of ms, us, and ns. Default is ms.", 6},
  {"precision", 'C', "PRECISION", 0, "Specify precision for converting human-readable time to epoch. Valid value is one of ms, us, and ns. Default is ms.", 6},
#else
  {"precision", 'C', "PRECISION", 0, "Epoch precision. Valid value is one of ms and us. Default is ms.", 6},
  {"precision", 'C', "PRECISION", 0, "Use specified precision to convert human-readable time. Valid value is one of ms and us. Default is ms.", 6},
#endif
  {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 3},
  {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3},

@@ -453,6 +453,8 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
    case 'E':
      g_args.end_time = atol(arg);
      break;
    case 'C':
      break;
    case 'B':
      g_args.data_batch = atoi(arg);
      if (g_args.data_batch > MAX_RECORDS_PER_REQ) {

@@ -545,8 +547,8 @@ static void parse_precision_first(
      free(tmp);
      exit(-1);
    }
    strncpy(g_args.precision, tmp,
        min(DB_PRECISION_LEN - 1, strlen(tmp)));
    tstrncpy(g_args.precision, tmp,
        min(DB_PRECISION_LEN, strlen(tmp) + 1));
    free(tmp);
  }
}

@@ -597,10 +599,11 @@ static void parse_timestamp(
        return;
      }
    } else {
      tstrncpy(arguments->precision, "n/a", strlen("n/a") + 1);
      tmpEpoch = atoll(tmp);
    }

    sprintf(argv[i], "%"PRId64"", tmpEpoch);
    sprintf(argv[i+1], "%"PRId64"", tmpEpoch);
    debugPrint("%s() LN%d, tmp is: %s, argv[%d]: %s\n",
        __func__, __LINE__, tmp, i, argv[i]);
    free(tmp);

@@ -792,12 +795,14 @@ static int taosGetTableRecordInfo(
  while ((row = taos_fetch_row(result)) != NULL) {
    isSet = true;
    pTableRecordInfo->isMetric = false;
    strncpy(pTableRecordInfo->tableRecord.name,
    tstrncpy(pTableRecordInfo->tableRecord.name,
        (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
        fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
    strncpy(pTableRecordInfo->tableRecord.metric,
        min(TSDB_TABLE_NAME_LEN,
          fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes) + 1);
    tstrncpy(pTableRecordInfo->tableRecord.metric,
        (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX],
        fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes);
        min(TSDB_TABLE_NAME_LEN,
          fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes) + 1);
    break;
  }

@@ -1078,8 +1083,8 @@ _dump_db_point:
    goto _exit_failure;
  }

  strncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
      fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
  tstrncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
      min(TSDB_DB_NAME_LEN, fields[TSDB_SHOW_DB_NAME_INDEX].bytes) + 1);
  if (g_args.with_property) {
    g_dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
    g_dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);

@@ -1087,8 +1092,8 @@ _dump_db_point:
    g_dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
    g_dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);

    strncpy(g_dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
        fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
    tstrncpy(g_dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
        min(32, fields[TSDB_SHOW_DB_KEEP_INDEX].bytes) + 1);
    //g_dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]);
    //g_dbInfos[count]->daysToKeep1;
    //g_dbInfos[count]->daysToKeep2;

@@ -1101,8 +1106,8 @@ _dump_db_point:
    g_dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
    g_dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));

    strncpy(g_dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
        fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
    tstrncpy(g_dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
        min(8, fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes) + 1);
    //g_dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]);
    g_dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
  }

@@ -1253,17 +1258,19 @@ static int taosGetTableDes(

  tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN);
  while ((row = taos_fetch_row(res)) != NULL) {
    strncpy(tableDes->cols[count].field,
    tstrncpy(tableDes->cols[count].field,
        (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
        fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
    strncpy(tableDes->cols[count].type,
        min(TSDB_COL_NAME_LEN + 1,
          fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes + 1));
    tstrncpy(tableDes->cols[count].type,
        (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
        min(15, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes));
        min(16, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1);
    tableDes->cols[count].length =
        *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
    strncpy(tableDes->cols[count].note,
    tstrncpy(tableDes->cols[count].note,
        (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
        fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
        min(COL_NOTE_LEN,
          fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes + 1));

    count++;
  }

@@ -1698,8 +1705,9 @@ static int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE

  while ((row = taos_fetch_row(res)) != NULL) {
    memset(&tableRecord, 0, sizeof(STableRecord));
    strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
        fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
    tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
        min(TSDB_TABLE_NAME_LEN,
          fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes) + 1);
    taosWrite(fd, &tableRecord, sizeof(STableRecord));
  }

@@ -1773,9 +1781,11 @@ static int taosDumpDb(SDbInfo *dbInfo, FILE *fp, TAOS *taosCon) {
  while ((row = taos_fetch_row(res)) != NULL) {
    memset(&tableRecord, 0, sizeof(STableRecord));
    tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
        fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
        min(TSDB_TABLE_NAME_LEN,
          fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes) + 1);
    tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX],
        min(TSDB_TABLE_NAME_LEN, fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes));
        min(TSDB_TABLE_NAME_LEN,
          fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes) + 1);

    taosWrite(fd, &tableRecord, sizeof(STableRecord));

@@ -2163,7 +2173,7 @@ static int taosCheckParam(struct arguments *arguments) {

  if (g_args.arg_list_len == 0) {
    if ((!g_args.all_databases) && (!g_args.isDumpIn)) {
      fprintf(stderr, "taosdump requires parameters\n");
      errorPrint("%s", "taosdump requires parameters for database and operation\n");
      return -1;
    }
  }

@@ -1146,6 +1146,7 @@ static int32_t mnodeRetrieveConfigs(SShowObj *pShow, char *data, int32_t rows, v
      numOfRows++;
      break;
    case TAOS_CFG_VTYPE_FLOAT:
    case TAOS_CFG_VTYPE_DOUBLE:
      t = snprintf(varDataVal(pWrite), TSDB_CFG_VALUE_LEN, "%f", *((float *)cfg->ptr));
      varDataSetLen(pWrite, t);
      numOfRows++;

@@ -32,6 +32,7 @@
#define CONN_KEEP_TIME (tsShellActivityTimer * 3)
#define CONN_CHECK_TIME (tsShellActivityTimer * 2)
#define QUERY_ID_SIZE 20
#define QUERY_OBJ_ID_SIZE 10
#define QUERY_STREAM_SAVE_SIZE 20

static SCacheObj *tsMnodeConnCache = NULL;

@@ -361,6 +362,30 @@ static int32_t mnodeGetQueryMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC
  pSchema[cols].bytes = htons(pShow->bytes[cols]);
  cols++;

  pShow->bytes[cols] = QUERY_OBJ_ID_SIZE + VARSTR_HEADER_SIZE;
  pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
  strcpy(pSchema[cols].name, "sql_obj_id");
  pSchema[cols].bytes = htons(pShow->bytes[cols]);
  cols++;

  pShow->bytes[cols] = 4;
  pSchema[cols].type = TSDB_DATA_TYPE_INT;
  strcpy(pSchema[cols].name, "pid");
  pSchema[cols].bytes = htons(pShow->bytes[cols]);
  cols++;

  pShow->bytes[cols] = TSDB_EP_LEN + VARSTR_HEADER_SIZE;
  pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
  strcpy(pSchema[cols].name, "ep");
  pSchema[cols].bytes = htons(pShow->bytes[cols]);
  cols++;

  pShow->bytes[cols] = 4;
  pSchema[cols].type = TSDB_DATA_TYPE_INT;
  strcpy(pSchema[cols].name, "sub_queries");
  pSchema[cols].bytes = htons(pShow->bytes[cols]);
  cols++;

  pShow->bytes[cols] = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE;
  pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
  strcpy(pSchema[cols].name, "sql");

@@ -434,6 +459,29 @@ static int32_t mnodeRetrieveQueries(SShowObj *pShow, char *data, int32_t rows, v
    pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
    *(int64_t *)pWrite = htobe64(pDesc->useconds);
    cols++;
/*
    pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
    *(int64_t *)pWrite = htobe64(pDesc->sqlObjId);
    cols++;
*/
    snprintf(str, tListLen(str), "0x%08" PRIx64, htobe64(pDesc->sqlObjId));
    pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
    STR_WITH_MAXSIZE_TO_VARSTR(pWrite, str, pShow->bytes[cols]);
    cols++;

    pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
    *(int32_t *)pWrite = htonl(pDesc->pid);
    cols++;

    char epBuf[TSDB_EP_LEN + 1] = {0};
    snprintf(epBuf, tListLen(epBuf), "%s:%u", pDesc->fqdn, pConnObj->port);
    pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
    STR_WITH_MAXSIZE_TO_VARSTR(pWrite, epBuf, pShow->bytes[cols]);
    cols++;

    pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
    *(int32_t *)pWrite = htonl(pDesc->numOfSub);
    cols++;

    pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
    STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pDesc->sql, pShow->bytes[cols]);

@@ -199,7 +199,7 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,

    for (int32_t i = dataFields; i >= 0; i--) {
      httpJsonItemToken(jsonBuf);
      if (row[i] == NULL) {
      if (row == NULL || i >= num_fields || row[i] == NULL) {
        httpJsonOriginString(jsonBuf, "null", 4);
        continue;
      }

@@ -216,7 +216,7 @@ typedef struct SQueryAttr {
  bool        simpleAgg;
  bool        pointInterpQuery; // point interpolation query
  bool        needReverseScan;  // need reverse scan
  bool        distinctTag;      // distinct tag query
  bool        distinct;         // distinct query or not
  bool        stateWindow;      // window State on sub/normal table
  bool        createFilterOperator; // if filter operator is needed
  int32_t     interBufSize;     // intermediate buffer sizse

@@ -514,6 +514,7 @@ typedef struct SDistinctOperatorInfo {
  bool     recordNullVal;  //has already record the null value, no need to try again
  int64_t  threshold;
  int64_t  outputCapacity;
  int32_t  colIndex;
} SDistinctOperatorInfo;

struct SGlobalMerger;

@@ -121,7 +121,8 @@ typedef struct SQueryInfo {
  int64_t          vgroupLimit;     // table limit in case of super table projection query + global order + limit

  int32_t          udColumnId;      // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX
  bool             distinctTag;     // distinct tag or not
  bool             distinct;        // distinct tag or not
  bool             onlyHasTagCond;
  int32_t          round;           // 0/1/....
  int32_t          bufLen;
  char*            buf;

@@ -97,12 +97,47 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) {
#define GET_NUM_OF_TABLEGROUP(q) taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList)
#define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0)

#define TSKEY_MAX_ADD(a,b)                      \
do {                                            \
  if (a < 0) { a = a + b; break;}               \
  if (sizeof(a) == sizeof(int32_t)) {           \
    if((b) > 0 && ((b) >= INT32_MAX - (a))){    \
      a = INT32_MAX;                            \
    } else {                                    \
      a = a + b;                                \
    }                                           \
  } else {                                      \
    if((b) > 0 && ((b) >= INT64_MAX - (a))){    \
      a = INT64_MAX;                            \
    } else {                                    \
      a = a + b;                                \
    }                                           \
  }                                             \
} while(0)

#define TSKEY_MIN_SUB(a,b)                      \
do {                                            \
  if (a >= 0) { a = a + b; break;}              \
  if (sizeof(a) == sizeof(int32_t)){            \
    if((b) < 0 && ((b) <= INT32_MIN - (a))){    \
      a = INT32_MIN;                            \
    } else {                                    \
      a = a + b;                                \
    }                                           \
  } else {                                      \
    if((b) < 0 && ((b) <= INT64_MIN-(a))) {     \
      a = INT64_MIN;                            \
    } else {                                    \
      a = a + b;                                \
    }                                           \
  }                                             \
} while (0)

uint64_t queryHandleId = 0;

int32_t getMaximumIdleDurationSec() {
  return tsShellActivityTimer * 2;
}

int64_t genQueryId(void) {
  int64_t uid = 0;
  int64_t did = tsDnodeId;

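A worked example of the clamp logic in TSKEY_MAX_ADD, specialized to int64_t (illustrative only, not part of the commit): for a >= 0 and b > 0, a + b overflows when b > INT64_MAX - a. The macro tests >=, which is conservative; it also clamps the exact-boundary case rather than risking signed-overflow undefined behavior. As in the macro, the a < 0 branch assumes b is a small positive step.

#include <stdint.h>
#include <stdio.h>

static int64_t add_clamped(int64_t a, int64_t b) {
    if (a < 0) return a + b;                       /* cannot overflow upward */
    if (b > 0 && b >= INT64_MAX - a) return INT64_MAX;  /* clamp instead of UB */
    return a + b;
}

int main(void) {
    /* Clamped: b (1) >= INT64_MAX - a (1). */
    printf("%lld\n", (long long)add_clamped(INT64_MAX - 1, 1));
    /* Plain addition: 101. */
    printf("%lld\n", (long long)add_clamped(100, 1));
    return 0;
}
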
@@ -3124,8 +3159,10 @@ void setTagValue(SOperatorInfo* pOperatorInfo, void *pTable, SQLFunctionCtx* pCt
          || pLocalExprInfo->base.resType == TSDB_DATA_TYPE_TIMESTAMP) {
        memcpy(pRuntimeEnv->tagVal + offset, &pCtx[idx].tag.i64, pLocalExprInfo->base.resBytes);
      } else {
        if (pCtx[idx].tag.pz != NULL) {
          memcpy(pRuntimeEnv->tagVal + offset, pCtx[idx].tag.pz, pCtx[idx].tag.nLen);
        }
      }

      offset += pLocalExprInfo->base.resBytes;
    }

@@ -3934,8 +3971,8 @@ static void toSSDataBlock(SGroupResInfo *pGroupResInfo, SQueryRuntimeEnv* pRunti

  // refactor : extract method
  SColumnInfoData* pInfoData = taosArrayGet(pBlock->pDataBlock, 0);

  if (pInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
  //add condition (pBlock->info.rows >= 1) just to runtime happy
  if (pInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP && pBlock->info.rows >= 1) {
    STimeWindow* w = &pBlock->info.window;
    w->skey = *(int64_t*)pInfoData->pData;
    w->ekey = *(int64_t*)(((char*)pInfoData->pData) + TSDB_KEYSIZE * (pBlock->info.rows - 1));

@@ -5273,7 +5310,15 @@ static SSDataBlock* doSTableAggregate(void* param, bool* newgroup) {
    // the pDataBlock are always the same one, no need to call this again
    setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order);

    TSKEY key = QUERY_IS_ASC_QUERY(pQueryAttr)? pBlock->info.window.ekey + 1:pBlock->info.window.skey-1;
    TSKEY key = 0;
    if (QUERY_IS_ASC_QUERY(pQueryAttr)) {
      key = pBlock->info.window.ekey;
      TSKEY_MAX_ADD(key, 1);
    } else {
      key = pBlock->info.window.skey;
      TSKEY_MIN_SUB(key, -1);
    }

    setExecutionContext(pRuntimeEnv, pInfo, pOperator->numOfOutput, pRuntimeEnv->current->groupIndex, key);
    doAggregateImpl(pOperator, pQueryAttr->window.skey, pInfo->pCtx, pBlock);
  }

@@ -6479,7 +6524,7 @@ static SSDataBlock* doTagScan(void* param, bool* newgroup) {
    pOperator->status = OP_EXEC_DONE;
    qDebug("QInfo:0x%"PRIx64" create count(tbname) query, res:%d rows:1", GET_QID(pRuntimeEnv), count);
  } else {  // return only the tags|table name etc.
    SExprInfo* pExprInfo = pOperator->pExpr; // todo use the column list instead of exprinfo
    SExprInfo* pExprInfo = &pOperator->pExpr[0]; // todo use the column list instead of exprinfo

    count = 0;
    while(pInfo->curPos < pInfo->totalTables && count < maxNumOfTables) {

@@ -6565,13 +6610,25 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
    publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);

    if (pBlock == NULL) {
      setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
      pOperator->status = OP_EXEC_DONE;
      break;
    }
    if (pInfo->colIndex == -1) {
      for (int i = 0; i < taosArrayGetSize(pBlock->pDataBlock); i++) {
        SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, i);
        if (pColDataInfo->info.colId == pOperator->pExpr[0].base.resColId) {
          pInfo->colIndex = i;
          break;
        }
      }
    }
    if (pInfo->colIndex == -1) {
      setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
      pOperator->status = OP_EXEC_DONE;
      return NULL;
    }

    assert(pBlock->info.numOfCols == 1);
    SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, 0);
    SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pInfo->colIndex);

    int16_t bytes = pColInfoData->info.bytes;
    int16_t type = pColInfoData->info.type;

@@ -6623,7 +6680,8 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) {

SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
  SDistinctOperatorInfo* pInfo = calloc(1, sizeof(SDistinctOperatorInfo));

  pInfo->colIndex = -1;
  pInfo->threshold = 10000000; // distinct result threshold
  pInfo->outputCapacity = 4096;
  pInfo->pSet = taosHashInit(64, taosGetDefaultHashFunction(pExpr->base.colType), false, HASH_NO_LOCK);
  pInfo->pRes = createOutputBuf(pExpr, numOfOutput, (int32_t) pInfo->outputCapacity);

@@ -6638,6 +6696,7 @@ SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperat
  pOperator->info = pInfo;
  pOperator->pRuntimeEnv = pRuntimeEnv;
  pOperator->exec = hashDistinct;
  pOperator->pExpr = pExpr;
  pOperator->cleanup = destroyDistinctOperatorInfo;

  appendUpstream(pOperator, upstream);

@@ -104,7 +104,7 @@ static SQueryNode* doAddTableColumnNode(SQueryInfo* pQueryInfo, STableMetaInfo*
  int32_t num = (int32_t) taosArrayGetSize(pExprs);
  SQueryNode* pNode = createQueryNode(QNODE_TAGSCAN, "TableTagScan", NULL, 0, pExprs->pData, num, info, NULL);

  if (pQueryInfo->distinctTag) {
  if (pQueryInfo->distinct) {
    pNode = createQueryNode(QNODE_DISTINCT, "Distinct", &pNode, 1, pExprs->pData, num, info, NULL);
  }

@@ -551,9 +551,11 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
  int32_t op = 0;

  if (onlyQueryTags(pQueryAttr)) { // do nothing for tags query
  if (onlyQueryTags(pQueryAttr)) {
    op = OP_TagScan;
    taosArrayPush(plan, &op);
    if (pQueryAttr->distinctTag) {
  }
  if (pQueryAttr->distinct) {
    op = OP_Distinct;
    taosArrayPush(plan, &op);
  }

@@ -630,8 +632,13 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
      } else {
        op = OP_Project;
        taosArrayPush(plan, &op);
        if (pQueryAttr->distinct) {
          op = OP_Distinct;
          taosArrayPush(plan, &op);
        }
      }
    }


  if (pQueryAttr->limit.limit > 0 || pQueryAttr->limit.offset > 0) {
    op = OP_Limit;

@@ -651,7 +658,7 @@ SArray* createGlobalMergePlan(SQueryAttr* pQueryAttr) {
  int32_t op = OP_MultiwayMergeSort;
  taosArrayPush(plan, &op);

  if (pQueryAttr->distinctTag) {
  if (pQueryAttr->distinct) {
    op = OP_Distinct;
    taosArrayPush(plan, &op);
  }

@@ -712,9 +712,8 @@ void tSetColumnType(TAOS_FIELD *pField, SStrToken *type) {
    } else {
      int32_t bytes = -(int32_t)(type->type);
      if (bytes > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
        // we have to postpone reporting the error because it cannot be done here
        // as pField->bytes is int16_t, use 'TSDB_MAX_NCHAR_LEN + 1' to avoid overflow
        bytes = TSDB_MAX_NCHAR_LEN + 1;
        // overflowed. set bytes to -1 so that error can be reported
        bytes = -1;
      } else {
        bytes = bytes * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE;
      }

@@ -727,8 +726,8 @@ void tSetColumnType(TAOS_FIELD *pField, SStrToken *type) {
    } else {
      int32_t bytes = -(int32_t)(type->type);
      if (bytes > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) {
        // refer comment for NCHAR above
        bytes = TSDB_MAX_BINARY_LEN + 1;
        // overflowed. set bytes to -1 so that error can be reported
        bytes = -1;
      } else {
        bytes += VARSTR_HEADER_SIZE;
      }

@@ -722,7 +722,8 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea
    // OK,let's load row from backward to get not-null column
    for (int32_t rowId = pBlock->numOfRows - 1; rowId >= 0; rowId--) {
      SDataCol *pDataCol = pReadh->pDCols[0]->cols + i;
      tdAppendColVal(memRowDataBody(row), tdGetColDataOfRow(pDataCol, rowId), pCol->type, pCol->offset);
      const void* pColData = tdGetColDataOfRow(pDataCol, rowId);
      tdAppendColVal(memRowDataBody(row), pColData, pCol->type, pCol->offset);
      //SDataCol *pDataCol = readh.pDCols[0]->cols + j;
      void *value = tdGetRowDataOfCol(memRowDataBody(row), (int8_t)pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset);
      if (isNull(value, pCol->type)) {

@@ -735,11 +736,12 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea
        continue;
      }
      // save not-null column
      uint16_t bytes = IS_VAR_DATA_TYPE(pCol->type) ? varDataTLen(pColData) : pCol->bytes;
      SDataCol *pLastCol = &(pTable->lastCols[idx]);
      pLastCol->pData = malloc(pCol->bytes);
      pLastCol->bytes = pCol->bytes;
      pLastCol->pData = malloc(bytes);
      pLastCol->bytes = bytes;
      pLastCol->colId = pCol->colId;
      memcpy(pLastCol->pData, value, pCol->bytes);
      memcpy(pLastCol->pData, value, bytes);

      // save row ts(in column 0)
      pDataCol = pReadh->pDCols[0]->cols + 0;

@@ -1019,7 +1019,7 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro

  if (isDataRow) {
    value = tdGetRowDataOfCol(memRowDataBody(row), (int8_t)pTCol->type,
        TD_DATA_ROW_HEAD_SIZE + pSchema->columns[j].offset);
        TD_DATA_ROW_HEAD_SIZE + pTCol->offset);
  } else {
    // SKVRow
    SColIdx *pColIdx = tdGetKVRowIdxOfCol(memRowKvBody(row), pTCol->colId);

@@ -1034,14 +1034,17 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro

  SDataCol *pDataCol = &(pLatestCols[idx]);
  if (pDataCol->pData == NULL) {
    pDataCol->pData = malloc(pSchema->columns[j].bytes);
    pDataCol->bytes = pSchema->columns[j].bytes;
  } else if (pDataCol->bytes < pSchema->columns[j].bytes) {
    pDataCol->pData = realloc(pDataCol->pData, pSchema->columns[j].bytes);
    pDataCol->bytes = pSchema->columns[j].bytes;
    pDataCol->pData = malloc(pTCol->bytes);
    pDataCol->bytes = pTCol->bytes;
  } else if (pDataCol->bytes < pTCol->bytes) {
    pDataCol->pData = realloc(pDataCol->pData, pTCol->bytes);
    pDataCol->bytes = pTCol->bytes;
  }

  memcpy(pDataCol->pData, value, pDataCol->bytes);
  // the actual value size
  uint16_t bytes = IS_VAR_DATA_TYPE(pTCol->type) ? varDataTLen(value) : pTCol->bytes;
  // the actual data size CANNOT larger than column size
  assert(pTCol->bytes >= bytes);
  memcpy(pDataCol->pData, value, bytes);
  //tsdbInfo("updateTableLatestColumn vgId:%d cache column %d for %d,%s", REPO_ID(pRepo), j, pDataCol->bytes, (char*)pDataCol->pData);
  pDataCol->ts = memRowKey(row);
}

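The two tsdb fixes above hinge on the var-data layout assumed by varDataTLen: a BINARY/NCHAR cell is a 2-byte length header followed by the payload, so the bytes actually occupied are header + payload, not the column's declared maximum width. A minimal sketch of that layout under the same assumption (the macros mirror the TDengine ones; buffer size and value are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint16_t VarDataLenT;

#define varDataLen(v)  (*(VarDataLenT *)(v))
#define varDataTLen(v) (sizeof(VarDataLenT) + varDataLen(v))
#define varDataVal(v)  ((char *)(v) + sizeof(VarDataLenT))

int main(void) {
    char cell[2 + 64];                      /* column declared as BINARY(64) */
    const char *s = "Beijing";
    *(VarDataLenT *)cell = (VarDataLenT)strlen(s);
    memcpy(varDataVal(cell), s, strlen(s));

    /* Cache only the occupied bytes (2 + 7), not the full 66. */
    printf("occupied: %zu of %zu\n", (size_t)varDataTLen(cell), sizeof(cell));
    return 0;
}
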
@@ -640,7 +640,7 @@ static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGr
  size_t numOfGroup = taosArrayGetSize(pGroupList->pGroupList);

  STableGroupInfo* pNew = calloc(1, sizeof(STableGroupInfo));
  pNew->pGroupList = taosArrayInit(numOfGroup, sizeof(SArray));
  pNew->pGroupList = taosArrayInit(numOfGroup, POINTER_BYTES);

  for(int32_t i = 0; i < numOfGroup; ++i) {
    SArray* oneGroup = taosArrayGetP(pGroupList->pGroupList, i);

@@ -3383,12 +3383,14 @@ static int32_t tableGroupComparFn(const void *p1, const void *p2, const void *pa
    type = TSDB_DATA_TYPE_BINARY;
    bytes = tGetTbnameColumnSchema()->bytes;
  } else {
    if (pTableGroupSupp->pTagSchema && colIndex < pTableGroupSupp->pTagSchema->numOfCols) {
      STColumn* pCol = schemaColAt(pTableGroupSupp->pTagSchema, colIndex);
      bytes = pCol->bytes;
      type = pCol->type;
      f1 = tdGetKVRowValOfCol(pTable1->tagVal, pCol->colId);
      f2 = tdGetKVRowValOfCol(pTable2->tagVal, pCol->colId);
    }
  }

  // this tags value may be NULL
  if (f1 == NULL && f2 == NULL) {

@@ -83,6 +83,7 @@ typedef struct {
  uint8_t  deleting;        // set the deleting flag to stop refreshing ASAP.
  pthread_t refreshWorker;
  bool     extendLifespan;  // auto extend life span when one item is accessed.
  int64_t  checkTick;       // tick used to record the check times of the refresh threads
#if defined(LINUX)
  pthread_rwlock_t lock;
#else

@@ -177,6 +178,11 @@ void taosCacheCleanup(SCacheObj *pCacheObj);
 */
void taosCacheRefresh(SCacheObj *pCacheObj, __cache_free_fn_t fp);

/**
 * stop background refresh worker thread
 */
void taosStopCacheRefreshWorker();

#ifdef __cplusplus
}
#endif

@@ -54,6 +54,45 @@ static FORCE_INLINE void __cache_lock_destroy(SCacheObj *pCacheObj) {
#endif
}

/**
 * do cleanup the taos cache
 * @param pCacheObj
 */
static void doCleanupDataCache(SCacheObj *pCacheObj);

/**
 * refresh cache to remove data in both hash list and trash, if any nodes' refcount == 0, every pCacheObj->refreshTime
 * @param handle Cache object handle
 */
static void* taosCacheTimedRefresh(void *handle);

static pthread_t cacheRefreshWorker = {0};
static pthread_once_t cacheThreadInit = PTHREAD_ONCE_INIT;
static pthread_mutex_t guard = PTHREAD_MUTEX_INITIALIZER;
static SArray* pCacheArrayList = NULL;
static bool stopRefreshWorker = false;

static void doInitRefreshThread(void) {
  pCacheArrayList = taosArrayInit(4, POINTER_BYTES);

  pthread_attr_t thattr;
  pthread_attr_init(&thattr);
  pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);

  pthread_create(&cacheRefreshWorker, &thattr, taosCacheTimedRefresh, NULL);
  pthread_attr_destroy(&thattr);
}

pthread_t doRegisterCacheObj(SCacheObj* pCacheObj) {
  pthread_once(&cacheThreadInit, doInitRefreshThread);

  pthread_mutex_lock(&guard);
  taosArrayPush(pCacheArrayList, &pCacheObj);
  pthread_mutex_unlock(&guard);

  return cacheRefreshWorker;
}

/**
 * @param key    key of object for hash, usually a null-terminated string
 * @param keyLen length of key

@@ -142,19 +181,9 @@ static FORCE_INLINE void doDestroyTrashcanElem(SCacheObj* pCacheObj, STrashElem
  free(pElem);
}

/**
 * do cleanup the taos cache
 * @param pCacheObj
 */
static void doCleanupDataCache(SCacheObj *pCacheObj);

/**
 * refresh cache to remove data in both hash list and trash, if any nodes' refcount == 0, every pCacheObj->refreshTime
 * @param handle Cache object handle
 */
static void* taosCacheTimedRefresh(void *handle);

SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_free_fn_t fn, const char* cacheName) {
  const int32_t SLEEP_DURATION = 500; //500 ms

  if (refreshTimeInSeconds <= 0) {
    return NULL;
  }

@@ -176,7 +205,8 @@ SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool ext
  // set free cache node callback function
  pCacheObj->freeFp = fn;
  pCacheObj->refreshTime = refreshTimeInSeconds * 1000;
  pCacheObj->extendLifespan = extendLifespan;
  pCacheObj->checkTick = pCacheObj->refreshTime / SLEEP_DURATION;
  pCacheObj->extendLifespan = extendLifespan; // the TTL after the last access

  if (__cache_lock_init(pCacheObj) != 0) {
    taosHashCleanup(pCacheObj->pHashTable);

@@ -186,13 +216,7 @@ SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool ext
    return NULL;
  }

  pthread_attr_t thattr;
  pthread_attr_init(&thattr);
  pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);

  pthread_create(&pCacheObj->refreshWorker, &thattr, taosCacheTimedRefresh, pCacheObj);

  pthread_attr_destroy(&thattr);
  doRegisterCacheObj(pCacheObj);
  return pCacheObj;
}

@@ -364,7 +388,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {

  if (pCacheObj->extendLifespan && (!inTrashcan) && (!_remove)) {
    atomic_store_64(&pNode->expireTime, pNode->lifespan + taosGetTimestampMs());
    uDebug("cache:%s data:%p extend expire time: %"PRId64, pCacheObj->name, pNode->data, pNode->expireTime);
    uDebug("cache:%s, data:%p extend expire time: %"PRId64, pCacheObj->name, pNode->data, pNode->expireTime);
  }

  if (_remove) {

@@ -510,8 +534,10 @@ void taosCacheCleanup(SCacheObj *pCacheObj) {
  }

  pCacheObj->deleting = 1;
  if (taosCheckPthreadValid(pCacheObj->refreshWorker)) {
    pthread_join(pCacheObj->refreshWorker, NULL);

  // wait for the refresh thread to quit before destroying the cache object.
  while (atomic_load_8(&pCacheObj->deleting) != 0) {
    taosMsleep(50);
  }

  uInfo("cache:%s will be cleaned up", pCacheObj->name);
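Because caches no longer own a joinable thread, taosCacheCleanup cannot pthread_join; instead it raises `deleting` and spins until the shared worker notices, removes the cache from its scan list, and clears the flag (see the refresh loop in the next hunk). A sketch of that handshake, assuming C11 atomics in place of the tree's own atomic_* macros:

#include <stdatomic.h>

typedef struct { atomic_char deleting; } Obj;   /* illustrative stand-in for SCacheObj */

void ownerCleanup(Obj *obj) {
  atomic_store(&obj->deleting, 1);              /* ask the worker to forget this cache */
  while (atomic_load(&obj->deleting) != 0) {
    /* sleep ~50 ms per iteration, as taosMsleep(50) does */
  }
  /* safe now: the worker acknowledged and will never touch obj again */
}

void workerScanStep(Obj *obj) {
  if (atomic_load(&obj->deleting) != 0) {
    /* drop obj from the scan list, then acknowledge: */
    atomic_store(&obj->deleting, 0);
  }
}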
@@ -650,39 +676,60 @@ static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_free_fn_t
}

void* taosCacheTimedRefresh(void *handle) {
  SCacheObj* pCacheObj = handle;
  if (pCacheObj == NULL) {
    uDebug("object is destroyed. no refresh retry");
    return NULL;
  }
  assert(pCacheArrayList != NULL);
  uDebug("cache refresh thread starts");

  setThreadName("cacheTimedRefre");

  const int32_t SLEEP_DURATION = 500;  // 500 ms
  int64_t totalTick = pCacheObj->refreshTime / SLEEP_DURATION;

  int64_t count = 0;
  while (1) {
    taosMsleep(500);

    // check if current cache object will be deleted every 500ms.
    if (pCacheObj->deleting) {
      uDebug("%s refresh threads quit", pCacheObj->name);
      break;
  while (1) {
    taosMsleep(SLEEP_DURATION);
    if (stopRefreshWorker) {
      goto _end;
    }

    if (++count < totalTick) {
    pthread_mutex_lock(&guard);
    size_t size = taosArrayGetSize(pCacheArrayList);
    pthread_mutex_unlock(&guard);

    count += 1;

    for (int32_t i = 0; i < size; ++i) {
      pthread_mutex_lock(&guard);
      SCacheObj* pCacheObj = taosArrayGetP(pCacheArrayList, i);

      if (pCacheObj == NULL) {
        uError("object is destroyed. ignore and try next");
        pthread_mutex_unlock(&guard);
        continue;
      }

      // check if current cache object will be deleted every 500ms.
      if (pCacheObj->deleting) {
        taosArrayRemove(pCacheArrayList, i);
        size = taosArrayGetSize(pCacheArrayList);

        uDebug("%s is destroying, remove it from refresh list, remain cache obj:%"PRIzu, pCacheObj->name, size);
        pCacheObj->deleting = 0;  // reset the deleting flag to enable pCacheObj to continue releasing resources.

        pthread_mutex_unlock(&guard);
        continue;
      }

      pthread_mutex_unlock(&guard);

      if ((count % pCacheObj->checkTick) != 0) {
        continue;
      }

      // reset the count value
      count = 0;
      size_t elemInHash = taosHashGetSize(pCacheObj->pHashTable);
      if (elemInHash + pCacheObj->numOfElemsInTrash == 0) {
        continue;
      }

      uDebug("%s refresh thread timed scan", pCacheObj->name);
      uDebug("%s refresh thread scan", pCacheObj->name);
      pCacheObj->statistics.refreshCount++;

      // refresh data in hash table
@@ -693,7 +740,15 @@ void* taosCacheTimedRefresh(void *handle) {

      taosTrashcanEmpty(pCacheObj, false);
    }
  }

_end:
  taosArrayDestroy(pCacheArrayList);

  pCacheArrayList = NULL;
  pthread_mutex_destroy(&guard);

  uDebug("cache refresh thread quits");
  return NULL;
}
@@ -705,3 +760,7 @@ void taosCacheRefresh(SCacheObj *pCacheObj, __cache_free_fn_t fp) {
  int64_t now = taosGetTimestampMs();
  doCacheRefresh(pCacheObj, now, fp);
}

void taosStopCacheRefreshWorker() {
  stopRefreshWorker = true;  // signal the shared refresh thread to exit
}
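The worker checks `if (stopRefreshWorker) goto _end;` on every 500 ms tick, so setting the flag to true is what makes shutdown take effect. The assumed teardown order (not shown in this diff) would be:

/* After every cache has gone through taosCacheCleanup(): */
taosStopCacheRefreshWorker();   /* flips the flag; within one tick the worker      */
                                /* reaches _end, frees the list, destroys `guard`  */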
@@ -85,7 +85,7 @@ int tsCompressInit(){
  if (lossyFloat == false && lossyDouble == false)
    return 0;

  tdszInit(fPrecision, dPrecision, maxIntervals, intervals, Compressor);
  tdszInit(fPrecision, dPrecision, maxRange, curRange, Compressor);
  if (lossyFloat)
    uInfo("lossy compression float is opened. ");
  if (lossyDouble)
@@ -159,7 +159,7 @@ int tsCompressINTImp(const char *const input, const int nelements, char *const o
      break;
    }
    // Get difference.
    if (!safeInt64Add(curr_value, -prev_value)) goto _copy_and_exit;
    if (!safeInt64Add(curr_value, -prev_value_tmp)) goto _copy_and_exit;

    int64_t diff = curr_value - prev_value_tmp;
    // Zigzag encode the value.
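After the delta against the previous value is taken, it is zigzag-encoded so that small positive and negative differences both become small unsigned numbers that pack into few bits. The standard 64-bit zigzag pair, shown as a self-contained sketch rather than the exact TDengine helper:

#include <stdint.h>

/* Maps 0, -1, 1, -2, 2, ... to 0, 1, 2, 3, 4, ... */
static inline uint64_t zigzagEncode(int64_t v) {
  return ((uint64_t)v << 1) ^ (uint64_t)(v >> 63);  /* arithmetic shift replicates the sign bit */
}

static inline int64_t zigzagDecode(uint64_t u) {
  return (int64_t)(u >> 1) ^ -(int64_t)(u & 1);
}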
@@ -484,6 +484,9 @@ void taosPrintGlobalCfg() {
      case TAOS_CFG_VTYPE_FLOAT:
        uInfo(" %s:%s%f%s", cfg->option, blank, *((float *)cfg->ptr), tsGlobalUnit[cfg->unitType]);
        break;
      case TAOS_CFG_VTYPE_DOUBLE:
        uInfo(" %s:%s%f%s", cfg->option, blank, *((double *)cfg->ptr), tsGlobalUnit[cfg->unitType]);
        break;
      case TAOS_CFG_VTYPE_STRING:
      case TAOS_CFG_VTYPE_IPSTR:
      case TAOS_CFG_VTYPE_DIRECTORY:
@@ -110,6 +110,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DB_NOT_SELECTED, "Database not specifie
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_TABLE_NAME,   "Table does not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_EXCEED_SQL_LIMIT,     "SQL statement too long, check maxSQLLength config")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_FILE_EMPTY,           "File is empty")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_LINE_SYNTAX_ERROR,    "Syntax error in Line")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_NO_META_CACHED,       "No table meta cached")

// mnode
TAOS_DEFINE_ERROR(TSDB_CODE_MND_MSG_NOT_PROCESSED,    "Message not processed")
@@ -69,12 +69,12 @@ void taosStringBuilderAppendNull(SStringBuilder* sb) { taosStringBuilderAppendSt

void taosStringBuilderAppendInteger(SStringBuilder* sb, int64_t v) {
  char buf[64];
  size_t len = sprintf(buf, "%" PRId64, v);
  taosStringBuilderAppendStringLen(sb, buf, len);
  size_t len = snprintf(buf, sizeof(buf), "%" PRId64, v);
  taosStringBuilderAppendStringLen(sb, buf, MIN(len, sizeof(buf)));
}

void taosStringBuilderAppendDouble(SStringBuilder* sb, double v) {
  char buf[64];
  size_t len = sprintf(buf, "%.9lf", v);
  taosStringBuilderAppendStringLen(sb, buf, len);
  char buf[512];
  size_t len = snprintf(buf, sizeof(buf), "%.9lf", v);
  taosStringBuilderAppendStringLen(sb, buf, MIN(len, sizeof(buf)));
}
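A subtlety behind the new code: snprintf returns the length the output would have had, not the number of bytes actually stored, so the MIN(len, sizeof(buf)) clamp is what keeps the append in bounds when the output is truncated. A slightly stricter variant would clamp to sizeof(buf) - 1, since that is the most characters the buffer can hold before the terminating NUL; a hedged sketch:

#include <stdio.h>

/* Illustrative helper; `append` stands in for taosStringBuilderAppendStringLen. */
static void appendFormattedInt(long long v, void (*append)(const char *, size_t)) {
  char buf[64];
  int ret = snprintf(buf, sizeof(buf), "%lld", v);
  if (ret < 0) return;                                 /* encoding error */
  size_t len = (size_t)ret;
  if (len > sizeof(buf) - 1) len = sizeof(buf) - 1;    /* truncated: only this much is in buf */
  append(buf, len);
}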
@@ -359,7 +359,7 @@ void verify_prepare(TAOS* taos) {
    v.v8 = (int64_t)(i * 8);
    v.f4 = (float)(i * 40);
    v.f8 = (double)(i * 80);
    for (int j = 0; j < sizeof(v.bin) - 1; ++j) {
    for (int j = 0; j < sizeof(v.bin); ++j) {
      v.bin[j] = (char)(i + '0');
    }

@@ -556,7 +556,7 @@ void verify_prepare2(TAOS* taos) {
    v.v8[i] = (int64_t)(i * 8);
    v.f4[i] = (float)(i * 40);
    v.f8[i] = (double)(i * 80);
    for (int j = 0; j < sizeof(v.bin[0]) - 1; ++j) {
    for (int j = 0; j < sizeof(v.bin[0]); ++j) {
      v.bin[i][j] = (char)(i + '0');
    }
    strcpy(v.blob[i], "一二三四五六七八九十");
@@ -808,7 +808,7 @@ void verify_prepare3(TAOS* taos) {
    v.v8[i] = (int64_t)(i * 8);
    v.f4[i] = (float)(i * 40);
    v.f8[i] = (double)(i * 80);
    for (int j = 0; j < sizeof(v.bin[0]) - 1; ++j) {
    for (int j = 0; j < sizeof(v.bin[0]); ++j) {
      v.bin[i][j] = (char)(i + '0');
    }
    strcpy(v.blob[i], "一二三四五六七八九十");
@@ -954,7 +954,7 @@ int32_t verify_schema_less(TAOS* taos) {
  result = taos_query(taos, "drop database if exists test;");
  taos_free_result(result);
  usleep(100000);
  result = taos_query(taos, "create database test precision 'us';");
  result = taos_query(taos, "create database test precision 'us' update 1;");
  taos_free_result(result);
  usleep(100000);

@@ -963,6 +963,8 @@ int32_t verify_schema_less(TAOS* taos) {
  taos_free_result(result);
  usleep(100000);

  int code = 0;

  char* lines[] = {
      "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns",
      "st,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000ns",
@@ -975,8 +977,8 @@ int32_t verify_schema_less(TAOS* taos) {
      "stf,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933641000000ns"
  };

  int code = 0;
  code = taos_insert_lines(taos, lines , sizeof(lines)/sizeof(char*));

  char* lines2[] = {
      "stg,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns",
      "stg,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000ns"
@@ -989,7 +991,27 @@ int32_t verify_schema_less(TAOS* taos) {
      "sth,t1=4i64,t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933654ms"
  };
  code = taos_insert_lines(taos, lines3, 2);
  return code;

  char* lines4[] = {
      "st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns",
      "dgtyqodr,t2=5f64,t3=L\"ste\" c1=tRue,c2=4i64,c3=\"iam\" 1626056811823316532ns"
  };
  code = taos_insert_lines(taos, lines4, 2);

  char* lines5[] = {
      "zqlbgs,id=\"zqlbgs_39302_21680\",t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"binaryColValue\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000ns",
      "zqlbgs,t9=f,id=\"zqlbgs_39302_21680\",t0=f,t1=127i8,t11=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\",t10=L\"ncharTagValue\" c10=f,c0=f,c1=127i8,c12=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"binaryColValue\",c8=L\"ncharColValue\",c9=7u64,c11=L\"ncharColValue\" 1626006833639000000ns"
  };
  code = taos_insert_lines(taos, &lines5[0], 1);
  code = taos_insert_lines(taos, &lines5[1], 1);


  char* lines6[] = {
      "st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns",
      "dgtyqodr,t2=5f64,t3=L\"ste\" c1=tRue,c2=4i64,c3=\"iam\" 1626056811823316532ns"
  };
  code = taos_insert_lines(taos, lines6, 2);
  return (code);
}
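Each string handed to taos_insert_lines follows the InfluxDB-style schemaless format: measurement, comma-separated tag pairs, a space, comma-separated field pairs with typed suffixes (i64, f64, L"..." for nchar), a space, and a timestamp with a precision suffix. One of the test lines above, annotated as a sketch:

/* measurement,tag_set field_set timestamp<precision> */
const char *line =
    "st,t1=3i64,t2=4f64,t3=\"t3\" "              /* super table "st" plus three typed tags */
    "c1=3i64,c3=L\"passit\",c2=false,c4=4f64 "   /* fields: bigint, nchar, bool, double    */
    "1626006833639000000ns";                     /* explicit nanosecond timestamp          */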
int main(int argc, char *argv[]) {
@@ -1027,14 +1049,12 @@ int main(int argc, char *argv[]) {

  printf("************ verify prepare2 *************\n");
  verify_prepare2(taos);

  printf("************ verify prepare3 *************\n");
  verify_prepare3(taos);

  printf("************ verify stream *************\n");
  verify_stream(taos);
  printf("done\n");

  taos_close(taos);
  taos_cleanup();
}
@@ -9,8 +9,8 @@
#include <unistd.h>

int numSuperTables = 8;
int numChildTables = 1024;
int numRowsPerChildTable = 128;
int numChildTables = 4;
int numRowsPerChildTable = 2048;

void shuffle(char** lines, size_t n)
{
@@ -157,5 +157,45 @@ int main(int argc, char* argv[]) {
    return -1;
  }

  // Duplicate key check
  char* lines_003_1[] = {
      "std,id=\"std_3_1\",t1=4i64,Id=\"std\",t2=true c1=true 1626006834s"
  };

  code = taos_insert_lines(taos, lines_003_1 , sizeof(lines_003_1)/sizeof(char*));
  if (0 == code) {
    printf("taos_insert_lines() lines_003_1 return code:%d (%s)\n", code, (char*)tstrerror(code));
    return -1;
  }

  char* lines_003_2[] = {
      "std,id=\"std_3_2\",tag1=4i64,Tag2=true,tAg3=2,TaG2=\"dup!\" c1=true 1626006834s"
  };

  code = taos_insert_lines(taos, lines_003_2 , sizeof(lines_003_2)/sizeof(char*));
  if (0 == code) {
    printf("taos_insert_lines() lines_003_2 return code:%d (%s)\n", code, (char*)tstrerror(code));
    return -1;
  }

  char* lines_003_3[] = {
      "std,id=\"std_3_3\",tag1=4i64 field1=true,Field2=2,FIElD1=\"dup!\",fIeLd4=true 1626006834s"
  };

  code = taos_insert_lines(taos, lines_003_3 , sizeof(lines_003_3)/sizeof(char*));
  if (0 == code) {
    printf("taos_insert_lines() lines_003_3 return code:%d (%s)\n", code, (char*)tstrerror(code));
    return -1;
  }

  char* lines_003_4[] = {
      "std,id=\"std_3_4\",tag1=4i64,dupkey=4i16,tag2=T field1=true,dUpkEy=1e3f32,field2=\"1234\" 1626006834s"
  };

  code = taos_insert_lines(taos, lines_003_4 , sizeof(lines_003_4)/sizeof(char*));
  if (0 == code) {
    printf("taos_insert_lines() lines_003_4 return code:%d (%s)\n", code, (char*)tstrerror(code));
    return -1;
  }
  return 0;
}
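All four cases above expect a nonzero error code because line-protocol key names must be unique case-insensitively (Id vs id, TaG2 vs Tag2, FIElD1 vs field1, dUpkEy vs dupkey). A minimal sketch of that kind of check, assuming keys arrive as an array of C strings, which is an illustration and not the parser's actual data layout:

#include <stdbool.h>
#include <strings.h>  /* strcasecmp, POSIX */

/* Returns true if any two keys collide when compared case-insensitively. */
static bool hasDuplicateKey(const char *keys[], int n) {
  for (int i = 0; i < n; ++i) {
    for (int j = i + 1; j < n; ++j) {
      if (strcasecmp(keys[i], keys[j]) == 0) return true;
    }
  }
  return false;
}

/* e.g. {"id", "t1", "Id"} yields true, mirroring the lines_003_1 case */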
@@ -44,6 +44,7 @@ python3 ./test.py -f table/del_stable.py

#stable
python3 ./test.py -f stable/insert.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py

# tag
python3 ./test.py -f tag_lite/filter.py
@@ -161,12 +162,13 @@ python3 test.py -f tools/taosdemoTestTblAlt.py
python3 test.py -f tools/taosdemoTestSampleData.py
python3 test.py -f tools/taosdemoTestInterlace.py
python3 test.py -f tools/taosdemoTestQuery.py

# nano support
python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertTime_step.py

python3 test.py -f tools/taosdumpTestNanoSupport.py

# update
python3 ./test.py -f update/allow_update.py
@@ -198,6 +200,10 @@ python3 ./test.py -f perfbenchmark/bug3433.py
#python3 ./test.py -f perfbenchmark/bug3589.py
python3 ./test.py -f perfbenchmark/taosdemoInsert.py

#taosdemo
python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py

#query
python3 ./test.py -f query/filter.py
python3 ./test.py -f query/filterCombo.py
@@ -357,10 +363,10 @@ python3 ./test.py -f alter/alter_debugFlag.py
python3 ./test.py -f query/queryBetweenAnd.py
python3 ./test.py -f tag_lite/alter_tag.py

python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py

python3 test.py -f tools/taosdemoAllTest/TD-4985/query-limit-offset.py
python3 test.py -f tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py
python3 test.py -f tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py
python3 ./test.py -f tag_lite/drop_auto_create.py
python3 test.py -f insert/insert_before_use_db.py
python3 test.py -f alter/alter_keep.py
@@ -70,6 +70,14 @@ class TDTestCase:
        tdSql.query("select * from (select avg(value), sum(value) from st group by tbname slimit 5 soffset 7)")
        tdSql.checkRows(3)

        # https://jira.taosdata.com:18080/browse/TD-5497
        tdSql.execute("create table tt(ts timestamp ,i int)")
        tdSql.execute("insert into tt values(now, 11)(now + 1s, -12)")
        tdSql.query("select * from (select max(i),0-min(i) from tt)")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 11)
        tdSql.checkData(0, 1, 12.0)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)
@@ -0,0 +1,536 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
import random
import time


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

        self.ts = 1600000000000
        self.num = 10

    def run(self):
        tdSql.prepare()
        # test case for https://jira.taosdata.com:18080/browse/TD-5074

        startTime = time.time()

        tdSql.execute('''create stable stable_1
                (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
                q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,
                q_float float , q_double double , q_ts timestamp)
                tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint,
                t_bool bool , t_binary binary(20) , t_nchar nchar(20) ,
                t_float float , t_double double , t_ts timestamp);''')
        tdSql.execute('''create stable stable_2
                (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
                q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,
                q_float float , q_double double , q_ts timestamp)
                tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint,
                t_bool bool , t_binary binary(20) , t_nchar nchar(20) ,
                t_float float , t_double double , t_ts timestamp);''')
        tdSql.execute('''create table table_0 using stable_1
                tags('table_0' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')''')
        tdSql.execute('''create table table_1 using stable_1
                tags('table_1' , '2147483647' , '9223372036854775807' , '32767' , '127' , 1 ,
                'binary1' , 'nchar1' , '1' , '11' , \'1999-09-09 09:09:09.090\')''')
        tdSql.execute('''create table table_2 using stable_1
                tags('table_2' , '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false ,
                'binary2' , 'nchar2nchar2' , '-2.2' , '-22.22' , \'2099-09-09 09:09:09.090\')''')
        tdSql.execute('''create table table_3 using stable_1
                tags('table_3' , '3' , '3' , '3' , '3' , true , 'binary3' , 'nchar3' , '33.33' , '3333.3333' , '0')''')
        tdSql.execute('''create table table_4 using stable_1
                tags('table_4' , '4' , '4' , '4' , '4' , false , 'binary4' , 'nchar4' , '-444.444' , '-444444.444444' , '0')''')
        tdSql.execute('''create table table_5 using stable_1
                tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''')
        tdSql.execute('''create table table_21 using stable_2
                tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''')
        # regular table
        tdSql.execute('''create table regular_table_1
                (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
                q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,
                q_float float , q_double double , q_ts timestamp) ;''')

        for i in range(self.num):
            tdSql.execute('''insert into table_0 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)'''
                          % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
            tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)'''
                          % (self.ts + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i,
                             i, i, random.random(), random.random(), 1262304000001 + i))
            tdSql.execute('''insert into table_2 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)'''
                          % (self.ts + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i,
                             i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i))
            tdSql.execute('''insert into table_3 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)'''
                          % (self.ts + i, random.randint(-2147483647, 2147483647),
                             random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767),
                             random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000),
                             random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i))
            tdSql.execute('''insert into table_4 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)'''
                          % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
            tdSql.execute('''insert into table_5 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)'''
                          % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
            tdSql.execute('''insert into table_21 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)'''
                          % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))

            tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)'''
                          % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
            tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)'''
                          % (self.ts + 100 + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i,
                             i, i, random.random(), random.random(), 1262304000001 + i))
            tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)'''
                          % (self.ts + 200 + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i,
                             i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i))
            tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)'''
                          % (self.ts + 300 + i, random.randint(-2147483647, 2147483647),
                             random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767),
                             random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000),
                             random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i))
            tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)'''
                          % (self.ts + 400 + i, i, i, i, i, i, i, i, i, self.ts + i))
            tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)'''
                          % (self.ts + 500 + i, i, i, i, i, i, i, i, i, self.ts + i))

        tdLog.info("========== operator=1(OP_TableScan) ==========")
        tdLog.info("========== operator=7(OP_Project) ==========")
        sql = '''select * from stable_1'''
        tdSql.query(sql)
        tdSql.checkRows(6*self.num)
        sql = '''select * from regular_table_1'''
        tdSql.query(sql)
        tdSql.checkRows(6*self.num)

        tdLog.info("========== operator=14(OP_MultiTableAggregate ) ==========")
        sql = '''select last_row(*) from stable_1;'''
        tdSql.query(sql)
        tdSql.checkData(0,1,self.num-1)

        tdLog.info("========== operator=6(OP_Aggregate) ==========")
        sql = '''select last_row(*) from regular_table_1;'''
        tdSql.query(sql)
        tdSql.checkData(0,1,self.num-1)

        tdLog.info("========== operator=9(OP_Limit) ==========")
        sql = '''select * from stable_1 where loc = 'table_0' limit 5;'''
        tdSql.query(sql)
        tdSql.checkRows(5)
        sql = '''select last_row(*) from (select * from stable_1 where loc = 'table_0');'''
        tdSql.query(sql)
        tdSql.checkRows(1)

        sql = '''select * from regular_table_1 ;'''
        tdSql.query(sql)
        tdSql.checkRows(6*self.num)
        sql = '''select last_row(*) from (select * from regular_table_1);'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        tdSql.checkData(0,1,self.num-1)


        sql = '''select last_row(*) from
                ((select * from table_0) union all
                 (select * from table_1) union all
                 (select * from table_2));'''
        tdSql.error(sql)

        tdLog.info("========== operator=16(OP_DummyInput) ==========")
        sql = '''select last_row(*) from
                ((select last_row(*) from table_0) union all
                 (select last_row(*) from table_1) union all
                 (select last_row(*) from table_2));'''
        tdSql.error(sql)

        sql = '''select last_row(*) from
                ((select * from table_0 limit 5 offset 5) union all
                 (select * from table_1 limit 5 offset 5) union all
                 (select * from regular_table_1 limit 5 offset 5));'''
        tdSql.error(sql)

        tdLog.info("========== operator=10(OP_SLimit) ==========")
        sql = '''select count(*) from stable_1 group by loc slimit 3 soffset 2 ;'''
        tdSql.query(sql)
        tdSql.checkRows(3)

        sql = '''select last_row(*) from
                ((select * from table_0) union all
                 (select * from table_1) union all
                 (select * from table_2));'''
        tdSql.error(sql)

        tdLog.info("========== operator=20(OP_Distinct) ==========")
        tdLog.info("========== operator=4(OP_TagScan) ==========")
        sql = '''select distinct(t_bool) from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(2)
        sql = '''select distinct(loc) from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(6)
        sql = '''select distinct(t_int) from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(6)
        sql = '''select distinct(t_bigint) from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(6)
        sql = '''select distinct(t_smallint) from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(6)
        sql = '''select distinct(t_tinyint) from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(6)
        sql = '''select distinct(t_nchar) from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(6)
        sql = '''select distinct(t_float) from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(6)
        sql = '''select distinct(t_double) from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(6)
        sql = '''select distinct(t_ts) from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(3)
        sql = '''select distinct(tbname) from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(6)

        tdLog.info("========== operator=2(OP_DataBlocksOptScan) ==========")
        sql = '''select last(q_int),first(q_int) from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        sql = '''select last(q_bigint),first(q_bigint) from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        sql = '''select last(q_smallint),first(q_smallint) from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        sql = '''select last(q_tinyint),first(q_tinyint) from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        sql = '''select last(q_bool),first(q_bool) from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        sql = '''select last(q_binary),first(q_binary) from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        sql = '''select last(q_nchar),first(q_nchar) from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        sql = '''select last(q_float),first(q_float) from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        sql = '''select last(q_double),first(q_double) from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        sql = '''select last(q_ts),first(q_ts) from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar),
                last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint),
                first(q_bool),first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar),
                last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint),first(q_bool),
                first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from regular_table_1;'''
        tdSql.query(sql)
        tdSql.checkRows(1)

        tdLog.info("========== operator=8(OP_Groupby) ==========")
        sql = '''select stddev(q_int) from table_0 group by q_int;'''
        tdSql.query(sql)
        tdSql.checkRows(self.num)
        sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from stable_1 group by q_int;'''
        tdSql.query(sql)
        sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from table_1 group by q_bigint;'''
        tdSql.query(sql)
        tdSql.checkRows(self.num)
        sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from regular_table_1 group by q_smallint;'''
        tdSql.query(sql)

        tdLog.info("========== operator=11(OP_TimeWindow) ==========")
        sql = '''select last(q_int) from table_0 interval(1m);'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),
                first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from table_1 interval(1m);'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),
                first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from stable_1 interval(1m);'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),
                first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from regular_table_1 interval(1m);'''
        tdSql.query(sql)
        tdSql.checkRows(1)

        tdLog.info("========== operator=12(OP_SessionWindow) ==========")
        sql = '''select count(*) from table_1 session(ts,1s);'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        sql = '''select count(*) from regular_table_1 session(ts,1s);'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
                sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
                sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
                sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
                sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
                sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
                from table_1 session(ts,1s);'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
                sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
                sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
                sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
                sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
                sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
                from regular_table_1 session(ts,1s);'''
        tdSql.query(sql)
        tdSql.checkRows(1)

        tdLog.info("========== operator=13(OP_Fill) ==========")
        sql = '''select sum(q_int) from table_0
                where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);'''
        tdSql.query(sql)
        tdSql.checkData(0,1,'None')
        sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
                sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
                sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
                sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
                sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
                sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
                from stable_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);'''
        tdSql.query(sql)
        tdSql.checkData(0,1,'None')
        sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
                sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
                sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
                sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
                sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
                sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
                from regular_table_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);'''
        tdSql.query(sql)
        tdSql.checkData(0,1,'None')
        sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
                sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
                sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
                sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
                sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
                sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
                from table_0 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);'''
        tdSql.query(sql)
        tdSql.checkData(0,1,'None')
        # TD-5190
        sql = '''select sum(q_tinyint),stddev(q_float) from stable_1
                where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);'''
        tdSql.query(sql)
        tdSql.checkData(0,1,'None')

        tdLog.info("========== operator=15(OP_MultiTableTimeInterval) ==========")
        sql = '''select avg(q_int) from stable_1 where ts<now interval(10m);'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
                sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
                sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
                sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
                sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
                sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
                from table_1 where ts<now interval(10m);'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
                sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
                sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
                sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
                sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
                sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
                from stable_1 where ts<now interval(10m);'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
                sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
                sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
                sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
                sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
                sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
                from regular_table_1 where ts<now interval(10m);'''
        tdSql.query(sql)
        tdSql.checkRows(1)

        tdLog.info("========== operator=3(OP_TableSeqScan) ==========")
        tdLog.info("========== operator=6(OP_Aggregate) ==========")
        sql = '''select * from table_1,table_2
                where table_1.ts = table_2.ts;'''
        tdSql.query(sql)
        tdSql.checkRows(self.num)
        # TD-5206
        sql = '''select * from stable_1,stable_2
                where stable_1.t_nchar = stable_2.t_nchar and stable_1.ts = stable_2.ts;'''
        tdSql.query(sql)
        tdSql.checkRows(self.num)
        # TD-5139
        sql = '''select * from table_1,regular_table_1
                where table_1.ts = regular_table_1.ts;'''
        tdSql.query(sql)
        tdSql.checkRows(self.num)

        tdLog.info("========== operator=5(OP_TableBlockInfoScan) ==========")
        sql = '''select _block_dist() from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        sql = '''select _block_dist() from table_1;'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        sql = '''select _block_dist() from regular_table_1;'''
        tdSql.query(sql)
        tdSql.checkRows(1)

        tdLog.info("========== operator=17(OP_MultiwayMergeSort) ==========")
        tdLog.info("========== operator=18(OP_GlobalAggregate) ==========")
        tdLog.info("========== operator=19(OP_Filter) ==========")
        sql = '''select loc,sum(q_int) from stable_1
                group by loc having sum(q_int)>=0;'''
        tdSql.query(sql)
        tdSql.checkData(0,0,'table_0')
        sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
                sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
                sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
                sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
                sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
                sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
                from stable_1 group by loc having sum(q_int)>=0;'''
        tdSql.query(sql)
        tdSql.checkData(0,0,'table_0')
        sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
                sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
                sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
                sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
                sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
                sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
                from stable_1 group by loc having avg(q_int)>=0;'''
        tdSql.query(sql)
        tdSql.checkData(0,0,'table_0')
        sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
                sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
                sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
                sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
                sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
                sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
                from stable_1 group by loc having min(q_int)>=0;'''
        tdSql.query(sql)
        tdSql.checkData(0,0,'table_0')
        sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
                sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
                sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
                sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
                sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
                sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
                from stable_1 group by loc having max(q_int)>=0;'''
        tdSql.query(sql)
        tdSql.checkData(0,0,'table_0')
        sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
                sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
                sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
                sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
                sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
                sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
                from stable_1 group by loc having first(q_int)>=0;'''
        tdSql.query(sql)
        tdSql.checkData(0,0,'table_0')
        sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
                sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
                sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
                sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
                sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
                sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
                from stable_1 group by loc having last(q_int)>=0;'''
        tdSql.query(sql)
        tdSql.checkData(0,0,'table_0')

        tdLog.info("========== operator=21(OP_Join) ==========")
        sql = '''select t1.q_int,t2.q_int from
                (select ts,q_int from table_1) t1 , (select ts,q_int from table_2) t2
                where t2.ts = t1.ts;'''
        tdSql.query(sql)
        tdSql.checkRows(self.num)
        sql = '''select t1.*,t2.* from
                (select * from table_1) t1 , (select * from table_2) t2
                where t2.ts = t1.ts;'''
        tdSql.query(sql)
        tdSql.checkRows(self.num)
        sql = '''select t1.*,t2.* from
                (select * from regular_table_1) t1 , (select * from table_0) t2
                where t2.ts = t1.ts;'''
        tdSql.query(sql)
        tdSql.checkRows(self.num)
        sql = '''select t1.*,t2.* from
                (select * from stable_1) t1 , (select * from table_2) t2
                where t2.ts = t1.ts;'''
        tdSql.query(sql)
        tdSql.checkRows(self.num)
        sql = '''select t1.*,t2.* from
                (select * from regular_table_1) t1 , (select * from stable_1) t2
                where t2.ts = t1.ts;'''
        tdSql.query(sql)
        tdSql.checkRows(self.num)
        sql = '''select t1.*,t2.*,t3.* from
                (select * from regular_table_1) t1 , (select * from stable_1) t2, (select * from table_0) t3
                where t2.ts = t1.ts and t3.ts = t1.ts and t2.ts = t3.ts;'''
        tdSql.query(sql)
        tdSql.checkRows(self.num)

        tdLog.info("========== operator=22(OP_StateWindow) ==========")
        sql = '''select avg(q_int),sum(q_smallint) from table_1 state_window(q_int);'''
        tdSql.query(sql)
        tdSql.checkRows(self.num)
        sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
                sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
                sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
                sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
                sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
                sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
                from table_1 state_window(q_bigint);'''
        tdSql.query(sql)
        tdSql.checkRows(self.num)
        sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
                sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
                sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
                sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
                sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
                sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
                from regular_table_1 state_window(q_smallint);'''
        tdSql.query(sql)
        tdSql.checkRows(6*self.num)

        endTime = time.time()
        print("total time %ds" % (endTime - startTime))


    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@@ -51,7 +51,7 @@ class TDTestCase:
        tdSql.error("select last_row as latest from st")

        # query distinct on normal column
        tdSql.error("select distinct tagtype from st")
        #tdSql.error("select distinct tagtype from st")

        # query .. order by non-time field
        tdSql.error("select * from st order by name")
@ -0,0 +1,703 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import random
|
||||
import string
|
||||
import os
|
||||
import time
|
||||
from util.log import tdLog
|
||||
from util.cases import tdCases
|
||||
from util.sql import tdSql
|
||||
from util.dnodes import tdDnodes
|
||||
|
||||
class TDTestCase:
|
||||
updatecfgDict={'maxSQLLength':1048576}
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
self.ts = 1538548685000
|
||||
self.num = 100
|
||||
|
||||
def get_random_string(self, length):
|
||||
letters = string.ascii_lowercase
|
||||
result_str = ''.join(random.choice(letters) for i in range(length))
|
||||
return result_str
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
# test case for https://jira.taosdata.com:18080/browse/TD-5213
|
||||
|
||||
print("==============step1, regular table, 1 ts + 4094 cols + 1 binary==============")
|
||||
startTime = time.time()
|
||||
sql = "create table regular_table_1(ts timestamp, "
|
||||
for i in range(4094):
|
||||
sql += "col%d int, " % (i + 1)
|
||||
sql += "col4095 binary(22))"
|
||||
tdLog.info(len(sql))
|
||||
tdSql.execute(sql)
|
||||
|
||||
for i in range(self.num):
|
||||
sql = "insert into regular_table_1 values(%d, "
|
||||
for j in range(4094):
|
||||
str = "'%s', " % random.randint(0,1000)
|
||||
sql += str
|
||||
sql += "'%s')" % self.get_random_string(22)
|
||||
tdSql.execute(sql % (self.ts + i))
|
||||
time.sleep(1)
|
||||
tdSql.query("select count(*) from regular_table_1")
|
||||
tdSql.checkData(0, 0, self.num)
|
||||
tdSql.query("select * from regular_table_1")
|
||||
tdSql.checkRows(self.num)
|
||||
tdSql.checkCols(4096)
|
||||
|
||||
endTime = time.time()
|
||||
print("total time %ds" % (endTime - startTime))
|
||||
|
||||
#insert in order
|
||||
tdLog.info('test insert in order')
|
||||
for i in range(self.num):
|
||||
sql = "insert into regular_table_1 (ts,col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col4095) values(%d, "
|
||||
for j in range(10):
|
||||
str = "'%s', " % random.randint(0,1000)
|
||||
sql += str
|
||||
sql += "'%s')" % self.get_random_string(22)
|
||||
tdSql.execute(sql % (self.ts + i + 1000))
|
||||
time.sleep(1)
|
||||
tdSql.query("select count(*) from regular_table_1")
|
||||
tdSql.checkData(0, 0, 2*self.num)
|
||||
tdSql.query("select * from regular_table_1")
|
||||
tdSql.checkRows(2*self.num)
|
||||
tdSql.checkCols(4096)
|
||||
|
||||
#insert out of order
|
||||
tdLog.info('test insert out of order')
|
||||
for i in range(self.num):
|
||||
sql = "insert into regular_table_1 (ts,col123,col2213,col331,col41,col523,col236,col71,col813,col912,col1320,col4095) values(%d, "
|
||||
for j in range(10):
|
||||
str = "'%s', " % random.randint(0,1000)
|
||||
sql += str
|
||||
sql += "'%s')" % self.get_random_string(22)
|
||||
tdSql.execute(sql % (self.ts + i + 2000))
|
||||
time.sleep(1)
|
||||
tdSql.query("select count(*) from regular_table_1")
|
||||
tdSql.checkData(0, 0, 3*self.num)
|
||||
tdSql.query("select * from regular_table_1")
|
||||
tdSql.checkRows(3*self.num)
|
||||
tdSql.checkCols(4096)
|
||||
|
||||
|
||||
print("==============step2,regular table error col or value==============")
|
||||
tdLog.info('test regular table exceeds row num')
|
||||
# column > 4096
|
||||
sql = "create table regular_table_2(ts timestamp, "
|
||||
for i in range(4095):
|
||||
sql += "col%d int, " % (i + 1)
|
||||
sql += "col4096 binary(22))"
|
||||
tdLog.info(len(sql))
|
||||
tdSql.error(sql)
|
||||
|
||||
# column > 4096
|
||||
sql = "insert into regular_table_1 values(%d, "
|
||||
for j in range(4095):
|
||||
str = "'%s', " % random.randint(0,1000)
|
||||
sql += str
|
||||
sql += "'%s')" % self.get_random_string(22)
|
||||
tdSql.error(sql)
|
||||
|
||||
# insert column < 4096
|
||||
sql = "insert into regular_table_1 values(%d, "
|
||||
for j in range(4092):
|
||||
str = "'%s', " % random.randint(0,1000)
|
||||
sql += str
|
||||
sql += "'%s')" % self.get_random_string(22)
|
||||
tdSql.error(sql)
|
||||
|
||||
# alter column > 4096
|
||||
sql = "alter table regular_table_1 add column max int; "
|
||||
tdSql.error(sql)
|
||||
|
||||
print("==============step3,regular table , mix data type==============")
|
||||
startTime = time.time()
|
||||
sql = "create table regular_table_3(ts timestamp, "
|
||||
for i in range(2000):
|
||||
sql += "col%d int, " % (i + 1)
|
||||
for i in range(2000,4094):
|
||||
sql += "col%d bigint, " % (i + 1)
|
||||
sql += "col4095 binary(22))"
|
||||
tdLog.info(len(sql))
|
||||
tdSql.execute(sql)
|
||||
|
||||
for i in range(self.num):
|
||||
sql = "insert into regular_table_3 values(%d, "
|
||||
for j in range(4094):
|
||||
str = "'%s', " % random.randint(0,1000)
|
||||
sql += str
|
||||
sql += "'%s')" % self.get_random_string(22)
|
||||
tdSql.execute(sql % (self.ts + i))
|
||||
time.sleep(1)
|
||||
tdSql.query("select count(*) from regular_table_3")
|
||||
tdSql.checkData(0, 0, self.num)
|
||||
tdSql.query("select * from regular_table_3")
|
||||
tdSql.checkRows(self.num)
|
||||
tdSql.checkCols(4096)
|
||||
|
||||
endTime = time.time()
|
||||
print("total time %ds" % (endTime - startTime))
|
||||
|
||||
sql = "create table regular_table_4(ts timestamp, "
|
||||
for i in range(500):
|
||||
sql += "int_%d int, " % (i + 1)
|
||||
for i in range(500,1000):
|
||||
sql += "smallint_%d smallint, " % (i + 1)
|
||||
for i in range(1000,1500):
|
||||
sql += "tinyint_%d tinyint, " % (i + 1)
|
||||
for i in range(1500,2000):
|
||||
sql += "double_%d double, " % (i + 1)
|
||||
for i in range(2000,2500):
|
||||
sql += "float_%d float, " % (i + 1)
|
||||
for i in range(2500,3000):
|
||||
sql += "bool_%d bool, " % (i + 1)
|
||||
for i in range(3000,3500):
|
||||
sql += "bigint_%d bigint, " % (i + 1)
|
||||
for i in range(3500,3800):
|
||||
sql += "nchar_%d nchar(4), " % (i + 1)
|
||||
for i in range(3800,4090):
|
||||
sql += "binary_%d binary(10), " % (i + 1)
|
||||
for i in range(4090,4094):
|
||||
sql += "timestamp_%d timestamp, " % (i + 1)
|
||||
sql += "col4095 binary(22))"
|
||||
tdLog.info(len(sql))
|
||||
tdSql.execute(sql)
|
||||
|
||||
for i in range(self.num):
|
||||
sql = "insert into regular_table_4 values(%d, "
|
||||
for j in range(500):
|
||||
str = "'%s', " % random.randint(-2147483647,2147483647)
|
||||
sql += str
|
||||
for j in range(500,1000):
|
||||
str = "'%s', " % random.randint(-32767,32767 )
|
||||
sql += str
|
||||
for j in range(1000,1500):
|
||||
str = "'%s', " % random.randint(-127,127)
|
||||
sql += str
|
||||
for j in range(1500,2000):
|
||||
str = "'%s', " % random.randint(-922337203685477580700,922337203685477580700)
|
||||
sql += str
|
||||
for j in range(2000,2500):
|
||||
str = "'%s', " % random.randint(-92233720368547758070,92233720368547758070)
|
||||
sql += str
|
||||
for j in range(2500,3000):
|
||||
str = "'%s', " % random.choice(['true','false'])
|
||||
sql += str
|
||||
for j in range(3000,3500):
|
||||
str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807)
|
||||
sql += str
|
||||
for j in range(3500,3800):
|
||||
str = "'%s', " % self.get_random_string(4)
|
||||
sql += str
|
||||
for j in range(3800,4090):
|
||||
str = "'%s', " % self.get_random_string(10)
|
||||
sql += str
|
||||
for j in range(4090,4094):
|
||||
str = "%s, " % (self.ts + j)
|
||||
sql += str
|
||||
sql += "'%s')" % self.get_random_string(22)
|
||||
tdSql.execute(sql % (self.ts + i))
|
||||
time.sleep(1)
|
||||
tdSql.query("select count(*) from regular_table_4")
|
||||
tdSql.checkData(0, 0, self.num)
|
||||
tdSql.query("select * from regular_table_4")
|
||||
tdSql.checkRows(self.num)
|
||||
tdSql.checkCols(4096)
|
||||
tdLog.info("end ,now new one")
|
||||
|
||||
#insert null value
|
||||
tdLog.info('test insert null value')
|
||||
for i in range(self.num):
|
||||
sql = "insert into regular_table_4 values(%d, "
|
||||
for j in range(2500):
|
||||
str = "'%s', " % random.choice(['NULL' ,'NULL' ,'NULL' ,1 , 10 ,100 ,-100 ,-10, 88 ,66 ,'NULL' ,'NULL' ,'NULL' ])
|
||||
sql += str
|
||||
for j in range(2500,3000):
|
||||
str = "'%s', " % random.choice(['true' ,'false'])
|
||||
sql += str
|
||||
for j in range(3000,3500):
|
||||
str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807)
|
||||
sql += str
|
||||
for j in range(3500,3800):
|
||||
str = "'%s', " % self.get_random_string(4)
|
||||
sql += str
|
||||
for j in range(3800,4090):
|
||||
str = "'%s', " % self.get_random_string(10)
|
||||
sql += str
|
||||
for j in range(4090,4094):
|
||||
str = "%s, " % (self.ts + j)
|
||||
sql += str
|
||||
sql += "'%s')" % self.get_random_string(22)
|
||||
tdSql.execute(sql % (self.ts + i + 10000))
|
||||
time.sleep(1)
|
||||
tdSql.query("select count(*) from regular_table_4")
|
||||
tdSql.checkData(0, 0, 2*self.num)
|
||||
tdSql.query("select * from regular_table_4")
|
||||
tdSql.checkRows(2*self.num)
|
||||
tdSql.checkCols(4096)
|
||||
|
||||
        # insert in order
        tdLog.info('test insert in order')
        for i in range(self.num):
            sql = "insert into regular_table_4 (ts,int_2,int_22,int_169,smallint_537,smallint_607,tinyint_1030,tinyint_1491,double_1629,double_1808,float_2075,col4095) values(%d, "
            for j in range(10):
                sql += "'%s', " % random.randint(0, 100)
            sql += "'%s')" % self.get_random_string(22)
            tdSql.execute(sql % (self.ts + i + 1000))
        time.sleep(1)
        tdSql.query("select count(*) from regular_table_4")
        tdSql.checkData(0, 0, 3 * self.num)
        tdSql.query("select * from regular_table_4")
        tdSql.checkRows(3 * self.num)
        tdSql.checkCols(4096)

        # insert out of order
        tdLog.info('test insert out of order')
        for i in range(self.num):
            sql = "insert into regular_table_4 (ts,int_169,float_2075,int_369,tinyint_1491,tinyint_1030,float_2360,smallint_537,double_1808,double_1608,double_1629,col4095) values(%d, "
            for j in range(10):
                sql += "'%s', " % random.randint(0, 100)
            sql += "'%s')" % self.get_random_string(22)
            tdSql.execute(sql % (self.ts + i + 2000))
        time.sleep(1)
        tdSql.query("select count(*) from regular_table_4")
        tdSql.checkData(0, 0, 4 * self.num)
        tdSql.query("select * from regular_table_4")
        tdSql.checkRows(4 * self.num)
        tdSql.checkCols(4096)

        # TSDB_MAX_BYTES_PER_ROW is 49151 (earlier limits were 1024 and 16384)
        # per-type bytes: ts:8, int:4, smallint:2, bigint:8, bool:1, float:4, tinyint:1,
        # nchar: 4*len + 2 (offset), binary: 1*len + 2 (offset)
        tdLog.info('test regular_table max bytes per row 49151')
        sql = "create table regular_table_5(ts timestamp, "
        for i in range(500):
            sql += "int_%d int, " % (i + 1)
        for i in range(500, 1000):
            sql += "smallint_%d smallint, " % (i + 1)
        for i in range(1000, 1500):
            sql += "tinyint_%d tinyint, " % (i + 1)
        for i in range(1500, 2000):
            sql += "double_%d double, " % (i + 1)
        for i in range(2000, 2500):
            sql += "float_%d float, " % (i + 1)
        for i in range(2500, 3000):
            sql += "bool_%d bool, " % (i + 1)
        for i in range(3000, 3500):
            sql += "bigint_%d bigint, " % (i + 1)
        for i in range(3500, 3800):
            sql += "nchar_%d nchar(20), " % (i + 1)
        for i in range(3800, 4090):
            sql += "binary_%d binary(34), " % (i + 1)
        for i in range(4090, 4094):
            sql += "timestamp_%d timestamp, " % (i + 1)
        sql += "col4095 binary(69))"
        tdSql.execute(sql)
        tdSql.query("select * from regular_table_5")
        tdSql.checkCols(4096)
        # TD-5324: widening the last column past the row-size limit must fail
        sql = "alter table regular_table_5 modify column col4095 binary(70); "
        tdSql.error(sql)

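        # A quick sanity check of the 49151-byte budget above, assuming the
        # per-type costs from the comment (nchar: 4*len + 2, binary: len + 2):
        row_bytes = (8                     # ts
                     + 500 * 4             # int
                     + 500 * 2             # smallint
                     + 500 * 1             # tinyint
                     + 500 * 8             # double
                     + 500 * 4             # float
                     + 500 * 1             # bool
                     + 500 * 8             # bigint
                     + 300 * (4 * 20 + 2)  # nchar(20)
                     + 290 * (34 + 2)      # binary(34)
                     + 4 * 8               # timestamp
                     + (69 + 2))           # col4095 binary(69)
        assert row_bytes == 49151  # exactly at the limit; binary(70) gives 49152 and fails
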
        # drop and add
        sql = "alter table regular_table_5 drop column col4095; "
        tdSql.execute(sql)
        sql = "select * from regular_table_5; "
        tdSql.query(sql)
        tdSql.checkCols(4095)
        sql = "alter table regular_table_5 add column col4095 binary(70); "
        tdSql.error(sql)
        sql = "alter table regular_table_5 add column col4095 binary(69); "
        tdSql.execute(sql)
        sql = "select * from regular_table_5; "
        tdSql.query(sql)
        tdSql.checkCols(4096)

        # exceed TSDB_MAX_BYTES_PER_ROW 49151
        tdLog.info('test regular_table max bytes per row out of 49151')
        sql = "create table regular_table_6(ts timestamp, "
        for i in range(500):
            sql += "int_%d int, " % (i + 1)
        for i in range(500, 1000):
            sql += "smallint_%d smallint, " % (i + 1)
        for i in range(1000, 1500):
            sql += "tinyint_%d tinyint, " % (i + 1)
        for i in range(1500, 2000):
            sql += "double_%d double, " % (i + 1)
        for i in range(2000, 2500):
            sql += "float_%d float, " % (i + 1)
        for i in range(2500, 3000):
            sql += "bool_%d bool, " % (i + 1)
        for i in range(3000, 3500):
            sql += "bigint_%d bigint, " % (i + 1)
        for i in range(3500, 3800):
            sql += "nchar_%d nchar(20), " % (i + 1)
        for i in range(3800, 4090):
            sql += "binary_%d binary(34), " % (i + 1)
        for i in range(4090, 4094):
            sql += "timestamp_%d timestamp, " % (i + 1)
        sql += "col4095 binary(70))"
        tdLog.info(len(sql))
        tdSql.error(sql)

print("==============step4, super table , 1 ts + 4090 cols + 4 tags ==============")
|
||||
startTime = time.time()
|
||||
sql = "create stable stable_1(ts timestamp, "
|
||||
for i in range(4090):
|
||||
sql += "col%d int, " % (i + 1)
|
||||
sql += "col4091 binary(22))"
|
||||
sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) "
|
||||
tdLog.info(len(sql))
|
||||
tdSql.execute(sql)
|
||||
sql = '''create table table_0 using stable_1
|
||||
tags('table_0' , '1' , '2' , '3' );'''
|
||||
tdSql.execute(sql)
|
||||
|
||||
for i in range(self.num):
|
||||
sql = "insert into table_0 values(%d, "
|
||||
for j in range(4090):
|
||||
str = "'%s', " % random.randint(0,1000)
|
||||
sql += str
|
||||
sql += "'%s')" % self.get_random_string(22)
|
||||
tdSql.execute(sql % (self.ts + i))
|
||||
time.sleep(1)
|
||||
tdSql.query("select count(*) from table_0")
|
||||
tdSql.checkData(0, 0, self.num)
|
||||
tdSql.query("select * from table_0")
|
||||
tdSql.checkRows(self.num)
|
||||
tdSql.checkCols(4092)
|
||||
|
||||
sql = '''create table table_1 using stable_1
|
||||
tags('table_1' , '1' , '2' , '3' );'''
|
||||
tdSql.execute(sql)
|
||||
|
||||
for i in range(self.num):
|
||||
sql = "insert into table_1 values(%d, "
|
||||
for j in range(2080):
|
||||
sql += "'%d', " % random.randint(0,1000)
|
||||
for j in range(2080,4080):
|
||||
sql += "'%s', " % 'NULL'
|
||||
for j in range(4080,4090):
|
||||
sql += "'%s', " % random.randint(0,10000)
|
||||
sql += "'%s')" % self.get_random_string(22)
|
||||
tdSql.execute(sql % (self.ts + i))
|
||||
time.sleep(1)
|
||||
tdSql.query("select count(*) from table_1")
|
||||
tdSql.checkData(0, 0, self.num)
|
||||
tdSql.query("select * from table_1")
|
||||
tdSql.checkRows(self.num)
|
||||
tdSql.checkCols(4092)
|
||||
|
||||
endTime = time.time()
|
||||
print("total time %ds" % (endTime - startTime))
|
||||
|
||||
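        # The 4096 schema budget for stable_1, counting columns plus tags:
        # 1 ts + 4090 ints + 1 binary + 4 tags = 4096, which is why describe
        # lists 4096 rows while select * on a child table returns 4092 columns.
        assert 1 + 4090 + 1 + 4 == 4096
        assert 1 + 4090 + 1 == 4092
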
        # insert in order
        tdLog.info('test insert in order')
        for i in range(self.num):
            sql = "insert into table_1 (ts,col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col4091) values(%d, "
            for j in range(10):
                sql += "'%s', " % random.randint(0, 1000)
            sql += "'%s')" % self.get_random_string(22)
            tdSql.execute(sql % (self.ts + i + 1000))
        time.sleep(1)
        tdSql.query("select count(*) from table_1")
        tdSql.checkData(0, 0, 2 * self.num)
        tdSql.query("select * from table_1")
        tdSql.checkRows(2 * self.num)
        tdSql.checkCols(4092)

        # insert out of order
        tdLog.info('test insert out of order')
        for i in range(self.num):
            sql = "insert into table_1 (ts,col123,col2213,col331,col41,col523,col236,col71,col813,col912,col1320,col4091) values(%d, "
            for j in range(10):
                sql += "'%s', " % random.randint(0, 1000)
            sql += "'%s')" % self.get_random_string(22)
            tdSql.execute(sql % (self.ts + i + 2000))
        time.sleep(1)
        tdSql.query("select count(*) from table_1")
        tdSql.checkData(0, 0, 3 * self.num)
        tdSql.query("select * from table_1")
        tdSql.checkRows(3 * self.num)
        tdSql.checkCols(4092)

print("==============step5,stable table , mix data type==============")
|
||||
sql = "create stable stable_3(ts timestamp, "
|
||||
for i in range(500):
|
||||
sql += "int_%d int, " % (i + 1)
|
||||
for i in range(500,1000):
|
||||
sql += "smallint_%d smallint, " % (i + 1)
|
||||
for i in range(1000,1500):
|
||||
sql += "tinyint_%d tinyint, " % (i + 1)
|
||||
for i in range(1500,2000):
|
||||
sql += "double_%d double, " % (i + 1)
|
||||
for i in range(2000,2500):
|
||||
sql += "float_%d float, " % (i + 1)
|
||||
for i in range(2500,3000):
|
||||
sql += "bool_%d bool, " % (i + 1)
|
||||
for i in range(3000,3500):
|
||||
sql += "bigint_%d bigint, " % (i + 1)
|
||||
for i in range(3500,3800):
|
||||
sql += "nchar_%d nchar(4), " % (i + 1)
|
||||
for i in range(3800,4090):
|
||||
sql += "binary_%d binary(10), " % (i + 1)
|
||||
sql += "col4091 binary(22))"
|
||||
sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) "
|
||||
tdLog.info(len(sql))
|
||||
tdSql.execute(sql)
|
||||
sql = '''create table table_30 using stable_3
|
||||
tags('table_30' , '1' , '2' , '3' );'''
|
||||
tdSql.execute(sql)
|
||||
|
||||
for i in range(self.num):
|
||||
sql = "insert into table_30 values(%d, "
|
||||
for j in range(500):
|
||||
str = "'%s', " % random.randint(-2147483647,2147483647)
|
||||
sql += str
|
||||
for j in range(500,1000):
|
||||
str = "'%s', " % random.randint(-32767,32767 )
|
||||
sql += str
|
||||
for j in range(1000,1500):
|
||||
str = "'%s', " % random.randint(-127,127)
|
||||
sql += str
|
||||
for j in range(1500,2000):
|
||||
str = "'%s', " % random.randint(-922337203685477580700,922337203685477580700)
|
||||
sql += str
|
||||
for j in range(2000,2500):
|
||||
str = "'%s', " % random.randint(-92233720368547758070,92233720368547758070)
|
||||
sql += str
|
||||
for j in range(2500,3000):
|
||||
str = "'%s', " % random.choice(['true','false'])
|
||||
sql += str
|
||||
for j in range(3000,3500):
|
||||
str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807)
|
||||
sql += str
|
||||
for j in range(3500,3800):
|
||||
str = "'%s', " % self.get_random_string(4)
|
||||
sql += str
|
||||
for j in range(3800,4090):
|
||||
str = "'%s', " % self.get_random_string(10)
|
||||
sql += str
|
||||
sql += "'%s')" % self.get_random_string(22)
|
||||
tdSql.execute(sql % (self.ts + i))
|
||||
time.sleep(1)
|
||||
tdSql.query("select count(*) from table_30")
|
||||
tdSql.checkData(0, 0, self.num)
|
||||
tdSql.query("select * from table_30")
|
||||
tdSql.checkRows(self.num)
|
||||
tdSql.checkCols(4092)
|
||||
|
||||
        # insert null value
        tdLog.info('test insert null value')
        sql = '''create table table_31 using stable_3
                 tags('table_31', '1', '2', '3');'''
        tdSql.execute(sql)

        for i in range(self.num):
            sql = "insert into table_31 values(%d, "
            for j in range(2500):
                sql += "'%s', " % random.choice(['NULL', 'NULL', 'NULL', 1, 10, 100, -100, -10, 88, 66, 'NULL', 'NULL', 'NULL'])
            for j in range(2500, 3000):
                sql += "'%s', " % random.choice(['true', 'false'])
            for j in range(3000, 3500):
                sql += "'%s', " % random.randint(-9223372036854775807, 9223372036854775807)
            for j in range(3500, 3800):
                sql += "'%s', " % self.get_random_string(4)
            for j in range(3800, 4090):
                sql += "'%s', " % self.get_random_string(10)
            sql += "'%s')" % self.get_random_string(22)
            tdSql.execute(sql % (self.ts + i))
        time.sleep(1)
        tdSql.query("select count(*) from table_31")
        tdSql.checkData(0, 0, self.num)
        tdSql.query("select * from table_31")
        tdSql.checkRows(self.num)
        tdSql.checkCols(4092)

        # insert in order
        tdLog.info('test insert in order')
        for i in range(self.num):
            sql = "insert into table_31 (ts,int_2,int_22,int_169,smallint_537,smallint_607,tinyint_1030,tinyint_1491,double_1629,double_1808,float_2075,col4091) values(%d, "
            for j in range(10):
                sql += "'%s', " % random.randint(0, 100)
            sql += "'%s')" % self.get_random_string(22)
            tdSql.execute(sql % (self.ts + i + 1000))
        time.sleep(1)
        tdSql.query("select count(*) from table_31")
        tdSql.checkData(0, 0, 2 * self.num)
        tdSql.query("select * from table_31")
        tdSql.checkRows(2 * self.num)
        tdSql.checkCols(4092)

        # insert out of order
        tdLog.info('test insert out of order')
        for i in range(self.num):
            sql = "insert into table_31 (ts,int_169,float_2075,int_369,tinyint_1491,tinyint_1030,float_2360,smallint_537,double_1808,double_1608,double_1629,col4091) values(%d, "
            for j in range(10):
                sql += "'%s', " % random.randint(0, 100)
            sql += "'%s')" % self.get_random_string(22)
            tdSql.execute(sql % (self.ts + i + 2000))
        time.sleep(1)
        tdSql.query("select count(*) from table_31")
        tdSql.checkData(0, 0, 3 * self.num)
        tdSql.query("select * from table_31")
        tdSql.checkRows(3 * self.num)
        tdSql.checkCols(4092)

        # TSDB_MAX_BYTES_PER_ROW is 49151, TSDB_MAX_TAGS_LEN is 16384
        # per-type bytes: ts:8, int:4, smallint:2, bigint:8, bool:1, float:4, tinyint:1,
        # nchar: 4*len + 2 (offset), binary: 1*len + 2 (offset)
        tdLog.info('test super table max bytes per row 49151')
        sql = "create table stable_4(ts timestamp, "
        for i in range(500):
            sql += "int_%d int, " % (i + 1)
        for i in range(500, 1000):
            sql += "smallint_%d smallint, " % (i + 1)
        for i in range(1000, 1500):
            sql += "tinyint_%d tinyint, " % (i + 1)
        for i in range(1500, 2000):
            sql += "double_%d double, " % (i + 1)
        for i in range(2000, 2500):
            sql += "float_%d float, " % (i + 1)
        for i in range(2500, 3000):
            sql += "bool_%d bool, " % (i + 1)
        for i in range(3000, 3500):
            sql += "bigint_%d bigint, " % (i + 1)
        for i in range(3500, 3800):
            sql += "nchar_%d nchar(20), " % (i + 1)
        for i in range(3800, 4090):
            sql += "binary_%d binary(34), " % (i + 1)
        sql += "col4091 binary(101))"
        sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) "
        tdSql.execute(sql)
        sql = '''create table table_40 using stable_4
                 tags('table_40', '1', '2', '3');'''
        tdSql.execute(sql)
        tdSql.query("select * from table_40")
        tdSql.checkCols(4092)
        tdSql.query("describe table_40")
        tdSql.checkRows(4096)

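        # stable_4's binary(101) is also exact against the 49151-byte row
        # budget (same per-type cost assumptions as regular_table_5, minus the
        # four extra timestamp columns):
        row_bytes = (8 + 500*4 + 500*2 + 500*1 + 500*8 + 500*4 + 500*1 + 500*8
                     + 300 * (4 * 20 + 2)  # nchar(20)
                     + 290 * (34 + 2)      # binary(34)
                     + (101 + 2))          # col4091 binary(101)
        assert row_bytes == 49151  # binary(102) overflows by one byte and must fail
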
        tdLog.info('test super table drop and add column or tag')
        sql = "alter stable stable_4 drop column col4091; "
        tdSql.execute(sql)
        sql = "select * from stable_4; "
        tdSql.query(sql)
        tdSql.checkCols(4095)
        sql = "alter table stable_4 add column col4091 binary(102); "
        tdSql.error(sql)
        sql = "alter table stable_4 add column col4091 binary(101); "
        tdSql.execute(sql)
        sql = "select * from stable_4; "
        tdSql.query(sql)
        tdSql.checkCols(4096)

        sql = "alter stable stable_4 drop tag tag_1; "
        tdSql.execute(sql)
        sql = "select * from stable_4; "
        tdSql.query(sql)
        tdSql.checkCols(4095)
        sql = "alter table stable_4 add tag tag_1 int; "
        tdSql.execute(sql)
        sql = "select * from stable_4; "
        tdSql.query(sql)
        tdSql.checkCols(4096)
        sql = "alter table stable_4 add tag loc1 nchar(10); "
        tdSql.error(sql)

        tdLog.info('test super table max bytes per row out of 49151')
        sql = "create table stable_5(ts timestamp, "
        for i in range(500):
            sql += "int_%d int, " % (i + 1)
        for i in range(500, 1000):
            sql += "smallint_%d smallint, " % (i + 1)
        for i in range(1000, 1500):
            sql += "tinyint_%d tinyint, " % (i + 1)
        for i in range(1500, 2000):
            sql += "double_%d double, " % (i + 1)
        for i in range(2000, 2500):
            sql += "float_%d float, " % (i + 1)
        for i in range(2500, 3000):
            sql += "bool_%d bool, " % (i + 1)
        for i in range(3000, 3500):
            sql += "bigint_%d bigint, " % (i + 1)
        for i in range(3500, 3800):
            sql += "nchar_%d nchar(20), " % (i + 1)
        for i in range(3800, 4090):
            sql += "binary_%d binary(34), " % (i + 1)
        sql += "col4091 binary(102))"
        sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) "
        tdSql.error(sql)

print("==============step6, super table error col ==============")
|
||||
tdLog.info('test exceeds row num')
|
||||
# column + tag > 4096
|
||||
sql = "create stable stable_2(ts timestamp, "
|
||||
for i in range(4091):
|
||||
sql += "col%d int, " % (i + 1)
|
||||
sql += "col4092 binary(22))"
|
||||
sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) "
|
||||
tdLog.info(len(sql))
|
||||
tdSql.error(sql)
|
||||
|
||||
# column + tag > 4096
|
||||
sql = "create stable stable_2(ts timestamp, "
|
||||
for i in range(4090):
|
||||
sql += "col%d int, " % (i + 1)
|
||||
sql += "col4091 binary(22))"
|
||||
sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int,tag_4 int) "
|
||||
tdLog.info(len(sql))
|
||||
tdSql.error(sql)
|
||||
|
||||
# alter column + tag > 4096
|
||||
sql = "alter table stable_1 add column max int; "
|
||||
tdSql.error(sql)
|
||||
# TD-5322
|
||||
sql = "alter table stable_1 add tag max int; "
|
||||
tdSql.error(sql)
|
||||
# TD-5324
|
||||
sql = "alter table stable_4 modify column col4091 binary(102); "
|
||||
tdSql.error(sql)
|
||||
sql = "alter table stable_4 modify tag loc nchar(20); "
|
||||
tdSql.query("select * from table_40")
|
||||
tdSql.checkCols(4092)
|
||||
tdSql.query("describe table_40")
|
||||
tdSql.checkRows(4096)
|
||||
|
||||
os.system("rm -rf tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py.sql")
|
||||
|
||||
|
||||
    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@@ -15,7 +15,7 @@
    "max_sql_len": 102400000,
    "databases": [{
        "dbinfo": {
-           "name": "db",
+           "name": "json",
            "drop": "yes",
            "replica": 1,
            "days": 10,

@@ -35,13 +35,13 @@
        "super_tables": [{
            "name": "stb_old",
            "child_table_exists":"no",
-           "childtable_count": 10,
+           "childtable_count": 1,
            "childtable_prefix": "stb_old_",
            "auto_create_table": "no",
            "batch_create_tbl_num": 5,
-           "data_source": "sample",
+           "data_source": "rand",
            "insert_mode": "taosc",
-           "insert_rows": 100,
+           "insert_rows": 10,
            "childtable_limit": 0,
            "childtable_offset":0,
            "multi_thread_write_one_tbl": "no",

@@ -55,18 +55,18 @@
            "sample_format": "csv",
            "sample_file": "./tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv",
            "tags_file": "",
-           "columns": [{"type": "INT","count":4000}, {"type": "BINARY", "len": 16, "count":1}],
+           "columns": [{"type": "INT","count":1000}, {"type": "BINARY", "len": 16, "count":20}],
            "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
        },{
            "name": "stb_new",
            "child_table_exists":"no",
-           "childtable_count": 10,
+           "childtable_count": 1,
            "childtable_prefix": "stb_new_",
            "auto_create_table": "no",
            "batch_create_tbl_num": 5,
            "data_source": "rand",
            "insert_mode": "taosc",
-           "insert_rows": 100,
+           "insert_rows": 10,
            "childtable_limit": 0,
            "childtable_offset":0,
            "multi_thread_write_one_tbl": "no",

@@ -80,18 +80,18 @@
            "sample_format": "csv",
            "sample_file": "./tools/taosdemoAllTest/sample.csv",
            "tags_file": "",
-           "columns": [{"type": "DOUBLE","count":1020}],
-           "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
+           "columns": [{"type": "INT","count":4000}, {"type": "BINARY", "len": 16, "count":90}],
+           "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":3}]
        },{
-           "name": "stb_int",
+           "name": "stb_mix",
            "child_table_exists":"no",
-           "childtable_count": 10,
-           "childtable_prefix": "stb_int_",
+           "childtable_count": 1,
+           "childtable_prefix": "stb_mix_",
            "auto_create_table": "no",
            "batch_create_tbl_num": 5,
            "data_source": "rand",
            "insert_mode": "taosc",
-           "insert_rows": 100,
+           "insert_rows": 10,
            "childtable_limit": 0,
            "childtable_offset":0,
            "multi_thread_write_one_tbl": "no",

@@ -105,8 +105,33 @@
            "sample_format": "csv",
            "sample_file": "./tools/taosdemoAllTest/sample.csv",
            "tags_file": "",
-           "columns": [{"type": "int","count":1020}],
-           "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
+           "columns": [{"type": "INT","count":500},{"type": "SMALLINT","count":500},{"type": "TINYINT","count":500},{"type": "DOUBLE","count":500},{"type": "FLOAT","count":500},{"type": "BOOL","count":500},{"type": "BIGINT","count":500},{"type": "NCHAR","len": 20,"count":300},{"type": "BINARY","len": 34,"count":290},{"type": "BINARY","len": 101,"count":1}],
+           "tags": [{"type": "INT", "count":3}, {"type": "NCHAR", "len": 10, "count":1}]
+       },{
+           "name": "stb_excel",
+           "child_table_exists":"no",
+           "childtable_count": 1,
+           "childtable_prefix": "stb_excel_",
+           "auto_create_table": "no",
+           "batch_create_tbl_num": 5,
+           "data_source": "sample",
+           "insert_mode": "taosc",
+           "insert_rows": 10,
+           "childtable_limit": 0,
+           "childtable_offset":0,
+           "multi_thread_write_one_tbl": "no",
+           "interlace_rows": 0,
+           "insert_interval":0,
+           "max_sql_len": 1024000,
+           "disorder_ratio": 0,
+           "disorder_range": 1000,
+           "timestamp_step": 1,
+           "start_timestamp": "2020-10-01 00:00:00.000",
+           "sample_format": "csv",
+           "sample_file": "./tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv",
+           "tags_file": "",
+           "columns": [{"type": "INT","count":500},{"type": "SMALLINT","count":500},{"type": "SMALLINT","count":500},{"type": "DOUBLE","count":500},{"type": "FLOAT","count":500},{"type": "BOOL","count":500},{"type": "BIGINT","count":500},{"type": "NCHAR","len": 19,"count":300},{"type": "BINARY","len": 34,"count":290},{"type": "BINARY","len": 101,"count":1}],
+           "tags": [{"type": "INT", "count":3}, {"type": "NCHAR", "len": 10, "count":1}]
        }]
    }]
}
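The stb_mix column spec above lands exactly on the 4096-entry schema budget that the Python tests exercise; a quick check (assuming the limit counts ts, data columns, and tags together):

cols = 7 * 500 + 300 + 290 + 1      # INT..BIGINT blocks, NCHAR, BINARY(34), BINARY(101)
assert 1 + cols + (3 + 1) == 4096   # ts + columns + 3 INT tags + 1 NCHAR tag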
@@ -13,6 +13,7 @@

import sys
import os
import time
from util.log import *
from util.cases import *
from util.sql import *
@@ -48,83 +49,121 @@ class TDTestCase:
            tdLog.info("taosd found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"

+       # -N: regular table; -d: database name; -t: table num; -n: rows per table; -l: col num; -y: force
+       # regular tables, old (1024) and new (4096) column limits
+       startTime = time.time()
+       os.system("%staosdemo -N -d regular_old -t 1 -n 10 -l 1023 -y" % binPath)
+       tdSql.execute("use regular_old")
+       tdSql.query("show tables;")
+       tdSql.checkRows(1)
+       tdSql.query("select * from d0;")
+       tdSql.checkCols(1024)
+       tdSql.query("describe d0;")
+       tdSql.checkRows(1024)
+
+       os.system("%staosdemo -N -d regular_new -t 1 -n 10 -l 4095 -y" % binPath)
+       tdSql.execute("use regular_new")
+       tdSql.query("show tables;")
+       tdSql.checkRows(1)
+       tdSql.query("select * from d0;")
+       tdSql.checkCols(4096)
+       tdSql.query("describe d0;")
+       tdSql.checkRows(4096)
+
+       # super table: -d database name, -t table num, -n rows per table, -l col num, -y force
+       os.system("%staosdemo -d super_old -t 1 -n 10 -l 1021 -y" % binPath)
+       tdSql.execute("use super_old")
+       tdSql.query("show tables;")
+       tdSql.checkRows(1)
+       tdSql.query("select * from meters;")
+       tdSql.checkCols(1024)
+       tdSql.query("select * from d0;")
+       tdSql.checkCols(1022)
+       tdSql.query("describe meters;")
+       tdSql.checkRows(1024)
+       tdSql.query("describe d0;")
+       tdSql.checkRows(1024)
+
+       os.system("%staosdemo -d super_new -t 1 -n 10 -l 4093 -y" % binPath)
+       tdSql.execute("use super_new")
+       tdSql.query("show tables;")
+       tdSql.checkRows(1)
+       tdSql.query("select * from meters;")
+       tdSql.checkCols(4096)
+       tdSql.query("select * from d0;")
+       tdSql.checkCols(4094)
+       tdSql.query("describe meters;")
+       tdSql.checkRows(4096)
+       tdSql.query("describe d0;")
+       tdSql.checkRows(4096)
+       tdSql.execute("create table stb_new1_1 using meters tags(1,2)")
+       tdSql.query("select * from stb_new1_1")
+       tdSql.checkCols(4094)
+       tdSql.query("describe stb_new1_1;")
+       tdSql.checkRows(4096)

        # insert: create one or multiple tables per sql and insert multiple rows per sql
        # test case for https://jira.taosdata.com:18080/browse/TD-5213
        os.system("%staosdemo -f tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json -y " % binPath)
-       tdSql.execute("use db")
+       tdSql.execute("use json")
        tdSql.query("select count (tbname) from stb_old")
-       tdSql.checkData(0, 0, 10)
+       tdSql.checkData(0, 0, 1)

-       # tdSql.query("select * from stb_old")
-       # tdSql.checkRows(10)
-       # tdSql.checkCols(1024)
+       tdSql.query("select * from stb_old")
+       tdSql.checkRows(10)
+       tdSql.checkCols(1024)

-       # tdSql.query("select count (tbname) from stb_new")
-       # tdSql.checkData(0, 0, 10)
+       tdSql.query("select count (tbname) from stb_new")
+       tdSql.checkData(0, 0, 1)

-       # tdSql.query("select * from stb_new")
-       # tdSql.checkRows(10)
-       # tdSql.checkCols(4096)
+       tdSql.query("select * from stb_new")
+       tdSql.checkRows(10)
+       tdSql.checkCols(4096)
+       tdSql.query("describe stb_new;")
+       tdSql.checkRows(4096)
+       tdSql.query("select * from stb_new_0")
+       tdSql.checkRows(10)
+       tdSql.checkCols(4091)
+       tdSql.query("describe stb_new_0;")
+       tdSql.checkRows(4096)
+       tdSql.execute("create table stb_new1_1 using stb_new tags(1,2,3,4,5)")
+       tdSql.query("select * from stb_new1_1")
+       tdSql.checkCols(4091)
+       tdSql.query("describe stb_new1_1;")
+       tdSql.checkRows(4096)

-       # tdLog.info("stop dnode to commit data to disk")
-       # tdDnodes.stop(1)
-       # tdDnodes.start(1)
+       tdSql.query("select count (tbname) from stb_mix")
+       tdSql.checkData(0, 0, 1)
+       tdSql.query("select * from stb_mix")
+       tdSql.checkRows(10)
+       tdSql.checkCols(4096)
+       tdSql.query("describe stb_mix;")
+       tdSql.checkRows(4096)
+       tdSql.query("select * from stb_mix_0")
+       tdSql.checkRows(10)
+       tdSql.checkCols(4092)
+       tdSql.query("describe stb_mix_0;")
+       tdSql.checkRows(4096)

-       # regular table
-       sql = "create table tb(ts timestamp, "
-       for i in range(1022):
-           sql += "c%d binary(14), " % (i + 1)
-       sql += "c1023 binary(22))"
-       tdSql.execute(sql)
-
-       for i in range(4):
-           sql = "insert into tb values(%d, "
-           for j in range(1022):
-               sql += "'%s', " % self.get_random_string(14)
-           sql += "'%s')" % self.get_random_string(22)
-           tdSql.execute(sql % (self.ts + i))
-
-       time.sleep(10)
-       tdSql.query("select count(*) from tb")
-       tdSql.checkData(0, 0, 4)
-
-       tdDnodes.stop(1)
-       tdDnodes.start(1)
-
-       time.sleep(1)
-       tdSql.query("select count(*) from tb")
-       tdSql.checkData(0, 0, 4)
-
-       sql = "create table tb1(ts timestamp, "
-       for i in range(4094):
-           sql += "c%d binary(14), " % (i + 1)
-       sql += "c4095 binary(22))"
-       tdSql.execute(sql)
-
-       for i in range(4):
-           sql = "insert into tb1 values(%d, "
-           for j in range(4094):
-               sql += "'%s', " % self.get_random_string(14)
-           sql += "'%s')" % self.get_random_string(22)
-           tdSql.execute(sql % (self.ts + i))
-
-       time.sleep(10)
-       tdSql.query("select count(*) from tb1")
-       tdSql.checkData(0, 0, 4)
-
-       tdDnodes.stop(1)
-       tdDnodes.start(1)
-
-       time.sleep(1)
-       tdSql.query("select count(*) from tb1")
-       tdSql.checkData(0, 0, 4)
+       tdSql.query("select count (tbname) from stb_excel")
+       tdSql.checkData(0, 0, 1)
+       tdSql.query("select * from stb_excel")
+       tdSql.checkRows(10)
+       tdSql.checkCols(4096)
+       tdSql.query("describe stb_excel;")
+       tdSql.checkRows(4096)
+       tdSql.query("select * from stb_excel_0")
+       tdSql.checkRows(10)
+       tdSql.checkCols(4092)
+       tdSql.query("describe stb_excel_0;")
+       tdSql.checkRows(4096)
+       endTime = time.time()
+       print("total time %ds" % (endTime - startTime))

-       #os.system("rm -rf tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py.sql")
+       os.system("rm -rf tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py.sql")
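The -l column counts in the taosdemo runs above line up exactly with the describe/checkCols assertions; a quick sketch of the arithmetic (assuming -l counts data columns excluding ts, and that a super table's result set appends its 2 tags):

assert 1 + 1023 == 1024      # regular_old: ts + columns
assert 1 + 4095 == 4096      # regular_new
assert 1 + 1021 + 2 == 1024  # super_old meters: ts + columns + 2 tags
assert 1 + 4093 + 2 == 4096  # super_new meters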
@@ -160,7 +160,7 @@ class TDTestCase:
        tdSql.execute("drop database if exists db")
        os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertSigcolumnsNum4096-stmt.json -y " % binPath)
        tdSql.error("select * from db.stb0")
-       tdSql.execute("drop database if exists db")
+       # tdSql.execute("drop database if exists db")
        # os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertColumnsAndTagNum4096-stmt.json -y " % binPath)
        # tdSql.query("select count(*) from db.stb0")
        # tdSql.checkData(0, 0, 10000)
@@ -247,7 +247,7 @@ class TDTestCase:
        # # insert: sample json
        # os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-sample-stmt.json -y " % binPath)
        # tdSql.execute("use dbtest123")
-       # tdSql.query("select col2 from stb0")
+       # tdSql.query("select c2 from stb0")
        # tdSql.checkData(0, 0, 2147483647)
        # tdSql.query("select * from stb1 where t1=-127")
        # tdSql.checkRows(20)
@@ -0,0 +1,362 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

        self.ts = 1625068800000000000  # timestamp "2021-07-01 00:00:00" in ns
        self.numberOfTables = 10
        self.numberOfRecords = 100

    def checkCommunity(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))
        if ("community" in selfPath):
            return False
        else:
            return True

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        buildPath = ""
        for root, dirs, files in os.walk(projPath):
            if ("taosdump" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

def createdb(self, precision="ns"):
|
||||
tb_nums = self.numberOfTables
|
||||
per_tb_rows = self.numberOfRecords
|
||||
|
||||
def build_db(precision, start_time):
|
||||
tdSql.execute("drop database if exists timedb1")
|
||||
tdSql.execute(
|
||||
"create database timedb1 days 10 keep 365 blocks 8 precision "+"\""+precision+"\"")
|
||||
|
||||
tdSql.execute("use timedb1")
|
||||
tdSql.execute(
|
||||
"create stable st(ts timestamp, c1 int, c2 nchar(10),c3 timestamp) tags(t1 int, t2 binary(10))")
|
||||
for tb in range(tb_nums):
|
||||
tbname = "t"+str(tb)
|
||||
tdSql.execute("create table " + tbname +
|
||||
" using st tags(1, 'beijing')")
|
||||
sql = "insert into " + tbname + " values"
|
||||
currts = start_time
|
||||
if precision == "ns":
|
||||
ts_seed = 1000000000
|
||||
elif precision == "us":
|
||||
ts_seed = 1000000
|
||||
else:
|
||||
ts_seed = 1000
|
||||
|
||||
for i in range(per_tb_rows):
|
||||
sql += "(%d, %d, 'nchar%d',%d)" % (currts + i*ts_seed, i %
|
||||
100, i % 100, currts + i*100) # currts +1000ms (1000000000ns)
|
||||
tdSql.execute(sql)
|
||||
|
||||
if precision == "ns":
|
||||
start_time = 1625068800000000000
|
||||
build_db(precision, start_time)
|
||||
|
||||
elif precision == "us":
|
||||
start_time = 1625068800000000
|
||||
build_db(precision, start_time)
|
||||
|
||||
elif precision == "ms":
|
||||
start_time = 1625068800000
|
||||
build_db(precision, start_time)
|
||||
|
||||
else:
|
||||
print("other time precision not valid , please check! ")
|
||||
|
||||
|
||||
    def run(self):

        # clear envs
        os.system("rm -rf ./taosdumptest/")
        tdSql.execute("drop database if exists dumptmp1")
        tdSql.execute("drop database if exists dumptmp2")
        tdSql.execute("drop database if exists dumptmp3")

        if not os.path.exists("./taosdumptest/dumptmp1"):
            os.makedirs("./taosdumptest/dumptmp1")
        else:
            print("path exists!")

        if not os.path.exists("./taosdumptest/dumptmp2"):
            os.makedirs("./taosdumptest/dumptmp2")

        if not os.path.exists("./taosdumptest/dumptmp3"):
            os.makedirs("./taosdumptest/dumptmp3")

        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosdump not found!")
        else:
            tdLog.info("taosdump found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"

        # create nanosecond database
        self.createdb(precision="ns")

        # dump all data
        os.system(
            "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath)

        # dump part of the data with -S (start time) and -E (end time)
        os.system(
            '%staosdump --databases timedb1 -S 1625068810000000000 -E 1625068860000000000 -C ns -o ./taosdumptest/dumptmp2 ' %
            binPath)
        os.system(
            '%staosdump --databases timedb1 -S 1625068810000000000 -o ./taosdumptest/dumptmp3 ' %
            binPath)

        # rename the database inside the dump files so each import creates its own db
        os.system(
            "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`")
        os.system(
            "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`")
        os.system(
            "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`")

        os.system("%staosdump -i ./taosdumptest/dumptmp1" % binPath)
        os.system("%staosdump -i ./taosdumptest/dumptmp2" % binPath)
        os.system("%staosdump -i ./taosdumptest/dumptmp3" % binPath)

        # check row counts after import
        tdSql.query("select count(*) from dumptmp1.st")
        tdSql.checkData(0, 0, 1000)

        tdSql.query("select count(*) from dumptmp2.st")
        tdSql.checkData(0, 0, 510)

        tdSql.query("select count(*) from dumptmp3.st")
        tdSql.checkData(0, 0, 900)

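        # Why 1000 / 510 / 900: rows are written one second apart starting at
        # 2021-07-01 00:00:00, 100 rows for each of 10 tables, so the -S/-E
        # window [start+10 s, start+60 s] keeps i = 10..60 and -S alone keeps
        # i = 10..99 (a sanity sketch of the arithmetic):
        assert 10 * 100 == 1000                 # full dump
        assert 10 * len(range(10, 61)) == 510   # -S and -E, both ends inclusive
        assert 10 * len(range(10, 100)) == 900  # -S only
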
        # check data
        origin_res = tdSql.getResult("select * from timedb1.st")
        dump_res = tdSql.getResult("select * from dumptmp1.st")
        if origin_res == dump_res:
            tdLog.info("test nano second: dump check data pass for all data!")
        else:
            tdLog.info("test nano second: dump check data failed for all data!")

        origin_res = tdSql.getResult("select * from timedb1.st where ts >= 1625068810000000000 and ts <= 1625068860000000000")
        dump_res = tdSql.getResult("select * from dumptmp2.st")
        if origin_res == dump_res:
            tdLog.info("test nano second: dump check data pass for data!")
        else:
            tdLog.info("test nano second: dump check data failed for data!")

        origin_res = tdSql.getResult("select * from timedb1.st where ts >= 1625068810000000000")
        dump_res = tdSql.getResult("select * from dumptmp3.st")
        if origin_res == dump_res:
            tdLog.info("test nano second: dump check data pass for data!")
        else:
            tdLog.info("test nano second: dump check data failed for data!")

        # us precision support test case
        os.system("rm -rf ./taosdumptest/")
        tdSql.execute("drop database if exists dumptmp1")
        tdSql.execute("drop database if exists dumptmp2")
        tdSql.execute("drop database if exists dumptmp3")

        if not os.path.exists("./taosdumptest/dumptmp1"):
            os.makedirs("./taosdumptest/dumptmp1")
        else:
            print("path exists!")

        if not os.path.exists("./taosdumptest/dumptmp2"):
            os.makedirs("./taosdumptest/dumptmp2")

        if not os.path.exists("./taosdumptest/dumptmp3"):
            os.makedirs("./taosdumptest/dumptmp3")

        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosdump not found!")
        else:
            tdLog.info("taosdump found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"

        self.createdb(precision="us")

        os.system(
            "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath)

        os.system(
            '%staosdump --databases timedb1 -S 1625068810000000 -E 1625068860000000 -C us -o ./taosdumptest/dumptmp2 ' %
            binPath)
        os.system(
            '%staosdump --databases timedb1 -S 1625068810000000 -o ./taosdumptest/dumptmp3 ' %
            binPath)

        os.system(
            "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`")
        os.system(
            "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`")
        os.system(
            "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`")

        os.system("%staosdump -i ./taosdumptest/dumptmp1" % binPath)
        os.system("%staosdump -i ./taosdumptest/dumptmp2" % binPath)
        os.system("%staosdump -i ./taosdumptest/dumptmp3" % binPath)

        tdSql.query("select count(*) from dumptmp1.st")
        tdSql.checkData(0, 0, 1000)

        tdSql.query("select count(*) from dumptmp2.st")
        tdSql.checkData(0, 0, 510)

        tdSql.query("select count(*) from dumptmp3.st")
        tdSql.checkData(0, 0, 900)

        origin_res = tdSql.getResult("select * from timedb1.st")
        dump_res = tdSql.getResult("select * from dumptmp1.st")
        if origin_res == dump_res:
            tdLog.info("test us second: dump check data pass for all data!")
        else:
            tdLog.info("test us second: dump check data failed for all data!")

        origin_res = tdSql.getResult("select * from timedb1.st where ts >= 1625068810000000 and ts <= 1625068860000000")
        dump_res = tdSql.getResult("select * from dumptmp2.st")
        if origin_res == dump_res:
            tdLog.info("test us second: dump check data pass for data!")
        else:
            tdLog.info("test us second: dump check data failed for data!")

        origin_res = tdSql.getResult("select * from timedb1.st where ts >= 1625068810000000")
        dump_res = tdSql.getResult("select * from dumptmp3.st")
        if origin_res == dump_res:
            tdLog.info("test us second: dump check data pass for data!")
        else:
            tdLog.info("test us second: dump check data failed for data!")

        # ms precision support test case
        os.system("rm -rf ./taosdumptest/")
        tdSql.execute("drop database if exists dumptmp1")
        tdSql.execute("drop database if exists dumptmp2")
        tdSql.execute("drop database if exists dumptmp3")

        if not os.path.exists("./taosdumptest/dumptmp1"):
            os.makedirs("./taosdumptest/dumptmp1")
        else:
            print("path exists!")

        if not os.path.exists("./taosdumptest/dumptmp2"):
            os.makedirs("./taosdumptest/dumptmp2")

        if not os.path.exists("./taosdumptest/dumptmp3"):
            os.makedirs("./taosdumptest/dumptmp3")

        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosdump not found!")
        else:
            tdLog.info("taosdump found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"

        self.createdb(precision="ms")

        os.system(
            "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath)

        os.system(
            '%staosdump --databases timedb1 -S 1625068810000 -E 1625068860000 -C ms -o ./taosdumptest/dumptmp2 ' %
            binPath)
        os.system(
            '%staosdump --databases timedb1 -S 1625068810000 -o ./taosdumptest/dumptmp3 ' %
            binPath)

        os.system(
            "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`")
        os.system(
            "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`")
        os.system(
            "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`")

        os.system("%staosdump -i ./taosdumptest/dumptmp1" % binPath)
        os.system("%staosdump -i ./taosdumptest/dumptmp2" % binPath)
        os.system("%staosdump -i ./taosdumptest/dumptmp3" % binPath)

        tdSql.query("select count(*) from dumptmp1.st")
        tdSql.checkData(0, 0, 1000)

        tdSql.query("select count(*) from dumptmp2.st")
        tdSql.checkData(0, 0, 510)

        tdSql.query("select count(*) from dumptmp3.st")
        tdSql.checkData(0, 0, 900)

        origin_res = tdSql.getResult("select * from timedb1.st")
        dump_res = tdSql.getResult("select * from dumptmp1.st")
        if origin_res == dump_res:
            tdLog.info("test ms second: dump check data pass for all data!")
        else:
            tdLog.info("test ms second: dump check data failed for all data!")

        origin_res = tdSql.getResult("select * from timedb1.st where ts >= 1625068810000 and ts <= 1625068860000")
        dump_res = tdSql.getResult("select * from dumptmp2.st")
        if origin_res == dump_res:
            tdLog.info("test ms second: dump check data pass for data!")
        else:
            tdLog.info("test ms second: dump check data failed for data!")

        origin_res = tdSql.getResult("select * from timedb1.st where ts >= 1625068810000")
        dump_res = tdSql.getResult("select * from dumptmp3.st")
        if origin_res == dump_res:
            tdLog.info("test ms second: dump check data pass for data!")
        else:
            tdLog.info("test ms second: dump check data failed for data!")

os.system("rm -rf ./taosdumptest/")
|
||||
os.system("rm -rf ./dump_result.txt")
|
||||
os.system("rm -rf *.py.sql")
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@@ -197,6 +197,19 @@ class TDSql:
        self.checkRowCol(row, col)
        return self.queryResult[row][col]

+   def getResult(self, sql):
+       self.sql = sql
+       try:
+           self.cursor.execute(sql)
+           self.queryResult = self.cursor.fetchall()
+       except Exception as e:
+           caller = inspect.getframeinfo(inspect.stack()[1][0])
+           args = (caller.filename, caller.lineno, sql, repr(e))
+           tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
+           raise Exception(repr(e))
+       return self.queryResult
+
    def executeTimes(self, sql, times):
        for i in range(times):
            try:
@@ -23,7 +23,9 @@ menu(){
    echo "=============================="
    echo "3 arbitrator"
    echo "=============================="
-   echo "4 exit"
+   echo "4 alter replica"
+   echo "=============================="
+   echo "5 exit"
    echo "=============================="
}

@@ -310,6 +312,7 @@
        2)
            var=`ps -ef | grep tarbitrator | awk '{print $2}' | head -n 1`
            kill -9 $var
+           echo -e "\033[32mSuccessfully stop arbitrator $3 \033[0m"
            break
            ;;
        3)

@@ -318,6 +321,13 @@
            esac
            ;;
        4)
+           read -p "Enter replica number: " rep
+           read -p "Enter database name: " db
+           taos -s "alter database $db replica $rep"
+           echo -e "\033[32mSuccessfully change $db's replica to $rep \033[0m"
            break
            ;;
+       5)
+           break
+           ;;
    esac
@@ -0,0 +1,8 @@
while :
do
    dlog=`taos -s "show dnodes"`
    mlog=`taos -s "show mnodes"`
    echo "$dlog" | tee -a dnode.log
    echo "$mlog" | tee -a mnode.log
    sleep 1s
done
@@ -13,7 +13,9 @@ all: $(TARGET)
exe:
	gcc $(CFLAGS) ./batchprepare.c -o $(ROOT)batchprepare $(LFLAGS)
	gcc $(CFLAGS) ./stmtBatchTest.c -o $(ROOT)stmtBatchTest $(LFLAGS)
+	gcc $(CFLAGS) ./stmtTest.c -o $(ROOT)stmtTest $(LFLAGS)

clean:
	rm $(ROOT)batchprepare
	rm $(ROOT)stmtBatchTest
+	rm $(ROOT)stmtTest
@@ -0,0 +1,238 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "taos.h"
#include <sys/time.h>
#include <pthread.h>
#include <unistd.h>

#define PRINT_ERROR printf("\033[31m");
#define PRINT_SUCCESS printf("\033[32m");

void execute_simple_sql(void *taos, char *sql) {
    TAOS_RES *result = taos_query(taos, sql);
    if (result == NULL || taos_errno(result) != 0) {
        PRINT_ERROR
        printf("failed to %s, Reason: %s\n", sql, taos_errstr(result));
        taos_free_result(result);
        exit(EXIT_FAILURE);
    }
    taos_free_result(result);
    PRINT_SUCCESS
    printf("Successfully %s\n", sql);
}

void check_result(TAOS *taos, int id, int expected) {
    char sql[256] = {0};
    sprintf(sql, "select * from t%d", id);
    TAOS_RES *result;
    result = taos_query(taos, sql);
    if (result == NULL || taos_errno(result) != 0) {
        PRINT_ERROR
        printf("failed to %s, Reason: %s\n", sql, taos_errstr(result));
        exit(EXIT_FAILURE);
    }
    PRINT_SUCCESS
    printf("Successfully execute %s\n", sql);
    int rows = 0;
    TAOS_ROW row;
    while ((row = taos_fetch_row(result))) {
        rows++;
    }
    if (rows == expected) {
        PRINT_SUCCESS
        printf("table t%d's %d rows are fetched as expected\n", id, rows);
    } else {
        PRINT_ERROR
        printf("table t%d's %d rows are fetched but %d expected\n", id, rows, expected);
    }
    taos_free_result(result);
}

int main(int argc, char *argv[]) {
    void *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0);
    if (taos == NULL) {
        PRINT_ERROR
        printf("TDengine error: failed to connect\n");
        exit(EXIT_FAILURE);
    }
    PRINT_SUCCESS
    printf("Successfully connected to TDengine\n");

    execute_simple_sql(taos, "drop database if exists test");
    execute_simple_sql(taos, "create database test");
    execute_simple_sql(taos, "use test");
    execute_simple_sql(taos, "create table super(ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 binary(8), c6 smallint, c7 tinyint, c8 bool, c9 nchar(8), c10 timestamp) tags (t1 int, t2 bigint, t3 float, t4 double, t5 binary(8), t6 smallint, t7 tinyint, t8 bool, t9 nchar(8))");

    char *sql = calloc(1, 1024 * 1024);
    int sqlLen = 0;
    sqlLen = sprintf(sql, "create table");
    for (int i = 0; i < 10; i++) {
        sqlLen += sprintf(sql + sqlLen, " t%d using super tags (%d, 2147483648, 0.1, 0.000000001, 'abcdefgh', 32767, 127, 1, '一二三四五六七八')", i, i);
    }
    execute_simple_sql(taos, sql);

    int code = taos_load_table_info(taos, "t0,t1,t2,t3,t4,t5,t6,t7,t8,t9");
    if (code != 0) {
        PRINT_ERROR
        printf("failed to load table info: 0x%08x\n", code);
        exit(EXIT_FAILURE);
    }
    PRINT_SUCCESS
    printf("Successfully load table info\n");

    TAOS_STMT *stmt = taos_stmt_init(taos);
    if (stmt == NULL) {
        PRINT_ERROR
        printf("TDengine error: failed to init taos_stmt\n");
        exit(EXIT_FAILURE);
    }
    PRINT_SUCCESS
    printf("Successfully init taos_stmt\n");

    uintptr_t c10len = 0;
    struct {
        int64_t c1;
        int32_t c2;
        int64_t c3;
        float c4;
        double c5;
        unsigned char c6[8];
        int16_t c7;
        int8_t c8;
        int8_t c9;
        char c10[32];
    } v = {0};
    TAOS_BIND params[11];
    params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
    params[0].buffer_length = sizeof(v.c1);
    params[0].buffer = &v.c1;
    params[0].length = &params[0].buffer_length;
    params[0].is_null = NULL;

    params[1].buffer_type = TSDB_DATA_TYPE_INT;
    params[1].buffer_length = sizeof(v.c2);
    params[1].buffer = &v.c2;
    params[1].length = &params[1].buffer_length;
    params[1].is_null = NULL;

    params[2].buffer_type = TSDB_DATA_TYPE_BIGINT;
    params[2].buffer_length = sizeof(v.c3);
    params[2].buffer = &v.c3;
    params[2].length = &params[2].buffer_length;
    params[2].is_null = NULL;

    params[3].buffer_type = TSDB_DATA_TYPE_FLOAT;
    params[3].buffer_length = sizeof(v.c4);
    params[3].buffer = &v.c4;
    params[3].length = &params[3].buffer_length;
    params[3].is_null = NULL;

    params[4].buffer_type = TSDB_DATA_TYPE_DOUBLE;
    params[4].buffer_length = sizeof(v.c5);
    params[4].buffer = &v.c5;
    params[4].length = &params[4].buffer_length;
    params[4].is_null = NULL;

    params[5].buffer_type = TSDB_DATA_TYPE_BINARY;
    params[5].buffer_length = sizeof(v.c6);
    params[5].buffer = &v.c6;
    params[5].length = &params[5].buffer_length;
    params[5].is_null = NULL;

    params[6].buffer_type = TSDB_DATA_TYPE_SMALLINT;
    params[6].buffer_length = sizeof(v.c7);
    params[6].buffer = &v.c7;
    params[6].length = &params[6].buffer_length;
    params[6].is_null = NULL;

    params[7].buffer_type = TSDB_DATA_TYPE_TINYINT;
    params[7].buffer_length = sizeof(v.c8);
    params[7].buffer = &v.c8;
    params[7].length = &params[7].buffer_length;
    params[7].is_null = NULL;

    params[8].buffer_type = TSDB_DATA_TYPE_BOOL;
    params[8].buffer_length = sizeof(v.c9);
    params[8].buffer = &v.c9;
    params[8].length = &params[8].buffer_length;
    params[8].is_null = NULL;

    params[9].buffer_type = TSDB_DATA_TYPE_NCHAR;
    params[9].buffer_length = sizeof(v.c10);
    params[9].buffer = &v.c10;
    params[9].length = &c10len;
    params[9].is_null = NULL;

    params[10].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
    params[10].buffer_length = sizeof(v.c1);
    params[10].buffer = &v.c1;
    params[10].length = &params[10].buffer_length;
    params[10].is_null = NULL;

char *stmt_sql = "insert into ? values (?,?,?,?,?,?,?,?,?,?,?)";
|
||||
code = taos_stmt_prepare(stmt, stmt_sql, 0);
|
||||
if (code != 0){
|
||||
PRINT_ERROR
|
||||
printf("failed to execute taos_stmt_prepare. code:0x%x\n", code);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
PRINT_SUCCESS
|
||||
printf("Successfully execute taos_stmt_prepare\n");
|
||||
|
||||
    for (int i = 0; i < 10; i++) {
        char buf[32];
        sprintf(buf, "t%d", i);
        if (i == 0) {
            code = taos_stmt_set_tbname(stmt, buf);
            if (code != 0) {
                PRINT_ERROR
                printf("failed to execute taos_stmt_set_tbname. code:0x%x\n", code);
                exit(EXIT_FAILURE);
            }
            PRINT_SUCCESS
            printf("Successfully execute taos_stmt_set_tbname\n");
        } else {
            code = taos_stmt_set_sub_tbname(stmt, buf);
            if (code != 0) {
                PRINT_ERROR
                printf("failed to execute taos_stmt_set_sub_tbname. code:0x%x\n", code);
                exit(EXIT_FAILURE);
            }
            PRINT_SUCCESS
            printf("Successfully execute taos_stmt_set_sub_tbname\n");
        }

        v.c1 = (int64_t)1591060628000;
        v.c2 = (int32_t)2147483647;
        v.c3 = (int64_t)2147483648;
        v.c4 = (float)0.1;
        v.c5 = (double)0.000000001;
        for (int j = 0; j < sizeof(v.c6); j++) {
            v.c6[j] = (char)('a');
        }
        v.c7 = 32767;
        v.c8 = 127;
        v.c9 = 1;
        strcpy(v.c10, "一二三四五六七八");
        c10len = strlen(v.c10);
        taos_stmt_bind_param(stmt, params);
        taos_stmt_add_batch(stmt);
    }

    if (taos_stmt_execute(stmt) != 0) {
        PRINT_ERROR
        printf("failed to execute insert statement.\n");
        exit(EXIT_FAILURE);
    }
    PRINT_SUCCESS
    printf("Successfully execute insert statement.\n");

    taos_stmt_close(stmt);
    for (int i = 0; i < 10; i++) {
        check_result(taos, i, 1);
    }

    return 0;
}