Merge branch 'develop' into feature/TD-2581

wpan 2021-07-29 08:32:41 +08:00
commit 2594aa6e0d
88 changed files with 2926 additions and 877 deletions

View File

@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.33-dist.jar DESTINATION connector/jdbc)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.34-dist.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")

View File

@ -179,17 +179,15 @@ taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
| | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | **UnionTech UOS** | **Kylin/NeoKylin** | **LINX V60/V80** | **Huawei EulerOS** |
| -------------- | --------------------- | ------------------------ | --------------- | --------------- | ------------------------- | --------------------- | --------------------- |
| X64 | ● | ● | | ○ | ● | ● | ● |
| Raspberry Pi ARM32 | | ● | ● | | | | |
| Loongson MIPS64 | | | ● | | | | |
| Kunpeng ARM64 | | ○ | ○ | | ● | | |
| SWCPU Alpha64 | | | ○ | ● | | | |
| Kunpeng ARM64 | | ○ | ○ | | ● | | |
| SWCPU Alpha64 | | | ○ | ● | | | |
| FT ARM64 | | ○ Ubuntu Kylin | | | | | |
| Hygon X64 | ● | ● | ● | ○ | ● | ● | |
| Rockchip ARM64/32 | | | ○ | | | | |
| Allwinner ARM64/32 | | | ○ | | | | |
| Actions ARM64/32 | | | ○ | | | | |
| TI ARM32 | | | ○ | | | | |
| Huawei Cloud ARM64 | | | | | | | ● |
| Rockchip ARM64 | | | ○ | | | | |
| Allwinner ARM64 | | | ○ | | | | |
| Actions ARM64 | | | ○ | | | | |
| Huawei Cloud ARM64 | | | | | | | ● |
Note: ● has been verified by official tests; ○ has been verified by unofficial tests.

View File

@ -111,7 +111,7 @@ TDengine uses a Master-Slave model for synchronization; compared with the popular RAFT consistency
2. The application wraps the version-stamped write request in a WAL head and writes it into the WAL (Write Ahead Log).
3. The application calls the API syncForwardToPeer; if vnode B is in slave state, the sync module sends the packet containing the WAL head to vnode B in a Forward message, otherwise nothing is forwarded.
4. After vnode B receives the Forward message, it invokes the callback function writeToCache and hands the request to the application for processing.
5. After a successful write, the application on vnode B must call syncAckForward to notify the sync module that the write succeeded.
5. After a successful write, the application on vnode B must call syncConfirmForward to notify the sync module that the write succeeded.
6. If quorum is greater than 1, vnode B needs to wait for the application's confirmation; after receiving it, vnode B sends a Forward Response message to node A.
7. If quorum is greater than 1, vnode A needs to wait for the confirmation of the Forward message from vnode B or other replicas.
8. If quorum is greater than 1, once vnode A has received quorum-1 confirmation messages, it invokes the callback function confirmForward to notify the application that the write succeeded.
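Steps 6-8 amount to a small acknowledgement counter on the master vnode. Below is a minimal sketch of that accounting, with hypothetical type and field names (the real sync module's structures are not part of this diff):
```C
#include <stdint.h>

/* Hypothetical per-write state kept by the master vnode. */
typedef struct {
  int8_t  quorum;    /* required confirmations, from the DB configuration */
  int8_t  confirms;  /* Forward Response messages received so far         */
  void  (*confirmForward)(void *ahandle, int32_t code);  /* app callback  */
  void   *ahandle;
} SSyncWrite;

/* Called once per Forward Response from a slave replica. */
static void onForwardResponse(SSyncWrite *pWrite) {
  if (pWrite->quorum <= 1) return;  /* no confirmation needed */
  if (++pWrite->confirms >= pWrite->quorum - 1) {
    /* quorum-1 replicas confirmed: notify the application (step 8) */
    (*pWrite->confirmForward)(pWrite->ahandle, 0);
  }
}
```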
@ -140,7 +140,7 @@ TDengine uses a Master-Slave model for synchronization; compared with the popular RAFT consistency
The whole data recovery flow consists of two major steps: first recover the archived data (files), then recover the WAL. The detailed flow is as follows:
![replica-forward.png](page://images/architecture/replica-forward.png)
![replica-restore.png](page://images/architecture/replica-restore.png)
1. Send a sync req to the master node over the already-established TCP connection.
2. After receiving the sync req, the master, acting as a client, actively establishes a new TCP connection (syncFd) to vnode B dedicated to synchronization.

View File

@ -323,8 +323,6 @@ A vnode maintains a data version number (Version); when persisting the in-memory data
With synchronous replication, system performance decreases and latency increases. Because metadata must be strongly consistent, data synchronization between mnodes uses synchronous replication by default.
Synchronous replication between vnodes is only supported in the Enterprise Edition.
## <a class="anchor" id="persistence"></a>Caching and Persistence
### Caching

View File

@ -208,7 +208,7 @@ TDengine's default timestamp precision is milliseconds, but it can be changed by passing a parameter at CREATE DATABASE time
Note: wildcards can be used in LIKE for name matching, and the wildcard string may be at most 24 bytes long.
Wildcard matching: 1) '%' (percent sign) matches 0 to any number of characters; 2) '\_' (underscore) matches any single character. (To match an underscore that literally appears in a table name, it can be escaped with a backslash here, i.e. '\\\_' matches a literal underscore in the name.)
Wildcard matching: 1) '%' (percent sign) matches 0 to any number of characters; 2) '\_' (underscore) matches any single character.
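To illustrate the escape rule, here is a sketch using the C connector's `taos_query` (the connection and the `d\_`-prefixed table names are assumed for illustration):
```C
#include <stdio.h>
#include <taos.h>

/* List tables whose names start with a literal "d_" prefix:
 * '%' matches any run of characters, and '\_' escapes the underscore
 * so it is matched literally rather than as a one-character wildcard. */
void listTables(TAOS *taos) {
  TAOS_RES *res = taos_query(taos, "SHOW TABLES LIKE 'd\\_%'");
  if (taos_errno(res) != 0) {
    printf("show tables failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
}
```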
- **Show the creation statement of a table**

View File

@ -11,7 +11,7 @@ One of the modules of TDengine is the time-series database. However, in addition
- **Full-stack time-series data processing engine**: Integrate database, message queue, cache, stream computing, and other functions, and the applications do not need to integrate with software such as Kafka/Redis/HBase/Spark/HDFS, thus greatly reducing the complexity cost of application development and maintenance.
- **Powerful analysis functions**: Data from ten years ago or one second ago, can all be queried based on a specified time range. Data can be aggregated on a timeline or multiple devices. Ad-hoc queries can be made at any time through Shell, Python, R, and MATLAB.
- **Seamless connection with third-party tools**: Integration with Telegraf, Grafana, EMQ, HiveMQ, Prometheus, MATLAB, R, etc. without even one single line of code. OPC, Hadoop, Spark, etc. will be supported in the future, and more BI tools will be seamlessly connected to.
- **Zero operation cost & zero learning cost**: Installing clusters is simple and quick, with real-time backup built-in, and no need to split libraries or tables. Similar to standard SQL, TDengine can support RESTful, Python/Java/C/C + +/C#/Go/Node.js, and similar to MySQL with zero learning cost.
- **Zero operation cost & zero learning cost**: Installing clusters is simple and quick, with real-time backup built in, and no need to split libraries or tables. TDengine's query language is similar to standard SQL; it supports RESTful and Python/Java/C/C++/C#/Go/Node.js interfaces, and, being similar to MySQL, it has zero learning cost.
With TDengine, the total cost of ownership of typical IoT, Internet of Vehicles, and Industrial Internet Big Data platforms can be greatly reduced. However, it should be pointed out that due to making full use of the characteristics of IoT time-series data, TDengine cannot be used to process general data from web crawlers, microblogs, WeChat, e-commerce, ERP, CRM, and other sources.

View File

@ -188,16 +188,14 @@ List of platforms supported by TDengine server
| | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | UnionTech UOS | NeoKylin | LINX V60/V80 |
| ------------------ | ---------------- | ------------------- | --------------- | ------------- | -------- | ------------ |
| X64 | ● | ● | | ○ | ● | ● |
| Raspberry ARM32 | | ● | ● | | | |
| Loongson MIPS64 | | | ● | | | |
| Kunpeng ARM64 | | ○ | ○ | | ● | |
| SWCPU Alpha64 | | | ○ | ● | | |
| FT ARM64 | | ○ Ubuntu Kylin | | | | |
| Hygon X64 | ● | ● | ● | ○ | ● | ● |
| Rockchip ARM64/32 | | | ○ | | | |
| Allwinner ARM64/32 | | | ○ | | | |
| Actions ARM64/32 | | | ○ | | | |
| TI ARM32 | | | ○ | | | |
| Rockchip ARM64 | | | ○ | | | |
| Allwinner ARM64 | | | ○ | | | |
| Actions ARM64 | | | ○ | | | |
Note: ● has been verified by official tests; ○ has been verified by unofficial tests.

View File

@ -172,7 +172,7 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc
**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high reliability of the system. The virtual node group is managed in a master/slave structure. Write operations can only be performed on the master vnode, and the system synchronizes data to the slave vnodes via replication, thus ensuring that a single replica of data is copied onto multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter replica when creating the DB, and the default is 1. Using the multi-replica feature of TDengine, the same high data reliability can be achieved without expensive storage devices such as disk arrays. A virtual node group is created and managed by the management node, which assigns it a cluster-unique ID, the VGroup ID. If two virtual nodes have the same VGroup ID, they belong to the same group and their data is backed up to each other. The number of virtual nodes in a virtual node group can be changed dynamically; a single node is allowed, meaning no data replication. The VGroup ID never changes; even if a virtual node group is deleted, its ID will not be reused.
**TAOSC**: TAOSC is the driver provided by TDengine to applications, which is responsible for dealing with the interface interaction between application and cluster, and provides the native interface of C/C + + language, which is embedded in JDBC, C #, Python, Go, Node.js language connection libraries. Applications interact with the whole cluster through taosc instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, taosc also need to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C + +/C #/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, taosc has a running instance on each dnode of TDengine cluster.
**TAOSC**: TAOSC is the driver provided by TDengine to applications. It handles the interface interaction between applications and the cluster, and provides the native interface of the C/C++ language, which is embedded in the JDBC, C#, Python, Go, and Node.js connection libraries. Applications interact with the whole cluster through taosc instead of connecting directly to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; and, when returning results to the application, performing the final level of aggregation, sorting, filtering, and other operations. For the JDBC, C/C++, C#, Python, Go, and Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, taosc has a running instance on each dnode of the TDengine cluster.
### Node Communication
@ -322,8 +322,6 @@ For scenarios with higher data consistency requirements, asynchronous data repli
With synchronous replication, system performance decreases and latency increases. Because metadata must be strongly consistent, data synchronization between mnodes uses synchronous replication by default.
Note: synchronous replication between vnodes is only supported in the Enterprise Edition
## <a class="anchor" id="persistence"></a> Caching and Persistence
### Caching

View File

@ -4,7 +4,7 @@ TDengine supports multiple interfaces to write data, including SQL, Prometheus,
## <a class="anchor" id="sql"></a> SQL Writing
Applications insert data by executing SQL insert statements through C/C + +, JDBC, GO, or Python Connector, and users can manually enter SQL insert statements to insert data through TAOS Shell. For example, the following insert writes a record to table d1001:
Applications insert data by executing SQL insert statements through C/C++, JDBC, GO, or Python Connector, and users can manually enter SQL insert statements to insert data through TAOS Shell. For example, the following insert writes a record to table d1001:
```mysql
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);
```
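The same record can also be written through the C connector; a minimal sketch, assuming an already-established connection:
```C
#include <stdio.h>
#include <taos.h>

/* Write one record into table d1001 via a SQL insert statement. */
void insertOneRow(TAOS *taos) {
  TAOS_RES *res = taos_query(taos, "INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31)");
  if (taos_errno(res) != 0) {
    printf("insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
}
```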

View File

@ -2,7 +2,7 @@
## <a class="anchor" id="queries"></a> Main Query Features
TDengine uses SQL as the query language. Applications can send SQL statements through C/C + +, Java, Go, Python connectors, and users can manually execute SQL Ad-Hoc Query through the Command Line Interface (CLI) tool TAOS Shell provided by TDengine. TDengine supports the following query functions:
TDengine uses SQL as its query language. Applications can send SQL statements through the C/C++, Java, Go, and Python connectors, and users can execute ad-hoc SQL queries manually through TAOS Shell, the Command Line Interface (CLI) tool provided by TDengine. TDengine supports the following query functions:
- Single-column and multi-column data query
- Multiple filters for tags and numeric values: >, <, =, <>, like, etc. (see the sketch below)
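A sketch of issuing such a filtered query and walking the result set through the C connector (the connection and the table d1001 are assumed):
```C
#include <stdio.h>
#include <taos.h>

/* Run a filtered query and iterate the rows; taos_print_row formats
 * one row into a buffer according to the field metadata. */
void queryWithFilter(TAOS *taos) {
  TAOS_RES *res = taos_query(taos, "SELECT * FROM d1001 WHERE voltage > 215");
  if (taos_errno(res) != 0) {
    printf("query failed: %s\n", taos_errstr(res));
    taos_free_result(res);
    return;
  }
  int         numFields = taos_num_fields(res);
  TAOS_FIELD *fields    = taos_fetch_fields(res);
  TAOS_ROW    row;
  char        buf[1024];
  while ((row = taos_fetch_row(res)) != NULL) {
    taos_print_row(buf, row, fields, numFields);
    printf("%s\n", buf);
  }
  taos_free_result(res);
}
```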

View File

@ -22,7 +22,7 @@ Note: ● stands for that has been verified by official tests; ○ stands for th
Note:
- To access a TDengine database through a connector (other than RESTful) on a system without the TDengine server software, the client installation package of the corresponding version must be installed so that the application driver (named libtaos.so on Linux and taos.dll on Windows) is present on the system; otherwise an error will be reported that the corresponding library file cannot be found.
- All APIs that execute SQL statements, such as `tao_query`, `taos_query_a`, `taos_subscribe` in C/C + + Connector, and APIs corresponding to them in other languages, can only execute one SQL statement at a time. If the actual parameters contain multiple statements, their behavior is undefined.
- All APIs that execute SQL statements, such as `taos_query`, `taos_query_a`, `taos_subscribe` in the C/C++ Connector, and the APIs corresponding to them in other languages, can only execute one SQL statement at a time. If the actual parameters contain multiple statements, their behavior is undefined (see the sketch after these notes).
- Users upgrading to TDengine 2.0.8.0 or later must update the JDBC connection: taos-jdbcdriver must be upgraded to 2.0.12 or above.
- No matter which programming language connector is used, for TDengine version 2.0 and above it is recommended that each thread of a database application establish its own connection, or that a thread-based connection pool be built, to avoid interference between threads over the connection's "USE statement" state variable (query and write operations on a connection are themselves thread-safe).
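A short sketch of the one-statement-per-call constraint noted above (the database name is illustrative):
```C
#include <taos.h>

/* Each taos_query() call must carry exactly one SQL statement. */
void createAndUse(TAOS *taos) {
  taos_free_result(taos_query(taos, "CREATE DATABASE IF NOT EXISTS power"));
  taos_free_result(taos_query(taos, "USE power"));
  /* Undefined behavior -- two statements in a single call:
   * taos_query(taos, "CREATE DATABASE power; USE power");          */
}
```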
@ -152,7 +152,7 @@ Under cmd, enter the c:\tdengine directory and directly execute taos.exe, and y
| **OS Type** | Linux | Win64 | Win32 | Linux | Linux |
| **Supported or Not** | Yes | **Yes** | **Yes** | **Yes** | **In development** |
The C/C + + API is similar to MySQL's C API. When application use it, it needs to include the TDengine header file taos.h (after installed, it is located in/usr/local/taos/include):
The C/C++ API is similar to MySQL's C API. To use it, an application needs to include the TDengine header file taos.h (after installation, it is located in /usr/local/taos/include):
```C
#include <taos.h>
```
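A minimal, self-contained sketch of the connection lifecycle with this API; the host and credentials below are the documented defaults and should be adjusted as needed:
```C
#include <stdio.h>
#include <taos.h>

int main(void) {
  taos_init();  /* initialize the client library once per process */
  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (taos == NULL) {
    printf("failed to connect to TDengine\n");
    return 1;
  }
  /* ... issue statements with taos_query(taos, "...") here ... */
  taos_close(taos);
  taos_cleanup();
  return 0;
}
```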
@ -923,7 +923,7 @@ Manually install the following tools:
If the steps above cannot be performed successfully, you can refer to Microsoft's Node.js User Manual [Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules).
If you use ARM64 Node.js on Windows 10 ARM, you also need to add "Visual C + + compilers and libraries for ARM64" and "Visual C + + ATL for ARM64".
If you use ARM64 Node.js on Windows 10 ARM, you also need to add "Visual C++ compilers and libraries for ARM64" and "Visual C++ ATL for ARM64".
#### Sample

View File

@ -240,7 +240,7 @@ Client configuration parameters:
```sql
SELECT count(*) FROM table_name WHERE TS<1554984068000;
```
In order to avoid the uncertainty caused by using string time format, Unix timestamp can also be used directly. In addition, timestamp strings with time zones can also be used in SQL statements, such as: timestamp strings in RFC3339 format, 2013-04-12T15: 52: 01.123 +08:00, or ISO-8601 format timestamp strings 2013-04-12T15: 52: 01.123 +0800. The conversion of the above two strings into Unix timestamps is not affected by the time zone in which the system is located.
In order to avoid the uncertainty caused by using string time format, Unix timestamp can also be used directly. In addition, timestamp strings with time zones can also be used in SQL statements, such as: timestamp strings in RFC3339 format, 2013-04-12T15:52:01.123+08:00, or ISO-8601 format timestamp strings 2013-04-12T15:52:01.123+0800. The conversion of the above two strings into Unix timestamps is not affected by the time zone in which the system is located.
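As a sketch (reusing the table_name from the example above), both literals below denote the same instant regardless of the client's local time zone:
```C
#include <taos.h>

/* RFC3339 and ISO-8601 offset forms parse to the same Unix timestamp. */
void timezoneSafeCount(TAOS *taos) {
  taos_free_result(taos_query(taos,
      "SELECT count(*) FROM table_name WHERE TS < '2013-04-12T15:52:01.123+08:00'"));
  taos_free_result(taos_query(taos,
      "SELECT count(*) FROM table_name WHERE TS < '2013-04-12T15:52:01.123+0800'"));
}
```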
When starting taos, you can also specify the end point of a taosd instance on the command line; otherwise it is read from taos.cfg.

View File

@ -23,7 +23,7 @@
static SBnThread tsBnThread;
static void *bnThreadFunc(void *arg) {
setThreadName("bnThreadd");
setThreadName("balance");
while (1) {
pthread_mutex_lock(&tsBnThread.mutex);

View File

@ -346,6 +346,7 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta* pChild, const char* name, v
STableMeta* tscTableMetaDup(STableMeta* pTableMeta);
SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo);
int32_t tscGetColFilterSerializeLen(SQueryInfo* pQueryInfo);
int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr);
void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pOperator, char* sql, void* addr, int32_t stage, uint64_t qId);
@ -357,6 +358,8 @@ bool vgroupInfoIdentical(SNewVgroupInfo *pExisted, SVgroupMsg* src);
SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg);
STblCond* tsGetTableFilter(SArray* filters, uint64_t uid, int16_t idx);
void tscRemoveTableMetaBuf(STableMetaInfo* pTableMetaInfo, uint64_t id);
#ifdef __cplusplus
}
#endif

View File

@ -151,7 +151,8 @@ typedef struct STableDataBlocks {
typedef struct {
STableMeta *pTableMeta;
SVgroupsInfo *pVgroupInfo;
SArray *vgroupIdList;
// SVgroupsInfo *pVgroupsInfo;
} STableMetaVgroupInfo;
typedef struct SInsertStatementParam {
@ -375,6 +376,8 @@ void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta);
*/
void tscFreeSqlResult(SSqlObj *pSql);
void* tscCleanupTableMetaMap(SHashObj* pTableMetaMap);
/**
* free sql object, release allocated resource
* @param pObj
@ -415,7 +418,8 @@ int32_t tscValidateSqlInfo(SSqlObj *pSql, struct SSqlInfo *pInfo);
int32_t tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta, int32_t numOfRows);
extern int32_t sentinel;
extern SHashObj *tscVgroupMap;
extern SHashObj *tscTableMetaInfo;
extern SHashObj *tscTableMetaMap;
extern SCacheObj *tscVgroupListBuf;
extern int tscObjRef;
extern void *tscTmr;

View File

@ -325,61 +325,6 @@ void tscAsyncResultOnError(SSqlObj* pSql) {
int tscSendMsgToServer(SSqlObj *pSql);
static int32_t updateMetaBeforeRetryQuery(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SQueryInfo* pQueryInfo) {
// handle the invalid table error code for super table.
// update the pExpr info, colList info, number of table columns
// TODO Re-parse this sql and issue the corresponding subquery as an alternative for this case.
if (pSql->retryReason == TSDB_CODE_TDB_INVALID_TABLE_ID) {
int32_t numOfExprs = (int32_t) tscNumOfExprs(pQueryInfo);
int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
int32_t numOfTags = tscGetNumOfTags(pTableMetaInfo->pTableMeta);
SSchema *pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
SSchema *pTagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
for (int32_t i = 0; i < numOfExprs; ++i) {
SSqlExpr *pExpr = &(tscExprGet(pQueryInfo, i)->base);
// update the table uid
pExpr->uid = pTableMetaInfo->pTableMeta->id.uid;
if (pExpr->colInfo.colIndex >= 0) {
int32_t index = pExpr->colInfo.colIndex;
if ((TSDB_COL_IS_NORMAL_COL(pExpr->colInfo.flag) && index >= numOfCols) ||
(TSDB_COL_IS_TAG(pExpr->colInfo.flag) && (index < 0 || index >= numOfTags))) {
return pSql->retryReason;
}
if (TSDB_COL_IS_TAG(pExpr->colInfo.flag)) {
if ((pTagSchema[pExpr->colInfo.colIndex].colId != pExpr->colInfo.colId) &&
strcasecmp(pExpr->colInfo.name, pTagSchema[pExpr->colInfo.colIndex].name) != 0) {
return pSql->retryReason;
}
} else if (TSDB_COL_IS_NORMAL_COL(pExpr->colInfo.flag)) {
if ((pSchema[pExpr->colInfo.colIndex].colId != pExpr->colInfo.colId) &&
strcasecmp(pExpr->colInfo.name, pSchema[pExpr->colInfo.colIndex].name) != 0) {
return pSql->retryReason;
}
} else { // do nothing for udc
}
}
}
// validate the table columns information
for (int32_t i = 0; i < taosArrayGetSize(pQueryInfo->colList); ++i) {
SColumn *pCol = taosArrayGetP(pQueryInfo->colList, i);
if (pCol->columnIndex >= numOfCols) {
return pSql->retryReason;
}
}
} else {
// do nothing
}
return TSDB_CODE_SUCCESS;
}
void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, (int64_t)param);
if (pSql == NULL) return;
@ -391,7 +336,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
pRes->code = code;
SSqlObj *sub = (SSqlObj*) res;
const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"table-meta";
const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"multi-tableMeta";
if (code != TSDB_CODE_SUCCESS) {
tscError("0x%"PRIx64" get %s failed, code:%s", pSql->self, msg, tstrerror(code));
goto _error;
@ -401,85 +346,56 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
if (pSql->pStream == NULL) {
SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
// check if it is a sub-query of super table query first, if true, enter another routine
if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY |
TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) {
tscDebug("0x%" PRIx64 " update cached table-meta, continue to process sql and send the corresponding query", pSql->self);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
code = tscGetTableMeta(pSql, pTableMetaInfo);
assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS || code == TSDB_CODE_SUCCESS);
if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT)) {
tscDebug("0x%" PRIx64 " continue parse sql after get table-meta", pSql->self);
code = tsParseSql(pSql, false);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
taosReleaseRef(tscObjRef, pSql->self);
return;
}
assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0));
code = updateMetaBeforeRetryQuery(pSql, pTableMetaInfo, pQueryInfo);
if (code != TSDB_CODE_SUCCESS) {
} else if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
// tscBuildAndSendRequest can add error into async res
tscBuildAndSendRequest(pSql, NULL);
taosReleaseRef(tscObjRef, pSql->self);
return;
} else { // continue to process normal async query
if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT)) {
tscDebug("0x%" PRIx64 " continue parse sql after get table-meta", pSql->self);
code = tsParseSql(pSql, false);
if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_STMT_INSERT)) { // stmt insert
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
code = tscGetTableMeta(pSql, pTableMetaInfo);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
taosReleaseRef(tscObjRef, pSql->self);
return;
} else if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_STMT_INSERT)) {
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
code = tscGetTableMeta(pSql, pTableMetaInfo);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
taosReleaseRef(tscObjRef, pSql->self);
return;
} else {
assert(code == TSDB_CODE_SUCCESS);
}
(*pSql->fp)(pSql->param, pSql, code);
} else {
if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_FILE_INSERT)) {
tscImportDataFromFile(pSql);
} else {
tscHandleMultivnodeInsert(pSql);
}
assert(code == TSDB_CODE_SUCCESS);
}
(*pSql->fp)(pSql->param, pSql, code);
} else if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_FILE_INSERT)) { // file insert
tscImportDataFromFile(pSql);
} else { // sql string insert
tscHandleMultivnodeInsert(pSql);
}
} else {
if (pSql->retryReason != TSDB_CODE_SUCCESS) {
tscDebug("0x%" PRIx64 " update cached table-meta, re-validate sql statement and send query again", pSql->self);
tscResetSqlCmd(pCmd, false);
pSql->retryReason = TSDB_CODE_SUCCESS;
} else {
if (pSql->retryReason != TSDB_CODE_SUCCESS) {
tscDebug("0x%" PRIx64 " update cached table-meta, re-validate sql statement and send query again",
pSql->self);
tscResetSqlCmd(pCmd, false);
pSql->retryReason = TSDB_CODE_SUCCESS;
} else {
tscDebug("0x%" PRIx64 " cached table-meta, continue validate sql statement and send query", pSql->self);
}
code = tsParseSql(pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
taosReleaseRef(tscObjRef, pSql->self);
return;
} else if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
SQueryInfo *pQueryInfo1 = tscGetQueryInfo(pCmd);
executeQuery(pSql, pQueryInfo1);
tscDebug("0x%" PRIx64 " cached table-meta, continue validate sql statement and send query", pSql->self);
}
taosReleaseRef(tscObjRef, pSql->self);
return;
code = tsParseSql(pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
taosReleaseRef(tscObjRef, pSql->self);
return;
} else if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
SQueryInfo *pQueryInfo1 = tscGetQueryInfo(pCmd);
executeQuery(pSql, pQueryInfo1);
}
taosReleaseRef(tscObjRef, pSql->self);
return;
} else { // stream computing
tscDebug("0x%"PRIx64" stream:%p meta is updated, start new query, command:%d", pSql->self, pSql->pStream, pCmd->command);

View File

@ -920,7 +920,8 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
} else if (pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE) {
pRes->code = tscProcessShowCreateDatabase(pSql);
} else if (pCmd->command == TSDB_SQL_RESET_CACHE) {
taosHashClear(tscTableMetaInfo);
taosHashClear(tscTableMetaMap);
taosCacheEmpty(tscVgroupListBuf);
pRes->code = TSDB_CODE_SUCCESS;
} else if (pCmd->command == TSDB_SQL_SERV_VERSION) {
pRes->code = tscProcessServerVer(pSql);

File diff suppressed because it is too large

View File

@ -81,8 +81,8 @@ static void getColumnName(tSqlExprItem* pItem, char* resultFieldName, char* rawN
static int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSqlExprItem* pItem,
bool finalResult, SUdfInfo* pUdfInfo);
static int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnList* pIdList, int16_t bytes,
int8_t type, char* fieldName, SExprInfo* pSqlExpr);
static int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnList* pColList, int16_t bytes,
int8_t type, char* fieldName, SExprInfo* pSqlExpr);
static uint8_t convertRelationalOperator(SStrToken *pToken);
@ -1952,12 +1952,13 @@ bool isValidDistinctSql(SQueryInfo* pQueryInfo) {
if (pQueryInfo == NULL) {
return false;
}
if ((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_QUERY) != TSDB_QUERY_TYPE_STABLE_QUERY) {
if ((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_QUERY) != TSDB_QUERY_TYPE_STABLE_QUERY
&& (pQueryInfo->type & TSDB_QUERY_TYPE_TABLE_QUERY) != TSDB_QUERY_TYPE_TABLE_QUERY) {
return false;
}
if (tscQueryTags(pQueryInfo) && tscNumOfExprs(pQueryInfo) == 1){
if (tscNumOfExprs(pQueryInfo) == 1){
return true;
}
}
return false;
}
@ -2051,7 +2052,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
const char* msg1 = "too many items in selection clause";
const char* msg2 = "functions or others can not be mixed up";
const char* msg3 = "not support query expression";
const char* msg4 = "only support distinct one tag";
const char* msg4 = "only support distinct one column or tag";
const char* msg5 = "invalid function name";
// too many result columns not support order by in query
@ -2111,13 +2112,13 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
}
if (hasDistinct == true) {
if (!isValidDistinctSql(pQueryInfo)) {
if (!isValidDistinctSql(pQueryInfo) ) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
pQueryInfo->distinctTag = true;
pQueryInfo->distinct = true;
}
// there is only one user-defined column in the final result field, add the timestamp column.
size_t numOfSrcCols = taosArrayGetSize(pQueryInfo->colList);
if ((numOfSrcCols <= 0 || !hasNoneUserDefineExpr(pQueryInfo)) && !tscQueryTags(pQueryInfo) && !tscQueryBlockInfo(pQueryInfo)) {
@ -4032,8 +4033,10 @@ static int32_t getColQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlEx
static int32_t checkColumnQueryCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, int32_t relOptr) {
if (pExpr == NULL) {
pQueryInfo->onlyHasTagCond &= true;
return TSDB_CODE_SUCCESS;
}
pQueryInfo->onlyHasTagCond &= false;
if (!tSqlExprIsParentOfLeaf(pExpr)) { // internal node
int32_t ret = checkColumnQueryCondInfo(pCmd, pQueryInfo, pExpr->pLeft, pExpr->tokenId);
@ -4160,6 +4163,7 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS
static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr) {
if (pExpr == NULL) {
pQueryInfo->onlyHasTagCond &= true;
return TSDB_CODE_SUCCESS;
}
@ -5006,8 +5010,11 @@ static int32_t convertTimeRangeFromExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t
STimeWindow win = {.skey = INT64_MIN, .ekey = INT64_MAX};
if (pExpr == NULL) {
pQueryInfo->onlyHasTagCond &= true;
return TSDB_CODE_SUCCESS;
}
pQueryInfo->onlyHasTagCond &= false;
if (!tSqlExprIsParentOfLeaf(pExpr)) {
code = convertTimeRangeFromExpr(pCmd, pQueryInfo, pExpr->pLeft);
@ -5049,11 +5056,13 @@ static int32_t validateJoinExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr
if (!QUERY_IS_JOIN_QUERY(pQueryInfo->type)) {
if (pQueryInfo->numOfTables == 1) {
pQueryInfo->onlyHasTagCond &= true;
return TSDB_CODE_SUCCESS;
} else {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
pQueryInfo->onlyHasTagCond &= false;
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // for stable join, tag columns
@ -5409,7 +5418,7 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq
if (pExpr == NULL) {
return TSDB_CODE_SUCCESS;
}
const char* msg1 = "invalid expression";
// const char* msg2 = "invalid filter expression";
@ -5462,6 +5471,7 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq
if ((ret = getQueryTimeRange(&pSql->cmd, pQueryInfo, &condExpr.pTimewindow)) != TSDB_CODE_SUCCESS) {
goto PARSE_WHERE_EXIT;
}
// 3. get the tag query condition
if ((ret = getTagQueryCondExpr(&pSql->cmd, pQueryInfo, &condExpr)) != TSDB_CODE_SUCCESS) {
@ -5770,7 +5780,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (pQueryInfo->distinctTag == true) {
if (pQueryInfo->distinct == true) {
pQueryInfo->order.order = TSDB_ORDER_ASC;
pQueryInfo->order.orderColId = 0;
return TSDB_CODE_SUCCESS;
@ -7515,7 +7525,7 @@ void tscPrintSelNodeList(SSqlObj* pSql, int32_t subClauseIndex) {
}
tmpLen =
sprintf(tmpBuf, "%s(uid:%" PRId64 ", %d)", name, pExpr->base.uid, pExpr->base.colInfo.colId);
sprintf(tmpBuf, "%s(uid:%" PRIu64 ", %d)", name, pExpr->base.uid, pExpr->base.colInfo.colId);
if (tmpLen + offset >= totalBufSize - 1) break;
@ -8391,6 +8401,7 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
}
pTableMeta = calloc(1, maxSize);
plist = taosArrayInit(4, POINTER_BYTES);
@ -8406,9 +8417,13 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) {
size_t len = strlen(name);
memset(pTableMeta, 0, maxSize);
taosHashGetClone(tscTableMetaInfo, name, len, NULL, pTableMeta, -1);
taosHashGetClone(tscTableMetaMap, name, len, NULL, pTableMeta);
if (pTableMeta->id.uid > 0) {
tscDebug("0x%"PRIx64" retrieve table meta %s from local buf", pSql->self, name);
// avoid mem leak, may should update pTableMeta
void* pVgroupIdList = NULL;
if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
code = tscCreateTableMetaFromSTableMeta(pTableMeta, name, pSql->pBuf);
@ -8420,23 +8435,34 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
} else if (pTableMeta->tableType == TSDB_SUPER_TABLE) {
// the vgroup list of super table is not kept in local buffer, so here need retrieve it from the mnode each time
char* t = strdup(name);
taosArrayPush(pVgroupList, &t);
tscDebug("0x%"PRIx64" try to acquire cached super table %s vgroup id list", pSql->self, name);
void* pv = taosCacheAcquireByKey(tscVgroupListBuf, name, len);
if (pv == NULL) {
char* t = strdup(name);
taosArrayPush(pVgroupList, &t);
tscDebug("0x%"PRIx64" failed to retrieve stable %s vgroup id list in cache, try fetch from mnode", pSql->self, name);
} else {
tFilePage* pdata = (tFilePage*) pv;
pVgroupIdList = taosArrayInit((size_t) pdata->num, sizeof(int32_t));
if (pVgroupIdList == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
taosArrayAddBatch(pVgroupIdList, pdata->data, (int32_t) pdata->num);
taosCacheRelease(tscVgroupListBuf, &pv, false);
}
}
//STableMeta* pMeta = tscTableMetaDup(pTableMeta);
//STableMetaVgroupInfo p = { .pTableMeta = pMeta };
//const char* px = tNameGetTableName(pname);
//taosHashPut(pCmd->pTableMetaMap, px, strlen(px), &p, sizeof(STableMetaVgroupInfo));
// avoid mem leak, may should update pTableMeta
const char* px = tNameGetTableName(pname);
if (taosHashGet(pCmd->pTableMetaMap, px, strlen(px)) == NULL) {
if (taosHashGet(pCmd->pTableMetaMap, name, len) == NULL) {
STableMeta* pMeta = tscTableMetaDup(pTableMeta);
STableMetaVgroupInfo p = { .pTableMeta = pMeta, .pVgroupInfo = NULL};
taosHashPut(pCmd->pTableMetaMap, px, strlen(px), &p, sizeof(STableMetaVgroupInfo));
STableMetaVgroupInfo tvi = { .pTableMeta = pMeta, .vgroupIdList = pVgroupIdList};
taosHashPut(pCmd->pTableMetaMap, name, len, &tvi, sizeof(STableMetaVgroupInfo));
}
} else { // add to the retrieve table meta array list.
} else {
// Add to the retrieve table meta array list.
// If the tableMeta is missing, the cached vgroup list for the corresponding super table will be ignored.
tscDebug("0x%"PRIx64" failed to retrieve table meta %s from local buf", pSql->self, name);
char* t = strdup(name);
taosArrayPush(plist, &t);
}
@ -8550,22 +8576,44 @@ static int32_t doLoadAllTableMeta(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNod
strncpy(pTableMetaInfo->aliasName, tNameGetTableName(&pTableMetaInfo->name), tListLen(pTableMetaInfo->aliasName));
}
const char* name = tNameGetTableName(&pTableMetaInfo->name);
STableMetaVgroupInfo* p = taosHashGet(pCmd->pTableMetaMap, name, strlen(name));
char fname[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, fname);
STableMetaVgroupInfo* p = taosHashGet(pCmd->pTableMetaMap, fname, strnlen(fname, TSDB_TABLE_FNAME_LEN));
pTableMetaInfo->pTableMeta = tscTableMetaDup(p->pTableMeta);
assert(pTableMetaInfo->pTableMeta != NULL);
if (p->pVgroupInfo != NULL) {
pTableMetaInfo->vgroupList = tscVgroupsInfoDup(p->pVgroupInfo);
}
if (p->vgroupIdList != NULL) {
size_t s = taosArrayGetSize(p->vgroupIdList);
if (code != TSDB_CODE_SUCCESS) {
return code;
size_t vgroupsz = sizeof(SVgroupInfo) * s + sizeof(SVgroupsInfo);
pTableMetaInfo->vgroupList = calloc(1, vgroupsz);
if (pTableMetaInfo->vgroupList == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
pTableMetaInfo->vgroupList->numOfVgroups = (int32_t) s;
for(int32_t j = 0; j < s; ++j) {
int32_t* id = taosArrayGet(p->vgroupIdList, j);
// check if current buffer contains the vgroup info. If not, add it
SNewVgroupInfo existVgroupInfo = {.inUse = -1,};
taosHashGetClone(tscVgroupMap, id, sizeof(*id), NULL, &existVgroupInfo);
assert(existVgroupInfo.inUse >= 0);
SVgroupInfo *pVgroup = &pTableMetaInfo->vgroupList->vgroups[j];
pVgroup->numOfEps = existVgroupInfo.numOfEps;
pVgroup->vgId = existVgroupInfo.vgId;
for (int32_t k = 0; k < existVgroupInfo.numOfEps; ++k) {
pVgroup->epAddr[k].port = existVgroupInfo.ep[k].port;
pVgroup->epAddr[k].fqdn = strndup(existVgroupInfo.ep[k].fqdn, TSDB_FQDN_LEN);
}
}
}
}
return TSDB_CODE_SUCCESS;
return code;
}
static STableMeta* extractTempTableMetaFromSubquery(SQueryInfo* pUpstream) {
@ -8843,7 +8891,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
if (validateGroupbyNode(pQueryInfo, pSqlNode->pGroupby, pCmd) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
pQueryInfo->onlyHasTagCond = true;
// set where info
if (pSqlNode->pWhere != NULL) {
if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) {
@ -8866,6 +8914,10 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (isSTable && tscQueryTags(pQueryInfo) && pQueryInfo->distinct && !pQueryInfo->onlyHasTagCond) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
// parse the window_state
if (validateStateWindowNode(pCmd, pQueryInfo, pSqlNode, isSTable) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;

View File

@ -157,7 +157,7 @@ static void tscUpdateVgroupInfo(SSqlObj *pSql, SRpcEpSet *pEpSet) {
assert(vgId > 0);
SNewVgroupInfo vgroupInfo = {.vgId = -1};
taosHashGetClone(tscVgroupMap, &vgId, sizeof(vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo));
taosHashGetClone(tscVgroupMap, &vgId, sizeof(vgId), NULL, &vgroupInfo);
assert(vgroupInfo.numOfEps > 0 && vgroupInfo.vgId > 0);
tscDebug("before: Endpoint in use:%d, numOfEps:%d", vgroupInfo.inUse, vgroupInfo.numOfEps);
@ -344,6 +344,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
rpcFreeCont(rpcMsg->pCont);
return;
}
assert(pSql->self == handle);
STscObj *pObj = pSql->pTscObj;
@ -389,33 +390,40 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
pSql->cmd.insertParam.schemaAttached = 1;
}
// single table query error need to be handled here.
if ((cmd == TSDB_SQL_SELECT || cmd == TSDB_SQL_UPDATE_TAGS_VAL) &&
(rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID ||
rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID ||
rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL ||
(((rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID || // change the retry procedure
rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID)) ||
rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || // change the retry procedure
rpcMsg->code == TSDB_CODE_APP_NOT_READY)) {
pSql->retry++;
tscWarn("0x%"PRIx64" it shall renew table meta, code:%s, retry:%d", pSql->self, tstrerror(rpcMsg->code), pSql->retry);
pSql->res.code = rpcMsg->code; // keep the previous error code
if (pSql->retry > pSql->maxRetry) {
tscError("0x%"PRIx64" max retry %d reached, give up", pSql->self, pSql->maxRetry);
} else {
// wait for a little bit moment and then retry
// todo do not sleep in rpc callback thread, add this process into queueu to process
if (rpcMsg->code == TSDB_CODE_APP_NOT_READY || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) {
int32_t duration = getWaitingTimeInterval(pSql->retry);
taosMsleep(duration);
}
if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY |
TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) &&
!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY)) {
// do nothing in case of super table subquery
} else {
pSql->retry += 1;
tscWarn("0x%" PRIx64 " it shall renew table meta, code:%s, retry:%d", pSql->self, tstrerror(rpcMsg->code), pSql->retry);
pSql->retryReason = rpcMsg->code;
rpcMsg->code = tscRenewTableMeta(pSql, 0);
// if there is an error occurring, proceed to the following error handling procedure.
if (rpcMsg->code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
taosReleaseRef(tscObjRef, handle);
rpcFreeCont(rpcMsg->pCont);
return;
pSql->res.code = rpcMsg->code; // keep the previous error code
if (pSql->retry > pSql->maxRetry) {
tscError("0x%" PRIx64 " max retry %d reached, give up", pSql->self, pSql->maxRetry);
} else {
// wait for a little bit moment and then retry
// todo do not sleep in rpc callback thread, add this process into queue to process
if (rpcMsg->code == TSDB_CODE_APP_NOT_READY || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) {
int32_t duration = getWaitingTimeInterval(pSql->retry);
taosMsleep(duration);
}
pSql->retryReason = rpcMsg->code;
rpcMsg->code = tscRenewTableMeta(pSql, 0);
// if there is an error occurring, proceed to the following error handling procedure.
if (rpcMsg->code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
taosReleaseRef(tscObjRef, handle);
rpcFreeCont(rpcMsg->pCont);
return;
}
}
}
}
@ -614,7 +622,7 @@ int tscBuildSubmitMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pSql->cmd.msgType = TSDB_MSG_TYPE_SUBMIT;
SNewVgroupInfo vgroupInfo = {0};
taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo));
taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo);
tscDumpEpSetFromVgroupInfo(&pSql->epSet, &vgroupInfo);
tscDebug("0x%"PRIx64" submit msg built, numberOfEP:%d", pSql->self, pSql->epSet.numOfEps);
@ -632,7 +640,7 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql) {
SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
int32_t srcColListSize = (int32_t)(taosArrayGetSize(pQueryInfo->colList) * sizeof(SColumnInfo));
int32_t srcColFilterSize = tscGetColFilterSerializeLen(pQueryInfo);
size_t numOfExprs = tscNumOfExprs(pQueryInfo);
int32_t exprSize = (int32_t)(sizeof(SSqlExpr) * numOfExprs * 2);
@ -653,7 +661,7 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql) {
tableSerialize = totalTables * sizeof(STableIdInfo);
}
return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + exprSize + tsBufSize +
return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + srcColFilterSize + exprSize + tsBufSize +
tableSerialize + sqlLen + 4096 + pQueryInfo->bufLen;
}
@ -687,7 +695,7 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab
vgId = pTableMeta->vgId;
SNewVgroupInfo vgroupInfo = {0};
taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo));
taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo);
tscDumpEpSetFromVgroupInfo(&pSql->epSet, &vgroupInfo);
}
@ -1593,7 +1601,7 @@ int tscBuildUpdateTagMsg(SSqlObj* pSql, SSqlInfo *pInfo) {
STableMeta *pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta;
SNewVgroupInfo vgroupInfo = {.vgId = -1};
taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo));
taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo);
assert(vgroupInfo.vgId > 0);
tscDumpEpSetFromVgroupInfo(&pSql->epSet, &vgroupInfo);
@ -1820,34 +1828,6 @@ int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_SUCCESS;
}
int tscBuildTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
#if 0
SSqlCmd *pCmd = &pSql->cmd;
SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableInfoMsg *pInfoMsg = (STableInfoMsg *)pCmd->payload;
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pInfoMsg->tableFname);
if (code != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
pInfoMsg->createFlag = htons(pSql->cmd.autoCreated ? 1 : 0);
char *pMsg = (char *)pInfoMsg + sizeof(STableInfoMsg);
if (pCmd->autoCreated && pCmd->tagData.dataLen != 0) {
pMsg = serializeTagData(&pCmd->tagData, pMsg);
}
pCmd->payloadLen = (int32_t)(pMsg - (char*)pInfoMsg);
pCmd->msgType = TSDB_MSG_TYPE_CM_TABLE_META;
#endif
return TSDB_CODE_SUCCESS;
}
/**
* multi table meta req pkg format:
* |SMultiTableInfoMsg | tableId0 | tableId1 | tableId2 | ......
@ -2007,20 +1987,17 @@ static int32_t tableMetaMsgConvert(STableMetaMsg* pMetaMsg) {
}
// update the vgroupInfo if needed
static void doUpdateVgroupInfo(STableMeta *pTableMeta, SVgroupMsg *pVgroupMsg) {
if (pTableMeta->vgId > 0) {
int32_t vgId = pTableMeta->vgId;
assert(pTableMeta->tableType != TSDB_SUPER_TABLE);
static void doUpdateVgroupInfo(int32_t vgId, SVgroupMsg *pVgroupMsg) {
assert(vgId > 0);
SNewVgroupInfo vgroupInfo = {.inUse = -1};
taosHashGetClone(tscVgroupMap, &vgId, sizeof(vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo));
SNewVgroupInfo vgroupInfo = {.inUse = -1};
taosHashGetClone(tscVgroupMap, &vgId, sizeof(vgId), NULL, &vgroupInfo);
// vgroup info exists, compare with it
if (((vgroupInfo.inUse >= 0) && !vgroupInfoIdentical(&vgroupInfo, pVgroupMsg)) || (vgroupInfo.inUse < 0)) {
vgroupInfo = createNewVgroupInfo(pVgroupMsg);
taosHashPut(tscVgroupMap, &vgId, sizeof(vgId), &vgroupInfo, sizeof(vgroupInfo));
tscDebug("add new VgroupInfo, vgId:%d, total cached:%d", vgId, (int32_t) taosHashGetSize(tscVgroupMap));
}
// vgroup info exists, compare with it
if (((vgroupInfo.inUse >= 0) && !vgroupInfoIdentical(&vgroupInfo, pVgroupMsg)) || (vgroupInfo.inUse < 0)) {
vgroupInfo = createNewVgroupInfo(pVgroupMsg);
taosHashPut(tscVgroupMap, &vgId, sizeof(vgId), &vgroupInfo, sizeof(vgroupInfo));
tscDebug("add/update new VgroupInfo, vgId:%d, total cached:%d", vgId, (int32_t) taosHashGetSize(tscVgroupMap));
}
}
@ -2033,18 +2010,18 @@ static void doAddTableMetaToLocalBuf(STableMeta* pTableMeta, STableMetaMsg* pMet
if (updateSTable) {
STableMeta* pSupTableMeta = createSuperTableMeta(pMetaMsg);
uint32_t size = tscGetTableMetaSize(pSupTableMeta);
int32_t code = taosHashPut(tscTableMetaInfo, pTableMeta->sTableName, len, pSupTableMeta, size);
int32_t code = taosHashPut(tscTableMetaMap, pTableMeta->sTableName, len, pSupTableMeta, size);
assert(code == TSDB_CODE_SUCCESS);
tfree(pSupTableMeta);
}
CChildTableMeta* cMeta = tscCreateChildMeta(pTableMeta);
taosHashPut(tscTableMetaInfo, pMetaMsg->tableFname, strlen(pMetaMsg->tableFname), cMeta, sizeof(CChildTableMeta));
taosHashPut(tscTableMetaMap, pMetaMsg->tableFname, strlen(pMetaMsg->tableFname), cMeta, sizeof(CChildTableMeta));
tfree(cMeta);
} else {
uint32_t s = tscGetTableMetaSize(pTableMeta);
taosHashPut(tscTableMetaInfo, pMetaMsg->tableFname, strlen(pMetaMsg->tableFname), pTableMeta, s);
taosHashPut(tscTableMetaMap, pMetaMsg->tableFname, strlen(pMetaMsg->tableFname), pTableMeta, s);
}
}
@ -2069,7 +2046,9 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
assert(strncmp(pMetaMsg->tableFname, name, tListLen(pMetaMsg->tableFname)) == 0);
doAddTableMetaToLocalBuf(pTableMeta, pMetaMsg, true);
doUpdateVgroupInfo(pTableMeta, &pMetaMsg->vgroup);
if (pTableMeta->tableType != TSDB_SUPER_TABLE) {
doUpdateVgroupInfo(pTableMeta->vgId, &pMetaMsg->vgroup);
}
tscDebug("0x%"PRIx64" recv table meta, uid:%" PRIu64 ", tid:%d, name:%s, numOfCols:%d, numOfTags:%d", pSql->self,
pTableMeta->id.uid, pTableMeta->id.tid, tNameGetTableName(&pTableMetaInfo->name), pTableMeta->tableInfo.numOfColumns,
@ -2079,6 +2058,37 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
return TSDB_CODE_SUCCESS;
}
static SArray* createVgroupIdListFromMsg(char* pMsg, SHashObj* pSet, char* name, int32_t* size, uint64_t id) {
SVgroupsMsg *pVgroupMsg = (SVgroupsMsg *)pMsg;
pVgroupMsg->numOfVgroups = htonl(pVgroupMsg->numOfVgroups);
*size = (int32_t)(sizeof(SVgroupMsg) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsMsg));
SArray* vgroupIdList = taosArrayInit(pVgroupMsg->numOfVgroups, sizeof(int32_t));
if (pVgroupMsg->numOfVgroups <= 0) {
tscDebug("0x%" PRIx64 " empty vgroup id list, no corresponding tables for stable:%s", id, name);
} else {
// just init, no need to lock
for (int32_t j = 0; j < pVgroupMsg->numOfVgroups; ++j) {
SVgroupMsg *vmsg = &pVgroupMsg->vgroups[j];
vmsg->vgId = htonl(vmsg->vgId);
for (int32_t k = 0; k < vmsg->numOfEps; ++k) {
vmsg->epAddr[k].port = htons(vmsg->epAddr[k].port);
}
taosArrayPush(vgroupIdList, &vmsg->vgId);
if (taosHashGet(pSet, &vmsg->vgId, sizeof(vmsg->vgId)) == NULL) {
taosHashPut(pSet, &vmsg->vgId, sizeof(vmsg->vgId), "", 0);
doUpdateVgroupInfo(vmsg->vgId, vmsg);
}
}
}
return vgroupIdList;
}
static SVgroupsInfo* createVgroupInfoFromMsg(char* pMsg, int32_t* size, uint64_t id) {
SVgroupsMsg *pVgroupMsg = (SVgroupsMsg *)pMsg;
pVgroupMsg->numOfVgroups = htonl(pVgroupMsg->numOfVgroups);
@ -2103,24 +2113,14 @@ static SVgroupsInfo* createVgroupInfoFromMsg(char* pMsg, int32_t* size, uint64_t
vmsg->epAddr[k].port = htons(vmsg->epAddr[k].port);
}
SNewVgroupInfo newVi = createNewVgroupInfo(vmsg);
pVgroup->numOfEps = newVi.numOfEps;
pVgroup->vgId = newVi.vgId;
pVgroup->numOfEps = vmsg->numOfEps;
pVgroup->vgId = vmsg->vgId;
for (int32_t k = 0; k < vmsg->numOfEps; ++k) {
pVgroup->epAddr[k].port = newVi.ep[k].port;
pVgroup->epAddr[k].fqdn = strndup(newVi.ep[k].fqdn, TSDB_FQDN_LEN);
pVgroup->epAddr[k].port = vmsg->epAddr[k].port;
pVgroup->epAddr[k].fqdn = strndup(vmsg->epAddr[k].fqdn, TSDB_FQDN_LEN);
}
// check if current buffer contains the vgroup info.
// If not, add it
SNewVgroupInfo existVgroupInfo = {.inUse = -1};
taosHashGetClone(tscVgroupMap, &newVi.vgId, sizeof(newVi.vgId), NULL, &existVgroupInfo, sizeof(SNewVgroupInfo));
if (((existVgroupInfo.inUse >= 0) && !vgroupInfoIdentical(&existVgroupInfo, vmsg)) ||
(existVgroupInfo.inUse < 0)) { // vgroup info exists, compare with it
taosHashPut(tscVgroupMap, &newVi.vgId, sizeof(newVi.vgId), &newVi, sizeof(newVi));
tscDebug("0x%" PRIx64 " add new VgroupInfo, vgId:%d, total cached:%d", id, newVi.vgId, (int32_t)taosHashGetSize(tscVgroupMap));
}
doUpdateVgroupInfo(pVgroup->vgId, vmsg);
}
}
@ -2198,6 +2198,8 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) {
char* buf = NULL;
char* pMsg = pMultiMeta->meta;
// decompress the message payload
if (pMultiMeta->compressed) {
buf = malloc(pMultiMeta->rawLen - sizeof(SMultiTableMeta));
int32_t len = tsDecompressString(pMultiMeta->meta, pMultiMeta->contLen - sizeof(SMultiTableMeta), 1,
@ -2230,15 +2232,13 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) {
return TSDB_CODE_TSC_INVALID_VALUE;
}
SName sn = {0};
tNameFromString(&sn, pMetaMsg->tableFname, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
if (pMultiMeta->metaClone == 1 || pTableMeta->tableType == TSDB_SUPER_TABLE) {
STableMetaVgroupInfo p = {.pTableMeta = pTableMeta,};
size_t keyLen = strnlen(pMetaMsg->tableFname, TSDB_TABLE_FNAME_LEN);
void* t = taosHashGet(pParentCmd->pTableMetaMap, pMetaMsg->tableFname, keyLen);
assert(t == NULL);
const char* tableName = tNameGetTableName(&sn);
size_t keyLen = strlen(tableName);
taosHashPut(pParentCmd->pTableMetaMap, tableName, keyLen, &p, sizeof(STableMetaVgroupInfo));
taosHashPut(pParentCmd->pTableMetaMap, pMetaMsg->tableFname, keyLen, &p, sizeof(STableMetaVgroupInfo));
} else {
freeMeta = true;
}
@ -2256,7 +2256,7 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) {
// for each vgroup, only update the information once.
int64_t vgId = pMetaMsg->vgroup.vgId;
if (pTableMeta->tableType != TSDB_SUPER_TABLE && taosHashGet(pSet, &vgId, sizeof(vgId)) == NULL) {
doUpdateVgroupInfo(pTableMeta, &pMetaMsg->vgroup);
doUpdateVgroupInfo((int32_t) vgId, &pMetaMsg->vgroup);
taosHashPut(pSet, &vgId, sizeof(vgId), "", 0);
}
@ -2267,18 +2267,33 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) {
}
for(int32_t i = 0; i < pMultiMeta->numOfVgroup; ++i) {
char* name = pMsg;
pMsg += TSDB_TABLE_NAME_LEN;
char fname[TSDB_TABLE_FNAME_LEN] = {0};
tstrncpy(fname, pMsg, TSDB_TABLE_FNAME_LEN);
size_t len = strnlen(fname, TSDB_TABLE_FNAME_LEN);
STableMetaVgroupInfo* p = taosHashGet(pParentCmd->pTableMetaMap, name, strnlen(name, TSDB_TABLE_NAME_LEN));
pMsg += TSDB_TABLE_FNAME_LEN;
STableMetaVgroupInfo* p = taosHashGet(pParentCmd->pTableMetaMap, fname, len);
assert(p != NULL);
int32_t size = 0;
if (p->pVgroupInfo!= NULL) {
tscVgroupInfoClear(p->pVgroupInfo);
//tfree(p->pTableMeta);
if (p->vgroupIdList!= NULL) {
taosArrayDestroy(p->vgroupIdList);
}
p->pVgroupInfo = createVgroupInfoFromMsg(pMsg, &size, pSql->self);
p->vgroupIdList = createVgroupIdListFromMsg(pMsg, pSet, fname, &size, pSql->self);
int32_t numOfVgId = (int32_t) taosArrayGetSize(p->vgroupIdList);
int32_t s = sizeof(tFilePage) + numOfVgId * sizeof(int32_t);
tFilePage* idList = calloc(1, s);
idList->num = numOfVgId;
memcpy(idList->data, TARRAY_GET_START(p->vgroupIdList), numOfVgId * sizeof(int32_t));
void* idListInst = taosCachePut(tscVgroupListBuf, fname, len, idList, s, 5000);
taosCacheRelease(tscVgroupListBuf, (void*) &idListInst, false);
tfree(idList);
pMsg += size;
}
@ -2343,14 +2358,18 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) {
SSqlCmd* pCmd = &parent->cmd;
SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
char fName[TSDB_TABLE_FNAME_LEN] = {0};
for(int32_t i = 0; i < pStableVgroup->numOfTables; ++i) {
char* name = pMsg;
pMsg += TSDB_TABLE_NAME_LEN;
pMsg += TSDB_TABLE_FNAME_LEN;
STableMetaInfo *pInfo = NULL;
for(int32_t j = 0; j < pQueryInfo->numOfTables; ++j) {
STableMetaInfo *pInfo1 = tscGetTableMetaInfoFromCmd(pCmd, j);
if (strcmp(name, tNameGetTableName(&pInfo1->name)) != 0) {
memset(fName, 0, tListLen(fName));
tNameExtractFullName(&pInfo1->name, fName);
if (strcmp(name, fName) != 0) {
continue;
}
@ -2510,24 +2529,20 @@ int tscProcessUseDbRsp(SSqlObj *pSql) {
return ret;
}
//todo only invalidate the buffered data that belongs to dropped databases
int tscProcessDropDbRsp(SSqlObj *pSql) {
//TODO LOCK DB WHEN MODIFY IT
//pSql->pTscObj->db[0] = 0;
taosHashClear(tscTableMetaInfo);
taosHashClear(tscTableMetaMap);
taosHashClear(tscVgroupMap);
taosCacheEmpty(tscVgroupListBuf);
return 0;
}
int tscProcessDropTableRsp(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0);
//The cached tableMeta is expired in this case, so clean it in hash table
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
tscDebug("0x%"PRIx64" remove table meta after drop table:%s, numOfRemain:%d", pSql->self, name, (int32_t) taosHashGetSize(tscTableMetaInfo));
tscRemoveTableMetaBuf(pTableMetaInfo, pSql->self);
tfree(pTableMetaInfo->pTableMeta);
return 0;
}
@ -2541,11 +2556,11 @@ int tscProcessAlterTableMsgRsp(SSqlObj *pSql) {
tscDebug("0x%"PRIx64" remove tableMeta in hashMap after alter-table: %s", pSql->self, name);
bool isSuperTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
tfree(pTableMetaInfo->pTableMeta);
if (isSuperTable) { // if it is a super table, iterate the hashTable and remove all the childTableMeta
taosHashClear(tscTableMetaInfo);
taosHashClear(tscTableMetaMap);
}
return 0;
@ -2812,7 +2827,7 @@ int32_t tscGetTableMetaImpl(SSqlObj* pSql, STableMetaInfo *pTableMetaInfo, bool
tNameExtractFullName(&pTableMetaInfo->name, name);
size_t len = strlen(name);
taosHashGetClone(tscTableMetaInfo, name, len, NULL, pTableMetaInfo->pTableMeta, -1);
taosHashGetClone(tscTableMetaMap, name, len, NULL, pTableMetaInfo->pTableMeta);
// TODO resize the tableMeta
assert(size < 80 * TSDB_MAX_COLUMNS);
@ -2903,6 +2918,10 @@ int32_t tscGetUdfFromNode(SSqlObj *pSql, SQueryInfo* pQueryInfo) {
return code;
}
static void freeElem(void* p) {
tfree(*(char**)p);
}
/**
* retrieve table meta from mnode, and then update the local table meta hashmap.
* @param pSql sql object
@ -2910,7 +2929,7 @@ int32_t tscGetUdfFromNode(SSqlObj *pSql, SQueryInfo* pQueryInfo) {
* @return status code
*/
int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlCmd* pCmd = &pSql->cmd;
SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, tableIndex);
@ -2924,15 +2943,26 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) {
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
if (pTableMeta) {
tscDebug("0x%"PRIx64" update table meta:%s, old meta numOfTags:%d, numOfCols:%d, uid:%" PRId64, pSql->self, name,
tscDebug("0x%"PRIx64" update table meta:%s, old meta numOfTags:%d, numOfCols:%d, uid:%" PRIu64, pSql->self, name,
tscGetNumOfTags(pTableMeta), tscGetNumOfColumns(pTableMeta), pTableMeta->id.uid);
}
// remove stored tableMeta info in hash table
size_t len = strlen(name);
taosHashRemove(tscTableMetaInfo, name, len);
tscRemoveTableMetaBuf(pTableMetaInfo, pSql->self);
return getTableMetaFromMnode(pSql, pTableMetaInfo, false);
pCmd->pTableMetaMap = tscCleanupTableMetaMap(pCmd->pTableMetaMap);
pCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
SArray* pNameList = taosArrayInit(1, POINTER_BYTES);
SArray* vgroupList = taosArrayInit(1, POINTER_BYTES);
char* n = strdup(name);
taosArrayPush(pNameList, &n);
code = getMultiTableMetaFromMnode(pSql, pNameList, vgroupList, NULL, tscTableMetaCallBack, true);
taosArrayDestroyEx(pNameList, freeElem);
taosArrayDestroyEx(vgroupList, freeElem);
return code;
}
static bool allVgroupInfoRetrieved(SQueryInfo* pQueryInfo) {
@ -2982,8 +3012,6 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, SQueryInfo* pQueryInfo) {
tscDebug("0x%"PRIx64" svgroupRid from %" PRId64 " to %" PRId64 , pSql->self, pSql->svgroupRid, pNew->self);
pSql->svgroupRid = pNew->self;
tscDebug("0x%"PRIx64" new sqlObj:%p to get vgroupInfo, numOfTables:%d", pSql->self, pNew, pNewQueryInfo->numOfTables);
pNew->fp = tscTableMetaCallBack;
@ -3026,7 +3054,6 @@ void tscInitMsgsFp() {
tscBuildMsg[TSDB_SQL_CONNECT] = tscBuildConnectMsg;
tscBuildMsg[TSDB_SQL_USE_DB] = tscBuildUseDbMsg;
// tscBuildMsg[TSDB_SQL_META] = tscBuildTableMetaMsg;
tscBuildMsg[TSDB_SQL_STABLEVGROUP] = tscBuildSTableVgroupMsg;
tscBuildMsg[TSDB_SQL_RETRIEVE_FUNC] = tscBuildRetrieveFuncMsg;

View File

@ -206,7 +206,7 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
tfree(pTableMetaInfo->pTableMeta);

View File

@ -2713,8 +2713,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
tstrerror(pParentSql->res.code));
// release allocated resource
tscDestroyGlobalMergerEnv(trsupport->pExtMemBuffer, trsupport->pOrderDescriptor,
pState->numOfSub);
tscDestroyGlobalMergerEnv(trsupport->pExtMemBuffer, trsupport->pOrderDescriptor, pState->numOfSub);
tscFreeRetrieveSup(pSql);
@ -2722,7 +2721,35 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
SQueryInfo *pQueryInfo = tscGetQueryInfo(&pParentSql->cmd);
if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) {
(*pParentSql->fp)(pParentSql->param, pParentSql, pParentSql->res.code);
int32_t code = pParentSql->res.code;
if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && pParentSql->retry < pParentSql->maxRetry) {
// remove the cached tableMeta and vgroup id list, and then parse the sql again
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pParentSql->cmd, 0);
tscRemoveTableMetaBuf(pTableMetaInfo, pParentSql->self);
tscResetSqlCmd(&pParentSql->cmd, true);
pParentSql->res.code = TSDB_CODE_SUCCESS;
pParentSql->retry++;
tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self,
tstrerror(code), pParentSql->retry);
code = tsParseSql(pParentSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return;
}
if (code != TSDB_CODE_SUCCESS) {
pParentSql->res.code = code;
tscAsyncResultOnError(pParentSql);
return;
}
executeQuery(pParentSql, pQueryInfo);
} else {
(*pParentSql->fp)(pParentSql->param, pParentSql, pParentSql->res.code);
}
} else { // regular super table query
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
tscAsyncResultOnError(pParentSql);
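
A condensed, standalone analogue of the new error path above (the error codes are placeholders, not the real ones): only errors that a fresh table meta can cure trigger a retry, and the number of attempts is bounded.

#include <stdio.h>
#include <stdbool.h>

#define ERR_INVALID_TABLE_ID 1  /* hypothetical code */

static int attempts = 0;
static int run_query(void) { return (attempts++ < 2) ? ERR_INVALID_TABLE_ID : 0; }

int main() {
  int retry = 0, maxRetry = 3, code;
  while ((code = run_query()) != 0) {
    bool curable = (code == ERR_INVALID_TABLE_ID);
    if (!curable || ++retry > maxRetry) { printf("give up, code:%d\n", code); return 1; }
    /* here: drop the cached meta/vgroup list, re-parse the sql, resend */
  }
  printf("ok after %d retries\n", retry);  /* ok after 2 retries */
  return 0;
}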
@ -2901,7 +2928,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
tscDebug("0x%"PRIx64" sub:0x%"PRIx64" retrieve numOfRows:%d totalNumOfRows:%" PRIu64 " from ep:%s, orderOfSub:%d",
pParentSql->self, pSql->self, pRes->numOfRows, pState->numOfRetrievedRows, pSql->epSet.fqdn[pSql->epSet.inUse], idx);
if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0) && !(tscGetQueryInfo(&pParentSql->cmd)->distinctTag)) {
if (num > tsMaxNumOfOrderedResults && /*tscIsProjectionQueryOnSTable(pQueryInfo, 0) &&*/ !(tscGetQueryInfo(&pParentSql->cmd)->distinct)) {
tscError("0x%"PRIx64" sub:0x%"PRIx64" num of OrderedRes is too many, max allowed:%" PRId32 " , current:%" PRId64,
pParentSql->self, pSql->self, tsMaxNumOfOrderedResults, num);
tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_SORTED_RES_TOO_MANY);
@ -3005,7 +3032,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
assert(code == taos_errno(pSql));
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) {
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && (code != TSDB_CODE_TDB_INVALID_TABLE_ID)) {
tscError("0x%"PRIx64" sub:0x%"PRIx64" failed code:%s, retry:%d", pParentSql->self, pSql->self, tstrerror(code), trsupport->numOfRetry);
int32_t sent = 0;
@ -3014,7 +3041,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
return;
}
} else {
tscError("0x%"PRIx64" sub:0x%"PRIx64" reach the max retry times, set global code:%s", pParentSql->self, pSql->self, tstrerror(code));
tscError("0x%"PRIx64" sub:0x%"PRIx64" reach the max retry times or no need to retry, set global code:%s", pParentSql->self, pSql->self, tstrerror(code));
atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, code); // set global code and abort
}
@ -3134,12 +3161,10 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
for(int32_t i = 0; i < pParentObj->cmd.insertParam.numOfTables; ++i) {
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(pParentObj->cmd.insertParam.pTableNameList[i], name);
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
}
pParentObj->res.code = TSDB_CODE_SUCCESS;
// pParentObj->cmd.parseFinished = false;
tscResetSqlCmd(&pParentObj->cmd, false);
// in case of insert, redo parsing the sql string and build new submit data block for two reasons:

View File

@ -19,15 +19,12 @@
#include "trpc.h"
#include "tnote.h"
#include "ttimer.h"
#include "tutil.h"
#include "tsched.h"
#include "tscLog.h"
#include "tscUtil.h"
#include "tsclient.h"
#include "tglobal.h"
#include "tconfig.h"
#include "ttimezone.h"
#include "tlocale.h"
#include "qScript.h"
// global, not configurable
@ -36,8 +33,10 @@
int32_t sentinel = TSC_VAR_NOT_RELEASE;
SHashObj *tscVgroupMap; // hash map to keep the global vgroup info
SHashObj *tscTableMetaInfo; // table meta info
SHashObj *tscVgroupMap; // hash map to keep the vgroup info from mnode
SHashObj *tscTableMetaMap; // table meta info buffer
SCacheObj *tscVgroupListBuf; // super table vgroup list information, only survives 5 seconds for each super table vgroup list
int32_t tscObjRef = -1;
void *tscTmr;
void *tscQhandle;
@ -45,17 +44,21 @@ int32_t tscRefId = -1;
int32_t tscNumOfObj = 0; // number of sqlObj in current process.
static void *tscCheckDiskUsageTmr;
void *tscRpcCache; // cache to keep rpc obj
int32_t tscNumOfThreads = 1; // num of rpc threads
char tscLogFileName[12] = "taoslog";
int tscLogFileNum = 10;
static pthread_mutex_t rpcObjMutex; // mutex to protect open the rpc obj concurrently
static pthread_once_t tscinit = PTHREAD_ONCE_INIT;
int32_t tscNumOfThreads = 1; // num of rpc threads
char tscLogFileName[12] = "taoslog";
int tscLogFileNum = 10;
static pthread_mutex_t rpcObjMutex; // mutex to protect open the rpc obj concurrently
static pthread_once_t tscinit = PTHREAD_ONCE_INIT;
// pthread_once cannot return a result code, so the result code is stored in a global variable.
static volatile int tscInitRes = 0;
void tscCheckDiskUsage(void *UNUSED_PARAM(para), void *UNUSED_PARAM(param)) {
taosGetDisk();
taosTmrReset(tscCheckDiskUsage, 20 * 1000, NULL, tscTmr, &tscCheckDiskUsageTmr);
}
void tscFreeRpcObj(void *param) {
assert(param);
SRpcObj *pRpcObj = (SRpcObj *)(param);
@ -67,10 +70,9 @@ void tscReleaseRpc(void *param) {
if (param == NULL) {
return;
}
pthread_mutex_lock(&rpcObjMutex);
taosCacheRelease(tscRpcCache, (void *)&param, false);
pthread_mutex_unlock(&rpcObjMutex);
}
taosCacheRelease(tscRpcCache, (void *)&param, false);
}
int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncrypt, void **ppRpcObj) {
pthread_mutex_lock(&rpcObjMutex);
@ -80,7 +82,7 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry
*ppRpcObj = pRpcObj;
pthread_mutex_unlock(&rpcObjMutex);
return 0;
}
}
SRpcInit rpcInit;
memset(&rpcInit, 0, sizeof(rpcInit));
@ -104,7 +106,8 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry
pthread_mutex_unlock(&rpcObjMutex);
tscError("failed to init connection to TDengine");
return -1;
}
}
pRpcObj = taosCachePut(tscRpcCache, rpcObj.key, strlen(rpcObj.key), &rpcObj, sizeof(rpcObj), 1000*5);
if (pRpcObj == NULL) {
rpcClose(rpcObj.pDnodeConn);
@ -118,7 +121,7 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry
}
void taos_init_imp(void) {
char temp[128] = {0};
char temp[128] = {0};
errno = TSDB_CODE_SUCCESS;
srand(taosGetTimestampSec());
@ -151,36 +154,41 @@ void taos_init_imp(void) {
rpcInit();
scriptEnvPoolInit();
tscDebug("starting to initialize TAOS client ...");
tscDebug("Local End Point is:%s", tsLocalEp);
}
taosSetCoreDump();
tscInitMsgsFp();
int queueSize = tsMaxConnections*2;
double factor = (tscEmbedded == 0)? 2.0:4.0;
tscNumOfThreads = (int)(tsNumOfCores * tsNumOfThreadsPerCore / factor);
if (tscNumOfThreads < 2) {
tscNumOfThreads = 2;
}
int32_t queueSize = tsMaxConnections*2;
tscQhandle = taosInitScheduler(queueSize, tscNumOfThreads, "tsc");
if (NULL == tscQhandle) {
tscError("failed to init scheduler");
tscError("failed to init task queue");
tscInitRes = -1;
return;
}
tscDebug("client task queue is initialized, numOfWorkers: %d", tscNumOfThreads);
tscTmr = taosTmrInit(tsMaxConnections * 2, 200, 60000, "TSC");
if(0 == tscEmbedded){
taosTmrReset(tscCheckDiskUsage, 20 * 1000, NULL, tscTmr, &tscCheckDiskUsageTmr);
}
if (tscTableMetaInfo == NULL) {
tscObjRef = taosOpenRef(40960, tscFreeRegisteredSqlObj);
tscVgroupMap = taosHashInit(256, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
tscTableMetaInfo = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
tscDebug("TableMeta:%p", tscTableMetaInfo);
if (tscTableMetaMap == NULL) {
tscObjRef = taosOpenRef(40960, tscFreeRegisteredSqlObj);
tscVgroupMap = taosHashInit(256, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
tscTableMetaMap = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
tscVgroupListBuf = taosCacheInit(TSDB_DATA_TYPE_BINARY, 5, false, NULL, "stable-vgroup-list");
tscDebug("TableMeta:%p, vgroup:%p is initialized", tscTableMetaMap, tscVgroupMap);
}
int refreshTime = 5;
@ -189,14 +197,17 @@ void taos_init_imp(void) {
tscRefId = taosOpenRef(200, tscCloseTscObj);
// in other language APIs, taos_cleanup is not available yet.
// So, to make sure taos_cleanup will be invoked to clean up the allocated
// resource to suppress the valgrind warning.
// In the APIs of other programming languages, taos_cleanup is not available yet.
// So atexit() is used here to make sure taos_cleanup is invoked to clean up the allocated resources and suppress the valgrind warning.
atexit(taos_cleanup);
tscDebug("client is initialized successfully");
}
int taos_init() { pthread_once(&tscinit, taos_init_imp); return tscInitRes;}
int taos_init() {
pthread_once(&tscinit, taos_init_imp);
return tscInitRes;
}
// this function may be called by user or system, or by both simultaneously.
void taos_cleanup(void) {
@ -205,11 +216,13 @@ void taos_cleanup(void) {
if (atomic_val_compare_exchange_32(&sentinel, TSC_VAR_NOT_RELEASE, TSC_VAR_RELEASED) != TSC_VAR_NOT_RELEASE) {
return;
}
if (tscEmbedded == 0) {
scriptEnvPoolCleanup();
}
taosHashCleanup(tscTableMetaInfo);
tscTableMetaInfo = NULL;
taosHashCleanup(tscTableMetaMap);
tscTableMetaMap = NULL;
taosHashCleanup(tscVgroupMap);
tscVgroupMap = NULL;
@ -236,6 +249,9 @@ void taos_cleanup(void) {
pthread_mutex_destroy(&rpcObjMutex);
}
taosCacheCleanup(tscVgroupListBuf);
tscVgroupListBuf = NULL;
if (tscEmbedded == 0) {
rpcCleanup();
taosCloseLog();

View File

@ -1420,7 +1420,7 @@ void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta) {
if (pCmd->pTableMetaMap != NULL) {
STableMetaVgroupInfo* p = taosHashIterate(pCmd->pTableMetaMap, NULL);
while (p) {
tscVgroupInfoClear(p->pVgroupInfo);
taosArrayDestroy(p->vgroupIdList);
tfree(p->pTableMeta);
p = taosHashIterate(pCmd->pTableMetaMap, p);
}
@ -1430,6 +1430,22 @@ void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta) {
}
}
void* tscCleanupTableMetaMap(SHashObj* pTableMetaMap) {
if (pTableMetaMap == NULL) {
return NULL;
}
STableMetaVgroupInfo* p = taosHashIterate(pTableMetaMap, NULL);
while (p) {
taosArrayDestroy(p->vgroupIdList);
tfree(p->pTableMeta);
p = taosHashIterate(pTableMetaMap, p);
}
taosHashCleanup(pTableMetaMap);
return NULL;
}
void tscFreeSqlResult(SSqlObj* pSql) {
SSqlRes* pRes = &pSql->res;
@ -1554,7 +1570,7 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta) {
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pDataBlock->tableName, name);
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
}
if (!pDataBlock->cloned) {
@ -3466,7 +3482,7 @@ void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta) {
if (removeMeta) {
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
}
tscFreeVgroupTableInfo(pTableMetaInfo->pVgroupTables);
@ -3582,11 +3598,9 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, in
SSqlCmd* pCmd = &pNew->cmd;
pCmd->command = cmd;
tsem_init(&pNew->rspSem, 0 ,0);
if (tscAddQueryInfo(pCmd) != TSDB_CODE_SUCCESS) {
#ifdef __APPLE__
// to satisfy later tsem_destroy in taos_free_result
tsem_init(&pNew->rspSem, 0, 0);
#endif // __APPLE__
tscFreeSqlObj(pNew);
return NULL;
}
@ -4466,7 +4480,7 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta* pChild, const char* name, v
assert(pChild != NULL && buf != NULL);
STableMeta* p = buf;
taosHashGetClone(tscTableMetaInfo, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, p, -1);
taosHashGetClone(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, p);
// tableMeta exists, build child table meta according to the super table meta
// the uid needs to be checked in addition to the name of the super table.
@ -4480,7 +4494,7 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta* pChild, const char* name, v
memcpy(pChild->schema, p->schema, sizeof(SSchema) *total);
return TSDB_CODE_SUCCESS;
} else { // super table has been removed, current tableMeta is also expired. remove it here
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
return -1;
}
}
@ -4686,6 +4700,22 @@ static int32_t createTagColumnInfo(SQueryAttr* pQueryAttr, SQueryInfo* pQueryInf
return TSDB_CODE_SUCCESS;
}
int32_t tscGetColFilterSerializeLen(SQueryInfo* pQueryInfo) {
int16_t numOfCols = (int16_t)taosArrayGetSize(pQueryInfo->colList);
int32_t len = 0;
for(int32_t i = 0; i < numOfCols; ++i) {
SColumn* pCol = taosArrayGetP(pQueryInfo->colList, i);
for (int32_t j = 0; j < pCol->info.flist.numOfFilters; ++j) {
len += sizeof(SColumnFilterInfo);
if (pCol->info.flist.filterInfo[j].filterstr) {
len += (int32_t)pCol->info.flist.filterInfo[j].len + 1 * TSDB_NCHAR_SIZE;
}
}
}
return len;
}
int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr) {
memset(pQueryAttr, 0, sizeof(SQueryAttr));
@ -4704,7 +4734,7 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
pQueryAttr->queryBlockDist = isBlockDistQuery(pQueryInfo);
pQueryAttr->pointInterpQuery = tscIsPointInterpQuery(pQueryInfo);
pQueryAttr->timeWindowInterpo = timeWindowInterpoRequired(pQueryInfo);
pQueryAttr->distinctTag = pQueryInfo->distinctTag;
pQueryAttr->distinct = pQueryInfo->distinct;
pQueryAttr->sw = pQueryInfo->sessionWindow;
pQueryAttr->stateWindow = pQueryInfo->stateWindow;
@ -4963,3 +4993,19 @@ SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg) {
return info;
}
void tscRemoveTableMetaBuf(STableMetaInfo* pTableMetaInfo, uint64_t id) {
char fname[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, fname);
int32_t len = (int32_t) strnlen(fname, TSDB_TABLE_FNAME_LEN);
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
void* pv = taosCacheAcquireByKey(tscVgroupListBuf, fname, len);
if (pv != NULL) {
taosCacheRelease(tscVgroupListBuf, &pv, true);
}
}
taosHashRemove(tscTableMetaMap, fname, len);
tscDebug("0x%"PRIx64" remove table meta %s, numOfRemain:%d", id, fname, (int32_t) taosHashGetSize(tscTableMetaMap));
}

View File

@ -88,10 +88,7 @@ enum {
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_STABLE, "show-create-stable")
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_DATABASE, "show-create-database")
/*
* build empty result instead of accessing dnode to fetch result
* reset the client cache
*/
// build an empty result instead of accessing the dnode to fetch it; reset the client cache
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RETRIEVE_EMPTY_RESULT, "retrieve-empty-result" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RESET_CACHE, "reset-cache" )

View File

@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
POST_BUILD
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.33-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.34-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})

View File

@ -5,7 +5,7 @@
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.33</version>
<version>2.0.34</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>

View File

@ -3,7 +3,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.33</version>
<version>2.0.34</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>

View File

@ -130,7 +130,7 @@ public abstract class TSDBConstants {
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
return Types.NCHAR;
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE);
}
public static String taosType2JdbcTypeName(int taosType) throws SQLException {
@ -160,7 +160,7 @@ public abstract class TSDBConstants {
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
return "NCHAR";
default:
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE);
}
}

View File

@ -31,8 +31,8 @@ public class TSDBError {
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_URL_NOT_SET, "url is not set");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_INVALID_SQL, "invalid sql");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE, "numeric value out of range");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE, "unknown taos type in tdengine");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PERCISION, "unknown timestamp precision");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE, "unknown taos type in tdengine");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PRECISION, "unknown timestamp precision");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN, "unknown error");

View File

@ -25,8 +25,10 @@ public class TSDBErrorNumbers {
public static final int ERROR_URL_NOT_SET = 0x2312; // url is not set
public static final int ERROR_INVALID_SQL = 0x2313; // invalid sql
public static final int ERROR_NUMERIC_VALUE_OUT_OF_RANGE = 0x2314; // numeric value out of range
public static final int ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE = 0x2315; //unknown taos type in tdengine
public static final int ERROR_UNKNOWN_TIMESTAMP_PERCISION = 0x2316; // unknown timestamp precision
public static final int ERROR_UNKNOWN_TAOS_TYPE = 0x2315; //unknown taos type in tdengine
public static final int ERROR_UNKNOWN_TIMESTAMP_PRECISION = 0x2316; // unknown timestamp precision
public static final int ERROR_RESTFul_Client_Protocol_Exception = 0x2317;
public static final int ERROR_RESTFul_Client_IOException = 0x2318;
public static final int ERROR_UNKNOWN = 0x2350; //unknown error
@ -62,8 +64,11 @@ public class TSDBErrorNumbers {
errorNumbers.add(ERROR_URL_NOT_SET);
errorNumbers.add(ERROR_INVALID_SQL);
errorNumbers.add(ERROR_NUMERIC_VALUE_OUT_OF_RANGE);
errorNumbers.add(ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE);
errorNumbers.add(ERROR_UNKNOWN_TIMESTAMP_PERCISION);
errorNumbers.add(ERROR_UNKNOWN_TAOS_TYPE);
errorNumbers.add(ERROR_UNKNOWN_TIMESTAMP_PRECISION);
errorNumbers.add(ERROR_RESTFul_Client_IOException);
errorNumbers.add(ERROR_RESTFul_Client_Protocol_Exception);
errorNumbers.add(ERROR_SUBSCRIBE_FAILED);
errorNumbers.add(ERROR_UNSUPPORTED_ENCODING);

View File

@ -213,7 +213,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
long nanoAdjustment = Integer.parseInt(value.substring(20));
return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PERCISION);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PRECISION);
}
}
}

View File

@ -1,15 +1,18 @@
package com.taosdata.jdbc.utils;
import com.taosdata.jdbc.TSDBError;
import com.taosdata.jdbc.TSDBErrorNumbers;
import org.apache.http.HeaderElement;
import org.apache.http.HeaderElementIterator;
import org.apache.http.HttpEntity;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpRequestRetryHandler;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.*;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.conn.ConnectionKeepAliveStrategy;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.DefaultHttpRequestRetryHandler;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.http.message.BasicHeaderElementIterator;
@ -17,35 +20,24 @@ import org.apache.http.protocol.HTTP;
import org.apache.http.protocol.HttpContext;
import org.apache.http.util.EntityUtils;
import javax.net.ssl.SSLException;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.sql.SQLException;
public class HttpClientPoolUtil {
private static final String DEFAULT_CONTENT_TYPE = "application/json";
private static final int DEFAULT_MAX_TOTAL = 200;
private static final int DEFAULT_MAX_PER_ROUTE = 20;
private static final int DEFAULT_TIME_OUT = 15000;
private static final int DEFAULT_MAX_PER_ROUTE = 32;
private static final int DEFAULT_MAX_TOTAL = 1000;
private static final int DEFAULT_HTTP_KEEP_TIME = 15000;
private static CloseableHttpClient httpClient;
private static synchronized void initPools() {
if (httpClient == null) {
PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager();
connectionManager.setDefaultMaxPerRoute(DEFAULT_MAX_PER_ROUTE);
connectionManager.setMaxTotal(DEFAULT_MAX_TOTAL);
httpClient = HttpClients.custom()
.setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY)
.setConnectionManager(connectionManager)
.setRetryHandler(new DefaultHttpRequestRetryHandler(3, true))
.build();
}
}
private static final int DEFAULT_MAX_RETRY_COUNT = 5;
private static final ConnectionKeepAliveStrategy DEFAULT_KEEP_ALIVE_STRATEGY = (response, context) -> {
HeaderElementIterator it = new BasicHeaderElementIterator(response.headerIterator(HTTP.CONN_KEEP_ALIVE));
int keepTime = DEFAULT_HTTP_KEEP_TIME * 1000;
while (it.hasNext()) {
HeaderElement headerElement = it.nextElement();
String param = headerElement.getName();
@ -53,34 +45,73 @@ public class HttpClientPoolUtil {
if (value != null && param.equalsIgnoreCase("timeout")) {
try {
return Long.parseLong(value) * 1000;
} catch (Exception e) {
new Exception("format KeepAlive timeout exception, exception:" + e.toString()).printStackTrace();
} catch (NumberFormatException ignore) {
}
}
}
return keepTime;
return DEFAULT_HTTP_KEEP_TIME * 1000;
};
/**
 * Execute an HTTP POST request.
 * Defaults to Content-Type: application/json and Accept: application/json.
 *
 * @param uri  request address
 * @param data request payload
 * @return responseBody
 */
public static String execute(String uri, String data, String token) {
long startTime = System.currentTimeMillis();
private static final HttpRequestRetryHandler retryHandler = (exception, executionCount, httpContext) -> {
if (executionCount >= DEFAULT_MAX_RETRY_COUNT)
// do not retry if over max retry count
return false;
if (exception instanceof InterruptedIOException)
// timeout
return false;
if (exception instanceof UnknownHostException)
// unknown host
return false;
if (exception instanceof SSLException)
// SSL handshake exception
return false;
return true;
};
private static CloseableHttpClient httpClient;
static {
PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager();
connectionManager.setMaxTotal(DEFAULT_MAX_TOTAL);
connectionManager.setDefaultMaxPerRoute(DEFAULT_MAX_PER_ROUTE);
httpClient = HttpClients.custom().setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY).setConnectionManager(connectionManager).setRetryHandler(retryHandler).build();
}
/*** execute GET request ***/
public static String execute(String uri) throws SQLException {
HttpEntity httpEntity = null;
HttpEntityEnclosingRequestBase method = null;
String responseBody = "";
try {
if (httpClient == null) {
initPools();
HttpRequestBase method = getRequest(uri, HttpGet.METHOD_NAME);
HttpContext context = HttpClientContext.create();
CloseableHttpResponse httpResponse = httpClient.execute(method, context);
httpEntity = httpResponse.getEntity();
if (httpEntity != null) {
responseBody = EntityUtils.toString(httpEntity, StandardCharsets.UTF_8);
}
method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME, DEFAULT_CONTENT_TYPE, 0);
method.setHeader("Content-Type", "text/plain");
method.setHeader("Connection", "keep-alive");
} catch (ClientProtocolException e) {
e.printStackTrace();
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_Protocol_Exception, e.getMessage());
} catch (IOException exception) {
exception.printStackTrace();
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_IOException, exception.getMessage());
} finally {
if (httpEntity != null) {
EntityUtils.consumeQuietly(httpEntity);
}
}
return responseBody;
}
/*** execute POST request ***/
public static String execute(String uri, String data, String token) throws SQLException {
HttpEntity httpEntity = null;
String responseBody = "";
try {
HttpEntityEnclosingRequestBase method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME);
method.setHeader(HTTP.CONTENT_TYPE, "text/plain");
method.setHeader(HTTP.CONN_DIRECTIVE, HTTP.CONN_KEEP_ALIVE);
method.setHeader("Authorization", "Taosd " + token);
method.setEntity(new StringEntity(data, StandardCharsets.UTF_8));
@ -88,46 +119,31 @@ public class HttpClientPoolUtil {
CloseableHttpResponse httpResponse = httpClient.execute(method, context);
httpEntity = httpResponse.getEntity();
if (httpEntity != null) {
responseBody = EntityUtils.toString(httpEntity, "UTF-8");
responseBody = EntityUtils.toString(httpEntity, StandardCharsets.UTF_8);
}
} catch (Exception e) {
if (method != null) {
method.abort();
}
new Exception("execute post request exception, url:" + uri + ", exception:" + e.toString() + ", cost time(ms):" + (System.currentTimeMillis() - startTime)).printStackTrace();
} catch (ClientProtocolException e) {
e.printStackTrace();
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_Protocol_Exception, e.getMessage());
} catch (IOException exception) {
exception.printStackTrace();
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_IOException, exception.getMessage());
} finally {
if (httpEntity != null) {
try {
EntityUtils.consumeQuietly(httpEntity);
} catch (Exception e) {
new Exception("close response exception, url:" + uri + ", exception:" + e.toString() + ", cost time(ms):" + (System.currentTimeMillis() - startTime)).printStackTrace();
}
EntityUtils.consumeQuietly(httpEntity);
}
}
return responseBody;
}
/**
 * Create an HTTP request.
 *
 * @param uri         request url
 * @param methodName  request method type
 * @param contentType content type
 * @param timeout     timeout in seconds
 * @return HttpRequestBase
 * @author lisc
 */
private static HttpRequestBase getRequest(String uri, String methodName, String contentType, int timeout) {
if (httpClient == null) {
initPools();
}
/*** create http request ***/
private static HttpRequestBase getRequest(String uri, String methodName) {
HttpRequestBase method;
if (timeout <= 0) {
timeout = DEFAULT_TIME_OUT;
}
RequestConfig requestConfig = RequestConfig.custom().setSocketTimeout(timeout * 1000)
.setConnectTimeout(timeout * 1000).setConnectionRequestTimeout(timeout * 1000)
.setExpectContinueEnabled(false).build();
RequestConfig requestConfig = RequestConfig.custom()
.setSocketTimeout(DEFAULT_TIME_OUT * 1000)
.setConnectTimeout(DEFAULT_TIME_OUT * 1000)
.setConnectionRequestTimeout(DEFAULT_TIME_OUT * 1000)
.setExpectContinueEnabled(false)
.build();
if (HttpPut.METHOD_NAME.equalsIgnoreCase(methodName)) {
method = new HttpPut(uri);
} else if (HttpPost.METHOD_NAME.equalsIgnoreCase(methodName)) {
@ -137,52 +153,10 @@ public class HttpClientPoolUtil {
} else {
method = new HttpPost(uri);
}
if (contentType == null || contentType.isEmpty() || contentType.replaceAll("\\s", "").isEmpty()) {
contentType = DEFAULT_CONTENT_TYPE;
}
method.addHeader("Content-Type", contentType);
method.addHeader("Accept", contentType);
method.addHeader(HTTP.CONTENT_TYPE, DEFAULT_CONTENT_TYPE);
method.addHeader("Accept", DEFAULT_CONTENT_TYPE);
method.setConfig(requestConfig);
return method;
}
/**
 * Execute an HTTP GET request.
 *
 * @param uri request address
 * @return responseBody
 */
public static String execute(String uri) {
long startTime = System.currentTimeMillis();
HttpEntity httpEntity = null;
HttpRequestBase method = null;
String responseBody = "";
try {
if (httpClient == null) {
initPools();
}
method = getRequest(uri, HttpGet.METHOD_NAME, DEFAULT_CONTENT_TYPE, 0);
HttpContext context = HttpClientContext.create();
CloseableHttpResponse httpResponse = httpClient.execute(method, context);
httpEntity = httpResponse.getEntity();
if (httpEntity != null) {
responseBody = EntityUtils.toString(httpEntity, "UTF-8");
}
} catch (Exception e) {
if (method != null) {
method.abort();
}
e.printStackTrace();
} finally {
if (httpEntity != null) {
try {
EntityUtils.consumeQuietly(httpEntity);
} catch (Exception e) {
new Exception("close response exception, url:" + uri + ", exception:" + e.toString() + ",cost time(ms):" + (System.currentTimeMillis() - startTime)).printStackTrace();
}
}
}
return responseBody;
}
}

View File

@ -245,7 +245,7 @@ static void* telemetryThread(void* param) {
clock_gettime(CLOCK_REALTIME, &end);
end.tv_sec += 300; // wait 5 minutes before send first report
setThreadName("telemetryThrd");
setThreadName("telemetry");
while (!tsExit) {
int r = 0;

View File

@ -118,10 +118,11 @@ static void *dnodeProcessReadQueue(void *wparam) {
SVReadMsg * pRead;
int32_t qtype;
void * pVnode;
char name[16];
memset(name, 0, 16);
snprintf(name, 16, "%s-dnReadQ", pPool->name);
char* threadname = strcmp(pPool->name, "vquery") == 0? "dnodeQueryQ":"dnodeFetchQ";
char name[16] = {0};
snprintf(name, tListLen(name), "%s", threadname);
setThreadName(name);
while (1) {

View File

@ -90,7 +90,6 @@ static void *dnodeOpenVnode(void *param) {
char stepDesc[TSDB_STEP_DESC_LEN] = {0};
dDebug("thread:%d, start to open %d vnodes", pThread->threadIndex, pThread->vnodeNum);
setThreadName("dnodeOpenVnode");
for (int32_t v = 0; v < pThread->vnodeNum; ++v) {

View File

@ -810,7 +810,7 @@ typedef struct SMultiTableMeta {
int32_t contLen;
uint8_t compressed; // denote if compressed or not
uint32_t rawLen; // size before compress
uint8_t metaClone; // make meta clone after retrieve meta from mnode
uint8_t metaClone; // make meta clone after retrieve meta from mnode
char meta[];
} SMultiTableMeta;

View File

@ -139,6 +139,8 @@ typedef struct {
#define IS_VALID_USMALLINT(_t) ((_t) >= 0 && (_t) < UINT16_MAX)
#define IS_VALID_UINT(_t) ((_t) >= 0 && (_t) < UINT32_MAX)
#define IS_VALID_UBIGINT(_t) ((_t) >= 0 && (_t) < UINT64_MAX)
#define IS_VALID_FLOAT(_t) ((_t) >= -FLT_MAX && (_t) <= FLT_MAX)
#define IS_VALID_DOUBLE(_t) ((_t) >= -DBL_MAX && (_t) <= DBL_MAX)
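
These range checks double as NaN/Inf guards, since any comparison with NaN is false and infinities fall outside [-DBL_MAX, DBL_MAX]; a quick standalone check:

#include <float.h>
#include <math.h>
#include <stdio.h>

#define IS_VALID_DOUBLE(_t) ((_t) >= -DBL_MAX && (_t) <= DBL_MAX)

int main() {
  printf("%d %d %d\n",
         IS_VALID_DOUBLE(1.5),       /* 1 */
         IS_VALID_DOUBLE(INFINITY),  /* 0: outside the range */
         IS_VALID_DOUBLE(NAN));      /* 0: NaN fails every comparison */
  return 0;
}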
static FORCE_INLINE bool isNull(const char *val, int32_t type) {
switch (type) {

View File

@ -35,6 +35,8 @@ struct Command {
};
extern void backspaceChar(Command *cmd);
extern void clearLineBefore(Command *cmd);
extern void clearLineAfter(Command *cmd);
extern void deleteChar(Command *cmd);
extern void moveCursorLeft(Command *cmd);
extern void moveCursorRight(Command *cmd);

View File

@ -102,6 +102,28 @@ void backspaceChar(Command *cmd) {
}
}
void clearLineBefore(Command *cmd) {
assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset);
clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size);
memmove(cmd->command, cmd->command + cmd->cursorOffset,
cmd->commandSize - cmd->cursorOffset);
cmd->commandSize -= cmd->cursorOffset;
cmd->cursorOffset = 0;
cmd->screenOffset = 0;
cmd->endOffset = cmd->commandSize;
showOnScreen(cmd);
}
void clearLineAfter(Command *cmd) {
assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset);
clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size);
cmd->commandSize -= cmd->endOffset - cmd->cursorOffset;
cmd->endOffset = cmd->cursorOffset;
showOnScreen(cmd);
}
void deleteChar(Command *cmd) {
assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset);

View File

@ -238,10 +238,16 @@ int32_t shellReadCommand(TAOS *con, char *command) {
updateBuffer(&cmd);
}
break;
case 11: // Ctrl + K;
clearLineAfter(&cmd);
break;
case 12: // Ctrl + L;
system("clear");
showOnScreen(&cmd);
break;
case 21: // Ctrl + U
clearLineBefore(&cmd);
break;
}
} else if (c == '\033') {
c = getchar();

View File

@ -238,10 +238,16 @@ int32_t shellReadCommand(TAOS *con, char *command) {
updateBuffer(&cmd);
}
break;
case 11: // Ctrl + K;
clearLineAfter(&cmd);
break;
case 12: // Ctrl + L;
system("clear");
showOnScreen(&cmd);
break;
case 21: // Ctrl + U;
clearLineBefore(&cmd);
break;
}
} else if (c == '\033') {
c = (char)getchar();

View File

@ -92,7 +92,6 @@ extern char configDir[];
#define MAX_SUPER_TABLE_COUNT 200
#define MAX_QUERY_SQL_COUNT 100
#define MAX_QUERY_SQL_LENGTH BUFFER_SIZE
#define MAX_DATABASE_COUNT 256
#define INPUT_BUF_LEN 256
@ -383,7 +382,7 @@ typedef struct SpecifiedQueryInfo_S {
uint64_t queryTimes;
bool subscribeRestart;
int subscribeKeepProgress;
char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE+1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
int resubAfterConsume[MAX_QUERY_SQL_COUNT];
int endAfterConsume[MAX_QUERY_SQL_COUNT];
@ -406,7 +405,7 @@ typedef struct SuperQueryInfo_S {
int64_t childTblCount;
char childTblPrefix[TBNAME_PREFIX_LEN]; // 20 characters reserved for seq
int sqlCount;
char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE+1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
int resubAfterConsume;
int endAfterConsume;
@ -648,7 +647,7 @@ static FILE * g_fpOfInsertResult = NULL;
fprintf(stderr, "PERF: "fmt, __VA_ARGS__); } while(0)
#define errorPrint(fmt, ...) \
do { fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); } while(0)
do { fprintf(stderr, " \033[31m"); fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); fprintf(stderr, " \033[0m"); } while(0)
// for strncpy buffer overflow
#define min(a, b) (((a) < (b)) ? (a) : (b))
@ -1253,14 +1252,14 @@ static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) {
// fetch the records row by row
while((row = taos_fetch_row(res))) {
if (totalLen >= 100*1024*1024 - 32000) {
if (totalLen >= (100*1024*1024 - HEAD_BUFF_LEN*2)) {
if (strlen(pThreadInfo->filePath) > 0)
appendResultBufToFile(databuf, pThreadInfo);
totalLen = 0;
memset(databuf, 0, 100*1024*1024);
}
num_rows++;
char temp[16000] = {0};
char temp[HEAD_BUFF_LEN] = {0};
int len = taos_print_row(temp, row, fields, num_fields);
len += sprintf(temp + len, "\n");
//printf("query result:%s\n", temp);
@ -2165,15 +2164,15 @@ static void printfDbInfoForQueryToFile(
}
static void printfQuerySystemInfo(TAOS * taos) {
char filename[MAX_QUERY_SQL_LENGTH+1] = {0};
char buffer[MAX_QUERY_SQL_LENGTH+1] = {0};
char filename[BUFFER_SIZE+1] = {0};
char buffer[BUFFER_SIZE+1] = {0};
TAOS_RES* res;
time_t t;
struct tm* lt;
time(&t);
lt = localtime(&t);
snprintf(filename, MAX_QUERY_SQL_LENGTH, "querySystemInfo-%d-%d-%d %d:%d:%d",
snprintf(filename, BUFFER_SIZE, "querySystemInfo-%d-%d-%d %d:%d:%d",
lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min,
lt->tm_sec);
@ -2205,12 +2204,12 @@ static void printfQuerySystemInfo(TAOS * taos) {
printfDbInfoForQueryToFile(filename, dbInfos[i], i);
// show db.vgroups
snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.vgroups;", dbInfos[i]->name);
snprintf(buffer, BUFFER_SIZE, "show %s.vgroups;", dbInfos[i]->name);
res = taos_query(taos, buffer);
xDumpResultToFile(filename, res);
// show db.stables
snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.stables;", dbInfos[i]->name);
snprintf(buffer, BUFFER_SIZE, "show %s.stables;", dbInfos[i]->name);
res = taos_query(taos, buffer);
xDumpResultToFile(filename, res);
free(dbInfos[i]);
@ -4549,7 +4548,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
goto PARSE_OVER;
}
tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j],
sqlStr->valuestring, MAX_QUERY_SQL_LENGTH);
sqlStr->valuestring, BUFFER_SIZE);
// default value is -1, which mean infinite loop
g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1;
@ -4771,7 +4770,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
goto PARSE_OVER;
}
tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring,
MAX_QUERY_SQL_LENGTH);
BUFFER_SIZE);
cJSON *result = cJSON_GetObjectItem(sql, "result");
if (result != NULL && result->type == cJSON_String
@ -5156,6 +5155,8 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k)
if (0 != taos_stmt_execute(pThreadInfo->stmt)) {
errorPrint("%s() LN%d, failied to execute insert statement. reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(pThreadInfo->stmt));
fprintf(stderr, "\n\033[31m === Please reduce batch number if WAL size exceeds limit. ===\033[0m\n\n");
exit(-1);
}
affectedRows = k;
@ -7423,14 +7424,14 @@ static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) {
tstrncpy(outSql, inSql, pos - inSql + 1);
//printf("1: %s\n", outSql);
strncat(outSql, subTblName, MAX_QUERY_SQL_LENGTH - 1);
strncat(outSql, subTblName, BUFFER_SIZE - 1);
//printf("2: %s\n", outSql);
strncat(outSql, pos+strlen(sourceString), MAX_QUERY_SQL_LENGTH - 1);
strncat(outSql, pos+strlen(sourceString), BUFFER_SIZE - 1);
//printf("3: %s\n", outSql);
}
static void *superTableQuery(void *sarg) {
char sqlstr[MAX_QUERY_SQL_LENGTH];
char sqlstr[BUFFER_SIZE];
threadInfo *pThreadInfo = (threadInfo *)sarg;
setThreadName("superTableQuery");
@ -7733,7 +7734,7 @@ static TAOS_SUB* subscribeImpl(
static void *superSubscribe(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg;
char subSqlstr[MAX_QUERY_SQL_LENGTH];
char subSqlstr[BUFFER_SIZE];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0};
uint64_t tsubSeq;

View File

@ -1812,12 +1812,8 @@ static int32_t getVgroupInfoLength(SSTableVgroupMsg* pInfo, int32_t numOfTable)
}
static char* serializeVgroupInfo(SSTableObj *pTable, char* name, char* msg, SMnodeMsg* pMsgBody, void* handle) {
SName sn = {0};
tNameFromString(&sn, name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
const char* tableName = tNameGetTableName(&sn);
strncpy(msg, tableName, TSDB_TABLE_NAME_LEN);
msg += TSDB_TABLE_NAME_LEN;
strncpy(msg, name, TSDB_TABLE_FNAME_LEN);
msg += TSDB_TABLE_FNAME_LEN;
if (pTable->vgHash == NULL) {
mDebug("msg:%p, app:%p stable:%s, no vgroup exist while get stable vgroup info", pMsgBody, handle, name);

View File

@ -199,7 +199,7 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
for (int32_t i = dataFields; i >= 0; i--) {
httpJsonItemToken(jsonBuf);
if (row[i] == NULL) {
if (row == NULL || i >= num_fields || row[i] == NULL) {
httpJsonOriginString(jsonBuf, "null", 4);
continue;
}

View File

@ -264,8 +264,7 @@ void httpJsonUInt64(JsonBuf* buf, uint64_t num) {
void httpJsonTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) {
char ts[35] = {0};
struct tm* ptm;
int32_t fractionLen;
char* format = NULL;
time_t quot = 0;
@ -301,8 +300,9 @@ void httpJsonTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) {
assert(false);
}
ptm = localtime(&quot);
int32_t length = (int32_t)strftime(ts, 35, "%Y-%m-%d %H:%M:%S", ptm);
struct tm ptm = {0};
localtime_r(&quot, &ptm);
int32_t length = (int32_t)strftime(ts, 35, "%Y-%m-%d %H:%M:%S", &ptm);
length += snprintf(ts + length, fractionLen, format, mod);
httpJsonString(buf, ts, length);
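
The switch from localtime() to localtime_r() matters because localtime() returns a pointer to shared static storage; a minimal illustration of the thread-safe variant used by the fix:

#include <stdio.h>
#include <time.h>

int main() {
  time_t now = time(NULL);
  struct tm tmbuf = {0};               /* caller-owned, no shared state */
  localtime_r(&now, &tmbuf);
  char ts[35] = {0};
  int len = (int)strftime(ts, sizeof(ts), "%Y-%m-%d %H:%M:%S", &tmbuf);
  printf("%.*s\n", len, ts);
  return 0;
}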

View File

@ -114,7 +114,7 @@ int32_t monStartSystem() {
static void *monThreadFunc(void *param) {
monDebug("starting to initialize monitor module ...");
setThreadName("monThrd");
setThreadName("monitor");
while (1) {
static int32_t accessTimes = 0;

View File

@ -216,7 +216,7 @@ typedef struct SQueryAttr {
bool simpleAgg;
bool pointInterpQuery; // point interpolation query
bool needReverseScan; // need reverse scan
bool distinctTag; // distinct tag query
bool distinct; // distinct query or not
bool stateWindow; // window State on sub/normal table
bool createFilterOperator; // if filter operator is needed
int32_t interBufSize; // intermediate buffer sizse
@ -519,6 +519,7 @@ typedef struct SDistinctOperatorInfo {
bool recordNullVal; //has already record the null value, no need to try again
int64_t threshold;
int64_t outputCapacity;
int32_t colIndex;
} SDistinctOperatorInfo;
struct SGlobalMerger;

View File

@ -138,7 +138,8 @@ typedef struct SQueryInfo {
int64_t vgroupLimit; // table limit in case of super table projection query + global order + limit
int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX
bool distinctTag; // distinct tag or not
bool distinct; // distinct tag or not
bool onlyHasTagCond;
int32_t round; // 0/1/....
int32_t bufLen;
char* buf;

View File

@ -97,12 +97,47 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) {
#define GET_NUM_OF_TABLEGROUP(q) taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList)
#define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0)
#define TSKEY_MAX_ADD(a,b) \
do { \
if (a < 0) { a = a + b; break;} \
if (sizeof(a) == sizeof(int32_t)) { \
if((b) > 0 && ((b) >= INT32_MAX - (a))){\
a = INT32_MAX; \
} else { \
a = a + b; \
} \
} else { \
if((b) > 0 && ((b) >= INT64_MAX - (a))){\
a = INT64_MAX; \
} else { \
a = a + b; \
} \
} \
} while(0)
#define TSKEY_MIN_SUB(a,b) \
do { \
if (a >= 0) { a = a + b; break;} \
if (sizeof(a) == sizeof(int32_t)){ \
if((b) < 0 && ((b) <= INT32_MIN - (a))){\
a = INT32_MIN; \
} else { \
a = a + b; \
} \
} else { \
if((b) < 0 && ((b) <= INT64_MIN-(a))) {\
a = INT64_MIN; \
} else { \
a = a + b; \
} \
} \
} while (0)
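
A standalone check of the saturating behavior (assuming TSKEY is int64_t, as in the 64-bit branch of the macro):

#include <stdint.h>
#include <stdio.h>

int main() {
  int64_t key = INT64_MAX - 1;
  int64_t b = 10;
  /* same test as TSKEY_MAX_ADD: clamp instead of overflowing (UB) */
  if (b > 0 && b >= INT64_MAX - key) key = INT64_MAX;
  else key = key + b;
  printf("%lld\n", (long long)key);  /* 9223372036854775807 */
  return 0;
}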
uint64_t queryHandleId = 0;
int32_t getMaximumIdleDurationSec() {
return tsShellActivityTimer * 2;
}
int64_t genQueryId(void) {
int64_t uid = 0;
int64_t did = tsDnodeId;
@ -3116,7 +3151,9 @@ void setTagValue(SOperatorInfo* pOperatorInfo, void *pTable, SQLFunctionCtx* pCt
|| pLocalExprInfo->base.resType == TSDB_DATA_TYPE_TIMESTAMP) {
memcpy(pRuntimeEnv->tagVal + offset, &pCtx[idx].tag.i64, pLocalExprInfo->base.resBytes);
} else {
memcpy(pRuntimeEnv->tagVal + offset, pCtx[idx].tag.pz, pCtx[idx].tag.nLen);
if (pCtx[idx].tag.pz != NULL) {
memcpy(pRuntimeEnv->tagVal + offset, pCtx[idx].tag.pz, pCtx[idx].tag.nLen);
}
}
offset += pLocalExprInfo->base.resBytes;
@ -3926,8 +3963,8 @@ static void toSSDataBlock(SGroupResInfo *pGroupResInfo, SQueryRuntimeEnv* pRunti
// refactor : extract method
SColumnInfoData* pInfoData = taosArrayGet(pBlock->pDataBlock, 0);
if (pInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
// the extra condition (pBlock->info.rows >= 1) just keeps the runtime happy
if (pInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP && pBlock->info.rows >= 1) {
STimeWindow* w = &pBlock->info.window;
w->skey = *(int64_t*)pInfoData->pData;
w->ekey = *(int64_t*)(((char*)pInfoData->pData) + TSDB_KEYSIZE * (pBlock->info.rows - 1));
@ -5265,7 +5302,15 @@ static SSDataBlock* doSTableAggregate(void* param, bool* newgroup) {
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order);
TSKEY key = QUERY_IS_ASC_QUERY(pQueryAttr)? pBlock->info.window.ekey + 1:pBlock->info.window.skey-1;
TSKEY key = 0;
if (QUERY_IS_ASC_QUERY(pQueryAttr)) {
key = pBlock->info.window.ekey;
TSKEY_MAX_ADD(key, 1);
} else {
key = pBlock->info.window.skey;
TSKEY_MIN_SUB(key, -1);
}
setExecutionContext(pRuntimeEnv, pInfo, pOperator->numOfOutput, pRuntimeEnv->current->groupIndex, key);
doAggregateImpl(pOperator, pQueryAttr->window.skey, pInfo->pCtx, pBlock);
}
@ -6471,7 +6516,7 @@ static SSDataBlock* doTagScan(void* param, bool* newgroup) {
pOperator->status = OP_EXEC_DONE;
qDebug("QInfo:0x%"PRIx64" create count(tbname) query, res:%d rows:1", GET_QID(pRuntimeEnv), count);
} else { // return only the tags|table name etc.
SExprInfo* pExprInfo = pOperator->pExpr; // todo use the column list instead of exprinfo
SExprInfo* pExprInfo = &pOperator->pExpr[0]; // todo use the column list instead of exprinfo
count = 0;
while(pInfo->curPos < pInfo->totalTables && count < maxNumOfTables) {
@ -6546,9 +6591,11 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
return NULL;
}
SDistinctOperatorInfo* pInfo = pOperator->info;
SSDataBlock* pRes = pInfo->pRes;
pRes->info.rows = 0;
SSDataBlock* pBlock = NULL;
while(1) {
@ -6557,13 +6604,25 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
if (pBlock == NULL) {
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
pOperator->status = OP_EXEC_DONE;
break;
}
if (pInfo->colIndex == -1) {
for (int i = 0; i < taosArrayGetSize(pBlock->pDataBlock); i++) {
SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, i);
if (pColDataInfo->info.colId == pOperator->pExpr[0].base.resColId) {
pInfo->colIndex = i;
break;
}
}
}
if (pInfo->colIndex == -1) {
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
pOperator->status = OP_EXEC_DONE;
return NULL;
}
assert(pBlock->info.numOfCols == 1);
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, 0);
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pInfo->colIndex);
int16_t bytes = pColInfoData->info.bytes;
int16_t type = pColInfoData->info.type;
@ -6615,7 +6674,8 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
SDistinctOperatorInfo* pInfo = calloc(1, sizeof(SDistinctOperatorInfo));
pInfo->colIndex = -1;
pInfo->threshold = 10000000; // distinct result threshold
pInfo->outputCapacity = 4096;
pInfo->pSet = taosHashInit(64, taosGetDefaultHashFunction(pExpr->base.colType), false, HASH_NO_LOCK);
pInfo->pRes = createOutputBuf(pExpr, numOfOutput, (int32_t) pInfo->outputCapacity);
@ -6630,6 +6690,7 @@ SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperat
pOperator->info = pInfo;
pOperator->pRuntimeEnv = pRuntimeEnv;
pOperator->exec = hashDistinct;
pOperator->pExpr = pExpr;
pOperator->cleanup = destroyDistinctOperatorInfo;
appendUpstream(pOperator, upstream);

View File

@ -104,7 +104,7 @@ static SQueryNode* doAddTableColumnNode(SQueryInfo* pQueryInfo, STableMetaInfo*
int32_t num = (int32_t) taosArrayGetSize(pExprs);
SQueryNode* pNode = createQueryNode(QNODE_TAGSCAN, "TableTagScan", NULL, 0, pExprs->pData, num, info, NULL);
if (pQueryInfo->distinctTag) {
if (pQueryInfo->distinct) {
pNode = createQueryNode(QNODE_DISTINCT, "Distinct", &pNode, 1, pExprs->pData, num, info, NULL);
}
@ -551,9 +551,11 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
int32_t op = 0;
if (onlyQueryTags(pQueryAttr)) { // do nothing for tags query
op = OP_TagScan;
taosArrayPush(plan, &op);
if (pQueryAttr->distinctTag) {
if (onlyQueryTags(pQueryAttr)) {
op = OP_TagScan;
taosArrayPush(plan, &op);
}
if (pQueryAttr->distinct) {
op = OP_Distinct;
taosArrayPush(plan, &op);
}
@ -630,8 +632,13 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
} else {
op = OP_Project;
taosArrayPush(plan, &op);
if (pQueryAttr->distinct) {
op = OP_Distinct;
taosArrayPush(plan, &op);
}
}
}
if (pQueryAttr->limit.limit > 0 || pQueryAttr->limit.offset > 0) {
op = OP_Limit;
@ -651,7 +658,7 @@ SArray* createGlobalMergePlan(SQueryAttr* pQueryAttr) {
int32_t op = OP_MultiwayMergeSort;
taosArrayPush(plan, &op);
if (pQueryAttr->distinctTag) {
if (pQueryAttr->distinct) {
op = OP_Distinct;
taosArrayPush(plan, &op);
}

View File

@ -342,6 +342,7 @@ int32_t scriptEnvPoolInit() {
env->lua_state = createLuaEnv();
tdListAppend(pool->scriptEnvs, (void *)(&env));
}
pool->mSize = size;
pool->cSize = size;
return 0;

View File

@ -529,10 +529,9 @@ static void *taosProcessTcpData(void *param) {
SFdObj *pFdObj;
struct epoll_event events[maxEvents];
SRecvInfo recvInfo;
char name[16];
memset(name, 0, sizeof(name));
snprintf(name, 16, "%s-tcpData", pThreadObj->label);
char name[16] = {0};
snprintf(name, tListLen(name), "%s-tcp", pThreadObj->label);
setThreadName(name);
while (1) {

View File

@ -48,8 +48,6 @@ static void *sendRequest(void *param) {
SInfo *pInfo = (SInfo *)param;
SRpcMsg rpcMsg = {0};
setThreadName("sendCliReq");
tDebug("thread:%d, start to send request", pInfo->index);
while ( pInfo->numOfReqs == 0 || pInfo->num < pInfo->numOfReqs) {

View File

@ -40,9 +40,7 @@ static int terror = 0;
static void *sendRequest(void *param) {
SInfo *pInfo = (SInfo *)param;
SRpcMsg rpcMsg, rspMsg;
setThreadName("sendSrvReq");
tDebug("thread:%d, start to send request", pInfo->index);
while ( pInfo->numOfReqs == 0 || pInfo->num < pInfo->numOfReqs) {

View File

@ -415,7 +415,6 @@ static int32_t syncRetrieveDataStepByStep(SSyncPeer *pPeer) {
}
void *syncRetrieveData(void *param) {
setThreadName("syncRetrievData");
int64_t rid = (int64_t)param;
SSyncPeer *pPeer = syncAcquirePeer(rid);
if (pPeer == NULL) {

View File

@ -48,8 +48,6 @@ void *sendRequest(void *param) {
SInfo * pInfo = (SInfo *)param;
SRpcMsg rpcMsg = {0};
setThreadName("sendCliReq");
uDebug("thread:%d, start to send request", pInfo->index);
while (pInfo->numOfReqs == 0 || pInfo->num < pInfo->numOfReqs) {

View File

@ -178,7 +178,7 @@ void *processWriteQueue(void *param) {
int type;
void *item;
setThreadName("writeQ");
setThreadName("syncWrite");
while (1) {
int ret = taosReadQitem(qhandle, &type, &item);

View File

@ -722,7 +722,8 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea
// OK, let's load rows backward to find a not-null column
for (int32_t rowId = pBlock->numOfRows - 1; rowId >= 0; rowId--) {
SDataCol *pDataCol = pReadh->pDCols[0]->cols + i;
tdAppendColVal(memRowDataBody(row), tdGetColDataOfRow(pDataCol, rowId), pCol->type, pCol->offset);
const void* pColData = tdGetColDataOfRow(pDataCol, rowId);
tdAppendColVal(memRowDataBody(row), pColData, pCol->type, pCol->offset);
//SDataCol *pDataCol = readh.pDCols[0]->cols + j;
void *value = tdGetRowDataOfCol(memRowDataBody(row), (int8_t)pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset);
if (isNull(value, pCol->type)) {
@ -735,11 +736,12 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea
continue;
}
// save not-null column
uint16_t bytes = IS_VAR_DATA_TYPE(pCol->type) ? varDataTLen(pColData) : pCol->bytes;
SDataCol *pLastCol = &(pTable->lastCols[idx]);
pLastCol->pData = malloc(pCol->bytes);
pLastCol->bytes = pCol->bytes;
pLastCol->pData = malloc(bytes);
pLastCol->bytes = bytes;
pLastCol->colId = pCol->colId;
memcpy(pLastCol->pData, value, pCol->bytes);
memcpy(pLastCol->pData, value, bytes);
// save row ts(in column 0)
pDataCol = pReadh->pDCols[0]->cols + 0;
@ -991,4 +993,4 @@ int tsdbCacheLastData(STsdbRepo *pRepo, STsdbCfg* oldCfg) {
}
return 0;
}
}

View File

@ -1019,7 +1019,7 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro
if (isDataRow) {
value = tdGetRowDataOfCol(memRowDataBody(row), (int8_t)pTCol->type,
TD_DATA_ROW_HEAD_SIZE + pSchema->columns[j].offset);
TD_DATA_ROW_HEAD_SIZE + pTCol->offset);
} else {
// SKVRow
SColIdx *pColIdx = tdGetKVRowIdxOfCol(memRowKvBody(row), pTCol->colId);
@ -1034,14 +1034,17 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro
SDataCol *pDataCol = &(pLatestCols[idx]);
if (pDataCol->pData == NULL) {
pDataCol->pData = malloc(pSchema->columns[j].bytes);
pDataCol->bytes = pSchema->columns[j].bytes;
} else if (pDataCol->bytes < pSchema->columns[j].bytes) {
pDataCol->pData = realloc(pDataCol->pData, pSchema->columns[j].bytes);
pDataCol->bytes = pSchema->columns[j].bytes;
pDataCol->pData = malloc(pTCol->bytes);
pDataCol->bytes = pTCol->bytes;
} else if (pDataCol->bytes < pTCol->bytes) {
pDataCol->pData = realloc(pDataCol->pData, pTCol->bytes);
pDataCol->bytes = pTCol->bytes;
}
memcpy(pDataCol->pData, value, pDataCol->bytes);
// the actual value size
uint16_t bytes = IS_VAR_DATA_TYPE(pTCol->type) ? varDataTLen(value) : pTCol->bytes;
// the actual data size CANNOT be larger than the column size
assert(pTCol->bytes >= bytes);
memcpy(pDataCol->pData, value, bytes);
//tsdbInfo("updateTableLatestColumn vgId:%d cache column %d for %d,%s", REPO_ID(pRepo), j, pDataCol->bytes, (char*)pDataCol->pData);
pDataCol->ts = memRowKey(row);
}
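
A generic analogue of the layout this fix assumes (a length prefix plus payload, which is the idea behind varDataTLen): only prefix + payload bytes need to be copied, not the column's declared maximum.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main() {
  enum { COL_BYTES = 32 };                 /* declared column size */
  char cell[sizeof(uint16_t) + COL_BYTES];
  const char *val = "abc";
  uint16_t len = (uint16_t)strlen(val);
  memcpy(cell, &len, sizeof(len));         /* length prefix */
  memcpy(cell + sizeof(len), val, len);    /* payload */
  size_t tlen = sizeof(len) + len;         /* total: 5 bytes, not 32 */
  printf("copy %zu bytes, not %d\n", tlen, COL_BYTES);
  return 0;
}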

View File

@ -640,7 +640,7 @@ static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGr
size_t numOfGroup = taosArrayGetSize(pGroupList->pGroupList);
STableGroupInfo* pNew = calloc(1, sizeof(STableGroupInfo));
pNew->pGroupList = taosArrayInit(numOfGroup, sizeof(SArray));
pNew->pGroupList = taosArrayInit(numOfGroup, POINTER_BYTES);
for(int32_t i = 0; i < numOfGroup; ++i) {
SArray* oneGroup = taosArrayGetP(pGroupList->pGroupList, i);
@ -3383,11 +3383,13 @@ static int32_t tableGroupComparFn(const void *p1, const void *p2, const void *pa
type = TSDB_DATA_TYPE_BINARY;
bytes = tGetTbnameColumnSchema()->bytes;
} else {
STColumn* pCol = schemaColAt(pTableGroupSupp->pTagSchema, colIndex);
bytes = pCol->bytes;
type = pCol->type;
f1 = tdGetKVRowValOfCol(pTable1->tagVal, pCol->colId);
f2 = tdGetKVRowValOfCol(pTable2->tagVal, pCol->colId);
if (pTableGroupSupp->pTagSchema && colIndex < pTableGroupSupp->pTagSchema->numOfCols) {
STColumn* pCol = schemaColAt(pTableGroupSupp->pTagSchema, colIndex);
bytes = pCol->bytes;
type = pCol->type;
f1 = tdGetKVRowValOfCol(pTable1->tagVal, pCol->colId);
f2 = tdGetKVRowValOfCol(pTable2->tagVal, pCol->colId);
}
}
// this tags value may be NULL

View File

@ -123,10 +123,9 @@ void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen);
* @param keyLen
* @param fp
* @param d
* @param dsize
* @return
*/
void* taosHashGetClone(SHashObj *pHashObj, const void *key, size_t keyLen, void (*fp)(void *), void* d, size_t dsize);
void* taosHashGetClone(SHashObj *pHashObj, const void *key, size_t keyLen, void (*fp)(void *), void* d);
/**
* remove item with the specified key

View File

@ -294,10 +294,10 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da
}
void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen) {
return taosHashGetClone(pHashObj, key, keyLen, NULL, NULL, 0);
return taosHashGetClone(pHashObj, key, keyLen, NULL, NULL);
}
void* taosHashGetClone(SHashObj *pHashObj, const void *key, size_t keyLen, void (*fp)(void *), void* d, size_t dsize) {
void* taosHashGetClone(SHashObj *pHashObj, const void *key, size_t keyLen, void (*fp)(void *), void* d) {
if (taosHashTableEmpty(pHashObj) || keyLen == 0 || key == NULL) {
return NULL;
}

View File

@ -132,11 +132,11 @@ static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheDataNo
return;
}
pCacheObj->totalSize -= pNode->size;
atomic_sub_fetch_64(&pCacheObj->totalSize, pNode->size);
int32_t size = (int32_t)taosHashGetSize(pCacheObj->pHashTable);
assert(size > 0);
uDebug("cache:%s, key:%p, %p is destroyed from cache, size:%dbytes, totalNum:%d size:%" PRId64 "bytes",
uDebug("cache:%s, key:%p, %p is destroyed from cache, size:%dbytes, total num:%d size:%" PRId64 "bytes",
pCacheObj->name, pNode->key, pNode->data, pNode->size, size - 1, pCacheObj->totalSize);
if (pCacheObj->freeFp) {
@ -252,6 +252,7 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v
pCacheObj->freeFp(p->data);
}
atomic_sub_fetch_64(&pCacheObj->totalSize, p->size);
tfree(p);
} else {
taosAddToTrashcan(pCacheObj, p);
@ -302,7 +303,7 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen
}
SCacheDataNode* ptNode = NULL;
taosHashGetClone(pCacheObj->pHashTable, key, keyLen, incRefFn, &ptNode, sizeof(void*));
taosHashGetClone(pCacheObj->pHashTable, key, keyLen, incRefFn, &ptNode);
void* pData = (ptNode != NULL)? ptNode->data:NULL;
@ -679,7 +680,7 @@ void* taosCacheTimedRefresh(void *handle) {
assert(pCacheArrayList != NULL);
uDebug("cache refresh thread starts");
setThreadName("cacheTimedRefre");
setThreadName("cacheRefresh");
const int32_t SLEEP_DURATION = 500; //500 ms
int64_t count = 0;

View File

@ -159,7 +159,7 @@ int tsCompressINTImp(const char *const input, const int nelements, char *const o
break;
}
// Get difference.
if (!safeInt64Add(curr_value, -prev_value)) goto _copy_and_exit;
if (!safeInt64Add(curr_value, -prev_value_tmp)) goto _copy_and_exit;
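// the overflow check must use prev_value_tmp, the operand actually subtracted on the next line.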
int64_t diff = curr_value - prev_value_tmp;
// Zigzag encode the value.
@ -993,4 +993,4 @@ int tsDecompressDoubleLossyImp(const char * input, int compressedSize, const int
// decompressed with sz
return tdszDecompress(SZ_DOUBLE, input + 1, compressedSize - 1, nelements, output);
}
#endif
#endif

View File

@ -178,8 +178,6 @@ static void *taosThreadToOpenNewFile(void *param) {
char keepName[LOG_FILE_NAME_LEN + 20];
sprintf(keepName, "%s.%d", tsLogObj.logName, tsLogObj.flag);
setThreadName("openNewFile");
tsLogObj.flag ^= 1;
tsLogObj.lines = 0;
char name[LOG_FILE_NAME_LEN + 20];
@ -689,12 +687,9 @@ static void taosWriteLog(SLogBuff *tLogBuff) {
static void *taosAsyncOutputLog(void *param) {
SLogBuff *tLogBuff = (SLogBuff *)param;
setThreadName("asyncOutputLog");
setThreadName("log");
while (1) {
//tsem_wait(&(tLogBuff->buffNotEmpty));
taosMsleep(writeInterval);
// Polling the buffer

View File

@ -122,7 +122,9 @@ void *taosProcessSchedQueue(void *scheduler) {
SSchedQueue *pSched = (SSchedQueue *)scheduler;
int ret = 0;
setThreadName("schedQ");
char name[16] = {0};
snprintf(name, tListLen(name), "%s-taskQ", pSched->label);
setThreadName(name);
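// embed the queue label, e.g. "query-taskQ", so each scheduler thread is identifiable in tools like top -H.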
while (1) {
if ((ret = tsem_wait(&pSched->fullSem)) != 0) {

View File

@ -69,12 +69,12 @@ void taosStringBuilderAppendNull(SStringBuilder* sb) { taosStringBuilderAppendSt
void taosStringBuilderAppendInteger(SStringBuilder* sb, int64_t v) {
char buf[64];
size_t len = sprintf(buf, "%" PRId64, v);
taosStringBuilderAppendStringLen(sb, buf, len);
size_t len = snprintf(buf, sizeof(buf), "%" PRId64, v);
taosStringBuilderAppendStringLen(sb, buf, MIN(len, sizeof(buf)));
}
void taosStringBuilderAppendDouble(SStringBuilder* sb, double v) {
char buf[64];
size_t len = sprintf(buf, "%.9lf", v);
taosStringBuilderAppendStringLen(sb, buf, len);
char buf[512];
size_t len = snprintf(buf, sizeof(buf), "%.9lf", v);
taosStringBuilderAppendStringLen(sb, buf, MIN(len, sizeof(buf)));
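// snprintf returns the would-be length when output is truncated, so clamp before appending.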
}

View File

@ -35,8 +35,6 @@ void *addRef(void *param) {
SRefSpace *pSpace = (SRefSpace *)param;
int id;
setThreadName("addRef");
for (int i=0; i < pSpace->steps; ++i) {
printf("a");
id = random() % pSpace->refNum;
@ -54,8 +52,6 @@ void *removeRef(void *param) {
SRefSpace *pSpace = (SRefSpace *)param;
int id, code;
setThreadName("removeRef");
for (int i=0; i < pSpace->steps; ++i) {
printf("d");
id = random() % pSpace->refNum;
@ -74,8 +70,6 @@ void *acquireRelease(void *param) {
SRefSpace *pSpace = (SRefSpace *)param;
int id;
setThreadName("acquireRelease");
for (int i=0; i < pSpace->steps; ++i) {
printf("a");
@ -97,8 +91,6 @@ void myfree(void *p) {
void *openRefSpace(void *param) {
SRefSpace *pSpace = (SRefSpace *)param;
setThreadName("openRefSpace");
printf("c");
pSpace->rsetId = taosOpenRef(50, myfree);

View File

@ -93,7 +93,7 @@ static void vnodeIncRef(void *ptNode) {
void *vnodeAcquire(int32_t vgId) {
SVnodeObj *pVnode = NULL;
if (tsVnodesHash != NULL) {
taosHashGetClone(tsVnodesHash, &vgId, sizeof(int32_t), vnodeIncRef, &pVnode, sizeof(void *));
taosHashGetClone(tsVnodesHash, &vgId, sizeof(int32_t), vnodeIncRef, &pVnode);
}
if (pVnode == NULL) {

View File

@ -25,7 +25,7 @@
typedef enum {
VNODE_WORKER_ACTION_CLEANUP,
VNODE_WORKER_ACTION_DESTROUY
VNODE_WORKER_ACTION_DESTROY
} EVMWorkerAction;
typedef struct {
@ -155,7 +155,7 @@ int32_t vnodeCleanupInMWorker(SVnodeObj *pVnode) {
int32_t vnodeDestroyInMWorker(SVnodeObj *pVnode) {
vTrace("vgId:%d, will destroy in vmworker", pVnode->vgId);
return vnodeWriteIntoMWorker(pVnode, VNODE_WORKER_ACTION_DESTROUY, NULL);
return vnodeWriteIntoMWorker(pVnode, VNODE_WORKER_ACTION_DESTROY, NULL);
}
static void vnodeFreeMWorkerMsg(SVMWorkerMsg *pMsg) {
@ -179,7 +179,7 @@ static void vnodeProcessMWorkerMsg(SVMWorkerMsg *pMsg) {
case VNODE_WORKER_ACTION_CLEANUP:
vnodeCleanUp(pMsg->pVnode);
break;
case VNODE_WORKER_ACTION_DESTROUY:
case VNODE_WORKER_ACTION_DESTROY:
vnodeDestroy(pMsg->pVnode);
break;
default:

View File

@ -192,7 +192,7 @@ static void walFsyncAll() {
static void *walThreadFunc(void *param) {
int stop = 0;
setThreadName("walThrd");
setThreadName("wal");
while (1) {
walUpdateSeq();
walFsyncAll();

View File

@ -359,7 +359,7 @@ void verify_prepare(TAOS* taos) {
v.v8 = (int64_t)(i * 8);
v.f4 = (float)(i * 40);
v.f8 = (double)(i * 80);
for (int j = 0; j < sizeof(v.bin) - 1; ++j) {
for (int j = 0; j < sizeof(v.bin); ++j) {
v.bin[j] = (char)(i + '0');
}
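// the whole buffer is now filled; the bind API takes an explicit length, so no NUL terminator is needed.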
@ -556,7 +556,7 @@ void verify_prepare2(TAOS* taos) {
v.v8[i] = (int64_t)(i * 8);
v.f4[i] = (float)(i * 40);
v.f8[i] = (double)(i * 80);
for (int j = 0; j < sizeof(v.bin[0]) - 1; ++j) {
for (int j = 0; j < sizeof(v.bin[0]); ++j) {
v.bin[i][j] = (char)(i + '0');
}
strcpy(v.blob[i], "一二三四五六七八九十");
@ -808,7 +808,7 @@ void verify_prepare3(TAOS* taos) {
v.v8[i] = (int64_t)(i * 8);
v.f4[i] = (float)(i * 40);
v.f8[i] = (double)(i * 80);
for (int j = 0; j < sizeof(v.bin[0]) - 1; ++j) {
for (int j = 0; j < sizeof(v.bin[0]); ++j) {
v.bin[i][j] = (char)(i + '0');
}
strcpy(v.blob[i], "一二三四五六七八九十");
@ -1031,7 +1031,7 @@ int main(int argc, char *argv[]) {
info = taos_get_client_info(taos);
printf("client info: %s\n", info);
printf("************ verify shemaless *************\n");
printf("************ verify schema-less *************\n");
verify_schema_less(taos);
@ -1049,14 +1049,12 @@ int main(int argc, char *argv[]) {
printf("************ verify prepare2 *************\n");
verify_prepare2(taos);
printf("************ verify prepare3 *************\n");
verify_prepare3(taos);
printf("************ verify stream *************\n");
verify_stream(taos);
printf("done\n");
taos_close(taos);
taos_cleanup();
}

View File

@ -157,5 +157,45 @@ int main(int argc, char* argv[]) {
return -1;
}
// Duplicate key check: tag and field keys are matched case-insensitively (and tags against fields), so every batch below must be rejected.
char* lines_003_1[] = {
"std,id=\"std_3_1\",t1=4i64,Id=\"std\",t2=true c1=true 1626006834s"
};
code = taos_insert_lines(taos, lines_003_1 , sizeof(lines_003_1)/sizeof(char*));
if (0 == code) {
printf("taos_insert_lines() lines_003_1 return code:%d (%s)\n", code, (char*)tstrerror(code));
return -1;
}
char* lines_003_2[] = {
"std,id=\"std_3_2\",tag1=4i64,Tag2=true,tAg3=2,TaG2=\"dup!\" c1=true 1626006834s"
};
code = taos_insert_lines(taos, lines_003_2 , sizeof(lines_003_2)/sizeof(char*));
if (0 == code) {
printf("taos_insert_lines() lines_003_2 return code:%d (%s)\n", code, (char*)tstrerror(code));
return -1;
}
char* lines_003_3[] = {
"std,id=\"std_3_3\",tag1=4i64 field1=true,Field2=2,FIElD1=\"dup!\",fIeLd4=true 1626006834s"
};
code = taos_insert_lines(taos, lines_003_3 , sizeof(lines_003_3)/sizeof(char*));
if (0 == code) {
printf("taos_insert_lines() lines_003_3 return code:%d (%s)\n", code, (char*)tstrerror(code));
return -1;
}
char* lines_003_4[] = {
"std,id=\"std_3_4\",tag1=4i64,dupkey=4i16,tag2=T field1=true,dUpkEy=1e3f32,field2=\"1234\" 1626006834s"
};
code = taos_insert_lines(taos, lines_003_4 , sizeof(lines_003_4)/sizeof(char*));
if (0 == code) {
printf("taos_insert_lines() lines_003_4 return code:%d (%s)\n", code, (char*)tstrerror(code));
return -1;
}
return 0;
}

View File

@ -163,7 +163,7 @@ python3 test.py -f tools/taosdemoTestSampleData.py
python3 test.py -f tools/taosdemoTestInterlace.py
python3 test.py -f tools/taosdemoTestQuery.py
python3 test.py -f tools/taosdumpTestNanoSupport.py
# update
python3 ./test.py -f update/allow_update.py
@ -360,6 +360,8 @@ python3 ./test.py -f tag_lite/alter_tag.py
python3 test.py -f tools/taosdemoAllTest/TD-4985/query-limit-offset.py
python3 test.py -f tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py
python3 test.py -f tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py
python3 ./test.py -f tag_lite/drop_auto_create.py
python3 test.py -f insert/insert_before_use_db.py
python3 test.py -f alter/alter_keep.py

View File

@ -77,6 +77,8 @@ class TDTestCase:
"sth,t1=4i64,t2=5f64,t4=5f64,ID=\"childtable\" c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933641ms",
"sth,t1=4i64,t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933654ms"
])
tdSql.execute('reset query cache')
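# reset the query cache so the client refetches the table meta modified by the schemaless inserts above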
tdSql.query('select tbname, * from sth')
tdSql.checkRows(2)

View File

@ -70,6 +70,14 @@ class TDTestCase:
tdSql.query("select * from (select avg(value), sum(value) from st group by tbname slimit 5 soffset 7)")
tdSql.checkRows(3)
# https://jira.taosdata.com:18080/browse/TD-5497
tdSql.execute("create table tt(ts timestamp ,i int)")
tdSql.execute("insert into tt values(now, 11)(now + 1s, -12)")
tdSql.query("select * from (select max(i),0-min(i) from tt)")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 11)
tdSql.checkData(0, 1, 12.0)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)

View File

@ -0,0 +1,536 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
import random
import time
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.ts = 1600000000000
self.num = 10
def run(self):
tdSql.prepare()
# test case for https://jira.taosdata.com:18080/browse/TD-5074
startTime = time.time()
tdSql.execute('''create stable stable_1
(ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,
q_float float , q_double double , q_ts timestamp)
tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint,
t_bool bool , t_binary binary(20) , t_nchar nchar(20) ,
t_float float , t_double double , t_ts timestamp);''')
tdSql.execute('''create stable stable_2
(ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,
q_float float , q_double double , q_ts timestamp)
tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint,
t_bool bool , t_binary binary(20) , t_nchar nchar(20) ,
t_float float , t_double double , t_ts timestamp);''')
tdSql.execute('''create table table_0 using stable_1
tags('table_0' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')''')
tdSql.execute('''create table table_1 using stable_1
tags('table_1' , '2147483647' , '9223372036854775807' , '32767' , '127' , 1 ,
'binary1' , 'nchar1' , '1' , '11' , \'1999-09-09 09:09:09.090\')''')
tdSql.execute('''create table table_2 using stable_1
tags('table_2' , '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false ,
'binary2' , 'nchar2nchar2' , '-2.2' , '-22.22' , \'2099-09-09 09:09:09.090\')''')
tdSql.execute('''create table table_3 using stable_1
tags('table_3' , '3' , '3' , '3' , '3' , true , 'binary3' , 'nchar3' , '33.33' , '3333.3333' , '0')''')
tdSql.execute('''create table table_4 using stable_1
tags('table_4' , '4' , '4' , '4' , '4' , false , 'binary4' , 'nchar4' , '-444.444' , '-444444.444444' , '0')''')
tdSql.execute('''create table table_5 using stable_1
tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''')
tdSql.execute('''create table table_21 using stable_2
tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''')
#regular table
tdSql.execute('''create table regular_table_1
(ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,
q_float float , q_double double , q_ts timestamp) ;''')
for i in range(self.num):
tdSql.execute('''insert into table_0 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)'''
% (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)'''
% (self.ts + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i,
i, i, random.random(), random.random(), 1262304000001 + i))
tdSql.execute('''insert into table_2 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)'''
% (self.ts + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i,
i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i))
tdSql.execute('''insert into table_3 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)'''
% (self.ts + i, random.randint(-2147483647, 2147483647),
random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767),
random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000),
random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i))
tdSql.execute('''insert into table_4 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)'''
% (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
tdSql.execute('''insert into table_5 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)'''
% (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
tdSql.execute('''insert into table_21 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)'''
% (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)'''
% (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)'''
% (self.ts + 100 + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i,
i, i, random.random(), random.random(), 1262304000001 + i))
tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)'''
% (self.ts + 200 + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i,
i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i))
tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)'''
% (self.ts + 300 + i, random.randint(-2147483647, 2147483647),
random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767),
random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000),
random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i))
tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)'''
% (self.ts + 400 + i, i, i, i, i, i, i, i, i, self.ts + i))
tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)'''
% (self.ts + 500 + i, i, i, i, i, i, i, i, i, self.ts + i))
tdLog.info("========== operator=1(OP_TableScan) ==========")
tdLog.info("========== operator=7(OP_Project) ==========")
sql = '''select * from stable_1'''
tdSql.query(sql)
tdSql.checkRows(6*self.num)
sql = '''select * from regular_table_1'''
tdSql.query(sql)
tdSql.checkRows(6*self.num)
tdLog.info("========== operator=14(OP_MultiTableAggregate ) ==========")
sql = '''select last_row(*) from stable_1;'''
tdSql.query(sql)
tdSql.checkData(0,1,self.num-1)
tdLog.info("========== operator=6(OP_Aggregate) ==========")
sql = '''select last_row(*) from regular_table_1;'''
tdSql.query(sql)
tdSql.checkData(0,1,self.num-1)
tdLog.info("========== operator=9(OP_Limit) ==========")
sql = '''select * from stable_1 where loc = 'table_0' limit 5;'''
tdSql.query(sql)
tdSql.checkRows(5)
sql = '''select last_row(*) from (select * from stable_1 where loc = 'table_0');'''
tdSql.query(sql)
tdSql.checkRows(1)
sql = '''select * from regular_table_1 ;'''
tdSql.query(sql)
tdSql.checkRows(6*self.num)
sql = '''select last_row(*) from (select * from regular_table_1);'''
tdSql.query(sql)
tdSql.checkRows(1)
tdSql.checkData(0,1,self.num-1)
sql = '''select last_row(*) from
((select * from table_0) union all
(select * from table_1) union all
(select * from table_2));'''
tdSql.error(sql)
tdLog.info("========== operator=16(OP_DummyInput) ==========")
sql = '''select last_row(*) from
((select last_row(*) from table_0) union all
(select last_row(*) from table_1) union all
(select last_row(*) from table_2));'''
tdSql.error(sql)
sql = '''select last_row(*) from
((select * from table_0 limit 5 offset 5) union all
(select * from table_1 limit 5 offset 5) union all
(select * from regular_table_1 limit 5 offset 5));'''
tdSql.error(sql)
tdLog.info("========== operator=10(OP_SLimit) ==========")
sql = '''select count(*) from stable_1 group by loc slimit 3 soffset 2 ;'''
tdSql.query(sql)
tdSql.checkRows(3)
sql = '''select last_row(*) from
((select * from table_0) union all
(select * from table_1) union all
(select * from table_2));'''
tdSql.error(sql)
tdLog.info("========== operator=20(OP_Distinct) ==========")
tdLog.info("========== operator=4(OP_TagScan) ==========")
sql = '''select distinct(t_bool) from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(2)
sql = '''select distinct(loc) from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(6)
sql = '''select distinct(t_int) from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(6)
sql = '''select distinct(t_bigint) from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(6)
sql = '''select distinct(t_smallint) from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(6)
sql = '''select distinct(t_tinyint) from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(6)
sql = '''select distinct(t_nchar) from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(6)
sql = '''select distinct(t_float) from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(6)
sql = '''select distinct(t_double) from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(6)
sql = '''select distinct(t_ts) from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(3)
sql = '''select distinct(tbname) from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(6)
tdLog.info("========== operator=2(OP_DataBlocksOptScan) ==========")
sql = '''select last(q_int),first(q_int) from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(1)
sql = '''select last(q_bigint),first(q_bigint) from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(1)
sql = '''select last(q_smallint),first(q_smallint) from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(1)
sql = '''select last(q_tinyint),first(q_tinyint) from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(1)
sql = '''select last(q_bool),first(q_bool) from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(1)
sql = '''select last(q_binary),first(q_binary) from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(1)
sql = '''select last(q_nchar),first(q_nchar) from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(1)
sql = '''select last(q_float),first(q_float) from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(1)
sql = '''select last(q_double),first(q_double) from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(1)
sql = '''select last(q_ts),first(q_ts) from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(1)
sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar),
last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint),
first(q_bool),first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(1)
sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar),
last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint),first(q_bool),
first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from regular_table_1;'''
tdSql.query(sql)
tdSql.checkRows(1)
tdLog.info("========== operator=8(OP_Groupby) ==========")
sql = '''select stddev(q_int) from table_0 group by q_int;'''
tdSql.query(sql)
tdSql.checkRows(self.num)
sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from stable_1 group by q_int;'''
tdSql.query(sql)
sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from table_1 group by q_bigint;'''
tdSql.query(sql)
tdSql.checkRows(self.num)
sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from regular_table_1 group by q_smallint;'''
tdSql.query(sql)
tdLog.info("========== operator=11(OP_TimeWindow) ==========")
sql = '''select last(q_int) from table_0 interval(1m);'''
tdSql.query(sql)
tdSql.checkRows(1)
sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),
first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from table_1 interval(1m);'''
tdSql.query(sql)
tdSql.checkRows(1)
sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),
first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from stable_1 interval(1m);'''
tdSql.query(sql)
tdSql.checkRows(1)
sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),
first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from regular_table_1 interval(1m);'''
tdSql.query(sql)
tdSql.checkRows(1)
tdLog.info("========== operator=12(OP_SessionWindow) ==========")
sql = '''select count(*) from table_1 session(ts,1s);'''
tdSql.query(sql)
tdSql.checkRows(1)
sql = '''select count(*) from regular_table_1 session(ts,1s);'''
tdSql.query(sql)
tdSql.checkRows(1)
sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
from table_1 session(ts,1s);'''
tdSql.query(sql)
tdSql.checkRows(1)
sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
from regular_table_1 session(ts,1s);'''
tdSql.query(sql)
tdSql.checkRows(1)
tdLog.info("========== operator=13(OP_Fill) ==========")
sql = '''select sum(q_int) from table_0
where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);'''
tdSql.query(sql)
tdSql.checkData(0,1,'None')
sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
from stable_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);'''
tdSql.query(sql)
tdSql.checkData(0,1,'None')
sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
from regular_table_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);'''
tdSql.query(sql)
tdSql.checkData(0,1,'None')
sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
from table_0 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);'''
tdSql.query(sql)
tdSql.checkData(0,1,'None')
#TD-5190
sql = '''select sum(q_tinyint),stddev(q_float) from stable_1
where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);'''
tdSql.query(sql)
tdSql.checkData(0,1,'None')
tdLog.info("========== operator=15(OP_MultiTableTimeInterval) ==========")
sql = '''select avg(q_int) from stable_1 where ts<now interval(10m);'''
tdSql.query(sql)
tdSql.checkRows(1)
sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
from table_1 where ts<now interval(10m);'''
tdSql.query(sql)
tdSql.checkRows(1)
sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
from stable_1 where ts<now interval(10m);'''
tdSql.query(sql)
tdSql.checkRows(1)
sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
from regular_table_1 where ts<now interval(10m);'''
tdSql.query(sql)
tdSql.checkRows(1)
tdLog.info("========== operator=3(OP_TableSeqScan) ==========")
tdLog.info("========== operator=6(OP_Aggregate) ==========")
sql = '''select * from table_1,table_2
where table_1.ts = table_2.ts;'''
tdSql.query(sql)
tdSql.checkRows(self.num)
#TD-5206
sql = '''select * from stable_1,stable_2
where stable_1.t_nchar = stable_2.t_nchar and stable_1.ts = stable_2.ts;'''
tdSql.query(sql)
tdSql.checkRows(self.num)
#TD-5139
sql = '''select * from table_1,regular_table_1
where table_1.ts = regular_table_1.ts;'''
tdSql.query(sql)
tdSql.checkRows(self.num)
tdLog.info("========== operator=5(OP_TableBlockInfoScan) ==========")
sql = '''select _block_dist() from stable_1;'''
tdSql.query(sql)
tdSql.checkRows(1)
sql = '''select _block_dist() from table_1;'''
tdSql.query(sql)
tdSql.checkRows(1)
sql = '''select _block_dist() from regular_table_1;'''
tdSql.query(sql)
tdSql.checkRows(1)
tdLog.info("========== operator=17(OP_MultiwayMergeSort) ==========")
tdLog.info("========== operator=18(OP_GlobalAggregate) ==========")
tdLog.info("========== operator=19(OP_Filter) ==========")
sql = '''select loc,sum(q_int) from stable_1
group by loc having sum(q_int)>=0;'''
tdSql.query(sql)
tdSql.checkData(0,0,'table_0')
sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
from stable_1 group by loc having sum(q_int)>=0;'''
tdSql.query(sql)
tdSql.checkData(0,0,'table_0')
sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
from stable_1 group by loc having avg(q_int)>=0;'''
tdSql.query(sql)
tdSql.checkData(0,0,'table_0')
sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
from stable_1 group by loc having min(q_int)>=0;'''
tdSql.query(sql)
tdSql.checkData(0,0,'table_0')
sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
from stable_1 group by loc having max(q_int)>=0;'''
tdSql.query(sql)
tdSql.checkData(0,0,'table_0')
sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
from stable_1 group by loc having first(q_int)>=0;'''
tdSql.query(sql)
tdSql.checkData(0,0,'table_0')
sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
from stable_1 group by loc having last(q_int)>=0;'''
tdSql.query(sql)
tdSql.checkData(0,0,'table_0')
tdLog.info("========== operator=21(OP_Join) ==========")
sql = '''select t1.q_int,t2.q_int from
(select ts,q_int from table_1) t1 , (select ts,q_int from table_2) t2
where t2.ts = t1.ts;'''
tdSql.query(sql)
tdSql.checkRows(self.num)
sql = '''select t1.*,t2.* from
(select * from table_1) t1 , (select * from table_2) t2
where t2.ts = t1.ts;'''
tdSql.query(sql)
tdSql.checkRows(self.num)
sql = '''select t1.*,t2.* from
(select * from regular_table_1) t1 , (select * from table_0) t2
where t2.ts = t1.ts;'''
tdSql.query(sql)
tdSql.checkRows(self.num)
sql = '''select t1.*,t2.* from
(select * from stable_1) t1 , (select * from table_2) t2
where t2.ts = t1.ts;'''
tdSql.query(sql)
tdSql.checkRows(self.num)
sql = '''select t1.*,t2.* from
(select * from regular_table_1) t1 , (select * from stable_1) t2
where t2.ts = t1.ts;'''
tdSql.query(sql)
tdSql.checkRows(self.num)
sql = '''select t1.*,t2.*,t3.* from
(select * from regular_table_1) t1 , (select * from stable_1) t2, (select * from table_0) t3
where t2.ts = t1.ts and t3.ts = t1.ts and t2.ts = t3.ts;'''
tdSql.query(sql)
tdSql.checkRows(self.num)
tdLog.info("========== operator=22(OP_StateWindow) ==========")
sql = '''select avg(q_int),sum(q_smallint) from table_1 state_window(q_int);'''
tdSql.query(sql)
tdSql.checkRows(self.num)
sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
from table_1 state_window(q_bigint);'''
tdSql.query(sql)
tdSql.checkRows(self.num)
sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
from regular_table_1 state_window(q_smallint);'''
tdSql.query(sql)
tdSql.checkRows(6*self.num)
endTime = time.time()
print("total time %ds" % (endTime - startTime))
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -51,7 +51,7 @@ class TDTestCase:
tdSql.error("select last_row as latest from st")
# query distinct on a normal column
tdSql.error("select distinct tagtype from st")
#tdSql.error("select distinct tagtype from st")
# query .. order by non-time field
tdSql.error("select * from st order by name")

View File

@ -0,0 +1,703 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import random
import string
import os
import time
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
from util.dnodes import tdDnodes
class TDTestCase:
updatecfgDict={'maxSQLLength':1048576}
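# raise maxSQLLength so a single INSERT covering 4096 columns fits in one SQL statement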
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.ts = 1538548685000
self.num = 100
def get_random_string(self, length):
letters = string.ascii_lowercase
result_str = ''.join(random.choice(letters) for i in range(length))
return result_str
def run(self):
tdSql.prepare()
# test case for https://jira.taosdata.com:18080/browse/TD-5213
print("==============step1, regular table, 1 ts + 4094 cols + 1 binary==============")
startTime = time.time()
sql = "create table regular_table_1(ts timestamp, "
for i in range(4094):
sql += "col%d int, " % (i + 1)
sql += "col4095 binary(22))"
tdLog.info(len(sql))
tdSql.execute(sql)
for i in range(self.num):
sql = "insert into regular_table_1 values(%d, "
for j in range(4094):
str = "'%s', " % random.randint(0,1000)
sql += str
sql += "'%s')" % self.get_random_string(22)
tdSql.execute(sql % (self.ts + i))
time.sleep(1)
tdSql.query("select count(*) from regular_table_1")
tdSql.checkData(0, 0, self.num)
tdSql.query("select * from regular_table_1")
tdSql.checkRows(self.num)
tdSql.checkCols(4096)
endTime = time.time()
print("total time %ds" % (endTime - startTime))
#insert in order
tdLog.info('test insert in order')
for i in range(self.num):
sql = "insert into regular_table_1 (ts,col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col4095) values(%d, "
for j in range(10):
str = "'%s', " % random.randint(0,1000)
sql += str
sql += "'%s')" % self.get_random_string(22)
tdSql.execute(sql % (self.ts + i + 1000))
time.sleep(1)
tdSql.query("select count(*) from regular_table_1")
tdSql.checkData(0, 0, 2*self.num)
tdSql.query("select * from regular_table_1")
tdSql.checkRows(2*self.num)
tdSql.checkCols(4096)
#insert out of order
tdLog.info('test insert out of order')
for i in range(self.num):
sql = "insert into regular_table_1 (ts,col123,col2213,col331,col41,col523,col236,col71,col813,col912,col1320,col4095) values(%d, "
for j in range(10):
str = "'%s', " % random.randint(0,1000)
sql += str
sql += "'%s')" % self.get_random_string(22)
tdSql.execute(sql % (self.ts + i + 2000))
time.sleep(1)
tdSql.query("select count(*) from regular_table_1")
tdSql.checkData(0, 0, 3*self.num)
tdSql.query("select * from regular_table_1")
tdSql.checkRows(3*self.num)
tdSql.checkCols(4096)
print("==============step2,regular table error col or value==============")
tdLog.info('test regular table exceeds row num')
# column > 4096
sql = "create table regular_table_2(ts timestamp, "
for i in range(4095):
sql += "col%d int, " % (i + 1)
sql += "col4096 binary(22))"
tdLog.info(len(sql))
tdSql.error(sql)
# column > 4096
sql = "insert into regular_table_1 values(%d, "
for j in range(4095):
str = "'%s', " % random.randint(0,1000)
sql += str
sql += "'%s')" % self.get_random_string(22)
tdSql.error(sql)
# insert column < 4096
sql = "insert into regular_table_1 values(%d, "
for j in range(4092):
str = "'%s', " % random.randint(0,1000)
sql += str
sql += "'%s')" % self.get_random_string(22)
tdSql.error(sql)
# alter column > 4096
sql = "alter table regular_table_1 add column max int; "
tdSql.error(sql)
print("==============step3,regular table , mix data type==============")
startTime = time.time()
sql = "create table regular_table_3(ts timestamp, "
for i in range(2000):
sql += "col%d int, " % (i + 1)
for i in range(2000,4094):
sql += "col%d bigint, " % (i + 1)
sql += "col4095 binary(22))"
tdLog.info(len(sql))
tdSql.execute(sql)
for i in range(self.num):
sql = "insert into regular_table_3 values(%d, "
for j in range(4094):
str = "'%s', " % random.randint(0,1000)
sql += str
sql += "'%s')" % self.get_random_string(22)
tdSql.execute(sql % (self.ts + i))
time.sleep(1)
tdSql.query("select count(*) from regular_table_3")
tdSql.checkData(0, 0, self.num)
tdSql.query("select * from regular_table_3")
tdSql.checkRows(self.num)
tdSql.checkCols(4096)
endTime = time.time()
print("total time %ds" % (endTime - startTime))
sql = "create table regular_table_4(ts timestamp, "
for i in range(500):
sql += "int_%d int, " % (i + 1)
for i in range(500,1000):
sql += "smallint_%d smallint, " % (i + 1)
for i in range(1000,1500):
sql += "tinyint_%d tinyint, " % (i + 1)
for i in range(1500,2000):
sql += "double_%d double, " % (i + 1)
for i in range(2000,2500):
sql += "float_%d float, " % (i + 1)
for i in range(2500,3000):
sql += "bool_%d bool, " % (i + 1)
for i in range(3000,3500):
sql += "bigint_%d bigint, " % (i + 1)
for i in range(3500,3800):
sql += "nchar_%d nchar(4), " % (i + 1)
for i in range(3800,4090):
sql += "binary_%d binary(10), " % (i + 1)
for i in range(4090,4094):
sql += "timestamp_%d timestamp, " % (i + 1)
sql += "col4095 binary(22))"
tdLog.info(len(sql))
tdSql.execute(sql)
for i in range(self.num):
sql = "insert into regular_table_4 values(%d, "
for j in range(500):
str = "'%s', " % random.randint(-2147483647,2147483647)
sql += str
for j in range(500,1000):
str = "'%s', " % random.randint(-32767,32767 )
sql += str
for j in range(1000,1500):
str = "'%s', " % random.randint(-127,127)
sql += str
for j in range(1500,2000):
str = "'%s', " % random.randint(-922337203685477580700,922337203685477580700)
sql += str
for j in range(2000,2500):
str = "'%s', " % random.randint(-92233720368547758070,92233720368547758070)
sql += str
for j in range(2500,3000):
str = "'%s', " % random.choice(['true','false'])
sql += str
for j in range(3000,3500):
str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807)
sql += str
for j in range(3500,3800):
str = "'%s', " % self.get_random_string(4)
sql += str
for j in range(3800,4090):
str = "'%s', " % self.get_random_string(10)
sql += str
for j in range(4090,4094):
str = "%s, " % (self.ts + j)
sql += str
sql += "'%s')" % self.get_random_string(22)
tdSql.execute(sql % (self.ts + i))
time.sleep(1)
tdSql.query("select count(*) from regular_table_4")
tdSql.checkData(0, 0, self.num)
tdSql.query("select * from regular_table_4")
tdSql.checkRows(self.num)
tdSql.checkCols(4096)
tdLog.info("end ,now new one")
#insert null value
tdLog.info('test insert null value')
for i in range(self.num):
sql = "insert into regular_table_4 values(%d, "
for j in range(2500):
str = "'%s', " % random.choice(['NULL' ,'NULL' ,'NULL' ,1 , 10 ,100 ,-100 ,-10, 88 ,66 ,'NULL' ,'NULL' ,'NULL' ])
sql += str
for j in range(2500,3000):
str = "'%s', " % random.choice(['true' ,'false'])
sql += str
for j in range(3000,3500):
str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807)
sql += str
for j in range(3500,3800):
str = "'%s', " % self.get_random_string(4)
sql += str
for j in range(3800,4090):
str = "'%s', " % self.get_random_string(10)
sql += str
for j in range(4090,4094):
str = "%s, " % (self.ts + j)
sql += str
sql += "'%s')" % self.get_random_string(22)
tdSql.execute(sql % (self.ts + i + 10000))
time.sleep(1)
tdSql.query("select count(*) from regular_table_4")
tdSql.checkData(0, 0, 2*self.num)
tdSql.query("select * from regular_table_4")
tdSql.checkRows(2*self.num)
tdSql.checkCols(4096)
#insert in order
tdLog.info('test insert in order')
for i in range(self.num):
sql = "insert into regular_table_4 (ts,int_2,int_22,int_169,smallint_537,smallint_607,tinyint_1030,tinyint_1491,double_1629,double_1808,float_2075,col4095) values(%d, "
for j in range(10):
str = "'%s', " % random.randint(0,100)
sql += str
sql += "'%s')" % self.get_random_string(22)
tdSql.execute(sql % (self.ts + i + 1000))
time.sleep(1)
tdSql.query("select count(*) from regular_table_4")
tdSql.checkData(0, 0, 3*self.num)
tdSql.query("select * from regular_table_4")
tdSql.checkRows(3*self.num)
tdSql.checkCols(4096)
#insert out of order
tdLog.info('test insert out of order')
for i in range(self.num):
sql = "insert into regular_table_4 (ts,int_169,float_2075,int_369,tinyint_1491,tinyint_1030,float_2360,smallint_537,double_1808,double_1608,double_1629,col4095) values(%d, "
for j in range(10):
str = "'%s', " % random.randint(0,100)
sql += str
sql += "'%s')" % self.get_random_string(22)
tdSql.execute(sql % (self.ts + i + 2000))
time.sleep(1)
tdSql.query("select count(*) from regular_table_4")
tdSql.checkData(0, 0, 4*self.num)
tdSql.query("select * from regular_table_4")
tdSql.checkRows(4*self.num)
tdSql.checkCols(4096)
# TSDB_MAX_BYTES_PER_ROW is 49151 (earlier releases: 1024, then 16384)
# per-column byte costs: ts 8, int 4, smallint 2, bigint 8, bool 1, float 4, tinyint 1,
# nchar 4*len+2 (offset), binary 1*len+2 (offset)
tdLog.info('test regular_table max bytes per row 49151')
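# the widths below sum exactly to the limit:
# 8 (ts) + 500*4 (int) + 500*2 (smallint) + 500*1 (tinyint) + 500*8 (double) + 500*4 (float)
#   + 500*1 (bool) + 500*8 (bigint) + 300*(4*20+2) (nchar) + 290*(34+2) (binary)
#   + 4*8 (timestamp) + (69+2) = 49151 bytes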
sql = "create table regular_table_5(ts timestamp, "
for i in range(500):
sql += "int_%d int, " % (i + 1)
for i in range(500,1000):
sql += "smallint_%d smallint, " % (i + 1)
for i in range(1000,1500):
sql += "tinyint_%d tinyint, " % (i + 1)
for i in range(1500,2000):
sql += "double_%d double, " % (i + 1)
for i in range(2000,2500):
sql += "float_%d float, " % (i + 1)
for i in range(2500,3000):
sql += "bool_%d bool, " % (i + 1)
for i in range(3000,3500):
sql += "bigint_%d bigint, " % (i + 1)
for i in range(3500,3800):
sql += "nchar_%d nchar(20), " % (i + 1)
for i in range(3800,4090):
sql += "binary_%d binary(34), " % (i + 1)
for i in range(4090,4094):
sql += "timestamp_%d timestamp, " % (i + 1)
sql += "col4095 binary(69))"
tdSql.execute(sql)
tdSql.query("select * from regular_table_5")
tdSql.checkCols(4096)
# TD-5324
sql = "alter table regular_table_5 modify column col4095 binary(70); "
tdSql.error(sql)
# drop and add
sql = "alter table regular_table_5 drop column col4095; "
tdSql.execute(sql)
sql = "select * from regular_table_5; "
tdSql.query(sql)
tdSql.checkCols(4095)
sql = "alter table regular_table_5 add column col4095 binary(70); "
tdSql.error(sql)
sql = "alter table regular_table_5 add column col4095 binary(69); "
tdSql.execute(sql)
sql = "select * from regular_table_5; "
tdSql.query(sql)
tdSql.checkCols(4096)
# exceed TSDB_MAX_BYTES_PER_ROW (49151): binary(70) instead of binary(69) pushes the row to 49152 bytes
tdLog.info('test regular_table max bytes per row out 49151')
sql = "create table regular_table_6(ts timestamp, "
for i in range(500):
sql += "int_%d int, " % (i + 1)
for i in range(500,1000):
sql += "smallint_%d smallint, " % (i + 1)
for i in range(1000,1500):
sql += "tinyint_%d tinyint, " % (i + 1)
for i in range(1500,2000):
sql += "double_%d double, " % (i + 1)
for i in range(2000,2500):
sql += "float_%d float, " % (i + 1)
for i in range(2500,3000):
sql += "bool_%d bool, " % (i + 1)
for i in range(3000,3500):
sql += "bigint_%d bigint, " % (i + 1)
for i in range(3500,3800):
sql += "nchar_%d nchar(20), " % (i + 1)
for i in range(3800,4090):
sql += "binary_%d binary(34), " % (i + 1)
for i in range(4090,4094):
sql += "timestamp_%d timestamp, " % (i + 1)
sql += "col4095 binary(70))"
tdLog.info(len(sql))
tdSql.error(sql)
print("==============step4, super table , 1 ts + 4090 cols + 4 tags ==============")
startTime = time.time()
sql = "create stable stable_1(ts timestamp, "
for i in range(4090):
sql += "col%d int, " % (i + 1)
sql += "col4091 binary(22))"
sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) "
tdLog.info(len(sql))
tdSql.execute(sql)
sql = '''create table table_0 using stable_1
tags('table_0' , '1' , '2' , '3' );'''
tdSql.execute(sql)
for i in range(self.num):
sql = "insert into table_0 values(%d, "
for j in range(4090):
str = "'%s', " % random.randint(0,1000)
sql += str
sql += "'%s')" % self.get_random_string(22)
tdSql.execute(sql % (self.ts + i))
time.sleep(1)
tdSql.query("select count(*) from table_0")
tdSql.checkData(0, 0, self.num)
tdSql.query("select * from table_0")
tdSql.checkRows(self.num)
tdSql.checkCols(4092)
sql = '''create table table_1 using stable_1
tags('table_1' , '1' , '2' , '3' );'''
tdSql.execute(sql)
for i in range(self.num):
sql = "insert into table_1 values(%d, "
for j in range(2080):
sql += "'%d', " % random.randint(0,1000)
for j in range(2080,4080):
sql += "'%s', " % 'NULL'
for j in range(4080,4090):
sql += "'%s', " % random.randint(0,10000)
sql += "'%s')" % self.get_random_string(22)
tdSql.execute(sql % (self.ts + i))
time.sleep(1)
tdSql.query("select count(*) from table_1")
tdSql.checkData(0, 0, self.num)
tdSql.query("select * from table_1")
tdSql.checkRows(self.num)
tdSql.checkCols(4092)
endTime = time.time()
print("total time %ds" % (endTime - startTime))
#insert in order
tdLog.info('test insert in order')
for i in range(self.num):
sql = "insert into table_1 (ts,col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col4091) values(%d, "
for j in range(10):
str = "'%s', " % random.randint(0,1000)
sql += str
sql += "'%s')" % self.get_random_string(22)
tdSql.execute(sql % (self.ts + i + 1000))
time.sleep(1)
tdSql.query("select count(*) from table_1")
tdSql.checkData(0, 0, 2*self.num)
tdSql.query("select * from table_1")
tdSql.checkRows(2*self.num)
tdSql.checkCols(4092)
#insert out of order
tdLog.info('test insert out of order')
for i in range(self.num):
sql = "insert into table_1 (ts,col123,col2213,col331,col41,col523,col236,col71,col813,col912,col1320,col4091) values(%d, "
for j in range(10):
str = "'%s', " % random.randint(0,1000)
sql += str
sql += "'%s')" % self.get_random_string(22)
tdSql.execute(sql % (self.ts + i + 2000))
time.sleep(1)
tdSql.query("select count(*) from table_1")
tdSql.checkData(0, 0, 3*self.num)
tdSql.query("select * from table_1")
tdSql.checkRows(3*self.num)
tdSql.checkCols(4092)
print("==============step5,stable table , mix data type==============")
sql = "create stable stable_3(ts timestamp, "
for i in range(500):
sql += "int_%d int, " % (i + 1)
for i in range(500,1000):
sql += "smallint_%d smallint, " % (i + 1)
for i in range(1000,1500):
sql += "tinyint_%d tinyint, " % (i + 1)
for i in range(1500,2000):
sql += "double_%d double, " % (i + 1)
for i in range(2000,2500):
sql += "float_%d float, " % (i + 1)
for i in range(2500,3000):
sql += "bool_%d bool, " % (i + 1)
for i in range(3000,3500):
sql += "bigint_%d bigint, " % (i + 1)
for i in range(3500,3800):
sql += "nchar_%d nchar(4), " % (i + 1)
for i in range(3800,4090):
sql += "binary_%d binary(10), " % (i + 1)
sql += "col4091 binary(22))"
sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) "
tdLog.info(len(sql))
tdSql.execute(sql)
sql = '''create table table_30 using stable_3
tags('table_30' , '1' , '2' , '3' );'''
tdSql.execute(sql)
for i in range(self.num):
sql = "insert into table_30 values(%d, "
for j in range(500):
str = "'%s', " % random.randint(-2147483647,2147483647)
sql += str
for j in range(500,1000):
str = "'%s', " % random.randint(-32767,32767 )
sql += str
for j in range(1000,1500):
str = "'%s', " % random.randint(-127,127)
sql += str
for j in range(1500,2000):
str = "'%s', " % random.randint(-922337203685477580700,922337203685477580700)
sql += str
for j in range(2000,2500):
str = "'%s', " % random.randint(-92233720368547758070,92233720368547758070)
sql += str
for j in range(2500,3000):
str = "'%s', " % random.choice(['true','false'])
sql += str
for j in range(3000,3500):
str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807)
sql += str
for j in range(3500,3800):
str = "'%s', " % self.get_random_string(4)
sql += str
for j in range(3800,4090):
str = "'%s', " % self.get_random_string(10)
sql += str
sql += "'%s')" % self.get_random_string(22)
tdSql.execute(sql % (self.ts + i))
time.sleep(1)
tdSql.query("select count(*) from table_30")
tdSql.checkData(0, 0, self.num)
tdSql.query("select * from table_30")
tdSql.checkRows(self.num)
tdSql.checkCols(4092)
#insert null value
tdLog.info('test insert null value')
sql = '''create table table_31 using stable_3
tags('table_31' , '1' , '2' , '3' );'''
tdSql.execute(sql)
for i in range(self.num):
sql = "insert into table_31 values(%d, "
for j in range(2500):
str = "'%s', " % random.choice(['NULL' ,'NULL' ,'NULL' ,1 , 10 ,100 ,-100 ,-10, 88 ,66 ,'NULL' ,'NULL' ,'NULL' ])
sql += str
for j in range(2500,3000):
str = "'%s', " % random.choice(['true' ,'false'])
sql += str
for j in range(3000,3500):
str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807)
sql += str
for j in range(3500,3800):
str = "'%s', " % self.get_random_string(4)
sql += str
for j in range(3800,4090):
str = "'%s', " % self.get_random_string(10)
sql += str
sql += "'%s')" % self.get_random_string(22)
tdSql.execute(sql % (self.ts + i))
time.sleep(1)
tdSql.query("select count(*) from table_31")
tdSql.checkData(0, 0, self.num)
tdSql.query("select * from table_31")
tdSql.checkRows(self.num)
tdSql.checkCols(4092)
#insert in order
tdLog.info('test insert in order')
for i in range(self.num):
sql = "insert into table_31 (ts,int_2,int_22,int_169,smallint_537,smallint_607,tinyint_1030,tinyint_1491,double_1629,double_1808,float_2075,col4091) values(%d, "
for j in range(10):
str = "'%s', " % random.randint(0,100)
sql += str
sql += "'%s')" % self.get_random_string(22)
tdSql.execute(sql % (self.ts + i + 1000))
time.sleep(1)
tdSql.query("select count(*) from table_31")
tdSql.checkData(0, 0, 2*self.num)
tdSql.query("select * from table_31")
tdSql.checkRows(2*self.num)
tdSql.checkCols(4092)
#insert out of order
tdLog.info('test insert out of order')
for i in range(self.num):
sql = "insert into table_31 (ts,int_169,float_2075,int_369,tinyint_1491,tinyint_1030,float_2360,smallint_537,double_1808,double_1608,double_1629,col4091) values(%d, "
for j in range(10):
str = "'%s', " % random.randint(0,100)
sql += str
sql += "'%s')" % self.get_random_string(22)
tdSql.execute(sql % (self.ts + i + 2000))
time.sleep(1)
tdSql.query("select count(*) from table_31")
tdSql.checkData(0, 0, 3*self.num)
tdSql.query("select * from table_31")
tdSql.checkRows(3*self.num)
tdSql.checkCols(4092)
# TSDB_MAX_BYTES_PER_ROW is 49151; TSDB_MAX_TAGS_LEN is 16384
# per-column byte costs: ts 8, int 4, smallint 2, bigint 8, bool 1, float 4, tinyint 1,
# nchar 4*len+2 (offset), binary 1*len+2 (offset)
tdLog.info('test super table max bytes per row 49151')
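# the widths below sum exactly to the limit:
# 8 (ts) + 500*4 + 500*2 + 500*1 + 500*8 + 500*4 + 500*1 + 500*8
#   + 300*(4*20+2) + 290*(34+2) + (101+2) = 49151 bytes; widening col4091 to
#   binary(102) in stable_5 below overflows it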
sql = "create table stable_4(ts timestamp, "
for i in range(500):
sql += "int_%d int, " % (i + 1)
for i in range(500,1000):
sql += "smallint_%d smallint, " % (i + 1)
for i in range(1000,1500):
sql += "tinyint_%d tinyint, " % (i + 1)
for i in range(1500,2000):
sql += "double_%d double, " % (i + 1)
for i in range(2000,2500):
sql += "float_%d float, " % (i + 1)
for i in range(2500,3000):
sql += "bool_%d bool, " % (i + 1)
for i in range(3000,3500):
sql += "bigint_%d bigint, " % (i + 1)
for i in range(3500,3800):
sql += "nchar_%d nchar(20), " % (i + 1)
for i in range(3800,4090):
sql += "binary_%d binary(34), " % (i + 1)
sql += "col4091 binary(101))"
sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) "
tdSql.execute(sql)
sql = '''create table table_40 using stable_4
tags('table_40' , '1' , '2' , '3' );'''
tdSql.execute(sql)
tdSql.query("select * from table_40")
tdSql.checkCols(4092)
tdSql.query("describe table_40")
tdSql.checkRows(4096)
tdLog.info('test super table drop and add column or tag')
sql = "alter stable stable_4 drop column col4091; "
tdSql.execute(sql)
sql = "select * from stable_4; "
tdSql.query(sql)
tdSql.checkCols(4095)
sql = "alter table stable_4 add column col4091 binary(102); "
tdSql.error(sql)
sql = "alter table stable_4 add column col4091 binary(101); "
tdSql.execute(sql)
sql = "select * from stable_4; "
tdSql.query(sql)
tdSql.checkCols(4096)
sql = "alter stable stable_4 drop tag tag_1; "
tdSql.execute(sql)
sql = "select * from stable_4; "
tdSql.query(sql)
tdSql.checkCols(4095)
sql = "alter table stable_4 add tag tag_1 int; "
tdSql.execute(sql)
sql = "select * from stable_4; "
tdSql.query(sql)
tdSql.checkCols(4096)
sql = "alter table stable_4 add tag loc1 nchar(10); "
tdSql.error(sql)
tdLog.info('test super table max bytes per row 49151')
sql = "create table stable_5(ts timestamp, "
for i in range(500):
sql += "int_%d int, " % (i + 1)
for i in range(500,1000):
sql += "smallint_%d smallint, " % (i + 1)
for i in range(1000,1500):
sql += "tinyint_%d tinyint, " % (i + 1)
for i in range(1500,2000):
sql += "double_%d double, " % (i + 1)
for i in range(2000,2500):
sql += "float_%d float, " % (i + 1)
for i in range(2500,3000):
sql += "bool_%d bool, " % (i + 1)
for i in range(3000,3500):
sql += "bigint_%d bigint, " % (i + 1)
for i in range(3500,3800):
sql += "nchar_%d nchar(20), " % (i + 1)
for i in range(3800,4090):
sql += "binary_%d binary(34), " % (i + 1)
sql += "col4091 binary(102))"
sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) "
tdSql.error(sql)
print("==============step6, super table error col ==============")
tdLog.info('test column + tag count exceeding 4096')
# column + tag > 4096
sql = "create stable stable_2(ts timestamp, "
for i in range(4091):
sql += "col%d int, " % (i + 1)
sql += "col4092 binary(22))"
sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) "
tdLog.info(len(sql))
tdSql.error(sql)
# column + tag > 4096
sql = "create stable stable_2(ts timestamp, "
for i in range(4090):
sql += "col%d int, " % (i + 1)
sql += "col4091 binary(22))"
sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int,tag_4 int) "
tdLog.info(len(sql))
tdSql.error(sql)
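# tally (illustrative): the first attempt has 1(ts) + 4091(int) + 1(binary)
# = 4093 columns plus 4 tags = 4097 slots; the second has 1 + 4090 + 1 = 4092
# columns plus 5 tags = 4097 slots; both exceed the 4096 column+tag limit,
# hence tdSql.error in both cases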
# alter column + tag > 4096
sql = "alter table stable_1 add column max int; "
tdSql.error(sql)
# TD-5322
sql = "alter table stable_1 add tag max int; "
tdSql.error(sql)
# TD-5324
sql = "alter table stable_4 modify column col4091 binary(102); "
tdSql.error(sql)
sql = "alter table stable_4 modify tag loc nchar(20); "
tdSql.query("select * from table_40")
tdSql.checkCols(4092)
tdSql.query("describe table_40")
tdSql.checkRows(4096)
os.system("rm -rf tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py.sql")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
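The test relies on a get_random_string helper defined elsewhere in the file; a minimal sketch of such a helper (hypothetical, the actual definition may differ):

import random
import string

def get_random_string(self, length):
    # build a random lowercase ASCII string of the requested length
    return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))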

File diff suppressed because one or more lines are too long

View File

@ -15,7 +15,7 @@
"max_sql_len": 102400000,
"databases": [{
"dbinfo": {
"name": "db",
"name": "json",
"drop": "yes",
"replica": 1,
"days": 10,
@ -35,13 +35,13 @@
"super_tables": [{
"name": "stb_old",
"child_table_exists":"no",
"childtable_count": 10,
"childtable_count": 1,
"childtable_prefix": "stb_old_",
"auto_create_table": "no",
"batch_create_tbl_num": 5,
"data_source": "sample",
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"insert_rows": 10,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
@ -55,18 +55,18 @@
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv",
"tags_file": "",
"columns": [{"type": "INT","count":4000}, {"type": "BINARY", "len": 16, "count":1}],
"columns": [{"type": "INT","count":1000}, {"type": "BINARY", "len": 16, "count":20}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
},{
"name": "stb_new",
"child_table_exists":"no",
"childtable_count": 10,
"childtable_count": 1,
"childtable_prefix": "stb_new_",
"auto_create_table": "no",
"batch_create_tbl_num": 5,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"insert_rows": 10,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
@ -80,18 +80,18 @@
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/sample.csv",
"tags_file": "",
"columns": [{"type": "DOUBLE","count":1020}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
"columns": [{"type": "INT","count":4000}, {"type": "BINARY", "len": 16, "count":90}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":3}]
},{
"name": "stb_int",
"name": "stb_mix",
"child_table_exists":"no",
"childtable_count": 10,
"childtable_prefix": "stb_int_",
"childtable_count": 1,
"childtable_prefix": "stb_mix_",
"auto_create_table": "no",
"batch_create_tbl_num": 5,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"insert_rows": 10,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
@ -105,8 +105,33 @@
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/sample.csv",
"tags_file": "",
"columns": [{"type": "int","count":1020}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
"columns": [{"type": "INT","count":500},{"type": "SMALLINT","count":500},{"type": "TINYINT","count":500},{"type": "DOUBLE","count":500},{"type": "FLOAT","count":500},{"type": "BOOL","count":500},{"type": "BIGINT","count":500},{"type": "NCHAR","len": 20,"count":300},{"type": "BINARY","len": 34,"count":290},{"type": "BINARY","len": 101,"count":1}],
"tags": [{"type": "INT", "count":3}, {"type": "NCHAR", "len": 10, "count":1}]
},{
"name": "stb_excel",
"child_table_exists":"no",
"childtable_count": 1,
"childtable_prefix": "stb_excel_",
"auto_create_table": "no",
"batch_create_tbl_num": 5,
"data_source": "sample",
"insert_mode": "taosc",
"insert_rows": 10,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv",
"tags_file": "",
"columns": [{"type": "INT","count":500},{"type": "SMALLINT","count":500},{"type": "SMALLINT","count":500},{"type": "DOUBLE","count":500},{"type": "FLOAT","count":500},{"type": "BOOL","count":500},{"type": "BIGINT","count":500},{"type": "NCHAR","len": 19,"count":300},{"type": "BINARY","len": 34,"count":290},{"type": "BINARY","len": 101,"count":1}],
"tags": [{"type": "INT", "count":3}, {"type": "NCHAR", "len": 10, "count":1}]
}]
}]
}
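A quick tally of the stb_mix column spec above (illustrative; it assumes taosdemo adds the ts column itself): 7*500 numeric columns + 300 nchar + 290 + 1 binary give 4091 data columns, so a child table such as stb_mix_0 is 4092 columns wide, and adding the 3 INT + 1 NCHAR tags reaches exactly the 4096 column+tag limit that the test below asserts via describe.

# illustrative count check for the stb_mix spec (not part of the tests)
data_cols = 7*500 + 300 + 290 + 1     # 4091 data columns
assert 1 + data_cols == 4092          # ts + data columns: child-table width
assert 1 + data_cols + 3 + 1 == 4096  # plus tags: super-table width / describe rows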

View File

@ -13,6 +13,7 @@
import sys
import os
import time
from util.log import *
from util.cases import *
from util.sql import *
@ -23,7 +24,7 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@ -39,7 +40,7 @@ class TDTestCase:
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
def run(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
@ -48,86 +49,124 @@ class TDTestCase:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
# insert: create one or multiple tables per sql and insert multiple rows per sql
# -N: regular table, -d: database name, -t: table num, -n: rows per table, -l: column num, -y: force
#regular old && new
startTime = time.time()
os.system("%staosdemo -N -d regular_old -t 1 -n 10 -l 1023 -y" % binPath)
tdSql.execute("use regular_old")
tdSql.query("show tables;")
tdSql.checkRows(1)
tdSql.query("select * from d0;")
tdSql.checkCols(1024)
tdSql.query("describe d0;")
tdSql.checkRows(1024)
os.system("%staosdemo -N -d regular_new -t 1 -n 10 -l 4095 -y" % binPath)
tdSql.execute("use regular_new")
tdSql.query("show tables;")
tdSql.checkRows(1)
tdSql.query("select * from d0;")
tdSql.checkCols(4096)
tdSql.query("describe d0;")
tdSql.checkRows(4096)
#super table -d:database name -t:table num -n:rows num per table -l:col num -y:force
os.system("%staosdemo -d super_old -t 1 -n 10 -l 1021 -y" % binPath)
tdSql.execute("use super_old")
tdSql.query("show tables;")
tdSql.checkRows(1)
tdSql.query("select * from meters;")
tdSql.checkCols(1024)
tdSql.query("select * from d0;")
tdSql.checkCols(1022)
tdSql.query("describe meters;")
tdSql.checkRows(1024)
tdSql.query("describe d0;")
tdSql.checkRows(1024)
os.system("%staosdemo -d super_new -t 1 -n 10 -l 4093 -y" % binPath)
tdSql.execute("use super_new")
tdSql.query("show tables;")
tdSql.checkRows(1)
tdSql.query("select * from meters;")
tdSql.checkCols(4096)
tdSql.query("select * from d0;")
tdSql.checkCols(4094)
tdSql.query("describe meters;")
tdSql.checkRows(4096)
tdSql.query("describe d0;")
tdSql.checkRows(4096)
tdSql.execute("create table stb_new1_1 using meters tags(1,2)")
tdSql.query("select * from stb_new1_1")
tdSql.checkCols(4094)
tdSql.query("describe stb_new1_1;")
tdSql.checkRows(4096)
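# illustrative relation between taosdemo's -l flag and the widths asserted
# above (derived from this test's own expected values, not from taosdemo
# documentation): a regular table gets l data columns plus ts, and a super
# table additionally carries taosdemo's 2 default tags
def expected_cols(l, super_table=False):
    return l + 1 + (2 if super_table else 0)
assert expected_cols(1023) == 1024          # regular_old d0
assert expected_cols(4095) == 4096          # regular_new d0
assert expected_cols(1021, True) == 1024    # super_old meters
assert expected_cols(4093, True) == 4096    # super_new meters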
# insert: create one or multiple tables per sql and insert multiple rows per sql
# test case for https://jira.taosdata.com:18080/browse/TD-5213
os.system("%staosdemo -f tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json -y " % binPath)
tdSql.execute("use db")
tdSql.execute("use json")
tdSql.query("select count (tbname) from stb_old")
tdSql.checkData(0, 0, 10)
tdSql.checkData(0, 0, 1)
# tdSql.query("select * from stb_old")
# tdSql.checkRows(10)
# tdSql.checkCols(1024)
tdSql.query("select * from stb_old")
tdSql.checkRows(10)
tdSql.checkCols(1024)
tdSql.query("select count (tbname) from stb_new")
tdSql.checkData(0, 0, 1)
# tdSql.query("select count (tbname) from stb_new")
# tdSql.checkData(0, 0, 10)
tdSql.query("select * from stb_new")
tdSql.checkRows(10)
tdSql.checkCols(4096)
tdSql.query("describe stb_new;")
tdSql.checkRows(4096)
tdSql.query("select * from stb_new_0")
tdSql.checkRows(10)
tdSql.checkCols(4091)
tdSql.query("describe stb_new_0;")
tdSql.checkRows(4096)
tdSql.execute("create table stb_new1_1 using stb_new tags(1,2,3,4,5)")
tdSql.query("select * from stb_new1_1")
tdSql.checkCols(4091)
tdSql.query("describe stb_new1_1;")
tdSql.checkRows(4096)
# tdSql.query("select * from stb_new")
# tdSql.checkRows(10)
# tdSql.checkCols(4096)
tdSql.query("select count (tbname) from stb_mix")
tdSql.checkData(0, 0, 1)
# tdLog.info("stop dnode to commit data to disk")
# tdDnodes.stop(1)
# tdDnodes.start(1)
tdSql.query("select * from stb_mix")
tdSql.checkRows(10)
tdSql.checkCols(4096)
tdSql.query("describe stb_mix;")
tdSql.checkRows(4096)
tdSql.query("select * from stb_mix_0")
tdSql.checkRows(10)
tdSql.checkCols(4092)
tdSql.query("describe stb_mix_0;")
tdSql.checkRows(4096)
#regular table
sql = "create table tb(ts timestamp, "
for i in range(1022):
sql += "c%d binary(14), " % (i + 1)
sql += "c1023 binary(22))"
tdSql.execute(sql)
for i in range(4):
sql = "insert into tb values(%d, "
for j in range(1022):
str = "'%s', " % self.get_random_string(14)
sql += str
sql += "'%s')" % self.get_random_string(22)
tdSql.execute(sql % (self.ts + i))
time.sleep(10)
tdSql.query("select count(*) from tb")
tdSql.checkData(0, 0, 4)
tdDnodes.stop(1)
tdDnodes.start(1)
time.sleep(1)
tdSql.query("select count(*) from tb")
tdSql.checkData(0, 0, 4)
sql = "create table tb1(ts timestamp, "
for i in range(4094):
sql += "c%d binary(14), " % (i + 1)
sql += "c4095 binary(22))"
tdSql.execute(sql)
for i in range(4):
sql = "insert into tb1 values(%d, "
for j in range(4094):
str = "'%s', " % self.get_random_string(14)
sql += str
sql += "'%s')" % self.get_random_string(22)
tdSql.execute(sql % (self.ts + i))
time.sleep(10)
tdSql.query("select count(*) from tb1")
tdSql.checkData(0, 0, 4)
tdDnodes.stop(1)
tdDnodes.start(1)
time.sleep(1)
tdSql.query("select count(*) from tb1")
tdSql.checkData(0, 0, 4)
#os.system("rm -rf tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py.sql")
tdSql.query("select count (tbname) from stb_excel")
tdSql.checkData(0, 0, 1)
tdSql.query("select * from stb_excel")
tdSql.checkRows(10)
tdSql.checkCols(4096)
tdSql.query("describe stb_excel;")
tdSql.checkRows(4096)
tdSql.query("select * from stb_excel_0")
tdSql.checkRows(10)
tdSql.checkCols(4092)
tdSql.query("describe stb_excel_0;")
tdSql.checkRows(4096)
endTime = time.time()
print("total time %ds" % (endTime - startTime))
os.system("rm -rf tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py.sql")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)

View File

@ -7,7 +7,7 @@
"password": "taosdata",
"confirm_parameter_prompt": "no",
"databases": "db",
"query_times":3,
"query_times": 3,
"specified_table_query": {
"query_interval": 0,
"concurrent": 1,
@ -34,4 +34,4 @@
]
}
}

View File

@ -7,7 +7,7 @@
"password": "taosdata",
"confirm_parameter_prompt": "no",
"databases": "db",
"query_times":3,
"query_times": 3,
"specified_table_query": {
"query_interval": 0,
"concurrent": 1,
@ -34,4 +34,4 @@
]
}
}

View File

@ -0,0 +1,362 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.ts = 1625068800000000000 # timestamp "2021-07-01 00:00:00" (UTC+8) in nanoseconds
self.numberOfTables = 10
self.numberOfRecords = 100
def checkCommunity(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
return False
else:
return True
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosdump" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def createdb(self, precision="ns"):
tb_nums = self.numberOfTables
per_tb_rows = self.numberOfRecords
def build_db(precision, start_time):
tdSql.execute("drop database if exists timedb1")
tdSql.execute(
"create database timedb1 days 10 keep 365 blocks 8 precision "+"\""+precision+"\"")
tdSql.execute("use timedb1")
tdSql.execute(
"create stable st(ts timestamp, c1 int, c2 nchar(10),c3 timestamp) tags(t1 int, t2 binary(10))")
for tb in range(tb_nums):
tbname = "t"+str(tb)
tdSql.execute("create table " + tbname +
" using st tags(1, 'beijing')")
sql = "insert into " + tbname + " values"
currts = start_time
if precision == "ns":
ts_seed = 1000000000
elif precision == "us":
ts_seed = 1000000
else:
ts_seed = 1000
for i in range(per_tb_rows):
sql += "(%d, %d, 'nchar%d',%d)" % (currts + i*ts_seed, i %
100, i % 100, currts + i*100) # currts +1000ms (1000000000ns)
tdSql.execute(sql)
if precision == "ns":
start_time = 1625068800000000000
build_db(precision, start_time)
elif precision == "us":
start_time = 1625068800000000
build_db(precision, start_time)
elif precision == "ms":
start_time = 1625068800000
build_db(precision, start_time)
else:
print("other time precision not valid , please check! ")
def run(self):
# clear envs
os.system("rm -rf ./taosdumptest/")
tdSql.execute("drop database if exists dumptmp1")
tdSql.execute("drop database if exists dumptmp2")
tdSql.execute("drop database if exists dumptmp3")
if not os.path.exists("./taosdumptest/tmp1"):
os.makedirs("./taosdumptest/dumptmp1")
else:
print("path exist!")
if not os.path.exists("./taosdumptest/dumptmp2"):
os.makedirs("./taosdumptest/dumptmp2")
if not os.path.exists("./taosdumptest/dumptmp3"):
os.makedirs("./taosdumptest/dumptmp3")
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosdump not found!")
else:
tdLog.info("taosdump found in %s" % buildPath)
binPath = buildPath + "/build/bin/"
# create nano second database
self.createdb(precision="ns")
# dump all data
os.system(
"%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath)
# dump part data with -S -E
os.system(
'%staosdump --databases timedb1 -S 1625068810000000000 -E 1625068860000000000 -C ns -o ./taosdumptest/dumptmp2 ' %
binPath)
os.system(
'%staosdump --databases timedb1 -S 1625068810000000000 -o ./taosdumptest/dumptmp3 ' %
binPath)
# rewrite the database name inside each dump so it can be restored as a separate database
os.system(
"sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`")
os.system(
"sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`")
os.system(
"sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`")
os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath)
os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath)
os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath)
# check row counts restored by taosdump -i
tdSql.query("select count(*) from dumptmp1.st")
tdSql.checkData(0,0,1000)
tdSql.query("select count(*) from dumptmp2.st")
tdSql.checkData(0,0,510)
tdSql.query("select count(*) from dumptmp3.st")
tdSql.checkData(0,0,900)
# check data
origin_res = tdSql.getResult("select * from timedb1.st")
dump_res = tdSql.getResult("select * from dumptmp1.st")
if origin_res == dump_res:
tdLog.info("test nano second : dump check data pass for all data!" )
else:
tdLog.info("test nano second : dump check data failed for all data!" )
origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000000 and ts <= 1625068860000000000")
dump_res = tdSql.getResult("select * from dumptmp2.st")
if origin_res == dump_res:
tdLog.info(" test nano second : dump check data pass for data! " )
else:
tdLog.info(" test nano second : dump check data failed for data !" )
origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000000 ")
dump_res = tdSql.getResult("select * from dumptmp3.st")
if origin_res == dump_res:
tdLog.info(" test nano second : dump check data pass for data! " )
else:
tdLog.info(" test nano second : dump check data failed for data !" )
# us second support test case
os.system("rm -rf ./taosdumptest/")
tdSql.execute("drop database if exists dumptmp1")
tdSql.execute("drop database if exists dumptmp2")
tdSql.execute("drop database if exists dumptmp3")
if not os.path.exists("./taosdumptest/tmp1"):
os.makedirs("./taosdumptest/dumptmp1")
else:
print("path exits!")
if not os.path.exists("./taosdumptest/dumptmp2"):
os.makedirs("./taosdumptest/dumptmp2")
if not os.path.exists("./taosdumptest/dumptmp3"):
os.makedirs("./taosdumptest/dumptmp3")
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosdump not found!")
else:
tdLog.info("taosdump found in %s" % buildPath)
binPath = buildPath + "/build/bin/"
self.createdb(precision="us")
os.system(
"%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath)
os.system(
'%staosdump --databases timedb1 -S 1625068810000000 -E 1625068860000000 -C us -o ./taosdumptest/dumptmp2 ' %
binPath)
os.system(
'%staosdump --databases timedb1 -S 1625068810000000 -o ./taosdumptest/dumptmp3 ' %
binPath)
os.system(
"sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`")
os.system(
"sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`")
os.system(
"sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`")
os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath)
os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath)
os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath)
tdSql.query("select count(*) from dumptmp1.st")
tdSql.checkData(0,0,1000)
tdSql.query("select count(*) from dumptmp2.st")
tdSql.checkData(0,0,510)
tdSql.query("select count(*) from dumptmp3.st")
tdSql.checkData(0,0,900)
origin_res = tdSql.getResult("select * from timedb1.st")
dump_res = tdSql.getResult("select * from dumptmp1.st")
if origin_res == dump_res:
tdLog.info("test us second : dump check data pass for all data!" )
else:
tdLog.info("test us second : dump check data failed for all data!" )
origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000 and ts <= 1625068860000000")
dump_res = tdSql.getResult("select * from dumptmp2.st")
if origin_res == dump_res:
tdLog.info(" test us second : dump check data pass for data! " )
else:
tdLog.info(" test us second : dump check data failed for data!" )
origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000 ")
dump_res = tdSql.getResult("select * from dumptmp3.st")
if origin_res == dump_res:
tdLog.info(" test us second : dump check data pass for data! " )
else:
tdLog.info(" test us second : dump check data failed for data! " )
# ms second support test case
os.system("rm -rf ./taosdumptest/")
tdSql.execute("drop database if exists dumptmp1")
tdSql.execute("drop database if exists dumptmp2")
tdSql.execute("drop database if exists dumptmp3")
if not os.path.exists("./taosdumptest/tmp1"):
os.makedirs("./taosdumptest/dumptmp1")
else:
print("path exits!")
if not os.path.exists("./taosdumptest/dumptmp2"):
os.makedirs("./taosdumptest/dumptmp2")
if not os.path.exists("./taosdumptest/dumptmp3"):
os.makedirs("./taosdumptest/dumptmp3")
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosdump not found!")
else:
tdLog.info("taosdump found in %s" % buildPath)
binPath = buildPath + "/build/bin/"
self.createdb(precision="ms")
os.system(
"%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath)
os.system(
'%staosdump --databases timedb1 -S 1625068810000 -E 1625068860000 -C ms -o ./taosdumptest/dumptmp2 ' %
binPath)
os.system(
'%staosdump --databases timedb1 -S 1625068810000 -o ./taosdumptest/dumptmp3 ' %
binPath)
os.system(
"sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`")
os.system(
"sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`")
os.system(
"sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`")
os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath)
os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath)
os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath)
tdSql.query("select count(*) from dumptmp1.st")
tdSql.checkData(0,0,1000)
tdSql.query("select count(*) from dumptmp2.st")
tdSql.checkData(0,0,510)
tdSql.query("select count(*) from dumptmp3.st")
tdSql.checkData(0,0,900)
origin_res = tdSql.getResult("select * from timedb1.st")
dump_res = tdSql.getResult("select * from dumptmp1.st")
if origin_res == dump_res:
tdLog.info("test ms second : dump check data pass for all data!" )
else:
tdLog.info("test ms second : dump check data failed for all data!" )
origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000 and ts <= 1625068860000")
dump_res = tdSql.getResult("select * from dumptmp2.st")
if origin_res == dump_res:
tdLog.info(" test ms second : dump check data pass for data! " )
else:
tdLog.info(" test ms second : dump check data failed for data!" )
origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000 ")
dump_res = tdSql.getResult("select * from dumptmp3.st")
if origin_res == dump_res:
tdLog.info(" test ms second : dump check data pass for data! " )
else:
tdLog.info(" test ms second : dump check data failed for data! " )
os.system("rm -rf ./taosdumptest/")
os.system("rm -rf ./dump_result.txt")
os.system("rm -rf *.py.sql")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -197,6 +197,19 @@ class TDSql:
self.checkRowCol(row, col)
return self.queryResult[row][col]
def getResult(self, sql):
self.sql = sql
try:
self.cursor.execute(sql)
self.queryResult = self.cursor.fetchall()
except Exception as e:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
raise Exception(repr(e))
return self.queryResult
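# typical use of getResult, as in the taosdump tests above: fetch two full
# result sets and compare them for equality, e.g.
#   origin_res = tdSql.getResult("select * from timedb1.st")
#   dump_res = tdSql.getResult("select * from dumptmp1.st")
#   assert origin_res == dump_res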
def executeTimes(self, sql, times):
for i in range(times):
try:
@ -282,4 +295,4 @@ class TDSql:
tdLog.info("dir: %s is created" %dir)
pass
tdSql = TDSql()
tdSql = TDSql()

View File

@ -14,7 +14,7 @@ while [ -n "$PID" ]; do
echo kill -9 $PID
pkill -9 taosd
echo "Killing processes locking on port 6030"
if [[ "$OS_TYPE" != "Darwin" ]]; then
if [ "$OS_TYPE" != "Darwin" ]; then
fuser -k -n tcp 6030
else
lsof -nti:6030 | xargs kill -9
@ -26,7 +26,7 @@ PID=`ps -ef|grep -w tarbitrator | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]; do
echo kill -9 $PID
pkill -9 tarbitrator
if [[ "$OS_TYPE" != "Darwin" ]]; then
if [ "$OS_TYPE" != "Darwin" ]; then
fuser -k -n tcp 6040
else
lsof -nti:6040 | xargs kill -9