commit 8f058c8571
Merge branch '3.0' of https://github.com/taosdata/TDengine into enh/tsdb_optimize

@@ -72,8 +72,8 @@ database_option: {
 - 0: The database can contain multiple supertables.
 - 1: The database can contain only one supertable.
 - STT_TRIGGER: specifies the number of file merges triggered by flushed files. The default is 8, ranging from 1 to 16. For high-frequency scenarios with few tables, it is recommended to use the default configuration or a smaller value for this parameter; For multi-table low-frequency scenarios, it is recommended to configure this parameter with a larger value.
-- TABLE_PREFIX:The prefix length in the table name that is ignored when distributing table to vnode based on table name.
+- TABLE_PREFIX: The prefix in the table name that is ignored when distributing a table to a vgroup when it's a positive number, or only the prefix is used when distributing a table to a vgroup, the default value is 0; For example, if the table name v30001, then "0001" is used if TSDB_PREFIX is set to 2 but "v3" is used if TSDB_PREFIX is set to -2; It can help you to control the distribution of tables.
-- TABLE_SUFFIX:The suffix length in the table name that is ignored when distributing table to vnode based on table name.
+- TABLE_SUFFIX: The suffix in the table name that is ignored when distributing a table to a vgroup when it's a positive number, or only the suffix is used when distributing a table to a vgroup, the default value is 0; For example, if the table name v30001, then "v300" is used if TSDB_SUFFIX is set to 2 but "01" is used if TSDB_SUFFIX is set to -2; It can help you to control the distribution of tables.
 - TSDB_PAGESIZE: The page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB.
 - WAL_RETENTION_PERIOD: specifies the maximum time of which WAL files are to be kept for consumption. This parameter is used for data subscription. Enter a time in seconds. The default value 0. A value of 0 indicates that WAL files are not required to keep for consumption. Alter it with a proper value at first to create topics.
 - WAL_RETENTION_SIZE: specifies the maximum total size of which WAL files are to be kept for consumption. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that the total size of WAL files to keep for consumption has no upper limit.

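The database options documented above can be exercised from the TDengine C client. A minimal sketch, assuming a server on localhost with the default root/taosdata credentials; the database name `demo` and the option values are illustrative only:

```c
/* Illustration only: create a database using the options documented above. */
#include <stdio.h>
#include <taos.h>

int main(void) {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (taos == NULL) {
    fprintf(stderr, "failed to connect: %s\n", taos_errstr(NULL));
    return 1;
  }
  /* Ignore a 2-character table-name prefix when assigning tables to vgroups
   * (e.g. "v30001" -> "0001"), and keep WAL files for one day so topics can
   * be created on this database. */
  TAOS_RES *res = taos_query(taos,
      "CREATE DATABASE demo TABLE_PREFIX 2 STT_TRIGGER 8 WAL_RETENTION_PERIOD 86400");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "create database failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  taos_close(taos);
  taos_cleanup();
  return 0;
}
```
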
@@ -8,7 +8,7 @@
 #include <sys/time.h>
 #include <taos.h>

-typedef int16_t VarDataLenT;
+typedef uint16_t VarDataLenT;

 #define TSDB_NCHAR_SIZE sizeof(int32_t)
 #define VARSTR_HEADER_SIZE sizeof(VarDataLenT)

@@ -6,7 +6,7 @@
 #include <string.h>
 #include <taos.h>

-typedef int16_t VarDataLenT;
+typedef uint16_t VarDataLenT;

 #define TSDB_NCHAR_SIZE sizeof(int32_t)
 #define VARSTR_HEADER_SIZE sizeof(VarDataLenT)

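The `VarDataLenT` widening in the two hunks above matters because every var-type value is stored as a small length header followed by its payload; with a signed 16-bit header the usable length tops out at 32767, while an unsigned one allows lengths up to 65535. A self-contained sketch of that layout (the macro definitions here are assumptions written for illustration, not copied from the tree):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint16_t VarDataLenT;

#define VARSTR_HEADER_SIZE sizeof(VarDataLenT)
#define varDataLen(v) (*(VarDataLenT *)(v))              /* length header      */
#define varDataVal(v) ((char *)(v) + VARSTR_HEADER_SIZE) /* payload pointer    */

int main(void) {
  char buf[VARSTR_HEADER_SIZE + 5];
  varDataLen(buf) = 5;                 /* write the 2-byte length header */
  memcpy(varDataVal(buf), "hello", 5); /* then the payload               */
  printf("len=%u max=%u\n", (unsigned)varDataLen(buf), (unsigned)UINT16_MAX);
  return 0;
}
```
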
@@ -217,7 +217,7 @@ gcc -g -O0 -fPIC -shared bit_and.c -o libbitand.so

 ### Sample C UDF code

-#### Scalar function example: [bit_and](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/bit_and.c)
+#### Scalar function example: [bit_and](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/bit_and.c)

 bit_and implements bitwise AND across multiple columns. If there is only one column, that column is returned. bit_and ignores NULL values.

@@ -230,7 +230,7 @@ bit_and implements bitwise AND across multiple columns.

 </details>

-#### Aggregate function example 1, returning a numeric value: [l2norm](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/l2norm.c)
+#### Aggregate function example 1, returning a numeric value: [l2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/l2norm.c)

 l2norm computes the L2 norm of all values in the input column: each value is squared, the squares are summed, and the square root of the sum is taken.

@@ -243,7 +243,7 @@ l2norm computes the L2 norm of all values in the input column.

 </details>

-#### Aggregate function example 2, returning a string value: [max_vol](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/max_vol.c)
+#### Aggregate function example 2, returning a string value: [max_vol](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/max_vol.c)

 max_vol finds the maximum voltage among multiple input voltage columns and returns a combined string value made up of the device ID + the (row, column) position of the maximum voltage + the maximum voltage value.

@@ -71,8 +71,8 @@ database_option: {
 - 0: multiple supertables can be created in the database.
 - 1: only one supertable can be created in the database.
 - STT_TRIGGER: the number of flushed files that triggers a file merge. The default is 1, and the range is 1 to 16. For high-frequency scenarios with few tables, the default or a smaller value is recommended; for multi-table, low-frequency scenarios, a larger value is recommended.
-- TABLE_PREFIX: the length of the prefix in the table name that the internal storage engine ignores when assigning the VNODE that stores the table's data based on the table name.
+- TABLE_PREFIX: when positive, a prefix of the given length in the table name is ignored when deciding which vgroup a table is assigned to; when negative, only a prefix of the given length is used for that decision. For example, if the table name is "v30001", "0001" is used when TSDB_PREFIX = 2, and "v3" is used when TSDB_PREFIX = -2.
-- TABLE_SUFFIX: the length of the suffix in the table name that the internal storage engine ignores when assigning the VNODE that stores the table's data based on the table name.
+- TABLE_SUFFIX: when positive, a suffix of the given length in the table name is ignored when deciding which vgroup a table is assigned to; when negative, only a suffix of the given length is used for that decision. For example, if the table name is "v30001", "v300" is used when TSDB_SUFFIX = 2, and "01" is used when TSDB_SUFFIX = -2.
 - TSDB_PAGESIZE: the page size of the time-series storage engine in a VNODE, in KB. The default is 4 KB; the range is 1 to 16384, i.e. 1 KB to 16 MB.
 - WAL_RETENTION_PERIOD: the maximum additional time for which WAL files are retained for data subscription. WAL cleanup is not affected by the consumption status of subscribing clients. The unit is seconds. The default is 0, meaning no extra retention for subscription. Set an appropriate retention period before creating topics.
 - WAL_RETENTION_SIZE: the maximum additional cumulative size of WAL files retained for data subscription, in KB. The default is 0, meaning there is no upper limit on the cumulative size.

@@ -102,7 +102,7 @@ typedef struct SResultDataInfo {
   int16_t precision;
   int16_t scale;
   int16_t type;
-  int16_t bytes;
+  uint16_t bytes;
   int32_t interBufSize;
 } SResultDataInfo;

@@ -114,7 +114,7 @@ STableDataCxt* smlInitTableDataCtx(SQuery* query, STableMeta* pTableMeta);

 int32_t smlBindData(SQuery* handle, bool dataFormat, SArray* tags, SArray* colsSchema, SArray* cols,
                     STableMeta* pTableMeta, char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl,
-                    char* msgBuf, int16_t msgBufLen);
+                    char* msgBuf, int32_t msgBufLen);
 int32_t smlBuildOutput(SQuery* handle, SHashObj* pVgHash);
 int rawBlockBindData(SQuery *query, STableMeta* pTableMeta, void* data, SVCreateTbReq* pCreateTb, TAOS_FIELD *fields, int numFields, bool needChangeLength);

@@ -232,13 +232,7 @@ typedef enum ELogicConditionType {
 #define TSDB_QUERY_ID_LEN 26
 #define TSDB_TRANS_OPER_LEN 16

-/**
- * In some scenarios uint16_t (0~65535) is used to store the row len.
- * - Firstly, we use 65531(65535 - 4), as the SDataRow/SKVRow contains 4 bits header.
- * - Secondly, if all cols are VarDataT type except primary key, we need 4 bits to store the offset, thus
- *   the final value is 65531-(4096-1)*4 = 49151.
- */
-#define TSDB_MAX_BYTES_PER_ROW 49151
+#define TSDB_MAX_BYTES_PER_ROW 65531  // 49151:65531
 #define TSDB_MAX_TAGS_LEN 16384
 #define TSDB_MAX_TAGS 128

@@ -410,9 +404,9 @@ typedef enum ELogicConditionType {
 #define TSDB_EXPLAIN_RESULT_ROW_SIZE (16 * 1024)
 #define TSDB_EXPLAIN_RESULT_COLUMN_NAME "QUERY_PLAN"

-#define TSDB_MAX_FIELD_LEN 16384
-#define TSDB_MAX_BINARY_LEN (TSDB_MAX_FIELD_LEN - TSDB_KEYSIZE)  // keep 16384
-#define TSDB_MAX_NCHAR_LEN (TSDB_MAX_FIELD_LEN - TSDB_KEYSIZE)  // keep 16384
+#define TSDB_MAX_FIELD_LEN 65519  // 16384:65519
+#define TSDB_MAX_BINARY_LEN TSDB_MAX_FIELD_LEN  // 16384-8:65519
+#define TSDB_MAX_NCHAR_LEN TSDB_MAX_FIELD_LEN  // 16384-8:65519
 #define PRIMARYKEY_TIMESTAMP_COL_ID 1
 #define COL_REACH_END(colId, maxColId) ((colId) > (maxColId))

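The deleted comment above derives the old 49151-byte row limit; the arithmetic behind both the old and the new limit can be checked directly. A small illustrative program (the macro names are made up; the constants come from the hunks above):

```c
/* Illustrative arithmetic for the limits changed above (not part of the diff). */
#include <assert.h>

#define MAX_U16          65535
#define ROW_HEADER_BYTES 4     /* SDataRow/SKVRow header                   */
#define MAX_COLUMNS      4096  /* TSDB_MAX_COLUMNS                         */
#define OFFSET_BYTES     4     /* per var-length column offset             */

int main(void) {
  /* Old limit: keep room for a 4-byte offset per non-key column. */
  static_assert(MAX_U16 - ROW_HEADER_BYTES == 65531, "row payload budget");
  static_assert(65531 - (MAX_COLUMNS - 1) * OFFSET_BYTES == 49151, "old TSDB_MAX_BYTES_PER_ROW");
  /* The new limit simply uses the full 65531-byte budget, relying on the
   * wider uint16_t VarDataLenT introduced earlier in this diff. */
  return 0;
}
```
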
@@ -361,7 +361,7 @@ void stopAllRequests(SHashObj* pRequests);

 // conn level
 int hbRegisterConn(SAppHbMgr* pAppHbMgr, int64_t tscRefId, int64_t clusterId, int8_t connType);
-void hbDeregisterConn(SAppHbMgr* pAppHbMgr, SClientHbKey connKey, void* param);
+void hbDeregisterConn(STscObj* pTscObj, SClientHbKey connKey);

 typedef struct SSqlCallbackWrapper {
   SParseContext* pParseCtx;

@@ -244,7 +244,7 @@ void destroyTscObj(void *pObj) {
   tscTrace("begin to destroy tscObj %" PRIx64 " p:%p", tscId, pTscObj);

   SClientHbKey connKey = {.tscRid = pTscObj->id, .connType = pTscObj->connType};
-  hbDeregisterConn(pTscObj->pAppInfo->pAppHbMgr, connKey, pTscObj->passInfo.fp);
+  hbDeregisterConn(pTscObj, connKey);

   destroyAllRequests(pTscObj->pRequests);
   taosHashCleanup(pTscObj->pRequests);

@@ -994,6 +994,7 @@ SAppHbMgr *appHbMgrInit(SAppInstInfo *pAppInstInfo, char *key) {
   // init stat
   pAppHbMgr->startTime = taosGetTimestampMs();
   pAppHbMgr->connKeyCnt = 0;
+  pAppHbMgr->passKeyCnt = 0;
   pAppHbMgr->reportCnt = 0;
   pAppHbMgr->reportBytes = 0;
   pAppHbMgr->key = taosStrdup(key);

@@ -1154,7 +1155,8 @@ int hbRegisterConn(SAppHbMgr *pAppHbMgr, int64_t tscRefId, int64_t clusterId, in
     }
   }
 }

-void hbDeregisterConn(SAppHbMgr *pAppHbMgr, SClientHbKey connKey, void *param) {
+void hbDeregisterConn(STscObj *pTscObj, SClientHbKey connKey) {
+  SAppHbMgr *pAppHbMgr = pTscObj->pAppInfo->pAppHbMgr;
   SClientHbReq *pReq = taosHashAcquire(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
   if (pReq) {
     tFreeClientHbReq(pReq);

@@ -1167,7 +1169,10 @@ void hbDeregisterConn(SAppHbMgr *pAppHbMgr, SClientHbKey connKey, void *param) {
   }

   atomic_sub_fetch_32(&pAppHbMgr->connKeyCnt, 1);
-  if (param) {
+  taosThreadMutexLock(&pTscObj->mutex);
+  if (pTscObj->passInfo.fp) {
     atomic_sub_fetch_32(&pAppHbMgr->passKeyCnt, 1);
   }
+  taosThreadMutexUnlock(&pTscObj->mutex);
 }

@@ -134,11 +134,15 @@ int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param, int type)

   switch (type) {
     case TAOS_NOTIFY_PASSVER: {
+      taosThreadMutexLock(&pObj->mutex);
+      if (fp && !pObj->passInfo.fp) {
+        atomic_add_fetch_32(&pObj->pAppInfo->pAppHbMgr->passKeyCnt, 1);
+      } else if (!fp && pObj->passInfo.fp) {
+        atomic_sub_fetch_32(&pObj->pAppInfo->pAppHbMgr->passKeyCnt, 1);
+      }
       pObj->passInfo.fp = fp;
       pObj->passInfo.param = param;
-      if (fp) {
-        atomic_add_fetch_32(&pObj->pAppInfo->pAppHbMgr->passKeyCnt, 1);
-      }
+      taosThreadMutexUnlock(&pObj->mutex);
       break;
     }
     default: {

@@ -558,15 +558,21 @@ static int32_t smlGenerateSchemaAction(SSchema *colField, SHashObj *colHash, SSm
   return 0;
 }

+#define BOUNDARY 1024
 static int32_t smlFindNearestPowerOf2(int32_t length, uint8_t type) {
   int32_t result = 1;
+  if (length >= BOUNDARY) {
+    result = length;
+  } else {
   while (result <= length) {
-    result *= 2;
+    result <<= 1;
   }
+  }

   if (type == TSDB_DATA_TYPE_BINARY && result > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) {
     result = TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE;
-  } else if (type == TSDB_DATA_TYPE_NCHAR && result > (TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
-    result = (TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
+  } else if (type == TSDB_DATA_TYPE_NCHAR && result > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
+    result = (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
   }

   if (type == TSDB_DATA_TYPE_NCHAR) {

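For clarity, the rounding rule introduced in `smlFindNearestPowerOf2` above can be restated on its own: lengths below the 1024-byte boundary are rounded up to the next power of two, while longer lengths are kept as-is before the type-specific caps apply. An illustrative, standalone restatement (not the function itself):

```c
/* Standalone restatement of the rounding rule added above, for illustration. */
#include <stdint.h>
#include <stdio.h>

static int32_t roundColumnLength(int32_t length) {
  const int32_t kBoundary = 1024; /* mirrors the BOUNDARY macro above */
  if (length >= kBoundary) return length;
  int32_t result = 1;
  while (result <= length) result <<= 1;
  return result;
}

int main(void) {
  int32_t samples[] = {5, 100, 1000, 2000};
  for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); ++i) {
    /* prints: 5 -> 8, 100 -> 128, 1000 -> 1024, 2000 -> 2000 */
    printf("%d -> %d\n", samples[i], roundColumnLength(samples[i]));
  }
  return 0;
}
```
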
@@ -649,6 +655,17 @@ static int32_t smlBuildFieldsList(SSmlHandle *info, SSchema *schemaField, SHashO
       field->bytes = getBytes(kv->type, kv->length);
     }
   }
+
+  int32_t maxLen = isTag ? TSDB_MAX_TAGS_LEN : TSDB_MAX_BYTES_PER_ROW;
+  int32_t len = 0;
+  for (int j = 0; j < taosArrayGetSize(results); ++j) {
+    SField *field = taosArrayGet(results, j);
+    len += field->bytes;
+  }
+  if (len > maxLen) {
+    return isTag ? TSDB_CODE_PAR_INVALID_TAGS_LENGTH : TSDB_CODE_PAR_INVALID_ROW_LENGTH;
+  }
+
   return TSDB_CODE_SUCCESS;
 }

@@ -781,11 +798,15 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
       code = smlBuildFieldsList(info, NULL, NULL, sTableData->tags, pTags, 0, true);
       if (code != TSDB_CODE_SUCCESS) {
         uError("SML:0x%" PRIx64 " smlBuildFieldsList tag1 failed. %s", info->id, pName.tname);
+        taosArrayDestroy(pColumns);
+        taosArrayDestroy(pTags);
         goto end;
       }
       code = smlBuildFieldsList(info, NULL, NULL, sTableData->cols, pColumns, 0, false);
       if (code != TSDB_CODE_SUCCESS) {
         uError("SML:0x%" PRIx64 " smlBuildFieldsList col1 failed. %s", info->id, pName.tname);
+        taosArrayDestroy(pColumns);
+        taosArrayDestroy(pTags);
         goto end;
       }
       code = smlSendMetaMsg(info, &pName, pColumns, pTags, NULL, SCHEMA_ACTION_CREATE_STABLE);

@@ -837,6 +858,23 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
                                 pTableMeta->tableInfo.numOfColumns, true);
       if (code != TSDB_CODE_SUCCESS) {
         uError("SML:0x%" PRIx64 " smlBuildFieldsList tag2 failed. %s", info->id, pName.tname);
+        taosArrayDestroy(pColumns);
+        taosArrayDestroy(pTags);
+        goto end;
+      }
+
+      if (taosArrayGetSize(pTags) + pTableMeta->tableInfo.numOfColumns > TSDB_MAX_COLUMNS) {
+        uError("SML:0x%" PRIx64 " too many columns than 4096", info->id);
+        code = TSDB_CODE_PAR_TOO_MANY_COLUMNS;
+        taosArrayDestroy(pColumns);
+        taosArrayDestroy(pTags);
+        goto end;
+      }
+      if (taosArrayGetSize(pTags) > TSDB_MAX_TAGS) {
+        uError("SML:0x%" PRIx64 " too many tags than 128", info->id);
+        code = TSDB_CODE_PAR_INVALID_TAGS_NUM;
+        taosArrayDestroy(pColumns);
+        taosArrayDestroy(pTags);
         goto end;
       }

@@ -891,6 +929,16 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
                                 pTableMeta->tableInfo.numOfColumns, false);
       if (code != TSDB_CODE_SUCCESS) {
         uError("SML:0x%" PRIx64 " smlBuildFieldsList col2 failed. %s", info->id, pName.tname);
+        taosArrayDestroy(pColumns);
+        taosArrayDestroy(pTags);
+        goto end;
+      }
+
+      if (taosArrayGetSize(pColumns) + pTableMeta->tableInfo.numOfTags > TSDB_MAX_COLUMNS) {
+        uError("SML:0x%" PRIx64 " too many columns than 4096", info->id);
+        code = TSDB_CODE_PAR_TOO_MANY_COLUMNS;
+        taosArrayDestroy(pColumns);
+        taosArrayDestroy(pTags);
         goto end;
       }

@@ -1498,8 +1546,11 @@ static int smlProcess(SSmlHandle *info, char *lines[], char *rawLine, char *rawL

   do {
     code = smlModifyDBSchemas(info);
-    if (code == 0) break;
-    taosMsleep(500);
+    if (code == 0 || code == TSDB_CODE_SML_INVALID_DATA || code == TSDB_CODE_PAR_TOO_MANY_COLUMNS ||
+        code == TSDB_CODE_PAR_INVALID_TAGS_NUM || code == TSDB_CODE_PAR_INVALID_ROW_LENGTH ||
+        code == TSDB_CODE_PAR_INVALID_TAGS_LENGTH)
+      break;
+    taosMsleep(100);
     uInfo("SML:0x%" PRIx64 " smlModifyDBSchemas retry code:%s, times:%d", info->id, tstrerror(code), retryNum);
   } while (retryNum++ < taosHashGetSize(info->superTables) * MAX_RETRY_TIMES);

@@ -575,7 +575,7 @@ static int32_t smlConvertJSONString(SSmlKv *pVal, char *typeStr, cJSON *value) {
     uError("OTD:invalid type(%s) for JSON String", typeStr);
     return TSDB_CODE_TSC_INVALID_JSON_TYPE;
   }
-  pVal->length = (int16_t)strlen(value->valuestring);
+  pVal->length = strlen(value->valuestring);

   if (pVal->type == TSDB_DATA_TYPE_BINARY && pVal->length > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) {
     return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;

@@ -236,7 +236,7 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
       PROCESS_SLASH(value, valueLen)
     }

-    if (unlikely(valueLen > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)) {
+    if (unlikely(valueLen > (TSDB_MAX_TAGS_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)) {
      return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
    }

@@ -158,7 +158,7 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS
      return TSDB_CODE_TSC_INVALID_VALUE;
    }

-    if (unlikely(valueLen > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)) {
+    if (unlikely(valueLen > (TSDB_MAX_TAGS_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)) {
      return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
    }

@@ -50,6 +50,8 @@ void *mndBuildCreateVnodeReq(SMnode *, SDnodeObj *pDnode, SDbObj *pDb, SVgObj *p
 void *mndBuildDropVnodeReq(SMnode *, SDnodeObj *pDnode, SDbObj *pDb, SVgObj *pVgroup, int32_t *pContLen);
 bool mndVgroupInDb(SVgObj *pVgroup, int64_t dbUid);

+int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgroup);
+
 #ifdef __cplusplus
 }
 #endif

@@ -556,7 +556,7 @@ RETRIEVE_FUNC_OVER:
   return code;
 }

-static void *mnodeGenTypeStr(char *buf, int32_t buflen, uint8_t type, int16_t len) {
+static void *mnodeGenTypeStr(char *buf, int32_t buflen, uint8_t type, int32_t len) {
   char *msg = "unknown";
   if (type >= sizeof(tDataTypes) / sizeof(tDataTypes[0])) {
     return msg;

@@ -2103,7 +2103,7 @@ static int32_t mndAddAdjustVnodeHashRangeAction(SMnode *pMnode, STrans *pTrans,
   return 0;
 }

-static int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgroup) {
+int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgroup) {
   int32_t code = -1;
   STrans *pTrans = NULL;
   SSdbRaw *pRaw = NULL;

@@ -2164,7 +2164,7 @@ static int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj
     mInfo("vgId:%d, vnode:%d dnode:%d", newVg2.vgId, i, newVg2.vnodeGid[i].dnodeId);
   }

-  // alter hash range
+  // alter vgId and hash range
   int32_t maxVgId = sdbGetMaxId(pMnode->pSdb, SDB_VGROUP);
   if (mndAddAlterVnodeHashRangeAction(pMnode, pTrans, &newVg1, maxVgId) != 0) goto _OVER;
   newVg1.vgId = maxVgId;

@@ -236,8 +236,6 @@ int32_t vnodeAlterHashRange(const char *srcPath, const char *dstPath, SAlterVnod
     return -1;
   }

-  // todo vnode compact here
-
   vInfo("vgId:%d, vnode hashrange is altered", info.config.vgId);
   return 0;
 }

@@ -425,7 +425,10 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp
       }
     } break;
     case TDMT_VND_ALTER_CONFIRM:
-      vnodeProcessAlterConfirmReq(pVnode, version, pReq, len, pRsp);
+      needCommit = pVnode->config.hashChange;
+      if (vnodeProcessAlterConfirmReq(pVnode, version, pReq, len, pRsp) < 0) {
+        goto _err;
+      }
       break;
     case TDMT_VND_ALTER_CONFIG:
       vnodeProcessAlterConfigReq(pVnode, version, pReq, len, pRsp);

@@ -1472,6 +1475,11 @@ static int32_t vnodeProcessAlterConfirmReq(SVnode *pVnode, int64_t version, void
   }

   code = vnodeConsolidateAlterHashRange(pVnode, version);
+  if (code < 0) {
+    vError("vgId:%d, failed to consolidate alter hashrange since %s. version:%" PRId64, TD_VID(pVnode), terrstr(),
+           version);
+    goto _exit;
+  }
   pVnode->config.hashChange = false;

 _exit:

@@ -1480,7 +1488,7 @@ _exit:
   pRsp->pCont = NULL;
   pRsp->contLen = 0;

-  return 0;
+  return code;
 }

 static int32_t vnodeProcessAlterConfigReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {

@@ -467,8 +467,8 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n
   getBufferPgSize(pAggSup->resultRowSize, &defaultPgsz, &defaultBufsz);

   if (!osTempSpaceAvailable()) {
-    code = TSDB_CODE_NO_AVAIL_DISK;
-    qError("Init stream agg supporter failed since %s, %s", terrstr(code), pKey);
+    code = TSDB_CODE_NO_DISKSPACE;
+    qError("Init stream agg supporter failed since %s, key:%s, tempDir:%s", terrstr(code), pKey, tsTempDir);
     return code;
   }

@@ -1114,7 +1114,7 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI

   bool masterScan = true;
   int32_t numOfOutput = pOperator->exprSupp.numOfExprs;
-  int16_t bytes = pStateColInfoData->info.bytes;
+  int32_t bytes = pStateColInfoData->info.bytes;

   SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pInfo->tsSlotId);
   TSKEY* tsList = (TSKEY*)pColInfoData->pData;

@@ -53,7 +53,7 @@ typedef int32_t (*__perc_hash_func_t)(struct tMemBucket *pBucket, const void *va
 typedef struct tMemBucket {
   int16_t numOfSlots;
   int16_t type;
-  int16_t bytes;
+  int32_t bytes;
   int32_t total;
   int32_t elemPerPage;  // number of elements for each object
   int32_t maxCapacity;  // maximum allowed number of elements that can be sort directly to get the result

@@ -67,7 +67,7 @@ typedef struct tMemBucket {
   SHashObj *groupPagesMap;  // disk page map for different groups;
 } tMemBucket;

-tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval, double maxval);
+tMemBucket *tMemBucketCreate(int32_t nElemSize, int16_t dataType, double minval, double maxval);

 void tMemBucketDestroy(tMemBucket *pBucket);

|
@ -200,7 +200,7 @@ typedef struct SSampleInfo {
|
||||||
int32_t totalPoints;
|
int32_t totalPoints;
|
||||||
int32_t numSampled;
|
int32_t numSampled;
|
||||||
uint8_t colType;
|
uint8_t colType;
|
||||||
int16_t colBytes;
|
uint16_t colBytes;
|
||||||
|
|
||||||
STuplePos nullTuplePos;
|
STuplePos nullTuplePos;
|
||||||
bool nullTupleSaved;
|
bool nullTupleSaved;
|
||||||
|
@ -220,7 +220,7 @@ typedef struct STailInfo {
|
||||||
int32_t numAdded;
|
int32_t numAdded;
|
||||||
int32_t offset;
|
int32_t offset;
|
||||||
uint8_t colType;
|
uint8_t colType;
|
||||||
int16_t colBytes;
|
uint16_t colBytes;
|
||||||
STailItem** pItems;
|
STailItem** pItems;
|
||||||
} STailInfo;
|
} STailInfo;
|
||||||
|
|
||||||
|
@ -233,7 +233,7 @@ typedef struct SUniqueItem {
|
||||||
typedef struct SUniqueInfo {
|
typedef struct SUniqueInfo {
|
||||||
int32_t numOfPoints;
|
int32_t numOfPoints;
|
||||||
uint8_t colType;
|
uint8_t colType;
|
||||||
int16_t colBytes;
|
uint16_t colBytes;
|
||||||
bool hasNull; // null is not hashable, handle separately
|
bool hasNull; // null is not hashable, handle separately
|
||||||
SHashObj* pHash;
|
SHashObj* pHash;
|
||||||
char pItems[];
|
char pItems[];
|
||||||
|
@ -247,7 +247,7 @@ typedef struct SModeItem {
|
||||||
|
|
||||||
typedef struct SModeInfo {
|
typedef struct SModeInfo {
|
||||||
uint8_t colType;
|
uint8_t colType;
|
||||||
int16_t colBytes;
|
uint16_t colBytes;
|
||||||
SHashObj* pHash;
|
SHashObj* pHash;
|
||||||
|
|
||||||
STuplePos nullTuplePos;
|
STuplePos nullTuplePos;
|
||||||
|
|
|
@@ -236,7 +236,7 @@ static void resetSlotInfo(tMemBucket *pBucket) {
   }
 }

-tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval, double maxval) {
+tMemBucket *tMemBucketCreate(int32_t nElemSize, int16_t dataType, double minval, double maxval) {
   tMemBucket *pBucket = (tMemBucket *)taosMemoryCalloc(1, sizeof(tMemBucket));
   if (pBucket == NULL) {
     return NULL;

@@ -1208,7 +1208,7 @@ SDataType createDataType(uint8_t type) {
 }

 SDataType createVarLenDataType(uint8_t type, const SToken* pLen) {
-  SDataType dt = {.type = type, .precision = 0, .scale = 0, .bytes = taosStr2Int16(pLen->z, NULL, 10)};
+  SDataType dt = {.type = type, .precision = 0, .scale = 0, .bytes = taosStr2Int32(pLen->z, NULL, 10)};
   return dt;
 }

@@ -250,7 +250,7 @@ end:

 int32_t smlBindData(SQuery* query, bool dataFormat, SArray* tags, SArray* colsSchema, SArray* cols,
                     STableMeta* pTableMeta, char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl,
-                    char* msgBuf, int16_t msgBufLen) {
+                    char* msgBuf, int32_t msgBufLen) {
   SMsgBuf pBuf = {.buf = msgBuf, .len = msgBufLen};

   SSchema* pTagsSchema = getTableTagSchema(pTableMeta);

@@ -4498,8 +4498,8 @@ static int32_t checkTableTagsSchema(STranslateContext* pCxt, SHashObj* pHash, SN
       code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG);
     }
     if (TSDB_CODE_SUCCESS == code) {
-      if ((TSDB_DATA_TYPE_VARCHAR == pTag->dataType.type && calcTypeBytes(pTag->dataType) > TSDB_MAX_BINARY_LEN) ||
-          (TSDB_DATA_TYPE_NCHAR == pTag->dataType.type && calcTypeBytes(pTag->dataType) > TSDB_MAX_NCHAR_LEN)) {
+      if ((TSDB_DATA_TYPE_VARCHAR == pTag->dataType.type && calcTypeBytes(pTag->dataType) > TSDB_MAX_TAGS_LEN) ||
+          (TSDB_DATA_TYPE_NCHAR == pTag->dataType.type && calcTypeBytes(pTag->dataType) > TSDB_MAX_TAGS_LEN)) {
         code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN);
       }
     }

@@ -5245,7 +5245,7 @@ static int32_t checkAlterSuperTableBySchema(STranslateContext* pCxt, SAlterTable
   }

   if (TSDB_ALTER_TABLE_UPDATE_TAG_BYTES == pStmt->alterType) {
-    if (calcTypeBytes(pStmt->dataType) > TSDB_MAX_FIELD_LEN) {
+    if (calcTypeBytes(pStmt->dataType) > TSDB_MAX_TAGS_LEN) {
      return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN);
    }

@@ -12,7 +12,7 @@ typedef double (*_double_fn)(double);
 typedef double (*_double_fn_2)(double, double);
 typedef int (*_conv_fn)(int);
 typedef void (*_trim_fn)(char *, char *, int32_t, int32_t);
-typedef int16_t (*_len_fn)(char *, int32_t);
+typedef uint16_t (*_len_fn)(char *, int32_t);

 /** Math functions **/
 static double tlog(double v) { return log(v); }

@@ -286,9 +286,9 @@ static int32_t doScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarP
 }

 /** String functions **/
-static int16_t tlength(char *input, int32_t type) { return varDataLen(input); }
+static VarDataLenT tlength(char *input, int32_t type) { return varDataLen(input); }

-static int16_t tcharlength(char *input, int32_t type) {
+static VarDataLenT tcharlength(char *input, int32_t type) {
   if (type == TSDB_DATA_TYPE_VARCHAR) {
     return varDataLen(input);
   } else {  // NCHAR

@@ -377,7 +377,7 @@ static int32_t doLengthFunction(SScalarParam *pInput, int32_t inputNum, SScalarP
   return TSDB_CODE_SUCCESS;
 }

-static int32_t concatCopyHelper(const char *input, char *output, bool hasNchar, int32_t type, int16_t *dataLen) {
+static int32_t concatCopyHelper(const char *input, char *output, bool hasNchar, int32_t type, VarDataLenT *dataLen) {
   if (hasNchar && type == TSDB_DATA_TYPE_VARCHAR) {
     TdUcs4 *newBuf = taosMemoryCalloc((varDataLen(input) + 1) * TSDB_NCHAR_SIZE, 1);
     int32_t len = varDataLen(input);

@@ -457,7 +457,7 @@ int32_t concatFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOu
       continue;
     }

-    int16_t dataLen = 0;
+    VarDataLenT dataLen = 0;
     for (int32_t i = 0; i < inputNum; ++i) {
       int32_t rowIdx = (pInput[i].numOfRows == 1) ? 0 : k;
       input[i] = colDataGetData(pInputData[i], rowIdx);

@@ -526,7 +526,7 @@ int32_t concatWsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *p
       continue;
     }

-    int16_t dataLen = 0;
+    VarDataLenT dataLen = 0;
     bool hasNull = false;
     for (int32_t i = 1; i < inputNum; ++i) {
       if (colDataIsNull_s(pInputData[i], k) || IS_NULL_TYPE(GET_PARAM_TYPE(&pInput[i]))) {

@@ -695,7 +695,7 @@ int32_t substrFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOu
 /** Conversion functions **/
 int32_t castFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
   int16_t inputType = GET_PARAM_TYPE(&pInput[0]);
-  int16_t inputLen = GET_PARAM_BYTES(&pInput[0]);
+  int32_t inputLen = GET_PARAM_BYTES(&pInput[0]);
   int16_t outputType = GET_PARAM_TYPE(&pOutput[0]);
   int64_t outputLen = GET_PARAM_BYTES(&pOutput[0]);

@@ -1814,6 +1814,11 @@ int tdbBtreeNext(SBTC *pBtc, void **ppKey, int *kLen, void **ppVal, int *vLen) {

     *ppVal = pVal;
     *vLen = cd.vLen;
+  } else {
+    if (TDB_CELLDECODER_FREE_VAL(&cd)) {
+      tdbTrace("tdb/btree-next2 decoder: %p pVal free: %p", &cd, cd.pVal);
+      tdbFree(cd.pVal);
+    }
   }

   ret = tdbBtcMoveToNext(pBtc);

@@ -1241,7 +1241,7 @@ int32_t taosArrayCompareString(const void *a, const void *b) {
 int32_t comparestrPatternMatch(const void *pLeft, const void *pRight) {
   SPatternCompareInfo pInfo = PATTERN_COMPARE_INFO_INITIALIZER;

-  ASSERT(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN);
+  ASSERT(varDataTLen(pRight) <= TSDB_MAX_FIELD_LEN);
   size_t pLen = varDataLen(pRight);
   size_t sz = varDataLen(pLeft);

@@ -338,6 +338,7 @@
 ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/delete_childtable.py
 ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/delete_normaltable.py
 ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/keep_expired.py
+,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/stmt_error.py
 ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/drop.py
 ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/drop.py -N 3 -M 3 -i False -n 3
 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/join2.py

@@ -33,8 +33,7 @@
 #define nUser    10
 #define USER_LEN 24

-void Test(TAOS *taos, char *qstr);
-void createUers(TAOS *taos, const char *host, char *qstr);
+void createUsers(TAOS *taos, const char *host, char *qstr);
 void passVerTestMulti(const char *host, char *qstr);

 int nPassVerNotified = 0;

@@ -98,14 +97,14 @@ int main(int argc, char *argv[]) {
     printf("failed to connect to server, reason:%s\n", "null taos" /*taos_errstr(taos)*/);
     exit(1);
   }
-  createUers(taos, argv[1], qstr);
+  createUsers(taos, argv[1], qstr);
   passVerTestMulti(argv[1], qstr);

   taos_close(taos);
   taos_cleanup();
 }

-void createUers(TAOS *taos, const char *host, char *qstr) {
+void createUsers(TAOS *taos, const char *host, char *qstr) {
   // users
   for (int i = 0; i < nUser; ++i) {
     sprintf(users[i], "user%d", i);

@@ -166,6 +166,61 @@ class TDTestCase:
             else:
                 tdLog.exit("error info is not true")
         tdSql.execute('drop database db')

+    def row_col_tag_maxlen_check(self):
+        tdSql.prepare()
+        tdSql.execute('use db')
+        tdSql.execute('create table if not exists stb1 (ts timestamp, c1 int,c2 binary(1000)) tags (city binary(16382))')
+        tdSql.error('create table if not exists stb1 (ts timestamp, c1 int,c2 binary(1000)) tags (city binary(16383))')
+        tdSql.execute('create table if not exists stb2 (ts timestamp, c0 tinyint, c1 int, c2 nchar(16379)) tags (city binary(16382))')
+        tdSql.error('create table if not exists stb2 (ts timestamp, c0 smallint, c1 int, c2 nchar(16379)) tags (city binary(16382))')
+        tdSql.execute('create table if not exists stb3 (ts timestamp, c1 int, c2 binary(65517)) tags (city binary(16382))')
+        tdSql.error('create table if not exists stb3 (ts timestamp, c0 bool, c1 int, c2 binary(65517)) tags (city binary(16382))')
+        # prepare the column and tag data
+        char100='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN0123456789'
+        tag_max_16382=''
+        binary_max_65517 = ''
+        nchar_max_16379=''
+        for num in range(163):
+            nchar_max_16379 += char100
+        for num in range(4):
+            binary_max_65517 += char100
+
+        nchar_max_16379 += '0123456789012345678901234567890123456789012345678901234567890123456789012345678'
+        tag_max_16382 = nchar_max_16379
+        tag_max_16382 += '9ab'
+
+        for num in range(3):
+            binary_max_65517 += char100
+        binary_max_65517 += '01234567890123456'
+
+        # insert/query and check
+        tdSql.execute(f"create table ct1 using stb1 tags('{tag_max_16382}')")
+        tdSql.execute(f"create table ct2 using stb2 tags('{tag_max_16382}')")
+        tdSql.execute(f"create table ct3 using stb3 tags('{tag_max_16382}')")
+        tdSql.execute(f"insert into ct1 values (now,1,'nchar_max_16379')")
+        tdSql.execute(f"insert into ct2 values (now,1,1,'{nchar_max_16379}')")
+        tdSql.execute(f"insert into ct3 values (now,1,'{binary_max_65517}')")
+
+        tdSql.query("select * from stb1")
+        tdSql.checkEqual(tdSql.queryResult[0][3],tag_max_16382)
+
+        tdSql.query("select * from ct2")
+        tdSql.checkEqual(tdSql.queryResult[0][3],nchar_max_16379)
+
+        tdSql.query("select * from stb2")
+        tdSql.checkEqual(tdSql.queryResult[0][3],nchar_max_16379)
+        tdSql.checkEqual(tdSql.queryResult[0][4],tag_max_16382)
+
+        tdSql.query("select * from ct3")
+        tdSql.checkEqual(tdSql.queryResult[0][2],binary_max_65517)
+
+        tdSql.query("select * from stb3")
+        tdSql.checkEqual(tdSql.queryResult[0][2],binary_max_65517)
+        tdSql.checkEqual(tdSql.queryResult[0][3],tag_max_16382)
+
+        tdSql.execute('drop database db')
+
     def run(self):
         self.dbname_length_check()
         self.tbname_length_check()

@@ -174,6 +229,7 @@ class TDTestCase:
         self.username_length_check()
         self.password_length_check()
         self.sql_length_check()
+        self.row_col_tag_maxlen_check()

     def stop(self):
         tdSql.close()

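The column widths used in the new test above line up exactly with the 65531-byte row limit introduced earlier in this commit, counting the 2-byte var-length header and 4 bytes per nchar character. A quick illustrative check of that arithmetic:

```c
/* Worked check (illustration only) of the row widths used in the test above. */
#include <assert.h>

int main(void) {
  /* stb2: ts(8) + tinyint(1) + int(4) + nchar(16379) */
  static_assert(8 + 1 + 4 + (16379 * 4 + 2) == 65531, "stb2 row width");
  /* stb3: ts(8) + int(4) + binary(65517) */
  static_assert(8 + 4 + (65517 + 2) == 65531, "stb3 row width");
  return 0;
}
```
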
@@ -439,7 +439,7 @@ class TDTestCase:
         for input_sql in [self.genLongSql(127, 1)[0], self.genLongSql(1, 4093)[0]]:
             tdCom.cleanTb(dbname="test")
             self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
-        for input_sql in [self.genLongSql(129, 1)[0], self.genLongSql(1, 4095)[0]]:
+        for input_sql in [self.genLongSql(128, 1)[0], self.genLongSql(1, 4094)[0]]:
             tdCom.cleanTb(dbname="test")
             try:
                 self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)

@@ -578,10 +578,16 @@ class TDTestCase:

         # binary
         stb_name = tdCom.getLongName(7, "letters")
-        input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16374, "letters")}" c0=f 1626006833639000000'
+        input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(4091, "letters")}" c0=f 1626006833639000000'
         self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)

-        input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16375, "letters")}" c0=f 1626006833639000000'
+        input_sql = f'{stb_name},t0="a",t1="{tdCom.getLongName(4088, "letters")}" c0=f 1626006833639000000'
+        try:
+            self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+        except SchemalessError as err:
+            tdSql.checkNotEqual(err.errno, 0)
+
+        input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(4092, "letters")}" c0=f 1626006833639000000'
         try:
             self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
         except SchemalessError as err:

@@ -590,10 +596,10 @@ class TDTestCase:
         # nchar
         # * legal nchar could not be larger than 16374/4
         stb_name = tdCom.getLongName(7, "letters")
-        input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4093, "letters")}" c0=f 1626006833639000000'
+        input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4090, "letters")}" c0=f 1626006833639000000'
         self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)

-        input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4094, "letters")}" c0=f 1626006833639000000'
+        input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4091, "letters")}" c0=f 1626006833639000000'
         try:
             self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
         except SchemalessError as err:

@@ -672,28 +678,34 @@ class TDTestCase:
             except SchemalessError as err:
                 tdSql.checkNotEqual(err.errno, 0)

-        # # # binary
-        # stb_name = tdCom.getLongName(7, "letters")
-        # input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16374, "letters")}" 1626006833639000000'
-        # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+        # binary
+        stb_name = tdCom.getLongName(7, "letters")
+        input_sql = f'{stb_name},t0=t c0=1i32,c1="{tdCom.getLongName(65517, "letters")}" 1626006833639000000'
+        self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)

-        # input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16375, "letters")}" 1626006833639000000'
-        # try:
-        #     self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
-        # except SchemalessError as err:
-        #     tdSql.checkNotEqual(err.errno, 0)
+        input_sql = f'{stb_name},t0=t c0=1i32,c1="{tdCom.getLongName(65517, "letters")},c2=f" 1626006833639000000'
+        try:
+            self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+        except SchemalessError as err:
+            tdSql.checkNotEqual(err.errno, 0)

-        # # nchar
-        # # * legal nchar could not be larger than 16374/4
-        # stb_name = tdCom.getLongName(7, "letters")
-        # input_sql = f'{stb_name},t0=t c0=f,c1=L"{tdCom.getLongName(4093, "letters")}" 1626006833639000000'
-        # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+        input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(65518, "letters")}" 1626006833639000000'
+        try:
+            self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+        except SchemalessError as err:
+            tdSql.checkNotEqual(err.errno, 0)

-        # input_sql = f'{stb_name},t0=t c0=f,c1=L"{tdCom.getLongName(4094, "letters")}" 1626006833639000000'
-        # try:
-        #     self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
-        # except SchemalessError as err:
-        #     tdSql.checkNotEqual(err.errno, 0)
+        # nchar
+        # * legal nchar could not be larger than 16374/4
+        stb_name = tdCom.getLongName(7, "letters")
+        input_sql = f'{stb_name},t0=t c0=1i32,c1=L"{tdCom.getLongName(16379, "letters")}",c2=f 1626006833639000000'
+        self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+
+        input_sql = f'{stb_name},t0=t c0=1i32,c1=L"{tdCom.getLongName(16380, "letters")}",c2=1i16 1626006833639000000'
+        try:
+            self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+        except SchemalessError as err:
+            tdSql.checkNotEqual(err.errno, 0)

     def tagColIllegalValueCheckCase(self):

|
||||||
tdSql.checkRows(2)
|
tdSql.checkRows(2)
|
||||||
tdSql.checkNotEqual(tb_name1, tb_name3)
|
tdSql.checkNotEqual(tb_name1, tb_name3)
|
||||||
|
|
||||||
# * tag binary max is 16384, col+ts binary max 49151
|
# * tag binary max is 16384-2, col+ts binary max 65531
|
||||||
def tagColBinaryMaxLengthCheckCase(self):
|
def tagColBinaryMaxLengthCheckCase(self):
|
||||||
"""
|
"""
|
||||||
every binary and nchar must be length+2
|
every binary and nchar must be length+2
|
||||||
|
@@ -896,35 +908,59 @@ class TDTestCase:
        self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)

        # * every binary and nchar must be length+2, so here is two tag, max length could not larger than 16384-2*2
-        input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16374, "letters")}",t2="{tdCom.getLongName(5, "letters")}" c0=f 1626006833639000000'
+        stb_name = tdCom.getLongName(8, "letters")
+        input_sql = f'{stb_name},t0=f,t1="{tdCom.getLongName(4091, "letters")}", c0=f 1626006833639000000'
        self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)

        tdSql.query(f"select * from {stb_name}")
-        tdSql.checkRows(2)
-        input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16374, "letters")}",t2="{tdCom.getLongName(6, "letters")}" c0=f 1626006833639000000'
+        tdSql.checkRows(1)
+
+        input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(4092, "letters")}", c0=f 1626006833639000000'
        try:
            self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
            raise Exception("should not reach here")
        except SchemalessError as err:
            tdSql.checkNotEqual(err.errno, 0)
        tdSql.query(f"select * from {stb_name}")
-        tdSql.checkRows(2)
+        tdSql.checkRows(1)


+        stb_name = tdCom.getLongName(9, "letters")
        # # * check col,col+ts max in describe ---> 16143
-        input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16374, "letters")}",c2="{tdCom.getLongName(16374, "letters")}",c3="{tdCom.getLongName(16374, "letters")}",c4="{tdCom.getLongName(12, "letters")}" 1626006833639000000'
+        input_sql = f'{stb_name},t0=t c0=1i32,c1="{tdCom.getLongName(65517, "letters")}" 1626006833639000000'
        self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)

+        input_sql = f'{stb_name},t0=t c0=1i32,c1="{tdCom.getLongName(65517, "letters")}",c2=f 1626006833639000000'
+        try:
+            self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+        except SchemalessError as err:
+            tdSql.checkNotEqual(err.errno, 0)
+
        tdSql.query(f"select * from {stb_name}")
-        tdSql.checkRows(3)
+        tdSql.checkRows(1)


+        stb_name = tdCom.getLongName(10, "letters")
+        input_sql = f'{stb_name},t0=t c0=1i16,c1="{tdCom.getLongName(49133, "letters")}",c2="{tdCom.getLongName(16384, "letters")}" 1626006833639000000'
+        self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+
+        input_sql = f'{stb_name},t0=t c0=1i16,c1="{tdCom.getLongName(49133, "letters")}",c2="{tdCom.getLongName(16384, "letters")},c3=t" 1626006833639000000'
+        try:
+            self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+        except SchemalessError as err:
+            tdSql.checkNotEqual(err.errno, 0)
+
+        tdSql.query(f"select * from {stb_name}")
+        tdSql.checkRows(1)
+
        input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16374, "letters")}",c2="{tdCom.getLongName(16374, "letters")}",c3="{tdCom.getLongName(16374, "letters")}",c4="{tdCom.getLongName(13, "letters")}" 1626006833639000000'
        try:
            self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
        except SchemalessError as err:
            tdSql.checkNotEqual(err.errno, 0)
-        tdSql.query(f"select * from {stb_name}")
-        tdSql.checkRows(3)

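The boundary values in this case follow from a per-row byte budget: each BINARY or NCHAR cell is stored as its payload plus a 2-byte length header, an NCHAR character occupies 4 bytes, and the timestamp and fixed-width columns count against the same limit. A back-of-the-envelope check of the arithmetic, assuming the 65531-byte column-plus-timestamp budget the updated comments cite (an illustration only, no API calls):

VAR_HEADER = 2     # every binary/nchar value carries a 2-byte length header
TS_BYTES = 8       # timestamp column
INT_BYTES = 4      # c0 declared as i32
NCHAR_SIZE = 4     # bytes per nchar character

def binary_row(chars):
    # ts + int column + one binary column
    return TS_BYTES + INT_BYTES + (chars + VAR_HEADER)

def nchar_row(chars):
    # ts + int column + one nchar column
    return TS_BYTES + INT_BYTES + (chars * NCHAR_SIZE + VAR_HEADER)

print(binary_row(65517))      # 65531, exactly at the budget: accepted above
print(binary_row(65517) + 1)  # one extra byte (the added bool c2) overflows: rejected
print(nchar_row(16379))       # 65530: accepted in the earlier nchar column case
print(nchar_row(16380))       # 65534: rejected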
-    # * tag nchar max is 16374/4, col+ts nchar max 49151
+    # * tag nchar max is (16384-2)/4, col+ts nchar max 65531
    def tagColNcharMaxLengthCheckCase(self):
        """
            check nchar length limit
@@ -935,30 +971,31 @@ class TDTestCase:
        input_sql = f'{stb_name},id="{tb_name}",t0=t c0=f 1626006833639000000'
        code = self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)

-        # * legal nchar could not be larger than 16374/4
-        input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4093, "letters")}",t2=L"{tdCom.getLongName(1, "letters")}" c0=f 1626006833639000000'
-        self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
-        tdSql.query(f"select * from {stb_name}")
-        tdSql.checkRows(2)
-        input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4093, "letters")}",t2=L"{tdCom.getLongName(2, "letters")}" c0=f 1626006833639000000'
-        try:
-            self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
-        except SchemalessError as err:
-            tdSql.checkNotEqual(err.errno, 0)
-        tdSql.query(f"select * from {stb_name}")
-        tdSql.checkRows(2)
+        # * legal tag nchar could not be larger than (16384-2)/4
+        # input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4093, "letters")}",t2=L"{tdCom.getLongName(1, "letters")}" c0=f 1626006833639000000'
+        # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+        # tdSql.query(f"select * from {stb_name}")
+        # tdSql.checkRows(2)
+        # input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4093, "letters")}",t2=L"{tdCom.getLongName(2, "letters")}" c0=f 1626006833639000000'
+        # try:
+        #     self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+        # except SchemalessError as err:
+        #     tdSql.checkNotEqual(err.errno, 0)
+        # tdSql.query(f"select * from {stb_name}")
+        # tdSql.checkRows(2)

        input_sql = f'{stb_name},t0=t c0=f,c1=L"{tdCom.getLongName(4093, "letters")}",c2=L"{tdCom.getLongName(4093, "letters")}",c3=L"{tdCom.getLongName(4093, "letters")}",c4=L"{tdCom.getLongName(4, "letters")}" 1626006833639000000'
        self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
        tdSql.query(f"select * from {stb_name}")
-        tdSql.checkRows(3)
+        tdSql.checkRows(2)

        input_sql = f'{stb_name},t0=t c0=f,c1=L"{tdCom.getLongName(4093, "letters")}",c2=L"{tdCom.getLongName(4093, "letters")}",c3=L"{tdCom.getLongName(4093, "letters")}",c4=L"{tdCom.getLongName(5, "letters")}" 1626006833639000000'
        try:
            self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
        except SchemalessError as err:
            tdSql.checkNotEqual(err.errno, 0)
        tdSql.query(f"select * from {stb_name}")
-        tdSql.checkRows(3)
+        tdSql.checkRows(2)

    def batchInsertCheckCase(self):
        """
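The tag-side limit quoted in the updated comment works out the same way: an NCHAR tag value is budgeted at 16384 bytes minus its 2-byte length header, at 4 bytes per character, so the largest legal tag NCHAR is (16384-2)//4 characters. A one-line check of that arithmetic:

print((16384 - 2) // 4)  # 4095 characters, matching the "(16384-2)/4" comment above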
@@ -1274,13 +1311,13 @@ class TDTestCase:
        self.idSeqCheckCase()
        self.idUpperCheckCase()
        self.noIdCheckCase()
-        # self.maxColTagCheckCase()
+        self.maxColTagCheckCase()
        self.idIllegalNameCheckCase()
        self.idStartWithNumCheckCase()
        self.nowTsCheckCase()
        self.dateFormatTsCheckCase()
        self.illegalTsCheckCase()
-        # self.tagValueLengthCheckCase()
+        self.tagValueLengthCheckCase()
        self.colValueLengthCheckCase()
        self.tagColIllegalValueCheckCase()
        self.duplicateIdTagColInsertCheckCase()
@@ -1290,8 +1327,8 @@ class TDTestCase:
        self.tagColAddDupIDCheckCase()
        self.tagColAddCheckCase()
        self.tagMd5Check()
-        # self.tagColBinaryMaxLengthCheckCase()
-        # self.tagColNcharMaxLengthCheckCase()
+        self.tagColBinaryMaxLengthCheckCase()
+        self.tagColNcharMaxLengthCheckCase()
        self.batchInsertCheckCase()
        self.multiInsertCheckCase(10)
        self.batchErrorInsertCheckCase()

@@ -0,0 +1,224 @@
# encoding:UTF-8
from taos import *

from ctypes import *
from datetime import datetime
import taos
import time

from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *

class TDTestCase:
    def __init__(self):
        self.err_case = 0
        self.current_case = 0

    def caseDescription(self):
        '''
        case1 <wenzhouwww>: [TD-11899] : this is a test case to check error handling in stmt (parameter binding) usage.
        '''
        return

    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def conn(self):
        # type: () -> taos.TaosConnection
        return connect()
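The two test methods that follow exercise the connector's parameter-binding flow end to end: prepare a statement with placeholders, allocate bind parameters, bind, execute, and inspect affected_rows. A compressed sketch of that flow using the same taos calls, with a hypothetical demo database and table and assuming a reachable server:

import taos
from taos import new_bind_params, PrecisionEnum

conn = taos.connect()
conn.execute("create database if not exists demo")
conn.select_db("demo")
conn.execute("create table if not exists t (ts timestamp, v int)")

stmt = conn.statement("insert into t values(?, ?)")   # prepare with placeholders
params = new_bind_params(2)                           # one bind slot per placeholder
params[0].timestamp(1626861392589, PrecisionEnum.Milliseconds)
params[1].int(42)
stmt.bind_param(params)
stmt.execute()
assert stmt.affected_rows == 1
stmt.close()
conn.close()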
    def test_stmt_insert(self, conn):
        # type: (TaosConnection) -> None

        dbname = "pytest_taos_stmt"
        try:
            conn.execute("drop database if exists %s" % dbname)
            conn.execute("create database if not exists %s" % dbname)
            conn.select_db(dbname)

            conn.execute(
                "create table if not exists log(ts timestamp, bo bool, nil tinyint, ti tinyint, si smallint, ii int,\
                bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \
                ff float, dd double, bb binary(65059), nn nchar(100), tt timestamp)",
            )
            conn.load_table_info("log")

            stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
            params = new_bind_params(16)
            params[0].timestamp(1626861392589, PrecisionEnum.Milliseconds)
            params[1].bool(True)
            params[2].tinyint(None)
            params[3].tinyint(2)
            params[4].smallint(3)
            params[5].int(4)
            params[6].bigint(5)
            params[7].tinyint_unsigned(6)
            params[8].smallint_unsigned(7)
            params[9].int_unsigned(8)
            params[10].bigint_unsigned(9)
            params[11].float(10.1)
            params[12].double(10.11)
            binaryStr = '123456789'
            for i in range(1301):
                binaryStr += "1234567890abcdefghij1234567890abcdefghij12345hello"
            params[13].binary(binaryStr)
            params[14].nchar("stmt")
            params[15].timestamp(1626861392589, PrecisionEnum.Milliseconds)

            stmt.bind_param(params)
            stmt.execute()

            assert stmt.affected_rows == 1
            stmt.close()

            querystmt = conn.statement("select ?, bo, nil, ti, si, ii,bi, tu, su, iu, bu, ff, dd, bb, nn, tt from log")
            queryparam = new_bind_params(1)
            print(type(queryparam))
            queryparam[0].binary("ts")
            querystmt.bind_param(queryparam)
            querystmt.execute()
            result = querystmt.use_result()

            row = result.fetch_all()
            print(row)

            assert row[0][1] == True
            assert row[0][2] == None
            for i in range(3, 10):
                assert row[0][i] == i - 1
            # float == may not work as expected
            # assert row[0][11] == c_float(10.1)
            assert row[0][12] == 10.11
            assert row[0][13][65054:] == "hello"
            assert row[0][14] == "stmt"

            conn.execute("drop database if exists %s" % dbname)
            conn.close()

        except Exception as err:
            conn.execute("drop database if exists %s" % dbname)
            conn.close()
            raise err
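The float comment in the assertions above is the usual binary floating-point caveat: 10.1 has no exact 32-bit representation, so exact comparison against the fetched FLOAT column is unreliable. If that column were to be asserted on, a tolerance-based check is the safer pattern (illustrative only, not part of the committed test):

import math
from ctypes import c_float

fetched = c_float(10.1).value                       # what a FLOAT round-trip returns: 10.100000381469727
assert fetched != 10.1                              # exact equality fails, as the comment warns
assert math.isclose(fetched, 10.1, rel_tol=1e-6)    # tolerance-based comparison passes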
    def test_stmt_insert_error(self, conn):
        # type: (TaosConnection) -> None

        dbname = "pytest_taos_stmt_error"
        try:
            conn.execute("drop database if exists %s" % dbname)
            conn.execute("create database if not exists %s" % dbname)
            conn.select_db(dbname)

            conn.execute(
                "create table if not exists log(ts timestamp, bo bool, nil tinyint, ti tinyint, si smallint, ii int,\
                bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \
                ff float, dd double, bb binary(100), nn nchar(100), tt timestamp , error_data int )",
            )
            conn.load_table_info("log")

            stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,1000)")
            params = new_bind_params(16)
            params[0].timestamp(1626861392589, PrecisionEnum.Milliseconds)
            params[1].bool(True)
            params[2].tinyint(None)
            params[3].tinyint(2)
            params[4].smallint(3)
            params[5].int(4)
            params[6].bigint(5)
            params[7].tinyint_unsigned(6)
            params[8].smallint_unsigned(7)
            params[9].int_unsigned(8)
            params[10].bigint_unsigned(9)
            params[11].float(10.1)
            params[12].double(10.11)
            params[13].binary("hello")
            params[14].nchar("stmt")
            params[15].timestamp(1626861392589, PrecisionEnum.Milliseconds)

            stmt.bind_param(params)
            stmt.execute()

            conn.close()

        except Exception as err:
            conn.execute("drop database if exists %s" % dbname)
            conn.close()
            raise err
    def test_stmt_insert_error_null_timestamp(self, conn):

        dbname = "pytest_taos_stmt_error_null_ts"
        try:
            conn.execute("drop database if exists %s" % dbname)
            conn.execute("create database if not exists %s" % dbname)
            conn.execute("alter database %s keep 36500" % dbname)
            conn.select_db(dbname)

            conn.execute("create stable STB(ts timestamp, n int) tags(b int)")

            stmt = conn.statement("insert into ? using STB tags(?) values(?, ?)")
            params = new_bind_params(1)
            params[0].int(4)
            stmt.set_tbname_tags("ct", params)

            multi_params = new_multi_binds(2)
            multi_params[0].timestamp([9223372036854775808])
            multi_params[1].int([123])
            stmt.bind_param_batch(multi_params)

            stmt.execute()
            result = stmt.use_result()

            result.close()
            stmt.close()

            stmt = conn.statement("select * from STB")
            stmt.execute()
            result = stmt.use_result()
            print(result.affected_rows)
            row = result.next()
            print(row)

            result.close()
            stmt.close()
            conn.close()

        except Exception as err:
            conn.close()
            raise err
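The timestamp bound above, 9223372036854775808, is 2**63, one past the largest signed 64-bit integer; TDengine stores timestamps as signed 64-bit values, so the server rejects it with [0x060b]: Timestamp data out of range, which is exactly the error run() expects below. A quick check of the arithmetic:

INT64_MAX = 2**63 - 1
bad_ts = 9223372036854775808      # the value bound in multi_params[0] above
assert bad_ts == INT64_MAX + 1    # one past the signed 64-bit range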
    def run(self):

        self.test_stmt_insert(self.conn())
        try:
            self.test_stmt_insert_error(self.conn())
        except Exception as error:

            if str(error) == '[0x0200]: no mix usage for ? and values':
                tdLog.info('=========stmt error occurred for binding part of the columns ==============')
            else:
                tdLog.exit("expected error(%s) did not occur" % str(error))

        try:
            self.test_stmt_insert_error_null_timestamp(self.conn())
            tdLog.exit("expected error did not occur - 1")
        except Exception as error:
            if str(error) == '[0x060b]: Timestamp data out of range':
                tdLog.info('=========stmt error occurred for binding part of the columns (NULL timestamp) ==============')
            else:
                tdLog.exit("expected error(%s) did not occur - 2" % str(error))

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@@ -287,6 +287,7 @@ python3 ./test.py -f 1-insert/tb_100w_data_order.py -P
 python3 ./test.py -f 1-insert/delete_childtable.py -P
 python3 ./test.py -f 1-insert/delete_normaltable.py -P
 python3 ./test.py -f 1-insert/keep_expired.py -P
+python3 ./test.py -f 1-insert/stmt_error.py -P
 python3 ./test.py -f 1-insert/drop.py -P
 python3 ./test.py -f 2-query/join2.py -P
 python3 ./test.py -f 2-query/union1.py -P
@@ -218,6 +218,7 @@ python3 ./test.py -f 1-insert/delete_stable.py
 python3 ./test.py -f 1-insert/delete_childtable.py
 python3 ./test.py -f 1-insert/delete_normaltable.py
 python3 ./test.py -f 1-insert/keep_expired.py
+python3 ./test.py -f 1-insert/stmt_error.py
 python3 ./test.py -f 1-insert/drop.py
 python3 ./test.py -f 1-insert/drop.py -N 3 -M 3 -i False -n 3
 python3 ./test.py -f 2-query/join2.py
@@ -712,7 +712,7 @@ int32_t shellCalcColWidth(TAOS_FIELD *field, int32_t precision) {

    case TSDB_DATA_TYPE_NCHAR:
    case TSDB_DATA_TYPE_JSON: {
-      int16_t bytes = field->bytes * TSDB_NCHAR_SIZE;
+      uint16_t bytes = field->bytes * TSDB_NCHAR_SIZE;
      if (bytes > shell.args.displayWidth) {
        return TMAX(shell.args.displayWidth, width);
      } else {
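The one-line C change above widens a counter that can overflow: an NCHAR or JSON field wide enough that field->bytes * TSDB_NCHAR_SIZE exceeds 32767 wraps negative in an int16_t, so the bytes > shell.args.displayWidth branch is never taken. A small ctypes illustration of the wrap-around (the column width is hypothetical; the factor of 4 mirrors TSDB_NCHAR_SIZE):

from ctypes import c_int16, c_uint16

field_bytes = 10000              # hypothetical nchar column width, in characters
product = field_bytes * 4        # field->bytes * TSDB_NCHAR_SIZE

print(c_int16(product).value)    # -25536: wraps negative, comparison against displayWidth misbehaves
print(c_uint16(product).value)   # 40000: stays in range with the widened type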