Merge branch '3.0' into case/TD_28027_3.0_New

menshibin 2024-01-22 14:52:05 +08:00
commit b1b4ca6a6f
21 changed files with 588 additions and 67 deletions

View File

@@ -1,6 +1,6 @@
# openssl
ExternalProject_Add(openssl
-URL https://www.openssl.org/source/openssl-3.1.3.tar.gz
+URL https://github.com/openssl/openssl/releases/download/openssl-3.1.3/openssl-3.1.3.tar.gz
URL_HASH SHA256=f0316a2ebd89e7f2352976445458689f80302093788c466692fb2a188b2eacf6
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"

View File

@@ -67,7 +67,7 @@ If a stream is created with PARTITION BY clause and SUBTABLE clause, the name of
CREATE STREAM avg_vol_s INTO avg_vol SUBTABLE(CONCAT('new-', tname)) AS SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname tname INTERVAL(1m);
```
-IN PARTITION clause, 'tbname', representing each subtable name of source supertable, is given alias 'tname'. And 'tname' is used in SUBTABLE clause. In SUBTABLE clause, each auto created subtable will concat 'new-' and source subtable name as their name. Other expressions are also allowed in SUBTABLE clause, but the output type must be varchar.
+In the PARTITION BY clause, 'tbname', representing the subtable name of the source supertable, is given the alias 'tname', and 'tname' is then used in the SUBTABLE clause: each auto-created subtable concatenates 'new-' with the source subtable name as its name (starting from 3.2.3.0, to keep SUBTABLE expressions that cannot distinguish between subtables from colliding, '_groupId' is appended to the end of the subtable name).
If the output length exceeds the TDengine limit (192), the name is truncated. If the generated name is already taken by another table, creating the new subtable and writing data to it will fail.
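To make the naming rule above concrete, here is a minimal illustrative sketch (not TDengine's actual implementation): it assumes the appended suffix is simply an underscore plus the numeric group id of the partition, and applies the 192-character truncation described above.

```python
# Illustrative sketch of the subtable-naming rule described above.
# The exact "_groupId" suffix format is an assumption, not taken from the source.
TABLE_NAME_LIMIT = 192  # the TDengine limit mentioned above

def stream_subtable_name(subtable_expr_value: str, group_id: int) -> str:
    name = f"{subtable_expr_value}_{group_id}"   # e.g. "new-d1001" -> "new-d1001_1"
    return name[:TABLE_NAME_LIMIT]               # longer names are truncated

print(stream_subtable_name("new-d1001", 1))
```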

View File

@@ -84,18 +84,22 @@ Schemaless writes process row data according to the following principles.
1. You can use the following rules to generate the subtable names: first, combine the measurement name and the tag keys and values into the following string:
```json
"measurement,tag_key1=tag_value1,tag_key2=tag_value2"
```
:::tip
Note that tag_key1, tag_key2 are not the original order of the tags entered by the user but the result of sorting the tag names in ascending string order. Therefore, tag_key1 is not the first tag entered in the line protocol.
After the sorting is completed, the string's MD5 hash value "md5_val" is calculated and the table name is generated as "t_md5_val", where "t\_" is a fixed prefix that every table generated by this mapping relationship has.
:::
-If you do not want to use an automatically generated table name, there are two ways to specify sub table names, the first one has a higher priority.
-You can configure smlAutoChildTableNameDelimiter in taos.cfg(except for `@ # space \r \t \n`), for example, `smlAutoChildTableNameDelimiter=tname`. You can insert `st,t0=cpul,t1=4 c1=3 1626006833639000000` and the table name will be cpu1-4.
-You can configure smlChildTableName in taos.cfg to specify table names, for example, `smlChildTableName=tname`. You can insert `st,tname=cpul,t1=4 c1=3 1626006833639000000` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
+If you do not want to use an automatically generated table name, there are two ways to specify subtable names (the first one has a higher priority).
+1. You can configure smlAutoChildTableNameDelimiter in taos.cfg (any character except `@ # space \r \t \n`).
+   1. For example, with `smlAutoChildTableNameDelimiter=-`, inserting `st,t0=cpu1,t1=4 c1=3 1626006833639000000` creates a table named cpu1-4.
+2. You can configure smlChildTableName in taos.cfg to specify table names.
+   2. For example, with `smlChildTableName=tname`, inserting `st,tname=cpu1,t1=4 c1=3 1626006833639000000` automatically creates a table named cpu1. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
2. If the super table obtained by parsing the line protocol does not exist, this super table is created.
**Important:** Manually creating supertables for schemaless writing is not supported. Schemaless writing creates appropriate supertables automatically.
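The auto-naming rule above is mechanical enough to reproduce. The short Python sketch below mirrors the description (it is not TDengine's actual C implementation, and the example tag values are made up): sort the tag keys, build the combined string, hash it with MD5, and prepend the fixed "t_" prefix.

```python
import hashlib

def schemaless_auto_table_name(measurement: str, tags: dict) -> str:
    # Tag keys are ordered by ascending string order, not by input order.
    tag_parts = [f"{key}={value}" for key, value in sorted(tags.items())]
    combined = ",".join([measurement] + tag_parts)   # "measurement,tag_key1=...,tag_key2=..."
    md5_val = hashlib.md5(combined.encode("utf-8")).hexdigest()
    return "t_" + md5_val                            # fixed "t_" prefix

# st,t1=3,t2=4 ... maps to a name like "t_<32-hex-digit md5>"
print(schemaless_auto_table_name("st", {"t2": "4", "t1": "3"}))
```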

View File

@@ -34,7 +34,7 @@ subquery: SELECT select_list
stb_name is the name of the supertable that stores the computation result. If this supertable does not exist, it is created automatically; if it already exists, the column schema is checked against it. See Writing to an Existing Supertable.
The TAGS clause defines the rules for creating tags in the stream, so that a custom tag value can be generated for the subtable corresponding to each partition. See Custom TAG.
```sql
create_definition:
col_name column_definition
@@ -77,7 +77,7 @@ SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(
CREATE STREAM avg_vol_s INTO avg_vol SUBTABLE(CONCAT('new-', tname)) AS SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname tname INTERVAL(1m);
```
-In the PARTITION BY clause, tbname is given the alias tname. An alias defined in the PARTITION BY clause can be used in expressions in the SUBTABLE clause; in the example above, the subtables newly created by the stream use the prefix 'new-' concatenated with the original table name as their names.
+In the PARTITION BY clause, tbname is given the alias tname. An alias defined in the PARTITION BY clause can be used in expressions in the SUBTABLE clause; in the example above, the subtables newly created by the stream use the prefix 'new-' concatenated with the original table name as their names (starting from 3.2.3.0, to keep a SUBTABLE expression that cannot distinguish between subtables from mistakenly writing multiple identical time series into one subtable, '_groupId' is appended to the specified subtable name).
Note that if the subtable name exceeds the TDengine length limit, it is truncated. If the generated subtable name already exists under another supertable, then because TDengine subtable names are unique, creating the corresponding new subtable and writing data to it will fail.
@@ -212,8 +212,8 @@ CREATE STREAM streams2 trigger at_once INTO st1 TAGS(cc varchar(100)) as select
In the PARTITION BY clause, concat("tag-", tbname) is given the alias cc, which corresponds to the name of the custom tag of supertable st1. In the example above, the tag of the subtables newly created by the stream takes the prefix 'tag-' concatenated with the original table name as its value.
The tag information is checked as follows:
1. Check whether the tag schema matches. If it does not, data types are converted automatically; currently an error is reported only when the data length exceeds 4096 bytes, and type conversion is performed in all other cases.
2. Check whether the number of tags is the same. If it differs, the mapping between the supertable tags and the subquery tags must be specified explicitly, otherwise an error is reported; if it is the same, the mapping can be specified or omitted, and when omitted the tags are matched by position.
## Cleaning Up Intermediate State

View File

@@ -87,19 +87,20 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
1. Subtable names are generated with the following rules: first, combine the measurement name and the tag keys and values into the following string:
```json
"measurement,tag_key1=tag_value1,tag_key2=tag_value2"
```
:::tip
Note that tag_key1, tag_key2 here are not in the original order in which the user entered the tags, but the result of sorting the tag names in ascending string order. Therefore, tag_key1 is not the first tag entered in the line protocol.
After the sorting is completed, the MD5 hash value "md5_val" of the string is calculated, and the table name is generated as "t_md5_val", where "t_" is a fixed prefix that every table automatically generated through this mapping has.
:::
-If you do not want to use the automatically generated table name, there are two ways to specify subtable names; the first one has a higher priority:
-Specify it by configuring the smlAutoChildTableNameDelimiter parameter in taos.cfg (any character except `@ # space carriage-return line-feed tab`).
-For example: with smlAutoChildTableNameDelimiter=- configured, inserting st,t0=cpu1,t1=4 c1=3 1626006833639000000 creates a table named cpu1-4.
-Specify it by configuring the smlChildTableName parameter in taos.cfg.
-For example: with smlChildTableName=tname configured, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates a table named cpu1. Note that if multiple rows have the same tname but different tag_set values, the tag_set specified when the first row auto-created the table is used and the other rows are ignored.
+If you do not want to use the automatically generated table name, there are two ways to specify subtable names (the first one has a higher priority).
+1. Specify it by configuring the smlAutoChildTableNameDelimiter parameter in taos.cfg (any character except `@ # space carriage-return line-feed tab`).
+   1. For example: with smlAutoChildTableNameDelimiter=- configured, inserting st,t0=cpu1,t1=4 c1=3 1626006833639000000 creates a table named cpu1-4.
+2. Specify it by configuring the smlChildTableName parameter in taos.cfg.
+   2. For example: with smlChildTableName=tname configured, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates a table named cpu1. Note that if multiple rows have the same tname but different tag_set values, the tag_set specified when the first row auto-created the table is used and the other rows are ignored.
2. If the supertable obtained by parsing the line protocol does not exist, it is created (manually creating supertables is not recommended, otherwise data insertion may behave unexpectedly).
3. If the subtable obtained by parsing the line protocol does not exist, Schemaless creates it using the subtable name determined in step 1 or 2.
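The same priority rule can be summarized in a short sketch. The helper name and the fallback to the auto-generated "t_<md5>" name below are illustrative assumptions, not TDengine's actual code; the two parameters stand in for the taos.cfg options described above.

```python
def resolve_child_table_name(tags: dict, delimiter: str = "", name_key: str = ""):
    if delimiter:                                   # smlAutoChildTableNameDelimiter wins
        return delimiter.join(str(v) for v in tags.values())
    if name_key and name_key in tags:               # otherwise smlChildTableName
        return str(tags[name_key])
    return None                                     # fall back to the auto-generated "t_<md5>" name

print(resolve_child_table_name({"t0": "cpu1", "t1": "4"}, delimiter="-"))            # cpu1-4
print(resolve_child_table_name({"tname": "cpu1", "t1": "4"}, name_key="tname"))      # cpu1
```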

View File

@@ -1975,6 +1975,14 @@ typedef struct {
char data[];
} SRetrieveTableRsp;
typedef struct {
int64_t version;
int64_t numOfRows;
int8_t compressed;
int8_t precision;
char data[];
} SRetrieveTableRspForTmq;
typedef struct {
int64_t handle;
int64_t useconds;

View File

@@ -150,7 +150,7 @@ int32_t smlBindData(SQuery* handle, bool dataFormat, SArray* tags, SArray* colsS
STableMeta* pTableMeta, char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl,
char* msgBuf, int32_t msgBufLen);
int32_t smlBuildOutput(SQuery* handle, SHashObj* pVgHash);
-int rawBlockBindData(SQuery *query, STableMeta* pTableMeta, void* data, SVCreateTbReq* pCreateTb, TAOS_FIELD *fields, int numFields, bool needChangeLength);
+int rawBlockBindData(SQuery *query, STableMeta* pTableMeta, void* data, SVCreateTbReq** pCreateTb, TAOS_FIELD *fields, int numFields, bool needChangeLength);
int32_t rewriteToVnodeModifyOpStmt(SQuery* pQuery, SArray* pBufArray);
SArray* serializeVgroupsCreateTableBatch(SHashObj* pVgroupHashmap);

View File

@@ -299,6 +299,8 @@ void doSetOneRowPtr(SReqResultInfo* pResultInfo);
void setResPrecision(SReqResultInfo* pResInfo, int32_t precision);
int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4,
bool freeAfterUse);
int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32_t numOfCols, int32_t numOfRows,
bool convertUcs4);
void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t numOfCols);
void doFreeReqResultInfo(SReqResultInfo* pResInfo);
int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName, SArray** pReq);

View File

@@ -1795,6 +1795,7 @@ int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols) {
static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows) {
char* p = (char*)pResultInfo->pData;
int32_t blockVersion = *(int32_t*)p;
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column
// length |
@@ -1810,7 +1811,7 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i
char* pStart = p + len;
for (int32_t i = 0; i < numOfCols; ++i) {
-int32_t colLen = htonl(colLength[i]);
+int32_t colLen = (blockVersion == 1) ? htonl(colLength[i]) : colLength[i];
if (pResultInfo->fields[i].type == TSDB_DATA_TYPE_JSON) {
int32_t* offset = (int32_t*)pStart;
@@ -1873,6 +1874,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
tscDebug("start to convert form json format string");
char* p = (char*)pResultInfo->pData;
int32_t blockVersion = *(int32_t*)p;
int32_t dataLen = estimateJsonLen(pResultInfo, numOfCols, numOfRows);
if (dataLen <= 0) {
return TSDB_CODE_TSC_INTERNAL_ERROR;
@@ -1908,8 +1910,8 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
char* pStart = p;
char* pStart1 = p1;
for (int32_t i = 0; i < numOfCols; ++i) {
-int32_t colLen = htonl(colLength[i]);
-int32_t colLen1 = htonl(colLength1[i]);
+int32_t colLen = (blockVersion == 1) ? htonl(colLength[i]) : colLength[i];
+int32_t colLen1 = (blockVersion == 1) ? htonl(colLength1[i]) : colLength1[i];
if (ASSERT(colLen < dataLen)) {
tscError("doConvertJson error: colLen:%d >= dataLen:%d", colLen, dataLen);
return TSDB_CODE_TSC_INTERNAL_ERROR;
@@ -1968,7 +1970,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
}
colLen1 = len;
totalLen += colLen1;
-colLength1[i] = htonl(len);
+colLength1[i] = (blockVersion == 1) ? htonl(len) : len;
} else if (IS_VAR_DATA_TYPE(pResultInfo->fields[i].type)) {
len = numOfRows * sizeof(int32_t);
memcpy(pStart1, pStart, len);
@@ -2057,7 +2059,9 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
char* pStart = p;
for (int32_t i = 0; i < numOfCols; ++i) {
-colLength[i] = htonl(colLength[i]);
+if(blockVersion == 1){
+colLength[i] = htonl(colLength[i]);
+}
if (colLength[i] >= dataLen) {
tscError("invalid colLength %d, dataLen %d", colLength[i], dataLen);
return TSDB_CODE_TSC_INTERNAL_ERROR;

View File

@@ -1553,6 +1553,18 @@ end:
return code;
}
static void* getRawDataFromRes(void *pRetrieve){
void* rawData = NULL;
// deal with compatibility
if(*(int64_t*)pRetrieve == 0){
rawData = ((SRetrieveTableRsp*)pRetrieve)->data;
}else if(*(int64_t*)pRetrieve == 1){
rawData = ((SRetrieveTableRspForTmq*)pRetrieve)->data;
}
ASSERT(rawData != NULL);
return rawData;
}
static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
if(taos == NULL || data == NULL){
terrno = TSDB_CODE_INVALID_PARA;
@@ -1607,7 +1619,7 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
}
pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
while (++rspObj.resIter < rspObj.rsp.blockNum) {
-SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
+void* pRetrieve = taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
if (!rspObj.rsp.withSchema) {
goto end;
}
@@ -1653,7 +1665,8 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
fields[i].bytes = pSW->pSchema[i].bytes;
tstrncpy(fields[i].name, pSW->pSchema[i].name, tListLen(pSW->pSchema[i].name));
}
-code = rawBlockBindData(pQuery, pTableMeta, pRetrieve->data, NULL, fields, pSW->nCols, true);
+void* rawData = getRawDataFromRes(pRetrieve);
+code = rawBlockBindData(pQuery, pTableMeta, rawData, NULL, fields, pSW->nCols, true);
taosMemoryFree(fields);
if (code != TSDB_CODE_SUCCESS) {
goto end;
@@ -1737,7 +1750,7 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
uDebug(LOG_ID_TAG" write raw metadata block num:%d", LOG_ID_VALUE, rspObj.rsp.blockNum);
while (++rspObj.resIter < rspObj.rsp.blockNum) {
-SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
+void* pRetrieve = taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
if (!rspObj.rsp.withSchema) {
goto end;
}
@@ -1824,12 +1837,12 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
fields[i].bytes = pSW->pSchema[i].bytes;
tstrncpy(fields[i].name, pSW->pSchema[i].name, tListLen(pSW->pSchema[i].name));
}
-code = rawBlockBindData(pQuery, pTableMeta, pRetrieve->data, pCreateReqDst, fields, pSW->nCols, true);
+void* rawData = getRawDataFromRes(pRetrieve);
+code = rawBlockBindData(pQuery, pTableMeta, rawData, &pCreateReqDst, fields, pSW->nCols, true);
taosMemoryFree(fields);
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
-pCreateReqDst = NULL;
taosMemoryFreeClear(pTableMeta);
}

View File

@@ -1577,16 +1577,24 @@ SMqRspObj* tmqBuildRspFromWrapper(SMqPollRspWrapper* pWrapper, SMqClientVg* pVg,
pRspObj->resInfo.totalRows = 0;
pRspObj->resInfo.precision = TSDB_TIME_PRECISION_MILLI;
-if (!pWrapper->dataRsp.withSchema) {
-setResSchemaInfo(&pRspObj->resInfo, pWrapper->topicHandle->schema.pSchema, pWrapper->topicHandle->schema.nCols);
+bool needTransformSchema = !pRspObj->rsp.withSchema;
+if (!pRspObj->rsp.withSchema) { // withSchema is false if subscribe subquery, true if subscribe db or stable
+pRspObj->rsp.withSchema = true;
+pRspObj->rsp.blockSchema = taosArrayInit(pRspObj->rsp.blockNum, sizeof(void*));
}
// extract the rows in this data packet
for (int32_t i = 0; i < pRspObj->rsp.blockNum; ++i) {
-SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(pRspObj->rsp.blockData, i);
+SRetrieveTableRspForTmq* pRetrieve = (SRetrieveTableRspForTmq*)taosArrayGetP(pRspObj->rsp.blockData, i);
int64_t rows = htobe64(pRetrieve->numOfRows);
pVg->numOfRows += rows;
(*numOfRows) += rows;
if (needTransformSchema) { //withSchema is false if subscribe subquery, true if subscribe db or stable
SSchemaWrapper *schema = tCloneSSchemaWrapper(&pWrapper->topicHandle->schema);
if(schema){
taosArrayPush(pRspObj->rsp.blockSchema, &schema);
}
}
}
return pRspObj;
@@ -1603,13 +1611,10 @@ SMqTaosxRspObj* tmqBuildTaosxRspFromWrapper(SMqPollRspWrapper* pWrapper, SMqClie
pRspObj->resInfo.totalRows = 0;
pRspObj->resInfo.precision = TSDB_TIME_PRECISION_MILLI;
-if (!pWrapper->taosxRsp.withSchema) {
-setResSchemaInfo(&pRspObj->resInfo, pWrapper->topicHandle->schema.pSchema, pWrapper->topicHandle->schema.nCols);
-}
// extract the rows in this data packet
for (int32_t i = 0; i < pRspObj->rsp.blockNum; ++i) {
-SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(pRspObj->rsp.blockData, i);
+SRetrieveTableRspForTmq* pRetrieve = (SRetrieveTableRspForTmq*)taosArrayGetP(pRspObj->rsp.blockData, i);
int64_t rows = htobe64(pRetrieve->numOfRows);
pVg->numOfRows += rows;
(*numOfRows) += rows;
@@ -2548,7 +2553,7 @@ SReqResultInfo* tmqGetNextResInfo(TAOS_RES* res, bool convertUcs4) {
pRspObj->resIter++;
if (pRspObj->resIter < pRspObj->rsp.blockNum) {
-SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(pRspObj->rsp.blockData, pRspObj->resIter);
+SRetrieveTableRspForTmq* pRetrieveTmq = (SRetrieveTableRspForTmq*)taosArrayGetP(pRspObj->rsp.blockData, pRspObj->resIter);
if (pRspObj->rsp.withSchema) {
SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(pRspObj->rsp.blockSchema, pRspObj->resIter);
setResSchemaInfo(&pRspObj->resInfo, pSW->pSchema, pSW->nCols);
@@ -2559,7 +2564,16 @@ SReqResultInfo* tmqGetNextResInfo(TAOS_RES* res, bool convertUcs4) {
taosMemoryFreeClear(pRspObj->resInfo.convertJson);
}
-setQueryResultFromRsp(&pRspObj->resInfo, pRetrieve, convertUcs4, false);
+pRspObj->resInfo.pData = (void*)pRetrieveTmq->data;
+pRspObj->resInfo.numOfRows = htobe64(pRetrieveTmq->numOfRows);
+pRspObj->resInfo.current = 0;
+pRspObj->resInfo.precision = pRetrieveTmq->precision;
+// TODO handle the compressed case
+pRspObj->resInfo.totalRows += pRspObj->resInfo.numOfRows;
+setResultDataPtr(&pRspObj->resInfo, pRspObj->resInfo.fields, pRspObj->resInfo.numOfCols, pRspObj->resInfo.numOfRows,
+convertUcs4);
return &pRspObj->resInfo;
}

View File

@@ -2196,7 +2196,7 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
// todo extract method
int32_t* version = (int32_t*)data;
-*version = 1;
+*version = 2;
data += sizeof(int32_t);
int32_t* actualLen = (int32_t*)data;
@@ -2277,7 +2277,7 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
data += colSizes[col];
}
-colSizes[col] = htonl(colSizes[col]);
+// colSizes[col] = htonl(colSizes[col]);
// uError("blockEncode col bytes:%d, type:%d, size:%d, htonl size:%d", pColRes->info.bytes, pColRes->info.type,
// htonl(colSizes[col]), colSizes[col]);
}
@@ -2293,7 +2293,6 @@ const char* blockDecode(SSDataBlock* pBlock, const char* pData) {
int32_t version = *(int32_t*)pStart;
pStart += sizeof(int32_t);
-ASSERT(version == 1);
// total length sizeof(int32_t)
int32_t dataLen = *(int32_t*)pStart;
@@ -2339,7 +2338,9 @@ const char* blockDecode(SSDataBlock* pBlock, const char* pData) {
pStart += sizeof(int32_t) * numOfCols;
for (int32_t i = 0; i < numOfCols; ++i) {
-colLen[i] = htonl(colLen[i]);
+if(version == 1){
+colLen[i] = htonl(colLen[i]);
+}
ASSERT(colLen[i] >= 0);
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
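The encode/decode change above means version-2 blocks carry column lengths in the sender's native byte order, while version-1 blocks keep the htonl-converted values. A small reader sketch that honors this distinction (assuming a little-endian host; offsets and layout are simplified relative to the real block format):

```python
import struct

def read_col_lengths(buf: bytes, offset: int, num_cols: int, version: int):
    # Version 1 blocks store column lengths in network byte order (htonl);
    # version 2 blocks store them in the sender's native order (little-endian
    # assumed here), matching the branch added to blockDecode() above.
    fmt = (">" if version == 1 else "<") + str(num_cols) + "i"
    return list(struct.unpack_from(fmt, buf, offset))
```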

View File

@@ -16,21 +16,20 @@
#include "tq.h"
int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t numOfCols, int8_t precision) {
-int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock);
+int32_t dataStrLen = sizeof(SRetrieveTableRspForTmq) + blockGetEncodeSize(pBlock);
void* buf = taosMemoryCalloc(1, dataStrLen);
if (buf == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
-SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)buf;
-pRetrieve->useconds = 0;
+SRetrieveTableRspForTmq* pRetrieve = (SRetrieveTableRspForTmq*)buf;
+pRetrieve->version = 1;
pRetrieve->precision = precision;
pRetrieve->compressed = 0;
-pRetrieve->completed = 1;
pRetrieve->numOfRows = htobe64((int64_t)pBlock->info.rows);
int32_t actualLen = blockEncode(pBlock, pRetrieve->data, numOfCols);
-actualLen += sizeof(SRetrieveTableRsp);
+actualLen += sizeof(SRetrieveTableRspForTmq);
taosArrayPush(pRsp->blockDataLen, &actualLen);
taosArrayPush(pRsp->blockData, &buf);
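For readers following the new layout, here is a rough sketch of the header this function now writes in front of the encoded block. Field packing is simplified relative to the C struct (padding ignored), and a little-endian host is assumed for the version field; only the big-endian numOfRows encoding is taken directly from the code above.

```python
import struct

def build_tmq_block_header(num_rows: int, precision: int, compressed: int = 0) -> bytes:
    # Mirrors the assignments above: version = 1, numOfRows stored big-endian
    # (htobe64), then the compressed and precision flags; the blockEncode()
    # output would follow this header.
    return (struct.pack("<q", 1) +            # version
            struct.pack(">q", num_rows) +     # numOfRows, big-endian
            struct.pack("bb", compressed, precision))

header = build_tmq_block_header(num_rows=100, precision=0)
print(len(header))  # 18 bytes precede the encoded block in this simplified layout
```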

View File

@@ -247,13 +247,13 @@ static int32_t createTableDataCxt(STableMeta* pTableMeta, SVCreateTbReq** pCreat
if (NULL == pTableCxt->pData) {
code = TSDB_CODE_OUT_OF_MEMORY;
} else {
-pTableCxt->pData->flags = NULL != *pCreateTbReq ? SUBMIT_REQ_AUTO_CREATE_TABLE : 0;
+pTableCxt->pData->flags = (pCreateTbReq != NULL && NULL != *pCreateTbReq) ? SUBMIT_REQ_AUTO_CREATE_TABLE : 0;
pTableCxt->pData->flags |= colMode ? SUBMIT_REQ_COLUMN_DATA_FORMAT : 0;
pTableCxt->pData->suid = pTableMeta->suid;
pTableCxt->pData->uid = pTableMeta->uid;
pTableCxt->pData->sver = pTableMeta->sversion;
-pTableCxt->pData->pCreateTbReq = *pCreateTbReq;
-*pCreateTbReq = NULL;
+pTableCxt->pData->pCreateTbReq = pCreateTbReq != NULL ? *pCreateTbReq : NULL;
+if(pCreateTbReq != NULL) *pCreateTbReq = NULL;
if (pTableCxt->pData->flags & SUBMIT_REQ_COLUMN_DATA_FORMAT) {
pTableCxt->pData->aCol = taosArrayInit(128, sizeof(SColData));
if (NULL == pTableCxt->pData->aCol) {
@@ -640,12 +640,12 @@ static bool findFileds(SSchema* pSchema, TAOS_FIELD* fields, int numFields) {
return false;
}
-int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreateTbReq* pCreateTb, TAOS_FIELD* tFields,
+int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreateTbReq** pCreateTb, TAOS_FIELD* tFields,
int numFields, bool needChangeLength) {
void* tmp = taosHashGet(((SVnodeModifyOpStmt*)(query->pRoot))->pTableBlockHashObj, &pTableMeta->uid, sizeof(pTableMeta->uid));
STableDataCxt* pTableCxt = NULL;
int ret = insGetTableDataCxt(((SVnodeModifyOpStmt*)(query->pRoot))->pTableBlockHashObj, &pTableMeta->uid,
-sizeof(pTableMeta->uid), pTableMeta, &pCreateTb, &pTableCxt, true, false);
+sizeof(pTableMeta->uid), pTableMeta, pCreateTb, &pTableCxt, true, false);
if (ret != TSDB_CODE_SUCCESS) {
uError("insGetTableDataCxt error");
goto end;
@@ -662,6 +662,7 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate
char* p = (char*)data;
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column
// length |
int32_t version = *(int32_t*)data;
p += sizeof(int32_t);
p += sizeof(int32_t);
@@ -717,7 +718,7 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate
goto end;
}
fields += sizeof(int8_t) + sizeof(int32_t);
-if (needChangeLength) {
+if (needChangeLength && version == 1) {
pStart += htonl(colLength[j]);
} else {
pStart += colLength[j];
@@ -748,7 +749,7 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate
goto end;
}
fields += sizeof(int8_t) + sizeof(int32_t);
-if (needChangeLength) {
+if (needChangeLength && version == 1) {
pStart += htonl(colLength[i]);
} else {
pStart += colLength[i];

View File

@@ -0,0 +1,67 @@
import taos
import sys
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
class TDTestCase(TBase):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to execute {__file__}")
#tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql) # output sql.txt file
def run(self):
dbname = "db"
stbname = "ocloud_point"
tbname = "ocloud_point_170658_3837620225_1701134595725266945"
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
tdSql.execute(
f'''create stable if not exists {dbname}.{stbname}
(wstart timestamp, point_value float) tags (location binary(64), groupId int)
'''
)
tdSql.execute(
f'''create table if not exists {dbname}.{tbname} using {dbname}.{stbname} tags("California.SanFrancisco", 2)'''
)
sqls = []
for i in range(35, 41):
if i == 38 or i == 40:
sqls.append(f"insert into {dbname}.{tbname} values('2023-12-26 10:{i}:00.000', null)")
else:
sqls.append(f"insert into {dbname}.{tbname} values('2023-12-26 10:{i}:00.000', 5.0)")
# sqls.append(f"insert into {dbname}.{tbname} values('2023-12-26 10:36:00.000', 5.0)")
# sqls.append(f"insert into {dbname}.{tbname} values('2023-12-26 10:37:00.000', 5.0)")
# sqls.append(f"insert into {dbname}.{tbname} values('2023-12-26 10:38:00.000', null)")
# sqls.append(f"insert into {dbname}.{tbname} values('2023-12-26 10:39:00.000', 5.0)")
# sqls.append(f"insert into {dbname}.{tbname} values('2023-12-26 10:40:00.000', null)")
tdSql.executes(sqls)
tdLog.printNoPrefix("==========step3:fill data")
tdSql.query(f"select first(point_value) as pointValue from {dbname}.{tbname} where wstart between '2023-12-26 10:35:00' and '2023-12-26 10:40:00' interval(1M) fill(prev) order by wstart desc limit 100")
data = []
for i in range(6):
row = [5]
data.append(row)
tdSql.checkDataMem(data)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@@ -447,6 +447,48 @@ class TDSql:
if(show):
tdLog.info("check successfully")
def checkDataMem(self, mem):
if not isinstance(mem, list):
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, self.sql)
tdLog.exit("%s(%d) failed: sql:%s, expect data is error, must is array[][]" % args)
if len(mem) != self.queryRows:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, self.sql, len(mem), self.queryRows)
tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args)
# row, col, data
for row, rowData in enumerate(mem):
for col, colData in enumerate(rowData):
self.checkData(row, col, colData)
tdLog.info("check successfully")
def checkDataCsv(self, csvfilePath):
if not isinstance(csvfilePath, str) or len(csvfilePath) == 0:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, self.sql, csvfilePath)
tdLog.exit("%s(%d) failed: sql:%s, expect csvfile path error:%s" % args)
tdLog.info("read csvfile read begin")
data = []
try:
with open(csvfilePath) as csvfile:
csv_reader = csv.reader(csvfile) # csv.reader read csvfile\
# header = next(csv_reader) # Read the header of each column in the first row
for row in csv_reader: # csv file save to data
data.append(row)
except FileNotFoundError:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, self.sql, csvfilePath)
tdLog.exit("%s(%d) failed: sql:%s, expect csvfile not find error:%s" % args)
except Exception as e:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, self.sql, csvfilePath, str(e))
tdLog.exit("%s(%d) failed: sql:%s, expect csvfile path:%s, read error:%s" % args)
tdLog.info("read csvfile read successfully")
self.checkDataMem(data)
# return true or false replace exit, no print out
def checkRowColNoExit(self, row, col):
caller = inspect.getframeinfo(inspect.stack()[2][0])

View File

@@ -17,6 +17,7 @@ fi
,,y,army,./pytest.sh python3 ./test.py -f enterprise/multi-level/mlevel_basic.py -N 3 -L 3 -D 2
,,y,army,./pytest.sh python3 ./test.py -f enterprise/s3/s3_basic.py -L 3 -D 1
,,y,army,./pytest.sh python3 ./test.py -f community/cluster/snapshot.py -N 3 -L 3 -D 2
,,y,army,./pytest.sh python3 ./test.py -f community/query/fill/fill_desc.py -N 3 -L 3 -D 2
,,y,army,./pytest.sh python3 ./test.py -f community/cluster/incSnapshot.py -N 3 -L 3 -D 2
,,y,army,./pytest.sh python3 ./test.py -f community/query/query_basic.py -N 3
,,n,army,python3 ./test.py -f community/cmdline/fullopt.py

View File

@@ -0,0 +1,81 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"connection_pool_size": 8,
"num_of_records_per_req": 2000,
"thread_count": 2,
"create_table_thread_count": 10,
"result_file": "./insert_res_mix.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"check_sql": "yes",
"continue_if_fail": "yes",
"databases": [
{
"dbinfo": {
"name": "curdb",
"drop": "yes",
"vgroups": 2,
"replica": 1,
"precision": "ms",
"stt_trigger": 8,
"minRows": 100,
"maxRows": 4096
},
"super_tables": [
{
"name": "meters",
"child_table_exists": "no",
"childtable_count": 5,
"insert_rows": 100000,
"childtable_prefix": "d",
"insert_mode": "taosc",
"insert_interval": 0,
"timestamp_step": 1000,
"start_timestamp":"2022-09-01 10:00:00",
"disorder_ratio": 60,
"update_ratio": 70,
"delete_ratio": 30,
"disorder_fill_interval": 300,
"update_fill_interval": 25,
"generate_row_rule": 2,
"columns": [
{ "type": "bool", "name": "bc"},
{ "type": "float", "name": "fc", "max": 1, "min": 0 },
{ "type": "double", "name": "dc", "max": 1, "min": 0 },
{ "type": "tinyint", "name": "ti", "max": 100, "min": 0 },
{ "type": "smallint", "name": "si", "max": 100, "min": 0 },
{ "type": "int", "name": "ic", "max": 100, "min": 0 },
{ "type": "bigint", "name": "bi", "max": 100, "min": 0 },
{ "type": "utinyint", "name": "uti", "max": 100, "min": 0 },
{ "type": "usmallint", "name": "usi", "max": 100, "min": 0 },
{ "type": "uint", "name": "ui", "max": 100, "min": 0 },
{ "type": "ubigint", "name": "ubi", "max": 100, "min": 0 },
{ "type": "binary", "name": "bin", "len": 32},
{ "type": "nchar", "name": "nch", "len": 64}
],
"tags": [
{
"type": "tinyint",
"name": "groupid",
"max": 10,
"min": 1
},
{
"name": "location",
"type": "binary",
"len": 16,
"values": ["San Francisco", "Los Angles", "San Diego",
"San Jose", "Palo Alto", "Campbell", "Mountain View",
"Sunnyvale", "Santa Clara", "Cupertino"]
}
]
}
]
}
]
}

View File

@@ -152,6 +152,13 @@ class TDTestCase:
tdLog.info(f" LD_LIBRARY_PATH=/usr/lib taosBenchmark -t {tableNumbers} -n {recordNumbers1} -y ")
os.system(f"LD_LIBRARY_PATH=/usr/lib taosBenchmark -t {tableNumbers} -n {recordNumbers1} -y ")
os.system("LD_LIBRARY_PATH=/usr/lib taos -s 'flush database test '")
os.system("LD_LIBRARY_PATH=/usr/lib taosBenchmark -f 0-others/com_alltypedata.json -y")
os.system("LD_LIBRARY_PATH=/usr/lib taos -s 'flush database curdb '")
os.system("LD_LIBRARY_PATH=/usr/lib taos -s 'select count(*) from curdb.meters '")
os.system("LD_LIBRARY_PATH=/usr/lib taos -s 'select sum(fc) from curdb.meters '")
os.system("LD_LIBRARY_PATH=/usr/lib taos -s 'select avg(ic) from curdb.meters '")
os.system("LD_LIBRARY_PATH=/usr/lib taos -s 'select min(ui) from curdb.meters '")
os.system("LD_LIBRARY_PATH=/usr/lib taos -s 'select max(bi) from curdb.meters '")
# os.system(f"LD_LIBRARY_PATH=/usr/lib taos -s 'use test;create stream current_stream into current_stream_output_stb as select _wstart as `start`, _wend as wend, max(current) as max_current from meters where voltage <= 220 interval (5s);' ")
# os.system('LD_LIBRARY_PATH=/usr/lib taos -s "use test;create stream power_stream into power_stream_output_stb as select ts, concat_ws(\\".\\", location, tbname) as meter_location, current*voltage*cos(phase) as active_power, current*voltage*sin(phase) as reactive_power from meters partition by tbname;" ')

View File

@@ -0,0 +1,275 @@
from urllib.parse import uses_relative
import taos
import sys
import os
import time
import platform
import inspect
from taos.tmq import Consumer
from pathlib import Path
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
import subprocess
import distro
BASEVERSION = "3.0.2.3"
class TDTestCase:
def caseDescription(self):
f'''
3.0 data compatibility test
case1: basedata version is {BASEVERSION}
'''
return
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to execute {__file__}")
tdSql.init(conn.cursor())
self.deletedDataSql= '''drop database if exists deldata;create database deldata duration 300 stt_trigger 1; ;use deldata;
create table deldata.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int);
create table deldata.ct1 using deldata.stb1 tags ( 1 );
insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a ) ( now()-10s, 1, 11111, 111, 11, 1.11, 11.11, 1, 'binary1', 'nchar1', now()+1a ) ( now()-20s, 2, 22222, 222, 22, 2.22, 22.22, 0, 'binary2', 'nchar2', now()+2a ) ( now()-30s, 3, 33333, 333, 33, 3.33, 33.33, 1, 'binary3', 'nchar3', now()+3a );
select avg(c1) from deldata.ct1;
delete from deldata.stb1;
flush database deldata;
insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a ) ( now()-10s, 1, 11111, 111, 11, 1.11, 11.11, 1, 'binary1', 'nchar1', now()+1a ) ( now()-20s, 2, 22222, 222, 22, 2.22, 22.22, 0, 'binary2', 'nchar2', now()+2a ) ( now()-30s, 3, 33333, 333, 33, 3.33, 33.33, 1, 'binary3', 'nchar3', now()+3a );
delete from deldata.ct1;
insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a );
flush database deldata;'''
def checkProcessPid(self,processName):
i=0
while i<60:
print(f"wait stop {processName}")
processPid = subprocess.getstatusoutput(f'ps aux|grep {processName} |grep -v "grep"|awk \'{{print $2}}\'')[1]
print(f"times:{i},{processName}-pid:{processPid}")
if(processPid == ""):
break
i += 1
sleep(1)
else:
print(f'{processName} is not stopped in 60s')
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
self.projPath = projPath
for root, dirs, files in os.walk(projPath):
if ("taosd" in files or "taosd.exe" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
def getCfgPath(self):
buildPath = self.getBuildPath()
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
cfgPath = buildPath + "/../sim/dnode1/cfg/"
else:
cfgPath = buildPath + "/../sim/dnode1/cfg/"
return cfgPath
def installTaosd(self,bPath,cPath):
# os.system(f"rmtaos && mkdir -p {self.getBuildPath()}/build/lib/temp && mv {self.getBuildPath()}/build/lib/libtaos.so* {self.getBuildPath()}/build/lib/temp/ ")
# os.system(f" mv {bPath}/build {bPath}/build_bak ")
# os.system(f"mv {self.getBuildPath()}/build/lib/libtaos.so {self.getBuildPath()}/build/lib/libtaos.so_bak ")
# os.system(f"mv {self.getBuildPath()}/build/lib/libtaos.so.1 {self.getBuildPath()}/build/lib/libtaos.so.1_bak ")
packagePath = "/usr/local/src/"
dataPath = cPath + "/../data/"
if platform.system() == "Linux" and platform.machine() == "aarch64":
packageName = "TDengine-server-"+ BASEVERSION + "-Linux-arm64.tar.gz"
else:
packageName = "TDengine-server-"+ BASEVERSION + "-Linux-x64.tar.gz"
packageTPath = packageName.split("-Linux-")[0]
my_file = Path(f"{packagePath}/{packageName}")
if not my_file.exists():
print(f"{packageName} is not exists")
tdLog.info(f"cd {packagePath} && wget https://www.tdengine.com/assets-download/3.0/{packageName}")
os.system(f"cd {packagePath} && wget https://www.tdengine.com/assets-download/3.0/{packageName}")
else:
print(f"{packageName} has been exists")
os.system(f" cd {packagePath} && tar xvf {packageName} && cd {packageTPath} && ./install.sh -e no " )
tdDnodes.stop(1)
print(f"start taosd: rm -rf {dataPath}/* && nohup taosd -c {cPath} & ")
os.system(f"rm -rf {dataPath}/* && nohup taosd -c {cPath} & " )
sleep(5)
def buildTaosd(self,bPath):
# os.system(f"mv {bPath}/build_bak {bPath}/build ")
os.system(f" cd {bPath} ")
def is_list_same_as_ordered_list(self,unordered_list, ordered_list):
sorted_list = sorted(unordered_list)
return sorted_list == ordered_list
def run(self):
scriptsPath = os.path.dirname(os.path.realpath(__file__))
distro_id = distro.id()
if distro_id == "alpine":
tdLog.info(f"alpine skip compatibility test")
return True
if platform.system().lower() == 'windows':
tdLog.info(f"Windows skip compatibility test")
return True
bPath = self.getBuildPath()
cPath = self.getCfgPath()
dbname = "test"
stb = f"{dbname}.meters"
os.system("echo 'debugFlag 143' > /etc/taos/taos.cfg ")
tableNumbers=100
recordNumbers1=100
recordNumbers2=1000
# tdsqlF=tdCom.newTdSql()
# print(tdsqlF)
# tdsqlF.query(f"SELECT SERVER_VERSION();")
# print(tdsqlF.query(f"SELECT SERVER_VERSION();"))
# oldServerVersion=tdsqlF.queryResult[0][0]
# tdLog.info(f"Base server version is {oldServerVersion}")
# tdsqlF.query(f"SELECT CLIENT_VERSION();")
# # the oldClientVersion can't be updated in the same python process,so the version is new compiled verison
# oldClientVersion=tdsqlF.queryResult[0][0]
# tdLog.info(f"Base client version is {oldClientVersion}")
# baseVersion = "3.0.1.8"
tdLog.printNoPrefix(f"==========step1:prepare and check data in old version-{BASEVERSION}")
os.system(f"rm -rf {cPath}/../data")
print(self.projPath)
# this data file is special for coverage test in 192.168.1.96
os.system("cp -r f{self.projPath}/../comp_testdata/data/ {self.projPath}/sim/dnode1")
tdDnodes.stop(1)
tdDnodes.start(1)
tdsql=tdCom.newTdSql()
tdsql.query(f"SELECT SERVER_VERSION();")
nowServerVersion=tdsql.queryResult[0][0]
tdLog.printNoPrefix(f"==========step3:prepare and check data in new version-{nowServerVersion}")
tdsql.query(f"select count(*) from {stb}")
tdsql.checkData(0,0,tableNumbers*recordNumbers1)
# tdsql.query("show streams;")
# os.system(f"taosBenchmark -t {tableNumbers} -n {recordNumbers2} -y ")
# tdsql.query("show streams;")
# tdsql.query(f"select count(*) from {stb}")
# tdsql.checkData(0,0,tableNumbers*recordNumbers2)
# checkout db4096
tdsql.query("select count(*) from db4096.stb0")
tdsql.checkData(0,0,50000)
# checkout deleted data
tdsql.execute("insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a ) ( now()-10s, 1, 11111, 111, 11, 1.11, 11.11, 1, 'binary1', 'nchar1', now()+1a ) ( now()-20s, 2, 22222, 222, 22, 2.22, 22.22, 0, 'binary2', 'nchar2', now()+2a ) ( now()-30s, 3, 33333, 333, 33, 3.33, 33.33, 1, 'binary3', 'nchar3', now()+3a );")
tdsql.execute("flush database deldata;")
tdsql.query("select avg(c1) from deldata.ct1;")
tdsql=tdCom.newTdSql()
tdLog.printNoPrefix("==========step4:verify backticks in taos Sql-TD18542")
tdsql.execute("drop database if exists db")
tdsql.execute("create database db")
tdsql.execute("use db")
tdsql.execute("create stable db.stb1 (ts timestamp, c1 int) tags (t1 int);")
tdsql.execute("insert into db.ct1 using db.stb1 TAGS(1) values(now(),11);")
tdsql.error(" insert into `db.ct2` using db.stb1 TAGS(9) values(now(),11);")
tdsql.error(" insert into db.`db.ct2` using db.stb1 TAGS(9) values(now(),11);")
tdsql.execute("insert into `db`.ct3 using db.stb1 TAGS(3) values(now(),13);")
tdsql.query("select * from db.ct3")
tdsql.checkData(0,1,13)
tdsql.execute("insert into db.`ct4` using db.stb1 TAGS(4) values(now(),14);")
tdsql.query("select * from db.ct4")
tdsql.checkData(0,1,14)
#check retentions
tdsql=tdCom.newTdSql()
tdsql.query("describe information_schema.ins_databases;")
qRows=tdsql.queryRows
comFlag=True
j=0
while comFlag:
for i in range(qRows) :
if tdsql.queryResult[i][0] == "retentions" :
print("parameters include retentions")
comFlag=False
break
else :
comFlag=True
j=j+1
if j == qRows:
print("parameters don't include retentions")
caller = inspect.getframeinfo(inspect.stack()[0][0])
args = (caller.filename, caller.lineno)
tdLog.exit("%s(%d) failed" % args)
# check stream
tdsql.query("show streams;")
tdsql.checkRows(0)
#check TS-3131
tdsql.query("select *,tbname from d0.almlog where mcid='m0103';")
tdsql.checkRows(6)
expectList = [0,3003,20031,20032,20033,30031]
resultList = []
for i in range(6):
resultList.append(tdsql.queryResult[i][3])
print(resultList)
if self.is_list_same_as_ordered_list(resultList,expectList):
print("The unordered list is the same as the ordered list.")
else:
tdLog.exit("The unordered list is not the same as the ordered list.")
tdsql.execute("insert into test.d80 values (now+1s, 11, 103, 0.21);")
tdsql.execute("insert into test.d9 values (now+5s, 4.3, 104, 0.4);")
# check tmq
conn = taos.connect()
consumer = Consumer(
{
"group.id": "tg75",
"client.id": "124",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"enable.auto.commit": "true",
"experimental.snapshot.enable": "true",
}
)
consumer.subscribe(["tmq_test_topic"])
while True:
res = consumer.poll(10)
if not res:
break
err = res.error()
if err is not None:
raise err
val = res.value()
for block in val:
print(block.fetchall())
tdsql.query("show topics;")
tdsql.checkRows(1)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@@ -51,7 +51,8 @@ class TDTestCase:
tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
tdLog.info("insert data")
tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
tdSql.execute("insert into ctb0 values(now,1,'');")
tdLog.info("create topics from stb with filter")
queryString = "select ts,c1,c2 from %s.%s" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as stable %s.%s" %(topicNameList[0], paraDict["dbName"],paraDict["stbName"])