Merge branch 'develop' into feature/TD-3963

commit 97b0ca05e4

@@ -1,49 +1,49 @@
version: 1.0.{build}

image:
  - Visual Studio 2015
  - macos

environment:
  matrix:
    - ARCH: amd64
    - ARCH: x86

matrix:
  exclude:
    - image: macos
      ARCH: x86

for:
  -
    matrix:
      only:
        - image: Visual Studio 2015

    clone_folder: c:\dev\TDengine
    clone_depth: 1

    init:
      - call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %ARCH%

    before_build:
      - cd c:\dev\TDengine
      - md build

    build_script:
      - cd build
-     - cmake -G "NMake Makefiles" ..
+     - cmake -G "NMake Makefiles" .. -DBUILD_JDBC=false
      - nmake install

  -
    matrix:
      only:
        - image: macos

    clone_depth: 1

    build_script:
      - mkdir debug
      - cd debug
      - cmake .. > /dev/null
      - make > /dev/null

notifications:
  - provider: Email
    to:
      - sangshuduo@gmail.com
    on_build_success: true
    on_build_failure: true
    on_build_status_changed: true

@@ -14,11 +14,13 @@ MESSAGE(STATUS "Project binary files output path: " ${PROJECT_BINARY_DIR})
 MESSAGE(STATUS "Project executable files output path: " ${EXECUTABLE_OUTPUT_PATH})
 MESSAGE(STATUS "Project library files output path: " ${LIBRARY_OUTPUT_PATH})

-FIND_PROGRAM(TD_MVN_INSTALLED mvn)
-IF (TD_MVN_INSTALLED)
-  MESSAGE(STATUS "MVN is installed and JDBC will be compiled")
-ELSE ()
-  MESSAGE(STATUS "MVN is not installed and JDBC is not compiled")
+IF (TD_BUILD_JDBC)
+  FIND_PROGRAM(TD_MVN_INSTALLED mvn)
+  IF (TD_MVN_INSTALLED)
+    MESSAGE(STATUS "MVN is installed and JDBC will be compiled")
+  ELSE ()
+    MESSAGE(STATUS "MVN is not installed and JDBC is not compiled")
+  ENDIF ()
 ENDIF ()

 #

@@ -55,4 +57,4 @@ ELSE ()
  SET(CMAKE_BUILD_TYPE "Debug")
  MESSAGE(STATUS "Build Debug Version as default")
ENDIF()
ENDIF ()
ENDIF ()

@@ -77,3 +77,9 @@ IF (${JEMALLOC_ENABLED} MATCHES "true")
   SET(TD_JEMALLOC_ENABLED TRUE)
   MESSAGE(STATUS "build with jemalloc enabled")
 ENDIF ()
+
+SET(TD_BUILD_JDBC TRUE)
+
+IF (${BUILD_JDBC} MATCHES "false")
+  SET(TD_BUILD_JDBC FALSE)
+ENDIF ()

@@ -266,7 +266,9 @@ while(resultSet.next()){
> As with a relational database, fields in the result set are indexed starting from 1 when accessed by position; fetching them by field name is recommended.

### Handling Exceptions

After an error is reported, the error message and error code can be obtained through the SQLException:

```java
try (Statement statement = connection.createStatement()) {
    // executeQuery
@@ -279,11 +281,87 @@ try (Statement statement = connection.createStatement()) {
    e.printStackTrace();
}
```

The error codes that the JDBC connector may report fall into three categories: errors from the JDBC driver itself (error codes between 0x2301 and 0x2350), errors from the JNI methods (error codes between 0x2351 and 0x2400), and errors from other TDengine modules.
For the specific error codes, please refer to:
* https://github.com/taosdata/TDengine/blob/develop/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
* https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h

### <a class="anchor" id="stmt-java"></a>Writing Data via Parameter Binding

Starting with version 2.1.2.0, the **JDBC-JNI** implementation of TDengine has significantly improved support for parameter binding in data-writing (INSERT) scenarios. Writing data this way avoids the overhead of SQL parsing and can markedly improve write performance in many cases. (Note: the **JDBC-RESTful** implementation does not provide parameter binding.)

```java
Statement stmt = conn.createStatement();
Random r = new Random();

// In the INSERT statement, the VALUES clause may name specific data columns; if automatic table creation is used, the TAGS clause must supply values for all TAGS columns:
TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags (?, ?) (ts, c1, c2) values(?, ?, ?)");

// Set the table name:
s.setTableName("w1");
// Set the TAGS values:
s.setTagInt(0, r.nextInt(10));
s.setTagString(1, "Beijing");

int numOfRows = 10;

// The VALUES part is set column by column:
ArrayList<Long> ts = new ArrayList<>();
for (int i = 0; i < numOfRows; i++){
    ts.add(System.currentTimeMillis() + i);
}
s.setTimestamp(0, ts);

ArrayList<Integer> s1 = new ArrayList<>();
for (int i = 0; i < numOfRows; i++){
    s1.add(r.nextInt(100));
}
s.setInt(1, s1);

ArrayList<String> s2 = new ArrayList<>();
for (int i = 0; i < numOfRows; i++){
    s2.add("test" + r.nextInt(100));
}
s.setString(2, s2, 10);

// After columnDataAddBatch, a new table name, TAGS and VALUES can be set again, so one execution can write into multiple tables:
s.columnDataAddBatch();
// Execute the statement:
s.columnDataExecuteBatch();
// When done, release the resources:
s.columnDataCloseBatch();
```

The methods available for setting TAGS values are:
```java
public void setTagNull(int index, int type)
public void setTagBoolean(int index, boolean value)
public void setTagInt(int index, int value)
public void setTagByte(int index, byte value)
public void setTagShort(int index, short value)
public void setTagLong(int index, long value)
public void setTagTimestamp(int index, long value)
public void setTagFloat(int index, float value)
public void setTagDouble(int index, double value)
public void setTagString(int index, String value)
public void setTagNString(int index, String value)
```

The methods available for setting the VALUES data columns are:
```java
public void setInt(int columnIndex, ArrayList<Integer> list) throws SQLException
public void setFloat(int columnIndex, ArrayList<Float> list) throws SQLException
public void setTimestamp(int columnIndex, ArrayList<Long> list) throws SQLException
public void setLong(int columnIndex, ArrayList<Long> list) throws SQLException
public void setDouble(int columnIndex, ArrayList<Double> list) throws SQLException
public void setBoolean(int columnIndex, ArrayList<Boolean> list) throws SQLException
public void setByte(int columnIndex, ArrayList<Byte> list) throws SQLException
public void setShort(int columnIndex, ArrayList<Short> list) throws SQLException
public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
```

### <a class="anchor" id="subscribe"></a>Subscription

#### Create

@@ -291,9 +291,25 @@ typedef struct taosField {

All asynchronous TDengine APIs use a non-blocking calling model. An application may use multiple threads to open several tables at once and query or insert into each open table concurrently. Note, however, that **the client application must strictly serialize operations on the same table**: while an insert or query on a table is still pending (has not returned), a second insert or query on that same table must not be issued.

<a class="anchor" id="stmt"></a>
### Parameter Binding API

Besides calling `taos_query` directly, TDengine also provides a Prepare API that supports parameter binding. Like MySQL, these APIs currently only support the question mark `?` as the placeholder for the parameters to be bound.

Starting with versions 2.1.1.0 and 2.1.2.0, TDengine has greatly improved the parameter-binding interface's support for data-writing (INSERT) scenarios. Writing data through this interface avoids the overhead of SQL parsing and therefore significantly improves write performance in the vast majority of cases. The typical steps are as follows (a minimal sketch in C follows this list):
1. Call `taos_stmt_init` to create a parameter-binding object;
2. Call `taos_stmt_prepare` to parse the INSERT statement;
3. If the INSERT statement reserves a placeholder for the table name but not for the TAGS, call `taos_stmt_set_tbname` to set the table name;
4. If the INSERT statement reserves placeholders for both the table name and the TAGS (for example, when the INSERT statement relies on automatic table creation), call `taos_stmt_set_tbname_tags` to set the table name and the TAGS values;
5. Call `taos_stmt_bind_param_batch` to set the VALUES column by column;
6. Call `taos_stmt_add_batch` to add the currently bound parameters to the batch;
7. Steps 3 to 6 can be repeated to add more rows of data to the batch;
8. Call `taos_stmt_execute` to execute the prepared batch;
9. When finished, call `taos_stmt_close` to release all resources.
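
Putting the steps above together, here is a minimal sketch in C. It is an illustration only: the super table `weather (ts TIMESTAMP, current INT) TAGS (groupId INT)`, the sub-table name `d1001`, and the sample values are assumptions made for this example, and the return code of every call should be checked in real code.

```c
#include <string.h>
#include "taos.h"

// Minimal sketch of the parameter-binding INSERT flow described above.
// Assumed schema: create table weather (ts timestamp, current int) tags (groupId int);
void insert_with_stmt(TAOS *taos) {
  TAOS_STMT *stmt = taos_stmt_init(taos);                                  // step 1

  const char *sql = "insert into ? using weather tags (?) (ts, current) values (?, ?)";
  taos_stmt_prepare(stmt, sql, 0);                                         // step 2

  // step 4: bind the concrete table name and the TAGS values
  int groupId = 1;
  TAOS_BIND tag[1];
  memset(tag, 0, sizeof(tag));
  tag[0].buffer_type   = TSDB_DATA_TYPE_INT;
  tag[0].buffer        = &groupId;
  tag[0].buffer_length = sizeof(groupId);
  taos_stmt_set_tbname_tags(stmt, "d1001", tag);

  // step 5: bind the VALUES column by column (two rows here)
  int64_t ts[2]          = {1626861392589, 1626861392590};
  int32_t current[2]     = {10, 12};
  int32_t ts_len[2]      = {sizeof(int64_t), sizeof(int64_t)};
  int32_t current_len[2] = {sizeof(int32_t), sizeof(int32_t)};
  char    is_null[2]     = {0, 0};

  TAOS_MULTI_BIND cols[2];
  memset(cols, 0, sizeof(cols));
  cols[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
  cols[0].buffer        = ts;
  cols[0].buffer_length = sizeof(int64_t);
  cols[0].length        = ts_len;
  cols[0].is_null       = is_null;
  cols[0].num           = 2;

  cols[1].buffer_type   = TSDB_DATA_TYPE_INT;
  cols[1].buffer        = current;
  cols[1].buffer_length = sizeof(int32_t);
  cols[1].length        = current_len;
  cols[1].is_null       = is_null;
  cols[1].num           = 2;

  taos_stmt_bind_param_batch(stmt, cols);

  taos_stmt_add_batch(stmt);   // step 6; steps 3~6 could be repeated for more tables or rows
  taos_stmt_execute(stmt);     // step 8
  taos_stmt_close(stmt);       // step 9
}
```

For the variant that only binds the table name (step 3), `taos_stmt_set_tbname` would be used instead of `taos_stmt_set_tbname_tags`.
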
Besides C/C++, TDengine's Java JNI connector also supports the parameter-binding interface; see [Java usage of the parameter-binding interface](https://www.taosdata.com/cn/documentation/connector/java#stmt-java) for details.

The functions involved are as follows (see also how the corresponding functions are used in [apitest.c](https://github.com/taosdata/TDengine/blob/develop/tests/examples/c/apitest.c)):

- `TAOS_STMT* taos_stmt_init(TAOS *taos)`

@@ -301,11 +317,12 @@ All asynchronous TDengine APIs use a non-blocking calling model.

- `int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length)`

  Parses an SQL statement and binds the parsing result and parameter information to stmt. If the parameter length is greater than 0, it is used as the length of the SQL statement; if it is 0, the length of the SQL statement is determined automatically.

- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind)`

  Not as efficient as `taos_stmt_bind_param_batch`, but it supports SQL statements other than INSERT.
  Performs the parameter binding. bind points to an array (representing one row of data to be bound); the number and order of the elements in this array must exactly match the parameters in the SQL statement. TAOS_BIND is used in the same way as MYSQL_BIND in MySQL and is defined as follows:

```c
typedef struct TAOS_BIND {
@@ -319,9 +336,35 @@ typedef struct TAOS_BIND {
} TAOS_BIND;
```

- `int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name)`

  (Added in version 2.1.1.0)
  When the table name in the SQL statement uses a `?` placeholder, this function can be used to bind a concrete table name.

- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags)`

  (Added in version 2.1.2.0)
  When both the table name and the TAGS in the SQL statement use `?` placeholders, this function can be used to bind a concrete table name and concrete TAGS values. The most typical use case is an INSERT statement that relies on automatic table creation (the current version does not support specifying individual TAGS columns). The number of columns in the tags parameter must exactly match the number of TAGS required by the SQL statement.

- `int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind)`

  (Added in version 2.1.1.0)
  Passes the data to be bound in a column-wise manner. The order and number of the data columns passed here must exactly match the VALUES parameters in the SQL statement. TAOS_MULTI_BIND is defined as follows (a small illustration of filling it follows the definition):

```c
typedef struct TAOS_MULTI_BIND {
  int        buffer_type;
  void *     buffer;
  uintptr_t  buffer_length;
  int32_t *  length;
  char *     is_null;
  int        num;             // the number of values in this column, i.e. the number of parameters in buffer
} TAOS_MULTI_BIND;
```
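
To make the definition above more concrete, the fragment below sketches how the fields might be filled for a single BINARY(10) column carrying three rows, the second of which is NULL. The values and sizes are made up for this illustration; the point is only the layout, one fixed-width slot of `buffer_length` bytes per row in `buffer`.

```c
char     values[3][10] = {"v1", "", "v3"};   // one fixed-width slot per row
int32_t  lengths[3]    = {2, 0, 2};          // actual byte length of each row
char     nulls[3]      = {0, 1, 0};          // 1 marks a NULL row

TAOS_MULTI_BIND col;
memset(&col, 0, sizeof(col));
col.buffer_type   = TSDB_DATA_TYPE_BINARY;
col.buffer        = values;    // rows laid out back to back
col.buffer_length = 10;        // width of one slot in buffer
col.length        = lengths;   // per-row lengths, used by variable-length types
col.is_null       = nulls;     // per-row NULL flags
col.num           = 3;         // number of rows carried in this column

// An array of such structs, one element per VALUES column,
// is what taos_stmt_bind_param_batch() expects.
```
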
- `int taos_stmt_add_batch(TAOS_STMT *stmt)`

  Adds the currently bound parameters to the batch. After calling this function, `taos_stmt_bind_param` or `taos_stmt_bind_param_batch` can be called again to bind new parameters. Note that this function only supports INSERT/IMPORT statements; for SELECT and other SQL statements it returns an error.

- `int taos_stmt_execute(TAOS_STMT *stmt)`

@@ -329,7 +372,7 @@ typedef struct TAOS_BIND {

- `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)`

  Retrieves the result set of the statement. The result set is used in the same way as in a non-parameterized call; when you are done with it, call `taos_free_result` on it to release the resources (a short sketch follows below).

- `int taos_stmt_close(TAOS_STMT *stmt)`

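As a small sketch of how `taos_stmt_bind_param` (for non-INSERT statements) and `taos_stmt_use_result` fit together, the fragment below binds one parameter of a query and then walks the result set. The table and column names (`d1001`, `ts`, `current`) are assumptions for this example and error checking is omitted.

```c
#include <string.h>
#include "taos.h"

void query_with_stmt(TAOS *taos) {
  TAOS_STMT *stmt = taos_stmt_init(taos);
  taos_stmt_prepare(stmt, "select ts, current from d1001 where current > ?", 0);

  int32_t threshold = 10;
  TAOS_BIND param;
  memset(&param, 0, sizeof(param));
  param.buffer_type   = TSDB_DATA_TYPE_INT;
  param.buffer        = &threshold;
  param.buffer_length = sizeof(threshold);
  taos_stmt_bind_param(stmt, &param);   // one array element per `?` in the statement

  taos_stmt_execute(stmt);

  // The result set is consumed exactly like that of a non-parameterized query.
  TAOS_RES *res = taos_stmt_use_result(stmt);
  TAOS_ROW  row;
  while ((row = taos_fetch_row(res)) != NULL) {
    // process one row here
  }
  taos_free_result(res);   // release the result set first,
  taos_stmt_close(stmt);   // then release the statement itself
}
```
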
@@ -5142,6 +5142,10 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
  const char* msg18 = "primary timestamp column cannot be dropped";
  const char* msg19 = "invalid new tag name";
  const char* msg20 = "table is not super table";
  const char* msg21 = "only binary/nchar column length could be modified";
  const char* msg22 = "new column length should be bigger than old one";
  const char* msg23 = "only column length coulbe be modified";
  const char* msg24 = "invalid binary/nchar column length";

  int32_t code = TSDB_CODE_SUCCESS;

@@ -5172,13 +5176,13 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
  }

  if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN ||
-     pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) {
-   if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
+     pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN) {
+   if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
      return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
    }
  } else if ((pAlterSQL->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) && (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo))) {
    return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
- } else if ((pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_COLUMN) &&
+ } else if ((pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) &&
             UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo)) {
    return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
  }

@ -5394,6 +5398,85 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
tstrncpy(name1, pItem->pVar.pz, sizeof(name1));
|
||||
TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name1, tDataTypes[TSDB_DATA_TYPE_INT].bytes);
|
||||
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
|
||||
} else if (pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) {
|
||||
if (taosArrayGetSize(pAlterSQL->pAddColumns) >= 2) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg16);
|
||||
}
|
||||
|
||||
|
||||
TAOS_FIELD* pItem = taosArrayGet(pAlterSQL->pAddColumns, 0);
|
||||
if (pItem->type != TSDB_DATA_TYPE_BINARY && pItem->type != TSDB_DATA_TYPE_NCHAR) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg21);
|
||||
}
|
||||
|
||||
SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
|
||||
SStrToken name = {.type = TK_STRING, .z = pItem->name, .n = (uint32_t)strlen(pItem->name)};
|
||||
if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg17);
|
||||
}
|
||||
|
||||
SSchema* pColSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex);
|
||||
|
||||
if (pColSchema->type != TSDB_DATA_TYPE_BINARY && pColSchema->type != TSDB_DATA_TYPE_NCHAR) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg21);
|
||||
}
|
||||
|
||||
if (pItem->type != pColSchema->type) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg23);
|
||||
}
|
||||
|
||||
if ((pItem->type == TSDB_DATA_TYPE_BINARY && (pItem->bytes <= 0 || pItem->bytes > TSDB_MAX_BINARY_LEN)) ||
|
||||
(pItem->type == TSDB_DATA_TYPE_NCHAR && (pItem->bytes <= 0 || pItem->bytes > TSDB_MAX_NCHAR_LEN))) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg24);
|
||||
}
|
||||
|
||||
if (pItem->bytes <= pColSchema->bytes) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg22);
|
||||
}
|
||||
|
||||
TAOS_FIELD f = tscCreateField(pColSchema->type, name.z, pItem->bytes);
|
||||
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
|
||||
}else if (pAlterSQL->type == TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN) {
|
||||
if (taosArrayGetSize(pAlterSQL->pAddColumns) >= 2) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg16);
|
||||
}
|
||||
|
||||
TAOS_FIELD* pItem = taosArrayGet(pAlterSQL->pAddColumns, 0);
|
||||
if (pItem->type != TSDB_DATA_TYPE_BINARY && pItem->type != TSDB_DATA_TYPE_NCHAR) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg21);
|
||||
}
|
||||
|
||||
SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
|
||||
SStrToken name = {.type = TK_STRING, .z = pItem->name, .n = (uint32_t)strlen(pItem->name)};
|
||||
if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg17);
|
||||
}
|
||||
|
||||
SSchema* pColSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex);
|
||||
|
||||
if (columnIndex.columnIndex < tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg10);
|
||||
}
|
||||
|
||||
if (pColSchema->type != TSDB_DATA_TYPE_BINARY && pColSchema->type != TSDB_DATA_TYPE_NCHAR) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg21);
|
||||
}
|
||||
|
||||
if (pItem->type != pColSchema->type) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg23);
|
||||
}
|
||||
|
||||
if ((pItem->type == TSDB_DATA_TYPE_BINARY && (pItem->bytes <= 0 || pItem->bytes > TSDB_MAX_BINARY_LEN)) ||
|
||||
(pItem->type == TSDB_DATA_TYPE_NCHAR && (pItem->bytes <= 0 || pItem->bytes > TSDB_MAX_NCHAR_LEN))) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg24);
|
||||
}
|
||||
|
||||
if (pItem->bytes <= pColSchema->bytes) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg22);
|
||||
}
|
||||
|
||||
TAOS_FIELD f = tscCreateField(pColSchema->type, name.z, pItem->bytes);
|
||||
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@@ -5745,11 +5828,17 @@ static int32_t setKeepOption(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDbInfo* p
    tVariantListItem* p0 = taosArrayGet(pKeep, 0);
    switch (s) {
      case 1: {
        if ((int32_t)p0->pVar.i64 <= 0) {
          return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
        }
        pMsg->daysToKeep = htonl((int32_t)p0->pVar.i64);
      }
      break;
      case 2: {
        tVariantListItem* p1 = taosArrayGet(pKeep, 1);
        if ((int32_t)p0->pVar.i64 <= 0 || (int32_t)p1->pVar.i64 <= 0) {
          return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
        }
        pMsg->daysToKeep = htonl((int32_t)p0->pVar.i64);
        pMsg->daysToKeep1 = htonl((int32_t)p1->pVar.i64);
        break;

@@ -5758,6 +5847,10 @@ static int32_t setKeepOption(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDbInfo* p
        tVariantListItem* p1 = taosArrayGet(pKeep, 1);
        tVariantListItem* p2 = taosArrayGet(pKeep, 2);

        if ((int32_t)p0->pVar.i64 <= 0 || (int32_t)p1->pVar.i64 <= 0 || (int32_t)p2->pVar.i64 <= 0) {
          return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
        }

        pMsg->daysToKeep = htonl((int32_t)p0->pVar.i64);
        pMsg->daysToKeep1 = htonl((int32_t)p1->pVar.i64);
        pMsg->daysToKeep2 = htonl((int32_t)p2->pVar.i64);

@@ -7186,8 +7279,9 @@ static int32_t getTableNameFromSqlNode(SSqlNode* pSqlNode, SArray* tableNameList
  }

  SName name = {0};
- if (tscSetTableFullName(&name, t, pSql) != TSDB_CODE_SUCCESS) {
-   return invalidOperationMsg(msgBuf, msg1);
+ int32_t code = tscSetTableFullName(&name, t, pSql);
+ if (code != TSDB_CODE_SUCCESS) {
+   return code;
  }

  taosArrayPush(tableNameList, &name);

@@ -110,7 +110,8 @@ static void doLaunchQuery(void* param, TAOS_RES* tres, int32_t code) {
  // failed to get table Meta or vgroup list, retry in 10sec.
  if (code == TSDB_CODE_SUCCESS) {
    tscTansformFuncForSTableQuery(pQueryInfo);
-   tscDebug("0x%"PRIx64" stream:%p started to query table:%s", pSql->self, pStream, tNameGetTableName(&pTableMetaInfo->name));
+   tscDebug("0x%"PRIx64" stream:%p, start stream query on:%s QueryInfo->skey=%"PRId64" ekey=%"PRId64" ", pSql->self, pStream, tNameGetTableName(&pTableMetaInfo->name), pQueryInfo->window.skey, pQueryInfo->window.ekey);

    pQueryInfo->command = TSDB_SQL_SELECT;

@@ -164,7 +165,11 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
  if (etime > pStream->etime) {
    etime = pStream->etime;
  } else if (pStream->interval.intervalUnit != 'y' && pStream->interval.intervalUnit != 'n') {
-   etime = pStream->stime + (etime - pStream->stime) / pStream->interval.interval * pStream->interval.interval;
+   if(pStream->stime == INT64_MIN) {
+     etime = taosTimeTruncate(etime, &pStream->interval, pStream->precision);
+   } else {
+     etime = pStream->stime + (etime - pStream->stime) / pStream->interval.interval * pStream->interval.interval;
+   }
  } else {
    etime = taosTimeTruncate(etime, &pStream->interval, pStream->precision);
  }

@@ -353,8 +358,8 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
    tscDebug("0x%"PRIx64" stream:%p, next start at %" PRId64 "(ts window ekey), in %" PRId64 " ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql->self, pStream,
        now + timer, timer, delay, pStream->stime, etime);
  } else {
-   tscDebug("0x%"PRIx64" stream:%p, next start at %" PRId64 "(ts window ekey), in %" PRId64 " ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql->self, pStream,
-       pStream->stime, timer, delay, pStream->stime - pStream->interval.interval, pStream->stime - 1);
+   tscDebug("0x%"PRIx64" stream:%p, next start at %" PRId64 " - %" PRId64 " end, in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql->self, pStream,
+       pStream->stime, pStream->etime, timer, delay, pStream->stime - pStream->interval.interval, pStream->stime - 1);
  }

  pSql->cmd.command = TSDB_SQL_SELECT;

@@ -663,6 +668,11 @@ TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const c
  STscObj *pObj = (STscObj *)taos;
  if (pObj == NULL || pObj->signature != pObj) return NULL;

  if(fp == NULL){
    tscError(" taos_open_stream api fp param must not NULL.");
    return NULL;
  }

  SSqlObj *pSql = (SSqlObj *)calloc(1, sizeof(SSqlObj));
  if (pSql == NULL) {
    return NULL;

@@ -1119,6 +1119,8 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue

  SOperatorInfo* pSourceOperator = createDummyInputOperator(pSqlObjList[0], pSchema, numOfCol1, pFilterInfo, numOfFilterCols);

  pOutput->precision = pSqlObjList[0]->res.precision;

  SSchema* schema = NULL;
  if (px->numOfTables > 1) {
    SOperatorInfo** p = calloc(px->numOfTables, POINTER_BYTES);

@@ -4477,4 +4479,4 @@ SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg) {
  }

  return info;
}
}

@ -243,15 +243,15 @@ public class TSDBPreparedStatementTest {
|
|||
s.setNString(1, s2, 4);
|
||||
|
||||
random = 10 + r.nextInt(5);
|
||||
ArrayList<String> s5 = new ArrayList<String>();
|
||||
ArrayList<String> s3 = new ArrayList<String>();
|
||||
for(int i = 0; i < numOfRows; i++) {
|
||||
if(i % random == 0) {
|
||||
s5.add(null);
|
||||
s3.add(null);
|
||||
}else{
|
||||
s5.add("test" + i % 10);
|
||||
s3.add("test" + i % 10);
|
||||
}
|
||||
}
|
||||
s.setString(2, s5, 10);
|
||||
s.setString(2, s3, 10);
|
||||
|
||||
s.columnDataAddBatch();
|
||||
s.columnDataExecuteBatch();
|
||||
|
@ -268,7 +268,126 @@ public class TSDBPreparedStatementTest {
|
|||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void bindDataWithSingleTagTest() throws SQLException {
|
||||
Statement stmt = conn.createStatement();
|
||||
|
||||
String types[] = new String[] {"tinyint", "smallint", "int", "bigint", "bool", "float", "double", "binary(10)", "nchar(10)"};
|
||||
|
||||
for (String type : types) {
|
||||
stmt.execute("drop table if exists weather_test");
|
||||
stmt.execute("create table weather_test(ts timestamp, f1 nchar(10), f2 binary(10)) tags (t " + type + ")");
|
||||
|
||||
int numOfRows = 1;
|
||||
|
||||
TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?) values(?, ?, ?)");
|
||||
Random r = new Random();
|
||||
s.setTableName("w1");
|
||||
|
||||
switch(type) {
|
||||
case "tinyint":
|
||||
case "smallint":
|
||||
case "int":
|
||||
case "bigint":
|
||||
s.setTagInt(0, 1);
|
||||
break;
|
||||
case "float":
|
||||
s.setTagFloat(0, 1.23f);
|
||||
break;
|
||||
case "double":
|
||||
s.setTagDouble(0, 3.14159265);
|
||||
break;
|
||||
case "bool":
|
||||
s.setTagBoolean(0, true);
|
||||
break;
|
||||
case "binary(10)":
|
||||
s.setTagString(0, "test");
|
||||
break;
|
||||
case "nchar(10)":
|
||||
s.setTagNString(0, "test");
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
ArrayList<Long> ts = new ArrayList<Long>();
|
||||
for(int i = 0; i < numOfRows; i++) {
|
||||
ts.add(System.currentTimeMillis() + i);
|
||||
}
|
||||
s.setTimestamp(0, ts);
|
||||
|
||||
int random = 10 + r.nextInt(5);
|
||||
ArrayList<String> s2 = new ArrayList<String>();
|
||||
for(int i = 0; i < numOfRows; i++) {
|
||||
s2.add("分支" + i % 4);
|
||||
}
|
||||
s.setNString(1, s2, 10);
|
||||
|
||||
random = 10 + r.nextInt(5);
|
||||
ArrayList<String> s3 = new ArrayList<String>();
|
||||
for(int i = 0; i < numOfRows; i++) {
|
||||
s3.add("test" + i % 4);
|
||||
}
|
||||
s.setString(2, s3, 10);
|
||||
|
||||
s.columnDataAddBatch();
|
||||
s.columnDataExecuteBatch();
|
||||
s.columnDataCloseBatch();
|
||||
|
||||
String sql = "select * from weather_test";
|
||||
PreparedStatement statement = conn.prepareStatement(sql);
|
||||
ResultSet rs = statement.executeQuery();
|
||||
int rows = 0;
|
||||
while(rs.next()) {
|
||||
rows++;
|
||||
}
|
||||
Assert.assertEquals(numOfRows, rows);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void bindDataWithMultipleTagsTest() throws SQLException {
|
||||
Statement stmt = conn.createStatement();
|
||||
|
||||
stmt.execute("drop table if exists weather_test");
|
||||
stmt.execute("create table weather_test(ts timestamp, f1 nchar(10), f2 binary(10)) tags (t1 int, t2 binary(10))");
|
||||
|
||||
int numOfRows = 1;
|
||||
|
||||
TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?,?) (ts, f2) values(?, ?)");
|
||||
s.setTableName("w2");
|
||||
s.setTagInt(0, 1);
|
||||
s.setTagString(1, "test");
|
||||
|
||||
|
||||
ArrayList<Long> ts = new ArrayList<Long>();
|
||||
for(int i = 0; i < numOfRows; i++) {
|
||||
ts.add(System.currentTimeMillis() + i);
|
||||
}
|
||||
s.setTimestamp(0, ts);
|
||||
|
||||
ArrayList<String> s2 = new ArrayList<String>();
|
||||
for(int i = 0; i < numOfRows; i++) {
|
||||
s2.add("test" + i % 4);
|
||||
}
|
||||
s.setString(1, s2, 10);
|
||||
|
||||
s.columnDataAddBatch();
|
||||
s.columnDataExecuteBatch();
|
||||
s.columnDataCloseBatch();
|
||||
|
||||
String sql = "select * from weather_test";
|
||||
PreparedStatement statement = conn.prepareStatement(sql);
|
||||
ResultSet rs = statement.executeQuery();
|
||||
int rows = 0;
|
||||
while(rs.next()) {
|
||||
rows++;
|
||||
}
|
||||
Assert.assertEquals(numOfRows, rows);
|
||||
|
||||
}
|
||||
|
||||
@Test
|
||||
public void setBoolean() throws SQLException {
|
||||
|
|
|
@ -88,13 +88,24 @@ static SStep tsDnodeSteps[] = {
|
|||
|
||||
static SStep tsDnodeCompactSteps[] = {
|
||||
{"dnode-tfile", tfInit, tfCleanup},
|
||||
{"dnode-globalcfg", taosCheckGlobalCfg, NULL},
|
||||
{"dnode-storage", dnodeInitStorage, dnodeCleanupStorage},
|
||||
{"dnode-cfg", dnodeInitCfg, dnodeCleanupCfg},
|
||||
{"dnode-eps", dnodeInitEps, dnodeCleanupEps},
|
||||
{"dnode-minfos", dnodeInitMInfos, dnodeCleanupMInfos},
|
||||
{"dnode-wal", walInit, walCleanUp},
|
||||
{"dnode-sync", syncInit, syncCleanUp},
|
||||
{"dnode-vread", dnodeInitVRead, dnodeCleanupVRead},
|
||||
{"dnode-vwrite", dnodeInitVWrite, dnodeCleanupVWrite},
|
||||
{"dnode-vmgmt", dnodeInitVMgmt, dnodeCleanupVMgmt},
|
||||
{"dnode-mread", dnodeInitMRead, NULL},
|
||||
{"dnode-mwrite", dnodeInitMWrite, NULL},
|
||||
{"dnode-mpeer", dnodeInitMPeer, NULL},
|
||||
{"dnode-vnodes", dnodeInitVnodes, dnodeCleanupVnodes},
|
||||
{"dnode-modules", dnodeInitModules, dnodeCleanupModules},
|
||||
{"dnode-mread", NULL, dnodeCleanupMRead},
|
||||
{"dnode-mwrite", NULL, dnodeCleanupMWrite},
|
||||
{"dnode-mpeer", NULL, dnodeCleanupMPeer},
|
||||
};
|
||||
|
||||
static int dnodeCreateDir(const char *dir) {
|
||||
|
|
|
@@ -161,6 +161,7 @@ enum _mgmt_table {
#define TSDB_ALTER_TABLE_ADD_COLUMN 5
#define TSDB_ALTER_TABLE_DROP_COLUMN 6
#define TSDB_ALTER_TABLE_CHANGE_COLUMN 7
#define TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN 8

#define TSDB_FILL_NONE 0
#define TSDB_FILL_NULL 1

@ -156,54 +156,62 @@
|
|||
#define TK_SYNCDB 137
|
||||
#define TK_ADD 138
|
||||
#define TK_COLUMN 139
|
||||
#define TK_TAG 140
|
||||
#define TK_CHANGE 141
|
||||
#define TK_SET 142
|
||||
#define TK_KILL 143
|
||||
#define TK_CONNECTION 144
|
||||
#define TK_STREAM 145
|
||||
#define TK_COLON 146
|
||||
#define TK_ABORT 147
|
||||
#define TK_AFTER 148
|
||||
#define TK_ATTACH 149
|
||||
#define TK_BEFORE 150
|
||||
#define TK_BEGIN 151
|
||||
#define TK_CASCADE 152
|
||||
#define TK_CLUSTER 153
|
||||
#define TK_CONFLICT 154
|
||||
#define TK_COPY 155
|
||||
#define TK_DEFERRED 156
|
||||
#define TK_DELIMITERS 157
|
||||
#define TK_DETACH 158
|
||||
#define TK_EACH 159
|
||||
#define TK_END 160
|
||||
#define TK_EXPLAIN 161
|
||||
#define TK_FAIL 162
|
||||
#define TK_FOR 163
|
||||
#define TK_IGNORE 164
|
||||
#define TK_IMMEDIATE 165
|
||||
#define TK_INITIALLY 166
|
||||
#define TK_INSTEAD 167
|
||||
#define TK_MATCH 168
|
||||
#define TK_KEY 169
|
||||
#define TK_OF 170
|
||||
#define TK_RAISE 171
|
||||
#define TK_REPLACE 172
|
||||
#define TK_RESTRICT 173
|
||||
#define TK_ROW 174
|
||||
#define TK_STATEMENT 175
|
||||
#define TK_TRIGGER 176
|
||||
#define TK_VIEW 177
|
||||
#define TK_SEMI 178
|
||||
#define TK_NONE 179
|
||||
#define TK_PREV 180
|
||||
#define TK_LINEAR 181
|
||||
#define TK_IMPORT 182
|
||||
#define TK_TBNAME 183
|
||||
#define TK_JOIN 184
|
||||
#define TK_INSERT 185
|
||||
#define TK_INTO 186
|
||||
#define TK_VALUES 187
|
||||
#define TK_MODIFY 140
|
||||
#define TK_TAG 141
|
||||
#define TK_CHANGE 142
|
||||
#define TK_SET 143
|
||||
#define TK_KILL 144
|
||||
#define TK_CONNECTION 145
|
||||
#define TK_STREAM 146
|
||||
#define TK_COLON 147
|
||||
#define TK_ABORT 148
|
||||
#define TK_AFTER 149
|
||||
#define TK_ATTACH 150
|
||||
#define TK_BEFORE 151
|
||||
#define TK_BEGIN 152
|
||||
#define TK_CASCADE 153
|
||||
#define TK_CLUSTER 154
|
||||
#define TK_CONFLICT 155
|
||||
#define TK_COPY 156
|
||||
#define TK_DEFERRED 157
|
||||
#define TK_DELIMITERS 158
|
||||
#define TK_DETACH 159
|
||||
#define TK_EACH 160
|
||||
#define TK_END 161
|
||||
#define TK_EXPLAIN 162
|
||||
#define TK_FAIL 163
|
||||
#define TK_FOR 164
|
||||
#define TK_IGNORE 165
|
||||
#define TK_IMMEDIATE 166
|
||||
#define TK_INITIALLY 167
|
||||
#define TK_INSTEAD 168
|
||||
#define TK_MATCH 169
|
||||
#define TK_KEY 170
|
||||
#define TK_OF 171
|
||||
#define TK_RAISE 172
|
||||
#define TK_REPLACE 173
|
||||
#define TK_RESTRICT 174
|
||||
#define TK_ROW 175
|
||||
#define TK_STATEMENT 176
|
||||
#define TK_TRIGGER 177
|
||||
#define TK_VIEW 178
|
||||
#define TK_SEMI 179
|
||||
#define TK_NONE 180
|
||||
#define TK_PREV 181
|
||||
#define TK_LINEAR 182
|
||||
#define TK_IMPORT 183
|
||||
#define TK_TBNAME 184
|
||||
#define TK_JOIN 185
|
||||
#define TK_INSERT 186
|
||||
#define TK_INTO 187
|
||||
#define TK_VALUES 188
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
#define TK_SPACE 300
|
||||
#define TK_COMMENT 301
|
||||
|
|
|
@@ -799,7 +799,7 @@ static int32_t mnodeGetDnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC
  pSchema[cols].bytes = htons(pShow->bytes[cols]);
  cols++;

- pShow->bytes[cols] = 40 + VARSTR_HEADER_SIZE;
+ pShow->bytes[cols] = TSDB_EP_LEN + VARSTR_HEADER_SIZE;
  pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
  strcpy(pSchema[cols].name, "end_point");
  pSchema[cols].bytes = htons(pShow->bytes[cols]);

@@ -485,7 +485,7 @@ static int32_t mnodeGetMnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC
  pSchema[cols].bytes = htons(pShow->bytes[cols]);
  cols++;

- pShow->bytes[cols] = 40 + VARSTR_HEADER_SIZE;
+ pShow->bytes[cols] = TSDB_EP_LEN + VARSTR_HEADER_SIZE;
  pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
  strcpy(pSchema[cols].name, "end_point");
  pSchema[cols].bytes = htons(pShow->bytes[cols]);

@@ -1176,9 +1176,10 @@ int32_t mnodeCompactWal() {
    return -1;
  }

- // close wal
- walFsync(tsSdbMgmt.wal, true);
- walClose(tsSdbMgmt.wal);
+ // close sdb and sync to disk
+ //walFsync(tsSdbMgmt.wal, true);
+ //walClose(tsSdbMgmt.wal);
  sdbCleanUp();

  // rename old wal to wal_bak
  if (taosRename(tsMnodeDir, tsMnodeBakDir) != 0) {

@@ -93,6 +93,9 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg);
static void mnodeProcessAlterTableRsp(SRpcMsg *rpcMsg);

static int32_t mnodeFindSuperTableColumnIndex(SSTableObj *pStable, char *colName);
static int32_t mnodeChangeSuperTableColumn(SMnodeMsg *pMsg);
static int32_t mnodeChangeSuperTableTag(SMnodeMsg *pMsg);
static int32_t mnodeChangeNormalTableColumn(SMnodeMsg *pMsg);

static void mnodeDestroyChildTable(SCTableObj *pTable) {
  tfree(pTable->info.tableId);

@ -1457,31 +1460,52 @@ static int32_t mnodeChangeSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) {
|
|||
return code;
|
||||
}
|
||||
|
||||
static int32_t mnodeChangeSuperTableColumn(SMnodeMsg *pMsg, char *oldName, char *newName) {
|
||||
static int32_t mnodeChangeSuperTableColumn(SMnodeMsg *pMsg) {
|
||||
SAlterTableMsg *pAlter = pMsg->rpcMsg.pCont;
|
||||
char* name = pAlter->schema[0].name;
|
||||
SSTableObj *pStable = (SSTableObj *)pMsg->pTable;
|
||||
int32_t col = mnodeFindSuperTableColumnIndex(pStable, oldName);
|
||||
int32_t col = mnodeFindSuperTableColumnIndex(pStable, name);
|
||||
if (col < 0) {
|
||||
mError("msg:%p, app:%p stable:%s, change column, oldName:%s, newName:%s", pMsg, pMsg->rpcMsg.ahandle,
|
||||
pStable->info.tableId, oldName, newName);
|
||||
mError("msg:%p, app:%p stable:%s, change column, name:%s", pMsg, pMsg->rpcMsg.ahandle,
|
||||
pStable->info.tableId, name);
|
||||
return TSDB_CODE_MND_FIELD_NOT_EXIST;
|
||||
}
|
||||
|
||||
// int32_t rowSize = 0;
|
||||
uint32_t len = (uint32_t)strlen(newName);
|
||||
if (len >= TSDB_COL_NAME_LEN) {
|
||||
return TSDB_CODE_MND_COL_NAME_TOO_LONG;
|
||||
}
|
||||
|
||||
if (mnodeFindSuperTableColumnIndex(pStable, newName) >= 0) {
|
||||
return TSDB_CODE_MND_FIELD_ALREAY_EXIST;
|
||||
}
|
||||
|
||||
// update
|
||||
SSchema *schema = (SSchema *) (pStable->schema + col);
|
||||
tstrncpy(schema->name, newName, sizeof(schema->name));
|
||||
ASSERT(schema->type == TSDB_DATA_TYPE_BINARY || schema->type == TSDB_DATA_TYPE_NCHAR);
|
||||
schema->bytes = pAlter->schema[0].bytes;
|
||||
mInfo("msg:%p, app:%p stable %s, start to modify column %s len to %d", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
|
||||
name, schema->bytes);
|
||||
|
||||
mInfo("msg:%p, app:%p stable %s, start to modify column %s to %s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
|
||||
oldName, newName);
|
||||
SSdbRow row = {
|
||||
.type = SDB_OPER_GLOBAL,
|
||||
.pTable = tsSuperTableSdb,
|
||||
.pObj = pStable,
|
||||
.pMsg = pMsg,
|
||||
.fpRsp = mnodeChangeSuperTableColumnCb
|
||||
};
|
||||
|
||||
return sdbUpdateRow(&row);
|
||||
}
|
||||
|
||||
static int32_t mnodeChangeSuperTableTag(SMnodeMsg *pMsg) {
|
||||
SAlterTableMsg *pAlter = pMsg->rpcMsg.pCont;
|
||||
char* name = pAlter->schema[0].name;
|
||||
SSTableObj *pStable = (SSTableObj *)pMsg->pTable;
|
||||
int32_t col = mnodeFindSuperTableTagIndex(pStable, name);
|
||||
if (col < 0) {
|
||||
mError("msg:%p, app:%p stable:%s, change column, name:%s", pMsg, pMsg->rpcMsg.ahandle,
|
||||
pStable->info.tableId, name);
|
||||
return TSDB_CODE_MND_FIELD_NOT_EXIST;
|
||||
}
|
||||
|
||||
// update
|
||||
SSchema *schema = (SSchema *) (pStable->schema + col);
|
||||
ASSERT(schema->type == TSDB_DATA_TYPE_BINARY || schema->type == TSDB_DATA_TYPE_NCHAR);
|
||||
schema->bytes = pAlter->schema[0].bytes;
|
||||
mInfo("msg:%p, app:%p stable %s, start to modify tag len %s to %d", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
|
||||
name, schema->bytes);
|
||||
|
||||
SSdbRow row = {
|
||||
.type = SDB_OPER_GLOBAL,
|
||||
|
@ -2355,31 +2379,23 @@ static int32_t mnodeDropNormalTableColumn(SMnodeMsg *pMsg, char *colName) {
|
|||
return sdbUpdateRow(&row);
|
||||
}
|
||||
|
||||
static int32_t mnodeChangeNormalTableColumn(SMnodeMsg *pMsg, char *oldName, char *newName) {
|
||||
static int32_t mnodeChangeNormalTableColumn(SMnodeMsg *pMsg) {
|
||||
SAlterTableMsg *pAlter = pMsg->rpcMsg.pCont;
|
||||
char* name = pAlter->schema[0].name;
|
||||
SCTableObj *pTable = (SCTableObj *)pMsg->pTable;
|
||||
int32_t col = mnodeFindNormalTableColumnIndex(pTable, oldName);
|
||||
int32_t col = mnodeFindNormalTableColumnIndex(pTable, name);
|
||||
if (col < 0) {
|
||||
mError("msg:%p, app:%p ctable:%s, change column, oldName: %s, newName: %s", pMsg, pMsg->rpcMsg.ahandle,
|
||||
pTable->info.tableId, oldName, newName);
|
||||
mError("msg:%p, app:%p ctable:%s, change column, name: %s", pMsg, pMsg->rpcMsg.ahandle,
|
||||
pTable->info.tableId, name);
|
||||
return TSDB_CODE_MND_FIELD_NOT_EXIST;
|
||||
}
|
||||
|
||||
// int32_t rowSize = 0;
|
||||
uint32_t len = (uint32_t)strlen(newName);
|
||||
if (len >= TSDB_COL_NAME_LEN) {
|
||||
return TSDB_CODE_MND_COL_NAME_TOO_LONG;
|
||||
}
|
||||
|
||||
if (mnodeFindNormalTableColumnIndex(pTable, newName) >= 0) {
|
||||
return TSDB_CODE_MND_FIELD_ALREAY_EXIST;
|
||||
}
|
||||
|
||||
// update
|
||||
SSchema *schema = (SSchema *) (pTable->schema + col);
|
||||
tstrncpy(schema->name, newName, sizeof(schema->name));
|
||||
ASSERT(schema->type == TSDB_DATA_TYPE_BINARY || schema->type == TSDB_DATA_TYPE_NCHAR);
|
||||
schema->bytes = pAlter->schema[0].bytes;
|
||||
|
||||
mInfo("msg:%p, app:%p ctable %s, start to modify column %s to %s", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId,
|
||||
oldName, newName);
|
||||
mInfo("msg:%p, app:%p ctable %s, start to modify column %s len to %d", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId,
|
||||
name, schema->bytes);
|
||||
|
||||
SSdbRow row = {
|
||||
.type = SDB_OPER_GLOBAL,
|
||||
|
@@ -3214,7 +3230,9 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
    } else if (pAlter->type == TSDB_ALTER_TABLE_DROP_COLUMN) {
      code = mnodeDropSuperTableColumn(pMsg, pAlter->schema[0].name);
    } else if (pAlter->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) {
-     code = mnodeChangeSuperTableColumn(pMsg, pAlter->schema[0].name, pAlter->schema[1].name);
+     code = mnodeChangeSuperTableColumn(pMsg);
+   } else if (pAlter->type == TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN) {
+     code = mnodeChangeSuperTableTag(pMsg);
    } else {
    }
  } else {

@@ -3226,7 +3244,7 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
    } else if (pAlter->type == TSDB_ALTER_TABLE_DROP_COLUMN) {
      code = mnodeDropNormalTableColumn(pMsg, pAlter->schema[0].name);
    } else if (pAlter->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) {
-     code = mnodeChangeNormalTableColumn(pMsg, pAlter->schema[0].name, pAlter->schema[1].name);
+     code = mnodeChangeNormalTableColumn(pMsg);
    } else {
    }
  }

@@ -3417,4 +3435,4 @@ int32_t mnodeCompactTables() {
  mnodeCompactChildTables();

  return 0;
}
}

@@ -759,6 +759,12 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) DROP COLUMN ids(A). {
  setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}

cmd ::= ALTER TABLE ids(X) cpxName(F) MODIFY COLUMN columnlist(A). {
  X.n += F.n;
  SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, A, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, -1);
  setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}

//////////////////////////////////ALTER TAGS statement/////////////////////////////////////
cmd ::= ALTER TABLE ids(X) cpxName(Y) ADD TAG columnlist(A). {
  X.n += Y.n;

@@ -799,6 +805,11 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). {
  setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}

cmd ::= ALTER TABLE ids(X) cpxName(F) MODIFY TAG columnlist(A). {
  X.n += F.n;
  SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, A, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, -1);
  setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}

///////////////////////////////////ALTER STABLE statement//////////////////////////////////
cmd ::= ALTER STABLE ids(X) cpxName(F) ADD COLUMN columnlist(A). {

@@ -817,6 +828,12 @@ cmd ::= ALTER STABLE ids(X) cpxName(F) DROP COLUMN ids(A). {
  setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}

cmd ::= ALTER STABLE ids(X) cpxName(F) MODIFY COLUMN columnlist(A). {
  X.n += F.n;
  SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, A, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, TSDB_SUPER_TABLE);
  setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}

//////////////////////////////////ALTER TAGS statement/////////////////////////////////////
cmd ::= ALTER STABLE ids(X) cpxName(Y) ADD TAG columnlist(A). {
  X.n += Y.n;

@@ -846,6 +863,23 @@ cmd ::= ALTER STABLE ids(X) cpxName(F) CHANGE TAG ids(Y) ids(Z). {
  setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}

cmd ::= ALTER STABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). {
  X.n += F.n;

  toTSDBType(Y.type);
  SArray* A = tVariantListAppendToken(NULL, &Y, -1);
  A = tVariantListAppend(A, &Z, -1);

  SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, TSDB_SUPER_TABLE);
  setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}

cmd ::= ALTER STABLE ids(X) cpxName(F) MODIFY TAG columnlist(A). {
  X.n += F.n;
  SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, A, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, TSDB_SUPER_TABLE);
  setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}

////////////////////////////////////////kill statement///////////////////////////////////////
cmd ::= KILL CONNECTION INTEGER(Y). {setKillSql(pInfo, TSDB_SQL_KILL_CONNECTION, &Y);}
cmd ::= KILL STREAM INTEGER(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); setKillSql(pInfo, TSDB_SQL_KILL_STREAM, &X);}

@@ -1347,7 +1347,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInf
      pInfo->start = j;
    } else if (tsList[j] - pInfo->prevTs <= gap) {
      pInfo->curWindow.ekey = tsList[j];
-     //pInfo->prevTs = tsList[j];
+     pInfo->prevTs = tsList[j];
      pInfo->numOfRows += 1;
      if (j == 0 && pInfo->start != 0) {
        pInfo->numOfRows = 1;

@@ -893,7 +893,7 @@ SAlterTableInfo *tSetAlterTableInfo(SStrToken *pTableName, SArray *pCols, SArray
  pAlterTable->type = type;
  pAlterTable->tableType = tableType;

- if (type == TSDB_ALTER_TABLE_ADD_COLUMN || type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN) {
+ if (type == TSDB_ALTER_TABLE_ADD_COLUMN || type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN || type == TSDB_ALTER_TABLE_CHANGE_COLUMN || type == TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN) {
    pAlterTable->pAddColumns = pCols;
    assert(pVals == NULL);
  } else {

src/query/src/sql.c (1732 changes): file diff suppressed because it is too large.

@@ -218,7 +218,8 @@ static SKeyword keywordTable[] = {
    {"DISTINCT", TK_DISTINCT},
    {"PARTITIONS", TK_PARTITIONS},
    {"TOPIC", TK_TOPIC},
-   {"TOPICS", TK_TOPICS}
+   {"TOPICS", TK_TOPICS},
+   {"MODIFY", TK_MODIFY}
};

static const char isIdChar[] = {

@ -0,0 +1,44 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
#TODO: after TD-4518 and TD-4510 is resolved, add the exception test case for these situations
|
||||
|
||||
import sys
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
tdSql.error('alter database keep db 0')
|
||||
tdSql.error('alter database keep db -10')
|
||||
tdSql.error('alter database keep db -2147483648')
|
||||
|
||||
#this is the test case problem for keep overflow
|
||||
#the error is caught, but type is wrong.
|
||||
#TODO: can be solved in the future, but improvement is minimal
|
||||
tdSql.error('alter database keep db -2147483649')
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@@ -314,6 +314,8 @@ python3 ./test.py -f query/last_row_cache.py
python3 ./test.py -f account/account_create.py
python3 ./test.py -f alter/alter_table.py
python3 ./test.py -f query/queryGroupbySort.py
python3 ./test.py -f functions/function_session.py
python3 ./test.py -f functions/function_stateWindow.py

python3 ./test.py -f insert/unsignedInt.py
python3 ./test.py -f insert/unsignedBigint.py

@@ -337,4 +339,5 @@ python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
python3 ./test.py -f tag_lite/drop_auto_create.py
python3 test.py -f insert/insert_before_use_db.py
python3 test.py -f alter/alter_cacheLastRow.py
python3 test.py -f alter/alter_keep_exception.py
#======================p4-end===============

@ -0,0 +1,86 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import taos
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
#import numpy as np
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
self.rowNum = 10
|
||||
self.ts = 1537146000000
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
|
||||
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
|
||||
tdSql.execute("create table test1 using test tags('beijing')")
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
|
||||
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
|
||||
|
||||
# operation not allowed on super table
|
||||
tdSql.error("select count(*) from test session(ts, 1s)")
|
||||
# operation not allowde on col pro
|
||||
tdSql.error("select * from test1 session(ts, 1s)")
|
||||
# operation not allowed on col except primary ts
|
||||
tdSql.error("select * from test1 session(col1, 1s)")
|
||||
|
||||
tdSql.query("select count(*) from test1 session(ts, 1s)")
|
||||
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 1, 10)
|
||||
# append more data
|
||||
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
|
||||
% (self.ts + 2000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
|
||||
|
||||
tdSql.query("select count(*) from test1 session(ts, 1s)")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 1, 10)
|
||||
tdSql.checkData(1, 1, 1)
|
||||
|
||||
tdSql.query("select count(*) from test1 session(ts, 1m)")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 1, 11)
|
||||
|
||||
tdSql.query("select first(col1) from test1 session(ts, 1s)")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 1, 1)
|
||||
tdSql.checkData(1, 1, 1)
|
||||
|
||||
|
||||
tdSql.query("select first(col1), last(col2) from test1 session(ts, 1s)")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 1, 1)
|
||||
tdSql.checkData(0, 2, 10)
|
||||
tdSql.checkData(1, 1, 1)
|
||||
tdSql.checkData(1, 1, 1)
|
||||
|
||||
# add more function
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,109 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import taos
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
#import numpy as np
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
self.rowNum = 10
|
||||
self.ts = 1537146000000
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
|
||||
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
|
||||
tdSql.execute("create table test1 using test tags('beijing')")
|
||||
col0 = 0
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
|
||||
% (self.ts + i, col0, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
|
||||
|
||||
# operation not allowed on super table
|
||||
tdSql.error("select count(*) from test session(ts, 1s)")
|
||||
# operation not allowde on col pro
|
||||
tdSql.error("select * from test1 session(ts, 1s)")
|
||||
# operation not allowed on col except primary ts
|
||||
tdSql.error("select * from test1 session(col1, 1s)")
|
||||
|
||||
tdSql.query("select count(*) from test1 state_window(col1)")
|
||||
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, self.rowNum)
|
||||
# append more data
|
||||
|
||||
col0 = col0 + 1
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
|
||||
% (self.ts + i + 10000, col0, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
|
||||
|
||||
tdSql.query("select count(*) from test1 state_window(col1)")
|
||||
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 0, self.rowNum)
|
||||
tdSql.checkData(1, 0, self.rowNum)
|
||||
|
||||
|
||||
tdSql.query("select first(col1) from test1 state_window(col1)")
|
||||
tdSql.checkRows(2)
|
||||
col0 = col0 - 1
|
||||
tdSql.checkData(0, 0, col0)
|
||||
col0 = col0 + 1
|
||||
tdSql.checkData(1, 0, col0)
|
||||
|
||||
tdSql.query("select first(col2) from test1 state_window(col1)")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 0, 1)
|
||||
tdSql.checkData(1, 0, 1)
|
||||
|
||||
tdSql.query("select count(col1), first(col2) from test1 state_window(col1)")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 0, 10)
|
||||
tdSql.checkData(0, 1, 1)
|
||||
|
||||
tdSql.checkData(1, 0, 10)
|
||||
tdSql.checkData(1, 1, 1)
|
||||
|
||||
|
||||
#tdSql.query("select count(*) from test1 session(ts, 1m)")
|
||||
#tdSql.checkRows(1)
|
||||
#tdSql.checkData(0, 1, 11)
|
||||
|
||||
#tdSql.query("select first(col1) from test1 session(ts, 1s)")
|
||||
#tdSql.checkRows(2)
|
||||
#tdSql.checkData(0, 1, 1)
|
||||
#tdSql.checkData(1, 1, 1)
|
||||
|
||||
#tdSql.query("select first(col1), last(col2) from test1 session(ts, 1s)")
|
||||
#tdSql.checkRows(2)
|
||||
#tdSql.checkData(0, 1, 1)
|
||||
#tdSql.checkData(0, 2, 10)
|
||||
#tdSql.checkData(1, 1, 1)
|
||||
#tdSql.checkData(1, 1, 1)
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,82 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import os
import sys
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import tdDnodes

## TODO: an automated version of this test is currently unsupported;
# an auto test version needs to be added in the future
class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def run(self):
        tdSql.prepare()
        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"

        # alter cache blocks to 3, then verify the alter took effect
        tdSql.execute('alter database db blocks 3')
        tdSql.query('show databases')
        tdSql.checkData(0, 9, 3)

        # run taosdemo to occupy all cache; memory consumption must be checked manually
        os.system("%staosdemo -f tools/taosdemoAllTest/manual_block1_comp.json" % binPath)
        input("please check memory usage for taosd. After checking, press enter")

        # alter cache blocks to 8, then verify the alter took effect
        tdSql.execute('alter database db blocks 8')
        tdSql.query('show databases')
        tdSql.checkData(0, 9, 8)

        # run taosdemo to occupy all cache; memory consumption must be checked manually
        os.system("%staosdemo -f tools/taosdemoAllTest/manual_block2.json" % binPath)
        input("please check memory usage for taosd. After checking, press enter")

        ## expected result: peak memory consumption should increase by around 80MB = 5 blocks of cache

        ## test results
        # 2021/06/02 before: 2621700K, after: 2703640K; memory usage increased by 80MB = 5 blocks,
        # consistent with the change in blocks. Tester: Baosheng Chang
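        # Arithmetic note: manual_block1_comp.json sets cache to 16 (assumed here to be
        # MB per block), so going from 3 to 8 blocks should add roughly 5 * 16 MB = 80 MB;
        # the observed delta 2703640K - 2621700K = 81940K is about 80 MB, matching that.
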
    def stop(self):
        tdSql.close()
        tdLog.debug("%s alter block manual check finish" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@ -0,0 +1,126 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import os
import sys
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import tdDnodes

## TODO: an automated version of this test is currently unsupported;
# an auto test version needs to be added in the future
class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def getRootPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))
        print(selfPath)
        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
            print(projPath)
        else:
            projPath = selfPath[:selfPath.find("tests")]
            print("test" + projPath)

        for root, dirs, files in os.walk(projPath):
            if ('data' in dirs and 'sim' in root):
                rootPath = root

        return rootPath

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def run(self):
        dnodePath = self.getRootPath()
        os.system(f'rm -rf {dnodePath}/data/* {dnodePath}/log/*')
        tdSql.prepare()
        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"

        # comp is at column index 14 of "show databases"
        # check disk usage when comp=2
        tdSql.query('show databases')
        tdSql.execute('alter database db blocks 3')  # minimize the data in cache
        tdSql.checkData(0, 14, 2)
        os.system("%staosdemo -f tools/taosdemoAllTest/manual_block1_comp.json" % binPath)
        print("default location is at /home/bryan/Documents/Github/TDengine/sim/dnode1/data/vnode")
        print('comp = 2')
        input("please check disk usage for taosd. After checking, press enter")

        # remove all data files
        os.system(f'sudo rm -rf {dnodePath}/data/* {dnodePath}/log/*')
        #print(f'rm -rf {dnodePath}/data/* {dnodePath}/log/*')  # for showing the command ran
        input("please check that the previous data has been deleted. Then, press enter")

        # check disk usage when comp=0
        tdSql.prepare()
        tdSql.query('show databases')
        tdSql.checkData(0, 14, 2)
        tdSql.execute('alter database db comp 0')
        tdSql.query('show databases')
        tdSql.checkData(0, 14, 0)
        os.system("%staosdemo -f tools/taosdemoAllTest/manual_block1_comp.json" % binPath)
        print("default location is at /home/bryan/Documents/Github/TDengine/sim/dnode1/data")
        print('comp = 0')
        input("please check disk usage for taosd. After checking, press enter")

        # remove all data files
        os.system(f'sudo rm -rf {dnodePath}/data/* {dnodePath}/log/*')
        #print(f'rm -rf {dnodePath}/data/* {dnodePath}/log/*')  # for showing the command ran
        input("please check that the previous data has been deleted. Then, press enter")

        # check disk usage when comp=1
        tdSql.prepare()
        tdSql.query('show databases')
        tdSql.checkData(0, 14, 2)
        tdSql.execute('alter database db comp 1')
        tdSql.query('show databases')
        tdSql.checkData(0, 14, 1)
        os.system("%staosdemo -f tools/taosdemoAllTest/manual_block1_comp.json" % binPath)
        print("default location is at /home/bryan/Documents/Github/TDengine/sim/dnode1/data")
        print('comp = 1')
        input("please check disk usage for taosd. After checking, press enter")

        ## test result
        # 2021/06/02 comp=2: 13M, comp=1: 57M, comp=0: 399M. Test passed;
        # each row inserted is identical. Tester: Baosheng Chang
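        # Clarifying note (assumption): comp is TDengine's compression level
        # (0 = none, 1 = one-stage, 2 = two-stage), which is consistent with the
        # measured sizes shrinking from 399M (comp=0) to 57M (comp=1) to 13M (comp=2).
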
    def stop(self):
        tdSql.close()
        tdLog.debug("%s alter block manual check finish" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@ -45,7 +45,7 @@ class TDTestCase:
        tdSql.query("select * from st")
        tdSql.checkRows(1)

        tdSql.execute("alter table st add column length int")
        tdSql.execute("alter table st add column len int")
        tdSql.execute("insert into t1 values(now, 1, 2)")
        tdSql.query("select last(*) from st")
        tdSql.checkData(0, 2, 2);
@ -176,7 +176,15 @@ class TDTestCase:
        tdSql.error("select count(join_mt0.c1), first(join_mt0.c1), first(join_mt1.c9) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts interval(10a) group by join_mt0.t1, join_mt0.t2 order by join_mt0.t1 desc slimit 3")
        tdSql.error("select count(join_mt0.c1), first(join_mt0.c1) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts interval(10a) group by join_mt0.t1, join_mt0.t2, join_mt1.t1 order by join_mt0.ts desc, join_mt1.ts asc limit 10;")
        tdSql.error("select join_mt1.c1,join_mt0.c1 from join_mt1,join_mt0 where join_mt1.ts = join_mt0.ts and join_mt1.t1 = join_mt0.t1 order by t")

        # TD-4458: join on a database that uses microsecond (us) precision
        tdSql.execute("create database test_join_us precision 'us'")
        tdSql.execute("use test_join_us")
        ts = 1538548685000000
        for i in range(2):
            tdSql.execute("create table t%d (ts timestamp, i int)" % i)
            tdSql.execute("insert into t%d values(%d,11)(%d,12)" % (i, ts, ts + 1))
        tdSql.query("select t1.ts from t0,t1 where t0.ts = t1.ts")
        tdSql.checkData(0, 0, '2018-10-03 14:38:05.000000')
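        # Clarifying note: with 'us' precision the inserted value is interpreted as
        # microseconds, so 1538548685000000 us = 1538548685 s since the epoch, i.e.
        # 2018-10-03 14:38:05 under a UTC+8 timezone (assumed here), matching the check above.
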
    def stop(self):
        tdSql.close()
@ -0,0 +1,181 @@
{
  "filetype": "insert",
  "cfgdir": "/etc/taos",
  "host": "127.0.0.1",
  "port": 6030,
  "user": "root",
  "password": "taosdata",
  "thread_count": 4,
  "thread_count_create_tbl": 4,
  "result_file": "./insert_res.txt",
  "confirm_parameter_prompt": "no",
  "insert_interval": 0,
  "interlace_rows": 100,
  "num_of_records_per_req": 100,
  "databases": [{
    "dbinfo": {
      "name": "db",
      "drop": "no",
      "replica": 1,
      "days": 10,
      "cache": 16,
      "blocks": 8,
      "precision": "ms",
      "keep": 3650,
      "minRows": 100,
      "maxRows": 4096,
      "comp": 2,
      "walLevel": 1,
      "cachelast": 0,
      "quorum": 1,
      "fsync": 3000,
      "update": 0
    },
    "super_tables": [{
      "name": "stb",
      "child_table_exists": "no",
      "auto_create_table": "123",
      "childtable_count": 20,
      "childtable_prefix": "NN123_",
      "batch_create_tbl_num": 100,
      "data_source": "rand",
      "insert_mode": "taosc",
      "insert_rows": 5,
      "childtable_limit": 40,
      "childtable_offset": 0,
      "interlace_rows": 0,
      "insert_interval": 0,
      "max_sql_len": 1024000,
      "disorder_ratio": 0,
      "disorder_range": 1000,
      "timestamp_step": 10,
      "start_timestamp": "now",
      "sample_format": "csv",
      "sample_file": "./sample.csv",
      "tags_file": "",
      "columns": [{"type": "INT"}],
      "tags": [{"type": "TINYINT"}]
    },{
      "name": "stb",
      "child_table_exists": "no",
      "auto_create_table": "no",
      "childtable_count": 20,
      "childtable_prefix": "NNN_",
      "batch_create_tbl_num": 100,
      "data_source": "rand",
      "insert_mode": "taosc",
      "insert_rows": 5,
      "childtable_limit": 40,
      "childtable_offset": 0,
      "interlace_rows": 0,
      "insert_interval": 0,
      "max_sql_len": 1024000,
      "disorder_ratio": 0,
      "disorder_range": 1000,
      "timestamp_step": 10,
      "start_timestamp": "now",
      "sample_format": "csv",
      "sample_file": "./sample.csv",
      "tags_file": "",
      "columns": [{"type": "INT"}],
      "tags": [{"type": "TINYINT"}]
    },{
      "name": "stb",
      "child_table_exists": "no",
      "auto_create_table": "yes",
      "childtable_count": 20,
      "childtable_prefix": "NNY_",
      "batch_create_tbl_num": 100,
      "data_source": "rand",
      "insert_mode": "taosc",
      "insert_rows": 5,
      "childtable_limit": 40,
      "childtable_offset": 0,
      "interlace_rows": 0,
      "insert_interval": 0,
      "max_sql_len": 1024000,
      "disorder_ratio": 0,
      "disorder_range": 1000,
      "timestamp_step": 10,
      "start_timestamp": "now",
      "sample_format": "csv",
      "sample_file": "./sample.csv",
      "tags_file": "",
      "columns": [{"type": "INT"}],
      "tags": [{"type": "TINYINT"}]
    },{
      "name": "stb",
      "child_table_exists": "yes",
      "auto_create_table": "123",
      "childtable_count": 20,
      "childtable_prefix": "NY123_",
      "batch_create_tbl_num": 100,
      "data_source": "rand",
      "insert_mode": "taosc",
      "insert_rows": 5,
      "childtable_limit": 40,
      "childtable_offset": 0,
      "interlace_rows": 0,
      "insert_interval": 0,
      "max_sql_len": 1024000,
      "disorder_ratio": 0,
      "disorder_range": 1000,
      "timestamp_step": 10,
      "start_timestamp": "now",
      "sample_format": "csv",
      "sample_file": "./sample.csv",
      "tags_file": "",
      "columns": [{"type": "INT"}],
      "tags": [{"type": "TINYINT"}]
    },{
      "name": "stb",
      "child_table_exists": "yes",
      "auto_create_table": "no",
      "childtable_count": 20,
      "childtable_prefix": "NYN_",
      "batch_create_tbl_num": 100,
      "data_source": "rand",
      "insert_mode": "taosc",
      "insert_rows": 5,
      "childtable_limit": 40,
      "childtable_offset": 0,
      "interlace_rows": 0,
      "insert_interval": 0,
      "max_sql_len": 1024000,
      "disorder_ratio": 0,
      "disorder_range": 1000,
      "timestamp_step": 10,
      "start_timestamp": "now",
      "sample_format": "csv",
      "sample_file": "./sample.csv",
      "tags_file": "",
      "columns": [{"type": "INT"}],
      "tags": [{"type": "TINYINT"}]
    },{
      "name": "stb",
      "child_table_exists": "yes",
      "auto_create_table": "yes",
      "childtable_count": 20,
      "childtable_prefix": "NYY_",
      "batch_create_tbl_num": 100,
      "data_source": "rand",
      "insert_mode": "taosc",
      "insert_rows": 5,
      "childtable_limit": 40,
      "childtable_offset": 0,
      "interlace_rows": 0,
      "insert_interval": 0,
      "max_sql_len": 1024000,
      "disorder_ratio": 0,
      "disorder_range": 1000,
      "timestamp_step": 10,
      "start_timestamp": "now",
      "sample_format": "csv",
      "sample_file": "./sample.csv",
      "tags_file": "",
      "columns": [{"type": "INT"}],
      "tags": [{"type": "TINYINT"}]
    }
    ]
  }]
}
@ -0,0 +1,181 @@
{
  "filetype": "insert",
  "cfgdir": "/etc/taos",
  "host": "127.0.0.1",
  "port": 6030,
  "user": "root",
  "password": "taosdata",
  "thread_count": 4,
  "thread_count_create_tbl": 4,
  "result_file": "./insert_res.txt",
  "confirm_parameter_prompt": "no",
  "insert_interval": 0,
  "interlace_rows": 100,
  "num_of_records_per_req": 100,
  "databases": [{
    "dbinfo": {
      "name": "db",
      "drop": "yes",
      "replica": 1,
      "days": 10,
      "cache": 16,
      "blocks": 8,
      "precision": "ms",
      "keep": 3650,
      "minRows": 100,
      "maxRows": 4096,
      "comp": 2,
      "walLevel": 1,
      "cachelast": 0,
      "quorum": 1,
      "fsync": 3000,
      "update": 0
    },
    "super_tables": [{
      "name": "stb",
      "child_table_exists": "no",
      "auto_create_table": "123",
      "childtable_count": 20,
      "childtable_prefix": "YN123_",
      "batch_create_tbl_num": 100,
      "data_source": "rand",
      "insert_mode": "taosc",
      "insert_rows": 5,
      "childtable_limit": 40,
      "childtable_offset": 0,
      "interlace_rows": 0,
      "insert_interval": 0,
      "max_sql_len": 1024000,
      "disorder_ratio": 0,
      "disorder_range": 1000,
      "timestamp_step": 10,
      "start_timestamp": "now",
      "sample_format": "csv",
      "sample_file": "./sample.csv",
      "tags_file": "",
      "columns": [{"type": "INT"}],
      "tags": [{"type": "TINYINT"}]
    },{
      "name": "stb",
      "child_table_exists": "no",
      "auto_create_table": "no",
      "childtable_count": 20,
      "childtable_prefix": "YNN_",
      "batch_create_tbl_num": 100,
      "data_source": "rand",
      "insert_mode": "taosc",
      "insert_rows": 5,
      "childtable_limit": 40,
      "childtable_offset": 0,
      "interlace_rows": 0,
      "insert_interval": 0,
      "max_sql_len": 1024000,
      "disorder_ratio": 0,
      "disorder_range": 1000,
      "timestamp_step": 10,
      "start_timestamp": "now",
      "sample_format": "csv",
      "sample_file": "./sample.csv",
      "tags_file": "",
      "columns": [{"type": "INT"}],
      "tags": [{"type": "TINYINT"}]
    },{
      "name": "stb",
      "child_table_exists": "no",
      "auto_create_table": "yes",
      "childtable_count": 20,
      "childtable_prefix": "YNY_",
      "batch_create_tbl_num": 100,
      "data_source": "rand",
      "insert_mode": "taosc",
      "insert_rows": 5,
      "childtable_limit": 40,
      "childtable_offset": 0,
      "interlace_rows": 0,
      "insert_interval": 0,
      "max_sql_len": 1024000,
      "disorder_ratio": 0,
      "disorder_range": 1000,
      "timestamp_step": 10,
      "start_timestamp": "now",
      "sample_format": "csv",
      "sample_file": "./sample.csv",
      "tags_file": "",
      "columns": [{"type": "INT"}],
      "tags": [{"type": "TINYINT"}]
    },{
      "name": "stb",
      "child_table_exists": "yes",
      "auto_create_table": "123",
      "childtable_count": 20,
      "childtable_prefix": "YY123_",
      "batch_create_tbl_num": 100,
      "data_source": "rand",
      "insert_mode": "taosc",
      "insert_rows": 5,
      "childtable_limit": 40,
      "childtable_offset": 0,
      "interlace_rows": 0,
      "insert_interval": 0,
      "max_sql_len": 1024000,
      "disorder_ratio": 0,
      "disorder_range": 1000,
      "timestamp_step": 10,
      "start_timestamp": "now",
      "sample_format": "csv",
      "sample_file": "./sample.csv",
      "tags_file": "",
      "columns": [{"type": "INT"}],
      "tags": [{"type": "TINYINT"}]
    },{
      "name": "stb",
      "child_table_exists": "yes",
      "auto_create_table": "no",
      "childtable_count": 20,
      "childtable_prefix": "YYN_",
      "batch_create_tbl_num": 100,
      "data_source": "rand",
      "insert_mode": "taosc",
      "insert_rows": 5,
      "childtable_limit": 40,
      "childtable_offset": 0,
      "interlace_rows": 0,
      "insert_interval": 0,
      "max_sql_len": 1024000,
      "disorder_ratio": 0,
      "disorder_range": 1000,
      "timestamp_step": 10,
      "start_timestamp": "now",
      "sample_format": "csv",
      "sample_file": "./sample.csv",
      "tags_file": "",
      "columns": [{"type": "INT"}],
      "tags": [{"type": "TINYINT"}]
    },{
      "name": "stb",
      "child_table_exists": "yes",
      "auto_create_table": "yes",
      "childtable_count": 20,
      "childtable_prefix": "YYY_",
      "batch_create_tbl_num": 100,
      "data_source": "rand",
      "insert_mode": "taosc",
      "insert_rows": 5,
      "childtable_limit": 40,
      "childtable_offset": 0,
      "interlace_rows": 0,
      "insert_interval": 0,
      "max_sql_len": 1024000,
      "disorder_ratio": 0,
      "disorder_range": 1000,
      "timestamp_step": 10,
      "start_timestamp": "now",
      "sample_format": "csv",
      "sample_file": "./sample.csv",
      "tags_file": "",
      "columns": [{"type": "INT"}],
      "tags": [{"type": "TINYINT"}]
    }
    ]
  }]
}
@ -0,0 +1,60 @@
{
  "filetype": "insert",
  "cfgdir": "/etc/taos",
  "host": "127.0.0.1",
  "port": 6030,
  "user": "root",
  "password": "taosdata",
  "thread_count": 4,
  "thread_count_create_tbl": 4,
  "result_file": "./insert_res.txt",
  "confirm_parameter_prompt": "no",
  "insert_interval": 0,
  "interlace_rows": 100,
  "num_of_records_per_req": 32766,
  "databases": [{
    "dbinfo": {
      "name": "db",
      "drop": "no",
      "replica": 1,
      "days": 10,
      "cache": 16,
      "blocks": 3,
      "precision": "ms",
      "keep": 3650,
      "minRows": 1000,
      "maxRows": 4096,
      "comp": 2,
      "walLevel": 1,
      "cachelast": 0,
      "quorum": 1,
      "fsync": 3000,
      "update": 0
    },
    "super_tables": [{
      "name": "stb",
      "child_table_exists": "no",
      "childtable_count": 500,
      "childtable_prefix": "stb_",
      "auto_create_table": "no",
      "batch_create_tbl_num": 20,
      "data_source": "sample",
      "insert_mode": "taosc",
      "insert_rows": 10000,
      "childtable_limit": 10,
      "childtable_offset": 100,
      "interlace_rows": 0,
      "insert_interval": 0,
      "max_sql_len": 1024000,
      "disorder_ratio": 0,
      "disorder_range": 1000,
      "timestamp_step": 1,
      "start_timestamp": "2019-10-01 00:00:00.000",
      "sample_format": "csv",
      "sample_file": "./tools/taosdemoAllTest/sample.csv",
      "tags_file": "",
      "columns": [{"type": "INT", "count": 3}, {"type": "DOUBLE", "count": 3}, {"type": "BINARY", "len": 16, "count": 1}, {"type": "BINARY", "len": 32, "count": 1}, {"type": "BOOL"}],
      "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
    }]
  }]
}
@ -0,0 +1,60 @@
{
  "filetype": "insert",
  "cfgdir": "/etc/taos",
  "host": "127.0.0.1",
  "port": 6030,
  "user": "root",
  "password": "taosdata",
  "thread_count": 4,
  "thread_count_create_tbl": 4,
  "result_file": "./insert_res.txt",
  "confirm_parameter_prompt": "no",
  "insert_interval": 0,
  "interlace_rows": 100,
  "num_of_records_per_req": 32766,
  "databases": [{
    "dbinfo": {
      "name": "db",
      "drop": "no",
      "replica": 1,
      "days": 10,
      "cache": 16,
      "blocks": 8,
      "precision": "ms",
      "keep": 3650,
      "minRows": 100,
      "maxRows": 4096,
      "comp": 2,
      "walLevel": 1,
      "cachelast": 0,
      "quorum": 1,
      "fsync": 3000,
      "update": 0
    },
    "super_tables": [{
      "name": "stb",
      "child_table_exists": "yes",
      "childtable_count": 500,
      "childtable_prefix": "stb_",
      "auto_create_table": "no",
      "batch_create_tbl_num": 20,
      "data_source": "rand",
      "insert_mode": "taosc",
      "insert_rows": 100000,
      "childtable_limit": 500,
      "childtable_offset": 0,
      "interlace_rows": 0,
      "insert_interval": 0,
      "max_sql_len": 1024000,
      "disorder_ratio": 0,
      "disorder_range": 1000,
      "timestamp_step": 1,
      "start_timestamp": "now",
      "sample_format": "csv",
      "sample_file": "./sample.csv",
      "tags_file": "",
      "columns": [{"type": "INT"}],
      "tags": [{"type": "TINYINT", "count": 2}]
    }]
  }]
}
@ -238,10 +238,12 @@ class TDTestCase:
        tdSql.execute("use dbtest123")
        tdSql.query("select col2 from stb0")
        tdSql.checkData(0, 0, 2147483647)
        tdSql.query("select t1 from stb1")
        tdSql.checkData(0, 0, -127)
        tdSql.query("select t2 from stb1")
        tdSql.checkData(1, 0, 126)
        tdSql.query("select * from stb1 where t1=-127")
        tdSql.checkRows(20)
        tdSql.query("select * from stb1 where t2=127")
        tdSql.checkRows(10)
        tdSql.query("select * from stb1 where t2=126")
        tdSql.checkRows(10)

        # insert: test the interlace parameter
        os.system("%staosdemo -f tools/taosdemoAllTest/insert-interlace-row.json -y " % binPath)
@ -252,6 +254,42 @@ class TDTestCase:
        tdSql.checkData(0, 0, 15000)


        # insert: auto_create

        tdSql.execute('drop database if exists db')
        tdSql.execute('create database db')
        tdSql.execute('use db')
        os.system("%staosdemo -y -f tools/taosdemoAllTest/insert-drop-exist-auto-N00.json " % binPath)  # drop = no; child_table_exists and auto_create_table vary
        tdSql.execute('use db')
        tdSql.query('show tables like \'NN123%\'')  # child_table_exists = no, auto_create_table = 123
        tdSql.checkRows(20)
        tdSql.query('show tables like \'NNN%\'')    # child_table_exists = no, auto_create_table = no
        tdSql.checkRows(20)
        tdSql.query('show tables like \'NNY%\'')    # child_table_exists = no, auto_create_table = yes
        tdSql.checkRows(20)
        tdSql.query('show tables like \'NYN%\'')    # child_table_exists = yes, auto_create_table = no
        tdSql.checkRows(0)
        tdSql.query('show tables like \'NY123%\'')  # child_table_exists = yes, auto_create_table = 123
        tdSql.checkRows(0)
        tdSql.query('show tables like \'NYY%\'')    # child_table_exists = yes, auto_create_table = yes
        tdSql.checkRows(0)
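        # Clarifying note (assumption about taosdemo behavior): when child_table_exists
        # is "yes", taosdemo does not create the child tables; since db was just created
        # empty, no NY* tables exist, hence the checkRows(0) expectations above.
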
        tdSql.execute('drop database if exists db')
        os.system("%staosdemo -y -f tools/taosdemoAllTest/insert-drop-exist-auto-Y00.json " % binPath)  # drop = yes; child_table_exists and auto_create_table vary
        tdSql.execute('use db')
        tdSql.query('show tables like \'YN123%\'')  # child_table_exists = no, auto_create_table = 123
        tdSql.checkRows(20)
        tdSql.query('show tables like \'YNN%\'')    # child_table_exists = no, auto_create_table = no
        tdSql.checkRows(20)
        tdSql.query('show tables like \'YNY%\'')    # child_table_exists = no, auto_create_table = yes
        tdSql.checkRows(20)
        tdSql.query('show tables like \'YYN%\'')    # child_table_exists = yes, auto_create_table = no
        tdSql.checkRows(20)
        tdSql.query('show tables like \'YY123%\'')  # child_table_exists = yes, auto_create_table = 123
        tdSql.checkRows(20)
        tdSql.query('show tables like \'YYY%\'')    # child_table_exists = yes, auto_create_table = yes
        tdSql.checkRows(20)

        os.system("rm -rf ./insert_res.txt")
        os.system("rm -rf tools/taosdemoAllTest/taosdemoTestInsertWithJson.py.sql")
@ -129,8 +129,8 @@ sql alter database db keep 20
sql_error alter database db keep 10
sql_error alter database db keep 9
sql_error alter database db keep 1
sql alter database db keep 0
sql alter database db keep -1
sql_error alter database db keep 0
sql_error alter database db keep -1
sql_error alter database db keep 365001

print ============== step cache
@ -385,8 +385,8 @@ sql alter database db keep 20
sql_error alter database db keep 10
sql_error alter database db keep 9
sql_error alter database db keep 1
sql alter database db keep 0
sql alter database db keep -1
sql_error alter database db keep 0
sql_error alter database db keep -1
sql_error alter database db keep 365001

sql_error alter topic db keep 40
@ -0,0 +1,118 @@
system sh/stop_dnodes.sh

system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/exec.sh -n dnode1 -s start
sleep 100
sql connect

$dbPrefix = m_alt_db
$tbPrefix = m_alt_tb
$mtPrefix = m_alt_mt
$tbNum = 10
$rowNum = 5
$totalNum = $tbNum * $rowNum
$ts0 = 1537146000000
$delta = 600000
print ========== alter.sim
$i = 0
$db = $dbPrefix . $i
$mt = $mtPrefix . $i

sql drop database if exists $db
sql create database $db
sql use $db
##### alter table test, simplest case
sql create table tb (ts timestamp, c1 int, c2 binary(10), c3 nchar(10))
sql insert into tb values (now, 1, "1", "1")
sql alter table tb modify column c2 binary(20);
if $rows != 0 then
  return -1
endi
sql alter table tb modify column c3 nchar(20);
if $rows != 0 then
  return -1
endi


sql create stable stb (ts timestamp, c1 int, c2 binary(10), c3 nchar(10)) tags(id1 int, id2 binary(10), id3 nchar(10))
sql create table tb1 using stb tags(1, "a", "b")
sql insert into tb1 values (now, 1, "1", "1")
sql alter stable stb modify column c2 binary(20);
if $rows != 0 then
  return -1
endi
sql alter table stb modify column c2 binary(30);
if $rows != 0 then
  return -1
endi
sql alter stable stb modify column c3 nchar(20);
if $rows != 0 then
  return -1
endi
sql alter table stb modify column c3 nchar(30);
if $rows != 0 then
  return -1
endi

sql alter table stb modify tag id2 binary(11);
if $rows != 0 then
  return -1
endi
sql alter stable stb modify tag id2 binary(11);
if $rows != 0 then
  return -1
endi
sql alter table stb modify tag id3 nchar(11);
if $rows != 0 then
  return -1
endi
sql alter stable stb modify tag id3 nchar(11);
if $rows != 0 then
  return -1
endi

##### ILLEGAL OPERATIONS
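# Clarifying note: the statements below are expected to fail; modify column/tag is
# only allowed to increase the length of an existing binary/nchar field, so type
# changes, equal or smaller lengths, and out-of-range lengths are all rejected.
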
# try modifying columns/tags in ways that are not allowed
sql_error alter table tb modify column c1 binary(10);
sql_error alter table tb modify column c1 double;
sql_error alter table tb modify column c2 int;
sql_error alter table tb modify column c2 binary(10);
sql_error alter table tb modify column c2 binary(9);
sql_error alter table tb modify column c2 binary(-9);
sql_error alter table tb modify column c2 binary(0);
sql_error alter table tb modify column c2 binary(17000);
sql_error alter table tb modify column c2 nchar(30);
sql_error alter table tb modify column c3 double;
sql_error alter table tb modify column c3 nchar(10);
sql_error alter table tb modify column c3 nchar(0);
sql_error alter table tb modify column c3 nchar(-1);
sql_error alter table tb modify column c3 binary(80);
sql_error alter table tb modify column c3 nchar(17000);
sql_error alter table tb modify column c3 nchar(100), c2 binary(30);
sql_error alter table tb modify column c1 nchar(100), c2 binary(30);
sql_error alter stable tb modify column c2 binary(30);
sql_error alter table tb modify tag c2 binary(30);
sql_error alter table stb modify tag id2 binary(10);
sql_error alter table stb modify tag id2 nchar(30);
sql_error alter stable stb modify tag id2 binary(10);
sql_error alter stable stb modify tag id2 nchar(30);
sql_error alter table stb modify tag id3 nchar(10);
sql_error alter table stb modify tag id3 binary(30);
sql_error alter stable stb modify tag id3 nchar(10);
sql_error alter stable stb modify tag id3 binary(30);
sql_error alter stable stb modify tag id1 binary(30);
sql_error alter stable stb modify tag c1 binary(30);


sql_error alter table tb1 modify column c2 binary(30);
sql_error alter table tb1 modify column c3 nchar(30);
sql_error alter table tb1 modify tag id2 binary(30);
sql_error alter table tb1 modify tag id3 nchar(30);
sql_error alter stable tb1 modify tag id2 binary(30);
sql_error alter stable tb1 modify tag id3 nchar(30);
sql_error alter stable tb1 modify column c2 binary(30);


system sh/exec.sh -n dnode1 -s stop -x SIGINT
@ -233,6 +233,10 @@ totalExampleFailed=0

if [ "${OS}" == "Linux" ]; then
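  # Clarifying note: the grep -oP lookahead keeps only the directory prefix of the
  # configured core pattern (everything before "core_" or "core-").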
  corepath=`grep -oP '.*(?=core_)' /proc/sys/kernel/core_pattern||grep -oP '.*(?=core-)' /proc/sys/kernel/core_pattern`
  if [ -z "$corepath" ];then
    echo "/coredump/core_%e_%p_%t" > /proc/sys/kernel/core_pattern || echo "Permission denied"
    corepath="/coredump/"
  fi
fi

if [ "$2" != "jdbc" ] && [ "$2" != "python" ] && [ "$2" != "unit" ] && [ "$2" != "example" ]; then