Merge branch 'develop' of github.com:taosdata/TDengine into dev/chr
commit f8d81ede65
@@ -1,49 +1,49 @@
 version: 1.0.{build}
 image:
 - Visual Studio 2015
 - macos
 environment:
 matrix:
 - ARCH: amd64
 - ARCH: x86
 matrix:
 exclude:
 - image: macos
 ARCH: x86
 for:
 -
 matrix:
 only:
 - image: Visual Studio 2015
 clone_folder: c:\dev\TDengine
 clone_depth: 1

 init:
 - call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %ARCH%

 before_build:
 - cd c:\dev\TDengine
 - md build

 build_script:
 - cd build
-- cmake -G "NMake Makefiles" ..
+- cmake -G "NMake Makefiles" .. -DBUILD_JDBC=false
 - nmake install
 -
 matrix:
 only:
 - image: macos
 clone_depth: 1

 build_script:
 - mkdir debug
 - cd debug
 - cmake .. > /dev/null
 - make > /dev/null
 notifications:
 - provider: Email
 to:
 - sangshuduo@gmail.com
 on_build_success: true
 on_build_failure: true
 on_build_status_changed: true
@@ -14,11 +14,13 @@ MESSAGE(STATUS "Project binary files output path: " ${PROJECT_BINARY_DIR})
 MESSAGE(STATUS "Project executable files output path: " ${EXECUTABLE_OUTPUT_PATH})
 MESSAGE(STATUS "Project library files output path: " ${LIBRARY_OUTPUT_PATH})

-FIND_PROGRAM(TD_MVN_INSTALLED mvn)
-IF (TD_MVN_INSTALLED)
-MESSAGE(STATUS "MVN is installed and JDBC will be compiled")
-ELSE ()
-MESSAGE(STATUS "MVN is not installed and JDBC is not compiled")
-ENDIF ()
+IF (TD_BUILD_JDBC)
+FIND_PROGRAM(TD_MVN_INSTALLED mvn)
+IF (TD_MVN_INSTALLED)
+MESSAGE(STATUS "MVN is installed and JDBC will be compiled")
+ELSE ()
+MESSAGE(STATUS "MVN is not installed and JDBC is not compiled")
+ENDIF ()
+ENDIF ()

 #
@@ -55,4 +57,4 @@ ELSE ()
 SET(CMAKE_BUILD_TYPE "Debug")
 MESSAGE(STATUS "Build Debug Version as default")
 ENDIF()
 ENDIF ()
@@ -77,3 +77,9 @@ IF (${JEMALLOC_ENABLED} MATCHES "true")
 SET(TD_JEMALLOC_ENABLED TRUE)
 MESSAGE(STATUS "build with jemalloc enabled")
 ENDIF ()
+
+SET(TD_BUILD_JDBC TRUE)
+
+IF (${BUILD_JDBC} MATCHES "false")
+SET(TD_BUILD_JDBC FALSE)
+ENDIF ()
@@ -5142,6 +5142,10 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
 const char* msg18 = "primary timestamp column cannot be dropped";
 const char* msg19 = "invalid new tag name";
 const char* msg20 = "table is not super table";
+const char* msg21 = "only binary/nchar column length could be modified";
+const char* msg22 = "new column length should be bigger than old one";
+const char* msg23 = "only column length coulbe be modified";
+const char* msg24 = "invalid binary/nchar column length";

 int32_t code = TSDB_CODE_SUCCESS;

@@ -5172,13 +5176,13 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
 }

 if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN ||
-pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) {
-if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
+pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN) {
+if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
 return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
 }
 } else if ((pAlterSQL->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) && (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo))) {
 return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
-} else if ((pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_COLUMN) &&
+} else if ((pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) &&
 UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo)) {
 return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
 }
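As a reading aid for the hunk above: the reworked checks appear to accept tag-level ALTER operations, including the new MODIFY TAG path, only when the target is a super table, while column-level ADD/DROP/CHANGE remains rejected on child tables. A minimal sketch in the repository's own sim-test style; the table and tag names (st, ct, c1, t1) are placeholders, not from the commit:

# hypothetical schema, for illustration only
sql create stable st (ts timestamp, c1 binary(10)) tags (t1 binary(10))
sql create table ct using st tags ("a")
# tag-level alters must target the super table
sql_error alter table ct modify tag t1 binary(20)
# column-level alters are rejected on a child table
sql_error alter table ct modify column c1 binary(20)
# accepted on the super table
sql alter stable st modify tag t1 binary(20)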
@@ -5394,6 +5398,85 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
 tstrncpy(name1, pItem->pVar.pz, sizeof(name1));
 TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name1, tDataTypes[TSDB_DATA_TYPE_INT].bytes);
 tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
+} else if (pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) {
+if (taosArrayGetSize(pAlterSQL->pAddColumns) >= 2) {
+return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg16);
+}
+
+TAOS_FIELD* pItem = taosArrayGet(pAlterSQL->pAddColumns, 0);
+if (pItem->type != TSDB_DATA_TYPE_BINARY && pItem->type != TSDB_DATA_TYPE_NCHAR) {
+return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg21);
+}
+
+SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
+SStrToken name = {.type = TK_STRING, .z = pItem->name, .n = (uint32_t)strlen(pItem->name)};
+if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) {
+return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg17);
+}
+
+SSchema* pColSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex);
+
+if (pColSchema->type != TSDB_DATA_TYPE_BINARY && pColSchema->type != TSDB_DATA_TYPE_NCHAR) {
+return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg21);
+}
+
+if (pItem->type != pColSchema->type) {
+return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg23);
+}
+
+if ((pItem->type == TSDB_DATA_TYPE_BINARY && (pItem->bytes <= 0 || pItem->bytes > TSDB_MAX_BINARY_LEN)) ||
+(pItem->type == TSDB_DATA_TYPE_NCHAR && (pItem->bytes <= 0 || pItem->bytes > TSDB_MAX_NCHAR_LEN))) {
+return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg24);
+}
+
+if (pItem->bytes <= pColSchema->bytes) {
+return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg22);
+}
+
+TAOS_FIELD f = tscCreateField(pColSchema->type, name.z, pItem->bytes);
+tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
+}else if (pAlterSQL->type == TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN) {
+if (taosArrayGetSize(pAlterSQL->pAddColumns) >= 2) {
+return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg16);
+}
+
+TAOS_FIELD* pItem = taosArrayGet(pAlterSQL->pAddColumns, 0);
+if (pItem->type != TSDB_DATA_TYPE_BINARY && pItem->type != TSDB_DATA_TYPE_NCHAR) {
+return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg21);
+}
+
+SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
+SStrToken name = {.type = TK_STRING, .z = pItem->name, .n = (uint32_t)strlen(pItem->name)};
+if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) {
+return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg17);
+}
+
+SSchema* pColSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex);
+
+if (columnIndex.columnIndex < tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
+return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg10);
+}
+
+if (pColSchema->type != TSDB_DATA_TYPE_BINARY && pColSchema->type != TSDB_DATA_TYPE_NCHAR) {
+return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg21);
+}
+
+if (pItem->type != pColSchema->type) {
+return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg23);
+}
+
+if ((pItem->type == TSDB_DATA_TYPE_BINARY && (pItem->bytes <= 0 || pItem->bytes > TSDB_MAX_BINARY_LEN)) ||
+(pItem->type == TSDB_DATA_TYPE_NCHAR && (pItem->bytes <= 0 || pItem->bytes > TSDB_MAX_NCHAR_LEN))) {
+return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg24);
+}
+
+if (pItem->bytes <= pColSchema->bytes) {
+return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg22);
+}
+
+TAOS_FIELD f = tscCreateField(pColSchema->type, name.z, pItem->bytes);
+tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
 }

 return TSDB_CODE_SUCCESS;
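For orientation, the validation added above appears to require that a MODIFY statement names exactly one existing binary/nchar column or tag, keeps its type, stays within TSDB_MAX_BINARY_LEN / TSDB_MAX_NCHAR_LEN, and strictly increases the declared length. A hedged sketch in sim-test style, assuming a hypothetical table tb (ts timestamp, c1 int, c2 binary(10)):

# assumes: create table tb (ts timestamp, c1 int, c2 binary(10))
# ok: same type, new length larger than the current 10
sql alter table tb modify column c2 binary(20)
# rejected: the new length must be bigger than the old one
sql_error alter table tb modify column c2 binary(10)
# rejected: the column type may not change
sql_error alter table tb modify column c2 nchar(20)
# rejected: only binary/nchar column lengths can be modified
sql_error alter table tb modify column c1 double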
@@ -7186,8 +7269,9 @@ static int32_t getTableNameFromSqlNode(SSqlNode* pSqlNode, SArray* tableNameList
 }

 SName name = {0};
-if (tscSetTableFullName(&name, t, pSql) != TSDB_CODE_SUCCESS) {
-return invalidOperationMsg(msgBuf, msg1);
+int32_t code = tscSetTableFullName(&name, t, pSql);
+if (code != TSDB_CODE_SUCCESS) {
+return code;
 }

 taosArrayPush(tableNameList, &name);
@@ -1119,6 +1119,8 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue

 SOperatorInfo* pSourceOperator = createDummyInputOperator(pSqlObjList[0], pSchema, numOfCol1, pFilterInfo, numOfFilterCols);

+pOutput->precision = pSqlObjList[0]->res.precision;
+
 SSchema* schema = NULL;
 if (px->numOfTables > 1) {
 SOperatorInfo** p = calloc(px->numOfTables, POINTER_BYTES);
@@ -4477,4 +4479,4 @@ SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg) {
 }

 return info;
 }
@@ -243,15 +243,15 @@ public class TSDBPreparedStatementTest {
 s.setNString(1, s2, 4);

 random = 10 + r.nextInt(5);
-ArrayList<String> s5 = new ArrayList<String>();
+ArrayList<String> s3 = new ArrayList<String>();
 for(int i = 0; i < numOfRows; i++) {
 if(i % random == 0) {
-s5.add(null);
+s3.add(null);
 }else{
-s5.add("test" + i % 10);
+s3.add("test" + i % 10);
 }
 }
-s.setString(2, s5, 10);
+s.setString(2, s3, 10);

 s.columnDataAddBatch();
 s.columnDataExecuteBatch();
@@ -268,7 +268,126 @@ public class TSDBPreparedStatementTest {
 }
 }

+@Test
+public void bindDataWithSingleTagTest() throws SQLException {
+Statement stmt = conn.createStatement();
+
+String types[] = new String[] {"tinyint", "smallint", "int", "bigint", "bool", "float", "double", "binary(10)", "nchar(10)"};
+
+for (String type : types) {
+stmt.execute("drop table if exists weather_test");
+stmt.execute("create table weather_test(ts timestamp, f1 nchar(10), f2 binary(10)) tags (t " + type + ")");
+
+int numOfRows = 1;
+
+TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?) values(?, ?, ?)");
+Random r = new Random();
+s.setTableName("w1");
+
+switch(type) {
+case "tinyint":
+case "smallint":
+case "int":
+case "bigint":
+s.setTagInt(0, 1);
+break;
+case "float":
+s.setTagFloat(0, 1.23f);
+break;
+case "double":
+s.setTagDouble(0, 3.14159265);
+break;
+case "bool":
+s.setTagBoolean(0, true);
+break;
+case "binary(10)":
+s.setTagString(0, "test");
+break;
+case "nchar(10)":
+s.setTagNString(0, "test");
+break;
+default:
+break;
+}
+
+ArrayList<Long> ts = new ArrayList<Long>();
+for(int i = 0; i < numOfRows; i++) {
+ts.add(System.currentTimeMillis() + i);
+}
+s.setTimestamp(0, ts);
+
+int random = 10 + r.nextInt(5);
+ArrayList<String> s2 = new ArrayList<String>();
+for(int i = 0; i < numOfRows; i++) {
+s2.add("分支" + i % 4);
+}
+s.setNString(1, s2, 10);
+
+random = 10 + r.nextInt(5);
+ArrayList<String> s3 = new ArrayList<String>();
+for(int i = 0; i < numOfRows; i++) {
+s3.add("test" + i % 4);
+}
+s.setString(2, s3, 10);
+
+s.columnDataAddBatch();
+s.columnDataExecuteBatch();
+s.columnDataCloseBatch();
+
+String sql = "select * from weather_test";
+PreparedStatement statement = conn.prepareStatement(sql);
+ResultSet rs = statement.executeQuery();
+int rows = 0;
+while(rs.next()) {
+rows++;
+}
+Assert.assertEquals(numOfRows, rows);
+}
+}
+
+@Test
+public void bindDataWithMultipleTagsTest() throws SQLException {
+Statement stmt = conn.createStatement();
+
+stmt.execute("drop table if exists weather_test");
+stmt.execute("create table weather_test(ts timestamp, f1 nchar(10), f2 binary(10)) tags (t1 int, t2 binary(10))");
+
+int numOfRows = 1;
+
+TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?,?) (ts, f2) values(?, ?)");
+s.setTableName("w2");
+s.setTagInt(0, 1);
+s.setTagString(1, "test");
+
+ArrayList<Long> ts = new ArrayList<Long>();
+for(int i = 0; i < numOfRows; i++) {
+ts.add(System.currentTimeMillis() + i);
+}
+s.setTimestamp(0, ts);
+
+ArrayList<String> s2 = new ArrayList<String>();
+for(int i = 0; i < numOfRows; i++) {
+s2.add("test" + i % 4);
+}
+s.setString(1, s2, 10);
+
+s.columnDataAddBatch();
+s.columnDataExecuteBatch();
+s.columnDataCloseBatch();
+
+String sql = "select * from weather_test";
+PreparedStatement statement = conn.prepareStatement(sql);
+ResultSet rs = statement.executeQuery();
+int rows = 0;
+while(rs.next()) {
+rows++;
+}
+Assert.assertEquals(numOfRows, rows);
+
+}
+
 @Test
 public void setBoolean() throws SQLException {

@@ -88,12 +88,17 @@ static SStep tsDnodeSteps[] = {

 static SStep tsDnodeCompactSteps[] = {
 {"dnode-tfile", tfInit, tfCleanup},
+{"dnode-globalcfg", taosCheckGlobalCfg, NULL},
 {"dnode-storage", dnodeInitStorage, dnodeCleanupStorage},
+{"dnode-cfg", dnodeInitCfg, dnodeCleanupCfg},
 {"dnode-eps", dnodeInitEps, dnodeCleanupEps},
+{"dnode-minfos", dnodeInitMInfos, dnodeCleanupMInfos},
 {"dnode-wal", walInit, walCleanUp},
+{"dnode-sync", syncInit, syncCleanUp},
 {"dnode-mread", dnodeInitMRead, NULL},
 {"dnode-mwrite", dnodeInitMWrite, NULL},
 {"dnode-mpeer", dnodeInitMPeer, NULL},
+{"dnode-vnodes", dnodeInitVnodes, dnodeCleanupVnodes},
 {"dnode-modules", dnodeInitModules, dnodeCleanupModules},
 };

@@ -161,6 +161,7 @@ enum _mgmt_table {
 #define TSDB_ALTER_TABLE_ADD_COLUMN 5
 #define TSDB_ALTER_TABLE_DROP_COLUMN 6
 #define TSDB_ALTER_TABLE_CHANGE_COLUMN 7
+#define TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN 8

 #define TSDB_FILL_NONE 0
 #define TSDB_FILL_NULL 1
@@ -156,54 +156,62 @@
 #define TK_SYNCDB 137
 #define TK_ADD 138
 #define TK_COLUMN 139
-#define TK_TAG 140
-#define TK_CHANGE 141
-#define TK_SET 142
-#define TK_KILL 143
-#define TK_CONNECTION 144
-#define TK_STREAM 145
-#define TK_COLON 146
-#define TK_ABORT 147
-#define TK_AFTER 148
-#define TK_ATTACH 149
-#define TK_BEFORE 150
-#define TK_BEGIN 151
-#define TK_CASCADE 152
-#define TK_CLUSTER 153
-#define TK_CONFLICT 154
-#define TK_COPY 155
-#define TK_DEFERRED 156
-#define TK_DELIMITERS 157
-#define TK_DETACH 158
-#define TK_EACH 159
-#define TK_END 160
-#define TK_EXPLAIN 161
-#define TK_FAIL 162
-#define TK_FOR 163
-#define TK_IGNORE 164
-#define TK_IMMEDIATE 165
-#define TK_INITIALLY 166
-#define TK_INSTEAD 167
-#define TK_MATCH 168
-#define TK_KEY 169
-#define TK_OF 170
-#define TK_RAISE 171
-#define TK_REPLACE 172
-#define TK_RESTRICT 173
-#define TK_ROW 174
-#define TK_STATEMENT 175
-#define TK_TRIGGER 176
-#define TK_VIEW 177
-#define TK_SEMI 178
-#define TK_NONE 179
-#define TK_PREV 180
-#define TK_LINEAR 181
-#define TK_IMPORT 182
-#define TK_TBNAME 183
-#define TK_JOIN 184
-#define TK_INSERT 185
-#define TK_INTO 186
-#define TK_VALUES 187
+#define TK_MODIFY 140
+#define TK_TAG 141
+#define TK_CHANGE 142
+#define TK_SET 143
+#define TK_KILL 144
+#define TK_CONNECTION 145
+#define TK_STREAM 146
+#define TK_COLON 147
+#define TK_ABORT 148
+#define TK_AFTER 149
+#define TK_ATTACH 150
+#define TK_BEFORE 151
+#define TK_BEGIN 152
+#define TK_CASCADE 153
+#define TK_CLUSTER 154
+#define TK_CONFLICT 155
+#define TK_COPY 156
+#define TK_DEFERRED 157
+#define TK_DELIMITERS 158
+#define TK_DETACH 159
+#define TK_EACH 160
+#define TK_END 161
+#define TK_EXPLAIN 162
+#define TK_FAIL 163
+#define TK_FOR 164
+#define TK_IGNORE 165
+#define TK_IMMEDIATE 166
+#define TK_INITIALLY 167
+#define TK_INSTEAD 168
+#define TK_MATCH 169
+#define TK_KEY 170
+#define TK_OF 171
+#define TK_RAISE 172
+#define TK_REPLACE 173
+#define TK_RESTRICT 174
+#define TK_ROW 175
+#define TK_STATEMENT 176
+#define TK_TRIGGER 177
+#define TK_VIEW 178
+#define TK_SEMI 179
+#define TK_NONE 180
+#define TK_PREV 181
+#define TK_LINEAR 182
+#define TK_IMPORT 183
+#define TK_TBNAME 184
+#define TK_JOIN 185
+#define TK_INSERT 186
+#define TK_INTO 187
+#define TK_VALUES 188

 #define TK_SPACE 300
 #define TK_COMMENT 301
@@ -3214,7 +3214,15 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
 } else if (pAlter->type == TSDB_ALTER_TABLE_DROP_COLUMN) {
 code = mnodeDropSuperTableColumn(pMsg, pAlter->schema[0].name);
 } else if (pAlter->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) {
-code = mnodeChangeSuperTableColumn(pMsg, pAlter->schema[0].name, pAlter->schema[1].name);
+//code = mnodeChangeSuperTableColumn(pMsg, pAlter->schema[0].name, pAlter->schema[1].name);
+(void)mnodeChangeSuperTableColumn;
+mError("change table[%s] column[%s] length to [%d] is not processed", pAlter->tableFname, pAlter->schema[0].name, pAlter->schema[0].bytes);
+code = TSDB_CODE_SUCCESS;
+} else if (pAlter->type == TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN) {
+//code = mnodeChangeSuperTableColumn(pMsg, pAlter->schema[0].name, pAlter->schema[1].name);
+(void)mnodeChangeSuperTableColumn;
+mError("change table[%s] tag[%s] length to [%d] is not processed", pAlter->tableFname, pAlter->schema[0].name, pAlter->schema[0].bytes);
+code = TSDB_CODE_SUCCESS;
 } else {
 }
 } else {
@@ -3226,7 +3234,10 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
 } else if (pAlter->type == TSDB_ALTER_TABLE_DROP_COLUMN) {
 code = mnodeDropNormalTableColumn(pMsg, pAlter->schema[0].name);
 } else if (pAlter->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) {
-code = mnodeChangeNormalTableColumn(pMsg, pAlter->schema[0].name, pAlter->schema[1].name);
+//code = mnodeChangeNormalTableColumn(pMsg, pAlter->schema[0].name, pAlter->schema[1].name);
+(void)mnodeChangeNormalTableColumn;
+mError("change table[%s] column[%s] length to [%d] is not processed", pAlter->tableFname, pAlter->schema[0].name, pAlter->schema[0].bytes);
+code = TSDB_CODE_SUCCESS;
 } else {
 }
 }
@@ -3417,4 +3428,4 @@ int32_t mnodeCompactTables() {
 mnodeCompactChildTables();

 return 0;
 }
@@ -759,6 +759,12 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) DROP COLUMN ids(A). {
 setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
 }

+cmd ::= ALTER TABLE ids(X) cpxName(F) MODIFY COLUMN columnlist(A). {
+X.n += F.n;
+SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, A, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, -1);
+setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
+}
+
 //////////////////////////////////ALTER TAGS statement/////////////////////////////////////
 cmd ::= ALTER TABLE ids(X) cpxName(Y) ADD TAG columnlist(A). {
 X.n += Y.n;
@@ -799,6 +805,11 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). {
 setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
 }

+cmd ::= ALTER TABLE ids(X) cpxName(F) MODIFY TAG columnlist(A). {
+X.n += F.n;
+SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, A, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, -1);
+setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
+}

 ///////////////////////////////////ALTER STABLE statement//////////////////////////////////
 cmd ::= ALTER STABLE ids(X) cpxName(F) ADD COLUMN columnlist(A). {
@@ -817,6 +828,12 @@ cmd ::= ALTER STABLE ids(X) cpxName(F) DROP COLUMN ids(A). {
 setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
 }

+cmd ::= ALTER STABLE ids(X) cpxName(F) MODIFY COLUMN columnlist(A). {
+X.n += F.n;
+SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, A, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, TSDB_SUPER_TABLE);
+setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
+}
+
 //////////////////////////////////ALTER TAGS statement/////////////////////////////////////
 cmd ::= ALTER STABLE ids(X) cpxName(Y) ADD TAG columnlist(A). {
 X.n += Y.n;
@@ -846,6 +863,23 @@ cmd ::= ALTER STABLE ids(X) cpxName(F) CHANGE TAG ids(Y) ids(Z). {
 setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
 }

+cmd ::= ALTER STABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). {
+X.n += F.n;
+
+toTSDBType(Y.type);
+SArray* A = tVariantListAppendToken(NULL, &Y, -1);
+A = tVariantListAppend(A, &Z, -1);
+
+SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, TSDB_SUPER_TABLE);
+setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
+}
+
+cmd ::= ALTER STABLE ids(X) cpxName(F) MODIFY TAG columnlist(A). {
+X.n += F.n;
+SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, A, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, TSDB_SUPER_TABLE);
+setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
+}
+
 ////////////////////////////////////////kill statement///////////////////////////////////////
 cmd ::= KILL CONNECTION INTEGER(Y). {setKillSql(pInfo, TSDB_SQL_KILL_CONNECTION, &Y);}
 cmd ::= KILL STREAM INTEGER(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); setKillSql(pInfo, TSDB_SQL_KILL_STREAM, &X);}
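Taken together, the grammar rules added in the hunks above introduce MODIFY COLUMN and MODIFY TAG forms for both ALTER TABLE and ALTER STABLE, plus an ALTER STABLE ... SET TAG rule mirroring the existing ALTER TABLE one. A sketch of the surface syntax only, in sim-test style; tb, stb, c2, t1 and t2 are placeholder identifiers (tb an ordinary table, stb a super table), and the lengths are arbitrary:

sql alter table tb modify column c2 binary(20)
sql alter stable stb modify column c2 binary(20)
sql alter table stb modify tag t2 nchar(20)
sql alter stable stb modify tag t2 nchar(20)
sql alter stable stb set tag t1 = 1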
@@ -893,7 +893,7 @@ SAlterTableInfo *tSetAlterTableInfo(SStrToken *pTableName, SArray *pCols, SArray
 pAlterTable->type = type;
 pAlterTable->tableType = tableType;

-if (type == TSDB_ALTER_TABLE_ADD_COLUMN || type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN) {
+if (type == TSDB_ALTER_TABLE_ADD_COLUMN || type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN || type == TSDB_ALTER_TABLE_CHANGE_COLUMN || type == TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN) {
 pAlterTable->pAddColumns = pCols;
 assert(pVals == NULL);
 } else {
src/query/src/sql.c (1732 lines changed) — file diff suppressed because it is too large.
@@ -218,7 +218,8 @@ static SKeyword keywordTable[] = {
 {"DISTINCT", TK_DISTINCT},
 {"PARTITIONS", TK_PARTITIONS},
 {"TOPIC", TK_TOPIC},
-{"TOPICS", TK_TOPICS}
+{"TOPICS", TK_TOPICS},
+{"MODIFY", TK_MODIFY}
 };

 static const char isIdChar[] = {
@@ -314,6 +314,8 @@ python3 ./test.py -f query/last_row_cache.py
 python3 ./test.py -f account/account_create.py
 python3 ./test.py -f alter/alter_table.py
 python3 ./test.py -f query/queryGroupbySort.py
+python3 ./test.py -f functions/function_session.py
+python3 ./test.py -f functions/function_stateWindow.py

 python3 ./test.py -f insert/unsignedInt.py
 python3 ./test.py -f insert/unsignedBigint.py
@@ -0,0 +1,86 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+#import numpy as np
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor())
+
+        self.rowNum = 10
+        self.ts = 1537146000000
+
+    def run(self):
+        tdSql.prepare()
+
+        tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+            col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
+        tdSql.execute("create table test1 using test tags('beijing')")
+        for i in range(self.rowNum):
+            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+                % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+
+        # operation not allowed on super table
+        tdSql.error("select count(*) from test session(ts, 1s)")
+        # operation not allowde on col pro
+        tdSql.error("select * from test1 session(ts, 1s)")
+        # operation not allowed on col except primary ts
+        tdSql.error("select * from test1 session(col1, 1s)")
+
+        tdSql.query("select count(*) from test1 session(ts, 1s)")
+
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 1, 10)
+        # append more data
+
+        for i in range(self.rowNum):
+            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+                % (self.ts + 2000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+
+        tdSql.query("select count(*) from test1 session(ts, 1s)")
+        tdSql.checkRows(2)
+        tdSql.checkData(0, 1, 10)
+        tdSql.checkData(1, 1, 1)
+
+        tdSql.query("select count(*) from test1 session(ts, 1m)")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 1, 11)
+
+        tdSql.query("select first(col1) from test1 session(ts, 1s)")
+        tdSql.checkRows(2)
+        tdSql.checkData(0, 1, 1)
+        tdSql.checkData(1, 1, 1)
+
+        tdSql.query("select first(col1), last(col2) from test1 session(ts, 1s)")
+        tdSql.checkRows(2)
+        tdSql.checkData(0, 1, 1)
+        tdSql.checkData(0, 2, 10)
+        tdSql.checkData(1, 1, 1)
+        tdSql.checkData(1, 1, 1)
+
+        # add more function
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
@@ -0,0 +1,109 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+#import numpy as np
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor())
+
+        self.rowNum = 10
+        self.ts = 1537146000000
+
+    def run(self):
+        tdSql.prepare()
+
+        tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+            col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
+        tdSql.execute("create table test1 using test tags('beijing')")
+        col0 = 0
+        for i in range(self.rowNum):
+            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+                % (self.ts + i, col0, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+
+        # operation not allowed on super table
+        tdSql.error("select count(*) from test session(ts, 1s)")
+        # operation not allowde on col pro
+        tdSql.error("select * from test1 session(ts, 1s)")
+        # operation not allowed on col except primary ts
+        tdSql.error("select * from test1 session(col1, 1s)")
+
+        tdSql.query("select count(*) from test1 state_window(col1)")
+
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, self.rowNum)
+        # append more data
+
+        col0 = col0 + 1
+        for i in range(self.rowNum):
+            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+                % (self.ts + i + 10000, col0, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+
+        tdSql.query("select count(*) from test1 state_window(col1)")
+
+        tdSql.checkRows(2)
+        tdSql.checkData(0, 0, self.rowNum)
+        tdSql.checkData(1, 0, self.rowNum)
+
+        tdSql.query("select first(col1) from test1 state_window(col1)")
+        tdSql.checkRows(2)
+        col0 = col0 - 1
+        tdSql.checkData(0, 0, col0)
+        col0 = col0 + 1
+        tdSql.checkData(1, 0, col0)
+
+        tdSql.query("select first(col2) from test1 state_window(col1)")
+        tdSql.checkRows(2)
+        tdSql.checkData(0, 0, 1)
+        tdSql.checkData(1, 0, 1)
+
+        tdSql.query("select count(col1), first(col2) from test1 state_window(col1)")
+        tdSql.checkRows(2)
+        tdSql.checkData(0, 0, 10)
+        tdSql.checkData(0, 1, 1)
+
+        tdSql.checkData(1, 0, 10)
+        tdSql.checkData(1, 1, 1)
+
+        #tdSql.query("select count(*) from test1 session(ts, 1m)")
+        #tdSql.checkRows(1)
+        #tdSql.checkData(0, 1, 11)
+
+        #tdSql.query("select first(col1) from test1 session(ts, 1s)")
+        #tdSql.checkRows(2)
+        #tdSql.checkData(0, 1, 1)
+        #tdSql.checkData(1, 1, 1)
+
+        #tdSql.query("select first(col1), last(col2) from test1 session(ts, 1s)")
+        #tdSql.checkRows(2)
+        #tdSql.checkData(0, 1, 1)
+        #tdSql.checkData(0, 2, 10)
+        #tdSql.checkData(1, 1, 1)
+        #tdSql.checkData(1, 1, 1)
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
@@ -0,0 +1,82 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import tdDnodes
+
+##TODO: auto test version is currently unsupported, need to come up with
+# an auto test version in the future
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+    def getBuildPath(self):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root)-len("/build/bin")]
+                    break
+        return buildPath
+
+    def run(self):
+        tdSql.prepare()
+        buildPath = self.getBuildPath()
+        if (buildPath == ""):
+            tdLog.exit("taosd not found!")
+        else:
+            tdLog.info("taosd found in %s" % buildPath)
+        binPath = buildPath+ "/build/bin/"
+
+        #alter cache block to 3, then check alter
+        tdSql.execute('alter database db blocks 3')
+        tdSql.query('show databases')
+        tdSql.checkData(0,9,3)
+
+        #run taosdemo to occupy all cache, need to manually check memory consumption
+        os.system("%staosdemo -f tools/taosdemoAllTest/manual_block1_comp.json" % binPath)
+        input("please check memory usage for taosd. After checking, press enter")
+
+        #alter cache block to 8, then check alter
+        tdSql.execute('alter database db blocks 8')
+        tdSql.query('show databases')
+        tdSql.checkData(0,9,8)
+
+        #run taosdemo to occupy all cache, need to manually check memory consumption
+        os.system("%staosdemo -f tools/taosdemoAllTest/manual_block2.json" % binPath)
+        input("please check memory usage for taosd. After checking, press enter")
+
+        ##expected result the peak memory consumption should increase by around 80MB = 5 blocks of cache
+
+        ##test results
+        #2021/06/02 before:2621700K after: 2703640K memory usage increased by 80MB = 5 block
+        # confirm with the change in block. Baosheng Chang
+
+    def stop(self):
+        tdSql.close()
+        tdLog.debug("%s alter block manual check finish" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
@@ -0,0 +1,126 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import tdDnodes
+
+##TODO: auto test version is currently unsupported, need to come up with
+# an auto test version in the future
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+    def getRootPath(self):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+        print(selfPath)
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+            print(projPath)
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+            print("test" + projPath)
+
+        for root, dirs, files in os.walk(projPath):
+            if ('data' in dirs and 'sim' in root):
+                rootPath = root
+
+        return rootPath
+
+    def getBuildPath(self):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root)-len("/build/bin")]
+                    break
+        return buildPath
+
+    def run(self):
+        dnodePath = self.getRootPath()
+        os.system(f'rm -rf {dnodePath}/data/* {dnodePath}/log/*')
+        tdSql.prepare()
+        buildPath = self.getBuildPath()
+        if (buildPath == ""):
+            tdLog.exit("taosd not found!")
+        else:
+            tdLog.info("taosd found in %s" % buildPath)
+        binPath = buildPath+ "/build/bin/"
+
+        #comp is at 14
+        #check disk usage when comp=2
+        tdSql.query('show databases')
+        tdSql.execute('alter database db blocks 3') # minimize the data in cache
+        tdSql.checkData(0,14,2)
+        os.system("%staosdemo -f tools/taosdemoAllTest/manual_block1_comp.json" % binPath)
+        print("default location is at /home/bryan/Documents/Github/TDengine/sim/dnode1/data/vnode")
+        print('comp = 2')
+        input("please check disk usage for taosd. After checking, press enter")
+
+        #removing all data file
+        os.system(f'sudo rm -rf {dnodePath}/data/* {dnodePath}/log/*')
+        #print(f'rm -rf {dnodePath}/data/* {dnodePath}/log/*') #for showing the command ran
+        input("please check if the pervious data is being deleted. Then, press enter")
+
+        #check disk usage when comp=0
+        tdSql.prepare()
+        tdSql.query('show databases')
+        tdSql.checkData(0,14,2)
+        tdSql.execute('alter database db comp 0')
+        tdSql.query('show databases')
+        tdSql.checkData(0,14,0)
+        os.system("%staosdemo -f tools/taosdemoAllTest/manual_block1_comp.json" % binPath)
+        print("default location is at /home/bryan/Documents/Github/TDengine/sim/dnode1/data")
+        print('comp = 0')
+        input("please check disk usage for taosd. After checking, press enter")
+
+        #removing all data file
+        os.system(f'sudo rm -rf {dnodePath}/data/* {dnodePath}/log/*')
+        #print(f'rm -rf {dnodePath}/data/* {dnodePath}/log/*') #for showing the command ran
+        input("please check if the pervious data is being deleted. Then, press enter")
+
+        #check disk usage when comp=1
+        tdSql.prepare()
+        tdSql.query('show databases')
+        tdSql.checkData(0,14,2)
+        tdSql.execute('alter database db comp 1')
+        tdSql.query('show databases')
+        tdSql.checkData(0,14,1)
+        os.system("%staosdemo -f tools/taosdemoAllTest/manual_block1_comp.json" % binPath)
+        print("default location is at /home/bryan/Documents/Github/TDengine/sim/dnode1/data")
+        print('comp = 1')
+        input("please check disk usage for taosd. After checking, press enter")
+
+        ##test result
+        # 2021/06/02 comp=2:13M comp=1:57M comp=0:399M. Test past
+        # each row entered is identical Tester - Baosheng Chang
+
+    def stop(self):
+        tdSql.close()
+        tdLog.debug("%s alter block manual check finish" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
@@ -45,7 +45,7 @@ class TDTestCase:
 tdSql.query("select * from st")
 tdSql.checkRows(1)

-tdSql.execute("alter table st add column length int")
+tdSql.execute("alter table st add column len int")
 tdSql.execute("insert into t1 values(now, 1, 2)")
 tdSql.query("select last(*) from st")
 tdSql.checkData(0, 2, 2);
@@ -0,0 +1,60 @@
+{
+"filetype": "insert",
+"cfgdir": "/etc/taos",
+"host": "127.0.0.1",
+"port": 6030,
+"user": "root",
+"password": "taosdata",
+"thread_count": 4,
+"thread_count_create_tbl": 4,
+"result_file": "./insert_res.txt",
+"confirm_parameter_prompt": "no",
+"insert_interval": 0,
+"interlace_rows": 100,
+"num_of_records_per_req": 32766,
+"databases": [{
+"dbinfo": {
+"name": "db",
+"drop": "no",
+"replica": 1,
+"days": 10,
+"cache": 16,
+"blocks": 3,
+"precision": "ms",
+"keep": 3650,
+"minRows": 1000,
+"maxRows": 4096,
+"comp":2,
+"walLevel":1,
+"cachelast":0,
+"quorum":1,
+"fsync":3000,
+"update": 0
+},
+"super_tables": [{
+"name": "stb",
+"child_table_exists":"no",
+"childtable_count": 500,
+"childtable_prefix": "stb_",
+"auto_create_table": "no",
+"batch_create_tbl_num": 20,
+"data_source": "sample",
+"insert_mode": "taosc",
+"insert_rows": 10000,
+"childtable_limit": 10,
+"childtable_offset":100,
+"interlace_rows": 0,
+"insert_interval":0,
+"max_sql_len": 1024000,
+"disorder_ratio": 0,
+"disorder_range": 1000,
+"timestamp_step": 1,
+"start_timestamp": "2019-10-01 00:00:00.000",
+"sample_format": "csv",
+"sample_file": "./tools/taosdemoAllTest/sample.csv",
+"tags_file": "",
+"columns": [{"type": "INT", "count":3}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}, {"type": "BOOL"}],
+"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+}]
+}]
+}
@@ -0,0 +1,60 @@
+{
+"filetype": "insert",
+"cfgdir": "/etc/taos",
+"host": "127.0.0.1",
+"port": 6030,
+"user": "root",
+"password": "taosdata",
+"thread_count": 4,
+"thread_count_create_tbl": 4,
+"result_file": "./insert_res.txt",
+"confirm_parameter_prompt": "no",
+"insert_interval": 0,
+"interlace_rows": 100,
+"num_of_records_per_req": 32766,
+"databases": [{
+"dbinfo": {
+"name": "db",
+"drop": "no",
+"replica": 1,
+"days": 10,
+"cache": 16,
+"blocks": 8,
+"precision": "ms",
+"keep": 3650,
+"minRows": 100,
+"maxRows": 4096,
+"comp":2,
+"walLevel":1,
+"cachelast":0,
+"quorum":1,
+"fsync":3000,
+"update": 0
+},
+"super_tables": [{
+"name": "stb",
+"child_table_exists":"yes",
+"childtable_count": 500,
+"childtable_prefix": "stb_",
+"auto_create_table": "no",
+"batch_create_tbl_num": 20,
+"data_source": "rand",
+"insert_mode": "taosc",
+"insert_rows": 100000,
+"childtable_limit": 500,
+"childtable_offset":0,
+"interlace_rows": 0,
+"insert_interval":0,
+"max_sql_len": 1024000,
+"disorder_ratio": 0,
+"disorder_range": 1000,
+"timestamp_step": 1,
+"start_timestamp": "now",
+"sample_format": "csv",
+"sample_file": "./sample.csv",
+"tags_file": "",
+"columns": [{"type": "INT"}],
+"tags": [{"type": "TINYINT", "count":2}]
+}]
+}]
+}
@@ -0,0 +1,118 @@
+system sh/stop_dnodes.sh
+
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c walLevel -v 1
+system sh/exec.sh -n dnode1 -s start
+sleep 100
+sql connect
+
+$dbPrefix = m_alt_db
+$tbPrefix = m_alt_tb
+$mtPrefix = m_alt_mt
+$tbNum = 10
+$rowNum = 5
+$totalNum = $tbNum * $rowNum
+$ts0 = 1537146000000
+$delta = 600000
+print ========== alter.sim
+$i = 0
+$db = $dbPrefix . $i
+$mt = $mtPrefix . $i
+
+sql drop database if exists $db
+sql create database $db
+sql use $db
+##### alter table test, simeplest case
+sql create table tb (ts timestamp, c1 int, c2 binary(10), c3 nchar(10))
+sql insert into tb values (now, 1, "1", "1")
+sql alter table tb modify column c2 binary(20);
+if $rows != 0 then
+return -1
+endi
+sql alter table tb modify column c3 nchar(20);
+if $rows != 0 then
+return -1
+endi
+
+
+sql create stable stb (ts timestamp, c1 int, c2 binary(10), c3 nchar(10)) tags(id1 int, id2 binary(10), id3 nchar(10))
+sql create table tb1 using stb tags(1, "a", "b")
+sql insert into tb1 values (now, 1, "1", "1")
+sql alter stable stb modify column c2 binary(20);
+if $rows != 0 then
+return -1
+endi
+sql alter table stb modify column c2 binary(30);
+if $rows != 0 then
+return -1
+endi
+sql alter stable stb modify column c3 nchar(20);
+if $rows != 0 then
+return -1
+endi
+sql alter table stb modify column c3 nchar(30);
+if $rows != 0 then
+return -1
+endi
+
+sql alter table stb modify tag id2 binary(11);
+if $rows != 0 then
+return -1
+endi
+sql alter stable stb modify tag id2 binary(11);
+if $rows != 0 then
+return -1
+endi
+sql alter table stb modify tag id3 nchar(11);
+if $rows != 0 then
+return -1
+endi
+sql alter stable stb modify tag id3 nchar(11);
+if $rows != 0 then
+return -1
+endi
+
+##### ILLEGAL OPERATIONS
+
+# try dropping columns that are defined in metric
+sql_error alter table tb modify column c1 binary(10);
+sql_error alter table tb modify column c1 double;
+sql_error alter table tb modify column c2 int;
+sql_error alter table tb modify column c2 binary(10);
+sql_error alter table tb modify column c2 binary(9);
+sql_error alter table tb modify column c2 binary(-9);
+sql_error alter table tb modify column c2 binary(0);
+sql_error alter table tb modify column c2 binary(17000);
+sql_error alter table tb modify column c2 nchar(30);
+sql_error alter table tb modify column c3 double;
+sql_error alter table tb modify column c3 nchar(10);
+sql_error alter table tb modify column c3 nchar(0);
+sql_error alter table tb modify column c3 nchar(-1);
+sql_error alter table tb modify column c3 binary(80);
+sql_error alter table tb modify column c3 nchar(17000);
+sql_error alter table tb modify column c3 nchar(100), c2 binary(30);
+sql_error alter table tb modify column c1 nchar(100), c2 binary(30);
+sql_error alter stable tb modify column c2 binary(30);
+sql_error alter table tb modify tag c2 binary(30);
+sql_error alter table stb modify tag id2 binary(10);
+sql_error alter table stb modify tag id2 nchar(30);
+sql_error alter stable stb modify tag id2 binary(10);
+sql_error alter stable stb modify tag id2 nchar(30);
+sql_error alter table stb modify tag id3 nchar(10);
+sql_error alter table stb modify tag id3 binary(30);
+sql_error alter stable stb modify tag id3 nchar(10);
+sql_error alter stable stb modify tag id3 binary(30);
+sql_error alter stable stb modify tag id1 binary(30);
+sql_error alter stable stb modify tag c1 binary(30);
+
+
+sql_error alter table tb1 modify column c2 binary(30);
+sql_error alter table tb1 modify column c3 nchar(30);
+sql_error alter table tb1 modify tag id2 binary(30);
+sql_error alter table tb1 modify tag id3 nchar(30);
+sql_error alter stable tb1 modify tag id2 binary(30);
+sql_error alter stable tb1 modify tag id3 nchar(30);
+sql_error alter stable tb1 modify column c2 binary(30);
+
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT