diff --git a/cmake/install.inc b/cmake/install.inc
index 30aa801122..fced638966 100755
--- a/cmake/install.inc
+++ b/cmake/install.inc
@@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
- INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.32-dist.jar DESTINATION connector/jdbc)
+ INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-*-dist.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt
index 81af0ec144..7791317969 100644
--- a/src/connector/jdbc/CMakeLists.txt
+++ b/src/connector/jdbc/CMakeLists.txt
@@ -8,8 +8,8 @@ IF (TD_MVN_INSTALLED)
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
POST_BUILD
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
- COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.32-dist.jar ${LIBRARY_OUTPUT_PATH}
+ COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-*-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})
-ENDIF ()
\ No newline at end of file
+ENDIF ()
diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml
index 61d7fb85ef..a1aa41b351 100644
--- a/src/connector/jdbc/pom.xml
+++ b/src/connector/jdbc/pom.xml
@@ -3,7 +3,7 @@
 <modelVersion>4.0.0</modelVersion>
 <groupId>com.taosdata.jdbc</groupId>
 <artifactId>taos-jdbcdriver</artifactId>
- <version>2.0.32</version>
+ <version>2.0.33</version>
 <packaging>jar</packaging>
 <name>JDBCDriver</name>
 <url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
@@ -40,7 +40,7 @@
 <groupId>junit</groupId>
 <artifactId>junit</artifactId>
- <version>4.13</version>
+ <version>4.13.1</version>
 <scope>test</scope>
@@ -57,7 +57,7 @@
 <groupId>com.google.guava</groupId>
 <artifactId>guava</artifactId>
- <version>29.0-jre</version>
+ <version>30.0-jre</version>
diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index fa3d263678..9c547ff755 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -75,7 +75,7 @@ enum TEST_MODE {
#define MAX_RECORDS_PER_REQ 32766
-#define HEAD_BUFF_LEN 1024*24 // 16*1024 + (192+32)*2 + insert into ..
+#define HEAD_BUFF_LEN TSDB_MAX_COLUMNS*24 // 16*MAX_COLUMNS + (192+32)*2 + insert into ..
#define MAX_SQL_SIZE 65536
#define BUFFER_SIZE (65536*2)
@@ -84,26 +84,23 @@ enum TEST_MODE {
#define MAX_PASSWORD_SIZE 64
#define MAX_HOSTNAME_SIZE 64
#define MAX_TB_NAME_SIZE 64
-#define MAX_DATA_SIZE (16*1024)+20 // max record len: 16*1024, timestamp string and ,('') need extra space
-#define MAX_NUM_DATATYPE 10
+#define MAX_DATA_SIZE (16*TSDB_MAX_COLUMNS)+20 // max record len: 16*MAX_COLUMNS, timestamp string and ,('') need extra space
#define OPT_ABORT 1 /* –abort */
#define STRING_LEN 60000
#define MAX_PREPARED_RAND 1000000
#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255.
-#define MAX_SAMPLES_ONCE_FROM_FILE 10000
-#define MAX_NUM_DATATYPE 10
+#define MAX_SAMPLES_ONCE_FROM_FILE 10000
+#define MAX_NUM_COLUMNS (TSDB_MAX_COLUMNS - 1) // exclude first column timestamp
-#define MAX_DB_COUNT 8
-#define MAX_SUPER_TABLE_COUNT 200
-#define MAX_COLUMN_COUNT 1024
-#define MAX_TAG_COUNT 128
+#define MAX_DB_COUNT 8
+#define MAX_SUPER_TABLE_COUNT 200
-#define MAX_QUERY_SQL_COUNT 100
-#define MAX_QUERY_SQL_LENGTH 1024
+#define MAX_QUERY_SQL_COUNT 100
+#define MAX_QUERY_SQL_LENGTH 1024
-#define MAX_DATABASE_COUNT 256
-#define INPUT_BUF_LEN 256
+#define MAX_DATABASE_COUNT 256
+#define INPUT_BUF_LEN 256
#define DEFAULT_TIMESTAMP_STEP 1
@@ -218,7 +215,7 @@ typedef struct SArguments_S {
bool performance_print;
char * output_file;
bool async_mode;
- char * datatype[MAX_NUM_DATATYPE + 1];
+ char * datatype[MAX_NUM_COLUMNS + 1];
uint32_t len_of_binary;
uint32_t num_of_CPR;
uint32_t num_of_threads;
@@ -274,9 +271,9 @@ typedef struct SSuperTable_S {
char tagsFile[MAX_FILE_NAME_LEN];
uint32_t columnCount;
- StrColumn columns[MAX_COLUMN_COUNT];
+ StrColumn columns[TSDB_MAX_COLUMNS];
uint32_t tagCount;
- StrColumn tags[MAX_TAG_COUNT];
+ StrColumn tags[TSDB_MAX_TAGS];
char* childTblName;
char* colsOfCreateChildTable;
@@ -565,6 +562,8 @@ double randdouble[MAX_PREPARED_RAND];
char *aggreFunc[] = {"*", "count(*)", "avg(col0)", "sum(col0)",
"max(col0)", "min(col0)", "first(col0)", "last(col0)"};
+#define DEFAULT_DATATYPE_NUM 3
+
SArguments g_args = {
NULL, // metaFile
0, // test_mode
@@ -595,7 +594,7 @@ SArguments g_args = {
{
"FLOAT", // datatype
"INT", // datatype
- "FLOAT", // datatype
+ "FLOAT", // datatype. DEFAULT_DATATYPE_NUM is 3
},
16, // len_of_binary
4, // num_of_CPR
@@ -725,9 +724,13 @@ static void printHelp() {
"The data_type of columns, default: FLOAT, INT, FLOAT.");
printf("%s%s%s%s\n", indent, "-w", indent,
"The length of data_type 'BINARY' or 'NCHAR'. Default is 16");
- printf("%s%s%s%s%d\n", indent, "-l", indent,
- "The number of columns per record. Default is 3. Max values is ",
- MAX_NUM_DATATYPE);
+ printf("%s%s%s%s%d%s%d\n", indent, "-l", indent,
+ "The number of columns per record. Default is ",
+ DEFAULT_DATATYPE_NUM,
+ ". Max values is ",
+ MAX_NUM_COLUMNS);
+ printf("%s%s%s%s\n", indent, indent, indent,
+ "All of the new column(s) type is INT. If use -b to specify column type, -l will be ignored.");
printf("%s%s%s%s\n", indent, "-T", indent,
"The number of threads. Default is 10.");
printf("%s%s%s%s\n", indent, "-i", indent,
@@ -931,16 +934,18 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
}
arguments->num_of_CPR = atoi(argv[++i]);
- if (arguments->num_of_CPR > MAX_NUM_DATATYPE) {
- printf("WARNING: max acceptible columns count is %d\n", MAX_NUM_DATATYPE);
+ if (arguments->num_of_CPR > MAX_NUM_COLUMNS) {
+ printf("WARNING: max acceptible columns count is %d\n", MAX_NUM_COLUMNS);
prompt();
- arguments->num_of_CPR = MAX_NUM_DATATYPE;
+ arguments->num_of_CPR = MAX_NUM_COLUMNS;
}
- for (int col = arguments->num_of_CPR; col < MAX_NUM_DATATYPE; col++) {
+ for (int col = DEFAULT_DATATYPE_NUM; col < arguments->num_of_CPR; col ++) {
+ arguments->datatype[col] = "INT";
+ }
+ for (int col = arguments->num_of_CPR; col < MAX_NUM_COLUMNS; col++) {
arguments->datatype[col] = NULL;
}
-
} else if (strcmp(argv[i], "-b") == 0) {
arguments->demo_mode = false;
if (argc == i+1) {
@@ -990,7 +995,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
}
arguments->datatype[index++] = token;
token = strsep(&running, ",");
- if (index >= MAX_NUM_DATATYPE) break;
+ if (index >= MAX_NUM_COLUMNS) break;
}
arguments->datatype[index] = NULL;
}
@@ -1086,7 +1091,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
}
int columnCount;
- for (columnCount = 0; columnCount < MAX_NUM_DATATYPE; columnCount ++) {
+ for (columnCount = 0; columnCount < MAX_NUM_COLUMNS; columnCount ++) {
if (g_args.datatype[columnCount] == NULL) {
break;
}
@@ -1111,7 +1116,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
arguments->use_metric ? "true" : "false");
if (*(arguments->datatype)) {
printf("# Specified data type: ");
- for (int i = 0; i < MAX_NUM_DATATYPE; i++)
+ for (int i = 0; i < MAX_NUM_COLUMNS; i++)
if (arguments->datatype[i])
printf("%s,", arguments->datatype[i]);
else
@@ -2389,8 +2394,15 @@ static char* generateTagVaulesForStb(SSuperTable* stbInfo, int32_t tableSeq) {
tmfree(buf);
} else if (0 == strncasecmp(stbInfo->tags[i].dataType,
"int", strlen("int"))) {
- dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
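+ // Demo mode: the first INT tag value cycles through tableSeq % 10, spreading child tables over ten groups.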
+ if ((g_args.demo_mode) && (i == 0)) {
+ dataLen += snprintf(dataBuf + dataLen,
+ TSDB_MAX_SQL_LEN - dataLen,
+ "%d, ", tableSeq % 10);
+ } else {
+ dataLen += snprintf(dataBuf + dataLen,
+ TSDB_MAX_SQL_LEN - dataLen,
"%d, ", tableSeq);
+ }
} else if (0 == strncasecmp(stbInfo->tags[i].dataType,
"bigint", strlen("bigint"))) {
dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
@@ -2787,16 +2799,26 @@ static int createSuperTable(
char* dataType = superTbl->tags[tagIndex].dataType;
if (strcasecmp(dataType, "BINARY") == 0) {
- len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex,
- "BINARY", superTbl->tags[tagIndex].dataLen);
+ if ((g_args.demo_mode) && (tagIndex == 1)) {
+ len += snprintf(tags + len, STRING_LEN - len,
+ "loction BINARY(%d), ",
+ superTbl->tags[tagIndex].dataLen);
+ } else {
+ len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ",
+ tagIndex, "BINARY", superTbl->tags[tagIndex].dataLen);
+ }
lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3;
} else if (strcasecmp(dataType, "NCHAR") == 0) {
len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex,
"NCHAR", superTbl->tags[tagIndex].dataLen);
lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3;
} else if (strcasecmp(dataType, "INT") == 0) {
- len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
+ if ((g_args.demo_mode) && (tagIndex == 0)) {
+ len += snprintf(tags + len, STRING_LEN - len, "groupId INT, ");
+ } else {
+ len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
"INT");
+ }
lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 11;
} else if (strcasecmp(dataType, "BIGINT") == 0) {
len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
@@ -3081,7 +3103,7 @@ static int startMultiThreadCreateChildTable(
char* cols, int threads, uint64_t tableFrom, int64_t ntables,
char* db_name, SSuperTable* superTblInfo) {
- pthread_t *pids = malloc(threads * sizeof(pthread_t));
+ pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
if ((NULL == pids) || (NULL == infos)) {
@@ -3352,9 +3374,9 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
}
int columnSize = cJSON_GetArraySize(columns);
- if ((columnSize + 1/* ts */) > MAX_COLUMN_COUNT) {
+ if ((columnSize + 1/* ts */) > TSDB_MAX_COLUMNS) {
errorPrint("%s() LN%d, failed to read json, column size overflow, max column size is %d\n",
- __func__, __LINE__, MAX_COLUMN_COUNT);
+ __func__, __LINE__, TSDB_MAX_COLUMNS);
goto PARSE_OVER;
}
@@ -3410,9 +3432,9 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
}
}
- if ((index + 1 /* ts */) > MAX_COLUMN_COUNT) {
+ if ((index + 1 /* ts */) > MAX_NUM_COLUMNS) {
errorPrint("%s() LN%d, failed to read json, column size overflow, allowed max column size is %d\n",
- __func__, __LINE__, MAX_COLUMN_COUNT);
+ __func__, __LINE__, MAX_NUM_COLUMNS);
goto PARSE_OVER;
}
@@ -3429,9 +3451,9 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
}
int tagSize = cJSON_GetArraySize(tags);
- if (tagSize > MAX_TAG_COUNT) {
+ if (tagSize > TSDB_MAX_TAGS) {
errorPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n",
- __func__, __LINE__, MAX_TAG_COUNT);
+ __func__, __LINE__, TSDB_MAX_TAGS);
goto PARSE_OVER;
}
@@ -3481,17 +3503,17 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
}
}
- if (index > MAX_TAG_COUNT) {
+ if (index > TSDB_MAX_TAGS) {
errorPrint("%s() LN%d, failed to read json, tags size overflow, allowed max tag count is %d\n",
- __func__, __LINE__, MAX_TAG_COUNT);
+ __func__, __LINE__, TSDB_MAX_TAGS);
goto PARSE_OVER;
}
superTbls->tagCount = index;
- if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > MAX_COLUMN_COUNT) {
+ if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > TSDB_MAX_COLUMNS) {
errorPrint("%s() LN%d, columns + tags is more than allowed max columns count: %d\n",
- __func__, __LINE__, MAX_COLUMN_COUNT);
+ __func__, __LINE__, TSDB_MAX_COLUMNS);
goto PARSE_OVER;
}
ret = true;
@@ -6579,7 +6601,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
}
}
- pthread_t *pids = malloc(threads * sizeof(pthread_t));
+ pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
assert(pids != NULL);
threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
@@ -7238,8 +7260,8 @@ static int queryTestProcess() {
if ((nSqlCount > 0) && (nConcurrent > 0)) {
- pids = malloc(nConcurrent * nSqlCount * sizeof(pthread_t));
- infos = malloc(nConcurrent * nSqlCount * sizeof(threadInfo));
+ pids = calloc(1, nConcurrent * nSqlCount * sizeof(pthread_t));
+ infos = calloc(1, nConcurrent * nSqlCount * sizeof(threadInfo));
if ((NULL == pids) || (NULL == infos)) {
taos_close(taos);
@@ -7284,8 +7306,8 @@ static int queryTestProcess() {
//==== create sub threads for query from all sub table of the super table
if ((g_queryInfo.superQueryInfo.sqlCount > 0)
&& (g_queryInfo.superQueryInfo.threadCnt > 0)) {
- pidsOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * sizeof(pthread_t));
- infosOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo));
+ pidsOfSub = calloc(1, g_queryInfo.superQueryInfo.threadCnt * sizeof(pthread_t));
+ infosOfSub = calloc(1, g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo));
if ((NULL == pidsOfSub) || (NULL == infosOfSub)) {
free(infos);
@@ -7718,11 +7740,13 @@ static int subscribeTestProcess() {
exit(-1);
}
- pids = malloc(
+ pids = calloc(
+ 1,
g_queryInfo.specifiedQueryInfo.sqlCount *
g_queryInfo.specifiedQueryInfo.concurrent *
sizeof(pthread_t));
- infos = malloc(
+ infos = calloc(
+ 1,
g_queryInfo.specifiedQueryInfo.sqlCount *
g_queryInfo.specifiedQueryInfo.concurrent *
sizeof(threadInfo));
@@ -7751,11 +7775,13 @@ static int subscribeTestProcess() {
} else {
if ((g_queryInfo.superQueryInfo.sqlCount > 0)
&& (g_queryInfo.superQueryInfo.threadCnt > 0)) {
- pidsOfStable = malloc(
+ pidsOfStable = calloc(
+ 1,
g_queryInfo.superQueryInfo.sqlCount *
g_queryInfo.superQueryInfo.threadCnt *
sizeof(pthread_t));
- infosOfStable = malloc(
+ infosOfStable = calloc(
+ 1,
g_queryInfo.superQueryInfo.sqlCount *
g_queryInfo.superQueryInfo.threadCnt *
sizeof(threadInfo));
@@ -7919,7 +7945,7 @@ static void setParaFromArg(){
g_Dbs.db[0].superTbls[0].maxSqlLen = g_args.max_sql_len;
g_Dbs.db[0].superTbls[0].columnCount = 0;
- for (int i = 0; i < MAX_NUM_DATATYPE; i++) {
+ for (int i = 0; i < MAX_NUM_COLUMNS; i++) {
if (data_type[i] == NULL) {
break;
}
@@ -8072,7 +8098,7 @@ static void queryResult() {
// query data
pthread_t read_id;
- threadInfo *pThreadInfo = malloc(sizeof(threadInfo));
+ threadInfo *pThreadInfo = calloc(1, sizeof(threadInfo));
assert(pThreadInfo);
pThreadInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000
pThreadInfo->start_table_from = 0;
diff --git a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h
index 0756e41785..c8741030c0 100644
--- a/src/query/inc/qUtil.h
+++ b/src/query/inc/qUtil.h
@@ -24,7 +24,18 @@
memcpy((_k) + sizeof(uint64_t), (_ori), (_len)); \
} while (0)
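+// Extended key layout used for pResultRowListSet: [SResultRowInfo pointer][table uid][window key],
+// so identical window keys coming from different result-row lists map to distinct hash entries.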
+#define SET_RES_EXT_WINDOW_KEY(_k, _ori, _len, _uid, _buf) \
+ do { \
+ assert(sizeof(_uid) == sizeof(uint64_t)); \
+ *(void **)(_k) = (_buf); \
+ *(uint64_t *)((_k) + POINTER_BYTES) = (_uid); \
+ memcpy((_k) + POINTER_BYTES + sizeof(uint64_t), (_ori), (_len)); \
+ } while (0)
+
+
#define GET_RES_WINDOW_KEY_LEN(_l) ((_l) + sizeof(uint64_t))
+#define GET_RES_EXT_WINDOW_KEY_LEN(_l) ((_l) + sizeof(uint64_t) + POINTER_BYTES)
+
#define GET_QID(_r) (((SQInfo*)((_r)->qinfo))->qId)
#define curTimeWindowIndex(_winres) ((_winres)->curIndex)
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index 3d585afb87..fa2ddb05b8 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -433,8 +433,8 @@ static SResultRow* doSetResultOutBufByKey(SQueryRuntimeEnv* pRuntimeEnv, SResult
existed = (pResultRowInfo->pResult[0] == (*p1));
pResultRowInfo->curPos = 0;
} else { // check if current pResultRowInfo contains the existed pResultRow
- SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid);
- int64_t* index = taosHashGet(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
+ SET_RES_EXT_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid, pResultRowInfo);
+ int64_t* index = taosHashGet(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_EXT_WINDOW_KEY_LEN(bytes));
if (index != NULL) {
pResultRowInfo->curPos = (int32_t) *index;
existed = true;
@@ -471,8 +471,8 @@ static SResultRow* doSetResultOutBufByKey(SQueryRuntimeEnv* pRuntimeEnv, SResult
pResultRowInfo->pResult[pResultRowInfo->size++] = pResult;
int64_t index = pResultRowInfo->curPos;
- SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid);
- taosHashPut(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &index, POINTER_BYTES);
+ SET_RES_EXT_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid, pResultRowInfo);
+ taosHashPut(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_EXT_WINDOW_KEY_LEN(bytes), &index, POINTER_BYTES);
}
// too many time window in query
@@ -1790,7 +1790,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
pRuntimeEnv->pResultRowHashTable = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
pRuntimeEnv->pResultRowListSet = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
- pRuntimeEnv->keyBuf = malloc(pQueryAttr->maxTableColumnWidth + sizeof(int64_t));
+ pRuntimeEnv->keyBuf = malloc(pQueryAttr->maxTableColumnWidth + sizeof(int64_t) + POINTER_BYTES);
pRuntimeEnv->pool = initResultRowPool(getResultRowSize(pRuntimeEnv));
pRuntimeEnv->prevRow = malloc(POINTER_BYTES * pQueryAttr->numOfCols + pQueryAttr->srcRowSize);
diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c
index 979e4e4cdd..f826c1aecd 100644
--- a/src/vnode/src/vnodeMain.c
+++ b/src/vnode/src/vnodeMain.c
@@ -47,9 +47,6 @@ int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg) {
return terrno;
}
- char rootDir[TSDB_FILENAME_LEN] = {0};
- sprintf(rootDir, "%s/vnode%d", tsVnodeDir, pVnodeCfg->cfg.vgId);
-
char vnodeDir[TSDB_FILENAME_LEN] = "\0";
snprintf(vnodeDir, TSDB_FILENAME_LEN, "/vnode/vnode%d", pVnodeCfg->cfg.vgId);
if (tfsMkdir(vnodeDir) < 0) {
@@ -63,23 +60,6 @@ int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg) {
return code;
}
- // STsdbCfg tsdbCfg = {0};
- // tsdbCfg.tsdbId = pVnodeCfg->cfg.vgId;
- // tsdbCfg.cacheBlockSize = pVnodeCfg->cfg.cacheBlockSize;
- // tsdbCfg.totalBlocks = pVnodeCfg->cfg.totalBlocks;
- // tsdbCfg.daysPerFile = pVnodeCfg->cfg.daysPerFile;
- // tsdbCfg.keep = pVnodeCfg->cfg.daysToKeep;
- // tsdbCfg.keep1 = pVnodeCfg->cfg.daysToKeep1;
- // tsdbCfg.keep2 = pVnodeCfg->cfg.daysToKeep2;
- // tsdbCfg.minRowsPerFileBlock = pVnodeCfg->cfg.minRowsPerFileBlock;
- // tsdbCfg.maxRowsPerFileBlock = pVnodeCfg->cfg.maxRowsPerFileBlock;
- // tsdbCfg.precision = pVnodeCfg->cfg.precision;
- // tsdbCfg.compression = pVnodeCfg->cfg.compression;
- // tsdbCfg.update = pVnodeCfg->cfg.update;
- // tsdbCfg.cacheLastRow = pVnodeCfg->cfg.cacheLastRow;
-
- // char tsdbDir[TSDB_FILENAME_LEN] = {0};
- // sprintf(tsdbDir, "vnode/vnode%d/tsdb", pVnodeCfg->cfg.vgId);
if (tsdbCreateRepo(pVnodeCfg->cfg.vgId) < 0) {
vError("vgId:%d, failed to create tsdb in vnode, reason:%s", pVnodeCfg->cfg.vgId, tstrerror(terrno));
return TSDB_CODE_VND_INIT_FAILED;
diff --git a/tests/comparisonTest/cassandra/cassandratest/pom.xml b/tests/comparisonTest/cassandra/cassandratest/pom.xml
index 8eeb5c3aa0..00630d93d1 100644
--- a/tests/comparisonTest/cassandra/cassandratest/pom.xml
+++ b/tests/comparisonTest/cassandra/cassandratest/pom.xml
@@ -75,7 +75,7 @@
 <groupId>junit</groupId>
 <artifactId>junit</artifactId>
- <version>4.11</version>
+ <version>4.13.1</version>
 <scope>test</scope>
diff --git a/tests/comparisonTest/opentsdb/opentsdbtest/pom.xml b/tests/comparisonTest/opentsdb/opentsdbtest/pom.xml
index e0ada8b763..b55a136c73 100644
--- a/tests/comparisonTest/opentsdb/opentsdbtest/pom.xml
+++ b/tests/comparisonTest/opentsdb/opentsdbtest/pom.xml
@@ -87,14 +87,14 @@
 <groupId>junit</groupId>
 <artifactId>junit</artifactId>
- <version>4.11</version>
+ <version>4.13.1</version>
 <scope>test</scope>
 </dependency>
 <dependency>
 <groupId>com.google.guava</groupId>
 <artifactId>guava</artifactId>
- <version>29.0-jre</version>
+ <version>30.0-jre</version>
diff --git a/tests/examples/JDBC/SpringJdbcTemplate/pom.xml b/tests/examples/JDBC/SpringJdbcTemplate/pom.xml
index 64a91b951b..eac3dec0a9 100644
--- a/tests/examples/JDBC/SpringJdbcTemplate/pom.xml
+++ b/tests/examples/JDBC/SpringJdbcTemplate/pom.xml
@@ -40,7 +40,7 @@
 <groupId>junit</groupId>
 <artifactId>junit</artifactId>
- <version>4.13</version>
+ <version>4.13.1</version>
 <scope>test</scope>
diff --git a/tests/examples/JDBC/connectionPools/pom.xml b/tests/examples/JDBC/connectionPools/pom.xml
index 045e9d336c..34518900ed 100644
--- a/tests/examples/JDBC/connectionPools/pom.xml
+++ b/tests/examples/JDBC/connectionPools/pom.xml
@@ -4,6 +4,11 @@
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
 <modelVersion>4.0.0</modelVersion>
+ <properties>
+ <maven.compiler.source>1.8</maven.compiler.source>
+ <maven.compiler.target>1.8</maven.compiler.target>
+ </properties>
+
 <groupId>com.taosdata.demo</groupId>
 <artifactId>connectionPools</artifactId>
 <version>1.0-SNAPSHOT</version>
@@ -46,9 +51,9 @@
- <groupId>log4j</groupId>
- <artifactId>log4j</artifactId>
- <version>1.2.17</version>
+ <groupId>org.apache.logging.log4j</groupId>
+ <artifactId>log4j-core</artifactId>
+ <version>2.14.1</version>
@@ -108,4 +113,4 @@
- </project>
\ No newline at end of file
+ </project>
diff --git a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/ConnectionPoolDemo.java b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/ConnectionPoolDemo.java
index bd57d138b2..96ad65aa4f 100644
--- a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/ConnectionPoolDemo.java
+++ b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/ConnectionPoolDemo.java
@@ -5,7 +5,8 @@ import com.taosdata.example.pool.C3p0Builder;
import com.taosdata.example.pool.DbcpBuilder;
import com.taosdata.example.pool.DruidPoolBuilder;
import com.taosdata.example.pool.HikariCpBuilder;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
import javax.sql.DataSource;
import java.sql.Connection;
@@ -17,7 +18,7 @@ import java.util.concurrent.TimeUnit;
public class ConnectionPoolDemo {
- private static Logger logger = Logger.getLogger(DruidPoolBuilder.class);
+ private static Logger logger = LogManager.getLogger(DruidPoolBuilder.class);
private static final String dbName = "pool_test";
private static String poolType = "hikari";
diff --git a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/common/InsertTask.java b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/common/InsertTask.java
index da7c9a22b5..f8f1555c08 100644
--- a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/common/InsertTask.java
+++ b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/common/InsertTask.java
@@ -1,6 +1,7 @@
package com.taosdata.example.common;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
import javax.sql.DataSource;
import java.sql.Connection;
@@ -10,7 +11,7 @@ import java.util.Random;
public class InsertTask implements Runnable {
private final Random random = new Random(System.currentTimeMillis());
- private static final Logger logger = Logger.getLogger(InsertTask.class);
+ private static final Logger logger = LogManager.getLogger(InsertTask.class);
private final DataSource ds;
private final String dbName;
diff --git a/tests/examples/JDBC/mybatisplus-demo/pom.xml b/tests/examples/JDBC/mybatisplus-demo/pom.xml
index a83d0a00e6..ad6a63e800 100644
--- a/tests/examples/JDBC/mybatisplus-demo/pom.xml
+++ b/tests/examples/JDBC/mybatisplus-demo/pom.xml
@@ -68,7 +68,7 @@
 <groupId>junit</groupId>
 <artifactId>junit</artifactId>
- <version>4.12</version>
+ <version>4.13.1</version>
 <scope>test</scope>
diff --git a/tests/examples/JDBC/taosdemo/pom.xml b/tests/examples/JDBC/taosdemo/pom.xml
index 22c2f3b63e..91b976c2ae 100644
--- a/tests/examples/JDBC/taosdemo/pom.xml
+++ b/tests/examples/JDBC/taosdemo/pom.xml
@@ -4,7 +4,7 @@
 <modelVersion>4.0.0</modelVersion>
 <groupId>com.taosdata</groupId>
 <artifactId>taosdemo</artifactId>
- <version>2.0</version>
+ <version>2.0.1</version>
 <name>taosdemo</name>
 <packaging>jar</packaging>
 <description>Demo project for TDengine</description>
@@ -81,20 +81,20 @@
 <groupId>mysql</groupId>
 <artifactId>mysql-connector-java</artifactId>
- <version>5.1.47</version>
+ <version>8.0.16</version>
 <scope>test</scope>
 </dependency>
 <dependency>
- <groupId>log4j</groupId>
- <artifactId>log4j</artifactId>
- <version>1.2.17</version>
+ <groupId>org.apache.logging.log4j</groupId>
+ <artifactId>log4j-core</artifactId>
+ <version>2.14.1</version>
 </dependency>
 <dependency>
 <groupId>junit</groupId>
 <artifactId>junit</artifactId>
- <version>4.12</version>
+ <version>4.13.1</version>
 <scope>test</scope>
diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java
index c361df82b0..d4f5ff2688 100644
--- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java
+++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java
@@ -8,7 +8,8 @@ import com.taosdata.taosdemo.service.SqlExecuteTask;
import com.taosdata.taosdemo.service.SubTableService;
import com.taosdata.taosdemo.service.SuperTableService;
import com.taosdata.taosdemo.service.data.SuperTableMetaGenerator;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
import javax.sql.DataSource;
import java.io.IOException;
@@ -20,7 +21,7 @@ import java.util.Map;
public class TaosDemoApplication {
- private static final Logger logger = Logger.getLogger(TaosDemoApplication.class);
+ private static final Logger logger = LogManager.getLogger(TaosDemoApplication.class);
public static void main(String[] args) throws IOException {
// 读配置参数
diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/DatabaseMapperImpl.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/DatabaseMapperImpl.java
index 421a2dea1f..9340fc3fdd 100644
--- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/DatabaseMapperImpl.java
+++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/DatabaseMapperImpl.java
@@ -1,14 +1,15 @@
package com.taosdata.taosdemo.dao;
import com.taosdata.taosdemo.utils.SqlSpeller;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
import org.springframework.jdbc.core.JdbcTemplate;
import javax.sql.DataSource;
import java.util.Map;
public class DatabaseMapperImpl implements DatabaseMapper {
- private static final Logger logger = Logger.getLogger(DatabaseMapperImpl.class);
+ private static final Logger logger = LogManager.getLogger(DatabaseMapperImpl.class);
private final JdbcTemplate jdbcTemplate;
diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SubTableMapperImpl.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SubTableMapperImpl.java
index 90b0990a2b..db0d43ff05 100644
--- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SubTableMapperImpl.java
+++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SubTableMapperImpl.java
@@ -3,7 +3,8 @@ package com.taosdata.taosdemo.dao;
import com.taosdata.taosdemo.domain.SubTableMeta;
import com.taosdata.taosdemo.domain.SubTableValue;
import com.taosdata.taosdemo.utils.SqlSpeller;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
import org.springframework.jdbc.core.JdbcTemplate;
import javax.sql.DataSource;
@@ -11,7 +12,7 @@ import java.util.List;
public class SubTableMapperImpl implements SubTableMapper {
- private static final Logger logger = Logger.getLogger(SubTableMapperImpl.class);
+ private static final Logger logger = LogManager.getLogger(SubTableMapperImpl.class);
private final JdbcTemplate jdbcTemplate;
public SubTableMapperImpl(DataSource dataSource) {
diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SuperTableMapperImpl.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SuperTableMapperImpl.java
index efa9a1f39e..658a403a0c 100644
--- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SuperTableMapperImpl.java
+++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SuperTableMapperImpl.java
@@ -2,13 +2,14 @@ package com.taosdata.taosdemo.dao;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.utils.SqlSpeller;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
import org.springframework.jdbc.core.JdbcTemplate;
import javax.sql.DataSource;
public class SuperTableMapperImpl implements SuperTableMapper {
- private static final Logger logger = Logger.getLogger(SuperTableMapperImpl.class);
+ private static final Logger logger = LogManager.getLogger(SuperTableMapperImpl.class);
private JdbcTemplate jdbcTemplate;
public SuperTableMapperImpl(DataSource dataSource) {
diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/TableMapperImpl.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/TableMapperImpl.java
index b049fbe197..16bc094848 100644
--- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/TableMapperImpl.java
+++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/TableMapperImpl.java
@@ -3,13 +3,14 @@ package com.taosdata.taosdemo.dao;
import com.taosdata.taosdemo.domain.TableMeta;
import com.taosdata.taosdemo.domain.TableValue;
import com.taosdata.taosdemo.utils.SqlSpeller;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
import org.springframework.jdbc.core.JdbcTemplate;
import java.util.List;
public class TableMapperImpl implements TableMapper {
- private static final Logger logger = Logger.getLogger(TableMapperImpl.class);
+ private static final Logger logger = LogManager.getLogger(TableMapperImpl.class);
private JdbcTemplate template;
@Override
diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java
index cea98a1c5d..b0a79dea78 100644
--- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java
+++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java
@@ -8,7 +8,8 @@ import com.taosdata.taosdemo.domain.SubTableValue;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.service.data.SubTableMetaGenerator;
import com.taosdata.taosdemo.service.data.SubTableValueGenerator;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
import javax.sql.DataSource;
import java.util.ArrayList;
@@ -20,7 +21,7 @@ import java.util.stream.IntStream;
public class SubTableService extends AbstractService {
private SubTableMapper mapper;
- private static final Logger logger = Logger.getLogger(SubTableService.class);
+ private static final Logger logger = LogManager.getLogger(SubTableService.class);
public SubTableService(DataSource datasource) {
this.mapper = new SubTableMapperImpl(datasource);