Merge branch 'develop' into feature/TD-4666
commit 7d94ae6319
@@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
 #INSTALL(TARGETS taos RUNTIME DESTINATION driver)
 #INSTALL(TARGETS shell RUNTIME DESTINATION .)
 IF (TD_MVN_INSTALLED)
-INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.32-dist.jar DESTINATION connector/jdbc)
+INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-*-dist.jar DESTINATION connector/jdbc)
 ENDIF ()
 ELSEIF (TD_DARWIN)
 SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")

@@ -8,8 +8,8 @@ IF (TD_MVN_INSTALLED)
 ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
 POST_BUILD
 COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
-COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.32-dist.jar ${LIBRARY_OUTPUT_PATH}
+COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-*-dist.jar ${LIBRARY_OUTPUT_PATH}
 COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
 COMMENT "build jdbc driver")
 ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})
 ENDIF ()
 ENDIF ()

@@ -3,7 +3,7 @@
 <modelVersion>4.0.0</modelVersion>
 <groupId>com.taosdata.jdbc</groupId>
 <artifactId>taos-jdbcdriver</artifactId>
-<version>2.0.32</version>
+<version>2.0.33</version>
 <packaging>jar</packaging>
 <name>JDBCDriver</name>
 <url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>

@@ -40,7 +40,7 @@
 <dependency>
 <groupId>junit</groupId>
 <artifactId>junit</artifactId>
-<version>4.13</version>
+<version>4.13.1</version>
 <scope>test</scope>
 </dependency>
 <!-- for restful -->

@@ -57,7 +57,7 @@
 <dependency>
 <groupId>com.google.guava</groupId>
 <artifactId>guava</artifactId>
-<version>29.0-jre</version>
+<version>30.0-jre</version>
 </dependency>
 </dependencies>

@@ -75,7 +75,7 @@ enum TEST_MODE {

 #define MAX_RECORDS_PER_REQ 32766

-#define HEAD_BUFF_LEN 1024*24 // 16*1024 + (192+32)*2 + insert into ..
+#define HEAD_BUFF_LEN TSDB_MAX_COLUMNS*24 // 16*MAX_COLUMNS + (192+32)*2 + insert into ..

 #define MAX_SQL_SIZE 65536
 #define BUFFER_SIZE (65536*2)

@@ -84,26 +84,23 @@ enum TEST_MODE {
 #define MAX_PASSWORD_SIZE 64
 #define MAX_HOSTNAME_SIZE 64
 #define MAX_TB_NAME_SIZE 64
-#define MAX_DATA_SIZE (16*1024)+20 // max record len: 16*1024, timestamp string and ,('') need extra space
-#define MAX_NUM_DATATYPE 10
+#define MAX_DATA_SIZE (16*TSDB_MAX_COLUMNS)+20 // max record len: 16*MAX_COLUMNS, timestamp string and ,('') need extra space
 #define OPT_ABORT 1 /* –abort */
 #define STRING_LEN 60000
 #define MAX_PREPARED_RAND 1000000
 #define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255.

-#define MAX_SAMPLES_ONCE_FROM_FILE 10000
-#define MAX_NUM_DATATYPE 10
+#define MAX_SAMPLES_ONCE_FROM_FILE 10000
+#define MAX_NUM_COLUMNS (TSDB_MAX_COLUMNS - 1) // exclude first column timestamp

-#define MAX_DB_COUNT 8
-#define MAX_SUPER_TABLE_COUNT 200
-#define MAX_COLUMN_COUNT 1024
-#define MAX_TAG_COUNT 128
+#define MAX_DB_COUNT 8
+#define MAX_SUPER_TABLE_COUNT 200

-#define MAX_QUERY_SQL_COUNT 100
-#define MAX_QUERY_SQL_LENGTH 1024
+#define MAX_QUERY_SQL_COUNT 100
+#define MAX_QUERY_SQL_LENGTH 1024

-#define MAX_DATABASE_COUNT 256
-#define INPUT_BUF_LEN 256
+#define MAX_DATABASE_COUNT 256
+#define INPUT_BUF_LEN 256

 #define DEFAULT_TIMESTAMP_STEP 1

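The hunk above replaces taosdemo's hard-coded limits with values derived from the engine's TSDB_MAX_COLUMNS, so every buffer scales with the server's column cap instead of a magic number. A minimal sketch of the relationship between the new defines, assuming 4096 as a stand-in for the real TSDB_MAX_COLUMNS exported by TDengine's headers:

#include <assert.h>
#include <stdio.h>

#define TSDB_MAX_COLUMNS 4096                        /* assumed stand-in; the real value comes from TDengine's headers */
#define MAX_NUM_COLUMNS  (TSDB_MAX_COLUMNS - 1)      /* exclude the mandatory first timestamp column */
#define MAX_DATA_SIZE    ((16*TSDB_MAX_COLUMNS)+20)  /* 16 bytes per column plus timestamp/quote overhead */
#define HEAD_BUFF_LEN    (TSDB_MAX_COLUMNS*24)

int main(void) {
    /* every derived size must grow with the column limit */
    assert(MAX_DATA_SIZE > 16 * MAX_NUM_COLUMNS);
    printf("columns=%d data=%d head=%d\n", MAX_NUM_COLUMNS, MAX_DATA_SIZE, HEAD_BUFF_LEN);
    return 0;
}
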
@@ -218,7 +215,7 @@ typedef struct SArguments_S {
 bool performance_print;
 char * output_file;
 bool async_mode;
-char * datatype[MAX_NUM_DATATYPE + 1];
+char * datatype[MAX_NUM_COLUMNS + 1];
 uint32_t len_of_binary;
 uint32_t num_of_CPR;
 uint32_t num_of_threads;

@@ -274,9 +271,9 @@ typedef struct SSuperTable_S {
 char tagsFile[MAX_FILE_NAME_LEN];

 uint32_t columnCount;
-StrColumn columns[MAX_COLUMN_COUNT];
+StrColumn columns[TSDB_MAX_COLUMNS];
 uint32_t tagCount;
-StrColumn tags[MAX_TAG_COUNT];
+StrColumn tags[TSDB_MAX_TAGS];

 char* childTblName;
 char* colsOfCreateChildTable;

@@ -565,6 +562,8 @@
 char *aggreFunc[] = {"*", "count(*)", "avg(col0)", "sum(col0)",
 "max(col0)", "min(col0)", "first(col0)", "last(col0)"};

+#define DEFAULT_DATATYPE_NUM 3
+
 SArguments g_args = {
 NULL, // metaFile
 0, // test_mode

@@ -595,7 +594,7 @@ SArguments g_args = {
 {
 "FLOAT", // datatype
 "INT", // datatype
-"FLOAT", // datatype
+"FLOAT", // datatype. DEFAULT_DATATYPE_NUM is 3
 },
 16, // len_of_binary
 4, // num_of_CPR

@@ -725,9 +724,13 @@ static void printHelp() {
 "The data_type of columns, default: FLOAT, INT, FLOAT.");
 printf("%s%s%s%s\n", indent, "-w", indent,
 "The length of data_type 'BINARY' or 'NCHAR'. Default is 16");
-printf("%s%s%s%s%d\n", indent, "-l", indent,
-"The number of columns per record. Default is 3. Max values is ",
-MAX_NUM_DATATYPE);
+printf("%s%s%s%s%d%s%d\n", indent, "-l", indent,
+"The number of columns per record. Default is ",
+DEFAULT_DATATYPE_NUM,
+". Max values is ",
+MAX_NUM_COLUMNS);
+printf("%s%s%s%s\n", indent, indent, indent,
+"All of the new column(s) type is INT. If use -b to specify column type, -l will be ignored.");
 printf("%s%s%s%s\n", indent, "-T", indent,
 "The number of threads. Default is 10.");
 printf("%s%s%s%s\n", indent, "-i", indent,

@@ -931,16 +934,18 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
 }
 arguments->num_of_CPR = atoi(argv[++i]);

-if (arguments->num_of_CPR > MAX_NUM_DATATYPE) {
-printf("WARNING: max acceptible columns count is %d\n", MAX_NUM_DATATYPE);
+if (arguments->num_of_CPR > MAX_NUM_COLUMNS) {
+printf("WARNING: max acceptible columns count is %d\n", MAX_NUM_COLUMNS);
 prompt();
-arguments->num_of_CPR = MAX_NUM_DATATYPE;
+arguments->num_of_CPR = MAX_NUM_COLUMNS;
 }

-for (int col = arguments->num_of_CPR; col < MAX_NUM_DATATYPE; col++) {
+for (int col = DEFAULT_DATATYPE_NUM; col < arguments->num_of_CPR; col ++) {
+arguments->datatype[col] = "INT";
+}
+for (int col = arguments->num_of_CPR; col < MAX_NUM_COLUMNS; col++) {
 arguments->datatype[col] = NULL;
 }

 } else if (strcmp(argv[i], "-b") == 0) {
 arguments->demo_mode = false;
 if (argc == i+1) {

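The new -l handling above clamps the requested column count to MAX_NUM_COLUMNS, back-fills columns beyond the three defaults with INT, and NULL-terminates the list. A standalone sketch of that logic, with the names from the diff but the argv parsing omitted and a small assumed MAX_NUM_COLUMNS:

#include <stdio.h>

#define DEFAULT_DATATYPE_NUM 3
#define MAX_NUM_COLUMNS      15   /* assumed small value for the sketch; the real code uses TSDB_MAX_COLUMNS - 1 */

int main(void) {
    const char *datatype[MAX_NUM_COLUMNS + 1] = {"FLOAT", "INT", "FLOAT"};
    int num_of_CPR = 6;                       /* as if invoked with -l 6 */

    if (num_of_CPR > MAX_NUM_COLUMNS)         /* warn and clamp, as in the diff */
        num_of_CPR = MAX_NUM_COLUMNS;
    for (int col = DEFAULT_DATATYPE_NUM; col < num_of_CPR; col++)
        datatype[col] = "INT";                /* columns past the defaults become INT */
    for (int col = num_of_CPR; col < MAX_NUM_COLUMNS; col++)
        datatype[col] = NULL;                 /* terminate the active list */

    for (int i = 0; datatype[i]; i++)
        printf("%s,", datatype[i]);           /* prints FLOAT,INT,FLOAT,INT,INT,INT, */
    printf("\n");
    return 0;
}
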
@@ -990,7 +995,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
 }
 arguments->datatype[index++] = token;
 token = strsep(&running, ",");
-if (index >= MAX_NUM_DATATYPE) break;
+if (index >= MAX_NUM_COLUMNS) break;
 }
 arguments->datatype[index] = NULL;
 }

@@ -1086,7 +1091,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
 }

 int columnCount;
-for (columnCount = 0; columnCount < MAX_NUM_DATATYPE; columnCount ++) {
+for (columnCount = 0; columnCount < MAX_NUM_COLUMNS; columnCount ++) {
 if (g_args.datatype[columnCount] == NULL) {
 break;
 }

@@ -1111,7 +1116,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
 arguments->use_metric ? "true" : "false");
 if (*(arguments->datatype)) {
 printf("# Specified data type: ");
-for (int i = 0; i < MAX_NUM_DATATYPE; i++)
+for (int i = 0; i < MAX_NUM_COLUMNS; i++)
 if (arguments->datatype[i])
 printf("%s,", arguments->datatype[i]);
 else

@@ -2389,8 +2394,15 @@ static char* generateTagVaulesForStb(SSuperTable* stbInfo, int32_t tableSeq) {
 tmfree(buf);
 } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
 "int", strlen("int"))) {
-dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
+if ((g_args.demo_mode) && (i == 0)) {
+dataLen += snprintf(dataBuf + dataLen,
+TSDB_MAX_SQL_LEN - dataLen,
+"%d, ", tableSeq % 10);
+} else {
+dataLen += snprintf(dataBuf + dataLen,
+TSDB_MAX_SQL_LEN - dataLen,
 "%d, ", tableSeq);
+}
 } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
 "bigint", strlen("bigint"))) {
 dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,

@@ -2787,16 +2799,26 @@ static int createSuperTable(
 char* dataType = superTbl->tags[tagIndex].dataType;

 if (strcasecmp(dataType, "BINARY") == 0) {
-len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex,
-"BINARY", superTbl->tags[tagIndex].dataLen);
+if ((g_args.demo_mode) && (tagIndex == 1)) {
+len += snprintf(tags + len, STRING_LEN - len,
+"loction BINARY(%d), ",
+superTbl->tags[tagIndex].dataLen);
+} else {
+len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ",
+tagIndex, "BINARY", superTbl->tags[tagIndex].dataLen);
+}
 lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3;
 } else if (strcasecmp(dataType, "NCHAR") == 0) {
 len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex,
 "NCHAR", superTbl->tags[tagIndex].dataLen);
 lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3;
 } else if (strcasecmp(dataType, "INT") == 0) {
-len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
+if ((g_args.demo_mode) && (tagIndex == 0)) {
+len += snprintf(tags + len, STRING_LEN - len, "groupId INT, ");
+} else {
+len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
 "INT");
+}
 lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 11;
 } else if (strcasecmp(dataType, "BIGINT") == 0) {
 len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,

@@ -3081,7 +3103,7 @@ static int startMultiThreadCreateChildTable(
 char* cols, int threads, uint64_t tableFrom, int64_t ntables,
 char* db_name, SSuperTable* superTblInfo) {

-pthread_t *pids = malloc(threads * sizeof(pthread_t));
+pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
 threadInfo *infos = calloc(1, threads * sizeof(threadInfo));

 if ((NULL == pids) || (NULL == infos)) {

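This hunk, like the later thread-setup hunks, swaps malloc for calloc so the pthread_t and threadInfo arrays start zeroed and conditionally-filled fields can be tested safely. A minimal illustration of the difference, with threadInfo reduced to a single field:

#include <stdio.h>
#include <stdlib.h>

typedef struct { char *db_name; } threadInfo;   /* reduced stand-in for the real struct */

int main(void) {
    int threads = 4;
    threadInfo *infos = calloc(threads, sizeof(threadInfo));  /* zeroed: every db_name is NULL */
    if (infos == NULL) return 1;
    for (int i = 0; i < threads; i++) {
        if (infos[i].db_name == NULL)   /* safe check; after malloc this read would be indeterminate */
            printf("slot %d unset\n", i);
    }
    free(infos);
    return 0;
}

The diff spells it calloc(1, threads * sizeof(...)), which requests the same zeroed allocation with the multiplication done by the caller instead of the allocator.
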
@@ -3352,9 +3374,9 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
 }

 int columnSize = cJSON_GetArraySize(columns);
-if ((columnSize + 1/* ts */) > MAX_COLUMN_COUNT) {
+if ((columnSize + 1/* ts */) > TSDB_MAX_COLUMNS) {
 errorPrint("%s() LN%d, failed to read json, column size overflow, max column size is %d\n",
-__func__, __LINE__, MAX_COLUMN_COUNT);
+__func__, __LINE__, TSDB_MAX_COLUMNS);
 goto PARSE_OVER;
 }

@@ -3410,9 +3432,9 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
 }
 }

-if ((index + 1 /* ts */) > MAX_COLUMN_COUNT) {
+if ((index + 1 /* ts */) > MAX_NUM_COLUMNS) {
 errorPrint("%s() LN%d, failed to read json, column size overflow, allowed max column size is %d\n",
-__func__, __LINE__, MAX_COLUMN_COUNT);
+__func__, __LINE__, MAX_NUM_COLUMNS);
 goto PARSE_OVER;
 }

@@ -3429,9 +3451,9 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
 }

 int tagSize = cJSON_GetArraySize(tags);
-if (tagSize > MAX_TAG_COUNT) {
+if (tagSize > TSDB_MAX_TAGS) {
 errorPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n",
-__func__, __LINE__, MAX_TAG_COUNT);
+__func__, __LINE__, TSDB_MAX_TAGS);
 goto PARSE_OVER;
 }

@@ -3481,17 +3503,17 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
 }
 }

-if (index > MAX_TAG_COUNT) {
+if (index > TSDB_MAX_TAGS) {
 errorPrint("%s() LN%d, failed to read json, tags size overflow, allowed max tag count is %d\n",
-__func__, __LINE__, MAX_TAG_COUNT);
+__func__, __LINE__, TSDB_MAX_TAGS);
 goto PARSE_OVER;
 }

 superTbls->tagCount = index;

-if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > MAX_COLUMN_COUNT) {
+if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > TSDB_MAX_COLUMNS) {
 errorPrint("%s() LN%d, columns + tags is more than allowed max columns count: %d\n",
-__func__, __LINE__, MAX_COLUMN_COUNT);
+__func__, __LINE__, TSDB_MAX_COLUMNS);
 goto PARSE_OVER;
 }
 ret = true;

@@ -6579,7 +6601,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
 }
 }

-pthread_t *pids = malloc(threads * sizeof(pthread_t));
+pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
 assert(pids != NULL);

 threadInfo *infos = calloc(1, threads * sizeof(threadInfo));

@@ -7238,8 +7260,8 @@ static int queryTestProcess() {

 if ((nSqlCount > 0) && (nConcurrent > 0)) {

-pids = malloc(nConcurrent * nSqlCount * sizeof(pthread_t));
-infos = malloc(nConcurrent * nSqlCount * sizeof(threadInfo));
+pids = calloc(1, nConcurrent * nSqlCount * sizeof(pthread_t));
+infos = calloc(1, nConcurrent * nSqlCount * sizeof(threadInfo));

 if ((NULL == pids) || (NULL == infos)) {
 taos_close(taos);

@@ -7284,8 +7306,8 @@ static int queryTestProcess() {
 //==== create sub threads for query from all sub table of the super table
 if ((g_queryInfo.superQueryInfo.sqlCount > 0)
 && (g_queryInfo.superQueryInfo.threadCnt > 0)) {
-pidsOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * sizeof(pthread_t));
-infosOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo));
+pidsOfSub = calloc(1, g_queryInfo.superQueryInfo.threadCnt * sizeof(pthread_t));
+infosOfSub = calloc(1, g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo));

 if ((NULL == pidsOfSub) || (NULL == infosOfSub)) {
 free(infos);

@@ -7718,11 +7740,13 @@ static int subscribeTestProcess() {
 exit(-1);
 }

-pids = malloc(
+pids = calloc(
+1,
 g_queryInfo.specifiedQueryInfo.sqlCount *
 g_queryInfo.specifiedQueryInfo.concurrent *
 sizeof(pthread_t));
-infos = malloc(
+infos = calloc(
+1,
 g_queryInfo.specifiedQueryInfo.sqlCount *
 g_queryInfo.specifiedQueryInfo.concurrent *
 sizeof(threadInfo));

@@ -7751,11 +7775,13 @@ static int subscribeTestProcess() {
 } else {
 if ((g_queryInfo.superQueryInfo.sqlCount > 0)
 && (g_queryInfo.superQueryInfo.threadCnt > 0)) {
-pidsOfStable = malloc(
+pidsOfStable = calloc(
+1,
 g_queryInfo.superQueryInfo.sqlCount *
 g_queryInfo.superQueryInfo.threadCnt *
 sizeof(pthread_t));
-infosOfStable = malloc(
+infosOfStable = calloc(
+1,
 g_queryInfo.superQueryInfo.sqlCount *
 g_queryInfo.superQueryInfo.threadCnt *
 sizeof(threadInfo));

@@ -7919,7 +7945,7 @@ static void setParaFromArg(){
 g_Dbs.db[0].superTbls[0].maxSqlLen = g_args.max_sql_len;

 g_Dbs.db[0].superTbls[0].columnCount = 0;
-for (int i = 0; i < MAX_NUM_DATATYPE; i++) {
+for (int i = 0; i < MAX_NUM_COLUMNS; i++) {
 if (data_type[i] == NULL) {
 break;
 }

@@ -8072,7 +8098,7 @@ static void queryResult() {
 // query data

 pthread_t read_id;
-threadInfo *pThreadInfo = malloc(sizeof(threadInfo));
+threadInfo *pThreadInfo = calloc(1, sizeof(threadInfo));
 assert(pThreadInfo);
 pThreadInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000
 pThreadInfo->start_table_from = 0;

@@ -24,7 +24,18 @@
 memcpy((_k) + sizeof(uint64_t), (_ori), (_len)); \
 } while (0)

+#define SET_RES_EXT_WINDOW_KEY(_k, _ori, _len, _uid, _buf) \
+do { \
+assert(sizeof(_uid) == sizeof(uint64_t)); \
+*(void **)(_k) = (_buf); \
+*(uint64_t *)((_k) + POINTER_BYTES) = (_uid); \
+memcpy((_k) + POINTER_BYTES + sizeof(uint64_t), (_ori), (_len)); \
+} while (0)
+
+
 #define GET_RES_WINDOW_KEY_LEN(_l) ((_l) + sizeof(uint64_t))
+#define GET_RES_EXT_WINDOW_KEY_LEN(_l) ((_l) + sizeof(uint64_t) + POINTER_BYTES)

 #define GET_QID(_r) (((SQInfo*)((_r)->qinfo))->qId)

 #define curTimeWindowIndex(_winres) ((_winres)->curIndex)

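The extended key prepends the result-row-info buffer pointer ahead of the table uid, so the hash key laid out by SET_RES_EXT_WINDOW_KEY becomes [buffer pointer][uid][original key bytes] and its length grows by POINTER_BYTES. A sketch of the layout arithmetic, assuming POINTER_BYTES is sizeof(void *) as in the TDengine headers (memcpy is used here where the macro performs direct casted stores):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define POINTER_BYTES sizeof(void *)
#define GET_RES_WINDOW_KEY_LEN(_l)     ((_l) + sizeof(uint64_t))
#define GET_RES_EXT_WINDOW_KEY_LEN(_l) ((_l) + sizeof(uint64_t) + POINTER_BYTES)

int main(void) {
    char key[64];
    int dummy;                   /* stands in for a result-row-info buffer */
    void *buf = &dummy;
    uint64_t uid = 42;           /* table id */
    const char ori[] = "win";    /* original window key bytes */
    size_t len = sizeof(ori);

    /* same layout SET_RES_EXT_WINDOW_KEY writes: pointer, then uid, then key bytes */
    memcpy(key, &buf, POINTER_BYTES);
    memcpy(key + POINTER_BYTES, &uid, sizeof(uid));
    memcpy(key + POINTER_BYTES + sizeof(uint64_t), ori, len);

    assert(GET_RES_EXT_WINDOW_KEY_LEN(len) == GET_RES_WINDOW_KEY_LEN(len) + POINTER_BYTES);
    printf("ext key len: %zu\n", (size_t)GET_RES_EXT_WINDOW_KEY_LEN(len));
    return 0;
}

The extra POINTER_BYTES is also why setupQueryRuntimeEnv below enlarges keyBuf by POINTER_BYTES.
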
@@ -433,8 +433,8 @@ static SResultRow* doSetResultOutBufByKey(SQueryRuntimeEnv* pRuntimeEnv, SResult
 existed = (pResultRowInfo->pResult[0] == (*p1));
 pResultRowInfo->curPos = 0;
 } else { // check if current pResultRowInfo contains the existed pResultRow
-SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid);
-int64_t* index = taosHashGet(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
+SET_RES_EXT_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid, pResultRowInfo);
+int64_t* index = taosHashGet(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_EXT_WINDOW_KEY_LEN(bytes));
 if (index != NULL) {
 pResultRowInfo->curPos = (int32_t) *index;
 existed = true;

@@ -471,8 +471,8 @@ static SResultRow* doSetResultOutBufByKey(SQueryRuntimeEnv* pRuntimeEnv, SResult
 pResultRowInfo->pResult[pResultRowInfo->size++] = pResult;

 int64_t index = pResultRowInfo->curPos;
-SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid);
-taosHashPut(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &index, POINTER_BYTES);
+SET_RES_EXT_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid, pResultRowInfo);
+taosHashPut(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_EXT_WINDOW_KEY_LEN(bytes), &index, POINTER_BYTES);
 }

 // too many time window in query

@@ -1790,7 +1790,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf

 pRuntimeEnv->pResultRowHashTable = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
 pRuntimeEnv->pResultRowListSet = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
-pRuntimeEnv->keyBuf = malloc(pQueryAttr->maxTableColumnWidth + sizeof(int64_t));
+pRuntimeEnv->keyBuf = malloc(pQueryAttr->maxTableColumnWidth + sizeof(int64_t) + POINTER_BYTES);
 pRuntimeEnv->pool = initResultRowPool(getResultRowSize(pRuntimeEnv));

 pRuntimeEnv->prevRow = malloc(POINTER_BYTES * pQueryAttr->numOfCols + pQueryAttr->srcRowSize);

@@ -47,9 +47,6 @@ int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg) {
 return terrno;
 }

-char rootDir[TSDB_FILENAME_LEN] = {0};
-sprintf(rootDir, "%s/vnode%d", tsVnodeDir, pVnodeCfg->cfg.vgId);
-
 char vnodeDir[TSDB_FILENAME_LEN] = "\0";
 snprintf(vnodeDir, TSDB_FILENAME_LEN, "/vnode/vnode%d", pVnodeCfg->cfg.vgId);
 if (tfsMkdir(vnodeDir) < 0) {

@@ -63,23 +60,6 @@ int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg) {
 return code;
 }

-// STsdbCfg tsdbCfg = {0};
-// tsdbCfg.tsdbId = pVnodeCfg->cfg.vgId;
-// tsdbCfg.cacheBlockSize = pVnodeCfg->cfg.cacheBlockSize;
-// tsdbCfg.totalBlocks = pVnodeCfg->cfg.totalBlocks;
-// tsdbCfg.daysPerFile = pVnodeCfg->cfg.daysPerFile;
-// tsdbCfg.keep = pVnodeCfg->cfg.daysToKeep;
-// tsdbCfg.keep1 = pVnodeCfg->cfg.daysToKeep1;
-// tsdbCfg.keep2 = pVnodeCfg->cfg.daysToKeep2;
-// tsdbCfg.minRowsPerFileBlock = pVnodeCfg->cfg.minRowsPerFileBlock;
-// tsdbCfg.maxRowsPerFileBlock = pVnodeCfg->cfg.maxRowsPerFileBlock;
-// tsdbCfg.precision = pVnodeCfg->cfg.precision;
-// tsdbCfg.compression = pVnodeCfg->cfg.compression;
-// tsdbCfg.update = pVnodeCfg->cfg.update;
-// tsdbCfg.cacheLastRow = pVnodeCfg->cfg.cacheLastRow;
-
-// char tsdbDir[TSDB_FILENAME_LEN] = {0};
-// sprintf(tsdbDir, "vnode/vnode%d/tsdb", pVnodeCfg->cfg.vgId);
 if (tsdbCreateRepo(pVnodeCfg->cfg.vgId) < 0) {
 vError("vgId:%d, failed to create tsdb in vnode, reason:%s", pVnodeCfg->cfg.vgId, tstrerror(terrno));
 return TSDB_CODE_VND_INIT_FAILED;

@@ -75,7 +75,7 @@
 <dependency>
 <groupId>junit</groupId>
 <artifactId>junit</artifactId>
-<version>4.11</version>
+<version>4.13.1</version>
 <scope>test</scope>
 </dependency>

@@ -87,14 +87,14 @@
 <dependency>
 <groupId>junit</groupId>
 <artifactId>junit</artifactId>
-<version>4.11</version>
+<version>4.13.1</version>
 <scope>test</scope>
 </dependency>

 <dependency>
 <groupId>com.google.guava</groupId>
 <artifactId>guava</artifactId>
-<version>29.0-jre</version>
+<version>30.0-jre</version>
 </dependency>

 <dependency>

@@ -40,7 +40,7 @@
 <dependency>
 <groupId>junit</groupId>
 <artifactId>junit</artifactId>
-<version>4.13</version>
+<version>4.13.1</version>
 <scope>test</scope>
 </dependency>

@@ -4,6 +4,11 @@
 xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
 <modelVersion>4.0.0</modelVersion>

+<properties>
+<maven.compiler.source>1.8</maven.compiler.source>
+<maven.compiler.target>1.8</maven.compiler.target>
+</properties>
+
 <groupId>com.taosdata.demo</groupId>
 <artifactId>connectionPools</artifactId>
 <version>1.0-SNAPSHOT</version>

@@ -46,9 +51,9 @@
 </dependency>
 <!-- log4j -->
 <dependency>
-<groupId>log4j</groupId>
-<artifactId>log4j</artifactId>
-<version>1.2.17</version>
+<groupId>org.apache.logging.log4j</groupId>
+<artifactId>log4j-core</artifactId>
+<version>2.14.1</version>
 </dependency>
 <!-- proxool -->
 <dependency>

@@ -108,4 +113,4 @@
 </plugins>
 </build>

-</project>
+</project>

@@ -5,7 +5,8 @@ import com.taosdata.example.pool.C3p0Builder;
 import com.taosdata.example.pool.DbcpBuilder;
 import com.taosdata.example.pool.DruidPoolBuilder;
 import com.taosdata.example.pool.HikariCpBuilder;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;

 import javax.sql.DataSource;
 import java.sql.Connection;

@@ -17,7 +18,7 @@ import java.util.concurrent.TimeUnit;

 public class ConnectionPoolDemo {

-private static Logger logger = Logger.getLogger(DruidPoolBuilder.class);
+private static Logger logger = LogManager.getLogger(DruidPoolBuilder.class);
 private static final String dbName = "pool_test";

 private static String poolType = "hikari";

@@ -1,6 +1,7 @@
 package com.taosdata.example.common;

-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;

 import javax.sql.DataSource;
 import java.sql.Connection;

@@ -10,7 +11,7 @@ import java.util.Random;

 public class InsertTask implements Runnable {
 private final Random random = new Random(System.currentTimeMillis());
-private static final Logger logger = Logger.getLogger(InsertTask.class);
+private static final Logger logger = LogManager.getLogger(InsertTask.class);

 private final DataSource ds;
 private final String dbName;

@@ -68,7 +68,7 @@
 <dependency>
 <groupId>junit</groupId>
 <artifactId>junit</artifactId>
-<version>4.12</version>
+<version>4.13.1</version>
 <scope>test</scope>
 </dependency>
 </dependencies>

@@ -4,7 +4,7 @@
 <modelVersion>4.0.0</modelVersion>
 <groupId>com.taosdata</groupId>
 <artifactId>taosdemo</artifactId>
-<version>2.0</version>
+<version>2.0.1</version>
 <name>taosdemo</name>
 <packaging>jar</packaging>
 <description>Demo project for TDengine</description>

@@ -81,20 +81,20 @@
 <dependency>
 <groupId>mysql</groupId>
 <artifactId>mysql-connector-java</artifactId>
-<version>5.1.47</version>
+<version>8.0.16</version>
 <scope>test</scope>
 </dependency>
 <!-- log4j -->
 <dependency>
-<groupId>log4j</groupId>
-<artifactId>log4j</artifactId>
-<version>1.2.17</version>
+<groupId>org.apache.logging.log4j</groupId>
+<artifactId>log4j-core</artifactId>
+<version>2.14.1</version>
 </dependency>
 <!-- junit -->
 <dependency>
 <groupId>junit</groupId>
 <artifactId>junit</artifactId>
-<version>4.12</version>
+<version>4.13.1</version>
 <scope>test</scope>
 </dependency>
 <!-- lombok -->

@@ -8,7 +8,8 @@ import com.taosdata.taosdemo.service.SqlExecuteTask;
 import com.taosdata.taosdemo.service.SubTableService;
 import com.taosdata.taosdemo.service.SuperTableService;
 import com.taosdata.taosdemo.service.data.SuperTableMetaGenerator;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;

 import javax.sql.DataSource;
 import java.io.IOException;

@@ -20,7 +21,7 @@ import java.util.Map;

 public class TaosDemoApplication {

-private static final Logger logger = Logger.getLogger(TaosDemoApplication.class);
+private static final Logger logger = LogManager.getLogger(TaosDemoApplication.class);

 public static void main(String[] args) throws IOException {
 // Read configuration parameters

@@ -1,14 +1,15 @@
 package com.taosdata.taosdemo.dao;

 import com.taosdata.taosdemo.utils.SqlSpeller;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
 import org.springframework.jdbc.core.JdbcTemplate;

 import javax.sql.DataSource;
 import java.util.Map;

 public class DatabaseMapperImpl implements DatabaseMapper {
-private static final Logger logger = Logger.getLogger(DatabaseMapperImpl.class);
+private static final Logger logger = LogManager.getLogger(DatabaseMapperImpl.class);

 private final JdbcTemplate jdbcTemplate;

@@ -3,7 +3,8 @@ package com.taosdata.taosdemo.dao;
 import com.taosdata.taosdemo.domain.SubTableMeta;
 import com.taosdata.taosdemo.domain.SubTableValue;
 import com.taosdata.taosdemo.utils.SqlSpeller;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
 import org.springframework.jdbc.core.JdbcTemplate;

 import javax.sql.DataSource;

@@ -11,7 +12,7 @@ import java.util.List;

 public class SubTableMapperImpl implements SubTableMapper {

-private static final Logger logger = Logger.getLogger(SubTableMapperImpl.class);
+private static final Logger logger = LogManager.getLogger(SubTableMapperImpl.class);
 private final JdbcTemplate jdbcTemplate;

 public SubTableMapperImpl(DataSource dataSource) {

@@ -2,13 +2,14 @@ package com.taosdata.taosdemo.dao;

 import com.taosdata.taosdemo.domain.SuperTableMeta;
 import com.taosdata.taosdemo.utils.SqlSpeller;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
 import org.springframework.jdbc.core.JdbcTemplate;

 import javax.sql.DataSource;

 public class SuperTableMapperImpl implements SuperTableMapper {
-private static final Logger logger = Logger.getLogger(SuperTableMapperImpl.class);
+private static final Logger logger = LogManager.getLogger(SuperTableMapperImpl.class);
 private JdbcTemplate jdbcTemplate;

 public SuperTableMapperImpl(DataSource dataSource) {

@@ -3,13 +3,14 @@ package com.taosdata.taosdemo.dao;
 import com.taosdata.taosdemo.domain.TableMeta;
 import com.taosdata.taosdemo.domain.TableValue;
 import com.taosdata.taosdemo.utils.SqlSpeller;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
 import org.springframework.jdbc.core.JdbcTemplate;

 import java.util.List;

 public class TableMapperImpl implements TableMapper {
-private static final Logger logger = Logger.getLogger(TableMapperImpl.class);
+private static final Logger logger = LogManager.getLogger(TableMapperImpl.class);
 private JdbcTemplate template;

 @Override

@@ -8,7 +8,8 @@ import com.taosdata.taosdemo.domain.SubTableValue;
 import com.taosdata.taosdemo.domain.SuperTableMeta;
 import com.taosdata.taosdemo.service.data.SubTableMetaGenerator;
 import com.taosdata.taosdemo.service.data.SubTableValueGenerator;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;

 import javax.sql.DataSource;
 import java.util.ArrayList;

@@ -20,7 +21,7 @@ import java.util.stream.IntStream;
 public class SubTableService extends AbstractService {

 private SubTableMapper mapper;
-private static final Logger logger = Logger.getLogger(SubTableService.class);
+private static final Logger logger = LogManager.getLogger(SubTableService.class);

 public SubTableService(DataSource datasource) {
 this.mapper = new SubTableMapperImpl(datasource);