Merge branch 'master' into hotfix/TD-5582
commit 63e5c2147b
@@ -777,7 +777,7 @@ function is_version_compatible() {
     if [ -f ${script_dir}/driver/vercomp.txt ]; then
         min_compatible_version=`cat ${script_dir}/driver/vercomp.txt`
     else
-        min_compatible_version=$(${script_dir}/bin/tqd -V | head -1 | cut -d ' ' -f 5)
+        min_compatible_version=$(${script_dir}/bin/taosd -V | head -1 | cut -d ' ' -f 5)
     fi

     vercomp $curr_version $min_compatible_version

@@ -746,7 +746,7 @@ function is_version_compatible() {
     if [ -f ${script_dir}/driver/vercomp.txt ]; then
         min_compatible_version=`cat ${script_dir}/driver/vercomp.txt`
     else
-        min_compatible_version=$(${script_dir}/bin/tqd -V | head -1 | cut -d ' ' -f 5)
+        min_compatible_version=$(${script_dir}/bin/powerd -V | head -1 | cut -d ' ' -f 5)
     fi

     vercomp $curr_version $min_compatible_version

@@ -41,10 +41,10 @@ fi

 if [ "$osType" != "Darwin" ]; then
     if [ "$pagMode" == "lite" ]; then
-        #strip ${build_dir}/bin/taosd
+        #strip ${build_dir}/bin/taosd
         strip ${build_dir}/bin/taos
         bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh"
-    else
+    else
         bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo \
             ${script_dir}/remove_client.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh ${script_dir}/taosd-dump-cfg.gdb"
     fi

@@ -139,7 +139,7 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
     cp -r ${examples_dir}/C# ${install_dir}/examples
 fi
 # Copy driver
-mkdir -p ${install_dir}/driver
+mkdir -p ${install_dir}/driver
 cp ${lib_files} ${install_dir}/driver

 # Copy connector

@@ -168,7 +168,7 @@ fi

 # exit 1

-cd ${release_dir}
+cd ${release_dir}

 # install_dir has been distinguishes cluster from edege, so comments this code
 pkg_name=${install_dir}-${osType}-${cpuType}

@@ -1964,6 +1964,7 @@ static void freeQueryInfoImpl(SQueryInfo* pQueryInfo) {
   pQueryInfo->tsBuf = tsBufDestroy(pQueryInfo->tsBuf);

   tfree(pQueryInfo->fillVal);
+  pQueryInfo->fillType = 0;
   tfree(pQueryInfo->buf);
 }

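The added line resets the fill mode alongside freeing fillVal, so a recycled SQueryInfo does not carry a stale fill setting. A minimal sketch of this free-and-reset pattern; the tfree stand-in below is an assumption about the real macro's behaviour (free the block, then NULL the pointer):

    #include <stdlib.h>

    /* stand-in for TDengine's tfree(): free the block and clear the pointer (assumption) */
    #define tfree(p) do { free(p); (p) = NULL; } while (0)

    typedef struct {
        void *fillVal;   /* heap-allocated fill values */
        int   fillType;  /* fill mode; 0 treated here as "no fill" (assumption) */
    } FillInfoSketch;

    static void resetFillInfo(FillInfoSketch *q) {
        tfree(q->fillVal);   /* release the buffer and NULL the pointer */
        q->fillType = 0;     /* reset the mode too, as the added line does */
    }

    int main(void) {
        FillInfoSketch q = { malloc(16), 2 };
        resetFillInfo(&q);
        return (q.fillVal == NULL && q.fillType == 0) ? 0 : 1;
    }
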
@@ -95,7 +95,6 @@ extern char configDir[];
 #define MAX_SUPER_TABLE_COUNT 200

 #define MAX_QUERY_SQL_COUNT 100
-#define MAX_QUERY_SQL_LENGTH BUFFER_SIZE

 #define MAX_DATABASE_COUNT 256
 #define INPUT_BUF_LEN 256

@@ -382,7 +381,7 @@ typedef struct SpecifiedQueryInfo_S {
   uint64_t queryTimes;
   bool subscribeRestart;
   int subscribeKeepProgress;
-  char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
+  char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE+1];
   char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
   int resubAfterConsume[MAX_QUERY_SQL_COUNT];
   int endAfterConsume[MAX_QUERY_SQL_COUNT];

@@ -405,7 +404,7 @@ typedef struct SuperQueryInfo_S {
   int64_t childTblCount;
   char childTblPrefix[TSDB_TABLE_NAME_LEN - 20]; // 20 characters reserved for seq
   int sqlCount;
-  char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
+  char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE+1];
   char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
   int resubAfterConsume;
   int endAfterConsume;

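Both query structs now size each stored statement with BUFFER_SIZE directly, since the MAX_QUERY_SQL_LENGTH alias (itself defined as BUFFER_SIZE) is removed above; the +1 leaves room for the terminating '\0' after a full-length statement. A small sketch of the sizing, with illustrative constant values that are not taosdemo's real ones:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define BUFFER_SIZE         4096   /* illustrative; taosdemo defines the real value */
    #define MAX_QUERY_SQL_COUNT 100
    #define MAX_FILE_NAME_LEN   256    /* illustrative */

    typedef struct {
        uint64_t queryTimes;
        bool     subscribeRestart;
        int      subscribeKeepProgress;
        char     sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE + 1];      /* +1: room for '\0' */
        char     result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
    } SpecifiedQueryInfoSketch;

    static SpecifiedQueryInfoSketch info;   /* static, so it is zero-initialized */

    int main(void) {
        /* a statement of exactly BUFFER_SIZE bytes still fits, plus its terminator */
        memset(info.sql[0], 'x', BUFFER_SIZE);
        info.sql[0][BUFFER_SIZE] = '\0';
        printf("slot size: %zu, stored length: %zu\n",
               sizeof(info.sql[0]), strlen(info.sql[0]));
        return 0;
    }
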
@@ -1252,14 +1251,14 @@ static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) {

   // fetch the records row by row
   while((row = taos_fetch_row(res))) {
-    if (totalLen >= 100*1024*1024 - 32000) {
+    if (totalLen >= (100*1024*1024 - HEAD_BUFF_LEN*2)) {
       if (strlen(pThreadInfo->filePath) > 0)
         appendResultBufToFile(databuf, pThreadInfo);
       totalLen = 0;
       memset(databuf, 0, 100*1024*1024);
     }
     num_rows++;
-    char temp[16000] = {0};
+    char temp[HEAD_BUFF_LEN] = {0};
     int len = taos_print_row(temp, row, fields, num_fields);
     len += sprintf(temp + len, "\n");
     //printf("query result:%s\n", temp);

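The flush threshold is now expressed with HEAD_BUFF_LEN, the same constant that sizes the per-row temp buffer, instead of the unrelated literal 32000, so the guard and the row buffer cannot drift apart. A hedged sketch of the accumulate-and-flush pattern; the constant values and the flush target here are illustrative, not taosdemo's:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define HEAD_BUFF_LEN  (16 * 1024)           /* illustrative per-row buffer size */
    #define RESULT_BUF_LEN (100 * 1024 * 1024)   /* 100 MB accumulator, as in the diff */

    /* Append one formatted row; flush first when fewer than 2*HEAD_BUFF_LEN bytes
     * remain, so a row of up to HEAD_BUFF_LEN bytes can never overflow the buffer. */
    static void appendRow(char *databuf, size_t *totalLen, const char *rowText) {
        if (*totalLen >= RESULT_BUF_LEN - HEAD_BUFF_LEN * 2) {
            fwrite(databuf, 1, *totalLen, stdout);   /* stand-in for appendResultBufToFile() */
            *totalLen = 0;
            memset(databuf, 0, RESULT_BUF_LEN);
        }
        char temp[HEAD_BUFF_LEN] = {0};
        int len = snprintf(temp, sizeof(temp), "%s\n", rowText);
        if (len >= (int)sizeof(temp)) len = (int)sizeof(temp) - 1;   /* snprintf may truncate */
        memcpy(databuf + *totalLen, temp, (size_t)len);
        *totalLen += (size_t)len;
    }

    int main(void) {
        char *databuf = calloc(1, RESULT_BUF_LEN);
        if (databuf == NULL) return 1;
        size_t totalLen = 0;
        appendRow(databuf, &totalLen, "1626000000000 | 23.5 | 219 | 0.31");
        fwrite(databuf, 1, totalLen, stdout);
        free(databuf);
        return 0;
    }
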
@@ -2164,15 +2163,15 @@ static void printfDbInfoForQueryToFile(
 }

 static void printfQuerySystemInfo(TAOS * taos) {
-  char filename[MAX_QUERY_SQL_LENGTH+1] = {0};
-  char buffer[MAX_QUERY_SQL_LENGTH+1] = {0};
+  char filename[BUFFER_SIZE+1] = {0};
+  char buffer[BUFFER_SIZE+1] = {0};
   TAOS_RES* res;

   time_t t;
   struct tm* lt;
   time(&t);
   lt = localtime(&t);
-  snprintf(filename, MAX_QUERY_SQL_LENGTH, "querySystemInfo-%d-%d-%d %d:%d:%d",
+  snprintf(filename, BUFFER_SIZE, "querySystemInfo-%d-%d-%d %d:%d:%d",
       lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min,
       lt->tm_sec);

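The same substitution applies to the filename and buffer arrays; snprintf's size argument already accounts for the terminating '\0', so passing BUFFER_SIZE while the arrays hold BUFFER_SIZE+1 bytes is safely conservative. A small sketch, assuming an illustrative BUFFER_SIZE value:

    #include <stdio.h>

    #define BUFFER_SIZE 4096   /* illustrative */

    int main(void) {
        char filename[BUFFER_SIZE + 1] = {0};
        /* writes at most BUFFER_SIZE-1 characters plus '\0', never past the array */
        snprintf(filename, BUFFER_SIZE, "querySystemInfo-%d-%d-%d", 2021, 7, 14);
        puts(filename);
        return 0;
    }
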
@@ -2204,12 +2203,12 @@ static void printfQuerySystemInfo(TAOS * taos) {
     printfDbInfoForQueryToFile(filename, dbInfos[i], i);

     // show db.vgroups
-    snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.vgroups;", dbInfos[i]->name);
+    snprintf(buffer, BUFFER_SIZE, "show %s.vgroups;", dbInfos[i]->name);
     res = taos_query(taos, buffer);
     xDumpResultToFile(filename, res);

     // show db.stables
-    snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.stables;", dbInfos[i]->name);
+    snprintf(buffer, BUFFER_SIZE, "show %s.stables;", dbInfos[i]->name);
     res = taos_query(taos, buffer);
     xDumpResultToFile(filename, res);
     free(dbInfos[i]);

@@ -4529,7 +4528,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
         goto PARSE_OVER;
       }
       tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j],
-          sqlStr->valuestring, MAX_QUERY_SQL_LENGTH);
+          sqlStr->valuestring, BUFFER_SIZE);

       // default value is -1, which mean infinite loop
       g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1;

@@ -4751,7 +4750,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
         goto PARSE_OVER;
       }
       tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring,
-          MAX_QUERY_SQL_LENGTH);
+          BUFFER_SIZE);

       cJSON *result = cJSON_GetObjectItem(sql, "result");
       if (result != NULL && result->type == cJSON_String

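In both parsers the SQL text from the query JSON is copied into the fixed sql slots with a bounded, NUL-terminating copy (TDengine's tstrncpy), now capped at BUFFER_SIZE. A sketch of the same idea using plain cJSON and strncpy; the JSON layout and constant values are assumptions for illustration:

    #include <stdio.h>
    #include <string.h>
    #include "cJSON.h"

    #define BUFFER_SIZE         4096   /* illustrative */
    #define MAX_QUERY_SQL_COUNT 100

    static char g_sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE + 1];

    int main(void) {
        const char *json = "{\"sqls\":[{\"sql\":\"select count(*) from test.meters;\"}]}";
        cJSON *root = cJSON_Parse(json);
        cJSON *sqls = root ? cJSON_GetObjectItem(root, "sqls") : NULL;
        int n = sqls ? cJSON_GetArraySize(sqls) : 0;

        for (int j = 0; j < n && j < MAX_QUERY_SQL_COUNT; j++) {
            cJSON *sqlStr = cJSON_GetObjectItem(cJSON_GetArrayItem(sqls, j), "sql");
            if (sqlStr == NULL || sqlStr->type != cJSON_String) continue;
            /* bounded copy with a guaranteed terminator, in the spirit of tstrncpy(..., BUFFER_SIZE) */
            strncpy(g_sql[j], sqlStr->valuestring, BUFFER_SIZE);
            g_sql[j][BUFFER_SIZE] = '\0';
            printf("sql[%d] = %s\n", j, g_sql[j]);
        }

        cJSON_Delete(root);
        return 0;
    }
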
@@ -7404,14 +7403,14 @@ static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) {

   tstrncpy(outSql, inSql, pos - inSql + 1);
   //printf("1: %s\n", outSql);
-  strncat(outSql, subTblName, MAX_QUERY_SQL_LENGTH - 1);
+  strncat(outSql, subTblName, BUFFER_SIZE - 1);
   //printf("2: %s\n", outSql);
-  strncat(outSql, pos+strlen(sourceString), MAX_QUERY_SQL_LENGTH - 1);
+  strncat(outSql, pos+strlen(sourceString), BUFFER_SIZE - 1);
   //printf("3: %s\n", outSql);
 }

 static void *superTableQuery(void *sarg) {
-  char sqlstr[MAX_QUERY_SQL_LENGTH];
+  char sqlstr[BUFFER_SIZE];
   threadInfo *pThreadInfo = (threadInfo *)sarg;

   setThreadName("superTableQuery");

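One nuance worth keeping in mind: strncat's third argument caps how many characters are appended, not the total size of the destination, so swapping MAX_QUERY_SQL_LENGTH for BUFFER_SIZE preserves the old behaviour only as long as outSql itself is at least BUFFER_SIZE bytes. A hedged sketch of the table-name substitution; the placeholder string and constant are assumptions, and this version also subtracts the current length before appending:

    #include <stdio.h>
    #include <string.h>

    #define BUFFER_SIZE 4096   /* illustrative */

    /* Replace an assumed placeholder "xxxx" in inSql with a concrete child-table
     * name, mirroring the shape of taosdemo's replaceChildTblName(). */
    static void replaceTblName(const char *inSql, char *outSql, const char *tblName) {
        const char *placeholder = "xxxx";
        const char *pos = strstr(inSql, placeholder);
        if (pos == NULL) { snprintf(outSql, BUFFER_SIZE, "%s", inSql); return; }

        size_t prefix = (size_t)(pos - inSql);
        memcpy(outSql, inSql, prefix);   /* keep everything before the placeholder */
        outSql[prefix] = '\0';

        /* the size argument bounds what is appended, not the whole buffer,
         * so outSql must itself be at least BUFFER_SIZE bytes */
        strncat(outSql, tblName, BUFFER_SIZE - 1 - strlen(outSql));
        strncat(outSql, pos + strlen(placeholder), BUFFER_SIZE - 1 - strlen(outSql));
    }

    int main(void) {
        char out[BUFFER_SIZE] = {0};
        replaceTblName("select * from xxxx;", out, "d1001");
        puts(out);   /* select * from d1001; */
        return 0;
    }
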
@@ -7714,7 +7713,7 @@ static TAOS_SUB* subscribeImpl(

 static void *superSubscribe(void *sarg) {
   threadInfo *pThreadInfo = (threadInfo *)sarg;
-  char subSqlstr[MAX_QUERY_SQL_LENGTH];
+  char subSqlstr[BUFFER_SIZE];
   TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0};
   uint64_t tsubSeq;

@@ -1,5 +1,32 @@
 def pre_test(){

+    sh '''
+    sudo rmtaos||echo 'no taosd installed'
+    '''
+    sh '''
+    cd ${WKC}
+    git reset --hard
+    git checkout $BRANCH_NAME
+    git pull
+    git submodule update
+    cd ${WK}
+    git reset --hard
+    git checkout $BRANCH_NAME
+    git pull
+    export TZ=Asia/Harbin
+    date
+    rm -rf ${WK}/debug
+    mkdir debug
+    cd debug
+    cmake .. > /dev/null
+    make > /dev/null
+    make install > /dev/null
+    pip3 install ${WKC}/src/connector/python
+    '''
+    return 1
+}
+def pre_test_p(){
+
     sh '''
     sudo rmtaos||echo 'no taosd installed'
     '''

@@ -39,7 +66,7 @@ pipeline {
     stage('pytest') {
       agent{label 'slad1'}
       steps {
-        pre_test()
+        pre_test_p()
         sh '''
         cd ${WKC}/tests
         find pytest -name '*'sql|xargs rm -rf

@@ -48,7 +75,7 @@ pipeline {
       }
     }
     stage('test_b1') {
-      agent{label 'master'}
+      agent{label 'slad2'}
       steps {
         pre_test()

@@ -62,7 +89,7 @@ pipeline {
     }

     stage('test_crash_gen') {
-      agent{label "slad2"}
+      agent{label "slad3"}
       steps {
         pre_test()
         sh '''

@@ -92,7 +119,7 @@ pipeline {
       }

       sh'''
-      systemctl start taosd
+      nohup taosd >/dev/null &
       sleep 10
       '''
       catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {

@@ -110,16 +137,8 @@ pipeline {
       catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
         sh '''
         cd ${WKC}/tests/examples/JDBC/JDBCDemo/
-        mvn clean package assembly:single -DskipTests >/dev/null
-        java -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
-        '''
-      }
-      catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
-        sh '''
-        cd ${WKC}/src/connector/jdbc
-        mvn clean package -Dmaven.test.skip=true >/dev/null
-        cd ${WKC}/tests/examples/JDBC/JDBCDemo/
-        java --class-path=../../../../src/connector/jdbc/target:$JAVA_HOME/jre/lib/ext -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
+        mvn clean package >/dev/null
+        java -jar target/JdbcRestfulDemo-jar-with-dependencies.jar
         '''
       }
       catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {

@@ -136,7 +155,7 @@ pipeline {
         '''
       }
       sh '''
-      systemctl stop taosd
+      pkill -9 taosd || echo 1
       cd ${WKC}/tests
       ./test-all.sh b2
       date

@@ -149,7 +168,7 @@ pipeline {
     }

     stage('test_valgrind') {
-      agent{label "slad3"}
+      agent{label "slad4"}

       steps {
         pre_test()

@@ -228,11 +247,11 @@ pipeline {
     }

   }
-  post {
+  post {
     success {
       emailext (
-        subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
-        body: '''<!DOCTYPE html>
+        subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' SUCCESS",
+        body: """<!DOCTYPE html>
         <html>
         <head>
         <meta charset="UTF-8">

@@ -248,29 +267,29 @@ pipeline {
         <td>
           <ul>
           <div style="font-size:18px">
-            <li>构建名称>>分支:${PROJECT_NAME}</li>
+            <li>构建名称>>分支:${env.BRANCH_NAME}</li>
             <li>构建结果:<span style="color:green"> Successful </span></li>
             <li>构建编号:${BUILD_NUMBER}</li>
-            <li>触发用户:${CAUSE}</li>
-            <li>变更概要:${CHANGES}</li>
+            <li>触发用户:${env.CHANGE_AUTHOR}</li>
+            <li>提交信息:${env.CHANGE_TITLE}</li>
             <li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
             <li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
-            <li>变更集:${JELLY_SCRIPT}</li>
+
           </div>
           </ul>
         </td>
         </tr>
         </table></font>
     </body>
-    </html>''',
+    </html>""",
     to: "yqliu@taosdata.com,pxiao@taosdata.com",
     from: "support@taosdata.com"
   )
 }
 failure {
   emailext (
-    subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
-    body: '''<!DOCTYPE html>
+    subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' FAIL",
+    body: """<!DOCTYPE html>
     <html>
     <head>
     <meta charset="UTF-8">

@@ -286,21 +305,21 @@ pipeline {
         <td>
           <ul>
           <div style="font-size:18px">
-            <li>构建名称>>分支:${PROJECT_NAME}</li>
-            <li>构建结果:<span style="color:green"> Successful </span></li>
+            <li>构建名称>>分支:${env.BRANCH_NAME}</li>
+            <li>构建结果:<span style="color:red"> Failure </span></li>
             <li>构建编号:${BUILD_NUMBER}</li>
-            <li>触发用户:${CAUSE}</li>
-            <li>变更概要:${CHANGES}</li>
+            <li>触发用户:${env.CHANGE_AUTHOR}</li>
+            <li>提交信息:${env.CHANGE_TITLE}</li>
             <li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
             <li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
-            <li>变更集:${JELLY_SCRIPT}</li>
+
          </div>
          </ul>
        </td>
        </tr>
        </table></font>
     </body>
-    </html>''',
+    </html>""",
     to: "yqliu@taosdata.com,pxiao@taosdata.com",
     from: "support@taosdata.com"
   )

@@ -1,5 +1,32 @@
 def pre_test(){

+    sh '''
+    sudo rmtaos||echo 'no taosd installed'
+    '''
+    sh '''
+    cd ${WKC}
+    git reset --hard
+    git checkout $BRANCH_NAME
+    git pull
+    git submodule update
+    cd ${WK}
+    git reset --hard
+    git checkout $BRANCH_NAME
+    git pull
+    export TZ=Asia/Harbin
+    date
+    rm -rf ${WK}/debug
+    mkdir debug
+    cd debug
+    cmake .. > /dev/null
+    make > /dev/null
+    make install > /dev/null
+    pip3 install ${WKC}/src/connector/python/ || echo 0
+    '''
+    return 1
+}
+def pre_test_p(){
+
     sh '''
     sudo rmtaos||echo 'no taosd installed'
     '''

@@ -39,7 +66,7 @@ pipeline {
     stage('pytest') {
       agent{label 'slam1'}
       steps {
-        pre_test()
+        pre_test_p()
         sh '''
         cd ${WKC}/tests
         find pytest -name '*'sql|xargs rm -rf

@@ -92,7 +119,7 @@ pipeline {
       }

       sh'''
-      systemctl start taosd
+      nohup taosd >/dev/null &
       sleep 10
       '''
       catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {

@@ -136,7 +163,7 @@ pipeline {
         '''
       }
       sh '''
-      systemctl stop taosd
+      pkill -9 taosd || echo 1
       cd ${WKC}/tests
       ./test-all.sh b2
       date

@@ -808,6 +808,11 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) {
           break;
         case TSDB_DATA_TYPE_BINARY:
         case TSDB_DATA_TYPE_NCHAR:
+          if (length[i] < 0 || length[i] > 1 << 20) {
+            fprintf(stderr, "Invalid length(%d) of BINARY or NCHAR\n", length[i]);
+            exit(-1);
+          }
+
           memset(value, 0, MAX_QUERY_VALUE_LEN);
           memcpy(value, row[i], length[i]);
           value[length[i]] = 0;

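The new guard in the sim shell rejects BINARY/NCHAR column lengths that are negative or larger than 1 MB (1 << 20) before the cell is copied into the fixed value buffer, turning a potential overflow into a clean error exit. A minimal sketch of the validate-then-copy pattern; the buffer size here is an assumption:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define MAX_QUERY_VALUE_LEN (1 << 20)   /* illustrative cap: 1 MB, matching the check */

    /* Copy one variable-length cell into `value`, refusing lengths that are
     * negative or larger than the buffer, as the added guard does. */
    static void copyCell(char *value, const char *cell, int length) {
        if (length < 0 || length > MAX_QUERY_VALUE_LEN) {
            fprintf(stderr, "Invalid length(%d) of BINARY or NCHAR\n", length);
            exit(-1);
        }
        memset(value, 0, MAX_QUERY_VALUE_LEN + 1);
        memcpy(value, cell, (size_t)length);
        value[length] = 0;
    }

    int main(void) {
        static char value[MAX_QUERY_VALUE_LEN + 1];
        copyCell(value, "hello", 5);
        puts(value);
        return 0;
    }
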