Merge pull request #29389 from taosdata/merge/mainto3.0
merge: from main to 3.0 branch
commit 3ae93c9c90
@@ -0,0 +1,66 @@
name: TDengine Build

on:
  pull_request:
    branches:
      - 'main'
      - '3.0'
      - '3.1'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  build:
    runs-on: ubuntu-latest
    name: Run unit tests

    steps:
      - name: Checkout the repository
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: 1.18

      - name: Install system dependencies
        run: |
          sudo apt update -y
          sudo apt install -y build-essential cmake \
            libgeos-dev libjansson-dev libsnappy-dev liblzma-dev libz-dev \
            zlib1g pkg-config libssl-dev gawk

      - name: Build and install TDengine
        run: |
          mkdir debug && cd debug
          cmake .. -DBUILD_HTTP=false -DBUILD_JDBC=false \
            -DBUILD_TOOLS=true -DBUILD_TEST=off \
            -DBUILD_KEEPER=true -DBUILD_DEPENDENCY_TESTS=false
          make -j 4
          sudo make install
          which taosd
          which taosadapter
          which taoskeeper

      - name: Start taosd
        run: |
          cp /etc/taos/taos.cfg ./
          sudo echo "supportVnodes 256" >> taos.cfg
          nohup sudo taosd -c taos.cfg &

      - name: Start taosadapter
        run: nohup sudo taosadapter &

      - name: Run tests with taosBenchmark
        run: |
          taosBenchmark -t 10 -n 10 -y
          taos -s "select count(*) from test.meters"

      - name: Clean up
        if: always()
        run: |
          if pgrep taosd; then sudo pkill taosd; fi
          if pgrep taosadapter; then sudo pkill taosadapter; fi
@@ -10,6 +10,7 @@
</p>
<p>

[](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml)
[](https://coveralls.io/github/taosdata/TDengine?branch=3.0)
[](https://bestpractices.coreinfrastructure.org/projects/4201)
<br />
@@ -148,6 +148,7 @@ When using time windows, note:
- The window width of the aggregation period is specified by the keyword INTERVAL, with the shortest interval being 10 milliseconds (10a); it also supports an offset (the offset must be less than the interval), which is the offset of the time window division relative to "UTC moment 0". The SLIDING clause specifies the forward increment of the aggregation period, i.e., the duration by which each window slides forward.
- When using the INTERVAL statement, unless in very special cases, configure the timezone parameter in the taos.cfg configuration files of both the client and the server to the same value, to avoid frequent cross-time-zone conversions by time processing functions, which can severely impact performance.
- The returned results have a strictly monotonically increasing time series.
- When using AUTO as the window offset, if the WHERE time condition is complex, such as multiple AND/OR/IN combinations, AUTO may not take effect. In such cases, you can manually specify the window offset to resolve the issue.
- When using AUTO as the window offset, if the window width unit is d (day), n (month), w (week), or y (year), for example INTERVAL(1d, AUTO) or INTERVAL(3w, AUTO), the TSMA optimization cannot take effect. If a TSMA has been manually created on the target table, the statement will report an error and exit; in this case, you can explicitly specify the SKIP_TSMA hint or not use AUTO as the window offset.
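To make the INTERVAL, offset, and SLIDING rules above concrete, here is a minimal sketch. It assumes the demo `test.meters` supertable created by taosBenchmark (the same data set the CI workflow above queries); the window width, offset, and sliding step are illustrative only.

```sql
-- 10-minute windows that slide forward every 5 minutes.
-- The 1m offset shifts the window boundaries relative to "UTC moment 0"
-- and must be smaller than the interval.
SELECT _wstart, COUNT(*), AVG(voltage)
FROM test.meters
WHERE ts >= NOW - 1d
INTERVAL(10m, 1m) SLIDING(5m);
```

Each result row carries the window start time `_wstart`, and the rows come back in strictly increasing time order, matching the note above.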
### State Window

@@ -251,7 +251,7 @@ The valid value of charset is UTF-8.
|minimalLogDirGB | |Not dynamically modifiable |Stop writing logs when the available space on the disk holding the log directory is smaller than this value, in GB; default 1|
|numOfLogLines | |Dynamically modifiable, effective immediately |Maximum number of lines allowed in a single log file; default 10,000,000|
|asyncLog | |Dynamically modifiable, effective immediately |Log write mode, 0: synchronous, 1: asynchronous; default 1|
|logKeepDays | |Dynamically modifiable, effective immediately |Maximum retention time of log files, in days; the default value 0 means unlimited retention: the log file is not renamed and no new log file is rolled out, although its content may still roll over depending on the log file size setting; when set to a value greater than 0, the log file is renamed to taosdlog.yyy once it reaches the configured size limit, where yyy is the timestamp of the file's last modification, and a new log file is rolled out|
|logKeepDays | |Dynamically modifiable, effective immediately |Maximum retention time of log files, in days; default 0. A value less than or equal to 0 means only two log files alternate to store logs, and log files beyond those two are deleted; when set to a value greater than 0, the log file is renamed to taosdlog.yyy once it reaches the configured size limit, where yyy is the timestamp of the file's last modification, and a new log file is rolled out|
|slowLogThreshold|Since 3.3.3.0 |Dynamically modifiable, effective immediately |Slow-query threshold; a query whose duration is greater than or equal to this value is considered slow, in seconds; default 3|
|slowLogMaxLen |Since 3.3.3.0 |Dynamically modifiable, effective immediately |Maximum length of a slow-query log entry, range 1-16384; default 4096|
|slowLogScope |Since 3.3.3.0 |Dynamically modifiable, effective immediately |Type of slow queries to record, range ALL/QUERY/INSERT/OTHERS/NONE; default QUERY|
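Several of these parameters are marked as dynamically modifiable. A hedged sketch of changing them at runtime, assuming the standard `ALTER DNODE` / `ALTER ALL DNODES` syntax applies to these logging options and using purely illustrative values:

```sql
-- Record only slow queries of type QUERY, and treat anything taking 5 seconds
-- or more as slow, on every dnode (takes effect immediately, no restart).
ALTER ALL DNODES 'slowLogScope' 'QUERY';
ALTER ALL DNODES 'slowLogThreshold' '5';

-- Keep rolled log files for 30 days on dnode 1 only.
ALTER DNODE 1 'logKeepDays' '30';
```

Parameters listed as not dynamically modifiable (such as minimalLogDirGB) still have to be set in taos.cfg before taosd starts.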
@@ -14,7 +14,7 @@ taosAdapter is a companion tool for TDengine that acts as a bridge between the TDengine cluster and applications.

taosAdapter provides the following features:

- RESTful interface
- WebSocket/RESTful interface
- InfluxDB v1 write API compatibility
- OpenTSDB JSON and telnet format write compatibility
- Seamless connection to Telegraf
@@ -138,6 +138,7 @@ SELECT COUNT(*) FROM meters WHERE _rowts - voltage > 1000000;
- The window width of the aggregation period is specified by the keyword INTERVAL, with the shortest interval being 10 milliseconds (10a); it also supports an offset (the offset must be less than the interval), which is the offset of the time window division relative to "UTC moment 0". The SLIDING clause specifies the forward increment of the aggregation period, i.e., the duration by which each window slides forward.
- When using the INTERVAL statement, unless in very special cases, configure the timezone parameter in the taos.cfg configuration files of both the client and the server to the same value, to avoid frequent cross-time-zone conversions by time processing functions, which can severely impact performance.
- The returned results have a strictly monotonically increasing time series.
- When using AUTO as the window offset, if the WHERE time condition is complex, such as multiple AND/OR/IN combinations, AUTO may not take effect; in that case, you can manually specify the window offset to resolve the issue.
- When using AUTO as the window offset, if the window width unit is d (day), n (month), w (week), or y (year), for example INTERVAL(1d, AUTO) or INTERVAL(3w, AUTO), the TSMA optimization cannot take effect. If a TSMA has been manually created on the target table, the statement will report an error and exit; in this case, you can explicitly specify the SKIP_TSMA hint or not use AUTO as the window offset.
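The last two notes concern the AUTO offset. A hedged sketch of the workaround they describe, reusing the illustrative `meters` table and assuming the SKIP_TSMA hint named in the note is written with the usual `/*+ ... */` comment syntax placed right after SELECT:

```sql
-- INTERVAL(1d, AUTO) cannot use TSMA optimization; if a TSMA exists on meters,
-- either skip it explicitly with the SKIP_TSMA hint ...
SELECT /*+ skip_tsma() */ _wstart, COUNT(*)
FROM meters
INTERVAL(1d, AUTO);

-- ... or specify the window offset explicitly instead of AUTO.
SELECT _wstart, COUNT(*)
FROM meters
INTERVAL(1d, 8h);
```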
### State Window

@@ -290,7 +290,6 @@ extern int tsStreamAggCnt;
extern bool tsFilterScalarMode;
extern int32_t tsMaxStreamBackendCache;
extern int32_t tsPQSortMemThreshold;
extern int32_t tsResolveFQDNRetryTime;
extern bool tsStreamCoverage;
extern int8_t tsS3EpNum;
@@ -326,7 +326,6 @@ char tsUdfdLdLibPath[512] = "";
bool tsDisableStream = false;
int64_t tsStreamBufferSize = 128 * 1024 * 1024;
bool tsFilterScalarMode = false;
int tsResolveFQDNRetryTime = 100; // seconds
int tsStreamAggCnt = 100000;
bool tsStreamCoverage = false;
@@ -953,7 +952,6 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
  TAOS_CHECK_RETURN(cfgAddBool(pCfg, "filterScalarMode", tsFilterScalarMode, CFG_SCOPE_SERVER, CFG_DYN_NONE,CFG_CATEGORY_LOCAL));
  TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "maxStreamBackendCache", tsMaxStreamBackendCache, 16, 1024, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER_LAZY,CFG_CATEGORY_LOCAL));
  TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "pqSortMemThreshold", tsPQSortMemThreshold, 1, 10240, CFG_SCOPE_SERVER, CFG_DYN_NONE,CFG_CATEGORY_LOCAL));
  TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "resolveFQDNRetryTime", tsResolveFQDNRetryTime, 1, 10240, CFG_SCOPE_SERVER, CFG_DYN_NONE,CFG_CATEGORY_GLOBAL));

  TAOS_CHECK_RETURN(cfgAddString(pCfg, "s3Accesskey", tsS3AccessKey[0], CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER_LAZY,CFG_CATEGORY_GLOBAL));
  TAOS_CHECK_RETURN(cfgAddString(pCfg, "s3Endpoint", tsS3Endpoint[0], CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER_LAZY,CFG_CATEGORY_GLOBAL));
@@ -1817,9 +1815,6 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
  TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "pqSortMemThreshold");
  tsPQSortMemThreshold = pItem->i32;

  TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "resolveFQDNRetryTime");
  tsResolveFQDNRetryTime = pItem->i32;

  TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "minDiskFreeSize");
  tsMinDiskFreeSize = pItem->i64;
@@ -2461,7 +2456,6 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) {
    {"randErrorDivisor", &tsRandErrDivisor},
    {"randErrorScope", &tsRandErrScope},
    {"syncLogBufferMemoryAllowed", &tsLogBufferMemoryAllowed},
    {"resolveFQDNRetryTime", &tsResolveFQDNRetryTime},
    {"syncHeartbeatInterval", &tsHeartbeatInterval},
    {"syncHeartbeatTimeout", &tsHeartbeatTimeout},
    {"syncSnapReplMaxWaitN", &tsSnapReplMaxWaitN},
@@ -4466,6 +4466,19 @@ _end:
  return code;
}

static bool isWinResult(SSessionKey* pKey, SSHashObj* pSeUpdate, SSHashObj* pResults) {
  SSessionKey checkKey = {0};
  getSessionHashKey(pKey, &checkKey);
  if (tSimpleHashGet(pSeUpdate, &checkKey, sizeof(SSessionKey)) != NULL) {
    return true;
  }

  if (tSimpleHashGet(pResults, &checkKey, sizeof(SSessionKey)) != NULL) {
    return true;
  }
  return false;
}

static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBlock, SSHashObj* pSeUpdated,
                                 SSHashObj* pStDeleted) {
  SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
@@ -4518,7 +4531,9 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl
    code = setStateOutputBuf(pAggSup, tsCols[i], groupId, pKeyData, &curWin, &nextWin);
    QUERY_CHECK_CODE(code, lino, _end);

    releaseOutputBuf(pAggSup->pState, nextWin.winInfo.pStatePos, &pAPI->stateStore);
    if (isWinResult(&nextWin.winInfo.sessionWin, pSeUpdated, pAggSup->pResultRows) == false) {
      releaseOutputBuf(pAggSup->pState, nextWin.winInfo.pStatePos, &pAPI->stateStore);
    }

    setSessionWinOutputInfo(pSeUpdated, &curWin.winInfo);
    code = updateStateWindowInfo(pAggSup, &curWin, &nextWin, tsCols, groupId, pKeyColInfo, rows, i, &allEqual,
@@ -6429,7 +6429,7 @@ int32_t blockDistFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
    return code;
  }

  len = tsnprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE, "Inmem_Rows=[%d] Stt_Rows=[%d] ",
  len = tsnprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE, "Inmem_Rows=[%u] Stt_Rows=[%u] ",
                  pData->numOfInmemRows, pData->numOfSttRows);
  varDataSetLen(st, len);
  code = colDataSetVal(pColInfo, row++, st, false);
@@ -24,6 +24,8 @@
#include "tglobal.h"
#include "ttime.h"

#define FQDNRETRYTIMES 100

static void syncCfg2SimpleStr(const SSyncCfg* pCfg, char* buf, int32_t bufLen) {
  int32_t len = tsnprintf(buf, bufLen, "{num:%d, as:%d, [", pCfg->replicaNum, pCfg->myIndex);
  for (int32_t i = 0; i < pCfg->replicaNum; ++i) {
@@ -45,7 +47,8 @@ void syncUtilNodeInfo2EpSet(const SNodeInfo* pInfo, SEpSet* pEpSet) {
bool syncUtilNodeInfo2RaftId(const SNodeInfo* pInfo, SyncGroupId vgId, SRaftId* raftId) {
  uint32_t ipv4 = 0xFFFFFFFF;
  sDebug("vgId:%d, resolve sync addr from fqdn, ep:%s:%u", vgId, pInfo->nodeFqdn, pInfo->nodePort);
  for (int32_t i = 0; i < tsResolveFQDNRetryTime; i++) {

  for (int32_t i = 0; i < FQDNRETRYTIMES; i++) {
    int32_t code = taosGetIpv4FromFqdn(pInfo->nodeFqdn, &ipv4);
    if (code) {
      sError("vgId:%d, failed to resolve sync addr, dnode:%d fqdn:%s, retry", vgId, pInfo->nodeId, pInfo->nodeFqdn);
@@ -310,11 +310,6 @@ class TDTestCase:
                "value": 1024 * 1024 * 20 * 10,
                "category": "global"
            },
            {
                "name": "resolveFQDNRetryTime",
                "value": 500,
                "category": "global"
            },
            {
                "name": "syncHeartbeatInterval",
                "value": 3000,
@@ -230,7 +230,7 @@ endi
sql_error show create stable t0;

sql show variables;
if $rows != 94 then
if $rows != 93 then
return -1
endi
@@ -2,13 +2,28 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c debugflag -v 135
system sh/cfg.sh -n dnode1 -c streamBufferSize -v 10
system sh/cfg.sh -n dnode1 -c checkpointinterval -v 60
system sh/cfg.sh -n dnode1 -c snodeAddress -v 127.0.0.1:873
system sh/exec.sh -n dnode1 -s start

sleep 500

sql connect
print step1 =============

print step1=============
print ================ create snode
sql show snodes
if $rows != 0 then
return -1
endi

sql create snode on dnode 1;
sql show snodes;
if $rows != 1 then
return -1
endi

print ============== snode created , create db

sql create database test3 vgroups 1;
sql use test3;
@@ -57,7 +72,7 @@ loop8:
sleep 200

$loop_count = $loop_count + 1
if $loop_count == 10 then
if $loop_count == 30 then
return -1
endi
@@ -87,7 +102,7 @@ loop9:
sleep 200

$loop_count = $loop_count + 1
if $loop_count == 10 then
if $loop_count == 30 then
return -1
endi
@@ -127,7 +142,7 @@ loop10:
sleep 200

$loop_count = $loop_count + 1
if $loop_count == 10 then
if $loop_count == 30 then
return -1
endi
@@ -162,7 +177,7 @@ loop11:
sleep 200

$loop_count = $loop_count + 1
if $loop_count == 10 then
if $loop_count == 30 then
return -1
endi
@@ -194,7 +209,7 @@ loop11:
sleep 200

$loop_count = $loop_count + 1
if $loop_count == 10 then
if $loop_count == 30 then
return -1
endi
@@ -239,7 +254,7 @@ loop12:
sleep 200

$loop_count = $loop_count + 1
if $loop_count == 10 then
if $loop_count == 30 then
return -1
endi
@@ -315,7 +330,7 @@ loop13:
sleep 200

$loop_count = $loop_count + 1
if $loop_count == 10 then
if $loop_count == 30 then
return -1
endi
@@ -369,7 +384,6 @@ if $data24 != 1 then
endi

print step4=============

sql create database test6 vgroups 4;
sql use test6;
sql create stable st(ts timestamp,a int,b int,c int,d int) tags(ta int,tb int,tc int);
@@ -396,7 +410,7 @@ loop14:
sleep 200

$loop_count = $loop_count + 1
if $loop_count == 10 then
if $loop_count == 30 then
return -1
endi
@@ -449,4 +463,21 @@ if $data25 != 2 then
goto loop14
endi

print sleep for 1min for checkpoint generate
sleep 60000

print ================== restart to load checkpoint from snode

system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode1 -s start

sleep 500
sql connect

sleep 30000

sql select start_ver, checkpoint_ver from information_schema.ins_stream_tasks where level='source';
sleep 500

system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -120,7 +120,7 @@ if $rows != 3 then
endi

sql show variables;
if $rows != 94 then
if $rows != 93 then
return -1
endi
@@ -207,11 +207,6 @@ class TDTestCase:
                "value": 1024 * 1024 * 20 * 10,
                "category": "global"
            },
            {
                "name": "resolveFQDNRetryTime",
                "value": 500,
                "category": "global"
            },
            {
                "name": "syncElectInterval",
                "value": 50000,
@@ -47,7 +47,7 @@ class TDTestCase:

    def case2(self):
        tdSql.query("show variables")
        tdSql.checkRows(94)
        tdSql.checkRows(93)

        for i in range(self.replicaVar):
            tdSql.query("show dnode %d variables like 'debugFlag'" % (i + 1))