chore: adjust bucket size of overview metric (#9)

This commit is contained in:
silenceqi 2024-12-06 16:30:23 +08:00 committed by GitHub
parent eedcda0fec
commit e5dbe3123a
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 16 additions and 27 deletions

View File

@@ -133,13 +133,10 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
 	var top = len(clusterIDs) + 1
 	bucketSize := GetMinBucketSize()
-	if bucketSize < 20 {
-		bucketSize = 20
+	if bucketSize < 60 {
+		bucketSize = 60
 	}
 	var metricLen = 15
-	if bucketSize <= 60 {
-		metricLen += 2
-	}
 	var bucketSizeStr = fmt.Sprintf("%vs", bucketSize)
 	indexMetricItems := []GroupMetricItem{}
 	metricItem := newMetricItem("cluster_indexing", 2, "cluster")
@@ -260,7 +257,7 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
 	for _, line := range indexMetrics["cluster_indexing"].Lines {
 		// remove first metric dot
 		data := line.Data
-		if v, ok := data.([][]interface{}); ok && len(v)> 0 && bucketSize <= 60 {
+		if v, ok := data.([][]interface{}); ok && len(v)> 0 {
 			// remove first metric dot
 			temp := v[1:]
 			// // remove first last dot
@@ -274,7 +271,7 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
 	searchMetricData := util.MapStr{}
 	for _, line := range indexMetrics["cluster_search"].Lines {
 		data := line.Data
-		if v, ok := data.([][]interface{}); ok && len(v)> 0 && bucketSize <= 60 {
+		if v, ok := data.([][]interface{}); ok && len(v)> 0 {
 			// remove first metric dot
 			temp := v[1:]
 			// // remove first last dot
@@ -286,12 +283,6 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
 		searchMetricData[line.Metric.Label] = data
 	}
-	searchR1, err := elastic.GetClient(clusterID).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
-	if err != nil {
-		panic(err)
-	}
 	//fetch recent cluster health status
 	bucketItem := common.NewBucketItem(
 		common.TermsBucket, util.MapStr{
@@ -409,7 +400,7 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
 	util.MergeFields(query, aggs, true)
-	searchR1, err = elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
+	searchR1, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
 	if err != nil {
 		panic(err)
 	}

View File

@@ -365,11 +365,11 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
 		return
 	}
-	bucketSize, min, max, err := h.getMetricRangeAndBucketSize(req, 60, (15))
-	if err != nil {
-		panic(err)
-		return
-	}
+	bucketSize := GetMinBucketSize()
+	if bucketSize < 60 {
+		bucketSize = 60
+	}
+	var metricLen = 15
 	// 索引速率
 	indexMetric:=newMetricItem("indexing", 1, OperationGroupKey)
 	indexMetric.OnlyPrimary = true
@@ -425,8 +425,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
 					{
 						"range": util.MapStr{
 							"timestamp": util.MapStr{
-								"gte": min,
-								"lte": max,
+								"gte": fmt.Sprintf("now-%ds", metricLen * bucketSize),
 							},
 						},
 					},

View File

@@ -305,11 +305,6 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
 		h.WriteError(w, err.Error(), http.StatusInternalServerError)
 		return
 	}
-	bucketSize, min, max, err := h.getMetricRangeAndBucketSize(req, 60, (15))
-	if err != nil {
-		panic(err)
-		return
-	}
 	// 索引速率
 	indexMetric:=newMetricItem("indexing", 1, OperationGroupKey)
 	indexMetric.AddAxi("indexing rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
@@ -335,6 +330,11 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
 		Units: "Search/s",
 	})
+	bucketSize := GetMinBucketSize()
+	if bucketSize < 60 {
+		bucketSize = 60
+	}
+	var metricLen = 15
 	aggs:=map[string]interface{}{}
 	query=map[string]interface{}{}
 	query["query"]=util.MapStr{
@@ -364,8 +364,7 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
 					{
 						"range": util.MapStr{
 							"timestamp": util.MapStr{
-								"gte": min,
-								"lte": max,
+								"gte": fmt.Sprintf("now-%ds", metricLen * bucketSize),
 							},
 						},
 					},