chore: adjust bucket size of overview metric (#9)
parent eedcda0fec
commit e5dbe3123a
@@ -133,13 +133,10 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
 	var top = len(clusterIDs) + 1
 
 	bucketSize := GetMinBucketSize()
-	if bucketSize < 20 {
-		bucketSize = 20
+	if bucketSize < 60 {
+		bucketSize = 60
 	}
 	var metricLen = 15
-	if bucketSize <= 60 {
-		metricLen += 2
-	}
 	var bucketSizeStr = fmt.Sprintf("%vs", bucketSize)
 	indexMetricItems := []GroupMetricItem{}
 	metricItem := newMetricItem("cluster_indexing", 2, "cluster")
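
The sizing change is the same in all three handlers touched by this commit: the interval from GetMinBucketSize is clamped to at least 60 seconds and the chart keeps a fixed 15 buckets, replacing the old 20-second floor and the conditional metricLen += 2. A minimal, standalone sketch of that logic follows; the helper name and sample value are illustrative, not the project's API.

package main

import "fmt"

// clampBucketSize mirrors the new floor: never report buckets finer than 60s.
// (Assumption: the real GetMinBucketSize returns an interval in seconds.)
func clampBucketSize(minBucketSize int) int {
	if minBucketSize < 60 {
		return 60
	}
	return minBucketSize
}

func main() {
	bucketSize := clampBucketSize(10) // pretend GetMinBucketSize() returned 10
	metricLen := 15                   // fixed chart length; no +2 adjustment anymore
	bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
	fmt.Println(bucketSizeStr, metricLen) // prints: 60s 15
}
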
@@ -260,7 +257,7 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
 	for _, line := range indexMetrics["cluster_indexing"].Lines {
 		// remove first metric dot
 		data := line.Data
-		if v, ok := data.([][]interface{}); ok && len(v)> 0 && bucketSize <= 60 {
+		if v, ok := data.([][]interface{}); ok && len(v)> 0 {
 			// remove first metric dot
 			temp := v[1:]
 			// // remove first last dot
@@ -274,7 +271,7 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
 	searchMetricData := util.MapStr{}
 	for _, line := range indexMetrics["cluster_search"].Lines {
 		data := line.Data
-		if v, ok := data.([][]interface{}); ok && len(v)> 0 && bucketSize <= 60 {
+		if v, ok := data.([][]interface{}); ok && len(v)> 0 {
 			// remove first metric dot
 			temp := v[1:]
 			// // remove first last dot
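
With the && bucketSize <= 60 guard gone, the first data point of every metric line is now trimmed unconditionally; the first bucket is the partial one regardless of interval. A hedged sketch of that trim on [][]interface{} line data, using made-up sample points:

package main

import "fmt"

func main() {
	// Sample line data as [timestamp, value] pairs (stand-in for line.Data).
	var data interface{} = [][]interface{}{
		{int64(1000), 1.0}, // first, usually partial bucket - now always removed
		{int64(2000), 2.0},
		{int64(3000), 3.0},
	}

	if v, ok := data.([][]interface{}); ok && len(v) > 0 {
		data = v[1:] // remove first metric dot
	}
	fmt.Println(data) // [[2000 2] [3000 3]]
}
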
@@ -286,12 +283,6 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
 		searchMetricData[line.Metric.Label] = data
 	}
 
-	searchR1, err := elastic.GetClient(clusterID).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
-	if err != nil {
-		panic(err)
-	}
-
-
 	//fetch recent cluster health status
 	bucketItem := common.NewBucketItem(
 		common.TermsBucket, util.MapStr{
@@ -409,7 +400,7 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
 
 	util.MergeFields(query, aggs, true)
 
-	searchR1, err = elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
+	searchR1, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
 	if err != nil {
 		panic(err)
 	}
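
Deleting the earlier SearchWithRawQueryDSL call also deletes the first declaration of searchR1 and err, so the surviving call has to switch from plain assignment (=) to a short variable declaration (:=) or it no longer compiles. A tiny illustration of that Go rule in isolation (lookup is a made-up stand-in):

package main

import "fmt"

func lookup() (string, error) { return "ok", nil }

func main() {
	// With no prior declaration in scope, plain assignment would not compile:
	//   result, err = lookup() // error: undefined: result, err
	result, err := lookup() // := declares and assigns in one step
	if err != nil {
		panic(err)
	}
	fmt.Println(result)
}
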

@@ -365,11 +365,11 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
 		return
 	}
 
-	bucketSize, min, max, err := h.getMetricRangeAndBucketSize(req, 60, (15))
-	if err != nil {
-		panic(err)
-		return
+	bucketSize := GetMinBucketSize()
+	if bucketSize < 60 {
+		bucketSize = 60
 	}
+	var metricLen = 15
 	// indexing rate
 	indexMetric:=newMetricItem("indexing", 1, OperationGroupKey)
 	indexMetric.OnlyPrimary = true
@@ -425,8 +425,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
 			{
 				"range": util.MapStr{
 					"timestamp": util.MapStr{
-						"gte": min,
-						"lte": max,
+						"gte": fmt.Sprintf("now-%ds", metricLen * bucketSize),
 					},
 				},
 			},
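
Swapping the absolute min/max bounds for Elasticsearch date math ties the query window to the fixed chart length: with the values above (bucketSize = 60, metricLen = 15) the lower bound becomes "now-900s", i.e. the last 15 buckets, and no upper bound is sent. A sketch of the fragment built with plain maps (util.MapStr in the project is assumed here to be a map-like type):

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

func main() {
	bucketSize, metricLen := 60, 15

	// Relative lower bound via Elasticsearch date math: the last metricLen buckets.
	rangeFilter := map[string]interface{}{
		"range": map[string]interface{}{
			"timestamp": map[string]interface{}{
				"gte": fmt.Sprintf("now-%ds", metricLen*bucketSize), // "now-900s"
			},
		},
	}

	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	enc.Encode(rangeFilter)
}
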

@@ -305,11 +305,6 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
 		h.WriteError(w, err.Error(), http.StatusInternalServerError)
 		return
 	}
-	bucketSize, min, max, err := h.getMetricRangeAndBucketSize(req, 60, (15))
-	if err != nil {
-		panic(err)
-		return
-	}
 	// indexing rate
 	indexMetric:=newMetricItem("indexing", 1, OperationGroupKey)
 	indexMetric.AddAxi("indexing rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
@@ -335,6 +330,11 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
 		Units: "Search/s",
 	})
 
+	bucketSize := GetMinBucketSize()
+	if bucketSize < 60 {
+		bucketSize = 60
+	}
+	var metricLen = 15
 	aggs:=map[string]interface{}{}
 	query=map[string]interface{}{}
 	query["query"]=util.MapStr{
@@ -364,8 +364,7 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
 			{
 				"range": util.MapStr{
 					"timestamp": util.MapStr{
-						"gte": min,
-						"lte": max,
+						"gte": fmt.Sprintf("now-%ds", metricLen * bucketSize),
 					},
 				},
 			},