feat: splitting metric query, adding query param key for query metric api to support single metric query (#7)

* feat: support querying es cluster metrics with a specified key
* feat: adding context param to control metric request timeout
* feat: splitting metric query, adding query param `key` to the query metric API to support single-metric queries
* chore: clean unused code
* fix: wrong metric key
This commit is contained in:
silenceqi 2024-12-06 14:02:16 +08:00 committed by GitHub
parent 004f4bdc15
commit 46a5976bf3
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
16 changed files with 3077 additions and 4456 deletions

View File

@ -24,6 +24,7 @@
package api
import (
"context"
"fmt"
"infini.sh/framework/modules/elastic/adapter"
"net/http"
@ -254,7 +255,7 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
},
},
}
indexMetrics := h.getMetrics(query, indexMetricItems, bucketSize)
indexMetrics := h.getMetrics(context.Background(), query, indexMetricItems, bucketSize)
indexingMetricData := util.MapStr{}
for _, line := range indexMetrics["cluster_indexing"].Lines {
// remove first metric dot

View File

@ -28,6 +28,7 @@
package api
import (
"context"
"fmt"
log "github.com/cihub/seelog"
httprouter "infini.sh/framework/core/api/router"
@ -604,10 +605,10 @@ func (h *APIHandler) getSingleHostMetric(agentID string, min, max int64, bucketS
},
},
}
return h.getSingleMetrics(metricItems, query, bucketSize)
return h.getSingleMetrics(context.Background(), metricItems, query, bucketSize)
}
func (h *APIHandler) getSingleHostMetricFromNode(nodeID string, min, max int64, bucketSize int) map[string]*common.MetricItem {
func (h *APIHandler) getSingleHostMetricFromNode(ctx context.Context, nodeID string, min, max int64, bucketSize int) map[string]*common.MetricItem {
var must = []util.MapStr{
{
"term": util.MapStr{
@ -669,7 +670,7 @@ func (h *APIHandler) getSingleHostMetricFromNode(nodeID string, min, max int64,
return 100 - value*100/value2
}
metricItems = append(metricItems, metricItem)
return h.getSingleMetrics(metricItems, query, bucketSize)
return h.getSingleMetrics(ctx, metricItems, query, bucketSize)
}
func (h *APIHandler) GetSingleHostMetrics(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
@ -696,7 +697,7 @@ func (h *APIHandler) GetSingleHostMetrics(w http.ResponseWriter, req *http.Reque
return
}
if hostInfo.AgentID == "" {
resBody["metrics"] = h.getSingleHostMetricFromNode(hostInfo.NodeID, min, max, bucketSize)
resBody["metrics"] = h.getSingleHostMetricFromNode(context.Background(), hostInfo.NodeID, min, max, bucketSize)
h.WriteJSON(w, resBody, http.StatusOK)
return
}
@ -866,7 +867,7 @@ func (h *APIHandler) getGroupHostMetric(agentIDs []string, min, max int64, bucke
},
},
}
return h.getMetrics(query, hostMetricItems, bucketSize)
return h.getMetrics(context.Background(), query, hostMetricItems, bucketSize)
}
func getHost(hostID string) (*host.HostInfo, error) {

View File

@ -24,8 +24,10 @@
package api
import (
"context"
"fmt"
log "github.com/cihub/seelog"
v1 "infini.sh/console/modules/elastic/api/v1"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/global"
"infini.sh/framework/core/radix"
@ -38,7 +40,7 @@ import (
"time"
)
func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucketSize int, min, max int64, indexName string, top int, shardID string) (map[string]*common.MetricItem, error){
func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clusterID string, bucketSize int, min, max int64, indexName string, top int, shardID string, metricKey string) (map[string]*common.MetricItem, error){
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
clusterUUID, err := adapter.GetClusterUUID(clusterID)
if err != nil {
@ -141,12 +143,13 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
},
},
}
indexMetricItems := []GroupMetricItem{}
switch metricKey {
case v1.IndexStorageMetricKey:
//索引存储大小
indexStorageMetric := newMetricItem("index_storage", 1, StorageGroupKey)
indexStorageMetric := newMetricItem(v1.IndexStorageMetricKey, 1, StorageGroupKey)
indexStorageMetric.AddAxi("Index storage","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems := []GroupMetricItem{
{
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "index_storage",
Field: "payload.elasticsearch.shard_stats.store.size_in_bytes",
ID: util.GetUUID(),
@ -154,10 +157,10 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
MetricItem: indexStorageMetric,
FormatType: "bytes",
Units: "",
},
}
})
case v1.SegmentCountMetricKey:
// segment 数量
segmentCountMetric:=newMetricItem("segment_count", 15, StorageGroupKey)
segmentCountMetric:=newMetricItem(v1.SegmentCountMetricKey, 15, StorageGroupKey)
segmentCountMetric.AddAxi("segment count","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "segment_count",
@ -168,8 +171,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "",
})
case v1.DocCountMetricKey:
//索引文档个数
docCountMetric := newMetricItem("doc_count", 2, DocumentGroupKey)
docCountMetric := newMetricItem(v1.DocCountMetricKey, 2, DocumentGroupKey)
docCountMetric.AddAxi("Doc count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
@ -181,8 +185,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "",
})
case v1.DocsDeletedMetricKey:
// docs 删除数量
docsDeletedMetric:=newMetricItem("docs_deleted", 17, DocumentGroupKey)
docsDeletedMetric:=newMetricItem(v1.DocsDeletedMetricKey, 17, DocumentGroupKey)
docsDeletedMetric.AddAxi("docs deleted","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "docs_deleted",
@ -193,6 +198,7 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "",
})
case v1.QueryTimesMetricKey:
//查询次数
queryTimesMetric := newMetricItem("query_times", 2, OperationGroupKey)
queryTimesMetric.AddAxi("Query times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
@ -206,9 +212,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "requests/s",
})
case v1.FetchTimesMetricKey:
//Fetch次数
fetchTimesMetric := newMetricItem("fetch_times", 3, OperationGroupKey)
fetchTimesMetric := newMetricItem(v1.FetchTimesMetricKey, 3, OperationGroupKey)
fetchTimesMetric.AddAxi("Fetch times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "fetch_times",
@ -219,8 +225,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "requests/s",
})
case v1.ScrollTimesMetricKey:
//scroll 次数
scrollTimesMetric := newMetricItem("scroll_times", 4, OperationGroupKey)
scrollTimesMetric := newMetricItem(v1.ScrollTimesMetricKey, 4, OperationGroupKey)
scrollTimesMetric.AddAxi("scroll times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "scroll_times",
@ -231,8 +238,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "requests/s",
})
case v1.MergeTimesMetricKey:
//Merge次数
mergeTimesMetric := newMetricItem("merge_times", 7, OperationGroupKey)
mergeTimesMetric := newMetricItem(v1.MergeTimesMetricKey, 7, OperationGroupKey)
mergeTimesMetric.AddAxi("Merge times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "merge_times",
@ -243,8 +251,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "requests/s",
})
case v1.RefreshTimesMetricKey:
//Refresh次数
refreshTimesMetric := newMetricItem("refresh_times", 5, OperationGroupKey)
refreshTimesMetric := newMetricItem(v1.RefreshTimesMetricKey, 5, OperationGroupKey)
refreshTimesMetric.AddAxi("Refresh times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "refresh_times",
@ -255,8 +264,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "requests/s",
})
case v1.FlushTimesMetricKey:
//flush 次数
flushTimesMetric := newMetricItem("flush_times", 6, OperationGroupKey)
flushTimesMetric := newMetricItem(v1.FlushTimesMetricKey, 6, OperationGroupKey)
flushTimesMetric.AddAxi("flush times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "flush_times",
@ -267,9 +277,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "requests/s",
})
case v1.IndexingRateMetricKey:
//写入速率
indexingRateMetric := newMetricItem("indexing_rate", 1, OperationGroupKey)
indexingRateMetric := newMetricItem(v1.IndexingRateMetricKey, 1, OperationGroupKey)
if shardID == "" {
indexingRateMetric.OnlyPrimary = true
}
@ -283,7 +293,8 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "doc/s",
})
indexingBytesMetric := newMetricItem("indexing_bytes", 2, OperationGroupKey)
case v1.IndexingBytesMetricKey:
indexingBytesMetric := newMetricItem(v1.IndexingBytesMetricKey, 2, OperationGroupKey)
if shardID == "" {
indexingBytesMetric.OnlyPrimary = true
}
@ -297,8 +308,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "bytes",
Units: "bytes/s",
})
case v1.IndexingLatencyMetricKey:
//写入时延
indexingLatencyMetric := newMetricItem("indexing_latency", 1, LatencyGroupKey)
indexingLatencyMetric := newMetricItem(v1.IndexingLatencyMetricKey, 1, LatencyGroupKey)
if shardID == "" {
indexingLatencyMetric.OnlyPrimary = true
}
@ -316,9 +328,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "ms",
})
case v1.QueryLatencyMetricKey:
//查询时延
queryLatencyMetric := newMetricItem("query_latency", 2, LatencyGroupKey)
queryLatencyMetric := newMetricItem(v1.QueryLatencyMetricKey, 2, LatencyGroupKey)
queryLatencyMetric.AddAxi("Query latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "query_latency",
@ -333,8 +345,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "ms",
})
case FetchLatencyMetricKey:
//fetch时延
fetchLatencyMetric := newMetricItem("fetch_latency", 3, LatencyGroupKey)
fetchLatencyMetric := newMetricItem(v1.FetchLatencyMetricKey, 3, LatencyGroupKey)
fetchLatencyMetric.AddAxi("Fetch latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "fetch_latency",
@ -349,9 +362,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "ms",
})
case v1.MergeLatencyMetricKey:
//merge时延
mergeLatencyMetric := newMetricItem("merge_latency", 7, LatencyGroupKey)
mergeLatencyMetric := newMetricItem(v1.MergeLatencyMetricKey, 7, LatencyGroupKey)
mergeLatencyMetric.AddAxi("Merge latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "merge_latency",
@ -366,8 +379,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "ms",
})
case RefreshLatencyMetricKey:
//refresh时延
refreshLatencyMetric := newMetricItem("refresh_latency", 5, LatencyGroupKey)
refreshLatencyMetric := newMetricItem(v1.RefreshLatencyMetricKey, 5, LatencyGroupKey)
refreshLatencyMetric.AddAxi("Refresh latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "refresh_latency",
@ -382,8 +396,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "ms",
})
case v1.ScrollLatencyMetricKey:
//scroll时延
scrollLatencyMetric := newMetricItem("scroll_latency", 4, LatencyGroupKey)
scrollLatencyMetric := newMetricItem(v1.ScrollLatencyMetricKey, 4, LatencyGroupKey)
scrollLatencyMetric.AddAxi("Scroll Latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "scroll_latency",
@ -398,8 +413,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "ms",
})
case v1.FlushLatencyMetricKey:
//flush 时延
flushLatencyMetric := newMetricItem("flush_latency", 6, LatencyGroupKey)
flushLatencyMetric := newMetricItem(v1.FlushLatencyMetricKey, 6, LatencyGroupKey)
flushLatencyMetric.AddAxi("Flush latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "flush_latency",
@ -414,8 +430,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "ms",
})
case v1.QueryCacheMetricKey:
//queryCache
queryCacheMetric := newMetricItem("query_cache", 1, CacheGroupKey)
queryCacheMetric := newMetricItem(v1.QueryCacheMetricKey, 1, CacheGroupKey)
queryCacheMetric.AddAxi("Query cache","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "query_cache",
@ -426,8 +443,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "bytes",
Units: "",
})
case v1.RequestCacheMetricKey:
//requestCache
requestCacheMetric := newMetricItem("request_cache", 2, CacheGroupKey)
requestCacheMetric := newMetricItem(v1.RequestCacheMetricKey, 2, CacheGroupKey)
requestCacheMetric.AddAxi("request cache","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "request_cache",
@ -438,8 +456,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "bytes",
Units: "",
})
case v1.RequestCacheHitMetricKey:
// Request Cache Hit
requestCacheHitMetric:=newMetricItem("request_cache_hit", 6, CacheGroupKey)
requestCacheHitMetric:=newMetricItem(v1.RequestCacheHitMetricKey, 6, CacheGroupKey)
requestCacheHitMetric.AddAxi("request cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "request_cache_hit",
@ -450,8 +469,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "hits",
})
case v1.RequestCacheMissMetricKey:
// Request Cache Miss
requestCacheMissMetric:=newMetricItem("request_cache_miss", 8, CacheGroupKey)
requestCacheMissMetric:=newMetricItem(v1.RequestCacheMissMetricKey, 8, CacheGroupKey)
requestCacheMissMetric.AddAxi("request cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "request_cache_miss",
@ -462,8 +482,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "misses",
})
case v1.QueryCacheCountMetricKey:
// Query Cache Count
queryCacheCountMetric:=newMetricItem("query_cache_count", 4, CacheGroupKey)
queryCacheCountMetric:=newMetricItem(v1.QueryCacheCountMetricKey, 4, CacheGroupKey)
queryCacheCountMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "query_cache_count",
@ -474,8 +495,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "",
})
case v1.QueryCacheHitMetricKey:
// Query Cache Hit
queryCacheHitMetric:=newMetricItem("query_cache_hit", 5, CacheGroupKey)
queryCacheHitMetric:=newMetricItem(v1.QueryCacheHitMetricKey, 5, CacheGroupKey)
queryCacheHitMetric.AddAxi("query cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "query_cache_hit",
@ -486,22 +508,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "hits",
})
//// Query Cache evictions
//queryCacheEvictionsMetric:=newMetricItem("query_cache_evictions", 11, CacheGroupKey)
//queryCacheEvictionsMetric.AddAxi("query cache evictions","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
//indexMetricItems=append(indexMetricItems, GroupMetricItem{
// Key: "query_cache_evictions",
// Field: "payload.elasticsearch.index_stats.total.query_cache.evictions",
// ID: util.GetUUID(),
// IsDerivative: true,
// MetricItem: queryCacheEvictionsMetric,
// FormatType: "num",
// Units: "evictions",
//})
case v1.QueryCacheMissMetricKey:
// Query Cache Miss
queryCacheMissMetric:=newMetricItem("query_cache_miss", 7, CacheGroupKey)
queryCacheMissMetric:=newMetricItem(v1.QueryCacheMissMetricKey, 7, CacheGroupKey)
queryCacheMissMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "query_cache_miss",
@ -512,8 +521,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "misses",
})
case v1.FielddataCacheMetricKey:
// Fielddata内存占用大小
fieldDataCacheMetric:=newMetricItem("fielddata_cache", 3, CacheGroupKey)
fieldDataCacheMetric:=newMetricItem(v1.FielddataCacheMetricKey, 3, CacheGroupKey)
fieldDataCacheMetric.AddAxi("FieldData Cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "fielddata_cache",
@ -524,8 +534,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "bytes",
Units: "",
})
case v1.SegmentMemoryMetricKey:
//segment memory
segmentMemoryMetric := newMetricItem("segment_memory", 13, MemoryGroupKey)
segmentMemoryMetric := newMetricItem(v1.SegmentMemoryMetricKey, 13, MemoryGroupKey)
segmentMemoryMetric.AddAxi("Segment memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "segment_memory",
@ -536,9 +547,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "bytes",
Units: "",
})
case v1.SegmentDocValuesMemoryMetricKey:
//segment doc values memory
docValuesMemoryMetric := newMetricItem("segment_doc_values_memory", 13, MemoryGroupKey)
docValuesMemoryMetric := newMetricItem(v1.SegmentDocValuesMemoryMetricKey, 13, MemoryGroupKey)
docValuesMemoryMetric.AddAxi("Segment Doc values Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "segment_doc_values_memory",
@ -549,9 +560,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "bytes",
Units: "",
})
case v1.SegmentTermsMemoryMetricKey:
//segment terms memory
termsMemoryMetric := newMetricItem("segment_terms_memory", 13, MemoryGroupKey)
termsMemoryMetric := newMetricItem(v1.SegmentTermsMemoryMetricKey, 13, MemoryGroupKey)
termsMemoryMetric.AddAxi("Segment Terms Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "segment_terms_memory",
@ -562,9 +573,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "bytes",
Units: "",
})
case v1.SegmentFieldsMemoryMetricKey:
//segment fields memory
fieldsMemoryMetric := newMetricItem("segment_fields_memory", 13, MemoryGroupKey)
fieldsMemoryMetric := newMetricItem(v1.SegmentFieldsMemoryMetricKey, 13, MemoryGroupKey)
fieldsMemoryMetric.AddAxi("Segment Fields Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "segment_fields_memory",
@ -575,8 +586,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "bytes",
Units: "",
})
case v1.SegmentIndexWriterMemoryMetricKey:
// segment index writer memory
segmentIndexWriterMemoryMetric:=newMetricItem("segment_index_writer_memory", 16, MemoryGroupKey)
segmentIndexWriterMemoryMetric:=newMetricItem(v1.SegmentIndexWriterMemoryMetricKey, 16, MemoryGroupKey)
segmentIndexWriterMemoryMetric.AddAxi("segment doc values memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "segment_index_writer_memory",
@ -587,8 +599,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "bytes",
Units: "",
})
case v1.SegmentTermVectorsMemoryMetricKey:
// segment term vectors memory
segmentTermVectorsMemoryMetric:=newMetricItem("segment_term_vectors_memory", 16, MemoryGroupKey)
segmentTermVectorsMemoryMetric:=newMetricItem(v1.SegmentTermVectorsMemoryMetricKey, 16, MemoryGroupKey)
segmentTermVectorsMemoryMetric.AddAxi("segment term vectors memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "segment_term_vectors_memory",
@ -599,6 +612,8 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "bytes",
Units: "",
})
}
aggs:=map[string]interface{}{}
sumAggs := util.MapStr{}
@ -727,7 +742,7 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
},
},
}
return h.getMetrics(query, indexMetricItems, bucketSize), nil
return h.getMetrics(ctx, query, indexMetricItems, bucketSize), nil
}

View File

@ -28,6 +28,7 @@
package api
import (
"context"
"fmt"
log "github.com/cihub/seelog"
httprouter "infini.sh/framework/core/api/router"
@ -40,6 +41,7 @@ import (
"infini.sh/framework/modules/elastic/common"
"net/http"
"strings"
"time"
)
func (h *APIHandler) SearchIndexMetadata(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
@ -503,7 +505,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
},
},
}
metrics := h.getMetrics(query, nodeMetricItems, bucketSize)
metrics := h.getMetrics(context.Background(), query, nodeMetricItems, bucketSize)
indexMetrics := map[string]util.MapStr{}
for key, item := range metrics {
for _, line := range item.Lines {
@ -851,6 +853,16 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ
if bucketSize <= 60 {
min = min - int64(2 * bucketSize * 1000)
}
metricKey := h.GetParameter(req, "key")
timeout := h.GetParameterOrDefault(req, "timeout", "60s")
du, err := time.ParseDuration(timeout)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
ctx, cancel := context.WithTimeout(context.Background(), du)
defer cancel()
query := map[string]interface{}{}
query["query"] = util.MapStr{
"bool": util.MapStr{
@ -870,76 +882,87 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
metricItems := []*common.MetricItem{}
metricItem:=newMetricItem("index_throughput", 1, OperationGroupKey)
metricItem.AddAxi("indexing","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
if shardID == "" {
metricItem.AddLine("Indexing Rate","Primary Indexing","Number of documents being indexed for node.","group1","payload.elasticsearch.shard_stats.indexing.index_total","max",bucketSizeStr,"doc/s","num","0,0.[00]","0,0.[00]",false,true)
metricItem.AddLine("Deleting Rate","Primary Deleting","Number of documents being deleted for node.","group1","payload.elasticsearch.shard_stats.indexing.delete_total","max",bucketSizeStr,"doc/s","num","0,0.[00]","0,0.[00]",false,true)
metricItem.Lines[0].Metric.OnlyPrimary = true
metricItem.Lines[1].Metric.OnlyPrimary = true
}else{
metricItem.AddLine("Indexing Rate","Indexing Rate","Number of documents being indexed for node.","group1","payload.elasticsearch.shard_stats.indexing.index_total","max",bucketSizeStr,"doc/s","num","0,0.[00]","0,0.[00]",false,true)
metricItem.AddLine("Deleting Rate","Deleting Rate","Number of documents being deleted for node.","group1","payload.elasticsearch.shard_stats.indexing.delete_total","max",bucketSizeStr,"doc/s","num","0,0.[00]","0,0.[00]",false,true)
metrics := map[string]*common.MetricItem{}
if metricKey == ShardStateMetricKey {
shardStateMetric, err := h.getIndexShardsMetric(ctx, clusterID, indexName, min, max, bucketSize)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
metricItems=append(metricItems,metricItem)
metricItem=newMetricItem("search_throughput", 2, OperationGroupKey)
metricItem.AddAxi("searching","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,false)
metricItem.AddLine("Search Rate","Search Rate",
"Number of search requests being executed.",
"group1","payload.elasticsearch.shard_stats.search.query_total","max",bucketSizeStr,"query/s","num","0,0.[00]","0,0.[00]",false,true)
metricItems=append(metricItems,metricItem)
metricItem=newMetricItem("index_latency", 3, LatencyGroupKey)
metricItem.AddAxi("indexing","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
if shardID == "" { //index level
metricItem.AddLine("Indexing Latency","Primary Indexing Latency","Average latency for indexing documents.","group1","payload.elasticsearch.shard_stats.indexing.index_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.AddLine("Deleting Latency","Primary Deleting Latency","Average latency for delete documents.","group1","payload.elasticsearch.shard_stats.indexing.delete_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metrics["shard_state"] = shardStateMetric
}else {
switch metricKey {
case IndexThroughputMetricKey:
metricItem := newMetricItem("index_throughput", 1, OperationGroupKey)
metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
if shardID == "" {
metricItem.AddLine("Indexing Rate", "Primary Indexing", "Number of documents being indexed for node.", "group1", "payload.elasticsearch.shard_stats.indexing.index_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.AddLine("Deleting Rate", "Primary Deleting", "Number of documents being deleted for node.", "group1", "payload.elasticsearch.shard_stats.indexing.delete_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.Lines[0].Metric.OnlyPrimary = true
metricItem.Lines[1].Metric.OnlyPrimary = true
}else{ // shard level
metricItem.AddLine("Indexing Latency","Indexing Latency","Average latency for indexing documents.","group1","payload.elasticsearch.shard_stats.indexing.index_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.AddLine("Deleting Latency","Deleting Latency","Average latency for delete documents.","group1","payload.elasticsearch.shard_stats.indexing.delete_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
} else {
metricItem.AddLine("Indexing Rate", "Indexing Rate", "Number of documents being indexed for node.", "group1", "payload.elasticsearch.shard_stats.indexing.index_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.AddLine("Deleting Rate", "Deleting Rate", "Number of documents being deleted for node.", "group1", "payload.elasticsearch.shard_stats.indexing.delete_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
}
metricItems = append(metricItems, metricItem)
case SearchThroughputMetricKey:
metricItem := newMetricItem("search_throughput", 2, OperationGroupKey)
metricItem.AddAxi("searching", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
metricItem.AddLine("Search Rate", "Search Rate",
"Number of search requests being executed.",
"group1", "payload.elasticsearch.shard_stats.search.query_total", "max", bucketSizeStr, "query/s", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItems = append(metricItems, metricItem)
case IndexLatencyMetricKey:
metricItem := newMetricItem("index_latency", 3, LatencyGroupKey)
metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
if shardID == "" { //index level
metricItem.AddLine("Indexing Latency", "Primary Indexing Latency", "Average latency for indexing documents.", "group1", "payload.elasticsearch.shard_stats.indexing.index_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.AddLine("Deleting Latency", "Primary Deleting Latency", "Average latency for delete documents.", "group1", "payload.elasticsearch.shard_stats.indexing.delete_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.Lines[0].Metric.OnlyPrimary = true
metricItem.Lines[1].Metric.OnlyPrimary = true
} else { // shard level
metricItem.AddLine("Indexing Latency", "Indexing Latency", "Average latency for indexing documents.", "group1", "payload.elasticsearch.shard_stats.indexing.index_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.AddLine("Deleting Latency", "Deleting Latency", "Average latency for delete documents.", "group1", "payload.elasticsearch.shard_stats.indexing.delete_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
}
metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.shard_stats.indexing.index_total"
metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
return value / value2
}
metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.shard_stats.indexing.delete_total"
metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
return value / value2
}
metricItems=append(metricItems,metricItem)
metricItems = append(metricItems, metricItem)
case SearchLatencyMetricKey:
metricItem := newMetricItem("search_latency", 4, LatencyGroupKey)
metricItem.AddAxi("searching", "group2", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
metricItem=newMetricItem("search_latency", 4, LatencyGroupKey)
metricItem.AddAxi("searching","group2",common.PositionLeft,"num","0,0","0,0.[00]",5,false)
metricItem.AddLine("Searching","Query Latency","Average latency for searching query.","group2","payload.elasticsearch.shard_stats.search.query_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.AddLine("Searching", "Query Latency", "Average latency for searching query.", "group2", "payload.elasticsearch.shard_stats.search.query_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.shard_stats.search.query_total"
metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
return value / value2
}
metricItem.AddLine("Searching","Fetch Latency","Average latency for searching fetch.","group2","payload.elasticsearch.shard_stats.search.fetch_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.AddLine("Searching", "Fetch Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.shard_stats.search.fetch_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.shard_stats.search.fetch_total"
metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
return value / value2
}
metricItem.AddLine("Searching","Scroll Latency","Average latency for searching fetch.","group2","payload.elasticsearch.shard_stats.search.scroll_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.AddLine("Searching", "Scroll Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.shard_stats.search.scroll_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.Lines[2].Metric.Field2 = "payload.elasticsearch.shard_stats.search.scroll_total"
metricItem.Lines[2].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
return value / value2
}
metricItems=append(metricItems,metricItem)
metrics := h.getSingleIndexMetrics(metricItems,query, bucketSize)
shardStateMetric, err := h.getIndexShardsMetric(clusterID, indexName, min, max, bucketSize)
if err != nil {
log.Error(err)
metricItems = append(metricItems, metricItem)
}
metrics["shard_state"] = shardStateMetric
metrics = h.getSingleIndexMetrics(context.Background(), metricItems, query, bucketSize)
}
resBody["metrics"] = metrics
h.WriteJSON(w, resBody, http.StatusOK)
}
func (h *APIHandler) getIndexShardsMetric(id, indexName string, min, max int64, bucketSize int)(*common.MetricItem, error){
func (h *APIHandler) getIndexShardsMetric(ctx context.Context, id, indexName string, min, max int64, bucketSize int)(*common.MetricItem, error){
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
@ -1007,7 +1030,8 @@ func (h *APIHandler) getIndexShardsMetric(id, indexName string, min, max int64,
},
},
}
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
queryDSL := util.MustToJSONBytes(query)
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
if err != nil {
log.Error(err)
return nil, err
@ -1025,6 +1049,7 @@ func (h *APIHandler) getIndexShardsMetric(id, indexName string, min, max int64,
}
metricItem.Lines[0].Data = metricData
metricItem.Lines[0].Type = common.GraphTypeBar
metricItem.Request = string(queryDSL)
return metricItem, nil
}

View File

@ -532,6 +532,7 @@ func (h *APIHandler) HandleClusterMetricsAction(w http.ResponseWriter, req *http
h.APIHandler.HandleClusterMetricsAction(w, req, ps)
return
}
key := h.GetParameter(req, "key")
bucketSize, min, max, err := h.getMetricRangeAndBucketSize(req, 10, 90)
if err != nil {
@ -546,18 +547,23 @@ func (h *APIHandler) HandleClusterMetricsAction(w http.ResponseWriter, req *http
}
}
//fmt.Println(min," vs ",max,",",rangeFrom,rangeTo,"range hours:",hours)
//metrics:=h.GetClusterMetrics(id,bucketSize,min,max)
isOverview := h.GetIntOrDefault(req, "overview", 0)
var metrics interface{}
if bucketSize <= 60 {
min = min - int64(2*bucketSize*1000)
}
if isOverview == 1 {
metrics = h.GetClusterIndexMetrics(id, bucketSize, min, max)
} else {
metrics = h.GetClusterMetrics(id, bucketSize, min, max)
timeout := h.GetParameterOrDefault(req, "timeout", "60s")
du, err := time.ParseDuration(timeout)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
ctx, cancel := context.WithTimeout(context.Background(), du)
defer cancel()
if util.StringInArray([]string{IndexThroughputMetricKey, SearchThroughputMetricKey, IndexLatencyMetricKey, SearchLatencyMetricKey}, key) {
metrics = h.GetClusterIndexMetrics(ctx, id, bucketSize, min, max, key)
}else{
metrics = h.GetClusterMetrics(ctx, id, bucketSize, min, max, key)
}
resBody["metrics"] = metrics
@ -584,7 +590,17 @@ func (h *APIHandler) HandleNodeMetricsAction(w http.ResponseWriter, req *http.Re
if bucketSize <= 60 {
min = min - int64(2*bucketSize*1000)
}
resBody["metrics"], err = h.getNodeMetrics(id, bucketSize, min, max, nodeName, top)
key := h.GetParameter(req, "key")
timeout := h.GetParameterOrDefault(req, "timeout", "60s")
du, err := time.ParseDuration(timeout)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
ctx, cancel := context.WithTimeout(context.Background(), du)
defer cancel()
resBody["metrics"], err = h.getNodeMetrics(ctx, id, bucketSize, min, max, nodeName, top, key)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
@ -627,13 +643,35 @@ func (h *APIHandler) HandleIndexMetricsAction(w http.ResponseWriter, req *http.R
if bucketSize <= 60 {
min = min - int64(2*bucketSize*1000)
}
metrics, err := h.getIndexMetrics(req, id, bucketSize, min, max, indexName, top, shardID)
key := h.GetParameter(req, "key")
timeout := h.GetParameterOrDefault(req, "timeout", "60s")
du, err := time.ParseDuration(timeout)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
ctx, cancel := context.WithTimeout(context.Background(), du)
defer cancel()
var metrics map[string]*common.MetricItem
if key == v1.DocPercentMetricKey {
metrics, err = h.getIndexMetrics(ctx, req, id, bucketSize, min, max, indexName, top, shardID, v1.DocCountMetricKey)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
docsDeletedMetrics, err := h.getIndexMetrics(ctx, req, id, bucketSize, min, max, indexName, top, shardID, v1.DocsDeletedMetricKey)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
for k, v := range docsDeletedMetrics {
if v != nil {
metrics[k] = v
}
}
if metrics["doc_count"] != nil && metrics["docs_deleted"] != nil && len(metrics["doc_count"].Lines) > 0 && len(metrics["docs_deleted"].Lines) > 0 {
metricA := metrics["doc_count"]
metricB := metrics["docs_deleted"]
@ -679,6 +717,14 @@ func (h *APIHandler) HandleIndexMetricsAction(w http.ResponseWriter, req *http.R
}
}
}else{
metrics, err = h.getIndexMetrics(ctx, req, id, bucketSize, min, max, indexName, top, shardID, key)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
}
resBody["metrics"] = metrics
ver := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).GetVersion()
if ver.Distribution == "" {
@ -711,7 +757,17 @@ func (h *APIHandler) HandleQueueMetricsAction(w http.ResponseWriter, req *http.R
if bucketSize <= 60 {
min = min - int64(2*bucketSize*1000)
}
resBody["metrics"], err = h.getThreadPoolMetrics(id, bucketSize, min, max, nodeName, top)
key := h.GetParameter(req, "key")
timeout := h.GetParameterOrDefault(req, "timeout", "60s")
du, err := time.ParseDuration(timeout)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
ctx, cancel := context.WithTimeout(context.Background(), du)
defer cancel()
resBody["metrics"], err = h.getThreadPoolMetrics(ctx, id, bucketSize, min, max, nodeName, top, key)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
@ -837,11 +893,50 @@ const (
CircuitBreakerGroupKey = "circuit_breaker"
)
func (h *APIHandler) GetClusterMetrics(id string, bucketSize int, min, max int64) map[string]*common.MetricItem {
const (
ClusterStorageMetricKey = "cluster_storage"
ClusterDocumentsMetricKey = "cluster_documents"
ClusterIndicesMetricKey = "cluster_indices"
ClusterNodeCountMetricKey = "node_count"
ClusterHealthMetricKey = "cluster_health"
ShardCountMetricKey = "shard_count"
CircuitBreakerMetricKey = "circuit_breaker"
)
func (h *APIHandler) GetClusterMetrics(ctx context.Context, id string, bucketSize int, min, max int64, metricKey string) map[string]*common.MetricItem {
var clusterMetricsResult = map[string]*common.MetricItem {}
switch metricKey {
case ClusterDocumentsMetricKey,
ClusterStorageMetricKey,
ClusterIndicesMetricKey,
ClusterNodeCountMetricKey:
clusterMetricsResult = h.getClusterMetricsByKey(ctx, id, bucketSize, min, max, metricKey)
case IndexLatencyMetricKey, IndexThroughputMetricKey, SearchThroughputMetricKey, SearchLatencyMetricKey:
clusterMetricsResult = h.GetClusterIndexMetrics(ctx, id, bucketSize, min, max, metricKey)
case ClusterHealthMetricKey:
statusMetric, err := h.getClusterStatusMetric(ctx, id, min, max, bucketSize)
if err == nil {
clusterMetricsResult[ClusterHealthMetricKey] = statusMetric
} else {
log.Error("get cluster status metric error: ", err)
}
case ShardCountMetricKey:
clusterMetricsResult = h.getShardsMetric(ctx, id, min, max, bucketSize)
case CircuitBreakerMetricKey:
clusterMetricsResult = h.getCircuitBreakerMetric(ctx, id, min, max, bucketSize)
}
return clusterMetricsResult
}
func (h *APIHandler) getClusterMetricsByKey(ctx context.Context, id string, bucketSize int, min, max int64, metricKey string) map[string]*common.MetricItem {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
clusterMetricItems := []*common.MetricItem{}
switch metricKey {
case ClusterStorageMetricKey:
metricItem := newMetricItem("cluster_storage", 8, StorageGroupKey)
metricItem.AddAxi("indices_storage", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
metricItem.AddAxi("available_storage", "group2", common.PositionRight, "bytes", "0.[0]", "0.[0]", 5, true)
@ -851,19 +946,20 @@ func (h *APIHandler) GetClusterMetrics(id string, bucketSize int, min, max int64
clusterMetricItems = append(clusterMetricItems, metricItem)
metricItem = newMetricItem("cluster_documents", 4, StorageGroupKey)
case ClusterDocumentsMetricKey:
metricItem := newMetricItem("cluster_documents", 4, StorageGroupKey)
metricItem.AddAxi("count", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
metricItem.AddAxi("deleted", "group2", common.PositionRight, "num", "0,0", "0,0.[00]", 5, false)
metricItem.AddLine("Documents Count", "Documents Count", "", "group1", "payload.elasticsearch.cluster_stats.indices.docs.count", "max", bucketSizeStr, "", "num", "0,0.[00]", "0,0.[00]", false, false)
metricItem.AddLine("Documents Deleted", "Documents Deleted", "", "group2", "payload.elasticsearch.cluster_stats.indices.docs.deleted", "max", bucketSizeStr, "", "num", "0,0.[00]", "0,0.[00]", false, false)
clusterMetricItems = append(clusterMetricItems, metricItem)
metricItem = newMetricItem("cluster_indices", 6, StorageGroupKey)
case ClusterIndicesMetricKey:
metricItem := newMetricItem("cluster_indices", 6, StorageGroupKey)
metricItem.AddAxi("count", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
metricItem.AddLine("Indices Count", "Indices Count", "", "group1", "payload.elasticsearch.cluster_stats.indices.count", "max", bucketSizeStr, "", "num", "0,0.[00]", "0,0.[00]", false, false)
clusterMetricItems = append(clusterMetricItems, metricItem)
metricItem = newMetricItem("node_count", 5, MemoryGroupKey)
case ClusterNodeCountMetricKey:
metricItem := newMetricItem("node_count", 5, MemoryGroupKey)
metricItem.AddAxi("count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
meta := elastic.GetMetadata(id)
if meta == nil {
@ -873,8 +969,6 @@ func (h *APIHandler) GetClusterMetrics(id string, bucketSize int, min, max int64
majorVersion := meta.GetMajorVersion()
metricItem.AddLine("Total", "Total Nodes", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.total", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
//TODO check version difference
if majorVersion < 5 {
metricItem.AddLine("Master Only", "Master Only", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.master_only", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
metricItem.AddLine("Data Node", "Data Only", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.data_only", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
@ -887,6 +981,8 @@ func (h *APIHandler) GetClusterMetrics(id string, bucketSize int, min, max int64
}
clusterMetricItems = append(clusterMetricItems, metricItem)
}
query := map[string]interface{}{}
query["query"] = util.MapStr{
"bool": util.MapStr{
@ -925,46 +1021,34 @@ func (h *APIHandler) GetClusterMetrics(id string, bucketSize int, min, max int64
},
},
}
//todo: since there is four queries, we can change these query to async
indexMetricsResult := h.GetClusterIndexMetrics(id, bucketSize, min, max)
clusterMetricsResult := h.getSingleMetrics(clusterMetricItems, query, bucketSize)
for k, v := range clusterMetricsResult {
indexMetricsResult[k] = v
}
statusMetric, err := h.getClusterStatusMetric(id, min, max, bucketSize)
if err == nil {
indexMetricsResult["cluster_health"] = statusMetric
} else {
log.Error("get cluster status metric error: ", err)
}
clusterHealthMetricsResult := h.getShardsMetric(id, min, max, bucketSize)
for k, v := range clusterHealthMetricsResult {
indexMetricsResult[k] = v
}
// get CircuitBreaker metric
circuitBreakerMetricsResult := h.getCircuitBreakerMetric(id, min, max, bucketSize)
for k, v := range circuitBreakerMetricsResult {
indexMetricsResult[k] = v
}
return indexMetricsResult
return h.getSingleMetrics(ctx, clusterMetricItems, query, bucketSize)
}
func (h *APIHandler) GetClusterIndexMetrics(id string, bucketSize int, min, max int64) map[string]*common.MetricItem {
const (
IndexThroughputMetricKey = "index_throughput"
SearchThroughputMetricKey = "search_throughput"
IndexLatencyMetricKey = "index_latency"
SearchLatencyMetricKey = "search_latency"
)
func (h *APIHandler) GetClusterIndexMetrics(ctx context.Context, id string, bucketSize int, min, max int64, metricKey string) map[string]*common.MetricItem {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
metricItems := []*common.MetricItem{}
metricItem := newMetricItem("index_throughput", 2, OperationGroupKey)
switch metricKey {
case IndexThroughputMetricKey:
metricItem := newMetricItem(IndexThroughputMetricKey, 2, OperationGroupKey)
metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
metricItem.AddLine("Indexing Rate", "Total Indexing", "Number of documents being indexed for primary and replica shards.", "group1", "payload.elasticsearch.node_stats.indices.indexing.index_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItems = append(metricItems, metricItem)
metricItem = newMetricItem("search_throughput", 2, OperationGroupKey)
case SearchThroughputMetricKey:
metricItem := newMetricItem(SearchThroughputMetricKey, 2, OperationGroupKey)
metricItem.AddAxi("searching", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
metricItem.AddLine("Search Rate", "Total Query",
"Number of search requests being executed across primary and replica shards. A single search can run against multiple shards!",
"group1", "payload.elasticsearch.node_stats.indices.search.query_total", "max", bucketSizeStr, "query/s", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItems = append(metricItems, metricItem)
metricItem = newMetricItem("index_latency", 3, LatencyGroupKey)
case IndexLatencyMetricKey:
metricItem := newMetricItem(IndexLatencyMetricKey, 3, LatencyGroupKey)
metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
metricItem.AddLine("Indexing", "Indexing Latency", "Average latency for indexing documents.", "group1", "payload.elasticsearch.node_stats.indices.indexing.index_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
@ -978,8 +1062,8 @@ func (h *APIHandler) GetClusterIndexMetrics(id string, bucketSize int, min, max
return value / value2
}
metricItems = append(metricItems, metricItem)
metricItem = newMetricItem("search_latency", 3, LatencyGroupKey)
case SearchLatencyMetricKey:
metricItem := newMetricItem(SearchLatencyMetricKey, 3, LatencyGroupKey)
metricItem.AddAxi("searching", "group2", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
metricItem.AddLine("Searching", "Query Latency", "Average latency for searching query.", "group2", "payload.elasticsearch.node_stats.indices.search.query_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
@ -998,6 +1082,9 @@ func (h *APIHandler) GetClusterIndexMetrics(id string, bucketSize int, min, max
return value / value2
}
metricItems = append(metricItems, metricItem)
default:
panic("unknown metric key: " + metricKey)
}
query := map[string]interface{}{}
clusterUUID, err := adapter.GetClusterUUID(id)
if err != nil {
@ -1040,10 +1127,10 @@ func (h *APIHandler) GetClusterIndexMetrics(id string, bucketSize int, min, max
},
},
}
return h.getSingleIndexMetricsByNodeStats(metricItems, query, bucketSize)
return h.getSingleIndexMetricsByNodeStats(ctx, metricItems, query, bucketSize)
}
func (h *APIHandler) getShardsMetric(id string, min, max int64, bucketSize int) map[string]*common.MetricItem {
func (h *APIHandler) getShardsMetric(ctx context.Context, id string, min, max int64, bucketSize int) map[string]*common.MetricItem {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
query := util.MapStr{
"query": util.MapStr{
@ -1102,10 +1189,10 @@ func (h *APIHandler) getShardsMetric(id string, min, max int64, bucketSize int)
metricItem.AddLine("Delayed Unassigned Shards", "Delayed Unassigned Shards", "", "group1", "payload.elasticsearch.cluster_health.delayed_unassigned_shards", "max", bucketSizeStr, "", "num", "0,0.[00]", "0,0.[00]", false, false)
var clusterHealthMetrics []*common.MetricItem
clusterHealthMetrics = append(clusterHealthMetrics, metricItem)
return h.getSingleMetrics(clusterHealthMetrics, query, bucketSize)
return h.getSingleMetrics(ctx, clusterHealthMetrics, query, bucketSize)
}
func (h *APIHandler) getCircuitBreakerMetric(id string, min, max int64, bucketSize int) map[string]*common.MetricItem {
func (h *APIHandler) getCircuitBreakerMetric(ctx context.Context, id string, min, max int64, bucketSize int) map[string]*common.MetricItem {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
query := util.MapStr{
"query": util.MapStr{
@ -1163,10 +1250,10 @@ func (h *APIHandler) getCircuitBreakerMetric(id string, min, max int64, bucketSi
metricItem.AddLine("In Flight Requests Breaker Tripped", "In Flight Requests Tripped", "", "group1", "payload.elasticsearch.node_stats.breakers.in_flight_requests.tripped", "sum", bucketSizeStr, "times/s", "num", "0,0.[00]", "0,0.[00]", false, true)
var circuitBreakerMetrics []*common.MetricItem
circuitBreakerMetrics = append(circuitBreakerMetrics, metricItem)
return h.getSingleMetrics(circuitBreakerMetrics, query, bucketSize)
return h.getSingleMetrics(ctx, circuitBreakerMetrics, query, bucketSize)
}
func (h *APIHandler) getClusterStatusMetric(id string, min, max int64, bucketSize int) (*common.MetricItem, error) {
func (h *APIHandler) getClusterStatusMetric(ctx context.Context, id string, min, max int64, bucketSize int) (*common.MetricItem, error) {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
@ -1227,7 +1314,8 @@ func (h *APIHandler) getClusterStatusMetric(id string, min, max int64, bucketSiz
},
},
}
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
queryDSL := util.MustToJSONBytes(query)
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
if err != nil {
log.Error(err)
return nil, err
@ -1244,6 +1332,7 @@ func (h *APIHandler) getClusterStatusMetric(id string, min, max int64, bucketSiz
}
metricItem.Lines[0].Data = metricData
metricItem.Lines[0].Type = common.GraphTypeBar
metricItem.Request = string(queryDSL)
return metricItem, nil
}

View File

@ -24,6 +24,7 @@
package api
import (
"context"
"fmt"
"infini.sh/framework/core/env"
"net/http"
@ -109,9 +110,10 @@ func generateGroupAggs(nodeMetricItems []GroupMetricItem) map[string]interface{}
return aggs
}
func (h *APIHandler) getMetrics(query map[string]interface{}, grpMetricItems []GroupMetricItem, bucketSize int) map[string]*common.MetricItem {
func (h *APIHandler) getMetrics(ctx context.Context, query map[string]interface{}, grpMetricItems []GroupMetricItem, bucketSize int) map[string]*common.MetricItem {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
queryDSL := util.MustToJSONBytes(query)
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(),nil, queryDSL)
if err != nil {
log.Error(err)
panic(err)
@ -214,6 +216,7 @@ func (h *APIHandler) getMetrics(query map[string]interface{}, grpMetricItems []G
line.Data = temp
}
}
metricItem.MetricItem.Request = string(queryDSL)
result[metricItem.Key] = metricItem.MetricItem
}
return result
@ -337,7 +340,7 @@ func GetMetricRangeAndBucketSize(minStr string, maxStr string, bucketSize int, m
}
// 获取单个指标,可以包含多条曲线
func (h *APIHandler) getSingleMetrics(metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int) map[string]*common.MetricItem {
func (h *APIHandler) getSingleMetrics(ctx context.Context, metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int) map[string]*common.MetricItem {
metricData := map[string][][]interface{}{}
aggs := map[string]interface{}{}
@ -396,7 +399,8 @@ func (h *APIHandler) getSingleMetrics(metricItems []*common.MetricItem, query ma
"aggs": aggs,
},
}
response, err := elastic.GetClient(clusterID).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
queryDSL := util.MustToJSONBytes(query)
response, err := elastic.GetClient(clusterID).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
if err != nil {
log.Error(err)
panic(err)
@ -467,6 +471,7 @@ func (h *APIHandler) getSingleMetrics(metricItems []*common.MetricItem, query ma
line.Data = temp
}
}
metricItem.Request = string(queryDSL)
result[metricItem.Key] = metricItem
}
@ -959,7 +964,7 @@ func parseGroupMetricData(buckets []elastic.BucketBase, isPercent bool) ([]inter
return metricData, nil
}
func (h *APIHandler) getSingleIndexMetricsByNodeStats(metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int) map[string]*common.MetricItem {
func (h *APIHandler) getSingleIndexMetricsByNodeStats(ctx context.Context, metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int) map[string]*common.MetricItem {
metricData := map[string][][]interface{}{}
aggs := util.MapStr{}
@ -1041,10 +1046,10 @@ func (h *APIHandler) getSingleIndexMetricsByNodeStats(metricItems []*common.Metr
"aggs": sumAggs,
},
}
return parseSingleIndexMetrics(clusterID, metricItems, query, bucketSize,metricData, metricItemsMap)
return parseSingleIndexMetrics(ctx, clusterID, metricItems, query, bucketSize,metricData, metricItemsMap)
}
func (h *APIHandler) getSingleIndexMetrics(metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int) map[string]*common.MetricItem {
func (h *APIHandler) getSingleIndexMetrics(ctx context.Context, metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int) map[string]*common.MetricItem {
metricData := map[string][][]interface{}{}
aggs := util.MapStr{}
@ -1146,11 +1151,12 @@ func (h *APIHandler) getSingleIndexMetrics(metricItems []*common.MetricItem, que
"aggs": sumAggs,
},
}
return parseSingleIndexMetrics(clusterID, metricItems, query, bucketSize,metricData, metricItemsMap)
return parseSingleIndexMetrics(ctx, clusterID, metricItems, query, bucketSize,metricData, metricItemsMap)
}
func parseSingleIndexMetrics(clusterID string, metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int, metricData map[string][][]interface{}, metricItemsMap map[string]*common.MetricLine) map[string]*common.MetricItem {
response, err := elastic.GetClient(clusterID).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
func parseSingleIndexMetrics(ctx context.Context, clusterID string, metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int, metricData map[string][][]interface{}, metricItemsMap map[string]*common.MetricLine) map[string]*common.MetricItem {
queryDSL := util.MustToJSONBytes(query)
response, err := elastic.GetClient(clusterID).QueryDSL(ctx, getAllMetricsIndex(), nil, util.MustToJSONBytes(query))
if err != nil {
panic(err)
}
@ -1220,6 +1226,7 @@ func parseSingleIndexMetrics(clusterID string, metricItems []*common.MetricItem,
line.Data = temp
}
}
metricItem.Request = string(queryDSL)
result[metricItem.Key] = metricItem
}

View File

@ -24,6 +24,7 @@
package api
import (
"context"
"fmt"
log "github.com/cihub/seelog"
"infini.sh/framework/core/elastic"
@ -36,7 +37,78 @@ import (
"time"
)
func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max int64, nodeName string, top int) (map[string]*common.MetricItem, error){
const (
NodeOSCPUMetricKey = "os_cpu"
NodeProcessCPUMetricKey = "cpu"
OSUsedMemoryMetricKey = "os_used_mem"
OSLoadAverage1mMetricKey = "os_load_average_1m"
OSUsedSwapMetricKey = "os_used_swap"
OpenFileMetricKey = "open_file"
OpenFilePercentMetricKey = "open_file_percent"
TotalDiskMetricKey = "disk"
IndexingRateMetricKey = "indexing_rate"
IndexingBytesMetricKey = "indexing_bytes"
IndexingLatencyMetricKey = "indexing_latency"
QueryRateMetricKey = "query_rate"
QueryLatencyMetricKey = "query_latency"
FetchRateMetricKey = "fetch_rate"
ScrollRateMetricKey = "scroll_rate"
RefreshRateMetricKey = "refresh_rate"
FlushRateMetricKey = "flush_rate"
MergesRateMetricKey = "merges_rate"
FetchLatencyMetricKey = "fetch_latency"
ScrollLatencyMetricKey = "scroll_latency"
MergeLatencyMetricKey = "merge_latency"
RefreshLatencyMetricKey = "refresh_latency"
FlushLatencyMetricKey = "flush_latency"
QueryCacheMetricKey = "query_cache"
RequestCacheMetricKey = "request_cache"
RequestCacheHitMetricKey = "request_cache_hit"
RequestCacheMissMetricKey = "request_cache_miss"
QueryCacheCountMetricKey = "query_cache_count"
QueryCacheMissMetricKey = "query_cache_miss"
QueryCacheHitMetricKey = "query_cache_hit"
FielddataCacheMetricKey = "fielddata_cache"
HttpConnectNumMetricKey = "http_connect_num"
HttpRateMetricKey = "http_rate"
SegmentCountMetricKey = "segment_count"
SegmentMemoryMetricKey = "segment_memory"
SegmentStoredFieldsMemoryMetricKey = "segment_stored_fields_memory"
SegmentTermsMemoryMetricKey = "segment_terms_memory"
SegmentDocValuesMemoryMetricKey = "segment_doc_values_memory"
SegmentIndexWriterMemoryMetricKey = "segment_index_writer_memory"
SegmentTermVectorsMemoryMetricKey = "segment_term_vectors_memory"
DocsCountMetricKey = "docs_count"
DocsDeletedMetricKey = "docs_deleted"
IndexStorageMetricKey = "index_storage"
JVMHeapUsedPercentMetricKey = "jvm_heap_used_percent"
JVMMemYoungUsedMetricKey = "jvm_mem_young_used"
JVMMemYoungPeakUsedMetricKey = "jvm_mem_young_peak_used"
JVMMemOldUsedMetricKey = "jvm_mem_old_used"
JVMMemOldPeakUsedMetricKey = "jvm_mem_old_peak_used"
JVMUsedHeapMetricKey = "jvm_used_heap"
JVMYoungGCRateMetricKey = "jvm_young_gc_rate"
JVMYoungGCLatencyMetricKey = "jvm_young_gc_latency"
JVMOldGCRateMetricKey = "jvm_old_gc_rate"
JVMOldGCLatencyMetricKey = "jvm_old_gc_latency"
TransportTXRateMetricKey = "transport_tx_rate"
TransportRXRateMetricKey = "transport_rx_rate"
TransportTXBytesMetricKey = "transport_tx_bytes"
TransportRXBytesMetricKey = "transport_rx_bytes"
TransportTCPOutboundMetricKey = "transport_outbound_connections"
TotalIOOperationsMetricKey = "total_io_operations"
TotalReadIOOperationsMetricKey = "total_read_io_operations"
TotalWriteIOOperationsMetricKey = "total_write_io_operations"
ScrollOpenContextsMetricKey = "scroll_open_contexts"
ParentBreakerMetricKey = "parent_breaker"
AccountingBreakerMetricKey = "accounting_breaker"
FielddataBreakerMetricKey = "fielddata_breaker"
RequestBreakerMetricKey = "request_breaker"
InFlightRequestsBreakerMetricKey = "in_flight_requests_breaker"
ModelInferenceBreakerMetricKey = "model_inference_breaker"
)
func (h *APIHandler) getNodeMetrics(ctx context.Context, clusterID string, bucketSize int, min, max int64, nodeName string, top int, metricKey string) (map[string]*common.MetricItem, error){
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
clusterUUID, err := adapter.GetClusterUUID(clusterID)
if err != nil {
@ -115,11 +187,12 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
},
},
}
cpuMetric := newMetricItem("cpu", 1, SystemGroupKey)
nodeMetricItems := []GroupMetricItem{}
switch metricKey {
case NodeProcessCPUMetricKey:
cpuMetric := newMetricItem(NodeProcessCPUMetricKey, 1, SystemGroupKey)
cpuMetric.AddAxi("cpu","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
nodeMetricItems := []GroupMetricItem{
{
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "cpu",
Field: "payload.elasticsearch.node_stats.process.cpu.percent",
ID: util.GetUUID(),
@ -127,10 +200,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
MetricItem: cpuMetric,
FormatType: "ratio",
Units: "%",
},
}
osCpuMetric := newMetricItem("os_cpu", 2, SystemGroupKey)
})
case NodeOSCPUMetricKey:
osCpuMetric := newMetricItem(NodeOSCPUMetricKey, 2, SystemGroupKey)
osCpuMetric.AddAxi("OS CPU Percent","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "os_cpu",
@ -141,8 +213,8 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "ratio",
Units: "%",
})
osMemMetric := newMetricItem("os_used_mem", 2, SystemGroupKey)
case OSUsedMemoryMetricKey:
osMemMetric := newMetricItem(OSUsedMemoryMetricKey, 2, SystemGroupKey)
osMemMetric.AddAxi("OS Mem Used Percent","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "os_used_mem",
@ -153,7 +225,8 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "ratio",
Units: "%",
})
osLoadMetric := newMetricItem("os_load_average_1m", 2, SystemGroupKey)
case OSLoadAverage1mMetricKey:
osLoadMetric := newMetricItem(OSLoadAverage1mMetricKey, 2, SystemGroupKey)
osLoadMetric.AddAxi("OS Load 1m Average","group1",common.PositionLeft,"","0.[0]","0.[0]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "os_load_average_1m",
@ -164,8 +237,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "",
})
case OSUsedSwapMetricKey:
//swap usage
osSwapMetric := newMetricItem("os_used_swap", 3, SystemGroupKey)
osSwapMetric := newMetricItem(OSUsedSwapMetricKey, 3, SystemGroupKey)
osSwapMetric.AddAxi("OS Swap Used Percent","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "os_used_swap",
@ -180,7 +254,8 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "ratio",
Units: "%",
})
openFileMetric := newMetricItem("open_file", 2, SystemGroupKey)
case OpenFileMetricKey:
openFileMetric := newMetricItem(OpenFileMetricKey, 2, SystemGroupKey)
openFileMetric.AddAxi("Open File Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "open_file",
@ -191,7 +266,8 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "",
})
openFilePercentMetric := newMetricItem("open_file_percent", 2, SystemGroupKey)
case OpenFilePercentMetricKey:
openFilePercentMetric := newMetricItem(OpenFilePercentMetricKey, 2, SystemGroupKey)
openFilePercentMetric.AddAxi("Open File Percent","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "open_file_percent",
@ -209,8 +285,8 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "ratio",
Units: "%",
})
diskMetric := newMetricItem("disk", 2, SystemGroupKey)
case TotalDiskMetricKey:
diskMetric := newMetricItem(TotalDiskMetricKey, 2, SystemGroupKey)
diskMetric.AddAxi("disk available percent","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
@ -226,8 +302,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
return util.ToFixed((value2 / value)*100, 2)
},
})
case IndexingRateMetricKey:
// 索引速率
indexMetric:=newMetricItem("indexing_rate", 1, OperationGroupKey)
indexMetric:=newMetricItem(IndexingRateMetricKey, 1, OperationGroupKey)
indexMetric.AddAxi("indexing rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "indexing_rate",
@ -238,8 +315,8 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "doc/s",
})
indexingBytesMetric := newMetricItem("indexing_bytes", 2, OperationGroupKey)
case IndexingBytesMetricKey:
indexingBytesMetric := newMetricItem(IndexingBytesMetricKey, 2, OperationGroupKey)
indexingBytesMetric.AddAxi("Indexing bytes","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
Key: "indexing_bytes",
@ -250,9 +327,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "bytes",
Units: "bytes/s",
})
case IndexingLatencyMetricKey:
// 索引延时
indexLatencyMetric:=newMetricItem("indexing_latency", 1, LatencyGroupKey)
indexLatencyMetric:=newMetricItem(IndexingLatencyMetricKey, 1, LatencyGroupKey)
indexLatencyMetric.AddAxi("indexing latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "indexing_latency",
@ -267,8 +344,8 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "ms",
})
queryMetric:=newMetricItem("query_rate", 2, OperationGroupKey)
case QueryRateMetricKey:
queryMetric:=newMetricItem(QueryRateMetricKey, 2, OperationGroupKey)
queryMetric.AddAxi("query rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "query_rate",
@ -279,9 +356,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "requests/s",
})
case QueryLatencyMetricKey:
// 查询延时
queryLatencyMetric:=newMetricItem("query_latency", 2, LatencyGroupKey)
queryLatencyMetric:=newMetricItem(QueryLatencyMetricKey, 2, LatencyGroupKey)
queryLatencyMetric.AddAxi("query latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "query_latency",
@ -296,8 +373,8 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "ms",
})
fetchMetric:=newMetricItem("fetch_rate", 3, OperationGroupKey)
case FetchRateMetricKey:
fetchMetric:=newMetricItem(FetchRateMetricKey, 3, OperationGroupKey)
fetchMetric.AddAxi("fetch rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "fetch_rate",
@ -308,7 +385,8 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "requests/s",
})
scrollMetric:=newMetricItem("scroll_rate", 4, OperationGroupKey)
case ScrollRateMetricKey:
scrollMetric:=newMetricItem(ScrollRateMetricKey, 4, OperationGroupKey)
scrollMetric.AddAxi("scroll rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "scroll_rate",
@ -319,8 +397,8 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "requests/s",
})
refreshMetric:=newMetricItem("refresh_rate", 5, OperationGroupKey)
case RefreshRateMetricKey:
refreshMetric:=newMetricItem(RefreshRateMetricKey, 5, OperationGroupKey)
refreshMetric.AddAxi("refresh rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "refresh_rate",
@ -331,7 +409,8 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "requests/s",
})
flushMetric:=newMetricItem("flush_rate", 6, OperationGroupKey)
case FlushRateMetricKey:
flushMetric:=newMetricItem(FlushRateMetricKey, 6, OperationGroupKey)
flushMetric.AddAxi("flush rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "flush_rate",
@ -342,7 +421,8 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "requests/s",
})
mergeMetric:=newMetricItem("merges_rate", 7, OperationGroupKey)
case MergesRateMetricKey:
mergeMetric:=newMetricItem(MergesRateMetricKey, 7, OperationGroupKey)
mergeMetric.AddAxi("merges rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "merges_rate",
@ -353,9 +433,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "requests/s",
})
case FetchLatencyMetricKey:
// fetch延时
fetchLatencyMetric:=newMetricItem("fetch_latency", 3, LatencyGroupKey)
fetchLatencyMetric:=newMetricItem(FetchLatencyMetricKey, 3, LatencyGroupKey)
fetchLatencyMetric.AddAxi("fetch latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "fetch_latency",
@ -370,8 +450,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "ms",
})
case ScrollLatencyMetricKey:
// scroll 延时
scrollLatencyMetric:=newMetricItem("scroll_latency", 4, LatencyGroupKey)
scrollLatencyMetric:=newMetricItem(ScrollLatencyMetricKey, 4, LatencyGroupKey)
scrollLatencyMetric.AddAxi("scroll latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "scroll_latency",
@ -386,9 +467,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "ms",
})
case MergeLatencyMetricKey:
// merge 延时
mergeLatencyMetric:=newMetricItem("merge_latency", 7, LatencyGroupKey)
mergeLatencyMetric:=newMetricItem(MergeLatencyMetricKey, 7, LatencyGroupKey)
mergeLatencyMetric.AddAxi("merge latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "merge_latency",
@ -403,9 +484,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "ms",
})
case RefreshLatencyMetricKey:
// refresh 延时
refreshLatencyMetric:=newMetricItem("refresh_latency", 5, LatencyGroupKey)
refreshLatencyMetric:=newMetricItem(RefreshLatencyMetricKey, 5, LatencyGroupKey)
refreshLatencyMetric.AddAxi("refresh latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "refresh_latency",
@ -420,8 +501,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "ms",
})
case FlushLatencyMetricKey:
// flush 时延
flushLatencyMetric:=newMetricItem("flush_latency", 6, LatencyGroupKey)
flushLatencyMetric:=newMetricItem(FlushLatencyMetricKey, 6, LatencyGroupKey)
flushLatencyMetric.AddAxi("flush latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "flush_latency",
@ -436,8 +518,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "ms",
})
case QueryCacheMetricKey:
// Query Cache 内存占用大小
queryCacheMetric:=newMetricItem("query_cache", 1, CacheGroupKey)
queryCacheMetric:=newMetricItem(QueryCacheMetricKey, 1, CacheGroupKey)
queryCacheMetric.AddAxi("query cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "query_cache",
@ -448,8 +531,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "bytes",
Units: "",
})
case RequestCacheMetricKey:
// Request Cache 内存占用大小
requestCacheMetric:=newMetricItem("request_cache", 2, CacheGroupKey)
requestCacheMetric:=newMetricItem(RequestCacheMetricKey, 2, CacheGroupKey)
requestCacheMetric.AddAxi("request cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "request_cache",
@ -460,8 +544,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "bytes",
Units: "",
})
case RequestCacheHitMetricKey:
// Request Cache Hit
requestCacheHitMetric:=newMetricItem("request_cache_hit", 6, CacheGroupKey)
requestCacheHitMetric:=newMetricItem(RequestCacheHitMetricKey, 6, CacheGroupKey)
requestCacheHitMetric.AddAxi("request cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "request_cache_hit",
@ -472,8 +557,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "hits",
})
case RequestCacheMissMetricKey:
// Request Cache Miss
requestCacheMissMetric:=newMetricItem("request_cache_miss", 8, CacheGroupKey)
requestCacheMissMetric:=newMetricItem(RequestCacheMissMetricKey, 8, CacheGroupKey)
requestCacheMissMetric.AddAxi("request cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "request_cache_miss",
@ -484,8 +570,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "misses",
})
case QueryCacheCountMetricKey:
// Query Cache Count
queryCacheCountMetric:=newMetricItem("query_cache_count", 4, CacheGroupKey)
queryCacheCountMetric:=newMetricItem(QueryCacheCountMetricKey, 4, CacheGroupKey)
queryCacheCountMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "query_cache_count",
@ -496,8 +583,8 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "",
})
// Query Cache Miss
queryCacheHitMetric:=newMetricItem("query_cache_hit", 5, CacheGroupKey)
case QueryCacheHitMetricKey:
queryCacheHitMetric:=newMetricItem(QueryCacheHitMetricKey, 5, CacheGroupKey)
queryCacheHitMetric.AddAxi("query cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "query_cache_hit",
@ -508,22 +595,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "hits",
})
//// Query Cache evictions
//queryCacheEvictionsMetric:=newMetricItem("query_cache_evictions", 5, CacheGroupKey)
//queryCacheEvictionsMetric.AddAxi("query cache evictions","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
//nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
// Key: "query_cache_evictions",
// Field: "payload.elasticsearch.node_stats.indices.query_cache.evictions",
// ID: util.GetUUID(),
// IsDerivative: true,
// MetricItem: queryCacheEvictionsMetric,
// FormatType: "num",
// Units: "evictions",
//})
case QueryCacheMissMetricKey:
// Query Cache Miss
queryCacheMissMetric:=newMetricItem("query_cache_miss", 7, CacheGroupKey)
queryCacheMissMetric:=newMetricItem(QueryCacheMissMetricKey, 7, CacheGroupKey)
queryCacheMissMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "query_cache_miss",
@ -534,9 +608,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "misses",
})
case FielddataCacheMetricKey:
// Fielddata内存占用大小
fieldDataCacheMetric:=newMetricItem("fielddata_cache", 3, CacheGroupKey)
fieldDataCacheMetric:=newMetricItem(FielddataCacheMetricKey, 3, CacheGroupKey)
fieldDataCacheMetric.AddAxi("FieldData Cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "fielddata_cache",
@ -547,9 +621,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "bytes",
Units: "",
})
case HttpConnectNumMetricKey:
// http 活跃连接数
httpActiveMetric:=newMetricItem("http_connect_num", 12, HttpGroupKey)
httpActiveMetric:=newMetricItem(HttpConnectNumMetricKey, 12, HttpGroupKey)
httpActiveMetric.AddAxi("http connect number","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "http_connect_num",
@ -560,8 +634,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "conns",
})
case HttpRateMetricKey:
// http 活跃连接数速率
httpRateMetric:=newMetricItem("http_rate", 12, HttpGroupKey)
httpRateMetric:=newMetricItem(HttpRateMetricKey, 12, HttpGroupKey)
httpRateMetric.AddAxi("http rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "http_rate",
@ -572,9 +647,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "conns/s",
})
case SegmentCountMetricKey:
// segment 数量
segmentCountMetric:=newMetricItem("segment_count", 15, StorageGroupKey)
segmentCountMetric:=newMetricItem(SegmentCountMetricKey, 15, StorageGroupKey)
segmentCountMetric.AddAxi("segment count","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "segment_count",
@ -585,9 +660,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "",
})
case SegmentMemoryMetricKey:
// segment memory
segmentMemoryMetric:=newMetricItem("segment_memory", 16, MemoryGroupKey)
segmentMemoryMetric:=newMetricItem(SegmentMemoryMetricKey, 16, MemoryGroupKey)
segmentMemoryMetric.AddAxi("segment memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "segment_memory",
@ -598,8 +673,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "bytes",
Units: "",
})
case SegmentStoredFieldsMemoryMetricKey:
// segment stored fields memory
segmentStoredFieldsMemoryMetric:=newMetricItem("segment_stored_fields_memory", 16, MemoryGroupKey)
segmentStoredFieldsMemoryMetric:=newMetricItem(SegmentStoredFieldsMemoryMetricKey, 16, MemoryGroupKey)
segmentStoredFieldsMemoryMetric.AddAxi("segment stored fields memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "segment_stored_fields_memory",
@ -610,8 +686,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "bytes",
Units: "",
})
case SegmentTermsMemoryMetricKey:
// segment terms fields memory
segmentTermsMemoryMetric:=newMetricItem("segment_terms_memory", 16, MemoryGroupKey)
segmentTermsMemoryMetric:=newMetricItem(SegmentTermsMemoryMetricKey, 16, MemoryGroupKey)
segmentTermsMemoryMetric.AddAxi("segment terms memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "segment_terms_memory",
@ -622,8 +699,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "bytes",
Units: "",
})
case SegmentDocValuesMemoryMetricKey:
// segment doc values memory
segmentDocValuesMemoryMetric:=newMetricItem("segment_doc_values_memory", 16, MemoryGroupKey)
segmentDocValuesMemoryMetric:=newMetricItem(SegmentDocValuesMemoryMetricKey, 16, MemoryGroupKey)
segmentDocValuesMemoryMetric.AddAxi("segment doc values memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "segment_doc_values_memory",
@ -634,8 +712,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "bytes",
Units: "",
})
case SegmentIndexWriterMemoryMetricKey:
// segment index writer memory
segmentIndexWriterMemoryMetric:=newMetricItem("segment_index_writer_memory", 16, MemoryGroupKey)
segmentIndexWriterMemoryMetric:=newMetricItem(SegmentIndexWriterMemoryMetricKey, 16, MemoryGroupKey)
segmentIndexWriterMemoryMetric.AddAxi("segment doc values memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "segment_index_writer_memory",
@ -646,8 +725,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "bytes",
Units: "",
})
case SegmentTermVectorsMemoryMetricKey:
// segment term vectors memory
segmentTermVectorsMemoryMetric:=newMetricItem("segment_term_vectors_memory", 16, MemoryGroupKey)
segmentTermVectorsMemoryMetric:=newMetricItem(SegmentTermVectorsMemoryMetricKey, 16, MemoryGroupKey)
segmentTermVectorsMemoryMetric.AddAxi("segment term vectors memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "segment_term_vectors_memory",
@ -658,9 +738,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "bytes",
Units: "",
})
case DocsCountMetricKey:
// docs 数量
docsCountMetric:=newMetricItem("docs_count", 17, DocumentGroupKey)
docsCountMetric:=newMetricItem(DocsCountMetricKey, 17, DocumentGroupKey)
docsCountMetric.AddAxi("docs count","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "docs_count",
@ -671,8 +751,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "",
})
case DocsDeletedMetricKey:
// docs 删除数量
docsDeletedMetric:=newMetricItem("docs_deleted", 17, DocumentGroupKey)
docsDeletedMetric:=newMetricItem(DocsDeletedMetricKey, 17, DocumentGroupKey)
docsDeletedMetric.AddAxi("docs deleted","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "docs_deleted",
@ -683,9 +764,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "",
})
case IndexStorageMetricKey:
// index store size
indexStoreMetric:=newMetricItem("index_storage", 18, StorageGroupKey)
indexStoreMetric:=newMetricItem(IndexStorageMetricKey, 18, StorageGroupKey)
indexStoreMetric.AddAxi("indices storage","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "index_storage",
@ -696,9 +777,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "bytes",
Units: "",
})
case JVMHeapUsedPercentMetricKey:
// jvm used heap
jvmUsedPercentMetric:=newMetricItem("jvm_heap_used_percent", 1, JVMGroupKey)
jvmUsedPercentMetric:=newMetricItem(JVMHeapUsedPercentMetricKey, 1, JVMGroupKey)
jvmUsedPercentMetric.AddAxi("JVM heap used percent","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "jvm_heap_used_percent",
@ -709,8 +790,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "%",
})
case JVMMemYoungUsedMetricKey:
//JVM mem Young pools used
youngPoolsUsedMetric:=newMetricItem("jvm_mem_young_used", 2, JVMGroupKey)
youngPoolsUsedMetric:=newMetricItem(JVMMemYoungUsedMetricKey, 2, JVMGroupKey)
youngPoolsUsedMetric.AddAxi("Mem Pools Young Used","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "jvm_mem_young_used",
@ -721,8 +803,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "bytes",
Units: "",
})
case JVMMemYoungPeakUsedMetricKey:
//JVM mem Young pools peak used
youngPoolsUsedPeakMetric:=newMetricItem("jvm_mem_young_peak_used", 2, JVMGroupKey)
youngPoolsUsedPeakMetric:=newMetricItem(JVMMemYoungPeakUsedMetricKey, 2, JVMGroupKey)
youngPoolsUsedPeakMetric.AddAxi("Mem Pools Young Peak Used","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "jvm_mem_young_peak_used",
@ -733,9 +816,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "bytes",
Units: "",
})
case JVMMemOldUsedMetricKey:
//JVM mem old pools used
oldPoolsUsedMetric:=newMetricItem("jvm_mem_old_used", 3, JVMGroupKey)
oldPoolsUsedMetric:=newMetricItem(JVMMemOldUsedMetricKey, 3, JVMGroupKey)
oldPoolsUsedMetric.AddAxi("Mem Pools Old Used","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "jvm_mem_old_used",
@ -746,8 +829,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "bytes",
Units: "",
})
case JVMMemOldPeakUsedMetricKey:
//JVM mem old pools peak used
oldPoolsUsedPeakMetric:=newMetricItem("jvm_mem_old_peak_used", 3, JVMGroupKey)
oldPoolsUsedPeakMetric:=newMetricItem(JVMMemOldPeakUsedMetricKey, 3, JVMGroupKey)
oldPoolsUsedPeakMetric.AddAxi("Mem Pools Old Peak Used","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "jvm_mem_old_peak_used",
@ -758,9 +842,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "bytes",
Units: "",
})
case JVMUsedHeapMetricKey:
//JVM used heap
heapUsedMetric:=newMetricItem("jvm_used_heap", 1, JVMGroupKey)
heapUsedMetric:=newMetricItem(JVMUsedHeapMetricKey, 1, JVMGroupKey)
heapUsedMetric.AddAxi("JVM Used Heap","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "jvm_used_heap",
@ -771,8 +855,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "bytes",
Units: "",
})
case JVMYoungGCRateMetricKey:
//JVM Young GC Rate
gcYoungRateMetric:=newMetricItem("jvm_young_gc_rate", 2, JVMGroupKey)
gcYoungRateMetric:=newMetricItem(JVMYoungGCRateMetricKey, 2, JVMGroupKey)
gcYoungRateMetric.AddAxi("JVM Young GC Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "jvm_young_gc_rate",
@ -783,8 +868,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "times/s",
})
case JVMYoungGCLatencyMetricKey:
//JVM Young GC Latency
gcYoungLatencyMetric:=newMetricItem("jvm_young_gc_latency", 2, JVMGroupKey)
gcYoungLatencyMetric:=newMetricItem(JVMYoungGCLatencyMetricKey, 2, JVMGroupKey)
gcYoungLatencyMetric.AddAxi("JVM Young GC Time","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "jvm_young_gc_latency",
@ -795,9 +881,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "ms",
})
case JVMOldGCRateMetricKey:
//JVM old GC Rate
gcOldRateMetric:=newMetricItem("jvm_old_gc_rate", 3, JVMGroupKey)
gcOldRateMetric:=newMetricItem(JVMOldGCRateMetricKey, 3, JVMGroupKey)
gcOldRateMetric.AddAxi("JVM Old GC Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "jvm_old_gc_rate",
@ -808,8 +894,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "times/s",
})
case JVMOldGCLatencyMetricKey:
//JVM old GC Latency
gcOldLatencyMetric:=newMetricItem("jvm_old_gc_latency", 3, JVMGroupKey)
gcOldLatencyMetric:=newMetricItem(JVMOldGCLatencyMetricKey, 3, JVMGroupKey)
gcOldLatencyMetric.AddAxi("JVM Old GC Time","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "jvm_old_gc_latency",
@ -820,8 +907,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "ms",
})
case TransportTXRateMetricKey:
//Transport 发送速率
transTxRateMetric:=newMetricItem("transport_tx_rate", 19, TransportGroupKey)
transTxRateMetric:=newMetricItem(TransportTXRateMetricKey, 19, TransportGroupKey)
transTxRateMetric.AddAxi("Transport Send Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "transport_tx_rate",
@ -832,8 +920,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "times/s",
})
case TransportRXRateMetricKey:
//Transport 接收速率
transRxRateMetric:=newMetricItem("transport_rx_rate", 19, TransportGroupKey)
transRxRateMetric:=newMetricItem(TransportRXRateMetricKey, 19, TransportGroupKey)
transRxRateMetric.AddAxi("Transport Receive Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "transport_rx_rate",
@ -844,9 +933,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "times/s",
})
case TransportTXBytesMetricKey:
//Transport 发送流量
transTxBytesMetric:=newMetricItem("transport_tx_bytes", 19, TransportGroupKey)
transTxBytesMetric:=newMetricItem(TransportTXBytesMetricKey, 19, TransportGroupKey)
transTxBytesMetric.AddAxi("Transport Send Bytes","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "transport_tx_bytes",
@ -857,8 +946,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "bytes",
Units: "s",
})
case TransportRXBytesMetricKey:
//Transport 接收流量
transRxBytesMetric:=newMetricItem("transport_rx_bytes", 19, TransportGroupKey)
transRxBytesMetric:=newMetricItem(TransportRXBytesMetricKey, 19, TransportGroupKey)
transRxBytesMetric.AddAxi("Transport Receive Bytes","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "transport_rx_bytes",
@ -869,9 +959,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "bytes",
Units: "s",
})
case TransportTCPOutboundMetricKey:
//Transport tcp 连接数
tcpNumMetric:=newMetricItem("transport_outbound_connections", 20, TransportGroupKey)
tcpNumMetric:=newMetricItem(TransportTCPOutboundMetricKey, 20, TransportGroupKey)
tcpNumMetric.AddAxi("Transport Outbound Connections","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "transport_outbound_connections",
@ -882,9 +972,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "",
})
case TotalIOOperationsMetricKey:
//IO total
totalOperationsMetric:=newMetricItem("total_io_operations", 1, IOGroupKey)
totalOperationsMetric:=newMetricItem(TotalIOOperationsMetricKey, 1, IOGroupKey)
totalOperationsMetric.AddAxi("Total I/O Operations Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "total_io_operations",
@ -895,9 +985,8 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "times/s",
})
//IO total
readOperationsMetric:=newMetricItem("total_read_io_operations", 2, IOGroupKey)
case TotalReadIOOperationsMetricKey:
readOperationsMetric:=newMetricItem(TotalReadIOOperationsMetricKey, 2, IOGroupKey)
readOperationsMetric.AddAxi("Total Read I/O Operations Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "total_read_io_operations",
@ -908,9 +997,8 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "times/s",
})
//IO total
writeOperationsMetric:=newMetricItem("total_write_io_operations", 3, IOGroupKey)
case TotalWriteIOOperationsMetricKey:
writeOperationsMetric:=newMetricItem(TotalWriteIOOperationsMetricKey, 3, IOGroupKey)
writeOperationsMetric.AddAxi("Total Write I/O Operations Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "total_write_io_operations",
@ -921,9 +1009,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "times/s",
})
case ScrollOpenContextsMetricKey:
//scroll context
openContextMetric:=newMetricItem("scroll_open_contexts", 7, OperationGroupKey)
openContextMetric:=newMetricItem(ScrollOpenContextsMetricKey, 7, OperationGroupKey)
openContextMetric.AddAxi("Scroll Open Contexts","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
Key: "scroll_open_contexts",
@ -933,9 +1021,9 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "",
})
case ParentBreakerMetricKey:
// Circuit Breaker
parentBreakerMetric := newMetricItem("parent_breaker", 1, CircuitBreakerGroupKey)
parentBreakerMetric := newMetricItem(ParentBreakerMetricKey, 1, CircuitBreakerGroupKey)
parentBreakerMetric.AddAxi("Parent Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
Key: "parent_breaker",
@ -946,7 +1034,8 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "times/s",
})
accountingBreakerMetric := newMetricItem("accounting_breaker", 2, CircuitBreakerGroupKey)
case AccountingBreakerMetricKey:
accountingBreakerMetric := newMetricItem(AccountingBreakerMetricKey, 2, CircuitBreakerGroupKey)
accountingBreakerMetric.AddAxi("Accounting Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
Key: "accounting_breaker",
@ -957,7 +1046,8 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "times/s",
})
fielddataBreakerMetric := newMetricItem("fielddata_breaker", 3, CircuitBreakerGroupKey)
case FielddataBreakerMetricKey:
fielddataBreakerMetric := newMetricItem(FielddataBreakerMetricKey, 3, CircuitBreakerGroupKey)
fielddataBreakerMetric.AddAxi("Fielddata Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
Key: "fielddata_breaker",
@ -968,7 +1058,8 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "times/s",
})
requestBreakerMetric := newMetricItem("request_breaker", 4, CircuitBreakerGroupKey)
case RequestBreakerMetricKey:
requestBreakerMetric := newMetricItem(RequestBreakerMetricKey, 4, CircuitBreakerGroupKey)
requestBreakerMetric.AddAxi("Request Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
Key: "request_breaker",
@ -979,7 +1070,8 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "times/s",
})
inFlightRequestBreakerMetric := newMetricItem("in_flight_requests_breaker", 5, CircuitBreakerGroupKey)
case InFlightRequestsBreakerMetricKey:
inFlightRequestBreakerMetric := newMetricItem(InFlightRequestsBreakerMetricKey, 5, CircuitBreakerGroupKey)
inFlightRequestBreakerMetric.AddAxi("In Flight Requests Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
Key: "in_flight_requests_breaker",
@ -990,7 +1082,8 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "times/s",
})
modelInferenceBreakerMetric := newMetricItem("model_inference_breaker", 6, CircuitBreakerGroupKey)
case ModelInferenceBreakerMetricKey:
modelInferenceBreakerMetric := newMetricItem(ModelInferenceBreakerMetricKey, 6, CircuitBreakerGroupKey)
modelInferenceBreakerMetric.AddAxi("Model Inference Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
Key: "model_inference_breaker",
@ -1001,6 +1094,7 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
FormatType: "num",
Units: "times/s",
})
}
aggs := generateGroupAggs(nodeMetricItems)
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
@ -1027,7 +1121,7 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
},
},
}
return h.getMetrics(query, nodeMetricItems, bucketSize), nil
return h.getMetrics(ctx, query, nodeMetricItems, bucketSize), nil
}

View File

@ -28,6 +28,7 @@
package api
import (
"context"
"fmt"
log "github.com/cihub/seelog"
httprouter "infini.sh/framework/core/api/router"
@ -410,7 +411,7 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
},
},
}
metrics := h.getMetrics(query, nodeMetricItems, bucketSize)
metrics := h.getMetrics(context.Background(), query, nodeMetricItems, bucketSize)
indexMetrics := map[string]util.MapStr{}
for key, item := range metrics {
for _, line := range item.Lines {
@ -562,6 +563,12 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht
h.WriteJSON(w, kvs, http.StatusOK)
}
// Metric keys accepted by the single-node metrics API via the `key`
// query parameter, each selecting one metric to compute per request.
const (
// NodeCPUJVMMetricKey selects the JVM heap metric (max heap / used heap).
NodeCPUJVMMetricKey = "jvm"
// NodeHealthMetricKey selects the node health metric (handled separately
// from the generic metric-item path; see the metricKey dispatch in the handler).
NodeHealthMetricKey = "node_health"
// ShardStateMetricKey selects the shard state metric (also handled via a
// dedicated query rather than the generic metric-item path).
ShardStateMetricKey = "shard_state"
)
func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
clusterID := ps.MustGetParameter("id")
clusterUUID, err := adapter.GetClusterUUID(clusterID)
@ -628,69 +635,26 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
metricItems:=[]*common.MetricItem{}
metricItem:=newMetricItem("cpu", 1, SystemGroupKey)
metricItem.AddAxi("cpu","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
metricItem.AddLine("Process CPU","Process CPU","process cpu used percent of node.","group1","payload.elasticsearch.node_stats.process.cpu.percent","max",bucketSizeStr,"%","num","0,0.[00]","0,0.[00]",false,false)
metricItem.AddLine("OS CPU","OS CPU","process cpu used percent of node.","group1","payload.elasticsearch.node_stats.os.cpu.percent","max",bucketSizeStr,"%","num","0,0.[00]","0,0.[00]",false,false)
metricItems=append(metricItems,metricItem)
metricItem =newMetricItem("jvm", 2, SystemGroupKey)
metricItem.AddAxi("JVM Heap","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
metricItem.AddLine("Max Heap","Max Heap","JVM max Heap of node.","group1","payload.elasticsearch.node_stats.jvm.mem.heap_max_in_bytes","max",bucketSizeStr,"","bytes","0,0.[00]","0,0.[00]",false,false)
metricItem.AddLine("Used Heap","Used Heap","JVM used Heap of node.","group1","payload.elasticsearch.node_stats.jvm.mem.heap_used_in_bytes","max",bucketSizeStr,"","bytes","0,0.[00]","0,0.[00]",false,false)
metricItems=append(metricItems,metricItem)
metricItem=newMetricItem("index_throughput", 3, OperationGroupKey)
metricItem.AddAxi("indexing","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
metricItem.AddLine("Indexing Rate","Total Shards","Number of documents being indexed for node.","group1","payload.elasticsearch.node_stats.indices.indexing.index_total","max",bucketSizeStr,"doc/s","num","0,0.[00]","0,0.[00]",false,true)
metricItems=append(metricItems,metricItem)
metricItem=newMetricItem("search_throughput", 4, OperationGroupKey)
metricItem.AddAxi("searching","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,false)
metricItem.AddLine("Search Rate","Total Shards",
"Number of search requests being executed.",
"group1","payload.elasticsearch.node_stats.indices.search.query_total","max",bucketSizeStr,"query/s","num","0,0.[00]","0,0.[00]",false,true)
metricItems=append(metricItems,metricItem)
metricItem=newMetricItem("index_latency", 5, LatencyGroupKey)
metricItem.AddAxi("indexing","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
metricItem.AddLine("Indexing","Indexing Latency","Average latency for indexing documents.","group1","payload.elasticsearch.node_stats.indices.indexing.index_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.node_stats.indices.indexing.index_total"
metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
}
metricItem.AddLine("Indexing","Delete Latency","Average latency for delete documents.","group1","payload.elasticsearch.node_stats.indices.indexing.delete_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.node_stats.indices.indexing.delete_total"
metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
}
metricItems=append(metricItems,metricItem)
metricItem=newMetricItem("search_latency", 6, LatencyGroupKey)
metricItem.AddAxi("searching","group2",common.PositionLeft,"num","0,0","0,0.[00]",5,false)
metricItem.AddLine("Searching","Query Latency","Average latency for searching query.","group2","payload.elasticsearch.node_stats.indices.search.query_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.query_total"
metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
}
metricItem.AddLine("Searching","Fetch Latency","Average latency for searching fetch.","group2","payload.elasticsearch.node_stats.indices.search.fetch_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.fetch_total"
metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
}
metricItem.AddLine("Searching","Scroll Latency","Average latency for searching fetch.","group2","payload.elasticsearch.node_stats.indices.search.scroll_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.Lines[2].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.scroll_total"
metricItem.Lines[2].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
}
metricItems=append(metricItems,metricItem)
metricItem =newMetricItem("parent_breaker", 8, SystemGroupKey)
metricItem.AddLine("Parent Breaker Tripped","Parent Breaker Tripped","Rate of the circuit breaker has been triggered and prevented an out of memory error.","group1","payload.elasticsearch.node_stats.breakers.parent.tripped","max",bucketSizeStr,"times/s","num","0,0.[00]","0,0.[00]",false,true)
metricItems=append(metricItems,metricItem)
metrics := h.getSingleMetrics(metricItems,query, bucketSize)
healthMetric, err := getNodeHealthMetric(query, bucketSize)
metricKey := h.GetParameter(req, "key")
timeout := h.GetParameterOrDefault(req, "timeout", "60s")
du, err := time.ParseDuration(timeout)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
ctx, cancel := context.WithTimeout(context.Background(), du)
defer cancel()
metrics := map[string]*common.MetricItem{}
if metricKey == NodeHealthMetricKey {
healthMetric, err := getNodeHealthMetric(ctx, query, bucketSize)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
metrics["node_health"] = healthMetric
}else if metricKey == ShardStateMetricKey {
query = util.MapStr{
"size": 0,
"query": util.MapStr{
@ -738,17 +702,88 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
},
},
}
shardStateMetric, err := getNodeShardStateMetric(query, bucketSize)
shardStateMetric, err := getNodeShardStateMetric(ctx, query, bucketSize)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
metrics["node_health"] = healthMetric
metrics["shard_state"] = shardStateMetric
}else{
switch metricKey {
case NodeProcessCPUMetricKey:
metricItem:=newMetricItem("cpu", 1, SystemGroupKey)
metricItem.AddAxi("cpu","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
metricItem.AddLine("Process CPU","Process CPU","process cpu used percent of node.","group1","payload.elasticsearch.node_stats.process.cpu.percent","max",bucketSizeStr,"%","num","0,0.[00]","0,0.[00]",false,false)
metricItem.AddLine("OS CPU","OS CPU","process cpu used percent of node.","group1","payload.elasticsearch.node_stats.os.cpu.percent","max",bucketSizeStr,"%","num","0,0.[00]","0,0.[00]",false,false)
metricItems=append(metricItems,metricItem)
case NodeCPUJVMMetricKey:
metricItem := newMetricItem("jvm", 2, SystemGroupKey)
metricItem.AddAxi("JVM Heap","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
metricItem.AddLine("Max Heap","Max Heap","JVM max Heap of node.","group1","payload.elasticsearch.node_stats.jvm.mem.heap_max_in_bytes","max",bucketSizeStr,"","bytes","0,0.[00]","0,0.[00]",false,false)
metricItem.AddLine("Used Heap","Used Heap","JVM used Heap of node.","group1","payload.elasticsearch.node_stats.jvm.mem.heap_used_in_bytes","max",bucketSizeStr,"","bytes","0,0.[00]","0,0.[00]",false,false)
metricItems=append(metricItems,metricItem)
case IndexThroughputMetricKey:
metricItem := newMetricItem("index_throughput", 3, OperationGroupKey)
metricItem.AddAxi("indexing","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
metricItem.AddLine("Indexing Rate","Total Shards","Number of documents being indexed for node.","group1","payload.elasticsearch.node_stats.indices.indexing.index_total","max",bucketSizeStr,"doc/s","num","0,0.[00]","0,0.[00]",false,true)
metricItems=append(metricItems,metricItem)
case SearchThroughputMetricKey:
metricItem := newMetricItem("search_throughput", 4, OperationGroupKey)
metricItem.AddAxi("searching","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,false)
metricItem.AddLine("Search Rate","Total Shards",
"Number of search requests being executed.",
"group1","payload.elasticsearch.node_stats.indices.search.query_total","max",bucketSizeStr,"query/s","num","0,0.[00]","0,0.[00]",false,true)
metricItems=append(metricItems,metricItem)
case IndexLatencyMetricKey:
metricItem := newMetricItem("index_latency", 5, LatencyGroupKey)
metricItem.AddAxi("indexing","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
metricItem.AddLine("Indexing","Indexing Latency","Average latency for indexing documents.","group1","payload.elasticsearch.node_stats.indices.indexing.index_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.node_stats.indices.indexing.index_total"
metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
}
metricItem.AddLine("Indexing","Delete Latency","Average latency for delete documents.","group1","payload.elasticsearch.node_stats.indices.indexing.delete_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.node_stats.indices.indexing.delete_total"
metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
}
metricItems=append(metricItems,metricItem)
case SearchLatencyMetricKey:
metricItem := newMetricItem("search_latency", 6, LatencyGroupKey)
metricItem.AddAxi("searching","group2",common.PositionLeft,"num","0,0","0,0.[00]",5,false)
metricItem.AddLine("Searching","Query Latency","Average latency for searching query.","group2","payload.elasticsearch.node_stats.indices.search.query_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.query_total"
metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
}
metricItem.AddLine("Searching","Fetch Latency","Average latency for searching fetch.","group2","payload.elasticsearch.node_stats.indices.search.fetch_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.fetch_total"
metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
}
metricItem.AddLine("Searching","Scroll Latency","Average latency for searching fetch.","group2","payload.elasticsearch.node_stats.indices.search.scroll_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.Lines[2].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.scroll_total"
metricItem.Lines[2].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
}
metricItems=append(metricItems,metricItem)
case ParentBreakerMetricKey:
metricItem := newMetricItem("parent_breaker", 8, SystemGroupKey)
metricItem.AddLine("Parent Breaker Tripped","Parent Breaker Tripped","Rate of the circuit breaker has been triggered and prevented an out of memory error.","group1","payload.elasticsearch.node_stats.breakers.parent.tripped","max",bucketSizeStr,"times/s","num","0,0.[00]","0,0.[00]",false,true)
metricItems=append(metricItems,metricItem)
}
metrics = h.getSingleMetrics(ctx, metricItems,query, bucketSize)
}
resBody["metrics"] = metrics
h.WriteJSON(w, resBody, http.StatusOK)
}
func getNodeShardStateMetric(query util.MapStr, bucketSize int)(*common.MetricItem, error){
func getNodeShardStateMetric(ctx context.Context, query util.MapStr, bucketSize int)(*common.MetricItem, error){
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
@ -771,7 +806,8 @@ func getNodeShardStateMetric(query util.MapStr, bucketSize int)(*common.MetricIt
},
},
}
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
queryDSL := util.MustToJSONBytes(query)
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
if err != nil {
log.Error(err)
return nil, err
@ -789,10 +825,11 @@ func getNodeShardStateMetric(query util.MapStr, bucketSize int)(*common.MetricIt
}
metricItem.Lines[0].Data = metricData
metricItem.Lines[0].Type = common.GraphTypeBar
metricItem.Request = string(queryDSL)
return metricItem, nil
}
func getNodeHealthMetric(query util.MapStr, bucketSize int)(*common.MetricItem, error){
func getNodeHealthMetric(ctx context.Context, query util.MapStr, bucketSize int)(*common.MetricItem, error){
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
@ -813,7 +850,8 @@ func getNodeHealthMetric(query util.MapStr, bucketSize int)(*common.MetricItem,
},
},
}
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
queryDSL := util.MustToJSONBytes(query)
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
if err != nil {
log.Error(err)
return nil, err
@ -844,6 +882,7 @@ func getNodeHealthMetric(query util.MapStr, bucketSize int)(*common.MetricItem,
}
}
}
metricItem.Request = string(queryDSL)
metricItem.Lines[0].Data = metricData
metricItem.Lines[0].Type = common.GraphTypeBar
return metricItem, nil

View File

@ -24,6 +24,7 @@
package api
import (
"context"
"fmt"
log "github.com/cihub/seelog"
"infini.sh/framework/core/elastic"
@ -45,7 +46,42 @@ const (
ThreadPoolBulkGroupKey = "thread_pool_bulk"
)
func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min, max int64, nodeName string, top int) (map[string]*common.MetricItem, error){
// Metric keys accepted by the thread-pool metrics API via the `key` query
// parameter. Each key selects a single thread-pool statistic — threads,
// queue, active, or rejected — for one pool (search/index/bulk/flush/
// refresh/write/get/force_merge); the handler dispatches on these in a
// switch to build exactly one metric item per request.
const (
SearchThreadsMetricKey = "search_threads"
IndexThreadsMetricKey = "index_threads"
BulkThreadsMetricKey = "bulk_threads"
FlushThreadsMetricKey = "flush_threads"
RefreshThreadsMetricKey = "refresh_threads"
WriteThreadsMetricKey = "write_threads"
ForceMergeThreadsMetricKey = "force_merge_threads"
// Queue depth per pool.
SearchQueueMetricKey = "search_queue"
IndexQueueMetricKey = "index_queue"
BulkQueueMetricKey = "bulk_queue"
FlushQueueMetricKey = "flush_queue"
RefreshQueueMetricKey = "refresh_queue"
WriteQueueMetricKey = "write_queue"
// Active thread count per pool.
SearchActiveMetricKey = "search_active"
IndexActiveMetricKey = "index_active"
BulkActiveMetricKey = "bulk_active"
FlushActiveMetricKey = "flush_active"
WriteActiveMetricKey = "write_active"
ForceMergeActiveMetricKey = "force_merge_active"
// Rejected task rate per pool (reported as rejected/s).
SearchRejectedMetricKey = "search_rejected"
IndexRejectedMetricKey = "index_rejected"
BulkRejectedMetricKey = "bulk_rejected"
FlushRejectedMetricKey = "flush_rejected"
WriteRejectedMetricKey = "write_rejected"
ForceMergeRejectedMetricKey = "force_merge_rejected"
// Get pool metrics.
GetThreadsMetricKey = "get_threads"
GetQueueMetricKey = "get_queue"
GetActiveMetricKey = "get_active"
GetRejectedMetricKey = "get_rejected"
// Remaining refresh/force_merge metrics.
RefreshActiveMetricKey = "refresh_active"
RefreshRejectedMetricKey = "refresh_rejected"
ForceMergeQueueMetricKey = "force_merge_queue"
)
func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string, bucketSize int, min, max int64, nodeName string, top int, metricKey string) (map[string]*common.MetricItem, error){
clusterUUID, err := adapter.GetClusterUUID(clusterID)
if err != nil {
return nil, err
@ -123,11 +159,12 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
},
},
}
searchThreadsMetric := newMetricItem("search_threads", 1, ThreadPoolSearchGroupKey)
queueMetricItems := []GroupMetricItem{}
switch metricKey {
case SearchThreadsMetricKey:
searchThreadsMetric := newMetricItem(SearchThreadsMetricKey, 1, ThreadPoolSearchGroupKey)
searchThreadsMetric.AddAxi("Search Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems := []GroupMetricItem{
{
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "search_threads",
Field: "payload.elasticsearch.node_stats.thread_pool.search.threads",
ID: util.GetUUID(),
@ -135,9 +172,9 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
MetricItem: searchThreadsMetric,
FormatType: "num",
Units: "",
},
}
searchQueueMetric := newMetricItem("search_queue", 1, ThreadPoolSearchGroupKey)
})
case SearchQueueMetricKey:
searchQueueMetric := newMetricItem(SearchQueueMetricKey, 1, ThreadPoolSearchGroupKey)
searchQueueMetric.AddAxi("Search Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -149,7 +186,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
searchActiveMetric := newMetricItem("search_active", 1, ThreadPoolSearchGroupKey)
case SearchActiveMetricKey:
searchActiveMetric := newMetricItem(SearchActiveMetricKey, 1, ThreadPoolSearchGroupKey)
searchActiveMetric.AddAxi("Search Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -161,7 +199,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
searchRejectedMetric := newMetricItem("search_rejected", 1, ThreadPoolSearchGroupKey)
case SearchRejectedMetricKey:
searchRejectedMetric := newMetricItem(SearchRejectedMetricKey, 1, ThreadPoolSearchGroupKey)
searchRejectedMetric.AddAxi("Search Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -173,8 +212,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "rejected/s",
})
getThreadsMetric := newMetricItem("get_threads", 1, ThreadPoolGetGroupKey)
case GetThreadsMetricKey:
getThreadsMetric := newMetricItem(GetThreadsMetricKey, 1, ThreadPoolGetGroupKey)
getThreadsMetric.AddAxi("Get Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -186,7 +225,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
getQueueMetric := newMetricItem("get_queue", 1, ThreadPoolGetGroupKey)
case GetQueueMetricKey:
getQueueMetric := newMetricItem(GetQueueMetricKey, 1, ThreadPoolGetGroupKey)
getQueueMetric.AddAxi("Get Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -198,7 +238,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
getActiveMetric := newMetricItem("get_active", 1, ThreadPoolGetGroupKey)
case GetActiveMetricKey:
getActiveMetric := newMetricItem(GetActiveMetricKey, 1, ThreadPoolGetGroupKey)
getActiveMetric.AddAxi("Get Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -210,7 +251,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
getRejectedMetric := newMetricItem("get_rejected", 1, ThreadPoolGetGroupKey)
case GetRejectedMetricKey:
getRejectedMetric := newMetricItem(GetRejectedMetricKey, 1, ThreadPoolGetGroupKey)
getRejectedMetric.AddAxi("Get Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -222,8 +264,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "rejected/s",
})
flushThreadsMetric := newMetricItem("flush_threads", 1, ThreadPoolFlushGroupKey)
case FlushThreadsMetricKey:
flushThreadsMetric := newMetricItem(FlushThreadsMetricKey, 1, ThreadPoolFlushGroupKey)
flushThreadsMetric.AddAxi("Flush Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -235,7 +277,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
flushQueueMetric := newMetricItem("flush_queue", 1, ThreadPoolFlushGroupKey)
case FlushQueueMetricKey:
flushQueueMetric := newMetricItem(FlushQueueMetricKey, 1, ThreadPoolFlushGroupKey)
flushQueueMetric.AddAxi("Get Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -247,7 +290,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
flushActiveMetric := newMetricItem("flush_active", 1, ThreadPoolFlushGroupKey)
case FlushActiveMetricKey:
flushActiveMetric := newMetricItem(FlushActiveMetricKey, 1, ThreadPoolFlushGroupKey)
flushActiveMetric.AddAxi("Flush Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -259,7 +303,9 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
flushRejectedMetric := newMetricItem("flush_rejected", 1, ThreadPoolFlushGroupKey)
case FlushRejectedMetricKey:
flushRejectedMetric := newMetricItem(FlushRejectedMetricKey, 1, ThreadPoolFlushGroupKey)
flushRejectedMetric.AddAxi("Flush Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -271,12 +317,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "rejected/s",
})
majorVersion := elastic.GetMetadata(clusterID).GetMajorVersion()
ver := elastic.GetClient(clusterID).GetVersion()
if (ver.Distribution == "" || ver.Distribution == elastic.Elasticsearch) && majorVersion < 6{
indexThreadsMetric := newMetricItem("index_threads", 1, ThreadPoolIndexGroupKey)
case IndexThreadsMetricKey:
indexThreadsMetric := newMetricItem(IndexThreadsMetricKey, 1, ThreadPoolIndexGroupKey)
indexThreadsMetric.AddAxi("Index Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -288,7 +330,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
indexQueueMetric := newMetricItem("index_queue", 1, ThreadPoolIndexGroupKey)
case IndexQueueMetricKey:
indexQueueMetric := newMetricItem(IndexQueueMetricKey, 1, ThreadPoolIndexGroupKey)
indexQueueMetric.AddAxi("Index Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -300,7 +343,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
indexActiveMetric := newMetricItem("index_active", 1, ThreadPoolIndexGroupKey)
case IndexActiveMetricKey:
indexActiveMetric := newMetricItem(IndexActiveMetricKey, 1, ThreadPoolIndexGroupKey)
indexActiveMetric.AddAxi("Index Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -312,7 +356,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
indexRejectedMetric := newMetricItem("index_rejected", 1, ThreadPoolIndexGroupKey)
case IndexRejectedMetricKey:
indexRejectedMetric := newMetricItem(IndexRejectedMetricKey, 1, ThreadPoolIndexGroupKey)
indexRejectedMetric.AddAxi("Index Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -324,8 +369,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "rejected/s",
})
bulkThreadsMetric := newMetricItem("bulk_threads", 1, ThreadPoolBulkGroupKey)
case BulkThreadsMetricKey:
bulkThreadsMetric := newMetricItem(BulkThreadsMetricKey, 1, ThreadPoolBulkGroupKey)
bulkThreadsMetric.AddAxi("Bulk Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -337,7 +382,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
bulkQueueMetric := newMetricItem("bulk_queue", 1, ThreadPoolBulkGroupKey)
case BulkQueueMetricKey:
bulkQueueMetric := newMetricItem(BulkQueueMetricKey, 1, ThreadPoolBulkGroupKey)
bulkQueueMetric.AddAxi("Bulk Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -349,7 +395,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
bulkActiveMetric := newMetricItem("bulk_active", 1, ThreadPoolBulkGroupKey)
case BulkActiveMetricKey:
bulkActiveMetric := newMetricItem(BulkActiveMetricKey, 1, ThreadPoolBulkGroupKey)
bulkActiveMetric.AddAxi("Bulk Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -361,7 +408,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
bulkRejectedMetric := newMetricItem("bulk_rejected", 1, ThreadPoolBulkGroupKey)
case BulkRejectedMetricKey:
bulkRejectedMetric := newMetricItem(BulkRejectedMetricKey, 1, ThreadPoolBulkGroupKey)
bulkRejectedMetric.AddAxi("Bulk Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -373,8 +421,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "rejected/s",
})
}else {
writeThreadsMetric := newMetricItem("write_threads", 1, ThreadPoolWriteGroupKey)
case WriteThreadsMetricKey:
writeThreadsMetric := newMetricItem(WriteThreadsMetricKey, 1, ThreadPoolWriteGroupKey)
writeThreadsMetric.AddAxi("Write Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -386,7 +434,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
writeQueueMetric := newMetricItem("write_queue", 1, ThreadPoolWriteGroupKey)
case WriteQueueMetricKey:
writeQueueMetric := newMetricItem(WriteQueueMetricKey, 1, ThreadPoolWriteGroupKey)
writeQueueMetric.AddAxi("Write Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -398,7 +447,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
writeActiveMetric := newMetricItem("write_active", 1, ThreadPoolWriteGroupKey)
case WriteActiveMetricKey:
writeActiveMetric := newMetricItem(WriteActiveMetricKey, 1, ThreadPoolWriteGroupKey)
writeActiveMetric.AddAxi("Write Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -410,7 +460,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
writeRejectedMetric := newMetricItem("write_rejected", 1, ThreadPoolWriteGroupKey)
case WriteRejectedMetricKey:
writeRejectedMetric := newMetricItem(WriteRejectedMetricKey, 1, ThreadPoolWriteGroupKey)
writeRejectedMetric.AddAxi("Write Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -422,8 +473,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "rejected/s",
})
}
refreshThreadsMetric := newMetricItem("refresh_threads", 1, ThreadPoolRefreshGroupKey)
case RefreshThreadsMetricKey:
refreshThreadsMetric := newMetricItem(RefreshThreadsMetricKey, 1, ThreadPoolRefreshGroupKey)
refreshThreadsMetric.AddAxi("Refresh Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -435,7 +486,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
refreshQueueMetric := newMetricItem("refresh_queue", 1, ThreadPoolRefreshGroupKey)
case RefreshQueueMetricKey:
refreshQueueMetric := newMetricItem(RefreshQueueMetricKey, 1, ThreadPoolRefreshGroupKey)
refreshQueueMetric.AddAxi("Refresh Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -447,7 +499,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
refreshActiveMetric := newMetricItem("refresh_active", 1, ThreadPoolRefreshGroupKey)
case RefreshActiveMetricKey:
refreshActiveMetric := newMetricItem(RefreshActiveMetricKey, 1, ThreadPoolRefreshGroupKey)
refreshActiveMetric.AddAxi("Refresh Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -459,7 +512,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
refreshRejectedMetric := newMetricItem("refresh_rejected", 1, ThreadPoolRefreshGroupKey)
case RefreshRejectedMetricKey:
refreshRejectedMetric := newMetricItem(RefreshRejectedMetricKey, 1, ThreadPoolRefreshGroupKey)
refreshRejectedMetric.AddAxi("Refresh Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -471,7 +525,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "rejected/s",
})
forceMergeThreadsMetric := newMetricItem("force_merge_threads", 1, ThreadPoolForceMergeGroupKey)
case ForceMergeThreadsMetricKey:
forceMergeThreadsMetric := newMetricItem(ForceMergeThreadsMetricKey, 1, ThreadPoolForceMergeGroupKey)
forceMergeThreadsMetric.AddAxi("Force Merge Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -483,7 +538,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
forceMergeQueueMetric := newMetricItem("force_merge_queue", 1, ThreadPoolForceMergeGroupKey)
case ForceMergeQueueMetricKey:
forceMergeQueueMetric := newMetricItem(ForceMergeQueueMetricKey, 1, ThreadPoolForceMergeGroupKey)
forceMergeQueueMetric.AddAxi("Force Merge Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -495,7 +551,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
forceMergeActiveMetric := newMetricItem("force_merge_active", 1, ThreadPoolForceMergeGroupKey)
case ForceMergeActiveMetricKey:
forceMergeActiveMetric := newMetricItem(ForceMergeActiveMetricKey, 1, ThreadPoolForceMergeGroupKey)
forceMergeActiveMetric.AddAxi("Force Merge Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -507,7 +564,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
forceMergeRejectedMetric := newMetricItem("force_merge_rejected", 1, ThreadPoolForceMergeGroupKey)
case ForceMergeRejectedMetricKey:
forceMergeRejectedMetric := newMetricItem(ForceMergeRejectedMetricKey, 1, ThreadPoolForceMergeGroupKey)
forceMergeRejectedMetric.AddAxi("Force Merge Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@ -519,6 +577,9 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "rejected/s",
})
}
//Get Thread Pool queue
aggs:=map[string]interface{}{}
@ -575,5 +636,5 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
},
},
}
return h.getMetrics(query, queueMetricItems, bucketSize), nil
return h.getMetrics(ctx, query, queueMetricItems, bucketSize), nil
}

View File

@ -24,6 +24,7 @@
package v1
import (
"context"
"fmt"
log "github.com/cihub/seelog"
"infini.sh/framework/core/elastic"
@ -37,7 +38,44 @@ import (
"time"
)
func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucketSize int, min, max int64, indexName string, top int) map[string]*common.MetricItem{
const (
IndexStorageMetricKey = "index_storage"
SegmentCountMetricKey = "segment_count"
DocCountMetricKey = "doc_count"
DocsDeletedMetricKey = "docs_deleted"
QueryTimesMetricKey = "query_times"
FetchTimesMetricKey = "fetch_times"
ScrollTimesMetricKey = "scroll_times"
MergeTimesMetricKey = "merge_times"
RefreshTimesMetricKey = "refresh_times"
FlushTimesMetricKey = "flush_times"
IndexingRateMetricKey = "indexing_rate"
IndexingBytesMetricKey = "indexing_bytes"
IndexingLatencyMetricKey = "indexing_latency"
QueryLatencyMetricKey = "query_latency"
FetchLatencyMetricKey = "fetch_latency"
MergeLatencyMetricKey = "merge_latency"
RefreshLatencyMetricKey = "refresh_latency"
ScrollLatencyMetricKey = "scroll_latency"
FlushLatencyMetricKey = "flush_latency"
QueryCacheMetricKey = "query_cache"
RequestCacheMetricKey = "request_cache"
RequestCacheHitMetricKey = "request_cache_hit"
RequestCacheMissMetricKey = "request_cache_miss"
QueryCacheCountMetricKey = "query_cache_count"
QueryCacheHitMetricKey = "query_cache_hit"
QueryCacheMissMetricKey = "query_cache_miss"
FielddataCacheMetricKey = "fielddata_cache"
SegmentMemoryMetricKey = "segment_memory"
SegmentDocValuesMemoryMetricKey = "segment_doc_values_memory"
SegmentTermsMemoryMetricKey = "segment_terms_memory"
SegmentFieldsMemoryMetricKey = "segment_fields_memory"
SegmentIndexWriterMemoryMetricKey = "segment_index_writer_memory"
SegmentTermVectorsMemoryMetricKey = "segment_term_vectors_memory"
DocPercentMetricKey = "doc_percent"
)
func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clusterID string, bucketSize int, min, max int64, indexName string, top int, metricKey string) map[string]*common.MetricItem{
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
var must = []util.MapStr{
@ -128,12 +166,13 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
},
},
}
indexMetricItems := []GroupMetricItem{}
switch metricKey {
case IndexStorageMetricKey:
//索引存储大小
indexStorageMetric := newMetricItem("index_storage", 1, StorageGroupKey)
indexStorageMetric.AddAxi("Index storage","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems := []GroupMetricItem{
{
indexStorageMetric := newMetricItem(IndexStorageMetricKey, 1, StorageGroupKey)
indexStorageMetric.AddAxi("Index storage", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "index_storage",
Field: "payload.elasticsearch.index_stats.total.store.size_in_bytes",
ID: util.GetUUID(),
@ -141,12 +180,12 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
MetricItem: indexStorageMetric,
FormatType: "bytes",
Units: "",
},
}
})
case SegmentCountMetricKey:
// segment 数量
segmentCountMetric:=newMetricItem("segment_count", 15, StorageGroupKey)
segmentCountMetric.AddAxi("segment count","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
segmentCountMetric := newMetricItem(SegmentCountMetricKey, 15, StorageGroupKey)
segmentCountMetric.AddAxi("segment count", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "segment_count",
Field: "payload.elasticsearch.index_stats.total.segments.count",
ID: util.GetUUID(),
@ -155,9 +194,10 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "",
})
case DocCountMetricKey:
//索引文档个数
docCountMetric := newMetricItem("doc_count", 2, DocumentGroupKey)
docCountMetric.AddAxi("Doc count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
docCountMetric := newMetricItem(DocCountMetricKey, 2, DocumentGroupKey)
docCountMetric.AddAxi("Doc count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "doc_count",
@ -168,10 +208,11 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "",
})
case DocsDeletedMetricKey:
// docs 删除数量
docsDeletedMetric:=newMetricItem("docs_deleted", 17, DocumentGroupKey)
docsDeletedMetric.AddAxi("docs deleted","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
docsDeletedMetric := newMetricItem(DocsDeletedMetricKey, 17, DocumentGroupKey)
docsDeletedMetric.AddAxi("docs deleted", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "docs_deleted",
Field: "payload.elasticsearch.index_stats.total.docs.deleted",
ID: util.GetUUID(),
@ -180,9 +221,10 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "",
})
case QueryTimesMetricKey:
//查询次数
queryTimesMetric := newMetricItem("query_times", 2, OperationGroupKey)
queryTimesMetric.AddAxi("Query times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queryTimesMetric := newMetricItem(QueryTimesMetricKey, 2, OperationGroupKey)
queryTimesMetric.AddAxi("Query times", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "query_times",
@ -193,10 +235,10 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "requests/s",
})
case FetchTimesMetricKey:
//Fetch次数
fetchTimesMetric := newMetricItem("fetch_times", 3, OperationGroupKey)
fetchTimesMetric.AddAxi("Fetch times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
fetchTimesMetric := newMetricItem(FetchTimesMetricKey, 3, OperationGroupKey)
fetchTimesMetric.AddAxi("Fetch times", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "fetch_times",
Field: "payload.elasticsearch.index_stats.total.search.fetch_total",
@ -206,9 +248,10 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "requests/s",
})
case ScrollTimesMetricKey:
//scroll 次数
scrollTimesMetric := newMetricItem("scroll_times", 4, OperationGroupKey)
scrollTimesMetric.AddAxi("scroll times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
scrollTimesMetric := newMetricItem(ScrollTimesMetricKey, 4, OperationGroupKey)
scrollTimesMetric.AddAxi("scroll times", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "scroll_times",
Field: "payload.elasticsearch.index_stats.total.search.scroll_total",
@ -218,9 +261,10 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "requests/s",
})
case MergeTimesMetricKey:
//Merge次数
mergeTimesMetric := newMetricItem("merge_times", 7, OperationGroupKey)
mergeTimesMetric.AddAxi("Merge times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
mergeTimesMetric := newMetricItem(MergeTimesMetricKey, 7, OperationGroupKey)
mergeTimesMetric.AddAxi("Merge times", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "merge_times",
Field: "payload.elasticsearch.index_stats.total.merges.total",
@ -230,9 +274,10 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "requests/s",
})
case RefreshTimesMetricKey:
//Refresh次数
refreshTimesMetric := newMetricItem("refresh_times", 5, OperationGroupKey)
refreshTimesMetric.AddAxi("Refresh times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
refreshTimesMetric := newMetricItem(RefreshTimesMetricKey, 5, OperationGroupKey)
refreshTimesMetric.AddAxi("Refresh times", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "refresh_times",
Field: "payload.elasticsearch.index_stats.total.refresh.total",
@ -242,8 +287,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "requests/s",
})
case FlushTimesMetricKey:
//flush 次数
flushTimesMetric := newMetricItem("flush_times", 6, OperationGroupKey)
flushTimesMetric := newMetricItem(FlushTimesMetricKey, 6, OperationGroupKey)
flushTimesMetric.AddAxi("flush times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "flush_times",
@ -254,9 +300,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "requests/s",
})
case IndexingRateMetricKey:
//写入速率
indexingRateMetric := newMetricItem("indexing_rate", 1, OperationGroupKey)
indexingRateMetric := newMetricItem(IndexingRateMetricKey, 1, OperationGroupKey)
indexingRateMetric.AddAxi("Indexing rate","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "indexing_rate",
@ -267,7 +313,8 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "doc/s",
})
indexingBytesMetric := newMetricItem("indexing_bytes", 2, OperationGroupKey)
case IndexingBytesMetricKey:
indexingBytesMetric := newMetricItem(IndexingBytesMetricKey, 2, OperationGroupKey)
indexingBytesMetric.AddAxi("Indexing bytes","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "indexing_bytes",
@ -278,8 +325,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "bytes",
Units: "bytes/s",
})
case IndexingLatencyMetricKey:
//写入时延
indexingLatencyMetric := newMetricItem("indexing_latency", 1, LatencyGroupKey)
indexingLatencyMetric := newMetricItem(IndexingLatencyMetricKey, 1, LatencyGroupKey)
indexingLatencyMetric.AddAxi("Indexing latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "indexing_latency",
@ -294,9 +342,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "ms",
})
case QueryLatencyMetricKey:
//查询时延
queryLatencyMetric := newMetricItem("query_latency", 2, LatencyGroupKey)
queryLatencyMetric := newMetricItem(QueryLatencyMetricKey, 2, LatencyGroupKey)
queryLatencyMetric.AddAxi("Query latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "query_latency",
@ -311,8 +359,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "ms",
})
case FetchLatencyMetricKey:
//fetch时延
fetchLatencyMetric := newMetricItem("fetch_latency", 3, LatencyGroupKey)
fetchLatencyMetric := newMetricItem(FetchLatencyMetricKey, 3, LatencyGroupKey)
fetchLatencyMetric.AddAxi("Fetch latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "fetch_latency",
@ -327,9 +376,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "ms",
})
case MergeLatencyMetricKey:
//merge时延
mergeLatencyMetric := newMetricItem("merge_latency", 7, LatencyGroupKey)
mergeLatencyMetric := newMetricItem(MergeLatencyMetricKey, 7, LatencyGroupKey)
mergeLatencyMetric.AddAxi("Merge latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "merge_latency",
@ -344,8 +393,10 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "ms",
})
case RefreshLatencyMetricKey:
//refresh时延
refreshLatencyMetric := newMetricItem("refresh_latency", 5, LatencyGroupKey)
refreshLatencyMetric := newMetricItem(RefreshLatencyMetricKey, 5, LatencyGroupKey)
refreshLatencyMetric.AddAxi("Refresh latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "refresh_latency",
@ -360,8 +411,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "ms",
})
case ScrollLatencyMetricKey:
//scroll时延
scrollLatencyMetric := newMetricItem("scroll_latency", 4, LatencyGroupKey)
scrollLatencyMetric := newMetricItem(ScrollLatencyMetricKey, 4, LatencyGroupKey)
scrollLatencyMetric.AddAxi("Scroll Latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "scroll_latency",
@ -376,8 +428,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "ms",
})
case FlushLatencyMetricKey:
//flush 时延
flushLatencyMetric := newMetricItem("flush_latency", 6, LatencyGroupKey)
flushLatencyMetric := newMetricItem(FlushLatencyMetricKey, 6, LatencyGroupKey)
flushLatencyMetric.AddAxi("Flush latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "flush_latency",
@ -392,8 +445,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "ms",
})
case QueryCacheMetricKey:
//queryCache
queryCacheMetric := newMetricItem("query_cache", 1, CacheGroupKey)
queryCacheMetric := newMetricItem(QueryCacheMetricKey, 1, CacheGroupKey)
queryCacheMetric.AddAxi("Query cache","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "query_cache",
@ -404,8 +458,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "bytes",
Units: "",
})
case RequestCacheMetricKey:
//requestCache
requestCacheMetric := newMetricItem("request_cache", 2, CacheGroupKey)
requestCacheMetric := newMetricItem(RequestCacheMetricKey, 2, CacheGroupKey)
requestCacheMetric.AddAxi("request cache","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "request_cache",
@ -416,8 +471,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "bytes",
Units: "",
})
case RequestCacheHitMetricKey:
// Request Cache Hit
requestCacheHitMetric:=newMetricItem("request_cache_hit", 6, CacheGroupKey)
requestCacheHitMetric:=newMetricItem(RequestCacheHitMetricKey, 6, CacheGroupKey)
requestCacheHitMetric.AddAxi("request cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "request_cache_hit",
@ -428,8 +484,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "hits",
})
case RequestCacheMissMetricKey:
// Request Cache Miss
requestCacheMissMetric:=newMetricItem("request_cache_miss", 8, CacheGroupKey)
requestCacheMissMetric:=newMetricItem(RequestCacheMissMetricKey, 8, CacheGroupKey)
requestCacheMissMetric.AddAxi("request cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "request_cache_miss",
@ -440,8 +497,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "misses",
})
case QueryCacheCountMetricKey:
// Query Cache Count
queryCacheCountMetric:=newMetricItem("query_cache_count", 4, CacheGroupKey)
queryCacheCountMetric:=newMetricItem(QueryCacheCountMetricKey, 4, CacheGroupKey)
queryCacheCountMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "query_cache_count",
@ -452,8 +510,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "",
})
case QueryCacheHitMetricKey:
// Query Cache Miss
queryCacheHitMetric:=newMetricItem("query_cache_hit", 5, CacheGroupKey)
queryCacheHitMetric:=newMetricItem(QueryCacheHitMetricKey, 5, CacheGroupKey)
queryCacheHitMetric.AddAxi("query cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "query_cache_hit",
@ -464,22 +523,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "hits",
})
//// Query Cache evictions
//queryCacheEvictionsMetric:=newMetricItem("query_cache_evictions", 11, CacheGroupKey)
//queryCacheEvictionsMetric.AddAxi("query cache evictions","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
//indexMetricItems=append(indexMetricItems, GroupMetricItem{
// Key: "query_cache_evictions",
// Field: "payload.elasticsearch.index_stats.total.query_cache.evictions",
// ID: util.GetUUID(),
// IsDerivative: true,
// MetricItem: queryCacheEvictionsMetric,
// FormatType: "num",
// Units: "evictions",
//})
case QueryCacheMissMetricKey:
// Query Cache Miss
queryCacheMissMetric:=newMetricItem("query_cache_miss", 7, CacheGroupKey)
queryCacheMissMetric:=newMetricItem(QueryCacheMissMetricKey, 7, CacheGroupKey)
queryCacheMissMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "query_cache_miss",
@ -490,8 +536,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "num",
Units: "misses",
})
case FielddataCacheMetricKey:
// Fielddata内存占用大小
fieldDataCacheMetric:=newMetricItem("fielddata_cache", 3, CacheGroupKey)
fieldDataCacheMetric:=newMetricItem(FielddataCacheMetricKey, 3, CacheGroupKey)
fieldDataCacheMetric.AddAxi("FieldData Cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "fielddata_cache",
@ -502,8 +549,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "bytes",
Units: "",
})
case SegmentMemoryMetricKey:
//segment memory
segmentMemoryMetric := newMetricItem("segment_memory", 13, MemoryGroupKey)
segmentMemoryMetric := newMetricItem(SegmentMemoryMetricKey, 13, MemoryGroupKey)
segmentMemoryMetric.AddAxi("Segment memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "segment_memory",
@ -514,9 +562,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "bytes",
Units: "",
})
case SegmentDocValuesMemoryMetricKey:
//segment doc values memory
docValuesMemoryMetric := newMetricItem("segment_doc_values_memory", 13, MemoryGroupKey)
docValuesMemoryMetric := newMetricItem(SegmentDocValuesMemoryMetricKey, 13, MemoryGroupKey)
docValuesMemoryMetric.AddAxi("Segment Doc values Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "segment_doc_values_memory",
@ -527,9 +575,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "bytes",
Units: "",
})
case SegmentTermsMemoryMetricKey:
//segment terms memory
termsMemoryMetric := newMetricItem("segment_terms_memory", 13, MemoryGroupKey)
termsMemoryMetric := newMetricItem(SegmentTermsMemoryMetricKey, 13, MemoryGroupKey)
termsMemoryMetric.AddAxi("Segment Terms Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "segment_terms_memory",
@ -540,9 +588,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "bytes",
Units: "",
})
case SegmentFieldsMemoryMetricKey:
//segment fields memory
fieldsMemoryMetric := newMetricItem("segment_fields_memory", 13, MemoryGroupKey)
fieldsMemoryMetric := newMetricItem(SegmentFieldsMemoryMetricKey, 13, MemoryGroupKey)
fieldsMemoryMetric.AddAxi("Segment Fields Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "segment_fields_memory",
@ -553,8 +601,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "bytes",
Units: "",
})
case SegmentIndexWriterMemoryMetricKey:
// segment index writer memory
segmentIndexWriterMemoryMetric:=newMetricItem("segment_index_writer_memory", 16, MemoryGroupKey)
segmentIndexWriterMemoryMetric:=newMetricItem(SegmentIndexWriterMemoryMetricKey, 16, MemoryGroupKey)
segmentIndexWriterMemoryMetric.AddAxi("segment doc values memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "segment_index_writer_memory",
@ -565,8 +614,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
FormatType: "bytes",
Units: "",
})
case SegmentTermVectorsMemoryMetricKey:
// segment term vectors memory
segmentTermVectorsMemoryMetric:=newMetricItem("segment_term_vectors_memory", 16, MemoryGroupKey)
segmentTermVectorsMemoryMetric:=newMetricItem(SegmentTermVectorsMemoryMetricKey, 16, MemoryGroupKey)
segmentTermVectorsMemoryMetric.AddAxi("segment term vectors memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "segment_term_vectors_memory",
@ -578,6 +628,9 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
Units: "",
})
}
aggs:=map[string]interface{}{}
for _,metricItem:=range indexMetricItems {
@ -642,7 +695,7 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
},
},
}
return h.getMetrics(query, indexMetricItems, bucketSize)
return h.getMetrics(ctx, query, indexMetricItems, bucketSize)
}

View File

@ -28,6 +28,7 @@
package v1
import (
"context"
"fmt"
log "github.com/cihub/seelog"
httprouter "infini.sh/framework/core/api/router"
@ -440,7 +441,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
},
},
}
metrics := h.getMetrics(query, nodeMetricItems, bucketSize)
metrics := h.getMetrics(context.Background(), query, nodeMetricItems, bucketSize)
indexMetrics := map[string]util.MapStr{}
for key, item := range metrics {
for _, line := range item.Lines {
@ -626,6 +627,8 @@ func (h *APIHandler) GetIndexShards(w http.ResponseWriter, req *http.Request, ps
h.WriteJSON(w, shardInfo, http.StatusOK)
}
const IndexHealthMetricKey = "index_health"
func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
clusterID := ps.MustGetParameter("id")
indexName := ps.MustGetParameter("index")
@ -699,63 +702,81 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
metricItems := []*common.MetricItem{}
metricItem:=newMetricItem("index_throughput", 1, OperationGroupKey)
metricItem.AddAxi("indexing","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
metricItem.AddLine("Indexing Rate","Primary Indexing","Number of documents being indexed for node.","group1","payload.elasticsearch.index_stats.primaries.indexing.index_total","max",bucketSizeStr,"doc/s","num","0,0.[00]","0,0.[00]",false,true)
metricItem.AddLine("Deleting Rate","Primary Deleting","Number of documents being deleted for node.","group1","payload.elasticsearch.index_stats.primaries.indexing.delete_total","max",bucketSizeStr,"doc/s","num","0,0.[00]","0,0.[00]",false,true)
metricItems=append(metricItems,metricItem)
metricItem=newMetricItem("search_throughput", 2, OperationGroupKey)
metricItem.AddAxi("searching","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,false)
metricItem.AddLine("Search Rate","Search Rate",
"Number of search requests being executed.",
"group1","payload.elasticsearch.index_stats.total.search.query_total","max",bucketSizeStr,"query/s","num","0,0.[00]","0,0.[00]",false,true)
metricItems=append(metricItems,metricItem)
metricItem=newMetricItem("index_latency", 3, LatencyGroupKey)
metricItem.AddAxi("indexing","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
metricItem.AddLine("Indexing Latency","Primary Indexing Latency","Average latency for indexing documents.","group1","payload.elasticsearch.index_stats.primaries.indexing.index_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.index_stats.primaries.indexing.index_total"
metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
metricKey := h.GetParameter(req, "key")
timeout := h.GetParameterOrDefault(req, "timeout", "60s")
du, err := time.ParseDuration(timeout)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
metricItem.AddLine("Deleting Latency","Primary Deleting Latency","Average latency for delete documents.","group1","payload.elasticsearch.index_stats.primaries.indexing.delete_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.index_stats.primaries.indexing.delete_total"
metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
}
metricItems=append(metricItems,metricItem)
metricItem=newMetricItem("search_latency", 4, LatencyGroupKey)
metricItem.AddAxi("searching","group2",common.PositionLeft,"num","0,0","0,0.[00]",5,false)
metricItem.AddLine("Searching","Query Latency","Average latency for searching query.","group2","payload.elasticsearch.index_stats.total.search.query_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.index_stats.total.search.query_total"
metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
}
metricItem.AddLine("Searching","Fetch Latency","Average latency for searching fetch.","group2","payload.elasticsearch.index_stats.total.search.fetch_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.index_stats.total.search.fetch_total"
metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
}
metricItem.AddLine("Searching","Scroll Latency","Average latency for searching fetch.","group2","payload.elasticsearch.index_stats.total.search.scroll_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.Lines[2].Metric.Field2 = "payload.elasticsearch.index_stats.total.search.scroll_total"
metricItem.Lines[2].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
}
metricItems=append(metricItems,metricItem)
metrics := h.getSingleMetrics(metricItems,query, bucketSize)
healthMetric, err := h.getIndexHealthMetric(clusterID, indexName, min, max, bucketSize)
ctx, cancel := context.WithTimeout(context.Background(), du)
defer cancel()
metrics := map[string]*common.MetricItem{}
if metricKey == IndexHealthMetricKey {
healthMetric, err := h.getIndexHealthMetric(ctx, clusterID, indexName, min, max, bucketSize)
if err != nil {
log.Error(err)
}
metrics["index_health"] = healthMetric
}else {
switch metricKey {
case IndexThroughputMetricKey:
metricItem := newMetricItem("index_throughput", 1, OperationGroupKey)
metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
metricItem.AddLine("Indexing Rate", "Primary Indexing", "Number of documents being indexed for node.", "group1", "payload.elasticsearch.index_stats.primaries.indexing.index_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.AddLine("Deleting Rate", "Primary Deleting", "Number of documents being deleted for node.", "group1", "payload.elasticsearch.index_stats.primaries.indexing.delete_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItems = append(metricItems, metricItem)
case SearchThroughputMetricKey:
metricItem := newMetricItem("search_throughput", 2, OperationGroupKey)
metricItem.AddAxi("searching", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
metricItem.AddLine("Search Rate", "Search Rate",
"Number of search requests being executed.",
"group1", "payload.elasticsearch.index_stats.total.search.query_total", "max", bucketSizeStr, "query/s", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItems = append(metricItems, metricItem)
case IndexLatencyMetricKey:
metricItem := newMetricItem("index_latency", 3, LatencyGroupKey)
metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
metricItem.AddLine("Indexing Latency", "Primary Indexing Latency", "Average latency for indexing documents.", "group1", "payload.elasticsearch.index_stats.primaries.indexing.index_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.index_stats.primaries.indexing.index_total"
metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
return value / value2
}
metricItem.AddLine("Deleting Latency", "Primary Deleting Latency", "Average latency for delete documents.", "group1", "payload.elasticsearch.index_stats.primaries.indexing.delete_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.index_stats.primaries.indexing.delete_total"
metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
return value / value2
}
metricItems = append(metricItems, metricItem)
case SearchLatencyMetricKey:
metricItem := newMetricItem("search_latency", 4, LatencyGroupKey)
metricItem.AddAxi("searching", "group2", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
metricItem.AddLine("Searching", "Query Latency", "Average latency for searching query.", "group2", "payload.elasticsearch.index_stats.total.search.query_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.index_stats.total.search.query_total"
metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
return value / value2
}
metricItem.AddLine("Searching", "Fetch Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.index_stats.total.search.fetch_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.index_stats.total.search.fetch_total"
metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
return value / value2
}
metricItem.AddLine("Searching", "Scroll Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.index_stats.total.search.scroll_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.Lines[2].Metric.Field2 = "payload.elasticsearch.index_stats.total.search.scroll_total"
metricItem.Lines[2].Metric.Calc = func(value, value2 float64) float64 {
return value / value2
}
metricItems = append(metricItems, metricItem)
}
metrics = h.getSingleMetrics(ctx, metricItems, query, bucketSize)
}
resBody["metrics"] = metrics
h.WriteJSON(w, resBody, http.StatusOK)
}
func (h *APIHandler) getIndexHealthMetric(id, indexName string, min, max int64, bucketSize int)(*common.MetricItem, error){
func (h *APIHandler) getIndexHealthMetric(ctx context.Context, id, indexName string, min, max int64, bucketSize int)(*common.MetricItem, error){
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
@ -823,7 +844,8 @@ func (h *APIHandler) getIndexHealthMetric(id, indexName string, min, max int64,
},
},
}
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
queryDSL := util.MustToJSONBytes(query)
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
if err != nil {
log.Error(err)
return nil, err
@ -841,6 +863,7 @@ func (h *APIHandler) getIndexHealthMetric(id, indexName string, min, max int64,
}
metricItem.Lines[0].Data = metricData
metricItem.Lines[0].Type = common.GraphTypeBar
metricItem.Request = string(queryDSL)
return metricItem, nil
}

View File

@ -500,6 +500,7 @@ func (h *APIHandler) HandleMetricsSummaryAction(w http.ResponseWriter, req *http
func (h *APIHandler) HandleClusterMetricsAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
id := ps.ByName("id")
key := h.GetParameter(req, "key")
bucketSize, min, max, err := h.getMetricRangeAndBucketSize(req, 10, 90)
if err != nil {
@ -514,13 +515,18 @@ func (h *APIHandler) HandleClusterMetricsAction(w http.ResponseWriter, req *http
}
}
//fmt.Println(min," vs ",max,",",rangeFrom,rangeTo,"range hours:",hours)
//metrics:=h.GetClusterMetrics(id,bucketSize,min,max)
isOverview := h.GetIntOrDefault(req, "overview", 0)
timeout := h.GetParameterOrDefault(req, "timeout", "60s")
du, err := time.ParseDuration(timeout)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
ctx, cancel := context.WithTimeout(context.Background(), du)
defer cancel()
var metrics interface{}
if isOverview == 1 {
metrics = h.GetClusterIndexMetrics(id, bucketSize, min, max)
if util.StringInArray([]string{IndexThroughputMetricKey, SearchThroughputMetricKey, IndexLatencyMetricKey, SearchLatencyMetricKey}, key) {
metrics = h.GetClusterIndexMetrics(ctx, id, bucketSize, min, max, key)
} else {
if meta != nil && meta.Config.MonitorConfigs != nil && meta.Config.MonitorConfigs.ClusterStats.Enabled && meta.Config.MonitorConfigs.ClusterStats.Interval != "" {
du, _ := time.ParseDuration(meta.Config.MonitorConfigs.ClusterStats.Interval)
@ -534,7 +540,7 @@ func (h *APIHandler) HandleClusterMetricsAction(w http.ResponseWriter, req *http
bucketSize = int(du.Seconds())
}
}
metrics = h.GetClusterMetrics(id, bucketSize, min, max)
metrics = h.GetClusterMetrics(ctx, id, bucketSize, min, max, key)
}
resBody["metrics"] = metrics
@ -546,48 +552,6 @@ func (h *APIHandler) HandleClusterMetricsAction(w http.ResponseWriter, req *http
}
// HandleNodeMetricsAction serves node-level metrics for the cluster identified
// by the "id" path parameter. It derives the histogram bucket size and the
// [min, max] time range from the request, optionally narrows the result to a
// comma-separated "node_name" list, limits output to the "top" N nodes
// (default 5), and writes the collected metrics as JSON.
func (h *APIHandler) HandleNodeMetricsAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
id := ps.ByName("id")
// Defaults: 10s buckets, capped at 90 buckets across the requested range.
bucketSize, min, max, err := h.getMetricRangeAndBucketSize(req, 10, 90)
if err != nil {
log.Error(err)
resBody["error"] = err
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
meta := elastic.GetMetadata(id)
// Never use a bucket finer than the configured node_stats collection
// interval, otherwise buckets would contain gaps.
if meta != nil && meta.Config.MonitorConfigs != nil && meta.Config.MonitorConfigs.NodeStats.Interval != "" {
du, _ := time.ParseDuration(meta.Config.MonitorConfigs.NodeStats.Interval)
if bucketSize < int(du.Seconds()) {
bucketSize = int(du.Seconds())
}
}
nodeName := h.Get(req, "node_name", "")
top := h.GetIntOrDefault(req, "top", 5)
resBody["metrics"], err = h.getNodeMetrics(id, bucketSize, min, max, nodeName, top)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
// An empty Distribution means a stock Elasticsearch system cluster; the
// top-nodes aggregation is known to be less accurate before version 6.1,
// so surface a tip to the caller in that case.
ver := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).GetVersion()
if ver.Distribution == "" {
cr, err := util.VersionCompare(ver.Number, "6.1")
if err != nil {
log.Error(err)
}
if cr < 0 {
resBody["tips"] = "The system cluster version is lower than 6.1, the top node may be inaccurate"
}
}
err = h.WriteJSON(w, resBody, http.StatusOK)
if err != nil {
log.Error(err)
}
}
func (h *APIHandler) HandleIndexMetricsAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
id := ps.ByName("id")
@ -607,7 +571,25 @@ func (h *APIHandler) HandleIndexMetricsAction(w http.ResponseWriter, req *http.R
}
indexName := h.Get(req, "index_name", "")
top := h.GetIntOrDefault(req, "top", 5)
metrics := h.getIndexMetrics(req, id, bucketSize, min, max, indexName, top)
key := h.GetParameter(req, "key")
timeout := h.GetParameterOrDefault(req, "timeout", "60s")
du, err := time.ParseDuration(timeout)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
ctx, cancel := context.WithTimeout(context.Background(), du)
defer cancel()
var metrics map[string]*common.MetricItem
if key == DocPercentMetricKey {
metrics = h.getIndexMetrics(ctx, req, id, bucketSize, min, max, indexName, top, DocCountMetricKey)
docsDeletedMetrics := h.getIndexMetrics(ctx, req, id, bucketSize, min, max, indexName, top, DocsDeletedMetricKey)
for k, v := range docsDeletedMetrics {
if v != nil {
metrics[k] = v
}
}
if metrics["doc_count"] != nil && metrics["docs_deleted"] != nil && len(metrics["doc_count"].Lines) > 0 && len(metrics["docs_deleted"].Lines) > 0 {
metricA := metrics["doc_count"]
metricB := metrics["docs_deleted"]
@ -651,8 +633,11 @@ func (h *APIHandler) HandleIndexMetricsAction(w http.ResponseWriter, req *http.R
metrics["doc_percent"] = metricDocPercent
}
}
}
}else{
metrics = h.getIndexMetrics(ctx, req, id, bucketSize, min, max, indexName, top, key)
}
resBody["metrics"] = metrics
ver := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).GetVersion()
if ver.Distribution == "" {
@ -670,42 +655,6 @@ func (h *APIHandler) HandleIndexMetricsAction(w http.ResponseWriter, req *http.R
log.Error(err)
}
}
// HandleQueueMetricsAction serves thread-pool (queue) metrics for the cluster
// identified by the "id" path parameter. It derives the bucket size and time
// range from the request, optionally filters by "node_name", limits output to
// the "top" N nodes (default 5), and writes the metrics as JSON.
func (h *APIHandler) HandleQueueMetricsAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
id := ps.ByName("id")
// Defaults: 10s buckets, capped at 90 buckets across the requested range.
bucketSize, min, max, err := h.getMetricRangeAndBucketSize(req, 10, 90)
if err != nil {
log.Error(err)
resBody["error"] = err
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
nodeName := h.Get(req, "node_name", "")
top := h.GetIntOrDefault(req, "top", 5)
meta := elastic.GetMetadata(id)
// Never use a bucket finer than the configured node_stats collection
// interval, otherwise buckets would contain gaps.
if meta != nil && meta.Config.MonitorConfigs != nil && meta.Config.MonitorConfigs.NodeStats.Interval != "" {
du, _ := time.ParseDuration(meta.Config.MonitorConfigs.NodeStats.Interval)
if bucketSize < int(du.Seconds()) {
bucketSize = int(du.Seconds())
}
}
resBody["metrics"] = h.getThreadPoolMetrics(id, bucketSize, min, max, nodeName, top)
// An empty Distribution means a stock Elasticsearch system cluster; the
// top-nodes aggregation is known to be less accurate before version 6.1,
// so surface a tip to the caller in that case.
ver := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).GetVersion()
if ver.Distribution == "" {
cr, err := util.VersionCompare(ver.Number, "6.1")
if err != nil {
log.Error(err)
}
if cr < 0 {
resBody["tips"] = "The system cluster version is lower than 6.1, the top node may be inaccurate"
}
}
err = h.WriteJSON(w, resBody, http.StatusOK)
if err != nil {
log.Error(err)
}
}
// TODO, use expired hash
var clusters = map[string]elastic.ElasticsearchConfig{}
@ -810,56 +759,45 @@ const (
CircuitBreakerGroupKey = "circuit_breaker"
)
func (h *APIHandler) GetClusterMetrics(id string, bucketSize int, min, max int64) map[string]*common.MetricItem {
const (
ClusterStorageMetricKey = "cluster_storage"
ClusterDocumentsMetricKey = "cluster_documents"
ClusterIndicesMetricKey = "cluster_indices"
ClusterNodeCountMetricKey = "node_count"
ClusterHealthMetricKey = "cluster_health"
ShardCountMetricKey = "shard_count"
CircuitBreakerMetricKey = "circuit_breaker"
)
func (h *APIHandler) GetClusterMetrics(ctx context.Context, id string, bucketSize int, min, max int64, metricKey string) map[string]*common.MetricItem {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
clusterMetricItems := []*common.MetricItem{}
metricItem := newMetricItem("cluster_storage", 8, StorageGroupKey)
metricItem.AddAxi("indices_storage", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
metricItem.AddAxi("available_storage", "group2", common.PositionRight, "bytes", "0.[0]", "0.[0]", 5, true)
metricItem.AddLine("Disk", "Indices Storage", "", "group1", "payload.elasticsearch.cluster_stats.indices.store.size_in_bytes", "max", bucketSizeStr, "", "bytes", "0,0.[00]", "0,0.[00]", false, false)
metricItem.AddLine("Disk", "Available Disk", "", "group2", "payload.elasticsearch.cluster_stats.nodes.fs.available_in_bytes", "max", bucketSizeStr, "", "bytes", "0,0.[00]", "0,0.[00]", false, false)
clusterMetricItems = append(clusterMetricItems, metricItem)
metricItem = newMetricItem("cluster_documents", 4, StorageGroupKey)
metricItem.AddAxi("count", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
metricItem.AddAxi("deleted", "group2", common.PositionRight, "num", "0,0", "0,0.[00]", 5, false)
metricItem.AddLine("Documents Count", "Documents Count", "", "group1", "payload.elasticsearch.cluster_stats.indices.docs.count", "max", bucketSizeStr, "", "num", "0,0.[00]", "0,0.[00]", false, false)
metricItem.AddLine("Documents Deleted", "Documents Deleted", "", "group2", "payload.elasticsearch.cluster_stats.indices.docs.deleted", "max", bucketSizeStr, "", "num", "0,0.[00]", "0,0.[00]", false, false)
clusterMetricItems = append(clusterMetricItems, metricItem)
metricItem = newMetricItem("cluster_indices", 6, StorageGroupKey)
metricItem.AddAxi("count", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
metricItem.AddLine("Indices Count", "Indices Count", "", "group1", "payload.elasticsearch.cluster_stats.indices.count", "max", bucketSizeStr, "", "num", "0,0.[00]", "0,0.[00]", false, false)
clusterMetricItems = append(clusterMetricItems, metricItem)
metricItem = newMetricItem("node_count", 5, MemoryGroupKey)
metricItem.AddAxi("count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
meta := elastic.GetMetadata(id)
if meta == nil {
err := fmt.Errorf("metadata of cluster [%s] is not found", id)
panic(err)
}
majorVersion := meta.GetMajorVersion()
metricItem.AddLine("Total", "Total Nodes", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.total", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
//TODO check version difference
if majorVersion < 5 {
metricItem.AddLine("Master Only", "Master Only", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.master_only", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
metricItem.AddLine("Data Node", "Data Only", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.data_only", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
metricItem.AddLine("Master Data", "Master Data", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.master_data", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
var clusterMetricsResult = map[string]*common.MetricItem {}
switch metricKey {
case ClusterDocumentsMetricKey,
ClusterStorageMetricKey,
ClusterIndicesMetricKey,
ClusterNodeCountMetricKey:
clusterMetricsResult = h.getClusterMetricsByKey(ctx, id, bucketSize, min, max, metricKey)
case IndexLatencyMetricKey, IndexThroughputMetricKey, SearchThroughputMetricKey, SearchLatencyMetricKey:
clusterMetricsResult = h.GetClusterIndexMetrics(ctx, id, bucketSize, min, max, metricKey)
case ClusterHealthMetricKey:
statusMetric, err := h.getClusterStatusMetric(ctx, id, min, max, bucketSize)
if err == nil {
clusterMetricsResult[ClusterHealthMetricKey] = statusMetric
} else {
metricItem.AddLine("Master Node", "Master Node", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.master", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
metricItem.AddLine("Data Node", "Data Node", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.data", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
metricItem.AddLine("Coordinating Node Only", "Coordinating Node Only", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.coordinating_only", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
metricItem.AddLine("Ingest Node", "Ingest Node", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.ingest", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
log.Error("get cluster status metric error: ", err)
}
case ShardCountMetricKey:
clusterMetricsResult = h.getShardsMetric(ctx, id, min, max, bucketSize)
case CircuitBreakerMetricKey:
clusterMetricsResult = h.getCircuitBreakerMetric(ctx, id, min, max, bucketSize)
}
clusterMetricItems = append(clusterMetricItems, metricItem)
return clusterMetricsResult
}
func (h *APIHandler) getClusterMetricsByKey(ctx context.Context, id string, bucketSize int, min, max int64, metricKey string) map[string]*common.MetricItem {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
query := map[string]interface{}{}
query["query"] = util.MapStr{
"bool": util.MapStr{
@ -898,47 +836,80 @@ func (h *APIHandler) GetClusterMetrics(id string, bucketSize int, min, max int64
},
},
}
//todo: since there is four queries, we can change these query to async
indexMetricsResult := h.GetClusterIndexMetrics(id, bucketSize, min, max)
clusterMetricsResult := h.getSingleMetrics(clusterMetricItems, query, bucketSize)
for k, v := range clusterMetricsResult {
indexMetricsResult[k] = v
}
statusMetric, err := h.getClusterStatusMetric(id, min, max, bucketSize)
if err == nil {
indexMetricsResult["cluster_health"] = statusMetric
} else {
log.Error("get cluster status metric error: ", err)
}
clusterHealthMetricsResult := h.getShardsMetric(id, min, max, bucketSize)
for k, v := range clusterHealthMetricsResult {
indexMetricsResult[k] = v
}
// get CircuitBreaker metric
circuitBreakerMetricsResult := h.getCircuitBreakerMetric(id, min, max, bucketSize)
for k, v := range circuitBreakerMetricsResult {
indexMetricsResult[k] = v
}
clusterMetricItems := []*common.MetricItem{}
switch metricKey {
case ClusterStorageMetricKey:
metricItem := newMetricItem(ClusterStorageMetricKey, 8, StorageGroupKey)
metricItem.AddAxi("indices_storage", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
metricItem.AddAxi("available_storage", "group2", common.PositionRight, "bytes", "0.[0]", "0.[0]", 5, true)
return indexMetricsResult
metricItem.AddLine("Disk", "Indices Storage", "", "group1", "payload.elasticsearch.cluster_stats.indices.store.size_in_bytes", "max", bucketSizeStr, "", "bytes", "0,0.[00]", "0,0.[00]", false, false)
metricItem.AddLine("Disk", "Available Disk", "", "group2", "payload.elasticsearch.cluster_stats.nodes.fs.available_in_bytes", "max", bucketSizeStr, "", "bytes", "0,0.[00]", "0,0.[00]", false, false)
clusterMetricItems = append(clusterMetricItems, metricItem)
case ClusterDocumentsMetricKey:
metricItem := newMetricItem(ClusterDocumentsMetricKey, 4, StorageGroupKey)
metricItem.AddAxi("count", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
metricItem.AddAxi("deleted", "group2", common.PositionRight, "num", "0,0", "0,0.[00]", 5, false)
metricItem.AddLine("Documents Count", "Documents Count", "", "group1", "payload.elasticsearch.cluster_stats.indices.docs.count", "max", bucketSizeStr, "", "num", "0,0.[00]", "0,0.[00]", false, false)
metricItem.AddLine("Documents Deleted", "Documents Deleted", "", "group2", "payload.elasticsearch.cluster_stats.indices.docs.deleted", "max", bucketSizeStr, "", "num", "0,0.[00]", "0,0.[00]", false, false)
clusterMetricItems = append(clusterMetricItems, metricItem)
case ClusterIndicesMetricKey:
metricItem := newMetricItem(ClusterIndicesMetricKey, 6, StorageGroupKey)
metricItem.AddAxi("count", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
metricItem.AddLine("Indices Count", "Indices Count", "", "group1", "payload.elasticsearch.cluster_stats.indices.count", "max", bucketSizeStr, "", "num", "0,0.[00]", "0,0.[00]", false, false)
clusterMetricItems = append(clusterMetricItems, metricItem)
case ClusterNodeCountMetricKey:
metricItem := newMetricItem("node_count", 5, MemoryGroupKey)
metricItem.AddAxi("count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
meta := elastic.GetMetadata(id)
if meta == nil {
err := fmt.Errorf("metadata of cluster [%s] is not found", id)
panic(err)
}
majorVersion := meta.GetMajorVersion()
metricItem.AddLine("Total", "Total Nodes", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.total", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
if majorVersion < 5 {
metricItem.AddLine("Master Only", "Master Only", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.master_only", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
metricItem.AddLine("Data Node", "Data Only", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.data_only", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
metricItem.AddLine("Master Data", "Master Data", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.master_data", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
} else {
metricItem.AddLine("Master Node", "Master Node", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.master", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
metricItem.AddLine("Data Node", "Data Node", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.data", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
metricItem.AddLine("Coordinating Node Only", "Coordinating Node Only", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.coordinating_only", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
metricItem.AddLine("Ingest Node", "Ingest Node", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.ingest", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
}
clusterMetricItems = append(clusterMetricItems, metricItem)
}
return h.getSingleMetrics(ctx, clusterMetricItems, query, bucketSize)
}
func (h *APIHandler) GetClusterIndexMetrics(id string, bucketSize int, min, max int64) map[string]*common.MetricItem {
const (
IndexThroughputMetricKey = "index_throughput"
SearchThroughputMetricKey = "search_throughput"
IndexLatencyMetricKey = "index_latency"
SearchLatencyMetricKey = "search_latency"
)
func (h *APIHandler) GetClusterIndexMetrics(ctx context.Context, id string, bucketSize int, min, max int64, metricKey string) map[string]*common.MetricItem {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
metricItems := []*common.MetricItem{}
metricItem := newMetricItem("index_throughput", 2, OperationGroupKey)
switch metricKey {
case IndexThroughputMetricKey:
metricItem := newMetricItem(IndexThroughputMetricKey, 2, OperationGroupKey)
metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
metricItem.AddLine("Indexing Rate", "Total Indexing", "Number of documents being indexed for primary and replica shards.", "group1", "payload.elasticsearch.index_stats.total.indexing.index_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.AddLine("Indexing Rate", "Primary Indexing", "Number of documents being indexed for primary shards.", "group1", "payload.elasticsearch.index_stats.primaries.indexing.index_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItems = append(metricItems, metricItem)
metricItem = newMetricItem("search_throughput", 2, OperationGroupKey)
case SearchThroughputMetricKey:
metricItem := newMetricItem(SearchThroughputMetricKey, 2, OperationGroupKey)
metricItem.AddAxi("searching", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
metricItem.AddLine("Search Rate", "Total Query",
"Number of search requests being executed across primary and replica shards. A single search can run against multiple shards!",
"group1", "payload.elasticsearch.index_stats.total.search.query_total", "max", bucketSizeStr, "query/s", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItems = append(metricItems, metricItem)
metricItem = newMetricItem("index_latency", 3, LatencyGroupKey)
case IndexLatencyMetricKey:
metricItem := newMetricItem(IndexLatencyMetricKey, 3, LatencyGroupKey)
metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
metricItem.AddLine("Indexing", "Indexing Latency", "Average latency for indexing documents.", "group1", "payload.elasticsearch.index_stats.primaries.indexing.index_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
@ -952,8 +923,8 @@ func (h *APIHandler) GetClusterIndexMetrics(id string, bucketSize int, min, max
return value / value2
}
metricItems = append(metricItems, metricItem)
metricItem = newMetricItem("search_latency", 3, LatencyGroupKey)
case SearchLatencyMetricKey:
metricItem := newMetricItem(SearchLatencyMetricKey, 3, LatencyGroupKey)
metricItem.AddAxi("searching", "group2", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
metricItem.AddLine("Searching", "Query Latency", "Average latency for searching query.", "group2", "payload.elasticsearch.index_stats.total.search.query_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
@ -972,6 +943,10 @@ func (h *APIHandler) GetClusterIndexMetrics(id string, bucketSize int, min, max
return value / value2
}
metricItems = append(metricItems, metricItem)
default:
panic("not support metric key: " + metricKey)
}
query := map[string]interface{}{}
query["query"] = util.MapStr{
"bool": util.MapStr{
@ -1017,10 +992,10 @@ func (h *APIHandler) GetClusterIndexMetrics(id string, bucketSize int, min, max
},
},
}
return h.getSingleMetrics(metricItems, query, bucketSize)
return h.getSingleMetrics(ctx, metricItems, query, bucketSize)
}
func (h *APIHandler) getShardsMetric(id string, min, max int64, bucketSize int) map[string]*common.MetricItem {
func (h *APIHandler) getShardsMetric(ctx context.Context, id string, min, max int64, bucketSize int) map[string]*common.MetricItem {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
query := util.MapStr{
"query": util.MapStr{
@ -1079,10 +1054,10 @@ func (h *APIHandler) getShardsMetric(id string, min, max int64, bucketSize int)
metricItem.AddLine("Delayed Unassigned Shards", "Delayed Unassigned Shards", "", "group1", "payload.elasticsearch.cluster_health.delayed_unassigned_shards", "max", bucketSizeStr, "", "num", "0,0.[00]", "0,0.[00]", false, false)
var clusterHealthMetrics []*common.MetricItem
clusterHealthMetrics = append(clusterHealthMetrics, metricItem)
return h.getSingleMetrics(clusterHealthMetrics, query, bucketSize)
return h.getSingleMetrics(ctx, clusterHealthMetrics, query, bucketSize)
}
func (h *APIHandler) getCircuitBreakerMetric(id string, min, max int64, bucketSize int) map[string]*common.MetricItem {
func (h *APIHandler) getCircuitBreakerMetric(ctx context.Context, id string, min, max int64, bucketSize int) map[string]*common.MetricItem {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
query := util.MapStr{
"query": util.MapStr{
@ -1140,10 +1115,10 @@ func (h *APIHandler) getCircuitBreakerMetric(id string, min, max int64, bucketSi
metricItem.AddLine("In Flight Requests Breaker Tripped", "In Flight Requests Tripped", "", "group1", "payload.elasticsearch.node_stats.breakers.in_flight_requests.tripped", "sum", bucketSizeStr, "times/s", "num", "0,0.[00]", "0,0.[00]", false, true)
var circuitBreakerMetrics []*common.MetricItem
circuitBreakerMetrics = append(circuitBreakerMetrics, metricItem)
return h.getSingleMetrics(circuitBreakerMetrics, query, bucketSize)
return h.getSingleMetrics(ctx, circuitBreakerMetrics, query, bucketSize)
}
func (h *APIHandler) getClusterStatusMetric(id string, min, max int64, bucketSize int) (*common.MetricItem, error) {
func (h *APIHandler) getClusterStatusMetric(ctx context.Context, id string, min, max int64, bucketSize int) (*common.MetricItem, error) {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
@ -1204,7 +1179,8 @@ func (h *APIHandler) getClusterStatusMetric(id string, min, max int64, bucketSiz
},
},
}
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
queryDSL := util.MustToJSONBytes(query)
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, util.MustToJSONBytes(query))
if err != nil {
log.Error(err)
return nil, err
@ -1221,6 +1197,7 @@ func (h *APIHandler) getClusterStatusMetric(id string, min, max int64, bucketSiz
}
metricItem.Lines[0].Data = metricData
metricItem.Lines[0].Type = common.GraphTypeBar
metricItem.Request = string(queryDSL)
return metricItem, nil
}

View File

@ -24,6 +24,7 @@
package v1
import (
"context"
"fmt"
"infini.sh/framework/core/env"
"net/http"
@ -109,9 +110,10 @@ func generateGroupAggs(nodeMetricItems []GroupMetricItem) map[string]interface{}
return aggs
}
func (h *APIHandler) getMetrics(query map[string]interface{}, grpMetricItems []GroupMetricItem, bucketSize int) map[string]*common.MetricItem {
func (h *APIHandler) getMetrics(ctx context.Context, query map[string]interface{}, grpMetricItems []GroupMetricItem, bucketSize int) map[string]*common.MetricItem {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
queryDSL := util.MustToJSONBytes(query)
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
if err != nil {
log.Error(err)
panic(err)
@ -205,6 +207,7 @@ func (h *APIHandler) getMetrics(query map[string]interface{}, grpMetricItems []G
}
line.Data = grpMetricData[dataKey][line.Metric.Label]
}
metricItem.MetricItem.Request = string(queryDSL)
result[metricItem.Key] = metricItem.MetricItem
}
return result
@ -328,7 +331,7 @@ func GetMetricRangeAndBucketSize(minStr string, maxStr string, bucketSize int, m
}
// 获取单个指标,可以包含多条曲线
func (h *APIHandler) getSingleMetrics(metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int) map[string]*common.MetricItem {
func (h *APIHandler) getSingleMetrics(ctx context.Context, metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int) map[string]*common.MetricItem {
metricData := map[string][][]interface{}{}
aggs := map[string]interface{}{}
@ -387,7 +390,8 @@ func (h *APIHandler) getSingleMetrics(metricItems []*common.MetricItem, query ma
"aggs": aggs,
},
}
response, err := elastic.GetClient(clusterID).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
queryDSL := util.MustToJSONBytes(query)
response, err := elastic.GetClient(clusterID).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
if err != nil {
log.Error(err)
panic(err)
@ -449,6 +453,7 @@ func (h *APIHandler) getSingleMetrics(metricItems []*common.MetricItem, query ma
line.TimeRange = common.TimeRange{Min: minDate, Max: maxDate}
line.Data = metricData[line.Metric.GetDataKey()]
}
metricItem.Request = string(queryDSL)
result[metricItem.Key] = metricItem
}

File diff suppressed because it is too large Load Diff

View File

@ -28,6 +28,7 @@
package v1
import (
"context"
"fmt"
log "github.com/cihub/seelog"
httprouter "infini.sh/framework/core/api/router"
@ -410,7 +411,7 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
},
},
}
metrics := h.getMetrics(query, nodeMetricItems, bucketSize)
metrics := h.getMetrics(context.Background(), query, nodeMetricItems, bucketSize)
indexMetrics := map[string]util.MapStr{}
for key, item := range metrics {
for _, line := range item.Lines {
@ -692,7 +693,7 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
metricItem =newMetricItem("parent_breaker", 8, SystemGroupKey)
metricItem.AddLine("Parent Breaker Tripped","Parent Breaker Tripped","Rate of the circuit breaker has been triggered and prevented an out of memory error.","group1","payload.elasticsearch.node_stats.breakers.parent.tripped","max",bucketSizeStr,"times/s","num","0,0.[00]","0,0.[00]",false,true)
metricItems=append(metricItems,metricItem)
metrics := h.getSingleMetrics(metricItems,query, bucketSize)
metrics := h.getSingleMetrics(context.Background(), metricItems,query, bucketSize)
healthMetric, err := getNodeHealthMetric(query, bucketSize)
if err != nil {
log.Error(err)

View File

@ -1,562 +0,0 @@
// Copyright (C) INFINI Labs & INFINI LIMITED.
//
// The INFINI Console is offered under the GNU Affero General Public License v3.0
// and as commercial software.
//
// For commercial licensing, contact us at:
// - Website: infinilabs.com
// - Email: hello@infini.ltd
//
// Open Source licensed under AGPL V3:
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package v1
import (
"fmt"
log "github.com/cihub/seelog"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/global"
"infini.sh/framework/core/util"
"infini.sh/framework/modules/elastic/common"
"strings"
)
const (
ThreadPoolGetGroupKey = "thread_pool_get"
ThreadPoolSearchGroupKey = "thread_pool_search"
ThreadPoolFlushGroupKey = "thread_pool_flush"
ThreadPoolRefreshGroupKey = "thread_pool_refresh"
ThreadPoolWriteGroupKey = "thread_pool_write"
ThreadPoolForceMergeGroupKey = "thread_pool_force_merge"
ThreadPoolIndexGroupKey = "thread_pool_index"
ThreadPoolBulkGroupKey = "thread_pool_bulk"
)
// getThreadPoolMetrics returns per-node time-series metrics (threads, queue,
// active, rejected) for the Elasticsearch thread pools of one cluster:
// search, get, flush, refresh, force_merge and — depending on the cluster
// version — either index/bulk (ES < 6) or write (ES 6.x+).
//
// Parameters:
//   - clusterID: target cluster id (matched against metadata.labels.cluster_id)
//   - bucketSize: date-histogram interval in seconds
//   - min, max: time-range bounds for the query filter (epoch millis)
//   - nodeName: optional comma-separated transport addresses; when empty the
//     top nodes are auto-selected via getTopNodeName
//   - top: number of nodes to aggregate when nodeName is empty
func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min, max int64, nodeName string, top int) map[string]*common.MetricItem {
	bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
	var must = []util.MapStr{
		{
			"term": util.MapStr{
				"metadata.labels.cluster_id": util.MapStr{
					"value": clusterID,
				},
			},
		},
		{
			"term": util.MapStr{
				"metadata.category": util.MapStr{
					"value": "elasticsearch",
				},
			},
		},
		{
			"term": util.MapStr{
				"metadata.name": util.MapStr{
					"value": "node_stats",
				},
			},
		},
	}
	var (
		nodeNames []string
		err       error
	)
	if nodeName != "" {
		nodeNames = strings.Split(nodeName, ",")
		top = len(nodeNames)
	} else {
		nodeNames, err = h.getTopNodeName(clusterID, top, 15)
		if err != nil {
			// best effort: fall back to querying without a node filter
			log.Error(err)
		}
	}
	if len(nodeNames) > 0 {
		must = append(must, util.MapStr{
			"terms": util.MapStr{
				"metadata.labels.transport_address": nodeNames,
			},
		})
	}
	query := map[string]interface{}{}
	query["query"] = util.MapStr{
		"bool": util.MapStr{
			"must": must,
			"filter": []util.MapStr{
				{
					"range": util.MapStr{
						"timestamp": util.MapStr{
							"gte": min,
							"lte": max,
						},
					},
				},
			},
		},
	}

	const fieldPrefix = "payload.elasticsearch.node_stats.thread_pool."
	var queueMetricItems []GroupMetricItem
	// addMetric registers one metric line. Rejection counters are cumulative,
	// so they are marked as derivatives and rendered as a rate ("rejected/s");
	// all other pool stats are point-in-time gauges.
	addMetric := func(key, axiTitle, group, field string, isDerivative bool) {
		metric := newMetricItem(key, 1, group)
		metric.AddAxi(axiTitle, "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
		units := ""
		if isDerivative {
			units = "rejected/s"
		}
		queueMetricItems = append(queueMetricItems, GroupMetricItem{
			Key:          key,
			Field:        field,
			ID:           util.GetUUID(),
			IsDerivative: isDerivative,
			MetricItem:   metric,
			FormatType:   "num",
			Units:        units,
		})
	}
	// addPool registers the four standard stats of one thread pool.
	addPool := func(pool, display, group string) {
		addMetric(pool+"_threads", display+" Threads Count", group, fieldPrefix+pool+".threads", false)
		addMetric(pool+"_queue", display+" Queue Count", group, fieldPrefix+pool+".queue", false)
		addMetric(pool+"_active", display+" Active Count", group, fieldPrefix+pool+".active", false)
		addMetric(pool+"_rejected", display+" Rejected Count", group, fieldPrefix+pool+".rejected", true)
	}

	addPool("search", "Search", ThreadPoolSearchGroupKey)
	addPool("get", "Get", ThreadPoolGetGroupKey)
	addPool("flush", "Flush", ThreadPoolFlushGroupKey)

	majorVersion := elastic.GetMetadata(clusterID).GetMajorVersion()
	ver := elastic.GetClient(clusterID).GetVersion()
	if (ver.Distribution == "" || ver.Distribution == elastic.Elasticsearch) && majorVersion < 6 {
		// Elasticsearch < 6 exposes separate index and bulk pools.
		addPool("index", "Index", ThreadPoolIndexGroupKey)
		addPool("bulk", "Bulk", ThreadPoolBulkGroupKey)
	} else {
		// 6.x+ (and other distributions) merged index/bulk into the write pool.
		addPool("write", "Write", ThreadPoolWriteGroupKey)
	}

	addPool("refresh", "Refresh", ThreadPoolRefreshGroupKey)
	addPool("force_merge", "Force Merge", ThreadPoolForceMergeGroupKey)

	// One max-aggregation per metric; derivative pipelines turn cumulative
	// rejection counters into rates per bucket.
	aggs := map[string]interface{}{}
	for _, metricItem := range queueMetricItems {
		aggs[metricItem.ID] = util.MapStr{
			"max": util.MapStr{
				"field": metricItem.Field,
			},
		}
		if metricItem.Field2 != "" {
			aggs[metricItem.ID+"_field2"] = util.MapStr{
				"max": util.MapStr{
					"field": metricItem.Field2,
				},
			}
		}
		if metricItem.IsDerivative {
			aggs[metricItem.ID+"_deriv"] = util.MapStr{
				"derivative": util.MapStr{
					"buckets_path": metricItem.ID,
				},
			}
			if metricItem.Field2 != "" {
				aggs[metricItem.ID+"_field2_deriv"] = util.MapStr{
					"derivative": util.MapStr{
						"buckets_path": metricItem.ID + "_field2",
					},
				}
			}
		}
	}
	intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
	if err != nil {
		log.Error(err)
		panic(err)
	}
	// Bucket by node transport address, then by time.
	query["size"] = 0
	query["aggs"] = util.MapStr{
		"group_by_level": util.MapStr{
			"terms": util.MapStr{
				"field": "metadata.labels.transport_address",
				"size":  top,
			},
			"aggs": util.MapStr{
				"dates": util.MapStr{
					"date_histogram": util.MapStr{
						"field":       "timestamp",
						intervalField: bucketSizeStr,
					},
					"aggs": aggs,
				},
			},
		},
	}
	return h.getMetrics(query, queueMetricItems, bucketSize)
}