diff --git a/modules/elastic/api/cluster_overview.go b/modules/elastic/api/cluster_overview.go
index aa14183c..be1330c1 100644
--- a/modules/elastic/api/cluster_overview.go
+++ b/modules/elastic/api/cluster_overview.go
@@ -24,6 +24,7 @@
package api
import (
+ "context"
"fmt"
"infini.sh/framework/modules/elastic/adapter"
"net/http"
@@ -254,7 +255,7 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
},
},
}
- indexMetrics := h.getMetrics(query, indexMetricItems, bucketSize)
+ indexMetrics := h.getMetrics(context.Background(), query, indexMetricItems, bucketSize)
indexingMetricData := util.MapStr{}
for _, line := range indexMetrics["cluster_indexing"].Lines {
// remove first metric dot
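
Editor's note: the unifying change in this patch is that every metric query now receives a context.Context, so a per-request deadline can cancel slow aggregations. A minimal, self-contained sketch of the pattern (fetchMetrics is a hypothetical stand-in for h.getMetrics, not the project's actual signature):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// fetchMetrics simulates a slow aggregation that honors cancellation.
func fetchMetrics(ctx context.Context, query string) (string, error) {
	select {
	case <-time.After(2 * time.Second): // simulated slow Elasticsearch query
		return "metrics for " + query, nil
	case <-ctx.Done():
		return "", ctx.Err() // abandoned once the deadline passes
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if _, err := fetchMetrics(ctx, "cluster_indexing"); err != nil {
		fmt.Println("metric query aborted:", err) // context deadline exceeded
	}
}
```

Call sites with no deadline of their own, such as FetchClusterInfo above, pass context.Background() and behave exactly as before.
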
diff --git a/modules/elastic/api/host.go b/modules/elastic/api/host.go
index 959ec561..dfd8a40b 100644
--- a/modules/elastic/api/host.go
+++ b/modules/elastic/api/host.go
@@ -28,6 +28,7 @@
package api
import (
+ "context"
"fmt"
log "github.com/cihub/seelog"
httprouter "infini.sh/framework/core/api/router"
@@ -604,10 +605,10 @@ func (h *APIHandler) getSingleHostMetric(agentID string, min, max int64, bucketS
},
},
}
- return h.getSingleMetrics(metricItems, query, bucketSize)
+ return h.getSingleMetrics(context.Background(), metricItems, query, bucketSize)
}
-func (h *APIHandler) getSingleHostMetricFromNode(nodeID string, min, max int64, bucketSize int) map[string]*common.MetricItem {
+func (h *APIHandler) getSingleHostMetricFromNode(ctx context.Context, nodeID string, min, max int64, bucketSize int) map[string]*common.MetricItem {
var must = []util.MapStr{
{
"term": util.MapStr{
@@ -669,7 +670,7 @@ func (h *APIHandler) getSingleHostMetricFromNode(nodeID string, min, max int64,
return 100 - value*100/value2
}
metricItems = append(metricItems, metricItem)
- return h.getSingleMetrics(metricItems, query, bucketSize)
+ return h.getSingleMetrics(ctx, metricItems, query, bucketSize)
}
func (h *APIHandler) GetSingleHostMetrics(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
@@ -696,7 +697,7 @@ func (h *APIHandler) GetSingleHostMetrics(w http.ResponseWriter, req *http.Reque
return
}
if hostInfo.AgentID == "" {
- resBody["metrics"] = h.getSingleHostMetricFromNode(hostInfo.NodeID, min, max, bucketSize)
+ resBody["metrics"] = h.getSingleHostMetricFromNode(context.Background(), hostInfo.NodeID, min, max, bucketSize)
h.WriteJSON(w, resBody, http.StatusOK)
return
}
@@ -866,7 +867,7 @@ func (h *APIHandler) getGroupHostMetric(agentIDs []string, min, max int64, bucke
},
},
}
- return h.getMetrics(query, hostMetricItems, bucketSize)
+ return h.getMetrics(context.Background(), query, hostMetricItems, bucketSize)
}
func getHost(hostID string) (*host.HostInfo, error) {
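
The host.go changes follow the standard Go migration for adding cancellation: ctx becomes the first parameter of the inner helper, while exported handlers that lack a deadline keep passing context.Background(). A hedged sketch of that shape (the helper body is a stub; the real one queries node metrics):

```go
package main

import (
	"context"
	"fmt"
)

// Before: func getSingleHostMetricFromNode(nodeID string, ...) ...
// After:  ctx first, per Go convention, so deadlines can flow in later.
func getSingleHostMetricFromNode(ctx context.Context, nodeID string) (string, error) {
	if err := ctx.Err(); err != nil {
		return "", err // stop early if the caller already gave up
	}
	return "metrics for " + nodeID, nil
}

func main() {
	// Call sites without a deadline keep working via context.Background().
	m, err := getSingleHostMetricFromNode(context.Background(), "node-1")
	fmt.Println(m, err)
}
```
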
diff --git a/modules/elastic/api/index_metrics.go b/modules/elastic/api/index_metrics.go
index 634b035a..2638f2c3 100644
--- a/modules/elastic/api/index_metrics.go
+++ b/modules/elastic/api/index_metrics.go
@@ -24,8 +24,10 @@
package api
import (
+ "context"
"fmt"
log "github.com/cihub/seelog"
+ v1 "infini.sh/console/modules/elastic/api/v1"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/global"
"infini.sh/framework/core/radix"
@@ -38,7 +40,7 @@ import (
"time"
)
-func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucketSize int, min, max int64, indexName string, top int, shardID string) (map[string]*common.MetricItem, error){
+func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clusterID string, bucketSize int, min, max int64, indexName string, top int, shardID string, metricKey string) (map[string]*common.MetricItem, error){
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
clusterUUID, err := adapter.GetClusterUUID(clusterID)
if err != nil {
@@ -141,12 +143,13 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
},
},
}
- //索引存储大小
- indexStorageMetric := newMetricItem("index_storage", 1, StorageGroupKey)
- indexStorageMetric.AddAxi("Index storage","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
-
- indexMetricItems := []GroupMetricItem{
- {
+ indexMetricItems := []GroupMetricItem{}
+ switch metricKey {
+ case v1.IndexStorageMetricKey:
+		//index storage size
+ indexStorageMetric := newMetricItem(v1.IndexStorageMetricKey, 1, StorageGroupKey)
+ indexStorageMetric.AddAxi("Index storage","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "index_storage",
Field: "payload.elasticsearch.shard_stats.store.size_in_bytes",
ID: util.GetUUID(),
@@ -154,451 +157,463 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
MetricItem: indexStorageMetric,
FormatType: "bytes",
Units: "",
- },
+ })
+ case v1.SegmentCountMetricKey:
+		// segment count
+ segmentCountMetric:=newMetricItem(v1.SegmentCountMetricKey, 15, StorageGroupKey)
+ segmentCountMetric.AddAxi("segment count","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ indexMetricItems=append(indexMetricItems, GroupMetricItem{
+ Key: "segment_count",
+ Field: "payload.elasticsearch.shard_stats.segments.count",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: segmentCountMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case v1.DocCountMetricKey:
+		//index doc count
+ docCountMetric := newMetricItem(v1.DocCountMetricKey, 2, DocumentGroupKey)
+ docCountMetric.AddAxi("Doc count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "doc_count",
+ Field: "payload.elasticsearch.shard_stats.docs.count",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: docCountMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case v1.DocsDeletedMetricKey:
+		// docs deleted count
+ docsDeletedMetric:=newMetricItem(v1.DocsDeletedMetricKey, 17, DocumentGroupKey)
+ docsDeletedMetric.AddAxi("docs deleted","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ indexMetricItems=append(indexMetricItems, GroupMetricItem{
+ Key: "docs_deleted",
+ Field: "payload.elasticsearch.shard_stats.docs.deleted",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: docsDeletedMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case v1.QueryTimesMetricKey:
+		//query count
+		queryTimesMetric := newMetricItem(v1.QueryTimesMetricKey, 2, OperationGroupKey)
+ queryTimesMetric.AddAxi("Query times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "query_times",
+ Field: "payload.elasticsearch.shard_stats.search.query_total",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: queryTimesMetric,
+ FormatType: "num",
+ Units: "requests/s",
+ })
+ case v1.FetchTimesMetricKey:
+		//fetch count
+ fetchTimesMetric := newMetricItem(v1.FetchTimesMetricKey, 3, OperationGroupKey)
+ fetchTimesMetric.AddAxi("Fetch times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "fetch_times",
+ Field: "payload.elasticsearch.shard_stats.search.fetch_total",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: fetchTimesMetric,
+ FormatType: "num",
+ Units: "requests/s",
+ })
+ case v1.ScrollTimesMetricKey:
+		//scroll count
+ scrollTimesMetric := newMetricItem(v1.ScrollTimesMetricKey, 4, OperationGroupKey)
+ scrollTimesMetric.AddAxi("scroll times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "scroll_times",
+ Field: "payload.elasticsearch.shard_stats.search.scroll_total",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: scrollTimesMetric,
+ FormatType: "num",
+ Units: "requests/s",
+ })
+ case v1.MergeTimesMetricKey:
+		//merge count
+ mergeTimesMetric := newMetricItem(v1.MergeTimesMetricKey, 7, OperationGroupKey)
+ mergeTimesMetric.AddAxi("Merge times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "merge_times",
+ Field: "payload.elasticsearch.shard_stats.merges.total",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: mergeTimesMetric,
+ FormatType: "num",
+ Units: "requests/s",
+ })
+ case v1.RefreshTimesMetricKey:
+		//refresh count
+ refreshTimesMetric := newMetricItem(v1.RefreshTimesMetricKey, 5, OperationGroupKey)
+ refreshTimesMetric.AddAxi("Refresh times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "refresh_times",
+ Field: "payload.elasticsearch.shard_stats.refresh.total",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: refreshTimesMetric,
+ FormatType: "num",
+ Units: "requests/s",
+ })
+ case v1.FlushTimesMetricKey:
+		//flush count
+ flushTimesMetric := newMetricItem(v1.FlushTimesMetricKey, 6, OperationGroupKey)
+ flushTimesMetric.AddAxi("flush times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "flush_times",
+ Field: "payload.elasticsearch.shard_stats.flush.total",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: flushTimesMetric,
+ FormatType: "num",
+ Units: "requests/s",
+ })
+ case v1.IndexingRateMetricKey:
+		//indexing rate
+ indexingRateMetric := newMetricItem(v1.IndexingRateMetricKey, 1, OperationGroupKey)
+ if shardID == "" {
+ indexingRateMetric.OnlyPrimary = true
+ }
+ indexingRateMetric.AddAxi("Indexing rate","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "indexing_rate",
+ Field: "payload.elasticsearch.shard_stats.indexing.index_total",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: indexingRateMetric,
+ FormatType: "num",
+ Units: "doc/s",
+ })
+ case v1.IndexingBytesMetricKey:
+ indexingBytesMetric := newMetricItem(v1.IndexingBytesMetricKey, 2, OperationGroupKey)
+ if shardID == "" {
+ indexingBytesMetric.OnlyPrimary = true
+ }
+ indexingBytesMetric.AddAxi("Indexing bytes","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "indexing_bytes",
+ Field: "payload.elasticsearch.shard_stats.store.size_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: indexingBytesMetric,
+ FormatType: "bytes",
+ Units: "bytes/s",
+ })
+ case v1.IndexingLatencyMetricKey:
+		//indexing latency
+ indexingLatencyMetric := newMetricItem(v1.IndexingLatencyMetricKey, 1, LatencyGroupKey)
+ if shardID == "" {
+ indexingLatencyMetric.OnlyPrimary = true
+ }
+ indexingLatencyMetric.AddAxi("Indexing latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "indexing_latency",
+ Field: "payload.elasticsearch.shard_stats.indexing.index_time_in_millis",
+ Field2: "payload.elasticsearch.shard_stats.indexing.index_total",
+ Calc: func(value, value2 float64) float64 {
+ return value/value2
+ },
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: indexingLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
+ })
+ case v1.QueryLatencyMetricKey:
+		//query latency
+ queryLatencyMetric := newMetricItem(v1.QueryLatencyMetricKey, 2, LatencyGroupKey)
+ queryLatencyMetric.AddAxi("Query latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "query_latency",
+ Field: "payload.elasticsearch.shard_stats.search.query_time_in_millis",
+ Field2: "payload.elasticsearch.shard_stats.search.query_total",
+ Calc: func(value, value2 float64) float64 {
+ return value/value2
+ },
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: queryLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
+ })
+	case v1.FetchLatencyMetricKey:
+		//fetch latency
+ fetchLatencyMetric := newMetricItem(v1.FetchLatencyMetricKey, 3, LatencyGroupKey)
+ fetchLatencyMetric.AddAxi("Fetch latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "fetch_latency",
+ Field: "payload.elasticsearch.shard_stats.search.fetch_time_in_millis",
+ Field2: "payload.elasticsearch.shard_stats.search.fetch_total",
+ Calc: func(value, value2 float64) float64 {
+ return value/value2
+ },
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: fetchLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
+ })
+ case v1.MergeLatencyMetricKey:
+		//merge latency
+ mergeLatencyMetric := newMetricItem(v1.MergeLatencyMetricKey, 7, LatencyGroupKey)
+ mergeLatencyMetric.AddAxi("Merge latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "merge_latency",
+ Field: "payload.elasticsearch.shard_stats.merges.total_time_in_millis",
+ Field2: "payload.elasticsearch.shard_stats.merges.total",
+ Calc: func(value, value2 float64) float64 {
+ return value/value2
+ },
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: mergeLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
+ })
+	case v1.RefreshLatencyMetricKey:
+		//refresh latency
+ refreshLatencyMetric := newMetricItem(v1.RefreshLatencyMetricKey, 5, LatencyGroupKey)
+ refreshLatencyMetric.AddAxi("Refresh latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "refresh_latency",
+ Field: "payload.elasticsearch.shard_stats.refresh.total_time_in_millis",
+ Field2: "payload.elasticsearch.shard_stats.refresh.total",
+ Calc: func(value, value2 float64) float64 {
+ return value/value2
+ },
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: refreshLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
+ })
+ case v1.ScrollLatencyMetricKey:
+		//scroll latency
+ scrollLatencyMetric := newMetricItem(v1.ScrollLatencyMetricKey, 4, LatencyGroupKey)
+ scrollLatencyMetric.AddAxi("Scroll Latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "scroll_latency",
+ Field: "payload.elasticsearch.shard_stats.search.scroll_time_in_millis",
+ Field2: "payload.elasticsearch.shard_stats.search.scroll_total",
+ Calc: func(value, value2 float64) float64 {
+ return value/value2
+ },
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: scrollLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
+ })
+ case v1.FlushLatencyMetricKey:
+		//flush latency
+ flushLatencyMetric := newMetricItem(v1.FlushLatencyMetricKey, 6, LatencyGroupKey)
+ flushLatencyMetric.AddAxi("Flush latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "flush_latency",
+ Field: "payload.elasticsearch.shard_stats.flush.total_time_in_millis",
+ Field2: "payload.elasticsearch.shard_stats.flush.total",
+ Calc: func(value, value2 float64) float64 {
+ return value/value2
+ },
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: flushLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
+ })
+ case v1.QueryCacheMetricKey:
+ //queryCache
+ queryCacheMetric := newMetricItem(v1.QueryCacheMetricKey, 1, CacheGroupKey)
+ queryCacheMetric.AddAxi("Query cache","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "query_cache",
+ Field: "payload.elasticsearch.shard_stats.query_cache.memory_size_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: queryCacheMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case v1.RequestCacheMetricKey:
+ //requestCache
+ requestCacheMetric := newMetricItem(v1.RequestCacheMetricKey, 2, CacheGroupKey)
+ requestCacheMetric.AddAxi("request cache","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "request_cache",
+ Field: "payload.elasticsearch.shard_stats.request_cache.memory_size_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: requestCacheMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case v1.RequestCacheHitMetricKey:
+ // Request Cache Hit
+ requestCacheHitMetric:=newMetricItem(v1.RequestCacheHitMetricKey, 6, CacheGroupKey)
+ requestCacheHitMetric.AddAxi("request cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ indexMetricItems=append(indexMetricItems, GroupMetricItem{
+ Key: "request_cache_hit",
+ Field: "payload.elasticsearch.shard_stats.request_cache.hit_count",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: requestCacheHitMetric,
+ FormatType: "num",
+ Units: "hits",
+ })
+ case v1.RequestCacheMissMetricKey:
+ // Request Cache Miss
+ requestCacheMissMetric:=newMetricItem(v1.RequestCacheMissMetricKey, 8, CacheGroupKey)
+ requestCacheMissMetric.AddAxi("request cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ indexMetricItems=append(indexMetricItems, GroupMetricItem{
+ Key: "request_cache_miss",
+ Field: "payload.elasticsearch.shard_stats.request_cache.miss_count",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: requestCacheMissMetric,
+ FormatType: "num",
+ Units: "misses",
+ })
+ case v1.QueryCacheCountMetricKey:
+ // Query Cache Count
+ queryCacheCountMetric:=newMetricItem(v1.QueryCacheCountMetricKey, 4, CacheGroupKey)
+		queryCacheCountMetric.AddAxi("query cache count","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ indexMetricItems=append(indexMetricItems, GroupMetricItem{
+ Key: "query_cache_count",
+ Field: "payload.elasticsearch.shard_stats.query_cache.cache_count",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: queryCacheCountMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case v1.QueryCacheHitMetricKey:
+		// Query Cache Hit
+ queryCacheHitMetric:=newMetricItem(v1.QueryCacheHitMetricKey, 5, CacheGroupKey)
+ queryCacheHitMetric.AddAxi("query cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ indexMetricItems=append(indexMetricItems, GroupMetricItem{
+ Key: "query_cache_hit",
+ Field: "payload.elasticsearch.shard_stats.query_cache.hit_count",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: queryCacheHitMetric,
+ FormatType: "num",
+ Units: "hits",
+ })
+ case v1.QueryCacheMissMetricKey:
+ // Query Cache Miss
+ queryCacheMissMetric:=newMetricItem(v1.QueryCacheMissMetricKey, 7, CacheGroupKey)
+ queryCacheMissMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ indexMetricItems=append(indexMetricItems, GroupMetricItem{
+ Key: "query_cache_miss",
+ Field: "payload.elasticsearch.shard_stats.query_cache.miss_count",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: queryCacheMissMetric,
+ FormatType: "num",
+ Units: "misses",
+ })
+ case v1.FielddataCacheMetricKey:
+		// fielddata cache memory size
+ fieldDataCacheMetric:=newMetricItem(v1.FielddataCacheMetricKey, 3, CacheGroupKey)
+ fieldDataCacheMetric.AddAxi("FieldData Cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
+ indexMetricItems=append(indexMetricItems, GroupMetricItem{
+ Key: "fielddata_cache",
+ Field: "payload.elasticsearch.shard_stats.fielddata.memory_size_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: fieldDataCacheMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case v1.SegmentMemoryMetricKey:
+ //segment memory
+ segmentMemoryMetric := newMetricItem(v1.SegmentMemoryMetricKey, 13, MemoryGroupKey)
+ segmentMemoryMetric.AddAxi("Segment memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "segment_memory",
+ Field: "payload.elasticsearch.shard_stats.segments.memory_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: segmentMemoryMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case v1.SegmentDocValuesMemoryMetricKey:
+ //segment doc values memory
+ docValuesMemoryMetric := newMetricItem(v1.SegmentDocValuesMemoryMetricKey, 13, MemoryGroupKey)
+ docValuesMemoryMetric.AddAxi("Segment Doc values Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "segment_doc_values_memory",
+ Field: "payload.elasticsearch.shard_stats.segments.doc_values_memory_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: docValuesMemoryMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case v1.SegmentTermsMemoryMetricKey:
+ //segment terms memory
+ termsMemoryMetric := newMetricItem(v1.SegmentTermsMemoryMetricKey, 13, MemoryGroupKey)
+ termsMemoryMetric.AddAxi("Segment Terms Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "segment_terms_memory",
+ Field: "payload.elasticsearch.shard_stats.segments.terms_memory_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: termsMemoryMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case v1.SegmentFieldsMemoryMetricKey:
+ //segment fields memory
+ fieldsMemoryMetric := newMetricItem(v1.SegmentFieldsMemoryMetricKey, 13, MemoryGroupKey)
+ fieldsMemoryMetric.AddAxi("Segment Fields Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "segment_fields_memory",
+ Field: "payload.elasticsearch.index_stats.total.segments.stored_fields_memory_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: fieldsMemoryMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case v1.SegmentIndexWriterMemoryMetricKey:
+ // segment index writer memory
+ segmentIndexWriterMemoryMetric:=newMetricItem(v1.SegmentIndexWriterMemoryMetricKey, 16, MemoryGroupKey)
+		segmentIndexWriterMemoryMetric.AddAxi("segment index writer memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
+ indexMetricItems=append(indexMetricItems, GroupMetricItem{
+ Key: "segment_index_writer_memory",
+ Field: "payload.elasticsearch.shard_stats.segments.index_writer_memory_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: segmentIndexWriterMemoryMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case v1.SegmentTermVectorsMemoryMetricKey:
+ // segment term vectors memory
+ segmentTermVectorsMemoryMetric:=newMetricItem(v1.SegmentTermVectorsMemoryMetricKey, 16, MemoryGroupKey)
+ segmentTermVectorsMemoryMetric.AddAxi("segment term vectors memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
+ indexMetricItems=append(indexMetricItems, GroupMetricItem{
+ Key: "segment_term_vectors_memory",
+ Field: "payload.elasticsearch.shard_stats.segments.term_vectors_memory_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: segmentTermVectorsMemoryMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
}
- // segment 数量
- segmentCountMetric:=newMetricItem("segment_count", 15, StorageGroupKey)
- segmentCountMetric.AddAxi("segment count","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- indexMetricItems=append(indexMetricItems, GroupMetricItem{
- Key: "segment_count",
- Field: "payload.elasticsearch.shard_stats.segments.count",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: segmentCountMetric,
- FormatType: "num",
- Units: "",
- })
- //索引文档个数
- docCountMetric := newMetricItem("doc_count", 2, DocumentGroupKey)
- docCountMetric.AddAxi("Doc count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "doc_count",
- Field: "payload.elasticsearch.shard_stats.docs.count",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: docCountMetric,
- FormatType: "num",
- Units: "",
- })
- // docs 删除数量
- docsDeletedMetric:=newMetricItem("docs_deleted", 17, DocumentGroupKey)
- docsDeletedMetric.AddAxi("docs deleted","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- indexMetricItems=append(indexMetricItems, GroupMetricItem{
- Key: "docs_deleted",
- Field: "payload.elasticsearch.shard_stats.docs.deleted",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: docsDeletedMetric,
- FormatType: "num",
- Units: "",
- })
- //查询次数
- queryTimesMetric := newMetricItem("query_times", 2, OperationGroupKey)
- queryTimesMetric.AddAxi("Query times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "query_times",
- Field: "payload.elasticsearch.shard_stats.search.query_total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: queryTimesMetric,
- FormatType: "num",
- Units: "requests/s",
- })
-
- //Fetch次数
- fetchTimesMetric := newMetricItem("fetch_times", 3, OperationGroupKey)
- fetchTimesMetric.AddAxi("Fetch times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "fetch_times",
- Field: "payload.elasticsearch.shard_stats.search.fetch_total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: fetchTimesMetric,
- FormatType: "num",
- Units: "requests/s",
- })
- //scroll 次数
- scrollTimesMetric := newMetricItem("scroll_times", 4, OperationGroupKey)
- scrollTimesMetric.AddAxi("scroll times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "scroll_times",
- Field: "payload.elasticsearch.shard_stats.search.scroll_total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: scrollTimesMetric,
- FormatType: "num",
- Units: "requests/s",
- })
- //Merge次数
- mergeTimesMetric := newMetricItem("merge_times", 7, OperationGroupKey)
- mergeTimesMetric.AddAxi("Merge times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "merge_times",
- Field: "payload.elasticsearch.shard_stats.merges.total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: mergeTimesMetric,
- FormatType: "num",
- Units: "requests/s",
- })
- //Refresh次数
- refreshTimesMetric := newMetricItem("refresh_times", 5, OperationGroupKey)
- refreshTimesMetric.AddAxi("Refresh times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "refresh_times",
- Field: "payload.elasticsearch.shard_stats.refresh.total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: refreshTimesMetric,
- FormatType: "num",
- Units: "requests/s",
- })
- //flush 次数
- flushTimesMetric := newMetricItem("flush_times", 6, OperationGroupKey)
- flushTimesMetric.AddAxi("flush times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "flush_times",
- Field: "payload.elasticsearch.shard_stats.flush.total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: flushTimesMetric,
- FormatType: "num",
- Units: "requests/s",
- })
-
- //写入速率
- indexingRateMetric := newMetricItem("indexing_rate", 1, OperationGroupKey)
- if shardID == "" {
- indexingRateMetric.OnlyPrimary = true
- }
- indexingRateMetric.AddAxi("Indexing rate","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "indexing_rate",
- Field: "payload.elasticsearch.shard_stats.indexing.index_total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: indexingRateMetric,
- FormatType: "num",
- Units: "doc/s",
- })
- indexingBytesMetric := newMetricItem("indexing_bytes", 2, OperationGroupKey)
- if shardID == "" {
- indexingBytesMetric.OnlyPrimary = true
- }
- indexingBytesMetric.AddAxi("Indexing bytes","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "indexing_bytes",
- Field: "payload.elasticsearch.shard_stats.store.size_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: indexingBytesMetric,
- FormatType: "bytes",
- Units: "bytes/s",
- })
- //写入时延
- indexingLatencyMetric := newMetricItem("indexing_latency", 1, LatencyGroupKey)
- if shardID == "" {
- indexingLatencyMetric.OnlyPrimary = true
- }
- indexingLatencyMetric.AddAxi("Indexing latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "indexing_latency",
- Field: "payload.elasticsearch.shard_stats.indexing.index_time_in_millis",
- Field2: "payload.elasticsearch.shard_stats.indexing.index_total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: indexingLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
-
- //查询时延
- queryLatencyMetric := newMetricItem("query_latency", 2, LatencyGroupKey)
- queryLatencyMetric.AddAxi("Query latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "query_latency",
- Field: "payload.elasticsearch.shard_stats.search.query_time_in_millis",
- Field2: "payload.elasticsearch.shard_stats.search.query_total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: queryLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
- //fetch时延
- fetchLatencyMetric := newMetricItem("fetch_latency", 3, LatencyGroupKey)
- fetchLatencyMetric.AddAxi("Fetch latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "fetch_latency",
- Field: "payload.elasticsearch.shard_stats.search.fetch_time_in_millis",
- Field2: "payload.elasticsearch.shard_stats.search.fetch_total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: fetchLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
-
- //merge时延
- mergeLatencyMetric := newMetricItem("merge_latency", 7, LatencyGroupKey)
- mergeLatencyMetric.AddAxi("Merge latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "merge_latency",
- Field: "payload.elasticsearch.shard_stats.merges.total_time_in_millis",
- Field2: "payload.elasticsearch.shard_stats.merges.total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: mergeLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
- //refresh时延
- refreshLatencyMetric := newMetricItem("refresh_latency", 5, LatencyGroupKey)
- refreshLatencyMetric.AddAxi("Refresh latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "refresh_latency",
- Field: "payload.elasticsearch.shard_stats.refresh.total_time_in_millis",
- Field2: "payload.elasticsearch.shard_stats.refresh.total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: refreshLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
- //scroll时延
- scrollLatencyMetric := newMetricItem("scroll_latency", 4, LatencyGroupKey)
- scrollLatencyMetric.AddAxi("Scroll Latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "scroll_latency",
- Field: "payload.elasticsearch.shard_stats.search.scroll_time_in_millis",
- Field2: "payload.elasticsearch.shard_stats.search.scroll_total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: scrollLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
- //flush 时延
- flushLatencyMetric := newMetricItem("flush_latency", 6, LatencyGroupKey)
- flushLatencyMetric.AddAxi("Flush latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "flush_latency",
- Field: "payload.elasticsearch.shard_stats.flush.total_time_in_millis",
- Field2: "payload.elasticsearch.shard_stats.flush.total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: flushLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
- //queryCache
- queryCacheMetric := newMetricItem("query_cache", 1, CacheGroupKey)
- queryCacheMetric.AddAxi("Query cache","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "query_cache",
- Field: "payload.elasticsearch.shard_stats.query_cache.memory_size_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: queryCacheMetric,
- FormatType: "bytes",
- Units: "",
- })
- //requestCache
- requestCacheMetric := newMetricItem("request_cache", 2, CacheGroupKey)
- requestCacheMetric.AddAxi("request cache","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "request_cache",
- Field: "payload.elasticsearch.shard_stats.request_cache.memory_size_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: requestCacheMetric,
- FormatType: "bytes",
- Units: "",
- })
- // Request Cache Hit
- requestCacheHitMetric:=newMetricItem("request_cache_hit", 6, CacheGroupKey)
- requestCacheHitMetric.AddAxi("request cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- indexMetricItems=append(indexMetricItems, GroupMetricItem{
- Key: "request_cache_hit",
- Field: "payload.elasticsearch.shard_stats.request_cache.hit_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: requestCacheHitMetric,
- FormatType: "num",
- Units: "hits",
- })
- // Request Cache Miss
- requestCacheMissMetric:=newMetricItem("request_cache_miss", 8, CacheGroupKey)
- requestCacheMissMetric.AddAxi("request cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- indexMetricItems=append(indexMetricItems, GroupMetricItem{
- Key: "request_cache_miss",
- Field: "payload.elasticsearch.shard_stats.request_cache.miss_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: requestCacheMissMetric,
- FormatType: "num",
- Units: "misses",
- })
- // Query Cache Count
- queryCacheCountMetric:=newMetricItem("query_cache_count", 4, CacheGroupKey)
- queryCacheCountMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- indexMetricItems=append(indexMetricItems, GroupMetricItem{
- Key: "query_cache_count",
- Field: "payload.elasticsearch.shard_stats.query_cache.cache_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: queryCacheCountMetric,
- FormatType: "num",
- Units: "",
- })
- // Query Cache Miss
- queryCacheHitMetric:=newMetricItem("query_cache_hit", 5, CacheGroupKey)
- queryCacheHitMetric.AddAxi("query cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- indexMetricItems=append(indexMetricItems, GroupMetricItem{
- Key: "query_cache_hit",
- Field: "payload.elasticsearch.shard_stats.query_cache.hit_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: queryCacheHitMetric,
- FormatType: "num",
- Units: "hits",
- })
-
- //// Query Cache evictions
- //queryCacheEvictionsMetric:=newMetricItem("query_cache_evictions", 11, CacheGroupKey)
- //queryCacheEvictionsMetric.AddAxi("query cache evictions","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- //indexMetricItems=append(indexMetricItems, GroupMetricItem{
- // Key: "query_cache_evictions",
- // Field: "payload.elasticsearch.index_stats.total.query_cache.evictions",
- // ID: util.GetUUID(),
- // IsDerivative: true,
- // MetricItem: queryCacheEvictionsMetric,
- // FormatType: "num",
- // Units: "evictions",
- //})
-
- // Query Cache Miss
- queryCacheMissMetric:=newMetricItem("query_cache_miss", 7, CacheGroupKey)
- queryCacheMissMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- indexMetricItems=append(indexMetricItems, GroupMetricItem{
- Key: "query_cache_miss",
- Field: "payload.elasticsearch.shard_stats.query_cache.miss_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: queryCacheMissMetric,
- FormatType: "num",
- Units: "misses",
- })
- // Fielddata内存占用大小
- fieldDataCacheMetric:=newMetricItem("fielddata_cache", 3, CacheGroupKey)
- fieldDataCacheMetric.AddAxi("FieldData Cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- indexMetricItems=append(indexMetricItems, GroupMetricItem{
- Key: "fielddata_cache",
- Field: "payload.elasticsearch.shard_stats.fielddata.memory_size_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: fieldDataCacheMetric,
- FormatType: "bytes",
- Units: "",
- })
- //segment memory
- segmentMemoryMetric := newMetricItem("segment_memory", 13, MemoryGroupKey)
- segmentMemoryMetric.AddAxi("Segment memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "segment_memory",
- Field: "payload.elasticsearch.shard_stats.segments.memory_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: segmentMemoryMetric,
- FormatType: "bytes",
- Units: "",
- })
-
- //segment doc values memory
- docValuesMemoryMetric := newMetricItem("segment_doc_values_memory", 13, MemoryGroupKey)
- docValuesMemoryMetric.AddAxi("Segment Doc values Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "segment_doc_values_memory",
- Field: "payload.elasticsearch.shard_stats.segments.doc_values_memory_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: docValuesMemoryMetric,
- FormatType: "bytes",
- Units: "",
- })
-
- //segment terms memory
- termsMemoryMetric := newMetricItem("segment_terms_memory", 13, MemoryGroupKey)
- termsMemoryMetric.AddAxi("Segment Terms Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "segment_terms_memory",
- Field: "payload.elasticsearch.shard_stats.segments.terms_memory_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: termsMemoryMetric,
- FormatType: "bytes",
- Units: "",
- })
-
- //segment fields memory
- fieldsMemoryMetric := newMetricItem("segment_fields_memory", 13, MemoryGroupKey)
- fieldsMemoryMetric.AddAxi("Segment Fields Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "segment_fields_memory",
- Field: "payload.elasticsearch.index_stats.total.segments.stored_fields_memory_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: fieldsMemoryMetric,
- FormatType: "bytes",
- Units: "",
- })
- // segment index writer memory
- segmentIndexWriterMemoryMetric:=newMetricItem("segment_index_writer_memory", 16, MemoryGroupKey)
- segmentIndexWriterMemoryMetric.AddAxi("segment doc values memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- indexMetricItems=append(indexMetricItems, GroupMetricItem{
- Key: "segment_index_writer_memory",
- Field: "payload.elasticsearch.shard_stats.segments.index_writer_memory_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: segmentIndexWriterMemoryMetric,
- FormatType: "bytes",
- Units: "",
- })
- // segment term vectors memory
- segmentTermVectorsMemoryMetric:=newMetricItem("segment_term_vectors_memory", 16, MemoryGroupKey)
- segmentTermVectorsMemoryMetric.AddAxi("segment term vectors memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- indexMetricItems=append(indexMetricItems, GroupMetricItem{
- Key: "segment_term_vectors_memory",
- Field: "payload.elasticsearch.shard_stats.segments.term_vectors_memory_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: segmentTermVectorsMemoryMetric,
- FormatType: "bytes",
- Units: "",
- })
aggs:=map[string]interface{}{}
sumAggs := util.MapStr{}
@@ -727,7 +742,7 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
},
},
}
- return h.getMetrics(query, indexMetricItems, bucketSize), nil
+ return h.getMetrics(ctx, query, indexMetricItems, bucketSize), nil
}
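
The getIndexMetrics rewrite above replaces "build every metric item on every request" with a switch on metricKey that builds only the item the caller asked for, shrinking each Elasticsearch request to a single metric's aggregations. A minimal sketch of the dispatch shape (MetricItem and the key constants below are stand-ins mirroring the v1 constants in the diff):

```go
package main

import "fmt"

// MetricItem is a simplified stand-in for the project's GroupMetricItem.
type MetricItem struct {
	Key   string
	Field string
}

const (
	IndexStorageMetricKey = "index_storage"
	DocCountMetricKey     = "doc_count"
)

// buildItems returns only the requested metric instead of all ~30 of them.
func buildItems(metricKey string) []MetricItem {
	items := []MetricItem{}
	switch metricKey {
	case IndexStorageMetricKey:
		items = append(items, MetricItem{Key: metricKey,
			Field: "payload.elasticsearch.shard_stats.store.size_in_bytes"})
	case DocCountMetricKey:
		items = append(items, MetricItem{Key: metricKey,
			Field: "payload.elasticsearch.shard_stats.docs.count"})
	}
	return items
}

func main() {
	fmt.Println(buildItems(DocCountMetricKey)) // one item, one aggregation
}
```
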
diff --git a/modules/elastic/api/index_overview.go b/modules/elastic/api/index_overview.go
index 816153d8..7a822462 100644
--- a/modules/elastic/api/index_overview.go
+++ b/modules/elastic/api/index_overview.go
@@ -28,6 +28,7 @@
package api
import (
+ "context"
"fmt"
log "github.com/cihub/seelog"
httprouter "infini.sh/framework/core/api/router"
@@ -40,6 +41,7 @@ import (
"infini.sh/framework/modules/elastic/common"
"net/http"
"strings"
+ "time"
)
func (h *APIHandler) SearchIndexMetadata(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
@@ -503,7 +505,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
},
},
}
- metrics := h.getMetrics(query, nodeMetricItems, bucketSize)
+ metrics := h.getMetrics(context.Background(), query, nodeMetricItems, bucketSize)
indexMetrics := map[string]util.MapStr{}
for key, item := range metrics {
for _, line := range item.Lines {
@@ -851,6 +853,16 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ
if bucketSize <= 60 {
min = min - int64(2 * bucketSize * 1000)
}
+ metricKey := h.GetParameter(req, "key")
+ timeout := h.GetParameterOrDefault(req, "timeout", "60s")
+ du, err := time.ParseDuration(timeout)
+ if err != nil {
+ log.Error(err)
+ h.WriteError(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), du)
+ defer cancel()
query := map[string]interface{}{}
query["query"] = util.MapStr{
"bool": util.MapStr{
@@ -870,76 +882,87 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
metricItems := []*common.MetricItem{}
- metricItem:=newMetricItem("index_throughput", 1, OperationGroupKey)
- metricItem.AddAxi("indexing","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- if shardID == "" {
- metricItem.AddLine("Indexing Rate","Primary Indexing","Number of documents being indexed for node.","group1","payload.elasticsearch.shard_stats.indexing.index_total","max",bucketSizeStr,"doc/s","num","0,0.[00]","0,0.[00]",false,true)
- metricItem.AddLine("Deleting Rate","Primary Deleting","Number of documents being deleted for node.","group1","payload.elasticsearch.shard_stats.indexing.delete_total","max",bucketSizeStr,"doc/s","num","0,0.[00]","0,0.[00]",false,true)
- metricItem.Lines[0].Metric.OnlyPrimary = true
- metricItem.Lines[1].Metric.OnlyPrimary = true
- }else{
- metricItem.AddLine("Indexing Rate","Indexing Rate","Number of documents being indexed for node.","group1","payload.elasticsearch.shard_stats.indexing.index_total","max",bucketSizeStr,"doc/s","num","0,0.[00]","0,0.[00]",false,true)
- metricItem.AddLine("Deleting Rate","Deleting Rate","Number of documents being deleted for node.","group1","payload.elasticsearch.shard_stats.indexing.delete_total","max",bucketSizeStr,"doc/s","num","0,0.[00]","0,0.[00]",false,true)
- }
- metricItems=append(metricItems,metricItem)
- metricItem=newMetricItem("search_throughput", 2, OperationGroupKey)
- metricItem.AddAxi("searching","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,false)
- metricItem.AddLine("Search Rate","Search Rate",
- "Number of search requests being executed.",
- "group1","payload.elasticsearch.shard_stats.search.query_total","max",bucketSizeStr,"query/s","num","0,0.[00]","0,0.[00]",false,true)
- metricItems=append(metricItems,metricItem)
+ metrics := map[string]*common.MetricItem{}
+ if metricKey == ShardStateMetricKey {
+ shardStateMetric, err := h.getIndexShardsMetric(ctx, clusterID, indexName, min, max, bucketSize)
+ if err != nil {
+ log.Error(err)
+ h.WriteError(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ metrics["shard_state"] = shardStateMetric
+	} else {
+ switch metricKey {
+ case IndexThroughputMetricKey:
+ metricItem := newMetricItem("index_throughput", 1, OperationGroupKey)
+ metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ if shardID == "" {
+ metricItem.AddLine("Indexing Rate", "Primary Indexing", "Number of documents being indexed for node.", "group1", "payload.elasticsearch.shard_stats.indexing.index_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.AddLine("Deleting Rate", "Primary Deleting", "Number of documents being deleted for node.", "group1", "payload.elasticsearch.shard_stats.indexing.delete_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.Lines[0].Metric.OnlyPrimary = true
+ metricItem.Lines[1].Metric.OnlyPrimary = true
+ } else {
+ metricItem.AddLine("Indexing Rate", "Indexing Rate", "Number of documents being indexed for node.", "group1", "payload.elasticsearch.shard_stats.indexing.index_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.AddLine("Deleting Rate", "Deleting Rate", "Number of documents being deleted for node.", "group1", "payload.elasticsearch.shard_stats.indexing.delete_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
+ }
+ metricItems = append(metricItems, metricItem)
+ case SearchThroughputMetricKey:
+ metricItem := newMetricItem("search_throughput", 2, OperationGroupKey)
+ metricItem.AddAxi("searching", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
+ metricItem.AddLine("Search Rate", "Search Rate",
+ "Number of search requests being executed.",
+ "group1", "payload.elasticsearch.shard_stats.search.query_total", "max", bucketSizeStr, "query/s", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItems = append(metricItems, metricItem)
+ case IndexLatencyMetricKey:
+ metricItem := newMetricItem("index_latency", 3, LatencyGroupKey)
+ metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ if shardID == "" { //index level
+ metricItem.AddLine("Indexing Latency", "Primary Indexing Latency", "Average latency for indexing documents.", "group1", "payload.elasticsearch.shard_stats.indexing.index_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.AddLine("Deleting Latency", "Primary Deleting Latency", "Average latency for delete documents.", "group1", "payload.elasticsearch.shard_stats.indexing.delete_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.Lines[0].Metric.OnlyPrimary = true
+ metricItem.Lines[1].Metric.OnlyPrimary = true
+ } else { // shard level
+ metricItem.AddLine("Indexing Latency", "Indexing Latency", "Average latency for indexing documents.", "group1", "payload.elasticsearch.shard_stats.indexing.index_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.AddLine("Deleting Latency", "Deleting Latency", "Average latency for delete documents.", "group1", "payload.elasticsearch.shard_stats.indexing.delete_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
+ }
+ metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.shard_stats.indexing.index_total"
+ metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
+ return value / value2
+ }
+ metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.shard_stats.indexing.delete_total"
+ metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
+ return value / value2
+ }
+ metricItems = append(metricItems, metricItem)
+ case SearchLatencyMetricKey:
+ metricItem := newMetricItem("search_latency", 4, LatencyGroupKey)
+ metricItem.AddAxi("searching", "group2", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
- metricItem=newMetricItem("index_latency", 3, LatencyGroupKey)
- metricItem.AddAxi("indexing","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- if shardID == "" { //index level
- metricItem.AddLine("Indexing Latency","Primary Indexing Latency","Average latency for indexing documents.","group1","payload.elasticsearch.shard_stats.indexing.index_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
- metricItem.AddLine("Deleting Latency","Primary Deleting Latency","Average latency for delete documents.","group1","payload.elasticsearch.shard_stats.indexing.delete_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
- metricItem.Lines[0].Metric.OnlyPrimary = true
- metricItem.Lines[1].Metric.OnlyPrimary = true
- }else{ // shard level
- metricItem.AddLine("Indexing Latency","Indexing Latency","Average latency for indexing documents.","group1","payload.elasticsearch.shard_stats.indexing.index_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
- metricItem.AddLine("Deleting Latency","Deleting Latency","Average latency for delete documents.","group1","payload.elasticsearch.shard_stats.indexing.delete_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
+ metricItem.AddLine("Searching", "Query Latency", "Average latency for searching query.", "group2", "payload.elasticsearch.shard_stats.search.query_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.shard_stats.search.query_total"
+ metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
+ return value / value2
+ }
+ metricItem.AddLine("Searching", "Fetch Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.shard_stats.search.fetch_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.shard_stats.search.fetch_total"
+ metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
+ return value / value2
+ }
+ metricItem.AddLine("Searching", "Scroll Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.shard_stats.search.scroll_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.Lines[2].Metric.Field2 = "payload.elasticsearch.shard_stats.search.scroll_total"
+ metricItem.Lines[2].Metric.Calc = func(value, value2 float64) float64 {
+ return value / value2
+ }
+ metricItems = append(metricItems, metricItem)
+ }
+		metrics = h.getSingleIndexMetrics(ctx, metricItems, query, bucketSize)
}
- metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.shard_stats.indexing.index_total"
- metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
- return value/value2
- }
- metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.shard_stats.indexing.delete_total"
- metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
- return value/value2
- }
- metricItems=append(metricItems,metricItem)
- metricItem=newMetricItem("search_latency", 4, LatencyGroupKey)
- metricItem.AddAxi("searching","group2",common.PositionLeft,"num","0,0","0,0.[00]",5,false)
-
- metricItem.AddLine("Searching","Query Latency","Average latency for searching query.","group2","payload.elasticsearch.shard_stats.search.query_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
- metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.shard_stats.search.query_total"
- metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
- return value/value2
- }
- metricItem.AddLine("Searching","Fetch Latency","Average latency for searching fetch.","group2","payload.elasticsearch.shard_stats.search.fetch_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
- metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.shard_stats.search.fetch_total"
- metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
- return value/value2
- }
- metricItem.AddLine("Searching","Scroll Latency","Average latency for searching fetch.","group2","payload.elasticsearch.shard_stats.search.scroll_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
- metricItem.Lines[2].Metric.Field2 = "payload.elasticsearch.shard_stats.search.scroll_total"
- metricItem.Lines[2].Metric.Calc = func(value, value2 float64) float64 {
- return value/value2
- }
- metricItems=append(metricItems,metricItem)
- metrics := h.getSingleIndexMetrics(metricItems,query, bucketSize)
- shardStateMetric, err := h.getIndexShardsMetric(clusterID, indexName, min, max, bucketSize)
- if err != nil {
- log.Error(err)
- }
- metrics["shard_state"] = shardStateMetric
resBody["metrics"] = metrics
h.WriteJSON(w, resBody, http.StatusOK)
}
-func (h *APIHandler) getIndexShardsMetric(id, indexName string, min, max int64, bucketSize int)(*common.MetricItem, error){
+func (h *APIHandler) getIndexShardsMetric(ctx context.Context, id, indexName string, min, max int64, bucketSize int)(*common.MetricItem, error){
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
@@ -1007,7 +1030,8 @@ func (h *APIHandler) getIndexShardsMetric(id, indexName string, min, max int64,
},
},
}
- response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
+ queryDSL := util.MustToJSONBytes(query)
+ response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
if err != nil {
log.Error(err)
return nil, err
@@ -1025,6 +1049,7 @@ func (h *APIHandler) getIndexShardsMetric(id, indexName string, min, max int64,
}
metricItem.Lines[0].Data = metricData
metricItem.Lines[0].Type = common.GraphTypeBar
+ metricItem.Request = string(queryDSL)
return metricItem, nil
}
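
Two details in getIndexShardsMetric generalize across this patch: the search now goes through a ctx-aware QueryDSL call, and the serialized DSL is attached to the returned metric item for troubleshooting. A hedged sketch under those assumptions (the searcher interface below is hypothetical, not the framework's actual API):

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
)

// searcher abstracts a ctx-aware query call like the framework's QueryDSL.
type searcher interface {
	QueryDSL(ctx context.Context, index string, dsl []byte) ([]byte, error)
}

type fakeClient struct{}

func (fakeClient) QueryDSL(ctx context.Context, index string, dsl []byte) ([]byte, error) {
	if err := ctx.Err(); err != nil { // honor cancellation before running
		return nil, err
	}
	return []byte(`{"hits":{"total":0}}`), nil
}

type metricItem struct {
	Request string // serialized query, kept on the result for debugging
}

func shardStateMetric(ctx context.Context, c searcher) (*metricItem, error) {
	query := map[string]interface{}{"size": 0}
	dsl, _ := json.Marshal(query)
	if _, err := c.QueryDSL(ctx, ".infini_metrics", dsl); err != nil {
		return nil, err
	}
	return &metricItem{Request: string(dsl)}, nil
}

func main() {
	m, err := shardStateMetric(context.Background(), fakeClient{})
	fmt.Println(m, err)
}
```
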
diff --git a/modules/elastic/api/manage.go b/modules/elastic/api/manage.go
index 2aaa6f13..7e2ead62 100644
--- a/modules/elastic/api/manage.go
+++ b/modules/elastic/api/manage.go
@@ -532,6 +532,7 @@ func (h *APIHandler) HandleClusterMetricsAction(w http.ResponseWriter, req *http
h.APIHandler.HandleClusterMetricsAction(w, req, ps)
return
}
+ key := h.GetParameter(req, "key")
bucketSize, min, max, err := h.getMetricRangeAndBucketSize(req, 10, 90)
if err != nil {
@@ -546,18 +547,23 @@ func (h *APIHandler) HandleClusterMetricsAction(w http.ResponseWriter, req *http
}
}
- //fmt.Println(min," vs ",max,",",rangeFrom,rangeTo,"range hours:",hours)
-
- //metrics:=h.GetClusterMetrics(id,bucketSize,min,max)
- isOverview := h.GetIntOrDefault(req, "overview", 0)
var metrics interface{}
if bucketSize <= 60 {
min = min - int64(2*bucketSize*1000)
}
- if isOverview == 1 {
- metrics = h.GetClusterIndexMetrics(id, bucketSize, min, max)
- } else {
- metrics = h.GetClusterMetrics(id, bucketSize, min, max)
+ timeout := h.GetParameterOrDefault(req, "timeout", "60s")
+ du, err := time.ParseDuration(timeout)
+ if err != nil {
+ log.Error(err)
+ h.WriteError(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), du)
+ defer cancel()
+ if util.StringInArray([]string{IndexThroughputMetricKey, SearchThroughputMetricKey, IndexLatencyMetricKey, SearchLatencyMetricKey}, key) {
+ metrics = h.GetClusterIndexMetrics(ctx, id, bucketSize, min, max, key)
+	} else {
+ metrics = h.GetClusterMetrics(ctx, id, bucketSize, min, max, key)
}
resBody["metrics"] = metrics
@@ -584,7 +590,17 @@ func (h *APIHandler) HandleNodeMetricsAction(w http.ResponseWriter, req *http.Re
if bucketSize <= 60 {
min = min - int64(2*bucketSize*1000)
}
- resBody["metrics"], err = h.getNodeMetrics(id, bucketSize, min, max, nodeName, top)
+ key := h.GetParameter(req, "key")
+ timeout := h.GetParameterOrDefault(req, "timeout", "60s")
+ du, err := time.ParseDuration(timeout)
+ if err != nil {
+ log.Error(err)
+ h.WriteError(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), du)
+ defer cancel()
+ resBody["metrics"], err = h.getNodeMetrics(ctx, id, bucketSize, min, max, nodeName, top, key)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
@@ -627,57 +643,87 @@ func (h *APIHandler) HandleIndexMetricsAction(w http.ResponseWriter, req *http.R
if bucketSize <= 60 {
min = min - int64(2*bucketSize*1000)
}
- metrics, err := h.getIndexMetrics(req, id, bucketSize, min, max, indexName, top, shardID)
+ key := h.GetParameter(req, "key")
+ timeout := h.GetParameterOrDefault(req, "timeout", "60s")
+ du, err := time.ParseDuration(timeout)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
-
- if metrics["doc_count"] != nil && metrics["docs_deleted"] != nil && len(metrics["doc_count"].Lines) > 0 && len(metrics["docs_deleted"].Lines) > 0 {
- metricA := metrics["doc_count"]
- metricB := metrics["docs_deleted"]
- if dataA, ok := metricA.Lines[0].Data.([][]interface{}); ok {
- if dataB, ok := metricB.Lines[0].Data.([][]interface{}); ok {
- data := make([]map[string]interface{}, 0, len(dataA)*2)
- var (
- x1 float64
- x2 float64
- )
- for i := 0; i < len(dataA); i++ {
- x1 = dataA[i][1].(float64)
- x2 = dataB[i][1].(float64)
- if x1+x2 == 0 {
- continue
- }
- data = append(data, map[string]interface{}{
- "x": dataA[i][0],
- "y": x1 / (x1 + x2) * 100,
- "g": "Doc Count",
- })
- data = append(data, map[string]interface{}{
- "x": dataA[i][0],
- "y": x2 / (x1 + x2) * 100,
- "g": "Doc Deleted",
- })
- }
- metricDocPercent := &common.MetricItem{
- Axis: []*common.MetricAxis{},
- Key: "doc_percent",
- Group: metricA.Group,
- Order: 18,
- Lines: []*common.MetricLine{
- {
- TimeRange: metricA.Lines[0].TimeRange,
- Data: data,
- Type: common.GraphTypeBar,
- },
- },
- }
- metrics["doc_percent"] = metricDocPercent
+ ctx, cancel := context.WithTimeout(context.Background(), du)
+ defer cancel()
+ var metrics map[string]*common.MetricItem
+ if key == v1.DocPercentMetricKey {
+ metrics, err = h.getIndexMetrics(ctx, req, id, bucketSize, min, max, indexName, top, shardID, v1.DocCountMetricKey)
+ if err != nil {
+ log.Error(err)
+ h.WriteError(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ docsDeletedMetrics, err := h.getIndexMetrics(ctx, req, id, bucketSize, min, max, indexName, top, shardID, v1.DocsDeletedMetricKey)
+ if err != nil {
+ log.Error(err)
+ h.WriteError(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ for k, v := range docsDeletedMetrics {
+ if v != nil {
+ metrics[k] = v
}
}
+ if metrics["doc_count"] != nil && metrics["docs_deleted"] != nil && len(metrics["doc_count"].Lines) > 0 && len(metrics["docs_deleted"].Lines) > 0 {
+ metricA := metrics["doc_count"]
+ metricB := metrics["docs_deleted"]
+ if dataA, ok := metricA.Lines[0].Data.([][]interface{}); ok {
+ if dataB, ok := metricB.Lines[0].Data.([][]interface{}); ok {
+ data := make([]map[string]interface{}, 0, len(dataA)*2)
+ var (
+ x1 float64
+ x2 float64
+ )
+ for i := 0; i < len(dataA); i++ {
+ x1 = dataA[i][1].(float64)
+ x2 = dataB[i][1].(float64)
+ if x1+x2 == 0 {
+ continue
+ }
+ data = append(data, map[string]interface{}{
+ "x": dataA[i][0],
+ "y": x1 / (x1 + x2) * 100,
+ "g": "Doc Count",
+ })
+ data = append(data, map[string]interface{}{
+ "x": dataA[i][0],
+ "y": x2 / (x1 + x2) * 100,
+ "g": "Doc Deleted",
+ })
+ }
+ metricDocPercent := &common.MetricItem{
+ Axis: []*common.MetricAxis{},
+ Key: "doc_percent",
+ Group: metricA.Group,
+ Order: 18,
+ Lines: []*common.MetricLine{
+ {
+ TimeRange: metricA.Lines[0].TimeRange,
+ Data: data,
+ Type: common.GraphTypeBar,
+ },
+ },
+ }
+ metrics["doc_percent"] = metricDocPercent
+ }
+ }
+ }
+ } else {
+ metrics, err = h.getIndexMetrics(ctx, req, id, bucketSize, min, max, indexName, top, shardID, key)
+ if err != nil {
+ log.Error(err)
+ h.WriteError(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
}
resBody["metrics"] = metrics
ver := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).GetVersion()
@@ -711,7 +757,17 @@ func (h *APIHandler) HandleQueueMetricsAction(w http.ResponseWriter, req *http.R
if bucketSize <= 60 {
min = min - int64(2*bucketSize*1000)
}
- resBody["metrics"], err = h.getThreadPoolMetrics(id, bucketSize, min, max, nodeName, top)
+ key := h.GetParameter(req, "key")
+ timeout := h.GetParameterOrDefault(req, "timeout", "60s")
+ du, err := time.ParseDuration(timeout)
+ if err != nil {
+ log.Error(err)
+ h.WriteError(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), du)
+ defer cancel()
+ resBody["metrics"], err = h.getThreadPoolMetrics(ctx, id, bucketSize, min, max, nodeName, top, key)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
@@ -837,56 +893,96 @@ const (
CircuitBreakerGroupKey = "circuit_breaker"
)
-func (h *APIHandler) GetClusterMetrics(id string, bucketSize int, min, max int64) map[string]*common.MetricItem {
+const (
+ ClusterStorageMetricKey = "cluster_storage"
+ ClusterDocumentsMetricKey = "cluster_documents"
+ ClusterIndicesMetricKey = "cluster_indices"
+ ClusterNodeCountMetricKey = "node_count"
+ ClusterHealthMetricKey = "cluster_health"
+ ShardCountMetricKey = "shard_count"
+ CircuitBreakerMetricKey = "circuit_breaker"
+)
+
+func (h *APIHandler) GetClusterMetrics(ctx context.Context, id string, bucketSize int, min, max int64, metricKey string) map[string]*common.MetricItem {
+
+ var clusterMetricsResult = map[string]*common.MetricItem{}
+ switch metricKey {
+ case ClusterDocumentsMetricKey,
+ ClusterStorageMetricKey,
+ ClusterIndicesMetricKey,
+ ClusterNodeCountMetricKey:
+ clusterMetricsResult = h.getClusterMetricsByKey(ctx, id, bucketSize, min, max, metricKey)
+ case IndexLatencyMetricKey, IndexThroughputMetricKey, SearchThroughputMetricKey, SearchLatencyMetricKey:
+ clusterMetricsResult = h.GetClusterIndexMetrics(ctx, id, bucketSize, min, max, metricKey)
+ case ClusterHealthMetricKey:
+ statusMetric, err := h.getClusterStatusMetric(ctx, id, min, max, bucketSize)
+ if err == nil {
+ clusterMetricsResult[ClusterHealthMetricKey] = statusMetric
+ } else {
+ log.Error("get cluster status metric error: ", err)
+ }
+ case ShardCountMetricKey:
+ clusterMetricsResult = h.getShardsMetric(ctx, id, min, max, bucketSize)
+
+ case CircuitBreakerMetricKey:
+ clusterMetricsResult = h.getCircuitBreakerMetric(ctx, id, min, max, bucketSize)
+ }
+
+ return clusterMetricsResult
+}
+
+func (h *APIHandler) getClusterMetricsByKey(ctx context.Context, id string, bucketSize int, min, max int64, metricKey string) map[string]*common.MetricItem {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
clusterMetricItems := []*common.MetricItem{}
- metricItem := newMetricItem("cluster_storage", 8, StorageGroupKey)
- metricItem.AddAxi("indices_storage", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
- metricItem.AddAxi("available_storage", "group2", common.PositionRight, "bytes", "0.[0]", "0.[0]", 5, true)
+ switch metricKey {
+ case ClusterStorageMetricKey:
+ metricItem := newMetricItem("cluster_storage", 8, StorageGroupKey)
+ metricItem.AddAxi("indices_storage", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
+ metricItem.AddAxi("available_storage", "group2", common.PositionRight, "bytes", "0.[0]", "0.[0]", 5, true)
- metricItem.AddLine("Disk", "Indices Storage", "", "group1", "payload.elasticsearch.cluster_stats.indices.store.size_in_bytes", "max", bucketSizeStr, "", "bytes", "0,0.[00]", "0,0.[00]", false, false)
- metricItem.AddLine("Disk", "Available Disk", "", "group2", "payload.elasticsearch.cluster_stats.nodes.fs.available_in_bytes", "max", bucketSizeStr, "", "bytes", "0,0.[00]", "0,0.[00]", false, false)
+ metricItem.AddLine("Disk", "Indices Storage", "", "group1", "payload.elasticsearch.cluster_stats.indices.store.size_in_bytes", "max", bucketSizeStr, "", "bytes", "0,0.[00]", "0,0.[00]", false, false)
+ metricItem.AddLine("Disk", "Available Disk", "", "group2", "payload.elasticsearch.cluster_stats.nodes.fs.available_in_bytes", "max", bucketSizeStr, "", "bytes", "0,0.[00]", "0,0.[00]", false, false)
- clusterMetricItems = append(clusterMetricItems, metricItem)
+ clusterMetricItems = append(clusterMetricItems, metricItem)
- metricItem = newMetricItem("cluster_documents", 4, StorageGroupKey)
- metricItem.AddAxi("count", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
- metricItem.AddAxi("deleted", "group2", common.PositionRight, "num", "0,0", "0,0.[00]", 5, false)
- metricItem.AddLine("Documents Count", "Documents Count", "", "group1", "payload.elasticsearch.cluster_stats.indices.docs.count", "max", bucketSizeStr, "", "num", "0,0.[00]", "0,0.[00]", false, false)
- metricItem.AddLine("Documents Deleted", "Documents Deleted", "", "group2", "payload.elasticsearch.cluster_stats.indices.docs.deleted", "max", bucketSizeStr, "", "num", "0,0.[00]", "0,0.[00]", false, false)
- clusterMetricItems = append(clusterMetricItems, metricItem)
+ case ClusterDocumentsMetricKey:
+ metricItem := newMetricItem("cluster_documents", 4, StorageGroupKey)
+ metricItem.AddAxi("count", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
+ metricItem.AddAxi("deleted", "group2", common.PositionRight, "num", "0,0", "0,0.[00]", 5, false)
+ metricItem.AddLine("Documents Count", "Documents Count", "", "group1", "payload.elasticsearch.cluster_stats.indices.docs.count", "max", bucketSizeStr, "", "num", "0,0.[00]", "0,0.[00]", false, false)
+ metricItem.AddLine("Documents Deleted", "Documents Deleted", "", "group2", "payload.elasticsearch.cluster_stats.indices.docs.deleted", "max", bucketSizeStr, "", "num", "0,0.[00]", "0,0.[00]", false, false)
+ clusterMetricItems = append(clusterMetricItems, metricItem)
+ case ClusterIndicesMetricKey:
+ metricItem := newMetricItem("cluster_indices", 6, StorageGroupKey)
+ metricItem.AddAxi("count", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
+ metricItem.AddLine("Indices Count", "Indices Count", "", "group1", "payload.elasticsearch.cluster_stats.indices.count", "max", bucketSizeStr, "", "num", "0,0.[00]", "0,0.[00]", false, false)
+ clusterMetricItems = append(clusterMetricItems, metricItem)
+ case ClusterNodeCountMetricKey:
+ metricItem := newMetricItem("node_count", 5, MemoryGroupKey)
+ metricItem.AddAxi("count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
+ meta := elastic.GetMetadata(id)
+ if meta == nil {
+ err := fmt.Errorf("metadata of cluster [%s] is not found", id)
+ panic(err)
+ }
+ majorVersion := meta.GetMajorVersion()
- metricItem = newMetricItem("cluster_indices", 6, StorageGroupKey)
- metricItem.AddAxi("count", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
- metricItem.AddLine("Indices Count", "Indices Count", "", "group1", "payload.elasticsearch.cluster_stats.indices.count", "max", bucketSizeStr, "", "num", "0,0.[00]", "0,0.[00]", false, false)
- clusterMetricItems = append(clusterMetricItems, metricItem)
+ metricItem.AddLine("Total", "Total Nodes", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.total", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
+ if majorVersion < 5 {
+ metricItem.AddLine("Master Only", "Master Only", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.master_only", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
+ metricItem.AddLine("Data Node", "Data Only", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.data_only", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
+ metricItem.AddLine("Master Data", "Master Data", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.master_data", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
+ } else {
+ metricItem.AddLine("Master Node", "Master Node", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.master", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
+ metricItem.AddLine("Data Node", "Data Node", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.data", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
+ metricItem.AddLine("Coordinating Node Only", "Coordinating Node Only", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.coordinating_only", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
+ metricItem.AddLine("Ingest Node", "Ingest Node", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.ingest", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
+ }
- metricItem = newMetricItem("node_count", 5, MemoryGroupKey)
- metricItem.AddAxi("count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
- meta := elastic.GetMetadata(id)
- if meta == nil {
- err := fmt.Errorf("metadata of cluster [%s] is not found", id)
- panic(err)
- }
- majorVersion := meta.GetMajorVersion()
-
- metricItem.AddLine("Total", "Total Nodes", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.total", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
-
- //TODO check version difference
- if majorVersion < 5 {
- metricItem.AddLine("Master Only", "Master Only", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.master_only", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
- metricItem.AddLine("Data Node", "Data Only", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.data_only", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
- metricItem.AddLine("Master Data", "Master Data", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.master_data", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
- } else {
- metricItem.AddLine("Master Node", "Master Node", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.master", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
- metricItem.AddLine("Data Node", "Data Node", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.data", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
- metricItem.AddLine("Coordinating Node Only", "Coordinating Node Only", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.coordinating_only", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
- metricItem.AddLine("Ingest Node", "Ingest Node", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.ingest", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
+ clusterMetricItems = append(clusterMetricItems, metricItem)
}
- clusterMetricItems = append(clusterMetricItems, metricItem)
query := map[string]interface{}{}
query["query"] = util.MapStr{
"bool": util.MapStr{
@@ -925,79 +1021,70 @@ func (h *APIHandler) GetClusterMetrics(id string, bucketSize int, min, max int64
},
},
}
- //todo: since there is four queries, we can change these query to async
- indexMetricsResult := h.GetClusterIndexMetrics(id, bucketSize, min, max)
- clusterMetricsResult := h.getSingleMetrics(clusterMetricItems, query, bucketSize)
- for k, v := range clusterMetricsResult {
- indexMetricsResult[k] = v
- }
- statusMetric, err := h.getClusterStatusMetric(id, min, max, bucketSize)
- if err == nil {
- indexMetricsResult["cluster_health"] = statusMetric
- } else {
- log.Error("get cluster status metric error: ", err)
- }
- clusterHealthMetricsResult := h.getShardsMetric(id, min, max, bucketSize)
- for k, v := range clusterHealthMetricsResult {
- indexMetricsResult[k] = v
- }
- // get CircuitBreaker metric
- circuitBreakerMetricsResult := h.getCircuitBreakerMetric(id, min, max, bucketSize)
- for k, v := range circuitBreakerMetricsResult {
- indexMetricsResult[k] = v
- }
-
- return indexMetricsResult
+ return h.getSingleMetrics(ctx, clusterMetricItems, query, bucketSize)
}
-func (h *APIHandler) GetClusterIndexMetrics(id string, bucketSize int, min, max int64) map[string]*common.MetricItem {
+
+const (
+ IndexThroughputMetricKey = "index_throughput"
+ SearchThroughputMetricKey = "search_throughput"
+ IndexLatencyMetricKey = "index_latency"
+ SearchLatencyMetricKey = "search_latency"
+)
+
+func (h *APIHandler) GetClusterIndexMetrics(ctx context.Context, id string, bucketSize int, min, max int64, metricKey string) map[string]*common.MetricItem {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
metricItems := []*common.MetricItem{}
- metricItem := newMetricItem("index_throughput", 2, OperationGroupKey)
- metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
- metricItem.AddLine("Indexing Rate", "Total Indexing", "Number of documents being indexed for primary and replica shards.", "group1", "payload.elasticsearch.node_stats.indices.indexing.index_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
- metricItems = append(metricItems, metricItem)
+ switch metricKey {
+ case IndexThroughputMetricKey:
+ metricItem := newMetricItem(IndexThroughputMetricKey, 2, OperationGroupKey)
+ metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ metricItem.AddLine("Indexing Rate", "Total Indexing", "Number of documents being indexed for primary and replica shards.", "group1", "payload.elasticsearch.node_stats.indices.indexing.index_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItems = append(metricItems, metricItem)
+ case SearchThroughputMetricKey:
+ metricItem := newMetricItem(SearchThroughputMetricKey, 2, OperationGroupKey)
+ metricItem.AddAxi("searching", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
+ metricItem.AddLine("Search Rate", "Total Query",
+ "Number of search requests being executed across primary and replica shards. A single search can run against multiple shards!",
+ "group1", "payload.elasticsearch.node_stats.indices.search.query_total", "max", bucketSizeStr, "query/s", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItems = append(metricItems, metricItem)
+ case IndexLatencyMetricKey:
+ metricItem := newMetricItem(IndexLatencyMetricKey, 3, LatencyGroupKey)
+ metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
- metricItem = newMetricItem("search_throughput", 2, OperationGroupKey)
- metricItem.AddAxi("searching", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
- metricItem.AddLine("Search Rate", "Total Query",
- "Number of search requests being executed across primary and replica shards. A single search can run against multiple shards!",
- "group1", "payload.elasticsearch.node_stats.indices.search.query_total", "max", bucketSizeStr, "query/s", "num", "0,0.[00]", "0,0.[00]", false, true)
- metricItems = append(metricItems, metricItem)
+ metricItem.AddLine("Indexing", "Indexing Latency", "Average latency for indexing documents.", "group1", "payload.elasticsearch.node_stats.indices.indexing.index_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.node_stats.indices.indexing.index_total"
+ metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
+ return value / value2
+ }
+ metricItem.AddLine("Indexing", "Delete Latency", "Average latency for delete documents.", "group1", "payload.elasticsearch.node_stats.indices.indexing.delete_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.node_stats.indices.indexing.delete_total"
+ metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
+ return value / value2
+ }
+ metricItems = append(metricItems, metricItem)
+ case SearchLatencyMetricKey:
+ metricItem := newMetricItem(SearchLatencyMetricKey, 3, LatencyGroupKey)
+ metricItem.AddAxi("searching", "group2", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
- metricItem = newMetricItem("index_latency", 3, LatencyGroupKey)
- metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
-
- metricItem.AddLine("Indexing", "Indexing Latency", "Average latency for indexing documents.", "group1", "payload.elasticsearch.node_stats.indices.indexing.index_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
- metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.node_stats.indices.indexing.index_total"
- metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
- return value / value2
+ metricItem.AddLine("Searching", "Query Latency", "Average latency for searching query.", "group2", "payload.elasticsearch.node_stats.indices.search.query_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.query_total"
+ metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
+ return value / value2
+ }
+ metricItem.AddLine("Searching", "Fetch Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.node_stats.indices.search.fetch_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.fetch_total"
+ metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
+ return value / value2
+ }
+ metricItem.AddLine("Searching", "Scroll Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.node_stats.indices.search.scroll_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.Lines[2].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.scroll_total"
+ metricItem.Lines[2].Metric.Calc = func(value, value2 float64) float64 {
+ return value / value2
+ }
+ metricItems = append(metricItems, metricItem)
+ default:
+ panic("unknown metric key: " + metricKey)
}
- metricItem.AddLine("Indexing", "Delete Latency", "Average latency for delete documents.", "group1", "payload.elasticsearch.node_stats.indices.indexing.delete_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
- metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.node_stats.indices.indexing.delete_total"
- metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
- return value / value2
- }
- metricItems = append(metricItems, metricItem)
-
- metricItem = newMetricItem("search_latency", 3, LatencyGroupKey)
- metricItem.AddAxi("searching", "group2", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
-
- metricItem.AddLine("Searching", "Query Latency", "Average latency for searching query.", "group2", "payload.elasticsearch.node_stats.indices.search.query_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
- metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.query_total"
- metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
- return value / value2
- }
- metricItem.AddLine("Searching", "Fetch Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.node_stats.indices.search.fetch_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
- metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.fetch_total"
- metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
- return value / value2
- }
- metricItem.AddLine("Searching", "Scroll Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.node_stats.indices.search.scroll_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
- metricItem.Lines[2].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.scroll_total"
- metricItem.Lines[2].Metric.Calc = func(value, value2 float64) float64 {
- return value / value2
- }
- metricItems = append(metricItems, metricItem)
query := map[string]interface{}{}
clusterUUID, err := adapter.GetClusterUUID(id)
if err != nil {
@@ -1040,10 +1127,10 @@ func (h *APIHandler) GetClusterIndexMetrics(id string, bucketSize int, min, max
},
},
}
- return h.getSingleIndexMetricsByNodeStats(metricItems, query, bucketSize)
+ return h.getSingleIndexMetricsByNodeStats(ctx, metricItems, query, bucketSize)
}
-func (h *APIHandler) getShardsMetric(id string, min, max int64, bucketSize int) map[string]*common.MetricItem {
+func (h *APIHandler) getShardsMetric(ctx context.Context, id string, min, max int64, bucketSize int) map[string]*common.MetricItem {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
query := util.MapStr{
"query": util.MapStr{
@@ -1102,10 +1189,10 @@ func (h *APIHandler) getShardsMetric(id string, min, max int64, bucketSize int)
metricItem.AddLine("Delayed Unassigned Shards", "Delayed Unassigned Shards", "", "group1", "payload.elasticsearch.cluster_health.delayed_unassigned_shards", "max", bucketSizeStr, "", "num", "0,0.[00]", "0,0.[00]", false, false)
var clusterHealthMetrics []*common.MetricItem
clusterHealthMetrics = append(clusterHealthMetrics, metricItem)
- return h.getSingleMetrics(clusterHealthMetrics, query, bucketSize)
+ return h.getSingleMetrics(ctx, clusterHealthMetrics, query, bucketSize)
}
-func (h *APIHandler) getCircuitBreakerMetric(id string, min, max int64, bucketSize int) map[string]*common.MetricItem {
+func (h *APIHandler) getCircuitBreakerMetric(ctx context.Context, id string, min, max int64, bucketSize int) map[string]*common.MetricItem {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
query := util.MapStr{
"query": util.MapStr{
@@ -1163,10 +1250,10 @@ func (h *APIHandler) getCircuitBreakerMetric(id string, min, max int64, bucketSi
metricItem.AddLine("In Flight Requests Breaker Tripped", "In Flight Requests Tripped", "", "group1", "payload.elasticsearch.node_stats.breakers.in_flight_requests.tripped", "sum", bucketSizeStr, "times/s", "num", "0,0.[00]", "0,0.[00]", false, true)
var circuitBreakerMetrics []*common.MetricItem
circuitBreakerMetrics = append(circuitBreakerMetrics, metricItem)
- return h.getSingleMetrics(circuitBreakerMetrics, query, bucketSize)
+ return h.getSingleMetrics(ctx, circuitBreakerMetrics, query, bucketSize)
}
-func (h *APIHandler) getClusterStatusMetric(id string, min, max int64, bucketSize int) (*common.MetricItem, error) {
+func (h *APIHandler) getClusterStatusMetric(ctx context.Context, id string, min, max int64, bucketSize int) (*common.MetricItem, error) {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
@@ -1227,7 +1314,8 @@ func (h *APIHandler) getClusterStatusMetric(id string, min, max int64, bucketSiz
},
},
}
- response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
+ queryDSL := util.MustToJSONBytes(query)
+ response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
if err != nil {
log.Error(err)
return nil, err
@@ -1244,6 +1332,7 @@ func (h *APIHandler) getClusterStatusMetric(id string, min, max int64, bucketSiz
}
metricItem.Lines[0].Data = metricData
metricItem.Lines[0].Type = common.GraphTypeBar
+ metricItem.Request = string(queryDSL)
return metricItem, nil
}
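Note: with these changes, HandleClusterMetricsAction computes only the metric named by the key parameter instead of building every chart per request. A sketch of how a caller exercises the keyed signature (the cluster ID and time range are placeholder values, and h is assumed to be an *APIHandler):

// Fetch a single cluster metric over the last 15 minutes with a 30s budget.
max := time.Now().UnixMilli()
min := max - int64(15*time.Minute/time.Millisecond)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
// Keys outside the known set fall through the switch and yield an empty map.
metrics := h.GetClusterMetrics(ctx, "my-cluster-id", 60, min, max, ClusterHealthMetricKey)
_ = metrics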
diff --git a/modules/elastic/api/metrics_util.go b/modules/elastic/api/metrics_util.go
index d9f6e76a..7279c5de 100644
--- a/modules/elastic/api/metrics_util.go
+++ b/modules/elastic/api/metrics_util.go
@@ -24,6 +24,7 @@
package api
import (
+ "context"
"fmt"
"infini.sh/framework/core/env"
"net/http"
@@ -109,9 +110,10 @@ func generateGroupAggs(nodeMetricItems []GroupMetricItem) map[string]interface{}
return aggs
}
-func (h *APIHandler) getMetrics(query map[string]interface{}, grpMetricItems []GroupMetricItem, bucketSize int) map[string]*common.MetricItem {
+func (h *APIHandler) getMetrics(ctx context.Context, query map[string]interface{}, grpMetricItems []GroupMetricItem, bucketSize int) map[string]*common.MetricItem {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
- response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
+ queryDSL := util.MustToJSONBytes(query)
+ response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
if err != nil {
log.Error(err)
panic(err)
@@ -214,6 +216,7 @@ func (h *APIHandler) getMetrics(query map[string]interface{}, grpMetricItems []G
line.Data = temp
}
}
+ metricItem.MetricItem.Request = string(queryDSL)
result[metricItem.Key] = metricItem.MetricItem
}
return result
@@ -337,7 +340,7 @@ func GetMetricRangeAndBucketSize(minStr string, maxStr string, bucketSize int, m
}
// Fetch a single metric, which can contain multiple lines
-func (h *APIHandler) getSingleMetrics(metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int) map[string]*common.MetricItem {
+func (h *APIHandler) getSingleMetrics(ctx context.Context, metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int) map[string]*common.MetricItem {
metricData := map[string][][]interface{}{}
aggs := map[string]interface{}{}
@@ -396,7 +399,8 @@ func (h *APIHandler) getSingleMetrics(metricItems []*common.MetricItem, query ma
"aggs": aggs,
},
}
- response, err := elastic.GetClient(clusterID).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
+ queryDSL := util.MustToJSONBytes(query)
+ response, err := elastic.GetClient(clusterID).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
if err != nil {
log.Error(err)
panic(err)
@@ -467,6 +471,7 @@ func (h *APIHandler) getSingleMetrics(metricItems []*common.MetricItem, query ma
line.Data = temp
}
}
+ metricItem.Request = string(queryDSL)
result[metricItem.Key] = metricItem
}
@@ -959,7 +964,7 @@ func parseGroupMetricData(buckets []elastic.BucketBase, isPercent bool) ([]inter
return metricData, nil
}
-func (h *APIHandler) getSingleIndexMetricsByNodeStats(metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int) map[string]*common.MetricItem {
+func (h *APIHandler) getSingleIndexMetricsByNodeStats(ctx context.Context, metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int) map[string]*common.MetricItem {
metricData := map[string][][]interface{}{}
aggs := util.MapStr{}
@@ -1041,10 +1046,10 @@ func (h *APIHandler) getSingleIndexMetricsByNodeStats(metricItems []*common.Metr
"aggs": sumAggs,
},
}
- return parseSingleIndexMetrics(clusterID, metricItems, query, bucketSize,metricData, metricItemsMap)
+ return parseSingleIndexMetrics(ctx, clusterID, metricItems, query, bucketSize, metricData, metricItemsMap)
}
-func (h *APIHandler) getSingleIndexMetrics(metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int) map[string]*common.MetricItem {
+func (h *APIHandler) getSingleIndexMetrics(ctx context.Context, metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int) map[string]*common.MetricItem {
metricData := map[string][][]interface{}{}
aggs := util.MapStr{}
@@ -1146,11 +1151,12 @@ func (h *APIHandler) getSingleIndexMetrics(metricItems []*common.MetricItem, que
"aggs": sumAggs,
},
}
- return parseSingleIndexMetrics(clusterID, metricItems, query, bucketSize,metricData, metricItemsMap)
+ return parseSingleIndexMetrics(ctx, clusterID, metricItems, query, bucketSize, metricData, metricItemsMap)
}
-func parseSingleIndexMetrics(clusterID string, metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int, metricData map[string][][]interface{}, metricItemsMap map[string]*common.MetricLine) map[string]*common.MetricItem {
- response, err := elastic.GetClient(clusterID).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
+func parseSingleIndexMetrics(ctx context.Context, clusterID string, metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int, metricData map[string][][]interface{}, metricItemsMap map[string]*common.MetricLine) map[string]*common.MetricItem {
+ queryDSL := util.MustToJSONBytes(query)
+ response, err := elastic.GetClient(clusterID).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
if err != nil {
panic(err)
}
@@ -1220,6 +1226,7 @@ func parseSingleIndexMetrics(clusterID string, metricItems []*common.MetricItem,
line.Data = temp
}
}
+ metricItem.Request = string(queryDSL)
result[metricItem.Key] = metricItem
}
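Note: all three aggregation helpers in metrics_util.go now stamp the serialized query onto each returned item. A consumer of the result map can surface it directly, e.g. for a debug view (a sketch, assuming a result map from one of the helpers above):

// Log which query produced each metric; Request now carries the exact DSL.
for key, item := range result {
    log.Debugf("metric %s was computed by query: %s", key, item.Request)
}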
diff --git a/modules/elastic/api/node_metrics.go b/modules/elastic/api/node_metrics.go
index 551ca5a0..3269139a 100644
--- a/modules/elastic/api/node_metrics.go
+++ b/modules/elastic/api/node_metrics.go
@@ -24,6 +24,7 @@
package api
import (
+ "context"
"fmt"
log "github.com/cihub/seelog"
"infini.sh/framework/core/elastic"
@@ -36,7 +37,78 @@ import (
"time"
)
-func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max int64, nodeName string, top int) (map[string]*common.MetricItem, error){
+const (
+ NodeOSCPUMetricKey = "os_cpu"
+ NodeProcessCPUMetricKey = "cpu"
+ OSUsedMemoryMetricKey = "os_used_mem"
+ OSLoadAverage1mMetricKey = "os_load_average_1m"
+ OSUsedSwapMetricKey = "os_used_swap"
+ OpenFileMetricKey = "open_file"
+ OpenFilePercentMetricKey = "open_file_percent"
+ TotalDiskMetricKey = "disk"
+ IndexingRateMetricKey = "indexing_rate"
+ IndexingBytesMetricKey = "indexing_bytes"
+ IndexingLatencyMetricKey = "indexing_latency"
+ QueryRateMetricKey = "query_rate"
+ QueryLatencyMetricKey = "query_latency"
+ FetchRateMetricKey = "fetch_rate"
+ ScrollRateMetricKey = "scroll_rate"
+ RefreshRateMetricKey = "refresh_rate"
+ FlushRateMetricKey = "flush_rate"
+ MergesRateMetricKey = "merges_rate"
+ FetchLatencyMetricKey = "fetch_latency"
+ ScrollLatencyMetricKey = "scroll_latency"
+ MergeLatencyMetricKey = "merge_latency"
+ RefreshLatencyMetricKey = "refresh_latency"
+ FlushLatencyMetricKey = "flush_latency"
+ QueryCacheMetricKey = "query_cache"
+ RequestCacheMetricKey = "request_cache"
+ RequestCacheHitMetricKey = "request_cache_hit"
+ RequestCacheMissMetricKey = "request_cache_miss"
+ QueryCacheCountMetricKey = "query_cache_count"
+ QueryCacheMissMetricKey = "query_cache_miss"
+ QueryCacheHitMetricKey = "query_cache_hit"
+ FielddataCacheMetricKey = "fielddata_cache"
+ HttpConnectNumMetricKey = "http_connect_num"
+ HttpRateMetricKey = "http_rate"
+ SegmentCountMetricKey = "segment_count"
+ SegmentMemoryMetricKey = "segment_memory"
+ SegmentStoredFieldsMemoryMetricKey = "segment_stored_fields_memory"
+ SegmentTermsMemoryMetricKey = "segment_terms_memory"
+ SegmentDocValuesMemoryMetricKey = "segment_doc_values_memory"
+ SegmentIndexWriterMemoryMetricKey = "segment_index_writer_memory"
+ SegmentTermVectorsMemoryMetricKey = "segment_term_vectors_memory"
+ DocsCountMetricKey = "docs_count"
+ DocsDeletedMetricKey = "docs_deleted"
+ IndexStorageMetricKey = "index_storage"
+ JVMHeapUsedPercentMetricKey = "jvm_heap_used_percent"
+ JVMMemYoungUsedMetricKey = "jvm_mem_young_used"
+ JVMMemYoungPeakUsedMetricKey = "jvm_mem_young_peak_used"
+ JVMMemOldUsedMetricKey = "jvm_mem_old_used"
+ JVMMemOldPeakUsedMetricKey = "jvm_mem_old_peak_used"
+ JVMUsedHeapMetricKey = "jvm_used_heap"
+ JVMYoungGCRateMetricKey = "jvm_young_gc_rate"
+ JVMYoungGCLatencyMetricKey = "jvm_young_gc_latency"
+ JVMOldGCRateMetricKey = "jvm_old_gc_rate"
+ JVMOldGCLatencyMetricKey = "jvm_old_gc_latency"
+ TransportTXRateMetricKey = "transport_tx_rate"
+ TransportRXRateMetricKey = "transport_rx_rate"
+ TransportTXBytesMetricKey = "transport_tx_bytes"
+ TransportRXBytesMetricKey = "transport_rx_bytes"
+ TransportTCPOutboundMetricKey = "transport_outbound_connections"
+ TotalIOOperationsMetricKey = "total_io_operations"
+ TotalReadIOOperationsMetricKey = "total_read_io_operations"
+ TotalWriteIOOperationsMetricKey = "total_write_io_operations"
+ ScrollOpenContextsMetricKey = "scroll_open_contexts"
+ ParentBreakerMetricKey = "parent_breaker"
+ AccountingBreakerMetricKey = "accounting_breaker"
+ FielddataBreakerMetricKey = "fielddata_breaker"
+ RequestBreakerMetricKey = "request_breaker"
+ InFlightRequestsBreakerMetricKey = "in_flight_requests_breaker"
+ ModelInferenceBreakerMetricKey = "model_inference_breaker"
+)
+
+func (h *APIHandler) getNodeMetrics(ctx context.Context, clusterID string, bucketSize int, min, max int64, nodeName string, top int, metricKey string) (map[string]*common.MetricItem, error){
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
clusterUUID, err := adapter.GetClusterUUID(clusterID)
if err != nil {
@@ -115,893 +187,915 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
},
},
}
- cpuMetric := newMetricItem("cpu", 1, SystemGroupKey)
- cpuMetric.AddAxi("cpu","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
-
- nodeMetricItems := []GroupMetricItem{
- {
- Key: "cpu",
- Field: "payload.elasticsearch.node_stats.process.cpu.percent",
+ nodeMetricItems := []GroupMetricItem{}
+ switch metricKey {
+ case NodeProcessCPUMetricKey:
+ cpuMetric := newMetricItem(NodeProcessCPUMetricKey, 1, SystemGroupKey)
+ cpuMetric.AddAxi("cpu","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "cpu",
+ Field: "payload.elasticsearch.node_stats.process.cpu.percent",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: cpuMetric,
+ FormatType: "ratio",
+ Units: "%",
+ })
+ case NodeOSCPUMetricKey:
+ osCpuMetric := newMetricItem(NodeOSCPUMetricKey, 2, SystemGroupKey)
+ osCpuMetric.AddAxi("OS CPU Percent","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "os_cpu",
+ Field: "payload.elasticsearch.node_stats.os.cpu.percent",
ID: util.GetUUID(),
IsDerivative: false,
- MetricItem: cpuMetric,
+ MetricItem: osCpuMetric,
FormatType: "ratio",
Units: "%",
- },
+ })
+ case OSUsedMemoryMetricKey:
+ osMemMetric := newMetricItem(OSUsedMemoryMetricKey, 2, SystemGroupKey)
+ osMemMetric.AddAxi("OS Mem Used Percent","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "os_used_mem",
+ Field: "payload.elasticsearch.node_stats.os.mem.used_percent",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: osMemMetric,
+ FormatType: "ratio",
+ Units: "%",
+ })
+ case OSLoadAverage1mMetricKey:
+ osLoadMetric := newMetricItem(OSLoadAverage1mMetricKey, 2, SystemGroupKey)
+ osLoadMetric.AddAxi("OS Load 1m Average","group1",common.PositionLeft,"","0.[0]","0.[0]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "os_load_average_1m",
+ Field: "payload.elasticsearch.node_stats.os.cpu.load_average.1m",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: osLoadMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case OSUsedSwapMetricKey:
+ // swap usage
+ osSwapMetric := newMetricItem(OSUsedSwapMetricKey, 3, SystemGroupKey)
+ osSwapMetric.AddAxi("OS Swap Used Percent","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "os_used_swap",
+ Field: "payload.elasticsearch.node_stats.os.swap.used_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ Field2: "payload.elasticsearch.node_stats.os.swap.total_in_bytes",
+ Calc: func(value, value2 float64) float64 {
+ return util.ToFixed((value / value2)*100, 2)
+ },
+ MetricItem: osSwapMetric,
+ FormatType: "ratio",
+ Units: "%",
+ })
+ case OpenFileMetricKey:
+ openFileMetric := newMetricItem(OpenFileMetricKey, 2, SystemGroupKey)
+ openFileMetric.AddAxi("Open File Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "open_file",
+ Field: "payload.elasticsearch.node_stats.process.open_file_descriptors",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: openFileMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case OpenFilePercentMetricKey:
+ openFilePercentMetric := newMetricItem(OpenFilePercentMetricKey, 2, SystemGroupKey)
+ openFilePercentMetric.AddAxi("Open File Percent","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "open_file_percent",
+ Field: "payload.elasticsearch.node_stats.process.open_file_descriptors",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ Field2: "payload.elasticsearch.node_stats.process.max_file_descriptors",
+ Calc: func(value, value2 float64) float64 {
+ if value < 0 {
+ return value
+ }
+ return util.ToFixed((value / value2)*100, 2)
+ },
+ MetricItem: openFilePercentMetric,
+ FormatType: "ratio",
+ Units: "%",
+ })
+ case TotalDiskMetricKey:
+ diskMetric := newMetricItem(TotalDiskMetricKey, 2, SystemGroupKey)
+ diskMetric.AddAxi("disk available percent","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
+
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "disk",
+ Field: "payload.elasticsearch.node_stats.fs.total.total_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: diskMetric,
+ FormatType: "ratio",
+ Units: "%",
+ Field2: "payload.elasticsearch.node_stats.fs.total.available_in_bytes",
+ Calc: func(value, value2 float64) float64 {
+ return util.ToFixed((value2 / value)*100, 2)
+ },
+ })
+ case IndexingRateMetricKey:
+ // indexing rate
+ indexMetric:=newMetricItem(IndexingRateMetricKey, 1, OperationGroupKey)
+ indexMetric.AddAxi("indexing rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "indexing_rate",
+ Field: "payload.elasticsearch.node_stats.indices.indexing.index_total",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: indexMetric,
+ FormatType: "num",
+ Units: "doc/s",
+ })
+ case IndexingBytesMetricKey:
+ indexingBytesMetric := newMetricItem(IndexingBytesMetricKey, 2, OperationGroupKey)
+ indexingBytesMetric.AddAxi("Indexing bytes","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "indexing_bytes",
+ Field: "payload.elasticsearch.node_stats.indices.store.size_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: indexingBytesMetric,
+ FormatType: "bytes",
+ Units: "bytes/s",
+ })
+ case IndexingLatencyMetricKey:
+ // indexing latency
+ indexLatencyMetric:=newMetricItem(IndexingLatencyMetricKey, 1, LatencyGroupKey)
+ indexLatencyMetric.AddAxi("indexing latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "indexing_latency",
+ Field: "payload.elasticsearch.node_stats.indices.indexing.index_time_in_millis",
+ Field2: "payload.elasticsearch.node_stats.indices.indexing.index_total",
+ Calc: func(value, value2 float64) float64 {
+ return value/value2
+ },
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: indexLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
+ })
+ case QueryRateMetricKey:
+ queryMetric:=newMetricItem(QueryRateMetricKey, 2, OperationGroupKey)
+ queryMetric.AddAxi("query rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "query_rate",
+ Field: "payload.elasticsearch.node_stats.indices.search.query_total",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: queryMetric,
+ FormatType: "num",
+ Units: "requests/s",
+ })
+ case QueryLatencyMetricKey:
+ // query latency
+ queryLatencyMetric:=newMetricItem(QueryLatencyMetricKey, 2, LatencyGroupKey)
+ queryLatencyMetric.AddAxi("query latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "query_latency",
+ Field: "payload.elasticsearch.node_stats.indices.search.query_time_in_millis",
+ Field2: "payload.elasticsearch.node_stats.indices.search.query_total",
+ Calc: func(value, value2 float64) float64 {
+ return value/value2
+ },
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: queryLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
+ })
+ case FetchRateMetricKey:
+ fetchMetric:=newMetricItem(FetchRateMetricKey, 3, OperationGroupKey)
+ fetchMetric.AddAxi("fetch rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "fetch_rate",
+ Field: "payload.elasticsearch.node_stats.indices.search.fetch_total",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: fetchMetric,
+ FormatType: "num",
+ Units: "requests/s",
+ })
+ case ScrollRateMetricKey:
+ scrollMetric:=newMetricItem(ScrollRateMetricKey, 4, OperationGroupKey)
+ scrollMetric.AddAxi("scroll rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "scroll_rate",
+ Field: "payload.elasticsearch.node_stats.indices.search.scroll_total",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: scrollMetric,
+ FormatType: "num",
+ Units: "requests/s",
+ })
+ case RefreshRateMetricKey:
+ refreshMetric:=newMetricItem(RefreshRateMetricKey, 5, OperationGroupKey)
+ refreshMetric.AddAxi("refresh rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "refresh_rate",
+ Field: "payload.elasticsearch.node_stats.indices.refresh.total",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: refreshMetric,
+ FormatType: "num",
+ Units: "requests/s",
+ })
+ case FlushRateMetricKey:
+ flushMetric:=newMetricItem(FlushRateMetricKey, 6, OperationGroupKey)
+ flushMetric.AddAxi("flush rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "flush_rate",
+ Field: "payload.elasticsearch.node_stats.indices.flush.total",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: flushMetric,
+ FormatType: "num",
+ Units: "requests/s",
+ })
+ case MergesRateMetricKey:
+ mergeMetric:=newMetricItem(MergesRateMetricKey, 7, OperationGroupKey)
+ mergeMetric.AddAxi("merges rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "merges_rate",
+ Field: "payload.elasticsearch.node_stats.indices.merges.total",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: mergeMetric,
+ FormatType: "num",
+ Units: "requests/s",
+ })
+ case FetchLatencyMetricKey:
+ // fetch latency
+ fetchLatencyMetric:=newMetricItem(FetchLatencyMetricKey, 3, LatencyGroupKey)
+ fetchLatencyMetric.AddAxi("fetch latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "fetch_latency",
+ Field: "payload.elasticsearch.node_stats.indices.search.fetch_time_in_millis",
+ Field2: "payload.elasticsearch.node_stats.indices.search.fetch_total",
+ Calc: func(value, value2 float64) float64 {
+ return value/value2
+ },
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: fetchLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
+ })
+ case ScrollLatencyMetricKey:
+ // scroll latency
+ scrollLatencyMetric:=newMetricItem(ScrollLatencyMetricKey, 4, LatencyGroupKey)
+ scrollLatencyMetric.AddAxi("scroll latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "scroll_latency",
+ Field: "payload.elasticsearch.node_stats.indices.search.scroll_time_in_millis",
+ Field2: "payload.elasticsearch.node_stats.indices.search.scroll_total",
+ Calc: func(value, value2 float64) float64 {
+ return value/value2
+ },
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: scrollLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
+ })
+ case MergeLatencyMetricKey:
+ // merge latency
+ mergeLatencyMetric:=newMetricItem(MergeLatencyMetricKey, 7, LatencyGroupKey)
+ mergeLatencyMetric.AddAxi("merge latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "merge_latency",
+ Field: "payload.elasticsearch.node_stats.indices.merges.total_time_in_millis",
+ Field2: "payload.elasticsearch.node_stats.indices.merges.total",
+ Calc: func(value, value2 float64) float64 {
+ return value/value2
+ },
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: mergeLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
+ })
+ case RefreshLatencyMetricKey:
+ // refresh latency
+ refreshLatencyMetric:=newMetricItem(RefreshLatencyMetricKey, 5, LatencyGroupKey)
+ refreshLatencyMetric.AddAxi("refresh latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "refresh_latency",
+ Field: "payload.elasticsearch.node_stats.indices.refresh.total_time_in_millis",
+ Field2: "payload.elasticsearch.node_stats.indices.refresh.total",
+ Calc: func(value, value2 float64) float64 {
+ return value/value2
+ },
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: refreshLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
+ })
+ case FlushLatencyMetricKey:
+ // flush latency
+ flushLatencyMetric:=newMetricItem(FlushLatencyMetricKey, 6, LatencyGroupKey)
+ flushLatencyMetric.AddAxi("flush latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "flush_latency",
+ Field: "payload.elasticsearch.node_stats.indices.flush.total_time_in_millis",
+ Field2: "payload.elasticsearch.node_stats.indices.flush.total",
+ Calc: func(value, value2 float64) float64 {
+ return value/value2
+ },
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: flushLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
+ })
+ case QueryCacheMetricKey:
+ // query cache memory usage
+ queryCacheMetric:=newMetricItem(QueryCacheMetricKey, 1, CacheGroupKey)
+ queryCacheMetric.AddAxi("query cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "query_cache",
+ Field: "payload.elasticsearch.node_stats.indices.query_cache.memory_size_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: queryCacheMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case RequestCacheMetricKey:
+ // request cache memory usage
+ requestCacheMetric:=newMetricItem(RequestCacheMetricKey, 2, CacheGroupKey)
+ requestCacheMetric.AddAxi("request cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "request_cache",
+ Field: "payload.elasticsearch.node_stats.indices.request_cache.memory_size_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: requestCacheMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case RequestCacheHitMetricKey:
+ // Request Cache Hit
+ requestCacheHitMetric:=newMetricItem(RequestCacheHitMetricKey, 6, CacheGroupKey)
+ requestCacheHitMetric.AddAxi("request cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "request_cache_hit",
+ Field: "payload.elasticsearch.node_stats.indices.request_cache.hit_count",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: requestCacheHitMetric,
+ FormatType: "num",
+ Units: "hits",
+ })
+ case RequestCacheMissMetricKey:
+ // Request Cache Miss
+ requestCacheMissMetric:=newMetricItem(RequestCacheMissMetricKey, 8, CacheGroupKey)
+ requestCacheMissMetric.AddAxi("request cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "request_cache_miss",
+ Field: "payload.elasticsearch.node_stats.indices.request_cache.miss_count",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: requestCacheMissMetric,
+ FormatType: "num",
+ Units: "misses",
+ })
+ case QueryCacheCountMetricKey:
+ // Query Cache Count
+ queryCacheCountMetric:=newMetricItem(QueryCacheCountMetricKey, 4, CacheGroupKey)
+ queryCacheCountMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "query_cache_count",
+ Field: "payload.elasticsearch.node_stats.indices.query_cache.cache_count",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: queryCacheCountMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case QueryCacheHitMetricKey:
+ queryCacheHitMetric:=newMetricItem(QueryCacheHitMetricKey, 5, CacheGroupKey)
+ queryCacheHitMetric.AddAxi("query cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "query_cache_hit",
+ Field: "payload.elasticsearch.node_stats.indices.query_cache.hit_count",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: queryCacheHitMetric,
+ FormatType: "num",
+ Units: "hits",
+ })
+ case QueryCacheMissMetricKey:
+ // Query Cache Miss
+ queryCacheMissMetric:=newMetricItem(QueryCacheMissMetricKey, 7, CacheGroupKey)
+ queryCacheMissMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "query_cache_miss",
+ Field: "payload.elasticsearch.node_stats.indices.query_cache.miss_count",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: queryCacheMissMetric,
+ FormatType: "num",
+ Units: "misses",
+ })
+ case FielddataCacheMetricKey:
+ // fielddata cache memory usage
+ fieldDataCacheMetric:=newMetricItem(FielddataCacheMetricKey, 3, CacheGroupKey)
+ fieldDataCacheMetric.AddAxi("FieldData Cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "fielddata_cache",
+ Field: "payload.elasticsearch.node_stats.indices.fielddata.memory_size_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: fieldDataCacheMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case HttpConnectNumMetricKey:
+ // number of open HTTP connections
+ httpActiveMetric:=newMetricItem(HttpConnectNumMetricKey, 12, HttpGroupKey)
+ httpActiveMetric.AddAxi("http connect number","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "http_connect_num",
+ Field: "payload.elasticsearch.node_stats.http.current_open",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: httpActiveMetric,
+ FormatType: "num",
+ Units: "conns",
+ })
+ case HttpRateMetricKey:
+ // rate of newly opened HTTP connections
+ httpRateMetric:=newMetricItem(HttpRateMetricKey, 12, HttpGroupKey)
+ httpRateMetric.AddAxi("http rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "http_rate",
+ Field: "payload.elasticsearch.node_stats.http.total_opened",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: httpRateMetric,
+ FormatType: "num",
+ Units: "conns/s",
+ })
+ case SegmentCountMetricKey:
+ // segment count
+ segmentCountMetric:=newMetricItem(SegmentCountMetricKey, 15, StorageGroupKey)
+ segmentCountMetric.AddAxi("segment count","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "segment_count",
+ Field: "payload.elasticsearch.node_stats.indices.segments.count",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: segmentCountMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case SegmentMemoryMetricKey:
+ // segment memory
+ segmentMemoryMetric:=newMetricItem(SegmentMemoryMetricKey, 16, MemoryGroupKey)
+ segmentMemoryMetric.AddAxi("segment memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "segment_memory",
+ Field: "payload.elasticsearch.node_stats.indices.segments.memory_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: segmentMemoryMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case SegmentStoredFieldsMemoryMetricKey:
+ // segment stored fields memory
+ segmentStoredFieldsMemoryMetric:=newMetricItem(SegmentStoredFieldsMemoryMetricKey, 16, MemoryGroupKey)
+ segmentStoredFieldsMemoryMetric.AddAxi("segment stored fields memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "segment_stored_fields_memory",
+ Field: "payload.elasticsearch.node_stats.indices.segments.stored_fields_memory_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: segmentStoredFieldsMemoryMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case SegmentTermsMemoryMetricKey:
+ // segment terms memory
+ segmentTermsMemoryMetric:=newMetricItem(SegmentTermsMemoryMetricKey, 16, MemoryGroupKey)
+ segmentTermsMemoryMetric.AddAxi("segment terms memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "segment_terms_memory",
+ Field: "payload.elasticsearch.node_stats.indices.segments.terms_memory_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: segmentTermsMemoryMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case SegmentDocValuesMemoryMetricKey:
+ // segment doc values memory
+ segmentDocValuesMemoryMetric:=newMetricItem(SegmentDocValuesMemoryMetricKey, 16, MemoryGroupKey)
+ segmentDocValuesMemoryMetric.AddAxi("segment doc values memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "segment_doc_values_memory",
+ Field: "payload.elasticsearch.node_stats.indices.segments.doc_values_memory_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: segmentDocValuesMemoryMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case SegmentIndexWriterMemoryMetricKey:
+ // segment index writer memory
+ segmentIndexWriterMemoryMetric:=newMetricItem(SegmentIndexWriterMemoryMetricKey, 16, MemoryGroupKey)
+ segmentIndexWriterMemoryMetric.AddAxi("segment doc values memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "segment_index_writer_memory",
+ Field: "payload.elasticsearch.node_stats.indices.segments.index_writer_memory_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: segmentIndexWriterMemoryMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case SegmentTermVectorsMemoryMetricKey:
+ // segment term vectors memory
+ segmentTermVectorsMemoryMetric:=newMetricItem(SegmentTermVectorsMemoryMetricKey, 16, MemoryGroupKey)
+ segmentTermVectorsMemoryMetric.AddAxi("segment term vectors memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "segment_term_vectors_memory",
+ Field: "payload.elasticsearch.node_stats.indices.segments.term_vectors_memory_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: segmentTermVectorsMemoryMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case DocsCountMetricKey:
+ // document count
+ docsCountMetric:=newMetricItem(DocsCountMetricKey, 17, DocumentGroupKey)
+ docsCountMetric.AddAxi("docs count","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "docs_count",
+ Field: "payload.elasticsearch.node_stats.indices.docs.count",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: docsCountMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case DocsDeletedMetricKey:
+ // deleted document count
+ docsDeletedMetric:=newMetricItem(DocsDeletedMetricKey, 17, DocumentGroupKey)
+ docsDeletedMetric.AddAxi("docs deleted","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "docs_deleted",
+ Field: "payload.elasticsearch.node_stats.indices.docs.deleted",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: docsDeletedMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case IndexStorageMetricKey:
+ // index store size
+ indexStoreMetric:=newMetricItem(IndexStorageMetricKey, 18, StorageGroupKey)
+ indexStoreMetric.AddAxi("indices storage","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "index_storage",
+ Field: "payload.elasticsearch.node_stats.indices.store.size_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: indexStoreMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case JVMHeapUsedPercentMetricKey:
+ // jvm used heap
+ jvmUsedPercentMetric:=newMetricItem(JVMHeapUsedPercentMetricKey, 1, JVMGroupKey)
+ jvmUsedPercentMetric.AddAxi("JVM heap used percent","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "jvm_heap_used_percent",
+ Field: "payload.elasticsearch.node_stats.jvm.mem.heap_used_percent",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: jvmUsedPercentMetric,
+ FormatType: "num",
+ Units: "%",
+ })
+ case JVMMemYoungUsedMetricKey:
+ //JVM mem Young pools used
+ youngPoolsUsedMetric:=newMetricItem(JVMMemYoungUsedMetricKey, 2, JVMGroupKey)
+ youngPoolsUsedMetric.AddAxi("Mem Pools Young Used","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "jvm_mem_young_used",
+ Field: "payload.elasticsearch.node_stats.jvm.mem.pools.young.used_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: youngPoolsUsedMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case JVMMemYoungPeakUsedMetricKey:
+ //JVM mem Young pools peak used
+ youngPoolsUsedPeakMetric:=newMetricItem(JVMMemYoungPeakUsedMetricKey, 2, JVMGroupKey)
+ youngPoolsUsedPeakMetric.AddAxi("Mem Pools Young Peak Used","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "jvm_mem_young_peak_used",
+ Field: "payload.elasticsearch.node_stats.jvm.mem.pools.young.peak_used_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: youngPoolsUsedPeakMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case JVMMemOldUsedMetricKey:
+ //JVM mem old pools used
+ oldPoolsUsedMetric:=newMetricItem(JVMMemOldUsedMetricKey, 3, JVMGroupKey)
+ oldPoolsUsedMetric.AddAxi("Mem Pools Old Used","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "jvm_mem_old_used",
+ Field: "payload.elasticsearch.node_stats.jvm.mem.pools.old.used_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: oldPoolsUsedMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case JVMMemOldPeakUsedMetricKey:
+ //JVM mem old pools peak used
+ oldPoolsUsedPeakMetric:=newMetricItem(JVMMemOldPeakUsedMetricKey, 3, JVMGroupKey)
+ oldPoolsUsedPeakMetric.AddAxi("Mem Pools Old Peak Used","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "jvm_mem_old_peak_used",
+ Field: "payload.elasticsearch.node_stats.jvm.mem.pools.old.peak_used_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: oldPoolsUsedPeakMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case JVMUsedHeapMetricKey:
+ //JVM used heap
+ heapUsedMetric:=newMetricItem(JVMUsedHeapMetricKey, 1, JVMGroupKey)
+ heapUsedMetric.AddAxi("JVM Used Heap","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "jvm_used_heap",
+ Field: "payload.elasticsearch.node_stats.jvm.mem.heap_used_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: heapUsedMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case JVMYoungGCRateMetricKey:
+ //JVM Young GC Rate
+ gcYoungRateMetric:=newMetricItem(JVMYoungGCRateMetricKey, 2, JVMGroupKey)
+ gcYoungRateMetric.AddAxi("JVM Young GC Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "jvm_young_gc_rate",
+ Field: "payload.elasticsearch.node_stats.jvm.gc.collectors.young.collection_count",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: gcYoungRateMetric,
+ FormatType: "num",
+ Units: "times/s",
+ })
+ case JVMYoungGCLatencyMetricKey:
+ //JVM Young GC Latency
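+ // cumulative GC time; as a derivative this approximates time spent in young GC per interval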
+ gcYoungLatencyMetric:=newMetricItem(JVMYoungGCLatencyMetricKey, 2, JVMGroupKey)
+ gcYoungLatencyMetric.AddAxi("JVM Young GC Time","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "jvm_young_gc_latency",
+ Field: "payload.elasticsearch.node_stats.jvm.gc.collectors.young.collection_time_in_millis",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: gcYoungLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
+ })
+ case JVMOldGCRateMetricKey:
+ //JVM old GC Rate
+ gcOldRateMetric:=newMetricItem(JVMOldGCRateMetricKey, 3, JVMGroupKey)
+ gcOldRateMetric.AddAxi("JVM Old GC Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "jvm_old_gc_rate",
+ Field: "payload.elasticsearch.node_stats.jvm.gc.collectors.old.collection_count",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: gcOldRateMetric,
+ FormatType: "num",
+ Units: "times/s",
+ })
+ case JVMOldGCLatencyMetricKey:
+ //JVM old GC Latency
+ gcOldLatencyMetric:=newMetricItem(JVMOldGCLatencyMetricKey, 3, JVMGroupKey)
+ gcOldLatencyMetric.AddAxi("JVM Old GC Time","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "jvm_old_gc_latency",
+ Field: "payload.elasticsearch.node_stats.jvm.gc.collectors.old.collection_time_in_millis",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: gcOldLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
+ })
+ case TransportTXRateMetricKey:
+ // transport send rate
+ transTxRateMetric:=newMetricItem(TransportTXRateMetricKey, 19, TransportGroupKey)
+ transTxRateMetric.AddAxi("Transport Send Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "transport_tx_rate",
+ Field: "payload.elasticsearch.node_stats.transport.tx_count",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: transTxRateMetric,
+ FormatType: "num",
+ Units: "times/s",
+ })
+ case TransportRXRateMetricKey:
+ // transport receive rate
+ transRxRateMetric:=newMetricItem(TransportRXRateMetricKey, 19, TransportGroupKey)
+ transRxRateMetric.AddAxi("Transport Receive Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "transport_rx_rate",
+ Field: "payload.elasticsearch.node_stats.transport.rx_count",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: transRxRateMetric,
+ FormatType: "num",
+ Units: "times/s",
+ })
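+ // tx/rx size counters are cumulative byte totals; as derivatives they chart throughput in bytes/s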
+ case TransportTXBytesMetricKey:
+ // transport send traffic (bytes)
+ transTxBytesMetric:=newMetricItem(TransportTXBytesMetricKey, 19, TransportGroupKey)
+ transTxBytesMetric.AddAxi("Transport Send Bytes","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "transport_tx_bytes",
+ Field: "payload.elasticsearch.node_stats.transport.tx_size_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: transTxBytesMetric,
+ FormatType: "bytes",
+ Units: "bytes/s",
+ })
+ case TransportRXBytesMetricKey:
+ // transport receive traffic (bytes)
+ transRxBytesMetric:=newMetricItem(TransportRXBytesMetricKey, 19, TransportGroupKey)
+ transRxBytesMetric.AddAxi("Transport Receive Bytes","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "transport_rx_bytes",
+ Field: "payload.elasticsearch.node_stats.transport.rx_size_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: transRxBytesMetric,
+ FormatType: "bytes",
+ Units: "bytes/s",
+ })
+ case TransportTCPOutboundMetricKey:
+ // transport TCP outbound connection count
+ tcpNumMetric:=newMetricItem(TransportTCPOutboundMetricKey, 20, TransportGroupKey)
+ tcpNumMetric.AddAxi("Transport Outbound Connections","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "transport_outbound_connections",
+ Field: "payload.elasticsearch.node_stats.transport.total_outbound_connections",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: tcpNumMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case TotalIOOperationsMetricKey:
+ //IO total
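+ // fs.io_stats is reported by Elasticsearch on Linux only; elsewhere these series stay empty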
+ totalOperationsMetric:=newMetricItem(TotalIOOperationsMetricKey, 1, IOGroupKey)
+ totalOperationsMetric.AddAxi("Total I/O Operations Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "total_io_operations",
+ Field: "payload.elasticsearch.node_stats.fs.io_stats.total.operations",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: totalOperationsMetric,
+ FormatType: "num",
+ Units: "times/s",
+ })
+ case TotalReadIOOperationsMetricKey:
+ readOperationsMetric:=newMetricItem(TotalReadIOOperationsMetricKey, 2, IOGroupKey)
+ readOperationsMetric.AddAxi("Total Read I/O Operations Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "total_read_io_operations",
+ Field: "payload.elasticsearch.node_stats.fs.io_stats.total.read_operations",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: readOperationsMetric,
+ FormatType: "num",
+ Units: "times/s",
+ })
+ case TotalWriteIOOperationsMetricKey:
+ writeOperationsMetric:=newMetricItem(TotalWriteIOOperationsMetricKey, 3, IOGroupKey)
+ writeOperationsMetric.AddAxi("Total Write I/O Operations Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "total_write_io_operations",
+ Field: "payload.elasticsearch.node_stats.fs.io_stats.total.write_operations",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: writeOperationsMetric,
+ FormatType: "num",
+ Units: "times/s",
+ })
+ case ScrollOpenContextsMetricKey:
+ //scroll context
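+ // open_contexts is a point-in-time gauge, so IsDerivative is left at its false zero value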
+ openContextMetric:=newMetricItem(ScrollOpenContextsMetricKey, 7, OperationGroupKey)
+ openContextMetric.AddAxi("Scroll Open Contexts","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+ Key: "scroll_open_contexts",
+ Field: "payload.elasticsearch.node_stats.indices.search.open_contexts",
+ ID: util.GetUUID(),
+ MetricItem: openContextMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case ParentBreakerMetricKey:
+ // Circuit Breaker
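+ // breakers.*.tripped fields are cumulative trip counters, charted here as trips per second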
+ parentBreakerMetric := newMetricItem(ParentBreakerMetricKey, 1, CircuitBreakerGroupKey)
+ parentBreakerMetric.AddAxi("Parent Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "parent_breaker",
+ Field: "payload.elasticsearch.node_stats.breakers.parent.tripped",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: parentBreakerMetric,
+ FormatType: "num",
+ Units: "times/s",
+ })
+ case AccountingBreakerMetricKey:
+ accountingBreakerMetric := newMetricItem(AccountingBreakerMetricKey, 2, CircuitBreakerGroupKey)
+ accountingBreakerMetric.AddAxi("Accounting Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "accounting_breaker",
+ Field: "payload.elasticsearch.node_stats.breakers.accounting.tripped",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: accountingBreakerMetric,
+ FormatType: "num",
+ Units: "times/s",
+ })
+ case FielddataBreakerMetricKey:
+ fielddataBreakerMetric := newMetricItem(FielddataBreakerMetricKey, 3, CircuitBreakerGroupKey)
+ fielddataBreakerMetric.AddAxi("Fielddata Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "fielddata_breaker",
+ Field: "payload.elasticsearch.node_stats.breakers.fielddata.tripped",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: fielddataBreakerMetric,
+ FormatType: "num",
+ Units: "times/s",
+ })
+ case RequestBreakerMetricKey:
+ requestBreakerMetric := newMetricItem(RequestBreakerMetricKey, 4, CircuitBreakerGroupKey)
+ requestBreakerMetric.AddAxi("Request Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "request_breaker",
+ Field: "payload.elasticsearch.node_stats.breakers.request.tripped",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: requestBreakerMetric,
+ FormatType: "num",
+ Units: "times/s",
+ })
+ case InFlightRequestsBreakerMetricKey:
+ inFlightRequestBreakerMetric := newMetricItem(InFlightRequestsBreakerMetricKey, 5, CircuitBreakerGroupKey)
+ inFlightRequestBreakerMetric.AddAxi("In Flight Requests Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "in_flight_requests_breaker",
+ Field: "payload.elasticsearch.node_stats.breakers.in_flight_requests.tripped",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: inFlightRequestBreakerMetric,
+ FormatType: "num",
+ Units: "times/s",
+ })
+ case ModelInferenceBreakerMetricKey:
+ modelInferenceBreakerMetric := newMetricItem(ModelInferenceBreakerMetricKey, 6, CircuitBreakerGroupKey)
+ modelInferenceBreakerMetric.AddAxi("Model Inference Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
+ Key: "model_inference_breaker",
+ Field: "payload.elasticsearch.node_stats.breakers.model_inference.tripped",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: modelInferenceBreakerMetric,
+ FormatType: "num",
+ Units: "times/s",
+ })
}
- osCpuMetric := newMetricItem("os_cpu", 2, SystemGroupKey)
- osCpuMetric.AddAxi("OS CPU Percent","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "os_cpu",
- Field: "payload.elasticsearch.node_stats.os.cpu.percent",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: osCpuMetric,
- FormatType: "ratio",
- Units: "%",
- })
-
- osMemMetric := newMetricItem("os_used_mem", 2, SystemGroupKey)
- osMemMetric.AddAxi("OS Mem Used Percent","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "os_used_mem",
- Field: "payload.elasticsearch.node_stats.os.mem.used_percent",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: osMemMetric,
- FormatType: "ratio",
- Units: "%",
- })
- osLoadMetric := newMetricItem("os_load_average_1m", 2, SystemGroupKey)
- osLoadMetric.AddAxi("OS Load 1m Average","group1",common.PositionLeft,"","0.[0]","0.[0]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "os_load_average_1m",
- Field: "payload.elasticsearch.node_stats.os.cpu.load_average.1m",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: osLoadMetric,
- FormatType: "num",
- Units: "",
- })
- //swap usage
- osSwapMetric := newMetricItem("os_used_swap", 3, SystemGroupKey)
- osSwapMetric.AddAxi("OS Swap Used Percent","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "os_used_swap",
- Field: "payload.elasticsearch.node_stats.os.swap.used_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- Field2: "payload.elasticsearch.node_stats.os.swap.total_in_bytes",
- Calc: func(value, value2 float64) float64 {
- return util.ToFixed((value / value2)*100, 2)
- },
- MetricItem: osSwapMetric,
- FormatType: "ratio",
- Units: "%",
- })
- openFileMetric := newMetricItem("open_file", 2, SystemGroupKey)
- openFileMetric.AddAxi("Open File Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "open_file",
- Field: "payload.elasticsearch.node_stats.process.open_file_descriptors",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: openFileMetric,
- FormatType: "num",
- Units: "",
- })
- openFilePercentMetric := newMetricItem("open_file_percent", 2, SystemGroupKey)
- openFilePercentMetric.AddAxi("Open File Percent","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "open_file_percent",
- Field: "payload.elasticsearch.node_stats.process.open_file_descriptors",
- ID: util.GetUUID(),
- IsDerivative: false,
- Field2: "payload.elasticsearch.node_stats.process.max_file_descriptors",
- Calc: func(value, value2 float64) float64 {
- if value < 0 {
- return value
- }
- return util.ToFixed((value / value2)*100, 2)
- },
- MetricItem: openFilePercentMetric,
- FormatType: "ratio",
- Units: "%",
- })
-
- diskMetric := newMetricItem("disk", 2, SystemGroupKey)
- diskMetric.AddAxi("disk available percent","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
-
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "disk",
- Field: "payload.elasticsearch.node_stats.fs.total.total_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: diskMetric,
- FormatType: "ratio",
- Units: "%",
- Field2: "payload.elasticsearch.node_stats.fs.total.available_in_bytes",
- Calc: func(value, value2 float64) float64 {
- return util.ToFixed((value2 / value)*100, 2)
- },
- })
- // 索引速率
- indexMetric:=newMetricItem("indexing_rate", 1, OperationGroupKey)
- indexMetric.AddAxi("indexing rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "indexing_rate",
- Field: "payload.elasticsearch.node_stats.indices.indexing.index_total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: indexMetric,
- FormatType: "num",
- Units: "doc/s",
- })
-
- indexingBytesMetric := newMetricItem("indexing_bytes", 2, OperationGroupKey)
- indexingBytesMetric.AddAxi("Indexing bytes","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
- nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
- Key: "indexing_bytes",
- Field: "payload.elasticsearch.node_stats.indices.store.size_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: indexingBytesMetric,
- FormatType: "bytes",
- Units: "bytes/s",
- })
-
- // 索引延时
- indexLatencyMetric:=newMetricItem("indexing_latency", 1, LatencyGroupKey)
- indexLatencyMetric.AddAxi("indexing latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "indexing_latency",
- Field: "payload.elasticsearch.node_stats.indices.indexing.index_time_in_millis",
- Field2: "payload.elasticsearch.node_stats.indices.indexing.index_total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: indexLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
-
- queryMetric:=newMetricItem("query_rate", 2, OperationGroupKey)
- queryMetric.AddAxi("query rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "query_rate",
- Field: "payload.elasticsearch.node_stats.indices.search.query_total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: queryMetric,
- FormatType: "num",
- Units: "requests/s",
- })
-
- // 查询延时
- queryLatencyMetric:=newMetricItem("query_latency", 2, LatencyGroupKey)
- queryLatencyMetric.AddAxi("query latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "query_latency",
- Field: "payload.elasticsearch.node_stats.indices.search.query_time_in_millis",
- Field2: "payload.elasticsearch.node_stats.indices.search.query_total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: queryLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
-
- fetchMetric:=newMetricItem("fetch_rate", 3, OperationGroupKey)
- fetchMetric.AddAxi("fetch rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "fetch_rate",
- Field: "payload.elasticsearch.node_stats.indices.search.fetch_total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: fetchMetric,
- FormatType: "num",
- Units: "requests/s",
- })
- scrollMetric:=newMetricItem("scroll_rate", 4, OperationGroupKey)
- scrollMetric.AddAxi("scroll rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "scroll_rate",
- Field: "payload.elasticsearch.node_stats.indices.search.scroll_total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: scrollMetric,
- FormatType: "num",
- Units: "requests/s",
- })
-
- refreshMetric:=newMetricItem("refresh_rate", 5, OperationGroupKey)
- refreshMetric.AddAxi("refresh rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "refresh_rate",
- Field: "payload.elasticsearch.node_stats.indices.refresh.total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: refreshMetric,
- FormatType: "num",
- Units: "requests/s",
- })
- flushMetric:=newMetricItem("flush_rate", 6, OperationGroupKey)
- flushMetric.AddAxi("flush rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "flush_rate",
- Field: "payload.elasticsearch.node_stats.indices.flush.total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: flushMetric,
- FormatType: "num",
- Units: "requests/s",
- })
- mergeMetric:=newMetricItem("merges_rate", 7, OperationGroupKey)
- mergeMetric.AddAxi("merges rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "merges_rate",
- Field: "payload.elasticsearch.node_stats.indices.merges.total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: mergeMetric,
- FormatType: "num",
- Units: "requests/s",
- })
-
- // fetch延时
- fetchLatencyMetric:=newMetricItem("fetch_latency", 3, LatencyGroupKey)
- fetchLatencyMetric.AddAxi("fetch latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "fetch_latency",
- Field: "payload.elasticsearch.node_stats.indices.search.fetch_time_in_millis",
- Field2: "payload.elasticsearch.node_stats.indices.search.fetch_total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: fetchLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
- // scroll 延时
- scrollLatencyMetric:=newMetricItem("scroll_latency", 4, LatencyGroupKey)
- scrollLatencyMetric.AddAxi("scroll latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "scroll_latency",
- Field: "payload.elasticsearch.node_stats.indices.search.scroll_time_in_millis",
- Field2: "payload.elasticsearch.node_stats.indices.search.scroll_total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: scrollLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
-
- // merge 延时
- mergeLatencyMetric:=newMetricItem("merge_latency", 7, LatencyGroupKey)
- mergeLatencyMetric.AddAxi("merge latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "merge_latency",
- Field: "payload.elasticsearch.node_stats.indices.merges.total_time_in_millis",
- Field2: "payload.elasticsearch.node_stats.indices.merges.total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: mergeLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
-
- // refresh 延时
- refreshLatencyMetric:=newMetricItem("refresh_latency", 5, LatencyGroupKey)
- refreshLatencyMetric.AddAxi("refresh latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "refresh_latency",
- Field: "payload.elasticsearch.node_stats.indices.refresh.total_time_in_millis",
- Field2: "payload.elasticsearch.node_stats.indices.refresh.total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: refreshLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
- // flush 时延
- flushLatencyMetric:=newMetricItem("flush_latency", 6, LatencyGroupKey)
- flushLatencyMetric.AddAxi("flush latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "flush_latency",
- Field: "payload.elasticsearch.node_stats.indices.flush.total_time_in_millis",
- Field2: "payload.elasticsearch.node_stats.indices.flush.total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: flushLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
- // Query Cache 内存占用大小
- queryCacheMetric:=newMetricItem("query_cache", 1, CacheGroupKey)
- queryCacheMetric.AddAxi("query cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "query_cache",
- Field: "payload.elasticsearch.node_stats.indices.query_cache.memory_size_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: queryCacheMetric,
- FormatType: "bytes",
- Units: "",
- })
- // Request Cache 内存占用大小
- requestCacheMetric:=newMetricItem("request_cache", 2, CacheGroupKey)
- requestCacheMetric.AddAxi("request cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "request_cache",
- Field: "payload.elasticsearch.node_stats.indices.request_cache.memory_size_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: requestCacheMetric,
- FormatType: "bytes",
- Units: "",
- })
- // Request Cache Hit
- requestCacheHitMetric:=newMetricItem("request_cache_hit", 6, CacheGroupKey)
- requestCacheHitMetric.AddAxi("request cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "request_cache_hit",
- Field: "payload.elasticsearch.node_stats.indices.request_cache.hit_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: requestCacheHitMetric,
- FormatType: "num",
- Units: "hits",
- })
- // Request Cache Miss
- requestCacheMissMetric:=newMetricItem("request_cache_miss", 8, CacheGroupKey)
- requestCacheMissMetric.AddAxi("request cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "request_cache_miss",
- Field: "payload.elasticsearch.node_stats.indices.request_cache.miss_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: requestCacheMissMetric,
- FormatType: "num",
- Units: "misses",
- })
- // Query Cache Count
- queryCacheCountMetric:=newMetricItem("query_cache_count", 4, CacheGroupKey)
- queryCacheCountMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "query_cache_count",
- Field: "payload.elasticsearch.node_stats.indices.query_cache.cache_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: queryCacheCountMetric,
- FormatType: "num",
- Units: "",
- })
- // Query Cache Miss
- queryCacheHitMetric:=newMetricItem("query_cache_hit", 5, CacheGroupKey)
- queryCacheHitMetric.AddAxi("query cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "query_cache_hit",
- Field: "payload.elasticsearch.node_stats.indices.query_cache.hit_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: queryCacheHitMetric,
- FormatType: "num",
- Units: "hits",
- })
-
- //// Query Cache evictions
- //queryCacheEvictionsMetric:=newMetricItem("query_cache_evictions", 5, CacheGroupKey)
- //queryCacheEvictionsMetric.AddAxi("query cache evictions","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- //nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- // Key: "query_cache_evictions",
- // Field: "payload.elasticsearch.node_stats.indices.query_cache.evictions",
- // ID: util.GetUUID(),
- // IsDerivative: true,
- // MetricItem: queryCacheEvictionsMetric,
- // FormatType: "num",
- // Units: "evictions",
- //})
-
- // Query Cache Miss
- queryCacheMissMetric:=newMetricItem("query_cache_miss", 7, CacheGroupKey)
- queryCacheMissMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "query_cache_miss",
- Field: "payload.elasticsearch.node_stats.indices.query_cache.miss_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: queryCacheMissMetric,
- FormatType: "num",
- Units: "misses",
- })
-
- // Fielddata内存占用大小
- fieldDataCacheMetric:=newMetricItem("fielddata_cache", 3, CacheGroupKey)
- fieldDataCacheMetric.AddAxi("FieldData Cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "fielddata_cache",
- Field: "payload.elasticsearch.node_stats.indices.fielddata.memory_size_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: fieldDataCacheMetric,
- FormatType: "bytes",
- Units: "",
- })
-
- // http 活跃连接数
- httpActiveMetric:=newMetricItem("http_connect_num", 12, HttpGroupKey)
- httpActiveMetric.AddAxi("http connect number","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "http_connect_num",
- Field: "payload.elasticsearch.node_stats.http.current_open",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: httpActiveMetric,
- FormatType: "num",
- Units: "conns",
- })
- // http 活跃连接数速率
- httpRateMetric:=newMetricItem("http_rate", 12, HttpGroupKey)
- httpRateMetric.AddAxi("http rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "http_rate",
- Field: "payload.elasticsearch.node_stats.http.total_opened",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: httpRateMetric,
- FormatType: "num",
- Units: "conns/s",
- })
-
- // segment 数量
- segmentCountMetric:=newMetricItem("segment_count", 15, StorageGroupKey)
- segmentCountMetric.AddAxi("segment count","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "segment_count",
- Field: "payload.elasticsearch.node_stats.indices.segments.count",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: segmentCountMetric,
- FormatType: "num",
- Units: "",
- })
-
- // segment memory
- segmentMemoryMetric:=newMetricItem("segment_memory", 16, MemoryGroupKey)
- segmentMemoryMetric.AddAxi("segment memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "segment_memory",
- Field: "payload.elasticsearch.node_stats.indices.segments.memory_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: segmentMemoryMetric,
- FormatType: "bytes",
- Units: "",
- })
- // segment stored fields memory
- segmentStoredFieldsMemoryMetric:=newMetricItem("segment_stored_fields_memory", 16, MemoryGroupKey)
- segmentStoredFieldsMemoryMetric.AddAxi("segment stored fields memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "segment_stored_fields_memory",
- Field: "payload.elasticsearch.node_stats.indices.segments.stored_fields_memory_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: segmentStoredFieldsMemoryMetric,
- FormatType: "bytes",
- Units: "",
- })
- // segment terms fields memory
- segmentTermsMemoryMetric:=newMetricItem("segment_terms_memory", 16, MemoryGroupKey)
- segmentTermsMemoryMetric.AddAxi("segment terms memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "segment_terms_memory",
- Field: "payload.elasticsearch.node_stats.indices.segments.terms_memory_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: segmentTermsMemoryMetric,
- FormatType: "bytes",
- Units: "",
- })
- // segment doc values memory
- segmentDocValuesMemoryMetric:=newMetricItem("segment_doc_values_memory", 16, MemoryGroupKey)
- segmentDocValuesMemoryMetric.AddAxi("segment doc values memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "segment_doc_values_memory",
- Field: "payload.elasticsearch.node_stats.indices.segments.doc_values_memory_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: segmentDocValuesMemoryMetric,
- FormatType: "bytes",
- Units: "",
- })
- // segment index writer memory
- segmentIndexWriterMemoryMetric:=newMetricItem("segment_index_writer_memory", 16, MemoryGroupKey)
- segmentIndexWriterMemoryMetric.AddAxi("segment doc values memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "segment_index_writer_memory",
- Field: "payload.elasticsearch.node_stats.indices.segments.index_writer_memory_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: segmentIndexWriterMemoryMetric,
- FormatType: "bytes",
- Units: "",
- })
- // segment term vectors memory
- segmentTermVectorsMemoryMetric:=newMetricItem("segment_term_vectors_memory", 16, MemoryGroupKey)
- segmentTermVectorsMemoryMetric.AddAxi("segment term vectors memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "segment_term_vectors_memory",
- Field: "payload.elasticsearch.node_stats.indices.segments.term_vectors_memory_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: segmentTermVectorsMemoryMetric,
- FormatType: "bytes",
- Units: "",
- })
-
- // docs 数量
- docsCountMetric:=newMetricItem("docs_count", 17, DocumentGroupKey)
- docsCountMetric.AddAxi("docs count","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "docs_count",
- Field: "payload.elasticsearch.node_stats.indices.docs.count",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: docsCountMetric,
- FormatType: "num",
- Units: "",
- })
- // docs 删除数量
- docsDeletedMetric:=newMetricItem("docs_deleted", 17, DocumentGroupKey)
- docsDeletedMetric.AddAxi("docs deleted","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "docs_deleted",
- Field: "payload.elasticsearch.node_stats.indices.docs.deleted",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: docsDeletedMetric,
- FormatType: "num",
- Units: "",
- })
-
- // index store size
- indexStoreMetric:=newMetricItem("index_storage", 18, StorageGroupKey)
- indexStoreMetric.AddAxi("indices storage","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "index_storage",
- Field: "payload.elasticsearch.node_stats.indices.store.size_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: indexStoreMetric,
- FormatType: "bytes",
- Units: "",
- })
-
- // jvm used heap
- jvmUsedPercentMetric:=newMetricItem("jvm_heap_used_percent", 1, JVMGroupKey)
- jvmUsedPercentMetric.AddAxi("JVM heap used percent","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "jvm_heap_used_percent",
- Field: "payload.elasticsearch.node_stats.jvm.mem.heap_used_percent",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: jvmUsedPercentMetric,
- FormatType: "num",
- Units: "%",
- })
- //JVM mem Young pools used
- youngPoolsUsedMetric:=newMetricItem("jvm_mem_young_used", 2, JVMGroupKey)
- youngPoolsUsedMetric.AddAxi("Mem Pools Young Used","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "jvm_mem_young_used",
- Field: "payload.elasticsearch.node_stats.jvm.mem.pools.young.used_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: youngPoolsUsedMetric,
- FormatType: "bytes",
- Units: "",
- })
- //JVM mem Young pools peak used
- youngPoolsUsedPeakMetric:=newMetricItem("jvm_mem_young_peak_used", 2, JVMGroupKey)
- youngPoolsUsedPeakMetric.AddAxi("Mem Pools Young Peak Used","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "jvm_mem_young_peak_used",
- Field: "payload.elasticsearch.node_stats.jvm.mem.pools.young.peak_used_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: youngPoolsUsedPeakMetric,
- FormatType: "bytes",
- Units: "",
- })
-
- //JVM mem old pools used
- oldPoolsUsedMetric:=newMetricItem("jvm_mem_old_used", 3, JVMGroupKey)
- oldPoolsUsedMetric.AddAxi("Mem Pools Old Used","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "jvm_mem_old_used",
- Field: "payload.elasticsearch.node_stats.jvm.mem.pools.old.used_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: oldPoolsUsedMetric,
- FormatType: "bytes",
- Units: "",
- })
- //JVM mem old pools peak used
- oldPoolsUsedPeakMetric:=newMetricItem("jvm_mem_old_peak_used", 3, JVMGroupKey)
- oldPoolsUsedPeakMetric.AddAxi("Mem Pools Old Peak Used","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "jvm_mem_old_peak_used",
- Field: "payload.elasticsearch.node_stats.jvm.mem.pools.old.peak_used_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: oldPoolsUsedPeakMetric,
- FormatType: "bytes",
- Units: "",
- })
-
- //JVM used heap
- heapUsedMetric:=newMetricItem("jvm_used_heap", 1, JVMGroupKey)
- heapUsedMetric.AddAxi("JVM Used Heap","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "jvm_used_heap",
- Field: "payload.elasticsearch.node_stats.jvm.mem.heap_used_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: heapUsedMetric,
- FormatType: "bytes",
- Units: "",
- })
- //JVM Young GC Rate
- gcYoungRateMetric:=newMetricItem("jvm_young_gc_rate", 2, JVMGroupKey)
- gcYoungRateMetric.AddAxi("JVM Young GC Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "jvm_young_gc_rate",
- Field: "payload.elasticsearch.node_stats.jvm.gc.collectors.young.collection_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: gcYoungRateMetric,
- FormatType: "num",
- Units: "times/s",
- })
- //JVM Young GC Latency
- gcYoungLatencyMetric:=newMetricItem("jvm_young_gc_latency", 2, JVMGroupKey)
- gcYoungLatencyMetric.AddAxi("JVM Young GC Time","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "jvm_young_gc_latency",
- Field: "payload.elasticsearch.node_stats.jvm.gc.collectors.young.collection_time_in_millis",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: gcYoungLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
-
- //JVM old GC Rate
- gcOldRateMetric:=newMetricItem("jvm_old_gc_rate", 3, JVMGroupKey)
- gcOldRateMetric.AddAxi("JVM Old GC Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "jvm_old_gc_rate",
- Field: "payload.elasticsearch.node_stats.jvm.gc.collectors.old.collection_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: gcOldRateMetric,
- FormatType: "num",
- Units: "times/s",
- })
- //JVM old GC Latency
- gcOldLatencyMetric:=newMetricItem("jvm_old_gc_latency", 3, JVMGroupKey)
- gcOldLatencyMetric.AddAxi("JVM Old GC Time","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "jvm_old_gc_latency",
- Field: "payload.elasticsearch.node_stats.jvm.gc.collectors.old.collection_time_in_millis",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: gcOldLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
- //Transport 发送速率
- transTxRateMetric:=newMetricItem("transport_tx_rate", 19, TransportGroupKey)
- transTxRateMetric.AddAxi("Transport Send Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "transport_tx_rate",
- Field: "payload.elasticsearch.node_stats.transport.tx_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: transTxRateMetric,
- FormatType: "num",
- Units: "times/s",
- })
- //Transport 接收速率
- transRxRateMetric:=newMetricItem("transport_rx_rate", 19, TransportGroupKey)
- transRxRateMetric.AddAxi("Transport Receive Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "transport_rx_rate",
- Field: "payload.elasticsearch.node_stats.transport.rx_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: transRxRateMetric,
- FormatType: "num",
- Units: "times/s",
- })
-
- //Transport 发送流量
- transTxBytesMetric:=newMetricItem("transport_tx_bytes", 19, TransportGroupKey)
- transTxBytesMetric.AddAxi("Transport Send Bytes","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "transport_tx_bytes",
- Field: "payload.elasticsearch.node_stats.transport.tx_size_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: transTxBytesMetric,
- FormatType: "bytes",
- Units: "s",
- })
- //Transport 接收流量
- transRxBytesMetric:=newMetricItem("transport_rx_bytes", 19, TransportGroupKey)
- transRxBytesMetric.AddAxi("Transport Receive Bytes","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "transport_rx_bytes",
- Field: "payload.elasticsearch.node_stats.transport.rx_size_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: transRxBytesMetric,
- FormatType: "bytes",
- Units: "s",
- })
-
- //Transport tcp 连接数
- tcpNumMetric:=newMetricItem("transport_outbound_connections", 20, TransportGroupKey)
- tcpNumMetric.AddAxi("Transport Outbound Connections","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "transport_outbound_connections",
- Field: "payload.elasticsearch.node_stats.transport.total_outbound_connections",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: tcpNumMetric,
- FormatType: "num",
- Units: "",
- })
-
- //IO total
- totalOperationsMetric:=newMetricItem("total_io_operations", 1, IOGroupKey)
- totalOperationsMetric.AddAxi("Total I/O Operations Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "total_io_operations",
- Field: "payload.elasticsearch.node_stats.fs.io_stats.total.operations",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: totalOperationsMetric,
- FormatType: "num",
- Units: "times/s",
- })
-
- //IO total
- readOperationsMetric:=newMetricItem("total_read_io_operations", 2, IOGroupKey)
- readOperationsMetric.AddAxi("Total Read I/O Operations Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "total_read_io_operations",
- Field: "payload.elasticsearch.node_stats.fs.io_stats.total.read_operations",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: readOperationsMetric,
- FormatType: "num",
- Units: "times/s",
- })
-
- //IO total
- writeOperationsMetric:=newMetricItem("total_write_io_operations", 3, IOGroupKey)
- writeOperationsMetric.AddAxi("Total Write I/O Operations Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "total_write_io_operations",
- Field: "payload.elasticsearch.node_stats.fs.io_stats.total.write_operations",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: writeOperationsMetric,
- FormatType: "num",
- Units: "times/s",
- })
-
- //scroll context
- openContextMetric:=newMetricItem("scroll_open_contexts", 7, OperationGroupKey)
- openContextMetric.AddAxi("Scroll Open Contexts","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "scroll_open_contexts",
- Field: "payload.elasticsearch.node_stats.indices.search.open_contexts",
- ID: util.GetUUID(),
- MetricItem: openContextMetric,
- FormatType: "num",
- Units: "",
- })
-
- // Circuit Breaker
- parentBreakerMetric := newMetricItem("parent_breaker", 1, CircuitBreakerGroupKey)
- parentBreakerMetric.AddAxi("Parent Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
- Key: "parent_breaker",
- Field: "payload.elasticsearch.node_stats.breakers.parent.tripped",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: parentBreakerMetric,
- FormatType: "num",
- Units: "times/s",
- })
- accountingBreakerMetric := newMetricItem("accounting_breaker", 2, CircuitBreakerGroupKey)
- accountingBreakerMetric.AddAxi("Accounting Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
- Key: "accounting_breaker",
- Field: "payload.elasticsearch.node_stats.breakers.accounting.tripped",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: accountingBreakerMetric,
- FormatType: "num",
- Units: "times/s",
- })
- fielddataBreakerMetric := newMetricItem("fielddata_breaker", 3, CircuitBreakerGroupKey)
- fielddataBreakerMetric.AddAxi("Fielddata Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
- Key: "fielddata_breaker",
- Field: "payload.elasticsearch.node_stats.breakers.fielddata.tripped",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: fielddataBreakerMetric,
- FormatType: "num",
- Units: "times/s",
- })
- requestBreakerMetric := newMetricItem("request_breaker", 4, CircuitBreakerGroupKey)
- requestBreakerMetric.AddAxi("Request Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
- Key: "request_breaker",
- Field: "payload.elasticsearch.node_stats.breakers.request.tripped",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: requestBreakerMetric,
- FormatType: "num",
- Units: "times/s",
- })
- inFlightRequestBreakerMetric := newMetricItem("in_flight_requests_breaker", 5, CircuitBreakerGroupKey)
- inFlightRequestBreakerMetric.AddAxi("In Flight Requests Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
- Key: "in_flight_requests_breaker",
- Field: "payload.elasticsearch.node_stats.breakers.in_flight_requests.tripped",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: inFlightRequestBreakerMetric,
- FormatType: "num",
- Units: "times/s",
- })
- modelInferenceBreakerMetric := newMetricItem("model_inference_breaker", 6, CircuitBreakerGroupKey)
- modelInferenceBreakerMetric.AddAxi("Model Inference Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
- Key: "model_inference_breaker",
- Field: "payload.elasticsearch.node_stats.breakers.model_inference.tripped",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: modelInferenceBreakerMetric,
- FormatType: "num",
- Units: "times/s",
- })
-
aggs := generateGroupAggs(nodeMetricItems)
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
@@ -1027,7 +1121,7 @@ func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max i
},
},
}
- return h.getMetrics(query, nodeMetricItems, bucketSize), nil
+ return h.getMetrics(ctx, query, nodeMetricItems, bucketSize), nil
}
diff --git a/modules/elastic/api/node_overview.go b/modules/elastic/api/node_overview.go
index c7ef72bb..68cd3ad4 100644
--- a/modules/elastic/api/node_overview.go
+++ b/modules/elastic/api/node_overview.go
@@ -28,6 +28,7 @@
package api
import (
+ "context"
"fmt"
log "github.com/cihub/seelog"
httprouter "infini.sh/framework/core/api/router"
@@ -410,7 +411,7 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
},
},
}
- metrics := h.getMetrics(query, nodeMetricItems, bucketSize)
+ metrics := h.getMetrics(context.Background(), query, nodeMetricItems, bucketSize)
indexMetrics := map[string]util.MapStr{}
for key, item := range metrics {
for _, line := range item.Lines {
@@ -562,6 +563,12 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht
h.WriteJSON(w, kvs, http.StatusOK)
}
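+// Metric keys recognized by the single-node metrics endpoint (GetSingleNodeMetrics).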
+const (
+ NodeCPUJVMMetricKey = "jvm"
+ NodeHealthMetricKey = "node_health"
+ ShardStateMetricKey = "shard_state"
+)
+
func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
clusterID := ps.MustGetParameter("id")
clusterUUID, err := adapter.GetClusterUUID(clusterID)
@@ -628,127 +635,155 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
metricItems:=[]*common.MetricItem{}
- metricItem:=newMetricItem("cpu", 1, SystemGroupKey)
- metricItem.AddAxi("cpu","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
- metricItem.AddLine("Process CPU","Process CPU","process cpu used percent of node.","group1","payload.elasticsearch.node_stats.process.cpu.percent","max",bucketSizeStr,"%","num","0,0.[00]","0,0.[00]",false,false)
- metricItem.AddLine("OS CPU","OS CPU","process cpu used percent of node.","group1","payload.elasticsearch.node_stats.os.cpu.percent","max",bucketSizeStr,"%","num","0,0.[00]","0,0.[00]",false,false)
- metricItems=append(metricItems,metricItem)
- metricItem =newMetricItem("jvm", 2, SystemGroupKey)
- metricItem.AddAxi("JVM Heap","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
- metricItem.AddLine("Max Heap","Max Heap","JVM max Heap of node.","group1","payload.elasticsearch.node_stats.jvm.mem.heap_max_in_bytes","max",bucketSizeStr,"","bytes","0,0.[00]","0,0.[00]",false,false)
- metricItem.AddLine("Used Heap","Used Heap","JVM used Heap of node.","group1","payload.elasticsearch.node_stats.jvm.mem.heap_used_in_bytes","max",bucketSizeStr,"","bytes","0,0.[00]","0,0.[00]",false,false)
- metricItems=append(metricItems,metricItem)
- metricItem=newMetricItem("index_throughput", 3, OperationGroupKey)
- metricItem.AddAxi("indexing","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- metricItem.AddLine("Indexing Rate","Total Shards","Number of documents being indexed for node.","group1","payload.elasticsearch.node_stats.indices.indexing.index_total","max",bucketSizeStr,"doc/s","num","0,0.[00]","0,0.[00]",false,true)
- metricItems=append(metricItems,metricItem)
- metricItem=newMetricItem("search_throughput", 4, OperationGroupKey)
- metricItem.AddAxi("searching","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,false)
- metricItem.AddLine("Search Rate","Total Shards",
- "Number of search requests being executed.",
- "group1","payload.elasticsearch.node_stats.indices.search.query_total","max",bucketSizeStr,"query/s","num","0,0.[00]","0,0.[00]",false,true)
- metricItems=append(metricItems,metricItem)
-
- metricItem=newMetricItem("index_latency", 5, LatencyGroupKey)
- metricItem.AddAxi("indexing","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
-
- metricItem.AddLine("Indexing","Indexing Latency","Average latency for indexing documents.","group1","payload.elasticsearch.node_stats.indices.indexing.index_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
- metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.node_stats.indices.indexing.index_total"
- metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
- return value/value2
- }
- metricItem.AddLine("Indexing","Delete Latency","Average latency for delete documents.","group1","payload.elasticsearch.node_stats.indices.indexing.delete_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
- metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.node_stats.indices.indexing.delete_total"
- metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
- return value/value2
- }
- metricItems=append(metricItems,metricItem)
-
- metricItem=newMetricItem("search_latency", 6, LatencyGroupKey)
- metricItem.AddAxi("searching","group2",common.PositionLeft,"num","0,0","0,0.[00]",5,false)
-
- metricItem.AddLine("Searching","Query Latency","Average latency for searching query.","group2","payload.elasticsearch.node_stats.indices.search.query_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
- metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.query_total"
- metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
- return value/value2
- }
- metricItem.AddLine("Searching","Fetch Latency","Average latency for searching fetch.","group2","payload.elasticsearch.node_stats.indices.search.fetch_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
- metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.fetch_total"
- metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
- return value/value2
- }
- metricItem.AddLine("Searching","Scroll Latency","Average latency for searching fetch.","group2","payload.elasticsearch.node_stats.indices.search.scroll_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
- metricItem.Lines[2].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.scroll_total"
- metricItem.Lines[2].Metric.Calc = func(value, value2 float64) float64 {
- return value/value2
- }
- metricItems=append(metricItems,metricItem)
- metricItem =newMetricItem("parent_breaker", 8, SystemGroupKey)
- metricItem.AddLine("Parent Breaker Tripped","Parent Breaker Tripped","Rate of the circuit breaker has been triggered and prevented an out of memory error.","group1","payload.elasticsearch.node_stats.breakers.parent.tripped","max",bucketSizeStr,"times/s","num","0,0.[00]","0,0.[00]",false,true)
- metricItems=append(metricItems,metricItem)
- metrics := h.getSingleMetrics(metricItems,query, bucketSize)
- healthMetric, err := getNodeHealthMetric(query, bucketSize)
+ metricKey := h.GetParameter(req, "key")
+ timeout := h.GetParameterOrDefault(req, "timeout", "60s")
+ du, err := time.ParseDuration(timeout)
if err != nil {
log.Error(err)
+ h.WriteError(w, err.Error(), http.StatusInternalServerError)
+ return
}
- query = util.MapStr{
- "size": 0,
- "query": util.MapStr{
- "bool": util.MapStr{
- "must": []util.MapStr{
- {
- "term":util.MapStr{
- "metadata.labels.cluster_uuid":util.MapStr{
- "value": clusterUUID,
+ ctx, cancel := context.WithTimeout(context.Background(), du)
+ defer cancel()
+ metrics := map[string]*common.MetricItem{}
+ if metricKey == NodeHealthMetricKey {
+ healthMetric, err := getNodeHealthMetric(ctx, query, bucketSize)
+ if err != nil {
+ log.Error(err)
+ h.WriteError(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ metrics["node_health"] = healthMetric
+ } else if metricKey == ShardStateMetricKey {
+ query = util.MapStr{
+ "size": 0,
+ "query": util.MapStr{
+ "bool": util.MapStr{
+ "must": []util.MapStr{
+ {
+ "term":util.MapStr{
+ "metadata.labels.cluster_uuid":util.MapStr{
+ "value": clusterUUID,
+ },
+ },
+ },
+ {
+ "term": util.MapStr{
+ "metadata.category": util.MapStr{
+ "value": "elasticsearch",
+ },
+ },
+ },
+ {
+ "term": util.MapStr{
+ "metadata.name": util.MapStr{
+ "value": "shard_stats",
+ },
+ },
+ },
+ {
+ "term": util.MapStr{
+ "metadata.labels.node_id": util.MapStr{
+ "value": nodeID,
+ },
},
},
},
- {
- "term": util.MapStr{
- "metadata.category": util.MapStr{
- "value": "elasticsearch",
- },
- },
- },
- {
- "term": util.MapStr{
- "metadata.name": util.MapStr{
- "value": "shard_stats",
- },
- },
- },
- {
- "term": util.MapStr{
- "metadata.labels.node_id": util.MapStr{
- "value": nodeID,
- },
- },
- },
- },
- "filter": []util.MapStr{
- {
- "range": util.MapStr{
- "timestamp": util.MapStr{
- "gte": min,
- "lte": max,
+ "filter": []util.MapStr{
+ {
+ "range": util.MapStr{
+ "timestamp": util.MapStr{
+ "gte": min,
+ "lte": max,
+ },
},
},
},
},
},
- },
+ }
+ shardStateMetric, err := getNodeShardStateMetric(ctx, query, bucketSize)
+ if err != nil {
+ log.Error(err)
+ h.WriteError(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ metrics["shard_state"] = shardStateMetric
+ } else {
+ switch metricKey {
+ case NodeProcessCPUMetricKey:
+ metricItem:=newMetricItem("cpu", 1, SystemGroupKey)
+ metricItem.AddAxi("cpu","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
+ metricItem.AddLine("Process CPU","Process CPU","process cpu used percent of node.","group1","payload.elasticsearch.node_stats.process.cpu.percent","max",bucketSizeStr,"%","num","0,0.[00]","0,0.[00]",false,false)
+ metricItem.AddLine("OS CPU","OS CPU","process cpu used percent of node.","group1","payload.elasticsearch.node_stats.os.cpu.percent","max",bucketSizeStr,"%","num","0,0.[00]","0,0.[00]",false,false)
+ metricItems=append(metricItems,metricItem)
+ case NodeCPUJVMMetricKey:
+ metricItem := newMetricItem("jvm", 2, SystemGroupKey)
+ metricItem.AddAxi("JVM Heap","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
+ metricItem.AddLine("Max Heap","Max Heap","JVM max Heap of node.","group1","payload.elasticsearch.node_stats.jvm.mem.heap_max_in_bytes","max",bucketSizeStr,"","bytes","0,0.[00]","0,0.[00]",false,false)
+ metricItem.AddLine("Used Heap","Used Heap","JVM used Heap of node.","group1","payload.elasticsearch.node_stats.jvm.mem.heap_used_in_bytes","max",bucketSizeStr,"","bytes","0,0.[00]","0,0.[00]",false,false)
+ metricItems=append(metricItems,metricItem)
+ case IndexThroughputMetricKey:
+ metricItem := newMetricItem("index_throughput", 3, OperationGroupKey)
+ metricItem.AddAxi("indexing","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ metricItem.AddLine("Indexing Rate","Total Shards","Number of documents being indexed for node.","group1","payload.elasticsearch.node_stats.indices.indexing.index_total","max",bucketSizeStr,"doc/s","num","0,0.[00]","0,0.[00]",false,true)
+ metricItems=append(metricItems,metricItem)
+ case SearchThroughputMetricKey:
+ metricItem := newMetricItem("search_throughput", 4, OperationGroupKey)
+ metricItem.AddAxi("searching","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,false)
+ metricItem.AddLine("Search Rate","Total Shards",
+ "Number of search requests being executed.",
+ "group1","payload.elasticsearch.node_stats.indices.search.query_total","max",bucketSizeStr,"query/s","num","0,0.[00]","0,0.[00]",false,true)
+ metricItems=append(metricItems,metricItem)
+ case IndexLatencyMetricKey:
+ metricItem := newMetricItem("index_latency", 5, LatencyGroupKey)
+ metricItem.AddAxi("indexing","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+
+ metricItem.AddLine("Indexing","Indexing Latency","Average latency for indexing documents.","group1","payload.elasticsearch.node_stats.indices.indexing.index_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
+ metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.node_stats.indices.indexing.index_total"
+ metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
+ return value/value2
+ }
+ metricItem.AddLine("Indexing","Delete Latency","Average latency for delete documents.","group1","payload.elasticsearch.node_stats.indices.indexing.delete_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
+ metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.node_stats.indices.indexing.delete_total"
+ metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
+ return value/value2
+ }
+ metricItems=append(metricItems,metricItem)
+ case SearchLatencyMetricKey:
+ metricItem := newMetricItem("search_latency", 6, LatencyGroupKey)
+ metricItem.AddAxi("searching","group2",common.PositionLeft,"num","0,0","0,0.[00]",5,false)
+
+ metricItem.AddLine("Searching","Query Latency","Average latency for searching query.","group2","payload.elasticsearch.node_stats.indices.search.query_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
+ metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.query_total"
+ metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
+ return value/value2
+ }
+ metricItem.AddLine("Searching","Fetch Latency","Average latency for searching fetch.","group2","payload.elasticsearch.node_stats.indices.search.fetch_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
+ metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.fetch_total"
+ metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
+ return value/value2
+ }
+ metricItem.AddLine("Searching","Scroll Latency","Average latency for searching fetch.","group2","payload.elasticsearch.node_stats.indices.search.scroll_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
+ metricItem.Lines[2].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.scroll_total"
+ metricItem.Lines[2].Metric.Calc = func(value, value2 float64) float64 {
+ return value/value2
+ }
+ metricItems=append(metricItems,metricItem)
+ case ParentBreakerMetricKey:
+ metricItem := newMetricItem("parent_breaker", 8, SystemGroupKey)
+ metricItem.AddLine("Parent Breaker Tripped","Parent Breaker Tripped","Rate of the circuit breaker has been triggered and prevented an out of memory error.","group1","payload.elasticsearch.node_stats.breakers.parent.tripped","max",bucketSizeStr,"times/s","num","0,0.[00]","0,0.[00]",false,true)
+ metricItems=append(metricItems,metricItem)
+ }
+
+ metrics = h.getSingleMetrics(ctx, metricItems,query, bucketSize)
}
- shardStateMetric, err := getNodeShardStateMetric(query, bucketSize)
- if err != nil {
- log.Error(err)
- }
- metrics["node_health"] = healthMetric
- metrics["shard_state"] = shardStateMetric
+
resBody["metrics"] = metrics
h.WriteJSON(w, resBody, http.StatusOK)
}
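
Note: the rewritten handler above serves exactly one metric per request. It reads the `key` parameter, parses an optional `timeout` (default `60s`) into a `context.WithTimeout`, special-cases `node_health` and `shard_state`, and otherwise builds a single metric item before querying. Below is a minimal, self-contained sketch of that request pattern; `fetchMetric` is a hypothetical stand-in for `getSingleMetrics`/`getNodeHealthMetric`, not code from this change.

```go
package main

import (
	"context"
	"encoding/json"
	"net/http"
	"time"
)

// fetchMetric stands in for the real metric helpers. The important property
// is that it honors ctx, so a slow aggregation is abandoned once the
// per-request deadline passes.
func fetchMetric(ctx context.Context, key string) (any, error) {
	select {
	case <-time.After(50 * time.Millisecond): // simulated query latency
		return map[string]string{"key": key}, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

func metricHandler(w http.ResponseWriter, req *http.Request) {
	key := req.URL.Query().Get("key")
	timeout := req.URL.Query().Get("timeout")
	if timeout == "" {
		timeout = "60s" // same default as the handler above
	}
	du, err := time.ParseDuration(timeout)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	ctx, cancel := context.WithTimeout(context.Background(), du)
	defer cancel()

	metric, err := fetchMetric(ctx, key)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	json.NewEncoder(w).Encode(map[string]any{"metrics": map[string]any{key: metric}})
}

func main() {
	http.HandleFunc("/metric", metricHandler)
	_ = http.ListenAndServe(":8080", nil)
}
```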
-func getNodeShardStateMetric(query util.MapStr, bucketSize int)(*common.MetricItem, error){
+func getNodeShardStateMetric(ctx context.Context, query util.MapStr, bucketSize int)(*common.MetricItem, error){
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
@@ -771,7 +806,8 @@ func getNodeShardStateMetric(query util.MapStr, bucketSize int)(*common.MetricIt
},
},
}
- response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
+ queryDSL := util.MustToJSONBytes(query)
+ response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
if err != nil {
log.Error(err)
return nil, err
@@ -789,10 +825,11 @@ func getNodeShardStateMetric(query util.MapStr, bucketSize int)(*common.MetricIt
}
metricItem.Lines[0].Data = metricData
metricItem.Lines[0].Type = common.GraphTypeBar
+ metricItem.Request = string(queryDSL)
return metricItem, nil
}
-func getNodeHealthMetric(query util.MapStr, bucketSize int)(*common.MetricItem, error){
+func getNodeHealthMetric(ctx context.Context, query util.MapStr, bucketSize int)(*common.MetricItem, error){
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
@@ -813,7 +850,8 @@ func getNodeHealthMetric(query util.MapStr, bucketSize int)(*common.MetricItem,
},
},
}
- response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
+ queryDSL := util.MustToJSONBytes(query)
+ response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
if err != nil {
log.Error(err)
return nil, err
@@ -844,6 +882,7 @@ func getNodeHealthMetric(query util.MapStr, bucketSize int)(*common.MetricItem,
}
}
}
+ metricItem.Request = string(queryDSL)
metricItem.Lines[0].Data = metricData
metricItem.Lines[0].Type = common.GraphTypeBar
return metricItem, nil
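
Note: both `getNodeShardStateMetric` and `getNodeHealthMetric` now serialize the query once, execute it through the context-aware `QueryDSL` call, and keep the same bytes on `MetricItem.Request`, so the UI can show the exact DSL behind a chart. A hedged sketch of that shape follows; the `client` interface below is an assumption that mirrors the framework call used above, not a verbatim copy of its API.

```go
package metrics

import (
	"context"
	"encoding/json"
)

// client mirrors the shape of the framework's context-aware search call; the
// exact signature in infini.sh/framework may differ.
type client interface {
	QueryDSL(ctx context.Context, index string, params map[string]string, dsl []byte) ([]byte, error)
}

// runMetricQuery marshals the query a single time so that identical bytes are
// both sent to the cluster and returned for recording on MetricItem.Request.
func runMetricQuery(ctx context.Context, c client, index string, query map[string]any) ([]byte, string, error) {
	dsl, err := json.Marshal(query)
	if err != nil {
		return nil, "", err
	}
	resp, err := c.QueryDSL(ctx, index, nil, dsl)
	return resp, string(dsl), err
}
```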
diff --git a/modules/elastic/api/threadpool_metrics.go b/modules/elastic/api/threadpool_metrics.go
index 3e8f7003..f385fe3f 100644
--- a/modules/elastic/api/threadpool_metrics.go
+++ b/modules/elastic/api/threadpool_metrics.go
@@ -24,6 +24,7 @@
package api
import (
+ "context"
"fmt"
log "github.com/cihub/seelog"
"infini.sh/framework/core/elastic"
@@ -45,7 +46,42 @@ const (
ThreadPoolBulkGroupKey = "thread_pool_bulk"
)
-func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min, max int64, nodeName string, top int) (map[string]*common.MetricItem, error){
+const (
+ SearchThreadsMetricKey = "search_threads"
+ IndexThreadsMetricKey = "index_threads"
+ BulkThreadsMetricKey = "bulk_threads"
+ FlushThreadsMetricKey = "flush_threads"
+ RefreshThreadsMetricKey = "refresh_threads"
+ WriteThreadsMetricKey = "write_threads"
+ ForceMergeThreadsMetricKey = "force_merge_threads"
+ SearchQueueMetricKey = "search_queue"
+ IndexQueueMetricKey = "index_queue"
+ BulkQueueMetricKey = "bulk_queue"
+ FlushQueueMetricKey = "flush_queue"
+ RefreshQueueMetricKey = "refresh_queue"
+ WriteQueueMetricKey = "write_queue"
+ SearchActiveMetricKey = "search_active"
+ IndexActiveMetricKey = "index_active"
+ BulkActiveMetricKey = "bulk_active"
+ FlushActiveMetricKey = "flush_active"
+ WriteActiveMetricKey = "write_active"
+ ForceMergeActiveMetricKey = "force_merge_active"
+ SearchRejectedMetricKey = "search_rejected"
+ IndexRejectedMetricKey = "index_rejected"
+ BulkRejectedMetricKey = "bulk_rejected"
+ FlushRejectedMetricKey = "flush_rejected"
+ WriteRejectedMetricKey = "write_rejected"
+ ForceMergeRejectedMetricKey = "force_merge_rejected"
+ GetThreadsMetricKey = "get_threads"
+ GetQueueMetricKey = "get_queue"
+ GetActiveMetricKey = "get_active"
+ GetRejectedMetricKey = "get_rejected"
+ RefreshActiveMetricKey = "refresh_active"
+ RefreshRejectedMetricKey = "refresh_rejected"
+ ForceMergeQueueMetricKey = "force_merge_queue"
+)
+
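
Note: these constants enumerate every thread-pool series a caller can now request individually. Because the switch in `getThreadPoolMetrics` simply falls through for an unknown key and returns an empty result, callers may want an explicit guard; the sketch below is a hypothetical helper under that assumption, not part of this change.

```go
package metrics

import "fmt"

// validThreadPoolKeys would list every *MetricKey constant declared above;
// only a few entries are shown here.
var validThreadPoolKeys = map[string]struct{}{
	"search_threads": {},
	"search_queue":   {},
	"bulk_rejected":  {},
	"write_active":   {},
	// ... remaining keys follow the same pattern
}

func validateThreadPoolKey(key string) error {
	if _, ok := validThreadPoolKeys[key]; !ok {
		return fmt.Errorf("unsupported thread pool metric key: %q", key)
	}
	return nil
}
```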
+func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string, bucketSize int, min, max int64, nodeName string, top int, metricKey string) (map[string]*common.MetricItem, error){
clusterUUID, err := adapter.GetClusterUUID(clusterID)
if err != nil {
return nil, err
@@ -123,11 +159,12 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
},
},
}
- searchThreadsMetric := newMetricItem("search_threads", 1, ThreadPoolSearchGroupKey)
- searchThreadsMetric.AddAxi("Search Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems := []GroupMetricItem{
- {
+ queueMetricItems := []GroupMetricItem{}
+ switch metricKey {
+ case SearchThreadsMetricKey:
+ searchThreadsMetric := newMetricItem(SearchThreadsMetricKey, 1, ThreadPoolSearchGroupKey)
+ searchThreadsMetric.AddAxi("Search Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "search_threads",
Field: "payload.elasticsearch.node_stats.thread_pool.search.threads",
ID: util.GetUUID(),
@@ -135,148 +172,153 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
MetricItem: searchThreadsMetric,
FormatType: "num",
Units: "",
- },
- }
- searchQueueMetric := newMetricItem("search_queue", 1, ThreadPoolSearchGroupKey)
- searchQueueMetric.AddAxi("Search Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ })
+ case SearchQueueMetricKey:
+ searchQueueMetric := newMetricItem(SearchQueueMetricKey, 1, ThreadPoolSearchGroupKey)
+ searchQueueMetric.AddAxi("Search Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "search_queue",
- Field: "payload.elasticsearch.node_stats.thread_pool.search.queue",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: searchQueueMetric,
- FormatType: "num",
- Units: "",
- })
- searchActiveMetric := newMetricItem("search_active", 1, ThreadPoolSearchGroupKey)
- searchActiveMetric.AddAxi("Search Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ queueMetricItems = append(queueMetricItems, GroupMetricItem{
+ Key: "search_queue",
+ Field: "payload.elasticsearch.node_stats.thread_pool.search.queue",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: searchQueueMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case SearchActiveMetricKey:
+ searchActiveMetric := newMetricItem(SearchActiveMetricKey, 1, ThreadPoolSearchGroupKey)
+ searchActiveMetric.AddAxi("Search Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "search_active",
- Field: "payload.elasticsearch.node_stats.thread_pool.search.active",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: searchActiveMetric,
- FormatType: "num",
- Units: "",
- })
- searchRejectedMetric := newMetricItem("search_rejected", 1, ThreadPoolSearchGroupKey)
- searchRejectedMetric.AddAxi("Search Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ queueMetricItems = append(queueMetricItems, GroupMetricItem{
+ Key: "search_active",
+ Field: "payload.elasticsearch.node_stats.thread_pool.search.active",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: searchActiveMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case SearchRejectedMetricKey:
+ searchRejectedMetric := newMetricItem(SearchRejectedMetricKey, 1, ThreadPoolSearchGroupKey)
+ searchRejectedMetric.AddAxi("Search Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "search_rejected",
- Field: "payload.elasticsearch.node_stats.thread_pool.search.rejected",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: searchRejectedMetric,
- FormatType: "num",
- Units: "rejected/s",
- })
+ queueMetricItems = append(queueMetricItems, GroupMetricItem{
+ Key: "search_rejected",
+ Field: "payload.elasticsearch.node_stats.thread_pool.search.rejected",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: searchRejectedMetric,
+ FormatType: "num",
+ Units: "rejected/s",
+ })
+ case GetThreadsMetricKey:
+ getThreadsMetric := newMetricItem(GetThreadsMetricKey, 1, ThreadPoolGetGroupKey)
+ getThreadsMetric.AddAxi("Get Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- getThreadsMetric := newMetricItem("get_threads", 1, ThreadPoolGetGroupKey)
- getThreadsMetric.AddAxi("Get Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ queueMetricItems = append(queueMetricItems, GroupMetricItem{
+ Key: "get_threads",
+ Field: "payload.elasticsearch.node_stats.thread_pool.get.threads",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: getThreadsMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case GetQueueMetricKey:
+ getQueueMetric := newMetricItem(GetQueueMetricKey, 1, ThreadPoolGetGroupKey)
+ getQueueMetric.AddAxi("Get Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "get_threads",
- Field: "payload.elasticsearch.node_stats.thread_pool.get.threads",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: getThreadsMetric,
- FormatType: "num",
- Units: "",
- })
- getQueueMetric := newMetricItem("get_queue", 1, ThreadPoolGetGroupKey)
- getQueueMetric.AddAxi("Get Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ queueMetricItems = append(queueMetricItems, GroupMetricItem{
+ Key: "get_queue",
+ Field: "payload.elasticsearch.node_stats.thread_pool.get.queue",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: getQueueMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case GetActiveMetricKey:
+ getActiveMetric := newMetricItem(GetActiveMetricKey, 1, ThreadPoolGetGroupKey)
+ getActiveMetric.AddAxi("Get Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "get_queue",
- Field: "payload.elasticsearch.node_stats.thread_pool.get.queue",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: getQueueMetric,
- FormatType: "num",
- Units: "",
- })
- getActiveMetric := newMetricItem("get_active", 1, ThreadPoolGetGroupKey)
- getActiveMetric.AddAxi("Get Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ queueMetricItems = append(queueMetricItems, GroupMetricItem{
+ Key: "get_active",
+ Field: "payload.elasticsearch.node_stats.thread_pool.get.active",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: getActiveMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case GetRejectedMetricKey:
+ getRejectedMetric := newMetricItem(GetRejectedMetricKey, 1, ThreadPoolGetGroupKey)
+ getRejectedMetric.AddAxi("Get Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "get_active",
- Field: "payload.elasticsearch.node_stats.thread_pool.get.active",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: getActiveMetric,
- FormatType: "num",
- Units: "",
- })
- getRejectedMetric := newMetricItem("get_rejected", 1, ThreadPoolGetGroupKey)
- getRejectedMetric.AddAxi("Get Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ queueMetricItems = append(queueMetricItems, GroupMetricItem{
+ Key: "get_rejected",
+ Field: "payload.elasticsearch.node_stats.thread_pool.get.rejected",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: getRejectedMetric,
+ FormatType: "num",
+ Units: "rejected/s",
+ })
+ case FlushThreadsMetricKey:
+ flushThreadsMetric := newMetricItem(FlushThreadsMetricKey, 1, ThreadPoolFlushGroupKey)
+ flushThreadsMetric.AddAxi("Flush Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "get_rejected",
- Field: "payload.elasticsearch.node_stats.thread_pool.get.rejected",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: getRejectedMetric,
- FormatType: "num",
- Units: "rejected/s",
- })
+ queueMetricItems = append(queueMetricItems, GroupMetricItem{
+ Key: "flush_threads",
+ Field: "payload.elasticsearch.node_stats.thread_pool.flush.threads",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: flushThreadsMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case FlushQueueMetricKey:
+ flushQueueMetric := newMetricItem(FlushQueueMetricKey, 1, ThreadPoolFlushGroupKey)
+ flushQueueMetric.AddAxi("Get Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- flushThreadsMetric := newMetricItem("flush_threads", 1, ThreadPoolFlushGroupKey)
- flushThreadsMetric.AddAxi("Flush Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ queueMetricItems = append(queueMetricItems, GroupMetricItem{
+ Key: "flush_queue",
+ Field: "payload.elasticsearch.node_stats.thread_pool.flush.queue",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: flushQueueMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case FlushActiveMetricKey:
+ flushActiveMetric := newMetricItem(FlushActiveMetricKey, 1, ThreadPoolFlushGroupKey)
+ flushActiveMetric.AddAxi("Flush Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "flush_threads",
- Field: "payload.elasticsearch.node_stats.thread_pool.flush.threads",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: flushThreadsMetric,
- FormatType: "num",
- Units: "",
- })
- flushQueueMetric := newMetricItem("flush_queue", 1, ThreadPoolFlushGroupKey)
- flushQueueMetric.AddAxi("Get Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ queueMetricItems = append(queueMetricItems, GroupMetricItem{
+ Key: "flush_active",
+ Field: "payload.elasticsearch.node_stats.thread_pool.flush.active",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: flushActiveMetric,
+ FormatType: "num",
+ Units: "",
+ })
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "flush_queue",
- Field: "payload.elasticsearch.node_stats.thread_pool.flush.queue",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: flushQueueMetric,
- FormatType: "num",
- Units: "",
- })
- flushActiveMetric := newMetricItem("flush_active", 1, ThreadPoolFlushGroupKey)
- flushActiveMetric.AddAxi("Flush Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ case FlushRejectedMetricKey:
+ flushRejectedMetric := newMetricItem(FlushRejectedMetricKey, 1, ThreadPoolFlushGroupKey)
+ flushRejectedMetric.AddAxi("Flush Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "flush_active",
- Field: "payload.elasticsearch.node_stats.thread_pool.flush.active",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: flushActiveMetric,
- FormatType: "num",
- Units: "",
- })
- flushRejectedMetric := newMetricItem("flush_rejected", 1, ThreadPoolFlushGroupKey)
- flushRejectedMetric.AddAxi("Flush Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "flush_rejected",
- Field: "payload.elasticsearch.node_stats.thread_pool.flush.rejected",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: flushRejectedMetric,
- FormatType: "num",
- Units: "rejected/s",
- })
-
- majorVersion := elastic.GetMetadata(clusterID).GetMajorVersion()
- ver := elastic.GetClient(clusterID).GetVersion()
-
- if (ver.Distribution == "" || ver.Distribution == elastic.Elasticsearch) && majorVersion < 6{
- indexThreadsMetric := newMetricItem("index_threads", 1, ThreadPoolIndexGroupKey)
+ queueMetricItems = append(queueMetricItems, GroupMetricItem{
+ Key: "flush_rejected",
+ Field: "payload.elasticsearch.node_stats.thread_pool.flush.rejected",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: flushRejectedMetric,
+ FormatType: "num",
+ Units: "rejected/s",
+ })
+ case IndexThreadsMetricKey:
+ indexThreadsMetric := newMetricItem(IndexThreadsMetricKey, 1, ThreadPoolIndexGroupKey)
indexThreadsMetric.AddAxi("Index Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@@ -288,7 +330,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
- indexQueueMetric := newMetricItem("index_queue", 1, ThreadPoolIndexGroupKey)
+ case IndexQueueMetricKey:
+ indexQueueMetric := newMetricItem(IndexQueueMetricKey, 1, ThreadPoolIndexGroupKey)
indexQueueMetric.AddAxi("Index Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@@ -300,7 +343,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
- indexActiveMetric := newMetricItem("index_active", 1, ThreadPoolIndexGroupKey)
+ case IndexActiveMetricKey:
+ indexActiveMetric := newMetricItem(IndexActiveMetricKey, 1, ThreadPoolIndexGroupKey)
indexActiveMetric.AddAxi("Index Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@@ -312,7 +356,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
- indexRejectedMetric := newMetricItem("index_rejected", 1, ThreadPoolIndexGroupKey)
+ case IndexRejectedMetricKey:
+ indexRejectedMetric := newMetricItem(IndexRejectedMetricKey, 1, ThreadPoolIndexGroupKey)
indexRejectedMetric.AddAxi("Index Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@@ -324,8 +369,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "rejected/s",
})
-
- bulkThreadsMetric := newMetricItem("bulk_threads", 1, ThreadPoolBulkGroupKey)
+ case BulkThreadsMetricKey:
+ bulkThreadsMetric := newMetricItem(BulkThreadsMetricKey, 1, ThreadPoolBulkGroupKey)
bulkThreadsMetric.AddAxi("Bulk Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@@ -337,7 +382,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
- bulkQueueMetric := newMetricItem("bulk_queue", 1, ThreadPoolBulkGroupKey)
+ case BulkQueueMetricKey:
+ bulkQueueMetric := newMetricItem(BulkQueueMetricKey, 1, ThreadPoolBulkGroupKey)
bulkQueueMetric.AddAxi("Bulk Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@@ -349,7 +395,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
- bulkActiveMetric := newMetricItem("bulk_active", 1, ThreadPoolBulkGroupKey)
+ case BulkActiveMetricKey:
+ bulkActiveMetric := newMetricItem(BulkActiveMetricKey, 1, ThreadPoolBulkGroupKey)
bulkActiveMetric.AddAxi("Bulk Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@@ -361,7 +408,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
- bulkRejectedMetric := newMetricItem("bulk_rejected", 1, ThreadPoolBulkGroupKey)
+ case BulkRejectedMetricKey:
+ bulkRejectedMetric := newMetricItem(BulkRejectedMetricKey, 1, ThreadPoolBulkGroupKey)
bulkRejectedMetric.AddAxi("Bulk Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@@ -373,8 +421,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "rejected/s",
})
- }else {
- writeThreadsMetric := newMetricItem("write_threads", 1, ThreadPoolWriteGroupKey)
+ case WriteThreadsMetricKey:
+ writeThreadsMetric := newMetricItem(WriteThreadsMetricKey, 1, ThreadPoolWriteGroupKey)
writeThreadsMetric.AddAxi("Write Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@@ -386,7 +434,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
- writeQueueMetric := newMetricItem("write_queue", 1, ThreadPoolWriteGroupKey)
+ case WriteQueueMetricKey:
+ writeQueueMetric := newMetricItem(WriteQueueMetricKey, 1, ThreadPoolWriteGroupKey)
writeQueueMetric.AddAxi("Write Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@@ -398,7 +447,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
- writeActiveMetric := newMetricItem("write_active", 1, ThreadPoolWriteGroupKey)
+ case WriteActiveMetricKey:
+ writeActiveMetric := newMetricItem(WriteActiveMetricKey, 1, ThreadPoolWriteGroupKey)
writeActiveMetric.AddAxi("Write Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@@ -410,7 +460,8 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "",
})
- writeRejectedMetric := newMetricItem("write_rejected", 1, ThreadPoolWriteGroupKey)
+ case WriteRejectedMetricKey:
+ writeRejectedMetric := newMetricItem(WriteRejectedMetricKey, 1, ThreadPoolWriteGroupKey)
writeRejectedMetric.AddAxi("Write Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
@@ -422,103 +473,113 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
FormatType: "num",
Units: "rejected/s",
})
+ case RefreshThreadsMetricKey:
+ refreshThreadsMetric := newMetricItem(RefreshThreadsMetricKey, 1, ThreadPoolRefreshGroupKey)
+ refreshThreadsMetric.AddAxi("Refresh Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+
+ queueMetricItems = append(queueMetricItems, GroupMetricItem{
+ Key: "refresh_threads",
+ Field: "payload.elasticsearch.node_stats.thread_pool.refresh.threads",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: refreshThreadsMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case RefreshQueueMetricKey:
+ refreshQueueMetric := newMetricItem(RefreshQueueMetricKey, 1, ThreadPoolRefreshGroupKey)
+ refreshQueueMetric.AddAxi("Refresh Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+
+ queueMetricItems = append(queueMetricItems, GroupMetricItem{
+ Key: "refresh_queue",
+ Field: "payload.elasticsearch.node_stats.thread_pool.refresh.queue",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: refreshQueueMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case RefreshActiveMetricKey:
+ refreshActiveMetric := newMetricItem(RefreshActiveMetricKey, 1, ThreadPoolRefreshGroupKey)
+ refreshActiveMetric.AddAxi("Refresh Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+
+ queueMetricItems = append(queueMetricItems, GroupMetricItem{
+ Key: "refresh_active",
+ Field: "payload.elasticsearch.node_stats.thread_pool.refresh.active",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: refreshActiveMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case RefreshRejectedMetricKey:
+ refreshRejectedMetric := newMetricItem(RefreshRejectedMetricKey, 1, ThreadPoolRefreshGroupKey)
+ refreshRejectedMetric.AddAxi("Refresh Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+
+ queueMetricItems = append(queueMetricItems, GroupMetricItem{
+ Key: "refresh_rejected",
+ Field: "payload.elasticsearch.node_stats.thread_pool.refresh.rejected",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: refreshRejectedMetric,
+ FormatType: "num",
+ Units: "rejected/s",
+ })
+ case ForceMergeThreadsMetricKey:
+ forceMergeThreadsMetric := newMetricItem(ForceMergeThreadsMetricKey, 1, ThreadPoolForceMergeGroupKey)
+ forceMergeThreadsMetric.AddAxi("Force Merge Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+
+ queueMetricItems = append(queueMetricItems, GroupMetricItem{
+ Key: "force_merge_threads",
+ Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.threads",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: forceMergeThreadsMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case ForceMergeQueueMetricKey:
+ forceMergeQueueMetric := newMetricItem(ForceMergeQueueMetricKey, 1, ThreadPoolForceMergeGroupKey)
+ forceMergeQueueMetric.AddAxi("Force Merge Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+
+ queueMetricItems = append(queueMetricItems, GroupMetricItem{
+ Key: "force_merge_queue",
+ Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.queue",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: forceMergeQueueMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case ForceMergeActiveMetricKey:
+ forceMergeActiveMetric := newMetricItem(ForceMergeActiveMetricKey, 1, ThreadPoolForceMergeGroupKey)
+ forceMergeActiveMetric.AddAxi("Force Merge Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+
+ queueMetricItems = append(queueMetricItems, GroupMetricItem{
+ Key: "force_merge_active",
+ Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.active",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: forceMergeActiveMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case ForceMergeRejectedMetricKey:
+ forceMergeRejectedMetric := newMetricItem(ForceMergeRejectedMetricKey, 1, ThreadPoolForceMergeGroupKey)
+ forceMergeRejectedMetric.AddAxi("Force Merge Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+
+ queueMetricItems = append(queueMetricItems, GroupMetricItem{
+ Key: "force_merge_rejected",
+ Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.rejected",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: forceMergeRejectedMetric,
+ FormatType: "num",
+ Units: "rejected/s",
+ })
}
- refreshThreadsMetric := newMetricItem("refresh_threads", 1, ThreadPoolRefreshGroupKey)
- refreshThreadsMetric.AddAxi("Refresh Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "refresh_threads",
- Field: "payload.elasticsearch.node_stats.thread_pool.refresh.threads",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: refreshThreadsMetric,
- FormatType: "num",
- Units: "",
- })
- refreshQueueMetric := newMetricItem("refresh_queue", 1, ThreadPoolRefreshGroupKey)
- refreshQueueMetric.AddAxi("Refresh Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "refresh_queue",
- Field: "payload.elasticsearch.node_stats.thread_pool.refresh.queue",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: refreshQueueMetric,
- FormatType: "num",
- Units: "",
- })
- refreshActiveMetric := newMetricItem("refresh_active", 1, ThreadPoolRefreshGroupKey)
- refreshActiveMetric.AddAxi("Refresh Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "refresh_active",
- Field: "payload.elasticsearch.node_stats.thread_pool.refresh.active",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: refreshActiveMetric,
- FormatType: "num",
- Units: "",
- })
- refreshRejectedMetric := newMetricItem("refresh_rejected", 1, ThreadPoolRefreshGroupKey)
- refreshRejectedMetric.AddAxi("Refresh Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "refresh_rejected",
- Field: "payload.elasticsearch.node_stats.thread_pool.refresh.rejected",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: refreshRejectedMetric,
- FormatType: "num",
- Units: "rejected/s",
- })
- forceMergeThreadsMetric := newMetricItem("force_merge_threads", 1, ThreadPoolForceMergeGroupKey)
- forceMergeThreadsMetric.AddAxi("Force Merge Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "force_merge_threads",
- Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.threads",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: forceMergeThreadsMetric,
- FormatType: "num",
- Units: "",
- })
- forceMergeQueueMetric := newMetricItem("force_merge_queue", 1, ThreadPoolForceMergeGroupKey)
- forceMergeQueueMetric.AddAxi("Force Merge Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "force_merge_queue",
- Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.queue",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: forceMergeQueueMetric,
- FormatType: "num",
- Units: "",
- })
- forceMergeActiveMetric := newMetricItem("force_merge_active", 1, ThreadPoolForceMergeGroupKey)
- forceMergeActiveMetric.AddAxi("Force Merge Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "force_merge_active",
- Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.active",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: forceMergeActiveMetric,
- FormatType: "num",
- Units: "",
- })
- forceMergeRejectedMetric := newMetricItem("force_merge_rejected", 1, ThreadPoolForceMergeGroupKey)
- forceMergeRejectedMetric.AddAxi("Force Merge Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "force_merge_rejected",
- Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.rejected",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: forceMergeRejectedMetric,
- FormatType: "num",
- Units: "rejected/s",
- })
//Get Thread Pool queue
aggs:=map[string]interface{}{}
@@ -575,5 +636,5 @@ func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min,
},
},
}
- return h.getMetrics(query, queueMetricItems, bucketSize), nil
+ return h.getMetrics(ctx, query, queueMetricItems, bucketSize), nil
}
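
Note: every case in the switch above constructs one `GroupMetricItem` of the same shape, varying only in stats field, group key, derivative flag, and units. If the switch keeps growing, a table-driven variant is one possible follow-up; the sketch below is hypothetical and not code from this change.

```go
package metrics

// poolMetricSpec captures the four values that actually differ between the
// cases of the switch above.
type poolMetricSpec struct {
	Field        string // node_stats payload path
	Group        string // e.g. the value of ThreadPoolSearchGroupKey
	IsDerivative bool   // rates such as rejected/s derive from counters
	Units        string
}

var poolMetricSpecs = map[string]poolMetricSpec{
	"search_threads":  {"payload.elasticsearch.node_stats.thread_pool.search.threads", "thread_pool_search", false, ""},
	"search_rejected": {"payload.elasticsearch.node_stats.thread_pool.search.rejected", "thread_pool_search", true, "rejected/s"},
	"write_queue":     {"payload.elasticsearch.node_stats.thread_pool.write.queue", "thread_pool_write", false, ""},
	// ... one entry per metric key constant
}
```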
diff --git a/modules/elastic/api/v1/index_metrics.go b/modules/elastic/api/v1/index_metrics.go
index 5c81330a..84392b16 100644
--- a/modules/elastic/api/v1/index_metrics.go
+++ b/modules/elastic/api/v1/index_metrics.go
@@ -24,6 +24,7 @@
package v1
import (
+ "context"
"fmt"
log "github.com/cihub/seelog"
"infini.sh/framework/core/elastic"
@@ -37,7 +38,44 @@ import (
"time"
)
-func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucketSize int, min, max int64, indexName string, top int) map[string]*common.MetricItem{
+const (
+ IndexStorageMetricKey = "index_storage"
+ SegmentCountMetricKey = "segment_count"
+ DocCountMetricKey = "doc_count"
+ DocsDeletedMetricKey = "docs_deleted"
+ QueryTimesMetricKey = "query_times"
+ FetchTimesMetricKey = "fetch_times"
+ ScrollTimesMetricKey = "scroll_times"
+ MergeTimesMetricKey = "merge_times"
+ RefreshTimesMetricKey = "refresh_times"
+ FlushTimesMetricKey = "flush_times"
+ IndexingRateMetricKey = "indexing_rate"
+ IndexingBytesMetricKey = "indexing_bytes"
+ IndexingLatencyMetricKey = "indexing_latency"
+ QueryLatencyMetricKey = "query_latency"
+ FetchLatencyMetricKey = "fetch_latency"
+ MergeLatencyMetricKey = "merge_latency"
+ RefreshLatencyMetricKey = "refresh_latency"
+ ScrollLatencyMetricKey = "scroll_latency"
+ FlushLatencyMetricKey = "flush_latency"
+ QueryCacheMetricKey = "query_cache"
+ RequestCacheMetricKey = "request_cache"
+ RequestCacheHitMetricKey = "request_cache_hit"
+ RequestCacheMissMetricKey = "request_cache_miss"
+ QueryCacheCountMetricKey = "query_cache_count"
+ QueryCacheHitMetricKey = "query_cache_hit"
+ QueryCacheMissMetricKey = "query_cache_miss"
+ FielddataCacheMetricKey = "fielddata_cache"
+ SegmentMemoryMetricKey = "segment_memory"
+ SegmentDocValuesMemoryMetricKey = "segment_doc_values_memory"
+ SegmentTermsMemoryMetricKey = "segment_terms_memory"
+ SegmentFieldsMemoryMetricKey = "segment_fields_memory"
+ SegmentIndexWriterMemoryMetricKey = "segment_index_writer_memory"
+ SegmentTermVectorsMemoryMetricKey = "segment_term_vectors_memory"
+ DocPercentMetricKey = "doc_percent"
+)
+
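
Note: the v1 index metrics gain the same per-key dispatch, so a dashboard issues one small request per chart instead of one large request for all charts, and each panel can time out independently. A hedged usage example follows; the URL path is an assumption for illustration and is not defined in this diff.

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical endpoint shape; the real route is registered elsewhere in
	// the console and may differ.
	url := fmt.Sprintf(
		"http://localhost:9000/elasticsearch/%s/index_metrics?index_name=%s&key=%s&timeout=30s",
		"cluster-1", "logs-v1", "query_latency")
	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```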
+func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clusterID string, bucketSize int, min, max int64, indexName string, top int, metricKey string) map[string]*common.MetricItem{
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
var must = []util.MapStr{
@@ -128,455 +166,470 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
},
},
}
- //索引存储大小
- indexStorageMetric := newMetricItem("index_storage", 1, StorageGroupKey)
- indexStorageMetric.AddAxi("Index storage","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
+ indexMetricItems := []GroupMetricItem{}
+ switch metricKey {
+ case IndexStorageMetricKey:
+ // index storage size
+ indexStorageMetric := newMetricItem(IndexStorageMetricKey, 1, StorageGroupKey)
+ indexStorageMetric.AddAxi("Index storage", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "index_storage",
+ Field: "payload.elasticsearch.index_stats.total.store.size_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: indexStorageMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case SegmentCountMetricKey:
+ // segment count
+ segmentCountMetric := newMetricItem(SegmentCountMetricKey, 15, StorageGroupKey)
+ segmentCountMetric.AddAxi("segment count", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "segment_count",
+ Field: "payload.elasticsearch.index_stats.total.segments.count",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: segmentCountMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case DocCountMetricKey:
+ // index doc count
+ docCountMetric := newMetricItem(DocCountMetricKey, 2, DocumentGroupKey)
+ docCountMetric.AddAxi("Doc count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
- indexMetricItems := []GroupMetricItem{
- {
- Key: "index_storage",
- Field: "payload.elasticsearch.index_stats.total.store.size_in_bytes",
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "doc_count",
+ Field: "payload.elasticsearch.index_stats.total.docs.count",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: docCountMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case DocsDeletedMetricKey:
+ // docs deleted count
+ docsDeletedMetric := newMetricItem(DocsDeletedMetricKey, 17, DocumentGroupKey)
+ docsDeletedMetric.AddAxi("docs deleted", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "docs_deleted",
+ Field: "payload.elasticsearch.index_stats.total.docs.deleted",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: docsDeletedMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case QueryTimesMetricKey:
+ // query times
+ queryTimesMetric := newMetricItem(QueryTimesMetricKey, 2, OperationGroupKey)
+ queryTimesMetric.AddAxi("Query times", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
+
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "query_times",
+ Field: "payload.elasticsearch.index_stats.total.search.query_total",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: queryTimesMetric,
+ FormatType: "num",
+ Units: "requests/s",
+ })
+ case FetchTimesMetricKey:
+ // fetch times
+ fetchTimesMetric := newMetricItem(FetchTimesMetricKey, 3, OperationGroupKey)
+ fetchTimesMetric.AddAxi("Fetch times", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "fetch_times",
+ Field: "payload.elasticsearch.index_stats.total.search.fetch_total",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: fetchTimesMetric,
+ FormatType: "num",
+ Units: "requests/s",
+ })
+ case ScrollTimesMetricKey:
+ // scroll times
+ scrollTimesMetric := newMetricItem(ScrollTimesMetricKey, 4, OperationGroupKey)
+ scrollTimesMetric.AddAxi("scroll times", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "scroll_times",
+ Field: "payload.elasticsearch.index_stats.total.search.scroll_total",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: scrollTimesMetric,
+ FormatType: "num",
+ Units: "requests/s",
+ })
+ case MergeTimesMetricKey:
+ // merge times
+ mergeTimesMetric := newMetricItem(MergeTimesMetricKey, 7, OperationGroupKey)
+ mergeTimesMetric.AddAxi("Merge times", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "merge_times",
+ Field: "payload.elasticsearch.index_stats.total.merges.total",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: mergeTimesMetric,
+ FormatType: "num",
+ Units: "requests/s",
+ })
+ case RefreshTimesMetricKey:
+ // refresh times
+ refreshTimesMetric := newMetricItem(RefreshTimesMetricKey, 5, OperationGroupKey)
+ refreshTimesMetric.AddAxi("Refresh times", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "refresh_times",
+ Field: "payload.elasticsearch.index_stats.total.refresh.total",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: refreshTimesMetric,
+ FormatType: "num",
+ Units: "requests/s",
+ })
+ case FlushTimesMetricKey:
+ // flush times
+ flushTimesMetric := newMetricItem(FlushTimesMetricKey, 6, OperationGroupKey)
+ flushTimesMetric.AddAxi("flush times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "flush_times",
+ Field: "payload.elasticsearch.index_stats.total.flush.total",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: flushTimesMetric,
+ FormatType: "num",
+ Units: "requests/s",
+ })
+ case IndexingRateMetricKey:
+ // indexing rate
+ indexingRateMetric := newMetricItem(IndexingRateMetricKey, 1, OperationGroupKey)
+ indexingRateMetric.AddAxi("Indexing rate","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "indexing_rate",
+ Field: "payload.elasticsearch.index_stats.primaries.indexing.index_total",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: indexingRateMetric,
+ FormatType: "num",
+ Units: "doc/s",
+ })
+ case IndexingBytesMetricKey:
+ indexingBytesMetric := newMetricItem(IndexingBytesMetricKey, 2, OperationGroupKey)
+ indexingBytesMetric.AddAxi("Indexing bytes","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "indexing_bytes",
+ Field: "payload.elasticsearch.index_stats.primaries.store.size_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: indexingBytesMetric,
+ FormatType: "bytes",
+ Units: "bytes/s",
+ })
+ case IndexingLatencyMetricKey:
+ // indexing latency
+ indexingLatencyMetric := newMetricItem(IndexingLatencyMetricKey, 1, LatencyGroupKey)
+ indexingLatencyMetric.AddAxi("Indexing latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "indexing_latency",
+ Field: "payload.elasticsearch.index_stats.primaries.indexing.index_time_in_millis",
+ Field2: "payload.elasticsearch.index_stats.primaries.indexing.index_total",
+ Calc: func(value, value2 float64) float64 {
+ return value/value2
+ },
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: indexingLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
+ })
+ case QueryLatencyMetricKey:
+ // query latency
+ queryLatencyMetric := newMetricItem(QueryLatencyMetricKey, 2, LatencyGroupKey)
+ queryLatencyMetric.AddAxi("Query latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "query_latency",
+ Field: "payload.elasticsearch.index_stats.total.search.query_time_in_millis",
+ Field2: "payload.elasticsearch.index_stats.total.search.query_total",
+ Calc: func(value, value2 float64) float64 {
+ return value/value2
+ },
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: queryLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
+ })
+ case FetchLatencyMetricKey:
+ // fetch latency
+ fetchLatencyMetric := newMetricItem(FetchLatencyMetricKey, 3, LatencyGroupKey)
+ fetchLatencyMetric.AddAxi("Fetch latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "fetch_latency",
+ Field: "payload.elasticsearch.index_stats.total.search.fetch_time_in_millis",
+ Field2: "payload.elasticsearch.index_stats.total.search.fetch_total",
+ Calc: func(value, value2 float64) float64 {
+ return value/value2
+ },
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: fetchLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
+ })
+ case MergeLatencyMetricKey:
+ // merge latency
+ mergeLatencyMetric := newMetricItem(MergeLatencyMetricKey, 7, LatencyGroupKey)
+ mergeLatencyMetric.AddAxi("Merge latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "merge_latency",
+ Field: "payload.elasticsearch.index_stats.total.merges.total_time_in_millis",
+ Field2: "payload.elasticsearch.index_stats.total.merges.total",
+ Calc: func(value, value2 float64) float64 {
+ return value/value2
+ },
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: mergeLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
+ })
+ case RefreshLatencyMetricKey:
+
+ // refresh latency
+ refreshLatencyMetric := newMetricItem(RefreshLatencyMetricKey, 5, LatencyGroupKey)
+ refreshLatencyMetric.AddAxi("Refresh latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "refresh_latency",
+ Field: "payload.elasticsearch.index_stats.total.refresh.total_time_in_millis",
+ Field2: "payload.elasticsearch.index_stats.total.refresh.total",
+ Calc: func(value, value2 float64) float64 {
+ return value/value2
+ },
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: refreshLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
+ })
+ case ScrollLatencyMetricKey:
+ // scroll latency
+ scrollLatencyMetric := newMetricItem(ScrollLatencyMetricKey, 4, LatencyGroupKey)
+ scrollLatencyMetric.AddAxi("Scroll latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "scroll_latency",
+ Field: "payload.elasticsearch.index_stats.total.search.scroll_time_in_millis",
+ Field2: "payload.elasticsearch.index_stats.total.search.scroll_total",
+ Calc: func(value, value2 float64) float64 {
+ return value/value2
+ },
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: scrollLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
+ })
+ case FlushLatencyMetricKey:
+ // flush latency
+ flushLatencyMetric := newMetricItem(FlushLatencyMetricKey, 6, LatencyGroupKey)
+ flushLatencyMetric.AddAxi("Flush latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "flush_latency",
+ Field: "payload.elasticsearch.index_stats.total.flush.total_time_in_millis",
+ Field2: "payload.elasticsearch.index_stats.total.flush.total",
+ Calc: func(value, value2 float64) float64 {
+ return value/value2
+ },
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: flushLatencyMetric,
+ FormatType: "num",
+ Units: "ms",
+ })
+ case QueryCacheMetricKey:
+ //queryCache
+ queryCacheMetric := newMetricItem(QueryCacheMetricKey, 1, CacheGroupKey)
+ queryCacheMetric.AddAxi("Query cache","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "query_cache",
+ Field: "payload.elasticsearch.index_stats.total.query_cache.memory_size_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
- MetricItem: indexStorageMetric,
+ MetricItem: queryCacheMetric,
FormatType: "bytes",
Units: "",
- },
+ })
+ case RequestCacheMetricKey:
+ //requestCache
+ requestCacheMetric := newMetricItem(RequestCacheMetricKey, 2, CacheGroupKey)
+ requestCacheMetric.AddAxi("request cache","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "request_cache",
+ Field: "payload.elasticsearch.index_stats.total.request_cache.memory_size_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: requestCacheMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case RequestCacheHitMetricKey:
+ // Request Cache Hit
+ requestCacheHitMetric:=newMetricItem(RequestCacheHitMetricKey, 6, CacheGroupKey)
+ requestCacheHitMetric.AddAxi("request cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ indexMetricItems=append(indexMetricItems, GroupMetricItem{
+ Key: "request_cache_hit",
+ Field: "payload.elasticsearch.index_stats.total.request_cache.hit_count",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: requestCacheHitMetric,
+ FormatType: "num",
+ Units: "hits",
+ })
+ case RequestCacheMissMetricKey:
+ // Request Cache Miss
+ requestCacheMissMetric:=newMetricItem(RequestCacheMissMetricKey, 8, CacheGroupKey)
+ requestCacheMissMetric.AddAxi("request cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ indexMetricItems=append(indexMetricItems, GroupMetricItem{
+ Key: "request_cache_miss",
+ Field: "payload.elasticsearch.index_stats.total.request_cache.miss_count",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: requestCacheMissMetric,
+ FormatType: "num",
+ Units: "misses",
+ })
+ case QueryCacheCountMetricKey:
+ // Query Cache Count
+ queryCacheCountMetric:=newMetricItem(QueryCacheCountMetricKey, 4, CacheGroupKey)
+ queryCacheCountMetric.AddAxi("query cache count","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ indexMetricItems=append(indexMetricItems, GroupMetricItem{
+ Key: "query_cache_count",
+ Field: "payload.elasticsearch.index_stats.total.query_cache.cache_count",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: queryCacheCountMetric,
+ FormatType: "num",
+ Units: "",
+ })
+ case QueryCacheHitMetricKey:
+ // Query Cache Hit
+ queryCacheHitMetric:=newMetricItem(QueryCacheHitMetricKey, 5, CacheGroupKey)
+ queryCacheHitMetric.AddAxi("query cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ indexMetricItems=append(indexMetricItems, GroupMetricItem{
+ Key: "query_cache_hit",
+ Field: "payload.elasticsearch.index_stats.total.query_cache.hit_count",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: queryCacheHitMetric,
+ FormatType: "num",
+ Units: "hits",
+ })
+ case QueryCacheMissMetricKey:
+ // Query Cache Miss
+ queryCacheMissMetric:=newMetricItem(QueryCacheMissMetricKey, 7, CacheGroupKey)
+ queryCacheMissMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+ indexMetricItems=append(indexMetricItems, GroupMetricItem{
+ Key: "query_cache_miss",
+ Field: "payload.elasticsearch.index_stats.total.query_cache.miss_count",
+ ID: util.GetUUID(),
+ IsDerivative: true,
+ MetricItem: queryCacheMissMetric,
+ FormatType: "num",
+ Units: "misses",
+ })
+ case FielddataCacheMetricKey:
+ // Fielddata cache memory size
+ fieldDataCacheMetric:=newMetricItem(FielddataCacheMetricKey, 3, CacheGroupKey)
+ fieldDataCacheMetric.AddAxi("FieldData Cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
+ indexMetricItems=append(indexMetricItems, GroupMetricItem{
+ Key: "fielddata_cache",
+ Field: "payload.elasticsearch.index_stats.total.fielddata.memory_size_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: fieldDataCacheMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case SegmentMemoryMetricKey:
+ //segment memory
+ segmentMemoryMetric := newMetricItem(SegmentMemoryMetricKey, 13, MemoryGroupKey)
+ segmentMemoryMetric.AddAxi("Segment memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "segment_memory",
+ Field: "payload.elasticsearch.index_stats.total.segments.memory_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: segmentMemoryMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case SegmentDocValuesMemoryMetricKey:
+ //segment doc values memory
+ docValuesMemoryMetric := newMetricItem(SegmentDocValuesMemoryMetricKey, 13, MemoryGroupKey)
+ docValuesMemoryMetric.AddAxi("Segment Doc values Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "segment_doc_values_memory",
+ Field: "payload.elasticsearch.index_stats.total.segments.doc_values_memory_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: docValuesMemoryMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case SegmentTermsMemoryMetricKey:
+ //segment terms memory
+ termsMemoryMetric := newMetricItem(SegmentTermsMemoryMetricKey, 13, MemoryGroupKey)
+ termsMemoryMetric.AddAxi("Segment Terms Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "segment_terms_memory",
+ Field: "payload.elasticsearch.index_stats.total.segments.terms_memory_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: termsMemoryMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case SegmentFieldsMemoryMetricKey:
+ //segment fields memory
+ fieldsMemoryMetric := newMetricItem(SegmentFieldsMemoryMetricKey, 13, MemoryGroupKey)
+ fieldsMemoryMetric.AddAxi("Segment Fields Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
+ indexMetricItems = append(indexMetricItems, GroupMetricItem{
+ Key: "segment_fields_memory",
+ Field: "payload.elasticsearch.index_stats.total.segments.stored_fields_memory_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: fieldsMemoryMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case SegmentIndexWriterMemoryMetricKey:
+ // segment index writer memory
+ segmentIndexWriterMemoryMetric:=newMetricItem(SegmentIndexWriterMemoryMetricKey, 16, MemoryGroupKey)
+ segmentIndexWriterMemoryMetric.AddAxi("segment index writer memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
+ indexMetricItems=append(indexMetricItems, GroupMetricItem{
+ Key: "segment_index_writer_memory",
+ Field: "payload.elasticsearch.index_stats.total.segments.index_writer_memory_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: segmentIndexWriterMemoryMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+ case SegmentTermVectorsMemoryMetricKey:
+ // segment term vectors memory
+ segmentTermVectorsMemoryMetric:=newMetricItem(SegmentTermVectorsMemoryMetricKey, 16, MemoryGroupKey)
+ segmentTermVectorsMemoryMetric.AddAxi("segment term vectors memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
+ indexMetricItems=append(indexMetricItems, GroupMetricItem{
+ Key: "segment_term_vectors_memory",
+ Field: "payload.elasticsearch.index_stats.total.segments.term_vectors_memory_in_bytes",
+ ID: util.GetUUID(),
+ IsDerivative: false,
+ MetricItem: segmentTermVectorsMemoryMetric,
+ FormatType: "bytes",
+ Units: "",
+ })
+
}
- // segment 数量
- segmentCountMetric:=newMetricItem("segment_count", 15, StorageGroupKey)
- segmentCountMetric.AddAxi("segment count","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- indexMetricItems=append(indexMetricItems, GroupMetricItem{
- Key: "segment_count",
- Field: "payload.elasticsearch.index_stats.total.segments.count",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: segmentCountMetric,
- FormatType: "num",
- Units: "",
- })
- //索引文档个数
- docCountMetric := newMetricItem("doc_count", 2, DocumentGroupKey)
- docCountMetric.AddAxi("Doc count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "doc_count",
- Field: "payload.elasticsearch.index_stats.total.docs.count",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: docCountMetric,
- FormatType: "num",
- Units: "",
- })
- // docs 删除数量
- docsDeletedMetric:=newMetricItem("docs_deleted", 17, DocumentGroupKey)
- docsDeletedMetric.AddAxi("docs deleted","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- indexMetricItems=append(indexMetricItems, GroupMetricItem{
- Key: "docs_deleted",
- Field: "payload.elasticsearch.index_stats.total.docs.deleted",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: docsDeletedMetric,
- FormatType: "num",
- Units: "",
- })
- //查询次数
- queryTimesMetric := newMetricItem("query_times", 2, OperationGroupKey)
- queryTimesMetric.AddAxi("Query times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "query_times",
- Field: "payload.elasticsearch.index_stats.total.search.query_total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: queryTimesMetric,
- FormatType: "num",
- Units: "requests/s",
- })
-
- //Fetch次数
- fetchTimesMetric := newMetricItem("fetch_times", 3, OperationGroupKey)
- fetchTimesMetric.AddAxi("Fetch times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "fetch_times",
- Field: "payload.elasticsearch.index_stats.total.search.fetch_total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: fetchTimesMetric,
- FormatType: "num",
- Units: "requests/s",
- })
- //scroll 次数
- scrollTimesMetric := newMetricItem("scroll_times", 4, OperationGroupKey)
- scrollTimesMetric.AddAxi("scroll times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "scroll_times",
- Field: "payload.elasticsearch.index_stats.total.search.scroll_total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: scrollTimesMetric,
- FormatType: "num",
- Units: "requests/s",
- })
- //Merge次数
- mergeTimesMetric := newMetricItem("merge_times", 7, OperationGroupKey)
- mergeTimesMetric.AddAxi("Merge times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "merge_times",
- Field: "payload.elasticsearch.index_stats.total.merges.total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: mergeTimesMetric,
- FormatType: "num",
- Units: "requests/s",
- })
- //Refresh次数
- refreshTimesMetric := newMetricItem("refresh_times", 5, OperationGroupKey)
- refreshTimesMetric.AddAxi("Refresh times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "refresh_times",
- Field: "payload.elasticsearch.index_stats.total.refresh.total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: refreshTimesMetric,
- FormatType: "num",
- Units: "requests/s",
- })
- //flush 次数
- flushTimesMetric := newMetricItem("flush_times", 6, OperationGroupKey)
- flushTimesMetric.AddAxi("flush times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "flush_times",
- Field: "payload.elasticsearch.index_stats.total.flush.total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: flushTimesMetric,
- FormatType: "num",
- Units: "requests/s",
- })
-
- //写入速率
- indexingRateMetric := newMetricItem("indexing_rate", 1, OperationGroupKey)
- indexingRateMetric.AddAxi("Indexing rate","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "indexing_rate",
- Field: "payload.elasticsearch.index_stats.primaries.indexing.index_total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: indexingRateMetric,
- FormatType: "num",
- Units: "doc/s",
- })
- indexingBytesMetric := newMetricItem("indexing_bytes", 2, OperationGroupKey)
- indexingBytesMetric.AddAxi("Indexing bytes","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "indexing_bytes",
- Field: "payload.elasticsearch.index_stats.primaries.store.size_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: indexingBytesMetric,
- FormatType: "bytes",
- Units: "bytes/s",
- })
- //写入时延
- indexingLatencyMetric := newMetricItem("indexing_latency", 1, LatencyGroupKey)
- indexingLatencyMetric.AddAxi("Indexing latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "indexing_latency",
- Field: "payload.elasticsearch.index_stats.primaries.indexing.index_time_in_millis",
- Field2: "payload.elasticsearch.index_stats.primaries.indexing.index_total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: indexingLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
-
- //查询时延
- queryLatencyMetric := newMetricItem("query_latency", 2, LatencyGroupKey)
- queryLatencyMetric.AddAxi("Query latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "query_latency",
- Field: "payload.elasticsearch.index_stats.total.search.query_time_in_millis",
- Field2: "payload.elasticsearch.index_stats.total.search.query_total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: queryLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
- //fetch时延
- fetchLatencyMetric := newMetricItem("fetch_latency", 3, LatencyGroupKey)
- fetchLatencyMetric.AddAxi("Fetch latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "fetch_latency",
- Field: "payload.elasticsearch.index_stats.total.search.fetch_time_in_millis",
- Field2: "payload.elasticsearch.index_stats.total.search.fetch_total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: fetchLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
-
- //merge时延
- mergeLatencyMetric := newMetricItem("merge_latency", 7, LatencyGroupKey)
- mergeLatencyMetric.AddAxi("Merge latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "merge_latency",
- Field: "payload.elasticsearch.index_stats.total.merges.total_time_in_millis",
- Field2: "payload.elasticsearch.index_stats.total.merges.total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: mergeLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
- //refresh时延
- refreshLatencyMetric := newMetricItem("refresh_latency", 5, LatencyGroupKey)
- refreshLatencyMetric.AddAxi("Refresh latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "refresh_latency",
- Field: "payload.elasticsearch.index_stats.total.refresh.total_time_in_millis",
- Field2: "payload.elasticsearch.index_stats.total.refresh.total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: refreshLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
- //scroll时延
- scrollLatencyMetric := newMetricItem("scroll_latency", 4, LatencyGroupKey)
- scrollLatencyMetric.AddAxi("Scroll Latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "scroll_latency",
- Field: "payload.elasticsearch.index_stats.total.search.scroll_time_in_millis",
- Field2: "payload.elasticsearch.index_stats.total.search.scroll_total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: scrollLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
- //flush 时延
- flushLatencyMetric := newMetricItem("flush_latency", 6, LatencyGroupKey)
- flushLatencyMetric.AddAxi("Flush latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "flush_latency",
- Field: "payload.elasticsearch.index_stats.total.flush.total_time_in_millis",
- Field2: "payload.elasticsearch.index_stats.total.flush.total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: flushLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
- //queryCache
- queryCacheMetric := newMetricItem("query_cache", 1, CacheGroupKey)
- queryCacheMetric.AddAxi("Query cache","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "query_cache",
- Field: "payload.elasticsearch.index_stats.total.query_cache.memory_size_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: queryCacheMetric,
- FormatType: "bytes",
- Units: "",
- })
- //requestCache
- requestCacheMetric := newMetricItem("request_cache", 2, CacheGroupKey)
- requestCacheMetric.AddAxi("request cache","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "request_cache",
- Field: "payload.elasticsearch.index_stats.total.request_cache.memory_size_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: requestCacheMetric,
- FormatType: "bytes",
- Units: "",
- })
- // Request Cache Hit
- requestCacheHitMetric:=newMetricItem("request_cache_hit", 6, CacheGroupKey)
- requestCacheHitMetric.AddAxi("request cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- indexMetricItems=append(indexMetricItems, GroupMetricItem{
- Key: "request_cache_hit",
- Field: "payload.elasticsearch.index_stats.total.request_cache.hit_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: requestCacheHitMetric,
- FormatType: "num",
- Units: "hits",
- })
- // Request Cache Miss
- requestCacheMissMetric:=newMetricItem("request_cache_miss", 8, CacheGroupKey)
- requestCacheMissMetric.AddAxi("request cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- indexMetricItems=append(indexMetricItems, GroupMetricItem{
- Key: "request_cache_miss",
- Field: "payload.elasticsearch.index_stats.total.request_cache.miss_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: requestCacheMissMetric,
- FormatType: "num",
- Units: "misses",
- })
- // Query Cache Count
- queryCacheCountMetric:=newMetricItem("query_cache_count", 4, CacheGroupKey)
- queryCacheCountMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- indexMetricItems=append(indexMetricItems, GroupMetricItem{
- Key: "query_cache_count",
- Field: "payload.elasticsearch.index_stats.total.query_cache.cache_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: queryCacheCountMetric,
- FormatType: "num",
- Units: "",
- })
- // Query Cache Miss
- queryCacheHitMetric:=newMetricItem("query_cache_hit", 5, CacheGroupKey)
- queryCacheHitMetric.AddAxi("query cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- indexMetricItems=append(indexMetricItems, GroupMetricItem{
- Key: "query_cache_hit",
- Field: "payload.elasticsearch.index_stats.total.query_cache.hit_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: queryCacheHitMetric,
- FormatType: "num",
- Units: "hits",
- })
-
- //// Query Cache evictions
- //queryCacheEvictionsMetric:=newMetricItem("query_cache_evictions", 11, CacheGroupKey)
- //queryCacheEvictionsMetric.AddAxi("query cache evictions","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- //indexMetricItems=append(indexMetricItems, GroupMetricItem{
- // Key: "query_cache_evictions",
- // Field: "payload.elasticsearch.index_stats.total.query_cache.evictions",
- // ID: util.GetUUID(),
- // IsDerivative: true,
- // MetricItem: queryCacheEvictionsMetric,
- // FormatType: "num",
- // Units: "evictions",
- //})
-
- // Query Cache Miss
- queryCacheMissMetric:=newMetricItem("query_cache_miss", 7, CacheGroupKey)
- queryCacheMissMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- indexMetricItems=append(indexMetricItems, GroupMetricItem{
- Key: "query_cache_miss",
- Field: "payload.elasticsearch.index_stats.total.query_cache.miss_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: queryCacheMissMetric,
- FormatType: "num",
- Units: "misses",
- })
- // Fielddata内存占用大小
- fieldDataCacheMetric:=newMetricItem("fielddata_cache", 3, CacheGroupKey)
- fieldDataCacheMetric.AddAxi("FieldData Cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- indexMetricItems=append(indexMetricItems, GroupMetricItem{
- Key: "fielddata_cache",
- Field: "payload.elasticsearch.index_stats.total.fielddata.memory_size_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: fieldDataCacheMetric,
- FormatType: "bytes",
- Units: "",
- })
- //segment memory
- segmentMemoryMetric := newMetricItem("segment_memory", 13, MemoryGroupKey)
- segmentMemoryMetric.AddAxi("Segment memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "segment_memory",
- Field: "payload.elasticsearch.index_stats.total.segments.memory_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: segmentMemoryMetric,
- FormatType: "bytes",
- Units: "",
- })
-
- //segment doc values memory
- docValuesMemoryMetric := newMetricItem("segment_doc_values_memory", 13, MemoryGroupKey)
- docValuesMemoryMetric.AddAxi("Segment Doc values Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "segment_doc_values_memory",
- Field: "payload.elasticsearch.index_stats.total.segments.doc_values_memory_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: docValuesMemoryMetric,
- FormatType: "bytes",
- Units: "",
- })
-
- //segment terms memory
- termsMemoryMetric := newMetricItem("segment_terms_memory", 13, MemoryGroupKey)
- termsMemoryMetric.AddAxi("Segment Terms Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "segment_terms_memory",
- Field: "payload.elasticsearch.index_stats.total.segments.terms_memory_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: termsMemoryMetric,
- FormatType: "bytes",
- Units: "",
- })
-
- //segment fields memory
- fieldsMemoryMetric := newMetricItem("segment_fields_memory", 13, MemoryGroupKey)
- fieldsMemoryMetric.AddAxi("Segment Fields Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
- indexMetricItems = append(indexMetricItems, GroupMetricItem{
- Key: "segment_fields_memory",
- Field: "payload.elasticsearch.index_stats.total.segments.stored_fields_memory_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: fieldsMemoryMetric,
- FormatType: "bytes",
- Units: "",
- })
- // segment index writer memory
- segmentIndexWriterMemoryMetric:=newMetricItem("segment_index_writer_memory", 16, MemoryGroupKey)
- segmentIndexWriterMemoryMetric.AddAxi("segment doc values memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- indexMetricItems=append(indexMetricItems, GroupMetricItem{
- Key: "segment_index_writer_memory",
- Field: "payload.elasticsearch.index_stats.total.segments.index_writer_memory_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: segmentIndexWriterMemoryMetric,
- FormatType: "bytes",
- Units: "",
- })
- // segment term vectors memory
- segmentTermVectorsMemoryMetric:=newMetricItem("segment_term_vectors_memory", 16, MemoryGroupKey)
- segmentTermVectorsMemoryMetric.AddAxi("segment term vectors memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- indexMetricItems=append(indexMetricItems, GroupMetricItem{
- Key: "segment_term_vectors_memory",
- Field: "payload.elasticsearch.index_stats.total.segments.term_vectors_memory_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: segmentTermVectorsMemoryMetric,
- FormatType: "bytes",
- Units: "",
- })
aggs:=map[string]interface{}{}
@@ -642,7 +695,7 @@ func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucket
},
},
}
- return h.getMetrics(query, indexMetricItems, bucketSize)
+ return h.getMetrics(ctx, query, indexMetricItems, bucketSize)
}
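
With the context threaded through to `getMetrics`, a caller can now bound how long the backing aggregation query may run. A minimal caller sketch under the new signature (the 30-second deadline, the `top` value of 5, the empty `shardID`, the key choice, and `resBody` are illustrative assumptions, not taken from this patch):

```go
// Sketch only: calling the refactored getIndexMetrics with a deadline.
// h, req, clusterID, bucketSize, min, max and indexName are assumed in scope.
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
metrics, err := h.getIndexMetrics(ctx, req, clusterID, bucketSize, min, max, indexName, 5, "", QueryCacheMetricKey)
if err != nil {
	log.Error(err) // an expired deadline surfaces here instead of hanging the handler
	return
}
resBody["metrics"] = metrics
```
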
diff --git a/modules/elastic/api/v1/index_overview.go b/modules/elastic/api/v1/index_overview.go
index 976142b7..20b2b555 100644
--- a/modules/elastic/api/v1/index_overview.go
+++ b/modules/elastic/api/v1/index_overview.go
@@ -28,6 +28,7 @@
package v1
import (
+ "context"
"fmt"
log "github.com/cihub/seelog"
httprouter "infini.sh/framework/core/api/router"
@@ -440,7 +441,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
},
},
}
- metrics := h.getMetrics(query, nodeMetricItems, bucketSize)
+ metrics := h.getMetrics(context.Background(), query, nodeMetricItems, bucketSize)
indexMetrics := map[string]util.MapStr{}
for key, item := range metrics {
for _, line := range item.Lines {
@@ -626,6 +627,8 @@ func (h *APIHandler) GetIndexShards(w http.ResponseWriter, req *http.Request, ps
h.WriteJSON(w, shardInfo, http.StatusOK)
}
+const IndexHealthMetricKey = "index_health"
+
func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
clusterID := ps.MustGetParameter("id")
indexName := ps.MustGetParameter("index")
@@ -699,63 +702,81 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
metricItems := []*common.MetricItem{}
- metricItem:=newMetricItem("index_throughput", 1, OperationGroupKey)
- metricItem.AddAxi("indexing","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- metricItem.AddLine("Indexing Rate","Primary Indexing","Number of documents being indexed for node.","group1","payload.elasticsearch.index_stats.primaries.indexing.index_total","max",bucketSizeStr,"doc/s","num","0,0.[00]","0,0.[00]",false,true)
- metricItem.AddLine("Deleting Rate","Primary Deleting","Number of documents being deleted for node.","group1","payload.elasticsearch.index_stats.primaries.indexing.delete_total","max",bucketSizeStr,"doc/s","num","0,0.[00]","0,0.[00]",false,true)
- metricItems=append(metricItems,metricItem)
- metricItem=newMetricItem("search_throughput", 2, OperationGroupKey)
- metricItem.AddAxi("searching","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,false)
- metricItem.AddLine("Search Rate","Search Rate",
- "Number of search requests being executed.",
- "group1","payload.elasticsearch.index_stats.total.search.query_total","max",bucketSizeStr,"query/s","num","0,0.[00]","0,0.[00]",false,true)
- metricItems=append(metricItems,metricItem)
-
- metricItem=newMetricItem("index_latency", 3, LatencyGroupKey)
- metricItem.AddAxi("indexing","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
-
- metricItem.AddLine("Indexing Latency","Primary Indexing Latency","Average latency for indexing documents.","group1","payload.elasticsearch.index_stats.primaries.indexing.index_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
- metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.index_stats.primaries.indexing.index_total"
- metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
- return value/value2
- }
- metricItem.AddLine("Deleting Latency","Primary Deleting Latency","Average latency for delete documents.","group1","payload.elasticsearch.index_stats.primaries.indexing.delete_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
- metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.index_stats.primaries.indexing.delete_total"
- metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
- return value/value2
- }
- metricItems=append(metricItems,metricItem)
-
- metricItem=newMetricItem("search_latency", 4, LatencyGroupKey)
- metricItem.AddAxi("searching","group2",common.PositionLeft,"num","0,0","0,0.[00]",5,false)
-
- metricItem.AddLine("Searching","Query Latency","Average latency for searching query.","group2","payload.elasticsearch.index_stats.total.search.query_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
- metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.index_stats.total.search.query_total"
- metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
- return value/value2
- }
- metricItem.AddLine("Searching","Fetch Latency","Average latency for searching fetch.","group2","payload.elasticsearch.index_stats.total.search.fetch_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
- metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.index_stats.total.search.fetch_total"
- metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
- return value/value2
- }
- metricItem.AddLine("Searching","Scroll Latency","Average latency for searching fetch.","group2","payload.elasticsearch.index_stats.total.search.scroll_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
- metricItem.Lines[2].Metric.Field2 = "payload.elasticsearch.index_stats.total.search.scroll_total"
- metricItem.Lines[2].Metric.Calc = func(value, value2 float64) float64 {
- return value/value2
- }
- metricItems=append(metricItems,metricItem)
- metrics := h.getSingleMetrics(metricItems,query, bucketSize)
- healthMetric, err := h.getIndexHealthMetric(clusterID, indexName, min, max, bucketSize)
+ metricKey := h.GetParameter(req, "key")
+ timeout := h.GetParameterOrDefault(req, "timeout", "60s")
+ du, err := time.ParseDuration(timeout)
if err != nil {
log.Error(err)
+ h.WriteError(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), du)
+ defer cancel()
+ metrics := map[string]*common.MetricItem{}
+ if metricKey == IndexHealthMetricKey {
+ healthMetric, err := h.getIndexHealthMetric(ctx, clusterID, indexName, min, max, bucketSize)
+ if err != nil {
+ log.Error(err)
+ }
+ metrics["index_health"] = healthMetric
+ } else {
+ switch metricKey {
+ case IndexThroughputMetricKey:
+ metricItem := newMetricItem("index_throughput", 1, OperationGroupKey)
+ metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ metricItem.AddLine("Indexing Rate", "Primary Indexing", "Number of documents being indexed for node.", "group1", "payload.elasticsearch.index_stats.primaries.indexing.index_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.AddLine("Deleting Rate", "Primary Deleting", "Number of documents being deleted for node.", "group1", "payload.elasticsearch.index_stats.primaries.indexing.delete_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItems = append(metricItems, metricItem)
+ case SearchThroughputMetricKey:
+ metricItem := newMetricItem("search_throughput", 2, OperationGroupKey)
+ metricItem.AddAxi("searching", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
+ metricItem.AddLine("Search Rate", "Search Rate",
+ "Number of search requests being executed.",
+ "group1", "payload.elasticsearch.index_stats.total.search.query_total", "max", bucketSizeStr, "query/s", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItems = append(metricItems, metricItem)
+ case IndexLatencyMetricKey:
+ metricItem := newMetricItem("index_latency", 3, LatencyGroupKey)
+ metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+
+ metricItem.AddLine("Indexing Latency", "Primary Indexing Latency", "Average latency for indexing documents.", "group1", "payload.elasticsearch.index_stats.primaries.indexing.index_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.index_stats.primaries.indexing.index_total"
+ metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
+ return value / value2
+ }
+ metricItem.AddLine("Deleting Latency", "Primary Deleting Latency", "Average latency for delete documents.", "group1", "payload.elasticsearch.index_stats.primaries.indexing.delete_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.index_stats.primaries.indexing.delete_total"
+ metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
+ return value / value2
+ }
+ metricItems = append(metricItems, metricItem)
+ case SearchLatencyMetricKey:
+ metricItem := newMetricItem("search_latency", 4, LatencyGroupKey)
+ metricItem.AddAxi("searching", "group2", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
+
+ metricItem.AddLine("Searching", "Query Latency", "Average latency for searching query.", "group2", "payload.elasticsearch.index_stats.total.search.query_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.index_stats.total.search.query_total"
+ metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
+ return value / value2
+ }
+ metricItem.AddLine("Searching", "Fetch Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.index_stats.total.search.fetch_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.index_stats.total.search.fetch_total"
+ metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
+ return value / value2
+ }
+ metricItem.AddLine("Searching", "Scroll Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.index_stats.total.search.scroll_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.Lines[2].Metric.Field2 = "payload.elasticsearch.index_stats.total.search.scroll_total"
+ metricItem.Lines[2].Metric.Calc = func(value, value2 float64) float64 {
+ return value / value2
+ }
+ metricItems = append(metricItems, metricItem)
+ }
+ metrics = h.getSingleMetrics(ctx, metricItems, query, bucketSize)
}
- metrics["index_health"] = healthMetric
resBody["metrics"] = metrics
h.WriteJSON(w, resBody, http.StatusOK)
}
-func (h *APIHandler) getIndexHealthMetric(id, indexName string, min, max int64, bucketSize int)(*common.MetricItem, error){
+func (h *APIHandler) getIndexHealthMetric(ctx context.Context, id, indexName string, min, max int64, bucketSize int)(*common.MetricItem, error){
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
@@ -823,7 +844,8 @@ func (h *APIHandler) getIndexHealthMetric(id, indexName string, min, max int64,
},
},
}
- response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
+ queryDSL := util.MustToJSONBytes(query)
+ response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
if err != nil {
log.Error(err)
return nil, err
@@ -841,6 +863,7 @@ func (h *APIHandler) getIndexHealthMetric(id, indexName string, min, max int64,
}
metricItem.Lines[0].Data = metricData
metricItem.Lines[0].Type = common.GraphTypeBar
+ metricItem.Request = string(queryDSL)
return metricItem, nil
}
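
Storing the serialized DSL on `MetricItem.Request` makes the exact query that produced the health bars inspectable by API consumers. A hedged caller sketch under the new signature (the 60-second deadline and the print are illustrative):

```go
// Sketch only: the ctx deadline propagates into QueryDSL, and the metric
// item now carries the raw query for debugging.
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
item, err := h.getIndexHealthMetric(ctx, clusterID, indexName, min, max, bucketSize)
if err != nil {
	log.Error(err)
	return
}
fmt.Println(item.Request) // the DSL sent to the system cluster
```
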
diff --git a/modules/elastic/api/v1/manage.go b/modules/elastic/api/v1/manage.go
index 531976dd..fbd15f20 100644
--- a/modules/elastic/api/v1/manage.go
+++ b/modules/elastic/api/v1/manage.go
@@ -500,6 +500,7 @@ func (h *APIHandler) HandleMetricsSummaryAction(w http.ResponseWriter, req *http
func (h *APIHandler) HandleClusterMetricsAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
id := ps.ByName("id")
+ key := h.GetParameter(req, "key")
bucketSize, min, max, err := h.getMetricRangeAndBucketSize(req, 10, 90)
if err != nil {
@@ -514,13 +515,18 @@ func (h *APIHandler) HandleClusterMetricsAction(w http.ResponseWriter, req *http
}
}
- //fmt.Println(min," vs ",max,",",rangeFrom,rangeTo,"range hours:",hours)
-
- //metrics:=h.GetClusterMetrics(id,bucketSize,min,max)
- isOverview := h.GetIntOrDefault(req, "overview", 0)
+ timeout := h.GetParameterOrDefault(req, "timeout", "60s")
+ du, err := time.ParseDuration(timeout)
+ if err != nil {
+ log.Error(err)
+ h.WriteError(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), du)
+ defer cancel()
var metrics interface{}
- if isOverview == 1 {
- metrics = h.GetClusterIndexMetrics(id, bucketSize, min, max)
+ if util.StringInArray([]string{IndexThroughputMetricKey, SearchThroughputMetricKey, IndexLatencyMetricKey, SearchLatencyMetricKey}, key) {
+ metrics = h.GetClusterIndexMetrics(ctx, id, bucketSize, min, max, key)
} else {
if meta != nil && meta.Config.MonitorConfigs != nil && meta.Config.MonitorConfigs.ClusterStats.Enabled && meta.Config.MonitorConfigs.ClusterStats.Interval != "" {
du, _ := time.ParseDuration(meta.Config.MonitorConfigs.ClusterStats.Interval)
@@ -534,7 +540,7 @@ func (h *APIHandler) HandleClusterMetricsAction(w http.ResponseWriter, req *http
bucketSize = int(du.Seconds())
}
}
- metrics = h.GetClusterMetrics(id, bucketSize, min, max)
+ metrics = h.GetClusterMetrics(ctx, id, bucketSize, min, max, key)
}
resBody["metrics"] = metrics
@@ -546,48 +552,6 @@ func (h *APIHandler) HandleClusterMetricsAction(w http.ResponseWriter, req *http
}
-func (h *APIHandler) HandleNodeMetricsAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
- resBody := map[string]interface{}{}
- id := ps.ByName("id")
- bucketSize, min, max, err := h.getMetricRangeAndBucketSize(req, 10, 90)
- if err != nil {
- log.Error(err)
- resBody["error"] = err
- h.WriteJSON(w, resBody, http.StatusInternalServerError)
- return
- }
- meta := elastic.GetMetadata(id)
- if meta != nil && meta.Config.MonitorConfigs != nil && meta.Config.MonitorConfigs.NodeStats.Interval != "" {
- du, _ := time.ParseDuration(meta.Config.MonitorConfigs.NodeStats.Interval)
- if bucketSize < int(du.Seconds()) {
- bucketSize = int(du.Seconds())
- }
- }
- nodeName := h.Get(req, "node_name", "")
- top := h.GetIntOrDefault(req, "top", 5)
- resBody["metrics"], err = h.getNodeMetrics(id, bucketSize, min, max, nodeName, top)
- if err != nil {
- log.Error(err)
- h.WriteError(w, err.Error(), http.StatusInternalServerError)
- return
- }
- ver := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).GetVersion()
- if ver.Distribution == "" {
- cr, err := util.VersionCompare(ver.Number, "6.1")
- if err != nil {
- log.Error(err)
- }
- if cr < 0 {
- resBody["tips"] = "The system cluster version is lower than 6.1, the top node may be inaccurate"
- }
- }
-
- err = h.WriteJSON(w, resBody, http.StatusOK)
- if err != nil {
- log.Error(err)
- }
-}
-
func (h *APIHandler) HandleIndexMetricsAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
id := ps.ByName("id")
@@ -607,52 +571,73 @@ func (h *APIHandler) HandleIndexMetricsAction(w http.ResponseWriter, req *http.R
}
indexName := h.Get(req, "index_name", "")
top := h.GetIntOrDefault(req, "top", 5)
- metrics := h.getIndexMetrics(req, id, bucketSize, min, max, indexName, top)
- if metrics["doc_count"] != nil && metrics["docs_deleted"] != nil && len(metrics["doc_count"].Lines) > 0 && len(metrics["docs_deleted"].Lines) > 0 {
- metricA := metrics["doc_count"]
- metricB := metrics["docs_deleted"]
- if dataA, ok := metricA.Lines[0].Data.([][]interface{}); ok {
- if dataB, ok := metricB.Lines[0].Data.([][]interface{}); ok {
- data := make([]map[string]interface{}, 0, len(dataA)*2)
- var (
- x1 float64
- x2 float64
- )
- for i := 0; i < len(dataA); i++ {
- x1 = dataA[i][1].(float64)
- x2 = dataB[i][1].(float64)
- if x1+x2 == 0 {
- continue
- }
- data = append(data, map[string]interface{}{
- "x": dataA[i][0],
- "y": x1 / (x1 + x2) * 100,
- "g": "Doc Count",
- })
- data = append(data, map[string]interface{}{
- "x": dataA[i][0],
- "y": x2 / (x1 + x2) * 100,
- "g": "Doc Deleted",
- })
- }
- metricDocPercent := &common.MetricItem{
- Axis: []*common.MetricAxis{},
- Key: "doc_percent",
- Group: metricA.Group,
- Order: 18,
- Lines: []*common.MetricLine{
- {
- TimeRange: metricA.Lines[0].TimeRange,
- Data: data,
- Type: common.GraphTypeBar,
- },
- },
- }
- metrics["doc_percent"] = metricDocPercent
+ key := h.GetParameter(req, "key")
+ timeout := h.GetParameterOrDefault(req, "timeout", "60s")
+ du, err := time.ParseDuration(timeout)
+ if err != nil {
+ log.Error(err)
+ h.WriteError(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), du)
+ defer cancel()
+ var metrics map[string]*common.MetricItem
+ if key == DocPercentMetricKey {
+ metrics = h.getIndexMetrics(ctx, req, id, bucketSize, min, max, indexName, top, DocCountMetricKey)
+ docsDeletedMetrics := h.getIndexMetrics(ctx, req, id, bucketSize, min, max, indexName, top, DocsDeletedMetricKey)
+ for k, v := range docsDeletedMetrics {
+ if v != nil {
+ metrics[k] = v
}
}
-
+ if metrics["doc_count"] != nil && metrics["docs_deleted"] != nil && len(metrics["doc_count"].Lines) > 0 && len(metrics["docs_deleted"].Lines) > 0 {
+ metricA := metrics["doc_count"]
+ metricB := metrics["docs_deleted"]
+ if dataA, ok := metricA.Lines[0].Data.([][]interface{}); ok {
+ if dataB, ok := metricB.Lines[0].Data.([][]interface{}); ok {
+ data := make([]map[string]interface{}, 0, len(dataA)*2)
+ var (
+ x1 float64
+ x2 float64
+ )
+ for i := 0; i < len(dataA); i++ {
+ x1 = dataA[i][1].(float64)
+ x2 = dataB[i][1].(float64)
+ if x1+x2 == 0 {
+ continue
+ }
+ data = append(data, map[string]interface{}{
+ "x": dataA[i][0],
+ "y": x1 / (x1 + x2) * 100,
+ "g": "Doc Count",
+ })
+ data = append(data, map[string]interface{}{
+ "x": dataA[i][0],
+ "y": x2 / (x1 + x2) * 100,
+ "g": "Doc Deleted",
+ })
+ }
+ metricDocPercent := &common.MetricItem{
+ Axis: []*common.MetricAxis{},
+ Key: "doc_percent",
+ Group: metricA.Group,
+ Order: 18,
+ Lines: []*common.MetricLine{
+ {
+ TimeRange: metricA.Lines[0].TimeRange,
+ Data: data,
+ Type: common.GraphTypeBar,
+ },
+ },
+ }
+ metrics["doc_percent"] = metricDocPercent
+ }
+ }
+ }
+ } else {
+ metrics = h.getIndexMetrics(ctx, req, id, bucketSize, min, max, indexName, top, key)
}
+
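
The `doc_percent` branch fetches `doc_count` and `docs_deleted` separately and stitches them into a stacked-percentage series. A self-contained illustration of the arithmetic (values invented):

```go
package main

import "fmt"

func main() {
	// One time bucket: x1 live docs, x2 deleted docs. Buckets where
	// x1+x2 == 0 are skipped, exactly as in the handler above.
	x1, x2 := 900.0, 100.0
	if x1+x2 != 0 {
		fmt.Printf("Doc Count: %.0f%%, Doc Deleted: %.0f%%\n",
			x1/(x1+x2)*100, x2/(x1+x2)*100) // Doc Count: 90%, Doc Deleted: 10%
	}
}
```
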
resBody["metrics"] = metrics
ver := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).GetVersion()
if ver.Distribution == "" {
@@ -670,42 +655,6 @@ func (h *APIHandler) HandleIndexMetricsAction(w http.ResponseWriter, req *http.R
log.Error(err)
}
}
-func (h *APIHandler) HandleQueueMetricsAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
- resBody := map[string]interface{}{}
- id := ps.ByName("id")
- bucketSize, min, max, err := h.getMetricRangeAndBucketSize(req, 10, 90)
- if err != nil {
- log.Error(err)
- resBody["error"] = err
- h.WriteJSON(w, resBody, http.StatusInternalServerError)
- return
- }
- nodeName := h.Get(req, "node_name", "")
- top := h.GetIntOrDefault(req, "top", 5)
- meta := elastic.GetMetadata(id)
- if meta != nil && meta.Config.MonitorConfigs != nil && meta.Config.MonitorConfigs.NodeStats.Interval != "" {
- du, _ := time.ParseDuration(meta.Config.MonitorConfigs.NodeStats.Interval)
- if bucketSize < int(du.Seconds()) {
- bucketSize = int(du.Seconds())
- }
- }
- resBody["metrics"] = h.getThreadPoolMetrics(id, bucketSize, min, max, nodeName, top)
- ver := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).GetVersion()
- if ver.Distribution == "" {
- cr, err := util.VersionCompare(ver.Number, "6.1")
- if err != nil {
- log.Error(err)
- }
- if cr < 0 {
- resBody["tips"] = "The system cluster version is lower than 6.1, the top node may be inaccurate"
- }
- }
-
- err = h.WriteJSON(w, resBody, http.StatusOK)
- if err != nil {
- log.Error(err)
- }
-}
// TODO, use expired hash
var clusters = map[string]elastic.ElasticsearchConfig{}
@@ -810,56 +759,45 @@ const (
CircuitBreakerGroupKey = "circuit_breaker"
)
-func (h *APIHandler) GetClusterMetrics(id string, bucketSize int, min, max int64) map[string]*common.MetricItem {
+const (
+ ClusterStorageMetricKey = "cluster_storage"
+ ClusterDocumentsMetricKey = "cluster_documents"
+ ClusterIndicesMetricKey = "cluster_indices"
+ ClusterNodeCountMetricKey = "node_count"
+ ClusterHealthMetricKey = "cluster_health"
+ ShardCountMetricKey = "shard_count"
+ CircuitBreakerMetricKey = "circuit_breaker"
+)
+func (h *APIHandler) GetClusterMetrics(ctx context.Context, id string, bucketSize int, min, max int64, metricKey string) map[string]*common.MetricItem {
+ var clusterMetricsResult = map[string]*common.MetricItem{}
+ switch metricKey {
+ case ClusterDocumentsMetricKey,
+ ClusterStorageMetricKey,
+ ClusterIndicesMetricKey,
+ ClusterNodeCountMetricKey:
+ clusterMetricsResult = h.getClusterMetricsByKey(ctx, id, bucketSize, min, max, metricKey)
+ case IndexLatencyMetricKey, IndexThroughputMetricKey, SearchThroughputMetricKey, SearchLatencyMetricKey:
+ clusterMetricsResult = h.GetClusterIndexMetrics(ctx, id, bucketSize, min, max, metricKey)
+ case ClusterHealthMetricKey:
+ statusMetric, err := h.getClusterStatusMetric(ctx, id, min, max, bucketSize)
+ if err == nil {
+ clusterMetricsResult[ClusterHealthMetricKey] = statusMetric
+ } else {
+ log.Error("get cluster status metric error: ", err)
+ }
+ case ShardCountMetricKey:
+ clusterMetricsResult = h.getShardsMetric(ctx, id, min, max, bucketSize)
+
+ case CircuitBreakerMetricKey:
+ clusterMetricsResult = h.getCircuitBreakerMetric(ctx, id, min, max, bucketSize)
+ }
+
+ return clusterMetricsResult
+}
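
`GetClusterMetrics` is now a pure dispatcher: each request computes exactly one metric family, which also retires the removed `//todo` about making the old four-query fan-out asynchronous, since fan-out moves to the caller. A caller-side sketch of that fan-out (the `errgroup` usage is an assumption, not something this patch introduces):

```go
// Sketch only: fetch two metric families concurrently, one query each.
g, gctx := errgroup.WithContext(ctx)
results := make([]map[string]*common.MetricItem, 2)
for i, key := range []string{ClusterHealthMetricKey, ShardCountMetricKey} {
	i, key := i, key // capture loop variables per goroutine
	g.Go(func() error {
		results[i] = h.GetClusterMetrics(gctx, id, bucketSize, min, max, key)
		return nil
	})
}
_ = g.Wait() // goroutines only return nil; Wait just joins them
```
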
+
+func (h *APIHandler) getClusterMetricsByKey(ctx context.Context, id string, bucketSize int, min, max int64, metricKey string) map[string]*common.MetricItem {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
-
- clusterMetricItems := []*common.MetricItem{}
- metricItem := newMetricItem("cluster_storage", 8, StorageGroupKey)
- metricItem.AddAxi("indices_storage", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
- metricItem.AddAxi("available_storage", "group2", common.PositionRight, "bytes", "0.[0]", "0.[0]", 5, true)
-
- metricItem.AddLine("Disk", "Indices Storage", "", "group1", "payload.elasticsearch.cluster_stats.indices.store.size_in_bytes", "max", bucketSizeStr, "", "bytes", "0,0.[00]", "0,0.[00]", false, false)
- metricItem.AddLine("Disk", "Available Disk", "", "group2", "payload.elasticsearch.cluster_stats.nodes.fs.available_in_bytes", "max", bucketSizeStr, "", "bytes", "0,0.[00]", "0,0.[00]", false, false)
-
- clusterMetricItems = append(clusterMetricItems, metricItem)
-
- metricItem = newMetricItem("cluster_documents", 4, StorageGroupKey)
- metricItem.AddAxi("count", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
- metricItem.AddAxi("deleted", "group2", common.PositionRight, "num", "0,0", "0,0.[00]", 5, false)
- metricItem.AddLine("Documents Count", "Documents Count", "", "group1", "payload.elasticsearch.cluster_stats.indices.docs.count", "max", bucketSizeStr, "", "num", "0,0.[00]", "0,0.[00]", false, false)
- metricItem.AddLine("Documents Deleted", "Documents Deleted", "", "group2", "payload.elasticsearch.cluster_stats.indices.docs.deleted", "max", bucketSizeStr, "", "num", "0,0.[00]", "0,0.[00]", false, false)
- clusterMetricItems = append(clusterMetricItems, metricItem)
-
- metricItem = newMetricItem("cluster_indices", 6, StorageGroupKey)
- metricItem.AddAxi("count", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
- metricItem.AddLine("Indices Count", "Indices Count", "", "group1", "payload.elasticsearch.cluster_stats.indices.count", "max", bucketSizeStr, "", "num", "0,0.[00]", "0,0.[00]", false, false)
- clusterMetricItems = append(clusterMetricItems, metricItem)
-
- metricItem = newMetricItem("node_count", 5, MemoryGroupKey)
- metricItem.AddAxi("count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
- meta := elastic.GetMetadata(id)
- if meta == nil {
- err := fmt.Errorf("metadata of cluster [%s] is not found", id)
- panic(err)
- }
- majorVersion := meta.GetMajorVersion()
-
- metricItem.AddLine("Total", "Total Nodes", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.total", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
-
- //TODO check version difference
- if majorVersion < 5 {
- metricItem.AddLine("Master Only", "Master Only", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.master_only", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
- metricItem.AddLine("Data Node", "Data Only", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.data_only", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
- metricItem.AddLine("Master Data", "Master Data", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.master_data", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
- } else {
- metricItem.AddLine("Master Node", "Master Node", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.master", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
- metricItem.AddLine("Data Node", "Data Node", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.data", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
- metricItem.AddLine("Coordinating Node Only", "Coordinating Node Only", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.coordinating_only", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
- metricItem.AddLine("Ingest Node", "Ingest Node", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.ingest", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
- }
-
- clusterMetricItems = append(clusterMetricItems, metricItem)
query := map[string]interface{}{}
query["query"] = util.MapStr{
"bool": util.MapStr{
@@ -898,80 +836,117 @@ func (h *APIHandler) GetClusterMetrics(id string, bucketSize int, min, max int64
},
},
}
- //todo: since there is four queries, we can change these query to async
- indexMetricsResult := h.GetClusterIndexMetrics(id, bucketSize, min, max)
- clusterMetricsResult := h.getSingleMetrics(clusterMetricItems, query, bucketSize)
- for k, v := range clusterMetricsResult {
- indexMetricsResult[k] = v
- }
- statusMetric, err := h.getClusterStatusMetric(id, min, max, bucketSize)
- if err == nil {
- indexMetricsResult["cluster_health"] = statusMetric
- } else {
- log.Error("get cluster status metric error: ", err)
- }
- clusterHealthMetricsResult := h.getShardsMetric(id, min, max, bucketSize)
- for k, v := range clusterHealthMetricsResult {
- indexMetricsResult[k] = v
- }
- // get CircuitBreaker metric
- circuitBreakerMetricsResult := h.getCircuitBreakerMetric(id, min, max, bucketSize)
- for k, v := range circuitBreakerMetricsResult {
- indexMetricsResult[k] = v
- }
+ clusterMetricItems := []*common.MetricItem{}
+ switch metricKey {
+ case ClusterStorageMetricKey:
+ metricItem := newMetricItem(ClusterStorageMetricKey, 8, StorageGroupKey)
+ metricItem.AddAxi("indices_storage", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
+ metricItem.AddAxi("available_storage", "group2", common.PositionRight, "bytes", "0.[0]", "0.[0]", 5, true)
- return indexMetricsResult
+ metricItem.AddLine("Disk", "Indices Storage", "", "group1", "payload.elasticsearch.cluster_stats.indices.store.size_in_bytes", "max", bucketSizeStr, "", "bytes", "0,0.[00]", "0,0.[00]", false, false)
+ metricItem.AddLine("Disk", "Available Disk", "", "group2", "payload.elasticsearch.cluster_stats.nodes.fs.available_in_bytes", "max", bucketSizeStr, "", "bytes", "0,0.[00]", "0,0.[00]", false, false)
+
+ clusterMetricItems = append(clusterMetricItems, metricItem)
+ case ClusterDocumentsMetricKey:
+ metricItem := newMetricItem(ClusterDocumentsMetricKey, 4, StorageGroupKey)
+ metricItem.AddAxi("count", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
+ metricItem.AddAxi("deleted", "group2", common.PositionRight, "num", "0,0", "0,0.[00]", 5, false)
+ metricItem.AddLine("Documents Count", "Documents Count", "", "group1", "payload.elasticsearch.cluster_stats.indices.docs.count", "max", bucketSizeStr, "", "num", "0,0.[00]", "0,0.[00]", false, false)
+ metricItem.AddLine("Documents Deleted", "Documents Deleted", "", "group2", "payload.elasticsearch.cluster_stats.indices.docs.deleted", "max", bucketSizeStr, "", "num", "0,0.[00]", "0,0.[00]", false, false)
+ clusterMetricItems = append(clusterMetricItems, metricItem)
+ case ClusterIndicesMetricKey:
+ metricItem := newMetricItem(ClusterIndicesMetricKey, 6, StorageGroupKey)
+ metricItem.AddAxi("count", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
+ metricItem.AddLine("Indices Count", "Indices Count", "", "group1", "payload.elasticsearch.cluster_stats.indices.count", "max", bucketSizeStr, "", "num", "0,0.[00]", "0,0.[00]", false, false)
+ clusterMetricItems = append(clusterMetricItems, metricItem)
+ case ClusterNodeCountMetricKey:
+ metricItem := newMetricItem("node_count", 5, MemoryGroupKey)
+ metricItem.AddAxi("count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
+ meta := elastic.GetMetadata(id)
+ if meta == nil {
+ err := fmt.Errorf("metadata of cluster [%s] is not found", id)
+ panic(err)
+ }
+ majorVersion := meta.GetMajorVersion()
+
+ metricItem.AddLine("Total", "Total Nodes", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.total", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
+
+ if majorVersion < 5 {
+ metricItem.AddLine("Master Only", "Master Only", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.master_only", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
+ metricItem.AddLine("Data Node", "Data Only", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.data_only", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
+ metricItem.AddLine("Master Data", "Master Data", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.master_data", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
+ } else {
+ metricItem.AddLine("Master Node", "Master Node", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.master", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
+ metricItem.AddLine("Data Node", "Data Node", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.data", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
+ metricItem.AddLine("Coordinating Node Only", "Coordinating Node Only", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.coordinating_only", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
+ metricItem.AddLine("Ingest Node", "Ingest Node", "", "group1", "payload.elasticsearch.cluster_stats.nodes.count.ingest", "max", bucketSizeStr, "", "num", "0.[00]", "0.[00]", false, false)
+ }
+ clusterMetricItems = append(clusterMetricItems, metricItem)
+ }
+ return h.getSingleMetrics(ctx, clusterMetricItems, query, bucketSize)
}
-func (h *APIHandler) GetClusterIndexMetrics(id string, bucketSize int, min, max int64) map[string]*common.MetricItem {
+const (
+ IndexThroughputMetricKey = "index_throughput"
+ SearchThroughputMetricKey = "search_throughput"
+ IndexLatencyMetricKey = "index_latency"
+ SearchLatencyMetricKey = "search_latency"
+)
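
These four keys are the only values `GetClusterIndexMetrics` accepts; anything else falls through to the `default` branch and panics, so callers should validate the key before dispatching. An illustrative client request (the URL path is a placeholder; only the `key` and `timeout` query parameters come from this patch):

```go
// Sketch only: select one metric family per request.
url := "http://localhost:9000/elasticsearch/<cluster-id>/metrics?key=" +
	SearchLatencyMetricKey + "&timeout=30s"
resp, err := http.Get(url)
```
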
+func (h *APIHandler) GetClusterIndexMetrics(ctx context.Context, id string, bucketSize int, min, max int64, metricKey string) map[string]*common.MetricItem {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
metricItems := []*common.MetricItem{}
- metricItem := newMetricItem("index_throughput", 2, OperationGroupKey)
- metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
- metricItem.AddLine("Indexing Rate", "Total Indexing", "Number of documents being indexed for primary and replica shards.", "group1", "payload.elasticsearch.index_stats.total.indexing.index_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
- metricItem.AddLine("Indexing Rate", "Primary Indexing", "Number of documents being indexed for primary shards.", "group1", "payload.elasticsearch.index_stats.primaries.indexing.index_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
- metricItems = append(metricItems, metricItem)
+ switch metricKey {
+ case IndexThroughputMetricKey:
+ metricItem := newMetricItem(IndexThroughputMetricKey, 2, OperationGroupKey)
+ metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ metricItem.AddLine("Indexing Rate", "Total Indexing", "Number of documents being indexed for primary and replica shards.", "group1", "payload.elasticsearch.index_stats.total.indexing.index_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.AddLine("Indexing Rate", "Primary Indexing", "Number of documents being indexed for primary shards.", "group1", "payload.elasticsearch.index_stats.primaries.indexing.index_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItems = append(metricItems, metricItem)
+ case SearchThroughputMetricKey:
+ metricItem := newMetricItem(SearchThroughputMetricKey, 2, OperationGroupKey)
+ metricItem.AddAxi("searching", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
+ metricItem.AddLine("Search Rate", "Total Query",
+ "Number of search requests being executed across primary and replica shards. A single search can run against multiple shards!",
+ "group1", "payload.elasticsearch.index_stats.total.search.query_total", "max", bucketSizeStr, "query/s", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItems = append(metricItems, metricItem)
+ case IndexLatencyMetricKey:
+ metricItem := newMetricItem(IndexLatencyMetricKey, 3, LatencyGroupKey)
+ metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
- metricItem = newMetricItem("search_throughput", 2, OperationGroupKey)
- metricItem.AddAxi("searching", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
- metricItem.AddLine("Search Rate", "Total Query",
- "Number of search requests being executed across primary and replica shards. A single search can run against multiple shards!",
- "group1", "payload.elasticsearch.index_stats.total.search.query_total", "max", bucketSizeStr, "query/s", "num", "0,0.[00]", "0,0.[00]", false, true)
- metricItems = append(metricItems, metricItem)
+ metricItem.AddLine("Indexing", "Indexing Latency", "Average latency for indexing documents.", "group1", "payload.elasticsearch.index_stats.primaries.indexing.index_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.index_stats.primaries.indexing.index_total"
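+ // Average indexing latency: delta of index_time_in_millis divided by delta of index_total (ms per doc).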
+ metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
+ return value / value2
+ }
+ metricItem.AddLine("Indexing", "Delete Latency", "Average latency for delete documents.", "group1", "payload.elasticsearch.index_stats.primaries.indexing.delete_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.index_stats.primaries.indexing.delete_total"
+ metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
+ return value / value2
+ }
+ metricItems = append(metricItems, metricItem)
+ case SearchLatencyMetricKey:
+ metricItem := newMetricItem(SearchLatencyMetricKey, 3, LatencyGroupKey)
+ metricItem.AddAxi("searching", "group2", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
- metricItem = newMetricItem("index_latency", 3, LatencyGroupKey)
- metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+ metricItem.AddLine("Searching", "Query Latency", "Average latency for searching query.", "group2", "payload.elasticsearch.index_stats.total.search.query_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.index_stats.total.search.query_total"
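+ // Average query latency: delta of query_time_in_millis divided by delta of query_total (ms per query).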
+ metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
+ return value / value2
+ }
+ metricItem.AddLine("Searching", "Fetch Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.index_stats.total.search.fetch_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.index_stats.total.search.fetch_total"
+ metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
+ return value / value2
+ }
+ metricItem.AddLine("Searching", "Scroll Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.index_stats.total.search.scroll_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
+ metricItem.Lines[2].Metric.Field2 = "payload.elasticsearch.index_stats.total.search.scroll_total"
+ metricItem.Lines[2].Metric.Calc = func(value, value2 float64) float64 {
+ return value / value2
+ }
+ metricItems = append(metricItems, metricItem)
+ default:
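+ // A metricKey outside the constants above is a programming error, so fail fast.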
+ panic("not support metric key: " + metricKey)
+ }
- metricItem.AddLine("Indexing", "Indexing Latency", "Average latency for indexing documents.", "group1", "payload.elasticsearch.index_stats.primaries.indexing.index_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
- metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.index_stats.primaries.indexing.index_total"
- metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
- return value / value2
- }
- metricItem.AddLine("Indexing", "Delete Latency", "Average latency for delete documents.", "group1", "payload.elasticsearch.index_stats.primaries.indexing.delete_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
- metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.index_stats.primaries.indexing.delete_total"
- metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
- return value / value2
- }
- metricItems = append(metricItems, metricItem)
-
- metricItem = newMetricItem("search_latency", 3, LatencyGroupKey)
- metricItem.AddAxi("searching", "group2", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
-
- metricItem.AddLine("Searching", "Query Latency", "Average latency for searching query.", "group2", "payload.elasticsearch.index_stats.total.search.query_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
- metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.index_stats.total.search.query_total"
- metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
- return value / value2
- }
- metricItem.AddLine("Searching", "Fetch Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.index_stats.total.search.fetch_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
- metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.index_stats.total.search.fetch_total"
- metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
- return value / value2
- }
- metricItem.AddLine("Searching", "Scroll Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.index_stats.total.search.scroll_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
- metricItem.Lines[2].Metric.Field2 = "payload.elasticsearch.index_stats.total.search.scroll_total"
- metricItem.Lines[2].Metric.Calc = func(value, value2 float64) float64 {
- return value / value2
- }
- metricItems = append(metricItems, metricItem)
query := map[string]interface{}{}
query["query"] = util.MapStr{
"bool": util.MapStr{
@@ -1017,10 +992,10 @@ func (h *APIHandler) GetClusterIndexMetrics(id string, bucketSize int, min, max
},
},
}
- return h.getSingleMetrics(metricItems, query, bucketSize)
+ return h.getSingleMetrics(ctx, metricItems, query, bucketSize)
}
-func (h *APIHandler) getShardsMetric(id string, min, max int64, bucketSize int) map[string]*common.MetricItem {
+func (h *APIHandler) getShardsMetric(ctx context.Context, id string, min, max int64, bucketSize int) map[string]*common.MetricItem {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
query := util.MapStr{
"query": util.MapStr{
@@ -1079,10 +1054,10 @@ func (h *APIHandler) getShardsMetric(id string, min, max int64, bucketSize int)
metricItem.AddLine("Delayed Unassigned Shards", "Delayed Unassigned Shards", "", "group1", "payload.elasticsearch.cluster_health.delayed_unassigned_shards", "max", bucketSizeStr, "", "num", "0,0.[00]", "0,0.[00]", false, false)
var clusterHealthMetrics []*common.MetricItem
clusterHealthMetrics = append(clusterHealthMetrics, metricItem)
- return h.getSingleMetrics(clusterHealthMetrics, query, bucketSize)
+ return h.getSingleMetrics(ctx, clusterHealthMetrics, query, bucketSize)
}
-func (h *APIHandler) getCircuitBreakerMetric(id string, min, max int64, bucketSize int) map[string]*common.MetricItem {
+func (h *APIHandler) getCircuitBreakerMetric(ctx context.Context, id string, min, max int64, bucketSize int) map[string]*common.MetricItem {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
query := util.MapStr{
"query": util.MapStr{
@@ -1140,10 +1115,10 @@ func (h *APIHandler) getCircuitBreakerMetric(id string, min, max int64, bucketSi
metricItem.AddLine("In Flight Requests Breaker Tripped", "In Flight Requests Tripped", "", "group1", "payload.elasticsearch.node_stats.breakers.in_flight_requests.tripped", "sum", bucketSizeStr, "times/s", "num", "0,0.[00]", "0,0.[00]", false, true)
var circuitBreakerMetrics []*common.MetricItem
circuitBreakerMetrics = append(circuitBreakerMetrics, metricItem)
- return h.getSingleMetrics(circuitBreakerMetrics, query, bucketSize)
+ return h.getSingleMetrics(ctx, circuitBreakerMetrics, query, bucketSize)
}
-func (h *APIHandler) getClusterStatusMetric(id string, min, max int64, bucketSize int) (*common.MetricItem, error) {
+func (h *APIHandler) getClusterStatusMetric(ctx context.Context, id string, min, max int64, bucketSize int) (*common.MetricItem, error) {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
@@ -1204,7 +1179,8 @@ func (h *APIHandler) getClusterStatusMetric(id string, min, max int64, bucketSiz
},
},
}
- response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
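+ // Marshal the query once and keep the raw DSL; it is attached to the metric item below for inspection.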
+ queryDSL := util.MustToJSONBytes(query)
+ response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
if err != nil {
log.Error(err)
return nil, err
@@ -1221,6 +1197,7 @@ func (h *APIHandler) getClusterStatusMetric(id string, min, max int64, bucketSiz
}
metricItem.Lines[0].Data = metricData
metricItem.Lines[0].Type = common.GraphTypeBar
+ metricItem.Request = string(queryDSL)
return metricItem, nil
}
diff --git a/modules/elastic/api/v1/metrics_util.go b/modules/elastic/api/v1/metrics_util.go
index 1dc2635e..213d7847 100644
--- a/modules/elastic/api/v1/metrics_util.go
+++ b/modules/elastic/api/v1/metrics_util.go
@@ -24,6 +24,7 @@
package v1
import (
+ "context"
"fmt"
"infini.sh/framework/core/env"
"net/http"
@@ -109,9 +110,10 @@ func generateGroupAggs(nodeMetricItems []GroupMetricItem) map[string]interface{}
return aggs
}
-func (h *APIHandler) getMetrics(query map[string]interface{}, grpMetricItems []GroupMetricItem, bucketSize int) map[string]*common.MetricItem {
+func (h *APIHandler) getMetrics(ctx context.Context, query map[string]interface{}, grpMetricItems []GroupMetricItem, bucketSize int) map[string]*common.MetricItem {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
- response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
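+ // Serialize once; the same DSL is attached to each metric item below so callers can inspect the originating query.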
+ queryDSL := util.MustToJSONBytes(query)
+ response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
if err != nil {
log.Error(err)
panic(err)
@@ -205,6 +207,7 @@ func (h *APIHandler) getMetrics(query map[string]interface{}, grpMetricItems []G
}
line.Data = grpMetricData[dataKey][line.Metric.Label]
}
+ metricItem.MetricItem.Request = string(queryDSL)
result[metricItem.Key] = metricItem.MetricItem
}
return result
@@ -328,7 +331,7 @@ func GetMetricRangeAndBucketSize(minStr string, maxStr string, bucketSize int, m
}
// fetch a single metric, which may contain multiple lines
-func (h *APIHandler) getSingleMetrics(metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int) map[string]*common.MetricItem {
+func (h *APIHandler) getSingleMetrics(ctx context.Context, metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int) map[string]*common.MetricItem {
metricData := map[string][][]interface{}{}
aggs := map[string]interface{}{}
@@ -387,7 +390,8 @@ func (h *APIHandler) getSingleMetrics(metricItems []*common.MetricItem, query ma
"aggs": aggs,
},
}
- response, err := elastic.GetClient(clusterID).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
+ queryDSL := util.MustToJSONBytes(query)
+ response, err := elastic.GetClient(clusterID).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
if err != nil {
log.Error(err)
panic(err)
@@ -449,6 +453,7 @@ func (h *APIHandler) getSingleMetrics(metricItems []*common.MetricItem, query ma
line.TimeRange = common.TimeRange{Min: minDate, Max: maxDate}
line.Data = metricData[line.Metric.GetDataKey()]
}
+ metricItem.Request = string(queryDSL)
result[metricItem.Key] = metricItem
}
diff --git a/modules/elastic/api/v1/node_metrics.go b/modules/elastic/api/v1/node_metrics.go
deleted file mode 100644
index 1b037a2d..00000000
--- a/modules/elastic/api/v1/node_metrics.go
+++ /dev/null
@@ -1,1208 +0,0 @@
-// Copyright (C) INFINI Labs & INFINI LIMITED.
-//
-// The INFINI Console is offered under the GNU Affero General Public License v3.0
-// and as commercial software.
-//
-// For commercial licensing, contact us at:
-// - Website: infinilabs.com
-// - Email: hello@infini.ltd
-//
-// Open Source licensed under AGPL V3:
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see &lt;https://www.gnu.org/licenses/&gt;.
-
-package v1
-
-import (
- "fmt"
- log "github.com/cihub/seelog"
- "infini.sh/framework/core/elastic"
- "infini.sh/framework/core/global"
- "infini.sh/framework/core/util"
- "infini.sh/framework/modules/elastic/adapter"
- "infini.sh/framework/modules/elastic/common"
- "sort"
- "strings"
- "time"
-)
-
-func (h *APIHandler) getNodeMetrics(clusterID string, bucketSize int, min, max int64, nodeName string, top int) (map[string]*common.MetricItem, error){
- bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
- clusterUUID, err := adapter.GetClusterUUID(clusterID)
- if err != nil {
- return nil, err
- }
-
- var must = []util.MapStr{
- {
- "term":util.MapStr{
- "metadata.labels.cluster_uuid":util.MapStr{
- "value": clusterUUID,
- },
- },
- },
- {
- "term": util.MapStr{
- "metadata.category": util.MapStr{
- "value": "elasticsearch",
- },
- },
- },
- {
- "term": util.MapStr{
- "metadata.name": util.MapStr{
- "value": "node_stats",
- },
- },
- },
- }
- var (
- nodeNames []string
- )
- if nodeName != "" {
- nodeNames = strings.Split(nodeName, ",")
- top = len(nodeNames)
- }else{
- nodeNames, err = h.getTopNodeName(clusterID, top, 15)
- if err != nil {
- log.Error(err)
- }
- }
- if len(nodeNames) > 0 {
- must = append(must, util.MapStr{
- "bool": util.MapStr{
- "minimum_should_match": 1,
- "should": []util.MapStr{
- {
- "terms": util.MapStr{
- "metadata.labels.transport_address": nodeNames,
- },
- },
- {
- "terms": util.MapStr{
- "metadata.labels.node_id": nodeNames,
- },
- },
- },
- },
-
- })
- }
-
- query:=map[string]interface{}{}
- query["query"]=util.MapStr{
- "bool": util.MapStr{
- "must": must,
- "filter": []util.MapStr{
- {
- "range": util.MapStr{
- "timestamp": util.MapStr{
- "gte": min,
- "lte": max,
- },
- },
- },
- },
- },
- }
- cpuMetric := newMetricItem("cpu", 1, SystemGroupKey)
- cpuMetric.AddAxi("cpu","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
-
- nodeMetricItems := []GroupMetricItem{
- {
- Key: "cpu",
- Field: "payload.elasticsearch.node_stats.process.cpu.percent",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: cpuMetric,
- FormatType: "ratio",
- Units: "%",
- },
- }
-
- osCpuMetric := newMetricItem("os_cpu", 2, SystemGroupKey)
- osCpuMetric.AddAxi("OS CPU Percent","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "os_cpu",
- Field: "payload.elasticsearch.node_stats.os.cpu.percent",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: osCpuMetric,
- FormatType: "ratio",
- Units: "%",
- })
-
- osMemMetric := newMetricItem("os_used_mem", 2, SystemGroupKey)
- osMemMetric.AddAxi("OS Mem Used Percent","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "os_used_mem",
- Field: "payload.elasticsearch.node_stats.os.mem.used_percent",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: osMemMetric,
- FormatType: "ratio",
- Units: "%",
- })
- osLoadMetric := newMetricItem("os_load_average_1m", 2, SystemGroupKey)
- osLoadMetric.AddAxi("OS Load 1m Average","group1",common.PositionLeft,"","0.[0]","0.[0]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "os_load_average_1m",
- Field: "payload.elasticsearch.node_stats.os.cpu.load_average.1m",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: osLoadMetric,
- FormatType: "num",
- Units: "",
- })
- //swap usage
- osSwapMetric := newMetricItem("os_used_swap", 3, SystemGroupKey)
- osSwapMetric.AddAxi("OS Swap Used Percent","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "os_used_swap",
- Field: "payload.elasticsearch.node_stats.os.swap.used_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- Field2: "payload.elasticsearch.node_stats.os.swap.total_in_bytes",
- Calc: func(value, value2 float64) float64 {
- return util.ToFixed((value / value2)*100, 2)
- },
- MetricItem: osSwapMetric,
- FormatType: "ratio",
- Units: "%",
- })
- openFileMetric := newMetricItem("open_file", 2, SystemGroupKey)
- openFileMetric.AddAxi("Open File Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "open_file",
- Field: "payload.elasticsearch.node_stats.process.open_file_descriptors",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: openFileMetric,
- FormatType: "num",
- Units: "",
- })
- openFilePercentMetric := newMetricItem("open_file_percent", 2, SystemGroupKey)
- openFilePercentMetric.AddAxi("Open File Percent","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "open_file_percent",
- Field: "payload.elasticsearch.node_stats.process.open_file_descriptors",
- ID: util.GetUUID(),
- IsDerivative: false,
- Field2: "payload.elasticsearch.node_stats.process.max_file_descriptors",
- Calc: func(value, value2 float64) float64 {
- if value < 0 {
- return value
- }
- return util.ToFixed((value / value2)*100, 2)
- },
- MetricItem: openFilePercentMetric,
- FormatType: "ratio",
- Units: "%",
- })
-
- diskMetric := newMetricItem("disk", 2, SystemGroupKey)
- diskMetric.AddAxi("disk available percent","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
-
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "disk",
- Field: "payload.elasticsearch.node_stats.fs.total.total_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: diskMetric,
- FormatType: "ratio",
- Units: "%",
- Field2: "payload.elasticsearch.node_stats.fs.total.available_in_bytes",
- Calc: func(value, value2 float64) float64 {
- return util.ToFixed((value2 / value)*100, 2)
- },
- })
- // indexing rate
- indexMetric:=newMetricItem("indexing_rate", 1, OperationGroupKey)
- indexMetric.AddAxi("indexing rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "indexing_rate",
- Field: "payload.elasticsearch.node_stats.indices.indexing.index_total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: indexMetric,
- FormatType: "num",
- Units: "doc/s",
- })
-
- indexingBytesMetric := newMetricItem("indexing_bytes", 2, OperationGroupKey)
- indexingBytesMetric.AddAxi("Indexing bytes","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
- nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
- Key: "indexing_bytes",
- Field: "payload.elasticsearch.node_stats.indices.store.size_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: indexingBytesMetric,
- FormatType: "bytes",
- Units: "bytes/s",
- })
-
- // indexing latency
- indexLatencyMetric:=newMetricItem("indexing_latency", 1, LatencyGroupKey)
- indexLatencyMetric.AddAxi("indexing latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "indexing_latency",
- Field: "payload.elasticsearch.node_stats.indices.indexing.index_time_in_millis",
- Field2: "payload.elasticsearch.node_stats.indices.indexing.index_total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: indexLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
-
- queryMetric:=newMetricItem("query_rate", 2, OperationGroupKey)
- queryMetric.AddAxi("query rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "query_rate",
- Field: "payload.elasticsearch.node_stats.indices.search.query_total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: queryMetric,
- FormatType: "num",
- Units: "requests/s",
- })
-
- // query latency
- queryLatencyMetric:=newMetricItem("query_latency", 2, LatencyGroupKey)
- queryLatencyMetric.AddAxi("query latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "query_latency",
- Field: "payload.elasticsearch.node_stats.indices.search.query_time_in_millis",
- Field2: "payload.elasticsearch.node_stats.indices.search.query_total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: queryLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
-
- fetchMetric:=newMetricItem("fetch_rate", 3, OperationGroupKey)
- fetchMetric.AddAxi("fetch rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "fetch_rate",
- Field: "payload.elasticsearch.node_stats.indices.search.fetch_total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: fetchMetric,
- FormatType: "num",
- Units: "requests/s",
- })
- scrollMetric:=newMetricItem("scroll_rate", 4, OperationGroupKey)
- scrollMetric.AddAxi("scroll rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "scroll_rate",
- Field: "payload.elasticsearch.node_stats.indices.search.scroll_total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: scrollMetric,
- FormatType: "num",
- Units: "requests/s",
- })
-
- refreshMetric:=newMetricItem("refresh_rate", 5, OperationGroupKey)
- refreshMetric.AddAxi("refresh rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "refresh_rate",
- Field: "payload.elasticsearch.node_stats.indices.refresh.total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: refreshMetric,
- FormatType: "num",
- Units: "requests/s",
- })
- flushMetric:=newMetricItem("flush_rate", 6, OperationGroupKey)
- flushMetric.AddAxi("flush rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "flush_rate",
- Field: "payload.elasticsearch.node_stats.indices.flush.total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: flushMetric,
- FormatType: "num",
- Units: "requests/s",
- })
- mergeMetric:=newMetricItem("merges_rate", 7, OperationGroupKey)
- mergeMetric.AddAxi("merges rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "merges_rate",
- Field: "payload.elasticsearch.node_stats.indices.merges.total",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: mergeMetric,
- FormatType: "num",
- Units: "requests/s",
- })
-
- // fetch latency
- fetchLatencyMetric:=newMetricItem("fetch_latency", 3, LatencyGroupKey)
- fetchLatencyMetric.AddAxi("fetch latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "fetch_latency",
- Field: "payload.elasticsearch.node_stats.indices.search.fetch_time_in_millis",
- Field2: "payload.elasticsearch.node_stats.indices.search.fetch_total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: fetchLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
- // scroll latency
- scrollLatencyMetric:=newMetricItem("scroll_latency", 4, LatencyGroupKey)
- scrollLatencyMetric.AddAxi("scroll latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "scroll_latency",
- Field: "payload.elasticsearch.node_stats.indices.search.scroll_time_in_millis",
- Field2: "payload.elasticsearch.node_stats.indices.search.scroll_total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: scrollLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
-
- // merge latency
- mergeLatencyMetric:=newMetricItem("merge_latency", 7, LatencyGroupKey)
- mergeLatencyMetric.AddAxi("merge latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "merge_latency",
- Field: "payload.elasticsearch.node_stats.indices.merges.total_time_in_millis",
- Field2: "payload.elasticsearch.node_stats.indices.merges.total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: mergeLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
-
- // refresh latency
- refreshLatencyMetric:=newMetricItem("refresh_latency", 5, LatencyGroupKey)
- refreshLatencyMetric.AddAxi("refresh latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "refresh_latency",
- Field: "payload.elasticsearch.node_stats.indices.refresh.total_time_in_millis",
- Field2: "payload.elasticsearch.node_stats.indices.refresh.total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: refreshLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
- // flush latency
- flushLatencyMetric:=newMetricItem("flush_latency", 6, LatencyGroupKey)
- flushLatencyMetric.AddAxi("flush latency","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "flush_latency",
- Field: "payload.elasticsearch.node_stats.indices.flush.total_time_in_millis",
- Field2: "payload.elasticsearch.node_stats.indices.flush.total",
- Calc: func(value, value2 float64) float64 {
- return value/value2
- },
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: flushLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
- // query cache memory usage
- queryCacheMetric:=newMetricItem("query_cache", 1, CacheGroupKey)
- queryCacheMetric.AddAxi("query cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "query_cache",
- Field: "payload.elasticsearch.node_stats.indices.query_cache.memory_size_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: queryCacheMetric,
- FormatType: "bytes",
- Units: "",
- })
- // request cache memory usage
- requestCacheMetric:=newMetricItem("request_cache", 2, CacheGroupKey)
- requestCacheMetric.AddAxi("request cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "request_cache",
- Field: "payload.elasticsearch.node_stats.indices.request_cache.memory_size_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: requestCacheMetric,
- FormatType: "bytes",
- Units: "",
- })
- // Request Cache Hit
- requestCacheHitMetric:=newMetricItem("request_cache_hit", 6, CacheGroupKey)
- requestCacheHitMetric.AddAxi("request cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "request_cache_hit",
- Field: "payload.elasticsearch.node_stats.indices.request_cache.hit_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: requestCacheHitMetric,
- FormatType: "num",
- Units: "hits",
- })
- // Request Cache Miss
- requestCacheMissMetric:=newMetricItem("request_cache_miss", 8, CacheGroupKey)
- requestCacheMissMetric.AddAxi("request cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "request_cache_miss",
- Field: "payload.elasticsearch.node_stats.indices.request_cache.miss_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: requestCacheMissMetric,
- FormatType: "num",
- Units: "misses",
- })
- // Query Cache Count
- queryCacheCountMetric:=newMetricItem("query_cache_count", 4, CacheGroupKey)
- queryCacheCountMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "query_cache_count",
- Field: "payload.elasticsearch.node_stats.indices.query_cache.cache_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: queryCacheCountMetric,
- FormatType: "num",
- Units: "",
- })
- // Query Cache Miss
- queryCacheHitMetric:=newMetricItem("query_cache_hit", 5, CacheGroupKey)
- queryCacheHitMetric.AddAxi("query cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "query_cache_hit",
- Field: "payload.elasticsearch.node_stats.indices.query_cache.hit_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: queryCacheHitMetric,
- FormatType: "num",
- Units: "hits",
- })
-
- //// Query Cache evictions
- //queryCacheEvictionsMetric:=newMetricItem("query_cache_evictions", 5, CacheGroupKey)
- //queryCacheEvictionsMetric.AddAxi("query cache evictions","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- //nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- // Key: "query_cache_evictions",
- // Field: "payload.elasticsearch.node_stats.indices.query_cache.evictions",
- // ID: util.GetUUID(),
- // IsDerivative: true,
- // MetricItem: queryCacheEvictionsMetric,
- // FormatType: "num",
- // Units: "evictions",
- //})
-
- // Query Cache Miss
- queryCacheMissMetric:=newMetricItem("query_cache_miss", 7, CacheGroupKey)
- queryCacheMissMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "query_cache_miss",
- Field: "payload.elasticsearch.node_stats.indices.query_cache.miss_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: queryCacheMissMetric,
- FormatType: "num",
- Units: "misses",
- })
-
- // fielddata memory usage
- fieldDataCacheMetric:=newMetricItem("fielddata_cache", 3, CacheGroupKey)
- fieldDataCacheMetric.AddAxi("FieldData Cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "fielddata_cache",
- Field: "payload.elasticsearch.node_stats.indices.fielddata.memory_size_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: fieldDataCacheMetric,
- FormatType: "bytes",
- Units: "",
- })
-
- // HTTP open connections
- httpActiveMetric:=newMetricItem("http_connect_num", 12, HttpGroupKey)
- httpActiveMetric.AddAxi("http connect number","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "http_connect_num",
- Field: "payload.elasticsearch.node_stats.http.current_open",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: httpActiveMetric,
- FormatType: "num",
- Units: "conns",
- })
- // HTTP connection open rate
- httpRateMetric:=newMetricItem("http_rate", 12, HttpGroupKey)
- httpRateMetric.AddAxi("http rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "http_rate",
- Field: "payload.elasticsearch.node_stats.http.total_opened",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: httpRateMetric,
- FormatType: "num",
- Units: "conns/s",
- })
-
- // segment count
- segmentCountMetric:=newMetricItem("segment_count", 15, StorageGroupKey)
- segmentCountMetric.AddAxi("segment count","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "segment_count",
- Field: "payload.elasticsearch.node_stats.indices.segments.count",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: segmentCountMetric,
- FormatType: "num",
- Units: "",
- })
-
- // segment memory
- segmentMemoryMetric:=newMetricItem("segment_memory", 16, MemoryGroupKey)
- segmentMemoryMetric.AddAxi("segment memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "segment_memory",
- Field: "payload.elasticsearch.node_stats.indices.segments.memory_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: segmentMemoryMetric,
- FormatType: "bytes",
- Units: "",
- })
- // segment stored fields memory
- segmentStoredFieldsMemoryMetric:=newMetricItem("segment_stored_fields_memory", 16, MemoryGroupKey)
- segmentStoredFieldsMemoryMetric.AddAxi("segment stored fields memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "segment_stored_fields_memory",
- Field: "payload.elasticsearch.node_stats.indices.segments.stored_fields_memory_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: segmentStoredFieldsMemoryMetric,
- FormatType: "bytes",
- Units: "",
- })
- // segment terms fields memory
- segmentTermsMemoryMetric:=newMetricItem("segment_terms_memory", 16, MemoryGroupKey)
- segmentTermsMemoryMetric.AddAxi("segment terms memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "segment_terms_memory",
- Field: "payload.elasticsearch.node_stats.indices.segments.terms_memory_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: segmentTermsMemoryMetric,
- FormatType: "bytes",
- Units: "",
- })
- // segment doc values memory
- segmentDocValuesMemoryMetric:=newMetricItem("segment_doc_values_memory", 16, MemoryGroupKey)
- segmentDocValuesMemoryMetric.AddAxi("segment doc values memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "segment_doc_values_memory",
- Field: "payload.elasticsearch.node_stats.indices.segments.doc_values_memory_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: segmentDocValuesMemoryMetric,
- FormatType: "bytes",
- Units: "",
- })
- // segment index writer memory
- segmentIndexWriterMemoryMetric:=newMetricItem("segment_index_writer_memory", 16, MemoryGroupKey)
- segmentIndexWriterMemoryMetric.AddAxi("segment doc values memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "segment_index_writer_memory",
- Field: "payload.elasticsearch.node_stats.indices.segments.index_writer_memory_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: segmentIndexWriterMemoryMetric,
- FormatType: "bytes",
- Units: "",
- })
- // segment term vectors memory
- segmentTermVectorsMemoryMetric:=newMetricItem("segment_term_vectors_memory", 16, MemoryGroupKey)
- segmentTermVectorsMemoryMetric.AddAxi("segment term vectors memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "segment_term_vectors_memory",
- Field: "payload.elasticsearch.node_stats.indices.segments.term_vectors_memory_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: segmentTermVectorsMemoryMetric,
- FormatType: "bytes",
- Units: "",
- })
-
- // document count
- docsCountMetric:=newMetricItem("docs_count", 17, DocumentGroupKey)
- docsCountMetric.AddAxi("docs count","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "docs_count",
- Field: "payload.elasticsearch.node_stats.indices.docs.count",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: docsCountMetric,
- FormatType: "num",
- Units: "",
- })
- // deleted document count
- docsDeletedMetric:=newMetricItem("docs_deleted", 17, DocumentGroupKey)
- docsDeletedMetric.AddAxi("docs deleted","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "docs_deleted",
- Field: "payload.elasticsearch.node_stats.indices.docs.deleted",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: docsDeletedMetric,
- FormatType: "num",
- Units: "",
- })
-
- // index store size
- indexStoreMetric:=newMetricItem("index_storage", 18, StorageGroupKey)
- indexStoreMetric.AddAxi("indices storage","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "index_storage",
- Field: "payload.elasticsearch.node_stats.indices.store.size_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: indexStoreMetric,
- FormatType: "bytes",
- Units: "",
- })
-
- // jvm used heap
- jvmUsedPercentMetric:=newMetricItem("jvm_heap_used_percent", 1, JVMGroupKey)
- jvmUsedPercentMetric.AddAxi("JVM heap used percent","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "jvm_heap_used_percent",
- Field: "payload.elasticsearch.node_stats.jvm.mem.heap_used_percent",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: jvmUsedPercentMetric,
- FormatType: "num",
- Units: "%",
- })
- //JVM mem Young pools used
- youngPoolsUsedMetric:=newMetricItem("jvm_mem_young_used", 2, JVMGroupKey)
- youngPoolsUsedMetric.AddAxi("Mem Pools Young Used","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "jvm_mem_young_used",
- Field: "payload.elasticsearch.node_stats.jvm.mem.pools.young.used_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: youngPoolsUsedMetric,
- FormatType: "bytes",
- Units: "",
- })
- //JVM mem Young pools peak used
- youngPoolsUsedPeakMetric:=newMetricItem("jvm_mem_young_peak_used", 2, JVMGroupKey)
- youngPoolsUsedPeakMetric.AddAxi("Mem Pools Young Peak Used","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "jvm_mem_young_peak_used",
- Field: "payload.elasticsearch.node_stats.jvm.mem.pools.young.peak_used_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: youngPoolsUsedPeakMetric,
- FormatType: "bytes",
- Units: "",
- })
-
- //JVM mem old pools used
- oldPoolsUsedMetric:=newMetricItem("jvm_mem_old_used", 3, JVMGroupKey)
- oldPoolsUsedMetric.AddAxi("Mem Pools Old Used","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "jvm_mem_old_used",
- Field: "payload.elasticsearch.node_stats.jvm.mem.pools.old.used_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: oldPoolsUsedMetric,
- FormatType: "bytes",
- Units: "",
- })
- //JVM mem old pools peak used
- oldPoolsUsedPeakMetric:=newMetricItem("jvm_mem_old_peak_used", 3, JVMGroupKey)
- oldPoolsUsedPeakMetric.AddAxi("Mem Pools Old Peak Used","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "jvm_mem_old_peak_used",
- Field: "payload.elasticsearch.node_stats.jvm.mem.pools.old.peak_used_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: oldPoolsUsedPeakMetric,
- FormatType: "bytes",
- Units: "",
- })
-
- //JVM used heap
- heapUsedMetric:=newMetricItem("jvm_used_heap", 1, JVMGroupKey)
- heapUsedMetric.AddAxi("JVM Used Heap","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "jvm_used_heap",
- Field: "payload.elasticsearch.node_stats.jvm.mem.heap_used_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: heapUsedMetric,
- FormatType: "bytes",
- Units: "",
- })
- //JVM Young GC Rate
- gcYoungRateMetric:=newMetricItem("jvm_young_gc_rate", 2, JVMGroupKey)
- gcYoungRateMetric.AddAxi("JVM Young GC Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "jvm_young_gc_rate",
- Field: "payload.elasticsearch.node_stats.jvm.gc.collectors.young.collection_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: gcYoungRateMetric,
- FormatType: "num",
- Units: "times/s",
- })
- //JVM Young GC Latency
- gcYoungLatencyMetric:=newMetricItem("jvm_young_gc_latency", 2, JVMGroupKey)
- gcYoungLatencyMetric.AddAxi("JVM Young GC Time","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "jvm_young_gc_latency",
- Field: "payload.elasticsearch.node_stats.jvm.gc.collectors.young.collection_time_in_millis",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: gcYoungLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
-
- //JVM old GC Rate
- gcOldRateMetric:=newMetricItem("jvm_old_gc_rate", 3, JVMGroupKey)
- gcOldRateMetric.AddAxi("JVM Old GC Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "jvm_old_gc_rate",
- Field: "payload.elasticsearch.node_stats.jvm.gc.collectors.old.collection_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: gcOldRateMetric,
- FormatType: "num",
- Units: "times/s",
- })
- //JVM old GC Latency
- gcOldLatencyMetric:=newMetricItem("jvm_old_gc_latency", 3, JVMGroupKey)
- gcOldLatencyMetric.AddAxi("JVM Old GC Time","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "jvm_old_gc_latency",
- Field: "payload.elasticsearch.node_stats.jvm.gc.collectors.old.collection_time_in_millis",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: gcOldLatencyMetric,
- FormatType: "num",
- Units: "ms",
- })
- // transport send rate
- transTxRateMetric:=newMetricItem("transport_tx_rate", 19, TransportGroupKey)
- transTxRateMetric.AddAxi("Transport Send Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "transport_tx_rate",
- Field: "payload.elasticsearch.node_stats.transport.tx_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: transTxRateMetric,
- FormatType: "num",
- Units: "times/s",
- })
- // transport receive rate
- transRxRateMetric:=newMetricItem("transport_rx_rate", 19, TransportGroupKey)
- transRxRateMetric.AddAxi("Transport Receive Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "transport_rx_rate",
- Field: "payload.elasticsearch.node_stats.transport.rx_count",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: transRxRateMetric,
- FormatType: "num",
- Units: "times/s",
- })
-
- // transport send bytes
- transTxBytesMetric:=newMetricItem("transport_tx_bytes", 19, TransportGroupKey)
- transTxBytesMetric.AddAxi("Transport Send Bytes","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "transport_tx_bytes",
- Field: "payload.elasticsearch.node_stats.transport.tx_size_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: transTxBytesMetric,
- FormatType: "bytes",
- Units: "s",
- })
- // transport receive bytes
- transRxBytesMetric:=newMetricItem("transport_rx_bytes", 19, TransportGroupKey)
- transRxBytesMetric.AddAxi("Transport Receive Bytes","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "transport_rx_bytes",
- Field: "payload.elasticsearch.node_stats.transport.rx_size_in_bytes",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: transRxBytesMetric,
- FormatType: "bytes",
- Units: "s",
- })
-
- // transport TCP connection count
- tcpNumMetric:=newMetricItem("transport_outbound_connections", 20, TransportGroupKey)
- tcpNumMetric.AddAxi("Transport Outbound Connections","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "transport_outbound_connections",
- Field: "payload.elasticsearch.node_stats.transport.total_outbound_connections",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: tcpNumMetric,
- FormatType: "num",
- Units: "",
- })
-
- //IO total
- totalOperationsMetric:=newMetricItem("total_io_operations", 1, IOGroupKey)
- totalOperationsMetric.AddAxi("Total I/O Operations Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "total_io_operations",
- Field: "payload.elasticsearch.node_stats.fs.io_stats.total.operations",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: totalOperationsMetric,
- FormatType: "num",
- Units: "times/s",
- })
-
- //IO total
- readOperationsMetric:=newMetricItem("total_read_io_operations", 2, IOGroupKey)
- readOperationsMetric.AddAxi("Total Read I/O Operations Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "total_read_io_operations",
- Field: "payload.elasticsearch.node_stats.fs.io_stats.total.read_operations",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: readOperationsMetric,
- FormatType: "num",
- Units: "times/s",
- })
-
- //IO total
- writeOperationsMetric:=newMetricItem("total_write_io_operations", 3, IOGroupKey)
- writeOperationsMetric.AddAxi("Total Write I/O Operations Rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "total_write_io_operations",
- Field: "payload.elasticsearch.node_stats.fs.io_stats.total.write_operations",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: writeOperationsMetric,
- FormatType: "num",
- Units: "times/s",
- })
-
- //scroll context
- openContextMetric:=newMetricItem("scroll_open_contexts", 7, OperationGroupKey)
- writeOperationsMetric.AddAxi("Scroll Open Contexts","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
- nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
- Key: "scroll_open_contexts",
- Field: "payload.elasticsearch.node_stats.indices.search.open_contexts",
- ID: util.GetUUID(),
- MetricItem: openContextMetric,
- FormatType: "num",
- Units: "",
- })
-
- // Circuit Breaker
- parentBreakerMetric := newMetricItem("parent_breaker", 1, CircuitBreakerGroupKey)
- parentBreakerMetric.AddAxi("Parent Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
- Key: "parent_breaker",
- Field: "payload.elasticsearch.node_stats.breakers.parent.tripped",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: parentBreakerMetric,
- FormatType: "num",
- Units: "times/s",
- })
- accountingBreakerMetric := newMetricItem("accounting_breaker", 2, CircuitBreakerGroupKey)
- accountingBreakerMetric.AddAxi("Accounting Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
- Key: "accounting_breaker",
- Field: "payload.elasticsearch.node_stats.breakers.accounting.tripped",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: accountingBreakerMetric,
- FormatType: "num",
- Units: "times/s",
- })
- fielddataBreakerMetric := newMetricItem("fielddata_breaker", 3, CircuitBreakerGroupKey)
- fielddataBreakerMetric.AddAxi("Fielddata Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
- Key: "fielddata_breaker",
- Field: "payload.elasticsearch.node_stats.breakers.fielddata.tripped",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: fielddataBreakerMetric,
- FormatType: "num",
- Units: "times/s",
- })
- requestBreakerMetric := newMetricItem("request_breaker", 4, CircuitBreakerGroupKey)
- requestBreakerMetric.AddAxi("Request Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
- Key: "request_breaker",
- Field: "payload.elasticsearch.node_stats.breakers.request.tripped",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: requestBreakerMetric,
- FormatType: "num",
- Units: "times/s",
- })
- inFlightRequestBreakerMetric := newMetricItem("in_flight_requests_breaker", 5, CircuitBreakerGroupKey)
- inFlightRequestBreakerMetric.AddAxi("In Flight Requests Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
- Key: "in_flight_requests_breaker",
- Field: "payload.elasticsearch.node_stats.breakers.in_flight_requests.tripped",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: inFlightRequestBreakerMetric,
- FormatType: "num",
- Units: "times/s",
- })
- modelInferenceBreakerMetric := newMetricItem("model_inference_breaker", 6, CircuitBreakerGroupKey)
- modelInferenceBreakerMetric.AddAxi("Model Inference Breaker","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
- nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
- Key: "model_inference_breaker",
- Field: "payload.elasticsearch.node_stats.breakers.model_inference.tripped",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: modelInferenceBreakerMetric,
- FormatType: "num",
- Units: "times/s",
- })
-
- aggs := generateGroupAggs(nodeMetricItems)
- intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
- if err != nil {
- log.Error(err)
- panic(err)
- }
-
- query["size"] = 0
- query["aggs"] = util.MapStr{
- "group_by_level": util.MapStr{
- "terms": util.MapStr{
- "field": "metadata.labels.transport_address",
- "size": top,
- },
- "aggs": util.MapStr{
- "dates": util.MapStr{
- "date_histogram": util.MapStr{
- "field": "timestamp",
- intervalField: bucketSizeStr,
- },
- "aggs": aggs,
- },
- },
- },
- }
- return h.getMetrics(query, nodeMetricItems, bucketSize), nil
-
-}
-
-func (h *APIHandler) getTopNodeName(clusterID string, top int, lastMinutes int) ([]string, error){
- ver := h.Client().GetVersion()
- cr, _ := util.VersionCompare(ver.Number, "6.1")
- if (ver.Distribution == "" || ver.Distribution == elastic.Elasticsearch) && cr == -1 {
- return nil, nil
- }
- var (
- now = time.Now()
- max = now.UnixNano()/1e6
- min = now.Add(-time.Duration(lastMinutes) * time.Minute).UnixNano()/1e6
- bucketSizeStr = "60s"
- )
- intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
- if err != nil {
- return nil, err
- }
-
- query := util.MapStr{
- "size": 0,
- "query": util.MapStr{
- "bool": util.MapStr{
- "must": []util.MapStr{
- {
- "term": util.MapStr{
- "metadata.category": util.MapStr{
- "value": "elasticsearch",
- },
- },
- },
- {
- "term": util.MapStr{
- "metadata.name": util.MapStr{
- "value": "node_stats",
- },
- },
- },
- {
- "term": util.MapStr{
- "metadata.labels.cluster_id": util.MapStr{
- "value": clusterID,
- },
- },
- },
- },
- "filter": []util.MapStr{
- {
- "range": util.MapStr{
- "timestamp": util.MapStr{
- "gte": min,
- "lte": max,
- },
- },
- },
- },
- },
- },
- "aggs": util.MapStr{
- "group_by_index": util.MapStr{
- "terms": util.MapStr{
- "field": "metadata.labels.transport_address",
- "size": 10000,
- },
- "aggs": util.MapStr{
- "max_qps": util.MapStr{
- "max_bucket": util.MapStr{
- "buckets_path": "dates>search_qps",
- },
- },
- "max_qps_bucket_sort": util.MapStr{
- "bucket_sort": util.MapStr{
- "sort": []util.MapStr{
- {"max_qps": util.MapStr{"order": "desc"}}},
- "size": top,
- },
- },
- "dates": util.MapStr{
- "date_histogram": util.MapStr{
- "field": "timestamp",
- intervalField: bucketSizeStr,
- },
- "aggs": util.MapStr{
- "search_query_total": util.MapStr{
- "max": util.MapStr{
- "field": "payload.elasticsearch.node_stats.indices.search.query_total",
- },
- },
- "search_qps": util.MapStr{
- "derivative": util.MapStr{
- "buckets_path": "search_query_total",
- },
- },
- },
- },
- },
- },
- "group_by_index1": util.MapStr{
- "terms": util.MapStr{
- "field": "metadata.labels.transport_address",
- "size": 10000,
- },
- "aggs": util.MapStr{
- "max_qps": util.MapStr{
- "max_bucket": util.MapStr{
- "buckets_path": "dates>index_qps",
- },
- },
- "max_qps_bucket_sort": util.MapStr{
- "bucket_sort": util.MapStr{
- "sort": []util.MapStr{
- {"max_qps": util.MapStr{"order": "desc"}},
- },
- "size": top,
- },
- },
- "dates": util.MapStr{
- "date_histogram": util.MapStr{
- "field": "timestamp",
- intervalField: bucketSizeStr,
- },
- "aggs": util.MapStr{
- "index_total": util.MapStr{
- "max": util.MapStr{
- "field": "payload.elasticsearch.node_stats.indices.indexing.index_total",
- },
- },
- "index_qps": util.MapStr{
- "derivative": util.MapStr{
- "buckets_path": "index_total",
- },
- },
- },
- },
- },
- },
- },
- }
- response,err:=elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(),util.MustToJSONBytes(query))
- if err!=nil{
- log.Error(err)
- return nil, err
- }
- var maxQpsKVS = map[string] float64{}
- for _, agg := range response.Aggregations {
- for _, bk := range agg.Buckets {
- key := bk["key"].(string)
- if maxQps, ok := bk["max_qps"].(map[string]interface{}); ok {
- val := maxQps["value"].(float64)
- if _, ok = maxQpsKVS[key] ; ok {
- maxQpsKVS[key] = maxQpsKVS[key] + val
- }else{
- maxQpsKVS[key] = val
- }
- }
- }
- }
- var (
- qpsValues TopTermOrder
- )
- for k, v := range maxQpsKVS {
- qpsValues = append(qpsValues, TopTerm{
- Key: k,
- Value: v,
- })
- }
- sort.Sort(qpsValues)
- var length = top
- if top > len(qpsValues) {
- length = len(qpsValues)
- }
- nodeNames := []string{}
- for i := 0; i < length; i++ {
- nodeNames = append(nodeNames, qpsValues[i].Key)
- }
- return nodeNames, nil
-}
diff --git a/modules/elastic/api/v1/threadpool_metrics.go b/modules/elastic/api/v1/threadpool_metrics.go
deleted file mode 100644
--- a/modules/elastic/api/v1/threadpool_metrics.go
+++ /dev/null
-// Copyright (C) INFINI Labs & INFINI LIMITED.
-//
-// The INFINI Console is offered under the GNU Affero General Public License v3.0
-// and as commercial software.
-//
-// For commercial licensing, contact us at:
-// - Website: infinilabs.com
-// - Email: hello@infini.ltd
-//
-// Open Source licensed under AGPL V3:
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see &lt;https://www.gnu.org/licenses/&gt;.
-
-package v1
-
-import (
- "fmt"
- log "github.com/cihub/seelog"
- "infini.sh/framework/core/elastic"
- "infini.sh/framework/core/global"
- "infini.sh/framework/core/util"
- "infini.sh/framework/modules/elastic/common"
- "strings"
-)
-
-const (
- ThreadPoolGetGroupKey = "thread_pool_get"
- ThreadPoolSearchGroupKey = "thread_pool_search"
- ThreadPoolFlushGroupKey = "thread_pool_flush"
- ThreadPoolRefreshGroupKey = "thread_pool_refresh"
- ThreadPoolWriteGroupKey = "thread_pool_write"
- ThreadPoolForceMergeGroupKey = "thread_pool_force_merge"
- ThreadPoolIndexGroupKey = "thread_pool_index"
- ThreadPoolBulkGroupKey = "thread_pool_bulk"
-)
-
-func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min, max int64, nodeName string, top int) map[string]*common.MetricItem{
- bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
- var must = []util.MapStr{
- {
- "term":util.MapStr{
- "metadata.labels.cluster_id":util.MapStr{
- "value": clusterID,
- },
- },
- },
- {
- "term": util.MapStr{
- "metadata.category": util.MapStr{
- "value": "elasticsearch",
- },
- },
- },
- {
- "term": util.MapStr{
- "metadata.name": util.MapStr{
- "value": "node_stats",
- },
- },
- },
- }
- var (
- nodeNames []string
- err error
- )
- if nodeName != "" {
- nodeNames = strings.Split(nodeName, ",")
- top = len(nodeNames)
- }else{
- nodeNames, err = h.getTopNodeName(clusterID, top, 15)
- if err != nil {
- log.Error(err)
- }
- }
- if len(nodeNames) > 0 {
- must = append(must, util.MapStr{
- "terms": util.MapStr{
- "metadata.labels.transport_address": nodeNames,
- },
- })
- }
-
- query:=map[string]interface{}{}
- query["query"]=util.MapStr{
- "bool": util.MapStr{
- "must": must,
- "filter": []util.MapStr{
- {
- "range": util.MapStr{
- "timestamp": util.MapStr{
- "gte": min,
- "lte": max,
- },
- },
- },
- },
- },
- }
- searchThreadsMetric := newMetricItem("search_threads", 1, ThreadPoolSearchGroupKey)
- searchThreadsMetric.AddAxi("Search Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems := []GroupMetricItem{
- {
- Key: "search_threads",
- Field: "payload.elasticsearch.node_stats.thread_pool.search.threads",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: searchThreadsMetric,
- FormatType: "num",
- Units: "",
- },
- }
- searchQueueMetric := newMetricItem("search_queue", 1, ThreadPoolSearchGroupKey)
- searchQueueMetric.AddAxi("Search Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "search_queue",
- Field: "payload.elasticsearch.node_stats.thread_pool.search.queue",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: searchQueueMetric,
- FormatType: "num",
- Units: "",
- })
- searchActiveMetric := newMetricItem("search_active", 1, ThreadPoolSearchGroupKey)
- searchActiveMetric.AddAxi("Search Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "search_active",
- Field: "payload.elasticsearch.node_stats.thread_pool.search.active",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: searchActiveMetric,
- FormatType: "num",
- Units: "",
- })
- searchRejectedMetric := newMetricItem("search_rejected", 1, ThreadPoolSearchGroupKey)
- searchRejectedMetric.AddAxi("Search Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "search_rejected",
- Field: "payload.elasticsearch.node_stats.thread_pool.search.rejected",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: searchRejectedMetric,
- FormatType: "num",
- Units: "rejected/s",
- })
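-	// the same quartet (threads, queue, active, rejected) is registered for each pool below; only rejected is charted as a rate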
-
- getThreadsMetric := newMetricItem("get_threads", 1, ThreadPoolGetGroupKey)
- getThreadsMetric.AddAxi("Get Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "get_threads",
- Field: "payload.elasticsearch.node_stats.thread_pool.get.threads",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: getThreadsMetric,
- FormatType: "num",
- Units: "",
- })
- getQueueMetric := newMetricItem("get_queue", 1, ThreadPoolGetGroupKey)
- getQueueMetric.AddAxi("Get Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "get_queue",
- Field: "payload.elasticsearch.node_stats.thread_pool.get.queue",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: getQueueMetric,
- FormatType: "num",
- Units: "",
- })
- getActiveMetric := newMetricItem("get_active", 1, ThreadPoolGetGroupKey)
- getActiveMetric.AddAxi("Get Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "get_active",
- Field: "payload.elasticsearch.node_stats.thread_pool.get.active",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: getActiveMetric,
- FormatType: "num",
- Units: "",
- })
- getRejectedMetric := newMetricItem("get_rejected", 1, ThreadPoolGetGroupKey)
- getRejectedMetric.AddAxi("Get Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "get_rejected",
- Field: "payload.elasticsearch.node_stats.thread_pool.get.rejected",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: getRejectedMetric,
- FormatType: "num",
- Units: "rejected/s",
- })
-
- flushThreadsMetric := newMetricItem("flush_threads", 1, ThreadPoolFlushGroupKey)
- flushThreadsMetric.AddAxi("Flush Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "flush_threads",
- Field: "payload.elasticsearch.node_stats.thread_pool.flush.threads",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: flushThreadsMetric,
- FormatType: "num",
- Units: "",
- })
- flushQueueMetric := newMetricItem("flush_queue", 1, ThreadPoolFlushGroupKey)
- flushQueueMetric.AddAxi("Get Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "flush_queue",
- Field: "payload.elasticsearch.node_stats.thread_pool.flush.queue",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: flushQueueMetric,
- FormatType: "num",
- Units: "",
- })
- flushActiveMetric := newMetricItem("flush_active", 1, ThreadPoolFlushGroupKey)
- flushActiveMetric.AddAxi("Flush Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "flush_active",
- Field: "payload.elasticsearch.node_stats.thread_pool.flush.active",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: flushActiveMetric,
- FormatType: "num",
- Units: "",
- })
- flushRejectedMetric := newMetricItem("flush_rejected", 1, ThreadPoolFlushGroupKey)
- flushRejectedMetric.AddAxi("Flush Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "flush_rejected",
- Field: "payload.elasticsearch.node_stats.thread_pool.flush.rejected",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: flushRejectedMetric,
- FormatType: "num",
- Units: "rejected/s",
- })
-
- majorVersion := elastic.GetMetadata(clusterID).GetMajorVersion()
- ver := elastic.GetClient(clusterID).GetVersion()
-
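-	// clusters before 6.x report dedicated index and bulk thread pools; newer Elasticsearch versions consolidate indexing work into the write pool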
- if (ver.Distribution == "" || ver.Distribution == elastic.Elasticsearch) && majorVersion < 6 {
- indexThreadsMetric := newMetricItem("index_threads", 1, ThreadPoolIndexGroupKey)
- indexThreadsMetric.AddAxi("Index Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "index_threads",
- Field: "payload.elasticsearch.node_stats.thread_pool.index.threads",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: indexThreadsMetric,
- FormatType: "num",
- Units: "",
- })
- indexQueueMetric := newMetricItem("index_queue", 1, ThreadPoolIndexGroupKey)
- indexQueueMetric.AddAxi("Index Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "index_queue",
- Field: "payload.elasticsearch.node_stats.thread_pool.index.queue",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: indexQueueMetric,
- FormatType: "num",
- Units: "",
- })
- indexActiveMetric := newMetricItem("index_active", 1, ThreadPoolIndexGroupKey)
- indexActiveMetric.AddAxi("Index Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "index_active",
- Field: "payload.elasticsearch.node_stats.thread_pool.index.active",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: indexActiveMetric,
- FormatType: "num",
- Units: "",
- })
- indexRejectedMetric := newMetricItem("index_rejected", 1, ThreadPoolIndexGroupKey)
- indexRejectedMetric.AddAxi("Index Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "index_rejected",
- Field: "payload.elasticsearch.node_stats.thread_pool.index.rejected",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: indexRejectedMetric,
- FormatType: "num",
- Units: "rejected/s",
- })
-
- bulkThreadsMetric := newMetricItem("bulk_threads", 1, ThreadPoolBulkGroupKey)
- bulkThreadsMetric.AddAxi("Bulk Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "bulk_threads",
- Field: "payload.elasticsearch.node_stats.thread_pool.bulk.threads",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: bulkThreadsMetric,
- FormatType: "num",
- Units: "",
- })
- bulkQueueMetric := newMetricItem("bulk_queue", 1, ThreadPoolBulkGroupKey)
- bulkQueueMetric.AddAxi("Bulk Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "bulk_queue",
- Field: "payload.elasticsearch.node_stats.thread_pool.bulk.queue",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: bulkQueueMetric,
- FormatType: "num",
- Units: "",
- })
- bulkActiveMetric := newMetricItem("bulk_active", 1, ThreadPoolBulkGroupKey)
- bulkActiveMetric.AddAxi("Bulk Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "bulk_active",
- Field: "payload.elasticsearch.node_stats.thread_pool.bulk.active",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: bulkActiveMetric,
- FormatType: "num",
- Units: "",
- })
- bulkRejectedMetric := newMetricItem("bulk_rejected", 1, ThreadPoolBulkGroupKey)
- bulkRejectedMetric.AddAxi("Bulk Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "bulk_rejected",
- Field: "payload.elasticsearch.node_stats.thread_pool.bulk.rejected",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: bulkRejectedMetric,
- FormatType: "num",
- Units: "rejected/s",
- })
- }else {
- writeThreadsMetric := newMetricItem("write_threads", 1, ThreadPoolWriteGroupKey)
- writeThreadsMetric.AddAxi("Write Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "write_threads",
- Field: "payload.elasticsearch.node_stats.thread_pool.write.threads",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: writeThreadsMetric,
- FormatType: "num",
- Units: "",
- })
- writeQueueMetric := newMetricItem("write_queue", 1, ThreadPoolWriteGroupKey)
- writeQueueMetric.AddAxi("Write Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "write_queue",
- Field: "payload.elasticsearch.node_stats.thread_pool.write.queue",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: writeQueueMetric,
- FormatType: "num",
- Units: "",
- })
- writeActiveMetric := newMetricItem("write_active", 1, ThreadPoolWriteGroupKey)
- writeActiveMetric.AddAxi("Write Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "write_active",
- Field: "payload.elasticsearch.node_stats.thread_pool.write.active",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: writeActiveMetric,
- FormatType: "num",
- Units: "",
- })
- writeRejectedMetric := newMetricItem("write_rejected", 1, ThreadPoolWriteGroupKey)
- writeRejectedMetric.AddAxi("Write Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "write_rejected",
- Field: "payload.elasticsearch.node_stats.thread_pool.write.rejected",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: writeRejectedMetric,
- FormatType: "num",
- Units: "rejected/s",
- })
- }
- refreshThreadsMetric := newMetricItem("refresh_threads", 1, ThreadPoolRefreshGroupKey)
- refreshThreadsMetric.AddAxi("Refresh Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "refresh_threads",
- Field: "payload.elasticsearch.node_stats.thread_pool.refresh.threads",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: refreshThreadsMetric,
- FormatType: "num",
- Units: "",
- })
- refreshQueueMetric := newMetricItem("refresh_queue", 1, ThreadPoolRefreshGroupKey)
- refreshQueueMetric.AddAxi("Refresh Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "refresh_queue",
- Field: "payload.elasticsearch.node_stats.thread_pool.refresh.queue",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: refreshQueueMetric,
- FormatType: "num",
- Units: "",
- })
- refreshActiveMetric := newMetricItem("refresh_active", 1, ThreadPoolRefreshGroupKey)
- refreshActiveMetric.AddAxi("Refresh Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "refresh_active",
- Field: "payload.elasticsearch.node_stats.thread_pool.refresh.active",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: refreshActiveMetric,
- FormatType: "num",
- Units: "",
- })
- refreshRejectedMetric := newMetricItem("refresh_rejected", 1, ThreadPoolRefreshGroupKey)
- refreshRejectedMetric.AddAxi("Refresh Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "refresh_rejected",
- Field: "payload.elasticsearch.node_stats.thread_pool.refresh.rejected",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: refreshRejectedMetric,
- FormatType: "num",
- Units: "rejected/s",
- })
- forceMergeThreadsMetric := newMetricItem("force_merge_threads", 1, ThreadPoolForceMergeGroupKey)
- forceMergeThreadsMetric.AddAxi("Force Merge Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "force_merge_threads",
- Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.threads",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: forceMergeThreadsMetric,
- FormatType: "num",
- Units: "",
- })
- forceMergeQueueMetric := newMetricItem("force_merge_queue", 1, ThreadPoolForceMergeGroupKey)
- forceMergeQueueMetric.AddAxi("Force Merge Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "force_merge_queue",
- Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.queue",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: forceMergeQueueMetric,
- FormatType: "num",
- Units: "",
- })
- forceMergeActiveMetric := newMetricItem("force_merge_active", 1, ThreadPoolForceMergeGroupKey)
- forceMergeActiveMetric.AddAxi("Force Merge Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "force_merge_active",
- Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.active",
- ID: util.GetUUID(),
- IsDerivative: false,
- MetricItem: forceMergeActiveMetric,
- FormatType: "num",
- Units: "",
- })
- forceMergeRejectedMetric := newMetricItem("force_merge_rejected", 1, ThreadPoolForceMergeGroupKey)
- forceMergeRejectedMetric.AddAxi("Force Merge Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
-
- queueMetricItems = append(queueMetricItems, GroupMetricItem{
- Key: "force_merge_rejected",
- Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.rejected",
- ID: util.GetUUID(),
- IsDerivative: true,
- MetricItem: forceMergeRejectedMetric,
- FormatType: "num",
- Units: "rejected/s",
- })
-	// build one max aggregation per metric; metrics flagged IsDerivative (the rejected counters) also get a derivative pipeline that turns cumulative totals into per-second rates
- aggs:=map[string]interface{}{}
-
- for _,metricItem:=range queueMetricItems{
- aggs[metricItem.ID]=util.MapStr{
- "max":util.MapStr{
- "field": metricItem.Field,
- },
- }
- if metricItem.Field2 != "" {
- aggs[metricItem.ID + "_field2"]=util.MapStr{
- "max":util.MapStr{
- "field": metricItem.Field2,
- },
- }
- }
-
- if metricItem.IsDerivative{
- aggs[metricItem.ID+"_deriv"]=util.MapStr{
- "derivative":util.MapStr{
- "buckets_path": metricItem.ID,
- },
- }
- if metricItem.Field2 != "" {
- aggs[metricItem.ID + "_field2_deriv"]=util.MapStr{
- "derivative":util.MapStr{
- "buckets_path": metricItem.ID + "_field2",
- },
- }
- }
- }
- }
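-	// the date_histogram interval key name differs across Elasticsearch versions ("interval" vs "fixed_interval"), so resolve it from the system cluster first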
- intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
- if err != nil {
- log.Error(err)
- panic(err)
- }
-
- query["size"]=0
- query["aggs"]= util.MapStr{
- "group_by_level": util.MapStr{
- "terms": util.MapStr{
- "field": "metadata.labels.transport_address",
- "size": top,
- },
- "aggs": util.MapStr{
- "dates": util.MapStr{
- "date_histogram":util.MapStr{
- "field": "timestamp",
- intervalField: bucketSizeStr,
- },
- "aggs":aggs,
- },
- },
- },
- }
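-	// two-level aggregation: a terms bucket per node (transport_address, limited to top N) with a date histogram nested inside, feeding the per-metric aggs built above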
- return h.getMetrics(query, queueMetricItems, bucketSize)
-}