Merge pull request 'monitoring' (#63) from zhangweiii/pcm-coordinator:master into master

Former-commit-id: 416bd0e54c6dd7c7ed35affa6f2c13ceda4ee1e3
This commit is contained in:
zhangweiii 2024-03-21 16:28:22 +08:00
commit d5dc85dca5
5 changed files with 41 additions and 8 deletions

View File

@ -23,6 +23,7 @@ type (
CentersIndex []CenterIndex `json:"centersIndex"`
}
CenterIndex {
id int64 `json:"id"`
name string `json:"name"`
cpu string `json:"cpu"`
memory string `json:"memory"`

View File

@ -4,6 +4,7 @@ import (
"context"
"github.com/prometheus/common/model"
"gitlink.org.cn/JointCloud/pcm-coordinator/pkg/tracker"
"strconv"
"gitlink.org.cn/JointCloud/pcm-coordinator/api/internal/svc"
"gitlink.org.cn/JointCloud/pcm-coordinator/api/internal/types"
@ -27,7 +28,7 @@ func NewCenterResourcesLogic(ctx context.Context, svcCtx *svc.ServiceContext) *C
func (l *CenterResourcesLogic) CenterResources() (resp *types.CenterResourcesResp, err error) {
resp = &types.CenterResourcesResp{}
rawData, err := l.svcCtx.PromClient.GetRawData("resource_top3", tracker.ClusterOption{})
rawData, err := l.svcCtx.PromClient.GetRawData("center_top3", tracker.AdapterOption{})
if err != nil {
return nil, err
}
@ -35,26 +36,30 @@ func (l *CenterResourcesLogic) CenterResources() (resp *types.CenterResourcesRes
data := rawData.(model.Vector)
for _, d := range data {
for _, v := range d.Metric {
centersIndex = append(centersIndex, &types.CenterIndex{Name: string(v)})
num, err := strconv.ParseInt(string(v), 10, 64)
if err != nil {
return nil, err
}
centersIndex = append(centersIndex, &types.CenterIndex{Id: num})
}
}
for _, centerIndex := range centersIndex {
// Query the types of resource centers
//l.svcCtx.DbEngin.Raw().Scan(&centerIndex.CenterType)
cpuRawData, err := l.svcCtx.PromClient.GetRawData("cluster_cpu_usage", tracker.ClusterOption{ClusterName: centerIndex.Name})
l.svcCtx.DbEngin.Raw("select name,type as CenterType from t_adapter where id = ?", centerIndex.Id).Scan(&centerIndex)
cpuRawData, err := l.svcCtx.PromClient.GetRawData("center_cpu_usage", tracker.AdapterOption{AdapterId: centerIndex.Id})
cpuData := cpuRawData.(model.Vector)
if err != nil {
return nil, err
}
centerIndex.Cpu = cpuData[0].Value.String()
memoryRawData, err := l.svcCtx.PromClient.GetRawData("cluster_memory_usage", tracker.ClusterOption{ClusterName: centerIndex.Name})
memoryRawData, err := l.svcCtx.PromClient.GetRawData("center_memory_usage", tracker.AdapterOption{AdapterId: centerIndex.Id})
if err != nil {
return nil, err
}
memoryData := memoryRawData.(model.Vector)
centerIndex.Memory = memoryData[0].Value.String()
diskRawData, err := l.svcCtx.PromClient.GetRawData("cluster_disk_usage", tracker.ClusterOption{ClusterName: centerIndex.Name})
diskRawData, err := l.svcCtx.PromClient.GetRawData("center_disk_usage", tracker.AdapterOption{AdapterId: centerIndex.Id})
if err != nil {
return nil, err
}

View File

@ -17,6 +17,7 @@ type CenterResourcesResp struct {
}
type CenterIndex struct {
Id int64 `json:"id"`
Name string `json:"name"`
Cpu string `json:"cpu"`
Memory string `json:"memory"`

View File

@ -30,7 +30,10 @@ var promQLTemplates = map[string]string{
"cluster_cpu_usage": "sum by (cluster_name)(cluster_cpu_usage{$1})",
"cluster_memory_usage": "sum by (cluster_name)(cluster_memory_usage{$1})",
"cluster_disk_usage": "sum by (cluster_name)(cluster_disk_usage{$1})",
"resource_top3": "topk(3,sum by (cluster_name)(cluster_cpu_usage +cluster_memory_usage +cluster_disk_usage)/3)",
"center_cpu_usage": "(sum by (adapter_id)(cluster_cpu_total{$1})-sum by (adapter_id)(cluster_cpu_avail{$1}))/sum by (adapter_id)(cluster_cpu_total{$1})",
"center_memory_usage": "(sum by (adapter_id)(cluster_memory_total{$1})-sum by (adapter_id)(cluster_memory_avail{$1}))/sum by (adapter_id)(cluster_memory_total{$1})",
"center_disk_usage": "(sum by (adapter_id)(cluster_disk_total{$1})-sum by (adapter_id)(cluster_disk_avail{$1}))/sum by (adapter_id)(cluster_disk_total{$1})",
"center_top3": "topk(3,((sum by (adapter_id)(cluster_cpu_total)-sum by (adapter_id)(cluster_cpu_avail))/sum by (adapter_id)(cluster_cpu_total) + (sum by (adapter_id)(cluster_memory_total) - sum by (adapter_id)(cluster_memory_avail))/sum by (adapter_id)(cluster_memory_total) + (sum by (adapter_id)(cluster_disk_total)-sum by (adapter_id)(cluster_disk_avail))/sum by (adapter_id)(cluster_disk_total))/3)",
"namespace_cpu_usage": `round(namespace:container_cpu_usage_seconds_total:sum_rate{namespace!="", $1}, 0.001)`,
"namespace_memory_usage": `namespace:container_memory_usage_bytes:sum{namespace!="", $1}`,
"namespace_memory_usage_wo_cache": `namespace:container_memory_usage_bytes_wo_cache:sum{namespace!="", $1}`,
@ -58,6 +61,8 @@ var promQLTemplates = map[string]string{
func makeExpr(metric string, opts QueryOptions) string {
tmpl := promQLTemplates[metric]
switch opts.Level {
case LevelAdapter:
return makeAdapterMetricExpr(tmpl, opts)
case LevelCluster:
return makeClusterMetricExpr(tmpl, opts)
case LevelNode:
@ -91,6 +96,16 @@ func makeClusterMetricExpr(tmpl string, o QueryOptions) string {
return strings.Replace(tmpl, "$1", clusterSelector, -1)
}
// makeAdapterMetricExpr renders a PromQL template for the adapter level by
// substituting every "$1" placeholder with an adapter_id label selector.
// When o.AdapterId is zero (unset), the placeholder is replaced with an
// empty string, leaving an unrestricted matcher such as {}.
func makeAdapterMetricExpr(tmpl string, o QueryOptions) string {
	selector := ""
	if o.AdapterId != 0 {
		selector = fmt.Sprintf(`adapter_id="%d"`, o.AdapterId)
	}
	return strings.ReplaceAll(tmpl, "$1", selector)
}
func makeNodeMetricExpr(tmpl string, o QueryOptions) string {
var nodeSelector string
if o.NodeName != "" {

View File

@ -28,7 +28,6 @@ const (
LevelWorkspace
LevelNamespace
LevelApplication
LevelOpenpitrix
LevelController
LevelService
LevelPod
@ -36,9 +35,11 @@ const (
LevelPVC
LevelComponent
LevelIngress
LevelAdapter
)
var MeteringLevelMap = map[string]int{
"LevelAdapter": LevelAdapter,
"LevelCluster": LevelCluster,
"LevelNode": LevelNode,
"LevelWorkspace": LevelWorkspace,
@ -78,6 +79,7 @@ type QueryOptions struct {
PodName string
PodsName string
ContainerName string
AdapterId int64
ServiceName string
Ingress string
Job string
@ -89,6 +91,15 @@ func NewQueryOptions() *QueryOptions {
return &QueryOptions{}
}
// AdapterOption scopes a metric query to a single adapter.
// Passing it to GetRawData selects adapter-level PromQL templates.
type AdapterOption struct {
	AdapterId int64
}

// Apply records the adapter id on the query options and switches the
// query level to LevelAdapter.
func (opt AdapterOption) Apply(o *QueryOptions) {
	o.AdapterId = opt.AdapterId
	o.Level = LevelAdapter
}
// ClusterOption scopes a metric query to a single cluster by name.
// (Its Apply method is defined elsewhere in this file.)
type ClusterOption struct {
	ClusterName string
}